My app crashes when I use the microphone while Microsoft Teams is trying to record audio in the background.
The app terminates due to an uncaught exception 'com.apple.coreaudio.avfaudio', reason: 'required condition is false: IsFormatSampleRateAndChannelCountValid(format)'
See the code below:
func startRecording() {
    // Clear all previous session data and cancel task
    if recognitionTask != nil {
        recognitionTask?.cancel()
        recognitionTask = nil
    }

    // Create instance of audio session to record voice
    let audioSession = AVAudioSession.sharedInstance()
    do {
        try audioSession.setCategory(AVAudioSession.Category.record, mode: AVAudioSession.Mode.measurement, options: AVAudioSession.CategoryOptions.defaultToSpeaker)
        try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
    } catch {
        print("audioSession properties weren't set because of an error.")
    }

    self.recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
    let inputNode = audioEngine.inputNode

    guard let recognitionRequest = recognitionRequest else {
        fatalError("Unable to create an SFSpeechAudioBufferRecognitionRequest object")
    }

    recognitionRequest.shouldReportPartialResults = true

    self.recognitionTask = speechRecognizer?.recognitionTask(with: recognitionRequest, resultHandler: { (result, error) in
        var isFinal = false
        if result != nil {
            self.textField.text = result?.bestTranscription.formattedString
            isFinal = (result?.isFinal)!
        }
        if error != nil || isFinal {
            self.audioEngine.stop()
            inputNode.removeTap(onBus: 0)
            self.recognitionRequest = nil
            self.recognitionTask = nil
            self.micButton.isEnabled = true
        }
    })

    let recordingFormat = inputNode.outputFormat(forBus: 0)
    inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in
        self.recognitionRequest?.append(buffer)
    }

    self.audioEngine.prepare()
    do {
        try self.audioEngine.start()
    } catch {
        print("audioEngine couldn't start because of an error.")
    }

    self.textField.text = ""
}
I'm fairly sure the problem is in this part, but I don't know how to fix it:
let recordingFormat = inputNode.outputFormat(forBus: 0)
inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in
    self.recognitionRequest?.append(buffer)
}
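For reference, this is exactly where the crash fires: when another app (Microsoft Teams here) owns the microphone, inputNode.outputFormat(forBus: 0) can come back with a sample rate or channel count of 0, and installTap then trips the IsFormatSampleRateAndChannelCountValid assertion. A minimal sketch of a guard that checks the format before installing the tap (the early return is my illustration; the answer below folds the same idea into its error handling):

let recordingFormat = inputNode.outputFormat(forBus: 0)
// If another app holds the mic, the format is invalid (0 Hz / 0 channels);
// installing a tap with it would crash, so bail out instead.
guard recordingFormat.sampleRate > 0, recordingFormat.channelCount > 0 else {
    print("Recording format is invalid - microphone is busy")
    return
}
inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in
    self.recognitionRequest?.append(buffer)
}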
Posted on 2022-11-22 15:59:36
So the app was crashing because it wasn't getting a valid microphone channel: while another app held the mic, the recording format had no sample rate or channels.
Step 1: At the top of the code after the imports, in the file where you have let audioEngine = AVAudioEngine(), create a protocol to report the error:
protocol FeedbackViewDelegate : AnyObject {
    func showFeedbackError(title: String, message: String)
    func audioDidStart(forType type : FeedbackViewType)
}
Step 2: Add a Bool return value to the startRecording function:
func startRecording() -> Bool {
}
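The caller can then branch on the result; for example (this mirrors the button handler in the full listing below):

// Only switch the mic UI to "recording" if recording actually started.
if self.startRecording() {
    self.micImageView.image = UIImage(named: "micRed")
}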
Step 3: In the catch block of the AVAudioSession.sharedInstance() do/catch, add these lines (this prevents the crash):
let audioSession = AVAudioSession.sharedInstance()
do {
    try audioSession.setCategory(AVAudioSession.Category.playAndRecord, mode: AVAudioSession.Mode.measurement, options: AVAudioSession.CategoryOptions.defaultToSpeaker)
    try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
} catch {
    print("audioSession properties weren't set because of an error.")
    delegate?.showFeedbackError(title: "Sorry", message: "Mic is busy")
    return false
}
The return above stops the rest of the function from executing.
Step 4: Create an extension on your view controller conforming to the protocol:
extension YourViewController: FeedbackViewDelegate { // YourViewController = your view controller's class name
    func showFeedbackError(title: String, message: String) {
    }
}
Put your alert code inside this function (there are millions of examples on the web); you can create an alert and use self inside the closure.
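A minimal sketch of such an implementation, assuming the conforming type is a UIViewController subclass (the class name YourViewController and the alert wording are placeholders, not from the answer):

extension YourViewController: FeedbackViewDelegate {
    func showFeedbackError(title: String, message: String) {
        // Hop to the main thread before touching UI, then present a plain alert.
        DispatchQueue.main.async {
            let alert = UIAlertController(title: title, message: message, preferredStyle: .alert)
            alert.addAction(UIAlertAction(title: "OK", style: .default))
            self.present(alert, animated: true)
        }
    }

    func audioDidStart(forType type: FeedbackViewType) {
        // The protocol also requires this; react to recording starting if needed.
    }
}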
Posted on 2022-11-23 17:21:03
fileprivate let NibName = "FeedbackView"

protocol FeedbackViewDelegate : AnyObject {
    func showFeedbackError(title: String, message: String)
    func audioDidStart(forType type : FeedbackViewType)
}

enum FeedbackViewType {
    case feedbackView, rootcauseView, suggestionView, actionView
}

class FeedbackView: UIControl, ViewLoadable, SFSpeechRecognizerDelegate {
    @IBOutlet weak var textField: UITextField!
    static var nibName: String = NibName
    var feedbackViewType : FeedbackViewType = .feedbackView
    @IBOutlet var contentView: UIView!
    @IBOutlet weak var micButton: UIButton!
    @IBOutlet weak var micView: DefaultCardView!
    @IBOutlet weak var micImageView: UIImageView!
    weak var delegate : FeedbackViewDelegate?
    var allowTextEntry = true
    let speechRecognizer = SFSpeechRecognizer(locale: Locale(identifier: "en-US"))
    var recognitionRequest : SFSpeechAudioBufferRecognitionRequest?
    var recognitionTask : SFSpeechRecognitionTask?
    let audioEngine = AVAudioEngine()

    override init(frame: CGRect) {
        super.init(frame: frame)
        commonInit()
    }

    required public init?(coder aDecoder: NSCoder) {
        super.init(coder: aDecoder)
        commonInit()
    }

    init() {
        super.init(frame: CGRect.zero)
        commonInit()
    }

    private func commonInit() {
        Bundle(for: type(of: self)).loadNibNamed(NibName, owner: self, options: nil)
        backgroundColor = .clear
        addSubview(contentView)
        contentView.frame = self.bounds
        contentView.autoresizingMask = [.flexibleHeight, .flexibleWidth]
    }
    func configure(text: String, placeholder: String, contentType: UITextContentType, keyboardType: UIKeyboardType) {
        print("Did configure keyboard")
        self.textField.textContentType = contentType
        self.textField.isSecureTextEntry = (contentType == .password)
        self.textField.keyboardType = keyboardType
        self.textField.delegate = self
        self.textField.placeholder = placeholder
        if(!text.isEmpty) {
            self.textField.text = text
        }
    }

    @IBAction func btnStartSpeechToText(_ sender: UIButton) {
        // allowTextEntry = false
        if audioEngine.isRunning {
            let audioText = textField.text
            self.audioEngine.stop()
            DispatchQueue.main.asyncAfter(deadline: .now() + 0.2) {
                self.textField.text = audioText
                // self.allowTextEntry = true
            }
            textField.text = audioText
            self.micButton.isEnabled = true
            self.micImageView.image = UIImage(named: "mic")
        } else {
            print("Audio did start")
            self.delegate?.audioDidStart(forType: self.feedbackViewType)
            self.setupSpeech()
            if self.startRecording() {
                self.micImageView.image = UIImage(named: "micRed")
            }
        }
    }

    func stopRecording() {
        // allowTextEntry = false
        let audioText = textField.text
        self.audioEngine.stop()
        self.recognitionRequest?.endAudio()
        DispatchQueue.main.asyncAfter(deadline: .now() + 0.2) {
            self.textField.text = audioText
            // self.allowTextEntry = true
        }
        self.micButton.isEnabled = true
        self.micImageView.image = UIImage(named: "mic")
    }
    func setupSpeech() {
        // self.micButton.isEnabled = false
        self.speechRecognizer?.delegate = self
        SFSpeechRecognizer.requestAuthorization { (authStatus) in
            var isButtonEnabled = false
            switch authStatus {
            case .authorized:
                isButtonEnabled = true
            case .denied:
                isButtonEnabled = false
                print("User denied access to speech recognition")
            case .restricted:
                isButtonEnabled = false
                print("Speech recognition restricted on this device")
            case .notDetermined:
                isButtonEnabled = false
                print("Speech recognition not yet authorized")
            }
            OperationQueue.main.addOperation() {
                // self.micButton.isEnabled = isButtonEnabled
            }
        }
    }

    // func audioInputIsBusy(recordingFormat: AVAudioFormat) -> Bool {
    //     guard recordingFormat.sampleRate == 0 || recordingFormat.channelCount == 0 else {
    //         return false
    //     }
    //     return true
    // }
    func startRecording() -> Bool {
        // Clear all previous session data and cancel task
        if recognitionTask != nil {
            recognitionTask?.cancel()
            recognitionTask = nil
        }

        // Create instance of audio session to record voice
        let audioSession = AVAudioSession.sharedInstance()
        do {
            try audioSession.setCategory(AVAudioSession.Category.playAndRecord, mode: AVAudioSession.Mode.measurement, options: AVAudioSession.CategoryOptions.defaultToSpeaker)
            try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
        } catch {
            print("audioSession properties weren't set because of an error.")
            delegate?.showFeedbackError(title: "Sorry", message: "Mic is busy")
            return false
        }

        self.recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
        let inputNode = audioEngine.inputNode

        guard let recognitionRequest = recognitionRequest else {
            fatalError("Unable to create an SFSpeechAudioBufferRecognitionRequest object")
        }

        recognitionRequest.shouldReportPartialResults = true

        self.recognitionTask = speechRecognizer?.recognitionTask(with: recognitionRequest, resultHandler: { (result, error) in
            var isFinal = false
            if result != nil {
                self.textField.text = result?.bestTranscription.formattedString
                isFinal = (result?.isFinal)!
            }
            if error != nil || isFinal {
                self.audioEngine.stop()
                inputNode.removeTap(onBus: 0)
                self.recognitionRequest = nil
                self.recognitionTask = nil
                self.micButton.isEnabled = true
            }
        })

        let recordingFormat = inputNode.outputFormat(forBus: 0)
        inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in
            self.recognitionRequest?.append(buffer)
        }

        self.audioEngine.prepare()
        do {
            try self.audioEngine.start()
        } catch {
            print("audioEngine couldn't start because of an error.")
            delegate?.showFeedbackError(title: "Sorry", message: "Your microphone is used somewhere else")
            return false
        }

        self.textField.text = ""
        return true
    }

    func speechRecognizer(_ speechRecognizer: SFSpeechRecognizer, availabilityDidChange available: Bool) {
        if available {
            self.micButton.isEnabled = true
        } else {
            self.micButton.isEnabled = false
        }
    }
}
extension FeedbackView: UITextFieldDelegate {
    func textFieldShouldReturn(_ textField: UITextField) -> Bool {
        self.endEditing(true)
        return false
    }

    func textField(_ textField: UITextField, shouldChangeCharactersIn range: NSRange, replacementString string: String) -> Bool {
        return allowTextEntry
    }
}
https://stackoverflow.com/questions/74516660