Some minor tweaks to try to get speech recognition working from macOS. It is still not working on my x86 iMac, but I just tested this on our new M1 MacBook Air and it does work there. Not sure what the difference is; I hope it is just a problem on my iMac.

This commit is contained in:
Jeremy Rand 2022-02-21 23:22:58 -05:00
parent 72a49275a9
commit 241190600e
4 changed files with 33 additions and 8 deletions

View File

@ -273,7 +273,7 @@
9D5155E726A1EF7B0075EBC7 /* Project object */ = {
isa = PBXProject;
attributes = {
LastSwiftUpdateCheck = 1240;
LastSwiftUpdateCheck = 1320;
LastUpgradeCheck = 1240;
TargetAttributes = {
9D5155EE26A1EF7B0075EBC7 = {

View File

@ -15,5 +15,23 @@
<integer>0</integer>
</dict>
</dict>
<key>SuppressBuildableAutocreation</key>
<dict>
<key>9D62FC5F27C494D700AEE01F</key>
<dict>
<key>primary</key>
<true/>
</dict>
<key>9D62FC6F27C494D900AEE01F</key>
<dict>
<key>primary</key>
<true/>
</dict>
<key>9D62FC7927C494D900AEE01F</key>
<dict>
<key>primary</key>
<true/>
</dict>
</dict>
</dict>
</plist>

View File

@ -19,7 +19,7 @@
<key>CFBundleShortVersionString</key>
<string>1.0</string>
<key>CFBundleVersion</key>
<string>378</string>
<string>390</string>
<key>LSApplicationCategoryType</key>
<string>public.app-category.utilities</string>
<key>LSRequiresIPhoneOS</key>

View File

@ -113,6 +113,9 @@ class SpeechForwarder : ObservableObject {
logger.debug("Stopped listening")
audioEngine.stop()
recognitionRequest?.endAudio()
recognitionTask?.cancel()
audioEngine.inputNode.removeTap(onBus: 0);
audioEngine.inputNode.reset()
switch (client.send(data: isListening())) {
case .success:
break
@ -177,6 +180,12 @@ class SpeechForwarder : ObservableObject {
try audioSession.setCategory(.record, mode: .measurement, options: .duckOthers)
try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
let inputNode = audioEngine.inputNode
// Configure the microphone input.
let recordingFormat = inputNode.outputFormat(forBus: 0)
inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer: AVAudioPCMBuffer, when: AVAudioTime) in
self.recognitionRequest?.append(buffer)
}
// Create and configure the speech recognition request.
recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
@ -202,6 +211,10 @@ class SpeechForwarder : ObservableObject {
print("Text \(result.bestTranscription.formattedString)")
}
if error != nil {
self.logger.error("Error from recognizer: \(String(describing: error))")
}
if error != nil || isFinal {
// Stop recognizing speech if there is a problem.
self.audioEngine.stop()
@ -220,12 +233,6 @@ class SpeechForwarder : ObservableObject {
}
}
}
// Configure the microphone input.
let recordingFormat = inputNode.outputFormat(forBus: 0)
inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer: AVAudioPCMBuffer, when: AVAudioTime) in
self.recognitionRequest?.append(buffer)
}
audioEngine.prepare()
try audioEngine.start()