Mirror of https://github.com/jeremysrand/ListenerApp.git, synced 2024-12-13 20:29:47 +00:00
Some minor tweaks to try to get speech recognition working on macOS. It is still not working on my x86 iMac, but I just tested this on our new M1 MacBook Air and it does work there. Not sure what the difference is; I hope it is just a problem on my iMac.
commit 241190600e
parent 72a49275a9
@@ -273,7 +273,7 @@
         9D5155E726A1EF7B0075EBC7 /* Project object */ = {
             isa = PBXProject;
             attributes = {
-                LastSwiftUpdateCheck = 1240;
+                LastSwiftUpdateCheck = 1320;
                 LastUpgradeCheck = 1240;
                 TargetAttributes = {
                     9D5155EE26A1EF7B0075EBC7 = {
@@ -15,5 +15,23 @@
             <integer>0</integer>
         </dict>
     </dict>
+    <key>SuppressBuildableAutocreation</key>
+    <dict>
+        <key>9D62FC5F27C494D700AEE01F</key>
+        <dict>
+            <key>primary</key>
+            <true/>
+        </dict>
+        <key>9D62FC6F27C494D900AEE01F</key>
+        <dict>
+            <key>primary</key>
+            <true/>
+        </dict>
+        <key>9D62FC7927C494D900AEE01F</key>
+        <dict>
+            <key>primary</key>
+            <true/>
+        </dict>
+    </dict>
 </dict>
 </plist>
@@ -19,7 +19,7 @@
     <key>CFBundleShortVersionString</key>
     <string>1.0</string>
     <key>CFBundleVersion</key>
-    <string>378</string>
+    <string>390</string>
     <key>LSApplicationCategoryType</key>
     <string>public.app-category.utilities</string>
     <key>LSRequiresIPhoneOS</key>
@@ -113,6 +113,9 @@ class SpeechForwarder : ObservableObject {
         logger.debug("Stopped listening")
         audioEngine.stop()
+        recognitionRequest?.endAudio()
+        recognitionTask?.cancel()
         audioEngine.inputNode.removeTap(onBus: 0);
+        audioEngine.inputNode.reset()
         switch (client.send(data: isListening())) {
         case .success:
             break
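For context, a minimal sketch of the teardown order this hunk arrives at, assuming the audioEngine, recognitionRequest, and recognitionTask properties visible in the diff (the client/isListening plumbing is omitted):

import AVFoundation
import Speech

// Hypothetical free-standing version of the stop path in this hunk; the
// parameter names mirror the properties in the diff, not a confirmed API.
func stopTranscribing(audioEngine: AVAudioEngine,
                      recognitionRequest: SFSpeechAudioBufferRecognitionRequest?,
                      recognitionTask: SFSpeechRecognitionTask?) {
    audioEngine.stop()                         // stop pulling audio from the mic
    recognitionRequest?.endAudio()             // added: tell the recognizer no more buffers are coming
    recognitionTask?.cancel()                  // added: abandon any in-flight recognition
    audioEngine.inputNode.removeTap(onBus: 0)  // detach the tap installed at start
    audioEngine.inputNode.reset()              // added: clear node state so a restart begins clean
}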
@@ -177,6 +180,12 @@ class SpeechForwarder : ObservableObject {
         try audioSession.setCategory(.record, mode: .measurement, options: .duckOthers)
         try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
         let inputNode = audioEngine.inputNode
 
+        // Configure the microphone input.
+        let recordingFormat = inputNode.outputFormat(forBus: 0)
+        inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer: AVAudioPCMBuffer, when: AVAudioTime) in
+            self.recognitionRequest?.append(buffer)
+        }
+
         // Create and configure the speech recognition request.
         recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
@@ -202,6 +211,10 @@ class SpeechForwarder : ObservableObject {
                 print("Text \(result.bestTranscription.formattedString)")
             }
 
+            if error != nil {
+                self.logger.error("Error from recognizer: \(String(describing: error))")
+            }
+
             if error != nil || isFinal {
                 // Stop recognizing speech if there is a problem.
                 self.audioEngine.stop()
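The new error log sits inside the recognition task's result handler. A sketch of that callback shape, with names taken from the diff context; the stop closure is a hypothetical stand-in for the teardown code the hunk elides:

import Speech
import os

// Sketch of the result handler around the new logging. Previously a
// recognizer error only triggered teardown; now it is also logged.
func makeRecognitionTask(recognizer: SFSpeechRecognizer,
                         request: SFSpeechAudioBufferRecognitionRequest,
                         logger: Logger,
                         stop: @escaping () -> Void) -> SFSpeechRecognitionTask {
    return recognizer.recognitionTask(with: request) { result, error in
        var isFinal = false
        if let result = result {
            isFinal = result.isFinal
            print("Text \(result.bestTranscription.formattedString)")
        }
        if error != nil {
            // Added in this commit: surface recognizer failures instead of dropping them.
            logger.error("Error from recognizer: \(String(describing: error))")
        }
        if error != nil || isFinal {
            stop()  // tear down the engine and recognition state, as in the hunk above
        }
    }
}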
@@ -220,12 +233,6 @@ class SpeechForwarder : ObservableObject {
                 }
             }
         }
 
-        // Configure the microphone input.
-        let recordingFormat = inputNode.outputFormat(forBus: 0)
-        inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer: AVAudioPCMBuffer, when: AVAudioTime) in
-            self.recognitionRequest?.append(buffer)
-        }
-
         audioEngine.prepare()
         try audioEngine.start()
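Read together, the last three hunks move the microphone tap installation ahead of the request creation instead of after the task is started. A condensed sketch of the resulting start order, assuming the property names from the diff (session setup and the task itself are omitted):

import AVFoundation
import Speech

// Minimal stand-in for SpeechForwarder's start path; only the pieces
// shown in the diff are kept.
final class ForwarderSketch {
    private let audioEngine = AVAudioEngine()
    private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?

    func start() throws {
        let inputNode = audioEngine.inputNode

        // Install the tap first (the move this commit makes). Buffers that
        // arrive before the request exists fall through the optional chain
        // instead of being appended to a stale request.
        let recordingFormat = inputNode.outputFormat(forBus: 0)
        inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { buffer, _ in
            self.recognitionRequest?.append(buffer)
        }

        // Only then create the request the tap feeds.
        recognitionRequest = SFSpeechAudioBufferRecognitionRequest()

        audioEngine.prepare()
        try audioEngine.start()
    }
}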