Remove the application logic code from the view code.

Jeremy Rand 2021-10-19 00:48:37 -04:00
parent ada7984d32
commit fbdc3c4f99
4 changed files with 19 additions and 214 deletions
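
The new SpeechForwarder.swift is registered in the project file below, but its contents do not appear in this diff. As a rough sketch only, inferred from the members removed from ContentView.swift (the @State properties presumably become @Published so that the view's new @StateObject can observe them; method bodies are elided here):

import Speech
import SwiftUI

// Hypothetical reconstruction of SpeechForwarder.swift; the actual file
// is not part of this diff.
class SpeechForwarder: ObservableObject {
    // UI-facing state, formerly @State in ContentView.
    @Published var listening = false
    @Published var listenEnabled = false
    @Published var log = ""
    @Published var ipAddress = ""
    @Published var isEditing = false

    // Protocol constants and connection state moved out of the view.
    let LISTEN_STATE_MSG = 1
    let LISTEN_TEXT_MSG = 2
    let port = 19026
    private var client: TCPClient?   // TCPClient comes from the vendored SwiftSocket sources

    // Speech recognition machinery moved out of the view.
    private let speechRecognizer = SFSpeechRecognizer(locale: Locale(identifier: "en-US"))!
    private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
    private var recognitionTask: SFSpeechRecognitionTask?
    private let audioEngine = AVAudioEngine()

    func validate(destination: String) { /* connect client and set listenEnabled, as before */ }
    func listen() { /* toggle listening; start or stop recording and notify the GS */ }
}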

View File: ListenerGS.xcodeproj/project.pbxproj

@@ -24,6 +24,7 @@
9D51566526A36F6D0075EBC7 /* BinUtils.swift in Sources */ = {isa = PBXBuildFile; fileRef = 9D51566426A36F6C0075EBC7 /* BinUtils.swift */; };
9D51567326A36FEC0075EBC7 /* BinUtils.LICENSE in Resources */ = {isa = PBXBuildFile; fileRef = 9D51567226A36FEC0075EBC7 /* BinUtils.LICENSE */; };
9D51567E26A370380075EBC7 /* SwiftSocket.LICENSE in Resources */ = {isa = PBXBuildFile; fileRef = 9D51567D26A370380075EBC7 /* SwiftSocket.LICENSE */; };
+ 9D6ED23A271E6BD600D773CD /* SpeechForwarder.swift in Sources */ = {isa = PBXBuildFile; fileRef = 9D6ED239271E6BD600D773CD /* SpeechForwarder.swift */; };
/* End PBXBuildFile section */
/* Begin PBXContainerItemProxy section */
@@ -71,6 +72,7 @@
9D51566426A36F6C0075EBC7 /* BinUtils.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = BinUtils.swift; sourceTree = "<group>"; };
9D51567226A36FEC0075EBC7 /* BinUtils.LICENSE */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = BinUtils.LICENSE; sourceTree = "<group>"; };
9D51567D26A370380075EBC7 /* SwiftSocket.LICENSE */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = SwiftSocket.LICENSE; sourceTree = "<group>"; };
+ 9D6ED239271E6BD600D773CD /* SpeechForwarder.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SpeechForwarder.swift; sourceTree = "<group>"; };
/* End PBXFileReference section */
/* Begin PBXFrameworksBuildPhase section */
@@ -128,6 +130,7 @@
9D0DC15826F2E47A007EB92D /* ListenerGS.entitlements */,
9D5155F226A1EF7B0075EBC7 /* ListenerGSApp.swift */,
9D5155F426A1EF7B0075EBC7 /* ContentView.swift */,
+ 9D6ED239271E6BD600D773CD /* SpeechForwarder.swift */,
9D51566326A36F530075EBC7 /* BinUtils */,
9D51563626A36AD60075EBC7 /* SwiftSocket */,
9D5155F626A1EF7C0075EBC7 /* Assets.xcassets */,
@@ -356,6 +359,7 @@
9D51565326A36B410075EBC7 /* yudpsocket.c in Sources */,
9D51565226A36B410075EBC7 /* Result.swift in Sources */,
9D5155F526A1EF7B0075EBC7 /* ContentView.swift in Sources */,
+ 9D6ED23A271E6BD600D773CD /* SpeechForwarder.swift in Sources */,
9D5155F326A1EF7B0075EBC7 /* ListenerGSApp.swift in Sources */,
9D51566526A36F6D0075EBC7 /* BinUtils.swift in Sources */,
9D51565426A36B410075EBC7 /* ytcpsocket.c in Sources */,

View File: xcschememanagement.plist

@@ -9,6 +9,11 @@
<key>orderHint</key>
<integer>0</integer>
</dict>
+ <key>ListenerGS.xcscheme_^#shared#^_</key>
+ <dict>
+ <key>orderHint</key>
+ <integer>0</integer>
+ </dict>
</dict>
</dict>
</plist>

View File: ContentView.swift

@@ -9,237 +9,33 @@ import SwiftUI
import Speech
struct ContentView: View {
- @State private var listening = false
- @State private var listenEnabled = false
- @State private var textHeard = ""
- @State private var log = ""
- @State private var ipAddress = ""
- @State private var isEditing = false
- let LISTEN_STATE_MSG = 1
- let LISTEN_TEXT_MSG = 2
- let port = 19026
- @State private var client: TCPClient?
- private let speechRecognizer = SFSpeechRecognizer(locale: Locale(identifier: "en-US"))!
- @State private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
- @State private var recognitionTask: SFSpeechRecognitionTask?
- private let audioEngine = AVAudioEngine()
+ @StateObject private var speechForwarder = SpeechForwarder()
var body: some View {
VStack {
TextField("IP Address", text: $ipAddress) { isEditing in
self.isEditing = isEditing
TextField("IP Address", text: $speechForwarder.ipAddress) { isEditing in
speechForwarder.isEditing = isEditing
} onCommit: {
- validate(destination: ipAddress)
+ speechForwarder.validate(destination: speechForwarder.ipAddress)
}
.padding()
ScrollView() {
- Text(log)
+ Text(speechForwarder.log)
.multilineTextAlignment(.leading)
}
Button("Listen") {
- listen()
+ speechForwarder.listen()
}
.padding()
- .background(listening ? Color.red : Color.clear)
- .foregroundColor(listening ? .black : .blue)
- .disabled(listenEnabled == false)
+ .background(speechForwarder.listening ? Color.red : Color.clear)
+ .foregroundColor(speechForwarder.listening ? .black : .blue)
+ .disabled(speechForwarder.listenEnabled == false)
.frame(maxWidth: .infinity)
.buttonStyle(PlainButtonStyle())
}
}
- func logError(message: String) {
- log.append("ERROR: " + message + "\n")
- }
- func logEvent(message: String) {
- log.append("EVENT: " + message + "\n")
- }
- func validate(destination : String) {
- logEvent(message: "Attempting to connect to " + destination)
- client = TCPClient(address: destination, port: Int32(port))
- guard let client = client else { return }
- switch client.connect(timeout: 10) {
- case .success:
- listenEnabled = true
- logEvent(message: "Connected to " + destination)
- case .failure(let error):
- client.close()
- self.client = nil
- logError(message: String(describing: error))
- break
- }
- }
- func listen() {
- self.listening.toggle()
- if (self.listening) {
- SFSpeechRecognizer.requestAuthorization { authStatus in
- // The authorization status results in changes to the
- // apps interface, so process the results on the apps
- // main queue.
- OperationQueue.main.addOperation {
- switch authStatus {
- case .authorized:
- break
- case .denied:
- self.listening = false
- break
- case .restricted:
- self.listening = false
- break
- case .notDetermined:
- self.listening = false
- break
- default:
- self.listening = false
- break
- }
- }
- }
- }
- guard let client = client else { return }
- if (self.listening) {
- switch (client.send(data: isListening())) {
- case .success:
- break
- case .failure(let error):
- self.listening = false
- logError(message: String(describing: error))
- }
- }
- if (self.listening) {
- do {
- try startRecording()
- logEvent(message: "Listening...")
- }
- catch {
- self.listening = false
- }
- }
- if (!self.listening) {
- logEvent(message: "Listening stopped")
- audioEngine.stop()
- recognitionRequest?.endAudio()
- switch (client.send(data: isListening())) {
- case .success:
- break
- case .failure(let error):
- self.listening = false
- logError(message: String(describing: error))
- }
- }
- }
- private func isListening() -> Data {
- return pack("<hh", [LISTEN_STATE_MSG, listening ? 1 : 0])
- }
- private func send(latestText : String) {
- guard let client = client else { return }
- var commonChars = self.textHeard.count
- while (commonChars > 0) {
- if (latestText.prefix(commonChars) == self.textHeard.prefix(commonChars)) {
- break
- }
- commonChars -= 1
- }
- var stringToSend = ""
- if (commonChars < self.textHeard.count) {
- stringToSend = String(repeating: "\u{7f}", count: self.textHeard.count - commonChars)
- }
- stringToSend.append(contentsOf: latestText.suffix(latestText.count - commonChars).replacingOccurrences(of: "\n", with: "\r"))
- if (stringToSend.count > 0) {
- // TODO - Handle strings to send that are longer than 64K (doubt that would happen though)
- // TODO - Try to convert encoding from utf8 to something the GS can understand.
- switch (client.send(data: pack("<hh\(stringToSend.count)s", [LISTEN_TEXT_MSG, stringToSend.count, stringToSend]))) {
- case .success:
- self.textHeard = latestText
- logEvent(message: "Sent \"" + stringToSend + "\"")
- break
- case .failure(let error):
- self.listening = false
- logError(message: String(describing: error))
- }
- }
- }
- private func startRecording() throws {
- // Cancel the previous task if it's running.
- recognitionTask?.cancel()
- self.recognitionTask = nil
- // Configure the audio session for the app.
- let audioSession = AVAudioSession.sharedInstance()
- try audioSession.setCategory(.record, mode: .measurement, options: .duckOthers)
- try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
- let inputNode = audioEngine.inputNode
- // Create and configure the speech recognition request.
- recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
- guard let recognitionRequest = recognitionRequest else { fatalError("Unable to create a SFSpeechAudioBufferRecognitionRequest object") }
- recognitionRequest.shouldReportPartialResults = true
- // Keep speech recognition data on device
- if #available(iOS 13, *) {
- recognitionRequest.requiresOnDeviceRecognition = false
- }
- self.textHeard = ""
- // Create a recognition task for the speech recognition session.
- // Keep a reference to the task so that it can be canceled.
- recognitionTask = speechRecognizer.recognitionTask(with: recognitionRequest) { result, error in
- var isFinal = false
- if let result = result {
- // Update the text view with the results.
- send(latestText: result.bestTranscription.formattedString)
- isFinal = result.isFinal
- print("Text \(result.bestTranscription.formattedString)")
- }
- if error != nil || isFinal {
- // Stop recognizing speech if there is a problem.
- self.audioEngine.stop()
- inputNode.removeTap(onBus: 0)
- self.recognitionRequest = nil
- self.recognitionTask = nil
- self.listening = false
- logEvent(message: "Listening stopped")
- guard let client = client else { return }
- client.send(data: isListening())
- }
- }
- // Configure the microphone input.
- let recordingFormat = inputNode.outputFormat(forBus: 0)
- inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer: AVAudioPCMBuffer, when: AVAudioTime) in
- self.recognitionRequest?.append(buffer)
- }
- audioEngine.prepare()
- try audioEngine.start()
- }
}
struct ContentView_Previews: PreviewProvider {
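
For reference, the wire format used by the removed code (and presumably now by SpeechForwarder) is built with the vendored BinUtils pack, Python struct-style: each message begins with two little-endian 16-bit integers. A short illustration using the names from the removed code above; listening and stringToSend stand in for the real state:

// Listen-state message: "<hh" packs two little-endian Int16 values,
// the message type (LISTEN_STATE_MSG = 1) and a 1-or-0 listening flag.
let stateMsg = pack("<hh", [LISTEN_STATE_MSG, listening ? 1 : 0])

// Text message: type (LISTEN_TEXT_MSG = 2), payload length, then the bytes.
// send(latestText:) first emits a run of 0x7f delete characters to erase the
// changed suffix of the previous partial transcription, and maps "\n" to "\r"
// before sending the new text to the GS.
let textMsg = pack("<hh\(stringToSend.count)s",
                   [LISTEN_TEXT_MSG, stringToSend.count, stringToSend])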

View File: Info.plist

@@ -19,7 +19,7 @@
<key>CFBundleShortVersionString</key>
<string>1.0</string>
<key>CFBundleVersion</key>
- <string>100</string>
+ <string>122</string>
<key>LSApplicationCategoryType</key>
<string>public.app-category.utilities</string>
<key>LSRequiresIPhoneOS</key>