Mirror of https://github.com/openclaw/openclaw.git (synced 2026-04-19 11:08:37 +00:00).
Commit: fix(ios): eliminate Swift warnings and clean build logs.
This commit is contained in:
@@ -10,7 +10,7 @@ import Speech
|
||||
// This file intentionally centralizes talk mode state + behavior.
|
||||
// It's large, and splitting would force `private` -> `fileprivate` across many members.
|
||||
// We'll refactor into smaller files when the surface stabilizes.
|
||||
// swiftlint:disable type_body_length
|
||||
// swiftlint:disable type_body_length file_length
|
||||
@MainActor
|
||||
@Observable
|
||||
final class TalkModeManager: NSObject {
|
||||
@@ -156,9 +156,7 @@ final class TalkModeManager: NSObject {
|
||||
let micOk = await Self.requestMicrophonePermission()
|
||||
guard micOk else {
|
||||
self.logger.warning("start blocked: microphone permission denied")
|
||||
self.statusText = Self.permissionMessage(
|
||||
kind: "Microphone",
|
||||
status: AVAudioSession.sharedInstance().recordPermission)
|
||||
self.statusText = "Microphone permission denied"
|
||||
return
|
||||
}
|
||||
let speechOk = await Self.requestSpeechPermission()
|
||||
@@ -300,9 +298,7 @@ final class TalkModeManager: NSObject {
|
||||
if !self.allowSimulatorCapture {
|
||||
let micOk = await Self.requestMicrophonePermission()
|
||||
guard micOk else {
|
||||
self.statusText = Self.permissionMessage(
|
||||
kind: "Microphone",
|
||||
status: AVAudioSession.sharedInstance().recordPermission)
|
||||
self.statusText = "Microphone permission denied"
|
||||
throw NSError(domain: "TalkMode", code: 4, userInfo: [
|
||||
NSLocalizedDescriptionKey: "Microphone permission denied",
|
||||
])
|
||||
@@ -470,14 +466,15 @@ final class TalkModeManager: NSObject {
|
||||
|
||||
private func startRecognition() throws {
|
||||
#if targetEnvironment(simulator)
|
||||
if self.allowSimulatorCapture {
|
||||
self.recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
|
||||
self.recognitionRequest?.shouldReportPartialResults = true
|
||||
return
|
||||
}
|
||||
if !self.allowSimulatorCapture {
|
||||
throw NSError(domain: "TalkMode", code: 2, userInfo: [
|
||||
NSLocalizedDescriptionKey: "Talk mode is not supported on the iOS simulator",
|
||||
])
|
||||
} else {
|
||||
self.recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
|
||||
self.recognitionRequest?.shouldReportPartialResults = true
|
||||
return
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -525,7 +522,9 @@ final class TalkModeManager: NSObject {
|
||||
self.noiseFloorSamples.removeAll(keepingCapacity: true)
|
||||
let threshold = min(0.35, max(0.12, avg + 0.10))
|
||||
GatewayDiagnostics.log(
|
||||
"talk audio: noiseFloor=\(String(format: "%.3f", avg)) threshold=\(String(format: "%.3f", threshold))")
|
||||
"talk audio: noiseFloor=\(String(format: "%.3f", avg)) "
|
||||
+ "threshold=\(String(format: "%.3f", threshold))"
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -549,7 +548,9 @@ final class TalkModeManager: NSObject {
|
||||
self.loggedPartialThisCycle = false
|
||||
|
||||
GatewayDiagnostics.log(
|
||||
"talk speech: recognition started mode=\(String(describing: self.captureMode)) engineRunning=\(self.audioEngine.isRunning)")
|
||||
"talk speech: recognition started mode=\(String(describing: self.captureMode)) "
|
||||
+ "engineRunning=\(self.audioEngine.isRunning)"
|
||||
)
|
||||
self.recognitionTask = recognizer.recognitionTask(with: request) { [weak self] result, error in
|
||||
guard let self else { return }
|
||||
if let error {
|
||||
@@ -1316,11 +1317,11 @@ final class TalkModeManager: NSObject {
|
||||
try Task.checkCancellation()
|
||||
chunks.append(chunk)
|
||||
}
|
||||
await self?.completeIncrementalPrefetch(id: id, chunks: chunks)
|
||||
self?.completeIncrementalPrefetch(id: id, chunks: chunks)
|
||||
} catch is CancellationError {
|
||||
await self?.clearIncrementalPrefetch(id: id)
|
||||
self?.clearIncrementalPrefetch(id: id)
|
||||
} catch {
|
||||
await self?.failIncrementalPrefetch(id: id, error: error)
|
||||
self?.failIncrementalPrefetch(id: id, error: error)
|
||||
}
|
||||
}
|
||||
self.incrementalSpeechPrefetch = IncrementalSpeechPrefetchState(
|
||||
@@ -1426,7 +1427,10 @@ final class TalkModeManager: NSObject {
|
||||
for await evt in stream {
|
||||
if Task.isCancelled { return }
|
||||
guard evt.event == "agent", let payload = evt.payload else { continue }
|
||||
guard let agentEvent = try? GatewayPayloadDecoding.decode(payload, as: OpenClawAgentEventPayload.self) else {
|
||||
guard let agentEvent = try? GatewayPayloadDecoding.decode(
|
||||
payload,
|
||||
as: OpenClawAgentEventPayload.self
|
||||
) else {
|
||||
continue
|
||||
}
|
||||
guard agentEvent.runId == runId, agentEvent.stream == "assistant" else { continue }
|
||||
@@ -1726,23 +1730,20 @@ private struct IncrementalSpeechBuffer {
|
||||
|
||||
extension TalkModeManager {
|
||||
nonisolated static func requestMicrophonePermission() async -> Bool {
|
||||
let session = AVAudioSession.sharedInstance()
|
||||
switch session.recordPermission {
|
||||
switch AVAudioApplication.shared.recordPermission {
|
||||
case .granted:
|
||||
return true
|
||||
case .denied:
|
||||
return false
|
||||
case .undetermined:
|
||||
break
|
||||
return await self.requestPermissionWithTimeout { completion in
|
||||
AVAudioApplication.requestRecordPermission(completionHandler: { ok in
|
||||
completion(ok)
|
||||
})
|
||||
}
|
||||
@unknown default:
|
||||
return false
|
||||
}
|
||||
|
||||
return await self.requestPermissionWithTimeout { completion in
|
||||
AVAudioSession.sharedInstance().requestRecordPermission { ok in
|
||||
completion(ok)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
nonisolated static func requestSpeechPermission() async -> Bool {
|
||||
@@ -1766,7 +1767,7 @@ extension TalkModeManager {
|
||||
}
|
||||
|
||||
private nonisolated static func requestPermissionWithTimeout(
|
||||
_ operation: @escaping @Sendable (@escaping (Bool) -> Void) -> Void) async -> Bool
|
||||
_ operation: @escaping @Sendable (@escaping @Sendable (Bool) -> Void) -> Void) async -> Bool
|
||||
{
|
||||
do {
|
||||
return try await AsyncTimeout.withTimeout(
|
||||
@@ -1910,7 +1911,7 @@ extension TalkModeManager {
|
||||
}
|
||||
let providerID =
|
||||
Self.normalizedTalkProviderID(rawProvider) ??
|
||||
normalizedProviders.keys.sorted().first ??
|
||||
normalizedProviders.keys.min() ??
|
||||
Self.defaultTalkProvider
|
||||
return TalkProviderConfigSelection(
|
||||
provider: providerID,
|
||||
@@ -1920,7 +1921,11 @@ extension TalkModeManager {
|
||||
func reloadConfig() async {
|
||||
guard let gateway else { return }
|
||||
do {
|
||||
let res = try await gateway.request(method: "talk.config", paramsJSON: "{\"includeSecrets\":true}", timeoutSeconds: 8)
|
||||
let res = try await gateway.request(
|
||||
method: "talk.config",
|
||||
paramsJSON: "{\"includeSecrets\":true}",
|
||||
timeoutSeconds: 8
|
||||
)
|
||||
guard let json = try JSONSerialization.jsonObject(with: res) as? [String: Any] else { return }
|
||||
guard let config = json["config"] as? [String: Any] else { return }
|
||||
let talk = config["talk"] as? [String: Any]
|
||||
@@ -2007,10 +2012,18 @@ extension TalkModeManager {
|
||||
|
||||
private static func describeAudioSession() -> String {
|
||||
let session = AVAudioSession.sharedInstance()
|
||||
let inputs = session.currentRoute.inputs.map { "\($0.portType.rawValue):\($0.portName)" }.joined(separator: ",")
|
||||
let outputs = session.currentRoute.outputs.map { "\($0.portType.rawValue):\($0.portName)" }.joined(separator: ",")
|
||||
let available = session.availableInputs?.map { "\($0.portType.rawValue):\($0.portName)" }.joined(separator: ",") ?? ""
|
||||
return "category=\(session.category.rawValue) mode=\(session.mode.rawValue) opts=\(session.categoryOptions.rawValue) inputAvail=\(session.isInputAvailable) routeIn=[\(inputs)] routeOut=[\(outputs)] availIn=[\(available)]"
|
||||
let inputs = session.currentRoute.inputs
|
||||
.map { "\($0.portType.rawValue):\($0.portName)" }
|
||||
.joined(separator: ",")
|
||||
let outputs = session.currentRoute.outputs
|
||||
.map { "\($0.portType.rawValue):\($0.portName)" }
|
||||
.joined(separator: ",")
|
||||
let available = session.availableInputs?
|
||||
.map { "\($0.portType.rawValue):\($0.portName)" }
|
||||
.joined(separator: ",") ?? ""
|
||||
return "category=\(session.category.rawValue) mode=\(session.mode.rawValue) "
|
||||
+ "opts=\(session.categoryOptions.rawValue) inputAvail=\(session.isInputAvailable) "
|
||||
+ "routeIn=[\(inputs)] routeOut=[\(outputs)] availIn=[\(available)]"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2078,7 +2091,9 @@ private final class AudioTapDiagnostics: @unchecked Sendable {
|
||||
|
||||
guard shouldLog else { return }
|
||||
GatewayDiagnostics.log(
|
||||
"\(label) mic: buffers=\(count) frames=\(frames) rate=\(Int(rate))Hz ch=\(ch) rms=\(String(format: "%.4f", resolvedRms)) max=\(String(format: "%.4f", maxRms))")
|
||||
"\(label) mic: buffers=\(count) frames=\(frames) rate=\(Int(rate))Hz ch=\(ch) "
|
||||
+ "rms=\(String(format: "%.4f", resolvedRms)) max=\(String(format: "%.4f", maxRms))"
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2135,4 +2150,4 @@ private struct IncrementalPrefetchedAudio {
|
||||
let outputFormat: String?
|
||||
}
|
||||
|
||||
// swiftlint:enable type_body_length
|
||||
// swiftlint:enable type_body_length file_length
|
||||
|
||||
Reference in New Issue
Block a user