Hi,
I have an app developed in Xamarin.Forms that uses a speech-to-text feature. It works fine on Android and on iPhone, but on iPad it crashes on the first or second tap with the following runtime exception:
Hardware Model: iPad4,8
OS Version: iPhone OS 10.2.1 (14D27)
```
Application Specific Information:
*** Terminating app due to uncaught exception 'com.apple.coreaudio.avfaudio', reason: 'required condition is false: IsFormatSampleRateAndChannelCountValid(format)'
Last Exception Backtrace:
0 CoreFoundation 0x000000018cddd1b8 __exceptionPreprocess + 124
1 libobjc.A.dylib 0x000000018b81455c objc_exception_throw + 52
2 CoreFoundation 0x000000018cddd08c +[NSException raise:format:arguments:] + 100
3 AVFAudio 0x00000001a62a3300 AVAE_RaiseException(NSString*, ...) + 56
4 AVFAudio 0x00000001a6318abc AVAudioNodeImplBase::CreateRecordingTap(unsigned long, unsigned int, AVAudioFormat*, void (AVAudioPCMBuffer*, AVAudioTime*) block_pointer) + 268
5 AVFAudio 0x00000001a6316718 -[AVAudioNode installTapOnBus:bufferSize:format:block:] + 212
6 AgriSynciOS 0x00000001015d7588 wrapper_managed_to_native_ObjCRuntime_Messaging_objc_msgSend_intptr_intptr_System_nuint_uint_intptr_intptr (<unknown>:1)
7 AgriSynciOS 0x0000000101590788 AVFoundation_AVAudioNode_InstallTapOnBus_System_nuint_uint_AVFoundation_AVAudioFormat_AVFoundation_AVAudioNodeTapBlock (AVAudioNode.g.cs:118)
```
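From the message, it looks like `IsFormatSampleRateAndChannelCountValid` rejects the format passed to `InstallTapOnBus`, i.e. the input node's format on the iPad apparently has an invalid sample rate or channel count at that point. As a diagnostic (just a sketch, using the same `AudioEngine` field as in the class below), the format can be logged right before installing the tap:

```
// Diagnostic sketch: log what the input node reports before installing the tap.
// If either value prints as 0, that would explain the failed assertion.
var inputFormat = AudioEngine.InputNode.GetBusOutputFormat(0);
Console.WriteLine("Input format: {0} Hz, {1} channel(s)",
    inputFormat.SampleRate, inputFormat.ChannelCount);
```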
Below is the code (used via DependencyService):
```
using System;
using AVFoundation;
using Foundation;
using Speech;

public class SpeechToTextImplementation : ISpeechToText
{
    private Action<EventArgsVoiceRecognition> _callback;

    #region Private Variables
    private AVAudioEngine AudioEngine;
    private SFSpeechRecognizer SpeechRecognizer;
    private SFSpeechAudioBufferRecognitionRequest LiveSpeechRequest;
    private SFSpeechRecognitionTask RecognitionTask;
    #endregion

    public void InitializeProperties()
    {
        try
        {
            if (AudioEngine == null)
                AudioEngine = new AVAudioEngine();
            if (SpeechRecognizer == null)
                SpeechRecognizer = new SFSpeechRecognizer();
            if (LiveSpeechRequest == null)
                LiveSpeechRequest = new SFSpeechAudioBufferRecognitionRequest();
        }
        catch (Exception ex)
        {
            LogController.LogError(ex.Message, ex);
        }
    }

    public void Start(Action<EventArgsVoiceRecognition> handler)
    {
        _callback = handler;
        AskPermission();
    }

    public void Stop()
    {
        CancelRecording();
    }

    void AskPermission()
    {
        try
        {
            // Request user authorization for speech recognition.
            SFSpeechRecognizer.RequestAuthorization((SFSpeechRecognizerAuthorizationStatus status) =>
            {
                // Take action based on status.
                switch (status)
                {
                    case SFSpeechRecognizerAuthorizationStatus.Authorized:
                        InitializeProperties();
                        StartRecordingSession();
                        break;
                    case SFSpeechRecognizerAuthorizationStatus.Denied:
                        // User has declined speech recognition.
                        break;
                    case SFSpeechRecognizerAuthorizationStatus.NotDetermined:
                        // Waiting on approval.
                        break;
                    case SFSpeechRecognizerAuthorizationStatus.Restricted:
                        // The device is not permitted.
                        break;
                }
            });
        }
        catch (Exception ex)
        {
            LogController.LogError("SpeechRecognition::AskPermission", ex);
        }
    }

    public void StartRecordingSession()
    {
        try
        {
            // I also tried an explicit format instead of the input node's own:
            //var format = new AVAudioFormat(AVAudioCommonFormat.PCMInt16, 44100, 2, false);

            // Start recording.
            AudioEngine.InputNode.InstallTapOnBus(
                bus: 0,
                bufferSize: 1024,
                format: AudioEngine.InputNode.GetBusOutputFormat(0),
                tapBlock: (buffer, when) => LiveSpeechRequest?.Append(buffer)); // The exception is thrown here.

            AudioEngine.Prepare();
            NSError error;
            AudioEngine.StartAndReturnError(out error);

            // Did recording start?
            if (error != null)
            {
                return;
            }
            CheckAndStartRecognition();
        }
        catch (Exception ex)
        {
            Console.WriteLine(ex.Message);
        }
    }

    public void CheckAndStartRecognition()
    {
        // Cancel any recognition task that is still running before starting a new one.
        if (RecognitionTask?.State == SFSpeechRecognitionTaskState.Running)
        {
            CancelRecording();
        }
        StartVoiceRecognition();
    }

    public void StartVoiceRecognition()
    {
        try
        {
            RecognitionTask = SpeechRecognizer.GetRecognitionTask(LiveSpeechRequest,
                (SFSpeechRecognitionResult result, NSError err) =>
                {
                    try
                    {
                        if (result == null)
                        {
                            CancelRecording();
                            return;
                        }

                        // Was there an error?
                        if (err != null)
                        {
                            CancelRecording();
                            return;
                        }

                        // Report the partial transcription as it arrives.
                        if (result.BestTranscription?.FormattedString != null)
                        {
                            Console.WriteLine("You said \"{0}\".", result.BestTranscription.FormattedString);
                            TextChanged(result.BestTranscription.FormattedString);
                        }

                        // Is this the final transcription?
                        if (result.Final)
                        {
                            TextChanged(result.BestTranscription.FormattedString, true);
                            CancelRecording();
                        }
                    }
                    catch (Exception ex)
                    {
                        Console.WriteLine(ex.Message);
                        CancelRecording();
                    }
                });
        }
        catch (Exception ex)
        {
            Console.WriteLine(ex.Message);
        }
    }

    public void StopRecording()
    {
        try
        {
            AudioEngine?.Stop();
            LiveSpeechRequest?.EndAudio();
        }
        catch (Exception ex)
        {
            Console.WriteLine(ex.Message);
        }
    }

    public void CancelRecording()
    {
        try
        {
            AudioEngine?.Stop();
            RecognitionTask?.Cancel();
        }
        catch (Exception ex)
        {
            Console.WriteLine(ex.Message);
        }
    }

    public void TextChanged(string text, bool isFinal = false)
    {
        _callback?.Invoke(new EventArgsVoiceRecognition(text, isFinal));
    }
}
```
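As shown in the commented-out line, I also experimented with passing an explicit `AVAudioFormat` instead of the input node's own format. One direction I have not verified yet (just a sketch; the category choice and error handling are my assumptions) is configuring and activating the shared `AVAudioSession` for recording before starting the engine:

```
// Sketch only, not verified on the failing iPad: activate the shared audio
// session for recording before AudioEngine.Prepare()/StartAndReturnError.
NSError sessionError;
var session = AVAudioSession.SharedInstance();
session.SetCategory(AVAudioSession.CategoryRecord, out sessionError);
if (sessionError == null)
    session.SetActive(true, out sessionError);
if (sessionError != null)
    Console.WriteLine(sessionError.LocalizedDescription);
```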
Any thoughts on this?