#import <Speech/Speech.h>
// Adopt the SFSpeechRecognizerDelegate protocol (e.g. in the class extension):
// @interface ViewController () <SFSpeechRecognizerDelegate>
// Native Speech-framework recognition state.
// Recognizer fixed to one locale; created lazily in -speechRecognizer.
@property (nonatomic,strong) SFSpeechRecognizer *speechRecognizer;
// Pulls microphone audio through a tap; created lazily in -audioEngine.
@property (nonatomic,strong) AVAudioEngine *audioEngine;
// In-flight recognition task; nilled out when recognition finishes or errors.
@property (nonatomic,strong) SFSpeechRecognitionTask *recognitionTask;
// Buffered-audio request the mic tap appends to; nil when not recording.
@property (nonatomic,strong) SFSpeechAudioBufferRecognitionRequest *recognitionRequest;
- (void)viewDidLoad {
    [super viewDidLoad];
    // Ask for speech-recognition permission up front. The callback may arrive
    // on an arbitrary queue, so hop to the main queue before touching UI.
    [SFSpeechRecognizer requestAuthorization:^(SFSpeechRecognizerAuthorizationStatus status) {
        dispatch_async(dispatch_get_main_queue(), ^{
            NSString *title = nil;
            switch (status) {
                case SFSpeechRecognizerAuthorizationStatusNotDetermined:
                    title = @"语音识别未授权";
                    break;
                case SFSpeechRecognizerAuthorizationStatusDenied:
                    title = @"用户未授权使用语音识别";
                    break;
                case SFSpeechRecognizerAuthorizationStatusRestricted:
                    title = @"语音识别在这台设备上受到限制";
                    break;
                case SFSpeechRecognizerAuthorizationStatusAuthorized:
                    title = @"开始录音";
                    break;
                default:
                    // Unknown future status: leave the button untouched.
                    return;
            }
            BOOL authorized = (status == SFSpeechRecognizerAuthorizationStatusAuthorized);
            self.recordButton.enabled = authorized;
            [self.recordButton setTitle:title
                               forState:(authorized ? UIControlStateNormal
                                                    : UIControlStateDisabled)];
        });
    }];
}
#pragma mark 原生语音识别
// Toggles recording: a running engine is stopped and the pending request is
// told no more audio will arrive; otherwise a new recognition session starts.
- (void)recordButtonClicked {
    if (!self.audioEngine.isRunning) {
        [self startRecording];
        return;
    }
    [self.audioEngine stop];
    // endAudio lets the recognizer finish what it already buffered.
    // Messaging a nil _recognitionRequest is a harmless no-op.
    [_recognitionRequest endAudio];
}
// Configures the audio session, wires the microphone into a live
// SFSpeechAudioBufferRecognitionRequest, and starts streaming recognition.
// Partial results are logged as they arrive; the tap and task are torn down
// when recognition finishes or fails.
- (void)startRecording {
    // Cancel any recognition still in flight before starting a new one.
    if (_recognitionTask) {
        [_recognitionTask cancel];
        _recognitionTask = nil;
    }

    AVAudioSession *audioSession = [AVAudioSession sharedInstance];
    NSError *error = nil;
    // Check the BOOL return values, not the error pointer: Cocoa may leave
    // *error untouched on success. The original relied on NSParameterAssert,
    // which is compiled out of release builds, so it would continue with a
    // broken session in production; bail out explicitly instead.
    if (![audioSession setCategory:AVAudioSessionCategoryRecord error:&error] ||
        ![audioSession setMode:AVAudioSessionModeMeasurement error:&error] ||
        ![audioSession setActive:YES
                     withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation
                           error:&error]) {
        NSLog(@"audio session setup failed: %@", error);
        return;
    }

    _recognitionRequest = [[SFSpeechAudioBufferRecognitionRequest alloc] init];
    AVAudioInputNode *inputNode = self.audioEngine.inputNode;
    NSAssert(inputNode, @"录入设备没有准备好");
    NSAssert(_recognitionRequest, @"请求初始化失败");
    // Stream partial transcriptions instead of waiting for the final result.
    _recognitionRequest.shouldReportPartialResults = YES;

    // The handler block is retained by the task, which self retains; the
    // original called [self.audioEngine stop] inside it, capturing self
    // strongly and creating a retain cycle. Use strongSelf throughout.
    __weak typeof(self) weakSelf = self;
    _recognitionTask = [self.speechRecognizer recognitionTaskWithRequest:_recognitionRequest resultHandler:^(SFSpeechRecognitionResult * _Nullable result, NSError * _Nullable error) {
        __strong typeof(weakSelf) strongSelf = weakSelf;
        if (!strongSelf) return;
        BOOL isFinal = NO;
        if (result) {
            NSLog(@"%@", result.bestTranscription.formattedString);
            isFinal = result.isFinal;
        }
        if (error || isFinal) {
            [strongSelf.audioEngine stop];
            [inputNode removeTapOnBus:0];
            strongSelf.recognitionTask = nil;
            strongSelf.recognitionRequest = nil;
        }
    }];

    // Feed microphone buffers into the request. appendAudioPCMBuffer: on a
    // nil request (after teardown) is a safe no-op.
    AVAudioFormat *recordingFormat = [inputNode outputFormatForBus:0];
    [inputNode installTapOnBus:0 bufferSize:1024 format:recordingFormat block:^(AVAudioPCMBuffer * _Nonnull buffer, AVAudioTime * _Nonnull when) {
        [weakSelf.recognitionRequest appendAudioPCMBuffer:buffer];
    }];

    [self.audioEngine prepare];
    if (![self.audioEngine startAndReturnError:&error]) {
        NSLog(@"audio engine failed to start: %@", error);
    }
}
#pragma mark - lazyload
/// Lazily creates the engine used to tap microphone input.
- (AVAudioEngine *)audioEngine {
    if (_audioEngine != nil) {
        return _audioEngine;
    }
    _audioEngine = [[AVAudioEngine alloc] init];
    return _audioEngine;
}
/// Lazily creates the recognizer and registers self as its delegate.
- (SFSpeechRecognizer *)speechRecognizer {
    if (_speechRecognizer != nil) {
        return _speechRecognizer;
    }
    // Give the recognizer a language; zh_CN selects Simplified Chinese.
    NSLocale *chineseLocale = [[NSLocale alloc] initWithLocaleIdentifier:@"zh_CN"];
    _speechRecognizer = [[SFSpeechRecognizer alloc] initWithLocale:chineseLocale];
    _speechRecognizer.delegate = self;
    return _speechRecognizer;
}
#pragma mark - SFSpeechRecognizerDelegate
/// Called when the recognizer's availability changes (e.g. network loss).
/// The original left this empty, so the record button stayed enabled even
/// when recognition could no longer run; mirror availability into the UI.
- (void)speechRecognizer:(SFSpeechRecognizer *)speechRecognizer availabilityDidChange:(BOOL)available{
    self.recordButton.enabled = available;
}