Prelude:
With the release of iOS 10, third-party developers can build Siri-like features of their own: the new Speech framework exposes the same speech recognition engine that powers Siri. Let's take a look at the main code. We need a UITextView and a UIButton to display the results; a sketch of creating them in code follows the property definitions below.
Implementation:
- First step: Define the properties
```objc
#import <Speech/Speech.h>
#import <AVFoundation/AVFoundation.h>

@interface ViewController () <SFSpeechRecognizerDelegate>

@property (strong, nonatomic) UIButton *siriBtn;                // Siri button
@property (strong, nonatomic) UITextView *siriTextView;         // Displays the speech converted to text
@property (strong, nonatomic) SFSpeechRecognitionTask *recognitionTask;   // Speech recognition task
@property (strong, nonatomic) SFSpeechRecognizer *speechRecognizer;       // Speech recognizer
@property (strong, nonatomic) SFSpeechAudioBufferRecognitionRequest *recognitionRequest; // Recognition request
@property (strong, nonatomic) AVAudioEngine *audioEngine;       // Recording engine

@end
```
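The original post does not show how the two controls are created. Here is a minimal sketch, assuming they are built in code inside the view controller (the method name `setupSiriUI` and the frames are my own, hypothetical choices; call it from viewDidLoad). It also wires the button to the `microphoneTap:` handler from step three:

```objc
// Hypothetical helper (not from the original post): create the text view
// and button in code and hook the button up to the tap handler.
- (void)setupSiriUI {
    self.siriTextView = [[UITextView alloc] initWithFrame:CGRectMake(20, 80, 280, 300)];
    self.siriTextView.editable = NO;   // output only
    [self.view addSubview:self.siriTextView];

    self.siriBtn = [UIButton buttonWithType:UIButtonTypeSystem];
    self.siriBtn.frame = CGRectMake(20, 400, 280, 44);
    [self.siriBtn setTitle:@"Start recording" forState:UIControlStateNormal];
    [self.siriBtn addTarget:self
                     action:@selector(microphoneTap:)
           forControlEvents:UIControlEventTouchUpInside];
    [self.view addSubview:self.siriBtn];
}
```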
- Step two: Set up the recognizer and request speech recognition authorization
```objc
- (void)viewDidLoad {
    [super viewDidLoad];

    // Set the recognition language to Chinese
    NSLocale *locale = [[NSLocale alloc] initWithLocaleIdentifier:@"zh-CN"];
    self.speechRecognizer = [[SFSpeechRecognizer alloc] initWithLocale:locale];
    self.siriBtn.enabled = NO;

    // Set the delegate
    self.speechRecognizer.delegate = self;

    // Request authorization (first determine whether the device supports speech recognition)
    [SFSpeechRecognizer requestAuthorization:^(SFSpeechRecognizerAuthorizationStatus status) {
        BOOL isButtonEnabled = NO;
        switch (status) {
            case SFSpeechRecognizerAuthorizationStatusAuthorized:
                isButtonEnabled = YES;
                NSLog(@"Speech recognition is available");
                break;
            case SFSpeechRecognizerAuthorizationStatusDenied:
                isButtonEnabled = NO;
                NSLog(@"User denied access to speech recognition");
                break;
            case SFSpeechRecognizerAuthorizationStatusRestricted:
                isButtonEnabled = NO;
                NSLog(@"Speech recognition is restricted on this device");
                break;
            case SFSpeechRecognizerAuthorizationStatusNotDetermined:
                isButtonEnabled = NO;
                NSLog(@"Speech recognition not yet authorized");
                break;
            default:
                break;
        }
        // The handler may run on a background queue; UI updates belong on the main queue
        dispatch_async(dispatch_get_main_queue(), ^{
            self.siriBtn.enabled = isButtonEnabled;
        });
    }];

    // Create the recording engine
    self.audioEngine = [[AVAudioEngine alloc] init];
}
```
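Two notes before moving on. First, on iOS 10 the authorization request requires an `NSSpeechRecognitionUsageDescription` entry in Info.plist (and `NSMicrophoneUsageDescription` for microphone access), or the app will be terminated at runtime. Second, if you are unsure whether a locale such as `zh-CN` is available on a given device, the framework can enumerate its supported locales; a minimal sketch (not from the original post, e.g. at the end of viewDidLoad):

```objc
// List every locale the Speech framework can recognize on this device,
// and check whether our recognizer is currently usable.
for (NSLocale *supported in [SFSpeechRecognizer supportedLocales]) {
    NSLog(@"Supported locale: %@", supported.localeIdentifier);
}
NSLog(@"Recognizer available: %d", self.speechRecognizer.isAvailable);
```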
- Step three: The button's tap handler
```objc
- (void)microphoneTap:(UIButton *)sender {
    if ([self.audioEngine isRunning]) {
        // Already recording: stop the engine and finish the request
        [self.audioEngine stop];
        [self.recognitionRequest endAudio];
        self.siriBtn.enabled = YES;
        [self.siriBtn setTitle:@"Start recording" forState:UIControlStateNormal];
    } else {
        [self startRecording];
        [self.siriBtn setTitle:@"Stop recording" forState:UIControlStateNormal];
    }
}
```
- Fourth step: Start recording and convert the speech to text
```objc
- (void)startRecording {
    // Cancel any recognition task that is already running
    if (self.recognitionTask) {
        [self.recognitionTask cancel];
        self.recognitionTask = nil;
    }

    // Configure the audio session for recording
    AVAudioSession *audioSession = [AVAudioSession sharedInstance];
    BOOL categoryOK = [audioSession setCategory:AVAudioSessionCategoryRecord error:nil];
    BOOL modeOK = [audioSession setMode:AVAudioSessionModeMeasurement error:nil];
    BOOL activeOK = [audioSession setActive:YES
                                withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation
                                      error:nil];
    if (categoryOK && modeOK && activeOK) {
        NSLog(@"Audio session is ready");
    } else {
        NSLog(@"Some audio session settings are not supported");
    }

    self.recognitionRequest = [[SFSpeechAudioBufferRecognitionRequest alloc] init];
    AVAudioInputNode *inputNode = self.audioEngine.inputNode;
    self.recognitionRequest.shouldReportPartialResults = YES;

    // Start the recognition task
    self.recognitionTask = [self.speechRecognizer recognitionTaskWithRequest:self.recognitionRequest
                                                               resultHandler:^(SFSpeechRecognitionResult * _Nullable result, NSError * _Nullable error) {
        BOOL isFinal = NO;
        if (result) {
            // Speech converted to text
            self.siriTextView.text = [[result bestTranscription] formattedString];
            isFinal = [result isFinal];
        }
        if (error || isFinal) {
            [self.audioEngine stop];
            [inputNode removeTapOnBus:0];
            self.recognitionRequest = nil;
            self.recognitionTask = nil;
            self.siriBtn.enabled = YES;
        }
    }];

    // Feed microphone buffers into the recognition request
    AVAudioFormat *recordingFormat = [inputNode outputFormatForBus:0];
    [inputNode installTapOnBus:0
                    bufferSize:1024
                        format:recordingFormat
                         block:^(AVAudioPCMBuffer * _Nonnull buffer, AVAudioTime * _Nonnull when) {
        [self.recognitionRequest appendAudioPCMBuffer:buffer];
    }];

    [self.audioEngine prepare];
    BOOL audioEngineOK = [self.audioEngine startAndReturnError:nil];
    NSLog(@"%d", audioEngineOK);
    self.siriTextView.text = @"I'm Xiaoice! Speak and I'll listen";
}
```
- Fifth step: Implement the delegate method, which tracks when the recognizer becomes available
```objc
- (void)speechRecognizer:(SFSpeechRecognizer *)speechRecognizer availabilityDidChange:(BOOL)available {
    // Enable the button only while the recognizer can actually be used
    if (available) {
        self.siriBtn.enabled = YES;
    } else {
        self.siriBtn.enabled = NO;
    }
}
```
Credit to the original blogger: http://www.jianshu.com/p/b29069529bc2