|
/** Lifecycle states of a voice search session. */
export type VoiceSearchStatus =
  | 'INITIAL'
  | 'REQUESTING_PERMISSION'
  | 'LISTENING'
  | 'RECOGNIZING'
  | 'ERROR';

/** Snapshot of the session, passed to `onStateChange` on every transition. */
type VoiceSearchState = {
  status: VoiceSearchStatus;
  // Latest recognized text; empty string until a result arrives.
  transcript: string;
  // Error code from the recognition engine; null while no error has occurred.
  errorCode: SpeechRecognitionErrorCode | null;
};

/** Options accepted by `createVoiceSearch`. */
type CreateVoiceSearchParams = {
  // Optional language assigned to `recognition.lang`; browser default when omitted.
  language?: string;
  // Invoked with the full new state after every state change.
  onStateChange(state: VoiceSearchState): void;
  // Invoked with the final transcript when recognition ends without an error.
  onTranscript(transcript: string): void;
};

/** Public control surface returned by `createVoiceSearch`. */
type VoiceSearchApi = {
  isBrowserSupported(): boolean;
  start(): void;
  stop(): void;
};
| 25 | + |
| 26 | +function createState(state: Partial<VoiceSearchState>): VoiceSearchState { |
| 27 | + return { |
| 28 | + status: 'INITIAL', |
| 29 | + transcript: '', |
| 30 | + errorCode: null, |
| 31 | + ...state, |
| 32 | + }; |
| 33 | +} |
| 34 | + |
| 35 | +export function createVoiceSearch({ |
| 36 | + language, |
| 37 | + onTranscript, |
| 38 | + onStateChange, |
| 39 | +}: CreateVoiceSearchParams): VoiceSearchApi { |
| 40 | + const SpeechRecognitionAPI: new () => SpeechRecognition = |
| 41 | + (window as any).webkitSpeechRecognition || |
| 42 | + (window as any).SpeechRecognition; |
| 43 | + let state: VoiceSearchState = createState({}); |
| 44 | + let recognition: SpeechRecognition | undefined; |
| 45 | + |
| 46 | + function isBrowserSupported() { |
| 47 | + return Boolean(SpeechRecognitionAPI); |
| 48 | + } |
| 49 | + |
| 50 | + function setState(newState: Partial<VoiceSearchState>) { |
| 51 | + state = { ...state, ...newState }; |
| 52 | + onStateChange(state); |
| 53 | + } |
| 54 | + |
| 55 | + function onStart() { |
| 56 | + setState({ status: 'LISTENING' }); |
| 57 | + } |
| 58 | + |
| 59 | + function onError(event: SpeechRecognitionErrorEvent) { |
| 60 | + setState({ status: 'ERROR', errorCode: event.error }); |
| 61 | + } |
| 62 | + |
| 63 | + function onResult(event: SpeechRecognitionEvent) { |
| 64 | + setState({ |
| 65 | + status: 'RECOGNIZING', |
| 66 | + transcript: |
| 67 | + (event.results[0] && |
| 68 | + event.results[0][0] && |
| 69 | + event.results[0][0].transcript) || |
| 70 | + '', |
| 71 | + }); |
| 72 | + } |
| 73 | + |
| 74 | + function onEnd() { |
| 75 | + if (!state.errorCode && state.transcript) { |
| 76 | + onTranscript(state.transcript); |
| 77 | + } |
| 78 | + |
| 79 | + if (state.status !== 'ERROR') { |
| 80 | + setState(createState({ status: 'INITIAL' })); |
| 81 | + } |
| 82 | + } |
| 83 | + |
| 84 | + function start() { |
| 85 | + recognition = new SpeechRecognitionAPI(); |
| 86 | + if (!recognition) { |
| 87 | + return; |
| 88 | + } |
| 89 | + |
| 90 | + setState(createState({ status: 'REQUESTING_PERMISSION' })); |
| 91 | + recognition.interimResults = true; |
| 92 | + if (language) { |
| 93 | + recognition.lang = language; |
| 94 | + } |
| 95 | + recognition.addEventListener('start', onStart); |
| 96 | + recognition.addEventListener('error', onError); |
| 97 | + recognition.addEventListener('result', onResult); |
| 98 | + recognition.addEventListener('end', onEnd); |
| 99 | + recognition.start(); |
| 100 | + } |
| 101 | + |
| 102 | + function stop() { |
| 103 | + if (!recognition) { |
| 104 | + return; |
| 105 | + } |
| 106 | + |
| 107 | + recognition.stop(); |
| 108 | + recognition.removeEventListener('start', onStart); |
| 109 | + recognition.removeEventListener('error', onError); |
| 110 | + recognition.removeEventListener('result', onResult); |
| 111 | + recognition.removeEventListener('end', onEnd); |
| 112 | + recognition = undefined; |
| 113 | + |
| 114 | + setState(createState({ status: 'INITIAL' })); |
| 115 | + } |
| 116 | + |
| 117 | + return { |
| 118 | + isBrowserSupported, |
| 119 | + start, |
| 120 | + stop, |
| 121 | + }; |
| 122 | +} |
0 commit comments