鸿蒙应用开发之语音转文本基础

仿佛云烟
发布于 2025-6-28 17:07
浏览
0收藏

一、工具


鸿蒙应用开发之语音转文本基础-鸿蒙开发者社区

DevEco Studio



二、开发步骤


将一段中文音频转换为文本


1.在使用语音识别时,将实现语音识别相关的类添加至工程。


import { speechRecognizer } from '@kit.CoreSpeechKit';
import { BusinessError } from '@kit.BasicServicesKit';


2.调用​​createEngine​​​方法,对引擎进行初始化,并创建​​SpeechRecognitionEngine​​实例。


let asrEngine: speechRecognizer.SpeechRecognitionEngine;
let sessionId: string = '123456';
// 创建引擎,通过callback形式返回
// 设置创建引擎参数
let extraParam: Record<string, Object> = {"locate": "CN", "recognizerMode": "short"};
let initParamsInfo: speechRecognizer.CreateEngineParams = {
  language: 'zh-CN',
  online: 1,
  extraParams: extraParam
};
// 调用createEngine方法
speechRecognizer.createEngine(initParamsInfo, (err: BusinessError, speechRecognitionEngine: speechRecognizer.SpeechRecognitionEngine) => {
  if (!err) {
    console.info('Succeeded in creating engine.');
    // 接收创建引擎的实例
    asrEngine = speechRecognitionEngine;
  } else {
    console.error(Failed to create engine. Code: ${err.code}, message: ${err.message}.);
  }
});


3.得到​​SpeechRecognitionEngine​​​实例对象后,实例化​​RecognitionListener​​​对象,调用​​setListener​​方法设置回调,用来接收语音识别相关的回调信息。


// 创建回调对象
let setListener: speechRecognizer.RecognitionListener = {
  // 开始识别成功回调
  onStart(sessionId: string, eventMessage: string) {
    console.info(onStart, sessionId: ${sessionId} eventMessage: ${eventMessage});
  },
  // 事件回调
  onEvent(sessionId: string, eventCode: number, eventMessage: string) {
    console.info(onEvent, sessionId: ${sessionId} eventCode: ${eventCode} eventMessage: ${eventMessage});
  },
  // 识别结果回调,包括中间结果和最终结果
  onResult(sessionId: string, result: speechRecognizer.SpeechRecognitionResult) {
    console.info(onResult, sessionId: ${sessionId} sessionId: ${JSON.stringify(result)});
  },
  // 识别完成回调
  onComplete(sessionId: string, eventMessage: string) {
    console.info(onComplete, sessionId: ${sessionId} eventMessage: ${eventMessage});
  },
  // 错误回调,错误码通过本方法返回
  // 返回错误码1002200002,开始识别失败,重复启动startListening方法时触发
  // 更多错误码请参考错误码参考
  onError(sessionId: string, errorCode: number, errorMessage: string) {
    console.error(onError, sessionId: ${sessionId} errorCode: ${errorCode} errorMessage: ${errorMessage});
  },
}
// 设置回调
asrEngine.setListener(setListener);


4.分别为音频文件转文字和麦克风转文字功能设置开始识别的相关参数,调用​​startListening​​方法,开始识别。


// 开始识别
private startListeningForWriteAudio() {
  // 设置开始识别的相关参数
  let recognizerParams: speechRecognizer.StartParams = {
    sessionId: this.sessionId,
    audioInfo: { audioType: 'pcm', sampleRate: 16000, soundChannel: 1, sampleBit: 16 } //audioInfo参数配置请参考AudioInfo
  }
  // 调用开始识别方法
  asrEngine.startListening(recognizerParams);
};

// Begin recognition for audio captured live from the microphone.
private startListeningForRecording() {
  // 16 kHz, mono, 16-bit PCM input.
  const audioInfo: speechRecognizer.AudioInfo = { audioType: 'pcm', sampleRate: 16000, soundChannel: 1, sampleBit: 16 }
  // Extra engine options: VAD thresholds (ms) and the maximum audio duration (ms).
  const extras: Record<string, Object> = {
    "recognitionMode": 0,
    "vadBegin": 2000,
    "vadEnd": 3000,
    "maxAudioDuration": 20000
  }
  const params: speechRecognizer.StartParams = {
    sessionId: this.sessionId,
    audioInfo: audioInfo,
    extraParams: extras
  }
  console.info('startListening start');
  asrEngine.startListening(params);
};


5.传入音频流,调用​​writeAudio​​方法,开始写入音频流。读取音频文件时,开发者需预先准备一个pcm格式音频文件。


let uint8Array: Uint8Array = new Uint8Array();
// The audio stream can be obtained in two ways: 1) from a live recording; 2) by reading an audio file.
// Both approaches are implemented in the demo referenced below.
// Write the audio stream; only chunk lengths of 640 or 1280 bytes are supported.
asrEngine.writeAudio(sessionId, uint8Array);


完整代码:


import { speechRecognizer } from '@kit.CoreSpeechKit';
import { BusinessError } from '@kit.BasicServicesKit';
import { fileIo } from '@kit.CoreFileKit';
import { hilog } from '@kit.PerformanceAnalysisKit';
import AudioCapturer from './AudioCapturer';

const TAG = 'CoreSpeechKitDemo';

let asrEngine: speechRecognizer.SpeechRecognitionEngine;

@Entry
@Component
struct Index {
  @State createCount: number = 0;
  @State result: boolean = false;
  @State voiceInfo: string = "";
  @State sessionId: string = "123456";
  @State sessionId2: string = "1234567";
  private mAudioCapturer = new AudioCapturer();

  aboutToAppear(): void {
    this.createCount++;
    this.createByCallback();
    this.setListener();

  }



  build() {
    Column() {
      Scroll() {
        Column() {

          Text(${this.voiceInfo})
            .margin(20)


          Button() {
            Text("语音转文本")
              .fontColor(Color.White)
              .fontSize(20)
          }
          .type(ButtonType.Capsule)
          .backgroundColor("#0x317AE7")
          .width("80%")
          .height(50)
          .margin(10)
          .onClick(() => {
            this.startRecording();
          })

          Button() {
            Text("写入音频流")
              .fontColor(Color.White)
              .fontSize(20)
          }
          .type(ButtonType.Capsule)
          .backgroundColor("#0x317AE7")
          .width("80%")
          .height(50)
          .margin(10)
          .onClick(() => {
            this.writeAudio();
          })


          Button() {
            Text("完成")
              .fontColor(Color.White)
              .fontSize(20)
          }
          .type(ButtonType.Capsule)
          .backgroundColor("#0x317AE7")
          .width("80%")
          .height(50)
          .margin(10)
          .onClick(() => {
            // 结束识别
            hilog.info(0x0000, TAG, "finish click:-->");
            asrEngine.finish(this.sessionId);
          })

          Button() {
            Text("取消")
              .fontColor(Color.White)
              .fontSize(20)
          }
          .type(ButtonType.Capsule)
          .backgroundColor("#0x317AE7")
          .width("80%")
          .height(50)
          .margin(10)
          .onClick(() => {
            // 取消识别
            hilog.info(0x0000, TAG, "cancel click:-->");
            asrEngine.cancel(this.sessionId);
          })

          Button() {
            Text("关闭")
              .fontColor(Color.White)
              .fontSize(20)
          }
          .type(ButtonType.Capsule)
          .backgroundColor("#0x317AA7")
          .width("80%")
          .height(50)
          .margin(10)
          .onClick(() => {
            // 释放引擎
            asrEngine.shutdown();
          })
        }
        .layoutWeight(1)
      }
      .width('100%')
      .height('100%')

    }
  }

  // 创建引擎,通过callback形式返回
  private createByCallback() {
    // 设置创建引擎参数
    let extraParam: Record<string, Object> = {"locate": "CN", "recognizerMode": "short"};
    let initParamsInfo: speechRecognizer.CreateEngineParams = {
      language: 'zh-CN',
      online: 1,
      extraParams: extraParam
    };

    // 调用createEngine方法
    speechRecognizer.createEngine(initParamsInfo, (err: BusinessError, speechRecognitionEngine:
      speechRecognizer.SpeechRecognitionEngine) => {
      if (!err) {
        hilog.info(0x0000, TAG, 'Succeeded in creating engine.');
        // 接收创建引擎的实例
        asrEngine = speechRecognitionEngine;
      } else {
        // 无法创建引擎时返回错误码1002200001,原因:语种不支持、模式不支持、初始化超时、资源不存在等导致创建引擎失败
        // 无法创建引擎时返回错误码1002200006,原因:引擎正在忙碌中,一般多个应用同时调用语音识别引擎时触发
        // 无法创建引擎时返回错误码1002200008,原因:引擎已被销毁
        hilog.error(0x0000, TAG, Failed to create engine. Code: ${err.code}, message: ${err.message}.);
      }
    });
  }

  // 查询语种信息,以callback形式返回
  private queryLanguagesCallback() {
    // 设置查询相关参数
    let languageQuery: speechRecognizer.LanguageQuery = {
      sessionId: this.sessionId
    };
    // 调用listLanguages方法
    asrEngine.listLanguages(languageQuery, (err: BusinessError, languages: Array) => {
      if (!err) {
        // 接收目前支持的语种信息
        hilog.info(0x0000, TAG, Succeeded in listing languages, result: ${JSON.stringify(languages)});
      } else {
        hilog.error(0x0000, TAG, Failed to create engine. Code: ${err.code}, message: ${err.message}.);
      }
    });
  };

  // 开始识别
  private startListeningForWriteAudio() {
    // 设置开始识别的相关参数
    let recognizerParams: speechRecognizer.StartParams = {
      sessionId: this.sessionId,
      audioInfo: { audioType: 'pcm', sampleRate: 16000, soundChannel: 1, sampleBit: 16 } //audioInfo参数配置请参考AudioInfo
    }
    // 调用开始识别方法
    asrEngine.startListening(recognizerParams);
  };

  private startListeningForRecording() {
    let audioParam: speechRecognizer.AudioInfo = { audioType: 'pcm', sampleRate: 16000, soundChannel: 1, sampleBit: 16 }
    let extraParam: Record<string, Object> = {
      "recognitionMode": 0,
      "vadBegin": 2000,
      "vadEnd": 3000,
      "maxAudioDuration": 20000
    }
    let recognizerParams: speechRecognizer.StartParams = {
      sessionId: this.sessionId,
      audioInfo: audioParam,
      extraParams: extraParam
    }
    hilog.info(0x0000, TAG, 'startListening start');
    asrEngine.startListening(recognizerParams);
  };



  // 写音频流
  private async writeAudio() {
    this.startListeningForWriteAudio();
    hilog.error(0x0000, TAG, Failed to read from file. Code);
    let ctx = getContext(this);
    let filenames: string[] = fileIo.listFileSync(ctx.filesDir);
    if (filenames.length <= 0) {
      hilog.error(0x0000, TAG, Failed to read from file. Code);
      return;
    }
    hilog.error(0x0000, TAG, Failed to read from file. Code);
    let filePath: string = ${ctx.filesDir}/${filenames[0]};
    let file = fileIo.openSync(filePath, fileIo.OpenMode.READ_WRITE);
    try {
      let buf: ArrayBuffer = new ArrayBuffer(1280);
      let offset: number = 0;
      while (1280 == fileIo.readSync(file.fd, buf, {
        offset: offset
      })) {
        let uint8Array: Uint8Array = new Uint8Array(buf);
        asrEngine.writeAudio(this.sessionId, uint8Array);
        await this.countDownLatch(1);
        offset = offset + 1280;
      }
    } catch (err) {
      hilog.error(0x0000, TAG, Failed to read from file. Code: ${err.code}, message: ${err.message}.);
    } finally {
      if (null != file) {
        fileIo.closeSync(file);
      }
    }
  }

  // 麦克风语音转文本
  private async startRecording() {
    this.startListeningForRecording();
    // 录音获取音频
    let data: ArrayBuffer;
    hilog.info(0x0000, TAG, 'create capture success');
    this.mAudioCapturer.init((dataBuffer: ArrayBuffer) => {
      hilog.info(0x0000, TAG, 'start write');
      hilog.info(0x0000, TAG, 'ArrayBuffer ' + JSON.stringify(dataBuffer));
      data = dataBuffer
      let uint8Array: Uint8Array = new Uint8Array(data);
      hilog.info(0x0000, TAG, 'ArrayBuffer uint8Array ' + JSON.stringify(uint8Array));
      // 写入音频流
      asrEngine.writeAudio(this.sessionId2, uint8Array);
    });
  };
  // 计时
  public async countDownLatch(count: number) {
    while (count > 0) {
      await this.sleep(40);
      count--;
    }
  }
  // 睡眠
  private sleep(ms: number):Promise {
    return new Promise(resolve => setTimeout(resolve, ms));
  }

  // 设置回调
  private setListener() {
    // 创建回调对象
    let setListener: speechRecognizer.RecognitionListener = {
      // 开始识别成功回调
      onStart(sessionId: string, eventMessage: string) {
        hilog.info(0x0000, TAG, onStart, sessionId: ${sessionId} eventMessage: ${eventMessage});
      },
      // 事件回调
      onEvent(sessionId: string, eventCode: number, eventMessage: string) {
        hilog.info(0x0000, TAG, onEvent, sessionId: ${sessionId} eventCode: ${eventCode} eventMessage: ${eventMessage});
      },
      // 识别结果回调,包括中间结果和最终结果
      onResult(sessionId: string, result: speechRecognizer.SpeechRecognitionResult) {
        hilog.info(0x0000, TAG, onResult, sessionId: ${sessionId} sessionId: ${JSON.stringify(result)});
        this.voiceInfo = result;
      },
      // 识别完成回调
      onComplete(sessionId: string, eventMessage: string) {
        hilog.info(0x0000, TAG, onComplete, sessionId: ${sessionId} eventMessage: ${eventMessage});
        this.voiceInfo = eventMessage;
      },
      // 错误回调,错误码通过本方法返回
      // 返回错误码1002200002,开始识别失败,重复启动startListening方法时触发
      // 更多错误码请参考错误码参考
      onError(sessionId: string, errorCode: number, errorMessage: string) {
        hilog.error(0x0000, TAG, onError, sessionId: ${sessionId} errorCode: ${errorCode} errorMessage: ${errorMessage});
      },
    }
    // 设置回调
    asrEngine.setListener(setListener);
  };
}


AudioCapturer.ts文件


'use strict';
/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.
 */

import {audio} from '@kit.AudioKit';
import { hilog } from '@kit.PerformanceAnalysisKit';

const TAG = 'AudioCapturer';

/ * Audio collector tool
 */
export default class AudioCapturer {
  /
   * Collector object
   */
  private mAudioCapturer = null;

  /   * Audio Data Callback Method
   */
  private mDataCallBack: (data: ArrayBuffer) => void = null;

  /
   * Indicates whether recording data can be obtained.
   */
  private mCanWrite: boolean = true;

  /   * Audio stream information
   */
  private audioStreamInfo = {
    samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_16000,
    channels: audio.AudioChannel.CHANNEL_1,
    sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE,
    encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW
  }

  /
   * Audio collector information
   */
  private audioCapturerInfo = {
    source: audio.SourceType.SOURCE_TYPE_MIC,
    capturerFlags: 0
  }

  /   * Audio Collector Option Information
   */
  private audioCapturerOptions = {
    streamInfo: this.audioStreamInfo,
    capturerInfo: this.audioCapturerInfo
  }

  /
   *  Initialize
   * @param audioListener
   */
  public async init(dataCallBack: (data: ArrayBuffer) => void) {
    if (null != this.mAudioCapturer) {
      hilog.error(0x0000, TAG, 'AudioCapturerUtil already init');
      return;
    }
    this.mDataCallBack = dataCallBack;
    this.mAudioCapturer = await audio.createAudioCapturer(this.audioCapturerOptions).catch(error => {
      hilog.error(0x0000, TAG, AudioCapturerUtil init createAudioCapturer failed, code is ${error.code}, message is ${error.message});
    });
  }

  /   * start recording
   */
  public async start() {
    hilog.error(0x0000, TAG, AudioCapturerUtil start);
    let stateGroup = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED];
    if (stateGroup.indexOf(this.mAudioCapturer.state) === -1) {
      hilog.error(0x0000, TAG, AudioCapturerUtil start failed);
      return;
    }
    this.mCanWrite = true;
    await this.mAudioCapturer.start();
    while (this.mCanWrite) {
      let bufferSize = await this.mAudioCapturer.getBufferSize();
      let buffer = await this.mAudioCapturer.read(bufferSize, true);
      this.mDataCallBack(buffer)
    }
  }

  /
   * stop recording
   */
  public async stop() {
    if (this.mAudioCapturer.state !== audio.AudioState.STATE_RUNNING && this.mAudioCapturer.state !== audio.AudioState.STATE_PAUSED) {
      hilog.error(0x0000, TAG, AudioCapturerUtil stop Capturer is not running or paused);
      return;
    }
    this.mCanWrite = false;
    await this.mAudioCapturer.stop();
    if (this.mAudioCapturer.state === audio.AudioState.STATE_STOPPED) {
      hilog.info(0x0000, TAG, AudioCapturerUtil Capturer stopped);
    } else {
      hilog.error(0x0000, TAG, Capturer stop failed);
    }
  }

  /**
   * release
   */
  public async release() {
    if (this.mAudioCapturer.state === audio.AudioState.STATE_RELEASED || this.mAudioCapturer.state === audio.AudioState.STATE_NEW) {
      hilog.error(0x0000, TAG, Capturer already released);
      return;
    }
    await this.mAudioCapturer.release();
    this.mAudioCapturer = null;
    if (this.mAudioCapturer.state == audio.AudioState.STATE_RELEASED) {
      hilog.info(0x0000, TAG, Capturer released);
    } else {
      hilog.error(0x0000, TAG, Capturer release failed);
    }
  }
}


示例代码Demo:​​CoreSpeech2: 鸿蒙应用开发之——语音转文本​


收藏
回复
举报
回复
    相关推荐