摄像头采集,获取YUV数据, 渲染YUV数据,能给一下这三项的样例代码吗?

做实时音视频的传输,需要用到以下功能:

1. 摄像头采集

2. 获取YUV数据

3. 渲染YUV数据

能分别给出样例代码吗?

HarmonyOS
2024-08-22 20:06:17
浏览
收藏 0
回答 1
待解决
回答 1
按赞同
/
按时间
zxjiu

使用ImageReceiver获取YUV数据,参考如下代码:

/**
 * Registers an 'imageArrival' listener on the ImageReceiver and dumps each
 * received frame's YUV payload to <filesDir>/image.yuv.
 *
 * NOTE(review): the snippet trims a hard-coded 2048 trailing bytes from the
 * buffer; whether such padding exists (and its size) depends on the camera's
 * row stride — confirm against imgComponent.rowStride on the target device.
 */
async onImageArrival(receiver: image.ImageReceiver): Promise<void> { 
  receiver.on('imageArrival', () => { 
    // Save the latest frame to a file.
    receiver.readLatestImage((err, nextImage: image.Image) => { 
      if (err || nextImage === undefined) { 
        return; 
      } 
      // ComponentType.JPEG is the component id used for the raw camera
      // payload here; the bytes delivered are YUV, not JPEG-compressed.
      nextImage.getComponent(image.ComponentType.JPEG, async (err, imgComponent: image.Component) => { 
        if (err || imgComponent === undefined) { 
          nextImage.release(); 
          return; 
        } 
        const buffer = imgComponent.byteBuffer as ArrayBuffer; 
        if (!buffer) { 
          nextImage.release(); 
          return; 
        } 
        let path: string = getContext().filesDir + "/image.yuv"; 
        let file = fs.openSync(path, fs.OpenMode.READ_WRITE | fs.OpenMode.CREATE); 
        let opt: WriteOptions = { 
          // Drop the 2048 trailing padding bytes appended by the producer.
          length: buffer.byteLength - 2048 
        } 
        fs.write(file.fd, buffer, opt).then((writeLen) => { 
          console.info("write data to file succeed and size is:" + writeLen); 
        }).catch((err: BusinessError) => { 
          console.info("write data to file failed with error message: " + err.message + ", error code: " + err.code); 
        }).finally(() => { 
          // Close the fd on both success and failure (the original leaked it
          // on failure), and release the image only after the async write has
          // completed — releasing earlier invalidates the buffer mid-write.
          fs.closeSync(file); 
          nextImage.release(); 
        }); 
      }) 
    }) 
  }) 
}

OpenGLES渲染yuv数据demo:

1)顶点着色器

/** 
 * Vertex shader. 
 *
 * Passes the clip-space position through unchanged and flips the texture
 * coordinate vertically (1.0 - y): YUV frames are stored with the first row
 * at the top, while GL texture space runs bottom-to-top.
 */ 
const char VERTEX_SHADER[] = 
  "attribute vec4 aPosition; \n" 
"attribute vec2 aTextCoord;\n" 
"varying vec2 vTextCoord;\n" 
"void main() {\n" 
"vTextCoord = vec2(aTextCoord.x, 1.0 - aTextCoord.y);\n" 
"gl_Position = aPosition;\n" 
"}\n";

2)片段着色器

/** 
 * Fragment shader. 
 *
 * Samples the three planes of an I420/YUV420P frame from separate
 * GL_LUMINANCE textures (for a luminance texture, .r/.g/.b all hold the same
 * value, so reading .g is equivalent to .r) and converts to RGB with a
 * BT.601 full-range matrix. GLSL mat3 constructors are column-major, so the
 * three rows written below are the matrix's columns:
 *   R = Y + 1.13983 * V
 *   G = Y - 0.39465 * U - 0.5806 * V
 *   B = Y + 2.03211 * U
 * U and V are biased by 0.5 before the multiply (chroma is stored offset).
 */ 
const char FRAGMENT_SHADER[] = 
  "precision mediump float;\n" 
"varying vec2 vTextCoord;\n" 
"uniform sampler2D yTexture;\n" 
"uniform sampler2D uTexture;\n" 
"uniform sampler2D vTexture;\n" 
"\n" 
"void main()\n" 
"{\n" 
"    vec3 yuv;\n" 
"    vec3 rgb;\n" 
"    yuv.r = texture2D(yTexture, vTextCoord).g;\n" 
"    yuv.g = texture2D(uTexture, vTextCoord).g - 0.5;\n" 
"    yuv.b = texture2D(vTexture, vTextCoord).g - 0.5;\n" 
"\n" 
"    rgb = mat3(\n" 
"    1.0, 1.0, 1.0,\n" 
"    0.0, -0.39465, 2.03211,\n" 
"    1.13983, -0.5806, 0.0\n" 
"    ) * yuv;\n" 
"    gl_FragColor = vec4(rgb, 1.0);\n" 
"}\n";

3)初始化GL环境

/**
 * Initializes the EGL display/config for the given native window and then
 * creates the surface, context and shader program via CreateEnvironment().
 *
 * @param window native window handle (must be non-null)
 * @param width  window width in pixels (> 0)
 * @param height window height in pixels (> 0)
 * @return true on success, false on any EGL failure or bad parameter.
 */
bool EGLCore::EglContextInit(void *window, int width, int height) { 
  OH_LOG_Print(LOG_APP, LOG_INFO, LOG_PRINT_DOMAIN, "EGLCore", "EglContextInit execute"); 
  if ((nullptr == window) || (0 >= width) || (0 >= height)) { 
    OH_LOG_Print(LOG_APP, LOG_ERROR, LOG_PRINT_DOMAIN, "EGLCore", "EglContextInit: param error"); 
    return false; 
  } 
 
  m_width = width; 
  m_height = height; 
  if (0 < m_width) { 
    m_widthPercent = FIFTY_PERCENT * m_height / m_width; 
  } 
  m_eglWindow = static_cast<EGLNativeWindowType>(window); 
 
  // Init display. 
  m_eglDisplay = eglGetDisplay(EGL_DEFAULT_DISPLAY); 
  if (EGL_NO_DISPLAY == m_eglDisplay) { 
    OH_LOG_Print(LOG_APP, LOG_ERROR, LOG_PRINT_DOMAIN, "EGLCore", "eglGetDisplay: unable to get EGL display"); 
    return false; 
  } 
 
  EGLint majorVersion; 
  EGLint minorVersion; 
  if (!eglInitialize(m_eglDisplay, &majorVersion, &minorVersion)) { 
    OH_LOG_Print(LOG_APP, LOG_ERROR, LOG_PRINT_DOMAIN, "EGLCore", 
      "eglInitialize: unable to get initialize EGL display"); 
    return false; 
  } 
 
  // Select configuration. eglChooseConfig may succeed yet match zero
  // configs, so numConfigs must be checked as well (the original did not).
  const EGLint maxConfigSize = 1; 
  EGLint numConfigs = 0; 
  if (!eglChooseConfig(m_eglDisplay, ATTRIB_LIST, &m_eglConfig, maxConfigSize, &numConfigs) || 
    (numConfigs < 1)) { 
    OH_LOG_Print(LOG_APP, LOG_ERROR, LOG_PRINT_DOMAIN, "EGLCore", "eglChooseConfig: unable to choose configs"); 
    return false; 
  } 
 
  return CreateEnvironment(); 
}
bool EGLCore::CreateEnvironment() { 
  // Create surface. 
  if (nullptr == m_eglWindow) { 
    OH_LOG_Print(LOG_APP, LOG_ERROR, LOG_PRINT_DOMAIN, "EGLCore", "m_eglWindow is null"); 
    return false; 
  } 
  m_eglSurface = eglCreateWindowSurface(m_eglDisplay, m_eglConfig, m_eglWindow, NULL); 
 
  if (nullptr == m_eglSurface) { 
    OH_LOG_Print(LOG_APP, LOG_ERROR, LOG_PRINT_DOMAIN, "EGLCore", 
      "eglCreateWindowSurface: unable to create surface"); 
    return false; 
  } 
 
  // Create context. 
  m_eglContext = eglCreateContext(m_eglDisplay, m_eglConfig, EGL_NO_CONTEXT, CONTEXT_ATTRIBS); 
  if (!eglMakeCurrent(m_eglDisplay, m_eglSurface, m_eglSurface, m_eglContext)) { 
    OH_LOG_Print(LOG_APP, LOG_ERROR, LOG_PRINT_DOMAIN, "EGLCore", "eglMakeCurrent failed"); 
    return false; 
  } 
 
  // Create program. 
  m_program = CreateProgram(VERTEX_SHADER, FRAGMENT_SHADER); 
  if (PROGRAM_ERROR == m_program) { 
    OH_LOG_Print(LOG_APP, LOG_ERROR, LOG_PRINT_DOMAIN, "EGLCore", "CreateProgram: unable to create program"); 
    return false; 
  } 
  return true; 
}

4)加载yuv数据

/**
 * Loads one YUV420P frame from YUV_DATASOURCE, uploads it as three planes
 * and draws it to the EGL surface.
 *
 * The original logged PrepareLoad/ReadYuvFile failures but fell through and
 * rendered anyway; this version returns early instead.
 */
void EGLCore::LoadYuv() { 
  if (!PrepareLoad()) { 
    OH_LOG_Print(LOG_APP, LOG_ERROR, LOG_PRINT_DOMAIN, "EGLCore", "unable to prepareLoad"); 
    return; 
  } 
  // Must match the dimensions of the source YUV frame.
  // TODO(review): hard-coded 320x240 — derive from the capture configuration.
  int width = 320; 
  int height = 240; 
 
  if (!ReadYuvFile(YUV_DATASOURCE, width, height)) { 
    OH_LOG_Print(LOG_APP, LOG_ERROR, LOG_PRINT_DOMAIN, "EGLCore", "unable to readYuvFile"); 
    return; 
  } 
  YuvRender(width, height); 
  FinishLoad(); 
}
bool EGLCore::PrepareLoad() { 
  if ((nullptr == m_eglDisplay) || (nullptr == m_eglSurface) || (nullptr == m_eglContext) || 
    (!eglMakeCurrent(m_eglDisplay, m_eglSurface, m_eglSurface, m_eglContext))) { 
    OH_LOG_Print(LOG_APP, LOG_ERROR, LOG_PRINT_DOMAIN, "EGLCore", "PrepareDraw: param error"); 
    return false; 
  } 
  glUseProgram(m_program); 
  // 加入三维顶点数据 
  static float ver[] = {1.0f, -1.0f, 0.0f, -1.0f, -1.0f, 0.0f, 1.0f, 1.0f, 0.0f, -1.0f, 1.0f, 0.0f}; 
 
  GLuint apos = static_cast<GLuint>(glGetAttribLocation(m_program, "aPosition")); 
  glEnableVertexAttribArray(apos); 
  glVertexAttribPointer(apos, 3, GL_FLOAT, GL_FALSE, 0, ver); 
 
  // 加入纹理坐标数据 
  static float fragment[] = {1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f}; 
  GLuint aTex = static_cast<GLuint>(glGetAttribLocation(m_program, "aTextCoord")); 
  glEnableVertexAttribArray(aTex); 
  glVertexAttribPointer(aTex, 2, GL_FLOAT, GL_FALSE, 0, fragment); 
 
  // 对sampler变量,使用函数glUniform1i和glUniform1iv进行设置 
  glUniform1i(glGetUniformLocation(m_program, "yTexture"), 0); 
  glUniform1i(glGetUniformLocation(m_program, "uTexture"), 1); 
  glUniform1i(glGetUniformLocation(m_program, "vTexture"), 2); 
  return true; 
} 
bool EGLCore::ReadYuvFile(const char *data_source, int width, int height) { 
 
  FILE *fp = fopen(data_source, "rb"); 
  if (!fp) { 
    OH_LOG_Print(LOG_APP, LOG_ERROR, LOG_PRINT_DOMAIN, "file", "openFileErr"); 
    return false; 
  } 
  buf[0] = new unsigned char[width * height]; // y 
  buf[1] = new unsigned char[width * height / 4]; // u 
  buf[2] = new unsigned char[width * height / 4]; // v 
  if (feof(fp) == 0) { 
    fread(buf[0], 1, width * height, fp); 
    fread(buf[1], 1, width * height / 4, fp); 
    fread(buf[2], 1, width * height / 4, fp); 
    return true; 
  } 
  void EGLCore::YuvRender(int width, int height) { 
    // 纹理ID 
    GLuint texts[3] = {0}; 
    // 创建若干个纹理对象,并且得到纹理ID 
    glGenTextures(3, texts); 
 
    // 绑定纹理。后面的的设置和加载全部作用于当前绑定的纹理对象 
    // GL_TEXTURE0、GL_TEXTURE1、GL_TEXTURE2 的就是纹理单元,GL_TEXTURE_1D、GL_TEXTURE_2D、CUBE_MAP为纹理目标 
    // 通过 glBindTexture 函数将纹理目标和纹理绑定后,对纹理目标所进行的操作都反映到对纹理上 
    glBindTexture(GL_TEXTURE_2D, texts[0]); 
    // 缩小的过滤器 
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); 
    // 放大的过滤器 
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); 
 
    glTexImage2D(GL_TEXTURE_2D, 
      0, // 细节基本 默认0 
      GL_LUMINANCE, // gpu内部格式 亮度,灰度图(这里就是只取一个亮度的颜色通道的意思) 
      width, // 加载的纹理宽度 
      height, // 加载的纹理高度 
      0, // 纹理边框 
      GL_LUMINANCE, // 数据的像素格式 亮度,灰度图 
      GL_UNSIGNED_BYTE, // 像素点存储的数据类型 
      NULL // 纹理的数据(先不传) 
    ); 
 
    // 绑定纹理 
    glBindTexture(GL_TEXTURE_2D, texts[1]); 
    // 缩小的过滤器 
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); 
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); 
    // 设置纹理的格式和大小 
    glTexImage2D(GL_TEXTURE_2D, 
      0, // 细节基本 默认0 
      GL_LUMINANCE, // gpu内部格式 亮度,灰度图(这里就是只取一个颜色通道的意思) 
      width / 2, // u数据数量为屏幕的4分之1 
      height / 2, 
      0, // 边框 
      GL_LUMINANCE, // 数据的像素格式 亮度,灰度图 
      GL_UNSIGNED_BYTE, // 像素点存储的数据类型 
      NULL // 纹理的数据(先不传) 
    ); 
 
    // 绑定纹理 
    glBindTexture(GL_TEXTURE_2D, texts[2]); 
    // 缩小的过滤器 
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); 
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); 
    // 设置纹理的格式和大小 
    glTexImage2D(GL_TEXTURE_2D, 
      0, // 细节基本 默认0 
      GL_LUMINANCE, // gpu内部格式 亮度,灰度图(这里就是只取一个颜色通道的意思) 
      width / 2, 
      height / 2, 
      0, // 边框 
      GL_LUMINANCE, // 数据的像素格式 亮度,灰度图 
      GL_UNSIGNED_BYTE, // 像素点存储的数据类型 
      NULL // 纹理的数据(先不传) 
    ); 
 
 
    // 激活第一层纹理,绑定到创建的纹理 
    // 下面的width,height主要是显示尺寸? 
    glActiveTexture(GL_TEXTURE0); 
    // 绑定y对应的纹理` 
    glBindTexture(GL_TEXTURE_2D, texts[0]); 
    // 替换纹理,比重新使用glTexImage2D性能高多 
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, // 相对原来的纹理的offset 
      width, height, // 加载的纹理宽度、高度。 
      GL_LUMINANCE, GL_UNSIGNED_BYTE, buf[0]); 
 
    // 激活第二层纹理,绑定到创建的纹理 
    glActiveTexture(GL_TEXTURE1); 
    // 绑定u对应的纹理 
    glBindTexture(GL_TEXTURE_2D, texts[1]); 
    // 替换纹理,比重新使用glTexImage2D性能高 
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width / 2, height / 2, GL_LUMINANCE, GL_UNSIGNED_BYTE, buf[1]); 
    // 激活第三层纹理,绑定到创建的纹理 
    glActiveTexture(GL_TEXTURE2); 
    // 绑定v对应的纹理 
    glBindTexture(GL_TEXTURE_2D, texts[2]); 
    // 替换纹理,比重新使用glTexImage2D性能高 
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width / 2, height / 2, GL_LUMINANCE, GL_UNSIGNED_BYTE, buf[2]); 
  }

5)上屏渲染

/**
 * Issues the draw call for the 4-vertex strip set up in PrepareLoad() and
 * presents the frame.
 *
 * The original called glFlush() and glFinish() before swapping; both are
 * redundant — eglSwapBuffers performs an implicit flush — and glFinish
 * stalls the CPU until the GPU drains, hurting real-time throughput.
 *
 * @return true if eglSwapBuffers succeeded.
 */
bool EGLCore::FinishLoad() { 
  glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); 
  // Present: swaps the back buffer to the window (implicitly flushes GL). 
  bool swap = eglSwapBuffers(m_eglDisplay, m_eglSurface); 
  m_flag = true; 
  return swap; 
}

摄像头采集参考文档:https://developer.huawei.com/consumer/cn/doc/harmonyos-guides-V1/camera-shooting-case-0000001580185354-V1

https://developer.huawei.com/consumer/cn/doc/harmonyos-references/js-apis-camera-0000001820881333#ZH-CN_TOPIC_0000001820881333__videooutput

分享
微博
QQ
微信
回复
2024-08-23 14:04:44
相关问题
摄像头获取yuv数据是否有旋转
458浏览 • 1回复 待解决
OpenGL无法正常渲染某些分辨率YUV数据
219浏览 • 0回复 待解决
相机预览及切换摄像头
911浏览 • 1回复 待解决
如何获取前置摄像头预览图像
2204浏览 • 1回复 待解决
HarmonyOS 录制屏幕 录制摄像头咨询
306浏览 • 1回复 待解决
HiSpark_IPC_DIY 摄像头烧录失败
5558浏览 • 3回复 待解决
希望能提供一下图片处理工程
218浏览 • 1回复 待解决
请问3.1如何调用摄像头
2274浏览 • 1回复 待解决
录制过程中HarmonyOS如何切换摄像头
307浏览 • 1回复 待解决
HarmonyOS 摄像头预览画面方向错误
176浏览 • 1回复 待解决
鸿蒙webview调用摄像头和麦克风
2069浏览 • 0回复 待解决