
Capturing YUV images with Android Camera2 and converting the YUV data to a Bitmap

I. A brief introduction to YUV
yuv420p and yuv420sp
yuv420p (e.g. YV12): planar storage. The full Y plane is stored first, followed by the complete V plane and then the complete U plane; every 2x2 block of four Y samples shares one U sample and one V sample.
yuv420sp (e.g. NV21): semi-planar storage. The full Y plane is stored first, followed by a single chroma plane in which the V and U bytes are interleaved (VUVU...); again, every 2x2 block of four Y samples shares one U sample and one V sample. (A sketch of the resulting index math follows the two layout examples below.)
Example: YV12 layout of a 6*4 image
YYYYYY
YYYYYY
YYYYYY
YYYYYY
VVVVVV
UUUUUU
Example: NV21 layout of a 6*4 image
YYYYYY
YYYYYY
YYYYYY
YYYYYY
VUVUVU
VUVUVU
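
To make the two layouts concrete, here is a minimal sketch of the byte-offset math for a pixel at (x, y). It assumes tightly packed buffers with no row padding (row stride equal to the image width), and the class and method names are hypothetical, not part of the original article.

// Minimal sketch, assuming tightly packed buffers (rowStride == width); real buffers
// may carry row padding, so treat these formulas as illustrations of the layouts only.
public final class Yuv420Offsets {
    // NV21: [Y plane: w*h bytes][interleaved VU plane: w*h/2 bytes]
    static int nv21YOffset(int x, int y, int w) { return y * w + x; }
    static int nv21VOffset(int x, int y, int w, int h) { return w * h + (y / 2) * w + (x / 2) * 2; }
    static int nv21UOffset(int x, int y, int w, int h) { return nv21VOffset(x, y, w, h) + 1; }

    // YV12: [Y plane: w*h bytes][V plane: w*h/4 bytes][U plane: w*h/4 bytes]
    static int yv12YOffset(int x, int y, int w) { return y * w + x; }
    static int yv12VOffset(int x, int y, int w, int h) { return w * h + (y / 2) * (w / 2) + (x / 2); }
    static int yv12UOffset(int x, int y, int w, int h) { return w * h + (w * h) / 4 + (y / 2) * (w / 2) + (x / 2); }
}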

II. How Camera2 stores the YUV_420_888 data delivered to the ImageReader callback
image = reader.acquireLatestImage();
Image.Plane[] planes = image.getPlanes(); // get the planes of the YUV image; plane 0 holds the Y component
Image.Plane plane = planes[i];
Buffer buffer = plane.getBuffer();
1. buffer.remaining() gives the number of bytes in the plane's buffer.
2. plane.getPixelStride() gives the pixel stride of the plane (the distance in bytes between two adjacent samples).
3. plane.getRowStride() gives the row stride of the plane (the distance in bytes between the starts of two adjacent rows).
With a preview resolution of 1280*720, the log below shows the values reported for the YUV_420_888 planes. getPixelStride is 2 for plane 1 and plane 2, which means only every other byte of those buffers is a valid sample.
That is, in plane 1 the in-row indices 0, 2, 4, 6, ... hold the U samples, with the V samples interleaved in between, and the buffer length is 1280*720/2 - 1.
Likewise, in plane 2 the in-row indices 0, 2, 4, 6, ... hold the V samples, with the U samples interleaved in between. (A stride-aware extraction sketch follows the log output.)
2023-03-06 10:36:54.068 31203-31258 Camera2Fragment I getByteFromYuvReader() planes.length:3
2023-03-06 10:36:54.068 31203-31258 Camera2Fragment I getByteFromYuvReader() i:0 buffer.remaining:921600 getPixelStride:1 getRowStride:1280
2023-03-06 10:36:54.068 31203-31258 Camera2Fragment I getByteFromYuvReader() i:1 buffer.remaining:460799 getPixelStride:2 getRowStride:1280
2023-03-06 10:36:54.068 31203-31258 Camera2Fragment I getByteFromYuvReader() i:2 buffer.remaining:460799 getPixelStride:2 getRowStride:1280
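
On some devices getRowStride() can be larger than the image width, so a more defensive way to read the planes is to walk each one with its reported row stride and pixel stride instead of copying the raw buffer. The following is a minimal sketch under that assumption (the helper name is hypothetical; it expects android.media.Image and java.nio.ByteBuffer to be imported); plane 0 is read with the full width/height, planes 1 and 2 with width/2 and height/2.

// Minimal sketch: copy one plane of a YUV_420_888 Image into a tightly packed array,
// honoring rowStride and pixelStride.
private static byte[] packPlane(Image.Plane plane, int planeWidth, int planeHeight) {
    ByteBuffer buffer = plane.getBuffer();
    int rowStride = plane.getRowStride();
    int pixelStride = plane.getPixelStride();
    byte[] row = new byte[rowStride];
    byte[] out = new byte[planeWidth * planeHeight];
    int outIndex = 0;
    for (int r = 0; r < planeHeight; r++) {
        // The last row of a chroma plane may be shorter than rowStride.
        int length = Math.min(rowStride, buffer.remaining());
        buffer.get(row, 0, length);
        for (int c = 0; c < planeWidth; c++) {
            out[outIndex++] = row[c * pixelStride];
        }
    }
    return out;
}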

III. Capturing YUV and converting it to a Bitmap: implementation
1. Creating the ImageReader that captures YUV-format pictures
mImageReader = ImageReader.newInstance(Config.SHOOT_PIC_WIDTH,
        Config.SHOOT_PIC_HEIGHT, ImageFormat.YUV_420_888, 1);
2. Handling the frame in the imagereader.onImageAvailable callback
if (ImageFormat.YUV_420_888 == reader.getImageFormat()) {
    Bitmap bitmap = getBitmapFromYuvReader(reader);
}
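
For completeness, the callback above is normally registered on the ImageReader with a background Handler so that the YUV processing does not block the main thread. A minimal wiring sketch (mBackgroundHandler is an assumption here, taken to wrap an already started HandlerThread):

// Minimal sketch: register the OnImageAvailableListener on a background handler.
mImageReader.setOnImageAvailableListener(new ImageReader.OnImageAvailableListener() {
    @Override
    public void onImageAvailable(ImageReader reader) {
        if (ImageFormat.YUV_420_888 == reader.getImageFormat()) {
            Bitmap bitmap = getBitmapFromYuvReader(reader);
            // use or persist the bitmap here
        }
    }
}, mBackgroundHandler);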

// Read the YUV data from the ImageReader and convert it to a Bitmap
private synchronized Bitmap getBitmapFromYuvReader(ImageReader reader) {
    if (null == reader) {
        Logger.i(TAG, "getBitmapFromYuvReader() reader is null return null");
        return null;
    }

    Image image = null;
    try {
        byte[] plane0Y = null;
        byte[] plane1WithU = null; // plane 1: U samples with V interleaved
        byte[] plane2WithV = null; // plane 2: V samples with U interleaved
        byte[] u = null; // the extracted U samples
        byte[] v = null; // the extracted V samples
        // acquire the captured frame
        image = reader.acquireLatestImage();
        if (null == image) {
            Logger.w(TAG, "getBitmapFromYuvReader() image is null");
            return null;
        }
        Image.Plane[] planes = image.getPlanes();
        Logger.i(TAG, "getBitmapFromYuvReader() planes.length:" + planes.length);
        if (planes.length != 3) {
            return null;
        }
        // Allocate the plane buffers. (The null check was meant to let the same arrays be
        // reused to reduce GC pressure, but as local variables they are always null here.)
        if (plane0Y == null || plane1WithU == null || plane2WithV == null) {
            plane0Y = new byte[planes[0].getBuffer().limit() - planes[0].getBuffer().position()];
            plane1WithU = new byte[planes[1].getBuffer().limit() - planes[1].getBuffer().position()];
            plane2WithV = new byte[planes[2].getBuffer().limit() - planes[2].getBuffer().position()];
        }
        for (int i = 0; i < planes.length; i++) {
            Image.Plane plane = planes[i];
            Buffer buffer = plane.getBuffer();
            // e.g. 1280*720
            Logger.i(TAG, "getBitmapFromYuvReader() i:" + i + " buffer.remaining:" + buffer.remaining()
                    + " getPixelStride:" + plane.getPixelStride() + " getRowStride:" + plane.getRowStride());
        }
        if (image.getPlanes()[0].getBuffer().remaining() == plane0Y.length) {
            planes[0].getBuffer().get(plane0Y);
            planes[1].getBuffer().get(plane1WithU);
            planes[2].getBuffer().get(plane2WithV);
            if (planes[1].getPixelStride() == 2) { // semi-planar: chroma samples are interleaved
                // Extract the U and V samples. The +1 is needed because the plane 1 and
                // plane 2 buffers are each one byte short (e.g. 460799 instead of 460800).
                u = new byte[(plane1WithU.length + 1) / 2];
                v = new byte[(plane2WithV.length + 1) / 2];
                int index_u = 0;
                int index_v = 0;
                for (int i = 0; i < plane1WithU.length; i++) {
                    if (0 == (i % 2)) {
                        u[index_u] = plane1WithU[i];
                        index_u++;
                    }
                }
                for (int j = 0; j < plane2WithV.length; j++) {
                    if (0 == (j % 2)) {
                        v[index_v] = plane2WithV[j];
                        index_v++;
                    }
                }
            } else { // planar (pixelStride == 1): each chroma plane is already tightly packed
                u = plane1WithU;
                v = plane2WithV;
            }
            byte[] arrayNV21 = getArrayNV21FromYuv(plane0Y, u, v);
            final int WIDTH = Config.SHOOT_PIC_WIDTH;
            final int HEIGHT = Config.SHOOT_PIC_HEIGHT;
            Logger.i(TAG, "getBitmapFromYuvReader() arrayNV21.length:" + arrayNV21.length);
            YuvImage yuvImage = new YuvImage(arrayNV21, ImageFormat.NV21, WIDTH, HEIGHT, null);
            ByteArrayOutputStream stream = new ByteArrayOutputStream();
            yuvImage.compressToJpeg(new Rect(0, 0, WIDTH, HEIGHT), 80, stream);
            Bitmap newBitmap = BitmapFactory.decodeByteArray(stream.toByteArray(), 0, stream.size());
            stream.close();
            return newBitmap;
        }
    } catch (Exception ex) {
        Logger.i(TAG, "getBitmapFromYuvReader() error:" + ex);
    } finally {
        // Close the Image so the ImageReader can deliver subsequent frames.
        if (image != null) {
            image.close();
        }
    }
    return null;
}
// Merge the Y, U and V data into an NV21-ordered byte array
private byte[] getArrayNV21FromYuv(byte[] y, byte[] u, byte[] v) {
    // Normally y.length is WIDTH*HEIGHT and u/v are each WIDTH*HEIGHT/4
    final int WIDTH = Config.SHOOT_PIC_WIDTH; // image width
    final int HEIGHT = Config.SHOOT_PIC_HEIGHT; // image height
    if (WIDTH * HEIGHT != y.length) {
        Logger.i(TAG, "getArrayNV21FromYuv() y length is error");
        return null;
    }
    if ((WIDTH * HEIGHT / 4) != u.length || (WIDTH * HEIGHT / 4) != v.length) {
        Logger.i(TAG, "getArrayNV21FromYuv() u or v length is error!");
        return null;
    }
    int lengthY = y.length;
    int lengthU = u.length;
    int lengthV = v.length;
    int newLength = lengthY + lengthU + lengthV;
    byte[] arrayNV21 = new byte[newLength];
    // Copy all of the Y data first
    System.arraycopy(y, 0, arrayNV21, 0, y.length);

    // Then interleave the V and U data (U and V must be the same length; the order is V U, V U, ...)
    for (int i = 0; i < v.length; i++) {
        int index = lengthY + i * 2;
        arrayNV21[index] = v[i];
    }

    for (int i = 0; i < u.length; i++) {
        int index = lengthY + i * 2 + 1;
        arrayNV21[index] = u[i];
    }
    Logger.i(TAG, "getArrayNV21FromYuv()");
    return arrayNV21;
}
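
As a usage example, the Bitmap returned above can be written straight to disk. This is a minimal sketch (the method and the output file are assumptions, not part of the original article); note that because getBitmapFromYuvReader() already went through a JPEG compression at quality 80, writing the Bitmap out as JPEG compresses a second time, so if only a file is needed it is cheaper to write the ByteArrayOutputStream from compressToJpeg() directly.

// Minimal sketch: persist the decoded Bitmap as a JPEG file.
private void saveBitmap(Bitmap bitmap, File file) {
    if (bitmap == null) {
        return;
    }
    try (FileOutputStream fos = new FileOutputStream(file)) {
        bitmap.compress(Bitmap.CompressFormat.JPEG, 90, fos);
        fos.flush();
    } catch (IOException e) {
        Logger.i(TAG, "saveBitmap() error:" + e);
    }
}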

https://blog.csdn.net/weixin_41937380/article/details/127758173
