The camera is one of the most commonly used peripherals; it shows up in video streaming, video surveillance, and many other scenarios. There are several ways to capture camera image data: Qt's own APIs, DirectShow, OpenCV, or the APIs provided by FFMpeg (which, on Windows, also go through DirectShow under the hood). This article focuses on using the FFMpeg API to capture camera data.
As a demo of this approach, I built a simple camera-preview example.
Capturing camera frames with FFMpeg basically breaks down into these steps: enumerate the available devices, open the camera as a dshow input, find the video stream and open its decoder, then read and decode packets frame by frame.
First, get the list of cameras on the current machine, mainly their names. Here I use Qt's API for that:
// List the available cameras (this requires the Qt Multimedia module,
// i.e. QT += multimedia in the .pro file)
QList<QCameraInfo> cameras = QCameraInfo::availableCameras();
foreach (const QCameraInfo &cameraInfo, cameras)
{
    // description() is the human-readable device name that dshow expects
    QString cameraName = cameraInfo.description();
    // Add it to the ComboBox
    m_pComboBox->addItem(cameraName);
}
If you want to inspect a camera's capabilities in detail, you can use the FFmpeg command line:
ffmpeg -list_options true -f dshow -i video="BisonCam, NB Pro"
My camera here is named BisonCam, NB Pro.
The output lists every combination of resolution, frame rate, and pixel format the device supports.
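The device names themselves can also be enumerated from the command line, without going through Qt (assuming your FFmpeg build includes the dshow input device):
ffmpeg -list_devices true -f dshow -i dummy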
With that in hand, here is the code for opening the camera:
// Open the camera
bool CameraCapture::open(const QString& deviceName)
{
    m_avFrame = av_frame_alloc();
    AVInputFormat *inputFormat = av_find_input_format("dshow");
    AVDictionary *format_opts = nullptr;
    //av_dict_set_int(&format_opts, "rtbufsize", 3041280 * 10, 0);
    av_dict_set(&format_opts, "avioflags", "direct", 0);
    // These options must match a mode the camera actually supports
    // (verify with ffmpeg -list_options), otherwise opening will fail
    av_dict_set(&format_opts, "video_size", "1280x720", 0);
    av_dict_set(&format_opts, "framerate", "30", 0);
    av_dict_set(&format_opts, "vcodec", "mjpeg", 0);
    m_pFormatContent = avformat_alloc_context();
    // dshow expects the URL in the form "video=<device name>"
    QString urlString = QString("video=") + deviceName;
    // Open the input device
    int result = avformat_open_input(&m_pFormatContent, urlString.toLocal8Bit().data(), inputFormat, &format_opts);
    av_dict_free(&format_opts);
    if (result < 0)
    {
        qDebug() << "AVFormat Open Input Error!";
        return false;
    }
    result = avformat_find_stream_info(m_pFormatContent, nullptr);
    if (result < 0)
    {
        qDebug() << "AVFormat Find Stream Info Error!";
        return false;
    }
    // Find the video stream index
    int count = m_pFormatContent->nb_streams;
    for (int i = 0; i < count; ++i)
    {
        if (m_pFormatContent->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            m_nVideoStreamIndex = i;
            break;
        }
    }
    if (m_nVideoStreamIndex < 0)
        return false;
    // Find the decoder (streams[i]->codec is deprecated in newer FFmpeg;
    // see the notes below)
    m_pCaptureContext = m_pFormatContent->streams[m_nVideoStreamIndex]->codec;
    AVCodec* codec = avcodec_find_decoder(m_pCaptureContext->codec_id);
    if (codec == nullptr)
        return false;
    // Open the decoder
    if (avcodec_open2(m_pCaptureContext, codec, nullptr) != 0)
        return false;
    // Record size and pixel format
    m_pCameraData.m_nWidth = m_pCaptureContext->width;
    m_pCameraData.m_nHeight = m_pCaptureContext->height;
    AVPixelFormat format = m_pCaptureContext->pix_fmt;
    format = convertDeprecatedFormat(format);
    if (m_isUsedSwsScale)
    {
        // RGB conversion was requested explicitly, so the output is RGB24
        m_pCameraData.m_pixelFormat = CameraData::PIXFORMAT_RGB24;
        return true;
    }
    if (format == AV_PIX_FMT_YUV420P)
        m_pCameraData.m_pixelFormat = CameraData::PIXFORMAT_YUV420P;
    else if (format == AV_PIX_FMT_YUV422P)
        m_pCameraData.m_pixelFormat = CameraData::PIXFORMAT_YUV422P;
    else if (format == AV_PIX_FMT_YUV444P)
        m_pCameraData.m_pixelFormat = CameraData::PIXFORMAT_YUV444P;
    else
    {
        // A format we cannot render directly: fall back to SWS conversion
        m_pCameraData.m_pixelFormat = CameraData::PIXFORMAT_RGB24;
        m_isUsedSwsScale = true;
    }
    return true;
}
A few points worth noting here:
- The video_size, framerate, and vcodec options must match a mode the camera actually supports (check with the -list_options command shown earlier), otherwise avformat_open_input fails.
- The device name must match the system's name exactly, and dshow expects it prefixed with video=.
- Options that avformat_open_input does not consume remain in the dictionary, so format_opts should be released with av_dict_free afterwards.
- The streams[i]->codec field used above has long been deprecated and was removed in FFmpeg 5.0; codecpar is the replacement.
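On FFmpeg builds where streams[i]->codec is gone, the decoder context is built from the stream's codecpar instead. A minimal sketch of the equivalent setup, reusing the member names from above:

// Build a decoder context from codecpar (available since FFmpeg 3.1)
AVCodecParameters* par = m_pFormatContent->streams[m_nVideoStreamIndex]->codecpar;
const AVCodec* codec = avcodec_find_decoder(par->codec_id);
if (codec == nullptr)
    return false;
m_pCaptureContext = avcodec_alloc_context3(codec);
// Copy the stream parameters (size, pixel format, extradata, ...) into the context
if (avcodec_parameters_to_context(m_pCaptureContext, par) < 0)
    return false;
// Open the decoder as before
if (avcodec_open2(m_pCaptureContext, codec, nullptr) != 0)
    return false;

A context allocated this way must later be released with avcodec_free_context, rather than calling avcodec_close on the stream's context.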
Reading and decoding a frame comes next; here is the code:
// Capture one frame
bool CameraCapture::capture(void)
{
    AVPacket pkt;
    // Read one packet from the device
    int result = av_read_frame(m_pFormatContent, &pkt);
    if (result < 0)
        return false;
    if (pkt.stream_index != m_nVideoStreamIndex)
    {
        av_packet_unref(&pkt);
        return false;
    }
    // Decode the video packet
    result = avcodec_send_packet(m_pCaptureContext, &pkt);
    if (result != 0)
    {
        av_packet_unref(&pkt);
        return false;
    }
    result = avcodec_receive_frame(m_pCaptureContext, m_avFrame);
    if (result != 0)
    {
        // AVERROR(EAGAIN) just means the decoder needs more input;
        // we simply try again with the next packet
        av_packet_unref(&pkt);
        return false;
    }
    // Convert to RGB24 if requested
    if (m_isUsedSwsScale)
    {
        // Allocate the RGB frame lazily on the first decoded frame
        if (m_pRGBFrame == nullptr)
        {
            m_pRGBFrame = av_frame_alloc();
            m_pRGBFrame->width = m_avFrame->width;
            m_pRGBFrame->height = m_avFrame->height;
            // av_image_alloc fills in both data[] and linesize[];
            // with align = 1, linesize[0] is exactly width * 3
            av_image_alloc(m_pRGBFrame->data, m_pRGBFrame->linesize,
                m_pRGBFrame->width, m_pRGBFrame->height, AV_PIX_FMT_RGB24, 1);
        }
        // Convert to RGB24
        frameToRgbImage(m_pRGBFrame, m_avFrame);
        // Publish the data
        m_pMutex.lock();
        m_pCameraData.m_cameraData.clear();
        m_pCameraData.m_cameraData.append((char*)m_pRGBFrame->data[0],
            m_pRGBFrame->width * m_pRGBFrame->height * 3);
        m_pCameraData.m_pixelFormat = CameraData::PIXFORMAT_RGB24;
        m_pMutex.unlock();
    }
    else
    {
        disposeYUVData();
    }
    av_packet_unref(&pkt);
    return true;
}
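One detail of the send/receive API: a single packet can, in general, produce zero or more frames, so avcodec_receive_frame is usually called in a loop until it reports AVERROR(EAGAIN). A camera typically delivers one frame per packet, so the single call above works, but a more general receive loop would look roughly like this sketch:

// Generic drain loop: pull frames until the decoder asks for more input
int ret = avcodec_send_packet(m_pCaptureContext, &pkt);
while (ret >= 0)
{
    ret = avcodec_receive_frame(m_pCaptureContext, m_avFrame);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        break;   // needs another packet, or the stream ended
    if (ret < 0)
        break;   // a real decoding error
    // ... hand m_avFrame to the consumer here ...
}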
About rendering
The data in an AVFrame may be RGB24 or one of the YUV formats; the frame's format field records which pixel format the image holds.
I handle the data according to its type: YUV data is rendered directly with OpenGL, while anything else is converted to RGB with SWS and displayed that way. (Letting the GPU do the YUV-to-RGB conversion during rendering is more efficient.) SDL is another option for rendering.
Using SWS is also straightforward:
void CameraCapture::frameToRgbImage(AVFrame* pDest, AVFrame* frame)
{
    // Create the SWS context on first use and cache it; this assumes the
    // input size and format stay fixed while the camera is open
    if (m_pSwsContext == nullptr)
    {
        m_pSwsContext = sws_getContext(frame->width, frame->height,
            convertDeprecatedFormat((AVPixelFormat)(frame->format)),
            frame->width, frame->height, AV_PIX_FMT_RGB24,
            SWS_BILINEAR, nullptr, nullptr, nullptr);
    }
    sws_scale(m_pSwsContext, frame->data, frame->linesize, 0, frame->height,
        pDest->data, pDest->linesize);
}
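If the input parameters could change mid-stream, libswscale's sws_getCachedContext is a convenient alternative: it reuses the context when the parameters match and transparently recreates it when they do not. A sketch of the same call using it:

// Recreates the context only when the input/output parameters change
m_pSwsContext = sws_getCachedContext(m_pSwsContext,
    frame->width, frame->height,
    convertDeprecatedFormat((AVPixelFormat)(frame->format)),
    frame->width, frame->height, AV_PIX_FMT_RGB24,
    SWS_BILINEAR, nullptr, nullptr, nullptr);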
For YUV data, note that each row stored in the frame can be larger than the visible row: at a resolution of 1280x720, each line may carry extra padding bytes at the end. linesize holds the size in bytes of one row (the stride) for each plane, and the Y plane has Height rows in total, so a plane must be copied row by row whenever its stride differs from its visible width. Here is my code for packing the planes into one contiguous buffer:
void CameraCapture::disposeYUVData(void)
{
    QMutexLocker locker(&m_pMutex);
    m_pCameraData.m_cameraData.clear();
    AVPixelFormat pixFormat = convertDeprecatedFormat((AVPixelFormat)m_avFrame->format);
    // Append the Y plane
    if (m_avFrame->linesize[0] == m_avFrame->width)
    {
        m_pCameraData.m_cameraData.append((char*)m_avFrame->data[0],
            m_avFrame->linesize[0] * m_avFrame->height);
    }
    else
    {
        // Rows are padded: copy them one at a time, skipping the padding
        for (int i = 0; i < m_avFrame->height; ++i)
        {
            m_pCameraData.m_cameraData.append(
                (char*)m_avFrame->data[0] + i * m_avFrame->linesize[0], m_avFrame->width);
        }
    }
    // Work out the chroma plane size from the pixel format
    int uDataWidth = m_avFrame->width;
    int uDataHeight = m_avFrame->height;
    if (pixFormat == AV_PIX_FMT_YUV420P)
    {
        uDataWidth = uDataWidth / 2;
        uDataHeight = uDataHeight / 2;
    }
    else if (pixFormat == AV_PIX_FMT_YUV422P)
        uDataWidth = uDataWidth / 2;
    // Append the U plane
    if (m_avFrame->linesize[1] == uDataWidth)
    {
        m_pCameraData.m_cameraData.append((char*)m_avFrame->data[1],
            m_avFrame->linesize[1] * uDataHeight);
    }
    else
    {
        for (int i = 0; i < uDataHeight; ++i)
        {
            m_pCameraData.m_cameraData.append(
                (char*)m_avFrame->data[1] + i * m_avFrame->linesize[1], uDataWidth);
        }
    }
    // Append the V plane (same size as the U plane)
    int vDataWidth = uDataWidth;
    int vDataHeight = uDataHeight;
    if (m_avFrame->linesize[2] == vDataWidth)
    {
        m_pCameraData.m_cameraData.append((char*)m_avFrame->data[2],
            m_avFrame->linesize[2] * vDataHeight);
    }
    else
    {
        for (int i = 0; i < vDataHeight; ++i)
        {
            m_pCameraData.m_cameraData.append(
                (char*)m_avFrame->data[2] + i * m_avFrame->linesize[2], vDataWidth);
        }
    }
}
data[0] and linesize[0] hold the Y plane and the size of one of its rows
data[1] and linesize[1] hold the U plane and the size of one of its rows
data[2] and linesize[2] hold the V plane and the size of one of its rows
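Incidentally, libavutil already ships a helper that does this packing: av_image_copy_to_buffer copies every plane into one tightly packed buffer while honoring linesize. A sketch of the same packing built on it (av_image_get_buffer_size computes the required size):

// Pack all planes of m_avFrame into one contiguous, tightly packed buffer
AVPixelFormat fmt = convertDeprecatedFormat((AVPixelFormat)m_avFrame->format);
int size = av_image_get_buffer_size(fmt, m_avFrame->width, m_avFrame->height, 1);
QByteArray buffer(size, 0);
av_image_copy_to_buffer((uint8_t*)buffer.data(), size,
    m_avFrame->data, m_avFrame->linesize,
    fmt, m_avFrame->width, m_avFrame->height, 1);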
The function convertDeprecatedFormat maps deprecated pixel formats to their current equivalents; for example, AV_PIX_FMT_YUVJ420P (the full-range JPEG variant) has the same memory layout as AV_PIX_FMT_YUV420P.
The implementation:
AVPixelFormat CameraCapture::convertDeprecatedFormat(enum AVPixelFormat format)
{
    switch (format)
    {
    case AV_PIX_FMT_YUVJ420P:
        return AV_PIX_FMT_YUV420P;
    case AV_PIX_FMT_YUVJ422P:
        return AV_PIX_FMT_YUV422P;
    case AV_PIX_FMT_YUVJ444P:
        return AV_PIX_FMT_YUV444P;
    case AV_PIX_FMT_YUVJ440P:
        return AV_PIX_FMT_YUV440P;
    default:
        return format;
    }
}
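One caveat when folding the J variants away: they also mean the samples are full range (0-255), while the non-J formats are nominally limited range. If the converted RGB looks washed out or too dark, the range can be declared explicitly on the SWS context; a sketch using sws_setColorspaceDetails (contrast and saturation are 16.16 fixed-point, so 1 << 16 means 1.0):

// Declare full-range input and full-range RGB output on the cached context
const int* coeffs = sws_getCoefficients(SWS_CS_ITU601);
sws_setColorspaceDetails(m_pSwsContext,
    coeffs, 1,                 // source: BT.601 coefficients, full range
    coeffs, 1,                 // destination: full range
    0, 1 << 16, 1 << 16);      // brightness, contrast, saturation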
Complete code:
Header CameraCapture.h
#ifndef CAMERACAPTURE_H
#define CAMERACAPTURE_H

#include <QObject>
#include <atomic>
#include <QMutex>
#include <QMutexLocker>
#include "audiovideocore_global.h"

extern "C"
{
#include <libavdevice/avdevice.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>
#include <libavutil/parseutils.h>
}

class CameraCapture;

class AUDIOVIDEOCORESHARED_EXPORT CameraData
{
public:
    enum PixelFormat
    {
        PIXFORMAT_YUV420P,
        PIXFORMAT_YUV422P,
        PIXFORMAT_YUV444P,
        PIXFORMAT_RGB24
    };

public:
    CameraData(QMutex *mutex)
        : m_pMutex(mutex){
        //qRegisterMetaType<CameraData>("CameraData");
    }
    ~CameraData(){}

    int getWidth(void) const {return m_nWidth;}
    int getHeight(void) const {return m_nHeight;}
    QByteArray getCameraData(void) {
        QMutexLocker locker(m_pMutex);
        return m_cameraData;
    }
    PixelFormat getPixelFormat(void) const {return m_pixelFormat;}

    friend class CameraCapture;

private:
    QByteArray m_cameraData;
    std::atomic<int> m_nWidth {0};
    std::atomic<int> m_nHeight {0};
    PixelFormat m_pixelFormat;
    QMutex* m_pMutex = nullptr;
};

class CameraCapture : public QObject
{
public:
    CameraCapture(QObject* parent = nullptr);
    virtual ~CameraCapture();

    // Open the camera
    bool open(const QString& deviceName);
    // Close the camera
    void close(void);
    // Capture one frame
    bool capture(void);
    // Whether to convert to RGB with SWS
    void setUsedSwsScaleEnabled(bool isEnabled);
    // Access the captured data
    const CameraData& getCameraData(void) {return m_pCameraData;}

private:
    AVFrame* m_avFrame = nullptr;
    AVFrame* m_pRGBFrame = nullptr;
    int m_nVideoStreamIndex = -1;
    AVFormatContext* m_pFormatContent = nullptr;
    AVCodecContext* m_pCaptureContext = nullptr;
    SwsContext* m_pSwsContext = nullptr;
    CameraData m_pCameraData;
    AVPixelFormat m_pixelFormat;
    QMutex m_pMutex;
    bool m_isUsedSwsScale = false;

    void frameToRgbImage(AVFrame* pDest, AVFrame* frame);
    AVPixelFormat convertDeprecatedFormat(enum AVPixelFormat format);
    // Pack the YUV planes into a single buffer
    void disposeYUVData(void);
};

#endif
Source file CameraCapture.cpp
#include "CameraCapture.h"
#include <QDebug>
CameraCapture::CameraCapture(QObject* parent)
:QObject(parent),
m_pCameraData(&m_pMutex)
{
av_register_all();
avdevice_register_all();
}
CameraCapture::~CameraCapture()
{
}
// Open the camera
bool CameraCapture::open(const QString& deviceName)
{
    m_avFrame = av_frame_alloc();
    AVInputFormat *inputFormat = av_find_input_format("dshow");
    AVDictionary *format_opts = nullptr;
    //av_dict_set_int(&format_opts, "rtbufsize", 3041280 * 10, 0);
    av_dict_set(&format_opts, "avioflags", "direct", 0);
    // These options must match a mode the camera actually supports
    // (verify with ffmpeg -list_options), otherwise opening will fail
    av_dict_set(&format_opts, "video_size", "1280x720", 0);
    av_dict_set(&format_opts, "framerate", "30", 0);
    av_dict_set(&format_opts, "vcodec", "mjpeg", 0);
    m_pFormatContent = avformat_alloc_context();
    // dshow expects the URL in the form "video=<device name>"
    QString urlString = QString("video=") + deviceName;
    // Open the input device
    int result = avformat_open_input(&m_pFormatContent, urlString.toLocal8Bit().data(), inputFormat, &format_opts);
    av_dict_free(&format_opts);
    if (result < 0)
    {
        qDebug() << "AVFormat Open Input Error!";
        return false;
    }
    result = avformat_find_stream_info(m_pFormatContent, nullptr);
    if (result < 0)
    {
        qDebug() << "AVFormat Find Stream Info Error!";
        return false;
    }
    // Find the video stream index
    int count = m_pFormatContent->nb_streams;
    for (int i = 0; i < count; ++i)
    {
        if (m_pFormatContent->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            m_nVideoStreamIndex = i;
            break;
        }
    }
    if (m_nVideoStreamIndex < 0)
        return false;
    // Find the decoder (streams[i]->codec is deprecated and was removed in
    // FFmpeg 5.0; codecpar is the replacement, see the sketch earlier)
    m_pCaptureContext = m_pFormatContent->streams[m_nVideoStreamIndex]->codec;
    AVCodec* codec = avcodec_find_decoder(m_pCaptureContext->codec_id);
    if (codec == nullptr)
        return false;
    // Open the decoder
    if (avcodec_open2(m_pCaptureContext, codec, nullptr) != 0)
        return false;
    // Record size and pixel format
    m_pCameraData.m_nWidth = m_pCaptureContext->width;
    m_pCameraData.m_nHeight = m_pCaptureContext->height;
    AVPixelFormat format = m_pCaptureContext->pix_fmt;
    format = convertDeprecatedFormat(format);
    if (m_isUsedSwsScale)
    {
        // RGB conversion was requested explicitly, so the output is RGB24
        m_pCameraData.m_pixelFormat = CameraData::PIXFORMAT_RGB24;
        return true;
    }
    if (format == AV_PIX_FMT_YUV420P)
        m_pCameraData.m_pixelFormat = CameraData::PIXFORMAT_YUV420P;
    else if (format == AV_PIX_FMT_YUV422P)
        m_pCameraData.m_pixelFormat = CameraData::PIXFORMAT_YUV422P;
    else if (format == AV_PIX_FMT_YUV444P)
        m_pCameraData.m_pixelFormat = CameraData::PIXFORMAT_YUV444P;
    else
    {
        // A format we cannot render directly: fall back to SWS conversion
        m_pCameraData.m_pixelFormat = CameraData::PIXFORMAT_RGB24;
        m_isUsedSwsScale = true;
    }
    return true;
}
// Close the camera
void CameraCapture::close(void)
{
    sws_freeContext(m_pSwsContext);
    m_pSwsContext = nullptr;
    av_frame_free(&m_avFrame);   // also resets the pointer to nullptr
    if (m_pRGBFrame)
        av_freep(&m_pRGBFrame->data[0]);   // this buffer came from av_image_alloc
    av_frame_free(&m_pRGBFrame);
    avcodec_close(m_pCaptureContext);
    avformat_close_input(&m_pFormatContent);
}
void CameraCapture::setUsedSwsScaleEnabled(bool isEnabled)
{
    m_isUsedSwsScale = isEnabled;
}
void CameraCapture::frameToRgbImage(AVFrame* pDest, AVFrame* frame)
{
    // Create the SWS context on first use and cache it; this assumes the
    // input size and format stay fixed while the camera is open
    if (m_pSwsContext == nullptr)
    {
        m_pSwsContext = sws_getContext(frame->width, frame->height,
            convertDeprecatedFormat((AVPixelFormat)(frame->format)),
            frame->width, frame->height, AV_PIX_FMT_RGB24,
            SWS_BILINEAR, nullptr, nullptr, nullptr);
    }
    sws_scale(m_pSwsContext, frame->data, frame->linesize, 0, frame->height,
        pDest->data, pDest->linesize);
}
AVPixelFormat CameraCapture::convertDeprecatedFormat(enum AVPixelFormat format)
{
    switch (format)
    {
    case AV_PIX_FMT_YUVJ420P:
        return AV_PIX_FMT_YUV420P;
    case AV_PIX_FMT_YUVJ422P:
        return AV_PIX_FMT_YUV422P;
    case AV_PIX_FMT_YUVJ444P:
        return AV_PIX_FMT_YUV444P;
    case AV_PIX_FMT_YUVJ440P:
        return AV_PIX_FMT_YUV440P;
    default:
        return format;
    }
}
void CameraCapture::disposeYUVData(void)
{
    QMutexLocker locker(&m_pMutex);
    m_pCameraData.m_cameraData.clear();
    AVPixelFormat pixFormat = convertDeprecatedFormat((AVPixelFormat)m_avFrame->format);
    // Append the Y plane
    if (m_avFrame->linesize[0] == m_avFrame->width)
    {
        m_pCameraData.m_cameraData.append((char*)m_avFrame->data[0],
            m_avFrame->linesize[0] * m_avFrame->height);
    }
    else
    {
        // Rows are padded: copy them one at a time, skipping the padding
        for (int i = 0; i < m_avFrame->height; ++i)
        {
            m_pCameraData.m_cameraData.append(
                (char*)m_avFrame->data[0] + i * m_avFrame->linesize[0], m_avFrame->width);
        }
    }
    // Work out the chroma plane size from the pixel format
    int uDataWidth = m_avFrame->width;
    int uDataHeight = m_avFrame->height;
    if (pixFormat == AV_PIX_FMT_YUV420P)
    {
        uDataWidth = uDataWidth / 2;
        uDataHeight = uDataHeight / 2;
    }
    else if (pixFormat == AV_PIX_FMT_YUV422P)
        uDataWidth = uDataWidth / 2;
    // Append the U plane
    if (m_avFrame->linesize[1] == uDataWidth)
    {
        m_pCameraData.m_cameraData.append((char*)m_avFrame->data[1],
            m_avFrame->linesize[1] * uDataHeight);
    }
    else
    {
        for (int i = 0; i < uDataHeight; ++i)
        {
            m_pCameraData.m_cameraData.append(
                (char*)m_avFrame->data[1] + i * m_avFrame->linesize[1], uDataWidth);
        }
    }
    // Append the V plane (same size as the U plane)
    int vDataWidth = uDataWidth;
    int vDataHeight = uDataHeight;
    if (m_avFrame->linesize[2] == vDataWidth)
    {
        m_pCameraData.m_cameraData.append((char*)m_avFrame->data[2],
            m_avFrame->linesize[2] * vDataHeight);
    }
    else
    {
        for (int i = 0; i < vDataHeight; ++i)
        {
            m_pCameraData.m_cameraData.append(
                (char*)m_avFrame->data[2] + i * m_avFrame->linesize[2], vDataWidth);
        }
    }
}
// Capture one frame
bool CameraCapture::capture(void)
{
    AVPacket pkt;
    // Read one packet from the device
    int result = av_read_frame(m_pFormatContent, &pkt);
    if (result < 0)
        return false;
    if (pkt.stream_index != m_nVideoStreamIndex)
    {
        av_packet_unref(&pkt);
        return false;
    }
    // Decode the video packet
    result = avcodec_send_packet(m_pCaptureContext, &pkt);
    if (result != 0)
    {
        av_packet_unref(&pkt);
        return false;
    }
    result = avcodec_receive_frame(m_pCaptureContext, m_avFrame);
    if (result != 0)
    {
        // AVERROR(EAGAIN) just means the decoder needs more input;
        // we simply try again with the next packet
        av_packet_unref(&pkt);
        return false;
    }
    // Convert to RGB24 if requested
    if (m_isUsedSwsScale)
    {
        // Allocate the RGB frame lazily on the first decoded frame
        if (m_pRGBFrame == nullptr)
        {
            m_pRGBFrame = av_frame_alloc();
            m_pRGBFrame->width = m_avFrame->width;
            m_pRGBFrame->height = m_avFrame->height;
            // av_image_alloc fills in both data[] and linesize[];
            // with align = 1, linesize[0] is exactly width * 3
            av_image_alloc(m_pRGBFrame->data, m_pRGBFrame->linesize,
                m_pRGBFrame->width, m_pRGBFrame->height, AV_PIX_FMT_RGB24, 1);
        }
        // Convert to RGB24
        frameToRgbImage(m_pRGBFrame, m_avFrame);
        // Publish the data
        m_pMutex.lock();
        m_pCameraData.m_cameraData.clear();
        m_pCameraData.m_cameraData.append((char*)m_pRGBFrame->data[0],
            m_pRGBFrame->width * m_pRGBFrame->height * 3);
        m_pCameraData.m_pixelFormat = CameraData::PIXFORMAT_RGB24;
        m_pMutex.unlock();
    }
    else
    {
        disposeYUVData();
    }
    av_packet_unref(&pkt);
    return true;
}
The capture runs in its own thread; here is the driving code:
Header CameraCaptureThread.h
#ifndef CAMERACAPTURETHREAD_H
#define CAMERACAPTURETHREAD_H

#include <QThread>
#include "CameraCapture.h"
#include "audiovideocore_global.h"

class AUDIOVIDEOCORESHARED_EXPORT CameraCaptureThread : public QThread
{
    Q_OBJECT

public:
    CameraCaptureThread(QObject* parent = nullptr);
    virtual ~CameraCaptureThread();

    void run(void) override;

    // Open the camera
    bool openCamera(const QString& cameraName);
    // Close the camera
    void closeCamera(void);
    // Whether to convert to RGB with SWS
    void setUsedSwsScaleEnabled(bool isEnabled){
        m_pCameraCapture->setUsedSwsScaleEnabled(isEnabled);
    }
    // Access the captured data
    const CameraData& getCameraData(void) {return m_pCameraCapture->getCameraData();}

private:
    CameraCapture* m_pCameraCapture = nullptr;

signals:
    void needUpdate();
};

#endif
Source file CameraCaptureThread.cpp
#include "CameraCaptureThread.h"
CameraCaptureThread::CameraCaptureThread(QObject* parent)
:QThread(parent)
{
m_pCameraCapture = new CameraCapture(this);
}
CameraCaptureThread::~CameraCaptureThread()
{
closeCamera();
}
void CameraCaptureThread::run(void)
{
while (!this->isInterruptionRequested())
{
// 获取摄像头数据
m_pCameraCapture->capture();
// 同步通知显示
emit needUpdate();
}
}
bool CameraCaptureThread::openCamera(const QString& cameraName)
{
bool isOpened = m_pCameraCapture->open(cameraName);
if (isOpened)
this->start();
return isOpened;
}
void CameraCaptureThread::closeCamera(void)
{
if (this->isRunning())
{
this->requestInterruption();
this->wait();
m_pCameraCapture->close();
}
}
To open the camera, simply call openCamera, then connect the needUpdate signal to a slot that refreshes the display. Since needUpdate is emitted from the capture thread, Qt automatically queues the call into the UI thread; a wiring sketch is shown below.
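A minimal sketch of the wiring (assuming this runs in the Widget constructor; m_pCameraCaptureControl is the CameraCaptureThread member used in the slot below):

// Create the capture thread and connect its signal to the refresh slot
m_pCameraCaptureControl = new CameraCaptureThread(this);
connect(m_pCameraCaptureControl, &CameraCaptureThread::needUpdate,
    this, &Widget::onNeedUpdate);   // cross-thread, so Qt queues the call
// Open the camera chosen in the ComboBox; this also starts the thread
if (!m_pCameraCaptureControl->openCamera(m_pComboBox->currentText()))
    qDebug() << "Failed to open camera!";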
Here is the slot I connected:
void Widget::onNeedUpdate(void)
{
    const CameraData& cameraData = m_pCameraCaptureControl->getCameraData();
    // getCameraData() locks the mutex internally, so the const is cast away here
    CameraData& tempCameraData = (CameraData&)cameraData;
    QByteArray byteArray = tempCameraData.getCameraData();
    int width = cameraData.getWidth();
    int height = cameraData.getHeight();
    // Split the packed buffer back into Y/U/V plane pointers.
    // These offsets assume YUV422P (the U plane is width/2 x height);
    // for YUV420P the V plane would start at width * height * 5 / 4
    uchar* pData[4] = {0};
    pData[0] = (uchar*)byteArray.constData();
    pData[1] = (uchar*)byteArray.constData() + width * height;
    pData[2] = (uchar*)byteArray.constData() + width * height + width / 2 * height;
    // Render the YUV data
    m_pImageViewer->setYUVData(pData, width, height);
}
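If SWS conversion is enabled, the buffer holds RGB24 instead and can be displayed without OpenGL at all. A minimal sketch, assuming a QLabel member named m_pLabel (the copy() is needed because the QImage only wraps byteArray's memory):

// Wrap the packed RGB24 buffer in a QImage and display it on a QLabel
QImage image((const uchar*)byteArray.constData(),
    width, height, width * 3, QImage::Format_RGB888);
m_pLabel->setPixmap(QPixmap::fromImage(image.copy()));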
Rendering YUV data with OpenGL will be covered in a later article.