由于在目标视频中有两路视频流合成,与在视频中加入文字稍微有所不同。
1、初始化一个Filter对象:
(1)调用avfilter_graph_alloc()函数创建一个Filter对象
(2)创建一个输入、一个输出的 AVFilterInOut 对象,并调用avfilter_graph_parse2()函数把filter描述信息解析到Filter图上。
(3)调用avfilter_graph_create_filter创建媒体流输入端的filter;
(4)调用avfilter_graph_create_filter创建图片输入端的filter;
(5)调用avfilter_graph_create_filter创建输出端的filter;
(6)调用avfilter_link()把输入端和输出端连接在一起。
(7)调用avfilter_graph_config()函数配置filter。
2、使用Filter对象:
(1)使用av_buffersrc_add_frame_flags()把解码的帧数据传给Filter。
(2)使用av_buffersink_get_frame()从Filter中把处理好的数据取出。
3、实例代码:
// Overlay a still picture onto every frame of a video stream and write the
// result to a new H.264-encoded output file.
//
// fileInputMediaStream : path or URL of the input video (local file or network stream)
// pictureFileName      : path of the picture (watermark) to overlay
// fileOutput           : path of the output media file
//
// Returns 0 in all cases; failures simply return early after a short sleep.
// NOTE(review): callers cannot distinguish success from failure — consider
// returning a negative error code instead.
//
// Relies on file-scope globals set up by the helper functions it calls
// (g_inputMediaStreamContext, g_inputPictureContext, g_decoderMediaStreamContext,
// g_outputEncContext, g_filter_graph, the buffersrc/buffersink contexts,
// g_outputContext, g_inputs/g_outputs).
int AddPictureToVideoStream(string fileInputMediaStream, string pictureFileName, string fileOutput)
{
    // Global library initialisation (register codecs/filters, etc.).
    Init();

    // Open the media stream input (local file or network URL).
    if (OpenMediaInputFile(&g_inputMediaStreamContext, (char *)fileInputMediaStream.c_str()) < 0)
    {
        this_thread::sleep_for(chrono::seconds(10));
        return 0;
    }
    // Open the picture input.
    if (OpenMediaInputFile(&g_inputPictureContext, (char *)pictureFileName.c_str()) < 0)
    {
        this_thread::sleep_for(chrono::seconds(10));
        return 0;
    }
    // Open decoders for both the media stream and the picture.
    int ret = InitDecodeCodec();
    if (ret < 0)
    {
        this_thread::sleep_for(chrono::seconds(10));
        return 0;
    }
    // Initialise the H.264 video encoder using the source video's dimensions.
    ret = InitVideoEncoderCodec(g_decoderMediaStreamContext->width, g_decoderMediaStreamContext->height);
    if (ret < 0)
    {
        this_thread::sleep_for(chrono::seconds(10));
        return 0;
    }

    // Build the overlay filter graph (two buffer sources + one buffer sink).
    InitFilter(g_outputEncContext);
    // Check validity and configure all the links and formats in the graph.
    ret = avfilter_graph_config(g_filter_graph, NULL);
    if (ret < 0)
    {
        return 0;
    }

    // Open the output media file.
    if (OpenOutputMediaFile((char *)fileOutput.c_str()) < 0)
    {
        this_thread::sleep_for(chrono::seconds(10));
        return 0;
    }

    auto pSrcMediaStreamFrame = av_frame_alloc();
    auto pSrcPictureFrame = av_frame_alloc();
    auto filterFrame = av_frame_alloc();
    auto pInputMediaStreamFrame = av_frame_alloc();
    int got_output = 0;
    int64_t firstPacketTime = 0;
    int64_t outLastTime = av_gettime();

    // Decode the picture once up front; the same decoded frame is re-pushed
    // into the filter graph for every video frame.
    auto packet_picture = ReadPacketFromPictureInputStream();
    DecodePicturePacket(packet_picture.get(), pSrcPictureFrame);
    firstPacketTime = av_gettime();

    while (true)
    {
        outLastTime = av_gettime();
        auto packet = ReadPacketFromInputMediaStream();
        if (!packet)
            break;

        // Only process packets of stream index 0 (the video stream).
        if (packet->stream_index == 0 && DecodeMediaStreamPacket(packet.get(), pSrcMediaStreamFrame))
        {
            // Create a new reference to the decoded frame so the buffer
            // source can consume it without stealing the decoder's buffers.
            av_frame_ref(pInputMediaStreamFrame, pSrcMediaStreamFrame);
            // Push one video frame into its buffer source.
            if (av_buffersrc_add_frame_flags(g_buffersrcMediaStreamContext, pInputMediaStreamFrame, AV_BUFFERSRC_FLAG_PUSH) >= 0)
            {
                // Keep the two filter inputs in sync: stamp the picture
                // frame with the video frame's pts before pushing it.
                pSrcPictureFrame->pts = pSrcMediaStreamFrame->pts;
                if (av_buffersrc_add_frame_flags(g_buffersrcPictureContext, pSrcPictureFrame, AV_BUFFERSRC_FLAG_PUSH) >= 0)
                {
                    // Pull a filtered (overlaid) frame from the sink, if one
                    // is available without requesting more input.
                    ret = av_buffersink_get_frame_flags(g_buffersinkContext, filterFrame, AV_BUFFERSINK_FLAG_NO_REQUEST);
                    if (ret >= 0)
                    {
                        // av_packet_free() already unrefs the packet, frees the
                        // struct and NULLs the pointer — no extra av_freep needed.
                        std::shared_ptr<AVPacket> pTmpPkt(static_cast<AVPacket*>(av_malloc(sizeof(AVPacket))), [](AVPacket *p) { av_packet_free(&p); });
                        av_init_packet(pTmpPkt.get());
                        pTmpPkt->data = NULL;
                        pTmpPkt->size = 0;
                        //
                        // Encode one frame of video. The output packet does not
                        // necessarily contain data for the most recent frame, as
                        // encoders can delay and reorder input frames internally.
                        // got_output is set to 1 if the packet is non-empty.
                        //
                        ret = avcodec_encode_video2(g_outputEncContext, pTmpPkt.get(), filterFrame, &got_output);
                        if (ret >= 0 && got_output)
                        {
                            ret = av_write_frame(g_outputContext, pTmpPkt.get());
                        }
                    }
                }
            }
        }
        // Drop our references; the frame structs are reused next iteration.
        av_frame_unref(filterFrame);
        av_frame_unref(pInputMediaStreamFrame);
        av_frame_unref(pSrcMediaStreamFrame);

        // Only produce 10 seconds (wall clock) of output.
        if ((outLastTime - firstPacketTime) > (10 * 1000 * 1000))
        {
            break;
        }
    }

    // Flush delayed frames out of the encoder (pass a NULL frame) so the
    // tail of the video is not silently dropped before the trailer.
    do
    {
        std::shared_ptr<AVPacket> pFlushPkt(static_cast<AVPacket*>(av_malloc(sizeof(AVPacket))), [](AVPacket *p) { av_packet_free(&p); });
        av_init_packet(pFlushPkt.get());
        pFlushPkt->data = NULL;
        pFlushPkt->size = 0;
        got_output = 0;
        if (avcodec_encode_video2(g_outputEncContext, pFlushPkt.get(), NULL, &got_output) < 0)
            break;
        if (got_output)
            av_write_frame(g_outputContext, pFlushPkt.get());
    } while (got_output);

    // Release every allocated frame (the original leaked pSrcMediaStreamFrame).
    av_frame_free(&pInputMediaStreamFrame);
    av_frame_free(&pSrcMediaStreamFrame);
    av_frame_free(&pSrcPictureFrame);
    av_frame_free(&filterFrame);

    if (g_outputs)
        avfilter_inout_free(&g_outputs);
    if (g_inputs)
        avfilter_inout_free(&g_inputs);
    avfilter_graph_free(&g_filter_graph);

    // Write any buffered packets and the container trailer.
    av_write_trailer(g_outputContext);
    this_thread::sleep_for(chrono::seconds(2));
    CloseInputContext();
    CloseOutputContext();
    return 0;
}
4、工程项目源文件下载:
在Debug – x86下编译运行。
项目下自带开发环境。
下载源文件