FFmpeg 区域视频增强与 D3D 视频播放区缩放笔记
1.区域视频增强
FFmpeg滤镜功能十分强大,用滤镜可以实现视频的区域增强功能。
用eq滤镜就可以实现亮度、对比度、饱和度等的常用视频增强功能。
推荐两篇写得不错的博文:
(1)ffmpeg综合应用示例(二)——为直播流添加特效 - 张晖的专栏 - 博客频道 - CSDN.NET;
(2)ffmpeg 滤镜及其效果 - 党玉涛 - 博客频道 - CSDN.NET
第(1)篇博客对于如何用代码来写滤镜讲得比较清楚,第(2)篇则列出了许多滤镜写法的例子。
参考第(1)篇博客,滤镜的代码如下:
设置滤镜:
/**
 * Build the libavfilter graph selected by the _filter member and store its
 * endpoints in the buffersrc_ctx / buffersink_ctx members.
 *
 * @param ifmt_ctx  opened input format context; stream 0 is assumed to be the
 *                  video stream (width/height/pix_fmt/time_base are read from
 *                  it) -- TODO confirm stream 0 is always video here.
 * @return 0 on success, -1 or a negative AVERROR code on failure.
 *
 * NOTE(review): the graph allocated here is intentionally left alive on
 * success (buffersrc_ctx/buffersink_ctx point into it), but a previous graph
 * from an earlier call is never freed, so switching filters leaks one graph
 * per switch. Consider keeping filter_graph as a class member.
 */
int FFmpeg_filter::apply_filters(AVFormatContext *ifmt_ctx)
{
    int ret = 0;
    AVFilterGraph *filter_graph = NULL;
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();

    /* do/while(0) gives a single exit point so every error path releases
     * inputs/outputs (and, on failure, the half-built graph). The original
     * code leaked all three on each early return. */
    do {
        if (!outputs) {
            printf("Cannot alloc output\n");
            ret = -1;
            break;
        }
        if (!inputs) {
            printf("Cannot alloc input\n");
            ret = -1;
            break;
        }
        filter_graph = avfilter_graph_alloc();
        if (!filter_graph) {
            printf("Cannot create filter graph\n");
            ret = -1;
            break;
        }

        /* buffer video source: the decoded frames from the decoder will be
         * inserted here. */
        char args[512];
        snprintf(args, sizeof(args),
                 "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
                 ifmt_ctx->streams[0]->codec->width,
                 ifmt_ctx->streams[0]->codec->height,
                 ifmt_ctx->streams[0]->codec->pix_fmt,
                 ifmt_ctx->streams[0]->time_base.num,
                 ifmt_ctx->streams[0]->time_base.den,
                 ifmt_ctx->streams[0]->codec->sample_aspect_ratio.num,
                 ifmt_ctx->streams[0]->codec->sample_aspect_ratio.den);
        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                           args, NULL, filter_graph);
        if (ret < 0) {
            printf("Cannot create buffer source\n");
            break;
        }

        /* buffer video sink: to terminate the filter chain. */
        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                           NULL, NULL, filter_graph);
        if (ret < 0) {
            printf("Cannot create buffer sink\n");
            break;
        }

        /* Endpoints for the filter graph. */
        outputs->name       = av_strdup("in");
        outputs->filter_ctx = buffersrc_ctx;
        outputs->pad_idx    = 0;
        outputs->next       = NULL;
        inputs->name        = av_strdup("out");
        inputs->filter_ctx  = buffersink_ctx;
        inputs->pad_idx     = 0;
        inputs->next        = NULL;

        /* Prebuilt filter descriptions, one per FILTER_* mode. */
        const char *filter_descr = "null";
        const char *filter_mirror =
            "crop=iw/2:ih:0:0,split[left][tmp];[tmp]hflip[right];[left]pad=iw*2[a];[a][right]overlay=w";
        const char *filter_watermark = "movie=logo.png[wm];[in][wm]overlay=5:5[out]";
        const char *filter_negate = "negate[out]";
        const char *filter_edge = "edgedetect[out]";
        const char *filter_split4 =
            "scale=iw/2:ih/2[in_tmp];[in_tmp]split=4[in_1][in_2][in_3][in_4];[in_1]pad=iw*2:ih*2[a];[a][in_2]overlay=w[b];[b][in_3]overlay=0:h[d];[d][in_4]overlay=w:h[out]";
        const char *filter_vintage = "curves=vintage";
        /* Brightness: float in [-1.0, 1.0], default "0". */
        const char *filter_brightness = "eq=brightness=0.5[out] ";
        /* Contrast: float in [-2.0, 2.0], default "1". */
        const char *filter_contrast = "eq=contrast=1.5[out] ";
        /* Saturation: float in [0.0, 3.0], default "1". */
        const char *filter_saturation = "eq=saturation=1.5[out] ";

        /* Combined eq filter built from the current member settings. */
        char filter_eq[512];
        float t_brightness_value = _brightness_value;
        float t_contrast_value   = _contrast_value;
        float t_saturation_value = _saturation_value;
        snprintf(filter_eq, sizeof(filter_eq),
                 "eq=brightness=%f:contrast=%f:saturation=%f",
                 t_brightness_value, t_contrast_value, t_saturation_value);
        printf("eq=brightness=%f:contrast=%f:saturation=%f \n",
               t_brightness_value, t_contrast_value, t_saturation_value);

        /* Region enhancement: crop the iWidth x iHeight rectangle at (x, y),
         * run eq on it, then overlay it back onto the original at the same
         * position. NOTE(review): region is hard-coded here. */
        int x = 50;
        int y = 60;
        int iWidth = 300;
        int iHeight = 300;
        char filter_test[512];
        snprintf(filter_test, sizeof(filter_test),
                 "[in]split[ori][tmp];[tmp]crop=%d:%d:%d:%d,eq=brightness=%f:contrast=%f:saturation=%f[eq_enhance];[ori][eq_enhance]overlay=%d:%d[out]",
                 iWidth, iHeight, x, y,
                 t_brightness_value, t_contrast_value, t_saturation_value, x, y);

        switch (_filter) {
            case FILTER_NULL:       filter_descr = "null";             break;
            case FILTER_MIRROR:     filter_descr = filter_mirror;      break;
            case FILTER_WATERMARK:  filter_descr = filter_watermark;   break;
            case FILTER_NEGATE:     filter_descr = filter_negate;      break;
            case FILTER_EDGE:       filter_descr = filter_edge;        break;
            case FILTER_SPLIT4:     filter_descr = filter_split4;      break;
            case FILTER_VINTAGE:    filter_descr = filter_vintage;     break;
            case FILTER_BRIGHTNESS: filter_descr = filter_brightness;  break;
            case FILTER_CONTRAST:   filter_descr = filter_contrast;    break;
            case FILTER_SATURATION: filter_descr = filter_saturation;  break;
            case FILTER_EQ:         filter_descr = filter_eq;          break;
            case FILTER_TEST:       filter_descr = filter_test;        break;
            default:                                                   break;
        }

        /* parse_ptr consumes (and may rewrite) inputs/outputs; the remainder
         * is freed below on every path. */
        ret = avfilter_graph_parse_ptr(filter_graph, filter_descr,
                                       &inputs, &outputs, NULL);
        if (ret < 0)
            break;
        ret = avfilter_graph_config(filter_graph, NULL);
    } while (0);

    /* Unified cleanup: always release the endpoint lists; on failure also
     * release the half-built graph (this invalidates the *_ctx members,
     * which is fine because the graph is unusable anyway). */
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    if (ret < 0 && filter_graph)
        avfilter_graph_free(&filter_graph);
    return ret < 0 ? ret : 0;
}
应用滤镜:
/* Main playback loop: read a packet, decode it, push the frame through the
 * filtergraph, convert with swscale and hand the result to the D3D renderer.
 * The critical section serializes against the thread that changes filter
 * settings (filter_change / apply_filters). */
while (av_read_frame(pFormatCtx, packet) >= 0) {
    EnterCriticalSection(&cs);
    if (packet->stream_index == videoindex) {
        ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
        if (ret < 0) {
            printf("Decode Error.\n");
            /* FIX: release the packet and the lock before returning — the
             * original returned with the critical section still held. */
            av_free_packet(packet);
            LeaveCriticalSection(&cs);
            return -1;
        }
        if (got_picture) {
            pFrame->pts = av_frame_get_best_effort_timestamp(pFrame);
            /* Rebuild the filtergraph only when the settings changed. */
            if (filter_change)
                apply_filters(pFormatCtx);
            filter_change = 0;
            /* push the decoded frame into the filtergraph */
            if (av_buffersrc_add_frame(buffersrc_ctx, pFrame) < 0) {
                printf("Error while feeding the filtergraph\n");
                /* FIX: clean up before leaving the loop (original broke out
                 * holding the lock and leaking the packet). */
                av_free_packet(packet);
                LeaveCriticalSection(&cs);
                break;
            }
            picref = av_frame_alloc();
            /* Drain every filtered frame available for this input frame. */
            while (1) {
                ret = av_buffersink_get_frame_flags(buffersink_ctx, picref, 0);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                    break;
                if (ret < 0) {
                    /* FIX: free frame/packet and drop the lock on error. */
                    av_frame_free(&picref);
                    av_free_packet(packet);
                    LeaveCriticalSection(&cs);
                    return ret;
                }
                if (picref) {
                    /* NOTE(review): allocating a SwsContext per frame is
                     * wasteful — sws_getCachedContext() would reuse it. */
                    img_convert_ctx = sws_getContext(
                        picref->width, picref->height,
                        (AVPixelFormat)picref->format,
                        pCodecCtx->width, pCodecCtx->height,
                        target_format, SWS_BICUBIC, NULL, NULL, NULL);
                    sws_scale(img_convert_ctx,
                              (const uint8_t* const*)picref->data,
                              picref->linesize, 0, pCodecCtx->height,
                              pFrameYUV->data, pFrameYUV->linesize);
                    _d3d_video_render.Render_Texture(pFrameYUV->data[0],
                                                     picref->width,
                                                     picref->height);
                    sws_freeContext(img_convert_ctx);
                }
                av_frame_unref(picref);
            }
            /* FIX: av_frame_unref() alone leaked one AVFrame struct per
             * packet — free the frame itself as well. */
            av_frame_free(&picref);
            SDL_Delay(10);
        }
    }
    av_free_packet(packet);
    LeaveCriticalSection(&cs);
}
apply_filters函数修改滤镜的设置。另外有两个比较重要的函数:av_buffersrc_add_frame函数和av_buffersink_get_frame_flags函数。至于区域增强,其实主要就是如下代码:
/* Region-enhancement filter description: split the input into [ori] and
 * [tmp], crop the iWidth x iHeight rectangle at (x, y) out of [tmp], apply
 * eq (brightness/contrast/saturation) to it, then overlay the enhanced
 * rectangle back onto [ori] at the same (x, y) position. */
snprintf(filter_test, sizeof(filter_test), "[in]split[ori][tmp];[tmp]crop=%d:%d:%d:%d,eq=brightness=%f:contrast=%f:saturation=%f[eq_enhance];[ori][eq_enhance]overlay=%d:%d[out]", iWidth, iHeight, x, y, t_brightness_value, t_contrast_value, t_saturation_value, x, y);
含义就是把输入流分成ori和tmp,把tmp从像素坐标(x,y)开始,以iWidth为宽,以iHeight为高的部分crop出来,使用eq滤镜后再叠加到ori对应位置。
2.D3D实现视频播放区的拉大缩小
用D3D的纹理可以做出许多非常有意思的效果。利用纹理变换可以轻松实现视频播放区的拉大缩小;视频旋转同样容易:只需修改相机的向上方向即可实现90°的旋转,而任意角度的旋转则还需要稍作计算。这部分我没有实现,就不多说了。