FFmpeg Comprehensive Application Examples (5): Merging Multiple Video Streams
This example merges several sub-videos into a single video, arranged spatially according to configuration parameters, and can apply effects to the video and audio of each input. It demonstrates how to use avfilter programmatically to merge videos and attach a variety of filters, which makes it a fairly comprehensive exercise.
The final result: four videos are merged into a split-screen layout, with an edge-detection effect applied to the first video and a negate (color-inversion) effect applied to the last.
In the earlier article "FFmpeg Detailed Tutorial (2): Adding Filters to a Live Stream", we covered how to use avfilter to apply effects to a single video, and also how a fixed, hand-written filter string can split one input into four copies and tile them into a 2x2 grid. All of that, however, was limited to a single input stream. This article extends that work to genuine multi-input processing, with the freedom to add per-input effects, scaling, and so on.
First we define two structs. InputFile describes one input video: its file name, its position index, and the video and audio effects to apply to it. GlobalContext describes the output video: width, height, and bit rate. Its grid_num field selects the merge geometry: grid_num = 4 gives a 2x2 grid, grid_num = 9 a 3x3 grid, and so on. video_num is the number of inputs; it may be smaller than grid_num, which corresponds to the case where some inputs are scaled up to cover more than one cell, as illustrated below.

(Figure: normal case, one input per cell)

(Figure: with scaling, one input covers several cells)

The current code implements only the normal case, but it leaves an interface for this extension.
typedef struct InputFile{
    const char* filenames;
    /*
     * position index
     * 0 - 1 - 2
     * 3 - 4 - 5
     * 6 - 7 - 8
     * ...
     */
    uint32_t video_idx;
    //scale level, 0 means keep the same
    //uint32_t video_expand;
    uint32_t video_effect;
    uint32_t audio_effect;
} InputFile;
InputFile* inputfiles;

typedef struct GlobalContext{
    //always a square, such as 2x2, 3x3
    uint32_t grid_num;
    uint32_t video_num;
    uint32_t enc_width;
    uint32_t enc_height;
    uint32_t enc_bit_rate;
    InputFile* input_file;
    const char* outfilename;
} GlobalContext;
GlobalContext* global_ctx;
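To make the fields concrete, here is a hypothetical configuration sketch for the 2x2 demo described at the beginning of the article. The file names are placeholders; VFX_NULL, VFX_EDGE and VFX_NEGATE are the effect constants consumed by init_spec_filter() later in this article; audio_effect is left at 0 because the audio path below only mixes the inputs.

static InputFile demo_files[4] = {
    /* filename     video_idx  video_effect  audio_effect */
    { "test1.mp4",  0,         VFX_EDGE,     0 },  /* top-left: edge detection */
    { "test2.mp4",  1,         VFX_NULL,     0 },  /* top-right */
    { "test3.mp4",  2,         VFX_NULL,     0 },  /* bottom-left */
    { "test4.mp4",  3,         VFX_NEGATE,   0 },  /* bottom-right: negate */
};

static GlobalContext demo_ctx = {
    4,            /* grid_num: 2x2 */
    4,            /* video_num */
    1280, 720,    /* enc_width, enc_height */
    1500000,      /* enc_bit_rate (example value) */
    demo_files,
    "out.flv",    /* outfilename; the output is muxed as flv below */
};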
We also define a configuration function for GlobalContext that performs two basic sanity checks: grid_num must be greater than or equal to video_num, and each input's position index must be valid.
static int global_ctx_config()
{
    int i;
    if (global_ctx->grid_num < global_ctx->video_num)
    {
        av_log(NULL, AV_LOG_ERROR, "Setting a wrong grid_num %d \t The grid_num is smaller than video_num!! \n", global_ctx->grid_num);
        global_ctx->grid_num = global_ctx->video_num;
        //global_ctx->stride = sqrt((double)global_ctx->grid_num);
        av_log(NULL, AV_LOG_ERROR, "Automatically change the grid_num to be same as video_num!! \n");
    }
    //global_ctx->stride = sqrt((double)global_ctx->grid_num);
    for (i = 0; i < global_ctx->video_num; i++)
    {
        if (global_ctx->input_file[i].video_idx >= global_ctx->grid_num)
        {
            av_log(NULL, AV_LOG_ERROR, "Invalid video_idx value in the No.%d input\n", i);
            return -1;
        }
    }
    return 0;
}
Unlike the approach in the earlier live-stream filter article, here we split opening the inputs, opening the output, and initializing the filters into three separate functions.

Opening the inputs looks like this; note that the open operation is performed separately for each input:
//multiple inputs
static AVFormatContext **ifmt_ctx;
AVFrame **frame = NULL;
//single output
static AVFormatContext *ofmt_ctx;

typedef struct FilteringContext {
    AVFilterContext *buffersink_ctx;
    AVFilterContext **buffersrc_ctx;
    AVFilterGraph *filter_graph;
} FilteringContext;
static FilteringContext *filter_ctx;

...
static int open_input_file(InputFile *input_file)
{
    int ret;
    unsigned int i;
    unsigned int j;
    ifmt_ctx = (AVFormatContext**)av_malloc((global_ctx->video_num)*sizeof(AVFormatContext*));
    for (i = 0; i < global_ctx->video_num; i++)
    {
        *(ifmt_ctx + i) = NULL;
        if ((ret = avformat_open_input((ifmt_ctx + i), input_file[i].filenames, NULL, NULL)) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
            return ret;
        }
        if ((ret = avformat_find_stream_info(ifmt_ctx[i], NULL)) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
            return ret;
        }
        for (j = 0; j < ifmt_ctx[i]->nb_streams; j++) {
            AVStream *stream;
            AVCodecContext *codec_ctx;
            stream = ifmt_ctx[i]->streams[j];
            codec_ctx = stream->codec;
            if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
                || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
                /* Open decoder */
                ret = avcodec_open2(codec_ctx,
                    avcodec_find_decoder(codec_ctx->codec_id), NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u of input #%u\n", j, i);
                    return ret;
                }
            }
        }
        av_dump_format(ifmt_ctx[i], 0, input_file[i].filenames, 0);
    }
    return 0;
}
Opening the output looks like this. The output is always encoded as H.264 + AAC. Since there is still only a single output, this part differs little from before.
static int open_output_file(const char *filename)
{
    AVStream *out_stream;
    AVStream *in_stream;
    AVCodecContext *dec_ctx, *enc_ctx;
    AVCodec *encoder;
    int ret;
    unsigned int i;
    ofmt_ctx = NULL;
    avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", filename);
    if (!ofmt_ctx) {
        av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
        return AVERROR_UNKNOWN;
    }
    for (i = 0; i < ifmt_ctx[0]->nb_streams; i++) {
        out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream) {
            av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
            return AVERROR_UNKNOWN;
        }
        in_stream = ifmt_ctx[0]->streams[i];
        out_stream->time_base = in_stream->time_base;
        dec_ctx = in_stream->codec;
        enc_ctx = out_stream->codec;
        if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
            || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            /* In this example, we transcode to same properties (picture size,
             * sample rate etc.). These properties can be changed for output
             * streams easily using filters */
            if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
                /* in this example, we choose transcoding to same codec */
                encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
                enc_ctx->height = global_ctx->enc_height;
                enc_ctx->width = global_ctx->enc_width;
                enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
                /* take first format from list of supported formats */
                enc_ctx->pix_fmt = encoder->pix_fmts[0];
                enc_ctx->me_range = 16;
                enc_ctx->max_qdiff = 4;
                enc_ctx->bit_rate = global_ctx->enc_bit_rate;
                enc_ctx->qcompress = 0.6;
                /* video time_base can be set to whatever is handy and supported by encoder */
                enc_ctx->time_base.num = 1;
                enc_ctx->time_base.den = 25;
                enc_ctx->gop_size = 250;
                enc_ctx->max_b_frames = 3;
                AVDictionary *d = NULL;
                char *k = av_strdup("preset");     // if your strings are already allocated,
                char *v = av_strdup("ultrafast");  // you can avoid copying them like this
                av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
                ret = avcodec_open2(enc_ctx, encoder, &d);
            }
            else {
                encoder = avcodec_find_encoder(AV_CODEC_ID_AAC);
                enc_ctx->sample_rate = dec_ctx->sample_rate;
                enc_ctx->channel_layout = dec_ctx->channel_layout;
                enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
                /* take first format from list of supported formats */
                enc_ctx->sample_fmt = encoder->sample_fmts[0];
                AVRational time_base = { 1, enc_ctx->sample_rate };
                enc_ctx->time_base = time_base;
                ret = avcodec_open2(enc_ctx, encoder, NULL);
            }
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot open encoder for stream #%u\n", i);
                return ret;
            }
        }
        else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
            av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
            return AVERROR_INVALIDDATA;
        }
        else {
            /* if this stream must be remuxed */
            ret = avcodec_copy_context(ofmt_ctx->streams[i]->codec,
                ifmt_ctx[0]->streams[i]->codec);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n");
                return ret;
            }
        }
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    av_dump_format(ofmt_ctx, 0, filename, 1);
    if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
            return ret;
        }
    }
    /* init muxer, write output file header */
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
        return ret;
    }
#if USE_H264BSF
    h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
#endif
#if USE_AACBSF
    aacbsfc = av_bitstream_filter_init("aac_adtstoasc");
#endif
    return 0;
}
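A side note on the preset dictionary above: the AV_DICT_DONT_STRDUP_KEY / AV_DICT_DONT_STRDUP_VAL flags hand ownership of the strdup'ed strings over to the dictionary. If you don't need that micro-optimization, the more common pattern is to let av_dict_set make its own copies; a minimal sketch of the same lines:

AVDictionary *d = NULL;
av_dict_set(&d, "preset", "ultrafast", 0);  /* av_dict_set copies key and value */
ret = avcodec_open2(enc_ctx, encoder, &d);
av_dict_free(&d);  /* any options the encoder did not consume remain in d and are freed here */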
Filter initialization is shown below. One of its parameters is filter_spec, a string describing the detailed filter setup; how it is built is covered in the next section. This function combines the inputs, the output, and the filter description into a single filter_graph. Unlike in the earlier article, each input pad is given its own name here (in0, in1, in2, and so on), which is what lets us attach a separate filter chain to each individual input.
static int init_filter(FilteringContext* fctx, AVCodecContext **dec_ctx,
    AVCodecContext *enc_ctx, const char *filter_spec)
{
    char args[512];
    char pad_name[10];
    int ret = 0;
    int i;
    AVFilter **buffersrc = (AVFilter**)av_malloc(global_ctx->video_num*sizeof(AVFilter*));
    AVFilter *buffersink = NULL;
    AVFilterContext **buffersrc_ctx = (AVFilterContext**)av_malloc(global_ctx->video_num*sizeof(AVFilterContext*));
    AVFilterContext *buffersink_ctx = NULL;
    AVFilterInOut **outputs = (AVFilterInOut**)av_malloc(global_ctx->video_num*sizeof(AVFilterInOut*));
    AVFilterInOut *inputs = avfilter_inout_alloc();
    AVFilterGraph *filter_graph = avfilter_graph_alloc();
    for (i = 0; i < global_ctx->video_num; i++)
    {
        buffersrc[i] = NULL;
        buffersrc_ctx[i] = NULL;
        outputs[i] = avfilter_inout_alloc();
    }
    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    if (dec_ctx[0]->codec_type == AVMEDIA_TYPE_VIDEO) {
        for (i = 0; i < global_ctx->video_num; i++)
        {
            buffersrc[i] = avfilter_get_by_name("buffer");
        }
        buffersink = avfilter_get_by_name("buffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        for (i = 0; i < global_ctx->video_num; i++)
        {
            _snprintf(args, sizeof(args),
                "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
                dec_ctx[i]->width, dec_ctx[i]->height, dec_ctx[i]->pix_fmt,
                dec_ctx[i]->time_base.num, dec_ctx[i]->time_base.den,
                dec_ctx[i]->sample_aspect_ratio.num,
                dec_ctx[i]->sample_aspect_ratio.den);
            _snprintf(pad_name, sizeof(pad_name), "in%d", i);
            ret = avfilter_graph_create_filter(&(buffersrc_ctx[i]), buffersrc[i], pad_name,
                args, NULL, filter_graph);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
                goto end;
            }
        }
        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
            NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
            goto end;
        }
        ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
            (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
            AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
            goto end;
        }
    }
    else if (dec_ctx[0]->codec_type == AVMEDIA_TYPE_AUDIO) {
        for (i = 0; i < global_ctx->video_num; i++)
        {
            buffersrc[i] = avfilter_get_by_name("abuffer");
        }
        buffersink = avfilter_get_by_name("abuffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        for (i = 0; i < global_ctx->video_num; i++)
        {
            if (!dec_ctx[i]->channel_layout)
                dec_ctx[i]->channel_layout =
                    av_get_default_channel_layout(dec_ctx[i]->channels);
            _snprintf(args, sizeof(args),
                "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%I64x",
                dec_ctx[i]->time_base.num, dec_ctx[i]->time_base.den, dec_ctx[i]->sample_rate,
                av_get_sample_fmt_name(dec_ctx[i]->sample_fmt),
                dec_ctx[i]->channel_layout);
            _snprintf(pad_name, sizeof(pad_name), "in%d", i);
            ret = avfilter_graph_create_filter(&(buffersrc_ctx[i]), buffersrc[i], pad_name,
                args, NULL, filter_graph);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
                goto end;
            }
        }
        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
            NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
            goto end;
        }
        ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
            (uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
            AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
            goto end;
        }
        ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
            (uint8_t*)&enc_ctx->channel_layout,
            sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
            goto end;
        }
        ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
            (uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
            AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
            goto end;
        }
    }
    /* Endpoints for the filter graph. */
    for (i = 0; i < global_ctx->video_num; i++)
    {
        _snprintf(pad_name, sizeof(pad_name), "in%d", i);
        outputs[i]->name = av_strdup(pad_name);
        outputs[i]->filter_ctx = buffersrc_ctx[i];
        outputs[i]->pad_idx = 0;
        if (i == global_ctx->video_num - 1)
            outputs[i]->next = NULL;
        else
            outputs[i]->next = outputs[i + 1];
    }
    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;
    if (!outputs[0]->name || !inputs->name) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
        &inputs, outputs, NULL)) < 0)
        goto end;
    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        goto end;
    /* Fill FilteringContext */
    fctx->buffersrc_ctx = buffersrc_ctx;
    fctx->buffersink_ctx = buffersink_ctx;
    fctx->filter_graph = filter_graph;
end:
    avfilter_inout_free(&inputs);
    av_free(buffersrc);
    // av_free(buffersrc_ctx);
    avfilter_inout_free(outputs);
    av_free(outputs);
    return ret;
}
Now let's look at how the actual filter_spec is generated. Ultimately we want something equivalent to a command like:

ffmpeg -i test1.mp4 -i test2.mp4 -i test3.mp4 -i test4.mp4 -filter_complex "[0:v]pad=iw*2:ih*2[a];[a][1:v]overlay=w[b];[b][2:v]overlay=0:h[c];[c][3:v]overlay=w:h" out.mp4

As you can see, building it is straightforward: mostly string concatenation, plus computing each tile's position from the grid geometry.
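To make the geometry concrete (this follows directly from the position-index comment in InputFile): with stride = sqrt(grid_num), an input with position index video_idx lands at column x_coor = video_idx % stride and row y_coor = video_idx / stride, is scaled to (W/stride) x (H/stride), and is overlaid at (W*x_coor/stride, H*y_coor/stride). For example, in a 3x3 grid (stride = 3), video_idx = 5 gives column 5 % 3 = 2 and row 5 / 3 = 1, i.e. an overlay position of (2W/3, H/3). The implementation: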
static int init_spec_filter(void)
{
    char filter_spec[512];
    char spec_temp[128];
    unsigned int i;
    unsigned int j;
    unsigned int k;
    unsigned int x_coor;
    unsigned int y_coor;
    AVCodecContext** dec_ctx_array;
    int stream_num = ifmt_ctx[0]->nb_streams;
    int stride = (int)sqrt((long double)global_ctx->grid_num);
    int ret;
    filter_ctx = (FilteringContext *)av_malloc_array(stream_num, sizeof(*filter_ctx));
    dec_ctx_array = (AVCodecContext**)av_malloc(global_ctx->video_num*sizeof(AVCodecContext*));
    if (!filter_ctx || !dec_ctx_array)
        return AVERROR(ENOMEM);
    for (i = 0; i < stream_num; i++) {
        filter_ctx[i].buffersrc_ctx = NULL;
        filter_ctx[i].buffersink_ctx = NULL;
        filter_ctx[i].filter_graph = NULL;
        for (j = 0; j < global_ctx->video_num; j++)
            dec_ctx_array[j] = ifmt_ctx[j]->streams[i]->codec;
        if (!(ifmt_ctx[0]->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
            || ifmt_ctx[0]->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO))
            continue;
        if (ifmt_ctx[0]->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            if (global_ctx->grid_num == 1)
                _snprintf(filter_spec, sizeof(filter_spec), "null");
            else
            {
                /* start from a black canvas of the output size */
                _snprintf(filter_spec, sizeof(filter_spec), "color=c=black@1:s=%dx%d[x0];", global_ctx->enc_width, global_ctx->enc_height);
                k = 1;
                for (j = 0; j < global_ctx->video_num; j++)
                {
                    /* per-input effect: [inJ] -> [ineJ] */
                    switch (global_ctx->input_file[j].video_effect)
                    {
                    case VFX_NULL:
                        _snprintf(spec_temp, sizeof(spec_temp), "[in%d]null[ine%d];", j, j);
                        strcat(filter_spec, spec_temp);
                        break;
                    case VFX_EDGE:
                        _snprintf(spec_temp, sizeof(spec_temp), "[in%d]edgedetect[ine%d];", j, j);
                        strcat(filter_spec, spec_temp);
                        break;
                    case VFX_NEGATE:
                        _snprintf(spec_temp, sizeof(spec_temp), "[in%d]negate[ine%d];", j, j);
                        strcat(filter_spec, spec_temp);
                        break;
                    }
                    /* scale the input to one grid cell and overlay it at its position */
                    x_coor = global_ctx->input_file[j].video_idx % stride;
                    y_coor = global_ctx->input_file[j].video_idx / stride;
                    _snprintf(spec_temp, sizeof(spec_temp), "[ine%d]scale=w=%d:h=%d[inn%d];[x%d][inn%d]overlay=%d*%d/%d:%d*%d/%d[x%d];", j, global_ctx->enc_width / stride, global_ctx->enc_height / stride, j, k - 1, j, global_ctx->enc_width, x_coor, stride, global_ctx->enc_height, y_coor, stride, k);
                    k++;
                    strcat(filter_spec, spec_temp);
                }
                _snprintf(spec_temp, sizeof(spec_temp), "[x%d]null[out]", k - 1);
                strcat(filter_spec, spec_temp);
            }
        }
        else
        {
            if (global_ctx->video_num == 1)
                _snprintf(filter_spec, sizeof(filter_spec), "anull");
            else {
                filter_spec[0] = '\0';
                for (j = 0; j < global_ctx->video_num; j++)
                {
                    _snprintf(spec_temp, sizeof(spec_temp), "[in%d]", j);
                    strcat(filter_spec, spec_temp);
                }
                _snprintf(spec_temp, sizeof(spec_temp), "amix=inputs=%d[out]", global_ctx->video_num);
                strcat(filter_spec, spec_temp);
            }
        }
        ret = init_filter(&filter_ctx[i], dec_ctx_array,
            ofmt_ctx->streams[i]->codec, filter_spec);
        if (ret)
            return ret;
    }
    av_free(dec_ctx_array);
    return 0;
}
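To see what this produces, take the hypothetical 2x2 demo configuration from the beginning of the article (1280x720 output, edgedetect on input 0, negate on input 3). The video filter_spec then comes out as follows (line breaks added here for readability; the real string is a single line):

color=c=black@1:s=1280x720[x0];
[in0]edgedetect[ine0];[ine0]scale=w=640:h=360[inn0];[x0][inn0]overlay=1280*0/2:720*0/2[x1];
[in1]null[ine1];[ine1]scale=w=640:h=360[inn1];[x1][inn1]overlay=1280*1/2:720*0/2[x2];
[in2]null[ine2];[ine2]scale=w=640:h=360[inn2];[x2][inn2]overlay=1280*0/2:720*1/2[x3];
[in3]negate[ine3];[ine3]scale=w=640:h=360[inn3];[x3][inn3]overlay=1280*1/2:720*1/2[x4];
[x4]null[out]

and the audio filter_spec is simply [in0][in1][in2][in3]amix=inputs=4[out]. Note that filter_spec is only 512 bytes, and the string above already approaches that limit, so the buffer needs to grow for grids beyond 2x2.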
With all of that in place, we can finally perform the actual merge. The approach:
1. For each input, read a packet with av_read_frame, decode it, and feed the decoded frame into the filter graph with av_buffersrc_add_frame.
2. Pull processed frames out of the filter graph with av_buffersink_get_frame_flags, encode them, and write them to the output file.
3. Go back to step 1; once all frames have been read, flush the encoders.
The code:
int videocombine(GlobalContext* video_ctx)
{
    int ret;
    int tmp = 0;
    int got_frame_num = 0;
    unsigned int stream_index;
    AVPacket packet;
    AVPacket enc_pkt;
    AVFrame* picref;
    enum AVMediaType mediatype;
    int64_t frame_pts;
    int read_frame_done = 0;
    int flush_now = 0;
    int framecnt = 0;
    int i, j, k;
    int got_frame;
    int enc_got_frame = 0;
    int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
    int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *);
    global_ctx = video_ctx;
    global_ctx_config();
    frame = (AVFrame**)av_malloc(global_ctx->video_num*sizeof(AVFrame*));
    picref = av_frame_alloc();
    av_register_all();
    avfilter_register_all();
    if ((ret = open_input_file(global_ctx->input_file)) < 0)
        goto end;
    if ((ret = open_output_file(global_ctx->outfilename)) < 0)
        goto end;
    if ((ret = init_spec_filter()) < 0)
        goto end;
    while (1) {
        for (i = 0; i < global_ctx->video_num; i++)
        {
            if (read_frame_done < 0)
            {
                flush_now = 1;
                goto flush;
            }
            while ((read_frame_done = av_read_frame(ifmt_ctx[i], &packet)) >= 0)
            {
                stream_index = packet.stream_index;
                mediatype = ifmt_ctx[i]->streams[stream_index]->codec->codec_type;
                if (mediatype == AVMEDIA_TYPE_VIDEO || mediatype == AVMEDIA_TYPE_AUDIO)
                {
                    frame[i] = av_frame_alloc();
                    if (!(frame[i]))
                    {
                        ret = AVERROR(ENOMEM);
                        goto end;
                    }
                    av_packet_rescale_ts(&packet,
                        ifmt_ctx[i]->streams[stream_index]->time_base,
                        ifmt_ctx[i]->streams[stream_index]->codec->time_base);
                    dec_func = (mediatype == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 : avcodec_decode_audio4;
                    ret = dec_func(ifmt_ctx[i]->streams[stream_index]->codec, frame[i], &got_frame, &packet);
                    if (ret < 0)
                    {
                        av_frame_free(&frame[i]);
                        av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                        goto end;
                    }
                    if (got_frame) {
                        frame[i]->pts = av_frame_get_best_effort_timestamp(frame[i]);
                        ret = av_buffersrc_add_frame(filter_ctx[stream_index].buffersrc_ctx[i], frame[i]);
                        if (ret < 0) {
                            av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
                            goto end;
                        }
                    }
                    else
                    {
                        av_frame_free(&(frame[i]));
                    }
                }
                av_free_packet(&packet);
                if (got_frame)
                {
                    got_frame = 0;
                    break;
                }
            }
        }
        /* pull filtered frames, encode and write them out */
        while (1) {
            ret = av_buffersink_get_frame_flags(filter_ctx[stream_index].buffersink_ctx, picref, 0);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            {
                ret = 0;
                break;
            }
            if (ret < 0)
                goto end;
            if (picref) {
                enc_pkt.data = NULL;
                enc_pkt.size = 0;
                av_init_packet(&enc_pkt);
                enc_func = (mediatype == AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;
                ret = enc_func(ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
                    picref, &enc_got_frame);
                if (ret < 0)
                {
                    av_log(NULL, AV_LOG_ERROR, "Encoding failed\n");
                    goto end;
                }
                if (enc_got_frame == 1) {
                    framecnt++;
                    enc_pkt.stream_index = stream_index;
                    //Write PTS
                    AVRational time_base = ofmt_ctx->streams[stream_index]->time_base;//{ 1, 1000 };
                    AVRational r_framerate1 = ifmt_ctx[0]->streams[stream_index]->r_frame_rate;// { 50, 2 };
                    AVRational time_base_q = { 1, AV_TIME_BASE };
                    //Duration between two frames (us), in the internal time base
                    int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1));
                    //enc_pkt.pts = (double)(framecnt*calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base));
                    enc_pkt.pts = av_rescale_q(framecnt*calc_duration, time_base_q, time_base);
                    enc_pkt.dts = enc_pkt.pts;
                    enc_pkt.duration = av_rescale_q(calc_duration, time_base_q, time_base);
                    enc_pkt.pos = -1;
#if USE_H264BSF
                    av_bitstream_filter_filter(h264bsfc, ofmt_ctx->streams[stream_index]->codec, NULL, &enc_pkt.data, &enc_pkt.size, enc_pkt.data, enc_pkt.size, 0);
#endif
#if USE_AACBSF
                    av_bitstream_filter_filter(aacbsfc, ofmt_ctx->streams[stream_index]->codec, NULL, &enc_pkt.data, &enc_pkt.size, enc_pkt.data, enc_pkt.size, 0);
#endif
                    ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
                    av_log(NULL, AV_LOG_INFO, "write frame %d\n", framecnt);
                    av_free_packet(&enc_pkt);
                }
                av_frame_unref(picref);
            }
        }
    }
flush:
    /* flush filters and encoders */
    for (i = 0; i < ifmt_ctx[0]->nb_streams; i++) {
        stream_index = i;
        /* flush encoder */
        if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities &
            CODEC_CAP_DELAY))
            return 0;
        while (1) {
            enc_pkt.data = NULL;
            enc_pkt.size = 0;
            av_init_packet(&enc_pkt);
            enc_func = (ifmt_ctx[0]->streams[stream_index]->codec->codec_type == AVMEDIA_TYPE_VIDEO) ?
                avcodec_encode_video2 : avcodec_encode_audio2;
            ret = enc_func(ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
                NULL, &enc_got_frame);
            if (ret < 0)
            {
                av_log(NULL, AV_LOG_ERROR, "Encoding failed\n");
                goto end;
            }
            if (!enc_got_frame) {
                ret = 0;
                break;
            }
            printf("Flush Encoder: Succeed to encode 1 frame!\tsize:%5d\n", enc_pkt.size);
            //Write PTS
            AVRational time_base = ofmt_ctx->streams[stream_index]->time_base;//{ 1, 1000 };
            AVRational r_framerate1 = ifmt_ctx[0]->streams[stream_index]->r_frame_rate;// { 50, 2 };
            AVRational time_base_q = { 1, AV_TIME_BASE };
            //Duration between two frames (us), in the internal time base
            int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1));
            //Convert PTS/DTS
            enc_pkt.pts = av_rescale_q(framecnt*calc_duration, time_base_q, time_base);
            enc_pkt.dts = enc_pkt.pts;
            enc_pkt.duration = av_rescale_q(calc_duration, time_base_q, time_base);
            enc_pkt.pos = -1;
            framecnt++;
            ofmt_ctx->duration = enc_pkt.duration * framecnt;
            /* mux encoded frame */
            ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
        }
    }
    av_write_trailer(ofmt_ctx);
#if USE_H264BSF
    av_bitstream_filter_close(h264bsfc);
#endif
#if USE_AACBSF
    av_bitstream_filter_close(aacbsfc);
#endif
end:
    av_free_packet(&packet);
    for (i = 0; i < global_ctx->video_num; i++)
    {
        av_frame_free(&(frame[i]));
        for (j = 0; j < ofmt_ctx->nb_streams; j++) {
            avcodec_close(ifmt_ctx[i]->streams[j]->codec);
        }
    }
    av_free(frame);
    av_frame_free(&picref);
    for (i = 0; i < ofmt_ctx->nb_streams; i++)
    {
        if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
            avcodec_close(ofmt_ctx->streams[i]->codec);
        av_free(filter_ctx[i].buffersrc_ctx);
        if (filter_ctx && filter_ctx[i].filter_graph)
            avfilter_graph_free(&filter_ctx[i].filter_graph);
    }
    av_free(filter_ctx);
    for (i = 0; i < global_ctx->video_num; i++)
        avformat_close_input(&(ifmt_ctx[i]));
    if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
        avio_close(ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    av_free(ifmt_ctx);
    if (ret < 0)
        av_log(NULL, AV_LOG_ERROR, "Error occurred\n");
    return (ret ? 1 : 0);
}
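Finally, a hypothetical driver that ties the configuration sketch from the beginning of the article to videocombine(); per the code above, it returns 0 on success and 1 on error:

int main(void)
{
    /* demo_files / demo_ctx are the hypothetical configuration shown earlier */
    return videocombine(&demo_ctx);
}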