X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=avconv.c;h=11d77419eb916db2f2c99b2f3fcf850c898be0da;hb=b62b5cb6fd5f97b7c1cd9b84f183d458e97622a9;hp=fe01dc6c088e442bd53ccd6e0ec2c166a1d13c2a;hpb=2886aee73f12f11e201f4c229d80eef549a43e1f;p=ffmpeg diff --git a/avconv.c b/avconv.c index fe01dc6c088..11d77419eb9 100644 --- a/avconv.c +++ b/avconv.c @@ -143,7 +143,6 @@ static int qp_hist = 0; static char *vfilters = NULL; #endif -static int intra_only = 0; static int audio_sample_rate = 0; #define QSCALE_NONE -99999 static float audio_qscale = QSCALE_NONE; @@ -198,14 +197,12 @@ static int64_t extra_size = 0; static int nb_frames_dup = 0; static int nb_frames_drop = 0; static int input_sync; -static uint64_t limit_filesize = 0; +static uint64_t limit_filesize = UINT64_MAX; static int force_fps = 0; static char *forced_key_frames = NULL; static float dts_delta_threshold = 10; -static int64_t timer_start; - static uint8_t *audio_buf; static uint8_t *audio_out; static unsigned int allocated_audio_out_size, allocated_audio_buf_size; @@ -218,7 +215,31 @@ static AVBitStreamFilterContext *subtitle_bitstream_filters=NULL; #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass" -struct InputStream; +typedef struct InputStream { + int file_index; + AVStream *st; + int discard; /* true if stream data should be discarded */ + int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */ + AVCodec *dec; + + int64_t start; /* time when read started */ + int64_t next_pts; /* synthetic pts for cases where pkt.pts + is not defined */ + int64_t pts; /* current pts */ + PtsCorrectionContext pts_ctx; + double ts_scale; + int is_start; /* is 1 at the start and after a discontinuity */ + int showed_multi_packet_warning; + AVDictionary *opts; +} InputStream; + +typedef struct InputFile { + AVFormatContext *ctx; + int eof_reached; /* true if eof reached */ + int ist_index; /* index of first stream in ist_table */ + int buffer_size; /* current total buffer size */ + int64_t ts_offset; +} InputFile; typedef struct OutputStream { int file_index; /* file index */ @@ -272,39 +293,17 @@ typedef struct OutputStream { int sws_flags; AVDictionary *opts; + int is_past_recording_time; } OutputStream; -typedef struct InputStream { - int file_index; - AVStream *st; - int discard; /* true if stream data should be discarded */ - int decoding_needed; /* true if the packets must be decoded in 'raw_fifo' */ - AVCodec *dec; - - int64_t start; /* time when read started */ - int64_t next_pts; /* synthetic pts for cases where pkt.pts - is not defined */ - int64_t pts; /* current pts */ - PtsCorrectionContext pts_ctx; - double ts_scale; - int is_start; /* is 1 at the start and after a discontinuity */ - int showed_multi_packet_warning; - int is_past_recording_time; - AVDictionary *opts; -} InputStream; - -typedef struct InputFile { - AVFormatContext *ctx; - int eof_reached; /* true if eof reached */ - int ist_index; /* index of first stream in ist_table */ - int buffer_size; /* current total buffer size */ - int64_t ts_offset; -} InputFile; typedef struct OutputFile { AVFormatContext *ctx; AVDictionary *opts; int ost_index; /* index of the first stream in output_streams */ + int64_t recording_time; /* desired length of the resulting file in microseconds */ + int64_t start_time; /* start time in microseconds */ + uint64_t limit_filesize; } OutputFile; static InputStream *input_streams = NULL; @@ -395,7 +394,7 @@ static int configure_video_filters(InputStream *ist, OutputStream *ost) codec->width = 
ost->output_video_filter->inputs[0]->w; codec->height = ost->output_video_filter->inputs[0]->h; codec->sample_aspect_ratio = ost->st->sample_aspect_ratio = - ost->frame_aspect_ratio ? // overriden by the -aspect cli option + ost->frame_aspect_ratio ? // overridden by the -aspect cli option av_d2q(ost->frame_aspect_ratio*codec->height/codec->width, 255) : ost->output_video_filter->inputs[0]->sample_aspect_ratio; @@ -458,7 +457,6 @@ static int exit_program(int ret) fclose(vstats_file); av_free(vstats_filename); - av_free(streamid_map); av_free(meta_data_maps); av_freep(&input_streams); @@ -643,7 +641,8 @@ static double get_sync_ipts(const OutputStream *ost) { const InputStream *ist = ost->sync_ist; - return (double)(ist->pts - start_time)/AV_TIME_BASE; + OutputFile *of = &output_files[ost->file_index]; + return (double)(ist->pts - of->start_time)/AV_TIME_BASE; } static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx, AVBitStreamFilterContext *bsfc){ @@ -678,8 +677,6 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx } } -#define MAX_AUDIO_PACKET_SIZE (128 * 1024) - static void do_audio_out(AVFormatContext *s, OutputStream *ost, InputStream *ist, @@ -963,9 +960,6 @@ static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void * *bufp = buf; } -/* we begin to correct av delay at this threshold */ -#define AV_DELAY_MAX 0.100 - static void do_subtitle_out(AVFormatContext *s, OutputStream *ost, InputStream *ist, @@ -1081,7 +1075,7 @@ static void do_video_resample(OutputStream *ost, if (resample_changed) { avfilter_graph_free(&ost->graph); if (configure_video_filters(ist, ost)) { - fprintf(stderr, "Error reinitialising filters!\n"); + fprintf(stderr, "Error reinitializing filters!\n"); exit_program(1); } } @@ -1158,7 +1152,7 @@ static void do_video_out(AVFormatContext *s, if (s->oformat->flags & AVFMT_RAWPICTURE) { /* raw pictures are written as AVPicture structure to - avoid any copies. We support temorarily the older + avoid any copies. We support temporarily the older method. 
*/ AVFrame* old_frame = enc->coded_frame; enc->coded_frame = dec->coded_frame; //FIXME/XXX remove this hack @@ -1275,7 +1269,7 @@ static void do_video_stats(AVFormatContext *os, OutputStream *ost, static void print_report(OutputFile *output_files, OutputStream *ost_table, int nb_ostreams, - int is_last_report) + int is_last_report, int64_t timer_start) { char buf[1024]; OutputStream *ost; @@ -1403,6 +1397,91 @@ static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_ memset(buf, fill_char, size); } +static void flush_encoders(OutputStream *ost_table, int nb_ostreams) +{ + int i, ret; + + for (i = 0; i < nb_ostreams; i++) { + OutputStream *ost = &ost_table[i]; + AVCodecContext *enc = ost->st->codec; + AVFormatContext *os = output_files[ost->file_index].ctx; + + if (!ost->encoding_needed) + continue; + + if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <=1) + continue; + if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE)) + continue; + + for(;;) { + AVPacket pkt; + int fifo_bytes; + av_init_packet(&pkt); + pkt.stream_index= ost->index; + + switch (ost->st->codec->codec_type) { + case AVMEDIA_TYPE_AUDIO: + fifo_bytes = av_fifo_size(ost->fifo); + ret = 0; + /* encode any samples remaining in fifo */ + if (fifo_bytes > 0) { + int osize = av_get_bytes_per_sample(enc->sample_fmt); + int fs_tmp = enc->frame_size; + + av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL); + if (enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) { + enc->frame_size = fifo_bytes / (osize * enc->channels); + } else { /* pad */ + int frame_bytes = enc->frame_size*osize*enc->channels; + if (allocated_audio_buf_size < frame_bytes) + exit_program(1); + generate_silence(audio_buf+fifo_bytes, enc->sample_fmt, frame_bytes - fifo_bytes); + } + + ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, (short *)audio_buf); + pkt.duration = av_rescale((int64_t)enc->frame_size*ost->st->time_base.den, + ost->st->time_base.num, enc->sample_rate); + enc->frame_size = fs_tmp; + } + if (ret <= 0) { + ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, NULL); + } + if (ret < 0) { + fprintf(stderr, "Audio encoding failed\n"); + exit_program(1); + } + audio_size += ret; + pkt.flags |= AV_PKT_FLAG_KEY; + break; + case AVMEDIA_TYPE_VIDEO: + ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL); + if (ret < 0) { + fprintf(stderr, "Video encoding failed\n"); + exit_program(1); + } + video_size += ret; + if(enc->coded_frame && enc->coded_frame->key_frame) + pkt.flags |= AV_PKT_FLAG_KEY; + if (ost->logfile && enc->stats_out) { + fprintf(ost->logfile, "%s", enc->stats_out); + } + break; + default: + ret=-1; + } + + if (ret <= 0) + break; + pkt.data = bit_buffer; + pkt.size = ret; + if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE) + pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base); + write_frame(os, &pkt, ost->st->codec, ost->bitstream_filters); + } + } +} + /* pkt = NULL means EOF (needed to flush decoder buffers) */ static int output_packet(InputStream *ist, int ist_index, OutputStream *ost_table, int nb_ostreams, @@ -1575,128 +1654,138 @@ static int output_packet(InputStream *ist, int ist_index, } /* if output time reached then transcode raw format, encode packets and output them */ - if (start_time == 0 || ist->pts >= start_time) - for(i=0;isource_index != ist_index) - continue; + ost = &ost_table[i]; + if (ost->source_index != ist_index) + continue; + + if 
(of->start_time && ist->pts < of->start_time) + continue; + + if (of->recording_time != INT64_MAX && + av_compare_ts(ist->pts, AV_TIME_BASE_Q, of->recording_time + of->start_time, + (AVRational){1, 1000000}) >= 0) { + ost->is_past_recording_time = 1; + continue; + } #if CONFIG_AVFILTER - if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && - ost->input_video_filter) { - AVRational sar; - if (ist->st->sample_aspect_ratio.num) - sar = ist->st->sample_aspect_ratio; - else - sar = ist->st->codec->sample_aspect_ratio; - av_vsrc_buffer_add_frame(ost->input_video_filter, &picture, ist->pts, sar); - } - frame_available = ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO || - !ost->output_video_filter || avfilter_poll_frame(ost->output_video_filter->inputs[0]); - while (frame_available) { - AVRational ist_pts_tb; - if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ost->output_video_filter) - get_filtered_video_frame(ost->output_video_filter, &picture, &ost->picref, &ist_pts_tb); - if (ost->picref) - ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q); + if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && + ost->input_video_filter) { + AVRational sar; + if (ist->st->sample_aspect_ratio.num) + sar = ist->st->sample_aspect_ratio; + else + sar = ist->st->codec->sample_aspect_ratio; + av_vsrc_buffer_add_frame(ost->input_video_filter, &picture, ist->pts, sar); + } + frame_available = ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO || + !ost->output_video_filter || avfilter_poll_frame(ost->output_video_filter->inputs[0]); + while (frame_available) { + AVRational ist_pts_tb; + if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ost->output_video_filter) + get_filtered_video_frame(ost->output_video_filter, &picture, &ost->picref, &ist_pts_tb); + if (ost->picref) + ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q); #endif - os = output_files[ost->file_index].ctx; + os = output_files[ost->file_index].ctx; - /* set the input output pts pairs */ - //ost->sync_ipts = (double)(ist->pts + input_files[ist->file_index].ts_offset - start_time)/ AV_TIME_BASE; + /* set the input output pts pairs */ + //ost->sync_ipts = (double)(ist->pts + input_files[ist->file_index].ts_offset - start_time)/ AV_TIME_BASE; - if (ost->encoding_needed) { - av_assert0(ist->decoding_needed); - switch(ost->st->codec->codec_type) { - case AVMEDIA_TYPE_AUDIO: - do_audio_out(os, ost, ist, decoded_data_buf, decoded_data_size); - break; - case AVMEDIA_TYPE_VIDEO: + if (ost->encoding_needed) { + av_assert0(ist->decoding_needed); + switch(ost->st->codec->codec_type) { + case AVMEDIA_TYPE_AUDIO: + do_audio_out(os, ost, ist, decoded_data_buf, decoded_data_size); + break; + case AVMEDIA_TYPE_VIDEO: #if CONFIG_AVFILTER - if (ost->picref->video && !ost->frame_aspect_ratio) - ost->st->codec->sample_aspect_ratio = ost->picref->video->pixel_aspect; + if (ost->picref->video && !ost->frame_aspect_ratio) + ost->st->codec->sample_aspect_ratio = ost->picref->video->pixel_aspect; #endif - do_video_out(os, ost, ist, &picture, &frame_size, - same_quant ? 
quality : ost->st->codec->global_quality); - if (vstats_filename && frame_size) - do_video_stats(os, ost, frame_size); - break; - case AVMEDIA_TYPE_SUBTITLE: - do_subtitle_out(os, ost, ist, &subtitle, - pkt->pts); - break; - default: - abort(); - } - } else { - AVFrame avframe; //FIXME/XXX remove this - AVPacket opkt; - int64_t ost_tb_start_time= av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base); + do_video_out(os, ost, ist, &picture, &frame_size, + same_quant ? quality : ost->st->codec->global_quality); + if (vstats_filename && frame_size) + do_video_stats(os, ost, frame_size); + break; + case AVMEDIA_TYPE_SUBTITLE: + do_subtitle_out(os, ost, ist, &subtitle, + pkt->pts); + break; + default: + abort(); + } + } else { + AVFrame avframe; //FIXME/XXX remove this + AVPacket opkt; + int64_t ost_tb_start_time= av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base); - av_init_packet(&opkt); + av_init_packet(&opkt); - if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) && !copy_initial_nonkeyframes) + if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) && !copy_initial_nonkeyframes) #if !CONFIG_AVFILTER - continue; + continue; #else - goto cont; + goto cont; #endif - /* no reencoding needed : output the packet directly */ - /* force the input stream PTS */ + /* no reencoding needed : output the packet directly */ + /* force the input stream PTS */ - avcodec_get_frame_defaults(&avframe); - ost->st->codec->coded_frame= &avframe; - avframe.key_frame = pkt->flags & AV_PKT_FLAG_KEY; + avcodec_get_frame_defaults(&avframe); + ost->st->codec->coded_frame= &avframe; + avframe.key_frame = pkt->flags & AV_PKT_FLAG_KEY; - if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) - audio_size += data_size; - else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { - video_size += data_size; - ost->sync_opts++; - } + if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) + audio_size += data_size; + else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) { + video_size += data_size; + ost->sync_opts++; + } - opkt.stream_index= ost->index; - if(pkt->pts != AV_NOPTS_VALUE) - opkt.pts= av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time; - else - opkt.pts= AV_NOPTS_VALUE; - - if (pkt->dts == AV_NOPTS_VALUE) - opkt.dts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->st->time_base); - else - opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base); - opkt.dts -= ost_tb_start_time; - - opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base); - opkt.flags= pkt->flags; - - //FIXME remove the following 2 lines they shall be replaced by the bitstream filters - if( ost->st->codec->codec_id != CODEC_ID_H264 - && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO - && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO - ) { - if(av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, data_buf, data_size, pkt->flags & AV_PKT_FLAG_KEY)) - opkt.destruct= av_destruct_packet; - } else { - opkt.data = data_buf; - opkt.size = data_size; - } + opkt.stream_index= ost->index; + if(pkt->pts != AV_NOPTS_VALUE) + opkt.pts= av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time; + else + opkt.pts= AV_NOPTS_VALUE; - write_frame(os, &opkt, ost->st->codec, ost->bitstream_filters); - ost->st->codec->frame_number++; - ost->frame_number++; - av_free_packet(&opkt); + if (pkt->dts == AV_NOPTS_VALUE) + opkt.dts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->st->time_base); + else + opkt.dts = 
av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base); + opkt.dts -= ost_tb_start_time; + + opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base); + opkt.flags= pkt->flags; + + //FIXME remove the following 2 lines they shall be replaced by the bitstream filters + if( ost->st->codec->codec_id != CODEC_ID_H264 + && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO + && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO + ) { + if(av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, data_buf, data_size, pkt->flags & AV_PKT_FLAG_KEY)) + opkt.destruct= av_destruct_packet; + } else { + opkt.data = data_buf; + opkt.size = data_size; } -#if CONFIG_AVFILTER - cont: - frame_available = (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) && - ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]); - if (ost->picref) - avfilter_unref_buffer(ost->picref); + + write_frame(os, &opkt, ost->st->codec, ost->bitstream_filters); + ost->st->codec->frame_number++; + ost->frame_number++; + av_free_packet(&opkt); } +#if CONFIG_AVFILTER + cont: + frame_available = (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) && + ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]); + if (ost->picref) + avfilter_unref_buffer(ost->picref); + } #endif } @@ -1708,90 +1797,6 @@ static int output_packet(InputStream *ist, int ist_index, } } discard_packet: - if (pkt == NULL) { - /* EOF handling */ - - for(i=0;isource_index == ist_index) { - AVCodecContext *enc= ost->st->codec; - os = output_files[ost->file_index].ctx; - - if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <=1) - continue; - if(ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE)) - continue; - - if (ost->encoding_needed) { - for(;;) { - AVPacket pkt; - int fifo_bytes; - av_init_packet(&pkt); - pkt.stream_index= ost->index; - - switch(ost->st->codec->codec_type) { - case AVMEDIA_TYPE_AUDIO: - fifo_bytes = av_fifo_size(ost->fifo); - ret = 0; - /* encode any samples remaining in fifo */ - if (fifo_bytes > 0) { - int osize = av_get_bytes_per_sample(enc->sample_fmt); - int fs_tmp = enc->frame_size; - - av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL); - if (enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) { - enc->frame_size = fifo_bytes / (osize * enc->channels); - } else { /* pad */ - int frame_bytes = enc->frame_size*osize*enc->channels; - if (allocated_audio_buf_size < frame_bytes) - exit_program(1); - generate_silence(audio_buf+fifo_bytes, enc->sample_fmt, frame_bytes - fifo_bytes); - } - - ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, (short *)audio_buf); - pkt.duration = av_rescale((int64_t)enc->frame_size*ost->st->time_base.den, - ost->st->time_base.num, enc->sample_rate); - enc->frame_size = fs_tmp; - } - if(ret <= 0) { - ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, NULL); - } - if (ret < 0) { - fprintf(stderr, "Audio encoding failed\n"); - exit_program(1); - } - audio_size += ret; - pkt.flags |= AV_PKT_FLAG_KEY; - break; - case AVMEDIA_TYPE_VIDEO: - ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL); - if (ret < 0) { - fprintf(stderr, "Video encoding failed\n"); - exit_program(1); - } - video_size += ret; - if(enc->coded_frame && enc->coded_frame->key_frame) - pkt.flags |= AV_PKT_FLAG_KEY; - if (ost->logfile && enc->stats_out) { - fprintf(ost->logfile, "%s", enc->stats_out); - } - break; - default: - ret=-1; - } - - 
if(ret<=0) - break; - pkt.data= bit_buffer; - pkt.size= ret; - if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE) - pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base); - write_frame(os, &pkt, ost->st->codec, ost->bitstream_filters); - } - } - } - } - } return 0; } @@ -1813,6 +1818,48 @@ static void print_sdp(OutputFile *output_files, int n) av_freep(&avc); } +static int init_input_stream(int ist_index, OutputStream *output_streams, int nb_output_streams, + char *error, int error_len) +{ + int i; + InputStream *ist = &input_streams[ist_index]; + if (ist->decoding_needed) { + AVCodec *codec = ist->dec; + if (!codec) + codec = avcodec_find_decoder(ist->st->codec->codec_id); + if (!codec) { + snprintf(error, sizeof(error), "Decoder (codec id %d) not found for input stream #%d.%d", + ist->st->codec->codec_id, ist->file_index, ist->st->index); + return AVERROR(EINVAL); + } + + /* update requested sample format for the decoder based on the + corresponding encoder sample format */ + for (i = 0; i < nb_output_streams; i++) { + OutputStream *ost = &output_streams[i]; + if (ost->source_index == ist_index) { + update_sample_fmt(ist->st->codec, codec, ost->st->codec); + break; + } + } + + if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) { + snprintf(error, sizeof(error), "Error while opening decoder for input stream #%d.%d", + ist->file_index, ist->st->index); + return AVERROR(EINVAL); + } + assert_codec_experimental(ist->st->codec, 0); + assert_avoptions(ist->opts); + } + + ist->pts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames*AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0; + ist->next_pts = AV_NOPTS_VALUE; + init_pts_correction(&ist->pts_ctx); + ist->is_start = 1; + + return 0; +} + /* * The following code is the main loop of the file converter */ @@ -1821,7 +1868,7 @@ static int transcode(OutputFile *output_files, InputFile *input_files, int nb_input_files) { - int ret = 0, i, j; + int ret = 0, i; AVFormatContext *is, *os; AVCodecContext *codec, *icodec; OutputStream *ost; @@ -1830,6 +1877,7 @@ static int transcode(OutputFile *output_files, int want_sdp = 1; uint8_t *no_packet; int no_packet_count=0; + int64_t timer_start; if (!(no_packet = av_mallocz(nb_input_files))) exit_program(1); @@ -1865,8 +1913,10 @@ static int transcode(OutputFile *output_files, if (ost->st->stream_copy) { uint64_t extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE; - if (extra_size > INT_MAX) + if (extra_size > INT_MAX) { + ret = AVERROR(EINVAL); goto fail; + } /* if stream_copy is selected, no need to decode or encode */ codec->codec_id = icodec->codec_id; @@ -1883,8 +1933,10 @@ static int transcode(OutputFile *output_files, codec->rc_max_rate = icodec->rc_max_rate; codec->rc_buffer_size = icodec->rc_buffer_size; codec->extradata= av_mallocz(extra_size); - if (!codec->extradata) + if (!codec->extradata) { + ret = AVERROR(ENOMEM); goto fail; + } memcpy(codec->extradata, icodec->extradata, icodec->extradata_size); codec->extradata_size= icodec->extradata_size; if(!copy_tb && av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base) && av_q2d(ist->st->time_base) < 1.0/500){ @@ -1939,8 +1991,10 @@ static int transcode(OutputFile *output_files, switch(codec->codec_type) { case AVMEDIA_TYPE_AUDIO: ost->fifo= av_fifo_alloc(1024); - if(!ost->fifo) + if (!ost->fifo) { + ret = AVERROR(ENOMEM); goto fail; + } ost->reformat_pair = MAKE_SFMT_PAIR(AV_SAMPLE_FMT_NONE,AV_SAMPLE_FMT_NONE); if (!codec->sample_rate) { 
codec->sample_rate = icodec->sample_rate; @@ -2114,51 +2168,10 @@ static int transcode(OutputFile *output_files, } } - /* open each decoder */ - for (i = 0; i < nb_input_streams; i++) { - ist = &input_streams[i]; - if (ist->decoding_needed) { - AVCodec *codec = ist->dec; - if (!codec) - codec = avcodec_find_decoder(ist->st->codec->codec_id); - if (!codec) { - snprintf(error, sizeof(error), "Decoder (codec id %d) not found for input stream #%d.%d", - ist->st->codec->codec_id, ist->file_index, ist->st->index); - ret = AVERROR(EINVAL); - goto dump_format; - } - - /* update requested sample format for the decoder based on the - corresponding encoder sample format */ - for (j = 0; j < nb_output_streams; j++) { - ost = &output_streams[j]; - if (ost->source_index == i) { - update_sample_fmt(ist->st->codec, codec, ost->st->codec); - break; - } - } - - if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) { - snprintf(error, sizeof(error), "Error while opening decoder for input stream #%d.%d", - ist->file_index, ist->st->index); - ret = AVERROR(EINVAL); - goto dump_format; - } - assert_codec_experimental(ist->st->codec, 0); - assert_avoptions(ost->opts); - } - } - - /* init pts */ - for (i = 0; i < nb_input_streams; i++) { - AVStream *st; - ist = &input_streams[i]; - st= ist->st; - ist->pts = st->avg_frame_rate.num ? - st->codec->has_b_frames*AV_TIME_BASE / av_q2d(st->avg_frame_rate) : 0; - ist->next_pts = AV_NOPTS_VALUE; - init_pts_correction(&ist->pts_ctx); - ist->is_start = 1; - } + /* init input streams */ + for (i = 0; i < nb_input_streams; i++) + if ((ret = init_input_stream(i, output_streams, nb_output_streams, error, sizeof(error)) < 0)) + goto dump_format; /* open files and write file headers */ for (i = 0; i < nb_output_files; i++) { @@ -2230,14 +2243,17 @@ static int transcode(OutputFile *output_files, smallest output pts */ file_index = -1; for (i = 0; i < nb_output_streams; i++) { + OutputFile *of; int64_t ipts; double opts; ost = &output_streams[i]; + of = &output_files[ost->file_index]; os = output_files[ost->file_index].ctx; ist = &input_streams[ost->source_index]; - if(ist->is_past_recording_time || no_packet[ist->file_index]) + if (ost->is_past_recording_time || no_packet[ist->file_index] || + (os->pb && avio_tell(os->pb) >= of->limit_filesize)) continue; - opts = ost->st->pts.val * av_q2d(ost->st->time_base); + opts = ost->st->pts.val * av_q2d(ost->st->time_base); ipts = ist->pts; if (!input_files[ist->file_index].eof_reached){ if(ipts < ipts_min) { @@ -2265,10 +2281,6 @@ static int transcode(OutputFile *output_files, break; } - /* finish if limit size exhausted */ - if (limit_filesize != 0 && limit_filesize <= avio_tell(output_files[0].ctx->pb)) - break; - /* read a frame from it and output it in the fifo */ is = input_files[file_index].ctx; ret= av_read_frame(is, &pkt); @@ -2329,13 +2341,6 @@ static int transcode(OutputFile *output_files, } } - /* finish if recording time exhausted */ - if (recording_time != INT64_MAX && - av_compare_ts(pkt.pts, ist->st->time_base, recording_time + start_time, (AVRational){1, 1000000}) >= 0) { - ist->is_past_recording_time = 1; - goto discard_packet; - } - //fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size); if (output_packet(ist, ist_index, output_streams, nb_output_streams, &pkt) < 0) { @@ -2352,7 +2357,7 @@ static int transcode(OutputFile *output_files, av_free_packet(&pkt); /* dump report by using the output first video and audio streams */ - print_report(output_files, output_streams, nb_output_streams, 
0); + print_report(output_files, output_streams, nb_output_streams, 0, timer_start); } /* at the end of stream, we must flush the decoder buffers */ @@ -2362,6 +2367,7 @@ static int transcode(OutputFile *output_files, output_packet(ist, i, output_streams, nb_output_streams, NULL); } } + flush_encoders(output_streams, nb_output_streams); term_exit(); @@ -2372,7 +2378,7 @@ static int transcode(OutputFile *output_files, } /* dump report by using the first video and audio streams */ - print_report(output_files, output_streams, nb_output_streams, 1); + print_report(output_files, output_streams, nb_output_streams, 1, timer_start); /* close each encoder */ for (i = 0; i < nb_output_streams; i++) { @@ -3168,8 +3174,6 @@ static OutputStream *new_video_stream(AVFormatContext *oc) video_enc->pix_fmt = frame_pix_fmt; st->sample_aspect_ratio = video_enc->sample_aspect_ratio; - if (intra_only) - video_enc->gop_size = 0; if (video_qscale || same_quant) { video_enc->flags |= CODEC_FLAG_QSCALE; video_enc->global_quality = FF_QP2LAMBDA * video_qscale; @@ -3559,6 +3563,9 @@ static void opt_output_file(const char *filename) output_files = grow_array(output_files, sizeof(*output_files), &nb_output_files, nb_output_files + 1); output_files[nb_output_files - 1].ctx = oc; output_files[nb_output_files - 1].ost_index = nb_output_streams - oc->nb_streams; + output_files[nb_output_files - 1].recording_time = recording_time; + output_files[nb_output_files - 1].start_time = start_time; + output_files[nb_output_files - 1].limit_filesize = limit_filesize; av_dict_copy(&output_files[nb_output_files - 1].opts, format_opts, 0); /* check filename in case of an image number is expected */ @@ -3684,6 +3691,9 @@ static void opt_output_file(const char *filename) audio_channels = 0; audio_sample_fmt = AV_SAMPLE_FMT_NONE; chapters_input_file = INT_MAX; + recording_time = INT64_MAX; + start_time = 0; + limit_filesize = UINT64_MAX; av_freep(&meta_data_maps); nb_meta_data_maps = 0; @@ -3692,6 +3702,8 @@ static void opt_output_file(const char *filename) metadata_chapters_autocopy = 1; av_freep(&stream_maps); nb_stream_maps = 0; + av_freep(&streamid_map); + nb_streamid_map = 0; av_dict_free(&codec_names); @@ -4085,13 +4097,12 @@ static const OptionDef options[] = { { "padleft", HAS_ARG | OPT_VIDEO, {(void*)opt_pad}, "Removed, use the pad filter instead", "size" }, { "padright", HAS_ARG | OPT_VIDEO, {(void*)opt_pad}, "Removed, use the pad filter instead", "size" }, { "padcolor", HAS_ARG | OPT_VIDEO, {(void*)opt_pad}, "Removed, use the pad filter instead", "color" }, - { "intra", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_only}, "use only intra frames"}, { "vn", OPT_BOOL | OPT_VIDEO, {(void*)&video_disable}, "disable video" }, { "vdt", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&video_discard}, "discard threshold", "n" }, { "qscale", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_qscale}, "use fixed video quantizer scale (VBR)", "q" }, { "rc_override", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_video_rc_override_string}, "rate control override for specific intervals", "override" }, { "vcodec", HAS_ARG | OPT_VIDEO, {(void*)opt_video_codec}, "force video codec ('copy' to copy stream)", "codec" }, - { "me_threshold", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_me_threshold}, "motion estimaton threshold", "threshold" }, + { "me_threshold", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_me_threshold}, "motion estimation threshold", "threshold" }, { "same_quant", OPT_BOOL | OPT_VIDEO, {(void*)&same_quant}, "use same 
quantizer as source (implies VBR)" }, { "pass", HAS_ARG | OPT_VIDEO, {(void*)opt_pass}, "select the pass number (1 or 2)", "n" },
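
The hunks above turn recording_time, start_time and limit_filesize from globals into per-OutputFile fields and move the cutoff logic from the input side into output_packet(): each output stream is flagged ost->is_past_recording_time once av_compare_ts() reports that the input pts has passed of->recording_time + of->start_time, and in the stream-copy path the packet timestamps are shifted by the per-file start time rescaled into the output stream's time base. Below is a minimal, self-contained sketch of those two libavutil calls as the patch uses them; the stream time bases, timestamps and window lengths are made-up example values, not taken from the patch (build with something like: gcc -std=c99 cutoff.c -lavutil):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <libavutil/avutil.h>       /* AV_TIME_BASE, AV_TIME_BASE_Q */
#include <libavutil/mathematics.h>  /* av_rescale_q(), av_compare_ts() */

int main(void)
{
    /* made-up example values */
    AVRational stream_tb      = { 1, 90000 };                 /* e.g. an MPEG-TS time base */
    int64_t    pkt_pts        = 450000;                       /* 5 s expressed in stream_tb */
    int64_t    start_time     = 2  * (int64_t)AV_TIME_BASE;   /* OutputFile.start_time, microseconds */
    int64_t    recording_time = 10 * (int64_t)AV_TIME_BASE;   /* OutputFile.recording_time, microseconds */

    /* ist->pts is kept in AV_TIME_BASE units (microseconds) */
    int64_t ist_pts = av_rescale_q(pkt_pts, stream_tb, AV_TIME_BASE_Q);

    /* the per-output-file cutoff now performed inside output_packet() */
    if (av_compare_ts(ist_pts, AV_TIME_BASE_Q,
                      recording_time + start_time, (AVRational){ 1, 1000000 }) >= 0)
        printf("past the recording window -> ost->is_past_recording_time = 1\n");
    else
        printf("still inside the recording window\n");

    /* stream-copy path: shift timestamps by the per-file start time,
       expressed in the output stream's time base */
    AVRational ost_tb            = { 1, 90000 };
    int64_t    ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost_tb);
    int64_t    opkt_pts          = av_rescale_q(pkt_pts, stream_tb, ost_tb) - ost_tb_start_time;
    printf("copied packet pts: %"PRId64"\n", opkt_pts);

    return 0;
}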
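
The new flush_encoders() factors the end-of-stream drain out of output_packet()'s old EOF branch: after the last input packet, each encoder is called with a NULL frame in a loop until it returns no more data, and every returned chunk is wrapped in a packet whose pts is rescaled from enc->time_base to the stream time base. The rough, standalone sketch below shows only that drain pattern, using the same pre-AVPacket encode API this version of avconv.c relies on; the MPEG-1 codec choice, frame count, buffer size and output file name are arbitrary, and it assumes a contemporary (lavc 53-era) libavcodec (build with something like: gcc -std=c99 drain.c -lavcodec -lavutil):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>

int main(void)
{
    int i, out_size, size;

    avcodec_register_all();

    AVCodec *codec = avcodec_find_encoder(CODEC_ID_MPEG1VIDEO);
    if (!codec)
        return 1;

    AVCodecContext *enc = avcodec_alloc_context3(codec);
    enc->bit_rate     = 400000;
    enc->width        = 352;
    enc->height       = 288;
    enc->time_base    = (AVRational){ 1, 25 };
    enc->gop_size     = 10;
    enc->max_b_frames = 1;
    enc->pix_fmt      = PIX_FMT_YUV420P;

    if (avcodec_open2(enc, codec, NULL) < 0)
        return 1;

    FILE    *f           = fopen("drain-demo.m1v", "wb");
    int      outbuf_size = 1000000;
    uint8_t *outbuf      = av_malloc(outbuf_size);

    AVFrame *frame  = avcodec_alloc_frame();
    size            = enc->width * enc->height;
    uint8_t *picbuf = av_malloc(size * 3 / 2);          /* one YUV420P picture */
    frame->data[0]     = picbuf;
    frame->data[1]     = picbuf + size;
    frame->data[2]     = picbuf + size + size / 4;
    frame->linesize[0] = enc->width;
    frame->linesize[1] = enc->width / 2;
    frame->linesize[2] = enc->width / 2;

    /* feed a few gray frames so the encoder has something buffered */
    for (i = 0; i < 25; i++) {
        memset(picbuf, 128, size * 3 / 2);
        frame->pts = i;
        out_size = avcodec_encode_video(enc, outbuf, outbuf_size, frame);
        if (out_size > 0)
            fwrite(outbuf, 1, out_size, f);
    }

    /* the drain loop flush_encoders() is built around: pass a NULL frame
       until the encoder has nothing buffered any more */
    for (;;) {
        out_size = avcodec_encode_video(enc, outbuf, outbuf_size, NULL);
        if (out_size <= 0)
            break;
        fwrite(outbuf, 1, out_size, f);
    }

    fclose(f);
    avcodec_close(enc);
    av_free(picbuf);
    av_free(outbuf);
    av_free(frame);
    av_free(enc);
    return 0;
}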