X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;ds=inline;f=avconv.c;h=6b37d6e7d6c066de594de0990dca194300771959;hb=9d3ea5cbf57e30bf2717a9ce64e858dad8a02aa6;hp=319823b056642b481e9cca70959bf17ae80311b8;hpb=40cf1bbacc6220a0aa6bed5c331871d43f9ce370;p=ffmpeg

diff --git a/avconv.c b/avconv.c
index 319823b0566..6b37d6e7d6c 100644
--- a/avconv.c
+++ b/avconv.c
@@ -249,7 +249,7 @@ static void abort_codec_experimental(AVCodec *c, int encoder)
            "results.\nAdd '-strict experimental' if you want to use it.\n",
            codec_string, c->name);
     codec = encoder ? avcodec_find_encoder(c->id) : avcodec_find_decoder(c->id);
-    if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
+    if (!(codec->capabilities & AV_CODEC_CAP_EXPERIMENTAL))
         av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
                codec_string, codec->name);
     exit_program(1);
@@ -270,7 +270,7 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
      */
     if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
         if (ost->frame_number >= ost->max_frames) {
-            av_free_packet(pkt);
+            av_packet_unref(pkt);
             return;
         }
         ost->frame_number++;
@@ -279,6 +279,11 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
         uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_FACTOR,
                                               NULL);
         ost->quality = sd ? *(int *)sd : -1;
+
+        if (ost->frame_rate.num) {
+            pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
+                                         ost->st->time_base);
+        }
     }

     while (bsfc) {
@@ -288,7 +293,7 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
                                            pkt->data, pkt->size,
                                            pkt->flags & AV_PKT_FLAG_KEY);
         if (a > 0) {
-            av_free_packet(pkt);
+            av_packet_unref(pkt);
             new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
                                            av_buffer_default_free, NULL, 0);
             if (!new_pkt.buf)
@@ -452,7 +457,7 @@ static void do_video_out(AVFormatContext *s,
                          AVFrame *in_picture,
                          int *frame_size)
 {
-    int ret, format_video_sync;
+    int ret, format_video_sync, got_packet;
     AVPacket pkt;
     AVCodecContext *enc = ost->enc_ctx;

@@ -488,57 +493,37 @@ static void do_video_out(AVFormatContext *s,
     if (ost->frame_number >= ost->max_frames)
         return;

-    if (s->oformat->flags & AVFMT_RAWPICTURE &&
-        enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
-        /* raw pictures are written as AVPicture structure to
-           avoid any copies. We support temporarily the older
-           method. */
-#if FF_API_CODED_FRAME
-FF_DISABLE_DEPRECATION_WARNINGS
-        enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
-        enc->coded_frame->top_field_first = in_picture->top_field_first;
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
-        pkt.data = (uint8_t *)in_picture;
-        pkt.size = sizeof(AVPicture);
-        pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
-        pkt.flags |= AV_PKT_FLAG_KEY;
+    if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
+        ost->top_field_first >= 0)
+        in_picture->top_field_first = !!ost->top_field_first;

-        write_frame(s, &pkt, ost);
-    } else {
-        int got_packet;
-
-        if (enc->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME) &&
-            ost->top_field_first >= 0)
-            in_picture->top_field_first = !!ost->top_field_first;
-
-        in_picture->quality = enc->global_quality;
-        in_picture->pict_type = 0;
-        if (ost->forced_kf_index < ost->forced_kf_count &&
-            in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
-            in_picture->pict_type = AV_PICTURE_TYPE_I;
-            ost->forced_kf_index++;
-        }
+    in_picture->quality = enc->global_quality;
+    in_picture->pict_type = 0;
+    if (ost->forced_kf_index < ost->forced_kf_count &&
+        in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
+        in_picture->pict_type = AV_PICTURE_TYPE_I;
+        ost->forced_kf_index++;
+    }

-        ost->frames_encoded++;
+    ost->frames_encoded++;

-        ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
-        if (ret < 0) {
-            av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
-            exit_program(1);
-        }
+    ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
+    if (ret < 0) {
+        av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
+        exit_program(1);
+    }

-        if (got_packet) {
-            av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
-            write_frame(s, &pkt, ost);
-            *frame_size = pkt.size;
+    if (got_packet) {
+        av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
+        write_frame(s, &pkt, ost);
+        *frame_size = pkt.size;

-            /* if two pass, output log */
-            if (ost->logfile && enc->stats_out) {
-                fprintf(ost->logfile, "%s", enc->stats_out);
-            }
+        /* if two pass, output log */
+        if (ost->logfile && enc->stats_out) {
+            fprintf(ost->logfile, "%s", enc->stats_out);
         }
     }
+    ost->sync_opts++;
     /*
      * For video, number of frames in == number of packets out.
@@ -576,7 +561,7 @@ static void do_video_stats(OutputStream *ost, int frame_size)

 #if FF_API_CODED_FRAME
 FF_DISABLE_DEPRECATION_WARNINGS
-        if (enc->flags&CODEC_FLAG_PSNR)
+        if (enc->flags & AV_CODEC_FLAG_PSNR)
             fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
 FF_ENABLE_DEPRECATION_WARNINGS
 #endif
@@ -614,7 +599,7 @@ static int poll_filter(OutputStream *ost)
     filtered_frame = ost->filtered_frame;

     if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
-        !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
+        !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
         ret = av_buffersink_get_samples(ost->filter->filter, filtered_frame,
                                         ost->enc_ctx->frame_size);
     else
@@ -891,7 +876,7 @@ static void print_report(int is_last_report, int64_t timer_start)

 #if FF_API_CODED_FRAME
 FF_DISABLE_DEPRECATION_WARNINGS
-            if (enc->flags&CODEC_FLAG_PSNR) {
+            if (enc->flags & AV_CODEC_FLAG_PSNR) {
                 int j;
                 double error, error_sum = 0;
                 double scale, scale_sum = 0;
@@ -959,8 +944,6 @@ static void flush_encoders(void)

         if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
             continue;
-        if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
-            continue;

         for (;;) {
             int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
@@ -1185,6 +1168,7 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
         decoded_frame->pts = av_rescale_q(decoded_frame->pts,
                                           ist->st->time_base,
                                           (AVRational){1, avctx->sample_rate});
+    ist->nb_samples = decoded_frame->nb_samples;
     for (i = 0; i < ist->nb_filters; i++) {
         if (i < ist->nb_filters - 1) {
             f = ist->filter_frame;
@@ -1247,7 +1231,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
                            decoded_frame->width, decoded_frame->height,
                            av_get_pix_fmt_name(decoded_frame->format));
     ret = poll_filters();
-    if (ret < 0 && (ret != AVERROR_EOF && ret != AVERROR(EAGAIN))) {
+    if (ret < 0 && ret != AVERROR_EOF) {
         char errbuf[128];
         av_strerror(ret, errbuf, sizeof(errbuf));

@@ -1323,7 +1307,7 @@ static int send_filter_eof(InputStream *ist)
 }

 /* pkt = NULL means EOF (needed to flush decoder buffers) */
-static void process_input_packet(InputStream *ist, const AVPacket *pkt)
+static void process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
 {
     int i;
     int got_output;
@@ -1353,7 +1337,7 @@ static void process_input_packet(InputStream *ist, const AVPacket *pkt)
         ist->last_dts = ist->next_dts;

         if (avpkt.size && avpkt.size != pkt->size &&
-            !(ist->dec->capabilities & CODEC_CAP_SUBFRAMES)) {
+            !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
             av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
                    "Multiple frames in a packet from stream %d\n", pkt->stream_index);
             ist->showed_multi_packet_warning = 1;
@@ -1402,7 +1386,8 @@ static void process_input_packet(InputStream *ist, const AVPacket *pkt)
     }

     /* after flushing, send an EOF on all the filter inputs attached to the stream */
-    if (!pkt && ist->decoding_needed) {
+    /* except when looping we need to flush but not to send an EOF */
+    if (!pkt && ist->decoding_needed && !no_eof) {
         int ret = send_filter_eof(ist);
         if (ret < 0) {
             av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
@@ -1595,7 +1580,6 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len)
     }
     if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
         av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
-    av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);

     if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
         if (ret == AVERROR_EXPERIMENTAL)
@@ -1618,6 +1602,28 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len)
             exit_program(1);
         }

+        if (ost->enc_ctx->nb_coded_side_data) {
+            int i;
+
+            ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
+                                                  sizeof(*ost->st->side_data));
+            if (!ost->st->side_data)
+                return AVERROR(ENOMEM);
+
+            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
+                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
+                AVPacketSideData *sd_dst = &ost->st->side_data[i];
+
+                sd_dst->data = av_malloc(sd_src->size);
+                if (!sd_dst->data)
+                    return AVERROR(ENOMEM);
+                memcpy(sd_dst->data, sd_src->data, sd_src->size);
+                sd_dst->size = sd_src->size;
+                sd_dst->type = sd_src->type;
+                ost->st->nb_side_data++;
+            }
+        }
+
         ost->st->time_base = ost->enc_ctx->time_base;
     } else {
         ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
@@ -1732,7 +1738,7 @@ static int transcode_init(void)

         av_assert0(ist && !ost->filter);

-        extra_size = (uint64_t)dec_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
+        extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;

         if (extra_size > INT_MAX) {
             return AVERROR(EINVAL);
@@ -1873,6 +1879,11 @@ static int transcode_init(void)
             }
         }

+#if CONFIG_LIBMFX
+        if (qsv_transcode_init(ost))
+            exit_program(1);
+#endif
+
         if (!ost->filter &&
             (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
              enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
@@ -2146,7 +2157,6 @@ static void *input_thread(void *arg)
         while (!av_fifo_space(f->fifo))
             pthread_cond_wait(&f->fifo_cond, &f->fifo_lock);

-        av_dup_packet(&pkt);
         av_fifo_generic_write(f->fifo, &pkt, sizeof(pkt), NULL);

         pthread_mutex_unlock(&f->fifo_lock);
@@ -2175,7 +2185,7 @@ static void free_input_threads(void)
         pthread_mutex_lock(&f->fifo_lock);
         while (av_fifo_size(f->fifo)) {
             av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
-            av_free_packet(&pkt);
+            av_packet_unref(&pkt);
         }
         pthread_cond_signal(&f->fifo_cond);
         pthread_mutex_unlock(&f->fifo_lock);
@@ -2185,7 +2195,7 @@ static void free_input_threads(void)

         while (av_fifo_size(f->fifo)) {
             av_fifo_generic_read(f->fifo, &pkt, sizeof(pkt), NULL);
-            av_free_packet(&pkt);
+            av_packet_unref(&pkt);
         }
         av_fifo_free(f->fifo);
     }
@@ -2271,6 +2281,87 @@ static void reset_eagain(void)
         input_files[i]->eagain = 0;
 }

+// set duration to max(tmp, duration) in a proper time base and return duration's time_base
+static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
+                               AVRational time_base)
+{
+    int ret;
+
+    if (!*duration) {
+        *duration = tmp;
+        return tmp_time_base;
+    }
+
+    ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
+    if (ret < 0) {
+        *duration = tmp;
+        return tmp_time_base;
+    }
+
+    return time_base;
+}
+
+static int seek_to_start(InputFile *ifile, AVFormatContext *is)
+{
+    InputStream *ist;
+    AVCodecContext *avctx;
+    int i, ret, has_audio = 0;
+    int64_t duration = 0;
+
+    ret = av_seek_frame(is, -1, is->start_time, 0);
+    if (ret < 0)
+        return ret;
+
+    for (i = 0; i < ifile->nb_streams; i++) {
+        ist = input_streams[ifile->ist_index + i];
+        avctx = ist->dec_ctx;
+
+        // flush decoders
+        if (ist->decoding_needed) {
+            process_input_packet(ist, NULL, 1);
+            avcodec_flush_buffers(avctx);
+        }
+
+        /* duration is the length of the last frame in a stream
+         * when audio stream is present we don't care about
+         * last video frame length because it's not defined exactly */
+        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
+            has_audio = 1;
+    }
+
+    for (i = 0; i < ifile->nb_streams; i++) {
+        ist = input_streams[ifile->ist_index + i];
+        avctx = ist->dec_ctx;
+
+        if (has_audio) {
+            if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
+                AVRational sample_rate = {1, avctx->sample_rate};
+
+                duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
+            } else
+                continue;
+        } else {
+            if (ist->framerate.num) {
+                duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
+            } else if (ist->st->avg_frame_rate.num) {
+                duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
+            } else duration = 1;
+        }
+        if (!ifile->duration)
+            ifile->time_base = ist->st->time_base;
+        /* the total duration of the stream, max_pts - min_pts is
+         * the duration of the stream without the last frame */
+        duration += ist->max_pts - ist->min_pts;
+        ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
+                                        ifile->time_base);
+    }
+
+    if (ifile->loop > 0)
+        ifile->loop--;
+
+    return ret;
+}
+
 /*
  * Read one packet from an input file and send it for
  * - decoding -> lavfi (audio/video)
@@ -2290,6 +2381,7 @@ static int process_input(void)
     InputStream *ist;
     AVPacket pkt;
     int ret, i, j;
+    int64_t duration;

     /* select the stream that we must read now */
     ifile = select_input_file();
@@ -2311,6 +2403,11 @@ static int process_input(void)
             ifile->eagain = 1;
             return ret;
         }
+        if (ret < 0 && ifile->loop) {
+            if ((ret = seek_to_start(ifile, is)) < 0)
+                return ret;
+            ret = get_input_packet(ifile, &pkt);
+        }
         if (ret < 0) {
             if (ret != AVERROR_EOF) {
                 print_error(is->filename, ret);
@@ -2322,7 +2419,7 @@ static int process_input(void)
         for (i = 0; i < ifile->nb_streams; i++) {
             ist = input_streams[ifile->ist_index + i];
             if (ist->decoding_needed)
-                process_input_packet(ist, NULL);
+                process_input_packet(ist, NULL, 0);

             /* mark all outputs that don't go through lavfi as finished */
             for (j = 0; j < nb_output_streams; j++) {
@@ -2401,11 +2498,20 @@ static int process_input(void)
             pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
         }
     }
+    duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
+    if (pkt.pts != AV_NOPTS_VALUE) {
+        pkt.pts += duration;
+        ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
+        ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
+    }

-    process_input_packet(ist, &pkt);
+    if (pkt.dts != AV_NOPTS_VALUE)
+        pkt.dts += duration;
+
+    process_input_packet(ist, &pkt, 0);

 discard_packet:
-    av_free_packet(&pkt);
+    av_packet_unref(&pkt);

     return 0;
 }
@@ -2450,16 +2556,12 @@ static int transcode(void)
         }

         ret = poll_filters();
-        if (ret < 0) {
-            if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
-                continue;
-            } else {
-                char errbuf[128];
-                av_strerror(ret, errbuf, sizeof(errbuf));
+        if (ret < 0 && ret != AVERROR_EOF) {
+            char errbuf[128];
+            av_strerror(ret, errbuf, sizeof(errbuf));

-                av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
-                break;
-            }
+            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
+            break;
         }

         /* dump report by using the output first video and audio streams */
@@ -2473,7 +2575,7 @@ static int transcode(void)
     for (i = 0; i < nb_input_streams; i++) {
         ist = input_streams[i];
         if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
-            process_input_packet(ist, NULL);
+            process_input_packet(ist, NULL, 0);
        }
    }
    poll_filters();
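
The duration handling introduced above (the pkt->duration computation in write_frame() and the timestamp offsets applied in seek_to_start()/process_input() when an input is looped) reduces to rescaling a tick count from one time base to another with av_rescale_q(). A minimal standalone sketch of that arithmetic, using made-up stream parameters (25 fps output frame rate, a 1/90000 stream time base, a 10-second input) rather than anything taken from a real stream, and needing only libavutil (e.g. cc sketch.c -lavutil):

/* Sketch of the time-base arithmetic used by the patch above.
 * All numeric parameters below are hypothetical, chosen only to
 * make the rescaling results easy to check by hand. */
#include <stdio.h>
#include <inttypes.h>
#include <libavutil/rational.h>
#include <libavutil/mathematics.h>

int main(void)
{
    AVRational frame_rate = { 25, 1 };      /* stands in for ost->frame_rate    */
    AVRational time_base  = { 1, 90000 };   /* stands in for ost->st->time_base */

    /* write_frame(): one frame lasts 1/frame_rate seconds; expressed in the
     * stream time base that is 1 * (1/25) / (1/90000) = 3600 ticks. */
    int64_t frame_duration = av_rescale_q(1, av_inv_q(frame_rate), time_base);
    printf("per-frame duration: %" PRId64 " ticks\n", frame_duration);

    /* process_input() when looping: every timestamp of the next iteration is
     * shifted by the accumulated duration of the previous iterations, once
     * that duration has been converted into the stream time base. */
    int64_t file_duration = av_rescale_q(10, (AVRational){ 1, 1 }, time_base); /* 10 s */
    int64_t first_pts_second_pass = 0 /* original pts */ + file_duration;
    printf("pts after one loop iteration: %" PRId64 " ticks\n", first_pts_second_pass);

    return 0;
}

With these example values one frame corresponds to 3600 ticks and the first packet of the second iteration starts at 900000 ticks, which is how the looped input keeps monotonically increasing timestamps across iterations.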