qsvenc: add an API for allocating opaque surfaces
index deaa40b38134e39941d25eab78dffcc2daf4b8f4..23f6db6fae905630fc454edacc08c3a00cca092f 100644
--- a/avconv.c
+++ b/avconv.c
@@ -452,7 +452,7 @@ static void do_video_out(AVFormatContext *s,
                          AVFrame *in_picture,
                          int *frame_size)
 {
-    int ret, format_video_sync;
+    int ret, format_video_sync, got_packet;
     AVPacket pkt;
     AVCodecContext *enc = ost->enc_ctx;
 
@@ -488,57 +488,37 @@ static void do_video_out(AVFormatContext *s,
     if (ost->frame_number >= ost->max_frames)
         return;
 
-    if (s->oformat->flags & AVFMT_RAWPICTURE &&
-        enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
-        /* raw pictures are written as AVPicture structure to
-           avoid any copies. We support temporarily the older
-           method. */
-#if FF_API_CODED_FRAME
-FF_DISABLE_DEPRECATION_WARNINGS
-        enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
-        enc->coded_frame->top_field_first  = in_picture->top_field_first;
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
-        pkt.data   = (uint8_t *)in_picture;
-        pkt.size   =  sizeof(AVPicture);
-        pkt.pts    = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
-        pkt.flags |= AV_PKT_FLAG_KEY;
+    if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
+        ost->top_field_first >= 0)
+        in_picture->top_field_first = !!ost->top_field_first;
 
-        write_frame(s, &pkt, ost);
-    } else {
-        int got_packet;
-
-        if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
-            ost->top_field_first >= 0)
-            in_picture->top_field_first = !!ost->top_field_first;
-
-        in_picture->quality = enc->global_quality;
-        in_picture->pict_type = 0;
-        if (ost->forced_kf_index < ost->forced_kf_count &&
-            in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
-            in_picture->pict_type = AV_PICTURE_TYPE_I;
-            ost->forced_kf_index++;
-        }
+    in_picture->quality = enc->global_quality;
+    in_picture->pict_type = 0;
+    if (ost->forced_kf_index < ost->forced_kf_count &&
+        in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
+        in_picture->pict_type = AV_PICTURE_TYPE_I;
+        ost->forced_kf_index++;
+    }
 
-        ost->frames_encoded++;
+    ost->frames_encoded++;
 
-        ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
-        if (ret < 0) {
-            av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
-            exit_program(1);
-        }
+    ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
+    if (ret < 0) {
+        av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
+        exit_program(1);
+    }
 
-        if (got_packet) {
-            av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
-            write_frame(s, &pkt, ost);
-            *frame_size = pkt.size;
+    if (got_packet) {
+        av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
+        write_frame(s, &pkt, ost);
+        *frame_size = pkt.size;
 
-            /* if two pass, output log */
-            if (ost->logfile && enc->stats_out) {
-                fprintf(ost->logfile, "%s", enc->stats_out);
-            }
+        /* if two pass, output log */
+        if (ost->logfile && enc->stats_out) {
+            fprintf(ost->logfile, "%s", enc->stats_out);
         }
     }
+
     ost->sync_opts++;
     /*
      * For video, number of frames in == number of packets out.
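
With the AVFMT_RAWPICTURE branch gone, the encode path above reduces to the
plain avcodec_encode_video2() pattern. For reference, a minimal sketch of that
pattern outside avconv (encode_and_mux() is a hypothetical helper, not an
avconv function; it assumes an already-opened encoder and muxer):

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Encode one frame and hand any resulting packet to the muxer.
 * Mirrors the shape of the new code path in do_video_out() above,
 * with error handling reduced to return codes. */
static int encode_and_mux(AVFormatContext *oc, AVStream *st,
                          AVCodecContext *enc, AVFrame *frame)
{
    AVPacket pkt;
    int got_packet = 0, ret;

    av_init_packet(&pkt);
    pkt.data = NULL;   /* let the encoder allocate the payload */
    pkt.size = 0;

    ret = avcodec_encode_video2(enc, &pkt, frame, &got_packet);
    if (ret < 0)
        return ret;
    if (!got_packet)
        return 0;      /* the encoder is still buffering */

    /* encoder timestamps are in enc->time_base, the muxer wants st->time_base */
    av_packet_rescale_ts(&pkt, enc->time_base, st->time_base);
    pkt.stream_index = st->index;

    return av_interleaved_write_frame(oc, &pkt);
}
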
@@ -959,8 +939,6 @@ static void flush_encoders(void)
 
         if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
             continue;
-        if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
-            continue;
 
         for (;;) {
             int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
@@ -1185,6 +1163,7 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
         decoded_frame->pts = av_rescale_q(decoded_frame->pts,
                                           ist->st->time_base,
                                           (AVRational){1, avctx->sample_rate});
+    ist->nb_samples = decoded_frame->nb_samples;
     for (i = 0; i < ist->nb_filters; i++) {
         if (i < ist->nb_filters - 1) {
             f = ist->filter_frame;
@@ -1323,7 +1302,7 @@ static int send_filter_eof(InputStream *ist)
 }
 
 /* pkt = NULL means EOF (needed to flush decoder buffers) */
-static void process_input_packet(InputStream *ist, const AVPacket *pkt)
+static void process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
 {
     int i;
     int got_output;
@@ -1402,7 +1381,8 @@ static void process_input_packet(InputStream *ist, const AVPacket *pkt)
     }
 
     /* after flushing, send an EOF on all the filter inputs attached to the stream */
-    if (!pkt && ist->decoding_needed) {
+    /* except when looping, where we need to flush the decoder but not send an EOF */
+    if (!pkt && ist->decoding_needed && !no_eof) {
         int ret = send_filter_eof(ist);
         if (ret < 0) {
             av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
@@ -1595,7 +1575,6 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len)
         }
         if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
             av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
-        av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);
 
         if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
             if (ret == AVERROR_EXPERIMENTAL)
@@ -2271,6 +2250,86 @@ static void reset_eagain(void)
         input_files[i]->eagain = 0;
 }
 
+// set *duration to the larger of tmp and *duration, compared in their own time bases, and return that value's time base
+static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
+                                AVRational time_base)
+{
+    int ret;
+
+    if (!*duration) {
+        *duration = tmp;
+        return tmp_time_base;
+    }
+
+    ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
+    if (ret < 0) {
+        *duration = tmp;
+        return tmp_time_base;
+    }
+
+    return time_base;
+}
+
+static int seek_to_start(InputFile *ifile, AVFormatContext *is)
+{
+    InputStream *ist;
+    AVCodecContext *avctx;
+    int i, ret, has_audio = 0;
+    int64_t duration = 0;
+
+    ret = av_seek_frame(is, -1, is->start_time, 0);
+    if (ret < 0)
+        return ret;
+
+    for (i = 0; i < ifile->nb_streams; i++) {
+        ist   = input_streams[ifile->ist_index + i];
+        avctx = ist->dec_ctx;
+
+        // flush decoders
+        if (ist->decoding_needed) {
+            process_input_packet(ist, NULL, 1);
+            avcodec_flush_buffers(avctx);
+        }
+
+        /* duration is the length of the last frame in a stream;
+         * when an audio stream is present we ignore the length of the
+         * last video frame, because it is not exactly defined */
+        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
+            has_audio = 1;
+    }
+
+    for (i = 0; i < ifile->nb_streams; i++) {
+        ist   = input_streams[ifile->ist_index + i];
+        avctx = ist->dec_ctx;
+
+        if (has_audio) {
+            if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
+                AVRational sample_rate = {1, avctx->sample_rate};
+
+                duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
+            } else
+                continue;
+        } else {
+            if (ist->framerate.num) {
+                duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
+            } else if (ist->st->avg_frame_rate.num) {
+                duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
+            } else duration = 1;
+        }
+        if (!ifile->duration)
+            ifile->time_base = ist->st->time_base;
+        /* max_pts - min_pts is the duration of the stream without the
+         * last frame; add the last frame's length to get the total */
+        duration += ist->max_pts - ist->min_pts;
+        ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
+                                        ifile->time_base);
+    }
+
+    ifile->loop--;
+
+    return ret;
+}
+
 /*
  * Read one packet from an input file and send it for
  * - decoding -> lavfi (audio/video)
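
The duration_max() helper added above keeps the longest per-stream duration
while letting each candidate stay in its own time base; the ordering is
delegated to av_compare_ts(). A small standalone sketch of that call (the
timestamps and time bases are made-up values, not taken from avconv):

#include <stdio.h>
#include <libavutil/mathematics.h>

int main(void)
{
    int64_t    a    = 90000;          /* exactly 1 s in a 1/90000 time base */
    AVRational tb_a = { 1, 90000 };
    int64_t    b    = 48001;          /* just over 1 s in a 1/48000 time base */
    AVRational tb_b = { 1, 48000 };

    /* av_compare_ts() returns -1, 0 or 1 without the caller having to
     * rescale either timestamp by hand; here it prints -1 since a < b. */
    printf("compare = %d\n", av_compare_ts(a, tb_a, b, tb_b));
    return 0;
}
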
@@ -2290,6 +2349,7 @@ static int process_input(void)
     InputStream *ist;
     AVPacket pkt;
     int ret, i, j;
+    int64_t duration;
 
     /* select the stream that we must read now */
     ifile = select_input_file();
@@ -2311,6 +2371,11 @@ static int process_input(void)
         ifile->eagain = 1;
         return ret;
     }
+    if ((ret < 0) && (ifile->loop > 1)) {
+        if ((ret = seek_to_start(ifile, is)) < 0)
+            return ret;
+        ret = get_input_packet(ifile, &pkt);
+    }
     if (ret < 0) {
         if (ret != AVERROR_EOF) {
             print_error(is->filename, ret);
@@ -2322,7 +2387,7 @@ static int process_input(void)
         for (i = 0; i < ifile->nb_streams; i++) {
             ist = input_streams[ifile->ist_index + i];
             if (ist->decoding_needed)
-                process_input_packet(ist, NULL);
+                process_input_packet(ist, NULL, 0);
 
             /* mark all outputs that don't go through lavfi as finished */
             for (j = 0; j < nb_output_streams; j++) {
@@ -2401,8 +2466,17 @@ static int process_input(void)
                 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
         }
     }
+    duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
+    if (pkt.pts != AV_NOPTS_VALUE) {
+        pkt.pts += duration;
+        ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
+        ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
+    }
+
+    if (pkt.dts != AV_NOPTS_VALUE)
+        pkt.dts += duration;
 
-    process_input_packet(ist, &pkt);
+    process_input_packet(ist, &pkt, 0);
 
 discard_packet:
     av_free_packet(&pkt);
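
This hunk applies the looped-input offset: after seek_to_start() has rewound
the demuxer, every packet of the new pass is shifted by the measured input
duration, rescaled into the packet's own stream time base. The same operation
in isolation (a sketch; shift_looped_packet() is not an avconv function, and
file_duration/file_time_base stand in for ifile->duration/ifile->time_base):

#include <libavformat/avformat.h>

static void shift_looped_packet(AVPacket *pkt, AVStream *st,
                                int64_t file_duration, AVRational file_time_base)
{
    /* convert the accumulated file duration into this stream's time base */
    int64_t offset = av_rescale_q(file_duration, file_time_base, st->time_base);

    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts += offset;
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts += offset;
}
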
@@ -2473,7 +2547,7 @@ static int transcode(void)
     for (i = 0; i < nb_input_streams; i++) {
         ist = input_streams[i];
         if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
-            process_input_packet(ist, NULL);
+            process_input_packet(ist, NULL, 0);
         }
     }
     poll_filters();