lavfi/vf_crop: use standard options parsing.
index 2ea36943942824411854e4ccf27037368120353d..12c7a2891f6da365f72ce187e3d2e2adffc8ceaf 100644
--- a/ffmpeg.c
+++ b/ffmpeg.c
@@ -168,7 +168,20 @@ static int restore_tty;
    This is a temporary solution until libavfilter gets real subtitles support.
  */
 
+static int sub2video_get_blank_frame(InputStream *ist)
+{
+    int ret;
+    AVFrame *frame = ist->sub2video.frame;
 
+    av_frame_unref(frame);
+    ist->sub2video.frame->width  = ist->sub2video.w;
+    ist->sub2video.frame->height = ist->sub2video.h;
+    ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
+    if ((ret = av_frame_get_buffer(frame, 32)) < 0)
+        return ret;
+    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
+    return 0;
+}
 
 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
                                 AVSubtitleRect *r)
@@ -201,28 +214,27 @@ static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
 
 static void sub2video_push_ref(InputStream *ist, int64_t pts)
 {
-    AVFilterBufferRef *ref = ist->sub2video.ref;
+    AVFrame *frame = ist->sub2video.frame;
     int i;
 
-    ist->sub2video.last_pts = ref->pts = pts;
+    av_assert1(frame->data[0]);
+    ist->sub2video.last_pts = frame->pts = pts;
     for (i = 0; i < ist->nb_filters; i++)
-        av_buffersrc_add_ref(ist->filters[i]->filter,
-                             avfilter_ref_buffer(ref, ~0),
-                             AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT |
-                             AV_BUFFERSRC_FLAG_NO_COPY |
-                             AV_BUFFERSRC_FLAG_PUSH);
+        av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
+                                     AV_BUFFERSRC_FLAG_KEEP_REF |
+                                     AV_BUFFERSRC_FLAG_PUSH);
 }
 
 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
 {
     int w = ist->sub2video.w, h = ist->sub2video.h;
-    AVFilterBufferRef *ref = ist->sub2video.ref;
+    AVFrame *frame = ist->sub2video.frame;
     int8_t *dst;
     int     dst_linesize;
     int num_rects, i;
     int64_t pts, end_pts;
 
-    if (!ref)
+    if (!frame)
         return;
     if (sub) {
         pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000,
@@ -235,9 +247,13 @@ static void sub2video_update(InputStream *ist, AVSubtitle *sub)
         end_pts   = INT64_MAX;
         num_rects = 0;
     }
-    dst          = ref->data    [0];
-    dst_linesize = ref->linesize[0];
-    memset(dst, 0, h * dst_linesize);
+    if (sub2video_get_blank_frame(ist) < 0) {
+        av_log(ist->st->codec, AV_LOG_ERROR,
+               "Impossible to get a blank canvas.\n");
+        return;
+    }
+    dst          = frame->data    [0];
+    dst_linesize = frame->linesize[0];
     for (i = 0; i < num_rects; i++)
         sub2video_copy_rect(dst, dst_linesize, w, h, sub->rects[i]);
     sub2video_push_ref(ist, pts);
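
The hunks above replace the persistent AVFilterBufferRef canvas with a refcounted AVFrame: sub2video_get_blank_frame() (re)allocates a zeroed RGB32 buffer for every update, and sub2video_push_ref() hands it to each buffer source with AV_BUFFERSRC_FLAG_KEEP_REF so ffmpeg keeps its own reference. A minimal standalone sketch of that pattern, assuming an already configured buffersrc context (blank_rgb32_frame and push_to_src are illustrative names, not from ffmpeg.c):

#include <string.h>
#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>
#include <libavfilter/buffersrc.h>

/* Allocate (or reallocate) a zeroed RGB32 canvas in a reusable AVFrame. */
static int blank_rgb32_frame(AVFrame *frame, int w, int h)
{
    int ret;

    av_frame_unref(frame);            /* drop the previous buffer, keep the AVFrame */
    frame->width  = w;
    frame->height = h;
    frame->format = AV_PIX_FMT_RGB32;
    if ((ret = av_frame_get_buffer(frame, 32)) < 0)  /* new refcounted, 32-byte aligned buffer */
        return ret;
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
    return 0;
}

/* Push the canvas into a buffer source; KEEP_REF leaves our reference intact,
 * so the same frame can be fed to several filtergraph inputs. */
static int push_to_src(AVFilterContext *buffersrc, AVFrame *frame, int64_t pts)
{
    frame->pts = pts;
    return av_buffersrc_add_frame_flags(buffersrc, frame,
                                        AV_BUFFERSRC_FLAG_KEEP_REF |
                                        AV_BUFFERSRC_FLAG_PUSH);
}
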
@@ -256,7 +272,7 @@ static void sub2video_heartbeat(InputStream *ist, int64_t pts)
        (possibly overlay) is desperately waiting for a subtitle frame. */
     for (i = 0; i < infile->nb_streams; i++) {
         InputStream *ist2 = input_streams[infile->ist_index + i];
-        if (!ist2->sub2video.ref)
+        if (!ist2->sub2video.frame)
             continue;
         /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
@@ -264,7 +280,7 @@ static void sub2video_heartbeat(InputStream *ist, int64_t pts)
         /* do not send the heartbeat frame if the subtitle is already ahead */
         if (pts2 <= ist2->sub2video.last_pts)
             continue;
-        if (pts2 >= ist2->sub2video.end_pts)
+        if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
             sub2video_update(ist2, NULL);
         for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
             nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
@@ -462,11 +478,11 @@ static void exit_program(void)
         av_freep(&input_files[i]);
     }
     for (i = 0; i < nb_input_streams; i++) {
-        avcodec_free_frame(&input_streams[i]->decoded_frame);
+        av_frame_free(&input_streams[i]->decoded_frame);
+        av_frame_free(&input_streams[i]->filter_frame);
         av_dict_free(&input_streams[i]->opts);
-        free_buffer_pool(&input_streams[i]->buffer_pool);
         avsubtitle_free(&input_streams[i]->prev_sub.subtitle);
-        avfilter_unref_bufferp(&input_streams[i]->sub2video.ref);
+        av_frame_free(&input_streams[i]->sub2video.frame);
         av_freep(&input_streams[i]->filters);
         av_freep(&input_streams[i]);
     }
@@ -570,13 +586,17 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
                 memcpy(t, new_pkt.data, new_pkt.size);
                 memset(t + new_pkt.size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
                 new_pkt.data = t;
+                new_pkt.buf = NULL;
                 a = 1;
             } else
                 a = AVERROR(ENOMEM);
         }
         if (a > 0) {
             av_free_packet(pkt);
-            new_pkt.destruct = av_destruct_packet;
+            new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
+                                           av_buffer_default_free, NULL, 0);
+            if (!new_pkt.buf)
+                exit(1);
         } else if (a < 0) {
             av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
                    bsfc->filter->name, pkt->stream_index,
@@ -892,36 +912,28 @@ static void do_video_out(AVFormatContext *s,
         write_frame(s, &pkt, ost);
     } else {
         int got_packet, forced_keyframe = 0;
-        AVFrame big_picture;
         double pts_time;
 
-        big_picture = *in_picture;
-        /* better than nothing: use input picture interlaced
-           settings */
-        big_picture.interlaced_frame = in_picture->interlaced_frame;
-        if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
-            if (ost->top_field_first == -1)
-                big_picture.top_field_first = in_picture->top_field_first;
-            else
-                big_picture.top_field_first = !!ost->top_field_first;
-        }
+        if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME) &&
+            ost->top_field_first >= 0)
+            in_picture->top_field_first = !!ost->top_field_first;
 
-        if (big_picture.interlaced_frame) {
+        if (in_picture->interlaced_frame) {
             if (enc->codec->id == AV_CODEC_ID_MJPEG)
-                enc->field_order = big_picture.top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
+                enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
             else
-                enc->field_order = big_picture.top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
+                enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
         } else
             enc->field_order = AV_FIELD_PROGRESSIVE;
 
-        big_picture.quality = ost->st->codec->global_quality;
+        in_picture->quality = ost->st->codec->global_quality;
         if (!enc->me_threshold)
-            big_picture.pict_type = 0;
+            in_picture->pict_type = 0;
 
-        pts_time = big_picture.pts != AV_NOPTS_VALUE ?
-            big_picture.pts * av_q2d(enc->time_base) : NAN;
+        pts_time = in_picture->pts != AV_NOPTS_VALUE ?
+            in_picture->pts * av_q2d(enc->time_base) : NAN;
         if (ost->forced_kf_index < ost->forced_kf_count &&
-            big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
+            in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
             ost->forced_kf_index++;
             forced_keyframe = 1;
         } else if (ost->forced_keyframes_pexpr) {
@@ -948,12 +960,12 @@ static void do_video_out(AVFormatContext *s,
             ost->forced_keyframes_expr_const_values[FKF_N] += 1;
         }
         if (forced_keyframe) {
-            big_picture.pict_type = AV_PICTURE_TYPE_I;
+            in_picture->pict_type = AV_PICTURE_TYPE_I;
             av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
         }
 
         update_benchmark(NULL);
-        ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
+        ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
         update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
         if (ret < 0) {
             av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
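
do_video_out() no longer encodes through a stack copy (big_picture): the filtered frame is adjusted in place (top_field_first, quality, forced keyframes) and passed straight to avcodec_encode_video2(). A hedged sketch of that call sequence, with enc assumed to be an opened video encoder and force_idr an illustrative parameter, not from ffmpeg.c:

#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>

/* Encode one frame in place; returns 1 when pkt holds output, 0 if the encoder
 * buffered the frame, <0 on error.  Caller frees pkt with av_free_packet(). */
static int encode_frame(AVCodecContext *enc, AVFrame *frame, AVPacket *pkt, int force_idr)
{
    int got_packet = 0, ret;

    if (force_idr)
        frame->pict_type = AV_PICTURE_TYPE_I;   /* request a keyframe */

    av_init_packet(pkt);
    pkt->data = NULL;                           /* let the encoder allocate pkt->buf */
    pkt->size = 0;

    ret = avcodec_encode_video2(enc, pkt, frame, &got_packet);
    return ret < 0 ? ret : got_packet;
}
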
@@ -1049,7 +1061,6 @@ static void do_video_stats(OutputStream *ost, int frame_size)
  */
 static int reap_filters(void)
 {
-    AVFilterBufferRef *picref;
     AVFrame *filtered_frame = NULL;
     int i;
     int64_t frame_pts;
@@ -1070,7 +1081,7 @@ static int reap_filters(void)
         filtered_frame = ost->filtered_frame;
 
         while (1) {
-            ret = av_buffersink_get_buffer_ref(ost->filter->filter, &picref,
+            ret = av_buffersink_get_frame_flags(ost->filter->filter, filtered_frame,
                                                AV_BUFFERSINK_FLAG_NO_REQUEST);
             if (ret < 0) {
                 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
@@ -1082,8 +1093,8 @@ static int reap_filters(void)
                 break;
             }
             frame_pts = AV_NOPTS_VALUE;
-            if (picref->pts != AV_NOPTS_VALUE) {
-                filtered_frame->pts = frame_pts = av_rescale_q(picref->pts,
+            if (filtered_frame->pts != AV_NOPTS_VALUE) {
+                filtered_frame->pts = frame_pts = av_rescale_q(filtered_frame->pts,
                                                 ost->filter->filter->inputs[0]->time_base,
                                                 ost->st->codec->time_base) -
                                     av_rescale_q(of->start_time,
@@ -1091,7 +1102,7 @@ static int reap_filters(void)
                                                 ost->st->codec->time_base);
 
                 if (of->start_time && filtered_frame->pts < 0) {
-                    avfilter_unref_buffer(picref);
+                    av_frame_unref(filtered_frame);
                     continue;
                 }
             }
@@ -1101,15 +1112,13 @@ static int reap_filters(void)
 
             switch (ost->filter->filter->inputs[0]->type) {
             case AVMEDIA_TYPE_VIDEO:
-                avfilter_copy_buf_props(filtered_frame, picref);
                 filtered_frame->pts = frame_pts;
                 if (!ost->frame_aspect_ratio)
-                    ost->st->codec->sample_aspect_ratio = picref->video->sample_aspect_ratio;
+                    ost->st->codec->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
 
                 do_video_out(of->ctx, ost, filtered_frame);
                 break;
             case AVMEDIA_TYPE_AUDIO:
-                avfilter_copy_buf_props(filtered_frame, picref);
                 filtered_frame->pts = frame_pts;
                 if (!(ost->st->codec->codec->capabilities & CODEC_CAP_PARAM_CHANGE) &&
                     ost->st->codec->channels != av_frame_get_channels(filtered_frame)) {
@@ -1124,7 +1133,7 @@ static int reap_filters(void)
                 av_assert0(0);
             }
 
-            avfilter_unref_buffer(picref);
+            av_frame_unref(filtered_frame);
         }
     }
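
reap_filters() now pulls finished frames from the buffersink straight into a reusable AVFrame; the separate AVFilterBufferRef and the avfilter_copy_buf_props() step are gone. A minimal drain loop in the same style, assuming sink is a configured buffersink context and handle_frame() stands in for the encode path (both names are illustrative):

#include <errno.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavfilter/buffersink.h>

static int drain_sink(AVFilterContext *sink, AVFrame *frame,
                      int (*handle_frame)(AVFrame *f, void *opaque), void *opaque)
{
    int ret;

    for (;;) {
        /* NO_REQUEST: only take frames already buffered in the sink */
        ret = av_buffersink_get_frame_flags(sink, frame,
                                            AV_BUFFERSINK_FLAG_NO_REQUEST);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;                /* nothing (more) available right now */
        if (ret < 0)
            return ret;              /* real error */
        ret = handle_frame(frame, opaque);
        av_frame_unref(frame);       /* release the reference taken from the sink */
        if (ret < 0)
            return ret;
    }
}
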
 
@@ -1465,8 +1474,11 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
        && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
        && ost->st->codec->codec_id != AV_CODEC_ID_VC1
        ) {
-        if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
-            opkt.destruct = av_destruct_packet;
+        if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY)) {
+            opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
+            if (!opkt.buf)
+                exit(1);
+        }
     } else {
         opkt.data = pkt->data;
         opkt.size = pkt->size;
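
With AVPacket.destruct removed, memory that av_parser_change() or a bitstream filter allocated with av_malloc() must be wrapped in an AVBufferRef so the packet owns it and av_free_packet() can release it; that is what the av_buffer_create()/av_buffer_default_free() calls above do. A small sketch of the wrapping, assuming data really was allocated with av_malloc():

#include <libavcodec/avcodec.h>
#include <libavutil/buffer.h>

/* Give the packet ownership of an av_malloc()ed buffer. */
static int packet_take_ownership(AVPacket *pkt, uint8_t *data, int size)
{
    pkt->buf = av_buffer_create(data, size, av_buffer_default_free, NULL, 0);
    if (!pkt->buf)
        return AVERROR(ENOMEM);
    pkt->data = data;
    pkt->size = size;
    return 0;
}
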
@@ -1516,13 +1528,15 @@ int guess_input_channel_layout(InputStream *ist)
 
 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
 {
-    AVFrame *decoded_frame;
+    AVFrame *decoded_frame, *f;
     AVCodecContext *avctx = ist->st->codec;
-    int i, ret, resample_changed;
+    int i, ret, err = 0, resample_changed;
     AVRational decoded_frame_tb;
 
     if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
         return AVERROR(ENOMEM);
+    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
+        return AVERROR(ENOMEM);
     decoded_frame = ist->decoded_frame;
 
     update_benchmark(NULL);
@@ -1537,7 +1551,11 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
     if (!*got_output || ret < 0) {
         if (!pkt->size) {
             for (i = 0; i < ist->nb_filters; i++)
+#if 1
                 av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
+#else
+                av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
+#endif
         }
         return ret;
     }
@@ -1625,24 +1643,37 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
         decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                               (AVRational){1, ist->st->codec->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                               (AVRational){1, ist->st->codec->sample_rate});
-    for (i = 0; i < ist->nb_filters; i++)
-        av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame,
-                               AV_BUFFERSRC_FLAG_PUSH);
-
+    for (i = 0; i < ist->nb_filters; i++) {
+        if (i < ist->nb_filters - 1) {
+            f = ist->filter_frame;
+            err = av_frame_ref(f, decoded_frame);
+            if (err < 0)
+                break;
+        } else
+            f = decoded_frame;
+        err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
+                                     AV_BUFFERSRC_FLAG_PUSH);
+        if (err < 0)
+            break;
+    }
     decoded_frame->pts = AV_NOPTS_VALUE;
 
-    return ret;
+    av_frame_unref(ist->filter_frame);
+    av_frame_unref(decoded_frame);
+    return err < 0 ? err : ret;
 }
 
 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
 {
-    AVFrame *decoded_frame;
+    AVFrame *decoded_frame, *f;
     void *buffer_to_free = NULL;
-    int i, ret = 0, resample_changed;
+    int i, ret = 0, err = 0, resample_changed;
     int64_t best_effort_timestamp;
     AVRational *frame_sample_aspect;
 
-    if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
+    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
+        return AVERROR(ENOMEM);
+    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
         return AVERROR(ENOMEM);
     decoded_frame = ist->decoded_frame;
     pkt->dts  = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
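
Both decoders now feed the decoded AVFrame itself into the filtergraph. Since av_buffersrc_add_frame_flags() without AV_BUFFERSRC_FLAG_KEEP_REF takes over the frame's reference, the decode_audio() loop above (and the matching decode_video() loop further down) passes a fresh av_frame_ref() copy (ist->filter_frame) to every input except the last, which gets the original. A hedged sketch of that fan-out, assuming srcs[] holds configured buffersrc contexts and tmp is a preallocated scratch AVFrame:

#include <libavutil/frame.h>
#include <libavfilter/buffersrc.h>

/* Send one decoded frame to n filtergraph inputs; all but the last receive a
 * new reference, the last consumes the decoded frame's own reference. */
static int send_to_filters(AVFilterContext **srcs, int n,
                           AVFrame *decoded, AVFrame *tmp)
{
    int i, err = 0;

    for (i = 0; i < n; i++) {
        AVFrame *f;
        if (i < n - 1) {
            f = tmp;
            err = av_frame_ref(f, decoded);   /* extra reference for this input */
            if (err < 0)
                break;
        } else
            f = decoded;                      /* last input takes our reference */
        err = av_buffersrc_add_frame_flags(srcs[i], f, AV_BUFFERSRC_FLAG_PUSH);
        if (err < 0)
            break;
    }
    av_frame_unref(tmp);        /* clean up whatever references are left */
    av_frame_unref(decoded);
    return err;
}
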
@@ -1654,7 +1685,11 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
     if (!*got_output || ret < 0) {
         if (!pkt->size) {
             for (i = 0; i < ist->nb_filters; i++)
+#if 1
                 av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
+#else
+                av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
+#endif
         }
         return ret;
     }
@@ -1711,40 +1746,28 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
 
     frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
     for (i = 0; i < ist->nb_filters; i++) {
-        int changed =      ist->st->codec->width   != ist->filters[i]->filter->outputs[0]->w
-                        || ist->st->codec->height  != ist->filters[i]->filter->outputs[0]->h
-                        || ist->st->codec->pix_fmt != ist->filters[i]->filter->outputs[0]->format;
-
         if (!frame_sample_aspect->num)
             *frame_sample_aspect = ist->st->sample_aspect_ratio;
-        if (ist->dr1 && decoded_frame->type==FF_BUFFER_TYPE_USER && !changed) {
-            FrameBuffer      *buf = decoded_frame->opaque;
-            AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
-                                        decoded_frame->data, decoded_frame->linesize,
-                                        AV_PERM_READ | AV_PERM_PRESERVE,
-                                        ist->st->codec->width, ist->st->codec->height,
-                                        ist->st->codec->pix_fmt);
-
-            avfilter_copy_frame_props(fb, decoded_frame);
-            fb->buf->priv           = buf;
-            fb->buf->free           = filter_release_buffer;
-
-            av_assert0(buf->refcount>0);
-            buf->refcount++;
-            av_buffersrc_add_ref(ist->filters[i]->filter, fb,
-                                 AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT |
-                                 AV_BUFFERSRC_FLAG_NO_COPY |
-                                 AV_BUFFERSRC_FLAG_PUSH);
+
+        if (i < ist->nb_filters - 1) {
+            f = ist->filter_frame;
+            err = av_frame_ref(f, decoded_frame);
+            if (err < 0)
+                break;
         } else
-        if(av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame, AV_BUFFERSRC_FLAG_PUSH)<0) {
+            f = decoded_frame;
+        if(av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
+                                        AV_BUFFERSRC_FLAG_PUSH)<0) {
             av_log(NULL, AV_LOG_FATAL, "Failed to inject frame into filter network\n");
             exit(1);
         }
 
     }
 
+    av_frame_unref(ist->filter_frame);
+    av_frame_unref(decoded_frame);
     av_free(buffer_to_free);
-    return ret;
+    return err < 0 ? err : ret;
 }
 
 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
@@ -1960,12 +1983,7 @@ static int init_input_stream(int ist_index, char *error, int error_len)
             return AVERROR(EINVAL);
         }
 
-        ist->dr1 = (codec->capabilities & CODEC_CAP_DR1) && !(FF_API_DEINTERLACE && do_deinterlace);
-        if (codec->type == AVMEDIA_TYPE_VIDEO && ist->dr1) {
-            ist->st->codec->get_buffer     = codec_get_buffer;
-            ist->st->codec->release_buffer = codec_release_buffer;
-            ist->st->codec->opaque         = &ist->buffer_pool;
-        }
+        av_opt_set_int(ist->st->codec, "refcounted_frames", 1, 0);
 
         if (!av_dict_get(ist->opts, "threads", NULL, 0))
             av_dict_set(&ist->opts, "threads", "auto", 0);
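
The hand-rolled DR1 buffer pool (codec_get_buffer/codec_release_buffer) is replaced by the decoder's own refcounted frames: with the "refcounted_frames" option set before avcodec_open2(), frames returned by avcodec_decode_video2()/avcodec_decode_audio4() stay valid until av_frame_unref(). A minimal sketch of decoding under that option (error handling trimmed; names are illustrative):

#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>
#include <libavutil/opt.h>

/* Before avcodec_open2(): ask the decoder for refcounted output frames. */
static int enable_refcounted_frames(AVCodecContext *dec_ctx)
{
    return av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
}

/* After open: decode one packet; the frame's buffers remain valid until
 * av_frame_unref()/av_frame_free(), independent of later decode calls. */
static int decode_one(AVCodecContext *dec_ctx, AVPacket *pkt,
                      AVFrame *frame, int *got_frame)
{
    int ret = avcodec_decode_video2(dec_ctx, frame, got_frame, pkt);

    if (ret >= 0 && *got_frame) {
        /* ... use frame->data[] ... */
        av_frame_unref(frame);
    }
    return ret;
}
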
@@ -2323,6 +2341,13 @@ static int transcode_init(void)
                     ost->frame_aspect_ratio ? // overridden by the -aspect cli option
                     av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
                     ost->filter->filter->inputs[0]->sample_aspect_ratio;
+                if (!strncmp(ost->enc->name, "libx264", 7) &&
+                    codec->pix_fmt == AV_PIX_FMT_NONE &&
+                    ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
+                    av_log(NULL, AV_LOG_INFO,
+                           "No pixel format specified, %s for H.264 encoding chosen.\n"
+                           "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
+                           av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
                 codec->pix_fmt = ost->filter->filter->inputs[0]->format;
 
                 if (!icodec ||