int w, h;
enum AVPixelFormat pix_fmt;
AVRational pixel_aspect;
+#if FF_API_SWS_PARAM_OPTION
char *sws_param;
+#endif
AVBufferRef *hw_frames_ctx;
return av_buffersrc_add_frame_flags(ctx, frame, 0);
}
-static int av_buffersrc_add_frame_internal(AVFilterContext *ctx,
- AVFrame *frame, int flags);
-
-int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
-{
- AVFrame *copy = NULL;
- int ret = 0;
-
- if (frame && frame->channel_layout &&
- av_get_channel_layout_nb_channels(frame->channel_layout) != frame->channels) {
- av_log(ctx, AV_LOG_ERROR, "Layout indicates a different number of channels than actually present\n");
- return AVERROR(EINVAL);
- }
-
- if (!(flags & AV_BUFFERSRC_FLAG_KEEP_REF) || !frame)
- return av_buffersrc_add_frame_internal(ctx, frame, flags);
-
- if (!(copy = av_frame_alloc()))
- return AVERROR(ENOMEM);
- ret = av_frame_ref(copy, frame);
- if (ret >= 0)
- ret = av_buffersrc_add_frame_internal(ctx, copy, flags);
-
- av_frame_free(&copy);
- return ret;
-}
-
-
static int push_frame(AVFilterGraph *graph)
{
/* NOTE(review): this function body appears elided in the excerpt —
 * 'ret' is declared but never used and no frame is actually pushed
 * here; confirm against the complete source before relying on it. */
int ret;
return 0;
}
-static int av_buffersrc_add_frame_internal(AVFilterContext *ctx,
- AVFrame *frame, int flags)
+int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
{
BufferSourceContext *s = ctx->priv;
AVFrame *copy;
int refcounted, ret;
+ if (frame && frame->channel_layout &&
+ av_get_channel_layout_nb_channels(frame->channel_layout) != frame->channels) {
+ av_log(ctx, AV_LOG_ERROR, "Layout indicates a different number of channels than actually present\n");
+ return AVERROR(EINVAL);
+ }
+
s->nb_failed_requests = 0;
if (!frame)
if (!(copy = av_frame_alloc()))
return AVERROR(ENOMEM);
- if (refcounted) {
+ if (refcounted && !(flags & AV_BUFFERSRC_FLAG_KEEP_REF)) {
av_frame_move_ref(copy, frame);
} else {
ret = av_frame_ref(copy, frame);
}
ret = ff_filter_frame(ctx->outputs[0], copy);
- if (ret < 0) {
- av_frame_free(&copy);
+ if (ret < 0)
return ret;
- }
if ((flags & AV_BUFFERSRC_FLAG_PUSH)) {
ret = push_frame(ctx->graph);
return AVERROR(EINVAL);
}
- av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s tb:%d/%d fr:%d/%d sar:%d/%d sws_param:%s\n",
+ av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s tb:%d/%d fr:%d/%d sar:%d/%d\n",
c->w, c->h, av_get_pix_fmt_name(c->pix_fmt),
c->time_base.num, c->time_base.den, c->frame_rate.num, c->frame_rate.den,
- c->pixel_aspect.num, c->pixel_aspect.den, (char *)av_x_if_null(c->sws_param, ""));
+ c->pixel_aspect.num, c->pixel_aspect.den);
+
+#if FF_API_SWS_PARAM_OPTION
+ if (c->sws_param)
+ av_log(ctx, AV_LOG_WARNING, "sws_param option is deprecated and ignored\n");
+#endif
+
return 0;
}
{ "pixel_aspect", "sample aspect ratio", OFFSET(pixel_aspect), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, DBL_MAX, V },
{ "time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, DBL_MAX, V },
{ "frame_rate", NULL, OFFSET(frame_rate), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, DBL_MAX, V },
+#if FF_API_SWS_PARAM_OPTION
{ "sws_param", NULL, OFFSET(sws_param), AV_OPT_TYPE_STRING, .flags = V },
+#endif
{ NULL },
};
static int request_frame(AVFilterLink *link)
{
BufferSourceContext *c = link->src->priv;
- AVFrame *frame;
- int ret;
if (c->eof)
return AVERROR_EOF;
{ NULL }
};
-AVFilter ff_vsrc_buffer = {
+const AVFilter ff_vsrc_buffer = {
.name = "buffer",
.description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
.priv_size = sizeof(BufferSourceContext),
{ NULL }
};
-AVFilter ff_asrc_abuffer = {
+const AVFilter ff_asrc_abuffer = {
.name = "abuffer",
.description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
.priv_size = sizeof(BufferSourceContext),