typedef struct BufferSourceContext {
const AVClass *class;
- AVFrame *queued_frame;
AVRational time_base; ///< time_base to set in the output link
AVRational frame_rate; ///< frame_rate to set in the output link
unsigned nb_failed_requests; ///< request_frame() calls that found no data; reset each time a frame is added
int w, h; ///< video frame dimensions
enum AVPixelFormat pix_fmt; ///< video pixel format
AVRational pixel_aspect; ///< sample aspect ratio of the video frames
+#if FF_API_SWS_PARAM_OPTION
char *sws_param; ///< deprecated and ignored; kept only so the option still parses
+#endif
AVBufferRef *hw_frames_ctx; ///< owned reference, released in uninit()
uint64_t channel_layout; ///< audio channel layout
char *channel_layout_str; ///< channel layout given as a string (audio)
int eof; ///< set once end-of-stream has been signalled
} BufferSourceContext;
switch (ctx->filter->outputs[0].type) {
case AVMEDIA_TYPE_VIDEO:
if (param->format != AV_PIX_FMT_NONE) {
- s->got_format_from_params = 1;
s->pix_fmt = param->format;
}
if (param->width > 0)
break;
case AVMEDIA_TYPE_AUDIO:
if (param->format != AV_SAMPLE_FMT_NONE) {
- s->got_format_from_params = 1;
s->sample_fmt = param->format;
}
if (param->sample_rate > 0)
return av_buffersrc_add_frame_flags(ctx, frame, 0);
}
-static int av_buffersrc_add_frame_internal(AVFilterContext *ctx,
- AVFrame *frame, int flags);
-
-int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
-{
- AVFrame *copy = NULL;
- int ret = 0;
-
- if (frame && frame->channel_layout &&
- av_get_channel_layout_nb_channels(frame->channel_layout) != frame->channels) {
- av_log(ctx, AV_LOG_ERROR, "Layout indicates a different number of channels than actually present\n");
- return AVERROR(EINVAL);
- }
-
- if (!(flags & AV_BUFFERSRC_FLAG_KEEP_REF) || !frame)
- return av_buffersrc_add_frame_internal(ctx, frame, flags);
-
- if (!(copy = av_frame_alloc()))
- return AVERROR(ENOMEM);
- ret = av_frame_ref(copy, frame);
- if (ret >= 0)
- ret = av_buffersrc_add_frame_internal(ctx, copy, flags);
-
- av_frame_free(&copy);
- return ret;
-}
-
static int push_frame(AVFilterGraph *graph)
{
int ret;
return 0;
}
-static int av_buffersrc_add_frame_internal(AVFilterContext *ctx,
- AVFrame *frame, int flags)
+int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
{
BufferSourceContext *s = ctx->priv;
AVFrame *copy;
int refcounted, ret;
+ if (frame && frame->channel_layout &&
+ av_get_channel_layout_nb_channels(frame->channel_layout) != frame->channels) {
+ av_log(ctx, AV_LOG_ERROR, "Layout indicates a different number of channels than actually present\n");
+ return AVERROR(EINVAL);
+ }
+
s->nb_failed_requests = 0;
if (!frame)
if (!(copy = av_frame_alloc()))
return AVERROR(ENOMEM);
- if (refcounted) {
+ if (refcounted && !(flags & AV_BUFFERSRC_FLAG_KEEP_REF)) {
av_frame_move_ref(copy, frame);
} else {
ret = av_frame_ref(copy, frame);
}
}
- av_assert0(s->queued_frame == NULL);
- s->queued_frame = copy;
- if ((ret = ctx->output_pads[0].request_frame(ctx->outputs[0])) < 0)
+ ret = ff_filter_frame(ctx->outputs[0], copy);
+ if (ret < 0)
return ret;
- av_assert0(s->queued_frame == NULL);
if ((flags & AV_BUFFERSRC_FLAG_PUSH)) {
ret = push_frame(ctx->graph);
{
BufferSourceContext *c = ctx->priv;
- if (!(c->pix_fmt != AV_PIX_FMT_NONE || c->got_format_from_params) || !c->w || !c->h ||
+ if (c->pix_fmt == AV_PIX_FMT_NONE || !c->w || !c->h ||
av_q2d(c->time_base) <= 0) {
av_log(ctx, AV_LOG_ERROR, "Invalid parameters provided.\n");
return AVERROR(EINVAL);
}
- av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s tb:%d/%d fr:%d/%d sar:%d/%d sws_param:%s\n",
+ av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s tb:%d/%d fr:%d/%d sar:%d/%d\n",
c->w, c->h, av_get_pix_fmt_name(c->pix_fmt),
c->time_base.num, c->time_base.den, c->frame_rate.num, c->frame_rate.den,
- c->pixel_aspect.num, c->pixel_aspect.den, (char *)av_x_if_null(c->sws_param, ""));
+ c->pixel_aspect.num, c->pixel_aspect.den);
+
+#if FF_API_SWS_PARAM_OPTION
+ if (c->sws_param)
+ av_log(ctx, AV_LOG_WARNING, "sws_param option is deprecated and ignored\n");
+#endif
+
return 0;
}
{ "pixel_aspect", "sample aspect ratio", OFFSET(pixel_aspect), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, DBL_MAX, V },
{ "time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, DBL_MAX, V },
{ "frame_rate", NULL, OFFSET(frame_rate), AV_OPT_TYPE_RATIONAL, { .dbl = 0 }, 0, DBL_MAX, V },
+#if FF_API_SWS_PARAM_OPTION
{ "sws_param", NULL, OFFSET(sws_param), AV_OPT_TYPE_STRING, .flags = V },
+#endif
{ NULL },
};
BufferSourceContext *s = ctx->priv;
int ret = 0;
- if (!(s->sample_fmt != AV_SAMPLE_FMT_NONE || s->got_format_from_params)) {
+ if (s->sample_fmt == AV_SAMPLE_FMT_NONE) {
av_log(ctx, AV_LOG_ERROR, "Sample format was not set or was invalid\n");
return AVERROR(EINVAL);
}
/* Filter teardown: the only context-owned resource released here is the
 * hardware frames context reference. */
static av_cold void uninit(AVFilterContext *ctx)
{
BufferSourceContext *s = ctx->priv;
- av_assert0(s->queued_frame == NULL);
av_buffer_unref(&s->hw_frames_ctx);
}
/* Pull callback on the source's output link. The buffer source produces no
 * frames of its own — data only enters via av_buffersrc_add_frame_flags() —
 * so a pull either signals EOF once the stream has been closed, or returns
 * EAGAIN while counting the failed request (nb_failed_requests lets callers
 * detect that more input is needed). */
static int request_frame(AVFilterLink *link)
{
BufferSourceContext *c = link->src->priv;
- AVFrame *frame;
- int ret;
-
- if (!c->queued_frame) {
- if (c->eof)
- return AVERROR_EOF;
- c->nb_failed_requests++;
- return AVERROR(EAGAIN);
- }
- frame = c->queued_frame;
- c->queued_frame = NULL;
- ret = ff_filter_frame(link, frame);
- return ret;
-}
-static int poll_frame(AVFilterLink *link)
-{
- BufferSourceContext *c = link->src->priv;
- av_assert0(c->queued_frame == NULL);
- return c->eof ? AVERROR_EOF : 0;
+ if (c->eof)
+ return AVERROR_EOF;
+ c->nb_failed_requests++;
+ return AVERROR(EAGAIN);
}
/* Video output pad. Frames are pushed in by av_buffersrc_add_frame_flags(),
 * so only request_frame (EOF/EAGAIN reporting) and config_props are set;
 * the poll_frame callback goes away together with the queued-frame
 * mechanism it inspected. */
static const AVFilterPad avfilter_vsrc_buffer_outputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.request_frame = request_frame,
- .poll_frame = poll_frame,
.config_props = config_props,
},
{ NULL }
};
-AVFilter ff_vsrc_buffer = {
+const AVFilter ff_vsrc_buffer = {
.name = "buffer",
.description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
.priv_size = sizeof(BufferSourceContext),
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.request_frame = request_frame,
- .poll_frame = poll_frame,
.config_props = config_props,
},
{ NULL }
};
-AVFilter ff_asrc_abuffer = {
+const AVFilter ff_asrc_abuffer = {
.name = "abuffer",
.description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them accessible to the filterchain."),
.priv_size = sizeof(BufferSourceContext),