2 * Copyright (c) 2013 Vittorio Giovara
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * Generate a frame packed video, by combining two views in a single surface.
28 #include "libavutil/common.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/opt.h"
31 #include "libavutil/pixdesc.h"
32 #include "libavutil/rational.h"
33 #include "libavutil/stereo3d.h"
// Private context for the framepack filter: the negotiated pixel format
// descriptor, the requested packing mode, and one pending frame per view.
// NOTE(review): this view of the file is elided (gaps in the embedded
// original line numbers) — additional fields referenced elsewhere, e.g.
// the per-component bit depth written to s->depth in config_output(),
// are not visible here. Confirm against the full file.
44 typedef struct FramepackContext {
48 const AVPixFmtDescriptor *pix_desc; ///< agreed pixel format
50 enum AVStereo3DType format; ///< frame pack type output
52 AVFrame *input_views[2]; ///< input frames
// Planar pixel formats accepted on all pads: gray, YUV/YUVJ, GBR and
// their alpha variants, at 8 to 16 bits per component. Restricting to
// planar formats keeps the per-plane packing loops simple.
// NOTE(review): the AV_PIX_FMT_NONE terminator and the closing brace
// are not visible in this elided view — confirm against the full file.
55 static const enum AVPixelFormat formats_supported[] = {
56 AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9,
57 AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14,
59 AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
60 AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
61 AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
62 AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
63 AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
65 AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
66 AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
68 AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12,
70 AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
71 AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
72 AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
73 AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
74 AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
75 AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_YUVA444P16,
76 AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA422P16,
77 AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16,
78 AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
// Advertise the supported pixel formats on every pad; ff_set_common_formats()
// forces both inputs and the output to negotiate the same format.
// NOTE(review): elided view — the opening brace and the NULL check that
// guards the AVERROR(ENOMEM) return are not visible here.
82 static int query_formats(AVFilterContext *ctx)
84 // this will ensure that formats are the same on all pads
85 AVFilterFormats *fmts_list = ff_make_format_list(formats_supported);
87 return AVERROR(ENOMEM);
88 return ff_set_common_formats(ctx, fmts_list);
// Filter teardown: release any input frame still buffered for either view.
// av_frame_free() is NULL-safe and resets the pointers, so this is safe
// regardless of how far initialization or filtering progressed.
91 static av_cold void framepack_uninit(AVFilterContext *ctx)
93 FramepackContext *s = ctx->priv;
95 // clean any leftover frame
96 av_frame_free(&s->input_views[LEFT]);
97 av_frame_free(&s->input_views[RIGHT]);
// Configure the output link. Rejects the pair of inputs unless size,
// time base and frame rate all match between the LEFT and RIGHT views,
// then caches the pixel format descriptor / bit depth and derives the
// output geometry and timing from the selected packing mode.
// Returns 0 on success, AVERROR_INVALIDDATA on mismatch or unknown mode.
100 static int config_output(AVFilterLink *outlink)
102 AVFilterContext *ctx = outlink->src;
103 FramepackContext *s = outlink->src->priv;
105 int width = ctx->inputs[LEFT]->w;
106 int height = ctx->inputs[LEFT]->h;
107 AVRational time_base = ctx->inputs[LEFT]->time_base;
108 AVRational frame_rate = ctx->inputs[LEFT]->frame_rate;
110 // check size and fps match on the other input
111 if (width != ctx->inputs[RIGHT]->w ||
112 height != ctx->inputs[RIGHT]->h) {
113 av_log(ctx, AV_LOG_ERROR,
114 "Left and right sizes differ (%dx%d vs %dx%d).\n",
116 ctx->inputs[RIGHT]->w, ctx->inputs[RIGHT]->h);
117 return AVERROR_INVALIDDATA;
118 } else if (av_cmp_q(time_base, ctx->inputs[RIGHT]->time_base) != 0) {
119 av_log(ctx, AV_LOG_ERROR,
120 "Left and right time bases differ (%d/%d vs %d/%d).\n",
121 time_base.num, time_base.den,
122 ctx->inputs[RIGHT]->time_base.num,
123 ctx->inputs[RIGHT]->time_base.den);
124 return AVERROR_INVALIDDATA;
125 } else if (av_cmp_q(frame_rate, ctx->inputs[RIGHT]->frame_rate) != 0) {
126 av_log(ctx, AV_LOG_ERROR,
127 "Left and right framerates differ (%d/%d vs %d/%d).\n",
128 frame_rate.num, frame_rate.den,
129 ctx->inputs[RIGHT]->frame_rate.num,
130 ctx->inputs[RIGHT]->frame_rate.den);
131 return AVERROR_INVALIDDATA;
// cache format descriptor and per-component bit depth for the packers
134 s->pix_desc = av_pix_fmt_desc_get(outlink->format);
137 s->depth = s->pix_desc->comp[0].depth;
139 // modify output properties as needed
// NOTE(review): the switch header (presumably on s->format) and the
// per-case bodies that adjust output width/height/timing are elided
// from this view — confirm against the full file.
141 case AV_STEREO3D_FRAMESEQUENCE:
145 case AV_STEREO3D_COLUMNS:
146 case AV_STEREO3D_SIDEBYSIDE:
149 case AV_STEREO3D_LINES:
150 case AV_STEREO3D_TOPBOTTOM:
154 av_log(ctx, AV_LOG_ERROR, "Unknown packing mode.");
155 return AVERROR_INVALIDDATA;
160 outlink->time_base = time_base;
161 outlink->frame_rate = frame_rate;
// Pack the two buffered views horizontally into `out`.
// Three paths: column-interleaved at <=8 bits, column-interleaved at
// >8 bits (16-bit pointer arithmetic, hence the linesize / 2), and the
// plain side-by-side copy (each view blitted into its own half).
// NOTE(review): elided view — the remaining parameters of the signature
// (the destination frame and the `interleaved` flag), loop-closing
// braces and the non-chroma else branches are not visible here.
166 static void horizontal_frame_pack(AVFilterLink *outlink,
170 AVFilterContext *ctx = outlink->src;
171 FramepackContext *s = ctx->priv;
// --- column-interleave, 8-bit samples ---
174 if (interleaved && s->depth <= 8) {
175 const uint8_t *leftp = s->input_views[LEFT]->data[0];
176 const uint8_t *rightp = s->input_views[RIGHT]->data[0];
177 uint8_t *dstp = out->data[0];
178 int length = out->width / 2;
179 int lines = out->height;
181 for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
// planes 1 and 2 are chroma: scale the loop bounds by the subsampling
182 if (plane == 1 || plane == 2) {
183 length = AV_CEIL_RSHIFT(out->width / 2, s->pix_desc->log2_chroma_w);
184 lines = AV_CEIL_RSHIFT(out->height, s->pix_desc->log2_chroma_h);
186 for (i = 0; i < lines; i++) {
188 leftp = s->input_views[LEFT]->data[plane] +
189 s->input_views[LEFT]->linesize[plane] * i;
190 rightp = s->input_views[RIGHT]->data[plane] +
191 s->input_views[RIGHT]->linesize[plane] * i;
192 dstp = out->data[plane] + out->linesize[plane] * i;
193 for (j = 0; j < length; j++) {
194 // interpolate chroma as necessary
195 if ((s->pix_desc->log2_chroma_w ||
196 s->pix_desc->log2_chroma_h) &&
197 (plane == 1 || plane == 2)) {
// subsampled chroma cannot alternate per output column, so
// write the left/right average into both interleaved slots
198 *dstp++ = (*leftp + *rightp) / 2;
199 *dstp++ = (*leftp + *rightp) / 2;
// --- column-interleave, >8-bit samples (uint16_t accesses) ---
209 } else if (interleaved && s->depth > 8) {
210 const uint16_t *leftp = (const uint16_t *)s->input_views[LEFT]->data[0];
211 const uint16_t *rightp = (const uint16_t *)s->input_views[RIGHT]->data[0];
212 uint16_t *dstp = (uint16_t *)out->data[0];
213 int length = out->width / 2;
214 int lines = out->height;
216 for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
217 if (plane == 1 || plane == 2) {
218 length = AV_CEIL_RSHIFT(out->width / 2, s->pix_desc->log2_chroma_w);
219 lines = AV_CEIL_RSHIFT(out->height, s->pix_desc->log2_chroma_h);
221 for (i = 0; i < lines; i++) {
// linesize is in bytes; divide by 2 to step in uint16_t units
223 leftp = (const uint16_t *)s->input_views[LEFT]->data[plane] +
224 s->input_views[LEFT]->linesize[plane] * i / 2;
225 rightp = (const uint16_t *)s->input_views[RIGHT]->data[plane] +
226 s->input_views[RIGHT]->linesize[plane] * i / 2;
227 dstp = (uint16_t *)out->data[plane] + out->linesize[plane] * i / 2;
228 for (j = 0; j < length; j++) {
229 // interpolate chroma as necessary
230 if ((s->pix_desc->log2_chroma_w ||
231 s->pix_desc->log2_chroma_h) &&
232 (plane == 1 || plane == 2)) {
233 *dstp++ = (*leftp + *rightp) / 2;
234 *dstp++ = (*leftp + *rightp) / 2;
// --- plain side-by-side: copy each view into its half of the output ---
245 for (i = 0; i < 2; i++) {
246 const int psize = 1 + (s->depth > 8);
247 const uint8_t *src[4];
// horizontal offset of view i in the (chroma-subsampled) planes, bytes
249 int sub_w = psize * s->input_views[i]->width >> s->pix_desc->log2_chroma_w;
251 src[0] = s->input_views[i]->data[0];
252 src[1] = s->input_views[i]->data[1];
253 src[2] = s->input_views[i]->data[2];
255 dst[0] = out->data[0] + i * s->input_views[i]->width * psize;
256 dst[1] = out->data[1] + i * sub_w;
257 dst[2] = out->data[2] + i * sub_w;
259 av_image_copy(dst, out->linesize, src, s->input_views[i]->linesize,
260 s->input_views[i]->format,
261 s->input_views[i]->width,
262 s->input_views[i]->height);
// Pack the two buffered views vertically into the output frame.
// When `interleaved` is 0 (top-bottom) view i starts at row offset
// i * height; when 1 (line-interleave) view i starts at row i and the
// doubled destination linesize makes av_image_copy() write every other
// output row. The arithmetic `interleaved + X * (1 - interleaved)`
// selects between the two offsets without branching.
// NOTE(review): elided view — remaining signature parameters (the
// destination frame and `interleaved`), local declarations (dst,
// linesizes, i) and closing braces are not visible here.
267 static void vertical_frame_pack(AVFilterLink *outlink,
271 AVFilterContext *ctx = outlink->src;
272 FramepackContext *s = ctx->priv;
275 for (i = 0; i < 2; i++) {
276 const uint8_t *src[4];
// chroma plane height of view i after vertical subsampling
279 int sub_h = s->input_views[i]->height >> s->pix_desc->log2_chroma_h;
281 src[0] = s->input_views[i]->data[0];
282 src[1] = s->input_views[i]->data[1];
283 src[2] = s->input_views[i]->data[2];
285 dst[0] = out->data[0] + i * out->linesize[0] *
286 (interleaved + s->input_views[i]->height * (1 - interleaved));
287 dst[1] = out->data[1] + i * out->linesize[1] *
288 (interleaved + sub_h * (1 - interleaved));
289 dst[2] = out->data[2] + i * out->linesize[2] *
290 (interleaved + sub_h * (1 - interleaved));
// interleaved: step two output rows per source row (linesize * 2)
292 linesizes[0] = out->linesize[0] +
293 interleaved * out->linesize[0];
294 linesizes[1] = out->linesize[1] +
295 interleaved * out->linesize[1];
296 linesizes[2] = out->linesize[2] +
297 interleaved * out->linesize[2];
299 av_image_copy(dst, linesizes, src, s->input_views[i]->linesize,
300 s->input_views[i]->format,
301 s->input_views[i]->width,
302 s->input_views[i]->height);
// Dispatch the spatial packing modes to the horizontal/vertical packers;
// the second argument selects plain (0) vs interleaved (1) placement.
// NOTE(review): elided view — the `dst` parameter, the switch header
// (presumably on s->format) and the `break` statements are not visible.
306 static av_always_inline void spatial_frame_pack(AVFilterLink *outlink,
309 AVFilterContext *ctx = outlink->src;
310 FramepackContext *s = ctx->priv;
312 case AV_STEREO3D_SIDEBYSIDE:
313 horizontal_frame_pack(outlink, dst, 0);
315 case AV_STEREO3D_COLUMNS:
316 horizontal_frame_pack(outlink, dst, 1);
318 case AV_STEREO3D_TOPBOTTOM:
319 vertical_frame_pack(outlink, dst, 0);
321 case AV_STEREO3D_LINES:
322 vertical_frame_pack(outlink, dst, 1);
// Emit output once both views are buffered. FRAMESEQUENCE mode pushes
// the two input frames back-to-back with doubled/offset timestamps and
// per-frame stereo3d side data; all spatial modes allocate one output
// frame, pack both views into it, copy the LEFT view's properties and
// attach a single stereo3d side-data entry.
// NOTE(review): elided view — several guards (e.g. the NULL checks
// before the AVERROR(ENOMEM) returns, the early-return when a view is
// missing) and closing braces are not visible here.
327 static int try_push_frame(AVFilterContext *ctx)
329 FramepackContext *s = ctx->priv;
330 AVFilterLink *outlink = ctx->outputs[0];
// do nothing until a frame is buffered for BOTH views
334 if (!(s->input_views[0] && s->input_views[1]))
336 if (s->format == AV_STEREO3D_FRAMESEQUENCE) {
337 int64_t pts = s->input_views[0]->pts;
339 for (i = 0; i < 2; i++) {
340 // set correct timestamps
// left frame at 2*pts, right frame one output frame later
341 if (pts != AV_NOPTS_VALUE)
342 s->input_views[i]->pts = i == 0 ? pts * 2 : pts * 2 + av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base);
344 // set stereo3d side data
345 stereo = av_stereo3d_create_side_data(s->input_views[i]);
347 return AVERROR(ENOMEM);
348 stereo->type = s->format;
349 stereo->view = i == LEFT ? AV_STEREO3D_VIEW_LEFT
350 : AV_STEREO3D_VIEW_RIGHT;
352 // filter the frame and immediately relinquish its pointer
353 ret = ff_filter_frame(outlink, s->input_views[i]);
354 s->input_views[i] = NULL;
// spatial modes: pack both views into one freshly allocated frame
360 AVFrame *dst = ff_get_video_buffer(outlink, outlink->w, outlink->h);
362 return AVERROR(ENOMEM);
364 spatial_frame_pack(outlink, dst);
366 // get any property from the original frame
367 ret = av_frame_copy_props(dst, s->input_views[LEFT]);
// both buffered views are consumed now that dst holds the packed data
373 for (i = 0; i < 2; i++)
374 av_frame_free(&s->input_views[i]);
376 // set stereo3d side data
377 stereo = av_stereo3d_create_side_data(dst);
380 return AVERROR(ENOMEM);
382 stereo->type = s->format;
384 return ff_filter_frame(outlink, dst);
// activate() callback: pull one frame per input into input_views[],
// push output once both are present, forward EOF/status both ways, and
// otherwise request more input on whichever link is still missing a
// frame while the output wants one.
// NOTE(review): elided view — the `ret < 0` checks after each
// ff_inlink_consume_frame() and intermediate returns are not visible.
388 static int activate(AVFilterContext *ctx)
390 AVFilterLink *outlink = ctx->outputs[0];
391 FramepackContext *s = ctx->priv;
// propagate output-side status (e.g. downstream EOF) back to the inputs
394 FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);
396 if (!s->input_views[0]) {
397 ret = ff_inlink_consume_frame(ctx->inputs[0], &s->input_views[0]);
402 if (!s->input_views[1]) {
403 ret = ff_inlink_consume_frame(ctx->inputs[1], &s->input_views[1]);
408 if (s->input_views[0] && s->input_views[1])
409 return try_push_frame(ctx);
// propagate input-side EOF/errors to the output
411 FF_FILTER_FORWARD_STATUS(ctx->inputs[0], outlink);
412 FF_FILTER_FORWARD_STATUS(ctx->inputs[1], outlink);
414 if (ff_outlink_frame_wanted(ctx->outputs[0]) &&
415 !ff_outlink_get_status(ctx->inputs[0]) &&
416 !s->input_views[0]) {
417 ff_inlink_request_frame(ctx->inputs[0]);
421 if (ff_outlink_frame_wanted(ctx->outputs[0]) &&
422 !ff_outlink_get_status(ctx->inputs[1]) &&
423 !s->input_views[1]) {
424 ff_inlink_request_frame(ctx->inputs[1]);
428 return FFERROR_NOT_READY;
// Option table: a single "format" option selecting the packing mode,
// defaulting to side-by-side; the named constants map user strings to
// AVStereo3DType values within the "format" unit.
431 #define OFFSET(x) offsetof(FramepackContext, x)
432 #define VF AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
433 static const AVOption framepack_options[] = {
434 { "format", "Frame pack output format", OFFSET(format), AV_OPT_TYPE_INT,
435 { .i64 = AV_STEREO3D_SIDEBYSIDE }, 0, INT_MAX, .flags = VF, .unit = "format" },
436 { "sbs", "Views are packed next to each other", 0, AV_OPT_TYPE_CONST,
437 { .i64 = AV_STEREO3D_SIDEBYSIDE }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
438 { "tab", "Views are packed on top of each other", 0, AV_OPT_TYPE_CONST,
439 { .i64 = AV_STEREO3D_TOPBOTTOM }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
440 { "frameseq", "Views are one after the other", 0, AV_OPT_TYPE_CONST,
441 { .i64 = AV_STEREO3D_FRAMESEQUENCE }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
442 { "lines", "Views are interleaved by lines", 0, AV_OPT_TYPE_CONST,
443 { .i64 = AV_STEREO3D_LINES }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
444 { "columns", "Views are interleaved by columns", 0, AV_OPT_TYPE_CONST,
445 { .i64 = AV_STEREO3D_COLUMNS }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
449 AVFILTER_DEFINE_CLASS(framepack);
// Two video input pads (left and right views).
// NOTE(review): elided view — the pad .name fields and surrounding
// braces are not visible here.
451 static const AVFilterPad framepack_inputs[] = {
454 .type = AVMEDIA_TYPE_VIDEO,
458 .type = AVMEDIA_TYPE_VIDEO,
// Single video output pad; config_output() validates the input pair and
// sets up the output link properties.
463 static const AVFilterPad framepack_outputs[] = {
466 .type = AVMEDIA_TYPE_VIDEO,
467 .config_props = config_output,
// Filter registration: wires the private context, option class, pads and
// the activate/uninit callbacks into libavfilter.
// NOTE(review): elided view — the .name field is not visible here.
472 const AVFilter ff_vf_framepack = {
474 .description = NULL_IF_CONFIG_SMALL("Generate a frame packed stereoscopic video."),
475 .priv_size = sizeof(FramepackContext),
476 .priv_class = &framepack_class,
477 .query_formats = query_formats,
478 .inputs = framepack_inputs,
479 .outputs = framepack_outputs,
480 .activate = activate,
481 .uninit = framepack_uninit,