/*
 * Videotoolbox hardware acceleration
 *
 * copyright (c) 2012 Sebastien Zwickert
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include "videotoolbox.h"
#include "libavutil/hwcontext_videotoolbox.h"
#include "vt_internal.h"
#include "libavutil/avutil.h"
#include "libavutil/hwcontext.h"
#include "bytestream.h"
#include "decode.h"
#include "h264dec.h"
#include "hevcdec.h"
#include "mpegvideo.h"
#include <TargetConditionals.h>

#ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
#  define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
#endif
#ifndef kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
#  define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder CFSTR("EnableHardwareAcceleratedVideoDecoder")
#endif

#if !HAVE_KCMVIDEOCODECTYPE_HEVC
enum { kCMVideoCodecType_HEVC = 'hvc1' };
#endif

#define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING  12

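/* AVBuffer free callback: release the CVPixelBufferRef stored in the buffer
 * payload, then free the small holder allocation itself. */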
static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
{
    CVPixelBufferRef cv_buffer = *(CVPixelBufferRef *)data;
    CVPixelBufferRelease(cv_buffer);

    av_free(data);
}

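/* Copy a whole input buffer into the context's reusable bitstream buffer,
 * growing it with av_fast_realloc() as needed. */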
static int videotoolbox_buffer_copy(VTContext *vtctx,
                                    const uint8_t *buffer,
                                    uint32_t size)
{
    void *tmp;

    tmp = av_fast_realloc(vtctx->bitstream,
                          &vtctx->allocated_size,
                          size);

    if (!tmp)
        return AVERROR(ENOMEM);

    vtctx->bitstream = tmp;
    memcpy(vtctx->bitstream, buffer, size);
    vtctx->bitstream_size = size;

    return 0;
}

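/* Post-process callback run by the generic decode code when a frame is
 * output: expose the decoded CVPixelBufferRef through frame->data[3]
 * (the VideoToolbox hwaccel convention), or fail if nothing was decoded. */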
static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
{
    CVPixelBufferRef ref = *(CVPixelBufferRef *)frame->buf[0]->data;

    if (!ref) {
        av_log(avctx, AV_LOG_ERROR, "No frame decoded?\n");
        av_frame_unref(frame);
        return AVERROR_EXTERNAL;
    }

    frame->data[3] = (uint8_t*)ref;

    return 0;
}

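/* Allocate a frame whose buf[0] owns an (initially NULL) CVPixelBufferRef
 * slot; the actual pixel buffer is attached later by videotoolbox_set_frame()
 * and published in data[3] by the post-process callback above. */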
int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
{
    size_t      size = sizeof(CVPixelBufferRef);
    uint8_t    *data = NULL;
    AVBufferRef *buf = NULL;
    int ret = ff_attach_decode_data(frame);
    FrameDecodeData *fdd;
    if (ret < 0)
        return ret;

    data = av_mallocz(size);
    if (!data)
        return AVERROR(ENOMEM);
    buf = av_buffer_create(data, size, videotoolbox_buffer_release, NULL, 0);
    if (!buf) {
        av_freep(&data);
        return AVERROR(ENOMEM);
    }
    frame->buf[0] = buf;

    fdd = (FrameDecodeData*)frame->private_ref->data;
    fdd->post_process = videotoolbox_postproc_frame;

    frame->width  = avctx->width;
    frame->height = avctx->height;
    frame->format = avctx->pix_fmt;

    return 0;
}

#define AV_W8(p, v) *(p) = (v)

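/* Build an AVCDecoderConfigurationRecord ("avcC", ISO/IEC 14496-15) from the
 * active SPS and PPS, for use as a sample description extension. */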
CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    H264Context *h = avctx->priv_data;
    CFDataRef data = NULL;
    uint8_t *p;
    int vt_extradata_size = 6 + 2 + h->ps.sps->data_size + 3 + h->ps.pps->data_size;
    uint8_t *vt_extradata = av_malloc(vt_extradata_size);
    if (!vt_extradata)
        return NULL;

    p = vt_extradata;

    AV_W8(p + 0, 1); /* version */
    AV_W8(p + 1, h->ps.sps->data[1]); /* profile */
    AV_W8(p + 2, h->ps.sps->data[2]); /* profile compat */
    AV_W8(p + 3, h->ps.sps->data[3]); /* level */
    AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
    AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
    AV_WB16(p + 6, h->ps.sps->data_size);
    memcpy(p + 8, h->ps.sps->data, h->ps.sps->data_size);
    p += 8 + h->ps.sps->data_size;
    AV_W8(p + 0, 1); /* number of pps */
    AV_WB16(p + 1, h->ps.pps->data_size);
    memcpy(p + 3, h->ps.pps->data, h->ps.pps->data_size);

    p += 3 + h->ps.pps->data_size;
    av_assert0(p - vt_extradata == vt_extradata_size);

    // save sps header (profile/level) used to create decoder session,
    // so we can detect changes and recreate it.
    if (vtctx)
        memcpy(vtctx->sps, h->ps.sps->data + 1, 3);

    data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
    av_free(vt_extradata);
    return data;
}

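/* Build an HEVCDecoderConfigurationRecord ("hvcC", ISO/IEC 14496-15) from the
 * first VPS/SPS and every available PPS, for use as a sample description
 * extension. */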
CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx)
{
    HEVCContext *h = avctx->priv_data;
    const HEVCVPS *vps = (const HEVCVPS *)h->ps.vps_list[0]->data;
    const HEVCSPS *sps = (const HEVCSPS *)h->ps.sps_list[0]->data;
    int i, num_pps = 0;
    const HEVCPPS *pps = h->ps.pps;
    PTLCommon ptlc = vps->ptl.general_ptl;
    VUI vui = sps->vui;
    uint8_t parallelismType;
    CFDataRef data = NULL;
    uint8_t *p;
    int vt_extradata_size = 23 + 5 + vps->data_size + 5 + sps->data_size + 3;
    uint8_t *vt_extradata;

    for (i = 0; i < HEVC_MAX_PPS_COUNT; i++) {
        if (h->ps.pps_list[i]) {
            const HEVCPPS *pps = (const HEVCPPS *)h->ps.pps_list[i]->data;
            vt_extradata_size += 2 + pps->data_size;
            num_pps++;
        }
    }

    vt_extradata = av_malloc(vt_extradata_size);
    if (!vt_extradata)
        return NULL;
    p = vt_extradata;

    /* unsigned int(8) configurationVersion = 1; */
    AV_W8(p + 0, 1);

    /*
     * unsigned int(2) general_profile_space;
     * unsigned int(1) general_tier_flag;
     * unsigned int(5) general_profile_idc;
     */
    AV_W8(p + 1, ptlc.profile_space << 6 |
                 ptlc.tier_flag     << 5 |
                 ptlc.profile_idc);

    /* unsigned int(32) general_profile_compatibility_flags; */
    memcpy(p + 2, ptlc.profile_compatibility_flag, 4);

    /* unsigned int(48) general_constraint_indicator_flags; */
    AV_W8(p + 6, ptlc.progressive_source_flag    << 7 |
                 ptlc.interlaced_source_flag     << 6 |
                 ptlc.non_packed_constraint_flag << 5 |
                 ptlc.frame_only_constraint_flag << 4);
    AV_W8(p + 7, 0);
    AV_WN32(p + 8, 0);

    /* unsigned int(8) general_level_idc; */
    AV_W8(p + 12, ptlc.level_idc);

    /*
     * bit(4) reserved = ‘1111’b;
     * unsigned int(12) min_spatial_segmentation_idc;
     */
    AV_W8(p + 13, 0xf0 | (vui.min_spatial_segmentation_idc >> 4));
    AV_W8(p + 14, vui.min_spatial_segmentation_idc & 0xff);

    /*
     * bit(6) reserved = ‘111111’b;
     * unsigned int(2) parallelismType;
     */
    if (!vui.min_spatial_segmentation_idc)
        parallelismType = 0;
    else if (pps->entropy_coding_sync_enabled_flag && pps->tiles_enabled_flag)
        parallelismType = 0;
    else if (pps->entropy_coding_sync_enabled_flag)
        parallelismType = 3;
    else if (pps->tiles_enabled_flag)
        parallelismType = 2;
    else
        parallelismType = 1;
    AV_W8(p + 15, 0xfc | parallelismType);

    /*
     * bit(6) reserved = ‘111111’b;
     * unsigned int(2) chromaFormat;
     */
    AV_W8(p + 16, sps->chroma_format_idc | 0xfc);

    /*
     * bit(5) reserved = ‘11111’b;
     * unsigned int(3) bitDepthLumaMinus8;
     */
    AV_W8(p + 17, (sps->bit_depth - 8) | 0xfc);

    /*
     * bit(5) reserved = ‘11111’b;
     * unsigned int(3) bitDepthChromaMinus8;
     */
    AV_W8(p + 18, (sps->bit_depth_chroma - 8) | 0xfc);

    /* bit(16) avgFrameRate; */
    AV_WB16(p + 19, 0);

    /*
     * bit(2) constantFrameRate;
     * bit(3) numTemporalLayers;
     * bit(1) temporalIdNested;
     * unsigned int(2) lengthSizeMinusOne;
     */
    AV_W8(p + 21, 0                             << 6 |
                  sps->max_sub_layers           << 3 |
                  sps->temporal_id_nesting_flag << 2 |
                  3);

    /* unsigned int(8) numOfArrays; */
    AV_W8(p + 22, 3);

    p += 23;
    /* vps */
    /*
     * bit(1) array_completeness;
     * unsigned int(1) reserved = 0;
     * unsigned int(6) NAL_unit_type;
     */
    AV_W8(p, 1 << 7 |
             HEVC_NAL_VPS & 0x3f);
    /* unsigned int(16) numNalus; */
    AV_WB16(p + 1, 1);
    /* unsigned int(16) nalUnitLength; */
    AV_WB16(p + 3, vps->data_size);
    /* bit(8*nalUnitLength) nalUnit; */
    memcpy(p + 5, vps->data, vps->data_size);
    p += 5 + vps->data_size;

    /* sps */
    AV_W8(p, 1 << 7 |
             HEVC_NAL_SPS & 0x3f);
    AV_WB16(p + 1, 1);
    AV_WB16(p + 3, sps->data_size);
    memcpy(p + 5, sps->data, sps->data_size);
    p += 5 + sps->data_size;

    /* pps */
    AV_W8(p, 1 << 7 |
             HEVC_NAL_PPS & 0x3f);
    AV_WB16(p + 1, num_pps);
    p += 3;
    for (i = 0; i < HEVC_MAX_PPS_COUNT; i++) {
        if (h->ps.pps_list[i]) {
            const HEVCPPS *pps = (const HEVCPPS *)h->ps.pps_list[i]->data;
            AV_WB16(p, pps->data_size);
            memcpy(p + 2, pps->data, pps->data_size);
            p += 2 + pps->data_size;
        }
    }

    av_assert0(p - vt_extradata == vt_extradata_size);

    data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
    av_free(vt_extradata);
    return data;
}

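/* Move ownership of the decoded pixel buffer from the hwaccel context into
 * the frame's buf[0] slot allocated by ff_videotoolbox_alloc_frame(). */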
static int videotoolbox_set_frame(AVCodecContext *avctx, AVFrame *frame)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    if (!frame->buf[0] || frame->data[3]) {
        av_log(avctx, AV_LOG_ERROR, "videotoolbox: invalid state\n");
        av_frame_unref(frame);
        return AVERROR_EXTERNAL;
    }

    CVPixelBufferRef *ref = (CVPixelBufferRef *)frame->buf[0]->data;

    if (*ref)
        CVPixelBufferRelease(*ref);

    *ref = vtctx->frame;
    vtctx->frame = NULL;

    return 0;
}

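/* start_frame: for AVCC (length-prefixed) input the buffer is copied as-is;
 * Annex B input is instead rebuilt NAL by NAL in
 * ff_videotoolbox_h264_decode_slice(). */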
int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
                                     const uint8_t *buffer,
                                     uint32_t size)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    H264Context *h  = avctx->priv_data;

    if (h->is_avc == 1) {
        return videotoolbox_buffer_copy(vtctx, buffer, size);
    }

    return 0;
}

static int videotoolbox_h264_decode_params(AVCodecContext *avctx,
                                           int type,
                                           const uint8_t *buffer,
                                           uint32_t size)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    H264Context *h = avctx->priv_data;

    // save sps header (profile/level) used to create decoder session
    if (!vtctx->sps[0])
        memcpy(vtctx->sps, h->ps.sps->data + 1, 3);

    if (type == H264_NAL_SPS) {
        if (size > 4 && memcmp(vtctx->sps, buffer + 1, 3) != 0) {
            vtctx->reconfig_needed = true;
            memcpy(vtctx->sps, buffer + 1, 3);
        }
    }

    // pass-through SPS/PPS changes to the decoder
    return ff_videotoolbox_h264_decode_slice(avctx, buffer, size);
}

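/* Append one NAL unit to the bitstream buffer, converting Annex B input to
 * the 4-byte length-prefixed layout expected by VideoToolbox; a no-op for
 * AVCC input, which was already copied in start_frame. */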
int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
                                      const uint8_t *buffer,
                                      uint32_t size)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    H264Context *h  = avctx->priv_data;
    void *tmp;

    if (h->is_avc == 1)
        return 0;

    tmp = av_fast_realloc(vtctx->bitstream,
                          &vtctx->allocated_size,
                          vtctx->bitstream_size+size+4);
    if (!tmp)
        return AVERROR(ENOMEM);

    vtctx->bitstream = tmp;

    AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
    memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);

    vtctx->bitstream_size += size + 4;

    return 0;
}

int ff_videotoolbox_uninit(AVCodecContext *avctx)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    if (vtctx) {
        av_freep(&vtctx->bitstream);
        if (vtctx->frame)
            CVPixelBufferRelease(vtctx->frame);
    }

    return 0;
}

#if CONFIG_VIDEOTOOLBOX
// Return the AVVideotoolboxContext that matters currently. Where it comes from
// depends on the API used.
static AVVideotoolboxContext *videotoolbox_get_context(AVCodecContext *avctx)
{
    // Somewhat tricky because the user can call av_videotoolbox_default_free()
    // at any time, even when the codec is closed.
    if (avctx->internal && avctx->internal->hwaccel_priv_data) {
        VTContext *vtctx = avctx->internal->hwaccel_priv_data;
        if (vtctx->vt_ctx)
            return vtctx->vt_ctx;
    }
    return avctx->hwaccel_context;
}

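/* Attach the decoded pixel buffer to the output frame. With the new hwaccel
 * API, also keep the cached AVHWFramesContext in sync with the pixel buffer's
 * actual format and dimensions, recreating it when they change. */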
static int videotoolbox_buffer_create(AVCodecContext *avctx, AVFrame *frame)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    CVPixelBufferRef pixbuf = (CVPixelBufferRef)vtctx->frame;
    OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
    enum AVPixelFormat sw_format = av_map_videotoolbox_format_to_pixfmt(pixel_format);
    int width = CVPixelBufferGetWidth(pixbuf);
    int height = CVPixelBufferGetHeight(pixbuf);
    AVHWFramesContext *cached_frames;
    int ret;

    ret = videotoolbox_set_frame(avctx, frame);
    if (ret < 0)
        return ret;

    // Old API code path.
    if (!vtctx->cached_hw_frames_ctx)
        return 0;

    cached_frames = (AVHWFramesContext*)vtctx->cached_hw_frames_ctx->data;

    if (cached_frames->sw_format != sw_format ||
        cached_frames->width != width ||
        cached_frames->height != height) {
        AVBufferRef *hw_frames_ctx = av_hwframe_ctx_alloc(cached_frames->device_ref);
        AVHWFramesContext *hw_frames;
        if (!hw_frames_ctx)
            return AVERROR(ENOMEM);

        hw_frames = (AVHWFramesContext*)hw_frames_ctx->data;
        hw_frames->format = cached_frames->format;
        hw_frames->sw_format = sw_format;
        hw_frames->width = width;
        hw_frames->height = height;

        ret = av_hwframe_ctx_init(hw_frames_ctx);
        if (ret < 0) {
            av_buffer_unref(&hw_frames_ctx);
            return ret;
        }

        av_buffer_unref(&vtctx->cached_hw_frames_ctx);
        vtctx->cached_hw_frames_ctx = hw_frames_ctx;
    }

    av_buffer_unref(&frame->hw_frames_ctx);
    frame->hw_frames_ctx = av_buffer_ref(vtctx->cached_hw_frames_ctx);
    if (!frame->hw_frames_ctx)
        return AVERROR(ENOMEM);

    return 0;
}

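/* Write an MPEG-4 descriptor length as four 7-bit groups, most significant
 * first, with the continuation bit set on all but the last group
 * (e.g. 300 is written as 0x80 0x80 0x82 0x2C). */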
static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
{
    int i;
    uint8_t b;

    for (i = 3; i >= 0; i--) {
        b = (length >> (i * 7)) & 0x7F;
        if (i != 0)
            b |= 0x80;

        bytestream2_put_byteu(pb, b);
    }
}

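/* Wrap the codec extradata in an MPEG-4 "esds" payload: ES_Descriptor ->
 * DecoderConfigDescriptor -> DecoderSpecificInfo, followed by a minimal
 * SLConfigDescriptor. */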
static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
{
    CFDataRef data;
    uint8_t *rw_extradata;
    PutByteContext pb;
    int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
    // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
    int config_size = 13 + 5 + avctx->extradata_size;
    int s;

    if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
        return NULL;

    bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
    bytestream2_put_byteu(&pb, 0);        // version
    bytestream2_put_ne24(&pb, 0);         // flags

    // elementary stream descriptor
    bytestream2_put_byteu(&pb, 0x03);     // ES_DescrTag
    videotoolbox_write_mp4_descr_length(&pb, full_size);
    bytestream2_put_ne16(&pb, 0);         // esid
    bytestream2_put_byteu(&pb, 0);        // stream priority (0-32)

    // decoder configuration descriptor
    bytestream2_put_byteu(&pb, 0x04);     // DecoderConfigDescrTag
    videotoolbox_write_mp4_descr_length(&pb, config_size);
    bytestream2_put_byteu(&pb, 32);       // object type indication. 32 = AV_CODEC_ID_MPEG4
    bytestream2_put_byteu(&pb, 0x11);     // stream type
    bytestream2_put_ne24(&pb, 0);         // buffer size
    bytestream2_put_ne32(&pb, 0);         // max bitrate
    bytestream2_put_ne32(&pb, 0);         // avg bitrate

    // decoder specific descriptor
    bytestream2_put_byteu(&pb, 0x05);     ///< DecSpecificInfoTag
    videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);

    bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);

    // SLConfigDescriptor
    bytestream2_put_byteu(&pb, 0x06);     // SLConfigDescrTag
    bytestream2_put_byteu(&pb, 0x01);     // length
    bytestream2_put_byteu(&pb, 0x02);     //

    s = bytestream2_size_p(&pb);

    data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);

    av_freep(&rw_extradata);
    return data;
}

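/* Wrap the accumulated bitstream in a CMSampleBuffer without copying it
 * (kCFAllocatorNull block allocator), so the sample buffer must not outlive
 * vtctx->bitstream. */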
static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
                                                           void *buffer,
                                                           int size)
{
    OSStatus status;
    CMBlockBufferRef  block_buf;
    CMSampleBufferRef sample_buf;

    block_buf  = NULL;
    sample_buf = NULL;

    status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
                                                buffer,             // memoryBlock
                                                size,               // blockLength
                                                kCFAllocatorNull,   // blockAllocator
                                                NULL,               // customBlockSource
                                                0,                  // offsetToData
                                                size,               // dataLength
                                                0,                  // flags
                                                &block_buf);

    if (!status) {
        status = CMSampleBufferCreate(kCFAllocatorDefault,  // allocator
                                      block_buf,            // dataBuffer
                                      TRUE,                 // dataReady
                                      0,                    // makeDataReadyCallback
                                      0,                    // makeDataReadyRefcon
                                      fmt_desc,             // formatDescription
                                      1,                    // numSamples
                                      0,                    // numSampleTimingEntries
                                      NULL,                 // sampleTimingArray
                                      0,                    // numSampleSizeEntries
                                      NULL,                 // sampleSizeArray
                                      &sample_buf);
    }

    if (block_buf)
        CFRelease(block_buf);

    return sample_buf;
}

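/* Decompression output callback: retain the decoded image buffer in
 * vtctx->frame, releasing any previously stored one. */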
static void videotoolbox_decoder_callback(void *opaque,
                                          void *sourceFrameRefCon,
                                          OSStatus status,
                                          VTDecodeInfoFlags flags,
                                          CVImageBufferRef image_buffer,
                                          CMTime pts,
                                          CMTime duration)
{
    AVCodecContext *avctx = opaque;
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;

    if (vtctx->frame) {
        CVPixelBufferRelease(vtctx->frame);
        vtctx->frame = NULL;
    }

    if (!image_buffer) {
        av_log(NULL, AV_LOG_DEBUG, "vt decoder cb: output image buffer is null\n");
        return;
    }

    vtctx->frame = CVPixelBufferRetain(image_buffer);
}

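/* Submit the buffered access unit to the decompression session and wait for
 * the asynchronous frames, so the output callback has run before returning. */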
static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
{
    OSStatus status;
    CMSampleBufferRef sample_buf;
    AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;

    sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
                                                   vtctx->bitstream,
                                                   vtctx->bitstream_size);

    if (!sample_buf)
        return -1;

    status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
                                               sample_buf,
                                               0,       // decodeFlags
                                               NULL,    // sourceFrameRefCon
                                               0);      // infoFlagsOut
    if (status == noErr)
        status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);

    CFRelease(sample_buf);

    return status;
}

static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
                                                                   CFDictionaryRef decoder_spec,
                                                                   int width,
                                                                   int height)
{
    CMFormatDescriptionRef cm_fmt_desc;
    OSStatus status;

    status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
                                            codec_type,
                                            width,
                                            height,
                                            decoder_spec, // Dictionary of extension
                                            &cm_fmt_desc);

    if (status)
        return NULL;

    return cm_fmt_desc;
}

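/* Build the destination image buffer attributes: pixel format (if requested),
 * dimensions, IOSurface backing and OpenGL(ES) compatibility. */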
static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
                                                             int height,
                                                             OSType pix_fmt)
{
    CFMutableDictionaryRef buffer_attributes;
    CFMutableDictionaryRef io_surface_properties;
    CFNumberRef cv_pix_fmt;
    CFNumberRef w;
    CFNumberRef h;

    w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
    h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
    cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);

    buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                  4,
                                                  &kCFTypeDictionaryKeyCallBacks,
                                                  &kCFTypeDictionaryValueCallBacks);
    io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                      0,
                                                      &kCFTypeDictionaryKeyCallBacks,
                                                      &kCFTypeDictionaryValueCallBacks);

    if (pix_fmt)
        CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
#if TARGET_OS_IPHONE
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferOpenGLESCompatibilityKey, kCFBooleanTrue);
#else
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfaceOpenGLTextureCompatibilityKey, kCFBooleanTrue);
#endif

    CFRelease(io_surface_properties);
    CFRelease(cv_pix_fmt);
    CFRelease(w);
    CFRelease(h);

    return buffer_attributes;
}

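/* Build the decoder specification: require (or, for HEVC, merely enable)
 * hardware decoding and attach the codec-specific sample description
 * extension (esds/avcC/hvcC). */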
static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
                                                          AVCodecContext *avctx)
{
    CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                                   0,
                                                                   &kCFTypeDictionaryKeyCallBacks,
                                                                   &kCFTypeDictionaryValueCallBacks);

    CFDictionarySetValue(config_info,
                         codec_type == kCMVideoCodecType_HEVC ?
                            kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder :
                            kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder,
                         kCFBooleanTrue);

    CFMutableDictionaryRef avc_info;
    CFDataRef data = NULL;

    avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                         1,
                                         &kCFTypeDictionaryKeyCallBacks,
                                         &kCFTypeDictionaryValueCallBacks);

    switch (codec_type) {
    case kCMVideoCodecType_MPEG4Video :
        if (avctx->extradata_size)
            data = videotoolbox_esds_extradata_create(avctx);
        if (data)
            CFDictionarySetValue(avc_info, CFSTR("esds"), data);
        break;
    case kCMVideoCodecType_H264 :
        data = ff_videotoolbox_avcc_extradata_create(avctx);
        if (data)
            CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
        break;
    case kCMVideoCodecType_HEVC :
        data = ff_videotoolbox_hvcc_extradata_create(avctx);
        if (data)
            CFDictionarySetValue(avc_info, CFSTR("hvcC"), data);
        break;
    default:
        break;
    }

    CFDictionarySetValue(config_info,
            kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
            avc_info);

    if (data)
        CFRelease(data);

    CFRelease(avc_info);
    return config_info;
}

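/* Create the format description and the VTDecompressionSession for the
 * current codec parameters; called from init and again when a reconfig is
 * needed. */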
static int videotoolbox_start(AVCodecContext *avctx)
{
    AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
    OSStatus status;
    VTDecompressionOutputCallbackRecord decoder_cb;
    CFDictionaryRef decoder_spec;
    CFDictionaryRef buf_attr;

    if (!videotoolbox) {
        av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
        return -1;
    }

    switch (avctx->codec_id) {
    case AV_CODEC_ID_H263 :
        videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
        break;
    case AV_CODEC_ID_H264 :
        videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
        break;
    case AV_CODEC_ID_HEVC :
        videotoolbox->cm_codec_type = kCMVideoCodecType_HEVC;
        break;
    case AV_CODEC_ID_MPEG1VIDEO :
        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
        break;
    case AV_CODEC_ID_MPEG2VIDEO :
        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
        break;
    case AV_CODEC_ID_MPEG4 :
        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
        break;
    default :
        break;
    }

    decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);

    if (!decoder_spec) {
        av_log(avctx, AV_LOG_ERROR, "decoder specification creation failed\n");
        return -1;
    }

    videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
                                                                decoder_spec,
                                                                avctx->width,
                                                                avctx->height);
    if (!videotoolbox->cm_fmt_desc) {
        if (decoder_spec)
            CFRelease(decoder_spec);

        av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
        return -1;
    }

    buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
                                                     avctx->height,
                                                     videotoolbox->cv_pix_fmt_type);

    decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
    decoder_cb.decompressionOutputRefCon   = avctx;

    status = VTDecompressionSessionCreate(NULL,                      // allocator
                                          videotoolbox->cm_fmt_desc, // videoFormatDescription
                                          decoder_spec,              // videoDecoderSpecification
                                          buf_attr,                  // destinationImageBufferAttributes
                                          &decoder_cb,               // outputCallback
                                          &videotoolbox->session);   // decompressionSessionOut

    if (decoder_spec)
        CFRelease(decoder_spec);
    if (buf_attr)
        CFRelease(buf_attr);

    switch (status) {
    case kVTVideoDecoderNotAvailableNowErr:
        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox session not available.\n");
        return AVERROR(ENOSYS);
    case kVTVideoDecoderUnsupportedDataFormatErr:
        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox does not support this format.\n");
        return AVERROR(ENOSYS);
    case kVTCouldNotFindVideoDecoderErr:
        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder for this format not found.\n");
        return AVERROR(ENOSYS);
    case kVTVideoDecoderMalfunctionErr:
        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox malfunction.\n");
        return AVERROR(EINVAL);
    case kVTVideoDecoderBadDataErr:
        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox reported invalid data.\n");
        return AVERROR_INVALIDDATA;
    case 0:
        return 0;
    default:
        av_log(avctx, AV_LOG_VERBOSE, "Unknown VideoToolbox session creation error %d\n", (int)status);
        return AVERROR_UNKNOWN;
    }
}

static void videotoolbox_stop(AVCodecContext *avctx)
{
    AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
    if (!videotoolbox)
        return;

    if (videotoolbox->cm_fmt_desc) {
        CFRelease(videotoolbox->cm_fmt_desc);
        videotoolbox->cm_fmt_desc = NULL;
    }

    if (videotoolbox->session) {
        VTDecompressionSessionInvalidate(videotoolbox->session);
        CFRelease(videotoolbox->session);
        videotoolbox->session = NULL;
    }
}

static const char *videotoolbox_error_string(OSStatus status)
{
    switch (status) {
        case kVTVideoDecoderBadDataErr:
            return "bad data";
        case kVTVideoDecoderMalfunctionErr:
            return "decoder malfunction";
        case kVTInvalidSessionErr:
            return "invalid session";
    }
    return "unknown";
}

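/* Common end_frame: recreate the session if a reconfig was flagged, decode
 * the buffered access unit and attach the resulting pixel buffer to the
 * frame. */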
static int videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
{
    OSStatus status;
    AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;

    if (vtctx->reconfig_needed == true) {
        vtctx->reconfig_needed = false;
        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder needs reconfig, restarting..\n");
        videotoolbox_stop(avctx);
        if (videotoolbox_start(avctx) != 0) {
            return AVERROR_EXTERNAL;
        }
    }

    if (!videotoolbox->session || !vtctx->bitstream || !vtctx->bitstream_size)
        return AVERROR_INVALIDDATA;

    status = videotoolbox_session_decode_frame(avctx);
    if (status != noErr) {
        if (status == kVTVideoDecoderMalfunctionErr || status == kVTInvalidSessionErr)
            vtctx->reconfig_needed = true;
        av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%s, %d)\n", videotoolbox_error_string(status), (int)status);
        return AVERROR_UNKNOWN;
    }

    if (!vtctx->frame) {
        vtctx->reconfig_needed = true;
        return AVERROR_UNKNOWN;
    }

    return videotoolbox_buffer_create(avctx, frame);
}

static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
{
    H264Context *h = avctx->priv_data;
    AVFrame *frame = h->cur_pic_ptr->f;
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    int ret = videotoolbox_common_end_frame(avctx, frame);
    vtctx->bitstream_size = 0;
    return ret;
}

static int videotoolbox_hevc_decode_params(AVCodecContext *avctx,
                                           int type,
                                           const uint8_t *buffer,
                                           uint32_t size)
{
    return ff_videotoolbox_h264_decode_slice(avctx, buffer, size);
}

static int videotoolbox_hevc_end_frame(AVCodecContext *avctx)
{
    HEVCContext *h = avctx->priv_data;
    AVFrame *frame = h->ref->frame;
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    int ret;

    ret = videotoolbox_common_end_frame(avctx, frame);
    vtctx->bitstream_size = 0;
    return ret;
}

static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
                                         const uint8_t *buffer,
                                         uint32_t size)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;

    return videotoolbox_buffer_copy(vtctx, buffer, size);
}

static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
                                          const uint8_t *buffer,
                                          uint32_t size)
{
    return 0;
}

static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    AVFrame *frame = s->current_picture_ptr->f;

    return videotoolbox_common_end_frame(avctx, frame);
}

static int videotoolbox_uninit(AVCodecContext *avctx)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    if (!vtctx)
        return 0;

    ff_videotoolbox_uninit(avctx);

    if (vtctx->vt_ctx)
        videotoolbox_stop(avctx);

    av_buffer_unref(&vtctx->cached_hw_frames_ctx);
    av_freep(&vtctx->vt_ctx);

    return 0;
}

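/* Init path for the hw_device_ctx/hw_frames_ctx API: allocate an internal
 * AVVideotoolboxContext, make sure a frames context exists, then start the
 * decompression session. */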
static int videotoolbox_common_init(AVCodecContext *avctx)
{
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
    AVHWFramesContext *hw_frames;
    int err;

    // Old API - do nothing.
    if (avctx->hwaccel_context)
        return 0;

    if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx) {
        av_log(avctx, AV_LOG_ERROR,
               "Either hw_frames_ctx or hw_device_ctx must be set.\n");
        return AVERROR(EINVAL);
    }

    vtctx->vt_ctx = av_videotoolbox_alloc_context();
    if (!vtctx->vt_ctx) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    if (avctx->hw_frames_ctx) {
        hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
    } else {
        avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
        if (!avctx->hw_frames_ctx) {
            err = AVERROR(ENOMEM);
            goto fail;
        }

        hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
        hw_frames->format = AV_PIX_FMT_VIDEOTOOLBOX;
        hw_frames->sw_format = AV_PIX_FMT_NV12; // same as av_videotoolbox_alloc_context()
        hw_frames->width = avctx->width;
        hw_frames->height = avctx->height;

        err = av_hwframe_ctx_init(avctx->hw_frames_ctx);
        if (err < 0) {
            av_buffer_unref(&avctx->hw_frames_ctx);
            goto fail;
        }
    }

    vtctx->cached_hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
    if (!vtctx->cached_hw_frames_ctx) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    vtctx->vt_ctx->cv_pix_fmt_type =
        av_map_videotoolbox_format_from_pixfmt(hw_frames->sw_format);
    if (!vtctx->vt_ctx->cv_pix_fmt_type) {
        av_log(avctx, AV_LOG_ERROR, "Unknown sw_format.\n");
        err = AVERROR(EINVAL);
        goto fail;
    }

    err = videotoolbox_start(avctx);
    if (err < 0)
        goto fail;

    return 0;

fail:
    videotoolbox_uninit(avctx);
    return err;
}

static int videotoolbox_frame_params(AVCodecContext *avctx,
                                     AVBufferRef *hw_frames_ctx)
{
    AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ctx->data;

    frames_ctx->format            = AV_PIX_FMT_VIDEOTOOLBOX;
    frames_ctx->width             = avctx->coded_width;
    frames_ctx->height            = avctx->coded_height;
    frames_ctx->sw_format         = AV_PIX_FMT_NV12;

    return 0;
}

const AVHWAccel ff_h263_videotoolbox_hwaccel = {
    .name           = "h263_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H263,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .frame_params   = videotoolbox_frame_params,
    .init           = videotoolbox_common_init,
    .uninit         = videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};

const AVHWAccel ff_hevc_videotoolbox_hwaccel = {
    .name           = "hevc_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_HEVC,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = ff_videotoolbox_h264_start_frame,
    .decode_slice   = ff_videotoolbox_h264_decode_slice,
    .decode_params  = videotoolbox_hevc_decode_params,
    .end_frame      = videotoolbox_hevc_end_frame,
    .frame_params   = videotoolbox_frame_params,
    .init           = videotoolbox_common_init,
    .uninit         = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};

const AVHWAccel ff_h264_videotoolbox_hwaccel = {
    .name           = "h264_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H264,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = ff_videotoolbox_h264_start_frame,
    .decode_slice   = ff_videotoolbox_h264_decode_slice,
    .decode_params  = videotoolbox_h264_decode_params,
    .end_frame      = videotoolbox_h264_end_frame,
    .frame_params   = videotoolbox_frame_params,
    .init           = videotoolbox_common_init,
    .uninit         = videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};

const AVHWAccel ff_mpeg1_videotoolbox_hwaccel = {
    .name           = "mpeg1_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG1VIDEO,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .frame_params   = videotoolbox_frame_params,
    .init           = videotoolbox_common_init,
    .uninit         = videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};

const AVHWAccel ff_mpeg2_videotoolbox_hwaccel = {
    .name           = "mpeg2_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG2VIDEO,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .frame_params   = videotoolbox_frame_params,
    .init           = videotoolbox_common_init,
    .uninit         = videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};

const AVHWAccel ff_mpeg4_videotoolbox_hwaccel = {
    .name           = "mpeg4_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG4,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .frame_params   = videotoolbox_frame_params,
    .init           = videotoolbox_common_init,
    .uninit         = videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};

AVVideotoolboxContext *av_videotoolbox_alloc_context(void)
{
    AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));

    if (ret) {
        ret->output_callback = videotoolbox_decoder_callback;
        ret->cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
    }

    return ret;
}

int av_videotoolbox_default_init(AVCodecContext *avctx)
{
    return av_videotoolbox_default_init2(avctx, NULL);
}

int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx)
{
    avctx->hwaccel_context = vtctx ?: av_videotoolbox_alloc_context();
    if (!avctx->hwaccel_context)
        return AVERROR(ENOMEM);
    return videotoolbox_start(avctx);
}

void av_videotoolbox_default_free(AVCodecContext *avctx)
{

    videotoolbox_stop(avctx);
    av_freep(&avctx->hwaccel_context);
}
#endif /* CONFIG_VIDEOTOOLBOX */