1 /*
2  * copyright (c) 2015 Rick Kern <kernrj@gmail.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 #include <VideoToolbox/VideoToolbox.h>
22 #include <CoreVideo/CoreVideo.h>
23 #include <CoreMedia/CoreMedia.h>
24 #include <TargetConditionals.h>
25 #include <Availability.h>
26 #include "avcodec.h"
27 #include "libavutil/opt.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/avstring.h"
30 #include "libavcodec/avcodec.h"
31 #include "libavutil/pixdesc.h"
32 #include "internal.h"
33 #include <pthread.h>
34 #include "h264.h"
35 #include "h264_sei.h"
36 #include <dlfcn.h>
37
38 #if !HAVE_KCMVIDEOCODECTYPE_HEVC
39 enum { kCMVideoCodecType_HEVC = 'hvc1' };
40 #endif
41
42 #if !HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
43 enum { kCVPixelFormatType_420YpCbCr10BiPlanarFullRange = 'xf20' };
44 enum { kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange = 'x420' };
45 #endif
46
47 typedef OSStatus (*getParameterSetAtIndex)(CMFormatDescriptionRef videoDesc,
48                                            size_t parameterSetIndex,
49                                            const uint8_t **parameterSetPointerOut,
50                                            size_t *parameterSetSizeOut,
51                                            size_t *parameterSetCountOut,
52                                            int *NALUnitHeaderLengthOut);
53
54 //These symbols may not be present
55 static struct{
56     CFStringRef kCVImageBufferColorPrimaries_ITU_R_2020;
57     CFStringRef kCVImageBufferTransferFunction_ITU_R_2020;
58     CFStringRef kCVImageBufferYCbCrMatrix_ITU_R_2020;
59
60     CFStringRef kVTCompressionPropertyKey_H264EntropyMode;
61     CFStringRef kVTH264EntropyMode_CAVLC;
62     CFStringRef kVTH264EntropyMode_CABAC;
63
64     CFStringRef kVTProfileLevel_H264_Baseline_4_0;
65     CFStringRef kVTProfileLevel_H264_Baseline_4_2;
66     CFStringRef kVTProfileLevel_H264_Baseline_5_0;
67     CFStringRef kVTProfileLevel_H264_Baseline_5_1;
68     CFStringRef kVTProfileLevel_H264_Baseline_5_2;
69     CFStringRef kVTProfileLevel_H264_Baseline_AutoLevel;
70     CFStringRef kVTProfileLevel_H264_Main_4_2;
71     CFStringRef kVTProfileLevel_H264_Main_5_1;
72     CFStringRef kVTProfileLevel_H264_Main_5_2;
73     CFStringRef kVTProfileLevel_H264_Main_AutoLevel;
74     CFStringRef kVTProfileLevel_H264_High_3_0;
75     CFStringRef kVTProfileLevel_H264_High_3_1;
76     CFStringRef kVTProfileLevel_H264_High_3_2;
77     CFStringRef kVTProfileLevel_H264_High_4_0;
78     CFStringRef kVTProfileLevel_H264_High_4_1;
79     CFStringRef kVTProfileLevel_H264_High_4_2;
80     CFStringRef kVTProfileLevel_H264_High_5_1;
81     CFStringRef kVTProfileLevel_H264_High_5_2;
82     CFStringRef kVTProfileLevel_H264_High_AutoLevel;
83
84     CFStringRef kVTProfileLevel_HEVC_Main_AutoLevel;
85     CFStringRef kVTProfileLevel_HEVC_Main10_AutoLevel;
86
87     CFStringRef kVTCompressionPropertyKey_RealTime;
88
89     CFStringRef kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder;
90     CFStringRef kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder;
91
92     getParameterSetAtIndex CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
93 } compat_keys;
94
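/*
 * Resolve an optional VideoToolbox/CoreVideo symbol at run time, falling back to
 * a CFSTR() with the given contents when the running OS does not export it.
 * For example, GET_SYM(kVTCompressionPropertyKey_RealTime, "RealTime") looks up
 * "kVTCompressionPropertyKey_RealTime" with dlsym() and uses CFSTR("RealTime")
 * if the lookup fails.
 */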
95 #define GET_SYM(symbol, defaultVal)                                     \
96 do{                                                                     \
97     CFStringRef* handle = (CFStringRef*)dlsym(RTLD_DEFAULT, #symbol);   \
98     if(!handle)                                                         \
99         compat_keys.symbol = CFSTR(defaultVal);                         \
100     else                                                                \
101         compat_keys.symbol = *handle;                                   \
102 }while(0)
103
104 static pthread_once_t once_ctrl = PTHREAD_ONCE_INIT;
105
106 static void loadVTEncSymbols(){
107     compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex =
108         (getParameterSetAtIndex)dlsym(
109             RTLD_DEFAULT,
110             "CMVideoFormatDescriptionGetHEVCParameterSetAtIndex"
111         );
112
113     GET_SYM(kCVImageBufferColorPrimaries_ITU_R_2020,   "ITU_R_2020");
114     GET_SYM(kCVImageBufferTransferFunction_ITU_R_2020, "ITU_R_2020");
115     GET_SYM(kCVImageBufferYCbCrMatrix_ITU_R_2020,      "ITU_R_2020");
116
117     GET_SYM(kVTCompressionPropertyKey_H264EntropyMode, "H264EntropyMode");
118     GET_SYM(kVTH264EntropyMode_CAVLC, "CAVLC");
119     GET_SYM(kVTH264EntropyMode_CABAC, "CABAC");
120
121     GET_SYM(kVTProfileLevel_H264_Baseline_4_0,       "H264_Baseline_4_0");
122     GET_SYM(kVTProfileLevel_H264_Baseline_4_2,       "H264_Baseline_4_2");
123     GET_SYM(kVTProfileLevel_H264_Baseline_5_0,       "H264_Baseline_5_0");
124     GET_SYM(kVTProfileLevel_H264_Baseline_5_1,       "H264_Baseline_5_1");
125     GET_SYM(kVTProfileLevel_H264_Baseline_5_2,       "H264_Baseline_5_2");
126     GET_SYM(kVTProfileLevel_H264_Baseline_AutoLevel, "H264_Baseline_AutoLevel");
127     GET_SYM(kVTProfileLevel_H264_Main_4_2,           "H264_Main_4_2");
128     GET_SYM(kVTProfileLevel_H264_Main_5_1,           "H264_Main_5_1");
129     GET_SYM(kVTProfileLevel_H264_Main_5_2,           "H264_Main_5_2");
130     GET_SYM(kVTProfileLevel_H264_Main_AutoLevel,     "H264_Main_AutoLevel");
131     GET_SYM(kVTProfileLevel_H264_High_3_0,           "H264_High_3_0");
132     GET_SYM(kVTProfileLevel_H264_High_3_1,           "H264_High_3_1");
133     GET_SYM(kVTProfileLevel_H264_High_3_2,           "H264_High_3_2");
134     GET_SYM(kVTProfileLevel_H264_High_4_0,           "H264_High_4_0");
135     GET_SYM(kVTProfileLevel_H264_High_4_1,           "H264_High_4_1");
136     GET_SYM(kVTProfileLevel_H264_High_4_2,           "H264_High_4_2");
137     GET_SYM(kVTProfileLevel_H264_High_5_1,           "H264_High_5_1");
138     GET_SYM(kVTProfileLevel_H264_High_5_2,           "H264_High_5_2");
139     GET_SYM(kVTProfileLevel_H264_High_AutoLevel,     "H264_High_AutoLevel");
140
141     GET_SYM(kVTProfileLevel_HEVC_Main_AutoLevel,     "HEVC_Main_AutoLevel");
142     GET_SYM(kVTProfileLevel_HEVC_Main10_AutoLevel,   "HEVC_Main10_AutoLevel");
143
144     GET_SYM(kVTCompressionPropertyKey_RealTime, "RealTime");
145
146     GET_SYM(kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
147             "EnableHardwareAcceleratedVideoEncoder");
148     GET_SYM(kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
149             "RequireHardwareAcceleratedVideoEncoder");
150 }
151
152 typedef enum VT_H264Profile {
153     H264_PROF_AUTO,
154     H264_PROF_BASELINE,
155     H264_PROF_MAIN,
156     H264_PROF_HIGH,
157     H264_PROF_COUNT
158 } VT_H264Profile;
159
160 typedef enum VTH264Entropy{
161     VT_ENTROPY_NOT_SET,
162     VT_CAVLC,
163     VT_CABAC
164 } VTH264Entropy;
165
166 typedef enum VT_HEVCProfile {
167     HEVC_PROF_AUTO,
168     HEVC_PROF_MAIN,
169     HEVC_PROF_MAIN10,
170     HEVC_PROF_COUNT
171 } VT_HEVCProfile;
172
173 static const uint8_t start_code[] = { 0, 0, 0, 1 };
174
175 typedef struct ExtraSEI {
176   void *data;
177   size_t size;
178 } ExtraSEI;
179
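/* One node in the singly linked queue of encoded samples filled by the
 * VideoToolbox output callback and drained by the encode path. */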
180 typedef struct BufNode {
181     CMSampleBufferRef cm_buffer;
182     ExtraSEI *sei;
183     struct BufNode* next;
184     int error;
185 } BufNode;
186
187 typedef struct VTEncContext {
188     AVClass *class;
189     enum AVCodecID codec_id;
190     VTCompressionSessionRef session;
191     CFStringRef ycbcr_matrix;
192     CFStringRef color_primaries;
193     CFStringRef transfer_function;
194     getParameterSetAtIndex get_param_set_func;
195
196     pthread_mutex_t lock;
197     pthread_cond_t  cv_sample_sent;
198
199     int async_error;
200
201     BufNode *q_head;
202     BufNode *q_tail;
203
204     int64_t frame_ct_out;
205     int64_t frame_ct_in;
206
207     int64_t first_pts;
208     int64_t dts_delta;
209
210     int64_t profile;
211     int64_t level;
212     int64_t entropy;
213     int64_t realtime;
214     int64_t frames_before;
215     int64_t frames_after;
216
217     int64_t allow_sw;
218     int64_t require_sw;
219
220     bool flushing;
221     bool has_b_frames;
222     bool warned_color_range;
223     bool a53_cc;
224 } VTEncContext;
225
226 static int vtenc_populate_extradata(AVCodecContext   *avctx,
227                                     CMVideoCodecType codec_type,
228                                     CFStringRef      profile_level,
229                                     CFNumberRef      gamma_level,
230                                     CFDictionaryRef  enc_info,
231                                     CFDictionaryRef  pixel_buffer_info);
232
233 /**
234  * NULL-safe release of *refPtr, and sets value to NULL.
235  */
236 static void vt_release_num(CFNumberRef* refPtr){
237     if (!*refPtr) {
238         return;
239     }
240
241     CFRelease(*refPtr);
242     *refPtr = NULL;
243 }
244
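/*
 * Record an asynchronous encoder error and drop every queued sample. Called from
 * the output callback thread, and from clear_frame_queue() with err == 0.
 */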
245 static void set_async_error(VTEncContext *vtctx, int err)
246 {
247     BufNode *info;
248
249     pthread_mutex_lock(&vtctx->lock);
250
251     vtctx->async_error = err;
252
253     info = vtctx->q_head;
254     vtctx->q_head = vtctx->q_tail = NULL;
255
256     while (info) {
257         BufNode *next = info->next;
258         CFRelease(info->cm_buffer);
259         av_free(info);
260         info = next;
261     }
262
263     pthread_mutex_unlock(&vtctx->lock);
264 }
265
266 static void clear_frame_queue(VTEncContext *vtctx)
267 {
268     set_async_error(vtctx, 0);
269 }
270
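/*
 * Pop the next encoded sample from the queue. With wait set, blocks on
 * cv_sample_sent until a sample or an async error arrives; once flushing has
 * drained all submitted frames, returns 0 with *buf set to NULL.
 */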
271 static int vtenc_q_pop(VTEncContext *vtctx, bool wait, CMSampleBufferRef *buf, ExtraSEI **sei)
272 {
273     BufNode *info;
274
275     pthread_mutex_lock(&vtctx->lock);
276
277     if (vtctx->async_error) {
278         pthread_mutex_unlock(&vtctx->lock);
279         return vtctx->async_error;
280     }
281
282     if (vtctx->flushing && vtctx->frame_ct_in == vtctx->frame_ct_out) {
283         *buf = NULL;
284
285         pthread_mutex_unlock(&vtctx->lock);
286         return 0;
287     }
288
289     while (!vtctx->q_head && !vtctx->async_error && wait) {
290         pthread_cond_wait(&vtctx->cv_sample_sent, &vtctx->lock);
291     }
292
293     if (!vtctx->q_head) {
294         pthread_mutex_unlock(&vtctx->lock);
295         *buf = NULL;
296         return 0;
297     }
298
299     info = vtctx->q_head;
300     vtctx->q_head = vtctx->q_head->next;
301     if (!vtctx->q_head) {
302         vtctx->q_tail = NULL;
303     }
304
305     pthread_mutex_unlock(&vtctx->lock);
306
307     *buf = info->cm_buffer;
308     if (sei && *buf) {
309         *sei = info->sei;
310     } else if (info->sei) {
311         if (info->sei->data) av_free(info->sei->data);
312         av_free(info->sei);
313     }
314     av_free(info);
315
316     vtctx->frame_ct_out++;
317
318     return 0;
319 }
320
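/*
 * Append a sample to the output queue and wake any thread blocked in
 * vtenc_q_pop(). Runs on VideoToolbox's callback thread.
 */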
321 static void vtenc_q_push(VTEncContext *vtctx, CMSampleBufferRef buffer, ExtraSEI *sei)
322 {
323     BufNode *info = av_malloc(sizeof(BufNode));
324     if (!info) {
325         set_async_error(vtctx, AVERROR(ENOMEM));
326         return;
327     }
328
329     CFRetain(buffer);
330     info->cm_buffer = buffer;
331     info->sei = sei;
332     info->next = NULL;
333
334     pthread_mutex_lock(&vtctx->lock);
335     pthread_cond_signal(&vtctx->cv_sample_sent);
336
337     if (!vtctx->q_head) {
338         vtctx->q_head = info;
339     } else {
340         vtctx->q_tail->next = info;
341     }
342
343     vtctx->q_tail = info;
344
345     pthread_mutex_unlock(&vtctx->lock);
346 }
347
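/*
 * Count the NAL units in a sample buffer by walking its length-prefixed
 * (AVCC/HVCC style) boxes; length_code_size is the size of each length prefix.
 */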
348 static int count_nalus(size_t length_code_size,
349                        CMSampleBufferRef sample_buffer,
350                        int *count)
351 {
352     size_t offset = 0;
353     int status;
354     int nalu_ct = 0;
355     uint8_t size_buf[4];
356     size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
357     CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
358
359     if (length_code_size > 4)
360         return AVERROR_INVALIDDATA;
361
362     while (offset < src_size) {
363         size_t curr_src_len;
364         size_t box_len = 0;
365         size_t i;
366
367         status = CMBlockBufferCopyDataBytes(block,
368                                             offset,
369                                             length_code_size,
370                                             size_buf);
371
372         for (i = 0; i < length_code_size; i++) {
373             box_len <<= 8;
374             box_len |= size_buf[i];
375         }
376
377         curr_src_len = box_len + length_code_size;
378         offset += curr_src_len;
379
380         nalu_ct++;
381     }
382
383     *count = nalu_ct;
384     return 0;
385 }
386
387 static CMVideoCodecType get_cm_codec_type(enum AVCodecID id)
388 {
389     switch (id) {
390     case AV_CODEC_ID_H264: return kCMVideoCodecType_H264;
391     case AV_CODEC_ID_HEVC: return kCMVideoCodecType_HEVC;
392     default:               return 0;
393     }
394 }
395
396 /**
397  * Computes the combined size of all parameter sets attached to vid_fmt.
398  *
399  * Each parameter set is counted together with a leading Annex B start code, so
400  * the result is exactly the number of bytes copy_param_sets() will write.
401  *
402  * On success, *size receives that total and 0 is returned.
403  */
404 static int get_params_size(
405     AVCodecContext              *avctx,
406     CMVideoFormatDescriptionRef vid_fmt,
407     size_t                      *size)
408 {
409     VTEncContext *vtctx = avctx->priv_data;
410     size_t total_size = 0;
411     size_t ps_count;
412     int is_count_bad = 0;
413     size_t i;
414     int status;
415     status = vtctx->get_param_set_func(vid_fmt,
416                                        0,
417                                        NULL,
418                                        NULL,
419                                        &ps_count,
420                                        NULL);
421     if (status) {
422         is_count_bad = 1;
423         ps_count     = 0;
424         status       = 0;
425     }
426
427     for (i = 0; i < ps_count || is_count_bad; i++) {
428         const uint8_t *ps;
429         size_t ps_size;
430         status = vtctx->get_param_set_func(vid_fmt,
431                                            i,
432                                            &ps,
433                                            &ps_size,
434                                            NULL,
435                                            NULL);
436         if (status) {
437             /*
438              * When ps_count is invalid, status != 0 ends the loop normally
439              * unless we didn't get any parameter sets.
440              */
441             if (i > 0 && is_count_bad) status = 0;
442
443             break;
444         }
445
446         total_size += ps_size + sizeof(start_code);
447     }
448
449     if (status) {
450         av_log(avctx, AV_LOG_ERROR, "Error getting parameter set sizes: %d\n", status);
451         return AVERROR_EXTERNAL;
452     }
453
454     *size = total_size;
455     return 0;
456 }
457
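/*
 * Write every parameter set from vid_fmt into dst, each one preceded by an
 * Annex B start code. dst must hold at least the size reported by
 * get_params_size().
 */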
458 static int copy_param_sets(
459     AVCodecContext              *avctx,
460     CMVideoFormatDescriptionRef vid_fmt,
461     uint8_t                     *dst,
462     size_t                      dst_size)
463 {
464     VTEncContext *vtctx = avctx->priv_data;
465     size_t ps_count;
466     int is_count_bad = 0;
467     int status;
468     size_t offset = 0;
469     size_t i;
470
471     status = vtctx->get_param_set_func(vid_fmt,
472                                        0,
473                                        NULL,
474                                        NULL,
475                                        &ps_count,
476                                        NULL);
477     if (status) {
478         is_count_bad = 1;
479         ps_count     = 0;
480         status       = 0;
481     }
482
483
484     for (i = 0; i < ps_count || is_count_bad; i++) {
485         const uint8_t *ps;
486         size_t ps_size;
487         size_t next_offset;
488
489         status = vtctx->get_param_set_func(vid_fmt,
490                                            i,
491                                            &ps,
492                                            &ps_size,
493                                            NULL,
494                                            NULL);
495         if (status) {
496             if (i > 0 && is_count_bad) status = 0;
497
498             break;
499         }
500
501         next_offset = offset + sizeof(start_code) + ps_size;
502         if (dst_size < next_offset) {
503             av_log(avctx, AV_LOG_ERROR, "Error: buffer too small for parameter sets.\n");
504             return AVERROR_BUFFER_TOO_SMALL;
505         }
506
507         memcpy(dst + offset, start_code, sizeof(start_code));
508         offset += sizeof(start_code);
509
510         memcpy(dst + offset, ps, ps_size);
511         offset = next_offset;
512     }
513
514     if (status) {
515         av_log(avctx, AV_LOG_ERROR, "Error getting parameter set data: %d\n", status);
516         return AVERROR_EXTERNAL;
517     }
518
519     return 0;
520 }
521
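/*
 * Build avctx->extradata (Annex B parameter sets) from the format description of
 * an encoded sample. Used when global headers are requested.
 */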
522 static int set_extradata(AVCodecContext *avctx, CMSampleBufferRef sample_buffer)
523 {
524     CMVideoFormatDescriptionRef vid_fmt;
525     size_t total_size;
526     int status;
527
528     vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
529     if (!vid_fmt) {
530         av_log(avctx, AV_LOG_ERROR, "No video format.\n");
531         return AVERROR_EXTERNAL;
532     }
533
534     status = get_params_size(avctx, vid_fmt, &total_size);
535     if (status) {
536         av_log(avctx, AV_LOG_ERROR, "Could not get parameter sets.\n");
537         return status;
538     }
539
540     avctx->extradata = av_mallocz(total_size + AV_INPUT_BUFFER_PADDING_SIZE);
541     if (!avctx->extradata) {
542         return AVERROR(ENOMEM);
543     }
544     avctx->extradata_size = total_size;
545
546     status = copy_param_sets(avctx, vid_fmt, avctx->extradata, total_size);
547
548     if (status) {
549         av_log(avctx, AV_LOG_ERROR, "Could not copy param sets.\n");
550         return status;
551     }
552
553     return 0;
554 }
555
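/*
 * VTCompressionSession output callback. Runs on a VideoToolbox thread: records
 * asynchronous errors, captures extradata from the first sample when global
 * headers were requested, and queues the sample for vtenc_q_pop().
 */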
556 static void vtenc_output_callback(
557     void *ctx,
558     void *sourceFrameCtx,
559     OSStatus status,
560     VTEncodeInfoFlags flags,
561     CMSampleBufferRef sample_buffer)
562 {
563     AVCodecContext *avctx = ctx;
564     VTEncContext   *vtctx = avctx->priv_data;
565     ExtraSEI *sei = sourceFrameCtx;
566
567     if (vtctx->async_error) {
568         if(sample_buffer) CFRelease(sample_buffer);
569         return;
570     }
571
572     if (status || !sample_buffer) {
573         av_log(avctx, AV_LOG_ERROR, "Error encoding frame: %d\n", (int)status);
574         set_async_error(vtctx, AVERROR_EXTERNAL);
575         return;
576     }
577
578     if (!avctx->extradata && (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
579         int set_status = set_extradata(avctx, sample_buffer);
580         if (set_status) {
581             set_async_error(vtctx, set_status);
582             return;
583         }
584     }
585
586     vtenc_q_push(vtctx, sample_buffer, sei);
587 }
588
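/*
 * Query the size of the NAL unit length prefix (usually 4 bytes) used by the
 * AVCC/HVCC samples this session produces.
 */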
589 static int get_length_code_size(
590     AVCodecContext    *avctx,
591     CMSampleBufferRef sample_buffer,
592     size_t            *size)
593 {
594     VTEncContext *vtctx = avctx->priv_data;
595     CMVideoFormatDescriptionRef vid_fmt;
596     int isize;
597     int status;
598
599     vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
600     if (!vid_fmt) {
601         av_log(avctx, AV_LOG_ERROR, "Error getting buffer format description.\n");
602         return AVERROR_EXTERNAL;
603     }
604
605     status = vtctx->get_param_set_func(vid_fmt,
606                                        0,
607                                        NULL,
608                                        NULL,
609                                        NULL,
610                                        &isize);
611     if (status) {
612         av_log(avctx, AV_LOG_ERROR, "Error getting length code size: %d\n", status);
613         return AVERROR_EXTERNAL;
614     }
615
616     *size = isize;
617     return 0;
618 }
619
620 /*
621  * Returns true on success.
622  *
623  * If profile_level_val is NULL and this method returns true, don't specify the
624  * profile/level to the encoder.
625  */
626 static bool get_vt_h264_profile_level(AVCodecContext *avctx,
627                                       CFStringRef    *profile_level_val)
628 {
629     VTEncContext *vtctx = avctx->priv_data;
630     int64_t profile = vtctx->profile;
631
632     if (profile == H264_PROF_AUTO && vtctx->level) {
633         //Need to pick a profile if level is not auto-selected.
634         profile = vtctx->has_b_frames ? H264_PROF_MAIN : H264_PROF_BASELINE;
635     }
636
637     *profile_level_val = NULL;
638
639     switch (profile) {
640         case H264_PROF_AUTO:
641             return true;
642
643         case H264_PROF_BASELINE:
644             switch (vtctx->level) {
645                 case  0: *profile_level_val =
646                                   compat_keys.kVTProfileLevel_H264_Baseline_AutoLevel; break;
647                 case 13: *profile_level_val = kVTProfileLevel_H264_Baseline_1_3;       break;
648                 case 30: *profile_level_val = kVTProfileLevel_H264_Baseline_3_0;       break;
649                 case 31: *profile_level_val = kVTProfileLevel_H264_Baseline_3_1;       break;
650                 case 32: *profile_level_val = kVTProfileLevel_H264_Baseline_3_2;       break;
651                 case 40: *profile_level_val =
652                                   compat_keys.kVTProfileLevel_H264_Baseline_4_0;       break;
653                 case 41: *profile_level_val = kVTProfileLevel_H264_Baseline_4_1;       break;
654                 case 42: *profile_level_val =
655                                   compat_keys.kVTProfileLevel_H264_Baseline_4_2;       break;
656                 case 50: *profile_level_val =
657                                   compat_keys.kVTProfileLevel_H264_Baseline_5_0;       break;
658                 case 51: *profile_level_val =
659                                   compat_keys.kVTProfileLevel_H264_Baseline_5_1;       break;
660                 case 52: *profile_level_val =
661                                   compat_keys.kVTProfileLevel_H264_Baseline_5_2;       break;
662             }
663             break;
664
665         case H264_PROF_MAIN:
666             switch (vtctx->level) {
667                 case  0: *profile_level_val =
668                                   compat_keys.kVTProfileLevel_H264_Main_AutoLevel; break;
669                 case 30: *profile_level_val = kVTProfileLevel_H264_Main_3_0;       break;
670                 case 31: *profile_level_val = kVTProfileLevel_H264_Main_3_1;       break;
671                 case 32: *profile_level_val = kVTProfileLevel_H264_Main_3_2;       break;
672                 case 40: *profile_level_val = kVTProfileLevel_H264_Main_4_0;       break;
673                 case 41: *profile_level_val = kVTProfileLevel_H264_Main_4_1;       break;
674                 case 42: *profile_level_val =
675                                   compat_keys.kVTProfileLevel_H264_Main_4_2;       break;
676                 case 50: *profile_level_val = kVTProfileLevel_H264_Main_5_0;       break;
677                 case 51: *profile_level_val =
678                                   compat_keys.kVTProfileLevel_H264_Main_5_1;       break;
679                 case 52: *profile_level_val =
680                                   compat_keys.kVTProfileLevel_H264_Main_5_2;       break;
681             }
682             break;
683
684         case H264_PROF_HIGH:
685             switch (vtctx->level) {
686                 case  0: *profile_level_val =
687                                   compat_keys.kVTProfileLevel_H264_High_AutoLevel; break;
688                 case 30: *profile_level_val =
689                                   compat_keys.kVTProfileLevel_H264_High_3_0;       break;
690                 case 31: *profile_level_val =
691                                   compat_keys.kVTProfileLevel_H264_High_3_1;       break;
692                 case 32: *profile_level_val =
693                                   compat_keys.kVTProfileLevel_H264_High_3_2;       break;
694                 case 40: *profile_level_val =
695                                   compat_keys.kVTProfileLevel_H264_High_4_0;       break;
696                 case 41: *profile_level_val =
697                                   compat_keys.kVTProfileLevel_H264_High_4_1;       break;
698                 case 42: *profile_level_val =
699                                   compat_keys.kVTProfileLevel_H264_High_4_2;       break;
700                 case 50: *profile_level_val = kVTProfileLevel_H264_High_5_0;       break;
701                 case 51: *profile_level_val =
702                                   compat_keys.kVTProfileLevel_H264_High_5_1;       break;
703                 case 52: *profile_level_val =
704                                   compat_keys.kVTProfileLevel_H264_High_5_2;       break;
705             }
706             break;
707     }
708
709     if (!*profile_level_val) {
710         av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
711         return false;
712     }
713
714     return true;
715 }
716
717 /*
718  * Returns true on success.
719  *
720  * If profile_level_val is NULL and this method returns true, don't specify the
721  * profile/level to the encoder.
722  */
723 static bool get_vt_hevc_profile_level(AVCodecContext *avctx,
724                                       CFStringRef    *profile_level_val)
725 {
726     VTEncContext *vtctx = avctx->priv_data;
727     int64_t profile = vtctx->profile;
728
729     *profile_level_val = NULL;
730
731     switch (profile) {
732         case HEVC_PROF_AUTO:
733             return true;
734         case HEVC_PROF_MAIN:
735             *profile_level_val =
736                 compat_keys.kVTProfileLevel_HEVC_Main_AutoLevel;
737             break;
738         case HEVC_PROF_MAIN10:
739             *profile_level_val =
740                 compat_keys.kVTProfileLevel_HEVC_Main10_AutoLevel;
741             break;
742     }
743
744     if (!*profile_level_val) {
745         av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
746         return false;
747     }
748
749     return true;
750 }
751
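/*
 * Map an AVPixelFormat/AVColorRange pair to the corresponding
 * kCVPixelFormatType_* constant. *range_guessed is set when the caller did not
 * supply an explicit color range (MPEG range is then assumed).
 */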
752 static int get_cv_pixel_format(AVCodecContext* avctx,
753                                enum AVPixelFormat fmt,
754                                enum AVColorRange range,
755                                int* av_pixel_format,
756                                int* range_guessed)
757 {
758     if (range_guessed) *range_guessed = range != AVCOL_RANGE_MPEG &&
759                                         range != AVCOL_RANGE_JPEG;
760
761     //MPEG range is used when no range is set
762     if (fmt == AV_PIX_FMT_NV12) {
763         *av_pixel_format = range == AVCOL_RANGE_JPEG ?
764                                         kCVPixelFormatType_420YpCbCr8BiPlanarFullRange :
765                                         kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
766     } else if (fmt == AV_PIX_FMT_YUV420P) {
767         *av_pixel_format = range == AVCOL_RANGE_JPEG ?
768                                         kCVPixelFormatType_420YpCbCr8PlanarFullRange :
769                                         kCVPixelFormatType_420YpCbCr8Planar;
770     } else if (fmt == AV_PIX_FMT_P010LE) {
771         *av_pixel_format = range == AVCOL_RANGE_JPEG ?
772                                         kCVPixelFormatType_420YpCbCr10BiPlanarFullRange :
773                                         kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange;
775     } else {
776         return AVERROR(EINVAL);
777     }
778
779     return 0;
780 }
781
782 static void add_color_attr(AVCodecContext *avctx, CFMutableDictionaryRef dict) {
783     VTEncContext *vtctx = avctx->priv_data;
784
785     if (vtctx->color_primaries) {
786         CFDictionarySetValue(dict,
787                              kCVImageBufferColorPrimariesKey,
788                              vtctx->color_primaries);
789     }
790
791     if (vtctx->transfer_function) {
792         CFDictionarySetValue(dict,
793                              kCVImageBufferTransferFunctionKey,
794                              vtctx->transfer_function);
795     }
796
797     if (vtctx->ycbcr_matrix) {
798         CFDictionarySetValue(dict,
799                              kCVImageBufferYCbCrMatrixKey,
800                              vtctx->ycbcr_matrix);
801     }
802 }
803
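/*
 * Describe the CVPixelBuffers the session should accept for CPU-provided frames:
 * pixel format, dimensions and color attachments. The caller owns *dict.
 */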
804 static int create_cv_pixel_buffer_info(AVCodecContext* avctx,
805                                        CFMutableDictionaryRef* dict)
806 {
807     CFNumberRef cv_color_format_num = NULL;
808     CFNumberRef width_num = NULL;
809     CFNumberRef height_num = NULL;
810     CFMutableDictionaryRef pixel_buffer_info = NULL;
811     int cv_color_format;
812     int status = get_cv_pixel_format(avctx,
813                                      avctx->pix_fmt,
814                                      avctx->color_range,
815                                      &cv_color_format,
816                                      NULL);
817     if (status) return status;
818
819     pixel_buffer_info = CFDictionaryCreateMutable(
820                             kCFAllocatorDefault,
821                             20,
822                             &kCFCopyStringDictionaryKeyCallBacks,
823                             &kCFTypeDictionaryValueCallBacks);
824
825     if (!pixel_buffer_info) goto pbinfo_nomem;
826
827     cv_color_format_num = CFNumberCreate(kCFAllocatorDefault,
828                                          kCFNumberSInt32Type,
829                                          &cv_color_format);
830     if (!cv_color_format_num) goto pbinfo_nomem;
831
832     CFDictionarySetValue(pixel_buffer_info,
833                          kCVPixelBufferPixelFormatTypeKey,
834                          cv_color_format_num);
835     vt_release_num(&cv_color_format_num);
836
837     width_num = CFNumberCreate(kCFAllocatorDefault,
838                                kCFNumberSInt32Type,
839                                &avctx->width);
840     if (!width_num) return AVERROR(ENOMEM);
841
842     CFDictionarySetValue(pixel_buffer_info,
843                          kCVPixelBufferWidthKey,
844                          width_num);
845     vt_release_num(&width_num);
846
847     height_num = CFNumberCreate(kCFAllocatorDefault,
848                                 kCFNumberSInt32Type,
849                                 &avctx->height);
850     if (!height_num) goto pbinfo_nomem;
851
852     CFDictionarySetValue(pixel_buffer_info,
853                          kCVPixelBufferHeightKey,
854                          height_num);
855     vt_release_num(&height_num);
856
857     add_color_attr(avctx, pixel_buffer_info);
858
859     *dict = pixel_buffer_info;
860     return 0;
861
862 pbinfo_nomem:
863     vt_release_num(&cv_color_format_num);
864     vt_release_num(&width_num);
865     vt_release_num(&height_num);
866     if (pixel_buffer_info) CFRelease(pixel_buffer_info);
867
868     return AVERROR(ENOMEM);
869 }
870
871 static int get_cv_color_primaries(AVCodecContext *avctx,
872                                   CFStringRef *primaries)
873 {
874     enum AVColorPrimaries pri = avctx->color_primaries;
875     switch (pri) {
876         case AVCOL_PRI_UNSPECIFIED:
877             *primaries = NULL;
878             break;
879
880         case AVCOL_PRI_BT709:
881             *primaries = kCVImageBufferColorPrimaries_ITU_R_709_2;
882             break;
883
884         case AVCOL_PRI_BT2020:
885             *primaries = compat_keys.kCVImageBufferColorPrimaries_ITU_R_2020;
886             break;
887
888         default:
889             av_log(avctx, AV_LOG_ERROR, "Color primaries %s is not supported.\n", av_color_primaries_name(pri));
890             *primaries = NULL;
891             return -1;
892     }
893
894     return 0;
895 }
896
897 static int get_cv_transfer_function(AVCodecContext *avctx,
898                                     CFStringRef *transfer_fnc,
899                                     CFNumberRef *gamma_level)
900 {
901     enum AVColorTransferCharacteristic trc = avctx->color_trc;
902     Float32 gamma;
903     *gamma_level = NULL;
904
905     switch (trc) {
906         case AVCOL_TRC_UNSPECIFIED:
907             *transfer_fnc = NULL;
908             break;
909
910         case AVCOL_TRC_BT709:
911             *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_709_2;
912             break;
913
914         case AVCOL_TRC_SMPTE240M:
915             *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_240M_1995;
916             break;
917
918 #if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ
919         case AVCOL_TRC_SMPTE2084:
920             *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_ST_2084_PQ;
921             break;
922 #endif
923 #if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR
924         case AVCOL_TRC_LINEAR:
925             *transfer_fnc = kCVImageBufferTransferFunction_Linear;
926             break;
927 #endif
928 #if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG
929         case AVCOL_TRC_ARIB_STD_B67:
930             *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_2100_HLG;
931             break;
932 #endif
933
934         case AVCOL_TRC_GAMMA22:
935             gamma = 2.2;
936             *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
937             *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
938             break;
939
940         case AVCOL_TRC_GAMMA28:
941             gamma = 2.8;
942             *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
943             *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
944             break;
945
946         case AVCOL_TRC_BT2020_10:
947         case AVCOL_TRC_BT2020_12:
948             *transfer_fnc = compat_keys.kCVImageBufferTransferFunction_ITU_R_2020;
949             break;
950
951         default:
952             *transfer_fnc = NULL;
953             av_log(avctx, AV_LOG_ERROR, "Transfer function %s is not supported.\n", av_color_transfer_name(trc));
954             return -1;
955     }
956
957     return 0;
958 }
959
960 static int get_cv_ycbcr_matrix(AVCodecContext *avctx, CFStringRef *matrix) {
961     switch(avctx->colorspace) {
962         case AVCOL_SPC_BT709:
963             *matrix = kCVImageBufferYCbCrMatrix_ITU_R_709_2;
964             break;
965
966         case AVCOL_SPC_UNSPECIFIED:
967             *matrix = NULL;
968             break;
969
970         case AVCOL_SPC_BT470BG:
971         case AVCOL_SPC_SMPTE170M:
972             *matrix = kCVImageBufferYCbCrMatrix_ITU_R_601_4;
973             break;
974
975         case AVCOL_SPC_SMPTE240M:
976             *matrix = kCVImageBufferYCbCrMatrix_SMPTE_240M_1995;
977             break;
978
979         case AVCOL_SPC_BT2020_NCL:
980             *matrix = compat_keys.kCVImageBufferYCbCrMatrix_ITU_R_2020;
981             break;
982
983         default:
984             av_log(avctx, AV_LOG_ERROR, "Color space %s is not supported.\n", av_color_space_name(avctx->colorspace));
985             return -1;
986     }
987
988     return 0;
989 }
990
991 static int vtenc_create_encoder(AVCodecContext   *avctx,
992                                 CMVideoCodecType codec_type,
993                                 CFStringRef      profile_level,
994                                 CFNumberRef      gamma_level,
995                                 CFDictionaryRef  enc_info,
996                                 CFDictionaryRef  pixel_buffer_info,
997                                 VTCompressionSessionRef *session)
998 {
999     VTEncContext *vtctx = avctx->priv_data;
1000     SInt32       bit_rate = avctx->bit_rate;
1001     SInt32       max_rate = avctx->rc_max_rate;
1002     CFNumberRef  bit_rate_num;
1003     CFNumberRef  bytes_per_second;
1004     CFNumberRef  one_second;
1005     CFArrayRef   data_rate_limits;
1006     int64_t      bytes_per_second_value = 0;
1007     int64_t      one_second_value = 0;
1008     void         *nums[2];
1009
1010     int status = VTCompressionSessionCreate(kCFAllocatorDefault,
1011                                             avctx->width,
1012                                             avctx->height,
1013                                             codec_type,
1014                                             enc_info,
1015                                             pixel_buffer_info,
1016                                             kCFAllocatorDefault,
1017                                             vtenc_output_callback,
1018                                             avctx,
1019                                             session);
1020
1021     if (status || !vtctx->session) {
1022         av_log(avctx, AV_LOG_ERROR, "Error: cannot create compression session: %d\n", status);
1023
1024 #if !TARGET_OS_IPHONE
1025         if (!vtctx->allow_sw) {
1026             av_log(avctx, AV_LOG_ERROR, "Try -allow_sw 1. The hardware encoder may be busy, or not supported.\n");
1027         }
1028 #endif
1029
1030         return AVERROR_EXTERNAL;
1031     }
1032
1033     bit_rate_num = CFNumberCreate(kCFAllocatorDefault,
1034                                   kCFNumberSInt32Type,
1035                                   &bit_rate);
1036     if (!bit_rate_num) return AVERROR(ENOMEM);
1037
1038     status = VTSessionSetProperty(vtctx->session,
1039                                   kVTCompressionPropertyKey_AverageBitRate,
1040                                   bit_rate_num);
1041     CFRelease(bit_rate_num);
1042
1043     if (status) {
1044         av_log(avctx, AV_LOG_ERROR, "Error setting bitrate property: %d\n", status);
1045         return AVERROR_EXTERNAL;
1046     }
1047
1048     if (vtctx->codec_id == AV_CODEC_ID_H264 && max_rate > 0) {
1049         // kVTCompressionPropertyKey_DataRateLimits is not available for HEVC
1050         bytes_per_second_value = max_rate >> 3;
1051         bytes_per_second = CFNumberCreate(kCFAllocatorDefault,
1052                                           kCFNumberSInt64Type,
1053                                           &bytes_per_second_value);
1054         if (!bytes_per_second) {
1055             return AVERROR(ENOMEM);
1056         }
1057         one_second_value = 1;
1058         one_second = CFNumberCreate(kCFAllocatorDefault,
1059                                     kCFNumberSInt64Type,
1060                                     &one_second_value);
1061         if (!one_second) {
1062             CFRelease(bytes_per_second);
1063             return AVERROR(ENOMEM);
1064         }
1065         nums[0] = (void *)bytes_per_second;
1066         nums[1] = (void *)one_second;
1067         data_rate_limits = CFArrayCreate(kCFAllocatorDefault,
1068                                          (const void **)nums,
1069                                          2,
1070                                          &kCFTypeArrayCallBacks);
1071
1072         if (!data_rate_limits) {
1073             CFRelease(bytes_per_second);
1074             CFRelease(one_second);
1075             return AVERROR(ENOMEM);
1076         }
1077         status = VTSessionSetProperty(vtctx->session,
1078                                       kVTCompressionPropertyKey_DataRateLimits,
1079                                       data_rate_limits);
1080
1081         CFRelease(bytes_per_second);
1082         CFRelease(one_second);
1083         CFRelease(data_rate_limits);
1084
1085         if (status) {
1086             av_log(avctx, AV_LOG_ERROR, "Error setting max bitrate property: %d\n", status);
1087             return AVERROR_EXTERNAL;
1088         }
1089     }
1090
1091     if (vtctx->codec_id == AV_CODEC_ID_H264) {
1092         // kVTCompressionPropertyKey_ProfileLevel is not available for HEVC
1093         if (profile_level) {
1094             status = VTSessionSetProperty(vtctx->session,
1095                                         kVTCompressionPropertyKey_ProfileLevel,
1096                                         profile_level);
1097             if (status) {
1098                 av_log(avctx, AV_LOG_ERROR, "Error setting profile/level property: %d\n", status);
1099             }
1100         }
1101     }
1102
1103     if (avctx->gop_size > 0) {
1104         CFNumberRef interval = CFNumberCreate(kCFAllocatorDefault,
1105                                               kCFNumberIntType,
1106                                               &avctx->gop_size);
1107         if (!interval) {
1108             return AVERROR(ENOMEM);
1109         }
1110
1111         status = VTSessionSetProperty(vtctx->session,
1112                                       kVTCompressionPropertyKey_MaxKeyFrameInterval,
1113                                       interval);
1114         CFRelease(interval);
1115
1116         if (status) {
1117             av_log(avctx, AV_LOG_ERROR, "Error setting 'max key-frame interval' property: %d\n", status);
1118             return AVERROR_EXTERNAL;
1119         }
1120     }
1121
1122     if (vtctx->frames_before) {
1123         status = VTSessionSetProperty(vtctx->session,
1124                                       kVTCompressionPropertyKey_MoreFramesBeforeStart,
1125                                       kCFBooleanTrue);
1126
1127         if (status == kVTPropertyNotSupportedErr) {
1128             av_log(avctx, AV_LOG_WARNING, "frames_before property is not supported on this device. Ignoring.\n");
1129         } else if (status) {
1130             av_log(avctx, AV_LOG_ERROR, "Error setting frames_before property: %d\n", status);
1131         }
1132     }
1133
1134     if (vtctx->frames_after) {
1135         status = VTSessionSetProperty(vtctx->session,
1136                                       kVTCompressionPropertyKey_MoreFramesAfterEnd,
1137                                       kCFBooleanTrue);
1138
1139         if (status == kVTPropertyNotSupportedErr) {
1140             av_log(avctx, AV_LOG_WARNING, "frames_after property is not supported on this device. Ignoring.\n");
1141         } else if (status) {
1142             av_log(avctx, AV_LOG_ERROR, "Error setting frames_after property: %d\n", status);
1143         }
1144     }
1145
1146     if (avctx->sample_aspect_ratio.num != 0) {
1147         CFNumberRef num;
1148         CFNumberRef den;
1149         CFMutableDictionaryRef par;
1150         AVRational *avpar = &avctx->sample_aspect_ratio;
1151
1152         av_reduce(&avpar->num, &avpar->den,
1153                    avpar->num,  avpar->den,
1154                   0xFFFFFFFF);
1155
1156         num = CFNumberCreate(kCFAllocatorDefault,
1157                              kCFNumberIntType,
1158                              &avpar->num);
1159
1160         den = CFNumberCreate(kCFAllocatorDefault,
1161                              kCFNumberIntType,
1162                              &avpar->den);
1163
1164
1165
1166         par = CFDictionaryCreateMutable(kCFAllocatorDefault,
1167                                         2,
1168                                         &kCFCopyStringDictionaryKeyCallBacks,
1169                                         &kCFTypeDictionaryValueCallBacks);
1170
1171         if (!par || !num || !den) {
1172             if (par) CFRelease(par);
1173             if (num) CFRelease(num);
1174             if (den) CFRelease(den);
1175
1176             return AVERROR(ENOMEM);
1177         }
1178
1179         CFDictionarySetValue(
1180             par,
1181             kCMFormatDescriptionKey_PixelAspectRatioHorizontalSpacing,
1182             num);
1183
1184         CFDictionarySetValue(
1185             par,
1186             kCMFormatDescriptionKey_PixelAspectRatioVerticalSpacing,
1187             den);
1188
1189         status = VTSessionSetProperty(vtctx->session,
1190                                       kVTCompressionPropertyKey_PixelAspectRatio,
1191                                       par);
1192
1193         CFRelease(par);
1194         CFRelease(num);
1195         CFRelease(den);
1196
1197         if (status) {
1198             av_log(avctx,
1199                    AV_LOG_ERROR,
1200                    "Error setting pixel aspect ratio to %d:%d: %d.\n",
1201                    avctx->sample_aspect_ratio.num,
1202                    avctx->sample_aspect_ratio.den,
1203                    status);
1204
1205             return AVERROR_EXTERNAL;
1206         }
1207     }
1208
1209
1210     if (vtctx->transfer_function) {
1211         status = VTSessionSetProperty(vtctx->session,
1212                                       kVTCompressionPropertyKey_TransferFunction,
1213                                       vtctx->transfer_function);
1214
1215         if (status) {
1216             av_log(avctx, AV_LOG_WARNING, "Could not set transfer function: %d\n", status);
1217         }
1218     }
1219
1220
1221     if (vtctx->ycbcr_matrix) {
1222         status = VTSessionSetProperty(vtctx->session,
1223                                       kVTCompressionPropertyKey_YCbCrMatrix,
1224                                       vtctx->ycbcr_matrix);
1225
1226         if (status) {
1227             av_log(avctx, AV_LOG_WARNING, "Could not set ycbcr matrix: %d\n", status);
1228         }
1229     }
1230
1231
1232     if (vtctx->color_primaries) {
1233         status = VTSessionSetProperty(vtctx->session,
1234                                       kVTCompressionPropertyKey_ColorPrimaries,
1235                                       vtctx->color_primaries);
1236
1237         if (status) {
1238             av_log(avctx, AV_LOG_WARNING, "Could not set color primaries: %d\n", status);
1239         }
1240     }
1241
1242     if (gamma_level) {
1243         status = VTSessionSetProperty(vtctx->session,
1244                                       kCVImageBufferGammaLevelKey,
1245                                       gamma_level);
1246
1247         if (status) {
1248             av_log(avctx, AV_LOG_WARNING, "Could not set gamma level: %d\n", status);
1249         }
1250     }
1251
1252     if (!vtctx->has_b_frames) {
1253         status = VTSessionSetProperty(vtctx->session,
1254                                       kVTCompressionPropertyKey_AllowFrameReordering,
1255                                       kCFBooleanFalse);
1256
1257         if (status) {
1258             av_log(avctx, AV_LOG_ERROR, "Error setting 'allow frame reordering' property: %d\n", status);
1259             return AVERROR_EXTERNAL;
1260         }
1261     }
1262
1263     if (vtctx->entropy != VT_ENTROPY_NOT_SET) {
1264         CFStringRef entropy = vtctx->entropy == VT_CABAC ?
1265                                 compat_keys.kVTH264EntropyMode_CABAC:
1266                                 compat_keys.kVTH264EntropyMode_CAVLC;
1267
1268         status = VTSessionSetProperty(vtctx->session,
1269                                       compat_keys.kVTCompressionPropertyKey_H264EntropyMode,
1270                                       entropy);
1271
1272         if (status) {
1273             av_log(avctx, AV_LOG_ERROR, "Error setting entropy property: %d\n", status);
1274         }
1275     }
1276
1277     if (vtctx->realtime) {
1278         status = VTSessionSetProperty(vtctx->session,
1279                                       compat_keys.kVTCompressionPropertyKey_RealTime,
1280                                       kCFBooleanTrue);
1281
1282         if (status) {
1283             av_log(avctx, AV_LOG_ERROR, "Error setting realtime property: %d\n", status);
1284         }
1285     }
1286
1287     status = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
1288     if (status) {
1289         av_log(avctx, AV_LOG_ERROR, "Error: cannot prepare encoder: %d\n", status);
1290         return AVERROR_EXTERNAL;
1291     }
1292
1293     return 0;
1294 }
1295
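/*
 * Translate the AVCodecContext settings into VideoToolbox dictionaries, resolve
 * the profile/level and color properties, and create the compression session.
 * When global headers are requested, extradata is populated first.
 */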
1296 static int vtenc_configure_encoder(AVCodecContext *avctx)
1297 {
1298     CFMutableDictionaryRef enc_info;
1299     CFMutableDictionaryRef pixel_buffer_info;
1300     CMVideoCodecType       codec_type;
1301     VTEncContext           *vtctx = avctx->priv_data;
1302     CFStringRef            profile_level;
1303     CFNumberRef            gamma_level = NULL;
1304     int                    status;
1305
1306     codec_type = get_cm_codec_type(avctx->codec_id);
1307     if (!codec_type) {
1308         av_log(avctx, AV_LOG_ERROR, "Error: no mapping for AVCodecID %d\n", avctx->codec_id);
1309         return AVERROR(EINVAL);
1310     }
1311
1312     vtctx->codec_id = avctx->codec_id;
1313
1314     if (vtctx->codec_id == AV_CODEC_ID_H264) {
1315         vtctx->get_param_set_func = CMVideoFormatDescriptionGetH264ParameterSetAtIndex;
1316
1317         vtctx->has_b_frames = avctx->max_b_frames > 0;
1318         if(vtctx->has_b_frames && vtctx->profile == H264_PROF_BASELINE){
1319             av_log(avctx, AV_LOG_WARNING, "Cannot use B-frames with baseline profile. Output will not contain B-frames.\n");
1320             vtctx->has_b_frames = false;
1321         }
1322
1323         if (vtctx->entropy == VT_CABAC && vtctx->profile == H264_PROF_BASELINE) {
1324             av_log(avctx, AV_LOG_WARNING, "CABAC entropy requires 'main' or 'high' profile, but baseline was requested. Encode will not use CABAC entropy.\n");
1325             vtctx->entropy = VT_ENTROPY_NOT_SET;
1326         }
1327
1328         if (!get_vt_h264_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
1329     } else {
1330         vtctx->get_param_set_func = compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
1331         if (!vtctx->get_param_set_func) return AVERROR(EINVAL);
1332         if (!get_vt_hevc_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
1333     }
1334
1335     enc_info = CFDictionaryCreateMutable(
1336         kCFAllocatorDefault,
1337         20,
1338         &kCFCopyStringDictionaryKeyCallBacks,
1339         &kCFTypeDictionaryValueCallBacks
1340     );
1341
1342     if (!enc_info) return AVERROR(ENOMEM);
1343
1344 #if !TARGET_OS_IPHONE
1345     if(vtctx->require_sw) {
1346         CFDictionarySetValue(enc_info,
1347                              compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
1348                              kCFBooleanFalse);
1349     } else if (!vtctx->allow_sw) {
1350         CFDictionarySetValue(enc_info,
1351                              compat_keys.kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
1352                              kCFBooleanTrue);
1353     } else {
1354         CFDictionarySetValue(enc_info,
1355                              compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
1356                              kCFBooleanTrue);
1357     }
1358 #endif
1359
1360     if (avctx->pix_fmt != AV_PIX_FMT_VIDEOTOOLBOX) {
1361         status = create_cv_pixel_buffer_info(avctx, &pixel_buffer_info);
1362         if (status)
1363             goto init_cleanup;
1364     } else {
1365         pixel_buffer_info = NULL;
1366     }
1367
1368     vtctx->dts_delta = vtctx->has_b_frames ? -1 : 0;
1369
1370     get_cv_transfer_function(avctx, &vtctx->transfer_function, &gamma_level);
1371     get_cv_ycbcr_matrix(avctx, &vtctx->ycbcr_matrix);
1372     get_cv_color_primaries(avctx, &vtctx->color_primaries);
1373
1374
1375     if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
1376         status = vtenc_populate_extradata(avctx,
1377                                           codec_type,
1378                                           profile_level,
1379                                           gamma_level,
1380                                           enc_info,
1381                                           pixel_buffer_info);
1382         if (status)
1383             goto init_cleanup;
1384     }
1385
1386     status = vtenc_create_encoder(avctx,
1387                                   codec_type,
1388                                   profile_level,
1389                                   gamma_level,
1390                                   enc_info,
1391                                   pixel_buffer_info,
1392                                   &vtctx->session);
1393
1394 init_cleanup:
1395     if (gamma_level)
1396         CFRelease(gamma_level);
1397
1398     if (pixel_buffer_info)
1399         CFRelease(pixel_buffer_info);
1400
1401     CFRelease(enc_info);
1402
1403     return status;
1404 }
1405
1406 static av_cold int vtenc_init(AVCodecContext *avctx)
1407 {
1408     VTEncContext    *vtctx = avctx->priv_data;
1409     CFBooleanRef    has_b_frames_cfbool;
1410     int             status;
1411
1412     pthread_once(&once_ctrl, loadVTEncSymbols);
1413
1414     pthread_mutex_init(&vtctx->lock, NULL);
1415     pthread_cond_init(&vtctx->cv_sample_sent, NULL);
1416
1417     vtctx->session = NULL;
1418     status = vtenc_configure_encoder(avctx);
1419     if (status) return status;
1420
1421     status = VTSessionCopyProperty(vtctx->session,
1422                                    kVTCompressionPropertyKey_AllowFrameReordering,
1423                                    kCFAllocatorDefault,
1424                                    &has_b_frames_cfbool);
1425
1426     if (!status && has_b_frames_cfbool) {
1427         //Some devices don't output B-frames for main profile, even if requested.
1428         vtctx->has_b_frames = CFBooleanGetValue(has_b_frames_cfbool);
1429         CFRelease(has_b_frames_cfbool);
1430     }
1431     avctx->has_b_frames = vtctx->has_b_frames;
1432
1433     return 0;
1434 }
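/*
 * Typical command-line use of this encoder (assuming it is registered as
 * h264_videotoolbox / hevc_videotoolbox and that -allow_sw is the option backing
 * the allow_sw field above):
 *
 *   ffmpeg -i in.mov -c:v h264_videotoolbox -b:v 4M -allow_sw 1 out.mp4
 */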
1435
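/*
 * A sample is reported as a key frame unless its first attachment dictionary
 * carries kCMSampleAttachmentKey_NotSync.
 */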
1436 static void vtenc_get_frame_info(CMSampleBufferRef buffer, bool *is_key_frame)
1437 {
1438     CFArrayRef      attachments;
1439     CFDictionaryRef attachment;
1440     CFBooleanRef    not_sync;
1441     CFIndex         len;
1442
1443     attachments = CMSampleBufferGetSampleAttachmentsArray(buffer, false);
1444     len = !attachments ? 0 : CFArrayGetCount(attachments);
1445
1446     if (!len) {
1447         *is_key_frame = true;
1448         return;
1449     }
1450
1451     attachment = CFArrayGetValueAtIndex(attachments, 0);
1452
1453     if (CFDictionaryGetValueIfPresent(attachment,
1454                                       kCMSampleAttachmentKey_NotSync,
1455                                       (const void **)&not_sync))
1456     {
1457         *is_key_frame = !CFBooleanGetValue(not_sync);
1458     } else {
1459         *is_key_frame = true;
1460     }
1461 }
1462
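/*
 * Returns nonzero for NAL unit types that must come after any SEI we insert,
 * i.e. anything other than AUD, SPS, PPS and SEI itself.
 */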
1463 static int is_post_sei_nal_type(int nal_type){
1464     return nal_type != H264_NAL_SEI &&
1465            nal_type != H264_NAL_SPS &&
1466            nal_type != H264_NAL_PPS &&
1467            nal_type != H264_NAL_AUD;
1468 }
1469
1470 /*
1471  * Scans an SEI NAL unit and sets *sei_end to the byte just past its last payload.
1472  * Returns a positive byte count on success, 0 if the NAL unit is not SEI, or a negative error code.
1473  */
1474 static int find_sei_end(AVCodecContext *avctx,
1475                         uint8_t        *nal_data,
1476                         size_t          nal_size,
1477                         uint8_t       **sei_end)
1478 {
1479     int nal_type;
1480     size_t sei_payload_size = 0;
1481     int sei_payload_type = 0;
1482     *sei_end = NULL;
1483     uint8_t *nal_start = nal_data;
1484
1485     if (!nal_size)
1486         return 0;
1487
1488     nal_type = *nal_data & 0x1F;
1489     if (nal_type != H264_NAL_SEI)
1490         return 0;
1491
1492     nal_data++;
1493     nal_size--;
1494
1495     if (nal_data[nal_size - 1] == 0x80)
1496         nal_size--;
1497
1498     while (nal_size > 0 && *nal_data > 0) {
1499         do{
1500             sei_payload_type += *nal_data;
1501             nal_data++;
1502             nal_size--;
1503         } while (nal_size > 0 && *nal_data == 0xFF);
1504
1505         if (!nal_size) {
1506             av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL unit while parsing payload type.\n");
1507             return AVERROR_INVALIDDATA;
1508         }
1509
1510         do{
1511             sei_payload_size += *nal_data;
1512             nal_data++;
1513             nal_size--;
1514         } while (nal_size > 0 && *nal_data == 0xFF);
1515
1516         if (nal_size < sei_payload_size) {
1517             av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL unit while parsing payload size.\n");
1518             return AVERROR_INVALIDDATA;
1519         }
1520
1521         nal_data += sei_payload_size;
1522         nal_size -= sei_payload_size;
1523     }
1524
1525     *sei_end = nal_data;
1526
1527     return nal_data - nal_start + 1;
1528 }
1529
1530 /**
1531  * Copies the data, inserting emulation prevention bytes as needed.
1532  * Existing data in the destination can be taken into account by providing
1533  * dst with a dst_offset > 0.
1534  *
1535  * @return The number of bytes copied on success. On failure, the negative of
1536  *         the number of bytes needed to copy src is returned.
1537  */
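/*
 * Example (illustrative): the byte sequence 00 00 01 is written as
 * 00 00 03 01, and 00 00 00 00 as 00 00 03 00 00, so that no start code
 * or other reserved three-byte sequence appears in the output.
 */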
1538 static int copy_emulation_prev(const uint8_t *src,
1539                                size_t         src_size,
1540                                uint8_t       *dst,
1541                                ssize_t        dst_offset,
1542                                size_t         dst_size)
1543 {
1544     int zeros = 0;
1545     int wrote_bytes;
1546     uint8_t* dst_start;
1547     uint8_t* dst_end = dst + dst_size;
1548     const uint8_t* src_end = src + src_size;
1549     int start_at = dst_offset > 2 ? dst_offset - 2 : 0;
1550     int i;
1551     for (i = start_at; i < dst_offset && i < dst_size; i++) {
1552         if (!dst[i])
1553             zeros++;
1554         else
1555             zeros = 0;
1556     }
1557
1558     dst += dst_offset;
1559     dst_start = dst;
1560     for (; src < src_end; src++, dst++) {
1561         if (zeros == 2) {
1562             int insert_ep3_byte = *src <= 3;
1563             if (insert_ep3_byte) {
1564                 if (dst < dst_end)
1565                     *dst = 3;
1566                 dst++;
1567             }
1568
1569             zeros = 0;
1570         }
1571
1572         if (dst < dst_end)
1573             *dst = *src;
1574
1575         if (!*src)
1576             zeros++;
1577         else
1578             zeros = 0;
1579     }
1580
1581     wrote_bytes = dst - dst_start;
1582
1583     if (dst > dst_end)
1584         return -wrote_bytes;
1585
1586     return wrote_bytes;
1587 }
1588
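/*
 * Writes a single SEI message of the given type into dst: the payload type
 * and payload size headers (each as runs of 0xFF bytes plus a final byte),
 * followed by the payload with emulation prevention bytes inserted.
 * Returns the number of bytes written, or AVERROR_BUFFER_TOO_SMALL.
 */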
1589 static int write_sei(const ExtraSEI *sei,
1590                      int             sei_type,
1591                      uint8_t        *dst,
1592                      size_t          dst_size)
1593 {
1594     uint8_t *sei_start = dst;
1595     size_t remaining_sei_size = sei->size;
1596     size_t remaining_dst_size = dst_size;
1597     int header_bytes;
1598     int bytes_written;
1599     ssize_t offset;
1600
1601     if (!remaining_dst_size)
1602         return AVERROR_BUFFER_TOO_SMALL;
1603
1604     while (sei_type && remaining_dst_size != 0) {
1605         int sei_byte = sei_type > 255 ? 255 : sei_type;
1606         *dst = sei_byte;
1607
1608         sei_type -= sei_byte;
1609         dst++;
1610         remaining_dst_size--;
1611     }
1612
1613     if (!remaining_dst_size)
1614         return AVERROR_BUFFER_TOO_SMALL;
1615
1616     while (remaining_sei_size && remaining_dst_size != 0) {
1617         int size_byte = remaining_sei_size > 255 ? 255 : remaining_sei_size;
1618         *dst = size_byte;
1619
1620         remaining_sei_size -= size_byte;
1621         dst++;
1622         remaining_dst_size--;
1623     }
1624
1625     if (remaining_dst_size < sei->size)
1626         return AVERROR_BUFFER_TOO_SMALL;
1627
1628     header_bytes = dst - sei_start;
1629
1630     offset = header_bytes;
1631     bytes_written = copy_emulation_prev(sei->data,
1632                                         sei->size,
1633                                         sei_start,
1634                                         offset,
1635                                         dst_size);
1636     if (bytes_written < 0)
1637         return AVERROR_BUFFER_TOO_SMALL;
1638
1639     bytes_written += header_bytes;
1640     return bytes_written;
1641 }
1642
1643 /**
1644  * Copies NAL units and replaces length codes with
1645  * H.264 Annex B start codes. On failure, the contents of
1646  * dst_data may have been modified.
1647  *
1648  * @param length_code_size Byte length of each length code
1649  * @param sample_buffer NAL units prefixed with length codes.
1650  * @param sei Optional A53 closed captions SEI data.
1651  * @param dst_data Must be zeroed before calling this function.
1652  *                 Contains the copied NAL units prefixed with
1653  *                 start codes when the function returns
1654  *                 successfully.
1655  * @param dst_size Length of dst_data
1656  * @return 0 on success
1657  *         AVERROR_INVALIDDATA if length_code_size is invalid
1658  *         AVERROR_BUFFER_TOO_SMALL if dst_data is too small
1659  *         or if a length_code in src_data specifies data beyond
1660  *         the end of its buffer.
1661  */
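/*
 * Example (illustrative): with length_code_size == 4, the AVCC-style input
 *     00 00 00 0e | 65 88 ...          (14-byte IDR slice NAL unit)
 * is emitted as the Annex B sequence
 *     00 00 00 01 | 65 88 ...
 * with the A53 SEI NAL unit, if any, inserted before the first NAL unit
 * that is not an SPS, PPS, AUD or SEI.
 */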
1662 static int copy_replace_length_codes(
1663     AVCodecContext *avctx,
1664     size_t        length_code_size,
1665     CMSampleBufferRef sample_buffer,
1666     ExtraSEI      *sei,
1667     uint8_t       *dst_data,
1668     size_t        dst_size)
1669 {
1670     size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1671     size_t remaining_src_size = src_size;
1672     size_t remaining_dst_size = dst_size;
1673     size_t src_offset = 0;
1674     int wrote_sei = 0;
1675     int status;
1676     uint8_t size_buf[4];
1677     uint8_t nal_type;
1678     CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
1679
1680     if (length_code_size > 4) {
1681         return AVERROR_INVALIDDATA;
1682     }
1683
1684     while (remaining_src_size > 0) {
1685         size_t curr_src_len;
1686         size_t curr_dst_len;
1687         size_t box_len = 0;
1688         size_t i;
1689
1690         uint8_t       *dst_box;
1691
1692         status = CMBlockBufferCopyDataBytes(block,
1693                                             src_offset,
1694                                             length_code_size,
1695                                             size_buf);
1696         if (status) {
1697             av_log(avctx, AV_LOG_ERROR, "Cannot copy length: %d\n", status);
1698             return AVERROR_EXTERNAL;
1699         }
1700
1701         status = CMBlockBufferCopyDataBytes(block,
1702                                             src_offset + length_code_size,
1703                                             1,
1704                                             &nal_type);
1705
1706         if (status) {
1707             av_log(avctx, AV_LOG_ERROR, "Cannot copy type: %d\n", status);
1708             return AVERROR_EXTERNAL;
1709         }
1710
1711         nal_type &= 0x1F;
1712
1713         for (i = 0; i < length_code_size; i++) {
1714             box_len <<= 8;
1715             box_len |= size_buf[i];
1716         }
1717
1718         if (sei && !wrote_sei && is_post_sei_nal_type(nal_type)) {
1719             //No SEI NAL unit - insert.
1720             int wrote_bytes;
1721
1722             memcpy(dst_data, start_code, sizeof(start_code));
1723             dst_data += sizeof(start_code);
1724             remaining_dst_size -= sizeof(start_code);
1725
1726             *dst_data = H264_NAL_SEI;
1727             dst_data++;
1728             remaining_dst_size--;
1729
1730             wrote_bytes = write_sei(sei,
1731                                     H264_SEI_TYPE_USER_DATA_REGISTERED,
1732                                     dst_data,
1733                                     remaining_dst_size);
1734
1735             if (wrote_bytes < 0)
1736                 return wrote_bytes;
1737
1738             remaining_dst_size -= wrote_bytes;
1739             dst_data += wrote_bytes;
1740
1741             if (remaining_dst_size <= 0)
1742                 return AVERROR_BUFFER_TOO_SMALL;
1743
1744             *dst_data = 0x80;
1745
1746             dst_data++;
1747             remaining_dst_size--;
1748
1749             wrote_sei = 1;
1750         }
1751
1752         curr_src_len = box_len + length_code_size;
1753         curr_dst_len = box_len + sizeof(start_code);
1754
1755         if (remaining_src_size < curr_src_len) {
1756             return AVERROR_BUFFER_TOO_SMALL;
1757         }
1758
1759         if (remaining_dst_size < curr_dst_len) {
1760             return AVERROR_BUFFER_TOO_SMALL;
1761         }
1762
1763         dst_box = dst_data + sizeof(start_code);
1764
1765         memcpy(dst_data, start_code, sizeof(start_code));
1766         status = CMBlockBufferCopyDataBytes(block,
1767                                             src_offset + length_code_size,
1768                                             box_len,
1769                                             dst_box);
1770
1771         if (status) {
1772             av_log(avctx, AV_LOG_ERROR, "Cannot copy data: %d\n", status);
1773             return AVERROR_EXTERNAL;
1774         }
1775
1776         if (sei && !wrote_sei && nal_type == H264_NAL_SEI) {
1777             //Found SEI NAL unit - append.
1778             int wrote_bytes;
1779             int old_sei_length;
1780             int extra_bytes;
1781             uint8_t *new_sei;
1782             old_sei_length = find_sei_end(avctx, dst_box, box_len, &new_sei);
1783             if (old_sei_length < 0)
1784                 return old_sei_length;
1785
1786             wrote_bytes = write_sei(sei,
1787                                     H264_SEI_TYPE_USER_DATA_REGISTERED,
1788                                     new_sei,
1789                                     remaining_dst_size - old_sei_length);
1790             if (wrote_bytes < 0)
1791                 return wrote_bytes;
1792
1793             if (new_sei + wrote_bytes >= dst_data + remaining_dst_size)
1794                 return AVERROR_BUFFER_TOO_SMALL;
1795
1796             new_sei[wrote_bytes++] = 0x80;
1797             extra_bytes = wrote_bytes - (dst_box + box_len - new_sei);
1798
1799             dst_data += extra_bytes;
1800             remaining_dst_size -= extra_bytes;
1801
1802             wrote_sei = 1;
1803         }
1804
1805         src_offset += curr_src_len;
1806         dst_data += curr_dst_len;
1807
1808         remaining_src_size -= curr_src_len;
1809         remaining_dst_size -= curr_dst_len;
1810     }
1811
1812     return 0;
1813 }
1814
1815 /**
1816  * Returns a number of bytes sufficient to hold the complete SEI message,
1817  * including its headers. It may be greater than the minimum required.
1818  */
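/*
 * For example (illustrative), a 300-byte payload of type 4 with no bytes
 * needing emulation prevention yields 300 + 2 size bytes + 1 type byte = 303.
 */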
1819 static int get_sei_msg_bytes(const ExtraSEI* sei, int type){
1820     int copied_size;
1821     if (sei->size == 0)
1822         return 0;
1823
1824     copied_size = -copy_emulation_prev(sei->data,
1825                                        sei->size,
1826                                        NULL,
1827                                        0,
1828                                        0);
1829
1830     if ((sei->size % 255) == 0) //may result in an extra byte
1831         copied_size++;
1832
1833     return copied_size + sei->size / 255 + 1 + type / 255 + 1;
1834 }
1835
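/*
 * Converts an encoded CMSampleBuffer into an AVPacket: prepends the
 * parameter sets on key frames when global headers are not in use,
 * rewrites length codes as Annex B start codes, inserts an A53
 * closed-caption SEI if provided, and fills in pts/dts.
 */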
1836 static int vtenc_cm_to_avpacket(
1837     AVCodecContext    *avctx,
1838     CMSampleBufferRef sample_buffer,
1839     AVPacket          *pkt,
1840     ExtraSEI          *sei)
1841 {
1842     VTEncContext *vtctx = avctx->priv_data;
1843
1844     int     status;
1845     bool    is_key_frame;
1846     bool    add_header;
1847     size_t  length_code_size;
1848     size_t  header_size = 0;
1849     size_t  in_buf_size;
1850     size_t  out_buf_size;
1851     size_t  sei_nalu_size = 0;
1852     int64_t dts_delta;
1853     int64_t time_base_num;
1854     int nalu_count;
1855     CMTime  pts;
1856     CMTime  dts;
1857     CMVideoFormatDescriptionRef vid_fmt;
1858
1859
1860     vtenc_get_frame_info(sample_buffer, &is_key_frame);
1861     status = get_length_code_size(avctx, sample_buffer, &length_code_size);
1862     if (status) return status;
1863
1864     add_header = is_key_frame && !(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER);
1865
1866     if (add_header) {
1867         vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
1868         if (!vid_fmt) {
1869             av_log(avctx, AV_LOG_ERROR, "Cannot get format description.\n");
1870             return AVERROR_EXTERNAL;
1871         }
1872
1873         status = get_params_size(avctx, vid_fmt, &header_size);
1874         if (status) return status;
1875     }
1876
1877     status = count_nalus(length_code_size, sample_buffer, &nalu_count);
1878     if(status)
1879         return status;
1880
1881     if (sei) {
1882         size_t msg_size = get_sei_msg_bytes(sei,
1883                                             H264_SEI_TYPE_USER_DATA_REGISTERED);
1884
1885         sei_nalu_size = sizeof(start_code) + 1 + msg_size + 1;
1886     }
1887
1888     in_buf_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1889     out_buf_size = header_size +
1890                    in_buf_size +
1891                    sei_nalu_size +
1892                    nalu_count * ((int)sizeof(start_code) - (int)length_code_size);
1893
1894     status = ff_alloc_packet2(avctx, pkt, out_buf_size, out_buf_size);
1895     if (status < 0)
1896         return status;
1897
1898     if (add_header) {
1899         status = copy_param_sets(avctx, vid_fmt, pkt->data, out_buf_size);
1900         if(status) return status;
1901     }
1902
1903     status = copy_replace_length_codes(
1904         avctx,
1905         length_code_size,
1906         sample_buffer,
1907         sei,
1908         pkt->data + header_size,
1909         pkt->size - header_size
1910     );
1911
1912     if (status) {
1913         av_log(avctx, AV_LOG_ERROR, "Error copying packet data: %d\n", status);
1914         return status;
1915     }
1916
1917     if (is_key_frame) {
1918         pkt->flags |= AV_PKT_FLAG_KEY;
1919     }
1920
1921     pts = CMSampleBufferGetPresentationTimeStamp(sample_buffer);
1922     dts = CMSampleBufferGetDecodeTimeStamp      (sample_buffer);
1923
1924     if (CMTIME_IS_INVALID(dts)) {
1925         if (!vtctx->has_b_frames) {
1926             dts = pts;
1927         } else {
1928             av_log(avctx, AV_LOG_ERROR, "DTS is invalid.\n");
1929             return AVERROR_EXTERNAL;
1930         }
1931     }
1932
1933     dts_delta = vtctx->dts_delta >= 0 ? vtctx->dts_delta : 0;
1934     time_base_num = avctx->time_base.num;
1935     pkt->pts = pts.value / time_base_num;
1936     pkt->dts = dts.value / time_base_num - dts_delta;
1937     pkt->size = out_buf_size;
1938
1939     return 0;
1940 }
1941
1942 /*
1943  * Sets *contiguous_buf_size to the total size of the buffer holding all
1944  * planes when they are contiguous in memory, and to 0 otherwise.
1945  */
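/*
 * For example, NV12 and P010 report two planes (a full-resolution luma plane
 * plus a half-resolution interleaved chroma plane), while YUV420P reports
 * three separate planes.
 */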
1946 static int get_cv_pixel_info(
1947     AVCodecContext *avctx,
1948     const AVFrame  *frame,
1949     int            *color,
1950     int            *plane_count,
1951     size_t         *widths,
1952     size_t         *heights,
1953     size_t         *strides,
1954     size_t         *contiguous_buf_size)
1955 {
1956     VTEncContext *vtctx = avctx->priv_data;
1957     int av_format       = frame->format;
1958     int av_color_range  = frame->color_range;
1959     int i;
1960     int range_guessed;
1961     int status;
1962
1963     status = get_cv_pixel_format(avctx, av_format, av_color_range, color, &range_guessed);
1964     if (status) {
1965         av_log(avctx,
1966             AV_LOG_ERROR,
1967             "Could not get pixel format for color format '%s' range '%s'.\n",
1968             av_get_pix_fmt_name(av_format),
1969             av_color_range > AVCOL_RANGE_UNSPECIFIED &&
1970             av_color_range < AVCOL_RANGE_NB ?
1971                av_color_range_name(av_color_range) :
1972                "Unknown");
1973
1974         return AVERROR(EINVAL);
1975     }
1976
1977     if (range_guessed) {
1978         if (!vtctx->warned_color_range) {
1979             vtctx->warned_color_range = true;
1980             av_log(avctx,
1981                    AV_LOG_WARNING,
1982                    "Color range not set for %s. Using MPEG range.\n",
1983                    av_get_pix_fmt_name(av_format));
1984         }
1985     }
1986
1987     switch (av_format) {
1988     case AV_PIX_FMT_NV12:
1989         *plane_count = 2;
1990
1991         widths [0] = avctx->width;
1992         heights[0] = avctx->height;
1993         strides[0] = frame ? frame->linesize[0] : avctx->width;
1994
1995         widths [1] = (avctx->width  + 1) / 2;
1996         heights[1] = (avctx->height + 1) / 2;
1997         strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) & -2;
1998         break;
1999
2000     case AV_PIX_FMT_YUV420P:
2001         *plane_count = 3;
2002
2003         widths [0] = avctx->width;
2004         heights[0] = avctx->height;
2005         strides[0] = frame ? frame->linesize[0] : avctx->width;
2006
2007         widths [1] = (avctx->width  + 1) / 2;
2008         heights[1] = (avctx->height + 1) / 2;
2009         strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) / 2;
2010
2011         widths [2] = (avctx->width  + 1) / 2;
2012         heights[2] = (avctx->height + 1) / 2;
2013         strides[2] = frame ? frame->linesize[2] : (avctx->width + 1) / 2;
2014         break;
2015
2016     case AV_PIX_FMT_P010LE:
2017         *plane_count = 2;
2018         widths[0] = avctx->width;
2019         heights[0] = avctx->height;
2020         strides[0] = frame ? frame->linesize[0] : (avctx->width * 2 + 63) & -64;
2021
2022         widths[1] = (avctx->width + 1) / 2;
2023         heights[1] = (avctx->height + 1) / 2;
2024         strides[1] = frame ? frame->linesize[1] : ((avctx->width + 1) / 2 + 63) & -64;
2025         break;
2026
2027     default:
2028         av_log(
2029                avctx,
2030                AV_LOG_ERROR,
2031                "Could not get frame format info for color %d range %d.\n",
2032                av_format,
2033                av_color_range);
2034
2035         return AVERROR(EINVAL);
2036     }
2037
2038     *contiguous_buf_size = 0;
2039     for (i = 0; i < *plane_count; i++) {
2040         if (i < *plane_count - 1 &&
2041             frame->data[i] + strides[i] * heights[i] != frame->data[i + 1]) {
2042             *contiguous_buf_size = 0;
2043             break;
2044         }
2045
2046         *contiguous_buf_size += strides[i] * heights[i];
2047     }
2048
2049     return 0;
2050 }
2051
2052 //Not used on OSX - frame is never copied.
2053 static int copy_avframe_to_pixel_buffer(AVCodecContext   *avctx,
2054                                         const AVFrame    *frame,
2055                                         CVPixelBufferRef cv_img,
2056                                         const size_t     *plane_strides,
2057                                         const size_t     *plane_rows)
2058 {
2059     int i, j;
2060     size_t plane_count;
2061     int status;
2062     int rows;
2063     int src_stride;
2064     int dst_stride;
2065     uint8_t *src_addr;
2066     uint8_t *dst_addr;
2067     size_t copy_bytes;
2068
2069     status = CVPixelBufferLockBaseAddress(cv_img, 0);
2070     if (status) {
2071         av_log(
2072             avctx,
2073             AV_LOG_ERROR,
2074             "Error: Could not lock base address of CVPixelBuffer: %d.\n",
2075             status
2076         );
2077     }
2078
2079     if (CVPixelBufferIsPlanar(cv_img)) {
2080         plane_count = CVPixelBufferGetPlaneCount(cv_img);
2081         for (i = 0; frame->data[i]; i++) {
2082             if (i == plane_count) {
2083                 CVPixelBufferUnlockBaseAddress(cv_img, 0);
2084                 av_log(avctx,
2085                     AV_LOG_ERROR,
2086                     "Error: different number of planes in AVFrame and CVPixelBuffer.\n"
2087                 );
2088
2089                 return AVERROR_EXTERNAL;
2090             }
2091
2092             dst_addr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cv_img, i);
2093             src_addr = (uint8_t*)frame->data[i];
2094             dst_stride = CVPixelBufferGetBytesPerRowOfPlane(cv_img, i);
2095             src_stride = plane_strides[i];
2096             rows = plane_rows[i];
2097
2098             if (dst_stride == src_stride) {
2099                 memcpy(dst_addr, src_addr, src_stride * rows);
2100             } else {
2101                 copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2102
2103                 for (j = 0; j < rows; j++) {
2104                     memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2105                 }
2106             }
2107         }
2108     } else {
2109         if (frame->data[1]) {
2110             CVPixelBufferUnlockBaseAddress(cv_img, 0);
2111             av_log(avctx,
2112                 AV_LOG_ERROR,
2113                 "Error: different number of planes in AVFrame and non-planar CVPixelBuffer.\n"
2114             );
2115
2116             return AVERROR_EXTERNAL;
2117         }
2118
2119         dst_addr = (uint8_t*)CVPixelBufferGetBaseAddress(cv_img);
2120         src_addr = (uint8_t*)frame->data[0];
2121         dst_stride = CVPixelBufferGetBytesPerRow(cv_img);
2122         src_stride = plane_strides[0];
2123         rows = plane_rows[0];
2124
2125         if (dst_stride == src_stride) {
2126             memcpy(dst_addr, src_addr, src_stride * rows);
2127         } else {
2128             copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2129
2130             for (j = 0; j < rows; j++) {
2131                 memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2132             }
2133         }
2134     }
2135
2136     status = CVPixelBufferUnlockBaseAddress(cv_img, 0);
2137     if (status) {
2138         av_log(avctx, AV_LOG_ERROR, "Error: Could not unlock CVPixelBuffer base address: %d.\n", status);
2139         return AVERROR_EXTERNAL;
2140     }
2141
2142     return 0;
2143 }
2144
2145 static int create_cv_pixel_buffer(AVCodecContext   *avctx,
2146                                   const AVFrame    *frame,
2147                                   CVPixelBufferRef *cv_img)
2148 {
2149     int plane_count;
2150     int color;
2151     size_t widths [AV_NUM_DATA_POINTERS];
2152     size_t heights[AV_NUM_DATA_POINTERS];
2153     size_t strides[AV_NUM_DATA_POINTERS];
2154     int status;
2155     size_t contiguous_buf_size;
2156     CVPixelBufferPoolRef pix_buf_pool;
2157     VTEncContext* vtctx = avctx->priv_data;
2158
2159     if (avctx->pix_fmt == AV_PIX_FMT_VIDEOTOOLBOX) {
2160         av_assert0(frame->format == AV_PIX_FMT_VIDEOTOOLBOX);
2161
2162         *cv_img = (CVPixelBufferRef)frame->data[3];
2163         av_assert0(*cv_img);
2164
2165         CFRetain(*cv_img);
2166         return 0;
2167     }
2168
2169     memset(widths,  0, sizeof(widths));
2170     memset(heights, 0, sizeof(heights));
2171     memset(strides, 0, sizeof(strides));
2172
2173     status = get_cv_pixel_info(
2174         avctx,
2175         frame,
2176         &color,
2177         &plane_count,
2178         widths,
2179         heights,
2180         strides,
2181         &contiguous_buf_size
2182     );
2183
2184     if (status) {
2185         av_log(
2186             avctx,
2187             AV_LOG_ERROR,
2188             "Error: Cannot convert format %d color_range %d: %d\n",
2189             frame->format,
2190             frame->color_range,
2191             status
2192         );
2193
2194         return AVERROR_EXTERNAL;
2195     }
2196
2197     pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2198     if (!pix_buf_pool) {
2199         /* On iOS, the VT session is invalidated when the app switches from
2200          * foreground to background and vice versa. Fetch the actual error code
2201          * of the VT session to detect that case and restart the VT session
2202          * accordingly. */
2203         OSStatus vtstatus;
2204
2205         vtstatus = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
2206         if (vtstatus == kVTInvalidSessionErr) {
2207             CFRelease(vtctx->session);
2208             vtctx->session = NULL;
2209             status = vtenc_configure_encoder(avctx);
2210             if (status == 0)
2211                 pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2212         }
2213         if (!pix_buf_pool) {
2214             av_log(avctx, AV_LOG_ERROR, "Could not get pixel buffer pool.\n");
2215             return AVERROR_EXTERNAL;
2216         }
2217         else
2218             av_log(avctx, AV_LOG_WARNING, "VT session restarted because of a "
2219                    "kVTInvalidSessionErr error.\n");
2220     }
2221
2222     status = CVPixelBufferPoolCreatePixelBuffer(NULL,
2223                                                 pix_buf_pool,
2224                                                 cv_img);
2225
2226
2227     if (status) {
2228         av_log(avctx, AV_LOG_ERROR, "Could not create pixel buffer from pool: %d.\n", status);
2229         return AVERROR_EXTERNAL;
2230     }
2231
2232     status = copy_avframe_to_pixel_buffer(avctx, frame, *cv_img, strides, heights);
2233     if (status) {
2234         CFRelease(*cv_img);
2235         *cv_img = NULL;
2236         return status;
2237     }
2238
2239     return 0;
2240 }
2241
2242 static int create_encoder_dict_h264(const AVFrame *frame,
2243                                     CFDictionaryRef* dict_out)
2244 {
2245     CFDictionaryRef dict = NULL;
2246     if (frame->pict_type == AV_PICTURE_TYPE_I) {
2247         const void *keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
2248         const void *vals[] = { kCFBooleanTrue };
2249
2250         dict = CFDictionaryCreate(NULL, keys, vals, 1, NULL, NULL);
2251         if(!dict) return AVERROR(ENOMEM);
2252     }
2253
2254     *dict_out = dict;
2255     return 0;
2256 }
2257
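/*
 * Wraps the AVFrame in a CVPixelBuffer (or retains the one carried by an
 * AV_PIX_FMT_VIDEOTOOLBOX frame), attaches A53 closed-caption SEI data if
 * enabled, and submits the frame to the compression session.
 */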
2258 static int vtenc_send_frame(AVCodecContext *avctx,
2259                             VTEncContext   *vtctx,
2260                             const AVFrame  *frame)
2261 {
2262     CMTime time;
2263     CFDictionaryRef frame_dict;
2264     CVPixelBufferRef cv_img = NULL;
2265     AVFrameSideData *side_data = NULL;
2266     ExtraSEI *sei = NULL;
2267     int status = create_cv_pixel_buffer(avctx, frame, &cv_img);
2268
2269     if (status) return status;
2270
2271     status = create_encoder_dict_h264(frame, &frame_dict);
2272     if (status) {
2273         CFRelease(cv_img);
2274         return status;
2275     }
2276
2277     side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
2278     if (vtctx->a53_cc && side_data && side_data->size) {
2279         sei = av_mallocz(sizeof(*sei));
2280         if (!sei) {
2281             av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
2282         } else {
2283             int ret = ff_alloc_a53_sei(frame, 0, &sei->data, &sei->size);
2284             if (ret < 0) {
2285                 av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
2286                 av_free(sei);
2287                 sei = NULL;
2288             }
2289         }
2290     }
2291
2292     time = CMTimeMake(frame->pts * avctx->time_base.num, avctx->time_base.den);
2293     status = VTCompressionSessionEncodeFrame(
2294         vtctx->session,
2295         cv_img,
2296         time,
2297         kCMTimeInvalid,
2298         frame_dict,
2299         sei,
2300         NULL
2301     );
2302
2303     if (frame_dict) CFRelease(frame_dict);
2304     CFRelease(cv_img);
2305
2306     if (status) {
2307         av_log(avctx, AV_LOG_ERROR, "Error: cannot encode frame: %d\n", status);
2308         return AVERROR_EXTERNAL;
2309     }
2310
2311     return 0;
2312 }
2313
2314 static av_cold int vtenc_frame(
2315     AVCodecContext *avctx,
2316     AVPacket       *pkt,
2317     const AVFrame  *frame,
2318     int            *got_packet)
2319 {
2320     VTEncContext *vtctx = avctx->priv_data;
2321     bool get_frame;
2322     int status;
2323     CMSampleBufferRef buf = NULL;
2324     ExtraSEI *sei = NULL;
2325
2326     if (frame) {
2327         status = vtenc_send_frame(avctx, vtctx, frame);
2328
2329         if (status) {
2330             status = AVERROR_EXTERNAL;
2331             goto end_nopkt;
2332         }
2333
2334         if (vtctx->frame_ct_in == 0) {
2335             vtctx->first_pts = frame->pts;
2336         } else if(vtctx->frame_ct_in == 1 && vtctx->has_b_frames) {
2337             vtctx->dts_delta = frame->pts - vtctx->first_pts;
2338         }
2339
2340         vtctx->frame_ct_in++;
2341     } else if(!vtctx->flushing) {
2342         vtctx->flushing = true;
2343
2344         status = VTCompressionSessionCompleteFrames(vtctx->session,
2345                                                     kCMTimeIndefinite);
2346
2347         if (status) {
2348             av_log(avctx, AV_LOG_ERROR, "Error flushing frames: %d\n", status);
2349             status = AVERROR_EXTERNAL;
2350             goto end_nopkt;
2351         }
2352     }
2353
2354     *got_packet = 0;
2355     get_frame = vtctx->dts_delta >= 0 || !frame;
2356     if (!get_frame) {
2357         status = 0;
2358         goto end_nopkt;
2359     }
2360
2361     status = vtenc_q_pop(vtctx, !frame, &buf, &sei);
2362     if (status) goto end_nopkt;
2363     if (!buf)   goto end_nopkt;
2364
2365     status = vtenc_cm_to_avpacket(avctx, buf, pkt, sei);
2366     if (sei) {
2367         if (sei->data) av_free(sei->data);
2368         av_free(sei);
2369     }
2370     CFRelease(buf);
2371     if (status) goto end_nopkt;
2372
2373     *got_packet = 1;
2374     return 0;
2375
2376 end_nopkt:
2377     av_packet_unref(pkt);
2378     return status;
2379 }
2380
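/*
 * Populates avctx->extradata by running a temporary compression session:
 * one dummy frame taken from the session's pixel buffer pool is encoded and
 * flushed so the parameter sets become available, then the session is torn
 * down.
 */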
2381 static int vtenc_populate_extradata(AVCodecContext   *avctx,
2382                                     CMVideoCodecType codec_type,
2383                                     CFStringRef      profile_level,
2384                                     CFNumberRef      gamma_level,
2385                                     CFDictionaryRef  enc_info,
2386                                     CFDictionaryRef  pixel_buffer_info)
2387 {
2388     VTEncContext *vtctx = avctx->priv_data;
2389     int status;
2390     CVPixelBufferPoolRef pool = NULL;
2391     CVPixelBufferRef pix_buf = NULL;
2392     CMTime time;
2393     CMSampleBufferRef buf = NULL;
2394
2395     status = vtenc_create_encoder(avctx,
2396                                   codec_type,
2397                                   profile_level,
2398                                   gamma_level,
2399                                   enc_info,
2400                                   pixel_buffer_info,
2401                                   &vtctx->session);
2402     if (status)
2403         goto pe_cleanup;
2404
2405     pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2406     if(!pool){
2407         av_log(avctx, AV_LOG_ERROR, "Error getting pixel buffer pool.\n");
2408         goto pe_cleanup;
2409     }
2410
2411     status = CVPixelBufferPoolCreatePixelBuffer(NULL,
2412                                                 pool,
2413                                                 &pix_buf);
2414
2415     if(status != kCVReturnSuccess){
2416         av_log(avctx, AV_LOG_ERROR, "Error creating frame from pool: %d\n", status);
2417         goto pe_cleanup;
2418     }
2419
2420     time = CMTimeMake(0, avctx->time_base.den);
2421     status = VTCompressionSessionEncodeFrame(vtctx->session,
2422                                              pix_buf,
2423                                              time,
2424                                              kCMTimeInvalid,
2425                                              NULL,
2426                                              NULL,
2427                                              NULL);
2428
2429     if (status) {
2430         av_log(avctx,
2431                AV_LOG_ERROR,
2432                "Error sending frame for extradata: %d\n",
2433                status);
2434
2435         goto pe_cleanup;
2436     }
2437
2438     //Flush pending frames so the output callback runs and the parameter sets become available as extradata.
2439     status = VTCompressionSessionCompleteFrames(vtctx->session,
2440                                                 kCMTimeIndefinite);
2441
2442     if (status)
2443         goto pe_cleanup;
2444
2445     status = vtenc_q_pop(vtctx, 0, &buf, NULL);
2446     if (status) {
2447         av_log(avctx, AV_LOG_ERROR, "popping: %d\n", status);
2448         goto pe_cleanup;
2449     }
2450
2451     CFRelease(buf);
2452
2455 pe_cleanup:
2456     if(vtctx->session)
2457         CFRelease(vtctx->session);
2458
2459     vtctx->session = NULL;
2460     vtctx->frame_ct_out = 0;
2461
2462     av_assert0(status != 0 || (avctx->extradata && avctx->extradata_size > 0));
2463
2464     return status;
2465 }
2466
2467 static av_cold int vtenc_close(AVCodecContext *avctx)
2468 {
2469     VTEncContext *vtctx = avctx->priv_data;
2470
2471     pthread_cond_destroy(&vtctx->cv_sample_sent);
2472     pthread_mutex_destroy(&vtctx->lock);
2473
2474     if(!vtctx->session) return 0;
2475
2476     VTCompressionSessionCompleteFrames(vtctx->session,
2477                                        kCMTimeIndefinite);
2478     clear_frame_queue(vtctx);
2479     CFRelease(vtctx->session);
2480     vtctx->session = NULL;
2481
2482     if (vtctx->color_primaries) {
2483         CFRelease(vtctx->color_primaries);
2484         vtctx->color_primaries = NULL;
2485     }
2486
2487     if (vtctx->transfer_function) {
2488         CFRelease(vtctx->transfer_function);
2489         vtctx->transfer_function = NULL;
2490     }
2491
2492     if (vtctx->ycbcr_matrix) {
2493         CFRelease(vtctx->ycbcr_matrix);
2494         vtctx->ycbcr_matrix = NULL;
2495     }
2496
2497     return 0;
2498 }
2499
2500 static const enum AVPixelFormat avc_pix_fmts[] = {
2501     AV_PIX_FMT_VIDEOTOOLBOX,
2502     AV_PIX_FMT_NV12,
2503     AV_PIX_FMT_YUV420P,
2504     AV_PIX_FMT_NONE
2505 };
2506
2507 static const enum AVPixelFormat hevc_pix_fmts[] = {
2508     AV_PIX_FMT_VIDEOTOOLBOX,
2509     AV_PIX_FMT_NV12,
2510     AV_PIX_FMT_YUV420P,
2511     AV_PIX_FMT_P010LE,
2512     AV_PIX_FMT_NONE
2513 };
2514
2515 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
2516 #define COMMON_OPTIONS \
2517     { "allow_sw", "Allow software encoding", OFFSET(allow_sw), AV_OPT_TYPE_BOOL, \
2518         { .i64 = 0 }, 0, 1, VE }, \
2519     { "require_sw", "Require software encoding", OFFSET(require_sw), AV_OPT_TYPE_BOOL, \
2520         { .i64 = 0 }, 0, 1, VE }, \
2521     { "realtime", "Hint that encoding should happen in real-time if not faster (e.g. capturing from camera).", \
2522         OFFSET(realtime), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2523     { "frames_before", "Other frames will come before the frames in this session. This helps smooth concatenation issues.", \
2524         OFFSET(frames_before), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2525     { "frames_after", "Other frames will come after the frames in this session. This helps smooth concatenation issues.", \
2526         OFFSET(frames_after), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2527
2528 #define OFFSET(x) offsetof(VTEncContext, x)
2529 static const AVOption h264_options[] = {
2530     { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = H264_PROF_AUTO }, H264_PROF_AUTO, H264_PROF_COUNT, VE, "profile" },
2531     { "baseline", "Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_BASELINE }, INT_MIN, INT_MAX, VE, "profile" },
2532     { "main",     "Main Profile",     0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_MAIN     }, INT_MIN, INT_MAX, VE, "profile" },
2533     { "high",     "High Profile",     0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_HIGH     }, INT_MIN, INT_MAX, VE, "profile" },
2534
2535     { "level", "Level", OFFSET(level), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 52, VE, "level" },
2536     { "1.3", "Level 1.3, only available with Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = 13 }, INT_MIN, INT_MAX, VE, "level" },
2537     { "3.0", "Level 3.0", 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, INT_MIN, INT_MAX, VE, "level" },
2538     { "3.1", "Level 3.1", 0, AV_OPT_TYPE_CONST, { .i64 = 31 }, INT_MIN, INT_MAX, VE, "level" },
2539     { "3.2", "Level 3.2", 0, AV_OPT_TYPE_CONST, { .i64 = 32 }, INT_MIN, INT_MAX, VE, "level" },
2540     { "4.0", "Level 4.0", 0, AV_OPT_TYPE_CONST, { .i64 = 40 }, INT_MIN, INT_MAX, VE, "level" },
2541     { "4.1", "Level 4.1", 0, AV_OPT_TYPE_CONST, { .i64 = 41 }, INT_MIN, INT_MAX, VE, "level" },
2542     { "4.2", "Level 4.2", 0, AV_OPT_TYPE_CONST, { .i64 = 42 }, INT_MIN, INT_MAX, VE, "level" },
2543     { "5.0", "Level 5.0", 0, AV_OPT_TYPE_CONST, { .i64 = 50 }, INT_MIN, INT_MAX, VE, "level" },
2544     { "5.1", "Level 5.1", 0, AV_OPT_TYPE_CONST, { .i64 = 51 }, INT_MIN, INT_MAX, VE, "level" },
2545     { "5.2", "Level 5.2", 0, AV_OPT_TYPE_CONST, { .i64 = 52 }, INT_MIN, INT_MAX, VE, "level" },
2546
2547     { "coder", "Entropy coding", OFFSET(entropy), AV_OPT_TYPE_INT, { .i64 = VT_ENTROPY_NOT_SET }, VT_ENTROPY_NOT_SET, VT_CABAC, VE, "coder" },
2548     { "cavlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
2549     { "vlc",   "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
2550     { "cabac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
2551     { "ac",    "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
2552
2553     { "a53cc", "Use A53 Closed Captions (if available)", OFFSET(a53_cc), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, VE },
2554
2555     COMMON_OPTIONS
2556     { NULL },
2557 };
2558
2559 static const AVClass h264_videotoolbox_class = {
2560     .class_name = "h264_videotoolbox",
2561     .item_name  = av_default_item_name,
2562     .option     = h264_options,
2563     .version    = LIBAVUTIL_VERSION_INT,
2564 };
2565
2566 AVCodec ff_h264_videotoolbox_encoder = {
2567     .name             = "h264_videotoolbox",
2568     .long_name        = NULL_IF_CONFIG_SMALL("VideoToolbox H.264 Encoder"),
2569     .type             = AVMEDIA_TYPE_VIDEO,
2570     .id               = AV_CODEC_ID_H264,
2571     .priv_data_size   = sizeof(VTEncContext),
2572     .pix_fmts         = avc_pix_fmts,
2573     .init             = vtenc_init,
2574     .encode2          = vtenc_frame,
2575     .close            = vtenc_close,
2576     .capabilities     = AV_CODEC_CAP_DELAY,
2577     .priv_class       = &h264_videotoolbox_class,
2578     .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE |
2579                         FF_CODEC_CAP_INIT_CLEANUP,
2580 };
2581
2582 static const AVOption hevc_options[] = {
2583     { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = HEVC_PROF_AUTO }, HEVC_PROF_AUTO, HEVC_PROF_COUNT, VE, "profile" },
2584     { "main",     "Main Profile",     0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN   }, INT_MIN, INT_MAX, VE, "profile" },
2585     { "main10",   "Main10 Profile",   0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN10 }, INT_MIN, INT_MAX, VE, "profile" },
2586
2587     COMMON_OPTIONS
2588     { NULL },
2589 };
2590
2591 static const AVClass hevc_videotoolbox_class = {
2592     .class_name = "hevc_videotoolbox",
2593     .item_name  = av_default_item_name,
2594     .option     = hevc_options,
2595     .version    = LIBAVUTIL_VERSION_INT,
2596 };
2597
2598 AVCodec ff_hevc_videotoolbox_encoder = {
2599     .name             = "hevc_videotoolbox",
2600     .long_name        = NULL_IF_CONFIG_SMALL("VideoToolbox H.265 Encoder"),
2601     .type             = AVMEDIA_TYPE_VIDEO,
2602     .id               = AV_CODEC_ID_HEVC,
2603     .priv_data_size   = sizeof(VTEncContext),
2604     .pix_fmts         = hevc_pix_fmts,
2605     .init             = vtenc_init,
2606     .encode2          = vtenc_frame,
2607     .close            = vtenc_close,
2608     .capabilities     = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE,
2609     .priv_class       = &hevc_videotoolbox_class,
2610     .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE |
2611                         FF_CODEC_CAP_INIT_CLEANUP,
2612     .wrapper_name     = "videotoolbox",
2613 };