1 /*
2  * copyright (c) 2015 Rick Kern <kernrj@gmail.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 #include <VideoToolbox/VideoToolbox.h>
22 #include <CoreVideo/CoreVideo.h>
23 #include <CoreMedia/CoreMedia.h>
24 #include <TargetConditionals.h>
25 #include <Availability.h>
26 #include "avcodec.h"
27 #include "libavutil/opt.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/avstring.h"
30 #include "libavcodec/avcodec.h"
31 #include "libavutil/pixdesc.h"
32 #include "internal.h"
33 #include <pthread.h>
34 #include "h264.h"
35 #include "h264_sei.h"
36 #include <dlfcn.h>
37
38 #if !HAVE_KCMVIDEOCODECTYPE_HEVC
39 enum { kCMVideoCodecType_HEVC = 'hvc1' };
40 #endif
41
42 typedef OSStatus (*getParameterSetAtIndex)(CMFormatDescriptionRef videoDesc,
43                                            size_t parameterSetIndex,
44                                            const uint8_t **parameterSetPointerOut,
45                                            size_t *parameterSetSizeOut,
46                                            size_t *parameterSetCountOut,
47                                            int *NALUnitHeaderLengthOut);
48
49 //These symbols may not be present
50 static struct{
51     CFStringRef kCVImageBufferColorPrimaries_ITU_R_2020;
52     CFStringRef kCVImageBufferTransferFunction_ITU_R_2020;
53     CFStringRef kCVImageBufferYCbCrMatrix_ITU_R_2020;
54
55     CFStringRef kVTCompressionPropertyKey_H264EntropyMode;
56     CFStringRef kVTH264EntropyMode_CAVLC;
57     CFStringRef kVTH264EntropyMode_CABAC;
58
59     CFStringRef kVTProfileLevel_H264_Baseline_4_0;
60     CFStringRef kVTProfileLevel_H264_Baseline_4_2;
61     CFStringRef kVTProfileLevel_H264_Baseline_5_0;
62     CFStringRef kVTProfileLevel_H264_Baseline_5_1;
63     CFStringRef kVTProfileLevel_H264_Baseline_5_2;
64     CFStringRef kVTProfileLevel_H264_Baseline_AutoLevel;
65     CFStringRef kVTProfileLevel_H264_Main_4_2;
66     CFStringRef kVTProfileLevel_H264_Main_5_1;
67     CFStringRef kVTProfileLevel_H264_Main_5_2;
68     CFStringRef kVTProfileLevel_H264_Main_AutoLevel;
69     CFStringRef kVTProfileLevel_H264_High_3_0;
70     CFStringRef kVTProfileLevel_H264_High_3_1;
71     CFStringRef kVTProfileLevel_H264_High_3_2;
72     CFStringRef kVTProfileLevel_H264_High_4_0;
73     CFStringRef kVTProfileLevel_H264_High_4_1;
74     CFStringRef kVTProfileLevel_H264_High_4_2;
75     CFStringRef kVTProfileLevel_H264_High_5_1;
76     CFStringRef kVTProfileLevel_H264_High_5_2;
77     CFStringRef kVTProfileLevel_H264_High_AutoLevel;
78
79     CFStringRef kVTProfileLevel_HEVC_Main_AutoLevel;
80     CFStringRef kVTProfileLevel_HEVC_Main10_AutoLevel;
81
82     CFStringRef kVTCompressionPropertyKey_RealTime;
83
84     CFStringRef kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder;
85     CFStringRef kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder;
86
87     getParameterSetAtIndex CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
88 } compat_keys;
89
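/*
 * GET_SYM(symbol, defaultVal): look up an optionally-present CFStringRef
 * constant with dlsym(). If the symbol is not exported at runtime, fall back
 * to a CFSTR() literal containing defaultVal.
 */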
90 #define GET_SYM(symbol, defaultVal)                                     \
91 do{                                                                     \
92     CFStringRef* handle = (CFStringRef*)dlsym(RTLD_DEFAULT, #symbol);   \
93     if(!handle)                                                         \
94         compat_keys.symbol = CFSTR(defaultVal);                         \
95     else                                                                \
96         compat_keys.symbol = *handle;                                   \
97 }while(0)
98
99 static pthread_once_t once_ctrl = PTHREAD_ONCE_INIT;
100
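/* Resolve all optional VideoToolbox/CoreVideo symbols once (via pthread_once). */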
101 static void loadVTEncSymbols(){
102     compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex =
103         (getParameterSetAtIndex)dlsym(
104             RTLD_DEFAULT,
105             "CMVideoFormatDescriptionGetHEVCParameterSetAtIndex"
106         );
107
108     GET_SYM(kCVImageBufferColorPrimaries_ITU_R_2020,   "ITU_R_2020");
109     GET_SYM(kCVImageBufferTransferFunction_ITU_R_2020, "ITU_R_2020");
110     GET_SYM(kCVImageBufferYCbCrMatrix_ITU_R_2020,      "ITU_R_2020");
111
112     GET_SYM(kVTCompressionPropertyKey_H264EntropyMode, "H264EntropyMode");
113     GET_SYM(kVTH264EntropyMode_CAVLC, "CAVLC");
114     GET_SYM(kVTH264EntropyMode_CABAC, "CABAC");
115
116     GET_SYM(kVTProfileLevel_H264_Baseline_4_0,       "H264_Baseline_4_0");
117     GET_SYM(kVTProfileLevel_H264_Baseline_4_2,       "H264_Baseline_4_2");
118     GET_SYM(kVTProfileLevel_H264_Baseline_5_0,       "H264_Baseline_5_0");
119     GET_SYM(kVTProfileLevel_H264_Baseline_5_1,       "H264_Baseline_5_1");
120     GET_SYM(kVTProfileLevel_H264_Baseline_5_2,       "H264_Baseline_5_2");
121     GET_SYM(kVTProfileLevel_H264_Baseline_AutoLevel, "H264_Baseline_AutoLevel");
122     GET_SYM(kVTProfileLevel_H264_Main_4_2,           "H264_Main_4_2");
123     GET_SYM(kVTProfileLevel_H264_Main_5_1,           "H264_Main_5_1");
124     GET_SYM(kVTProfileLevel_H264_Main_5_2,           "H264_Main_5_2");
125     GET_SYM(kVTProfileLevel_H264_Main_AutoLevel,     "H264_Main_AutoLevel");
126     GET_SYM(kVTProfileLevel_H264_High_3_0,           "H264_High_3_0");
127     GET_SYM(kVTProfileLevel_H264_High_3_1,           "H264_High_3_1");
128     GET_SYM(kVTProfileLevel_H264_High_3_2,           "H264_High_3_2");
129     GET_SYM(kVTProfileLevel_H264_High_4_0,           "H264_High_4_0");
130     GET_SYM(kVTProfileLevel_H264_High_4_1,           "H264_High_4_1");
131     GET_SYM(kVTProfileLevel_H264_High_4_2,           "H264_High_4_2");
132     GET_SYM(kVTProfileLevel_H264_High_5_1,           "H264_High_5_1");
133     GET_SYM(kVTProfileLevel_H264_High_5_2,           "H264_High_5_2");
134     GET_SYM(kVTProfileLevel_H264_High_AutoLevel,     "H264_High_AutoLevel");
135
136     GET_SYM(kVTProfileLevel_HEVC_Main_AutoLevel,     "HEVC_Main_AutoLevel");
137     GET_SYM(kVTProfileLevel_HEVC_Main10_AutoLevel,   "HEVC_Main10_AutoLevel");
138
139     GET_SYM(kVTCompressionPropertyKey_RealTime, "RealTime");
140
141     GET_SYM(kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
142             "EnableHardwareAcceleratedVideoEncoder");
143     GET_SYM(kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
144             "RequireHardwareAcceleratedVideoEncoder");
145 }
146
147 typedef enum VT_H264Profile {
148     H264_PROF_AUTO,
149     H264_PROF_BASELINE,
150     H264_PROF_MAIN,
151     H264_PROF_HIGH,
152     H264_PROF_COUNT
153 } VT_H264Profile;
154
155 typedef enum VTH264Entropy{
156     VT_ENTROPY_NOT_SET,
157     VT_CAVLC,
158     VT_CABAC
159 } VTH264Entropy;
160
161 typedef enum VT_HEVCProfile {
162     HEVC_PROF_AUTO,
163     HEVC_PROF_MAIN,
164     HEVC_PROF_MAIN10,
165     HEVC_PROF_COUNT
166 } VT_HEVCProfile;
167
168 static const uint8_t start_code[] = { 0, 0, 0, 1 };
169
170 typedef struct ExtraSEI {
171     void *data;
172     size_t size;
173 } ExtraSEI;
174
175 typedef struct BufNode {
176     CMSampleBufferRef cm_buffer;
177     ExtraSEI *sei;
178     struct BufNode* next;
179     int error;
180 } BufNode;
181
182 typedef struct VTEncContext {
183     AVClass *class;
184     enum AVCodecID codec_id;
185     VTCompressionSessionRef session;
186     CFStringRef ycbcr_matrix;
187     CFStringRef color_primaries;
188     CFStringRef transfer_function;
189     getParameterSetAtIndex get_param_set_func;
190
191     pthread_mutex_t lock;
192     pthread_cond_t  cv_sample_sent;
193
194     int async_error;
195
196     BufNode *q_head;
197     BufNode *q_tail;
198
199     int64_t frame_ct_out;
200     int64_t frame_ct_in;
201
202     int64_t first_pts;
203     int64_t dts_delta;
204
205     int64_t profile;
206     int64_t level;
207     int64_t entropy;
208     int64_t realtime;
209     int64_t frames_before;
210     int64_t frames_after;
211
212     int64_t allow_sw;
213     int64_t require_sw;
214
215     bool flushing;
216     bool has_b_frames;
217     bool warned_color_range;
218     bool a53_cc;
219 } VTEncContext;
220
221 static int vtenc_populate_extradata(AVCodecContext   *avctx,
222                                     CMVideoCodecType codec_type,
223                                     CFStringRef      profile_level,
224                                     CFNumberRef      gamma_level,
225                                     CFDictionaryRef  enc_info,
226                                     CFDictionaryRef  pixel_buffer_info);
227
228 /**
229  * NULL-safe release of *refPtr; sets *refPtr to NULL afterwards.
230  */
231 static void vt_release_num(CFNumberRef* refPtr){
232     if (!*refPtr) {
233         return;
234     }
235
236     CFRelease(*refPtr);
237     *refPtr = NULL;
238 }
239
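/*
 * Record an asynchronous encode error and drop every queued output sample.
 * clear_frame_queue() reuses this with err == 0 to simply drain the queue.
 */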
240 static void set_async_error(VTEncContext *vtctx, int err)
241 {
242     BufNode *info;
243
244     pthread_mutex_lock(&vtctx->lock);
245
246     vtctx->async_error = err;
247
248     info = vtctx->q_head;
249     vtctx->q_head = vtctx->q_tail = NULL;
250
251     while (info) {
252         BufNode *next = info->next;
253         CFRelease(info->cm_buffer);
254         av_free(info);
255         info = next;
256     }
257
258     pthread_mutex_unlock(&vtctx->lock);
259 }
260
261 static void clear_frame_queue(VTEncContext *vtctx)
262 {
263     set_async_error(vtctx, 0);
264 }
265
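/*
 * Pop the next encoded sample (and its optional SEI side data) from the
 * output queue. With wait set, blocks until a sample arrives, an async error
 * is recorded, or the encoder has been flushed; *buf is NULL when drained.
 */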
266 static int vtenc_q_pop(VTEncContext *vtctx, bool wait, CMSampleBufferRef *buf, ExtraSEI **sei)
267 {
268     BufNode *info;
269
270     pthread_mutex_lock(&vtctx->lock);
271
272     if (vtctx->async_error) {
273         pthread_mutex_unlock(&vtctx->lock);
274         return vtctx->async_error;
275     }
276
277     if (vtctx->flushing && vtctx->frame_ct_in == vtctx->frame_ct_out) {
278         *buf = NULL;
279
280         pthread_mutex_unlock(&vtctx->lock);
281         return 0;
282     }
283
284     while (!vtctx->q_head && !vtctx->async_error && wait) {
285         pthread_cond_wait(&vtctx->cv_sample_sent, &vtctx->lock);
286     }
287
288     if (!vtctx->q_head) {
289         pthread_mutex_unlock(&vtctx->lock);
290         *buf = NULL;
291         return 0;
292     }
293
294     info = vtctx->q_head;
295     vtctx->q_head = vtctx->q_head->next;
296     if (!vtctx->q_head) {
297         vtctx->q_tail = NULL;
298     }
299
300     pthread_mutex_unlock(&vtctx->lock);
301
302     *buf = info->cm_buffer;
303     if (sei && *buf) {
304         *sei = info->sei;
305     } else if (info->sei) {
306         if (info->sei->data) av_free(info->sei->data);
307         av_free(info->sei);
308     }
309     av_free(info);
310
311     vtctx->frame_ct_out++;
312
313     return 0;
314 }
315
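/*
 * Append an encoded sample (plus optional SEI payload) to the output queue
 * and wake any thread waiting in vtenc_q_pop().
 */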
316 static void vtenc_q_push(VTEncContext *vtctx, CMSampleBufferRef buffer, ExtraSEI *sei)
317 {
318     BufNode *info = av_malloc(sizeof(BufNode));
319     if (!info) {
320         set_async_error(vtctx, AVERROR(ENOMEM));
321         return;
322     }
323
324     CFRetain(buffer);
325     info->cm_buffer = buffer;
326     info->sei = sei;
327     info->next = NULL;
328
329     pthread_mutex_lock(&vtctx->lock);
330     pthread_cond_signal(&vtctx->cv_sample_sent);
331
332     if (!vtctx->q_head) {
333         vtctx->q_head = info;
334     } else {
335         vtctx->q_tail->next = info;
336     }
337
338     vtctx->q_tail = info;
339
340     pthread_mutex_unlock(&vtctx->lock);
341 }
342
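/*
 * Count the NAL units in a sample buffer that uses length-prefixed
 * (AVCC/HVCC-style) framing by walking the length fields.
 */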
343 static int count_nalus(size_t length_code_size,
344                        CMSampleBufferRef sample_buffer,
345                        int *count)
346 {
347     size_t offset = 0;
348     int status;
349     int nalu_ct = 0;
350     uint8_t size_buf[4];
351     size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
352     CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
353
354     if (length_code_size > 4)
355         return AVERROR_INVALIDDATA;
356
357     while (offset < src_size) {
358         size_t curr_src_len;
359         size_t box_len = 0;
360         size_t i;
361
362         status = CMBlockBufferCopyDataBytes(block, offset,
363                                             length_code_size, size_buf);
364         if (status != kCMBlockBufferNoErr)
365             return AVERROR_EXTERNAL;
366
367         for (i = 0; i < length_code_size; i++) {
368             box_len <<= 8;
369             box_len |= size_buf[i];
370         }
371
372         curr_src_len = box_len + length_code_size;
373         offset += curr_src_len;
374
375         nalu_ct++;
376     }
377
378     *count = nalu_ct;
379     return 0;
380 }
381
382 static CMVideoCodecType get_cm_codec_type(enum AVCodecID id)
383 {
384     switch (id) {
385     case AV_CODEC_ID_H264: return kCMVideoCodecType_H264;
386     case AV_CODEC_ID_HEVC: return kCMVideoCodecType_HEVC;
387     default:               return 0;
388     }
389 }
390
391 /**
392  * Get the total size of the parameter sets in vid_fmt.
393  * Each parameter set is counted together with a preceding Annex B start
394  * code, so the result is the buffer size needed by copy_param_sets().
395  * @param size Receives the total number of bytes required.
396  */
399 static int get_params_size(
400     AVCodecContext              *avctx,
401     CMVideoFormatDescriptionRef vid_fmt,
402     size_t                      *size)
403 {
404     VTEncContext *vtctx = avctx->priv_data;
405     size_t total_size = 0;
406     size_t ps_count;
407     int is_count_bad = 0;
408     size_t i;
409     int status;
410     status = vtctx->get_param_set_func(vid_fmt,
411                                        0,
412                                        NULL,
413                                        NULL,
414                                        &ps_count,
415                                        NULL);
416     if (status) {
417         is_count_bad = 1;
418         ps_count     = 0;
419         status       = 0;
420     }
421
422     for (i = 0; i < ps_count || is_count_bad; i++) {
423         const uint8_t *ps;
424         size_t ps_size;
425         status = vtctx->get_param_set_func(vid_fmt,
426                                            i,
427                                            &ps,
428                                            &ps_size,
429                                            NULL,
430                                            NULL);
431         if (status) {
432             /*
433              * When ps_count is unreliable (is_count_bad), a non-zero status just
434              * marks the end of the parameter sets, unless none were retrieved.
435              */
436             if (i > 0 && is_count_bad) status = 0;
437
438             break;
439         }
440
441         total_size += ps_size + sizeof(start_code);
442     }
443
444     if (status) {
445         av_log(avctx, AV_LOG_ERROR, "Error getting parameter set sizes: %d\n", status);
446         return AVERROR_EXTERNAL;
447     }
448
449     *size = total_size;
450     return 0;
451 }
452
453 static int copy_param_sets(
454     AVCodecContext              *avctx,
455     CMVideoFormatDescriptionRef vid_fmt,
456     uint8_t                     *dst,
457     size_t                      dst_size)
458 {
459     VTEncContext *vtctx = avctx->priv_data;
460     size_t ps_count;
461     int is_count_bad = 0;
462     int status;
463     size_t offset = 0;
464     size_t i;
465
466     status = vtctx->get_param_set_func(vid_fmt,
467                                        0,
468                                        NULL,
469                                        NULL,
470                                        &ps_count,
471                                        NULL);
472     if (status) {
473         is_count_bad = 1;
474         ps_count     = 0;
475         status       = 0;
476     }
477
478
479     for (i = 0; i < ps_count || is_count_bad; i++) {
480         const uint8_t *ps;
481         size_t ps_size;
482         size_t next_offset;
483
484         status = vtctx->get_param_set_func(vid_fmt,
485                                            i,
486                                            &ps,
487                                            &ps_size,
488                                            NULL,
489                                            NULL);
490         if (status) {
491             if (i > 0 && is_count_bad) status = 0;
492
493             break;
494         }
495
496         next_offset = offset + sizeof(start_code) + ps_size;
497         if (dst_size < next_offset) {
498             av_log(avctx, AV_LOG_ERROR, "Error: buffer too small for parameter sets.\n");
499             return AVERROR_BUFFER_TOO_SMALL;
500         }
501
502         memcpy(dst + offset, start_code, sizeof(start_code));
503         offset += sizeof(start_code);
504
505         memcpy(dst + offset, ps, ps_size);
506         offset = next_offset;
507     }
508
509     if (status) {
510         av_log(avctx, AV_LOG_ERROR, "Error getting parameter set data: %d\n", status);
511         return AVERROR_EXTERNAL;
512     }
513
514     return 0;
515 }
516
517 static int set_extradata(AVCodecContext *avctx, CMSampleBufferRef sample_buffer)
518 {
519     CMVideoFormatDescriptionRef vid_fmt;
520     size_t total_size;
521     int status;
522
523     vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
524     if (!vid_fmt) {
525         av_log(avctx, AV_LOG_ERROR, "No video format.\n");
526         return AVERROR_EXTERNAL;
527     }
528
529     status = get_params_size(avctx, vid_fmt, &total_size);
530     if (status) {
531         av_log(avctx, AV_LOG_ERROR, "Could not get parameter sets.\n");
532         return status;
533     }
534
535     avctx->extradata = av_mallocz(total_size + AV_INPUT_BUFFER_PADDING_SIZE);
536     if (!avctx->extradata) {
537         return AVERROR(ENOMEM);
538     }
539     avctx->extradata_size = total_size;
540
541     status = copy_param_sets(avctx, vid_fmt, avctx->extradata, total_size);
542
543     if (status) {
544         av_log(avctx, AV_LOG_ERROR, "Could not copy param sets.\n");
545         return status;
546     }
547
548     return 0;
549 }
550
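/*
 * VTCompressionSession output callback. Runs on a VideoToolbox thread:
 * records errors, fills in global-header extradata from the first sample if
 * needed, and pushes the sample onto the output queue.
 */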
551 static void vtenc_output_callback(
552     void *ctx,
553     void *sourceFrameCtx,
554     OSStatus status,
555     VTEncodeInfoFlags flags,
556     CMSampleBufferRef sample_buffer)
557 {
558     AVCodecContext *avctx = ctx;
559     VTEncContext   *vtctx = avctx->priv_data;
560     ExtraSEI *sei = sourceFrameCtx;
561
562     if (vtctx->async_error) {
563         if(sample_buffer) CFRelease(sample_buffer);
564         return;
565     }
566
567     if (status || !sample_buffer) {
568         av_log(avctx, AV_LOG_ERROR, "Error encoding frame: %d\n", (int)status);
569         set_async_error(vtctx, AVERROR_EXTERNAL);
570         return;
571     }
572
573     if (!avctx->extradata && (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
574         int set_status = set_extradata(avctx, sample_buffer);
575         if (set_status) {
576             set_async_error(vtctx, set_status);
577             return;
578         }
579     }
580
581     vtenc_q_push(vtctx, sample_buffer, sei);
582 }
583
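/* Query the byte length of the NAL unit length field used by this sample buffer. */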
584 static int get_length_code_size(
585     AVCodecContext    *avctx,
586     CMSampleBufferRef sample_buffer,
587     size_t            *size)
588 {
589     VTEncContext *vtctx = avctx->priv_data;
590     CMVideoFormatDescriptionRef vid_fmt;
591     int isize;
592     int status;
593
594     vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
595     if (!vid_fmt) {
596         av_log(avctx, AV_LOG_ERROR, "Error getting buffer format description.\n");
597         return AVERROR_EXTERNAL;
598     }
599
600     status = vtctx->get_param_set_func(vid_fmt,
601                                        0,
602                                        NULL,
603                                        NULL,
604                                        NULL,
605                                        &isize);
606     if (status) {
607         av_log(avctx, AV_LOG_ERROR, "Error getting length code size: %d\n", status);
608         return AVERROR_EXTERNAL;
609     }
610
611     *size = isize;
612     return 0;
613 }
614
615 /*
616  * Returns true on success.
617  *
618  * If *profile_level_val is set to NULL and this function returns true, do not
619  * specify a profile/level to the encoder.
620  */
621 static bool get_vt_h264_profile_level(AVCodecContext *avctx,
622                                       CFStringRef    *profile_level_val)
623 {
624     VTEncContext *vtctx = avctx->priv_data;
625     int64_t profile = vtctx->profile;
626
627     if (profile == H264_PROF_AUTO && vtctx->level) {
628         //Need to pick a profile if level is not auto-selected.
629         profile = vtctx->has_b_frames ? H264_PROF_MAIN : H264_PROF_BASELINE;
630     }
631
632     *profile_level_val = NULL;
633
634     switch (profile) {
635         case H264_PROF_AUTO:
636             return true;
637
638         case H264_PROF_BASELINE:
639             switch (vtctx->level) {
640                 case  0: *profile_level_val =
641                                   compat_keys.kVTProfileLevel_H264_Baseline_AutoLevel; break;
642                 case 13: *profile_level_val = kVTProfileLevel_H264_Baseline_1_3;       break;
643                 case 30: *profile_level_val = kVTProfileLevel_H264_Baseline_3_0;       break;
644                 case 31: *profile_level_val = kVTProfileLevel_H264_Baseline_3_1;       break;
645                 case 32: *profile_level_val = kVTProfileLevel_H264_Baseline_3_2;       break;
646                 case 40: *profile_level_val =
647                                   compat_keys.kVTProfileLevel_H264_Baseline_4_0;       break;
648                 case 41: *profile_level_val = kVTProfileLevel_H264_Baseline_4_1;       break;
649                 case 42: *profile_level_val =
650                                   compat_keys.kVTProfileLevel_H264_Baseline_4_2;       break;
651                 case 50: *profile_level_val =
652                                   compat_keys.kVTProfileLevel_H264_Baseline_5_0;       break;
653                 case 51: *profile_level_val =
654                                   compat_keys.kVTProfileLevel_H264_Baseline_5_1;       break;
655                 case 52: *profile_level_val =
656                                   compat_keys.kVTProfileLevel_H264_Baseline_5_2;       break;
657             }
658             break;
659
660         case H264_PROF_MAIN:
661             switch (vtctx->level) {
662                 case  0: *profile_level_val =
663                                   compat_keys.kVTProfileLevel_H264_Main_AutoLevel; break;
664                 case 30: *profile_level_val = kVTProfileLevel_H264_Main_3_0;       break;
665                 case 31: *profile_level_val = kVTProfileLevel_H264_Main_3_1;       break;
666                 case 32: *profile_level_val = kVTProfileLevel_H264_Main_3_2;       break;
667                 case 40: *profile_level_val = kVTProfileLevel_H264_Main_4_0;       break;
668                 case 41: *profile_level_val = kVTProfileLevel_H264_Main_4_1;       break;
669                 case 42: *profile_level_val =
670                                   compat_keys.kVTProfileLevel_H264_Main_4_2;       break;
671                 case 50: *profile_level_val = kVTProfileLevel_H264_Main_5_0;       break;
672                 case 51: *profile_level_val =
673                                   compat_keys.kVTProfileLevel_H264_Main_5_1;       break;
674                 case 52: *profile_level_val =
675                                   compat_keys.kVTProfileLevel_H264_Main_5_2;       break;
676             }
677             break;
678
679         case H264_PROF_HIGH:
680             switch (vtctx->level) {
681                 case  0: *profile_level_val =
682                                   compat_keys.kVTProfileLevel_H264_High_AutoLevel; break;
683                 case 30: *profile_level_val =
684                                   compat_keys.kVTProfileLevel_H264_High_3_0;       break;
685                 case 31: *profile_level_val =
686                                   compat_keys.kVTProfileLevel_H264_High_3_1;       break;
687                 case 32: *profile_level_val =
688                                   compat_keys.kVTProfileLevel_H264_High_3_2;       break;
689                 case 40: *profile_level_val =
690                                   compat_keys.kVTProfileLevel_H264_High_4_0;       break;
691                 case 41: *profile_level_val =
692                                   compat_keys.kVTProfileLevel_H264_High_4_1;       break;
693                 case 42: *profile_level_val =
694                                   compat_keys.kVTProfileLevel_H264_High_4_2;       break;
695                 case 50: *profile_level_val = kVTProfileLevel_H264_High_5_0;       break;
696                 case 51: *profile_level_val =
697                                   compat_keys.kVTProfileLevel_H264_High_5_1;       break;
698                 case 52: *profile_level_val =
699                                   compat_keys.kVTProfileLevel_H264_High_5_2;       break;
700             }
701             break;
702     }
703
704     if (!*profile_level_val) {
705         av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
706         return false;
707     }
708
709     return true;
710 }
711
712 /*
713  * Returns true on success.
714  *
715  * If *profile_level_val is set to NULL and this function returns true, do not
716  * specify a profile/level to the encoder.
717  */
718 static bool get_vt_hevc_profile_level(AVCodecContext *avctx,
719                                       CFStringRef    *profile_level_val)
720 {
721     VTEncContext *vtctx = avctx->priv_data;
722     int64_t profile = vtctx->profile;
723
724     *profile_level_val = NULL;
725
726     switch (profile) {
727         case HEVC_PROF_AUTO:
728             return true;
729         case HEVC_PROF_MAIN:
730             *profile_level_val =
731                 compat_keys.kVTProfileLevel_HEVC_Main_AutoLevel;
732             break;
733         case HEVC_PROF_MAIN10:
734             *profile_level_val =
735                 compat_keys.kVTProfileLevel_HEVC_Main10_AutoLevel;
736             break;
737     }
738
739     if (!*profile_level_val) {
740         av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
741         return false;
742     }
743
744     return true;
745 }
746
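/*
 * Map an AVPixelFormat/AVColorRange pair to a CoreVideo pixel format.
 * *range_guessed is set when no explicit color range was signalled.
 */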
747 static int get_cv_pixel_format(AVCodecContext* avctx,
748                                enum AVPixelFormat fmt,
749                                enum AVColorRange range,
750                                int* av_pixel_format,
751                                int* range_guessed)
752 {
753     if (range_guessed) *range_guessed = range != AVCOL_RANGE_MPEG &&
754                                         range != AVCOL_RANGE_JPEG;
755
756     //MPEG range is used when no range is set
757     if (fmt == AV_PIX_FMT_NV12) {
758         *av_pixel_format = range == AVCOL_RANGE_JPEG ?
759                                         kCVPixelFormatType_420YpCbCr8BiPlanarFullRange :
760                                         kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
761     } else if (fmt == AV_PIX_FMT_YUV420P) {
762         *av_pixel_format = range == AVCOL_RANGE_JPEG ?
763                                         kCVPixelFormatType_420YpCbCr8PlanarFullRange :
764                                         kCVPixelFormatType_420YpCbCr8Planar;
765     } else {
766         return AVERROR(EINVAL);
767     }
768
769     return 0;
770 }
771
772 static void add_color_attr(AVCodecContext *avctx, CFMutableDictionaryRef dict) {
773     VTEncContext *vtctx = avctx->priv_data;
774
775     if (vtctx->color_primaries) {
776         CFDictionarySetValue(dict,
777                              kCVImageBufferColorPrimariesKey,
778                              vtctx->color_primaries);
779     }
780
781     if (vtctx->transfer_function) {
782         CFDictionarySetValue(dict,
783                              kCVImageBufferTransferFunctionKey,
784                              vtctx->transfer_function);
785     }
786
787     if (vtctx->ycbcr_matrix) {
788         CFDictionarySetValue(dict,
789                              kCVImageBufferYCbCrMatrixKey,
790                              vtctx->ycbcr_matrix);
791     }
792 }
793
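/*
 * Build the source pixel buffer attributes (pixel format, width, height and
 * color tags) that are passed to VTCompressionSessionCreate().
 */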
794 static int create_cv_pixel_buffer_info(AVCodecContext* avctx,
795                                        CFMutableDictionaryRef* dict)
796 {
797     CFNumberRef cv_color_format_num = NULL;
798     CFNumberRef width_num = NULL;
799     CFNumberRef height_num = NULL;
800     CFMutableDictionaryRef pixel_buffer_info = NULL;
801     int cv_color_format;
802     int status = get_cv_pixel_format(avctx,
803                                      avctx->pix_fmt,
804                                      avctx->color_range,
805                                      &cv_color_format,
806                                      NULL);
807     if (status) return status;
808
809     pixel_buffer_info = CFDictionaryCreateMutable(
810                             kCFAllocatorDefault,
811                             20,
812                             &kCFCopyStringDictionaryKeyCallBacks,
813                             &kCFTypeDictionaryValueCallBacks);
814
815     if (!pixel_buffer_info) goto pbinfo_nomem;
816
817     cv_color_format_num = CFNumberCreate(kCFAllocatorDefault,
818                                          kCFNumberSInt32Type,
819                                          &cv_color_format);
820     if (!cv_color_format_num) goto pbinfo_nomem;
821
822     CFDictionarySetValue(pixel_buffer_info,
823                          kCVPixelBufferPixelFormatTypeKey,
824                          cv_color_format_num);
825     vt_release_num(&cv_color_format_num);
826
827     width_num = CFNumberCreate(kCFAllocatorDefault,
828                                kCFNumberSInt32Type,
829                                &avctx->width);
830     if (!width_num) return AVERROR(ENOMEM);
831
832     CFDictionarySetValue(pixel_buffer_info,
833                          kCVPixelBufferWidthKey,
834                          width_num);
835     vt_release_num(&width_num);
836
837     height_num = CFNumberCreate(kCFAllocatorDefault,
838                                 kCFNumberSInt32Type,
839                                 &avctx->height);
840     if (!height_num) goto pbinfo_nomem;
841
842     CFDictionarySetValue(pixel_buffer_info,
843                          kCVPixelBufferHeightKey,
844                          height_num);
845     vt_release_num(&height_num);
846
847     add_color_attr(avctx, pixel_buffer_info);
848
849     *dict = pixel_buffer_info;
850     return 0;
851
852 pbinfo_nomem:
853     vt_release_num(&cv_color_format_num);
854     vt_release_num(&width_num);
855     vt_release_num(&height_num);
856     if (pixel_buffer_info) CFRelease(pixel_buffer_info);
857
858     return AVERROR(ENOMEM);
859 }
860
861 static int get_cv_color_primaries(AVCodecContext *avctx,
862                                   CFStringRef *primaries)
863 {
864     enum AVColorPrimaries pri = avctx->color_primaries;
865     switch (pri) {
866         case AVCOL_PRI_UNSPECIFIED:
867             *primaries = NULL;
868             break;
869
870         case AVCOL_PRI_BT709:
871             *primaries = kCVImageBufferColorPrimaries_ITU_R_709_2;
872             break;
873
874         case AVCOL_PRI_BT2020:
875             *primaries = compat_keys.kCVImageBufferColorPrimaries_ITU_R_2020;
876             break;
877
878         default:
879             av_log(avctx, AV_LOG_ERROR, "Color primaries %s is not supported.\n", av_color_primaries_name(pri));
880             *primaries = NULL;
881             return -1;
882     }
883
884     return 0;
885 }
886
887 static int get_cv_transfer_function(AVCodecContext *avctx,
888                                     CFStringRef *transfer_fnc,
889                                     CFNumberRef *gamma_level)
890 {
891     enum AVColorTransferCharacteristic trc = avctx->color_trc;
892     Float32 gamma;
893     *gamma_level = NULL;
894
895     switch (trc) {
896         case AVCOL_TRC_UNSPECIFIED:
897             *transfer_fnc = NULL;
898             break;
899
900         case AVCOL_TRC_BT709:
901             *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_709_2;
902             break;
903
904         case AVCOL_TRC_SMPTE240M:
905             *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_240M_1995;
906             break;
907
908         case AVCOL_TRC_GAMMA22:
909             gamma = 2.2;
910             *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
911             *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
912             break;
913
914         case AVCOL_TRC_GAMMA28:
915             gamma = 2.8;
916             *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
917             *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
918             break;
919
920         case AVCOL_TRC_BT2020_10:
921         case AVCOL_TRC_BT2020_12:
922             *transfer_fnc = compat_keys.kCVImageBufferTransferFunction_ITU_R_2020;
923             break;
924
925         default:
926             av_log(avctx, AV_LOG_ERROR, "Transfer function %s is not supported.\n", av_color_transfer_name(trc));
927             return -1;
928     }
929
930     return 0;
931 }
932
933 static int get_cv_ycbcr_matrix(AVCodecContext *avctx, CFStringRef *matrix) {
934     switch(avctx->colorspace) {
935         case AVCOL_SPC_BT709:
936             *matrix = kCVImageBufferYCbCrMatrix_ITU_R_709_2;
937             break;
938
939         case AVCOL_SPC_UNSPECIFIED:
940             *matrix = NULL;
941             break;
942
943         case AVCOL_SPC_BT470BG:
944         case AVCOL_SPC_SMPTE170M:
945             *matrix = kCVImageBufferYCbCrMatrix_ITU_R_601_4;
946             break;
947
948         case AVCOL_SPC_SMPTE240M:
949             *matrix = kCVImageBufferYCbCrMatrix_SMPTE_240M_1995;
950             break;
951
952         case AVCOL_SPC_BT2020_NCL:
953             *matrix = compat_keys.kCVImageBufferYCbCrMatrix_ITU_R_2020;
954             break;
955
956         default:
957             av_log(avctx, AV_LOG_ERROR, "Color space %s is not supported.\n", av_color_space_name(avctx->colorspace));
958             return -1;
959     }
960
961     return 0;
962 }
963
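/*
 * Create the VTCompressionSession and apply the encoder settings: average
 * bitrate, data-rate limit (H.264 only), profile/level, keyframe interval,
 * pixel aspect ratio, color properties, frame reordering, entropy coding and
 * the realtime flag.
 */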
964 static int vtenc_create_encoder(AVCodecContext   *avctx,
965                                 CMVideoCodecType codec_type,
966                                 CFStringRef      profile_level,
967                                 CFNumberRef      gamma_level,
968                                 CFDictionaryRef  enc_info,
969                                 CFDictionaryRef  pixel_buffer_info,
970                                 VTCompressionSessionRef *session)
971 {
972     VTEncContext *vtctx = avctx->priv_data;
973     SInt32       bit_rate = avctx->bit_rate;
974     SInt32       max_rate = avctx->rc_max_rate;
975     CFNumberRef  bit_rate_num;
976     CFNumberRef  bytes_per_second;
977     CFNumberRef  one_second;
978     CFArrayRef   data_rate_limits;
979     int64_t      bytes_per_second_value = 0;
980     int64_t      one_second_value = 0;
981     void         *nums[2];
982
983     int status = VTCompressionSessionCreate(kCFAllocatorDefault,
984                                             avctx->width,
985                                             avctx->height,
986                                             codec_type,
987                                             enc_info,
988                                             pixel_buffer_info,
989                                             kCFAllocatorDefault,
990                                             vtenc_output_callback,
991                                             avctx,
992                                             session);
993
994     if (status || !vtctx->session) {
995         av_log(avctx, AV_LOG_ERROR, "Error: cannot create compression session: %d\n", status);
996
997 #if !TARGET_OS_IPHONE
998         if (!vtctx->allow_sw) {
999             av_log(avctx, AV_LOG_ERROR, "Try -allow_sw 1. The hardware encoder may be busy, or not supported.\n");
1000         }
1001 #endif
1002
1003         return AVERROR_EXTERNAL;
1004     }
1005
1006     bit_rate_num = CFNumberCreate(kCFAllocatorDefault,
1007                                   kCFNumberSInt32Type,
1008                                   &bit_rate);
1009     if (!bit_rate_num) return AVERROR(ENOMEM);
1010
1011     status = VTSessionSetProperty(vtctx->session,
1012                                   kVTCompressionPropertyKey_AverageBitRate,
1013                                   bit_rate_num);
1014     CFRelease(bit_rate_num);
1015
1016     if (status) {
1017         av_log(avctx, AV_LOG_ERROR, "Error setting bitrate property: %d\n", status);
1018         return AVERROR_EXTERNAL;
1019     }
1020
1021     if (vtctx->codec_id == AV_CODEC_ID_H264 && max_rate > 0) {
1022         // kVTCompressionPropertyKey_DataRateLimits is not available for HEVC
1023         bytes_per_second_value = max_rate >> 3;
1024         bytes_per_second = CFNumberCreate(kCFAllocatorDefault,
1025                                           kCFNumberSInt64Type,
1026                                           &bytes_per_second_value);
1027         if (!bytes_per_second) {
1028             return AVERROR(ENOMEM);
1029         }
1030         one_second_value = 1;
1031         one_second = CFNumberCreate(kCFAllocatorDefault,
1032                                     kCFNumberSInt64Type,
1033                                     &one_second_value);
1034         if (!one_second) {
1035             CFRelease(bytes_per_second);
1036             return AVERROR(ENOMEM);
1037         }
1038         nums[0] = (void *)bytes_per_second;
1039         nums[1] = (void *)one_second;
1040         data_rate_limits = CFArrayCreate(kCFAllocatorDefault,
1041                                          (const void **)nums,
1042                                          2,
1043                                          &kCFTypeArrayCallBacks);
1044
1045         if (!data_rate_limits) {
1046             CFRelease(bytes_per_second);
1047             CFRelease(one_second);
1048             return AVERROR(ENOMEM);
1049         }
1050         status = VTSessionSetProperty(vtctx->session,
1051                                       kVTCompressionPropertyKey_DataRateLimits,
1052                                       data_rate_limits);
1053
1054         CFRelease(bytes_per_second);
1055         CFRelease(one_second);
1056         CFRelease(data_rate_limits);
1057
1058         if (status) {
1059             av_log(avctx, AV_LOG_ERROR, "Error setting max bitrate property: %d\n", status);
1060             return AVERROR_EXTERNAL;
1061         }
1062     }
1063
1064     if (vtctx->codec_id == AV_CODEC_ID_H264) {
1065         // kVTCompressionPropertyKey_ProfileLevel is not available for HEVC
1066         if (profile_level) {
1067             status = VTSessionSetProperty(vtctx->session,
1068                                         kVTCompressionPropertyKey_ProfileLevel,
1069                                         profile_level);
1070             if (status) {
1071                 av_log(avctx, AV_LOG_ERROR, "Error setting profile/level property: %d\n", status);
1072             }
1073         }
1074     }
1075
1076     if (avctx->gop_size > 0) {
1077         CFNumberRef interval = CFNumberCreate(kCFAllocatorDefault,
1078                                               kCFNumberIntType,
1079                                               &avctx->gop_size);
1080         if (!interval) {
1081             return AVERROR(ENOMEM);
1082         }
1083
1084         status = VTSessionSetProperty(vtctx->session,
1085                                       kVTCompressionPropertyKey_MaxKeyFrameInterval,
1086                                       interval);
1087         CFRelease(interval);
1088
1089         if (status) {
1090             av_log(avctx, AV_LOG_ERROR, "Error setting 'max key-frame interval' property: %d\n", status);
1091             return AVERROR_EXTERNAL;
1092         }
1093     }
1094
1095     if (vtctx->frames_before) {
1096         status = VTSessionSetProperty(vtctx->session,
1097                                       kVTCompressionPropertyKey_MoreFramesBeforeStart,
1098                                       kCFBooleanTrue);
1099
1100         if (status == kVTPropertyNotSupportedErr) {
1101             av_log(avctx, AV_LOG_WARNING, "frames_before property is not supported on this device. Ignoring.\n");
1102         } else if (status) {
1103             av_log(avctx, AV_LOG_ERROR, "Error setting frames_before property: %d\n", status);
1104         }
1105     }
1106
1107     if (vtctx->frames_after) {
1108         status = VTSessionSetProperty(vtctx->session,
1109                                       kVTCompressionPropertyKey_MoreFramesAfterEnd,
1110                                       kCFBooleanTrue);
1111
1112         if (status == kVTPropertyNotSupportedErr) {
1113             av_log(avctx, AV_LOG_WARNING, "frames_after property is not supported on this device. Ignoring.\n");
1114         } else if (status) {
1115             av_log(avctx, AV_LOG_ERROR, "Error setting frames_after property: %d\n", status);
1116         }
1117     }
1118
1119     if (avctx->sample_aspect_ratio.num != 0) {
1120         CFNumberRef num;
1121         CFNumberRef den;
1122         CFMutableDictionaryRef par;
1123         AVRational *avpar = &avctx->sample_aspect_ratio;
1124
1125         av_reduce(&avpar->num, &avpar->den,
1126                    avpar->num,  avpar->den,
1127                   0xFFFFFFFF);
1128
1129         num = CFNumberCreate(kCFAllocatorDefault,
1130                              kCFNumberIntType,
1131                              &avpar->num);
1132
1133         den = CFNumberCreate(kCFAllocatorDefault,
1134                              kCFNumberIntType,
1135                              &avpar->den);
1136
1137
1138
1139         par = CFDictionaryCreateMutable(kCFAllocatorDefault,
1140                                         2,
1141                                         &kCFCopyStringDictionaryKeyCallBacks,
1142                                         &kCFTypeDictionaryValueCallBacks);
1143
1144         if (!par || !num || !den) {
1145             if (par) CFRelease(par);
1146             if (num) CFRelease(num);
1147             if (den) CFRelease(den);
1148
1149             return AVERROR(ENOMEM);
1150         }
1151
1152         CFDictionarySetValue(
1153             par,
1154             kCMFormatDescriptionKey_PixelAspectRatioHorizontalSpacing,
1155             num);
1156
1157         CFDictionarySetValue(
1158             par,
1159             kCMFormatDescriptionKey_PixelAspectRatioVerticalSpacing,
1160             den);
1161
1162         status = VTSessionSetProperty(vtctx->session,
1163                                       kVTCompressionPropertyKey_PixelAspectRatio,
1164                                       par);
1165
1166         CFRelease(par);
1167         CFRelease(num);
1168         CFRelease(den);
1169
1170         if (status) {
1171             av_log(avctx,
1172                    AV_LOG_ERROR,
1173                    "Error setting pixel aspect ratio to %d:%d: %d.\n",
1174                    avctx->sample_aspect_ratio.num,
1175                    avctx->sample_aspect_ratio.den,
1176                    status);
1177
1178             return AVERROR_EXTERNAL;
1179         }
1180     }
1181
1182
1183     if (vtctx->transfer_function) {
1184         status = VTSessionSetProperty(vtctx->session,
1185                                       kVTCompressionPropertyKey_TransferFunction,
1186                                       vtctx->transfer_function);
1187
1188         if (status) {
1189             av_log(avctx, AV_LOG_WARNING, "Could not set transfer function: %d\n", status);
1190         }
1191     }
1192
1193
1194     if (vtctx->ycbcr_matrix) {
1195         status = VTSessionSetProperty(vtctx->session,
1196                                       kVTCompressionPropertyKey_YCbCrMatrix,
1197                                       vtctx->ycbcr_matrix);
1198
1199         if (status) {
1200             av_log(avctx, AV_LOG_WARNING, "Could not set ycbcr matrix: %d\n", status);
1201         }
1202     }
1203
1204
1205     if (vtctx->color_primaries) {
1206         status = VTSessionSetProperty(vtctx->session,
1207                                       kVTCompressionPropertyKey_ColorPrimaries,
1208                                       vtctx->color_primaries);
1209
1210         if (status) {
1211             av_log(avctx, AV_LOG_WARNING, "Could not set color primaries: %d\n", status);
1212         }
1213     }
1214
1215     if (gamma_level) {
1216         status = VTSessionSetProperty(vtctx->session,
1217                                       kCVImageBufferGammaLevelKey,
1218                                       gamma_level);
1219
1220         if (status) {
1221             av_log(avctx, AV_LOG_WARNING, "Could not set gamma level: %d\n", status);
1222         }
1223     }
1224
1225     if (!vtctx->has_b_frames) {
1226         status = VTSessionSetProperty(vtctx->session,
1227                                       kVTCompressionPropertyKey_AllowFrameReordering,
1228                                       kCFBooleanFalse);
1229
1230         if (status) {
1231             av_log(avctx, AV_LOG_ERROR, "Error setting 'allow frame reordering' property: %d\n", status);
1232             return AVERROR_EXTERNAL;
1233         }
1234     }
1235
1236     if (vtctx->entropy != VT_ENTROPY_NOT_SET) {
1237         CFStringRef entropy = vtctx->entropy == VT_CABAC ?
1238                                 compat_keys.kVTH264EntropyMode_CABAC:
1239                                 compat_keys.kVTH264EntropyMode_CAVLC;
1240
1241         status = VTSessionSetProperty(vtctx->session,
1242                                       compat_keys.kVTCompressionPropertyKey_H264EntropyMode,
1243                                       entropy);
1244
1245         if (status) {
1246             av_log(avctx, AV_LOG_ERROR, "Error setting entropy property: %d\n", status);
1247         }
1248     }
1249
1250     if (vtctx->realtime) {
1251         status = VTSessionSetProperty(vtctx->session,
1252                                       compat_keys.kVTCompressionPropertyKey_RealTime,
1253                                       kCFBooleanTrue);
1254
1255         if (status) {
1256             av_log(avctx, AV_LOG_ERROR, "Error setting realtime property: %d\n", status);
1257         }
1258     }
1259
1260     status = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
1261     if (status) {
1262         av_log(avctx, AV_LOG_ERROR, "Error: cannot prepare encoder: %d\n", status);
1263         return AVERROR_EXTERNAL;
1264     }
1265
1266     return 0;
1267 }
1268
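/*
 * Translate the AVCodecContext settings into a VideoToolbox encoder
 * specification and pixel buffer attributes, then create the session.
 * require_sw disables the hardware encoder, allow_sw merely permits a
 * software fallback, and the default requires hardware encoding.
 */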
1269 static int vtenc_configure_encoder(AVCodecContext *avctx)
1270 {
1271     CFMutableDictionaryRef enc_info;
1272     CFMutableDictionaryRef pixel_buffer_info;
1273     CMVideoCodecType       codec_type;
1274     VTEncContext           *vtctx = avctx->priv_data;
1275     CFStringRef            profile_level;
1276     CFNumberRef            gamma_level = NULL;
1277     int                    status;
1278
1279     codec_type = get_cm_codec_type(avctx->codec_id);
1280     if (!codec_type) {
1281         av_log(avctx, AV_LOG_ERROR, "Error: no mapping for AVCodecID %d\n", avctx->codec_id);
1282         return AVERROR(EINVAL);
1283     }
1284
1285     vtctx->codec_id = avctx->codec_id;
1286
1287     if (vtctx->codec_id == AV_CODEC_ID_H264) {
1288         vtctx->get_param_set_func = CMVideoFormatDescriptionGetH264ParameterSetAtIndex;
1289
1290         vtctx->has_b_frames = avctx->max_b_frames > 0;
1291         if(vtctx->has_b_frames && vtctx->profile == H264_PROF_BASELINE){
1292             av_log(avctx, AV_LOG_WARNING, "Cannot use B-frames with baseline profile. Output will not contain B-frames.\n");
1293             vtctx->has_b_frames = false;
1294         }
1295
1296         if (vtctx->entropy == VT_CABAC && vtctx->profile == H264_PROF_BASELINE) {
1297             av_log(avctx, AV_LOG_WARNING, "CABAC entropy requires 'main' or 'high' profile, but baseline was requested. Encode will not use CABAC entropy.\n");
1298             vtctx->entropy = VT_ENTROPY_NOT_SET;
1299         }
1300
1301         if (!get_vt_h264_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
1302     } else {
1303         vtctx->get_param_set_func = compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
1304         if (!vtctx->get_param_set_func) return AVERROR(EINVAL);
1305         if (!get_vt_hevc_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
1306     }
1307
1308     enc_info = CFDictionaryCreateMutable(
1309         kCFAllocatorDefault,
1310         20,
1311         &kCFCopyStringDictionaryKeyCallBacks,
1312         &kCFTypeDictionaryValueCallBacks
1313     );
1314
1315     if (!enc_info) return AVERROR(ENOMEM);
1316
1317 #if !TARGET_OS_IPHONE
1318     if(vtctx->require_sw) {
1319         CFDictionarySetValue(enc_info,
1320                              compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
1321                              kCFBooleanFalse);
1322     } else if (!vtctx->allow_sw) {
1323         CFDictionarySetValue(enc_info,
1324                              compat_keys.kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
1325                              kCFBooleanTrue);
1326     } else {
1327         CFDictionarySetValue(enc_info,
1328                              compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
1329                              kCFBooleanTrue);
1330     }
1331 #endif
1332
1333     if (avctx->pix_fmt != AV_PIX_FMT_VIDEOTOOLBOX) {
1334         status = create_cv_pixel_buffer_info(avctx, &pixel_buffer_info);
1335         if (status)
1336             goto init_cleanup;
1337     } else {
1338         pixel_buffer_info = NULL;
1339     }
1340
1341     vtctx->dts_delta = vtctx->has_b_frames ? -1 : 0;
1342
1343     get_cv_transfer_function(avctx, &vtctx->transfer_function, &gamma_level);
1344     get_cv_ycbcr_matrix(avctx, &vtctx->ycbcr_matrix);
1345     get_cv_color_primaries(avctx, &vtctx->color_primaries);
1346
1347
1348     if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
1349         status = vtenc_populate_extradata(avctx,
1350                                           codec_type,
1351                                           profile_level,
1352                                           gamma_level,
1353                                           enc_info,
1354                                           pixel_buffer_info);
1355         if (status)
1356             goto init_cleanup;
1357     }
1358
1359     status = vtenc_create_encoder(avctx,
1360                                   codec_type,
1361                                   profile_level,
1362                                   gamma_level,
1363                                   enc_info,
1364                                   pixel_buffer_info,
1365                                   &vtctx->session);
1366
1367 init_cleanup:
1368     if (gamma_level)
1369         CFRelease(gamma_level);
1370
1371     if (pixel_buffer_info)
1372         CFRelease(pixel_buffer_info);
1373
1374     CFRelease(enc_info);
1375
1376     return status;
1377 }
1378
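/*
 * One-time encoder init: load the optional symbols, set up the output queue
 * lock/condition, create the compression session and query whether the
 * encoder will actually reorder (B-)frames.
 */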
1379 static av_cold int vtenc_init(AVCodecContext *avctx)
1380 {
1381     VTEncContext    *vtctx = avctx->priv_data;
1382     CFBooleanRef    has_b_frames_cfbool;
1383     int             status;
1384
1385     pthread_once(&once_ctrl, loadVTEncSymbols);
1386
1387     pthread_mutex_init(&vtctx->lock, NULL);
1388     pthread_cond_init(&vtctx->cv_sample_sent, NULL);
1389
1390     vtctx->session = NULL;
1391     status = vtenc_configure_encoder(avctx);
1392     if (status) return status;
1393
1394     status = VTSessionCopyProperty(vtctx->session,
1395                                    kVTCompressionPropertyKey_AllowFrameReordering,
1396                                    kCFAllocatorDefault,
1397                                    &has_b_frames_cfbool);
1398
1399     if (!status && has_b_frames_cfbool) {
1400         //Some devices don't output B-frames for main profile, even if requested.
1401         vtctx->has_b_frames = CFBooleanGetValue(has_b_frames_cfbool);
1402         CFRelease(has_b_frames_cfbool);
1403     }
1404     avctx->has_b_frames = vtctx->has_b_frames;
1405
1406     return 0;
1407 }
1408
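/* Determine whether a sample is a keyframe from its kCMSampleAttachmentKey_NotSync attachment. */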
1409 static void vtenc_get_frame_info(CMSampleBufferRef buffer, bool *is_key_frame)
1410 {
1411     CFArrayRef      attachments;
1412     CFDictionaryRef attachment;
1413     CFBooleanRef    not_sync;
1414     CFIndex         len;
1415
1416     attachments = CMSampleBufferGetSampleAttachmentsArray(buffer, false);
1417     len = !attachments ? 0 : CFArrayGetCount(attachments);
1418
1419     if (!len) {
1420         *is_key_frame = true;
1421         return;
1422     }
1423
1424     attachment = CFArrayGetValueAtIndex(attachments, 0);
1425
1426     if (CFDictionaryGetValueIfPresent(attachment,
1427                                       kCMSampleAttachmentKey_NotSync,
1428                                       (const void **)&not_sync))
1429     {
1430         *is_key_frame = !CFBooleanGetValue(not_sync);
1431     } else {
1432         *is_key_frame = true;
1433     }
1434 }
1435
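/*
 * True for NAL unit types that come after the SEI messages in an access
 * unit, i.e. anything other than SEI, SPS, PPS or AUD.
 */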
1436 static int is_post_sei_nal_type(int nal_type){
1437     return nal_type != H264_NAL_SEI &&
1438            nal_type != H264_NAL_SPS &&
1439            nal_type != H264_NAL_PPS &&
1440            nal_type != H264_NAL_AUD;
1441 }
1442
1443 /*
1444  * Scans an SEI NAL unit and sets *sei_end to the end of its last payload.
1445  * Returns 0 if nal_data is not SEI, negative if malformed, positive otherwise.
1446  */
1447 static int find_sei_end(AVCodecContext *avctx,
1448                         uint8_t        *nal_data,
1449                         size_t          nal_size,
1450                         uint8_t       **sei_end)
1451 {
1452     int nal_type;
1453     size_t sei_payload_size = 0;
1454     int sei_payload_type = 0;
1455     *sei_end = NULL;
1456     uint8_t *nal_start = nal_data;
1457
1458     if (!nal_size)
1459         return 0;
1460
1461     nal_type = *nal_data & 0x1F;
1462     if (nal_type != H264_NAL_SEI)
1463         return 0;
1464
1465     nal_data++;
1466     nal_size--;
1467
1468     if (nal_data[nal_size - 1] == 0x80)
1469         nal_size--;
1470
1471     while (nal_size > 0 && *nal_data > 0) {
1472         do{
1473             sei_payload_type += *nal_data;
1474             nal_data++;
1475             nal_size--;
1476         } while (nal_size > 0 && *nal_data == 0xFF);
1477
1478         if (!nal_size) {
1479             av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL Unit parsing type.\n");
1480             return AVERROR_INVALIDDATA;
1481         }
1482
1483         do{
1484             sei_payload_size += *nal_data;
1485             nal_data++;
1486             nal_size--;
1487         } while (nal_size > 0 && *nal_data == 0xFF);
1488
1489         if (nal_size < sei_payload_size) {
1490             av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL Unit parsing size.\n");
1491             return AVERROR_INVALIDDATA;
1492         }
1493
1494         nal_data += sei_payload_size;
1495         nal_size -= sei_payload_size;
1496     }
1497
1498     *sei_end = nal_data;
1499
1500     return nal_data - nal_start + 1;
1501 }
1502
1503 /**
1504  * Copies the data inserting emulation prevention bytes as needed.
1505  * Existing data in the destination can be taken into account by providing
1506  * dst with a dst_offset > 0.
1507  *
1508  * @return The number of bytes copied on success. On failure, the negative of
1509  *         the number of bytes needed to copy src is returned.
1510  */
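/*
 * For example, a source run of 00 00 01 comes out as 00 00 03 01: whenever two
 * zero bytes have been written and the next source byte is 0x03 or less, an
 * emulation prevention byte (0x03) is inserted so the output can never contain
 * a spurious start code.
 */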
1511 static int copy_emulation_prev(const uint8_t *src,
1512                                size_t         src_size,
1513                                uint8_t       *dst,
1514                                ssize_t        dst_offset,
1515                                size_t         dst_size)
1516 {
1517     int zeros = 0;
1518     int wrote_bytes;
1519     uint8_t* dst_start;
1520     uint8_t* dst_end = dst + dst_size;
1521     const uint8_t* src_end = src + src_size;
1522     int start_at = dst_offset > 2 ? dst_offset - 2 : 0;
1523     int i;
1524     for (i = start_at; i < dst_offset && i < dst_size; i++) {
1525         if (!dst[i])
1526             zeros++;
1527         else
1528             zeros = 0;
1529     }
1530
1531     dst += dst_offset;
1532     dst_start = dst;
1533     for (; src < src_end; src++, dst++) {
1534         if (zeros == 2) {
1535             int insert_ep3_byte = *src <= 3;
1536             if (insert_ep3_byte) {
1537                 if (dst < dst_end)
1538                     *dst = 3;
1539                 dst++;
1540             }
1541
1542             zeros = 0;
1543         }
1544
1545         if (dst < dst_end)
1546             *dst = *src;
1547
1548         if (!*src)
1549             zeros++;
1550         else
1551             zeros = 0;
1552     }
1553
1554     wrote_bytes = dst - dst_start;
1555
1556     if (dst > dst_end)
1557         return -wrote_bytes;
1558
1559     return wrote_bytes;
1560 }
1561
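/*
 * Writes one SEI message: the ff-escaped sei_type, the ff-escaped payload size
 * (sei->size), then the payload with emulation prevention applied. The caller
 * provides the NAL unit header and the trailing 0x80 rbsp stop bit, so for a
 * 16-byte A53 payload the bytes produced here are roughly 04 10 <escaped payload>.
 */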
1562 static int write_sei(const ExtraSEI *sei,
1563                      int             sei_type,
1564                      uint8_t        *dst,
1565                      size_t          dst_size)
1566 {
1567     uint8_t *sei_start = dst;
1568     size_t remaining_sei_size = sei->size;
1569     size_t remaining_dst_size = dst_size;
1570     int header_bytes;
1571     int bytes_written;
1572     ssize_t offset;
1573
1574     if (!remaining_dst_size)
1575         return AVERROR_BUFFER_TOO_SMALL;
1576
1577     while (sei_type && remaining_dst_size != 0) {
1578         int sei_byte = sei_type > 255 ? 255 : sei_type;
1579         *dst = sei_byte;
1580
1581         sei_type -= sei_byte;
1582         dst++;
1583         remaining_dst_size--;
1584     }
1585
1586     if (!remaining_dst_size)
1587         return AVERROR_BUFFER_TOO_SMALL;
1588
1589     while (remaining_sei_size && remaining_dst_size != 0) {
1590         int size_byte = remaining_sei_size > 255 ? 255 : remaining_sei_size;
1591         *dst = size_byte;
1592
1593         remaining_sei_size -= size_byte;
1594         dst++;
1595         remaining_dst_size--;
1596     }
1597
1598     if (remaining_dst_size < sei->size)
1599         return AVERROR_BUFFER_TOO_SMALL;
1600
1601     header_bytes = dst - sei_start;
1602
1603     offset = header_bytes;
1604     bytes_written = copy_emulation_prev(sei->data,
1605                                         sei->size,
1606                                         sei_start,
1607                                         offset,
1608                                         dst_size);
1609     if (bytes_written < 0)
1610         return AVERROR_BUFFER_TOO_SMALL;
1611
1612     bytes_written += header_bytes;
1613     return bytes_written;
1614 }
1615
1616 /**
1617  * Copies NAL units and replaces length codes with
1618  * H.264 Annex B start codes. On failure, the contents of
1619  * dst_data may have been modified.
1620  *
1621  * @param length_code_size Byte length of each length code
1622  * @param sample_buffer NAL units prefixed with length codes.
1623  * @param sei Optional A53 closed captions SEI data.
1624  * @param dst_data Must be zeroed before calling this function.
1625  *                 Contains the copied NAL units prefixed with
1626  *                 start codes when the function returns
1627  *                 successfully.
1628  * @param dst_size Length of dst_data
1629  * @return 0 on success
1630  *         AVERROR_INVALIDDATA if length_code_size is invalid
1631  *         AVERROR_BUFFER_TOO_SMALL if dst_data is too small
1632  *         or if a length_code in src_data specifies data beyond
1633  *         the end of its buffer.
1634  */
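/*
 * With a 4-byte length code the conversion looks like
 *     00 00 00 0e <14-byte NAL> 00 00 00 07 <7-byte NAL>
 * becoming
 *     00 00 00 01 <14-byte NAL> 00 00 00 01 <7-byte NAL>
 * with an A53 SEI NAL unit inserted (or appended to an existing SEI NAL unit)
 * when sei is non-NULL.
 */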
1635 static int copy_replace_length_codes(
1636     AVCodecContext *avctx,
1637     size_t        length_code_size,
1638     CMSampleBufferRef sample_buffer,
1639     ExtraSEI      *sei,
1640     uint8_t       *dst_data,
1641     size_t        dst_size)
1642 {
1643     size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1644     size_t remaining_src_size = src_size;
1645     size_t remaining_dst_size = dst_size;
1646     size_t src_offset = 0;
1647     int wrote_sei = 0;
1648     int status;
1649     uint8_t size_buf[4];
1650     uint8_t nal_type;
1651     CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
1652
1653     if (length_code_size > 4) {
1654         return AVERROR_INVALIDDATA;
1655     }
1656
1657     while (remaining_src_size > 0) {
1658         size_t curr_src_len;
1659         size_t curr_dst_len;
1660         size_t box_len = 0;
1661         size_t i;
1662
1663         uint8_t       *dst_box;
1664
1665         status = CMBlockBufferCopyDataBytes(block,
1666                                             src_offset,
1667                                             length_code_size,
1668                                             size_buf);
1669         if (status) {
1670             av_log(avctx, AV_LOG_ERROR, "Cannot copy length: %d\n", status);
1671             return AVERROR_EXTERNAL;
1672         }
1673
1674         status = CMBlockBufferCopyDataBytes(block,
1675                                             src_offset + length_code_size,
1676                                             1,
1677                                             &nal_type);
1678
1679         if (status) {
1680             av_log(avctx, AV_LOG_ERROR, "Cannot copy type: %d\n", status);
1681             return AVERROR_EXTERNAL;
1682         }
1683
1684         nal_type &= 0x1F;
1685
1686         for (i = 0; i < length_code_size; i++) {
1687             box_len <<= 8;
1688             box_len |= size_buf[i];
1689         }
1690
1691         if (sei && !wrote_sei && is_post_sei_nal_type(nal_type)) {
1692             //No SEI NAL unit - insert.
1693             int wrote_bytes;
1694
1695             memcpy(dst_data, start_code, sizeof(start_code));
1696             dst_data += sizeof(start_code);
1697             remaining_dst_size -= sizeof(start_code);
1698
1699             *dst_data = H264_NAL_SEI;
1700             dst_data++;
1701             remaining_dst_size--;
1702
1703             wrote_bytes = write_sei(sei,
1704                                     H264_SEI_TYPE_USER_DATA_REGISTERED,
1705                                     dst_data,
1706                                     remaining_dst_size);
1707
1708             if (wrote_bytes < 0)
1709                 return wrote_bytes;
1710
1711             remaining_dst_size -= wrote_bytes;
1712             dst_data += wrote_bytes;
1713
1714             if (remaining_dst_size <= 0)
1715                 return AVERROR_BUFFER_TOO_SMALL;
1716
1717             *dst_data = 0x80;
1718
1719             dst_data++;
1720             remaining_dst_size--;
1721
1722             wrote_sei = 1;
1723         }
1724
1725         curr_src_len = box_len + length_code_size;
1726         curr_dst_len = box_len + sizeof(start_code);
1727
1728         if (remaining_src_size < curr_src_len) {
1729             return AVERROR_BUFFER_TOO_SMALL;
1730         }
1731
1732         if (remaining_dst_size < curr_dst_len) {
1733             return AVERROR_BUFFER_TOO_SMALL;
1734         }
1735
1736         dst_box = dst_data + sizeof(start_code);
1737
1738         memcpy(dst_data, start_code, sizeof(start_code));
1739         status = CMBlockBufferCopyDataBytes(block,
1740                                             src_offset + length_code_size,
1741                                             box_len,
1742                                             dst_box);
1743
1744         if (status) {
1745             av_log(avctx, AV_LOG_ERROR, "Cannot copy data: %d\n", status);
1746             return AVERROR_EXTERNAL;
1747         }
1748
1749         if (sei && !wrote_sei && nal_type == H264_NAL_SEI) {
1750             //Found SEI NAL unit - append.
1751             int wrote_bytes;
1752             int old_sei_length;
1753             int extra_bytes;
1754             uint8_t *new_sei;
1755             old_sei_length = find_sei_end(avctx, dst_box, box_len, &new_sei);
1756             if (old_sei_length < 0)
1757                 return old_sei_length;
1758
1759             wrote_bytes = write_sei(sei,
1760                                     H264_SEI_TYPE_USER_DATA_REGISTERED,
1761                                     new_sei,
1762                                     remaining_dst_size - old_sei_length);
1763             if (wrote_bytes < 0)
1764                 return wrote_bytes;
1765
1766             if (new_sei + wrote_bytes >= dst_data + remaining_dst_size)
1767                 return AVERROR_BUFFER_TOO_SMALL;
1768
1769             new_sei[wrote_bytes++] = 0x80;
1770             extra_bytes = wrote_bytes - (dst_box + box_len - new_sei);
1771
1772             dst_data += extra_bytes;
1773             remaining_dst_size -= extra_bytes;
1774
1775             wrote_sei = 1;
1776         }
1777
1778         src_offset += curr_src_len;
1779         dst_data += curr_dst_len;
1780
1781         remaining_src_size -= curr_src_len;
1782         remaining_dst_size -= curr_dst_len;
1783     }
1784
1785     return 0;
1786 }
1787
1788 /**
1789  * Returns a sufficient number of bytes to contain the sei data.
1790  * It may be greater than the minimum required.
1791  */
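/*
 * The bound is the emulation-prevention-expanded payload size plus
 * sei->size / 255 + 1 bytes for the ff-escaped payload size and
 * type / 255 + 1 bytes for the ff-escaped payload type.
 */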
1792 static int get_sei_msg_bytes(const ExtraSEI* sei, int type){
1793     int copied_size;
1794     if (sei->size == 0)
1795         return 0;
1796
1797     copied_size = -copy_emulation_prev(sei->data,
1798                                        sei->size,
1799                                        NULL,
1800                                        0,
1801                                        0);
1802
1803     if ((sei->size % 255) == 0) //may result in an extra byte
1804         copied_size++;
1805
1806     return copied_size + sei->size / 255 + 1 + type / 255 + 1;
1807 }
1808
1809 static int vtenc_cm_to_avpacket(
1810     AVCodecContext    *avctx,
1811     CMSampleBufferRef sample_buffer,
1812     AVPacket          *pkt,
1813     ExtraSEI          *sei)
1814 {
1815     VTEncContext *vtctx = avctx->priv_data;
1816
1817     int     status;
1818     bool    is_key_frame;
1819     bool    add_header;
1820     size_t  length_code_size;
1821     size_t  header_size = 0;
1822     size_t  in_buf_size;
1823     size_t  out_buf_size;
1824     size_t  sei_nalu_size = 0;
1825     int64_t dts_delta;
1826     int64_t time_base_num;
1827     int nalu_count;
1828     CMTime  pts;
1829     CMTime  dts;
1830     CMVideoFormatDescriptionRef vid_fmt;
1831
1832
1833     vtenc_get_frame_info(sample_buffer, &is_key_frame);
1834     status = get_length_code_size(avctx, sample_buffer, &length_code_size);
1835     if (status) return status;
1836
1837     add_header = is_key_frame && !(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER);
1838
1839     if (add_header) {
1840         vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
1841         if (!vid_fmt) {
1842             av_log(avctx, AV_LOG_ERROR, "Cannot get format description.\n");
1843             return AVERROR_EXTERNAL;
1844         }
1845
1846         status = get_params_size(avctx, vid_fmt, &header_size);
1847         if (status) return status;
1848     }
1849
1850     status = count_nalus(length_code_size, sample_buffer, &nalu_count);
1851     if(status)
1852         return status;
1853
1854     if (sei) {
1855         size_t msg_size = get_sei_msg_bytes(sei,
1856                                             H264_SEI_TYPE_USER_DATA_REGISTERED);
1857
1858         sei_nalu_size = sizeof(start_code) + 1 + msg_size + 1;
1859     }
1860
1861     in_buf_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1862     out_buf_size = header_size +
1863                    in_buf_size +
1864                    sei_nalu_size +
1865                    nalu_count * ((int)sizeof(start_code) - (int)length_code_size);
1866
1867     status = ff_alloc_packet2(avctx, pkt, out_buf_size, out_buf_size);
1868     if (status < 0)
1869         return status;
1870
1871     if (add_header) {
1872         status = copy_param_sets(avctx, vid_fmt, pkt->data, out_buf_size);
1873         if(status) return status;
1874     }
1875
1876     status = copy_replace_length_codes(
1877         avctx,
1878         length_code_size,
1879         sample_buffer,
1880         sei,
1881         pkt->data + header_size,
1882         pkt->size - header_size
1883     );
1884
1885     if (status) {
1886         av_log(avctx, AV_LOG_ERROR, "Error copying packet data: %d\n", status);
1887         return status;
1888     }
1889
1890     if (is_key_frame) {
1891         pkt->flags |= AV_PKT_FLAG_KEY;
1892     }
1893
1894     pts = CMSampleBufferGetPresentationTimeStamp(sample_buffer);
1895     dts = CMSampleBufferGetDecodeTimeStamp      (sample_buffer);
1896
1897     if (CMTIME_IS_INVALID(dts)) {
1898         if (!vtctx->has_b_frames) {
1899             dts = pts;
1900         } else {
1901             av_log(avctx, AV_LOG_ERROR, "DTS is invalid.\n");
1902             return AVERROR_EXTERNAL;
1903         }
1904     }
1905
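    /*
     * dts_delta is the PTS gap between the first two input frames (see
     * vtenc_frame); subtracting it keeps decode timestamps from running
     * ahead of presentation timestamps when frames are reordered, and it
     * is effectively 0 when no B-frames are emitted.
     */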
1906     dts_delta = vtctx->dts_delta >= 0 ? vtctx->dts_delta : 0;
1907     time_base_num = avctx->time_base.num;
1908     pkt->pts = pts.value / time_base_num;
1909     pkt->dts = dts.value / time_base_num - dts_delta;
1910     pkt->size = out_buf_size;
1911
1912     return 0;
1913 }
1914
1915 /*
1916  * contiguous_buf_size is 0 if not contiguous, and the size of the buffer
1917  * containing all planes if so.
1918  */
1919 static int get_cv_pixel_info(
1920     AVCodecContext *avctx,
1921     const AVFrame  *frame,
1922     int            *color,
1923     int            *plane_count,
1924     size_t         *widths,
1925     size_t         *heights,
1926     size_t         *strides,
1927     size_t         *contiguous_buf_size)
1928 {
1929     VTEncContext *vtctx = avctx->priv_data;
1930     int av_format       = frame->format;
1931     int av_color_range  = frame->color_range;
1932     int i;
1933     int range_guessed;
1934     int status;
1935
1936     status = get_cv_pixel_format(avctx, av_format, av_color_range, color, &range_guessed);
1937     if (status) {
1938         av_log(avctx,
1939             AV_LOG_ERROR,
1940             "Could not get pixel format for color format '%s' range '%s'.\n",
1941             av_get_pix_fmt_name(av_format),
1942             av_color_range > AVCOL_RANGE_UNSPECIFIED &&
1943             av_color_range < AVCOL_RANGE_NB ?
1944                av_color_range_name(av_color_range) :
1945                "Unknown");
1946
1947         return AVERROR(EINVAL);
1948     }
1949
1950     if (range_guessed) {
1951         if (!vtctx->warned_color_range) {
1952             vtctx->warned_color_range = true;
1953             av_log(avctx,
1954                    AV_LOG_WARNING,
1955                    "Color range not set for %s. Using MPEG range.\n",
1956                    av_get_pix_fmt_name(av_format));
1957         }
1958     }
1959
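    /*
     * e.g. a 1920x1080 NV12 frame yields two planes: a 1920x1080 luma plane
     * and a 960x540 plane of interleaved CbCr samples whose stride spans the
     * full frame width; YUV420P yields three planes at half chroma resolution.
     */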
1960     switch (av_format) {
1961     case AV_PIX_FMT_NV12:
1962         *plane_count = 2;
1963
1964         widths [0] = avctx->width;
1965         heights[0] = avctx->height;
1966         strides[0] = frame ? frame->linesize[0] : avctx->width;
1967
1968         widths [1] = (avctx->width  + 1) / 2;
1969         heights[1] = (avctx->height + 1) / 2;
1970         strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) & -2;
1971         break;
1972
1973     case AV_PIX_FMT_YUV420P:
1974         *plane_count = 3;
1975
1976         widths [0] = avctx->width;
1977         heights[0] = avctx->height;
1978         strides[0] = frame ? frame->linesize[0] : avctx->width;
1979
1980         widths [1] = (avctx->width  + 1) / 2;
1981         heights[1] = (avctx->height + 1) / 2;
1982         strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) / 2;
1983
1984         widths [2] = (avctx->width  + 1) / 2;
1985         heights[2] = (avctx->height + 1) / 2;
1986         strides[2] = frame ? frame->linesize[2] : (avctx->width + 1) / 2;
1987         break;
1988
1989     default:
1990         av_log(
1991                avctx,
1992                AV_LOG_ERROR,
1993                "Could not get frame format info for color %d range %d.\n",
1994                av_format,
1995                av_color_range);
1996
1997         return AVERROR(EINVAL);
1998     }
1999
2000     *contiguous_buf_size = 0;
2001     for (i = 0; i < *plane_count; i++) {
2002         if (i < *plane_count - 1 &&
2003             frame->data[i] + strides[i] * heights[i] != frame->data[i + 1]) {
2004             *contiguous_buf_size = 0;
2005             break;
2006         }
2007
2008         *contiguous_buf_size += strides[i] * heights[i];
2009     }
2010
2011     return 0;
2012 }
2013
2014 #if !TARGET_OS_IPHONE
2015 //Not used on iOS - frame is always copied.
2016 static void free_avframe(
2017     void       *release_ctx,
2018     const void *data,
2019     size_t      size,
2020     size_t      plane_count,
2021     const void *plane_addresses[])
2022 {
2023     AVFrame *frame = release_ctx;
2024     av_frame_free(&frame);
2025 }
2026 #else
2027 //Not used on OSX - frame is never copied.
2028 static int copy_avframe_to_pixel_buffer(AVCodecContext   *avctx,
2029                                         const AVFrame    *frame,
2030                                         CVPixelBufferRef cv_img,
2031                                         const size_t     *plane_strides,
2032                                         const size_t     *plane_rows)
2033 {
2034     int i, j;
2035     size_t plane_count;
2036     int status;
2037     int rows;
2038     int src_stride;
2039     int dst_stride;
2040     uint8_t *src_addr;
2041     uint8_t *dst_addr;
2042     size_t copy_bytes;
2043
2044     status = CVPixelBufferLockBaseAddress(cv_img, 0);
2045     if (status) {
2046         av_log(avctx,
2047                AV_LOG_ERROR,
2048                "Error: Could not lock base address of CVPixelBuffer: %d.\n",
2049                status);
2050
2051         return AVERROR_EXTERNAL;
2052     }
2053
2054     if (CVPixelBufferIsPlanar(cv_img)) {
2055         plane_count = CVPixelBufferGetPlaneCount(cv_img);
2056         for (i = 0; frame->data[i]; i++) {
2057             if (i == plane_count) {
2058                 CVPixelBufferUnlockBaseAddress(cv_img, 0);
2059                 av_log(avctx,
2060                     AV_LOG_ERROR,
2061                     "Error: different number of planes in AVFrame and CVPixelBuffer.\n"
2062                 );
2063
2064                 return AVERROR_EXTERNAL;
2065             }
2066
2067             dst_addr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cv_img, i);
2068             src_addr = (uint8_t*)frame->data[i];
2069             dst_stride = CVPixelBufferGetBytesPerRowOfPlane(cv_img, i);
2070             src_stride = plane_strides[i];
2071             rows = plane_rows[i];
2072
2073             if (dst_stride == src_stride) {
2074                 memcpy(dst_addr, src_addr, src_stride * rows);
2075             } else {
2076                 copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2077
2078                 for (j = 0; j < rows; j++) {
2079                     memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2080                 }
2081             }
2082         }
2083     } else {
2084         if (frame->data[1]) {
2085             CVPixelBufferUnlockBaseAddress(cv_img, 0);
2086             av_log(avctx,
2087                 AV_LOG_ERROR,
2088                 "Error: different number of planes in AVFrame and non-planar CVPixelBuffer.\n"
2089             );
2090
2091             return AVERROR_EXTERNAL;
2092         }
2093
2094         dst_addr = (uint8_t*)CVPixelBufferGetBaseAddress(cv_img);
2095         src_addr = (uint8_t*)frame->data[0];
2096         dst_stride = CVPixelBufferGetBytesPerRow(cv_img);
2097         src_stride = plane_strides[0];
2098         rows = plane_rows[0];
2099
2100         if (dst_stride == src_stride) {
2101             memcpy(dst_addr, src_addr, src_stride * rows);
2102         } else {
2103             copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2104
2105             for (j = 0; j < rows; j++) {
2106                 memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2107             }
2108         }
2109     }
2110
2111     status = CVPixelBufferUnlockBaseAddress(cv_img, 0);
2112     if (status) {
2113         av_log(avctx, AV_LOG_ERROR, "Error: Could not unlock CVPixelBuffer base address: %d.\n", status);
2114         return AVERROR_EXTERNAL;
2115     }
2116
2117     return 0;
2118 }
2119 #endif //!TARGET_OS_IPHONE
2120
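/*
 * Wraps or copies an AVFrame into a CVPixelBuffer: AV_PIX_FMT_VIDEOTOOLBOX
 * frames are passed through with an extra retain, iOS copies the planes into
 * a buffer from the session's pixel buffer pool, and macOS wraps the AVFrame
 * planes directly and releases the reference from free_avframe().
 */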
2121 static int create_cv_pixel_buffer(AVCodecContext   *avctx,
2122                                   const AVFrame    *frame,
2123                                   CVPixelBufferRef *cv_img)
2124 {
2125     int plane_count;
2126     int color;
2127     size_t widths [AV_NUM_DATA_POINTERS];
2128     size_t heights[AV_NUM_DATA_POINTERS];
2129     size_t strides[AV_NUM_DATA_POINTERS];
2130     int status;
2131     size_t contiguous_buf_size;
2132 #if TARGET_OS_IPHONE
2133     CVPixelBufferPoolRef pix_buf_pool;
2134     VTEncContext* vtctx = avctx->priv_data;
2135 #else
2136     CFMutableDictionaryRef pix_buf_attachments = CFDictionaryCreateMutable(
2137                                                    kCFAllocatorDefault,
2138                                                    10,
2139                                                    &kCFCopyStringDictionaryKeyCallBacks,
2140                                                    &kCFTypeDictionaryValueCallBacks);
2141
2142     if (!pix_buf_attachments) return AVERROR(ENOMEM);
2143 #endif
2144
2145     if (avctx->pix_fmt == AV_PIX_FMT_VIDEOTOOLBOX) {
2146         av_assert0(frame->format == AV_PIX_FMT_VIDEOTOOLBOX);
2147
2148         *cv_img = (CVPixelBufferRef)frame->data[3];
2149         av_assert0(*cv_img);
2150
2151         CFRetain(*cv_img);
2152         return 0;
2153     }
2154
2155     memset(widths,  0, sizeof(widths));
2156     memset(heights, 0, sizeof(heights));
2157     memset(strides, 0, sizeof(strides));
2158
2159     status = get_cv_pixel_info(
2160         avctx,
2161         frame,
2162         &color,
2163         &plane_count,
2164         widths,
2165         heights,
2166         strides,
2167         &contiguous_buf_size
2168     );
2169
2170     if (status) {
2171         av_log(
2172             avctx,
2173             AV_LOG_ERROR,
2174             "Error: Cannot convert format %d color_range %d: %d\n",
2175             frame->format,
2176             frame->color_range,
2177             status
2178         );
2179
2180         return AVERROR_EXTERNAL;
2181     }
2182
2183 #if TARGET_OS_IPHONE
2184     pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2185     if (!pix_buf_pool) {
2186         /* On iOS, the VT session is invalidated when the APP switches from
2187          * foreground to background and vice versa. Fetch the actual error code
2188          * of the VT session to detect that case and restart the VT session
2189          * accordingly. */
2190         OSStatus vtstatus;
2191
2192         vtstatus = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
2193         if (vtstatus == kVTInvalidSessionErr) {
2194             CFRelease(vtctx->session);
2195             vtctx->session = NULL;
2196             status = vtenc_configure_encoder(avctx);
2197             if (status == 0)
2198                 pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2199         }
2200         if (!pix_buf_pool) {
2201             av_log(avctx, AV_LOG_ERROR, "Could not get pixel buffer pool.\n");
2202             return AVERROR_EXTERNAL;
2203         }
2204         else
2205             av_log(avctx, AV_LOG_WARNING, "VT session restarted because of a "
2206                    "kVTInvalidSessionErr error.\n");
2207     }
2208
2209     status = CVPixelBufferPoolCreatePixelBuffer(NULL,
2210                                                 pix_buf_pool,
2211                                                 cv_img);
2212
2213
2214     if (status) {
2215         av_log(avctx, AV_LOG_ERROR, "Could not create pixel buffer from pool: %d.\n", status);
2216         return AVERROR_EXTERNAL;
2217     }
2218
2219     status = copy_avframe_to_pixel_buffer(avctx, frame, *cv_img, strides, heights);
2220     if (status) {
2221         CFRelease(*cv_img);
2222         *cv_img = NULL;
2223         return status;
2224     }
2225 #else
2226     AVFrame *enc_frame = av_frame_alloc();
2227     if (!enc_frame) return AVERROR(ENOMEM);
2228
2229     status = av_frame_ref(enc_frame, frame);
2230     if (status) {
2231         av_frame_free(&enc_frame);
2232         return status;
2233     }
2234
2235     status = CVPixelBufferCreateWithPlanarBytes(
2236         kCFAllocatorDefault,
2237         enc_frame->width,
2238         enc_frame->height,
2239         color,
2240         NULL,
2241         contiguous_buf_size,
2242         plane_count,
2243         (void **)enc_frame->data,
2244         widths,
2245         heights,
2246         strides,
2247         free_avframe,
2248         enc_frame,
2249         NULL,
2250         cv_img
2251     );
2252
2253     if (status) {
2254         CFRelease(pix_buf_attachments);
2255         av_log(avctx, AV_LOG_ERROR, "Error: Could not create CVPixelBuffer: %d\n", status);
2256         return AVERROR_EXTERNAL;
2257     }
2258     add_color_attr(avctx, pix_buf_attachments);
2259     CVBufferSetAttachments(*cv_img, pix_buf_attachments, kCVAttachmentMode_ShouldPropagate);
2260     CFRelease(pix_buf_attachments);
2261 #endif
2262
2263     return 0;
2264 }
2265
2266 static int create_encoder_dict_h264(const AVFrame *frame,
2267                                     CFDictionaryRef* dict_out)
2268 {
2269     CFDictionaryRef dict = NULL;
2270     if (frame->pict_type == AV_PICTURE_TYPE_I) {
2271         const void *keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
2272         const void *vals[] = { kCFBooleanTrue };
2273
2274         dict = CFDictionaryCreate(NULL, keys, vals, 1, NULL, NULL);
2275         if(!dict) return AVERROR(ENOMEM);
2276     }
2277
2278     *dict_out = dict;
2279     return 0;
2280 }
2281
2282 static int vtenc_send_frame(AVCodecContext *avctx,
2283                             VTEncContext   *vtctx,
2284                             const AVFrame  *frame)
2285 {
2286     CMTime time;
2287     CFDictionaryRef frame_dict;
2288     CVPixelBufferRef cv_img = NULL;
2289     AVFrameSideData *side_data = NULL;
2290     ExtraSEI *sei = NULL;
2291     int status = create_cv_pixel_buffer(avctx, frame, &cv_img);
2292
2293     if (status) return status;
2294
2295     status = create_encoder_dict_h264(frame, &frame_dict);
2296     if (status) {
2297         CFRelease(cv_img);
2298         return status;
2299     }
2300
2301     side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
2302     if (vtctx->a53_cc && side_data && side_data->size) {
2303         sei = av_mallocz(sizeof(*sei));
2304         if (!sei) {
2305             av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
2306         } else {
2307             int ret = ff_alloc_a53_sei(frame, 0, &sei->data, &sei->size);
2308             if (ret < 0) {
2309                 av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
2310                 av_free(sei);
2311                 sei = NULL;
2312             }
2313         }
2314     }
2315
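    /*
     * The frame's pts (in avctx->time_base units) maps onto a CMTime of
     * pts * time_base.num over time_base.den; e.g. pts 90 at a 1/30 time
     * base becomes CMTimeMake(90, 30), i.e. 3 seconds.
     */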
2316     time = CMTimeMake(frame->pts * avctx->time_base.num, avctx->time_base.den);
2317     status = VTCompressionSessionEncodeFrame(
2318         vtctx->session,
2319         cv_img,
2320         time,
2321         kCMTimeInvalid,
2322         frame_dict,
2323         sei,
2324         NULL
2325     );
2326
2327     if (frame_dict) CFRelease(frame_dict);
2328     CFRelease(cv_img);
2329
2330     if (status) {
2331         av_log(avctx, AV_LOG_ERROR, "Error: cannot encode frame: %d\n", status);
2332         return AVERROR_EXTERNAL;
2333     }
2334
2335     return 0;
2336 }
2337
2338 static int vtenc_frame(
2339     AVCodecContext *avctx,
2340     AVPacket       *pkt,
2341     const AVFrame  *frame,
2342     int            *got_packet)
2343 {
2344     VTEncContext *vtctx = avctx->priv_data;
2345     bool get_frame;
2346     int status;
2347     CMSampleBufferRef buf = NULL;
2348     ExtraSEI *sei = NULL;
2349
2350     if (frame) {
2351         status = vtenc_send_frame(avctx, vtctx, frame);
2352
2353         if (status) {
2354             status = AVERROR_EXTERNAL;
2355             goto end_nopkt;
2356         }
2357
2358         if (vtctx->frame_ct_in == 0) {
2359             vtctx->first_pts = frame->pts;
2360         } else if(vtctx->frame_ct_in == 1 && vtctx->has_b_frames) {
2361             vtctx->dts_delta = frame->pts - vtctx->first_pts;
2362         }
2363
2364         vtctx->frame_ct_in++;
2365     } else if(!vtctx->flushing) {
2366         vtctx->flushing = true;
2367
2368         status = VTCompressionSessionCompleteFrames(vtctx->session,
2369                                                     kCMTimeIndefinite);
2370
2371         if (status) {
2372             av_log(avctx, AV_LOG_ERROR, "Error flushing frames: %d\n", status);
2373             status = AVERROR_EXTERNAL;
2374             goto end_nopkt;
2375         }
2376     }
2377
2378     *got_packet = 0;
2379     get_frame = vtctx->dts_delta >= 0 || !frame;
2380     if (!get_frame) {
2381         status = 0;
2382         goto end_nopkt;
2383     }
2384
2385     status = vtenc_q_pop(vtctx, !frame, &buf, &sei);
2386     if (status) goto end_nopkt;
2387     if (!buf)   goto end_nopkt;
2388
2389     status = vtenc_cm_to_avpacket(avctx, buf, pkt, sei);
2390     if (sei) {
2391         if (sei->data) av_free(sei->data);
2392         av_free(sei);
2393     }
2394     CFRelease(buf);
2395     if (status) goto end_nopkt;
2396
2397     *got_packet = 1;
2398     return 0;
2399
2400 end_nopkt:
2401     av_packet_unref(pkt);
2402     return status;
2403 }
2404
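/*
 * Creates a temporary compression session, encodes a single blank frame and
 * flushes it so that the parameter sets become available and avctx->extradata
 * can be populated before any real frame is encoded.
 */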
2405 static int vtenc_populate_extradata(AVCodecContext   *avctx,
2406                                     CMVideoCodecType codec_type,
2407                                     CFStringRef      profile_level,
2408                                     CFNumberRef      gamma_level,
2409                                     CFDictionaryRef  enc_info,
2410                                     CFDictionaryRef  pixel_buffer_info)
2411 {
2412     VTEncContext *vtctx = avctx->priv_data;
2413     AVFrame *frame = av_frame_alloc();
2414     int y_size = avctx->width * avctx->height;
2415     int chroma_size = (avctx->width / 2) * (avctx->height / 2);
2416     CMSampleBufferRef buf = NULL;
2417     int status;
2418
2419     if (!frame)
2420         return AVERROR(ENOMEM);
2421
2422     frame->buf[0] = av_buffer_alloc(y_size + 2 * chroma_size);
2423
2424     if(!frame->buf[0]){
2425         status = AVERROR(ENOMEM);
2426         goto pe_cleanup;
2427     }
2428
2429     status = vtenc_create_encoder(avctx,
2430                                   codec_type,
2431                                   profile_level,
2432                                   gamma_level,
2433                                   enc_info,
2434                                   pixel_buffer_info,
2435                                   &vtctx->session);
2436     if (status)
2437         goto pe_cleanup;
2438
2439     frame->data[0] = frame->buf[0]->data;
2440     memset(frame->data[0],   0,      y_size);
2441
2442     frame->data[1] = frame->buf[0]->data + y_size;
2443     if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
2444         memset(frame->data[1], 128, chroma_size);
2445         frame->data[2] = frame->buf[0]->data + y_size + chroma_size;
2446         memset(frame->data[2], 128, chroma_size);
2447     } else {
2448         memset(frame->data[1], 128, 2 * chroma_size); //NV12: interleaved CbCr plane
2449     }
2450
2451     frame->linesize[0] = avctx->width;
2452
2453     if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
2454         frame->linesize[1] =
2455         frame->linesize[2] = (avctx->width + 1) / 2;
2456     } else {
2457         frame->linesize[1] = (avctx->width + 1) & -2;
2458     }
2459
2460     frame->format          = avctx->pix_fmt;
2461     frame->width           = avctx->width;
2462     frame->height          = avctx->height;
2463     frame->colorspace      = avctx->colorspace;
2464     frame->color_range     = avctx->color_range;
2465     frame->color_trc       = avctx->color_trc;
2466     frame->color_primaries = avctx->color_primaries;
2467
2468     frame->pts = 0;
2469     status = vtenc_send_frame(avctx, vtctx, frame);
2470     if (status) {
2471         av_log(avctx, AV_LOG_ERROR, "Error sending frame: %d\n", status);
2472         goto pe_cleanup;
2473     }
2474
2475     //Populates extradata - output frames are flushed and param sets are available.
2476     status = VTCompressionSessionCompleteFrames(vtctx->session,
2477                                                 kCMTimeIndefinite);
2478
2479     if (status)
2480         goto pe_cleanup;
2481
2482     status = vtenc_q_pop(vtctx, 0, &buf, NULL);
2483     if (status) {
2484         av_log(avctx, AV_LOG_ERROR, "popping: %d\n", status);
2485         goto pe_cleanup;
2486     }
2487
2488     CFRelease(buf);
2489
2490
2491
2492 pe_cleanup:
2493     if(vtctx->session)
2494         CFRelease(vtctx->session);
2495
2496     vtctx->session = NULL;
2497     vtctx->frame_ct_out = 0;
2498
2499     av_frame_unref(frame);
2500     av_frame_free(&frame);
2501
2502     av_assert0(status != 0 || (avctx->extradata && avctx->extradata_size > 0));
2503
2504     return status;
2505 }
2506
2507 static av_cold int vtenc_close(AVCodecContext *avctx)
2508 {
2509     VTEncContext *vtctx = avctx->priv_data;
2510
2511     pthread_cond_destroy(&vtctx->cv_sample_sent);
2512     pthread_mutex_destroy(&vtctx->lock);
2513
2514     if(!vtctx->session) return 0;
2515
2516     VTCompressionSessionCompleteFrames(vtctx->session,
2517                                        kCMTimeIndefinite);
2518     clear_frame_queue(vtctx);
2519     CFRelease(vtctx->session);
2520     vtctx->session = NULL;
2521
2522     if (vtctx->color_primaries) {
2523         CFRelease(vtctx->color_primaries);
2524         vtctx->color_primaries = NULL;
2525     }
2526
2527     if (vtctx->transfer_function) {
2528         CFRelease(vtctx->transfer_function);
2529         vtctx->transfer_function = NULL;
2530     }
2531
2532     if (vtctx->ycbcr_matrix) {
2533         CFRelease(vtctx->ycbcr_matrix);
2534         vtctx->ycbcr_matrix = NULL;
2535     }
2536
2537     return 0;
2538 }
2539
2540 static const enum AVPixelFormat pix_fmts[] = {
2541     AV_PIX_FMT_VIDEOTOOLBOX,
2542     AV_PIX_FMT_NV12,
2543     AV_PIX_FMT_YUV420P,
2544     AV_PIX_FMT_NONE
2545 };
2546
2547 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
2548 #define COMMON_OPTIONS \
2549     { "allow_sw", "Allow software encoding", OFFSET(allow_sw), AV_OPT_TYPE_BOOL, \
2550         { .i64 = 0 }, 0, 1, VE }, \
2551     { "require_sw", "Require software encoding", OFFSET(require_sw), AV_OPT_TYPE_BOOL, \
2552         { .i64 = 0 }, 0, 1, VE }, \
2553     { "realtime", "Hint that encoding should happen in real-time if not faster (e.g. capturing from camera).", \
2554         OFFSET(realtime), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2555     { "frames_before", "Other frames will come before the frames in this session. This helps smooth concatenation issues.", \
2556         OFFSET(frames_before), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2557     { "frames_after", "Other frames will come after the frames in this session. This helps smooth concatenation issues.", \
2558         OFFSET(frames_after), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
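/*
 * e.g. "ffmpeg -i in.mov -c:v h264_videotoolbox -allow_sw 1 out.mp4" permits a
 * software fallback, while "-require_sw 1" forces Apple's software encoder.
 */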
2559
2560 #define OFFSET(x) offsetof(VTEncContext, x)
2561 static const AVOption h264_options[] = {
2562     { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = H264_PROF_AUTO }, H264_PROF_AUTO, H264_PROF_COUNT, VE, "profile" },
2563     { "baseline", "Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_BASELINE }, INT_MIN, INT_MAX, VE, "profile" },
2564     { "main",     "Main Profile",     0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_MAIN     }, INT_MIN, INT_MAX, VE, "profile" },
2565     { "high",     "High Profile",     0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_HIGH     }, INT_MIN, INT_MAX, VE, "profile" },
2566
2567     { "level", "Level", OFFSET(level), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 52, VE, "level" },
2568     { "1.3", "Level 1.3, only available with Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = 13 }, INT_MIN, INT_MAX, VE, "level" },
2569     { "3.0", "Level 3.0", 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, INT_MIN, INT_MAX, VE, "level" },
2570     { "3.1", "Level 3.1", 0, AV_OPT_TYPE_CONST, { .i64 = 31 }, INT_MIN, INT_MAX, VE, "level" },
2571     { "3.2", "Level 3.2", 0, AV_OPT_TYPE_CONST, { .i64 = 32 }, INT_MIN, INT_MAX, VE, "level" },
2572     { "4.0", "Level 4.0", 0, AV_OPT_TYPE_CONST, { .i64 = 40 }, INT_MIN, INT_MAX, VE, "level" },
2573     { "4.1", "Level 4.1", 0, AV_OPT_TYPE_CONST, { .i64 = 41 }, INT_MIN, INT_MAX, VE, "level" },
2574     { "4.2", "Level 4.2", 0, AV_OPT_TYPE_CONST, { .i64 = 42 }, INT_MIN, INT_MAX, VE, "level" },
2575     { "5.0", "Level 5.0", 0, AV_OPT_TYPE_CONST, { .i64 = 50 }, INT_MIN, INT_MAX, VE, "level" },
2576     { "5.1", "Level 5.1", 0, AV_OPT_TYPE_CONST, { .i64 = 51 }, INT_MIN, INT_MAX, VE, "level" },
2577     { "5.2", "Level 5.2", 0, AV_OPT_TYPE_CONST, { .i64 = 52 }, INT_MIN, INT_MAX, VE, "level" },
2578
2579     { "coder", "Entropy coding", OFFSET(entropy), AV_OPT_TYPE_INT, { .i64 = VT_ENTROPY_NOT_SET }, VT_ENTROPY_NOT_SET, VT_CABAC, VE, "coder" },
2580     { "cavlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
2581     { "vlc",   "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
2582     { "cabac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
2583     { "ac",    "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
2584
2585     { "a53cc", "Use A53 Closed Captions (if available)", OFFSET(a53_cc), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, VE },
2586
2587     COMMON_OPTIONS
2588     { NULL },
2589 };
2590
2591 static const AVClass h264_videotoolbox_class = {
2592     .class_name = "h264_videotoolbox",
2593     .item_name  = av_default_item_name,
2594     .option     = h264_options,
2595     .version    = LIBAVUTIL_VERSION_INT,
2596 };
2597
2598 AVCodec ff_h264_videotoolbox_encoder = {
2599     .name             = "h264_videotoolbox",
2600     .long_name        = NULL_IF_CONFIG_SMALL("VideoToolbox H.264 Encoder"),
2601     .type             = AVMEDIA_TYPE_VIDEO,
2602     .id               = AV_CODEC_ID_H264,
2603     .priv_data_size   = sizeof(VTEncContext),
2604     .pix_fmts         = pix_fmts,
2605     .init             = vtenc_init,
2606     .encode2          = vtenc_frame,
2607     .close            = vtenc_close,
2608     .capabilities     = AV_CODEC_CAP_DELAY,
2609     .priv_class       = &h264_videotoolbox_class,
2610     .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE |
2611                         FF_CODEC_CAP_INIT_CLEANUP,
2612 };
2613
2614 static const AVOption hevc_options[] = {
2615     { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = HEVC_PROF_AUTO }, HEVC_PROF_AUTO, HEVC_PROF_COUNT, VE, "profile" },
2616     { "main",     "Main Profile",     0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN   }, INT_MIN, INT_MAX, VE, "profile" },
2617     { "main10",   "Main10 Profile",   0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN10 }, INT_MIN, INT_MAX, VE, "profile" },
2618
2619     COMMON_OPTIONS
2620     { NULL },
2621 };
2622
2623 static const AVClass hevc_videotoolbox_class = {
2624     .class_name = "hevc_videotoolbox",
2625     .item_name  = av_default_item_name,
2626     .option     = hevc_options,
2627     .version    = LIBAVUTIL_VERSION_INT,
2628 };
2629
2630 AVCodec ff_hevc_videotoolbox_encoder = {
2631     .name             = "hevc_videotoolbox",
2632     .long_name        = NULL_IF_CONFIG_SMALL("VideoToolbox H.265 Encoder"),
2633     .type             = AVMEDIA_TYPE_VIDEO,
2634     .id               = AV_CODEC_ID_HEVC,
2635     .priv_data_size   = sizeof(VTEncContext),
2636     .pix_fmts         = pix_fmts,
2637     .init             = vtenc_init,
2638     .encode2          = vtenc_frame,
2639     .close            = vtenc_close,
2640     .capabilities     = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE,
2641     .priv_class       = &hevc_videotoolbox_class,
2642     .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE |
2643                         FF_CODEC_CAP_INIT_CLEANUP,
2644     .wrapper_name     = "videotoolbox",
2645 };