avcodec/videotoolboxenc: fix encoding frame crash on iOS 11
1 /*
2  * copyright (c) 2015 Rick Kern <kernrj@gmail.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 #include <VideoToolbox/VideoToolbox.h>
22 #include <CoreVideo/CoreVideo.h>
23 #include <CoreMedia/CoreMedia.h>
24 #include <TargetConditionals.h>
25 #include <Availability.h>
26 #include "avcodec.h"
27 #include "libavutil/opt.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/avstring.h"
30 #include "libavcodec/avcodec.h"
31 #include "libavutil/pixdesc.h"
32 #include "internal.h"
33 #include <pthread.h>
34 #include "h264.h"
35 #include "h264_sei.h"
36 #include <dlfcn.h>
37
38 #if !HAVE_KCMVIDEOCODECTYPE_HEVC
39 enum { kCMVideoCodecType_HEVC = 'hvc1' };
40 #endif
41
42 #if !HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
43 enum { kCVPixelFormatType_420YpCbCr10BiPlanarFullRange = 'xf20' };
44 enum { kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange = 'x420' };
45 #endif
46
47 typedef OSStatus (*getParameterSetAtIndex)(CMFormatDescriptionRef videoDesc,
48                                            size_t parameterSetIndex,
49                                            const uint8_t **parameterSetPointerOut,
50                                            size_t *parameterSetSizeOut,
51                                            size_t *parameterSetCountOut,
52                                            int *NALUnitHeaderLengthOut);
53
54 //These symbols may not be present
55 static struct{
56     CFStringRef kCVImageBufferColorPrimaries_ITU_R_2020;
57     CFStringRef kCVImageBufferTransferFunction_ITU_R_2020;
58     CFStringRef kCVImageBufferYCbCrMatrix_ITU_R_2020;
59
60     CFStringRef kVTCompressionPropertyKey_H264EntropyMode;
61     CFStringRef kVTH264EntropyMode_CAVLC;
62     CFStringRef kVTH264EntropyMode_CABAC;
63
64     CFStringRef kVTProfileLevel_H264_Baseline_4_0;
65     CFStringRef kVTProfileLevel_H264_Baseline_4_2;
66     CFStringRef kVTProfileLevel_H264_Baseline_5_0;
67     CFStringRef kVTProfileLevel_H264_Baseline_5_1;
68     CFStringRef kVTProfileLevel_H264_Baseline_5_2;
69     CFStringRef kVTProfileLevel_H264_Baseline_AutoLevel;
70     CFStringRef kVTProfileLevel_H264_Main_4_2;
71     CFStringRef kVTProfileLevel_H264_Main_5_1;
72     CFStringRef kVTProfileLevel_H264_Main_5_2;
73     CFStringRef kVTProfileLevel_H264_Main_AutoLevel;
74     CFStringRef kVTProfileLevel_H264_High_3_0;
75     CFStringRef kVTProfileLevel_H264_High_3_1;
76     CFStringRef kVTProfileLevel_H264_High_3_2;
77     CFStringRef kVTProfileLevel_H264_High_4_0;
78     CFStringRef kVTProfileLevel_H264_High_4_1;
79     CFStringRef kVTProfileLevel_H264_High_4_2;
80     CFStringRef kVTProfileLevel_H264_High_5_1;
81     CFStringRef kVTProfileLevel_H264_High_5_2;
82     CFStringRef kVTProfileLevel_H264_High_AutoLevel;
83
84     CFStringRef kVTProfileLevel_HEVC_Main_AutoLevel;
85     CFStringRef kVTProfileLevel_HEVC_Main10_AutoLevel;
86
87     CFStringRef kVTCompressionPropertyKey_RealTime;
88
89     CFStringRef kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder;
90     CFStringRef kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder;
91
92     getParameterSetAtIndex CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
93 } compat_keys;
94
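/*
 * GET_SYM(symbol, default) resolves the named CoreVideo/VideoToolbox constant
 * at run time with dlsym(); if the symbol is absent (older macOS/iOS), a
 * CFSTR built from the given literal is used as a fallback.
 */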
95 #define GET_SYM(symbol, defaultVal)                                     \
96 do{                                                                     \
97     CFStringRef* handle = (CFStringRef*)dlsym(RTLD_DEFAULT, #symbol);   \
98     if(!handle)                                                         \
99         compat_keys.symbol = CFSTR(defaultVal);                         \
100     else                                                                \
101         compat_keys.symbol = *handle;                                   \
102 }while(0)
103
104 static pthread_once_t once_ctrl = PTHREAD_ONCE_INIT;
105
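/* Runs once (via pthread_once) to resolve the optional symbols above. */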
106 static void loadVTEncSymbols(){
107     compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex =
108         (getParameterSetAtIndex)dlsym(
109             RTLD_DEFAULT,
110             "CMVideoFormatDescriptionGetHEVCParameterSetAtIndex"
111         );
112
113     GET_SYM(kCVImageBufferColorPrimaries_ITU_R_2020,   "ITU_R_2020");
114     GET_SYM(kCVImageBufferTransferFunction_ITU_R_2020, "ITU_R_2020");
115     GET_SYM(kCVImageBufferYCbCrMatrix_ITU_R_2020,      "ITU_R_2020");
116
117     GET_SYM(kVTCompressionPropertyKey_H264EntropyMode, "H264EntropyMode");
118     GET_SYM(kVTH264EntropyMode_CAVLC, "CAVLC");
119     GET_SYM(kVTH264EntropyMode_CABAC, "CABAC");
120
121     GET_SYM(kVTProfileLevel_H264_Baseline_4_0,       "H264_Baseline_4_0");
122     GET_SYM(kVTProfileLevel_H264_Baseline_4_2,       "H264_Baseline_4_2");
123     GET_SYM(kVTProfileLevel_H264_Baseline_5_0,       "H264_Baseline_5_0");
124     GET_SYM(kVTProfileLevel_H264_Baseline_5_1,       "H264_Baseline_5_1");
125     GET_SYM(kVTProfileLevel_H264_Baseline_5_2,       "H264_Baseline_5_2");
126     GET_SYM(kVTProfileLevel_H264_Baseline_AutoLevel, "H264_Baseline_AutoLevel");
127     GET_SYM(kVTProfileLevel_H264_Main_4_2,           "H264_Main_4_2");
128     GET_SYM(kVTProfileLevel_H264_Main_5_1,           "H264_Main_5_1");
129     GET_SYM(kVTProfileLevel_H264_Main_5_2,           "H264_Main_5_2");
130     GET_SYM(kVTProfileLevel_H264_Main_AutoLevel,     "H264_Main_AutoLevel");
131     GET_SYM(kVTProfileLevel_H264_High_3_0,           "H264_High_3_0");
132     GET_SYM(kVTProfileLevel_H264_High_3_1,           "H264_High_3_1");
133     GET_SYM(kVTProfileLevel_H264_High_3_2,           "H264_High_3_2");
134     GET_SYM(kVTProfileLevel_H264_High_4_0,           "H264_High_4_0");
135     GET_SYM(kVTProfileLevel_H264_High_4_1,           "H264_High_4_1");
136     GET_SYM(kVTProfileLevel_H264_High_4_2,           "H264_High_4_2");
137     GET_SYM(kVTProfileLevel_H264_High_5_1,           "H264_High_5_1");
138     GET_SYM(kVTProfileLevel_H264_High_5_2,           "H264_High_5_2");
139     GET_SYM(kVTProfileLevel_H264_High_AutoLevel,     "H264_High_AutoLevel");
140
141     GET_SYM(kVTProfileLevel_HEVC_Main_AutoLevel,     "HEVC_Main_AutoLevel");
142     GET_SYM(kVTProfileLevel_HEVC_Main10_AutoLevel,   "HEVC_Main10_AutoLevel");
143
144     GET_SYM(kVTCompressionPropertyKey_RealTime, "RealTime");
145
146     GET_SYM(kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
147             "EnableHardwareAcceleratedVideoEncoder");
148     GET_SYM(kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
149             "RequireHardwareAcceleratedVideoEncoder");
150 }
151
152 typedef enum VT_H264Profile {
153     H264_PROF_AUTO,
154     H264_PROF_BASELINE,
155     H264_PROF_MAIN,
156     H264_PROF_HIGH,
157     H264_PROF_COUNT
158 } VT_H264Profile;
159
160 typedef enum VTH264Entropy{
161     VT_ENTROPY_NOT_SET,
162     VT_CAVLC,
163     VT_CABAC
164 } VTH264Entropy;
165
166 typedef enum VT_HEVCProfile {
167     HEVC_PROF_AUTO,
168     HEVC_PROF_MAIN,
169     HEVC_PROF_MAIN10,
170     HEVC_PROF_COUNT
171 } VT_HEVCProfile;
172
173 static const uint8_t start_code[] = { 0, 0, 0, 1 };
174
175 typedef struct ExtraSEI {
176     void *data;
177     size_t size;
178 } ExtraSEI;
179
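/*
 * Node in the singly linked queue of encoded samples, appended by the
 * VideoToolbox output callback and drained by vtenc_q_pop().
 */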
180 typedef struct BufNode {
181     CMSampleBufferRef cm_buffer;
182     ExtraSEI *sei;
183     struct BufNode* next;
184     int error;
185 } BufNode;
186
187 typedef struct VTEncContext {
188     AVClass *class;
189     enum AVCodecID codec_id;
190     VTCompressionSessionRef session;
191     CFStringRef ycbcr_matrix;
192     CFStringRef color_primaries;
193     CFStringRef transfer_function;
194     getParameterSetAtIndex get_param_set_func;
195
196     pthread_mutex_t lock;
197     pthread_cond_t  cv_sample_sent;
198
199     int async_error;
200
201     BufNode *q_head;
202     BufNode *q_tail;
203
204     int64_t frame_ct_out;
205     int64_t frame_ct_in;
206
207     int64_t first_pts;
208     int64_t dts_delta;
209
210     int64_t profile;
211     int64_t level;
212     int64_t entropy;
213     int64_t realtime;
214     int64_t frames_before;
215     int64_t frames_after;
216
217     int64_t allow_sw;
218     int64_t require_sw;
219
220     bool flushing;
221     bool has_b_frames;
222     bool warned_color_range;
223     bool a53_cc;
224 } VTEncContext;
225
226 static int vtenc_populate_extradata(AVCodecContext   *avctx,
227                                     CMVideoCodecType codec_type,
228                                     CFStringRef      profile_level,
229                                     CFNumberRef      gamma_level,
230                                     CFDictionaryRef  enc_info,
231                                     CFDictionaryRef  pixel_buffer_info);
232
233 /**
234  * NULL-safe release of *refPtr; sets the value to NULL afterwards.
235  */
236 static void vt_release_num(CFNumberRef* refPtr){
237     if (!*refPtr) {
238         return;
239     }
240
241     CFRelease(*refPtr);
242     *refPtr = NULL;
243 }
244
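/*
 * Records an asynchronous encode error and releases any queued samples.
 * Also called with err == 0 to clear the queue (see clear_frame_queue()).
 */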
245 static void set_async_error(VTEncContext *vtctx, int err)
246 {
247     BufNode *info;
248
249     pthread_mutex_lock(&vtctx->lock);
250
251     vtctx->async_error = err;
252
253     info = vtctx->q_head;
254     vtctx->q_head = vtctx->q_tail = NULL;
255
256     while (info) {
257         BufNode *next = info->next;
258         CFRelease(info->cm_buffer);
259         av_free(info);
260         info = next;
261     }
262
263     pthread_mutex_unlock(&vtctx->lock);
264 }
265
266 static void clear_frame_queue(VTEncContext *vtctx)
267 {
268     set_async_error(vtctx, 0);
269 }
270
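/*
 * Pops the next encoded sample (and its attached SEI, if any) off the queue.
 * Blocks on cv_sample_sent when wait is set; *buf is NULL when the queue is
 * empty or every submitted frame has been returned during flushing.
 */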
271 static int vtenc_q_pop(VTEncContext *vtctx, bool wait, CMSampleBufferRef *buf, ExtraSEI **sei)
272 {
273     BufNode *info;
274
275     pthread_mutex_lock(&vtctx->lock);
276
277     if (vtctx->async_error) {
278         pthread_mutex_unlock(&vtctx->lock);
279         return vtctx->async_error;
280     }
281
282     if (vtctx->flushing && vtctx->frame_ct_in == vtctx->frame_ct_out) {
283         *buf = NULL;
284
285         pthread_mutex_unlock(&vtctx->lock);
286         return 0;
287     }
288
289     while (!vtctx->q_head && !vtctx->async_error && wait) {
290         pthread_cond_wait(&vtctx->cv_sample_sent, &vtctx->lock);
291     }
292
293     if (!vtctx->q_head) {
294         pthread_mutex_unlock(&vtctx->lock);
295         *buf = NULL;
296         return 0;
297     }
298
299     info = vtctx->q_head;
300     vtctx->q_head = vtctx->q_head->next;
301     if (!vtctx->q_head) {
302         vtctx->q_tail = NULL;
303     }
304
305     pthread_mutex_unlock(&vtctx->lock);
306
307     *buf = info->cm_buffer;
308     if (sei && *buf) {
309         *sei = info->sei;
310     } else if (info->sei) {
311         if (info->sei->data) av_free(info->sei->data);
312         av_free(info->sei);
313     }
314     av_free(info);
315
316     vtctx->frame_ct_out++;
317
318     return 0;
319 }
320
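/*
 * Retains the sample buffer and appends it to the output queue, waking any
 * consumer blocked in vtenc_q_pop(). Runs on the VideoToolbox callback thread.
 */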
321 static void vtenc_q_push(VTEncContext *vtctx, CMSampleBufferRef buffer, ExtraSEI *sei)
322 {
323     BufNode *info = av_malloc(sizeof(BufNode));
324     if (!info) {
325         set_async_error(vtctx, AVERROR(ENOMEM));
326         return;
327     }
328
329     CFRetain(buffer);
330     info->cm_buffer = buffer;
331     info->sei = sei;
332     info->next = NULL;
333
334     pthread_mutex_lock(&vtctx->lock);
335     pthread_cond_signal(&vtctx->cv_sample_sent);
336
337     if (!vtctx->q_head) {
338         vtctx->q_head = info;
339     } else {
340         vtctx->q_tail->next = info;
341     }
342
343     vtctx->q_tail = info;
344
345     pthread_mutex_unlock(&vtctx->lock);
346 }
347
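/*
 * Counts the length-prefixed (AVCC-style) NAL units in the sample buffer by
 * walking the length fields; length_code_size is the size of each prefix.
 */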
348 static int count_nalus(size_t length_code_size,
349                        CMSampleBufferRef sample_buffer,
350                        int *count)
351 {
352     size_t offset = 0;
353     int status;
354     int nalu_ct = 0;
355     uint8_t size_buf[4];
356     size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
357     CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
358
359     if (length_code_size > 4)
360         return AVERROR_INVALIDDATA;
361
362     while (offset < src_size) {
363         size_t curr_src_len;
364         size_t box_len = 0;
365         size_t i;
366
367         status = CMBlockBufferCopyDataBytes(block,
368                                             offset,
369                                             length_code_size,
370                                             size_buf);
371
372         for (i = 0; i < length_code_size; i++) {
373             box_len <<= 8;
374             box_len |= size_buf[i];
375         }
376
377         curr_src_len = box_len + length_code_size;
378         offset += curr_src_len;
379
380         nalu_ct++;
381     }
382
383     *count = nalu_ct;
384     return 0;
385 }
386
387 static CMVideoCodecType get_cm_codec_type(enum AVCodecID id)
388 {
389     switch (id) {
390     case AV_CODEC_ID_H264: return kCMVideoCodecType_H264;
391     case AV_CODEC_ID_HEVC: return kCMVideoCodecType_HEVC;
392     default:               return 0;
393     }
394 }
395
396 /**
397  * Computes the total number of bytes needed to store every parameter set
398  * (SPS/PPS, plus VPS for HEVC) contained in vid_fmt, with each parameter
399  * set prefixed by an Annex B start code.
400  *
401  * get_params_size() only reports the size; copy_param_sets() below writes
402  * the parameter sets into a caller-provided buffer of that size.
403  */
404 static int get_params_size(
405     AVCodecContext              *avctx,
406     CMVideoFormatDescriptionRef vid_fmt,
407     size_t                      *size)
408 {
409     VTEncContext *vtctx = avctx->priv_data;
410     size_t total_size = 0;
411     size_t ps_count;
412     int is_count_bad = 0;
413     size_t i;
414     int status;
415     status = vtctx->get_param_set_func(vid_fmt,
416                                        0,
417                                        NULL,
418                                        NULL,
419                                        &ps_count,
420                                        NULL);
421     if (status) {
422         is_count_bad = 1;
423         ps_count     = 0;
424         status       = 0;
425     }
426
427     for (i = 0; i < ps_count || is_count_bad; i++) {
428         const uint8_t *ps;
429         size_t ps_size;
430         status = vtctx->get_param_set_func(vid_fmt,
431                                            i,
432                                            &ps,
433                                            &ps_size,
434                                            NULL,
435                                            NULL);
436         if (status) {
437             /*
438              * When ps_count is invalid, status != 0 ends the loop normally
439              * unless we didn't get any parameter sets.
440              */
441             if (i > 0 && is_count_bad) status = 0;
442
443             break;
444         }
445
446         total_size += ps_size + sizeof(start_code);
447     }
448
449     if (status) {
450         av_log(avctx, AV_LOG_ERROR, "Error getting parameter set sizes: %d\n", status);
451         return AVERROR_EXTERNAL;
452     }
453
454     *size = total_size;
455     return 0;
456 }
457
458 static int copy_param_sets(
459     AVCodecContext              *avctx,
460     CMVideoFormatDescriptionRef vid_fmt,
461     uint8_t                     *dst,
462     size_t                      dst_size)
463 {
464     VTEncContext *vtctx = avctx->priv_data;
465     size_t ps_count;
466     int is_count_bad = 0;
467     int status;
468     size_t offset = 0;
469     size_t i;
470
471     status = vtctx->get_param_set_func(vid_fmt,
472                                        0,
473                                        NULL,
474                                        NULL,
475                                        &ps_count,
476                                        NULL);
477     if (status) {
478         is_count_bad = 1;
479         ps_count     = 0;
480         status       = 0;
481     }
482
483
484     for (i = 0; i < ps_count || is_count_bad; i++) {
485         const uint8_t *ps;
486         size_t ps_size;
487         size_t next_offset;
488
489         status = vtctx->get_param_set_func(vid_fmt,
490                                            i,
491                                            &ps,
492                                            &ps_size,
493                                            NULL,
494                                            NULL);
495         if (status) {
496             if (i > 0 && is_count_bad) status = 0;
497
498             break;
499         }
500
501         next_offset = offset + sizeof(start_code) + ps_size;
502         if (dst_size < next_offset) {
503             av_log(avctx, AV_LOG_ERROR, "Error: buffer too small for parameter sets.\n");
504             return AVERROR_BUFFER_TOO_SMALL;
505         }
506
507         memcpy(dst + offset, start_code, sizeof(start_code));
508         offset += sizeof(start_code);
509
510         memcpy(dst + offset, ps, ps_size);
511         offset = next_offset;
512     }
513
514     if (status) {
515         av_log(avctx, AV_LOG_ERROR, "Error getting parameter set data: %d\n", status);
516         return AVERROR_EXTERNAL;
517     }
518
519     return 0;
520 }
521
522 static int set_extradata(AVCodecContext *avctx, CMSampleBufferRef sample_buffer)
523 {
524     CMVideoFormatDescriptionRef vid_fmt;
525     size_t total_size;
526     int status;
527
528     vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
529     if (!vid_fmt) {
530         av_log(avctx, AV_LOG_ERROR, "No video format.\n");
531         return AVERROR_EXTERNAL;
532     }
533
534     status = get_params_size(avctx, vid_fmt, &total_size);
535     if (status) {
536         av_log(avctx, AV_LOG_ERROR, "Could not get parameter sets.\n");
537         return status;
538     }
539
540     avctx->extradata = av_mallocz(total_size + AV_INPUT_BUFFER_PADDING_SIZE);
541     if (!avctx->extradata) {
542         return AVERROR(ENOMEM);
543     }
544     avctx->extradata_size = total_size;
545
546     status = copy_param_sets(avctx, vid_fmt, avctx->extradata, total_size);
547
548     if (status) {
549         av_log(avctx, AV_LOG_ERROR, "Could not copy param sets.\n");
550         return status;
551     }
552
553     return 0;
554 }
555
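/*
 * Invoked by VideoToolbox on an internal thread for every encoded frame.
 * Fills avctx->extradata from the first sample when global headers are
 * requested, then queues the sample with vtenc_q_push().
 */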
556 static void vtenc_output_callback(
557     void *ctx,
558     void *sourceFrameCtx,
559     OSStatus status,
560     VTEncodeInfoFlags flags,
561     CMSampleBufferRef sample_buffer)
562 {
563     AVCodecContext *avctx = ctx;
564     VTEncContext   *vtctx = avctx->priv_data;
565     ExtraSEI *sei = sourceFrameCtx;
566
567     if (vtctx->async_error) {
568         if(sample_buffer) CFRelease(sample_buffer);
569         return;
570     }
571
572     if (status) {
573         av_log(avctx, AV_LOG_ERROR, "Error encoding frame: %d\n", (int)status);
574         set_async_error(vtctx, AVERROR_EXTERNAL);
575         return;
576     }
577
578     if (!sample_buffer) {
579         return;
580     }
581
582     if (!avctx->extradata && (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
583         int set_status = set_extradata(avctx, sample_buffer);
584         if (set_status) {
585             set_async_error(vtctx, set_status);
586             return;
587         }
588     }
589
590     vtenc_q_push(vtctx, sample_buffer, sei);
591 }
592
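/* Queries the NAL unit length-prefix size used by the sample's format. */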
593 static int get_length_code_size(
594     AVCodecContext    *avctx,
595     CMSampleBufferRef sample_buffer,
596     size_t            *size)
597 {
598     VTEncContext *vtctx = avctx->priv_data;
599     CMVideoFormatDescriptionRef vid_fmt;
600     int isize;
601     int status;
602
603     vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
604     if (!vid_fmt) {
605         av_log(avctx, AV_LOG_ERROR, "Error getting buffer format description.\n");
606         return AVERROR_EXTERNAL;
607     }
608
609     status = vtctx->get_param_set_func(vid_fmt,
610                                        0,
611                                        NULL,
612                                        NULL,
613                                        NULL,
614                                        &isize);
615     if (status) {
616         av_log(avctx, AV_LOG_ERROR, "Error getting length code size: %d\n", status);
617         return AVERROR_EXTERNAL;
618     }
619
620     *size = isize;
621     return 0;
622 }
623
624 /*
625  * Returns true on success.
626  *
627  * If *profile_level_val is NULL and this method returns true, don't specify
628  * the profile/level to the encoder.
629  */
630 static bool get_vt_h264_profile_level(AVCodecContext *avctx,
631                                       CFStringRef    *profile_level_val)
632 {
633     VTEncContext *vtctx = avctx->priv_data;
634     int64_t profile = vtctx->profile;
635
636     if (profile == H264_PROF_AUTO && vtctx->level) {
637         //Need to pick a profile if level is not auto-selected.
638         profile = vtctx->has_b_frames ? H264_PROF_MAIN : H264_PROF_BASELINE;
639     }
640
641     *profile_level_val = NULL;
642
643     switch (profile) {
644         case H264_PROF_AUTO:
645             return true;
646
647         case H264_PROF_BASELINE:
648             switch (vtctx->level) {
649                 case  0: *profile_level_val =
650                                   compat_keys.kVTProfileLevel_H264_Baseline_AutoLevel; break;
651                 case 13: *profile_level_val = kVTProfileLevel_H264_Baseline_1_3;       break;
652                 case 30: *profile_level_val = kVTProfileLevel_H264_Baseline_3_0;       break;
653                 case 31: *profile_level_val = kVTProfileLevel_H264_Baseline_3_1;       break;
654                 case 32: *profile_level_val = kVTProfileLevel_H264_Baseline_3_2;       break;
655                 case 40: *profile_level_val =
656                                   compat_keys.kVTProfileLevel_H264_Baseline_4_0;       break;
657                 case 41: *profile_level_val = kVTProfileLevel_H264_Baseline_4_1;       break;
658                 case 42: *profile_level_val =
659                                   compat_keys.kVTProfileLevel_H264_Baseline_4_2;       break;
660                 case 50: *profile_level_val =
661                                   compat_keys.kVTProfileLevel_H264_Baseline_5_0;       break;
662                 case 51: *profile_level_val =
663                                   compat_keys.kVTProfileLevel_H264_Baseline_5_1;       break;
664                 case 52: *profile_level_val =
665                                   compat_keys.kVTProfileLevel_H264_Baseline_5_2;       break;
666             }
667             break;
668
669         case H264_PROF_MAIN:
670             switch (vtctx->level) {
671                 case  0: *profile_level_val =
672                                   compat_keys.kVTProfileLevel_H264_Main_AutoLevel; break;
673                 case 30: *profile_level_val = kVTProfileLevel_H264_Main_3_0;       break;
674                 case 31: *profile_level_val = kVTProfileLevel_H264_Main_3_1;       break;
675                 case 32: *profile_level_val = kVTProfileLevel_H264_Main_3_2;       break;
676                 case 40: *profile_level_val = kVTProfileLevel_H264_Main_4_0;       break;
677                 case 41: *profile_level_val = kVTProfileLevel_H264_Main_4_1;       break;
678                 case 42: *profile_level_val =
679                                   compat_keys.kVTProfileLevel_H264_Main_4_2;       break;
680                 case 50: *profile_level_val = kVTProfileLevel_H264_Main_5_0;       break;
681                 case 51: *profile_level_val =
682                                   compat_keys.kVTProfileLevel_H264_Main_5_1;       break;
683                 case 52: *profile_level_val =
684                                   compat_keys.kVTProfileLevel_H264_Main_5_2;       break;
685             }
686             break;
687
688         case H264_PROF_HIGH:
689             switch (vtctx->level) {
690                 case  0: *profile_level_val =
691                                   compat_keys.kVTProfileLevel_H264_High_AutoLevel; break;
692                 case 30: *profile_level_val =
693                                   compat_keys.kVTProfileLevel_H264_High_3_0;       break;
694                 case 31: *profile_level_val =
695                                   compat_keys.kVTProfileLevel_H264_High_3_1;       break;
696                 case 32: *profile_level_val =
697                                   compat_keys.kVTProfileLevel_H264_High_3_2;       break;
698                 case 40: *profile_level_val =
699                                   compat_keys.kVTProfileLevel_H264_High_4_0;       break;
700                 case 41: *profile_level_val =
701                                   compat_keys.kVTProfileLevel_H264_High_4_1;       break;
702                 case 42: *profile_level_val =
703                                   compat_keys.kVTProfileLevel_H264_High_4_2;       break;
704                 case 50: *profile_level_val = kVTProfileLevel_H264_High_5_0;       break;
705                 case 51: *profile_level_val =
706                                   compat_keys.kVTProfileLevel_H264_High_5_1;       break;
707                 case 52: *profile_level_val =
708                                   compat_keys.kVTProfileLevel_H264_High_5_2;       break;
709             }
710             break;
711     }
712
713     if (!*profile_level_val) {
714         av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
715         return false;
716     }
717
718     return true;
719 }
720
721 /*
722  * Returns true on success.
723  *
724  * If *profile_level_val is NULL and this method returns true, don't specify
725  * the profile/level to the encoder.
726  */
727 static bool get_vt_hevc_profile_level(AVCodecContext *avctx,
728                                       CFStringRef    *profile_level_val)
729 {
730     VTEncContext *vtctx = avctx->priv_data;
731     int64_t profile = vtctx->profile;
732
733     *profile_level_val = NULL;
734
735     switch (profile) {
736         case HEVC_PROF_AUTO:
737             return true;
738         case HEVC_PROF_MAIN:
739             *profile_level_val =
740                 compat_keys.kVTProfileLevel_HEVC_Main_AutoLevel;
741             break;
742         case HEVC_PROF_MAIN10:
743             *profile_level_val =
744                 compat_keys.kVTProfileLevel_HEVC_Main10_AutoLevel;
745             break;
746     }
747
748     if (!*profile_level_val) {
749         av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
750         return false;
751     }
752
753     return true;
754 }
755
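/*
 * Maps an AVPixelFormat/AVColorRange pair to the matching kCVPixelFormatType
 * constant; *range_guessed is set when the caller left the range unspecified.
 */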
756 static int get_cv_pixel_format(AVCodecContext* avctx,
757                                enum AVPixelFormat fmt,
758                                enum AVColorRange range,
759                                int* av_pixel_format,
760                                int* range_guessed)
761 {
762     if (range_guessed) *range_guessed = range != AVCOL_RANGE_MPEG &&
763                                         range != AVCOL_RANGE_JPEG;
764
765     //MPEG range is used when no range is set
766     if (fmt == AV_PIX_FMT_NV12) {
767         *av_pixel_format = range == AVCOL_RANGE_JPEG ?
768                                         kCVPixelFormatType_420YpCbCr8BiPlanarFullRange :
769                                         kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
770     } else if (fmt == AV_PIX_FMT_YUV420P) {
771         *av_pixel_format = range == AVCOL_RANGE_JPEG ?
772                                         kCVPixelFormatType_420YpCbCr8PlanarFullRange :
773                                         kCVPixelFormatType_420YpCbCr8Planar;
774     } else if (fmt == AV_PIX_FMT_P010LE) {
775         *av_pixel_format = range == AVCOL_RANGE_JPEG ?
776                                         kCVPixelFormatType_420YpCbCr10BiPlanarFullRange :
777                                         kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange;
778         *av_pixel_format = kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange;
779     } else {
780         return AVERROR(EINVAL);
781     }
782
783     return 0;
784 }
785
786 static void add_color_attr(AVCodecContext *avctx, CFMutableDictionaryRef dict) {
787     VTEncContext *vtctx = avctx->priv_data;
788
789     if (vtctx->color_primaries) {
790         CFDictionarySetValue(dict,
791                              kCVImageBufferColorPrimariesKey,
792                              vtctx->color_primaries);
793     }
794
795     if (vtctx->transfer_function) {
796         CFDictionarySetValue(dict,
797                              kCVImageBufferTransferFunctionKey,
798                              vtctx->transfer_function);
799     }
800
801     if (vtctx->ycbcr_matrix) {
802         CFDictionarySetValue(dict,
803                              kCVImageBufferYCbCrMatrixKey,
804                              vtctx->ycbcr_matrix);
805     }
806 }
807
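/*
 * Builds the source pixel-buffer attributes dictionary (pixel format, width,
 * height and colour attachments) passed to VTCompressionSessionCreate().
 */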
808 static int create_cv_pixel_buffer_info(AVCodecContext* avctx,
809                                        CFMutableDictionaryRef* dict)
810 {
811     CFNumberRef cv_color_format_num = NULL;
812     CFNumberRef width_num = NULL;
813     CFNumberRef height_num = NULL;
814     CFMutableDictionaryRef pixel_buffer_info = NULL;
815     int cv_color_format;
816     int status = get_cv_pixel_format(avctx,
817                                      avctx->pix_fmt,
818                                      avctx->color_range,
819                                      &cv_color_format,
820                                      NULL);
821     if (status) return status;
822
823     pixel_buffer_info = CFDictionaryCreateMutable(
824                             kCFAllocatorDefault,
825                             20,
826                             &kCFCopyStringDictionaryKeyCallBacks,
827                             &kCFTypeDictionaryValueCallBacks);
828
829     if (!pixel_buffer_info) goto pbinfo_nomem;
830
831     cv_color_format_num = CFNumberCreate(kCFAllocatorDefault,
832                                          kCFNumberSInt32Type,
833                                          &cv_color_format);
834     if (!cv_color_format_num) goto pbinfo_nomem;
835
836     CFDictionarySetValue(pixel_buffer_info,
837                          kCVPixelBufferPixelFormatTypeKey,
838                          cv_color_format_num);
839     vt_release_num(&cv_color_format_num);
840
841     width_num = CFNumberCreate(kCFAllocatorDefault,
842                                kCFNumberSInt32Type,
843                                &avctx->width);
844     if (!width_num) return AVERROR(ENOMEM);
845
846     CFDictionarySetValue(pixel_buffer_info,
847                          kCVPixelBufferWidthKey,
848                          width_num);
849     vt_release_num(&width_num);
850
851     height_num = CFNumberCreate(kCFAllocatorDefault,
852                                 kCFNumberSInt32Type,
853                                 &avctx->height);
854     if (!height_num) goto pbinfo_nomem;
855
856     CFDictionarySetValue(pixel_buffer_info,
857                          kCVPixelBufferHeightKey,
858                          height_num);
859     vt_release_num(&height_num);
860
861     add_color_attr(avctx, pixel_buffer_info);
862
863     *dict = pixel_buffer_info;
864     return 0;
865
866 pbinfo_nomem:
867     vt_release_num(&cv_color_format_num);
868     vt_release_num(&width_num);
869     vt_release_num(&height_num);
870     if (pixel_buffer_info) CFRelease(pixel_buffer_info);
871
872     return AVERROR(ENOMEM);
873 }
874
875 static int get_cv_color_primaries(AVCodecContext *avctx,
876                                   CFStringRef *primaries)
877 {
878     enum AVColorPrimaries pri = avctx->color_primaries;
879     switch (pri) {
880         case AVCOL_PRI_UNSPECIFIED:
881             *primaries = NULL;
882             break;
883
884         case AVCOL_PRI_BT709:
885             *primaries = kCVImageBufferColorPrimaries_ITU_R_709_2;
886             break;
887
888         case AVCOL_PRI_BT2020:
889             *primaries = compat_keys.kCVImageBufferColorPrimaries_ITU_R_2020;
890             break;
891
892         default:
893             av_log(avctx, AV_LOG_ERROR, "Color primaries %s is not supported.\n", av_color_primaries_name(pri));
894             *primaries = NULL;
895             return -1;
896     }
897
898     return 0;
899 }
900
901 static int get_cv_transfer_function(AVCodecContext *avctx,
902                                     CFStringRef *transfer_fnc,
903                                     CFNumberRef *gamma_level)
904 {
905     enum AVColorTransferCharacteristic trc = avctx->color_trc;
906     Float32 gamma;
907     *gamma_level = NULL;
908
909     switch (trc) {
910         case AVCOL_TRC_UNSPECIFIED:
911             *transfer_fnc = NULL;
912             break;
913
914         case AVCOL_TRC_BT709:
915             *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_709_2;
916             break;
917
918         case AVCOL_TRC_SMPTE240M:
919             *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_240M_1995;
920             break;
921
922 #if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ
923         case AVCOL_TRC_SMPTE2084:
924             *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_ST_2084_PQ;
925             break;
926 #endif
927 #if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR
928         case AVCOL_TRC_LINEAR:
929             *transfer_fnc = kCVImageBufferTransferFunction_Linear;
930             break;
931 #endif
932 #if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG
933         case AVCOL_TRC_ARIB_STD_B67:
934             *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_2100_HLG;
935             break;
936 #endif
937
938         case AVCOL_TRC_GAMMA22:
939             gamma = 2.2;
940             *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
941             *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
942             break;
943
944         case AVCOL_TRC_GAMMA28:
945             gamma = 2.8;
946             *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
947             *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
948             break;
949
950         case AVCOL_TRC_BT2020_10:
951         case AVCOL_TRC_BT2020_12:
952             *transfer_fnc = compat_keys.kCVImageBufferTransferFunction_ITU_R_2020;
953             break;
954
955         default:
956             *transfer_fnc = NULL;
957             av_log(avctx, AV_LOG_ERROR, "Transfer function %s is not supported.\n", av_color_transfer_name(trc));
958             return -1;
959     }
960
961     return 0;
962 }
963
964 static int get_cv_ycbcr_matrix(AVCodecContext *avctx, CFStringRef *matrix) {
965     switch(avctx->colorspace) {
966         case AVCOL_SPC_BT709:
967             *matrix = kCVImageBufferYCbCrMatrix_ITU_R_709_2;
968             break;
969
970         case AVCOL_SPC_UNSPECIFIED:
971             *matrix = NULL;
972             break;
973
974         case AVCOL_SPC_BT470BG:
975         case AVCOL_SPC_SMPTE170M:
976             *matrix = kCVImageBufferYCbCrMatrix_ITU_R_601_4;
977             break;
978
979         case AVCOL_SPC_SMPTE240M:
980             *matrix = kCVImageBufferYCbCrMatrix_SMPTE_240M_1995;
981             break;
982
983         case AVCOL_SPC_BT2020_NCL:
984             *matrix = compat_keys.kCVImageBufferYCbCrMatrix_ITU_R_2020;
985             break;
986
987         default:
988             av_log(avctx, AV_LOG_ERROR, "Color space %s is not supported.\n", av_color_space_name(avctx->colorspace));
989             return -1;
990     }
991
992     return 0;
993 }
994
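/*
 * Creates the VTCompressionSession and applies the session properties:
 * bitrate and data-rate limits, profile/level, keyframe interval, pixel
 * aspect ratio, colour metadata, frame reordering, entropy mode and realtime.
 */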
995 static int vtenc_create_encoder(AVCodecContext   *avctx,
996                                 CMVideoCodecType codec_type,
997                                 CFStringRef      profile_level,
998                                 CFNumberRef      gamma_level,
999                                 CFDictionaryRef  enc_info,
1000                                 CFDictionaryRef  pixel_buffer_info,
1001                                 VTCompressionSessionRef *session)
1002 {
1003     VTEncContext *vtctx = avctx->priv_data;
1004     SInt32       bit_rate = avctx->bit_rate;
1005     SInt32       max_rate = avctx->rc_max_rate;
1006     CFNumberRef  bit_rate_num;
1007     CFNumberRef  bytes_per_second;
1008     CFNumberRef  one_second;
1009     CFArrayRef   data_rate_limits;
1010     int64_t      bytes_per_second_value = 0;
1011     int64_t      one_second_value = 0;
1012     void         *nums[2];
1013
1014     int status = VTCompressionSessionCreate(kCFAllocatorDefault,
1015                                             avctx->width,
1016                                             avctx->height,
1017                                             codec_type,
1018                                             enc_info,
1019                                             pixel_buffer_info,
1020                                             kCFAllocatorDefault,
1021                                             vtenc_output_callback,
1022                                             avctx,
1023                                             session);
1024
1025     if (status || !vtctx->session) {
1026         av_log(avctx, AV_LOG_ERROR, "Error: cannot create compression session: %d\n", status);
1027
1028 #if !TARGET_OS_IPHONE
1029         if (!vtctx->allow_sw) {
1030             av_log(avctx, AV_LOG_ERROR, "Try -allow_sw 1. The hardware encoder may be busy, or not supported.\n");
1031         }
1032 #endif
1033
1034         return AVERROR_EXTERNAL;
1035     }
1036
1037     bit_rate_num = CFNumberCreate(kCFAllocatorDefault,
1038                                   kCFNumberSInt32Type,
1039                                   &bit_rate);
1040     if (!bit_rate_num) return AVERROR(ENOMEM);
1041
1042     status = VTSessionSetProperty(vtctx->session,
1043                                   kVTCompressionPropertyKey_AverageBitRate,
1044                                   bit_rate_num);
1045     CFRelease(bit_rate_num);
1046
1047     if (status) {
1048         av_log(avctx, AV_LOG_ERROR, "Error setting bitrate property: %d\n", status);
1049         return AVERROR_EXTERNAL;
1050     }
1051
1052     if (vtctx->codec_id == AV_CODEC_ID_H264 && max_rate > 0) {
1053         // kVTCompressionPropertyKey_DataRateLimits is not available for HEVC
1054         bytes_per_second_value = max_rate >> 3;
1055         bytes_per_second = CFNumberCreate(kCFAllocatorDefault,
1056                                           kCFNumberSInt64Type,
1057                                           &bytes_per_second_value);
1058         if (!bytes_per_second) {
1059             return AVERROR(ENOMEM);
1060         }
1061         one_second_value = 1;
1062         one_second = CFNumberCreate(kCFAllocatorDefault,
1063                                     kCFNumberSInt64Type,
1064                                     &one_second_value);
1065         if (!one_second) {
1066             CFRelease(bytes_per_second);
1067             return AVERROR(ENOMEM);
1068         }
1069         nums[0] = (void *)bytes_per_second;
1070         nums[1] = (void *)one_second;
1071         data_rate_limits = CFArrayCreate(kCFAllocatorDefault,
1072                                          (const void **)nums,
1073                                          2,
1074                                          &kCFTypeArrayCallBacks);
1075
1076         if (!data_rate_limits) {
1077             CFRelease(bytes_per_second);
1078             CFRelease(one_second);
1079             return AVERROR(ENOMEM);
1080         }
1081         status = VTSessionSetProperty(vtctx->session,
1082                                       kVTCompressionPropertyKey_DataRateLimits,
1083                                       data_rate_limits);
1084
1085         CFRelease(bytes_per_second);
1086         CFRelease(one_second);
1087         CFRelease(data_rate_limits);
1088
1089         if (status) {
1090             av_log(avctx, AV_LOG_ERROR, "Error setting max bitrate property: %d\n", status);
1091             return AVERROR_EXTERNAL;
1092         }
1093     }
1094
1095     if (vtctx->codec_id == AV_CODEC_ID_H264) {
1096         // kVTCompressionPropertyKey_ProfileLevel is not available for HEVC
1097         if (profile_level) {
1098             status = VTSessionSetProperty(vtctx->session,
1099                                         kVTCompressionPropertyKey_ProfileLevel,
1100                                         profile_level);
1101             if (status) {
1102                 av_log(avctx, AV_LOG_ERROR, "Error setting profile/level property: %d\n", status);
1103             }
1104         }
1105     }
1106
1107     if (avctx->gop_size > 0) {
1108         CFNumberRef interval = CFNumberCreate(kCFAllocatorDefault,
1109                                               kCFNumberIntType,
1110                                               &avctx->gop_size);
1111         if (!interval) {
1112             return AVERROR(ENOMEM);
1113         }
1114
1115         status = VTSessionSetProperty(vtctx->session,
1116                                       kVTCompressionPropertyKey_MaxKeyFrameInterval,
1117                                       interval);
1118         CFRelease(interval);
1119
1120         if (status) {
1121             av_log(avctx, AV_LOG_ERROR, "Error setting 'max key-frame interval' property: %d\n", status);
1122             return AVERROR_EXTERNAL;
1123         }
1124     }
1125
1126     if (vtctx->frames_before) {
1127         status = VTSessionSetProperty(vtctx->session,
1128                                       kVTCompressionPropertyKey_MoreFramesBeforeStart,
1129                                       kCFBooleanTrue);
1130
1131         if (status == kVTPropertyNotSupportedErr) {
1132             av_log(avctx, AV_LOG_WARNING, "frames_before property is not supported on this device. Ignoring.\n");
1133         } else if (status) {
1134             av_log(avctx, AV_LOG_ERROR, "Error setting frames_before property: %d\n", status);
1135         }
1136     }
1137
1138     if (vtctx->frames_after) {
1139         status = VTSessionSetProperty(vtctx->session,
1140                                       kVTCompressionPropertyKey_MoreFramesAfterEnd,
1141                                       kCFBooleanTrue);
1142
1143         if (status == kVTPropertyNotSupportedErr) {
1144             av_log(avctx, AV_LOG_WARNING, "frames_after property is not supported on this device. Ignoring.\n");
1145         } else if (status) {
1146             av_log(avctx, AV_LOG_ERROR, "Error setting frames_after property: %d\n", status);
1147         }
1148     }
1149
1150     if (avctx->sample_aspect_ratio.num != 0) {
1151         CFNumberRef num;
1152         CFNumberRef den;
1153         CFMutableDictionaryRef par;
1154         AVRational *avpar = &avctx->sample_aspect_ratio;
1155
1156         av_reduce(&avpar->num, &avpar->den,
1157                    avpar->num,  avpar->den,
1158                   0xFFFFFFFF);
1159
1160         num = CFNumberCreate(kCFAllocatorDefault,
1161                              kCFNumberIntType,
1162                              &avpar->num);
1163
1164         den = CFNumberCreate(kCFAllocatorDefault,
1165                              kCFNumberIntType,
1166                              &avpar->den);
1167
1168
1169
1170         par = CFDictionaryCreateMutable(kCFAllocatorDefault,
1171                                         2,
1172                                         &kCFCopyStringDictionaryKeyCallBacks,
1173                                         &kCFTypeDictionaryValueCallBacks);
1174
1175         if (!par || !num || !den) {
1176             if (par) CFRelease(par);
1177             if (num) CFRelease(num);
1178             if (den) CFRelease(den);
1179
1180             return AVERROR(ENOMEM);
1181         }
1182
1183         CFDictionarySetValue(
1184             par,
1185             kCMFormatDescriptionKey_PixelAspectRatioHorizontalSpacing,
1186             num);
1187
1188         CFDictionarySetValue(
1189             par,
1190             kCMFormatDescriptionKey_PixelAspectRatioVerticalSpacing,
1191             den);
1192
1193         status = VTSessionSetProperty(vtctx->session,
1194                                       kVTCompressionPropertyKey_PixelAspectRatio,
1195                                       par);
1196
1197         CFRelease(par);
1198         CFRelease(num);
1199         CFRelease(den);
1200
1201         if (status) {
1202             av_log(avctx,
1203                    AV_LOG_ERROR,
1204                    "Error setting pixel aspect ratio to %d:%d: %d.\n",
1205                    avctx->sample_aspect_ratio.num,
1206                    avctx->sample_aspect_ratio.den,
1207                    status);
1208
1209             return AVERROR_EXTERNAL;
1210         }
1211     }
1212
1213
1214     if (vtctx->transfer_function) {
1215         status = VTSessionSetProperty(vtctx->session,
1216                                       kVTCompressionPropertyKey_TransferFunction,
1217                                       vtctx->transfer_function);
1218
1219         if (status) {
1220             av_log(avctx, AV_LOG_WARNING, "Could not set transfer function: %d\n", status);
1221         }
1222     }
1223
1224
1225     if (vtctx->ycbcr_matrix) {
1226         status = VTSessionSetProperty(vtctx->session,
1227                                       kVTCompressionPropertyKey_YCbCrMatrix,
1228                                       vtctx->ycbcr_matrix);
1229
1230         if (status) {
1231             av_log(avctx, AV_LOG_WARNING, "Could not set ycbcr matrix: %d\n", status);
1232         }
1233     }
1234
1235
1236     if (vtctx->color_primaries) {
1237         status = VTSessionSetProperty(vtctx->session,
1238                                       kVTCompressionPropertyKey_ColorPrimaries,
1239                                       vtctx->color_primaries);
1240
1241         if (status) {
1242             av_log(avctx, AV_LOG_WARNING, "Could not set color primaries: %d\n", status);
1243         }
1244     }
1245
1246     if (gamma_level) {
1247         status = VTSessionSetProperty(vtctx->session,
1248                                       kCVImageBufferGammaLevelKey,
1249                                       gamma_level);
1250
1251         if (status) {
1252             av_log(avctx, AV_LOG_WARNING, "Could not set gamma level: %d\n", status);
1253         }
1254     }
1255
1256     if (!vtctx->has_b_frames) {
1257         status = VTSessionSetProperty(vtctx->session,
1258                                       kVTCompressionPropertyKey_AllowFrameReordering,
1259                                       kCFBooleanFalse);
1260
1261         if (status) {
1262             av_log(avctx, AV_LOG_ERROR, "Error setting 'allow frame reordering' property: %d\n", status);
1263             return AVERROR_EXTERNAL;
1264         }
1265     }
1266
1267     if (vtctx->entropy != VT_ENTROPY_NOT_SET) {
1268         CFStringRef entropy = vtctx->entropy == VT_CABAC ?
1269                                 compat_keys.kVTH264EntropyMode_CABAC:
1270                                 compat_keys.kVTH264EntropyMode_CAVLC;
1271
1272         status = VTSessionSetProperty(vtctx->session,
1273                                       compat_keys.kVTCompressionPropertyKey_H264EntropyMode,
1274                                       entropy);
1275
1276         if (status) {
1277             av_log(avctx, AV_LOG_ERROR, "Error setting entropy property: %d\n", status);
1278         }
1279     }
1280
1281     if (vtctx->realtime) {
1282         status = VTSessionSetProperty(vtctx->session,
1283                                       compat_keys.kVTCompressionPropertyKey_RealTime,
1284                                       kCFBooleanTrue);
1285
1286         if (status) {
1287             av_log(avctx, AV_LOG_ERROR, "Error setting realtime property: %d\n", status);
1288         }
1289     }
1290
1291     status = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
1292     if (status) {
1293         av_log(avctx, AV_LOG_ERROR, "Error: cannot prepare encoder: %d\n", status);
1294         return AVERROR_EXTERNAL;
1295     }
1296
1297     return 0;
1298 }
1299
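/*
 * Shared setup for vtenc_init(): picks the codec type, profile/level and
 * hardware/software encoder specification, builds the pixel-buffer info,
 * optionally pre-populates extradata, then creates the compression session.
 */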
1300 static int vtenc_configure_encoder(AVCodecContext *avctx)
1301 {
1302     CFMutableDictionaryRef enc_info;
1303     CFMutableDictionaryRef pixel_buffer_info;
1304     CMVideoCodecType       codec_type;
1305     VTEncContext           *vtctx = avctx->priv_data;
1306     CFStringRef            profile_level;
1307     CFNumberRef            gamma_level = NULL;
1308     int                    status;
1309
1310     codec_type = get_cm_codec_type(avctx->codec_id);
1311     if (!codec_type) {
1312         av_log(avctx, AV_LOG_ERROR, "Error: no mapping for AVCodecID %d\n", avctx->codec_id);
1313         return AVERROR(EINVAL);
1314     }
1315
1316     vtctx->codec_id = avctx->codec_id;
1317
1318     if (vtctx->codec_id == AV_CODEC_ID_H264) {
1319         vtctx->get_param_set_func = CMVideoFormatDescriptionGetH264ParameterSetAtIndex;
1320
1321         vtctx->has_b_frames = avctx->max_b_frames > 0;
1322         if(vtctx->has_b_frames && vtctx->profile == H264_PROF_BASELINE){
1323             av_log(avctx, AV_LOG_WARNING, "Cannot use B-frames with baseline profile. Output will not contain B-frames.\n");
1324             vtctx->has_b_frames = false;
1325         }
1326
1327         if (vtctx->entropy == VT_CABAC && vtctx->profile == H264_PROF_BASELINE) {
1328             av_log(avctx, AV_LOG_WARNING, "CABAC entropy requires 'main' or 'high' profile, but baseline was requested. Encode will not use CABAC entropy.\n");
1329             vtctx->entropy = VT_ENTROPY_NOT_SET;
1330         }
1331
1332         if (!get_vt_h264_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
1333     } else {
1334         vtctx->get_param_set_func = compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
1335         if (!vtctx->get_param_set_func) return AVERROR(EINVAL);
1336         if (!get_vt_hevc_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
1337     }
1338
1339     enc_info = CFDictionaryCreateMutable(
1340         kCFAllocatorDefault,
1341         20,
1342         &kCFCopyStringDictionaryKeyCallBacks,
1343         &kCFTypeDictionaryValueCallBacks
1344     );
1345
1346     if (!enc_info) return AVERROR(ENOMEM);
1347
1348 #if !TARGET_OS_IPHONE
1349     if(vtctx->require_sw) {
1350         CFDictionarySetValue(enc_info,
1351                              compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
1352                              kCFBooleanFalse);
1353     } else if (!vtctx->allow_sw) {
1354         CFDictionarySetValue(enc_info,
1355                              compat_keys.kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
1356                              kCFBooleanTrue);
1357     } else {
1358         CFDictionarySetValue(enc_info,
1359                              compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
1360                              kCFBooleanTrue);
1361     }
1362 #endif
1363
1364     if (avctx->pix_fmt != AV_PIX_FMT_VIDEOTOOLBOX) {
1365         status = create_cv_pixel_buffer_info(avctx, &pixel_buffer_info);
1366         if (status)
1367             goto init_cleanup;
1368     } else {
1369         pixel_buffer_info = NULL;
1370     }
1371
1372     vtctx->dts_delta = vtctx->has_b_frames ? -1 : 0;
1373
1374     get_cv_transfer_function(avctx, &vtctx->transfer_function, &gamma_level);
1375     get_cv_ycbcr_matrix(avctx, &vtctx->ycbcr_matrix);
1376     get_cv_color_primaries(avctx, &vtctx->color_primaries);
1377
1378
1379     if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
1380         status = vtenc_populate_extradata(avctx,
1381                                           codec_type,
1382                                           profile_level,
1383                                           gamma_level,
1384                                           enc_info,
1385                                           pixel_buffer_info);
1386         if (status)
1387             goto init_cleanup;
1388     }
1389
1390     status = vtenc_create_encoder(avctx,
1391                                   codec_type,
1392                                   profile_level,
1393                                   gamma_level,
1394                                   enc_info,
1395                                   pixel_buffer_info,
1396                                   &vtctx->session);
1397
1398 init_cleanup:
1399     if (gamma_level)
1400         CFRelease(gamma_level);
1401
1402     if (pixel_buffer_info)
1403         CFRelease(pixel_buffer_info);
1404
1405     CFRelease(enc_info);
1406
1407     return status;
1408 }
1409
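/*
 * Encoder init: resolves optional symbols once, configures the session, then
 * asks the encoder whether frame reordering (B-frames) is actually enabled,
 * since some devices ignore the request.
 */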
1410 static av_cold int vtenc_init(AVCodecContext *avctx)
1411 {
1412     VTEncContext    *vtctx = avctx->priv_data;
1413     CFBooleanRef    has_b_frames_cfbool;
1414     int             status;
1415
1416     pthread_once(&once_ctrl, loadVTEncSymbols);
1417
1418     pthread_mutex_init(&vtctx->lock, NULL);
1419     pthread_cond_init(&vtctx->cv_sample_sent, NULL);
1420
1421     vtctx->session = NULL;
1422     status = vtenc_configure_encoder(avctx);
1423     if (status) return status;
1424
1425     status = VTSessionCopyProperty(vtctx->session,
1426                                    kVTCompressionPropertyKey_AllowFrameReordering,
1427                                    kCFAllocatorDefault,
1428                                    &has_b_frames_cfbool);
1429
1430     if (!status && has_b_frames_cfbool) {
1431         //Some devices don't output B-frames for main profile, even if requested.
1432         vtctx->has_b_frames = CFBooleanGetValue(has_b_frames_cfbool);
1433         CFRelease(has_b_frames_cfbool);
1434     }
1435     avctx->has_b_frames = vtctx->has_b_frames;
1436
1437     return 0;
1438 }
1439
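/* Reads the sample attachments to decide whether the frame is a sync (key) frame. */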
1440 static void vtenc_get_frame_info(CMSampleBufferRef buffer, bool *is_key_frame)
1441 {
1442     CFArrayRef      attachments;
1443     CFDictionaryRef attachment;
1444     CFBooleanRef    not_sync;
1445     CFIndex         len;
1446
1447     attachments = CMSampleBufferGetSampleAttachmentsArray(buffer, false);
1448     len = !attachments ? 0 : CFArrayGetCount(attachments);
1449
1450     if (!len) {
1451         *is_key_frame = true;
1452         return;
1453     }
1454
1455     attachment = CFArrayGetValueAtIndex(attachments, 0);
1456
1457     if (CFDictionaryGetValueIfPresent(attachment,
1458                                       kCMSampleAttachmentKey_NotSync,
1459                                       (const void **)&not_sync))
1460     {
1461         *is_key_frame = !CFBooleanGetValue(not_sync);
1462     } else {
1463         *is_key_frame = true;
1464     }
1465 }
1466
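/* True for NAL unit types that follow any SEI messages within an access unit. */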
1467 static int is_post_sei_nal_type(int nal_type){
1468     return nal_type != H264_NAL_SEI &&
1469            nal_type != H264_NAL_SPS &&
1470            nal_type != H264_NAL_PPS &&
1471            nal_type != H264_NAL_AUD;
1472 }
1473
1474 /*
1475  * Finds the end of the SEI payloads within an SEI NAL unit. Sets *sei_end,
1476  * returns the spanned byte count (0 if not an SEI NAL unit, negative on error).
1477  */
1478 static int find_sei_end(AVCodecContext *avctx,
1479                         uint8_t        *nal_data,
1480                         size_t          nal_size,
1481                         uint8_t       **sei_end)
1482 {
1483     int nal_type;
1484     size_t sei_payload_size = 0;
1485     int sei_payload_type = 0;
1486     *sei_end = NULL;
1487     uint8_t *nal_start = nal_data;
1488
1489     if (!nal_size)
1490         return 0;
1491
1492     nal_type = *nal_data & 0x1F;
1493     if (nal_type != H264_NAL_SEI)
1494         return 0;
1495
1496     nal_data++;
1497     nal_size--;
1498
1499     if (nal_data[nal_size - 1] == 0x80)
1500         nal_size--;
1501
1502     while (nal_size > 0 && *nal_data > 0) {
1503         do{
1504             sei_payload_type += *nal_data;
1505             nal_data++;
1506             nal_size--;
1507         } while (nal_size > 0 && *nal_data == 0xFF);
1508
1509         if (!nal_size) {
1510             av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL unit while parsing payload type.\n");
1511             return AVERROR_INVALIDDATA;
1512         }
1513
1514         do{
1515             sei_payload_size += *nal_data;
1516             nal_data++;
1517             nal_size--;
1518         } while (nal_size > 0 && *nal_data == 0xFF);
1519
1520         if (nal_size < sei_payload_size) {
1521             av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL unit while parsing payload size.\n");
1522             return AVERROR_INVALIDDATA;
1523         }
1524
1525         nal_data += sei_payload_size;
1526         nal_size -= sei_payload_size;
1527     }
1528
1529     *sei_end = nal_data;
1530
1531     return nal_data - nal_start + 1;
1532 }
1533
1534 /**
1535  * Copies the data, inserting emulation prevention bytes as needed.
1536  * Existing data in the destination can be taken into account by providing
1537  * dst with a dst_offset > 0.
1538  *
1539  * @return The number of bytes copied on success. On failure, the negative of
1540  *         the number of bytes needed to copy src is returned.
1541  */
1542 static int copy_emulation_prev(const uint8_t *src,
1543                                size_t         src_size,
1544                                uint8_t       *dst,
1545                                ssize_t        dst_offset,
1546                                size_t         dst_size)
1547 {
1548     int zeros = 0;
1549     int wrote_bytes;
1550     uint8_t* dst_start;
1551     uint8_t* dst_end = dst + dst_size;
1552     const uint8_t* src_end = src + src_size;
1553     int start_at = dst_offset > 2 ? dst_offset - 2 : 0;
1554     int i;
1555     for (i = start_at; i < dst_offset && i < dst_size; i++) {
1556         if (!dst[i])
1557             zeros++;
1558         else
1559             zeros = 0;
1560     }
1561
1562     dst += dst_offset;
1563     dst_start = dst;
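         /* In the loop below: after two consecutive zero bytes, a source byte of
          * 0x00..0x03 would complete a start-code-like sequence, so the emulation
          * prevention byte 0x03 is written first. */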
1564     for (; src < src_end; src++, dst++) {
1565         if (zeros == 2) {
1566             int insert_ep3_byte = *src <= 3;
1567             if (insert_ep3_byte) {
1568                 if (dst < dst_end)
1569                     *dst = 3;
1570                 dst++;
1571             }
1572
1573             zeros = 0;
1574         }
1575
1576         if (dst < dst_end)
1577             *dst = *src;
1578
1579         if (!*src)
1580             zeros++;
1581         else
1582             zeros = 0;
1583     }
1584
1585     wrote_bytes = dst - dst_start;
1586
1587     if (dst > dst_end)
1588         return -wrote_bytes;
1589
1590     return wrote_bytes;
1591 }
1592
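     /*
      * Writes one SEI message into dst: the payload type and payload size are
      * each emitted as a run of bytes summing to the value (at most 255 per
      * byte), followed by the payload with emulation prevention bytes applied.
      * Returns the number of bytes written or AVERROR_BUFFER_TOO_SMALL.
      */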
1593 static int write_sei(const ExtraSEI *sei,
1594                      int             sei_type,
1595                      uint8_t        *dst,
1596                      size_t          dst_size)
1597 {
1598     uint8_t *sei_start = dst;
1599     size_t remaining_sei_size = sei->size;
1600     size_t remaining_dst_size = dst_size;
1601     int header_bytes;
1602     int bytes_written;
1603     ssize_t offset;
1604
1605     if (!remaining_dst_size)
1606         return AVERROR_BUFFER_TOO_SMALL;
1607
1608     while (sei_type && remaining_dst_size != 0) {
1609         int sei_byte = sei_type > 255 ? 255 : sei_type;
1610         *dst = sei_byte;
1611
1612         sei_type -= sei_byte;
1613         dst++;
1614         remaining_dst_size--;
1615     }
1616
1617     if (!remaining_dst_size)
1618         return AVERROR_BUFFER_TOO_SMALL;
1619
1620     while (remaining_sei_size && remaining_dst_size != 0) {
1621         int size_byte = remaining_sei_size > 255 ? 255 : remaining_sei_size;
1622         *dst = size_byte;
1623
1624         remaining_sei_size -= size_byte;
1625         dst++;
1626         remaining_dst_size--;
1627     }
1628
1629     if (remaining_dst_size < sei->size)
1630         return AVERROR_BUFFER_TOO_SMALL;
1631
1632     header_bytes = dst - sei_start;
1633
1634     offset = header_bytes;
1635     bytes_written = copy_emulation_prev(sei->data,
1636                                         sei->size,
1637                                         sei_start,
1638                                         offset,
1639                                         dst_size);
1640     if (bytes_written < 0)
1641         return AVERROR_BUFFER_TOO_SMALL;
1642
1643     bytes_written += header_bytes;
1644     return bytes_written;
1645 }
1646
1647 /**
1648  * Copies NAL units and replaces length codes with
1649  * H.264 Annex B start codes. On failure, the contents of
1650  * dst_data may have been modified.
1651  *
1652  * @param length_code_size Byte length of each length code
1653  * @param sample_buffer NAL units prefixed with length codes.
1654  * @param sei Optional A53 closed captions SEI data.
1655  * @param dst_data Must be zeroed before calling this function.
1656  *                 Contains the copied NAL units prefixed with
1657  *                 start codes when the function returns
1658  *                 successfully.
1659  * @param dst_size Length of dst_data
1660  * @return 0 on success
1661  *         AVERROR_INVALIDDATA if length_code_size is invalid
1662  *         AVERROR_BUFFER_TOO_SMALL if dst_data is too small
1663  *         or if a length_code in src_data specifies data beyond
1664  *         the end of its buffer.
1665  */
1666 static int copy_replace_length_codes(
1667     AVCodecContext *avctx,
1668     size_t        length_code_size,
1669     CMSampleBufferRef sample_buffer,
1670     ExtraSEI      *sei,
1671     uint8_t       *dst_data,
1672     size_t        dst_size)
1673 {
1674     size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1675     size_t remaining_src_size = src_size;
1676     size_t remaining_dst_size = dst_size;
1677     size_t src_offset = 0;
1678     int wrote_sei = 0;
1679     int status;
1680     uint8_t size_buf[4];
1681     uint8_t nal_type;
1682     CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
1683
1684     if (length_code_size > 4) {
1685         return AVERROR_INVALIDDATA;
1686     }
1687
1688     while (remaining_src_size > 0) {
1689         size_t curr_src_len;
1690         size_t curr_dst_len;
1691         size_t box_len = 0;
1692         size_t i;
1693
1694         uint8_t       *dst_box;
1695
1696         status = CMBlockBufferCopyDataBytes(block,
1697                                             src_offset,
1698                                             length_code_size,
1699                                             size_buf);
1700         if (status) {
1701             av_log(avctx, AV_LOG_ERROR, "Cannot copy length: %d\n", status);
1702             return AVERROR_EXTERNAL;
1703         }
1704
1705         status = CMBlockBufferCopyDataBytes(block,
1706                                             src_offset + length_code_size,
1707                                             1,
1708                                             &nal_type);
1709
1710         if (status) {
1711             av_log(avctx, AV_LOG_ERROR, "Cannot copy type: %d\n", status);
1712             return AVERROR_EXTERNAL;
1713         }
1714
1715         nal_type &= 0x1F;
1716
1717         for (i = 0; i < length_code_size; i++) {
1718             box_len <<= 8;
1719             box_len |= size_buf[i];
1720         }
1721
1722         if (sei && !wrote_sei && is_post_sei_nal_type(nal_type)) {
1723             //No SEI NAL unit - insert.
1724             int wrote_bytes;
1725
1726             memcpy(dst_data, start_code, sizeof(start_code));
1727             dst_data += sizeof(start_code);
1728             remaining_dst_size -= sizeof(start_code);
1729
1730             *dst_data = H264_NAL_SEI;
1731             dst_data++;
1732             remaining_dst_size--;
1733
1734             wrote_bytes = write_sei(sei,
1735                                     H264_SEI_TYPE_USER_DATA_REGISTERED,
1736                                     dst_data,
1737                                     remaining_dst_size);
1738
1739             if (wrote_bytes < 0)
1740                 return wrote_bytes;
1741
1742             remaining_dst_size -= wrote_bytes;
1743             dst_data += wrote_bytes;
1744
1745             if (remaining_dst_size <= 0)
1746                 return AVERROR_BUFFER_TOO_SMALL;
1747
1748             *dst_data = 0x80;
1749
1750             dst_data++;
1751             remaining_dst_size--;
1752
1753             wrote_sei = 1;
1754         }
1755
1756         curr_src_len = box_len + length_code_size;
1757         curr_dst_len = box_len + sizeof(start_code);
1758
1759         if (remaining_src_size < curr_src_len) {
1760             return AVERROR_BUFFER_TOO_SMALL;
1761         }
1762
1763         if (remaining_dst_size < curr_dst_len) {
1764             return AVERROR_BUFFER_TOO_SMALL;
1765         }
1766
1767         dst_box = dst_data + sizeof(start_code);
1768
1769         memcpy(dst_data, start_code, sizeof(start_code));
1770         status = CMBlockBufferCopyDataBytes(block,
1771                                             src_offset + length_code_size,
1772                                             box_len,
1773                                             dst_box);
1774
1775         if (status) {
1776             av_log(avctx, AV_LOG_ERROR, "Cannot copy data: %d\n", status);
1777             return AVERROR_EXTERNAL;
1778         }
1779
1780         if (sei && !wrote_sei && nal_type == H264_NAL_SEI) {
1781             //Found SEI NAL unit - append.
1782             int wrote_bytes;
1783             int old_sei_length;
1784             int extra_bytes;
1785             uint8_t *new_sei;
1786             old_sei_length = find_sei_end(avctx, dst_box, box_len, &new_sei);
1787             if (old_sei_length < 0)
1788                 return status;
1789                 return old_sei_length;
1790             wrote_bytes = write_sei(sei,
1791                                     H264_SEI_TYPE_USER_DATA_REGISTERED,
1792                                     new_sei,
1793                                     remaining_dst_size - old_sei_length);
1794             if (wrote_bytes < 0)
1795                 return wrote_bytes;
1796
1797             if (new_sei + wrote_bytes >= dst_data + remaining_dst_size)
1798                 return AVERROR_BUFFER_TOO_SMALL;
1799
1800             new_sei[wrote_bytes++] = 0x80;
1801             extra_bytes = wrote_bytes - (dst_box + box_len - new_sei);
1802
1803             dst_data += extra_bytes;
1804             remaining_dst_size -= extra_bytes;
1805
1806             wrote_sei = 1;
1807         }
1808
1809         src_offset += curr_src_len;
1810         dst_data += curr_dst_len;
1811
1812         remaining_src_size -= curr_src_len;
1813         remaining_dst_size -= curr_dst_len;
1814     }
1815
1816     return 0;
1817 }
1818
1819 /**
1820  * Returns a sufficient number of bytes to contain the sei data.
1821  * It may be greater than the minimum required.
1822  */
1823 static int get_sei_msg_bytes(const ExtraSEI* sei, int type){
1824     int copied_size;
1825     if (sei->size == 0)
1826         return 0;
1827
1828     copied_size = -copy_emulation_prev(sei->data,
1829                                        sei->size,
1830                                        NULL,
1831                                        0,
1832                                        0);
1833
1834     if ((sei->size % 255) == 0) //may result in an extra byte
1835         copied_size++;
1836
1837     return copied_size + sei->size / 255 + 1 + type / 255 + 1;
1838 }
1839
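     /*
      * Converts an encoded CMSampleBuffer into an AVPacket: NAL units are
      * rewritten with Annex B start codes, parameter sets are prepended on key
      * frames when global headers are not in use, an optional A53 closed
      * captions SEI is inserted, and pts/dts are taken from the sample buffer.
      */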
1840 static int vtenc_cm_to_avpacket(
1841     AVCodecContext    *avctx,
1842     CMSampleBufferRef sample_buffer,
1843     AVPacket          *pkt,
1844     ExtraSEI          *sei)
1845 {
1846     VTEncContext *vtctx = avctx->priv_data;
1847
1848     int     status;
1849     bool    is_key_frame;
1850     bool    add_header;
1851     size_t  length_code_size;
1852     size_t  header_size = 0;
1853     size_t  in_buf_size;
1854     size_t  out_buf_size;
1855     size_t  sei_nalu_size = 0;
1856     int64_t dts_delta;
1857     int64_t time_base_num;
1858     int nalu_count;
1859     CMTime  pts;
1860     CMTime  dts;
1861     CMVideoFormatDescriptionRef vid_fmt;
1862
1863
1864     vtenc_get_frame_info(sample_buffer, &is_key_frame);
1865     status = get_length_code_size(avctx, sample_buffer, &length_code_size);
1866     if (status) return status;
1867
1868     add_header = is_key_frame && !(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER);
1869
1870     if (add_header) {
1871         vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
1872         if (!vid_fmt) {
1873             av_log(avctx, AV_LOG_ERROR, "Cannot get format description.\n");
1874             return AVERROR_EXTERNAL;
1875         }
1876
1877         status = get_params_size(avctx, vid_fmt, &header_size);
1878         if (status) return status;
1879     }
1880
1881     status = count_nalus(length_code_size, sample_buffer, &nalu_count);
1882     if(status)
1883         return status;
1884
1885     if (sei) {
1886         size_t msg_size = get_sei_msg_bytes(sei,
1887                                             H264_SEI_TYPE_USER_DATA_REGISTERED);
1888
1889         sei_nalu_size = sizeof(start_code) + 1 + msg_size + 1;
1890     }
1891
1892     in_buf_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1893     out_buf_size = header_size +
1894                    in_buf_size +
1895                    sei_nalu_size +
1896                    nalu_count * ((int)sizeof(start_code) - (int)length_code_size);
1897
1898     status = ff_alloc_packet2(avctx, pkt, out_buf_size, out_buf_size);
1899     if (status < 0)
1900         return status;
1901
1902     if (add_header) {
1903         status = copy_param_sets(avctx, vid_fmt, pkt->data, out_buf_size);
1904         if(status) return status;
1905     }
1906
1907     status = copy_replace_length_codes(
1908         avctx,
1909         length_code_size,
1910         sample_buffer,
1911         sei,
1912         pkt->data + header_size,
1913         pkt->size - header_size
1914     );
1915
1916     if (status) {
1917         av_log(avctx, AV_LOG_ERROR, "Error copying packet data: %d\n", status);
1918         return status;
1919     }
1920
1921     if (is_key_frame) {
1922         pkt->flags |= AV_PKT_FLAG_KEY;
1923     }
1924
1925     pts = CMSampleBufferGetPresentationTimeStamp(sample_buffer);
1926     dts = CMSampleBufferGetDecodeTimeStamp      (sample_buffer);
1927
1928     if (CMTIME_IS_INVALID(dts)) {
1929         if (!vtctx->has_b_frames) {
1930             dts = pts;
1931         } else {
1932             av_log(avctx, AV_LOG_ERROR, "DTS is invalid.\n");
1933             return AVERROR_EXTERNAL;
1934         }
1935     }
1936
1937     dts_delta = vtctx->dts_delta >= 0 ? vtctx->dts_delta : 0;
1938     time_base_num = avctx->time_base.num;
1939     pkt->pts = pts.value / time_base_num;
1940     pkt->dts = dts.value / time_base_num - dts_delta;
1941     pkt->size = out_buf_size;
1942
1943     return 0;
1944 }
1945
1946 /*
1947  * contiguous_buf_size is 0 if not contiguous, and the size of the buffer
1948  * containing all planes if so.
1949  */
1950 static int get_cv_pixel_info(
1951     AVCodecContext *avctx,
1952     const AVFrame  *frame,
1953     int            *color,
1954     int            *plane_count,
1955     size_t         *widths,
1956     size_t         *heights,
1957     size_t         *strides,
1958     size_t         *contiguous_buf_size)
1959 {
1960     VTEncContext *vtctx = avctx->priv_data;
1961     int av_format       = frame->format;
1962     int av_color_range  = frame->color_range;
1963     int i;
1964     int range_guessed;
1965     int status;
1966
1967     status = get_cv_pixel_format(avctx, av_format, av_color_range, color, &range_guessed);
1968     if (status) {
1969         av_log(avctx,
1970             AV_LOG_ERROR,
1971             "Could not get pixel format for color format '%s' range '%s'.\n",
1972             av_get_pix_fmt_name(av_format),
1973             av_color_range > AVCOL_RANGE_UNSPECIFIED &&
1974             av_color_range < AVCOL_RANGE_NB ?
1975                av_color_range_name(av_color_range) :
1976                "Unknown");
1977
1978         return AVERROR(EINVAL);
1979     }
1980
1981     if (range_guessed) {
1982         if (!vtctx->warned_color_range) {
1983             vtctx->warned_color_range = true;
1984             av_log(avctx,
1985                    AV_LOG_WARNING,
1986                    "Color range not set for %s. Using MPEG range.\n",
1987                    av_get_pix_fmt_name(av_format));
1988         }
1989     }
1990
1991     switch (av_format) {
1992     case AV_PIX_FMT_NV12:
1993         *plane_count = 2;
1994
1995         widths [0] = avctx->width;
1996         heights[0] = avctx->height;
1997         strides[0] = frame ? frame->linesize[0] : avctx->width;
1998
1999         widths [1] = (avctx->width  + 1) / 2;
2000         heights[1] = (avctx->height + 1) / 2;
2001         strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) & -2;
2002         break;
2003
2004     case AV_PIX_FMT_YUV420P:
2005         *plane_count = 3;
2006
2007         widths [0] = avctx->width;
2008         heights[0] = avctx->height;
2009         strides[0] = frame ? frame->linesize[0] : avctx->width;
2010
2011         widths [1] = (avctx->width  + 1) / 2;
2012         heights[1] = (avctx->height + 1) / 2;
2013         strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) / 2;
2014
2015         widths [2] = (avctx->width  + 1) / 2;
2016         heights[2] = (avctx->height + 1) / 2;
2017         strides[2] = frame ? frame->linesize[2] : (avctx->width + 1) / 2;
2018         break;
2019
2020     case AV_PIX_FMT_P010LE:
2021         *plane_count = 2;
2022         widths[0] = avctx->width;
2023         heights[0] = avctx->height;
2024         strides[0] = frame ? frame->linesize[0] : (avctx->width * 2 + 63) & -64;
2025
2026         widths[1] = (avctx->width + 1) / 2;
2027         heights[1] = (avctx->height + 1) / 2;
2028         strides[1] = frame ? frame->linesize[1] : ((avctx->width + 1) / 2 + 63) & -64;
2029         break;
2030
2031     default:
2032         av_log(
2033                avctx,
2034                AV_LOG_ERROR,
2035                "Could not get frame format info for color %d range %d.\n",
2036                av_format,
2037                av_color_range);
2038
2039         return AVERROR(EINVAL);
2040     }
2041
2042     *contiguous_buf_size = 0;
2043     for (i = 0; i < *plane_count; i++) {
2044         if (i < *plane_count - 1 &&
2045             frame->data[i] + strides[i] * heights[i] != frame->data[i + 1]) {
2046             *contiguous_buf_size = 0;
2047             break;
2048         }
2049
2050         *contiguous_buf_size += strides[i] * heights[i];
2051     }
2052
2053     return 0;
2054 }
2055
2056 //Not used on OSX - frame is never copied.
2057 static int copy_avframe_to_pixel_buffer(AVCodecContext   *avctx,
2058                                         const AVFrame    *frame,
2059                                         CVPixelBufferRef cv_img,
2060                                         const size_t     *plane_strides,
2061                                         const size_t     *plane_rows)
2062 {
2063     int i, j;
2064     size_t plane_count;
2065     int status;
2066     int rows;
2067     int src_stride;
2068     int dst_stride;
2069     uint8_t *src_addr;
2070     uint8_t *dst_addr;
2071     size_t copy_bytes;
2072
2073     status = CVPixelBufferLockBaseAddress(cv_img, 0);
2074     if (status) {
2075         av_log(
2076             avctx,
2077             AV_LOG_ERROR,
2078             "Error: Could not lock base address of CVPixelBuffer: %d.\n",
2079             status
2080         );
             return AVERROR_EXTERNAL;
2081     }
2082
2083     if (CVPixelBufferIsPlanar(cv_img)) {
2084         plane_count = CVPixelBufferGetPlaneCount(cv_img);
2085         for (i = 0; frame->data[i]; i++) {
2086             if (i == plane_count) {
2087                 CVPixelBufferUnlockBaseAddress(cv_img, 0);
2088                 av_log(avctx,
2089                     AV_LOG_ERROR,
2090                     "Error: different number of planes in AVFrame and CVPixelBuffer.\n"
2091                 );
2092
2093                 return AVERROR_EXTERNAL;
2094             }
2095
2096             dst_addr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cv_img, i);
2097             src_addr = (uint8_t*)frame->data[i];
2098             dst_stride = CVPixelBufferGetBytesPerRowOfPlane(cv_img, i);
2099             src_stride = plane_strides[i];
2100             rows = plane_rows[i];
2101
2102             if (dst_stride == src_stride) {
2103                 memcpy(dst_addr, src_addr, src_stride * rows);
2104             } else {
2105                 copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2106
2107                 for (j = 0; j < rows; j++) {
2108                     memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2109                 }
2110             }
2111         }
2112     } else {
2113         if (frame->data[1]) {
2114             CVPixelBufferUnlockBaseAddress(cv_img, 0);
2115             av_log(avctx,
2116                 AV_LOG_ERROR,
2117                 "Error: different number of planes in AVFrame and non-planar CVPixelBuffer.\n"
2118             );
2119
2120             return AVERROR_EXTERNAL;
2121         }
2122
2123         dst_addr = (uint8_t*)CVPixelBufferGetBaseAddress(cv_img);
2124         src_addr = (uint8_t*)frame->data[0];
2125         dst_stride = CVPixelBufferGetBytesPerRow(cv_img);
2126         src_stride = plane_strides[0];
2127         rows = plane_rows[0];
2128
2129         if (dst_stride == src_stride) {
2130             memcpy(dst_addr, src_addr, src_stride * rows);
2131         } else {
2132             copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2133
2134             for (j = 0; j < rows; j++) {
2135                 memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2136             }
2137         }
2138     }
2139
2140     status = CVPixelBufferUnlockBaseAddress(cv_img, 0);
2141     if (status) {
2142         av_log(avctx, AV_LOG_ERROR, "Error: Could not unlock CVPixelBuffer base address: %d.\n", status);
2143         return AVERROR_EXTERNAL;
2144     }
2145
2146     return 0;
2147 }
2148
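     /*
      * Obtains the CVPixelBuffer to encode: AV_PIX_FMT_VIDEOTOOLBOX frames are
      * passed through by retaining the buffer they already carry; other formats
      * are copied into a buffer from the session's pixel buffer pool, recreating
      * the session first if it was invalidated (e.g. after an iOS app moves
      * between foreground and background).
      */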
2149 static int create_cv_pixel_buffer(AVCodecContext   *avctx,
2150                                   const AVFrame    *frame,
2151                                   CVPixelBufferRef *cv_img)
2152 {
2153     int plane_count;
2154     int color;
2155     size_t widths [AV_NUM_DATA_POINTERS];
2156     size_t heights[AV_NUM_DATA_POINTERS];
2157     size_t strides[AV_NUM_DATA_POINTERS];
2158     int status;
2159     size_t contiguous_buf_size;
2160     CVPixelBufferPoolRef pix_buf_pool;
2161     VTEncContext* vtctx = avctx->priv_data;
2162
2163     if (avctx->pix_fmt == AV_PIX_FMT_VIDEOTOOLBOX) {
2164         av_assert0(frame->format == AV_PIX_FMT_VIDEOTOOLBOX);
2165
2166         *cv_img = (CVPixelBufferRef)frame->data[3];
2167         av_assert0(*cv_img);
2168
2169         CFRetain(*cv_img);
2170         return 0;
2171     }
2172
2173     memset(widths,  0, sizeof(widths));
2174     memset(heights, 0, sizeof(heights));
2175     memset(strides, 0, sizeof(strides));
2176
2177     status = get_cv_pixel_info(
2178         avctx,
2179         frame,
2180         &color,
2181         &plane_count,
2182         widths,
2183         heights,
2184         strides,
2185         &contiguous_buf_size
2186     );
2187
2188     if (status) {
2189         av_log(
2190             avctx,
2191             AV_LOG_ERROR,
2192             "Error: Cannot convert format %d color_range %d: %d\n",
2193             frame->format,
2194             frame->color_range,
2195             status
2196         );
2197
2198         return AVERROR_EXTERNAL;
2199     }
2200
2201     pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2202     if (!pix_buf_pool) {
2203         /* On iOS, the VT session is invalidated when the APP switches from
2204          * foreground to background and vice versa. Fetch the actual error code
2205          * of the VT session to detect that case and restart the VT session
2206          * accordingly. */
2207         OSStatus vtstatus;
2208
2209         vtstatus = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
2210         if (vtstatus == kVTInvalidSessionErr) {
2211             CFRelease(vtctx->session);
2212             vtctx->session = NULL;
2213             status = vtenc_configure_encoder(avctx);
2214             if (status == 0)
2215                 pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2216         }
2217         if (!pix_buf_pool) {
2218             av_log(avctx, AV_LOG_ERROR, "Could not get pixel buffer pool.\n");
2219             return AVERROR_EXTERNAL;
2220         }
2221         else
2222             av_log(avctx, AV_LOG_WARNING, "VT session restarted because of a "
2223                    "kVTInvalidSessionErr error.\n");
2224     }
2225
2226     status = CVPixelBufferPoolCreatePixelBuffer(NULL,
2227                                                 pix_buf_pool,
2228                                                 cv_img);
2229
2230
2231     if (status) {
2232         av_log(avctx, AV_LOG_ERROR, "Could not create pixel buffer from pool: %d.\n", status);
2233         return AVERROR_EXTERNAL;
2234     }
2235
2236     status = copy_avframe_to_pixel_buffer(avctx, frame, *cv_img, strides, heights);
2237     if (status) {
2238         CFRelease(*cv_img);
2239         *cv_img = NULL;
2240         return status;
2241     }
2242
2243     return 0;
2244 }
2245
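     /*
      * Builds the per-frame encode options: forces a key frame when the input
      * frame is an I picture, otherwise leaves *dict_out set to NULL.
      */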
2246 static int create_encoder_dict_h264(const AVFrame *frame,
2247                                     CFDictionaryRef* dict_out)
2248 {
2249     CFDictionaryRef dict = NULL;
2250     if (frame->pict_type == AV_PICTURE_TYPE_I) {
2251         const void *keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
2252         const void *vals[] = { kCFBooleanTrue };
2253
2254         dict = CFDictionaryCreate(NULL, keys, vals, 1, NULL, NULL);
2255         if(!dict) return AVERROR(ENOMEM);
2256     }
2257
2258     *dict_out = dict;
2259     return 0;
2260 }
2261
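     /*
      * Submits one frame to the compression session: wraps or copies it into a
      * CVPixelBuffer, attaches per-frame options and, if enabled, A53 closed
      * caption data as an ExtraSEI that is passed to VideoToolbox as the source
      * frame reference so the output callback can attach it to the sample.
      */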
2262 static int vtenc_send_frame(AVCodecContext *avctx,
2263                             VTEncContext   *vtctx,
2264                             const AVFrame  *frame)
2265 {
2266     CMTime time;
2267     CFDictionaryRef frame_dict;
2268     CVPixelBufferRef cv_img = NULL;
2269     AVFrameSideData *side_data = NULL;
2270     ExtraSEI *sei = NULL;
2271     int status = create_cv_pixel_buffer(avctx, frame, &cv_img);
2272
2273     if (status) return status;
2274
2275     status = create_encoder_dict_h264(frame, &frame_dict);
2276     if (status) {
2277         CFRelease(cv_img);
2278         return status;
2279     }
2280
2281     side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
2282     if (vtctx->a53_cc && side_data && side_data->size) {
2283         sei = av_mallocz(sizeof(*sei));
2284         if (!sei) {
2285             av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
2286         } else {
2287             int ret = ff_alloc_a53_sei(frame, 0, &sei->data, &sei->size);
2288             if (ret < 0) {
2289                 av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
2290                 av_free(sei);
2291                 sei = NULL;
2292             }
2293         }
2294     }
2295
2296     time = CMTimeMake(frame->pts * avctx->time_base.num, avctx->time_base.den);
2297     status = VTCompressionSessionEncodeFrame(
2298         vtctx->session,
2299         cv_img,
2300         time,
2301         kCMTimeInvalid,
2302         frame_dict,
2303         sei,
2304         NULL
2305     );
2306
2307     if (frame_dict) CFRelease(frame_dict);
2308     CFRelease(cv_img);
2309
2310     if (status) {
2311         av_log(avctx, AV_LOG_ERROR, "Error: cannot encode frame: %d\n", status);
2312         return AVERROR_EXTERNAL;
2313     }
2314
2315     return 0;
2316 }
2317
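     /*
      * encode2 callback: sends the frame to the session (or flushes once when
      * frame is NULL), records the pts of the first two frames to derive the
      * dts shift used with B-frames, then pops one encoded sample from the
      * queue and converts it into a packet.
      */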
2318 static int vtenc_frame(
2319     AVCodecContext *avctx,
2320     AVPacket       *pkt,
2321     const AVFrame  *frame,
2322     int            *got_packet)
2323 {
2324     VTEncContext *vtctx = avctx->priv_data;
2325     bool get_frame;
2326     int status;
2327     CMSampleBufferRef buf = NULL;
2328     ExtraSEI *sei = NULL;
2329
2330     if (frame) {
2331         status = vtenc_send_frame(avctx, vtctx, frame);
2332
2333         if (status) {
2334             status = AVERROR_EXTERNAL;
2335             goto end_nopkt;
2336         }
2337
2338         if (vtctx->frame_ct_in == 0) {
2339             vtctx->first_pts = frame->pts;
2340         } else if(vtctx->frame_ct_in == 1 && vtctx->has_b_frames) {
2341             vtctx->dts_delta = frame->pts - vtctx->first_pts;
2342         }
2343
2344         vtctx->frame_ct_in++;
2345     } else if(!vtctx->flushing) {
2346         vtctx->flushing = true;
2347
2348         status = VTCompressionSessionCompleteFrames(vtctx->session,
2349                                                     kCMTimeIndefinite);
2350
2351         if (status) {
2352             av_log(avctx, AV_LOG_ERROR, "Error flushing frames: %d\n", status);
2353             status = AVERROR_EXTERNAL;
2354             goto end_nopkt;
2355         }
2356     }
2357
2358     *got_packet = 0;
2359     get_frame = vtctx->dts_delta >= 0 || !frame;
2360     if (!get_frame) {
2361         status = 0;
2362         goto end_nopkt;
2363     }
2364
2365     status = vtenc_q_pop(vtctx, !frame, &buf, &sei);
2366     if (status) goto end_nopkt;
2367     if (!buf)   goto end_nopkt;
2368
2369     status = vtenc_cm_to_avpacket(avctx, buf, pkt, sei);
2370     if (sei) {
2371         if (sei->data) av_free(sei->data);
2372         av_free(sei);
2373     }
2374     CFRelease(buf);
2375     if (status) goto end_nopkt;
2376
2377     *got_packet = 1;
2378     return 0;
2379
2380 end_nopkt:
2381     av_packet_unref(pkt);
2382     return status;
2383 }
2384
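     /*
      * Generates avctx->extradata by creating a temporary compression session,
      * encoding a single frame taken from its pixel buffer pool and flushing it
      * so the parameter sets become available; the temporary session is
      * released before returning.
      */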
2385 static int vtenc_populate_extradata(AVCodecContext   *avctx,
2386                                     CMVideoCodecType codec_type,
2387                                     CFStringRef      profile_level,
2388                                     CFNumberRef      gamma_level,
2389                                     CFDictionaryRef  enc_info,
2390                                     CFDictionaryRef  pixel_buffer_info)
2391 {
2392     VTEncContext *vtctx = avctx->priv_data;
2393     int status;
2394     CVPixelBufferPoolRef pool = NULL;
2395     CVPixelBufferRef pix_buf = NULL;
2396     CMTime time;
2397     CMSampleBufferRef buf = NULL;
2398
2399     status = vtenc_create_encoder(avctx,
2400                                   codec_type,
2401                                   profile_level,
2402                                   gamma_level,
2403                                   enc_info,
2404                                   pixel_buffer_info,
2405                                   &vtctx->session);
2406     if (status)
2407         goto pe_cleanup;
2408
2409     pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2410     if(!pool){
2411         av_log(avctx, AV_LOG_ERROR, "Error getting pixel buffer pool.\n");
2412         goto pe_cleanup;
2413     }
2414
2415     status = CVPixelBufferPoolCreatePixelBuffer(NULL,
2416                                                 pool,
2417                                                 &pix_buf);
2418
2419     if(status != kCVReturnSuccess){
2420         av_log(avctx, AV_LOG_ERROR, "Error creating frame from pool: %d\n", status);
2421         goto pe_cleanup;
2422     }
2423
2424     time = CMTimeMake(0, avctx->time_base.den);
2425     status = VTCompressionSessionEncodeFrame(vtctx->session,
2426                                              pix_buf,
2427                                              time,
2428                                              kCMTimeInvalid,
2429                                              NULL,
2430                                              NULL,
2431                                              NULL);
2432
2433     if (status) {
2434         av_log(avctx,
2435                AV_LOG_ERROR,
2436                "Error sending frame for extradata: %d\n",
2437                status);
2438
2439         goto pe_cleanup;
2440     }
2441
2442     //Populates extradata - output frames are flushed and param sets are available.
2443     status = VTCompressionSessionCompleteFrames(vtctx->session,
2444                                                 kCMTimeIndefinite);
2445
2446     if (status)
2447         goto pe_cleanup;
2448
2449     status = vtenc_q_pop(vtctx, 0, &buf, NULL);
2450     if (status) {
2451         av_log(avctx, AV_LOG_ERROR, "Error popping sample buffer for extradata: %d\n", status);
2452         goto pe_cleanup;
2453     }
2454
2455     CFRelease(buf);
2456
2457
2458
2459 pe_cleanup:
         if (pix_buf)
             CFRelease(pix_buf);

2460     if(vtctx->session)
2461         CFRelease(vtctx->session);
2462
2463     vtctx->session = NULL;
2464     vtctx->frame_ct_out = 0;
2465
2466     av_assert0(status != 0 || (avctx->extradata && avctx->extradata_size > 0));
2467
2468     return status;
2469 }
2470
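     /*
      * Tears down the encoder: destroys the synchronization primitives and, if
      * a session exists, flushes pending frames, clears the frame queue and
      * releases the session along with any cached color metadata strings.
      */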
2471 static av_cold int vtenc_close(AVCodecContext *avctx)
2472 {
2473     VTEncContext *vtctx = avctx->priv_data;
2474
2475     pthread_cond_destroy(&vtctx->cv_sample_sent);
2476     pthread_mutex_destroy(&vtctx->lock);
2477
2478     if(!vtctx->session) return 0;
2479
2480     VTCompressionSessionCompleteFrames(vtctx->session,
2481                                        kCMTimeIndefinite);
2482     clear_frame_queue(vtctx);
2483     CFRelease(vtctx->session);
2484     vtctx->session = NULL;
2485
2486     if (vtctx->color_primaries) {
2487         CFRelease(vtctx->color_primaries);
2488         vtctx->color_primaries = NULL;
2489     }
2490
2491     if (vtctx->transfer_function) {
2492         CFRelease(vtctx->transfer_function);
2493         vtctx->transfer_function = NULL;
2494     }
2495
2496     if (vtctx->ycbcr_matrix) {
2497         CFRelease(vtctx->ycbcr_matrix);
2498         vtctx->ycbcr_matrix = NULL;
2499     }
2500
2501     return 0;
2502 }
2503
2504 static const enum AVPixelFormat avc_pix_fmts[] = {
2505     AV_PIX_FMT_VIDEOTOOLBOX,
2506     AV_PIX_FMT_NV12,
2507     AV_PIX_FMT_YUV420P,
2508     AV_PIX_FMT_NONE
2509 };
2510
2511 static const enum AVPixelFormat hevc_pix_fmts[] = {
2512     AV_PIX_FMT_VIDEOTOOLBOX,
2513     AV_PIX_FMT_NV12,
2514     AV_PIX_FMT_YUV420P,
2515     AV_PIX_FMT_P010LE,
2516     AV_PIX_FMT_NONE
2517 };
2518
2519 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
2520 #define COMMON_OPTIONS \
2521     { "allow_sw", "Allow software encoding", OFFSET(allow_sw), AV_OPT_TYPE_BOOL, \
2522         { .i64 = 0 }, 0, 1, VE }, \
2523     { "require_sw", "Require software encoding", OFFSET(require_sw), AV_OPT_TYPE_BOOL, \
2524         { .i64 = 0 }, 0, 1, VE }, \
2525     { "realtime", "Hint that encoding should happen in real-time if not faster (e.g. capturing from camera).", \
2526         OFFSET(realtime), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2527     { "frames_before", "Other frames will come before the frames in this session. This helps smooth concatenation issues.", \
2528         OFFSET(frames_before), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2529     { "frames_after", "Other frames will come after the frames in this session. This helps smooth concatenation issues.", \
2530         OFFSET(frames_after), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2531
2532 #define OFFSET(x) offsetof(VTEncContext, x)
2533 static const AVOption h264_options[] = {
2534     { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = H264_PROF_AUTO }, H264_PROF_AUTO, H264_PROF_COUNT, VE, "profile" },
2535     { "baseline", "Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_BASELINE }, INT_MIN, INT_MAX, VE, "profile" },
2536     { "main",     "Main Profile",     0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_MAIN     }, INT_MIN, INT_MAX, VE, "profile" },
2537     { "high",     "High Profile",     0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_HIGH     }, INT_MIN, INT_MAX, VE, "profile" },
2538
2539     { "level", "Level", OFFSET(level), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 52, VE, "level" },
2540     { "1.3", "Level 1.3, only available with Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = 13 }, INT_MIN, INT_MAX, VE, "level" },
2541     { "3.0", "Level 3.0", 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, INT_MIN, INT_MAX, VE, "level" },
2542     { "3.1", "Level 3.1", 0, AV_OPT_TYPE_CONST, { .i64 = 31 }, INT_MIN, INT_MAX, VE, "level" },
2543     { "3.2", "Level 3.2", 0, AV_OPT_TYPE_CONST, { .i64 = 32 }, INT_MIN, INT_MAX, VE, "level" },
2544     { "4.0", "Level 4.0", 0, AV_OPT_TYPE_CONST, { .i64 = 40 }, INT_MIN, INT_MAX, VE, "level" },
2545     { "4.1", "Level 4.1", 0, AV_OPT_TYPE_CONST, { .i64 = 41 }, INT_MIN, INT_MAX, VE, "level" },
2546     { "4.2", "Level 4.2", 0, AV_OPT_TYPE_CONST, { .i64 = 42 }, INT_MIN, INT_MAX, VE, "level" },
2547     { "5.0", "Level 5.0", 0, AV_OPT_TYPE_CONST, { .i64 = 50 }, INT_MIN, INT_MAX, VE, "level" },
2548     { "5.1", "Level 5.1", 0, AV_OPT_TYPE_CONST, { .i64 = 51 }, INT_MIN, INT_MAX, VE, "level" },
2549     { "5.2", "Level 5.2", 0, AV_OPT_TYPE_CONST, { .i64 = 52 }, INT_MIN, INT_MAX, VE, "level" },
2550
2551     { "coder", "Entropy coding", OFFSET(entropy), AV_OPT_TYPE_INT, { .i64 = VT_ENTROPY_NOT_SET }, VT_ENTROPY_NOT_SET, VT_CABAC, VE, "coder" },
2552     { "cavlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
2553     { "vlc",   "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
2554     { "cabac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
2555     { "ac",    "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
2556
2557     { "a53cc", "Use A53 Closed Captions (if available)", OFFSET(a53_cc), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, VE },
2558
2559     COMMON_OPTIONS
2560     { NULL },
2561 };
2562
2563 static const AVClass h264_videotoolbox_class = {
2564     .class_name = "h264_videotoolbox",
2565     .item_name  = av_default_item_name,
2566     .option     = h264_options,
2567     .version    = LIBAVUTIL_VERSION_INT,
2568 };
2569
2570 AVCodec ff_h264_videotoolbox_encoder = {
2571     .name             = "h264_videotoolbox",
2572     .long_name        = NULL_IF_CONFIG_SMALL("VideoToolbox H.264 Encoder"),
2573     .type             = AVMEDIA_TYPE_VIDEO,
2574     .id               = AV_CODEC_ID_H264,
2575     .priv_data_size   = sizeof(VTEncContext),
2576     .pix_fmts         = avc_pix_fmts,
2577     .init             = vtenc_init,
2578     .encode2          = vtenc_frame,
2579     .close            = vtenc_close,
2580     .capabilities     = AV_CODEC_CAP_DELAY,
2581     .priv_class       = &h264_videotoolbox_class,
2582     .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE |
2583                         FF_CODEC_CAP_INIT_CLEANUP,
2584 };
2585
2586 static const AVOption hevc_options[] = {
2587     { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = HEVC_PROF_AUTO }, HEVC_PROF_AUTO, HEVC_PROF_COUNT, VE, "profile" },
2588     { "main",     "Main Profile",     0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN   }, INT_MIN, INT_MAX, VE, "profile" },
2589     { "main10",   "Main10 Profile",   0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN10 }, INT_MIN, INT_MAX, VE, "profile" },
2590
2591     COMMON_OPTIONS
2592     { NULL },
2593 };
2594
2595 static const AVClass hevc_videotoolbox_class = {
2596     .class_name = "hevc_videotoolbox",
2597     .item_name  = av_default_item_name,
2598     .option     = hevc_options,
2599     .version    = LIBAVUTIL_VERSION_INT,
2600 };
2601
2602 AVCodec ff_hevc_videotoolbox_encoder = {
2603     .name             = "hevc_videotoolbox",
2604     .long_name        = NULL_IF_CONFIG_SMALL("VideoToolbox H.265 Encoder"),
2605     .type             = AVMEDIA_TYPE_VIDEO,
2606     .id               = AV_CODEC_ID_HEVC,
2607     .priv_data_size   = sizeof(VTEncContext),
2608     .pix_fmts         = hevc_pix_fmts,
2609     .init             = vtenc_init,
2610     .encode2          = vtenc_frame,
2611     .close            = vtenc_close,
2612     .capabilities     = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE,
2613     .priv_class       = &hevc_videotoolbox_class,
2614     .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE |
2615                         FF_CODEC_CAP_INIT_CLEANUP,
2616     .wrapper_name     = "videotoolbox",
2617 };