1 /*
2  * copyright (c) 2015 Rick Kern <kernrj@gmail.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 #include <VideoToolbox/VideoToolbox.h>
22 #include <CoreVideo/CoreVideo.h>
23 #include <CoreMedia/CoreMedia.h>
24 #include <TargetConditionals.h>
25 #include <Availability.h>
26 #include "avcodec.h"
27 #include "libavutil/opt.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/avstring.h"
30 #include "libavcodec/avcodec.h"
31 #include "libavutil/pixdesc.h"
32 #include "internal.h"
33 #include <pthread.h>
34 #include "atsc_a53.h"
35 #include "h264.h"
36 #include "h264_sei.h"
37 #include <dlfcn.h>
38
39 #if !HAVE_KCMVIDEOCODECTYPE_HEVC
40 enum { kCMVideoCodecType_HEVC = 'hvc1' };
41 #endif
42
43 #if !HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
44 enum { kCVPixelFormatType_420YpCbCr10BiPlanarFullRange = 'xf20' };
45 enum { kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange = 'x420' };
46 #endif
47
48 typedef OSStatus (*getParameterSetAtIndex)(CMFormatDescriptionRef videoDesc,
49                                            size_t parameterSetIndex,
50                                            const uint8_t **parameterSetPointerOut,
51                                            size_t *parameterSetSizeOut,
52                                            size_t *parameterSetCountOut,
53                                            int *NALUnitHeaderLengthOut);
54
55 // These symbols may not be present at runtime, so they are resolved with dlsym()
56 static struct{
57     CFStringRef kCVImageBufferColorPrimaries_ITU_R_2020;
58     CFStringRef kCVImageBufferTransferFunction_ITU_R_2020;
59     CFStringRef kCVImageBufferYCbCrMatrix_ITU_R_2020;
60
61     CFStringRef kVTCompressionPropertyKey_H264EntropyMode;
62     CFStringRef kVTH264EntropyMode_CAVLC;
63     CFStringRef kVTH264EntropyMode_CABAC;
64
65     CFStringRef kVTProfileLevel_H264_Baseline_4_0;
66     CFStringRef kVTProfileLevel_H264_Baseline_4_2;
67     CFStringRef kVTProfileLevel_H264_Baseline_5_0;
68     CFStringRef kVTProfileLevel_H264_Baseline_5_1;
69     CFStringRef kVTProfileLevel_H264_Baseline_5_2;
70     CFStringRef kVTProfileLevel_H264_Baseline_AutoLevel;
71     CFStringRef kVTProfileLevel_H264_Main_4_2;
72     CFStringRef kVTProfileLevel_H264_Main_5_1;
73     CFStringRef kVTProfileLevel_H264_Main_5_2;
74     CFStringRef kVTProfileLevel_H264_Main_AutoLevel;
75     CFStringRef kVTProfileLevel_H264_High_3_0;
76     CFStringRef kVTProfileLevel_H264_High_3_1;
77     CFStringRef kVTProfileLevel_H264_High_3_2;
78     CFStringRef kVTProfileLevel_H264_High_4_0;
79     CFStringRef kVTProfileLevel_H264_High_4_1;
80     CFStringRef kVTProfileLevel_H264_High_4_2;
81     CFStringRef kVTProfileLevel_H264_High_5_1;
82     CFStringRef kVTProfileLevel_H264_High_5_2;
83     CFStringRef kVTProfileLevel_H264_High_AutoLevel;
84     CFStringRef kVTProfileLevel_H264_Extended_5_0;
85     CFStringRef kVTProfileLevel_H264_Extended_AutoLevel;
86
87     CFStringRef kVTProfileLevel_HEVC_Main_AutoLevel;
88     CFStringRef kVTProfileLevel_HEVC_Main10_AutoLevel;
89
90     CFStringRef kVTCompressionPropertyKey_RealTime;
91
92     CFStringRef kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder;
93     CFStringRef kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder;
94
95     getParameterSetAtIndex CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
96 } compat_keys;
97
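/*
 * Resolve a CFStringRef constant by name at runtime. If the symbol is not
 * exported by the loaded frameworks, fall back to a CFSTR() key built from
 * defaultVal.
 */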
98 #define GET_SYM(symbol, defaultVal)                                     \
99 do{                                                                     \
100     CFStringRef* handle = (CFStringRef*)dlsym(RTLD_DEFAULT, #symbol);   \
101     if(!handle)                                                         \
102         compat_keys.symbol = CFSTR(defaultVal);                         \
103     else                                                                \
104         compat_keys.symbol = *handle;                                   \
105 }while(0)
106
107 static pthread_once_t once_ctrl = PTHREAD_ONCE_INIT;
108
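/* Populate compat_keys with symbols that may be missing on older OS versions;
 * run exactly once via pthread_once(). */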
109 static void loadVTEncSymbols(){
110     compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex =
111         (getParameterSetAtIndex)dlsym(
112             RTLD_DEFAULT,
113             "CMVideoFormatDescriptionGetHEVCParameterSetAtIndex"
114         );
115
116     GET_SYM(kCVImageBufferColorPrimaries_ITU_R_2020,   "ITU_R_2020");
117     GET_SYM(kCVImageBufferTransferFunction_ITU_R_2020, "ITU_R_2020");
118     GET_SYM(kCVImageBufferYCbCrMatrix_ITU_R_2020,      "ITU_R_2020");
119
120     GET_SYM(kVTCompressionPropertyKey_H264EntropyMode, "H264EntropyMode");
121     GET_SYM(kVTH264EntropyMode_CAVLC, "CAVLC");
122     GET_SYM(kVTH264EntropyMode_CABAC, "CABAC");
123
124     GET_SYM(kVTProfileLevel_H264_Baseline_4_0,       "H264_Baseline_4_0");
125     GET_SYM(kVTProfileLevel_H264_Baseline_4_2,       "H264_Baseline_4_2");
126     GET_SYM(kVTProfileLevel_H264_Baseline_5_0,       "H264_Baseline_5_0");
127     GET_SYM(kVTProfileLevel_H264_Baseline_5_1,       "H264_Baseline_5_1");
128     GET_SYM(kVTProfileLevel_H264_Baseline_5_2,       "H264_Baseline_5_2");
129     GET_SYM(kVTProfileLevel_H264_Baseline_AutoLevel, "H264_Baseline_AutoLevel");
130     GET_SYM(kVTProfileLevel_H264_Main_4_2,           "H264_Main_4_2");
131     GET_SYM(kVTProfileLevel_H264_Main_5_1,           "H264_Main_5_1");
132     GET_SYM(kVTProfileLevel_H264_Main_5_2,           "H264_Main_5_2");
133     GET_SYM(kVTProfileLevel_H264_Main_AutoLevel,     "H264_Main_AutoLevel");
134     GET_SYM(kVTProfileLevel_H264_High_3_0,           "H264_High_3_0");
135     GET_SYM(kVTProfileLevel_H264_High_3_1,           "H264_High_3_1");
136     GET_SYM(kVTProfileLevel_H264_High_3_2,           "H264_High_3_2");
137     GET_SYM(kVTProfileLevel_H264_High_4_0,           "H264_High_4_0");
138     GET_SYM(kVTProfileLevel_H264_High_4_1,           "H264_High_4_1");
139     GET_SYM(kVTProfileLevel_H264_High_4_2,           "H264_High_4_2");
140     GET_SYM(kVTProfileLevel_H264_High_5_1,           "H264_High_5_1");
141     GET_SYM(kVTProfileLevel_H264_High_5_2,           "H264_High_5_2");
142     GET_SYM(kVTProfileLevel_H264_High_AutoLevel,     "H264_High_AutoLevel");
143     GET_SYM(kVTProfileLevel_H264_Extended_5_0,       "H264_Extended_5_0");
144     GET_SYM(kVTProfileLevel_H264_Extended_AutoLevel, "H264_Extended_AutoLevel");
145
146     GET_SYM(kVTProfileLevel_HEVC_Main_AutoLevel,     "HEVC_Main_AutoLevel");
147     GET_SYM(kVTProfileLevel_HEVC_Main10_AutoLevel,   "HEVC_Main10_AutoLevel");
148
149     GET_SYM(kVTCompressionPropertyKey_RealTime, "RealTime");
150
151     GET_SYM(kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
152             "EnableHardwareAcceleratedVideoEncoder");
153     GET_SYM(kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
154             "RequireHardwareAcceleratedVideoEncoder");
155 }
156
157 typedef enum VT_H264Profile {
158     H264_PROF_AUTO,
159     H264_PROF_BASELINE,
160     H264_PROF_MAIN,
161     H264_PROF_HIGH,
162     H264_PROF_EXTENDED,
163     H264_PROF_COUNT
164 } VT_H264Profile;
165
166 typedef enum VTH264Entropy{
167     VT_ENTROPY_NOT_SET,
168     VT_CAVLC,
169     VT_CABAC
170 } VTH264Entropy;
171
172 typedef enum VT_HEVCProfile {
173     HEVC_PROF_AUTO,
174     HEVC_PROF_MAIN,
175     HEVC_PROF_MAIN10,
176     HEVC_PROF_COUNT
177 } VT_HEVCProfile;
178
179 static const uint8_t start_code[] = { 0, 0, 0, 1 };
180
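/* Extra SEI payload (e.g. A/53 closed captions) carried alongside a frame. */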
181 typedef struct ExtraSEI {
182     void *data;
183     size_t size;
184 } ExtraSEI;
185
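/* Node in the singly linked queue of encoded sample buffers awaiting output. */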
186 typedef struct BufNode {
187     CMSampleBufferRef cm_buffer;
188     ExtraSEI *sei;
189     struct BufNode* next;
190     int error;
191 } BufNode;
192
193 typedef struct VTEncContext {
194     AVClass *class;
195     enum AVCodecID codec_id;
196     VTCompressionSessionRef session;
197     CFStringRef ycbcr_matrix;
198     CFStringRef color_primaries;
199     CFStringRef transfer_function;
200     getParameterSetAtIndex get_param_set_func;
201
202     pthread_mutex_t lock;
203     pthread_cond_t  cv_sample_sent;
204
205     int async_error;
206
207     BufNode *q_head;
208     BufNode *q_tail;
209
210     int64_t frame_ct_out;
211     int64_t frame_ct_in;
212
213     int64_t first_pts;
214     int64_t dts_delta;
215
216     int64_t profile;
217     int64_t level;
218     int64_t entropy;
219     int64_t realtime;
220     int64_t frames_before;
221     int64_t frames_after;
222
223     int64_t allow_sw;
224     int64_t require_sw;
225
226     bool flushing;
227     int has_b_frames;
228     bool warned_color_range;
229
230     /* can't be bool type since AVOption will access it as int */
231     int a53_cc;
232 } VTEncContext;
233
234 static int vtenc_populate_extradata(AVCodecContext   *avctx,
235                                     CMVideoCodecType codec_type,
236                                     CFStringRef      profile_level,
237                                     CFNumberRef      gamma_level,
238                                     CFDictionaryRef  enc_info,
239                                     CFDictionaryRef  pixel_buffer_info);
240
241 /**
242  * NULL-safe release of *refPtr: releases the object and sets *refPtr to NULL.
243  */
244 static void vt_release_num(CFNumberRef* refPtr){
245     if (!*refPtr) {
246         return;
247     }
248
249     CFRelease(*refPtr);
250     *refPtr = NULL;
251 }
252
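/* Store an asynchronous error code and drop all queued output samples. */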
253 static void set_async_error(VTEncContext *vtctx, int err)
254 {
255     BufNode *info;
256
257     pthread_mutex_lock(&vtctx->lock);
258
259     vtctx->async_error = err;
260
261     info = vtctx->q_head;
262     vtctx->q_head = vtctx->q_tail = NULL;
263
264     while (info) {
265         BufNode *next = info->next;
266         CFRelease(info->cm_buffer);
267         av_free(info);
268         info = next;
269     }
270
271     pthread_mutex_unlock(&vtctx->lock);
272 }
273
274 static void clear_frame_queue(VTEncContext *vtctx)
275 {
276     set_async_error(vtctx, 0);
277 }
278
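/*
 * Take the next encoded sample (and its SEI, if requested) from the queue.
 * When wait is true, blocks until a sample arrives, an error is set, or the
 * encoder is flushing; returns 0 with *buf == NULL once the queue is drained.
 */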
279 static int vtenc_q_pop(VTEncContext *vtctx, bool wait, CMSampleBufferRef *buf, ExtraSEI **sei)
280 {
281     BufNode *info;
282
283     pthread_mutex_lock(&vtctx->lock);
284
285     if (vtctx->async_error) {
286         pthread_mutex_unlock(&vtctx->lock);
287         return vtctx->async_error;
288     }
289
290     if (vtctx->flushing && vtctx->frame_ct_in == vtctx->frame_ct_out) {
291         *buf = NULL;
292
293         pthread_mutex_unlock(&vtctx->lock);
294         return 0;
295     }
296
297     while (!vtctx->q_head && !vtctx->async_error && wait && !vtctx->flushing) {
298         pthread_cond_wait(&vtctx->cv_sample_sent, &vtctx->lock);
299     }
300
301     if (!vtctx->q_head) {
302         pthread_mutex_unlock(&vtctx->lock);
303         *buf = NULL;
304         return 0;
305     }
306
307     info = vtctx->q_head;
308     vtctx->q_head = vtctx->q_head->next;
309     if (!vtctx->q_head) {
310         vtctx->q_tail = NULL;
311     }
312
313     vtctx->frame_ct_out++;
314     pthread_mutex_unlock(&vtctx->lock);
315
316     *buf = info->cm_buffer;
317     if (sei && *buf) {
318         *sei = info->sei;
319     } else if (info->sei) {
320         if (info->sei->data) av_free(info->sei->data);
321         av_free(info->sei);
322     }
323     av_free(info);
324
325
326     return 0;
327 }
328
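/* Retain an encoded sample buffer, append it to the output queue and signal
 * any thread waiting in vtenc_q_pop(). */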
329 static void vtenc_q_push(VTEncContext *vtctx, CMSampleBufferRef buffer, ExtraSEI *sei)
330 {
331     BufNode *info = av_malloc(sizeof(BufNode));
332     if (!info) {
333         set_async_error(vtctx, AVERROR(ENOMEM));
334         return;
335     }
336
337     CFRetain(buffer);
338     info->cm_buffer = buffer;
339     info->sei = sei;
340     info->next = NULL;
341
342     pthread_mutex_lock(&vtctx->lock);
343
344     if (!vtctx->q_head) {
345         vtctx->q_head = info;
346     } else {
347         vtctx->q_tail->next = info;
348     }
349
350     vtctx->q_tail = info;
351
352     pthread_cond_signal(&vtctx->cv_sample_sent);
353     pthread_mutex_unlock(&vtctx->lock);
354 }
355
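/* Count the NAL units in a sample buffer by walking its length-prefixed
 * (AVCC/HVCC-style) NAL unit headers. */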
356 static int count_nalus(size_t length_code_size,
357                        CMSampleBufferRef sample_buffer,
358                        int *count)
359 {
360     size_t offset = 0;
361     int status;
362     int nalu_ct = 0;
363     uint8_t size_buf[4];
364     size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
365     CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
366
367     if (length_code_size > 4)
368         return AVERROR_INVALIDDATA;
369
370     while (offset < src_size) {
371         size_t curr_src_len;
372         size_t box_len = 0;
373         size_t i;
374
375         status = CMBlockBufferCopyDataBytes(block,
376                                             offset,
377                                             length_code_size,
378                                             size_buf);
379
380         for (i = 0; i < length_code_size; i++) {
381             box_len <<= 8;
382             box_len |= size_buf[i];
383         }
384
385         curr_src_len = box_len + length_code_size;
386         offset += curr_src_len;
387
388         nalu_ct++;
389     }
390
391     *count = nalu_ct;
392     return 0;
393 }
394
395 static CMVideoCodecType get_cm_codec_type(enum AVCodecID id)
396 {
397     switch (id) {
398     case AV_CODEC_ID_H264: return kCMVideoCodecType_H264;
399     case AV_CODEC_ID_HEVC: return kCMVideoCodecType_HEVC;
400     default:               return 0;
401     }
402 }
403
404 /**
405  * Get the total size of the parameter sets in a video format description.
406  *
407  * @param vid_fmt Format description whose parameter sets are measured.
408  * @param size    Set to the number of bytes needed to store every
409  *                parameter set, each preceded by a 4-byte start code.
410  * @return 0 on success, a negative error code otherwise.
411  */
412 static int get_params_size(
413     AVCodecContext              *avctx,
414     CMVideoFormatDescriptionRef vid_fmt,
415     size_t                      *size)
416 {
417     VTEncContext *vtctx = avctx->priv_data;
418     size_t total_size = 0;
419     size_t ps_count;
420     int is_count_bad = 0;
421     size_t i;
422     int status;
423     status = vtctx->get_param_set_func(vid_fmt,
424                                        0,
425                                        NULL,
426                                        NULL,
427                                        &ps_count,
428                                        NULL);
429     if (status) {
430         is_count_bad = 1;
431         ps_count     = 0;
432         status       = 0;
433     }
434
435     for (i = 0; i < ps_count || is_count_bad; i++) {
436         const uint8_t *ps;
437         size_t ps_size;
438         status = vtctx->get_param_set_func(vid_fmt,
439                                            i,
440                                            &ps,
441                                            &ps_size,
442                                            NULL,
443                                            NULL);
444         if (status) {
445         /*
446          * When ps_count is unreliable, a non-zero status is the normal way
447          * out of the loop, provided at least one parameter set was read.
448          */
449             if (i > 0 && is_count_bad) status = 0;
450
451             break;
452         }
453
454         total_size += ps_size + sizeof(start_code);
455     }
456
457     if (status) {
458         av_log(avctx, AV_LOG_ERROR, "Error getting parameter set sizes: %d\n", status);
459         return AVERROR_EXTERNAL;
460     }
461
462     *size = total_size;
463     return 0;
464 }
465
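/* Copy every parameter set from vid_fmt into dst, prefixing each with an
 * Annex B start code; dst must hold at least dst_size bytes. */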
466 static int copy_param_sets(
467     AVCodecContext              *avctx,
468     CMVideoFormatDescriptionRef vid_fmt,
469     uint8_t                     *dst,
470     size_t                      dst_size)
471 {
472     VTEncContext *vtctx = avctx->priv_data;
473     size_t ps_count;
474     int is_count_bad = 0;
475     int status;
476     size_t offset = 0;
477     size_t i;
478
479     status = vtctx->get_param_set_func(vid_fmt,
480                                        0,
481                                        NULL,
482                                        NULL,
483                                        &ps_count,
484                                        NULL);
485     if (status) {
486         is_count_bad = 1;
487         ps_count     = 0;
488         status       = 0;
489     }
490
491
492     for (i = 0; i < ps_count || is_count_bad; i++) {
493         const uint8_t *ps;
494         size_t ps_size;
495         size_t next_offset;
496
497         status = vtctx->get_param_set_func(vid_fmt,
498                                            i,
499                                            &ps,
500                                            &ps_size,
501                                            NULL,
502                                            NULL);
503         if (status) {
504             if (i > 0 && is_count_bad) status = 0;
505
506             break;
507         }
508
509         next_offset = offset + sizeof(start_code) + ps_size;
510         if (dst_size < next_offset) {
511             av_log(avctx, AV_LOG_ERROR, "Error: buffer too small for parameter sets.\n");
512             return AVERROR_BUFFER_TOO_SMALL;
513         }
514
515         memcpy(dst + offset, start_code, sizeof(start_code));
516         offset += sizeof(start_code);
517
518         memcpy(dst + offset, ps, ps_size);
519         offset = next_offset;
520     }
521
522     if (status) {
523         av_log(avctx, AV_LOG_ERROR, "Error getting parameter set data: %d\n", status);
524         return AVERROR_EXTERNAL;
525     }
526
527     return 0;
528 }
529
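/* Fill avctx->extradata with the parameter sets taken from the sample
 * buffer's format description. */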
530 static int set_extradata(AVCodecContext *avctx, CMSampleBufferRef sample_buffer)
531 {
532     CMVideoFormatDescriptionRef vid_fmt;
533     size_t total_size;
534     int status;
535
536     vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
537     if (!vid_fmt) {
538         av_log(avctx, AV_LOG_ERROR, "No video format.\n");
539         return AVERROR_EXTERNAL;
540     }
541
542     status = get_params_size(avctx, vid_fmt, &total_size);
543     if (status) {
544         av_log(avctx, AV_LOG_ERROR, "Could not get parameter sets.\n");
545         return status;
546     }
547
548     avctx->extradata = av_mallocz(total_size + AV_INPUT_BUFFER_PADDING_SIZE);
549     if (!avctx->extradata) {
550         return AVERROR(ENOMEM);
551     }
552     avctx->extradata_size = total_size;
553
554     status = copy_param_sets(avctx, vid_fmt, avctx->extradata, total_size);
555
556     if (status) {
557         av_log(avctx, AV_LOG_ERROR, "Could not copy param sets.\n");
558         return status;
559     }
560
561     return 0;
562 }
563
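/*
 * Output callback of the compression session: propagates encode errors,
 * builds global-header extradata from the first sample when requested, and
 * queues the encoded sample for the encode thread.
 */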
564 static void vtenc_output_callback(
565     void *ctx,
566     void *sourceFrameCtx,
567     OSStatus status,
568     VTEncodeInfoFlags flags,
569     CMSampleBufferRef sample_buffer)
570 {
571     AVCodecContext *avctx = ctx;
572     VTEncContext   *vtctx = avctx->priv_data;
573     ExtraSEI *sei = sourceFrameCtx;
574
575     if (vtctx->async_error) {
576         return;
577     }
578
579     if (status) {
580         av_log(avctx, AV_LOG_ERROR, "Error encoding frame: %d\n", (int)status);
581         set_async_error(vtctx, AVERROR_EXTERNAL);
582         return;
583     }
584
585     if (!sample_buffer) {
586         return;
587     }
588
589     if (!avctx->extradata && (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
590         int set_status = set_extradata(avctx, sample_buffer);
591         if (set_status) {
592             set_async_error(vtctx, set_status);
593             return;
594         }
595     }
596
597     vtenc_q_push(vtctx, sample_buffer, sei);
598 }
599
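/* Return the size in bytes of the NAL unit length field used by the sample
 * buffer's format. */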
600 static int get_length_code_size(
601     AVCodecContext    *avctx,
602     CMSampleBufferRef sample_buffer,
603     size_t            *size)
604 {
605     VTEncContext *vtctx = avctx->priv_data;
606     CMVideoFormatDescriptionRef vid_fmt;
607     int isize;
608     int status;
609
610     vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
611     if (!vid_fmt) {
612         av_log(avctx, AV_LOG_ERROR, "Error getting buffer format description.\n");
613         return AVERROR_EXTERNAL;
614     }
615
616     status = vtctx->get_param_set_func(vid_fmt,
617                                        0,
618                                        NULL,
619                                        NULL,
620                                        NULL,
621                                        &isize);
622     if (status) {
623         av_log(avctx, AV_LOG_ERROR, "Error getting length code size: %d\n", status);
624         return AVERROR_EXTERNAL;
625     }
626
627     *size = isize;
628     return 0;
629 }
630
631 /*
632  * Returns true on success.
633  *
634  * If *profile_level_val is set to NULL and this function returns true, the
635  * caller should not specify a profile/level to the encoder.
636  */
637 static bool get_vt_h264_profile_level(AVCodecContext *avctx,
638                                       CFStringRef    *profile_level_val)
639 {
640     VTEncContext *vtctx = avctx->priv_data;
641     int64_t profile = vtctx->profile;
642
643     if (profile == H264_PROF_AUTO && vtctx->level) {
644         //Need to pick a profile if level is not auto-selected.
645         profile = vtctx->has_b_frames ? H264_PROF_MAIN : H264_PROF_BASELINE;
646     }
647
648     *profile_level_val = NULL;
649
650     switch (profile) {
651         case H264_PROF_AUTO:
652             return true;
653
654         case H264_PROF_BASELINE:
655             switch (vtctx->level) {
656                 case  0: *profile_level_val =
657                                   compat_keys.kVTProfileLevel_H264_Baseline_AutoLevel; break;
658                 case 13: *profile_level_val = kVTProfileLevel_H264_Baseline_1_3;       break;
659                 case 30: *profile_level_val = kVTProfileLevel_H264_Baseline_3_0;       break;
660                 case 31: *profile_level_val = kVTProfileLevel_H264_Baseline_3_1;       break;
661                 case 32: *profile_level_val = kVTProfileLevel_H264_Baseline_3_2;       break;
662                 case 40: *profile_level_val =
663                                   compat_keys.kVTProfileLevel_H264_Baseline_4_0;       break;
664                 case 41: *profile_level_val = kVTProfileLevel_H264_Baseline_4_1;       break;
665                 case 42: *profile_level_val =
666                                   compat_keys.kVTProfileLevel_H264_Baseline_4_2;       break;
667                 case 50: *profile_level_val =
668                                   compat_keys.kVTProfileLevel_H264_Baseline_5_0;       break;
669                 case 51: *profile_level_val =
670                                   compat_keys.kVTProfileLevel_H264_Baseline_5_1;       break;
671                 case 52: *profile_level_val =
672                                   compat_keys.kVTProfileLevel_H264_Baseline_5_2;       break;
673             }
674             break;
675
676         case H264_PROF_MAIN:
677             switch (vtctx->level) {
678                 case  0: *profile_level_val =
679                                   compat_keys.kVTProfileLevel_H264_Main_AutoLevel; break;
680                 case 30: *profile_level_val = kVTProfileLevel_H264_Main_3_0;       break;
681                 case 31: *profile_level_val = kVTProfileLevel_H264_Main_3_1;       break;
682                 case 32: *profile_level_val = kVTProfileLevel_H264_Main_3_2;       break;
683                 case 40: *profile_level_val = kVTProfileLevel_H264_Main_4_0;       break;
684                 case 41: *profile_level_val = kVTProfileLevel_H264_Main_4_1;       break;
685                 case 42: *profile_level_val =
686                                   compat_keys.kVTProfileLevel_H264_Main_4_2;       break;
687                 case 50: *profile_level_val = kVTProfileLevel_H264_Main_5_0;       break;
688                 case 51: *profile_level_val =
689                                   compat_keys.kVTProfileLevel_H264_Main_5_1;       break;
690                 case 52: *profile_level_val =
691                                   compat_keys.kVTProfileLevel_H264_Main_5_2;       break;
692             }
693             break;
694
695         case H264_PROF_HIGH:
696             switch (vtctx->level) {
697                 case  0: *profile_level_val =
698                                   compat_keys.kVTProfileLevel_H264_High_AutoLevel; break;
699                 case 30: *profile_level_val =
700                                   compat_keys.kVTProfileLevel_H264_High_3_0;       break;
701                 case 31: *profile_level_val =
702                                   compat_keys.kVTProfileLevel_H264_High_3_1;       break;
703                 case 32: *profile_level_val =
704                                   compat_keys.kVTProfileLevel_H264_High_3_2;       break;
705                 case 40: *profile_level_val =
706                                   compat_keys.kVTProfileLevel_H264_High_4_0;       break;
707                 case 41: *profile_level_val =
708                                   compat_keys.kVTProfileLevel_H264_High_4_1;       break;
709                 case 42: *profile_level_val =
710                                   compat_keys.kVTProfileLevel_H264_High_4_2;       break;
711                 case 50: *profile_level_val = kVTProfileLevel_H264_High_5_0;       break;
712                 case 51: *profile_level_val =
713                                   compat_keys.kVTProfileLevel_H264_High_5_1;       break;
714                 case 52: *profile_level_val =
715                                   compat_keys.kVTProfileLevel_H264_High_5_2;       break;
716             }
717             break;
718         case H264_PROF_EXTENDED:
719             switch (vtctx->level) {
720                 case  0: *profile_level_val =
721                                   compat_keys.kVTProfileLevel_H264_Extended_AutoLevel; break;
722                 case 50: *profile_level_val =
723                                   compat_keys.kVTProfileLevel_H264_Extended_5_0;       break;
724             }
725             break;
726     }
727
728     if (!*profile_level_val) {
729         av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
730         return false;
731     }
732
733     return true;
734 }
735
736 /*
737  * Returns true on success.
738  *
739  * If *profile_level_val is set to NULL and this function returns true, the
740  * caller should not specify a profile/level to the encoder.
741  */
742 static bool get_vt_hevc_profile_level(AVCodecContext *avctx,
743                                       CFStringRef    *profile_level_val)
744 {
745     VTEncContext *vtctx = avctx->priv_data;
746     int64_t profile = vtctx->profile;
747
748     *profile_level_val = NULL;
749
750     switch (profile) {
751         case HEVC_PROF_AUTO:
752             return true;
753         case HEVC_PROF_MAIN:
754             *profile_level_val =
755                 compat_keys.kVTProfileLevel_HEVC_Main_AutoLevel;
756             break;
757         case HEVC_PROF_MAIN10:
758             *profile_level_val =
759                 compat_keys.kVTProfileLevel_HEVC_Main10_AutoLevel;
760             break;
761     }
762
763     if (!*profile_level_val) {
764         av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
765         return false;
766     }
767
768     return true;
769 }
770
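/* Map an AVPixelFormat/AVColorRange pair to a CoreVideo pixel format.
 * *range_guessed is set when the range had to be assumed because none was
 * specified. */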
771 static int get_cv_pixel_format(AVCodecContext* avctx,
772                                enum AVPixelFormat fmt,
773                                enum AVColorRange range,
774                                int* av_pixel_format,
775                                int* range_guessed)
776 {
777     if (range_guessed) *range_guessed = range != AVCOL_RANGE_MPEG &&
778                                         range != AVCOL_RANGE_JPEG;
779
780     //MPEG range is used when no range is set
781     if (fmt == AV_PIX_FMT_NV12) {
782         *av_pixel_format = range == AVCOL_RANGE_JPEG ?
783                                         kCVPixelFormatType_420YpCbCr8BiPlanarFullRange :
784                                         kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
785     } else if (fmt == AV_PIX_FMT_YUV420P) {
786         *av_pixel_format = range == AVCOL_RANGE_JPEG ?
787                                         kCVPixelFormatType_420YpCbCr8PlanarFullRange :
788                                         kCVPixelFormatType_420YpCbCr8Planar;
789     } else if (fmt == AV_PIX_FMT_P010LE) {
790         *av_pixel_format = range == AVCOL_RANGE_JPEG ?
791                                         kCVPixelFormatType_420YpCbCr10BiPlanarFullRange :
792                                         kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange;
793     } else {
794         return AVERROR(EINVAL);
795     }
796
797     return 0;
798 }
799
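/* Attach the configured color primaries, transfer function and YCbCr matrix
 * to a pixel buffer attributes dictionary. */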
800 static void add_color_attr(AVCodecContext *avctx, CFMutableDictionaryRef dict) {
801     VTEncContext *vtctx = avctx->priv_data;
802
803     if (vtctx->color_primaries) {
804         CFDictionarySetValue(dict,
805                              kCVImageBufferColorPrimariesKey,
806                              vtctx->color_primaries);
807     }
808
809     if (vtctx->transfer_function) {
810         CFDictionarySetValue(dict,
811                              kCVImageBufferTransferFunctionKey,
812                              vtctx->transfer_function);
813     }
814
815     if (vtctx->ycbcr_matrix) {
816         CFDictionarySetValue(dict,
817                              kCVImageBufferYCbCrMatrixKey,
818                              vtctx->ycbcr_matrix);
819     }
820 }
821
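/* Build the pixel buffer attributes dictionary (pixel format, dimensions and
 * color attachments) used when creating the compression session. */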
822 static int create_cv_pixel_buffer_info(AVCodecContext* avctx,
823                                        CFMutableDictionaryRef* dict)
824 {
825     CFNumberRef cv_color_format_num = NULL;
826     CFNumberRef width_num = NULL;
827     CFNumberRef height_num = NULL;
828     CFMutableDictionaryRef pixel_buffer_info = NULL;
829     int cv_color_format;
830     int status = get_cv_pixel_format(avctx,
831                                      avctx->pix_fmt,
832                                      avctx->color_range,
833                                      &cv_color_format,
834                                      NULL);
835     if (status) return status;
836
837     pixel_buffer_info = CFDictionaryCreateMutable(
838                             kCFAllocatorDefault,
839                             20,
840                             &kCFCopyStringDictionaryKeyCallBacks,
841                             &kCFTypeDictionaryValueCallBacks);
842
843     if (!pixel_buffer_info) goto pbinfo_nomem;
844
845     cv_color_format_num = CFNumberCreate(kCFAllocatorDefault,
846                                          kCFNumberSInt32Type,
847                                          &cv_color_format);
848     if (!cv_color_format_num) goto pbinfo_nomem;
849
850     CFDictionarySetValue(pixel_buffer_info,
851                          kCVPixelBufferPixelFormatTypeKey,
852                          cv_color_format_num);
853     vt_release_num(&cv_color_format_num);
854
855     width_num = CFNumberCreate(kCFAllocatorDefault,
856                                kCFNumberSInt32Type,
857                                &avctx->width);
858     if (!width_num) return AVERROR(ENOMEM);
859
860     CFDictionarySetValue(pixel_buffer_info,
861                          kCVPixelBufferWidthKey,
862                          width_num);
863     vt_release_num(&width_num);
864
865     height_num = CFNumberCreate(kCFAllocatorDefault,
866                                 kCFNumberSInt32Type,
867                                 &avctx->height);
868     if (!height_num) goto pbinfo_nomem;
869
870     CFDictionarySetValue(pixel_buffer_info,
871                          kCVPixelBufferHeightKey,
872                          height_num);
873     vt_release_num(&height_num);
874
875     add_color_attr(avctx, pixel_buffer_info);
876
877     *dict = pixel_buffer_info;
878     return 0;
879
880 pbinfo_nomem:
881     vt_release_num(&cv_color_format_num);
882     vt_release_num(&width_num);
883     vt_release_num(&height_num);
884     if (pixel_buffer_info) CFRelease(pixel_buffer_info);
885
886     return AVERROR(ENOMEM);
887 }
888
889 static int get_cv_color_primaries(AVCodecContext *avctx,
890                                   CFStringRef *primaries)
891 {
892     enum AVColorPrimaries pri = avctx->color_primaries;
893     switch (pri) {
894         case AVCOL_PRI_UNSPECIFIED:
895             *primaries = NULL;
896             break;
897
898         case AVCOL_PRI_BT470BG:
899             *primaries = kCVImageBufferColorPrimaries_EBU_3213;
900             break;
901
902         case AVCOL_PRI_SMPTE170M:
903             *primaries = kCVImageBufferColorPrimaries_SMPTE_C;
904             break;
905
906         case AVCOL_PRI_BT709:
907             *primaries = kCVImageBufferColorPrimaries_ITU_R_709_2;
908             break;
909
910         case AVCOL_PRI_BT2020:
911             *primaries = compat_keys.kCVImageBufferColorPrimaries_ITU_R_2020;
912             break;
913
914         default:
915             av_log(avctx, AV_LOG_ERROR, "Color primaries %s is not supported.\n", av_color_primaries_name(pri));
916             *primaries = NULL;
917             return -1;
918     }
919
920     return 0;
921 }
922
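/* Map avctx->color_trc to a CoreVideo transfer function; for plain gamma
 * curves, the gamma value is also returned as a CFNumber in *gamma_level. */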
923 static int get_cv_transfer_function(AVCodecContext *avctx,
924                                     CFStringRef *transfer_fnc,
925                                     CFNumberRef *gamma_level)
926 {
927     enum AVColorTransferCharacteristic trc = avctx->color_trc;
928     Float32 gamma;
929     *gamma_level = NULL;
930
931     switch (trc) {
932         case AVCOL_TRC_UNSPECIFIED:
933             *transfer_fnc = NULL;
934             break;
935
936         case AVCOL_TRC_BT709:
937             *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_709_2;
938             break;
939
940         case AVCOL_TRC_SMPTE240M:
941             *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_240M_1995;
942             break;
943
944 #if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ
945         case AVCOL_TRC_SMPTE2084:
946             *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_ST_2084_PQ;
947             break;
948 #endif
949 #if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR
950         case AVCOL_TRC_LINEAR:
951             *transfer_fnc = kCVImageBufferTransferFunction_Linear;
952             break;
953 #endif
954 #if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG
955         case AVCOL_TRC_ARIB_STD_B67:
956             *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_2100_HLG;
957             break;
958 #endif
959
960         case AVCOL_TRC_GAMMA22:
961             gamma = 2.2;
962             *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
963             *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
964             break;
965
966         case AVCOL_TRC_GAMMA28:
967             gamma = 2.8;
968             *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
969             *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
970             break;
971
972         case AVCOL_TRC_BT2020_10:
973         case AVCOL_TRC_BT2020_12:
974             *transfer_fnc = compat_keys.kCVImageBufferTransferFunction_ITU_R_2020;
975             break;
976
977         default:
978             *transfer_fnc = NULL;
979             av_log(avctx, AV_LOG_ERROR, "Transfer function %s is not supported.\n", av_color_transfer_name(trc));
980             return -1;
981     }
982
983     return 0;
984 }
985
986 static int get_cv_ycbcr_matrix(AVCodecContext *avctx, CFStringRef *matrix) {
987     switch(avctx->colorspace) {
988         case AVCOL_SPC_BT709:
989             *matrix = kCVImageBufferYCbCrMatrix_ITU_R_709_2;
990             break;
991
992         case AVCOL_SPC_UNSPECIFIED:
993             *matrix = NULL;
994             break;
995
996         case AVCOL_SPC_BT470BG:
997         case AVCOL_SPC_SMPTE170M:
998             *matrix = kCVImageBufferYCbCrMatrix_ITU_R_601_4;
999             break;
1000
1001         case AVCOL_SPC_SMPTE240M:
1002             *matrix = kCVImageBufferYCbCrMatrix_SMPTE_240M_1995;
1003             break;
1004
1005         case AVCOL_SPC_BT2020_NCL:
1006             *matrix = compat_keys.kCVImageBufferYCbCrMatrix_ITU_R_2020;
1007             break;
1008
1009         default:
1010             av_log(avctx, AV_LOG_ERROR, "Color space %s is not supported.\n", av_color_space_name(avctx->colorspace));
1011             return -1;
1012     }
1013
1014     return 0;
1015 }
1016
1017 // Constant quality (-q:v) is only supported on Apple Silicon Macs
1018 static bool vtenc_qscale_enabled(void)
1019 {
1020     return TARGET_OS_OSX && TARGET_CPU_ARM64;
1021 }
1022
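/*
 * Create the VTCompressionSession and apply all encoder settings: rate
 * control (constant quality or average bitrate plus an optional H.264
 * data-rate cap), profile/level, keyframe interval, pixel aspect ratio,
 * color metadata, frame reordering, entropy coding and realtime mode.
 */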
1023 static int vtenc_create_encoder(AVCodecContext   *avctx,
1024                                 CMVideoCodecType codec_type,
1025                                 CFStringRef      profile_level,
1026                                 CFNumberRef      gamma_level,
1027                                 CFDictionaryRef  enc_info,
1028                                 CFDictionaryRef  pixel_buffer_info,
1029                                 VTCompressionSessionRef *session)
1030 {
1031     VTEncContext *vtctx = avctx->priv_data;
1032     SInt32       bit_rate = avctx->bit_rate;
1033     SInt32       max_rate = avctx->rc_max_rate;
1034     Float32      quality = avctx->global_quality / FF_QP2LAMBDA;
1035     CFNumberRef  bit_rate_num;
1036     CFNumberRef  quality_num;
1037     CFNumberRef  bytes_per_second;
1038     CFNumberRef  one_second;
1039     CFArrayRef   data_rate_limits;
1040     int64_t      bytes_per_second_value = 0;
1041     int64_t      one_second_value = 0;
1042     void         *nums[2];
1043
1044     int status = VTCompressionSessionCreate(kCFAllocatorDefault,
1045                                             avctx->width,
1046                                             avctx->height,
1047                                             codec_type,
1048                                             enc_info,
1049                                             pixel_buffer_info,
1050                                             kCFAllocatorDefault,
1051                                             vtenc_output_callback,
1052                                             avctx,
1053                                             session);
1054
1055     if (status || !vtctx->session) {
1056         av_log(avctx, AV_LOG_ERROR, "Error: cannot create compression session: %d\n", status);
1057
1058 #if !TARGET_OS_IPHONE
1059         if (!vtctx->allow_sw) {
1060             av_log(avctx, AV_LOG_ERROR, "Try -allow_sw 1. The hardware encoder may be busy, or not supported.\n");
1061         }
1062 #endif
1063
1064         return AVERROR_EXTERNAL;
1065     }
1066
1067     if (avctx->flags & AV_CODEC_FLAG_QSCALE && !vtenc_qscale_enabled()) {
1068         av_log(avctx, AV_LOG_ERROR, "Error: -q:v qscale not available for encoder. Use -b:v bitrate instead.\n");
1069         return AVERROR_EXTERNAL;
1070     }
1071
1072     if (avctx->flags & AV_CODEC_FLAG_QSCALE) {
1073         quality = quality >= 100 ? 1.0 : quality / 100;
1074         quality_num = CFNumberCreate(kCFAllocatorDefault,
1075                                      kCFNumberFloat32Type,
1076                                      &quality);
1077         if (!quality_num) return AVERROR(ENOMEM);
1078
1079         status = VTSessionSetProperty(vtctx->session,
1080                                       kVTCompressionPropertyKey_Quality,
1081                                       quality_num);
1082         CFRelease(quality_num);
1083     } else {
1084         bit_rate_num = CFNumberCreate(kCFAllocatorDefault,
1085                                       kCFNumberSInt32Type,
1086                                       &bit_rate);
1087         if (!bit_rate_num) return AVERROR(ENOMEM);
1088
1089         status = VTSessionSetProperty(vtctx->session,
1090                                       kVTCompressionPropertyKey_AverageBitRate,
1091                                       bit_rate_num);
1092         CFRelease(bit_rate_num);
1093     }
1094
1095     if (status) {
1096         av_log(avctx, AV_LOG_ERROR, "Error setting bitrate property: %d\n", status);
1097         return AVERROR_EXTERNAL;
1098     }
1099
1100     if (vtctx->codec_id == AV_CODEC_ID_H264 && max_rate > 0) {
1101         // kVTCompressionPropertyKey_DataRateLimits is not available for HEVC
1102         bytes_per_second_value = max_rate >> 3;
1103         bytes_per_second = CFNumberCreate(kCFAllocatorDefault,
1104                                           kCFNumberSInt64Type,
1105                                           &bytes_per_second_value);
1106         if (!bytes_per_second) {
1107             return AVERROR(ENOMEM);
1108         }
1109         one_second_value = 1;
1110         one_second = CFNumberCreate(kCFAllocatorDefault,
1111                                     kCFNumberSInt64Type,
1112                                     &one_second_value);
1113         if (!one_second) {
1114             CFRelease(bytes_per_second);
1115             return AVERROR(ENOMEM);
1116         }
1117         nums[0] = (void *)bytes_per_second;
1118         nums[1] = (void *)one_second;
1119         data_rate_limits = CFArrayCreate(kCFAllocatorDefault,
1120                                          (const void **)nums,
1121                                          2,
1122                                          &kCFTypeArrayCallBacks);
1123
1124         if (!data_rate_limits) {
1125             CFRelease(bytes_per_second);
1126             CFRelease(one_second);
1127             return AVERROR(ENOMEM);
1128         }
1129         status = VTSessionSetProperty(vtctx->session,
1130                                       kVTCompressionPropertyKey_DataRateLimits,
1131                                       data_rate_limits);
1132
1133         CFRelease(bytes_per_second);
1134         CFRelease(one_second);
1135         CFRelease(data_rate_limits);
1136
1137         if (status) {
1138             av_log(avctx, AV_LOG_ERROR, "Error setting max bitrate property: %d\n", status);
1139             return AVERROR_EXTERNAL;
1140         }
1141     }
1142
1143     if (profile_level) {
1144         status = VTSessionSetProperty(vtctx->session,
1145                                       kVTCompressionPropertyKey_ProfileLevel,
1146                                       profile_level);
1147         if (status) {
1148             av_log(avctx, AV_LOG_ERROR, "Error setting profile/level property: %d. Output will be encoded using a supported profile/level combination.\n", status);
1149         }
1150     }
1151
1152     if (avctx->gop_size > 0) {
1153         CFNumberRef interval = CFNumberCreate(kCFAllocatorDefault,
1154                                               kCFNumberIntType,
1155                                               &avctx->gop_size);
1156         if (!interval) {
1157             return AVERROR(ENOMEM);
1158         }
1159
1160         status = VTSessionSetProperty(vtctx->session,
1161                                       kVTCompressionPropertyKey_MaxKeyFrameInterval,
1162                                       interval);
1163         CFRelease(interval);
1164
1165         if (status) {
1166             av_log(avctx, AV_LOG_ERROR, "Error setting 'max key-frame interval' property: %d\n", status);
1167             return AVERROR_EXTERNAL;
1168         }
1169     }
1170
1171     if (vtctx->frames_before) {
1172         status = VTSessionSetProperty(vtctx->session,
1173                                       kVTCompressionPropertyKey_MoreFramesBeforeStart,
1174                                       kCFBooleanTrue);
1175
1176         if (status == kVTPropertyNotSupportedErr) {
1177             av_log(avctx, AV_LOG_WARNING, "frames_before property is not supported on this device. Ignoring.\n");
1178         } else if (status) {
1179             av_log(avctx, AV_LOG_ERROR, "Error setting frames_before property: %d\n", status);
1180         }
1181     }
1182
1183     if (vtctx->frames_after) {
1184         status = VTSessionSetProperty(vtctx->session,
1185                                       kVTCompressionPropertyKey_MoreFramesAfterEnd,
1186                                       kCFBooleanTrue);
1187
1188         if (status == kVTPropertyNotSupportedErr) {
1189             av_log(avctx, AV_LOG_WARNING, "frames_after property is not supported on this device. Ignoring.\n");
1190         } else if (status) {
1191             av_log(avctx, AV_LOG_ERROR, "Error setting frames_after property: %d\n", status);
1192         }
1193     }
1194
1195     if (avctx->sample_aspect_ratio.num != 0) {
1196         CFNumberRef num;
1197         CFNumberRef den;
1198         CFMutableDictionaryRef par;
1199         AVRational *avpar = &avctx->sample_aspect_ratio;
1200
1201         av_reduce(&avpar->num, &avpar->den,
1202                    avpar->num,  avpar->den,
1203                   0xFFFFFFFF);
1204
1205         num = CFNumberCreate(kCFAllocatorDefault,
1206                              kCFNumberIntType,
1207                              &avpar->num);
1208
1209         den = CFNumberCreate(kCFAllocatorDefault,
1210                              kCFNumberIntType,
1211                              &avpar->den);
1212
1213
1214
1215         par = CFDictionaryCreateMutable(kCFAllocatorDefault,
1216                                         2,
1217                                         &kCFCopyStringDictionaryKeyCallBacks,
1218                                         &kCFTypeDictionaryValueCallBacks);
1219
1220         if (!par || !num || !den) {
1221             if (par) CFRelease(par);
1222             if (num) CFRelease(num);
1223             if (den) CFRelease(den);
1224
1225             return AVERROR(ENOMEM);
1226         }
1227
1228         CFDictionarySetValue(
1229             par,
1230             kCMFormatDescriptionKey_PixelAspectRatioHorizontalSpacing,
1231             num);
1232
1233         CFDictionarySetValue(
1234             par,
1235             kCMFormatDescriptionKey_PixelAspectRatioVerticalSpacing,
1236             den);
1237
1238         status = VTSessionSetProperty(vtctx->session,
1239                                       kVTCompressionPropertyKey_PixelAspectRatio,
1240                                       par);
1241
1242         CFRelease(par);
1243         CFRelease(num);
1244         CFRelease(den);
1245
1246         if (status) {
1247             av_log(avctx,
1248                    AV_LOG_ERROR,
1249                    "Error setting pixel aspect ratio to %d:%d: %d.\n",
1250                    avctx->sample_aspect_ratio.num,
1251                    avctx->sample_aspect_ratio.den,
1252                    status);
1253
1254             return AVERROR_EXTERNAL;
1255         }
1256     }
1257
1258
1259     if (vtctx->transfer_function) {
1260         status = VTSessionSetProperty(vtctx->session,
1261                                       kVTCompressionPropertyKey_TransferFunction,
1262                                       vtctx->transfer_function);
1263
1264         if (status) {
1265             av_log(avctx, AV_LOG_WARNING, "Could not set transfer function: %d\n", status);
1266         }
1267     }
1268
1269
1270     if (vtctx->ycbcr_matrix) {
1271         status = VTSessionSetProperty(vtctx->session,
1272                                       kVTCompressionPropertyKey_YCbCrMatrix,
1273                                       vtctx->ycbcr_matrix);
1274
1275         if (status) {
1276             av_log(avctx, AV_LOG_WARNING, "Could not set ycbcr matrix: %d\n", status);
1277         }
1278     }
1279
1280
1281     if (vtctx->color_primaries) {
1282         status = VTSessionSetProperty(vtctx->session,
1283                                       kVTCompressionPropertyKey_ColorPrimaries,
1284                                       vtctx->color_primaries);
1285
1286         if (status) {
1287             av_log(avctx, AV_LOG_WARNING, "Could not set color primaries: %d\n", status);
1288         }
1289     }
1290
1291     if (gamma_level) {
1292         status = VTSessionSetProperty(vtctx->session,
1293                                       kCVImageBufferGammaLevelKey,
1294                                       gamma_level);
1295
1296         if (status) {
1297             av_log(avctx, AV_LOG_WARNING, "Could not set gamma level: %d\n", status);
1298         }
1299     }
1300
1301     if (!vtctx->has_b_frames) {
1302         status = VTSessionSetProperty(vtctx->session,
1303                                       kVTCompressionPropertyKey_AllowFrameReordering,
1304                                       kCFBooleanFalse);
1305
1306         if (status) {
1307             av_log(avctx, AV_LOG_ERROR, "Error setting 'allow frame reordering' property: %d\n", status);
1308             return AVERROR_EXTERNAL;
1309         }
1310     }
1311
1312     if (vtctx->entropy != VT_ENTROPY_NOT_SET) {
1313         CFStringRef entropy = vtctx->entropy == VT_CABAC ?
1314                                 compat_keys.kVTH264EntropyMode_CABAC:
1315                                 compat_keys.kVTH264EntropyMode_CAVLC;
1316
1317         status = VTSessionSetProperty(vtctx->session,
1318                                       compat_keys.kVTCompressionPropertyKey_H264EntropyMode,
1319                                       entropy);
1320
1321         if (status) {
1322             av_log(avctx, AV_LOG_ERROR, "Error setting entropy property: %d\n", status);
1323         }
1324     }
1325
1326     if (vtctx->realtime) {
1327         status = VTSessionSetProperty(vtctx->session,
1328                                       compat_keys.kVTCompressionPropertyKey_RealTime,
1329                                       kCFBooleanTrue);
1330
1331         if (status) {
1332             av_log(avctx, AV_LOG_ERROR, "Error setting realtime property: %d\n", status);
1333         }
1334     }
1335
1336     status = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
1337     if (status) {
1338         av_log(avctx, AV_LOG_ERROR, "Error: cannot prepare encoder: %d\n", status);
1339         return AVERROR_EXTERNAL;
1340     }
1341
1342     return 0;
1343 }
1344
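/* Shared setup for init and extradata probing: pick the codec type,
 * profile/level and pixel buffer attributes, then create the session
 * (probing extradata first when global headers are requested). */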
1345 static int vtenc_configure_encoder(AVCodecContext *avctx)
1346 {
1347     CFMutableDictionaryRef enc_info;
1348     CFMutableDictionaryRef pixel_buffer_info;
1349     CMVideoCodecType       codec_type;
1350     VTEncContext           *vtctx = avctx->priv_data;
1351     CFStringRef            profile_level;
1352     CFNumberRef            gamma_level = NULL;
1353     int                    status;
1354
1355     codec_type = get_cm_codec_type(avctx->codec_id);
1356     if (!codec_type) {
1357         av_log(avctx, AV_LOG_ERROR, "Error: no mapping for AVCodecID %d\n", avctx->codec_id);
1358         return AVERROR(EINVAL);
1359     }
1360
1361     vtctx->codec_id = avctx->codec_id;
1362     avctx->max_b_frames = 16;
1363
1364     if (vtctx->codec_id == AV_CODEC_ID_H264) {
1365         vtctx->get_param_set_func = CMVideoFormatDescriptionGetH264ParameterSetAtIndex;
1366
1367         vtctx->has_b_frames = avctx->max_b_frames > 0;
1368         if(vtctx->has_b_frames && vtctx->profile == H264_PROF_BASELINE){
1369             av_log(avctx, AV_LOG_WARNING, "Cannot use B-frames with baseline profile. Output will not contain B-frames.\n");
1370             vtctx->has_b_frames = 0;
1371         }
1372
1373         if (vtctx->entropy == VT_CABAC && vtctx->profile == H264_PROF_BASELINE) {
1374             av_log(avctx, AV_LOG_WARNING, "CABAC entropy requires 'main' or 'high' profile, but baseline was requested. Encode will not use CABAC entropy.\n");
1375             vtctx->entropy = VT_ENTROPY_NOT_SET;
1376         }
1377
1378         if (!get_vt_h264_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
1379     } else {
1380         vtctx->get_param_set_func = compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
1381         if (!vtctx->get_param_set_func) return AVERROR(EINVAL);
1382         if (!get_vt_hevc_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
1383         // HEVC has b-pyramid
1384         vtctx->has_b_frames = avctx->max_b_frames > 0 ? 2 : 0;
1385     }
1386
1387     enc_info = CFDictionaryCreateMutable(
1388         kCFAllocatorDefault,
1389         20,
1390         &kCFCopyStringDictionaryKeyCallBacks,
1391         &kCFTypeDictionaryValueCallBacks
1392     );
1393
1394     if (!enc_info) return AVERROR(ENOMEM);
1395
1396 #if !TARGET_OS_IPHONE
1397     if(vtctx->require_sw) {
1398         CFDictionarySetValue(enc_info,
1399                              compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
1400                              kCFBooleanFalse);
1401     } else if (!vtctx->allow_sw) {
1402         CFDictionarySetValue(enc_info,
1403                              compat_keys.kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
1404                              kCFBooleanTrue);
1405     } else {
1406         CFDictionarySetValue(enc_info,
1407                              compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
1408                              kCFBooleanTrue);
1409     }
1410 #endif
1411
1412     if (avctx->pix_fmt != AV_PIX_FMT_VIDEOTOOLBOX) {
1413         status = create_cv_pixel_buffer_info(avctx, &pixel_buffer_info);
1414         if (status)
1415             goto init_cleanup;
1416     } else {
1417         pixel_buffer_info = NULL;
1418     }
1419
1420     vtctx->dts_delta = vtctx->has_b_frames ? -1 : 0;
1421
1422     get_cv_transfer_function(avctx, &vtctx->transfer_function, &gamma_level);
1423     get_cv_ycbcr_matrix(avctx, &vtctx->ycbcr_matrix);
1424     get_cv_color_primaries(avctx, &vtctx->color_primaries);
1425
1426
1427     if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
1428         status = vtenc_populate_extradata(avctx,
1429                                           codec_type,
1430                                           profile_level,
1431                                           gamma_level,
1432                                           enc_info,
1433                                           pixel_buffer_info);
1434         if (status)
1435             goto init_cleanup;
1436     }
1437
1438     status = vtenc_create_encoder(avctx,
1439                                   codec_type,
1440                                   profile_level,
1441                                   gamma_level,
1442                                   enc_info,
1443                                   pixel_buffer_info,
1444                                   &vtctx->session);
1445
1446 init_cleanup:
1447     if (gamma_level)
1448         CFRelease(gamma_level);
1449
1450     if (pixel_buffer_info)
1451         CFRelease(pixel_buffer_info);
1452
1453     CFRelease(enc_info);
1454
1455     return status;
1456 }
1457
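/* Encoder init: resolve optional symbols once, initialize the queue lock and
 * condition variable, configure the session, and query whether frame
 * reordering (B-frames) is actually enabled. */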
1458 static av_cold int vtenc_init(AVCodecContext *avctx)
1459 {
1460     VTEncContext    *vtctx = avctx->priv_data;
1461     CFBooleanRef    has_b_frames_cfbool;
1462     int             status;
1463
1464     pthread_once(&once_ctrl, loadVTEncSymbols);
1465
1466     pthread_mutex_init(&vtctx->lock, NULL);
1467     pthread_cond_init(&vtctx->cv_sample_sent, NULL);
1468
1469     vtctx->session = NULL;
1470     status = vtenc_configure_encoder(avctx);
1471     if (status) return status;
1472
1473     status = VTSessionCopyProperty(vtctx->session,
1474                                    kVTCompressionPropertyKey_AllowFrameReordering,
1475                                    kCFAllocatorDefault,
1476                                    &has_b_frames_cfbool);
1477
1478     if (!status && has_b_frames_cfbool) {
1479         //Some devices don't output B-frames for main profile, even if requested.
1480         // HEVC has b-pyramid
1481         vtctx->has_b_frames = (CFBooleanGetValue(has_b_frames_cfbool) && avctx->codec_id == AV_CODEC_ID_HEVC) ? 2 : 1;
1482         CFRelease(has_b_frames_cfbool);
1483     }
1484     avctx->has_b_frames = vtctx->has_b_frames;
1485
1486     return 0;
1487 }
1488
1489 static void vtenc_get_frame_info(CMSampleBufferRef buffer, bool *is_key_frame)
1490 {
1491     CFArrayRef      attachments;
1492     CFDictionaryRef attachment;
1493     CFBooleanRef    not_sync;
1494     CFIndex         len;
1495
1496     attachments = CMSampleBufferGetSampleAttachmentsArray(buffer, false);
1497     len = !attachments ? 0 : CFArrayGetCount(attachments);
1498
1499     if (!len) {
1500         *is_key_frame = true;
1501         return;
1502     }
1503
1504     attachment = CFArrayGetValueAtIndex(attachments, 0);
1505
1506     if (CFDictionaryGetValueIfPresent(attachment,
1507                                       kCMSampleAttachmentKey_NotSync,
1508                                       (const void **)&not_sync))
1509     {
1510         *is_key_frame = !CFBooleanGetValue(not_sync);
1511     } else {
1512         *is_key_frame = true;
1513     }
1514 }
1515
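/*
 * True for NAL unit types that must come after any SEI we insert; used in
 * copy_replace_length_codes() to place the A53 caption SEI after AUD/SPS/PPS
 * but before the first slice.
 */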
1516 static int is_post_sei_nal_type(int nal_type){
1517     return nal_type != H264_NAL_SEI &&
1518            nal_type != H264_NAL_SPS &&
1519            nal_type != H264_NAL_PPS &&
1520            nal_type != H264_NAL_AUD;
1521 }
1522
1523 /*
1524  * Finds the end of the SEI messages in an H.264 SEI NAL unit. On success,
1525  * *sei_end points just past the last SEI payload and the parsed length is
 * returned; returns 0 if the unit is not an SEI NAL, negative on malformed data.
1526  */
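/*
 * Each SEI message is coded as <payload type><payload size><payload>, where
 * type and size are sequences of 0xFF bytes followed by a final byte smaller
 * than 0xFF (value = 255 * number_of_0xFF_bytes + final_byte). An optional
 * 0x80 RBSP trailing byte may close the NAL unit.
 */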
1527 static int find_sei_end(AVCodecContext *avctx,
1528                         uint8_t        *nal_data,
1529                         size_t          nal_size,
1530                         uint8_t       **sei_end)
1531 {
1532     int nal_type;
1533     size_t sei_payload_size = 0;
1534     int sei_payload_type = 0;
1535     *sei_end = NULL;
1536     uint8_t *nal_start = nal_data;
1537
1538     if (!nal_size)
1539         return 0;
1540
1541     nal_type = *nal_data & 0x1F;
1542     if (nal_type != H264_NAL_SEI)
1543         return 0;
1544
1545     nal_data++;
1546     nal_size--;
1547
1548     if (nal_data[nal_size - 1] == 0x80)
1549         nal_size--;
1550
1551     while (nal_size > 0 && *nal_data > 0) {
1552         do{
1553             sei_payload_type += *nal_data;
1554             nal_data++;
1555             nal_size--;
1556         } while (nal_size > 0 && *nal_data == 0xFF);
1557
1558         if (!nal_size) {
1559             av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL unit while parsing the payload type.\n");
1560             return AVERROR_INVALIDDATA;
1561         }
1562
1563         do{
1564             sei_payload_size += *nal_data;
1565             nal_data++;
1566             nal_size--;
1567         } while (nal_size > 0 && *nal_data == 0xFF);
1568
1569         if (nal_size < sei_payload_size) {
1570             av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL unit while parsing the payload size.\n");
1571             return AVERROR_INVALIDDATA;
1572         }
1573
1574         nal_data += sei_payload_size;
1575         nal_size -= sei_payload_size;
1576     }
1577
1578     *sei_end = nal_data;
1579
1580     return nal_data - nal_start + 1;
1581 }
1582
1583 /**
1584  * Copies the data inserting emulation prevention bytes as needed.
1585  * Existing data in the destination can be taken into account by providing
1586  * dst with a dst_offset > 0.
1587  *
1588  * @return The number of bytes copied on success. On failure, the negative of
1589  *         the number of bytes needed to copy src is returned.
1590  *         the number of bytes needed to copy src is returned.
 */
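/*
 * Illustrative example: the payload bytes 00 00 01 are written as
 * 00 00 03 01, since an emulation prevention byte (0x03) is inserted
 * whenever two zero bytes are followed by a byte in the range 0x00-0x03.
 */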
1591 static int copy_emulation_prev(const uint8_t *src,
1592                                size_t         src_size,
1593                                uint8_t       *dst,
1594                                ssize_t        dst_offset,
1595                                size_t         dst_size)
1596 {
1597     int zeros = 0;
1598     int wrote_bytes;
1599     uint8_t* dst_start;
1600     uint8_t* dst_end = dst + dst_size;
1601     const uint8_t* src_end = src + src_size;
1602     int start_at = dst_offset > 2 ? dst_offset - 2 : 0;
1603     int i;
1604     for (i = start_at; i < dst_offset && i < dst_size; i++) {
1605         if (!dst[i])
1606             zeros++;
1607         else
1608             zeros = 0;
1609     }
1610
1611     dst += dst_offset;
1612     dst_start = dst;
1613     for (; src < src_end; src++, dst++) {
1614         if (zeros == 2) {
1615             int insert_ep3_byte = *src <= 3;
1616             if (insert_ep3_byte) {
1617                 if (dst < dst_end)
1618                     *dst = 3;
1619                 dst++;
1620             }
1621
1622             zeros = 0;
1623         }
1624
1625         if (dst < dst_end)
1626             *dst = *src;
1627
1628         if (!*src)
1629             zeros++;
1630         else
1631             zeros = 0;
1632     }
1633
1634     wrote_bytes = dst - dst_start;
1635
1636     if (dst > dst_end)
1637         return -wrote_bytes;
1638
1639     return wrote_bytes;
1640 }
1641
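/*
 * Writes a single SEI message: the payload type, the payload size (both in
 * the 0xFF-chunked coding described above) and the emulation-prevented
 * payload. The caller writes the start code and NAL header byte before this
 * and appends the 0x80 RBSP trailing byte afterwards.
 */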
1642 static int write_sei(const ExtraSEI *sei,
1643                      int             sei_type,
1644                      uint8_t        *dst,
1645                      size_t          dst_size)
1646 {
1647     uint8_t *sei_start = dst;
1648     size_t remaining_sei_size = sei->size;
1649     size_t remaining_dst_size = dst_size;
1650     int header_bytes;
1651     int bytes_written;
1652     ssize_t offset;
1653
1654     if (!remaining_dst_size)
1655         return AVERROR_BUFFER_TOO_SMALL;
1656
1657     while (sei_type && remaining_dst_size != 0) {
1658         int sei_byte = sei_type > 255 ? 255 : sei_type;
1659         *dst = sei_byte;
1660
1661         sei_type -= sei_byte;
1662         dst++;
1663         remaining_dst_size--;
1664     }
1665
1666     if (!remaining_dst_size)
1667         return AVERROR_BUFFER_TOO_SMALL;
1668
1669     while (remaining_sei_size && remaining_dst_size != 0) {
1670         int size_byte = remaining_sei_size > 255 ? 255 : remaining_sei_size;
1671         *dst = size_byte;
1672
1673         remaining_sei_size -= size_byte;
1674         dst++;
1675         remaining_dst_size--;
1676     }
1677
1678     if (remaining_dst_size < sei->size)
1679         return AVERROR_BUFFER_TOO_SMALL;
1680
1681     header_bytes = dst - sei_start;
1682
1683     offset = header_bytes;
1684     bytes_written = copy_emulation_prev(sei->data,
1685                                         sei->size,
1686                                         sei_start,
1687                                         offset,
1688                                         dst_size);
1689     if (bytes_written < 0)
1690         return AVERROR_BUFFER_TOO_SMALL;
1691
1692     bytes_written += header_bytes;
1693     return bytes_written;
1694 }
1695
1696 /**
1697  * Copies NAL units and replaces length codes with
1698  * H.264 Annex B start codes. On failure, the contents of
1699  * dst_data may have been modified.
1700  *
1701  * @param length_code_size Byte length of each length code
1702  * @param sample_buffer NAL units prefixed with length codes.
1703  * @param sei Optional A53 closed captions SEI data.
1704  * @param dst_data Must be zeroed before calling this function.
1705  *                 Contains the copied NAL units prefixed with
1706  *                 start codes when the function returns
1707  *                 successfully.
1708  * @param dst_size Length of dst_data
1709  * @return 0 on success
1710  *         AVERROR_INVALIDDATA if length_code_size is invalid
1711  *         AVERROR_BUFFER_TOO_SMALL if dst_data is too small
1712  *         or if a length_code in src_data specifies data beyond
1713  *         the end of its buffer.
1714  */
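/*
 * For example, with a 4-byte length code an input NAL unit of
 *   00 00 00 0A <10 bytes of NAL data>
 * is emitted as
 *   00 00 00 01 <10 bytes of NAL data>
 * i.e. the AVCC-style length prefix is replaced by an Annex B start code.
 */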
1715 static int copy_replace_length_codes(
1716     AVCodecContext *avctx,
1717     size_t        length_code_size,
1718     CMSampleBufferRef sample_buffer,
1719     ExtraSEI      *sei,
1720     uint8_t       *dst_data,
1721     size_t        dst_size)
1722 {
1723     size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1724     size_t remaining_src_size = src_size;
1725     size_t remaining_dst_size = dst_size;
1726     size_t src_offset = 0;
1727     int wrote_sei = 0;
1728     int status;
1729     uint8_t size_buf[4];
1730     uint8_t nal_type;
1731     CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
1732
1733     if (length_code_size > 4) {
1734         return AVERROR_INVALIDDATA;
1735     }
1736
1737     while (remaining_src_size > 0) {
1738         size_t curr_src_len;
1739         size_t curr_dst_len;
1740         size_t box_len = 0;
1741         size_t i;
1742
1743         uint8_t       *dst_box;
1744
1745         status = CMBlockBufferCopyDataBytes(block,
1746                                             src_offset,
1747                                             length_code_size,
1748                                             size_buf);
1749         if (status) {
1750             av_log(avctx, AV_LOG_ERROR, "Cannot copy length: %d\n", status);
1751             return AVERROR_EXTERNAL;
1752         }
1753
1754         status = CMBlockBufferCopyDataBytes(block,
1755                                             src_offset + length_code_size,
1756                                             1,
1757                                             &nal_type);
1758
1759         if (status) {
1760             av_log(avctx, AV_LOG_ERROR, "Cannot copy type: %d\n", status);
1761             return AVERROR_EXTERNAL;
1762         }
1763
1764         nal_type &= 0x1F;
1765
1766         for (i = 0; i < length_code_size; i++) {
1767             box_len <<= 8;
1768             box_len |= size_buf[i];
1769         }
1770
1771         if (sei && !wrote_sei && is_post_sei_nal_type(nal_type)) {
1772             //No SEI NAL unit - insert.
1773             int wrote_bytes;
1774
1775             memcpy(dst_data, start_code, sizeof(start_code));
1776             dst_data += sizeof(start_code);
1777             remaining_dst_size -= sizeof(start_code);
1778
1779             *dst_data = H264_NAL_SEI;
1780             dst_data++;
1781             remaining_dst_size--;
1782
1783             wrote_bytes = write_sei(sei,
1784                                     SEI_TYPE_USER_DATA_REGISTERED_ITU_T_T35,
1785                                     dst_data,
1786                                     remaining_dst_size);
1787
1788             if (wrote_bytes < 0)
1789                 return wrote_bytes;
1790
1791             remaining_dst_size -= wrote_bytes;
1792             dst_data += wrote_bytes;
1793
1794             if (remaining_dst_size <= 0)
1795                 return AVERROR_BUFFER_TOO_SMALL;
1796
1797             *dst_data = 0x80;
1798
1799             dst_data++;
1800             remaining_dst_size--;
1801
1802             wrote_sei = 1;
1803         }
1804
1805         curr_src_len = box_len + length_code_size;
1806         curr_dst_len = box_len + sizeof(start_code);
1807
1808         if (remaining_src_size < curr_src_len) {
1809             return AVERROR_BUFFER_TOO_SMALL;
1810         }
1811
1812         if (remaining_dst_size < curr_dst_len) {
1813             return AVERROR_BUFFER_TOO_SMALL;
1814         }
1815
1816         dst_box = dst_data + sizeof(start_code);
1817
1818         memcpy(dst_data, start_code, sizeof(start_code));
1819         status = CMBlockBufferCopyDataBytes(block,
1820                                             src_offset + length_code_size,
1821                                             box_len,
1822                                             dst_box);
1823
1824         if (status) {
1825             av_log(avctx, AV_LOG_ERROR, "Cannot copy data: %d\n", status);
1826             return AVERROR_EXTERNAL;
1827         }
1828
1829         if (sei && !wrote_sei && nal_type == H264_NAL_SEI) {
1830             //Found SEI NAL unit - append.
1831             int wrote_bytes;
1832             int old_sei_length;
1833             int extra_bytes;
1834             uint8_t *new_sei;
1835             old_sei_length = find_sei_end(avctx, dst_box, box_len, &new_sei);
1836             if (old_sei_length < 0)
1837                 return old_sei_length;
1838
1839             wrote_bytes = write_sei(sei,
1840                                     SEI_TYPE_USER_DATA_REGISTERED_ITU_T_T35,
1841                                     new_sei,
1842                                     remaining_dst_size - old_sei_length);
1843             if (wrote_bytes < 0)
1844                 return wrote_bytes;
1845
1846             if (new_sei + wrote_bytes >= dst_data + remaining_dst_size)
1847                 return AVERROR_BUFFER_TOO_SMALL;
1848
1849             new_sei[wrote_bytes++] = 0x80;
1850             extra_bytes = wrote_bytes - (dst_box + box_len - new_sei);
1851
1852             dst_data += extra_bytes;
1853             remaining_dst_size -= extra_bytes;
1854
1855             wrote_sei = 1;
1856         }
1857
1858         src_offset += curr_src_len;
1859         dst_data += curr_dst_len;
1860
1861         remaining_src_size -= curr_src_len;
1862         remaining_dst_size -= curr_dst_len;
1863     }
1864
1865     return 0;
1866 }
1867
1868 /**
1869  * Returns a sufficient number of bytes to contain the sei data.
1870  * It may be greater than the minimum required.
1871  */
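/*
 * The estimate below is the emulation-prevented payload size plus one byte
 * per 255 units of the payload size and of the type (the 0xFF chunks), plus
 * one final byte for each of the two fields.
 */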
1872 static int get_sei_msg_bytes(const ExtraSEI* sei, int type){
1873     int copied_size;
1874     if (sei->size == 0)
1875         return 0;
1876
1877     copied_size = -copy_emulation_prev(sei->data,
1878                                        sei->size,
1879                                        NULL,
1880                                        0,
1881                                        0);
1882
1883     if ((sei->size % 255) == 0) //may result in an extra byte
1884         copied_size++;
1885
1886     return copied_size + sei->size / 255 + 1 + type / 255 + 1;
1887 }
1888
1889 static int vtenc_cm_to_avpacket(
1890     AVCodecContext    *avctx,
1891     CMSampleBufferRef sample_buffer,
1892     AVPacket          *pkt,
1893     ExtraSEI          *sei)
1894 {
1895     VTEncContext *vtctx = avctx->priv_data;
1896
1897     int     status;
1898     bool    is_key_frame;
1899     bool    add_header;
1900     size_t  length_code_size;
1901     size_t  header_size = 0;
1902     size_t  in_buf_size;
1903     size_t  out_buf_size;
1904     size_t  sei_nalu_size = 0;
1905     int64_t dts_delta;
1906     int64_t time_base_num;
1907     int nalu_count;
1908     CMTime  pts;
1909     CMTime  dts;
1910     CMVideoFormatDescriptionRef vid_fmt;
1911
1912
1913     vtenc_get_frame_info(sample_buffer, &is_key_frame);
1914     status = get_length_code_size(avctx, sample_buffer, &length_code_size);
1915     if (status) return status;
1916
1917     add_header = is_key_frame && !(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER);
1918
1919     if (add_header) {
1920         vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
1921         if (!vid_fmt) {
1922             av_log(avctx, AV_LOG_ERROR, "Cannot get format description.\n");
1923             return AVERROR_EXTERNAL;
1924         }
1925
1926         int status = get_params_size(avctx, vid_fmt, &header_size);
1927         if (status) return status;
1928     }
1929
1930     status = count_nalus(length_code_size, sample_buffer, &nalu_count);
1931     if(status)
1932         return status;
1933
1934     if (sei) {
1935         size_t msg_size = get_sei_msg_bytes(sei,
1936                                             SEI_TYPE_USER_DATA_REGISTERED_ITU_T_T35);
1937
1938         sei_nalu_size = sizeof(start_code) + 1 + msg_size + 1;
1939     }
1940
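    /*
     * Output size estimate: parameter sets (when prepended), the raw sample
     * data, the inserted SEI NAL unit (if any), plus the per-NAL-unit
     * difference between a start code and the length code it replaces.
     */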
1941     in_buf_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1942     out_buf_size = header_size +
1943                    in_buf_size +
1944                    sei_nalu_size +
1945                    nalu_count * ((int)sizeof(start_code) - (int)length_code_size);
1946
1947     status = ff_alloc_packet2(avctx, pkt, out_buf_size, out_buf_size);
1948     if (status < 0)
1949         return status;
1950
1951     if (add_header) {
1952         status = copy_param_sets(avctx, vid_fmt, pkt->data, out_buf_size);
1953         if(status) return status;
1954     }
1955
1956     status = copy_replace_length_codes(
1957         avctx,
1958         length_code_size,
1959         sample_buffer,
1960         sei,
1961         pkt->data + header_size,
1962         pkt->size - header_size
1963     );
1964
1965     if (status) {
1966         av_log(avctx, AV_LOG_ERROR, "Error copying packet data: %d\n", status);
1967         return status;
1968     }
1969
1970     if (is_key_frame) {
1971         pkt->flags |= AV_PKT_FLAG_KEY;
1972     }
1973
1974     pts = CMSampleBufferGetPresentationTimeStamp(sample_buffer);
1975     dts = CMSampleBufferGetDecodeTimeStamp      (sample_buffer);
1976
1977     if (CMTIME_IS_INVALID(dts)) {
1978         if (!vtctx->has_b_frames) {
1979             dts = pts;
1980         } else {
1981             av_log(avctx, AV_LOG_ERROR, "DTS is invalid.\n");
1982             return AVERROR_EXTERNAL;
1983         }
1984     }
1985
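    /*
     * When frame reordering is enabled, shift dts back by the pts gap that
     * was measured on the first reordered frame (see vtenc_frame()) so that
     * dts never exceeds pts.
     */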
1986     dts_delta = vtctx->dts_delta >= 0 ? vtctx->dts_delta : 0;
1987     time_base_num = avctx->time_base.num;
1988     pkt->pts = pts.value / time_base_num;
1989     pkt->dts = dts.value / time_base_num - dts_delta;
1990     pkt->size = out_buf_size;
1991
1992     return 0;
1993 }
1994
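/*
 * Plane layouts handled below: NV12 has a full-size luma plane plus one
 * half-size interleaved CbCr plane, YUV420P has three separate planes, and
 * P010LE is the 10-bit variant of NV12 stored in 16-bit little-endian words.
 */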
1995 /*
1996  * contiguous_buf_size is 0 if not contiguous, and the size of the buffer
1997  * containing all planes if so.
1998  */
1999 static int get_cv_pixel_info(
2000     AVCodecContext *avctx,
2001     const AVFrame  *frame,
2002     int            *color,
2003     int            *plane_count,
2004     size_t         *widths,
2005     size_t         *heights,
2006     size_t         *strides,
2007     size_t         *contiguous_buf_size)
2008 {
2009     VTEncContext *vtctx = avctx->priv_data;
2010     int av_format       = frame->format;
2011     int av_color_range  = frame->color_range;
2012     int i;
2013     int range_guessed;
2014     int status;
2015
2016     status = get_cv_pixel_format(avctx, av_format, av_color_range, color, &range_guessed);
2017     if (status) {
2018         av_log(avctx,
2019             AV_LOG_ERROR,
2020             "Could not get pixel format for color format '%s' range '%s'.\n",
2021             av_get_pix_fmt_name(av_format),
2022             av_color_range > AVCOL_RANGE_UNSPECIFIED &&
2023             av_color_range < AVCOL_RANGE_NB ?
2024                av_color_range_name(av_color_range) :
2025                "Unknown");
2026
2027         return AVERROR(EINVAL);
2028     }
2029
2030     if (range_guessed) {
2031         if (!vtctx->warned_color_range) {
2032             vtctx->warned_color_range = true;
2033             av_log(avctx,
2034                    AV_LOG_WARNING,
2035                    "Color range not set for %s. Using MPEG range.\n",
2036                    av_get_pix_fmt_name(av_format));
2037         }
2038     }
2039
2040     switch (av_format) {
2041     case AV_PIX_FMT_NV12:
2042         *plane_count = 2;
2043
2044         widths [0] = avctx->width;
2045         heights[0] = avctx->height;
2046         strides[0] = frame ? frame->linesize[0] : avctx->width;
2047
2048         widths [1] = (avctx->width  + 1) / 2;
2049         heights[1] = (avctx->height + 1) / 2;
2050         strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) & -2;
2051         break;
2052
2053     case AV_PIX_FMT_YUV420P:
2054         *plane_count = 3;
2055
2056         widths [0] = avctx->width;
2057         heights[0] = avctx->height;
2058         strides[0] = frame ? frame->linesize[0] : avctx->width;
2059
2060         widths [1] = (avctx->width  + 1) / 2;
2061         heights[1] = (avctx->height + 1) / 2;
2062         strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) / 2;
2063
2064         widths [2] = (avctx->width  + 1) / 2;
2065         heights[2] = (avctx->height + 1) / 2;
2066         strides[2] = frame ? frame->linesize[2] : (avctx->width + 1) / 2;
2067         break;
2068
2069     case AV_PIX_FMT_P010LE:
2070         *plane_count = 2;
2071         widths[0] = avctx->width;
2072         heights[0] = avctx->height;
2073         strides[0] = frame ? frame->linesize[0] : (avctx->width * 2 + 63) & -64;
2074
2075         widths[1] = (avctx->width + 1) / 2;
2076         heights[1] = (avctx->height + 1) / 2;
2077         strides[1] = frame ? frame->linesize[1] : ((avctx->width + 1) / 2 + 63) & -64;
2078         break;
2079
2080     default:
2081         av_log(
2082                avctx,
2083                AV_LOG_ERROR,
2084                "Could not get frame format info for color %d range %d.\n",
2085                av_format,
2086                av_color_range);
2087
2088         return AVERROR(EINVAL);
2089     }
2090
2091     *contiguous_buf_size = 0;
2092     for (i = 0; i < *plane_count; i++) {
2093         if (i < *plane_count - 1 &&
2094             frame->data[i] + strides[i] * heights[i] != frame->data[i + 1]) {
2095             *contiguous_buf_size = 0;
2096             break;
2097         }
2098
2099         *contiguous_buf_size += strides[i] * heights[i];
2100     }
2101
2102     return 0;
2103 }
2104
2105 //Copies the contents of an AVFrame into an already-allocated CVPixelBuffer.
2106 static int copy_avframe_to_pixel_buffer(AVCodecContext   *avctx,
2107                                         const AVFrame    *frame,
2108                                         CVPixelBufferRef cv_img,
2109                                         const size_t     *plane_strides,
2110                                         const size_t     *plane_rows)
2111 {
2112     int i, j;
2113     size_t plane_count;
2114     int status;
2115     int rows;
2116     int src_stride;
2117     int dst_stride;
2118     uint8_t *src_addr;
2119     uint8_t *dst_addr;
2120     size_t copy_bytes;
2121
2122     status = CVPixelBufferLockBaseAddress(cv_img, 0);
2123     if (status) {
2124         av_log(
2125             avctx,
2126             AV_LOG_ERROR,
2127             "Error: Could not lock base address of CVPixelBuffer: %d.\n",
2128             status
2129         );
        return AVERROR_EXTERNAL;
2130     }
2131
2132     if (CVPixelBufferIsPlanar(cv_img)) {
2133         plane_count = CVPixelBufferGetPlaneCount(cv_img);
2134         for (i = 0; frame->data[i]; i++) {
2135             if (i == plane_count) {
2136                 CVPixelBufferUnlockBaseAddress(cv_img, 0);
2137                 av_log(avctx,
2138                     AV_LOG_ERROR,
2139                     "Error: different number of planes in AVFrame and CVPixelBuffer.\n"
2140                 );
2141
2142                 return AVERROR_EXTERNAL;
2143             }
2144
2145             dst_addr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cv_img, i);
2146             src_addr = (uint8_t*)frame->data[i];
2147             dst_stride = CVPixelBufferGetBytesPerRowOfPlane(cv_img, i);
2148             src_stride = plane_strides[i];
2149             rows = plane_rows[i];
2150
2151             if (dst_stride == src_stride) {
2152                 memcpy(dst_addr, src_addr, src_stride * rows);
2153             } else {
2154                 copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2155
2156                 for (j = 0; j < rows; j++) {
2157                     memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2158                 }
2159             }
2160         }
2161     } else {
2162         if (frame->data[1]) {
2163             CVPixelBufferUnlockBaseAddress(cv_img, 0);
2164             av_log(avctx,
2165                 AV_LOG_ERROR,
2166                 "Error: different number of planes in AVFrame and non-planar CVPixelBuffer.\n"
2167             );
2168
2169             return AVERROR_EXTERNAL;
2170         }
2171
2172         dst_addr = (uint8_t*)CVPixelBufferGetBaseAddress(cv_img);
2173         src_addr = (uint8_t*)frame->data[0];
2174         dst_stride = CVPixelBufferGetBytesPerRow(cv_img);
2175         src_stride = plane_strides[0];
2176         rows = plane_rows[0];
2177
2178         if (dst_stride == src_stride) {
2179             memcpy(dst_addr, src_addr, src_stride * rows);
2180         } else {
2181             copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2182
2183             for (j = 0; j < rows; j++) {
2184                 memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2185             }
2186         }
2187     }
2188
2189     status = CVPixelBufferUnlockBaseAddress(cv_img, 0);
2190     if (status) {
2191         av_log(avctx, AV_LOG_ERROR, "Error: Could not unlock CVPixelBuffer base address: %d.\n", status);
2192         return AVERROR_EXTERNAL;
2193     }
2194
2195     return 0;
2196 }
2197
2198 static int create_cv_pixel_buffer(AVCodecContext   *avctx,
2199                                   const AVFrame    *frame,
2200                                   CVPixelBufferRef *cv_img)
2201 {
2202     int plane_count;
2203     int color;
2204     size_t widths [AV_NUM_DATA_POINTERS];
2205     size_t heights[AV_NUM_DATA_POINTERS];
2206     size_t strides[AV_NUM_DATA_POINTERS];
2207     int status;
2208     size_t contiguous_buf_size;
2209     CVPixelBufferPoolRef pix_buf_pool;
2210     VTEncContext* vtctx = avctx->priv_data;
2211
2212     if (avctx->pix_fmt == AV_PIX_FMT_VIDEOTOOLBOX) {
2213         av_assert0(frame->format == AV_PIX_FMT_VIDEOTOOLBOX);
2214
2215         *cv_img = (CVPixelBufferRef)frame->data[3];
2216         av_assert0(*cv_img);
2217
2218         CFRetain(*cv_img);
2219         return 0;
2220     }
2221
2222     memset(widths,  0, sizeof(widths));
2223     memset(heights, 0, sizeof(heights));
2224     memset(strides, 0, sizeof(strides));
2225
2226     status = get_cv_pixel_info(
2227         avctx,
2228         frame,
2229         &color,
2230         &plane_count,
2231         widths,
2232         heights,
2233         strides,
2234         &contiguous_buf_size
2235     );
2236
2237     if (status) {
2238         av_log(
2239             avctx,
2240             AV_LOG_ERROR,
2241             "Error: Cannot convert format %d color_range %d: %d\n",
2242             frame->format,
2243             frame->color_range,
2244             status
2245         );
2246
2247         return AVERROR_EXTERNAL;
2248     }
2249
2250     pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2251     if (!pix_buf_pool) {
2252         /* On iOS, the VT session is invalidated when the app switches from
2253          * foreground to background and vice versa. Fetch the actual error code
2254          * of the VT session to detect that case and restart the VT session
2255          * accordingly. */
2256         OSStatus vtstatus;
2257
2258         vtstatus = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
2259         if (vtstatus == kVTInvalidSessionErr) {
2260             CFRelease(vtctx->session);
2261             vtctx->session = NULL;
2262             status = vtenc_configure_encoder(avctx);
2263             if (status == 0)
2264                 pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2265         }
2266         if (!pix_buf_pool) {
2267             av_log(avctx, AV_LOG_ERROR, "Could not get pixel buffer pool.\n");
2268             return AVERROR_EXTERNAL;
2269         }
2270         else
2271             av_log(avctx, AV_LOG_WARNING, "VT session restarted because of a "
2272                    "kVTInvalidSessionErr error.\n");
2273     }
2274
2275     status = CVPixelBufferPoolCreatePixelBuffer(NULL,
2276                                                 pix_buf_pool,
2277                                                 cv_img);
2278
2279
2280     if (status) {
2281         av_log(avctx, AV_LOG_ERROR, "Could not create pixel buffer from pool: %d.\n", status);
2282         return AVERROR_EXTERNAL;
2283     }
2284
2285     status = copy_avframe_to_pixel_buffer(avctx, frame, *cv_img, strides, heights);
2286     if (status) {
2287         CFRelease(*cv_img);
2288         *cv_img = NULL;
2289         return status;
2290     }
2291
2292     return 0;
2293 }
2294
2295 static int create_encoder_dict_h264(const AVFrame *frame,
2296                                     CFDictionaryRef* dict_out)
2297 {
2298     CFDictionaryRef dict = NULL;
2299     if (frame->pict_type == AV_PICTURE_TYPE_I) {
2300         const void *keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
2301         const void *vals[] = { kCFBooleanTrue };
2302
2303         dict = CFDictionaryCreate(NULL, keys, vals, 1, NULL, NULL);
2304         if(!dict) return AVERROR(ENOMEM);
2305     }
2306
2307     *dict_out = dict;
2308     return 0;
2309 }
2310
2311 static int vtenc_send_frame(AVCodecContext *avctx,
2312                             VTEncContext   *vtctx,
2313                             const AVFrame  *frame)
2314 {
2315     CMTime time;
2316     CFDictionaryRef frame_dict;
2317     CVPixelBufferRef cv_img = NULL;
2318     AVFrameSideData *side_data = NULL;
2319     ExtraSEI *sei = NULL;
2320     int status = create_cv_pixel_buffer(avctx, frame, &cv_img);
2321
2322     if (status) return status;
2323
2324     status = create_encoder_dict_h264(frame, &frame_dict);
2325     if (status) {
2326         CFRelease(cv_img);
2327         return status;
2328     }
2329
2330     side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
2331     if (vtctx->a53_cc && side_data && side_data->size) {
2332         sei = av_mallocz(sizeof(*sei));
2333         if (!sei) {
2334             av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
2335         } else {
2336             int ret = ff_alloc_a53_sei(frame, 0, &sei->data, &sei->size);
2337             if (ret < 0) {
2338                 av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
2339                 av_free(sei);
2340                 sei = NULL;
2341             }
2342         }
2343     }
2344
2345     time = CMTimeMake(frame->pts * avctx->time_base.num, avctx->time_base.den);
2346     status = VTCompressionSessionEncodeFrame(
2347         vtctx->session,
2348         cv_img,
2349         time,
2350         kCMTimeInvalid,
2351         frame_dict,
2352         sei,
2353         NULL
2354     );
2355
2356     if (frame_dict) CFRelease(frame_dict);
2357     CFRelease(cv_img);
2358
2359     if (status) {
2360         av_log(avctx, AV_LOG_ERROR, "Error: cannot encode frame: %d\n", status);
2361         return AVERROR_EXTERNAL;
2362     }
2363
2364     return 0;
2365 }
2366
2367 static int vtenc_frame(
2368     AVCodecContext *avctx,
2369     AVPacket       *pkt,
2370     const AVFrame  *frame,
2371     int            *got_packet)
2372 {
2373     VTEncContext *vtctx = avctx->priv_data;
2374     bool get_frame;
2375     int status;
2376     CMSampleBufferRef buf = NULL;
2377     ExtraSEI *sei = NULL;
2378
2379     if (frame) {
2380         status = vtenc_send_frame(avctx, vtctx, frame);
2381
2382         if (status) {
2383             status = AVERROR_EXTERNAL;
2384             goto end_nopkt;
2385         }
2386
2387         if (vtctx->frame_ct_in == 0) {
2388             vtctx->first_pts = frame->pts;
2389         } else if(vtctx->frame_ct_in == vtctx->has_b_frames) {
2390             vtctx->dts_delta = frame->pts - vtctx->first_pts;
2391         }
2392
2393         vtctx->frame_ct_in++;
2394     } else if(!vtctx->flushing) {
2395         vtctx->flushing = true;
2396
2397         status = VTCompressionSessionCompleteFrames(vtctx->session,
2398                                                     kCMTimeIndefinite);
2399
2400         if (status) {
2401             av_log(avctx, AV_LOG_ERROR, "Error flushing frames: %d\n", status);
2402             status = AVERROR_EXTERNAL;
2403             goto end_nopkt;
2404         }
2405     }
2406
2407     *got_packet = 0;
2408     get_frame = vtctx->dts_delta >= 0 || !frame;
2409     if (!get_frame) {
2410         status = 0;
2411         goto end_nopkt;
2412     }
2413
2414     status = vtenc_q_pop(vtctx, !frame, &buf, &sei);
2415     if (status) goto end_nopkt;
2416     if (!buf)   goto end_nopkt;
2417
2418     status = vtenc_cm_to_avpacket(avctx, buf, pkt, sei);
2419     if (sei) {
2420         if (sei->data) av_free(sei->data);
2421         av_free(sei);
2422     }
2423     CFRelease(buf);
2424     if (status) goto end_nopkt;
2425
2426     *got_packet = 1;
2427     return 0;
2428
2429 end_nopkt:
2430     av_packet_unref(pkt);
2431     return status;
2432 }
2433
2434 static int vtenc_populate_extradata(AVCodecContext   *avctx,
2435                                     CMVideoCodecType codec_type,
2436                                     CFStringRef      profile_level,
2437                                     CFNumberRef      gamma_level,
2438                                     CFDictionaryRef  enc_info,
2439                                     CFDictionaryRef  pixel_buffer_info)
2440 {
2441     VTEncContext *vtctx = avctx->priv_data;
2442     int status;
2443     CVPixelBufferPoolRef pool = NULL;
2444     CVPixelBufferRef pix_buf = NULL;
2445     CMTime time;
2446     CMSampleBufferRef buf = NULL;
2447
2448     status = vtenc_create_encoder(avctx,
2449                                   codec_type,
2450                                   profile_level,
2451                                   gamma_level,
2452                                   enc_info,
2453                                   pixel_buffer_info,
2454                                   &vtctx->session);
2455     if (status)
2456         goto pe_cleanup;
2457
2458     pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2459     if(!pool){
2460         av_log(avctx, AV_LOG_ERROR, "Error getting pixel buffer pool.\n");
        status = AVERROR_EXTERNAL;
2461         goto pe_cleanup;
2462     }
2463
2464     status = CVPixelBufferPoolCreatePixelBuffer(NULL,
2465                                                 pool,
2466                                                 &pix_buf);
2467
2468     if(status != kCVReturnSuccess){
2469         av_log(avctx, AV_LOG_ERROR, "Error creating frame from pool: %d\n", status);
2470         goto pe_cleanup;
2471     }
2472
2473     time = CMTimeMake(0, avctx->time_base.den);
2474     status = VTCompressionSessionEncodeFrame(vtctx->session,
2475                                              pix_buf,
2476                                              time,
2477                                              kCMTimeInvalid,
2478                                              NULL,
2479                                              NULL,
2480                                              NULL);
2481
2482     if (status) {
2483         av_log(avctx,
2484                AV_LOG_ERROR,
2485                "Error sending frame for extradata: %d\n",
2486                status);
2487
2488         goto pe_cleanup;
2489     }
2490
2491     //Populates extradata - output frames are flushed and param sets are available.
2492     status = VTCompressionSessionCompleteFrames(vtctx->session,
2493                                                 kCMTimeIndefinite);
2494
2495     if (status)
2496         goto pe_cleanup;
2497
2498     status = vtenc_q_pop(vtctx, 0, &buf, NULL);
2499     if (status) {
2500         av_log(avctx, AV_LOG_ERROR, "Error popping encoded frame for extradata: %d\n", status);
2501         goto pe_cleanup;
2502     }
2503
2504     CFRelease(buf);
2505
2506
2507
2508 pe_cleanup:
2509     if(vtctx->session)
2510         CFRelease(vtctx->session);
2511
2512     vtctx->session = NULL;
2513     vtctx->frame_ct_out = 0;
2514
2515     av_assert0(status != 0 || (avctx->extradata && avctx->extradata_size > 0));
2516
2517     return status;
2518 }
2519
2520 static av_cold int vtenc_close(AVCodecContext *avctx)
2521 {
2522     VTEncContext *vtctx = avctx->priv_data;
2523
2524     if(!vtctx->session) {
2525         pthread_cond_destroy(&vtctx->cv_sample_sent);
2526         pthread_mutex_destroy(&vtctx->lock);
2527         return 0;
2528     }
2529
2530     VTCompressionSessionCompleteFrames(vtctx->session,
2531                                        kCMTimeIndefinite);
2532     clear_frame_queue(vtctx);
2533     pthread_cond_destroy(&vtctx->cv_sample_sent);
2534     pthread_mutex_destroy(&vtctx->lock);
2535     CFRelease(vtctx->session);
2536     vtctx->session = NULL;
2537
2538     if (vtctx->color_primaries) {
2539         CFRelease(vtctx->color_primaries);
2540         vtctx->color_primaries = NULL;
2541     }
2542
2543     if (vtctx->transfer_function) {
2544         CFRelease(vtctx->transfer_function);
2545         vtctx->transfer_function = NULL;
2546     }
2547
2548     if (vtctx->ycbcr_matrix) {
2549         CFRelease(vtctx->ycbcr_matrix);
2550         vtctx->ycbcr_matrix = NULL;
2551     }
2552
2553     return 0;
2554 }
2555
2556 static const enum AVPixelFormat avc_pix_fmts[] = {
2557     AV_PIX_FMT_VIDEOTOOLBOX,
2558     AV_PIX_FMT_NV12,
2559     AV_PIX_FMT_YUV420P,
2560     AV_PIX_FMT_NONE
2561 };
2562
2563 static const enum AVPixelFormat hevc_pix_fmts[] = {
2564     AV_PIX_FMT_VIDEOTOOLBOX,
2565     AV_PIX_FMT_NV12,
2566     AV_PIX_FMT_YUV420P,
2567     AV_PIX_FMT_P010LE,
2568     AV_PIX_FMT_NONE
2569 };
2570
2571 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
2572 #define COMMON_OPTIONS \
2573     { "allow_sw", "Allow software encoding", OFFSET(allow_sw), AV_OPT_TYPE_BOOL, \
2574         { .i64 = 0 }, 0, 1, VE }, \
2575     { "require_sw", "Require software encoding", OFFSET(require_sw), AV_OPT_TYPE_BOOL, \
2576         { .i64 = 0 }, 0, 1, VE }, \
2577     { "realtime", "Hint that encoding should happen in real-time if not faster (e.g. capturing from camera).", \
2578         OFFSET(realtime), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2579     { "frames_before", "Other frames will come before the frames in this session. This helps smooth concatenation issues.", \
2580         OFFSET(frames_before), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2581     { "frames_after", "Other frames will come after the frames in this session. This helps smooth concatenation issues.", \
2582         OFFSET(frames_after), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2583
2584 #define OFFSET(x) offsetof(VTEncContext, x)
2585 static const AVOption h264_options[] = {
2586     { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = H264_PROF_AUTO }, H264_PROF_AUTO, H264_PROF_COUNT, VE, "profile" },
2587     { "baseline", "Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_BASELINE }, INT_MIN, INT_MAX, VE, "profile" },
2588     { "main",     "Main Profile",     0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_MAIN     }, INT_MIN, INT_MAX, VE, "profile" },
2589     { "high",     "High Profile",     0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_HIGH     }, INT_MIN, INT_MAX, VE, "profile" },
2590     { "extended", "Extended Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_EXTENDED }, INT_MIN, INT_MAX, VE, "profile" },
2591
2592     { "level", "Level", OFFSET(level), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 52, VE, "level" },
2593     { "1.3", "Level 1.3, only available with Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = 13 }, INT_MIN, INT_MAX, VE, "level" },
2594     { "3.0", "Level 3.0", 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, INT_MIN, INT_MAX, VE, "level" },
2595     { "3.1", "Level 3.1", 0, AV_OPT_TYPE_CONST, { .i64 = 31 }, INT_MIN, INT_MAX, VE, "level" },
2596     { "3.2", "Level 3.2", 0, AV_OPT_TYPE_CONST, { .i64 = 32 }, INT_MIN, INT_MAX, VE, "level" },
2597     { "4.0", "Level 4.0", 0, AV_OPT_TYPE_CONST, { .i64 = 40 }, INT_MIN, INT_MAX, VE, "level" },
2598     { "4.1", "Level 4.1", 0, AV_OPT_TYPE_CONST, { .i64 = 41 }, INT_MIN, INT_MAX, VE, "level" },
2599     { "4.2", "Level 4.2", 0, AV_OPT_TYPE_CONST, { .i64 = 42 }, INT_MIN, INT_MAX, VE, "level" },
2600     { "5.0", "Level 5.0", 0, AV_OPT_TYPE_CONST, { .i64 = 50 }, INT_MIN, INT_MAX, VE, "level" },
2601     { "5.1", "Level 5.1", 0, AV_OPT_TYPE_CONST, { .i64 = 51 }, INT_MIN, INT_MAX, VE, "level" },
2602     { "5.2", "Level 5.2", 0, AV_OPT_TYPE_CONST, { .i64 = 52 }, INT_MIN, INT_MAX, VE, "level" },
2603
2604     { "coder", "Entropy coding", OFFSET(entropy), AV_OPT_TYPE_INT, { .i64 = VT_ENTROPY_NOT_SET }, VT_ENTROPY_NOT_SET, VT_CABAC, VE, "coder" },
2605     { "cavlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
2606     { "vlc",   "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
2607     { "cabac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
2608     { "ac",    "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
2609
2610     { "a53cc", "Use A53 Closed Captions (if available)", OFFSET(a53_cc), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, VE },
2611
2612     COMMON_OPTIONS
2613     { NULL },
2614 };
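/*
 * Illustrative use of the options above (file names are placeholders):
 *   ffmpeg -i in.mov -c:v h264_videotoolbox -profile:v high -coder cabac \
 *          -b:v 5M out.mp4
 */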
2615
2616 static const AVClass h264_videotoolbox_class = {
2617     .class_name = "h264_videotoolbox",
2618     .item_name  = av_default_item_name,
2619     .option     = h264_options,
2620     .version    = LIBAVUTIL_VERSION_INT,
2621 };
2622
2623 AVCodec ff_h264_videotoolbox_encoder = {
2624     .name             = "h264_videotoolbox",
2625     .long_name        = NULL_IF_CONFIG_SMALL("VideoToolbox H.264 Encoder"),
2626     .type             = AVMEDIA_TYPE_VIDEO,
2627     .id               = AV_CODEC_ID_H264,
2628     .priv_data_size   = sizeof(VTEncContext),
2629     .pix_fmts         = avc_pix_fmts,
2630     .init             = vtenc_init,
2631     .encode2          = vtenc_frame,
2632     .close            = vtenc_close,
2633     .capabilities     = AV_CODEC_CAP_DELAY,
2634     .priv_class       = &h264_videotoolbox_class,
2635     .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE |
2636                         FF_CODEC_CAP_INIT_CLEANUP,
2637 };
2638
2639 static const AVOption hevc_options[] = {
2640     { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = HEVC_PROF_AUTO }, HEVC_PROF_AUTO, HEVC_PROF_COUNT, VE, "profile" },
2641     { "main",     "Main Profile",     0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN   }, INT_MIN, INT_MAX, VE, "profile" },
2642     { "main10",   "Main10 Profile",   0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN10 }, INT_MIN, INT_MAX, VE, "profile" },
2643
2644     COMMON_OPTIONS
2645     { NULL },
2646 };
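/*
 * Illustrative use (file names are placeholders); 10-bit encoding pairs the
 * main10 profile with a 10-bit input format such as p010le:
 *   ffmpeg -i in.mov -c:v hevc_videotoolbox -profile:v main10 \
 *          -pix_fmt p010le -b:v 8M out.mp4
 */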
2647
2648 static const AVClass hevc_videotoolbox_class = {
2649     .class_name = "hevc_videotoolbox",
2650     .item_name  = av_default_item_name,
2651     .option     = hevc_options,
2652     .version    = LIBAVUTIL_VERSION_INT,
2653 };
2654
2655 AVCodec ff_hevc_videotoolbox_encoder = {
2656     .name             = "hevc_videotoolbox",
2657     .long_name        = NULL_IF_CONFIG_SMALL("VideoToolbox H.265 Encoder"),
2658     .type             = AVMEDIA_TYPE_VIDEO,
2659     .id               = AV_CODEC_ID_HEVC,
2660     .priv_data_size   = sizeof(VTEncContext),
2661     .pix_fmts         = hevc_pix_fmts,
2662     .init             = vtenc_init,
2663     .encode2          = vtenc_frame,
2664     .close            = vtenc_close,
2665     .capabilities     = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE,
2666     .priv_class       = &hevc_videotoolbox_class,
2667     .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE |
2668                         FF_CODEC_CAP_INIT_CLEANUP,
2669     .wrapper_name     = "videotoolbox",
2670 };