1 /*
2  * copyright (c) 2015 Rick Kern <kernrj@gmail.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20
21 #include <VideoToolbox/VideoToolbox.h>
22 #include <CoreVideo/CoreVideo.h>
23 #include <CoreMedia/CoreMedia.h>
24 #include <TargetConditionals.h>
25 #include <Availability.h>
26 #include "avcodec.h"
27 #include "libavutil/opt.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/avstring.h"
31 #include "libavutil/pixdesc.h"
32 #include "internal.h"
33 #include <pthread.h>
34 #include "atsc_a53.h"
35 #include "h264.h"
36 #include "h264_sei.h"
37 #include <dlfcn.h>
38
39 #if !HAVE_KCMVIDEOCODECTYPE_HEVC
40 enum { kCMVideoCodecType_HEVC = 'hvc1' };
41 #endif
42
43 #if !HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
44 enum { kCVPixelFormatType_420YpCbCr10BiPlanarFullRange = 'xf20' };
45 enum { kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange = 'x420' };
46 #endif
47
48 typedef OSStatus (*getParameterSetAtIndex)(CMFormatDescriptionRef videoDesc,
49                                            size_t parameterSetIndex,
50                                            const uint8_t **parameterSetPointerOut,
51                                            size_t *parameterSetSizeOut,
52                                            size_t *parameterSetCountOut,
53                                            int *NALUnitHeaderLengthOut);
54
55 // These symbols may not be present at runtime on older OS versions
56 static struct{
57     CFStringRef kCVImageBufferColorPrimaries_ITU_R_2020;
58     CFStringRef kCVImageBufferTransferFunction_ITU_R_2020;
59     CFStringRef kCVImageBufferYCbCrMatrix_ITU_R_2020;
60
61     CFStringRef kVTCompressionPropertyKey_H264EntropyMode;
62     CFStringRef kVTH264EntropyMode_CAVLC;
63     CFStringRef kVTH264EntropyMode_CABAC;
64
65     CFStringRef kVTProfileLevel_H264_Baseline_4_0;
66     CFStringRef kVTProfileLevel_H264_Baseline_4_2;
67     CFStringRef kVTProfileLevel_H264_Baseline_5_0;
68     CFStringRef kVTProfileLevel_H264_Baseline_5_1;
69     CFStringRef kVTProfileLevel_H264_Baseline_5_2;
70     CFStringRef kVTProfileLevel_H264_Baseline_AutoLevel;
71     CFStringRef kVTProfileLevel_H264_Main_4_2;
72     CFStringRef kVTProfileLevel_H264_Main_5_1;
73     CFStringRef kVTProfileLevel_H264_Main_5_2;
74     CFStringRef kVTProfileLevel_H264_Main_AutoLevel;
75     CFStringRef kVTProfileLevel_H264_High_3_0;
76     CFStringRef kVTProfileLevel_H264_High_3_1;
77     CFStringRef kVTProfileLevel_H264_High_3_2;
78     CFStringRef kVTProfileLevel_H264_High_4_0;
79     CFStringRef kVTProfileLevel_H264_High_4_1;
80     CFStringRef kVTProfileLevel_H264_High_4_2;
81     CFStringRef kVTProfileLevel_H264_High_5_1;
82     CFStringRef kVTProfileLevel_H264_High_5_2;
83     CFStringRef kVTProfileLevel_H264_High_AutoLevel;
84     CFStringRef kVTProfileLevel_H264_Extended_5_0;
85     CFStringRef kVTProfileLevel_H264_Extended_AutoLevel;
86
87     CFStringRef kVTProfileLevel_HEVC_Main_AutoLevel;
88     CFStringRef kVTProfileLevel_HEVC_Main10_AutoLevel;
89
90     CFStringRef kVTCompressionPropertyKey_RealTime;
91
92     CFStringRef kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder;
93     CFStringRef kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder;
94
95     getParameterSetAtIndex CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
96 } compat_keys;
97
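/*
 * GET_SYM(symbol, defaultVal): look the key up at runtime with dlsym() so the
 * code still loads on OS versions that lack it; when the symbol is absent,
 * fall back to a CFSTR literal (assumed to match the key's string value on
 * systems that do export the symbol).
 */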
98 #define GET_SYM(symbol, defaultVal)                                     \
99 do{                                                                     \
100     CFStringRef* handle = (CFStringRef*)dlsym(RTLD_DEFAULT, #symbol);   \
101     if(!handle)                                                         \
102         compat_keys.symbol = CFSTR(defaultVal);                         \
103     else                                                                \
104         compat_keys.symbol = *handle;                                   \
105 }while(0)
106
107 static pthread_once_t once_ctrl = PTHREAD_ONCE_INIT;
108
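/* Runs once via pthread_once() (see vtenc_init) to resolve all optional symbols. */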
109 static void loadVTEncSymbols(){
110     compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex =
111         (getParameterSetAtIndex)dlsym(
112             RTLD_DEFAULT,
113             "CMVideoFormatDescriptionGetHEVCParameterSetAtIndex"
114         );
115
116     GET_SYM(kCVImageBufferColorPrimaries_ITU_R_2020,   "ITU_R_2020");
117     GET_SYM(kCVImageBufferTransferFunction_ITU_R_2020, "ITU_R_2020");
118     GET_SYM(kCVImageBufferYCbCrMatrix_ITU_R_2020,      "ITU_R_2020");
119
120     GET_SYM(kVTCompressionPropertyKey_H264EntropyMode, "H264EntropyMode");
121     GET_SYM(kVTH264EntropyMode_CAVLC, "CAVLC");
122     GET_SYM(kVTH264EntropyMode_CABAC, "CABAC");
123
124     GET_SYM(kVTProfileLevel_H264_Baseline_4_0,       "H264_Baseline_4_0");
125     GET_SYM(kVTProfileLevel_H264_Baseline_4_2,       "H264_Baseline_4_2");
126     GET_SYM(kVTProfileLevel_H264_Baseline_5_0,       "H264_Baseline_5_0");
127     GET_SYM(kVTProfileLevel_H264_Baseline_5_1,       "H264_Baseline_5_1");
128     GET_SYM(kVTProfileLevel_H264_Baseline_5_2,       "H264_Baseline_5_2");
129     GET_SYM(kVTProfileLevel_H264_Baseline_AutoLevel, "H264_Baseline_AutoLevel");
130     GET_SYM(kVTProfileLevel_H264_Main_4_2,           "H264_Main_4_2");
131     GET_SYM(kVTProfileLevel_H264_Main_5_1,           "H264_Main_5_1");
132     GET_SYM(kVTProfileLevel_H264_Main_5_2,           "H264_Main_5_2");
133     GET_SYM(kVTProfileLevel_H264_Main_AutoLevel,     "H264_Main_AutoLevel");
134     GET_SYM(kVTProfileLevel_H264_High_3_0,           "H264_High_3_0");
135     GET_SYM(kVTProfileLevel_H264_High_3_1,           "H264_High_3_1");
136     GET_SYM(kVTProfileLevel_H264_High_3_2,           "H264_High_3_2");
137     GET_SYM(kVTProfileLevel_H264_High_4_0,           "H264_High_4_0");
138     GET_SYM(kVTProfileLevel_H264_High_4_1,           "H264_High_4_1");
139     GET_SYM(kVTProfileLevel_H264_High_4_2,           "H264_High_4_2");
140     GET_SYM(kVTProfileLevel_H264_High_5_1,           "H264_High_5_1");
141     GET_SYM(kVTProfileLevel_H264_High_5_2,           "H264_High_5_2");
142     GET_SYM(kVTProfileLevel_H264_High_AutoLevel,     "H264_High_AutoLevel");
143     GET_SYM(kVTProfileLevel_H264_Extended_5_0,       "H264_Extended_5_0");
144     GET_SYM(kVTProfileLevel_H264_Extended_AutoLevel, "H264_Extended_AutoLevel");
145
146     GET_SYM(kVTProfileLevel_HEVC_Main_AutoLevel,     "HEVC_Main_AutoLevel");
147     GET_SYM(kVTProfileLevel_HEVC_Main10_AutoLevel,   "HEVC_Main10_AutoLevel");
148
149     GET_SYM(kVTCompressionPropertyKey_RealTime, "RealTime");
150
151     GET_SYM(kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
152             "EnableHardwareAcceleratedVideoEncoder");
153     GET_SYM(kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
154             "RequireHardwareAcceleratedVideoEncoder");
155 }
156
157 typedef enum VT_H264Profile {
158     H264_PROF_AUTO,
159     H264_PROF_BASELINE,
160     H264_PROF_MAIN,
161     H264_PROF_HIGH,
162     H264_PROF_EXTENDED,
163     H264_PROF_COUNT
164 } VT_H264Profile;
165
166 typedef enum VTH264Entropy{
167     VT_ENTROPY_NOT_SET,
168     VT_CAVLC,
169     VT_CABAC
170 } VTH264Entropy;
171
172 typedef enum VT_HEVCProfile {
173     HEVC_PROF_AUTO,
174     HEVC_PROF_MAIN,
175     HEVC_PROF_MAIN10,
176     HEVC_PROF_COUNT
177 } VT_HEVCProfile;
178
179 static const uint8_t start_code[] = { 0, 0, 0, 1 };
180
181 typedef struct ExtraSEI {
182     void *data;
183     size_t size;
184 } ExtraSEI;
185
186 typedef struct BufNode {
187     CMSampleBufferRef cm_buffer;
188     ExtraSEI *sei;
189     struct BufNode* next;
190     int error;
191 } BufNode;
192
193 typedef struct VTEncContext {
194     AVClass *class;
195     enum AVCodecID codec_id;
196     VTCompressionSessionRef session;
197     CFStringRef ycbcr_matrix;
198     CFStringRef color_primaries;
199     CFStringRef transfer_function;
200     getParameterSetAtIndex get_param_set_func;
201
202     pthread_mutex_t lock;
203     pthread_cond_t  cv_sample_sent;
204
205     int async_error;
206
207     BufNode *q_head;
208     BufNode *q_tail;
209
210     int64_t frame_ct_out;
211     int64_t frame_ct_in;
212
213     int64_t first_pts;
214     int64_t dts_delta;
215
216     int64_t profile;
217     int64_t level;
218     int64_t entropy;
219     int64_t realtime;
220     int64_t frames_before;
221     int64_t frames_after;
222
223     int64_t allow_sw;
224     int64_t require_sw;
225
226     bool flushing;
227     bool has_b_frames;
228     bool warned_color_range;
229     bool a53_cc;
230 } VTEncContext;
231
232 static int vtenc_populate_extradata(AVCodecContext   *avctx,
233                                     CMVideoCodecType codec_type,
234                                     CFStringRef      profile_level,
235                                     CFNumberRef      gamma_level,
236                                     CFDictionaryRef  enc_info,
237                                     CFDictionaryRef  pixel_buffer_info);
238
239 /**
240  * NULL-safe release of *refPtr; sets *refPtr to NULL afterwards.
241  */
242 static void vt_release_num(CFNumberRef* refPtr){
243     if (!*refPtr) {
244         return;
245     }
246
247     CFRelease(*refPtr);
248     *refPtr = NULL;
249 }
250
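/*
 * Record an error raised from the VideoToolbox callback thread and drop every
 * queued output sample; vtenc_q_pop() will report the error to the caller.
 */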
251 static void set_async_error(VTEncContext *vtctx, int err)
252 {
253     BufNode *info;
254
255     pthread_mutex_lock(&vtctx->lock);
256
257     vtctx->async_error = err;
258
259     info = vtctx->q_head;
260     vtctx->q_head = vtctx->q_tail = NULL;
261
262     while (info) {
263         BufNode *next = info->next;
264         CFRelease(info->cm_buffer);
265         av_free(info);
266         info = next;
267     }
268
269     pthread_mutex_unlock(&vtctx->lock);
270 }
271
272 static void clear_frame_queue(VTEncContext *vtctx)
273 {
274     set_async_error(vtctx, 0);
275 }
276
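/*
 * Pop the next encoded sample from the output queue. With wait set, blocks
 * until a sample (or an async error) arrives. *buf is set to NULL when no
 * sample is available without waiting, or once the encoder is flushing and
 * every submitted frame has been returned.
 */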
277 static int vtenc_q_pop(VTEncContext *vtctx, bool wait, CMSampleBufferRef *buf, ExtraSEI **sei)
278 {
279     BufNode *info;
280
281     pthread_mutex_lock(&vtctx->lock);
282
283     if (vtctx->async_error) {
284         pthread_mutex_unlock(&vtctx->lock);
285         return vtctx->async_error;
286     }
287
288     if (vtctx->flushing && vtctx->frame_ct_in == vtctx->frame_ct_out) {
289         *buf = NULL;
290
291         pthread_mutex_unlock(&vtctx->lock);
292         return 0;
293     }
294
295     while (!vtctx->q_head && !vtctx->async_error && wait) {
296         pthread_cond_wait(&vtctx->cv_sample_sent, &vtctx->lock);
297     }
298
299     if (!vtctx->q_head) {
300         pthread_mutex_unlock(&vtctx->lock);
301         *buf = NULL;
302         return 0;
303     }
304
305     info = vtctx->q_head;
306     vtctx->q_head = vtctx->q_head->next;
307     if (!vtctx->q_head) {
308         vtctx->q_tail = NULL;
309     }
310
311     pthread_mutex_unlock(&vtctx->lock);
312
313     *buf = info->cm_buffer;
314     if (sei && *buf) {
315         *sei = info->sei;
316     } else if (info->sei) {
317         if (info->sei->data) av_free(info->sei->data);
318         av_free(info->sei);
319     }
320     av_free(info);
321
322     vtctx->frame_ct_out++;
323
324     return 0;
325 }
326
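/* Append an encoded sample (plus optional SEI payload) to the output queue
 * and wake any thread blocked in vtenc_q_pop(). */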
327 static void vtenc_q_push(VTEncContext *vtctx, CMSampleBufferRef buffer, ExtraSEI *sei)
328 {
329     BufNode *info = av_malloc(sizeof(BufNode));
330     if (!info) {
331         set_async_error(vtctx, AVERROR(ENOMEM));
332         return;
333     }
334
335     CFRetain(buffer);
336     info->cm_buffer = buffer;
337     info->sei = sei;
338     info->next = NULL;
339
340     pthread_mutex_lock(&vtctx->lock);
341     pthread_cond_signal(&vtctx->cv_sample_sent);
342
343     if (!vtctx->q_head) {
344         vtctx->q_head = info;
345     } else {
346         vtctx->q_tail->next = info;
347     }
348
349     vtctx->q_tail = info;
350
351     pthread_mutex_unlock(&vtctx->lock);
352 }
353
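/*
 * VideoToolbox returns NAL units in length-prefixed (AVCC-style) form: each
 * unit is preceded by a big-endian length field of length_code_size bytes
 * instead of an Annex B start code. With a 4-byte length code, a 9-byte NAL
 * unit is stored as
 *     00 00 00 09 <9 payload bytes>
 * count_nalus() walks these prefixes to count the units in a sample buffer.
 */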
354 static int count_nalus(size_t length_code_size,
355                        CMSampleBufferRef sample_buffer,
356                        int *count)
357 {
358     size_t offset = 0;
359     int status;
360     int nalu_ct = 0;
361     uint8_t size_buf[4];
362     size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
363     CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
364
365     if (length_code_size > 4)
366         return AVERROR_INVALIDDATA;
367
368     while (offset < src_size) {
369         size_t curr_src_len;
370         size_t box_len = 0;
371         size_t i;
372
373         status = CMBlockBufferCopyDataBytes(block,
374                                             offset,
375                                             length_code_size,
376                                             size_buf);
        if (status != kCMBlockBufferNoErr)
            return AVERROR_EXTERNAL;
377
378         for (i = 0; i < length_code_size; i++) {
379             box_len <<= 8;
380             box_len |= size_buf[i];
381         }
382
383         curr_src_len = box_len + length_code_size;
384         offset += curr_src_len;
385
386         nalu_ct++;
387     }
388
389     *count = nalu_ct;
390     return 0;
391 }
392
393 static CMVideoCodecType get_cm_codec_type(enum AVCodecID id)
394 {
395     switch (id) {
396     case AV_CODEC_ID_H264: return kCMVideoCodecType_H264;
397     case AV_CODEC_ID_HEVC: return kCMVideoCodecType_HEVC;
398     default:               return 0;
399     }
400 }
401
402 /**
403  * Compute the total size in bytes needed to store every parameter set of
404  * vid_fmt (SPS/PPS, plus VPS for HEVC) in Annex B form, i.e. with each set
405  * preceded by a 4-byte start code. On success, *size is set to that total.
406  */
410 static int get_params_size(
411     AVCodecContext              *avctx,
412     CMVideoFormatDescriptionRef vid_fmt,
413     size_t                      *size)
414 {
415     VTEncContext *vtctx = avctx->priv_data;
416     size_t total_size = 0;
417     size_t ps_count;
418     int is_count_bad = 0;
419     size_t i;
420     int status;
421     status = vtctx->get_param_set_func(vid_fmt,
422                                        0,
423                                        NULL,
424                                        NULL,
425                                        &ps_count,
426                                        NULL);
427     if (status) {
428         is_count_bad = 1;
429         ps_count     = 0;
430         status       = 0;
431     }
432
433     for (i = 0; i < ps_count || is_count_bad; i++) {
434         const uint8_t *ps;
435         size_t ps_size;
436         status = vtctx->get_param_set_func(vid_fmt,
437                                            i,
438                                            &ps,
439                                            &ps_size,
440                                            NULL,
441                                            NULL);
442         if (status) {
443             /*
444              * When ps_count could not be queried (is_count_bad), a non-zero
445              * status just ends the loop; it only counts as an error on i == 0.
446              */
447             if (i > 0 && is_count_bad) status = 0;
448
449             break;
450         }
451
452         total_size += ps_size + sizeof(start_code);
453     }
454
455     if (status) {
456         av_log(avctx, AV_LOG_ERROR, "Error getting parameter set sizes: %d\n", status);
457         return AVERROR_EXTERNAL;
458     }
459
460     *size = total_size;
461     return 0;
462 }
463
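/*
 * Write every parameter set of vid_fmt into dst, each preceded by a 4-byte
 * Annex B start code, exactly as sized by get_params_size().
 */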
464 static int copy_param_sets(
465     AVCodecContext              *avctx,
466     CMVideoFormatDescriptionRef vid_fmt,
467     uint8_t                     *dst,
468     size_t                      dst_size)
469 {
470     VTEncContext *vtctx = avctx->priv_data;
471     size_t ps_count;
472     int is_count_bad = 0;
473     int status;
474     size_t offset = 0;
475     size_t i;
476
477     status = vtctx->get_param_set_func(vid_fmt,
478                                        0,
479                                        NULL,
480                                        NULL,
481                                        &ps_count,
482                                        NULL);
483     if (status) {
484         is_count_bad = 1;
485         ps_count     = 0;
486         status       = 0;
487     }
488
489
490     for (i = 0; i < ps_count || is_count_bad; i++) {
491         const uint8_t *ps;
492         size_t ps_size;
493         size_t next_offset;
494
495         status = vtctx->get_param_set_func(vid_fmt,
496                                            i,
497                                            &ps,
498                                            &ps_size,
499                                            NULL,
500                                            NULL);
501         if (status) {
502             if (i > 0 && is_count_bad) status = 0;
503
504             break;
505         }
506
507         next_offset = offset + sizeof(start_code) + ps_size;
508         if (dst_size < next_offset) {
509             av_log(avctx, AV_LOG_ERROR, "Error: buffer too small for parameter sets.\n");
510             return AVERROR_BUFFER_TOO_SMALL;
511         }
512
513         memcpy(dst + offset, start_code, sizeof(start_code));
514         offset += sizeof(start_code);
515
516         memcpy(dst + offset, ps, ps_size);
517         offset = next_offset;
518     }
519
520     if (status) {
521         av_log(avctx, AV_LOG_ERROR, "Error getting parameter set data: %d\n", status);
522         return AVERROR_EXTERNAL;
523     }
524
525     return 0;
526 }
527
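/* Build avctx->extradata (start-code-prefixed parameter sets) from the format
 * description of an output sample buffer. */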
528 static int set_extradata(AVCodecContext *avctx, CMSampleBufferRef sample_buffer)
529 {
530     CMVideoFormatDescriptionRef vid_fmt;
531     size_t total_size;
532     int status;
533
534     vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
535     if (!vid_fmt) {
536         av_log(avctx, AV_LOG_ERROR, "No video format.\n");
537         return AVERROR_EXTERNAL;
538     }
539
540     status = get_params_size(avctx, vid_fmt, &total_size);
541     if (status) {
542         av_log(avctx, AV_LOG_ERROR, "Could not get parameter sets.\n");
543         return status;
544     }
545
546     avctx->extradata = av_mallocz(total_size + AV_INPUT_BUFFER_PADDING_SIZE);
547     if (!avctx->extradata) {
548         return AVERROR(ENOMEM);
549     }
550     avctx->extradata_size = total_size;
551
552     status = copy_param_sets(avctx, vid_fmt, avctx->extradata, total_size);
553
554     if (status) {
555         av_log(avctx, AV_LOG_ERROR, "Could not copy param sets.\n");
556         return status;
557     }
558
559     return 0;
560 }
561
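/*
 * VTCompressionSession output callback; runs on a VideoToolbox thread.
 * Records encode errors, fills in global-header extradata from the first
 * sample when requested, and hands the sample to the queue consumed by
 * vtenc_q_pop().
 */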
562 static void vtenc_output_callback(
563     void *ctx,
564     void *sourceFrameCtx,
565     OSStatus status,
566     VTEncodeInfoFlags flags,
567     CMSampleBufferRef sample_buffer)
568 {
569     AVCodecContext *avctx = ctx;
570     VTEncContext   *vtctx = avctx->priv_data;
571     ExtraSEI *sei = sourceFrameCtx;
572
573     if (vtctx->async_error) {
574         if(sample_buffer) CFRelease(sample_buffer);
575         return;
576     }
577
578     if (status) {
579         av_log(avctx, AV_LOG_ERROR, "Error encoding frame: %d\n", (int)status);
580         set_async_error(vtctx, AVERROR_EXTERNAL);
581         return;
582     }
583
584     if (!sample_buffer) {
585         return;
586     }
587
588     if (!avctx->extradata && (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
589         int set_status = set_extradata(avctx, sample_buffer);
590         if (set_status) {
591             set_async_error(vtctx, set_status);
592             return;
593         }
594     }
595
596     vtenc_q_push(vtctx, sample_buffer, sei);
597 }
598
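/* Query how many bytes the encoder uses for each NAL unit length prefix
 * (the NALUnitHeaderLength of the sample's format description). */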
599 static int get_length_code_size(
600     AVCodecContext    *avctx,
601     CMSampleBufferRef sample_buffer,
602     size_t            *size)
603 {
604     VTEncContext *vtctx = avctx->priv_data;
605     CMVideoFormatDescriptionRef vid_fmt;
606     int isize;
607     int status;
608
609     vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
610     if (!vid_fmt) {
611         av_log(avctx, AV_LOG_ERROR, "Error getting buffer format description.\n");
612         return AVERROR_EXTERNAL;
613     }
614
615     status = vtctx->get_param_set_func(vid_fmt,
616                                        0,
617                                        NULL,
618                                        NULL,
619                                        NULL,
620                                        &isize);
621     if (status) {
622         av_log(avctx, AV_LOG_ERROR, "Error getting length code size: %d\n", status);
623         return AVERROR_EXTERNAL;
624     }
625
626     *size = isize;
627     return 0;
628 }
629
630 /*
631  * Returns true on success.
632  *
633  * If *profile_level_val is set to NULL and this function returns true, don't
634  * specify a profile/level to the encoder.
635  */
636 static bool get_vt_h264_profile_level(AVCodecContext *avctx,
637                                       CFStringRef    *profile_level_val)
638 {
639     VTEncContext *vtctx = avctx->priv_data;
640     int64_t profile = vtctx->profile;
641
642     if (profile == H264_PROF_AUTO && vtctx->level) {
643         //Need to pick a profile if level is not auto-selected.
644         profile = vtctx->has_b_frames ? H264_PROF_MAIN : H264_PROF_BASELINE;
645     }
646
647     *profile_level_val = NULL;
648
649     switch (profile) {
650         case H264_PROF_AUTO:
651             return true;
652
653         case H264_PROF_BASELINE:
654             switch (vtctx->level) {
655                 case  0: *profile_level_val =
656                                   compat_keys.kVTProfileLevel_H264_Baseline_AutoLevel; break;
657                 case 13: *profile_level_val = kVTProfileLevel_H264_Baseline_1_3;       break;
658                 case 30: *profile_level_val = kVTProfileLevel_H264_Baseline_3_0;       break;
659                 case 31: *profile_level_val = kVTProfileLevel_H264_Baseline_3_1;       break;
660                 case 32: *profile_level_val = kVTProfileLevel_H264_Baseline_3_2;       break;
661                 case 40: *profile_level_val =
662                                   compat_keys.kVTProfileLevel_H264_Baseline_4_0;       break;
663                 case 41: *profile_level_val = kVTProfileLevel_H264_Baseline_4_1;       break;
664                 case 42: *profile_level_val =
665                                   compat_keys.kVTProfileLevel_H264_Baseline_4_2;       break;
666                 case 50: *profile_level_val =
667                                   compat_keys.kVTProfileLevel_H264_Baseline_5_0;       break;
668                 case 51: *profile_level_val =
669                                   compat_keys.kVTProfileLevel_H264_Baseline_5_1;       break;
670                 case 52: *profile_level_val =
671                                   compat_keys.kVTProfileLevel_H264_Baseline_5_2;       break;
672             }
673             break;
674
675         case H264_PROF_MAIN:
676             switch (vtctx->level) {
677                 case  0: *profile_level_val =
678                                   compat_keys.kVTProfileLevel_H264_Main_AutoLevel; break;
679                 case 30: *profile_level_val = kVTProfileLevel_H264_Main_3_0;       break;
680                 case 31: *profile_level_val = kVTProfileLevel_H264_Main_3_1;       break;
681                 case 32: *profile_level_val = kVTProfileLevel_H264_Main_3_2;       break;
682                 case 40: *profile_level_val = kVTProfileLevel_H264_Main_4_0;       break;
683                 case 41: *profile_level_val = kVTProfileLevel_H264_Main_4_1;       break;
684                 case 42: *profile_level_val =
685                                   compat_keys.kVTProfileLevel_H264_Main_4_2;       break;
686                 case 50: *profile_level_val = kVTProfileLevel_H264_Main_5_0;       break;
687                 case 51: *profile_level_val =
688                                   compat_keys.kVTProfileLevel_H264_Main_5_1;       break;
689                 case 52: *profile_level_val =
690                                   compat_keys.kVTProfileLevel_H264_Main_5_2;       break;
691             }
692             break;
693
694         case H264_PROF_HIGH:
695             switch (vtctx->level) {
696                 case  0: *profile_level_val =
697                                   compat_keys.kVTProfileLevel_H264_High_AutoLevel; break;
698                 case 30: *profile_level_val =
699                                   compat_keys.kVTProfileLevel_H264_High_3_0;       break;
700                 case 31: *profile_level_val =
701                                   compat_keys.kVTProfileLevel_H264_High_3_1;       break;
702                 case 32: *profile_level_val =
703                                   compat_keys.kVTProfileLevel_H264_High_3_2;       break;
704                 case 40: *profile_level_val =
705                                   compat_keys.kVTProfileLevel_H264_High_4_0;       break;
706                 case 41: *profile_level_val =
707                                   compat_keys.kVTProfileLevel_H264_High_4_1;       break;
708                 case 42: *profile_level_val =
709                                   compat_keys.kVTProfileLevel_H264_High_4_2;       break;
710                 case 50: *profile_level_val = kVTProfileLevel_H264_High_5_0;       break;
711                 case 51: *profile_level_val =
712                                   compat_keys.kVTProfileLevel_H264_High_5_1;       break;
713                 case 52: *profile_level_val =
714                                   compat_keys.kVTProfileLevel_H264_High_5_2;       break;
715             }
716             break;
717         case H264_PROF_EXTENDED:
718             switch (vtctx->level) {
719                 case  0: *profile_level_val =
720                                   compat_keys.kVTProfileLevel_H264_Extended_AutoLevel; break;
721                 case 50: *profile_level_val =
722                                   compat_keys.kVTProfileLevel_H264_Extended_5_0;       break;
723             }
724             break;
725     }
726
727     if (!*profile_level_val) {
728         av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
729         return false;
730     }
731
732     return true;
733 }
734
735 /*
736  * Returns true on success.
737  *
738  * If *profile_level_val is set to NULL and this function returns true, don't
739  * specify a profile/level to the encoder.
740  */
741 static bool get_vt_hevc_profile_level(AVCodecContext *avctx,
742                                       CFStringRef    *profile_level_val)
743 {
744     VTEncContext *vtctx = avctx->priv_data;
745     int64_t profile = vtctx->profile;
746
747     *profile_level_val = NULL;
748
749     switch (profile) {
750         case HEVC_PROF_AUTO:
751             return true;
752         case HEVC_PROF_MAIN:
753             *profile_level_val =
754                 compat_keys.kVTProfileLevel_HEVC_Main_AutoLevel;
755             break;
756         case HEVC_PROF_MAIN10:
757             *profile_level_val =
758                 compat_keys.kVTProfileLevel_HEVC_Main10_AutoLevel;
759             break;
760     }
761
762     if (!*profile_level_val) {
763         av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
764         return false;
765     }
766
767     return true;
768 }
769
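/*
 * Map an FFmpeg pixel format and color range to the CoreVideo pixel format
 * VideoToolbox expects. *range_guessed is set when the input did not signal
 * an explicit MPEG/JPEG range, in which case MPEG (video) range is assumed.
 */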
770 static int get_cv_pixel_format(AVCodecContext* avctx,
771                                enum AVPixelFormat fmt,
772                                enum AVColorRange range,
773                                int* av_pixel_format,
774                                int* range_guessed)
775 {
776     if (range_guessed) *range_guessed = range != AVCOL_RANGE_MPEG &&
777                                         range != AVCOL_RANGE_JPEG;
778
779     //MPEG range is used when no range is set
780     if (fmt == AV_PIX_FMT_NV12) {
781         *av_pixel_format = range == AVCOL_RANGE_JPEG ?
782                                         kCVPixelFormatType_420YpCbCr8BiPlanarFullRange :
783                                         kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
784     } else if (fmt == AV_PIX_FMT_YUV420P) {
785         *av_pixel_format = range == AVCOL_RANGE_JPEG ?
786                                         kCVPixelFormatType_420YpCbCr8PlanarFullRange :
787                                         kCVPixelFormatType_420YpCbCr8Planar;
788     } else if (fmt == AV_PIX_FMT_P010LE) {
789         *av_pixel_format = range == AVCOL_RANGE_JPEG ?
790                                         kCVPixelFormatType_420YpCbCr10BiPlanarFullRange :
791                                         kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange;
793     } else {
794         return AVERROR(EINVAL);
795     }
796
797     return 0;
798 }
799
800 static void add_color_attr(AVCodecContext *avctx, CFMutableDictionaryRef dict) {
801     VTEncContext *vtctx = avctx->priv_data;
802
803     if (vtctx->color_primaries) {
804         CFDictionarySetValue(dict,
805                              kCVImageBufferColorPrimariesKey,
806                              vtctx->color_primaries);
807     }
808
809     if (vtctx->transfer_function) {
810         CFDictionarySetValue(dict,
811                              kCVImageBufferTransferFunctionKey,
812                              vtctx->transfer_function);
813     }
814
815     if (vtctx->ycbcr_matrix) {
816         CFDictionarySetValue(dict,
817                              kCVImageBufferYCbCrMatrixKey,
818                              vtctx->ycbcr_matrix);
819     }
820 }
821
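/* Build the source pixel buffer attributes (pixel format, dimensions and
 * color attachments) passed to VTCompressionSessionCreate(). */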
822 static int create_cv_pixel_buffer_info(AVCodecContext* avctx,
823                                        CFMutableDictionaryRef* dict)
824 {
825     CFNumberRef cv_color_format_num = NULL;
826     CFNumberRef width_num = NULL;
827     CFNumberRef height_num = NULL;
828     CFMutableDictionaryRef pixel_buffer_info = NULL;
829     int cv_color_format;
830     int status = get_cv_pixel_format(avctx,
831                                      avctx->pix_fmt,
832                                      avctx->color_range,
833                                      &cv_color_format,
834                                      NULL);
835     if (status) return status;
836
837     pixel_buffer_info = CFDictionaryCreateMutable(
838                             kCFAllocatorDefault,
839                             20,
840                             &kCFCopyStringDictionaryKeyCallBacks,
841                             &kCFTypeDictionaryValueCallBacks);
842
843     if (!pixel_buffer_info) goto pbinfo_nomem;
844
845     cv_color_format_num = CFNumberCreate(kCFAllocatorDefault,
846                                          kCFNumberSInt32Type,
847                                          &cv_color_format);
848     if (!cv_color_format_num) goto pbinfo_nomem;
849
850     CFDictionarySetValue(pixel_buffer_info,
851                          kCVPixelBufferPixelFormatTypeKey,
852                          cv_color_format_num);
853     vt_release_num(&cv_color_format_num);
854
855     width_num = CFNumberCreate(kCFAllocatorDefault,
856                                kCFNumberSInt32Type,
857                                &avctx->width);
858     if (!width_num) goto pbinfo_nomem;
859
860     CFDictionarySetValue(pixel_buffer_info,
861                          kCVPixelBufferWidthKey,
862                          width_num);
863     vt_release_num(&width_num);
864
865     height_num = CFNumberCreate(kCFAllocatorDefault,
866                                 kCFNumberSInt32Type,
867                                 &avctx->height);
868     if (!height_num) goto pbinfo_nomem;
869
870     CFDictionarySetValue(pixel_buffer_info,
871                          kCVPixelBufferHeightKey,
872                          height_num);
873     vt_release_num(&height_num);
874
875     add_color_attr(avctx, pixel_buffer_info);
876
877     *dict = pixel_buffer_info;
878     return 0;
879
880 pbinfo_nomem:
881     vt_release_num(&cv_color_format_num);
882     vt_release_num(&width_num);
883     vt_release_num(&height_num);
884     if (pixel_buffer_info) CFRelease(pixel_buffer_info);
885
886     return AVERROR(ENOMEM);
887 }
888
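/*
 * The following helpers translate FFmpeg color metadata (primaries, transfer
 * characteristic, YCbCr matrix) into the corresponding CVImageBuffer
 * attachment constants; unsupported values log an error and return -1.
 */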
889 static int get_cv_color_primaries(AVCodecContext *avctx,
890                                   CFStringRef *primaries)
891 {
892     enum AVColorPrimaries pri = avctx->color_primaries;
893     switch (pri) {
894         case AVCOL_PRI_UNSPECIFIED:
895             *primaries = NULL;
896             break;
897
898         case AVCOL_PRI_BT470BG:
899             *primaries = kCVImageBufferColorPrimaries_EBU_3213;
900             break;
901
902         case AVCOL_PRI_SMPTE170M:
903             *primaries = kCVImageBufferColorPrimaries_SMPTE_C;
904             break;
905
906         case AVCOL_PRI_BT709:
907             *primaries = kCVImageBufferColorPrimaries_ITU_R_709_2;
908             break;
909
910         case AVCOL_PRI_BT2020:
911             *primaries = compat_keys.kCVImageBufferColorPrimaries_ITU_R_2020;
912             break;
913
914         default:
915             av_log(avctx, AV_LOG_ERROR, "Color primaries %s is not supported.\n", av_color_primaries_name(pri));
916             *primaries = NULL;
917             return -1;
918     }
919
920     return 0;
921 }
922
923 static int get_cv_transfer_function(AVCodecContext *avctx,
924                                     CFStringRef *transfer_fnc,
925                                     CFNumberRef *gamma_level)
926 {
927     enum AVColorTransferCharacteristic trc = avctx->color_trc;
928     Float32 gamma;
929     *gamma_level = NULL;
930
931     switch (trc) {
932         case AVCOL_TRC_UNSPECIFIED:
933             *transfer_fnc = NULL;
934             break;
935
936         case AVCOL_TRC_BT709:
937             *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_709_2;
938             break;
939
940         case AVCOL_TRC_SMPTE240M:
941             *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_240M_1995;
942             break;
943
944 #if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ
945         case AVCOL_TRC_SMPTE2084:
946             *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_ST_2084_PQ;
947             break;
948 #endif
949 #if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR
950         case AVCOL_TRC_LINEAR:
951             *transfer_fnc = kCVImageBufferTransferFunction_Linear;
952             break;
953 #endif
954 #if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG
955         case AVCOL_TRC_ARIB_STD_B67:
956             *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_2100_HLG;
957             break;
958 #endif
959
960         case AVCOL_TRC_GAMMA22:
961             gamma = 2.2;
962             *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
963             *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
964             break;
965
966         case AVCOL_TRC_GAMMA28:
967             gamma = 2.8;
968             *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
969             *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
970             break;
971
972         case AVCOL_TRC_BT2020_10:
973         case AVCOL_TRC_BT2020_12:
974             *transfer_fnc = compat_keys.kCVImageBufferTransferFunction_ITU_R_2020;
975             break;
976
977         default:
978             *transfer_fnc = NULL;
979             av_log(avctx, AV_LOG_ERROR, "Transfer function %s is not supported.\n", av_color_transfer_name(trc));
980             return -1;
981     }
982
983     return 0;
984 }
985
986 static int get_cv_ycbcr_matrix(AVCodecContext *avctx, CFStringRef *matrix) {
987     switch(avctx->colorspace) {
988         case AVCOL_SPC_BT709:
989             *matrix = kCVImageBufferYCbCrMatrix_ITU_R_709_2;
990             break;
991
992         case AVCOL_SPC_UNSPECIFIED:
993             *matrix = NULL;
994             break;
995
996         case AVCOL_SPC_BT470BG:
997         case AVCOL_SPC_SMPTE170M:
998             *matrix = kCVImageBufferYCbCrMatrix_ITU_R_601_4;
999             break;
1000
1001         case AVCOL_SPC_SMPTE240M:
1002             *matrix = kCVImageBufferYCbCrMatrix_SMPTE_240M_1995;
1003             break;
1004
1005         case AVCOL_SPC_BT2020_NCL:
1006             *matrix = compat_keys.kCVImageBufferYCbCrMatrix_ITU_R_2020;
1007             break;
1008
1009         default:
1010             av_log(avctx, AV_LOG_ERROR, "Color space %s is not supported.\n", av_color_space_name(avctx->colorspace));
1011             return -1;
1012     }
1013
1014     return 0;
1015 }
1016
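/*
 * Create the VTCompressionSession and apply the encoder properties derived
 * from the codec context and private options: average bitrate, H.264
 * data-rate limits, profile/level, keyframe interval, pixel aspect ratio,
 * color metadata, frame reordering, entropy mode and the realtime flag.
 */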
1017 static int vtenc_create_encoder(AVCodecContext   *avctx,
1018                                 CMVideoCodecType codec_type,
1019                                 CFStringRef      profile_level,
1020                                 CFNumberRef      gamma_level,
1021                                 CFDictionaryRef  enc_info,
1022                                 CFDictionaryRef  pixel_buffer_info,
1023                                 VTCompressionSessionRef *session)
1024 {
1025     VTEncContext *vtctx = avctx->priv_data;
1026     SInt32       bit_rate = avctx->bit_rate;
1027     SInt32       max_rate = avctx->rc_max_rate;
1028     CFNumberRef  bit_rate_num;
1029     CFNumberRef  bytes_per_second;
1030     CFNumberRef  one_second;
1031     CFArrayRef   data_rate_limits;
1032     int64_t      bytes_per_second_value = 0;
1033     int64_t      one_second_value = 0;
1034     void         *nums[2];
1035
1036     int status = VTCompressionSessionCreate(kCFAllocatorDefault,
1037                                             avctx->width,
1038                                             avctx->height,
1039                                             codec_type,
1040                                             enc_info,
1041                                             pixel_buffer_info,
1042                                             kCFAllocatorDefault,
1043                                             vtenc_output_callback,
1044                                             avctx,
1045                                             session);
1046
1047     if (status || !vtctx->session) {
1048         av_log(avctx, AV_LOG_ERROR, "Error: cannot create compression session: %d\n", status);
1049
1050 #if !TARGET_OS_IPHONE
1051         if (!vtctx->allow_sw) {
1052             av_log(avctx, AV_LOG_ERROR, "Try -allow_sw 1. The hardware encoder may be busy, or not supported.\n");
1053         }
1054 #endif
1055
1056         return AVERROR_EXTERNAL;
1057     }
1058
1059     bit_rate_num = CFNumberCreate(kCFAllocatorDefault,
1060                                   kCFNumberSInt32Type,
1061                                   &bit_rate);
1062     if (!bit_rate_num) return AVERROR(ENOMEM);
1063
1064     status = VTSessionSetProperty(vtctx->session,
1065                                   kVTCompressionPropertyKey_AverageBitRate,
1066                                   bit_rate_num);
1067     CFRelease(bit_rate_num);
1068
1069     if (status) {
1070         av_log(avctx, AV_LOG_ERROR, "Error setting bitrate property: %d\n", status);
1071         return AVERROR_EXTERNAL;
1072     }
1073
1074     if (vtctx->codec_id == AV_CODEC_ID_H264 && max_rate > 0) {
1075         // kVTCompressionPropertyKey_DataRateLimits is not available for HEVC
1076         bytes_per_second_value = max_rate >> 3;
1077         bytes_per_second = CFNumberCreate(kCFAllocatorDefault,
1078                                           kCFNumberSInt64Type,
1079                                           &bytes_per_second_value);
1080         if (!bytes_per_second) {
1081             return AVERROR(ENOMEM);
1082         }
1083         one_second_value = 1;
1084         one_second = CFNumberCreate(kCFAllocatorDefault,
1085                                     kCFNumberSInt64Type,
1086                                     &one_second_value);
1087         if (!one_second) {
1088             CFRelease(bytes_per_second);
1089             return AVERROR(ENOMEM);
1090         }
1091         nums[0] = (void *)bytes_per_second;
1092         nums[1] = (void *)one_second;
1093         data_rate_limits = CFArrayCreate(kCFAllocatorDefault,
1094                                          (const void **)nums,
1095                                          2,
1096                                          &kCFTypeArrayCallBacks);
1097
1098         if (!data_rate_limits) {
1099             CFRelease(bytes_per_second);
1100             CFRelease(one_second);
1101             return AVERROR(ENOMEM);
1102         }
1103         status = VTSessionSetProperty(vtctx->session,
1104                                       kVTCompressionPropertyKey_DataRateLimits,
1105                                       data_rate_limits);
1106
1107         CFRelease(bytes_per_second);
1108         CFRelease(one_second);
1109         CFRelease(data_rate_limits);
1110
1111         if (status) {
1112             av_log(avctx, AV_LOG_ERROR, "Error setting max bitrate property: %d\n", status);
1113             return AVERROR_EXTERNAL;
1114         }
1115     }
1116
1117     if (vtctx->codec_id == AV_CODEC_ID_H264) {
1118         // kVTCompressionPropertyKey_ProfileLevel is not available for HEVC
1119         if (profile_level) {
1120             status = VTSessionSetProperty(vtctx->session,
1121                                         kVTCompressionPropertyKey_ProfileLevel,
1122                                         profile_level);
1123             if (status) {
1124                 av_log(avctx, AV_LOG_ERROR, "Error setting profile/level property: %d. Output will be encoded using a supported profile/level combination.\n", status);
1125             }
1126         }
1127     }
1128
1129     if (avctx->gop_size > 0) {
1130         CFNumberRef interval = CFNumberCreate(kCFAllocatorDefault,
1131                                               kCFNumberIntType,
1132                                               &avctx->gop_size);
1133         if (!interval) {
1134             return AVERROR(ENOMEM);
1135         }
1136
1137         status = VTSessionSetProperty(vtctx->session,
1138                                       kVTCompressionPropertyKey_MaxKeyFrameInterval,
1139                                       interval);
1140         CFRelease(interval);
1141
1142         if (status) {
1143             av_log(avctx, AV_LOG_ERROR, "Error setting 'max key-frame interval' property: %d\n", status);
1144             return AVERROR_EXTERNAL;
1145         }
1146     }
1147
1148     if (vtctx->frames_before) {
1149         status = VTSessionSetProperty(vtctx->session,
1150                                       kVTCompressionPropertyKey_MoreFramesBeforeStart,
1151                                       kCFBooleanTrue);
1152
1153         if (status == kVTPropertyNotSupportedErr) {
1154             av_log(avctx, AV_LOG_WARNING, "frames_before property is not supported on this device. Ignoring.\n");
1155         } else if (status) {
1156             av_log(avctx, AV_LOG_ERROR, "Error setting frames_before property: %d\n", status);
1157         }
1158     }
1159
1160     if (vtctx->frames_after) {
1161         status = VTSessionSetProperty(vtctx->session,
1162                                       kVTCompressionPropertyKey_MoreFramesAfterEnd,
1163                                       kCFBooleanTrue);
1164
1165         if (status == kVTPropertyNotSupportedErr) {
1166             av_log(avctx, AV_LOG_WARNING, "frames_after property is not supported on this device. Ignoring.\n");
1167         } else if (status) {
1168             av_log(avctx, AV_LOG_ERROR, "Error setting frames_after property: %d\n", status);
1169         }
1170     }
1171
1172     if (avctx->sample_aspect_ratio.num != 0) {
1173         CFNumberRef num;
1174         CFNumberRef den;
1175         CFMutableDictionaryRef par;
1176         AVRational *avpar = &avctx->sample_aspect_ratio;
1177
1178         av_reduce(&avpar->num, &avpar->den,
1179                    avpar->num,  avpar->den,
1180                   0xFFFFFFFF);
1181
1182         num = CFNumberCreate(kCFAllocatorDefault,
1183                              kCFNumberIntType,
1184                              &avpar->num);
1185
1186         den = CFNumberCreate(kCFAllocatorDefault,
1187                              kCFNumberIntType,
1188                              &avpar->den);
1189
1190
1191
1192         par = CFDictionaryCreateMutable(kCFAllocatorDefault,
1193                                         2,
1194                                         &kCFCopyStringDictionaryKeyCallBacks,
1195                                         &kCFTypeDictionaryValueCallBacks);
1196
1197         if (!par || !num || !den) {
1198             if (par) CFRelease(par);
1199             if (num) CFRelease(num);
1200             if (den) CFRelease(den);
1201
1202             return AVERROR(ENOMEM);
1203         }
1204
1205         CFDictionarySetValue(
1206             par,
1207             kCMFormatDescriptionKey_PixelAspectRatioHorizontalSpacing,
1208             num);
1209
1210         CFDictionarySetValue(
1211             par,
1212             kCMFormatDescriptionKey_PixelAspectRatioVerticalSpacing,
1213             den);
1214
1215         status = VTSessionSetProperty(vtctx->session,
1216                                       kVTCompressionPropertyKey_PixelAspectRatio,
1217                                       par);
1218
1219         CFRelease(par);
1220         CFRelease(num);
1221         CFRelease(den);
1222
1223         if (status) {
1224             av_log(avctx,
1225                    AV_LOG_ERROR,
1226                    "Error setting pixel aspect ratio to %d:%d: %d.\n",
1227                    avctx->sample_aspect_ratio.num,
1228                    avctx->sample_aspect_ratio.den,
1229                    status);
1230
1231             return AVERROR_EXTERNAL;
1232         }
1233     }
1234
1235
1236     if (vtctx->transfer_function) {
1237         status = VTSessionSetProperty(vtctx->session,
1238                                       kVTCompressionPropertyKey_TransferFunction,
1239                                       vtctx->transfer_function);
1240
1241         if (status) {
1242             av_log(avctx, AV_LOG_WARNING, "Could not set transfer function: %d\n", status);
1243         }
1244     }
1245
1246
1247     if (vtctx->ycbcr_matrix) {
1248         status = VTSessionSetProperty(vtctx->session,
1249                                       kVTCompressionPropertyKey_YCbCrMatrix,
1250                                       vtctx->ycbcr_matrix);
1251
1252         if (status) {
1253             av_log(avctx, AV_LOG_WARNING, "Could not set ycbcr matrix: %d\n", status);
1254         }
1255     }
1256
1257
1258     if (vtctx->color_primaries) {
1259         status = VTSessionSetProperty(vtctx->session,
1260                                       kVTCompressionPropertyKey_ColorPrimaries,
1261                                       vtctx->color_primaries);
1262
1263         if (status) {
1264             av_log(avctx, AV_LOG_WARNING, "Could not set color primaries: %d\n", status);
1265         }
1266     }
1267
1268     if (gamma_level) {
1269         status = VTSessionSetProperty(vtctx->session,
1270                                       kCVImageBufferGammaLevelKey,
1271                                       gamma_level);
1272
1273         if (status) {
1274             av_log(avctx, AV_LOG_WARNING, "Could not set gamma level: %d\n", status);
1275         }
1276     }
1277
1278     if (!vtctx->has_b_frames) {
1279         status = VTSessionSetProperty(vtctx->session,
1280                                       kVTCompressionPropertyKey_AllowFrameReordering,
1281                                       kCFBooleanFalse);
1282
1283         if (status) {
1284             av_log(avctx, AV_LOG_ERROR, "Error setting 'allow frame reordering' property: %d\n", status);
1285             return AVERROR_EXTERNAL;
1286         }
1287     }
1288
1289     if (vtctx->entropy != VT_ENTROPY_NOT_SET) {
1290         CFStringRef entropy = vtctx->entropy == VT_CABAC ?
1291                                 compat_keys.kVTH264EntropyMode_CABAC:
1292                                 compat_keys.kVTH264EntropyMode_CAVLC;
1293
1294         status = VTSessionSetProperty(vtctx->session,
1295                                       compat_keys.kVTCompressionPropertyKey_H264EntropyMode,
1296                                       entropy);
1297
1298         if (status) {
1299             av_log(avctx, AV_LOG_ERROR, "Error setting entropy property: %d\n", status);
1300         }
1301     }
1302
1303     if (vtctx->realtime) {
1304         status = VTSessionSetProperty(vtctx->session,
1305                                       compat_keys.kVTCompressionPropertyKey_RealTime,
1306                                       kCFBooleanTrue);
1307
1308         if (status) {
1309             av_log(avctx, AV_LOG_ERROR, "Error setting realtime property: %d\n", status);
1310         }
1311     }
1312
1313     status = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
1314     if (status) {
1315         av_log(avctx, AV_LOG_ERROR, "Error: cannot prepare encoder: %d\n", status);
1316         return AVERROR_EXTERNAL;
1317     }
1318
1319     return 0;
1320 }
1321
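/*
 * Common configuration path: pick the CoreMedia codec type and parameter set
 * accessor, validate profile/level, build the encoder specification and pixel
 * buffer dictionaries, optionally pre-populate extradata, then create the
 * compression session.
 */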
1322 static int vtenc_configure_encoder(AVCodecContext *avctx)
1323 {
1324     CFMutableDictionaryRef enc_info;
1325     CFMutableDictionaryRef pixel_buffer_info;
1326     CMVideoCodecType       codec_type;
1327     VTEncContext           *vtctx = avctx->priv_data;
1328     CFStringRef            profile_level;
1329     CFNumberRef            gamma_level = NULL;
1330     int                    status;
1331
1332     codec_type = get_cm_codec_type(avctx->codec_id);
1333     if (!codec_type) {
1334         av_log(avctx, AV_LOG_ERROR, "Error: no mapping for AVCodecID %d\n", avctx->codec_id);
1335         return AVERROR(EINVAL);
1336     }
1337
1338     vtctx->codec_id = avctx->codec_id;
1339
1340     if (vtctx->codec_id == AV_CODEC_ID_H264) {
1341         vtctx->get_param_set_func = CMVideoFormatDescriptionGetH264ParameterSetAtIndex;
1342
1343         vtctx->has_b_frames = avctx->max_b_frames > 0;
1344         if(vtctx->has_b_frames && vtctx->profile == H264_PROF_BASELINE){
1345             av_log(avctx, AV_LOG_WARNING, "Cannot use B-frames with baseline profile. Output will not contain B-frames.\n");
1346             vtctx->has_b_frames = false;
1347         }
1348
1349         if (vtctx->entropy == VT_CABAC && vtctx->profile == H264_PROF_BASELINE) {
1350             av_log(avctx, AV_LOG_WARNING, "CABAC entropy requires 'main' or 'high' profile, but baseline was requested. Encode will not use CABAC entropy.\n");
1351             vtctx->entropy = VT_ENTROPY_NOT_SET;
1352         }
1353
1354         if (!get_vt_h264_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
1355     } else {
1356         vtctx->get_param_set_func = compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
1357         if (!vtctx->get_param_set_func) return AVERROR(EINVAL);
1358         if (!get_vt_hevc_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
1359     }
1360
1361     enc_info = CFDictionaryCreateMutable(
1362         kCFAllocatorDefault,
1363         20,
1364         &kCFCopyStringDictionaryKeyCallBacks,
1365         &kCFTypeDictionaryValueCallBacks
1366     );
1367
1368     if (!enc_info) return AVERROR(ENOMEM);
1369
1370 #if !TARGET_OS_IPHONE
1371     if(vtctx->require_sw) {
1372         CFDictionarySetValue(enc_info,
1373                              compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
1374                              kCFBooleanFalse);
1375     } else if (!vtctx->allow_sw) {
1376         CFDictionarySetValue(enc_info,
1377                              compat_keys.kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
1378                              kCFBooleanTrue);
1379     } else {
1380         CFDictionarySetValue(enc_info,
1381                              compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
1382                              kCFBooleanTrue);
1383     }
1384 #endif
1385
1386     if (avctx->pix_fmt != AV_PIX_FMT_VIDEOTOOLBOX) {
1387         status = create_cv_pixel_buffer_info(avctx, &pixel_buffer_info);
1388         if (status)
1389             goto init_cleanup;
1390     } else {
1391         pixel_buffer_info = NULL;
1392     }
1393
1394     vtctx->dts_delta = vtctx->has_b_frames ? -1 : 0;
1395
1396     get_cv_transfer_function(avctx, &vtctx->transfer_function, &gamma_level);
1397     get_cv_ycbcr_matrix(avctx, &vtctx->ycbcr_matrix);
1398     get_cv_color_primaries(avctx, &vtctx->color_primaries);
1399
1400
1401     if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
1402         status = vtenc_populate_extradata(avctx,
1403                                           codec_type,
1404                                           profile_level,
1405                                           gamma_level,
1406                                           enc_info,
1407                                           pixel_buffer_info);
1408         if (status)
1409             goto init_cleanup;
1410     }
1411
1412     status = vtenc_create_encoder(avctx,
1413                                   codec_type,
1414                                   profile_level,
1415                                   gamma_level,
1416                                   enc_info,
1417                                   pixel_buffer_info,
1418                                   &vtctx->session);
1419
1420 init_cleanup:
1421     if (gamma_level)
1422         CFRelease(gamma_level);
1423
1424     if (pixel_buffer_info)
1425         CFRelease(pixel_buffer_info);
1426
1427     CFRelease(enc_info);
1428
1429     return status;
1430 }
1431
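/* Encoder init: resolve optional symbols once, set up the output queue
 * synchronization, configure the session, and query whether the encoder will
 * actually reorder frames so avctx->has_b_frames reflects reality. */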
1432 static av_cold int vtenc_init(AVCodecContext *avctx)
1433 {
1434     VTEncContext    *vtctx = avctx->priv_data;
1435     CFBooleanRef    has_b_frames_cfbool;
1436     int             status;
1437
1438     pthread_once(&once_ctrl, loadVTEncSymbols);
1439
1440     pthread_mutex_init(&vtctx->lock, NULL);
1441     pthread_cond_init(&vtctx->cv_sample_sent, NULL);
1442
1443     vtctx->session = NULL;
1444     status = vtenc_configure_encoder(avctx);
1445     if (status) return status;
1446
1447     status = VTSessionCopyProperty(vtctx->session,
1448                                    kVTCompressionPropertyKey_AllowFrameReordering,
1449                                    kCFAllocatorDefault,
1450                                    &has_b_frames_cfbool);
1451
1452     if (!status && has_b_frames_cfbool) {
1453         //Some devices don't output B-frames for main profile, even if requested.
1454         vtctx->has_b_frames = CFBooleanGetValue(has_b_frames_cfbool);
1455         CFRelease(has_b_frames_cfbool);
1456     }
1457     avctx->has_b_frames = vtctx->has_b_frames;
1458
1459     return 0;
1460 }
1461
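/* A sample is a keyframe unless its first attachment dictionary carries
 * kCMSampleAttachmentKey_NotSync set to true. */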
1462 static void vtenc_get_frame_info(CMSampleBufferRef buffer, bool *is_key_frame)
1463 {
1464     CFArrayRef      attachments;
1465     CFDictionaryRef attachment;
1466     CFBooleanRef    not_sync;
1467     CFIndex         len;
1468
1469     attachments = CMSampleBufferGetSampleAttachmentsArray(buffer, false);
1470     len = !attachments ? 0 : CFArrayGetCount(attachments);
1471
1472     if (!len) {
1473         *is_key_frame = true;
1474         return;
1475     }
1476
1477     attachment = CFArrayGetValueAtIndex(attachments, 0);
1478
1479     if (CFDictionaryGetValueIfPresent(attachment,
1480                                       kCMSampleAttachmentKey_NotSync,
1481                                       (const void **)&not_sync))
1482     {
1483         *is_key_frame = !CFBooleanGetValue(not_sync);
1484     } else {
1485         *is_key_frame = true;
1486     }
1487 }
1488
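/* Returns nonzero for NAL unit types that follow any SEI NAL units within an
 * access unit; used to decide when a missing SEI NAL unit must be inserted. */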
1489 static int is_post_sei_nal_type(int nal_type){
1490     return nal_type != H264_NAL_SEI &&
1491            nal_type != H264_NAL_SPS &&
1492            nal_type != H264_NAL_PPS &&
1493            nal_type != H264_NAL_AUD;
1494 }
1495
1496 /*
1497  * Scans an SEI NAL unit and sets *sei_end just past its last message so more
1498  * SEI messages can be appended there. Returns 0 if the NAL unit is not SEI,
1499  * a negative AVERROR on truncated data, or a positive byte count on success. */
1500 static int find_sei_end(AVCodecContext *avctx,
1501                         uint8_t        *nal_data,
1502                         size_t          nal_size,
1503                         uint8_t       **sei_end)
1504 {
1505     int nal_type;
1506     size_t sei_payload_size = 0;
1507     int sei_payload_type = 0;
1508     *sei_end = NULL;
1509     uint8_t *nal_start = nal_data;
1510
1511     if (!nal_size)
1512         return 0;
1513
1514     nal_type = *nal_data & 0x1F;
1515     if (nal_type != H264_NAL_SEI)
1516         return 0;
1517
1518     nal_data++;
1519     nal_size--;
1520
1521     if (nal_data[nal_size - 1] == 0x80)
1522         nal_size--;
1523
1524     while (nal_size > 0 && *nal_data > 0) {
1525         do{
1526             sei_payload_type += *nal_data;
1527             nal_data++;
1528             nal_size--;
1529         } while (nal_size > 0 && *nal_data == 0xFF);
1530
1531         if (!nal_size) {
1532             av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL unit while parsing the payload type.\n");
1533             return AVERROR_INVALIDDATA;
1534         }
1535
1536         do{
1537             sei_payload_size += *nal_data;
1538             nal_data++;
1539             nal_size--;
1540         } while (nal_size > 0 && *nal_data == 0xFF);
1541
1542         if (nal_size < sei_payload_size) {
1543             av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL unit while parsing the payload size.\n");
1544             return AVERROR_INVALIDDATA;
1545         }
1546
1547         nal_data += sei_payload_size;
1548         nal_size -= sei_payload_size;
1549     }
1550
1551     *sei_end = nal_data;
1552
1553     return nal_data - nal_start + 1;
1554 }
1555
1556 /**
1557  * Copies the data, inserting emulation prevention bytes as needed.
1558  * Existing data in the destination can be taken into account by providing
1559  * dst with a dst_offset > 0.
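 * An emulation prevention byte (0x03) is inserted after two consecutive zero
 * bytes whenever the next source byte is 0x03 or less, e.g. 00 00 01 becomes
 * 00 00 03 01.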
1560  *
1561  * @return The number of bytes copied on success. On failure, the negative of
1562  *         the number of bytes needed to copy src is returned.
1563  */
1564 static int copy_emulation_prev(const uint8_t *src,
1565                                size_t         src_size,
1566                                uint8_t       *dst,
1567                                ssize_t        dst_offset,
1568                                size_t         dst_size)
1569 {
1570     int zeros = 0;
1571     int wrote_bytes;
1572     uint8_t* dst_start;
1573     uint8_t* dst_end = dst + dst_size;
1574     const uint8_t* src_end = src + src_size;
1575     int start_at = dst_offset > 2 ? dst_offset - 2 : 0;
1576     int i;
1577     for (i = start_at; i < dst_offset && i < dst_size; i++) {
1578         if (!dst[i])
1579             zeros++;
1580         else
1581             zeros = 0;
1582     }
1583
1584     dst += dst_offset;
1585     dst_start = dst;
1586     for (; src < src_end; src++, dst++) {
1587         if (zeros == 2) {
1588             int insert_ep3_byte = *src <= 3;
1589             if (insert_ep3_byte) {
1590                 if (dst < dst_end)
1591                     *dst = 3;
1592                 dst++;
1593             }
1594
1595             zeros = 0;
1596         }
1597
1598         if (dst < dst_end)
1599             *dst = *src;
1600
1601         if (!*src)
1602             zeros++;
1603         else
1604             zeros = 0;
1605     }
1606
1607     wrote_bytes = dst - dst_start;
1608
1609     if (dst > dst_end)
1610         return -wrote_bytes;
1611
1612     return wrote_bytes;
1613 }
1614
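/**
 * Writes a single SEI message of the given type into dst: the payload type
 * and payload size are written 255 at a time as in the SEI message syntax,
 * followed by the payload itself with emulation prevention bytes inserted.
 *
 * @return The number of bytes written, or AVERROR_BUFFER_TOO_SMALL.
 */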
1615 static int write_sei(const ExtraSEI *sei,
1616                      int             sei_type,
1617                      uint8_t        *dst,
1618                      size_t          dst_size)
1619 {
1620     uint8_t *sei_start = dst;
1621     size_t remaining_sei_size = sei->size;
1622     size_t remaining_dst_size = dst_size;
1623     int header_bytes;
1624     int bytes_written;
1625     ssize_t offset;
1626
1627     if (!remaining_dst_size)
1628         return AVERROR_BUFFER_TOO_SMALL;
1629
1630     while (sei_type && remaining_dst_size != 0) {
1631         int sei_byte = sei_type > 255 ? 255 : sei_type;
1632         *dst = sei_byte;
1633
1634         sei_type -= sei_byte;
1635         dst++;
1636         remaining_dst_size--;
1637     }
1638
1639     if (!remaining_dst_size)
1640         return AVERROR_BUFFER_TOO_SMALL;
1641
1642     while (remaining_sei_size && remaining_dst_size != 0) {
1643         int size_byte = remaining_sei_size > 255 ? 255 : remaining_sei_size;
1644         *dst = size_byte;
1645
1646         remaining_sei_size -= size_byte;
1647         dst++;
1648         remaining_dst_size--;
1649     }
1650
1651     if (remaining_dst_size < sei->size)
1652         return AVERROR_BUFFER_TOO_SMALL;
1653
1654     header_bytes = dst - sei_start;
1655
1656     offset = header_bytes;
1657     bytes_written = copy_emulation_prev(sei->data,
1658                                         sei->size,
1659                                         sei_start,
1660                                         offset,
1661                                         dst_size);
1662     if (bytes_written < 0)
1663         return AVERROR_BUFFER_TOO_SMALL;
1664
1665     bytes_written += header_bytes;
1666     return bytes_written;
1667 }
1668
1669 /**
1670  * Copies NAL units and replaces length codes with
1671  * H.264 Annex B start codes. On failure, the contents of
1672  * dst_data may have been modified.
1673  *
1674  * @param length_code_size Byte length of each length code
1675  * @param sample_buffer NAL units prefixed with length codes.
1676  * @param sei Optional A53 closed captions SEI data.
1677  * @param dst_data Must be zeroed before calling this function.
1678  *                 Contains the copied NAL units prefixed with
1679  *                 start codes when the function returns
1680  *                 successfully.
1681  * @param dst_size Length of dst_data
1682  * @return 0 on success
1683  *         AVERROR_INVALIDDATA if length_code_size is invalid
1684  *         AVERROR_BUFFER_TOO_SMALL if dst_data is too small
1685  *         or if a length code in sample_buffer specifies data beyond
1686  *         the end of its buffer.
1687  */
1688 static int copy_replace_length_codes(
1689     AVCodecContext *avctx,
1690     size_t        length_code_size,
1691     CMSampleBufferRef sample_buffer,
1692     ExtraSEI      *sei,
1693     uint8_t       *dst_data,
1694     size_t        dst_size)
1695 {
1696     size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1697     size_t remaining_src_size = src_size;
1698     size_t remaining_dst_size = dst_size;
1699     size_t src_offset = 0;
1700     int wrote_sei = 0;
1701     int status;
1702     uint8_t size_buf[4];
1703     uint8_t nal_type;
1704     CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
1705
1706     if (length_code_size > 4) {
1707         return AVERROR_INVALIDDATA;
1708     }
1709
1710     while (remaining_src_size > 0) {
1711         size_t curr_src_len;
1712         size_t curr_dst_len;
1713         size_t box_len = 0;
1714         size_t i;
1715
1716         uint8_t       *dst_box;
1717
1718         status = CMBlockBufferCopyDataBytes(block,
1719                                             src_offset,
1720                                             length_code_size,
1721                                             size_buf);
1722         if (status) {
1723             av_log(avctx, AV_LOG_ERROR, "Cannot copy length: %d\n", status);
1724             return AVERROR_EXTERNAL;
1725         }
1726
1727         status = CMBlockBufferCopyDataBytes(block,
1728                                             src_offset + length_code_size,
1729                                             1,
1730                                             &nal_type);
1731
1732         if (status) {
1733             av_log(avctx, AV_LOG_ERROR, "Cannot copy type: %d\n", status);
1734             return AVERROR_EXTERNAL;
1735         }
1736
1737         nal_type &= 0x1F;
1738
1739         for (i = 0; i < length_code_size; i++) {
1740             box_len <<= 8;
1741             box_len |= size_buf[i];
1742         }
1743
1744         if (sei && !wrote_sei && is_post_sei_nal_type(nal_type)) {
1745             //No SEI NAL unit - insert.
1746             int wrote_bytes;
1747
1748             memcpy(dst_data, start_code, sizeof(start_code));
1749             dst_data += sizeof(start_code);
1750             remaining_dst_size -= sizeof(start_code);
1751
1752             *dst_data = H264_NAL_SEI;
1753             dst_data++;
1754             remaining_dst_size--;
1755
1756             wrote_bytes = write_sei(sei,
1757                                     H264_SEI_TYPE_USER_DATA_REGISTERED,
1758                                     dst_data,
1759                                     remaining_dst_size);
1760
1761             if (wrote_bytes < 0)
1762                 return wrote_bytes;
1763
1764             remaining_dst_size -= wrote_bytes;
1765             dst_data += wrote_bytes;
1766
1767             if (remaining_dst_size <= 0)
1768                 return AVERROR_BUFFER_TOO_SMALL;
1769
1770             *dst_data = 0x80;
1771
1772             dst_data++;
1773             remaining_dst_size--;
1774
1775             wrote_sei = 1;
1776         }
1777
1778         curr_src_len = box_len + length_code_size;
1779         curr_dst_len = box_len + sizeof(start_code);
1780
1781         if (remaining_src_size < curr_src_len) {
1782             return AVERROR_BUFFER_TOO_SMALL;
1783         }
1784
1785         if (remaining_dst_size < curr_dst_len) {
1786             return AVERROR_BUFFER_TOO_SMALL;
1787         }
1788
1789         dst_box = dst_data + sizeof(start_code);
1790
1791         memcpy(dst_data, start_code, sizeof(start_code));
1792         status = CMBlockBufferCopyDataBytes(block,
1793                                             src_offset + length_code_size,
1794                                             box_len,
1795                                             dst_box);
1796
1797         if (status) {
1798             av_log(avctx, AV_LOG_ERROR, "Cannot copy data: %d\n", status);
1799             return AVERROR_EXTERNAL;
1800         }
1801
1802         if (sei && !wrote_sei && nal_type == H264_NAL_SEI) {
1803             //Found SEI NAL unit - append.
1804             int wrote_bytes;
1805             int old_sei_length;
1806             int extra_bytes;
1807             uint8_t *new_sei;
1808             old_sei_length = find_sei_end(avctx, dst_box, box_len, &new_sei);
1809             if (old_sei_length < 0)
1810                 return old_sei_length;
1811
1812             wrote_bytes = write_sei(sei,
1813                                     H264_SEI_TYPE_USER_DATA_REGISTERED,
1814                                     new_sei,
1815                                     remaining_dst_size - old_sei_length);
1816             if (wrote_bytes < 0)
1817                 return wrote_bytes;
1818
1819             if (new_sei + wrote_bytes >= dst_data + remaining_dst_size)
1820                 return AVERROR_BUFFER_TOO_SMALL;
1821
1822             new_sei[wrote_bytes++] = 0x80;
1823             extra_bytes = wrote_bytes - (dst_box + box_len - new_sei);
1824
1825             dst_data += extra_bytes;
1826             remaining_dst_size -= extra_bytes;
1827
1828             wrote_sei = 1;
1829         }
1830
1831         src_offset += curr_src_len;
1832         dst_data += curr_dst_len;
1833
1834         remaining_src_size -= curr_src_len;
1835         remaining_dst_size -= curr_dst_len;
1836     }
1837
1838     return 0;
1839 }
1840
1841 /**
1842  * Returns a sufficient number of bytes to contain the sei data.
1843  * It may be greater than the minimum required.
1844  */
1845 static int get_sei_msg_bytes(const ExtraSEI* sei, int type){
1846     int copied_size;
1847     if (sei->size == 0)
1848         return 0;
1849
1850     copied_size = -copy_emulation_prev(sei->data,
1851                                        sei->size,
1852                                        NULL,
1853                                        0,
1854                                        0);
1855
1856     if ((sei->size % 255) == 0) //may result in an extra byte
1857         copied_size++;
1858
1859     return copied_size + sei->size / 255 + 1 + type / 255 + 1;
1860 }
1861
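/**
 * Converts an encoded CMSampleBuffer into an AVPacket: allocates the packet,
 * prepends the parameter sets on key frames when global headers are not in
 * use, rewrites the length-prefixed NAL units as Annex B, inserts the A53
 * closed-caption SEI if one was supplied, and fills pts/dts and the key
 * frame flag.
 */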
1862 static int vtenc_cm_to_avpacket(
1863     AVCodecContext    *avctx,
1864     CMSampleBufferRef sample_buffer,
1865     AVPacket          *pkt,
1866     ExtraSEI          *sei)
1867 {
1868     VTEncContext *vtctx = avctx->priv_data;
1869
1870     int     status;
1871     bool    is_key_frame;
1872     bool    add_header;
1873     size_t  length_code_size;
1874     size_t  header_size = 0;
1875     size_t  in_buf_size;
1876     size_t  out_buf_size;
1877     size_t  sei_nalu_size = 0;
1878     int64_t dts_delta;
1879     int64_t time_base_num;
1880     int nalu_count;
1881     CMTime  pts;
1882     CMTime  dts;
1883     CMVideoFormatDescriptionRef vid_fmt;
1884
1885
1886     vtenc_get_frame_info(sample_buffer, &is_key_frame);
1887     status = get_length_code_size(avctx, sample_buffer, &length_code_size);
1888     if (status) return status;
1889
1890     add_header = is_key_frame && !(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER);
1891
1892     if (add_header) {
1893         vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
1894         if (!vid_fmt) {
1895             av_log(avctx, AV_LOG_ERROR, "Cannot get format description.\n");
1896             return AVERROR_EXTERNAL;
1897         }
1898
1899         status = get_params_size(avctx, vid_fmt, &header_size);
1900         if (status) return status;
1901     }
1902
1903     status = count_nalus(length_code_size, sample_buffer, &nalu_count);
1904     if(status)
1905         return status;
1906
1907     if (sei) {
1908         size_t msg_size = get_sei_msg_bytes(sei,
1909                                             H264_SEI_TYPE_USER_DATA_REGISTERED);
1910
1911         sei_nalu_size = sizeof(start_code) + 1 + msg_size + 1;
1912     }
1913
1914     in_buf_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1915     out_buf_size = header_size +
1916                    in_buf_size +
1917                    sei_nalu_size +
1918                    nalu_count * ((int)sizeof(start_code) - (int)length_code_size);
1919
1920     status = ff_alloc_packet2(avctx, pkt, out_buf_size, out_buf_size);
1921     if (status < 0)
1922         return status;
1923
1924     if (add_header) {
1925         status = copy_param_sets(avctx, vid_fmt, pkt->data, out_buf_size);
1926         if(status) return status;
1927     }
1928
1929     status = copy_replace_length_codes(
1930         avctx,
1931         length_code_size,
1932         sample_buffer,
1933         sei,
1934         pkt->data + header_size,
1935         pkt->size - header_size
1936     );
1937
1938     if (status) {
1939         av_log(avctx, AV_LOG_ERROR, "Error copying packet data: %d\n", status);
1940         return status;
1941     }
1942
1943     if (is_key_frame) {
1944         pkt->flags |= AV_PKT_FLAG_KEY;
1945     }
1946
1947     pts = CMSampleBufferGetPresentationTimeStamp(sample_buffer);
1948     dts = CMSampleBufferGetDecodeTimeStamp      (sample_buffer);
1949
1950     if (CMTIME_IS_INVALID(dts)) {
1951         if (!vtctx->has_b_frames) {
1952             dts = pts;
1953         } else {
1954             av_log(avctx, AV_LOG_ERROR, "DTS is invalid.\n");
1955             return AVERROR_EXTERNAL;
1956         }
1957     }
1958
1959     dts_delta = vtctx->dts_delta >= 0 ? vtctx->dts_delta : 0;
1960     time_base_num = avctx->time_base.num;
1961     pkt->pts = pts.value / time_base_num;
1962     pkt->dts = dts.value / time_base_num - dts_delta;
1963     pkt->size = out_buf_size;
1964
1965     return 0;
1966 }
1967
1968 /*
1969  * Fills the per-plane width/height/stride arrays for the frame's pixel format.
1970  * contiguous_buf_size is the total size of all planes if they are laid out
1971  * contiguously in memory, and 0 otherwise. */
1972 static int get_cv_pixel_info(
1973     AVCodecContext *avctx,
1974     const AVFrame  *frame,
1975     int            *color,
1976     int            *plane_count,
1977     size_t         *widths,
1978     size_t         *heights,
1979     size_t         *strides,
1980     size_t         *contiguous_buf_size)
1981 {
1982     VTEncContext *vtctx = avctx->priv_data;
1983     int av_format       = frame->format;
1984     int av_color_range  = frame->color_range;
1985     int i;
1986     int range_guessed;
1987     int status;
1988
1989     status = get_cv_pixel_format(avctx, av_format, av_color_range, color, &range_guessed);
1990     if (status) {
1991         av_log(avctx,
1992             AV_LOG_ERROR,
1993             "Could not get pixel format for color format '%s' range '%s'.\n",
1994             av_get_pix_fmt_name(av_format),
1995             av_color_range > AVCOL_RANGE_UNSPECIFIED &&
1996             av_color_range < AVCOL_RANGE_NB ?
1997                av_color_range_name(av_color_range) :
1998                "Unknown");
1999
2000         return AVERROR(EINVAL);
2001     }
2002
2003     if (range_guessed) {
2004         if (!vtctx->warned_color_range) {
2005             vtctx->warned_color_range = true;
2006             av_log(avctx,
2007                    AV_LOG_WARNING,
2008                    "Color range not set for %s. Using MPEG range.\n",
2009                    av_get_pix_fmt_name(av_format));
2010         }
2011     }
2012
2013     switch (av_format) {
2014     case AV_PIX_FMT_NV12:
2015         *plane_count = 2;
2016
2017         widths [0] = avctx->width;
2018         heights[0] = avctx->height;
2019         strides[0] = frame ? frame->linesize[0] : avctx->width;
2020
2021         widths [1] = (avctx->width  + 1) / 2;
2022         heights[1] = (avctx->height + 1) / 2;
2023         strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) & -2;
2024         break;
2025
2026     case AV_PIX_FMT_YUV420P:
2027         *plane_count = 3;
2028
2029         widths [0] = avctx->width;
2030         heights[0] = avctx->height;
2031         strides[0] = frame ? frame->linesize[0] : avctx->width;
2032
2033         widths [1] = (avctx->width  + 1) / 2;
2034         heights[1] = (avctx->height + 1) / 2;
2035         strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) / 2;
2036
2037         widths [2] = (avctx->width  + 1) / 2;
2038         heights[2] = (avctx->height + 1) / 2;
2039         strides[2] = frame ? frame->linesize[2] : (avctx->width + 1) / 2;
2040         break;
2041
2042     case AV_PIX_FMT_P010LE:
2043         *plane_count = 2;
2044         widths[0] = avctx->width;
2045         heights[0] = avctx->height;
2046         strides[0] = frame ? frame->linesize[0] : (avctx->width * 2 + 63) & -64;
2047
2048         widths[1] = (avctx->width + 1) / 2;
2049         heights[1] = (avctx->height + 1) / 2;
2050         strides[1] = frame ? frame->linesize[1] : ((avctx->width + 1) / 2 + 63) & -64;
2051         break;
2052
2053     default:
2054         av_log(
2055                avctx,
2056                AV_LOG_ERROR,
2057                "Could not get frame format info for color %d range %d.\n",
2058                av_format,
2059                av_color_range);
2060
2061         return AVERROR(EINVAL);
2062     }
2063
2064     *contiguous_buf_size = 0;
2065     for (i = 0; i < *plane_count; i++) {
2066         if (i < *plane_count - 1 &&
2067             frame->data[i] + strides[i] * heights[i] != frame->data[i + 1]) {
2068             *contiguous_buf_size = 0;
2069             break;
2070         }
2071
2072         *contiguous_buf_size += strides[i] * heights[i];
2073     }
2074
2075     return 0;
2076 }
2077
2078 //Copies the AVFrame's planes into a CVPixelBuffer taken from the session's pixel buffer pool.
2079 static int copy_avframe_to_pixel_buffer(AVCodecContext   *avctx,
2080                                         const AVFrame    *frame,
2081                                         CVPixelBufferRef cv_img,
2082                                         const size_t     *plane_strides,
2083                                         const size_t     *plane_rows)
2084 {
2085     int i, j;
2086     size_t plane_count;
2087     int status;
2088     int rows;
2089     int src_stride;
2090     int dst_stride;
2091     uint8_t *src_addr;
2092     uint8_t *dst_addr;
2093     size_t copy_bytes;
2094
2095     status = CVPixelBufferLockBaseAddress(cv_img, 0);
2096     if (status) {
2097         av_log(avctx,
2098                AV_LOG_ERROR,
2099                "Error: Could not lock base address of CVPixelBuffer: %d.\n",
2100                status);
2101         return AVERROR_EXTERNAL;
2102     }
2104
2105     if (CVPixelBufferIsPlanar(cv_img)) {
2106         plane_count = CVPixelBufferGetPlaneCount(cv_img);
2107         for (i = 0; frame->data[i]; i++) {
2108             if (i == plane_count) {
2109                 CVPixelBufferUnlockBaseAddress(cv_img, 0);
2110                 av_log(avctx,
2111                     AV_LOG_ERROR,
2112                     "Error: different number of planes in AVFrame and CVPixelBuffer.\n"
2113                 );
2114
2115                 return AVERROR_EXTERNAL;
2116             }
2117
2118             dst_addr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cv_img, i);
2119             src_addr = (uint8_t*)frame->data[i];
2120             dst_stride = CVPixelBufferGetBytesPerRowOfPlane(cv_img, i);
2121             src_stride = plane_strides[i];
2122             rows = plane_rows[i];
2123
2124             if (dst_stride == src_stride) {
2125                 memcpy(dst_addr, src_addr, src_stride * rows);
2126             } else {
2127                 copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2128
2129                 for (j = 0; j < rows; j++) {
2130                     memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2131                 }
2132             }
2133         }
2134     } else {
2135         if (frame->data[1]) {
2136             CVPixelBufferUnlockBaseAddress(cv_img, 0);
2137             av_log(avctx,
2138                 AV_LOG_ERROR,
2139                 "Error: different number of planes in AVFrame and non-planar CVPixelBuffer.\n"
2140             );
2141
2142             return AVERROR_EXTERNAL;
2143         }
2144
2145         dst_addr = (uint8_t*)CVPixelBufferGetBaseAddress(cv_img);
2146         src_addr = (uint8_t*)frame->data[0];
2147         dst_stride = CVPixelBufferGetBytesPerRow(cv_img);
2148         src_stride = plane_strides[0];
2149         rows = plane_rows[0];
2150
2151         if (dst_stride == src_stride) {
2152             memcpy(dst_addr, src_addr, src_stride * rows);
2153         } else {
2154             copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2155
2156             for (j = 0; j < rows; j++) {
2157                 memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2158             }
2159         }
2160     }
2161
2162     status = CVPixelBufferUnlockBaseAddress(cv_img, 0);
2163     if (status) {
2164         av_log(avctx, AV_LOG_ERROR, "Error: Could not unlock CVPixelBuffer base address: %d.\n", status);
2165         return AVERROR_EXTERNAL;
2166     }
2167
2168     return 0;
2169 }
2170
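/**
 * Obtains a CVPixelBuffer for the frame. Hardware frames are passed through
 * with an extra retain; software frames are copied into a buffer taken from
 * the compression session's pixel buffer pool, restarting the session first
 * if it has been invalidated (e.g. an iOS foreground/background switch).
 */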
2171 static int create_cv_pixel_buffer(AVCodecContext   *avctx,
2172                                   const AVFrame    *frame,
2173                                   CVPixelBufferRef *cv_img)
2174 {
2175     int plane_count;
2176     int color;
2177     size_t widths [AV_NUM_DATA_POINTERS];
2178     size_t heights[AV_NUM_DATA_POINTERS];
2179     size_t strides[AV_NUM_DATA_POINTERS];
2180     int status;
2181     size_t contiguous_buf_size;
2182     CVPixelBufferPoolRef pix_buf_pool;
2183     VTEncContext* vtctx = avctx->priv_data;
2184
2185     if (avctx->pix_fmt == AV_PIX_FMT_VIDEOTOOLBOX) {
2186         av_assert0(frame->format == AV_PIX_FMT_VIDEOTOOLBOX);
2187
2188         *cv_img = (CVPixelBufferRef)frame->data[3];
2189         av_assert0(*cv_img);
2190
2191         CFRetain(*cv_img);
2192         return 0;
2193     }
2194
2195     memset(widths,  0, sizeof(widths));
2196     memset(heights, 0, sizeof(heights));
2197     memset(strides, 0, sizeof(strides));
2198
2199     status = get_cv_pixel_info(
2200         avctx,
2201         frame,
2202         &color,
2203         &plane_count,
2204         widths,
2205         heights,
2206         strides,
2207         &contiguous_buf_size
2208     );
2209
2210     if (status) {
2211         av_log(
2212             avctx,
2213             AV_LOG_ERROR,
2214             "Error: Cannot convert format %d color_range %d: %d\n",
2215             frame->format,
2216             frame->color_range,
2217             status
2218         );
2219
2220         return AVERROR_EXTERNAL;
2221     }
2222
2223     pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2224     if (!pix_buf_pool) {
2225         /* On iOS, the VT session is invalidated when the app switches from
2226          * foreground to background and vice versa. Fetch the actual error code
2227          * of the VT session to detect that case and restart the VT session
2228          * accordingly. */
2229         OSStatus vtstatus;
2230
2231         vtstatus = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
2232         if (vtstatus == kVTInvalidSessionErr) {
2233             CFRelease(vtctx->session);
2234             vtctx->session = NULL;
2235             status = vtenc_configure_encoder(avctx);
2236             if (status == 0)
2237                 pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2238         }
2239         if (!pix_buf_pool) {
2240             av_log(avctx, AV_LOG_ERROR, "Could not get pixel buffer pool.\n");
2241             return AVERROR_EXTERNAL;
2242         }
2243         else
2244             av_log(avctx, AV_LOG_WARNING, "VT session restarted because of a "
2245                    "kVTInvalidSessionErr error.\n");
2246     }
2247
2248     status = CVPixelBufferPoolCreatePixelBuffer(NULL,
2249                                                 pix_buf_pool,
2250                                                 cv_img);
2251
2252
2253     if (status) {
2254         av_log(avctx, AV_LOG_ERROR, "Could not create pixel buffer from pool: %d.\n", status);
2255         return AVERROR_EXTERNAL;
2256     }
2257
2258     status = copy_avframe_to_pixel_buffer(avctx, frame, *cv_img, strides, heights);
2259     if (status) {
2260         CFRelease(*cv_img);
2261         *cv_img = NULL;
2262         return status;
2263     }
2264
2265     return 0;
2266 }
2267
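/**
 * Builds the per-frame encode options dictionary: forces a key frame when
 * the incoming frame is an I-frame, otherwise leaves *dict_out NULL.
 */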
2268 static int create_encoder_dict_h264(const AVFrame *frame,
2269                                     CFDictionaryRef* dict_out)
2270 {
2271     CFDictionaryRef dict = NULL;
2272     if (frame->pict_type == AV_PICTURE_TYPE_I) {
2273         const void *keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
2274         const void *vals[] = { kCFBooleanTrue };
2275
2276         dict = CFDictionaryCreate(NULL, keys, vals, 1, NULL, NULL);
2277         if(!dict) return AVERROR(ENOMEM);
2278     }
2279
2280     *dict_out = dict;
2281     return 0;
2282 }
2283
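/**
 * Submits one frame to the compression session. Any A53 closed-caption side
 * data is converted to an SEI payload and passed as the source frame refcon
 * so it can be retrieved together with the encoded sample later.
 */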
2284 static int vtenc_send_frame(AVCodecContext *avctx,
2285                             VTEncContext   *vtctx,
2286                             const AVFrame  *frame)
2287 {
2288     CMTime time;
2289     CFDictionaryRef frame_dict;
2290     CVPixelBufferRef cv_img = NULL;
2291     AVFrameSideData *side_data = NULL;
2292     ExtraSEI *sei = NULL;
2293     int status = create_cv_pixel_buffer(avctx, frame, &cv_img);
2294
2295     if (status) return status;
2296
2297     status = create_encoder_dict_h264(frame, &frame_dict);
2298     if (status) {
2299         CFRelease(cv_img);
2300         return status;
2301     }
2302
2303     side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
2304     if (vtctx->a53_cc && side_data && side_data->size) {
2305         sei = av_mallocz(sizeof(*sei));
2306         if (!sei) {
2307             av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
2308         } else {
2309             int ret = ff_alloc_a53_sei(frame, 0, &sei->data, &sei->size);
2310             if (ret < 0) {
2311                 av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
2312                 av_free(sei);
2313                 sei = NULL;
2314             }
2315         }
2316     }
2317
2318     time = CMTimeMake(frame->pts * avctx->time_base.num, avctx->time_base.den);
2319     status = VTCompressionSessionEncodeFrame(
2320         vtctx->session,
2321         cv_img,
2322         time,
2323         kCMTimeInvalid,
2324         frame_dict,
2325         sei,
2326         NULL
2327     );
2328
2329     if (frame_dict) CFRelease(frame_dict);
2330     CFRelease(cv_img);
2331
2332     if (status) {
2333         av_log(avctx, AV_LOG_ERROR, "Error: cannot encode frame: %d\n", status);
2334         return AVERROR_EXTERNAL;
2335     }
2336
2337     return 0;
2338 }
2339
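/**
 * encode2 callback: sends the frame to the session (or flushes the session
 * when frame is NULL), then pops one encoded sample from the queue and
 * converts it into an AVPacket. Output is withheld until the DTS delta for
 * B-frames is known.
 */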
2340 static int vtenc_frame(
2341     AVCodecContext *avctx,
2342     AVPacket       *pkt,
2343     const AVFrame  *frame,
2344     int            *got_packet)
2345 {
2346     VTEncContext *vtctx = avctx->priv_data;
2347     bool get_frame;
2348     int status;
2349     CMSampleBufferRef buf = NULL;
2350     ExtraSEI *sei = NULL;
2351
2352     if (frame) {
2353         status = vtenc_send_frame(avctx, vtctx, frame);
2354
2355         if (status) {
2356             status = AVERROR_EXTERNAL;
2357             goto end_nopkt;
2358         }
2359
2360         if (vtctx->frame_ct_in == 0) {
2361             vtctx->first_pts = frame->pts;
2362         } else if(vtctx->frame_ct_in == 1 && vtctx->has_b_frames) {
2363             vtctx->dts_delta = frame->pts - vtctx->first_pts;
2364         }
2365
2366         vtctx->frame_ct_in++;
2367     } else if(!vtctx->flushing) {
2368         vtctx->flushing = true;
2369
2370         status = VTCompressionSessionCompleteFrames(vtctx->session,
2371                                                     kCMTimeIndefinite);
2372
2373         if (status) {
2374             av_log(avctx, AV_LOG_ERROR, "Error flushing frames: %d\n", status);
2375             status = AVERROR_EXTERNAL;
2376             goto end_nopkt;
2377         }
2378     }
2379
2380     *got_packet = 0;
2381     get_frame = vtctx->dts_delta >= 0 || !frame;
2382     if (!get_frame) {
2383         status = 0;
2384         goto end_nopkt;
2385     }
2386
2387     status = vtenc_q_pop(vtctx, !frame, &buf, &sei);
2388     if (status) goto end_nopkt;
2389     if (!buf)   goto end_nopkt;
2390
2391     status = vtenc_cm_to_avpacket(avctx, buf, pkt, sei);
2392     if (sei) {
2393         if (sei->data) av_free(sei->data);
2394         av_free(sei);
2395     }
2396     CFRelease(buf);
2397     if (status) goto end_nopkt;
2398
2399     *got_packet = 1;
2400     return 0;
2401
2402 end_nopkt:
2403     av_packet_unref(pkt);
2404     return status;
2405 }
2406
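/**
 * Populates avctx->extradata when global headers are requested: creates a
 * temporary compression session, encodes a single dummy frame taken from the
 * pixel buffer pool and flushes it so the parameter sets become available,
 * then tears the session down again.
 */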
2407 static int vtenc_populate_extradata(AVCodecContext   *avctx,
2408                                     CMVideoCodecType codec_type,
2409                                     CFStringRef      profile_level,
2410                                     CFNumberRef      gamma_level,
2411                                     CFDictionaryRef  enc_info,
2412                                     CFDictionaryRef  pixel_buffer_info)
2413 {
2414     VTEncContext *vtctx = avctx->priv_data;
2415     int status;
2416     CVPixelBufferPoolRef pool = NULL;
2417     CVPixelBufferRef pix_buf = NULL;
2418     CMTime time;
2419     CMSampleBufferRef buf = NULL;
2420
2421     status = vtenc_create_encoder(avctx,
2422                                   codec_type,
2423                                   profile_level,
2424                                   gamma_level,
2425                                   enc_info,
2426                                   pixel_buffer_info,
2427                                   &vtctx->session);
2428     if (status)
2429         goto pe_cleanup;
2430
2431     pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2432     if (!pool) {
2433         av_log(avctx, AV_LOG_ERROR, "Error getting pixel buffer pool.\n");
         status = AVERROR_EXTERNAL;
2434         goto pe_cleanup;
2435     }
2436
2437     status = CVPixelBufferPoolCreatePixelBuffer(NULL,
2438                                                 pool,
2439                                                 &pix_buf);
2440
2441     if(status != kCVReturnSuccess){
2442         av_log(avctx, AV_LOG_ERROR, "Error creating frame from pool: %d\n", status);
2443         goto pe_cleanup;
2444     }
2445
2446     time = CMTimeMake(0, avctx->time_base.den);
2447     status = VTCompressionSessionEncodeFrame(vtctx->session,
2448                                              pix_buf,
2449                                              time,
2450                                              kCMTimeInvalid,
2451                                              NULL,
2452                                              NULL,
2453                                              NULL);
2454
2455     if (status) {
2456         av_log(avctx,
2457                AV_LOG_ERROR,
2458                "Error sending frame for extradata: %d\n",
2459                status);
2460
2461         goto pe_cleanup;
2462     }
2463
2464     //Populates extradata - output frames are flushed and param sets are available.
2465     status = VTCompressionSessionCompleteFrames(vtctx->session,
2466                                                 kCMTimeIndefinite);
2467
2468     if (status)
2469         goto pe_cleanup;
2470
2471     status = vtenc_q_pop(vtctx, 0, &buf, NULL);
2472     if (status) {
2473         av_log(avctx, AV_LOG_ERROR, "popping: %d\n", status);
2474         goto pe_cleanup;
2475     }
2476
2477     CFRelease(buf);
2478
2479
2480
2481 pe_cleanup:
2482     if(vtctx->session)
2483         CFRelease(vtctx->session);
2484
2485     vtctx->session = NULL;
2486     vtctx->frame_ct_out = 0;
2487
2488     av_assert0(status != 0 || (avctx->extradata && avctx->extradata_size > 0));
2489
2490     return status;
2491 }
2492
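/**
 * Encoder close callback: completes pending frames, clears the output queue
 * and releases the session and the cached color metadata CFString references.
 */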
2493 static av_cold int vtenc_close(AVCodecContext *avctx)
2494 {
2495     VTEncContext *vtctx = avctx->priv_data;
2496
2497     pthread_cond_destroy(&vtctx->cv_sample_sent);
2498     pthread_mutex_destroy(&vtctx->lock);
2499
2500     if(!vtctx->session) return 0;
2501
2502     VTCompressionSessionCompleteFrames(vtctx->session,
2503                                        kCMTimeIndefinite);
2504     clear_frame_queue(vtctx);
2505     CFRelease(vtctx->session);
2506     vtctx->session = NULL;
2507
2508     if (vtctx->color_primaries) {
2509         CFRelease(vtctx->color_primaries);
2510         vtctx->color_primaries = NULL;
2511     }
2512
2513     if (vtctx->transfer_function) {
2514         CFRelease(vtctx->transfer_function);
2515         vtctx->transfer_function = NULL;
2516     }
2517
2518     if (vtctx->ycbcr_matrix) {
2519         CFRelease(vtctx->ycbcr_matrix);
2520         vtctx->ycbcr_matrix = NULL;
2521     }
2522
2523     return 0;
2524 }
2525
2526 static const enum AVPixelFormat avc_pix_fmts[] = {
2527     AV_PIX_FMT_VIDEOTOOLBOX,
2528     AV_PIX_FMT_NV12,
2529     AV_PIX_FMT_YUV420P,
2530     AV_PIX_FMT_NONE
2531 };
2532
2533 static const enum AVPixelFormat hevc_pix_fmts[] = {
2534     AV_PIX_FMT_VIDEOTOOLBOX,
2535     AV_PIX_FMT_NV12,
2536     AV_PIX_FMT_YUV420P,
2537     AV_PIX_FMT_P010LE,
2538     AV_PIX_FMT_NONE
2539 };
2540
2541 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
2542 #define COMMON_OPTIONS \
2543     { "allow_sw", "Allow software encoding", OFFSET(allow_sw), AV_OPT_TYPE_BOOL, \
2544         { .i64 = 0 }, 0, 1, VE }, \
2545     { "require_sw", "Require software encoding", OFFSET(require_sw), AV_OPT_TYPE_BOOL, \
2546         { .i64 = 0 }, 0, 1, VE }, \
2547     { "realtime", "Hint that encoding should happen in real-time if not faster (e.g. capturing from camera).", \
2548         OFFSET(realtime), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2549     { "frames_before", "Other frames will come before the frames in this session. This helps smooth concatenation issues.", \
2550         OFFSET(frames_before), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2551     { "frames_after", "Other frames will come after the frames in this session. This helps smooth concatenation issues.", \
2552         OFFSET(frames_after), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2553
2554 #define OFFSET(x) offsetof(VTEncContext, x)
2555 static const AVOption h264_options[] = {
2556     { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = H264_PROF_AUTO }, H264_PROF_AUTO, H264_PROF_COUNT, VE, "profile" },
2557     { "baseline", "Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_BASELINE }, INT_MIN, INT_MAX, VE, "profile" },
2558     { "main",     "Main Profile",     0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_MAIN     }, INT_MIN, INT_MAX, VE, "profile" },
2559     { "high",     "High Profile",     0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_HIGH     }, INT_MIN, INT_MAX, VE, "profile" },
2560     { "extended", "Extended Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_EXTENDED }, INT_MIN, INT_MAX, VE, "profile" },
2561
2562     { "level", "Level", OFFSET(level), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 52, VE, "level" },
2563     { "1.3", "Level 1.3, only available with Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = 13 }, INT_MIN, INT_MAX, VE, "level" },
2564     { "3.0", "Level 3.0", 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, INT_MIN, INT_MAX, VE, "level" },
2565     { "3.1", "Level 3.1", 0, AV_OPT_TYPE_CONST, { .i64 = 31 }, INT_MIN, INT_MAX, VE, "level" },
2566     { "3.2", "Level 3.2", 0, AV_OPT_TYPE_CONST, { .i64 = 32 }, INT_MIN, INT_MAX, VE, "level" },
2567     { "4.0", "Level 4.0", 0, AV_OPT_TYPE_CONST, { .i64 = 40 }, INT_MIN, INT_MAX, VE, "level" },
2568     { "4.1", "Level 4.1", 0, AV_OPT_TYPE_CONST, { .i64 = 41 }, INT_MIN, INT_MAX, VE, "level" },
2569     { "4.2", "Level 4.2", 0, AV_OPT_TYPE_CONST, { .i64 = 42 }, INT_MIN, INT_MAX, VE, "level" },
2570     { "5.0", "Level 5.0", 0, AV_OPT_TYPE_CONST, { .i64 = 50 }, INT_MIN, INT_MAX, VE, "level" },
2571     { "5.1", "Level 5.1", 0, AV_OPT_TYPE_CONST, { .i64 = 51 }, INT_MIN, INT_MAX, VE, "level" },
2572     { "5.2", "Level 5.2", 0, AV_OPT_TYPE_CONST, { .i64 = 52 }, INT_MIN, INT_MAX, VE, "level" },
2573
2574     { "coder", "Entropy coding", OFFSET(entropy), AV_OPT_TYPE_INT, { .i64 = VT_ENTROPY_NOT_SET }, VT_ENTROPY_NOT_SET, VT_CABAC, VE, "coder" },
2575     { "cavlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
2576     { "vlc",   "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
2577     { "cabac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
2578     { "ac",    "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
2579
2580     { "a53cc", "Use A53 Closed Captions (if available)", OFFSET(a53_cc), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, VE },
2581
2582     COMMON_OPTIONS
2583     { NULL },
2584 };
2585
2586 static const AVClass h264_videotoolbox_class = {
2587     .class_name = "h264_videotoolbox",
2588     .item_name  = av_default_item_name,
2589     .option     = h264_options,
2590     .version    = LIBAVUTIL_VERSION_INT,
2591 };
2592
2593 AVCodec ff_h264_videotoolbox_encoder = {
2594     .name             = "h264_videotoolbox",
2595     .long_name        = NULL_IF_CONFIG_SMALL("VideoToolbox H.264 Encoder"),
2596     .type             = AVMEDIA_TYPE_VIDEO,
2597     .id               = AV_CODEC_ID_H264,
2598     .priv_data_size   = sizeof(VTEncContext),
2599     .pix_fmts         = avc_pix_fmts,
2600     .init             = vtenc_init,
2601     .encode2          = vtenc_frame,
2602     .close            = vtenc_close,
2603     .capabilities     = AV_CODEC_CAP_DELAY,
2604     .priv_class       = &h264_videotoolbox_class,
2605     .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE |
2606                         FF_CODEC_CAP_INIT_CLEANUP,
2607 };
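
/*
 * Illustrative usage (for illustration only; exact option support depends on
 * the OS version and hardware):
 *   ffmpeg -i in.mov -c:v h264_videotoolbox -b:v 5M -profile:v high out.mp4
 * The allow_sw and require_sw options above control whether software
 * encoding may be used or is required.
 */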
2608
2609 static const AVOption hevc_options[] = {
2610     { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = HEVC_PROF_AUTO }, HEVC_PROF_AUTO, HEVC_PROF_COUNT, VE, "profile" },
2611     { "main",     "Main Profile",     0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN   }, INT_MIN, INT_MAX, VE, "profile" },
2612     { "main10",   "Main10 Profile",   0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN10 }, INT_MIN, INT_MAX, VE, "profile" },
2613
2614     COMMON_OPTIONS
2615     { NULL },
2616 };
2617
2618 static const AVClass hevc_videotoolbox_class = {
2619     .class_name = "hevc_videotoolbox",
2620     .item_name  = av_default_item_name,
2621     .option     = hevc_options,
2622     .version    = LIBAVUTIL_VERSION_INT,
2623 };
2624
2625 AVCodec ff_hevc_videotoolbox_encoder = {
2626     .name             = "hevc_videotoolbox",
2627     .long_name        = NULL_IF_CONFIG_SMALL("VideoToolbox H.265 Encoder"),
2628     .type             = AVMEDIA_TYPE_VIDEO,
2629     .id               = AV_CODEC_ID_HEVC,
2630     .priv_data_size   = sizeof(VTEncContext),
2631     .pix_fmts         = hevc_pix_fmts,
2632     .init             = vtenc_init,
2633     .encode2          = vtenc_frame,
2634     .close            = vtenc_close,
2635     .capabilities     = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE,
2636     .priv_class       = &hevc_videotoolbox_class,
2637     .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE |
2638                         FF_CODEC_CAP_INIT_CLEANUP,
2639     .wrapper_name     = "videotoolbox",
2640 };