/*
 * copyright (c) 2015 Rick Kern <kernrj@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <VideoToolbox/VideoToolbox.h>
#include <CoreVideo/CoreVideo.h>
#include <CoreMedia/CoreMedia.h>
#include <TargetConditionals.h>
#include <Availability.h>
#include "avcodec.h"
#include "libavutil/opt.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavcodec/avcodec.h"
#include "libavutil/pixdesc.h"
#include "internal.h"
#include <pthread.h>
#include "h264.h"
#include "h264_sei.h"
#include <dlfcn.h>

#if !HAVE_KCMVIDEOCODECTYPE_HEVC
enum { kCMVideoCodecType_HEVC = 'hvc1' };
#endif

#if !HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
enum { kCVPixelFormatType_420YpCbCr10BiPlanarFullRange = 'xf20' };
enum { kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange = 'x420' };
#endif

typedef OSStatus (*getParameterSetAtIndex)(CMFormatDescriptionRef videoDesc,
                                           size_t parameterSetIndex,
                                           const uint8_t **parameterSetPointerOut,
                                           size_t *parameterSetSizeOut,
                                           size_t *parameterSetCountOut,
                                           int *NALUnitHeaderLengthOut);

//These symbols may not be present
static struct{
    CFStringRef kCVImageBufferColorPrimaries_ITU_R_2020;
    CFStringRef kCVImageBufferTransferFunction_ITU_R_2020;
    CFStringRef kCVImageBufferYCbCrMatrix_ITU_R_2020;

    CFStringRef kVTCompressionPropertyKey_H264EntropyMode;
    CFStringRef kVTH264EntropyMode_CAVLC;
    CFStringRef kVTH264EntropyMode_CABAC;

    CFStringRef kVTProfileLevel_H264_Baseline_4_0;
    CFStringRef kVTProfileLevel_H264_Baseline_4_2;
    CFStringRef kVTProfileLevel_H264_Baseline_5_0;
    CFStringRef kVTProfileLevel_H264_Baseline_5_1;
    CFStringRef kVTProfileLevel_H264_Baseline_5_2;
    CFStringRef kVTProfileLevel_H264_Baseline_AutoLevel;
    CFStringRef kVTProfileLevel_H264_Main_4_2;
    CFStringRef kVTProfileLevel_H264_Main_5_1;
    CFStringRef kVTProfileLevel_H264_Main_5_2;
    CFStringRef kVTProfileLevel_H264_Main_AutoLevel;
    CFStringRef kVTProfileLevel_H264_High_3_0;
    CFStringRef kVTProfileLevel_H264_High_3_1;
    CFStringRef kVTProfileLevel_H264_High_3_2;
    CFStringRef kVTProfileLevel_H264_High_4_0;
    CFStringRef kVTProfileLevel_H264_High_4_1;
    CFStringRef kVTProfileLevel_H264_High_4_2;
    CFStringRef kVTProfileLevel_H264_High_5_1;
    CFStringRef kVTProfileLevel_H264_High_5_2;
    CFStringRef kVTProfileLevel_H264_High_AutoLevel;
    CFStringRef kVTProfileLevel_H264_Extended_5_0;
    CFStringRef kVTProfileLevel_H264_Extended_AutoLevel;

    CFStringRef kVTProfileLevel_HEVC_Main_AutoLevel;
    CFStringRef kVTProfileLevel_HEVC_Main10_AutoLevel;

    CFStringRef kVTCompressionPropertyKey_RealTime;

    CFStringRef kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder;
    CFStringRef kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder;

    getParameterSetAtIndex CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
} compat_keys;

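/*
 * Resolve an optional framework constant at run time. If the symbol is not
 * exported by the loaded frameworks, fall back to a CFSTR() literal with the
 * given value. For example,
 *     GET_SYM(kVTCompressionPropertyKey_RealTime, "RealTime");
 * stores either the framework constant or CFSTR("RealTime") in compat_keys.
 */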
#define GET_SYM(symbol, defaultVal)                                     \
do{                                                                     \
    CFStringRef* handle = (CFStringRef*)dlsym(RTLD_DEFAULT, #symbol);   \
    if(!handle)                                                         \
        compat_keys.symbol = CFSTR(defaultVal);                         \
    else                                                                \
        compat_keys.symbol = *handle;                                   \
}while(0)

static pthread_once_t once_ctrl = PTHREAD_ONCE_INIT;

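/*
 * Called once via pthread_once() from vtenc_init(). Resolves the optional
 * VideoToolbox/CoreMedia symbols listed in compat_keys so the encoder can run
 * on OS releases where some of them are missing.
 */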
static void loadVTEncSymbols(){
    compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex =
        (getParameterSetAtIndex)dlsym(
            RTLD_DEFAULT,
            "CMVideoFormatDescriptionGetHEVCParameterSetAtIndex"
        );

    GET_SYM(kCVImageBufferColorPrimaries_ITU_R_2020,   "ITU_R_2020");
    GET_SYM(kCVImageBufferTransferFunction_ITU_R_2020, "ITU_R_2020");
    GET_SYM(kCVImageBufferYCbCrMatrix_ITU_R_2020,      "ITU_R_2020");

    GET_SYM(kVTCompressionPropertyKey_H264EntropyMode, "H264EntropyMode");
    GET_SYM(kVTH264EntropyMode_CAVLC, "CAVLC");
    GET_SYM(kVTH264EntropyMode_CABAC, "CABAC");

    GET_SYM(kVTProfileLevel_H264_Baseline_4_0,       "H264_Baseline_4_0");
    GET_SYM(kVTProfileLevel_H264_Baseline_4_2,       "H264_Baseline_4_2");
    GET_SYM(kVTProfileLevel_H264_Baseline_5_0,       "H264_Baseline_5_0");
    GET_SYM(kVTProfileLevel_H264_Baseline_5_1,       "H264_Baseline_5_1");
    GET_SYM(kVTProfileLevel_H264_Baseline_5_2,       "H264_Baseline_5_2");
    GET_SYM(kVTProfileLevel_H264_Baseline_AutoLevel, "H264_Baseline_AutoLevel");
    GET_SYM(kVTProfileLevel_H264_Main_4_2,           "H264_Main_4_2");
    GET_SYM(kVTProfileLevel_H264_Main_5_1,           "H264_Main_5_1");
    GET_SYM(kVTProfileLevel_H264_Main_5_2,           "H264_Main_5_2");
    GET_SYM(kVTProfileLevel_H264_Main_AutoLevel,     "H264_Main_AutoLevel");
    GET_SYM(kVTProfileLevel_H264_High_3_0,           "H264_High_3_0");
    GET_SYM(kVTProfileLevel_H264_High_3_1,           "H264_High_3_1");
    GET_SYM(kVTProfileLevel_H264_High_3_2,           "H264_High_3_2");
    GET_SYM(kVTProfileLevel_H264_High_4_0,           "H264_High_4_0");
    GET_SYM(kVTProfileLevel_H264_High_4_1,           "H264_High_4_1");
    GET_SYM(kVTProfileLevel_H264_High_4_2,           "H264_High_4_2");
    GET_SYM(kVTProfileLevel_H264_High_5_1,           "H264_High_5_1");
    GET_SYM(kVTProfileLevel_H264_High_5_2,           "H264_High_5_2");
    GET_SYM(kVTProfileLevel_H264_High_AutoLevel,     "H264_High_AutoLevel");
    GET_SYM(kVTProfileLevel_H264_Extended_5_0,       "H264_Extended_5_0");
    GET_SYM(kVTProfileLevel_H264_Extended_AutoLevel, "H264_Extended_AutoLevel");

    GET_SYM(kVTProfileLevel_HEVC_Main_AutoLevel,     "HEVC_Main_AutoLevel");
    GET_SYM(kVTProfileLevel_HEVC_Main10_AutoLevel,   "HEVC_Main10_AutoLevel");

    GET_SYM(kVTCompressionPropertyKey_RealTime, "RealTime");

    GET_SYM(kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
            "EnableHardwareAcceleratedVideoEncoder");
    GET_SYM(kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
            "RequireHardwareAcceleratedVideoEncoder");
}

typedef enum VT_H264Profile {
    H264_PROF_AUTO,
    H264_PROF_BASELINE,
    H264_PROF_MAIN,
    H264_PROF_HIGH,
    H264_PROF_EXTENDED,
    H264_PROF_COUNT
} VT_H264Profile;

typedef enum VTH264Entropy{
    VT_ENTROPY_NOT_SET,
    VT_CAVLC,
    VT_CABAC
} VTH264Entropy;

typedef enum VT_HEVCProfile {
    HEVC_PROF_AUTO,
    HEVC_PROF_MAIN,
    HEVC_PROF_MAIN10,
    HEVC_PROF_COUNT
} VT_HEVCProfile;

static const uint8_t start_code[] = { 0, 0, 0, 1 };

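/* Extra SEI data (such as A/53 closed captions) to be attached to an encoded
 * frame. */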
typedef struct ExtraSEI {
    void *data;
    size_t size;
} ExtraSEI;

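/* Node of the singly linked queue of encoded samples produced by the
 * VideoToolbox output callback and drained by the packet-producing side. */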
typedef struct BufNode {
    CMSampleBufferRef cm_buffer;
    ExtraSEI *sei;
    struct BufNode* next;
    int error;
} BufNode;

typedef struct VTEncContext {
    AVClass *class;
    enum AVCodecID codec_id;
    VTCompressionSessionRef session;
    CFStringRef ycbcr_matrix;
    CFStringRef color_primaries;
    CFStringRef transfer_function;
    getParameterSetAtIndex get_param_set_func;

    pthread_mutex_t lock;
    pthread_cond_t  cv_sample_sent;

    int async_error;

    BufNode *q_head;
    BufNode *q_tail;

    int64_t frame_ct_out;
    int64_t frame_ct_in;

    int64_t first_pts;
    int64_t dts_delta;

    int64_t profile;
    int64_t level;
    int64_t entropy;
    int64_t realtime;
    int64_t frames_before;
    int64_t frames_after;

    int64_t allow_sw;
    int64_t require_sw;

    bool flushing;
    bool has_b_frames;
    bool warned_color_range;
    bool a53_cc;
} VTEncContext;

static int vtenc_populate_extradata(AVCodecContext   *avctx,
                                    CMVideoCodecType codec_type,
                                    CFStringRef      profile_level,
                                    CFNumberRef      gamma_level,
                                    CFDictionaryRef  enc_info,
                                    CFDictionaryRef  pixel_buffer_info);

/**
 * NULL-safe release of *refPtr, and sets value to NULL.
 */
static void vt_release_num(CFNumberRef* refPtr){
    if (!*refPtr) {
        return;
    }

    CFRelease(*refPtr);
    *refPtr = NULL;
}

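/* Record an asynchronous encoder error and drop everything that is still
 * queued; subsequent queue operations will return this error. */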
static void set_async_error(VTEncContext *vtctx, int err)
{
    BufNode *info;

    pthread_mutex_lock(&vtctx->lock);

    vtctx->async_error = err;

    info = vtctx->q_head;
    vtctx->q_head = vtctx->q_tail = NULL;

    while (info) {
        BufNode *next = info->next;
        CFRelease(info->cm_buffer);
        av_free(info);
        info = next;
    }

    pthread_mutex_unlock(&vtctx->lock);
}

static void clear_frame_queue(VTEncContext *vtctx)
{
    set_async_error(vtctx, 0);
}

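/*
 * Pop the next encoded sample buffer from the queue. With wait set, blocks on
 * cv_sample_sent until a sample arrives or an async error is flagged. *buf is
 * set to NULL when the queue is empty, or when the encoder is flushing and
 * every submitted frame has already been returned.
 */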
static int vtenc_q_pop(VTEncContext *vtctx, bool wait, CMSampleBufferRef *buf, ExtraSEI **sei)
{
    BufNode *info;

    pthread_mutex_lock(&vtctx->lock);

    if (vtctx->async_error) {
        pthread_mutex_unlock(&vtctx->lock);
        return vtctx->async_error;
    }

    if (vtctx->flushing && vtctx->frame_ct_in == vtctx->frame_ct_out) {
        *buf = NULL;

        pthread_mutex_unlock(&vtctx->lock);
        return 0;
    }

    while (!vtctx->q_head && !vtctx->async_error && wait) {
        pthread_cond_wait(&vtctx->cv_sample_sent, &vtctx->lock);
    }

    if (!vtctx->q_head) {
        pthread_mutex_unlock(&vtctx->lock);
        *buf = NULL;
        return 0;
    }

    info = vtctx->q_head;
    vtctx->q_head = vtctx->q_head->next;
    if (!vtctx->q_head) {
        vtctx->q_tail = NULL;
    }

    pthread_mutex_unlock(&vtctx->lock);

    *buf = info->cm_buffer;
    if (sei && *buf) {
        *sei = info->sei;
    } else if (info->sei) {
        if (info->sei->data) av_free(info->sei->data);
        av_free(info->sei);
    }
    av_free(info);

    vtctx->frame_ct_out++;

    return 0;
}

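/* Append an encoded sample buffer (plus optional SEI) to the queue and wake
 * up a waiting consumer. The buffer is retained for the lifetime of the
 * node. */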
static void vtenc_q_push(VTEncContext *vtctx, CMSampleBufferRef buffer, ExtraSEI *sei)
{
    BufNode *info = av_malloc(sizeof(BufNode));
    if (!info) {
        set_async_error(vtctx, AVERROR(ENOMEM));
        return;
    }

    CFRetain(buffer);
    info->cm_buffer = buffer;
    info->sei = sei;
    info->next = NULL;

    pthread_mutex_lock(&vtctx->lock);
    pthread_cond_signal(&vtctx->cv_sample_sent);

    if (!vtctx->q_head) {
        vtctx->q_head = info;
    } else {
        vtctx->q_tail->next = info;
    }

    vtctx->q_tail = info;

    pthread_mutex_unlock(&vtctx->lock);
}

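/* Count the NAL units in a sample buffer by walking its length-prefixed
 * (AVCC/HVCC style) boxes; length_code_size is the size of each length
 * prefix in bytes. */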
static int count_nalus(size_t length_code_size,
                       CMSampleBufferRef sample_buffer,
                       int *count)
{
    size_t offset = 0;
    int status;
    int nalu_ct = 0;
    uint8_t size_buf[4];
    size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
    CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);

    if (length_code_size > 4)
        return AVERROR_INVALIDDATA;

    while (offset < src_size) {
        size_t curr_src_len;
        size_t box_len = 0;
        size_t i;

        status = CMBlockBufferCopyDataBytes(block,
                                            offset,
                                            length_code_size,
                                            size_buf);
        if (status)
            return AVERROR_EXTERNAL;

        for (i = 0; i < length_code_size; i++) {
            box_len <<= 8;
            box_len |= size_buf[i];
        }

        curr_src_len = box_len + length_code_size;
        offset += curr_src_len;

        nalu_ct++;
    }

    *count = nalu_ct;
    return 0;
}

static CMVideoCodecType get_cm_codec_type(enum AVCodecID id)
{
    switch (id) {
    case AV_CODEC_ID_H264: return kCMVideoCodecType_H264;
    case AV_CODEC_ID_HEVC: return kCMVideoCodecType_HEVC;
    default:               return 0;
    }
}

/**
 * Compute the number of bytes needed to store all parameter sets of the
 * given format description, each prefixed with a start code.
 * On success, *size is set to that total; the matching data is written by
 * copy_param_sets() below.
 */
static int get_params_size(
    AVCodecContext              *avctx,
    CMVideoFormatDescriptionRef vid_fmt,
    size_t                      *size)
{
    VTEncContext *vtctx = avctx->priv_data;
    size_t total_size = 0;
    size_t ps_count;
    int is_count_bad = 0;
    size_t i;
    int status;
    status = vtctx->get_param_set_func(vid_fmt,
                                       0,
                                       NULL,
                                       NULL,
                                       &ps_count,
                                       NULL);
    if (status) {
        is_count_bad = 1;
        ps_count     = 0;
        status       = 0;
    }

    for (i = 0; i < ps_count || is_count_bad; i++) {
        const uint8_t *ps;
        size_t ps_size;
        status = vtctx->get_param_set_func(vid_fmt,
                                           i,
                                           &ps,
                                           &ps_size,
                                           NULL,
                                           NULL);
        if (status) {
            /*
             * When ps_count is invalid, status != 0 ends the loop normally
             * unless we didn't get any parameter sets.
             */
            if (i > 0 && is_count_bad) status = 0;

            break;
        }

        total_size += ps_size + sizeof(start_code);
    }

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error getting parameter set sizes: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    *size = total_size;
    return 0;
}

static int copy_param_sets(
    AVCodecContext              *avctx,
    CMVideoFormatDescriptionRef vid_fmt,
    uint8_t                     *dst,
    size_t                      dst_size)
{
    VTEncContext *vtctx = avctx->priv_data;
    size_t ps_count;
    int is_count_bad = 0;
    int status;
    size_t offset = 0;
    size_t i;

    status = vtctx->get_param_set_func(vid_fmt,
                                       0,
                                       NULL,
                                       NULL,
                                       &ps_count,
                                       NULL);
    if (status) {
        is_count_bad = 1;
        ps_count     = 0;
        status       = 0;
    }


    for (i = 0; i < ps_count || is_count_bad; i++) {
        const uint8_t *ps;
        size_t ps_size;
        size_t next_offset;

        status = vtctx->get_param_set_func(vid_fmt,
                                           i,
                                           &ps,
                                           &ps_size,
                                           NULL,
                                           NULL);
        if (status) {
            if (i > 0 && is_count_bad) status = 0;

            break;
        }

        next_offset = offset + sizeof(start_code) + ps_size;
        if (dst_size < next_offset) {
            av_log(avctx, AV_LOG_ERROR, "Error: buffer too small for parameter sets.\n");
            return AVERROR_BUFFER_TOO_SMALL;
        }

        memcpy(dst + offset, start_code, sizeof(start_code));
        offset += sizeof(start_code);

        memcpy(dst + offset, ps, ps_size);
        offset = next_offset;
    }

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error getting parameter set data: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    return 0;
}

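/* Build Annex B global extradata (start-code-prefixed parameter sets) from
 * the encoded sample's format description. */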
static int set_extradata(AVCodecContext *avctx, CMSampleBufferRef sample_buffer)
{
    CMVideoFormatDescriptionRef vid_fmt;
    size_t total_size;
    int status;

    vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
    if (!vid_fmt) {
        av_log(avctx, AV_LOG_ERROR, "No video format.\n");
        return AVERROR_EXTERNAL;
    }

    status = get_params_size(avctx, vid_fmt, &total_size);
    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Could not get parameter sets.\n");
        return status;
    }

    avctx->extradata = av_mallocz(total_size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!avctx->extradata) {
        return AVERROR(ENOMEM);
    }
    avctx->extradata_size = total_size;

    status = copy_param_sets(avctx, vid_fmt, avctx->extradata, total_size);

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Could not copy param sets.\n");
        return status;
    }

    return 0;
}

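/*
 * Output callback passed to VTCompressionSessionCreate(): records errors via
 * set_async_error(), fills global-header extradata from the first sample when
 * requested, and queues the encoded sample via vtenc_q_push().
 */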
static void vtenc_output_callback(
    void *ctx,
    void *sourceFrameCtx,
    OSStatus status,
    VTEncodeInfoFlags flags,
    CMSampleBufferRef sample_buffer)
{
    AVCodecContext *avctx = ctx;
    VTEncContext   *vtctx = avctx->priv_data;
    ExtraSEI *sei = sourceFrameCtx;

    if (vtctx->async_error) {
        if(sample_buffer) CFRelease(sample_buffer);
        return;
    }

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error encoding frame: %d\n", (int)status);
        set_async_error(vtctx, AVERROR_EXTERNAL);
        return;
    }

    if (!sample_buffer) {
        return;
    }

    if (!avctx->extradata && (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
        int set_status = set_extradata(avctx, sample_buffer);
        if (set_status) {
            set_async_error(vtctx, set_status);
            return;
        }
    }

    vtenc_q_push(vtctx, sample_buffer, sei);
}

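/* Query the NAL unit length-prefix size (in bytes) used by the sample
 * buffer's format description. */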
static int get_length_code_size(
    AVCodecContext    *avctx,
    CMSampleBufferRef sample_buffer,
    size_t            *size)
{
    VTEncContext *vtctx = avctx->priv_data;
    CMVideoFormatDescriptionRef vid_fmt;
    int isize;
    int status;

    vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
    if (!vid_fmt) {
        av_log(avctx, AV_LOG_ERROR, "Error getting buffer format description.\n");
        return AVERROR_EXTERNAL;
    }

    status = vtctx->get_param_set_func(vid_fmt,
                                       0,
                                       NULL,
                                       NULL,
                                       NULL,
                                       &isize);
    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error getting length code size: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    *size = isize;
    return 0;
}

/*
 * Returns true on success.
 *
 * If profile_level_val is NULL and this method returns true, don't specify the
 * profile/level to the encoder.
 */
static bool get_vt_h264_profile_level(AVCodecContext *avctx,
                                      CFStringRef    *profile_level_val)
{
    VTEncContext *vtctx = avctx->priv_data;
    int64_t profile = vtctx->profile;

    if (profile == H264_PROF_AUTO && vtctx->level) {
        //Need to pick a profile if level is not auto-selected.
        profile = vtctx->has_b_frames ? H264_PROF_MAIN : H264_PROF_BASELINE;
    }

    *profile_level_val = NULL;

    switch (profile) {
        case H264_PROF_AUTO:
            return true;

        case H264_PROF_BASELINE:
            switch (vtctx->level) {
                case  0: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_AutoLevel; break;
                case 13: *profile_level_val = kVTProfileLevel_H264_Baseline_1_3;       break;
                case 30: *profile_level_val = kVTProfileLevel_H264_Baseline_3_0;       break;
                case 31: *profile_level_val = kVTProfileLevel_H264_Baseline_3_1;       break;
                case 32: *profile_level_val = kVTProfileLevel_H264_Baseline_3_2;       break;
                case 40: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_4_0;       break;
                case 41: *profile_level_val = kVTProfileLevel_H264_Baseline_4_1;       break;
                case 42: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_4_2;       break;
                case 50: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_5_0;       break;
                case 51: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_5_1;       break;
                case 52: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_5_2;       break;
            }
            break;

        case H264_PROF_MAIN:
            switch (vtctx->level) {
                case  0: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Main_AutoLevel; break;
                case 30: *profile_level_val = kVTProfileLevel_H264_Main_3_0;       break;
                case 31: *profile_level_val = kVTProfileLevel_H264_Main_3_1;       break;
                case 32: *profile_level_val = kVTProfileLevel_H264_Main_3_2;       break;
                case 40: *profile_level_val = kVTProfileLevel_H264_Main_4_0;       break;
                case 41: *profile_level_val = kVTProfileLevel_H264_Main_4_1;       break;
                case 42: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Main_4_2;       break;
                case 50: *profile_level_val = kVTProfileLevel_H264_Main_5_0;       break;
                case 51: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Main_5_1;       break;
                case 52: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Main_5_2;       break;
            }
            break;

        case H264_PROF_HIGH:
            switch (vtctx->level) {
                case  0: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_AutoLevel; break;
                case 30: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_3_0;       break;
                case 31: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_3_1;       break;
                case 32: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_3_2;       break;
                case 40: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_4_0;       break;
                case 41: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_4_1;       break;
                case 42: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_4_2;       break;
                case 50: *profile_level_val = kVTProfileLevel_H264_High_5_0;       break;
                case 51: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_5_1;       break;
                case 52: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_5_2;       break;
            }
            break;
        case H264_PROF_EXTENDED:
            switch (vtctx->level) {
                case  0: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Extended_AutoLevel; break;
                case 50: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Extended_5_0;       break;
            }
            break;
    }

    if (!*profile_level_val) {
        av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
        return false;
    }

    return true;
}

/*
 * Returns true on success.
 *
 * If profile_level_val is NULL and this method returns true, don't specify the
 * profile/level to the encoder.
 */
static bool get_vt_hevc_profile_level(AVCodecContext *avctx,
                                      CFStringRef    *profile_level_val)
{
    VTEncContext *vtctx = avctx->priv_data;
    int64_t profile = vtctx->profile;

    *profile_level_val = NULL;

    switch (profile) {
        case HEVC_PROF_AUTO:
            return true;
        case HEVC_PROF_MAIN:
            *profile_level_val =
                compat_keys.kVTProfileLevel_HEVC_Main_AutoLevel;
            break;
        case HEVC_PROF_MAIN10:
            *profile_level_val =
                compat_keys.kVTProfileLevel_HEVC_Main10_AutoLevel;
            break;
    }

    if (!*profile_level_val) {
        av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
        return false;
    }

    return true;
}

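/* Map an AVPixelFormat/AVColorRange pair to the corresponding CoreVideo pixel
 * format. *range_guessed is set when no usable range was signalled and MPEG
 * (video) range is assumed. */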
static int get_cv_pixel_format(AVCodecContext* avctx,
                               enum AVPixelFormat fmt,
                               enum AVColorRange range,
                               int* av_pixel_format,
                               int* range_guessed)
{
    if (range_guessed) *range_guessed = range != AVCOL_RANGE_MPEG &&
                                        range != AVCOL_RANGE_JPEG;

    //MPEG range is used when no range is set
    if (fmt == AV_PIX_FMT_NV12) {
        *av_pixel_format = range == AVCOL_RANGE_JPEG ?
                                        kCVPixelFormatType_420YpCbCr8BiPlanarFullRange :
                                        kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
    } else if (fmt == AV_PIX_FMT_YUV420P) {
        *av_pixel_format = range == AVCOL_RANGE_JPEG ?
                                        kCVPixelFormatType_420YpCbCr8PlanarFullRange :
                                        kCVPixelFormatType_420YpCbCr8Planar;
    } else if (fmt == AV_PIX_FMT_P010LE) {
        *av_pixel_format = range == AVCOL_RANGE_JPEG ?
                                        kCVPixelFormatType_420YpCbCr10BiPlanarFullRange :
                                        kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange;
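        // Note: the range-dependent value above is immediately overridden;
        // P010 input currently always uses the video-range pixel format.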
        *av_pixel_format = kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange;
    } else {
        return AVERROR(EINVAL);
    }

    return 0;
}

static void add_color_attr(AVCodecContext *avctx, CFMutableDictionaryRef dict) {
    VTEncContext *vtctx = avctx->priv_data;

    if (vtctx->color_primaries) {
        CFDictionarySetValue(dict,
                             kCVImageBufferColorPrimariesKey,
                             vtctx->color_primaries);
    }

    if (vtctx->transfer_function) {
        CFDictionarySetValue(dict,
                             kCVImageBufferTransferFunctionKey,
                             vtctx->transfer_function);
    }

    if (vtctx->ycbcr_matrix) {
        CFDictionarySetValue(dict,
                             kCVImageBufferYCbCrMatrixKey,
                             vtctx->ycbcr_matrix);
    }
}

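/* Build the pixel buffer attributes dictionary (pixel format, width, height,
 * color attachments) handed to VTCompressionSessionCreate(). */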
static int create_cv_pixel_buffer_info(AVCodecContext* avctx,
                                       CFMutableDictionaryRef* dict)
{
    CFNumberRef cv_color_format_num = NULL;
    CFNumberRef width_num = NULL;
    CFNumberRef height_num = NULL;
    CFMutableDictionaryRef pixel_buffer_info = NULL;
    int cv_color_format;
    int status = get_cv_pixel_format(avctx,
                                     avctx->pix_fmt,
                                     avctx->color_range,
                                     &cv_color_format,
                                     NULL);
    if (status) return status;

    pixel_buffer_info = CFDictionaryCreateMutable(
                            kCFAllocatorDefault,
                            20,
                            &kCFCopyStringDictionaryKeyCallBacks,
                            &kCFTypeDictionaryValueCallBacks);

    if (!pixel_buffer_info) goto pbinfo_nomem;

    cv_color_format_num = CFNumberCreate(kCFAllocatorDefault,
                                         kCFNumberSInt32Type,
                                         &cv_color_format);
    if (!cv_color_format_num) goto pbinfo_nomem;

    CFDictionarySetValue(pixel_buffer_info,
                         kCVPixelBufferPixelFormatTypeKey,
                         cv_color_format_num);
    vt_release_num(&cv_color_format_num);

    width_num = CFNumberCreate(kCFAllocatorDefault,
                               kCFNumberSInt32Type,
                               &avctx->width);
    if (!width_num) goto pbinfo_nomem;

    CFDictionarySetValue(pixel_buffer_info,
                         kCVPixelBufferWidthKey,
                         width_num);
    vt_release_num(&width_num);

    height_num = CFNumberCreate(kCFAllocatorDefault,
                                kCFNumberSInt32Type,
                                &avctx->height);
    if (!height_num) goto pbinfo_nomem;

    CFDictionarySetValue(pixel_buffer_info,
                         kCVPixelBufferHeightKey,
                         height_num);
    vt_release_num(&height_num);

    add_color_attr(avctx, pixel_buffer_info);

    *dict = pixel_buffer_info;
    return 0;

pbinfo_nomem:
    vt_release_num(&cv_color_format_num);
    vt_release_num(&width_num);
    vt_release_num(&height_num);
    if (pixel_buffer_info) CFRelease(pixel_buffer_info);

    return AVERROR(ENOMEM);
}

static int get_cv_color_primaries(AVCodecContext *avctx,
                                  CFStringRef *primaries)
{
    enum AVColorPrimaries pri = avctx->color_primaries;
    switch (pri) {
        case AVCOL_PRI_UNSPECIFIED:
            *primaries = NULL;
            break;

        case AVCOL_PRI_BT709:
            *primaries = kCVImageBufferColorPrimaries_ITU_R_709_2;
            break;

        case AVCOL_PRI_BT2020:
            *primaries = compat_keys.kCVImageBufferColorPrimaries_ITU_R_2020;
            break;

        default:
            av_log(avctx, AV_LOG_ERROR, "Color primaries %s is not supported.\n", av_color_primaries_name(pri));
            *primaries = NULL;
            return -1;
    }

    return 0;
}

static int get_cv_transfer_function(AVCodecContext *avctx,
                                    CFStringRef *transfer_fnc,
                                    CFNumberRef *gamma_level)
{
    enum AVColorTransferCharacteristic trc = avctx->color_trc;
    Float32 gamma;
    *gamma_level = NULL;

    switch (trc) {
        case AVCOL_TRC_UNSPECIFIED:
            *transfer_fnc = NULL;
            break;

        case AVCOL_TRC_BT709:
            *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_709_2;
            break;

        case AVCOL_TRC_SMPTE240M:
            *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_240M_1995;
            break;

#if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ
        case AVCOL_TRC_SMPTE2084:
            *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_ST_2084_PQ;
            break;
#endif
#if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR
        case AVCOL_TRC_LINEAR:
            *transfer_fnc = kCVImageBufferTransferFunction_Linear;
            break;
#endif
#if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG
        case AVCOL_TRC_ARIB_STD_B67:
            *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_2100_HLG;
            break;
#endif

        case AVCOL_TRC_GAMMA22:
            gamma = 2.2;
            *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
            *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
            break;

        case AVCOL_TRC_GAMMA28:
            gamma = 2.8;
            *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
            *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
            break;

        case AVCOL_TRC_BT2020_10:
        case AVCOL_TRC_BT2020_12:
            *transfer_fnc = compat_keys.kCVImageBufferTransferFunction_ITU_R_2020;
            break;

        default:
            *transfer_fnc = NULL;
            av_log(avctx, AV_LOG_ERROR, "Transfer function %s is not supported.\n", av_color_transfer_name(trc));
            return -1;
    }

    return 0;
}

static int get_cv_ycbcr_matrix(AVCodecContext *avctx, CFStringRef *matrix) {
    switch(avctx->colorspace) {
        case AVCOL_SPC_BT709:
            *matrix = kCVImageBufferYCbCrMatrix_ITU_R_709_2;
            break;

        case AVCOL_SPC_UNSPECIFIED:
            *matrix = NULL;
            break;

        case AVCOL_SPC_BT470BG:
        case AVCOL_SPC_SMPTE170M:
            *matrix = kCVImageBufferYCbCrMatrix_ITU_R_601_4;
            break;

        case AVCOL_SPC_SMPTE240M:
            *matrix = kCVImageBufferYCbCrMatrix_SMPTE_240M_1995;
            break;

        case AVCOL_SPC_BT2020_NCL:
            *matrix = compat_keys.kCVImageBufferYCbCrMatrix_ITU_R_2020;
            break;

        default:
            av_log(avctx, AV_LOG_ERROR, "Color space %s is not supported.\n", av_color_space_name(avctx->colorspace));
            return -1;
    }

    return 0;
}

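/*
 * Create the VTCompressionSession and apply the configured properties:
 * average bitrate, H.264 data-rate limit, profile/level, keyframe interval,
 * pixel aspect ratio, color attachments, frame reordering, entropy mode and
 * real-time mode. Returns 0 on success or an AVERROR code.
 */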
static int vtenc_create_encoder(AVCodecContext   *avctx,
                                CMVideoCodecType codec_type,
                                CFStringRef      profile_level,
                                CFNumberRef      gamma_level,
                                CFDictionaryRef  enc_info,
                                CFDictionaryRef  pixel_buffer_info,
                                VTCompressionSessionRef *session)
{
    VTEncContext *vtctx = avctx->priv_data;
    SInt32       bit_rate = avctx->bit_rate;
    SInt32       max_rate = avctx->rc_max_rate;
    CFNumberRef  bit_rate_num;
    CFNumberRef  bytes_per_second;
    CFNumberRef  one_second;
    CFArrayRef   data_rate_limits;
    int64_t      bytes_per_second_value = 0;
    int64_t      one_second_value = 0;
    void         *nums[2];

    int status = VTCompressionSessionCreate(kCFAllocatorDefault,
                                            avctx->width,
                                            avctx->height,
                                            codec_type,
                                            enc_info,
                                            pixel_buffer_info,
                                            kCFAllocatorDefault,
                                            vtenc_output_callback,
                                            avctx,
                                            session);

    if (status || !vtctx->session) {
        av_log(avctx, AV_LOG_ERROR, "Error: cannot create compression session: %d\n", status);

#if !TARGET_OS_IPHONE
        if (!vtctx->allow_sw) {
            av_log(avctx, AV_LOG_ERROR, "Try -allow_sw 1. The hardware encoder may be busy, or not supported.\n");
        }
#endif

        return AVERROR_EXTERNAL;
    }

    bit_rate_num = CFNumberCreate(kCFAllocatorDefault,
                                  kCFNumberSInt32Type,
                                  &bit_rate);
    if (!bit_rate_num) return AVERROR(ENOMEM);

    status = VTSessionSetProperty(vtctx->session,
                                  kVTCompressionPropertyKey_AverageBitRate,
                                  bit_rate_num);
    CFRelease(bit_rate_num);

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error setting bitrate property: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    if (vtctx->codec_id == AV_CODEC_ID_H264 && max_rate > 0) {
        // kVTCompressionPropertyKey_DataRateLimits is not available for HEVC
        bytes_per_second_value = max_rate >> 3;
        bytes_per_second = CFNumberCreate(kCFAllocatorDefault,
                                          kCFNumberSInt64Type,
                                          &bytes_per_second_value);
        if (!bytes_per_second) {
            return AVERROR(ENOMEM);
        }
        one_second_value = 1;
        one_second = CFNumberCreate(kCFAllocatorDefault,
                                    kCFNumberSInt64Type,
                                    &one_second_value);
        if (!one_second) {
            CFRelease(bytes_per_second);
            return AVERROR(ENOMEM);
        }
        nums[0] = (void *)bytes_per_second;
        nums[1] = (void *)one_second;
        data_rate_limits = CFArrayCreate(kCFAllocatorDefault,
                                         (const void **)nums,
                                         2,
                                         &kCFTypeArrayCallBacks);

        if (!data_rate_limits) {
            CFRelease(bytes_per_second);
            CFRelease(one_second);
            return AVERROR(ENOMEM);
        }
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_DataRateLimits,
                                      data_rate_limits);

        CFRelease(bytes_per_second);
        CFRelease(one_second);
        CFRelease(data_rate_limits);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting max bitrate property: %d\n", status);
            return AVERROR_EXTERNAL;
        }
    }

    if (vtctx->codec_id == AV_CODEC_ID_H264) {
        // kVTCompressionPropertyKey_ProfileLevel is not available for HEVC
        if (profile_level) {
            status = VTSessionSetProperty(vtctx->session,
                                        kVTCompressionPropertyKey_ProfileLevel,
                                        profile_level);
            if (status) {
                av_log(avctx, AV_LOG_ERROR, "Error setting profile/level property: %d. Output will be encoded using a supported profile/level combination.\n", status);
            }
        }
    }

    if (avctx->gop_size > 0) {
        CFNumberRef interval = CFNumberCreate(kCFAllocatorDefault,
                                              kCFNumberIntType,
                                              &avctx->gop_size);
        if (!interval) {
            return AVERROR(ENOMEM);
        }

        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_MaxKeyFrameInterval,
                                      interval);
        CFRelease(interval);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting 'max key-frame interval' property: %d\n", status);
            return AVERROR_EXTERNAL;
        }
    }

    if (vtctx->frames_before) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_MoreFramesBeforeStart,
                                      kCFBooleanTrue);

        if (status == kVTPropertyNotSupportedErr) {
            av_log(avctx, AV_LOG_WARNING, "frames_before property is not supported on this device. Ignoring.\n");
        } else if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting frames_before property: %d\n", status);
        }
    }

    if (vtctx->frames_after) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_MoreFramesAfterEnd,
                                      kCFBooleanTrue);

        if (status == kVTPropertyNotSupportedErr) {
            av_log(avctx, AV_LOG_WARNING, "frames_after property is not supported on this device. Ignoring.\n");
        } else if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting frames_after property: %d\n", status);
        }
    }

    if (avctx->sample_aspect_ratio.num != 0) {
        CFNumberRef num;
        CFNumberRef den;
        CFMutableDictionaryRef par;
        AVRational *avpar = &avctx->sample_aspect_ratio;

        av_reduce(&avpar->num, &avpar->den,
                   avpar->num,  avpar->den,
                  0xFFFFFFFF);

        num = CFNumberCreate(kCFAllocatorDefault,
                             kCFNumberIntType,
                             &avpar->num);

        den = CFNumberCreate(kCFAllocatorDefault,
                             kCFNumberIntType,
                             &avpar->den);



        par = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                        2,
                                        &kCFCopyStringDictionaryKeyCallBacks,
                                        &kCFTypeDictionaryValueCallBacks);

        if (!par || !num || !den) {
            if (par) CFRelease(par);
            if (num) CFRelease(num);
            if (den) CFRelease(den);

            return AVERROR(ENOMEM);
        }

        CFDictionarySetValue(
            par,
            kCMFormatDescriptionKey_PixelAspectRatioHorizontalSpacing,
            num);

        CFDictionarySetValue(
            par,
            kCMFormatDescriptionKey_PixelAspectRatioVerticalSpacing,
            den);

        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_PixelAspectRatio,
                                      par);

        CFRelease(par);
        CFRelease(num);
        CFRelease(den);

        if (status) {
            av_log(avctx,
                   AV_LOG_ERROR,
                   "Error setting pixel aspect ratio to %d:%d: %d.\n",
                   avctx->sample_aspect_ratio.num,
                   avctx->sample_aspect_ratio.den,
                   status);

            return AVERROR_EXTERNAL;
        }
    }


    if (vtctx->transfer_function) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_TransferFunction,
                                      vtctx->transfer_function);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set transfer function: %d\n", status);
        }
    }


    if (vtctx->ycbcr_matrix) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_YCbCrMatrix,
                                      vtctx->ycbcr_matrix);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set ycbcr matrix: %d\n", status);
        }
    }


    if (vtctx->color_primaries) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_ColorPrimaries,
                                      vtctx->color_primaries);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set color primaries: %d\n", status);
        }
    }

    if (gamma_level) {
        status = VTSessionSetProperty(vtctx->session,
                                      kCVImageBufferGammaLevelKey,
                                      gamma_level);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set gamma level: %d\n", status);
        }
    }

    if (!vtctx->has_b_frames) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_AllowFrameReordering,
                                      kCFBooleanFalse);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting 'allow frame reordering' property: %d\n", status);
            return AVERROR_EXTERNAL;
        }
    }

    if (vtctx->entropy != VT_ENTROPY_NOT_SET) {
        CFStringRef entropy = vtctx->entropy == VT_CABAC ?
                                compat_keys.kVTH264EntropyMode_CABAC:
                                compat_keys.kVTH264EntropyMode_CAVLC;

        status = VTSessionSetProperty(vtctx->session,
                                      compat_keys.kVTCompressionPropertyKey_H264EntropyMode,
                                      entropy);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting entropy property: %d\n", status);
        }
    }

    if (vtctx->realtime) {
        status = VTSessionSetProperty(vtctx->session,
                                      compat_keys.kVTCompressionPropertyKey_RealTime,
                                      kCFBooleanTrue);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting realtime property: %d\n", status);
        }
    }

    status = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error: cannot prepare encoder: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    return 0;
}

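/*
 * Shared configuration path for init: picks the CMVideoCodecType, the
 * parameter-set accessor and profile/level, builds the encoder specification
 * and pixel buffer dictionaries, then creates the compression session (and
 * the global-header extradata first, when requested).
 */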
static int vtenc_configure_encoder(AVCodecContext *avctx)
{
    CFMutableDictionaryRef enc_info;
    CFMutableDictionaryRef pixel_buffer_info;
    CMVideoCodecType       codec_type;
    VTEncContext           *vtctx = avctx->priv_data;
    CFStringRef            profile_level;
    CFNumberRef            gamma_level = NULL;
    int                    status;

    codec_type = get_cm_codec_type(avctx->codec_id);
    if (!codec_type) {
        av_log(avctx, AV_LOG_ERROR, "Error: no mapping for AVCodecID %d\n", avctx->codec_id);
        return AVERROR(EINVAL);
    }

    vtctx->codec_id = avctx->codec_id;

    if (vtctx->codec_id == AV_CODEC_ID_H264) {
        vtctx->get_param_set_func = CMVideoFormatDescriptionGetH264ParameterSetAtIndex;

        vtctx->has_b_frames = avctx->max_b_frames > 0;
        if(vtctx->has_b_frames && vtctx->profile == H264_PROF_BASELINE){
            av_log(avctx, AV_LOG_WARNING, "Cannot use B-frames with baseline profile. Output will not contain B-frames.\n");
            vtctx->has_b_frames = false;
        }

        if (vtctx->entropy == VT_CABAC && vtctx->profile == H264_PROF_BASELINE) {
            av_log(avctx, AV_LOG_WARNING, "CABAC entropy requires 'main' or 'high' profile, but baseline was requested. Encode will not use CABAC entropy.\n");
            vtctx->entropy = VT_ENTROPY_NOT_SET;
        }

        if (!get_vt_h264_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
    } else {
        vtctx->get_param_set_func = compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
        if (!vtctx->get_param_set_func) return AVERROR(EINVAL);
        if (!get_vt_hevc_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
    }

    enc_info = CFDictionaryCreateMutable(
        kCFAllocatorDefault,
        20,
        &kCFCopyStringDictionaryKeyCallBacks,
        &kCFTypeDictionaryValueCallBacks
    );

    if (!enc_info) return AVERROR(ENOMEM);

#if !TARGET_OS_IPHONE
    if(vtctx->require_sw) {
        CFDictionarySetValue(enc_info,
                             compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
                             kCFBooleanFalse);
    } else if (!vtctx->allow_sw) {
        CFDictionarySetValue(enc_info,
                             compat_keys.kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
                             kCFBooleanTrue);
    } else {
        CFDictionarySetValue(enc_info,
                             compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
                             kCFBooleanTrue);
    }
#endif

    if (avctx->pix_fmt != AV_PIX_FMT_VIDEOTOOLBOX) {
        status = create_cv_pixel_buffer_info(avctx, &pixel_buffer_info);
        if (status)
            goto init_cleanup;
    } else {
        pixel_buffer_info = NULL;
    }

    vtctx->dts_delta = vtctx->has_b_frames ? -1 : 0;

    get_cv_transfer_function(avctx, &vtctx->transfer_function, &gamma_level);
    get_cv_ycbcr_matrix(avctx, &vtctx->ycbcr_matrix);
    get_cv_color_primaries(avctx, &vtctx->color_primaries);


    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
        status = vtenc_populate_extradata(avctx,
                                          codec_type,
                                          profile_level,
                                          gamma_level,
                                          enc_info,
                                          pixel_buffer_info);
        if (status)
            goto init_cleanup;
    }

    status = vtenc_create_encoder(avctx,
                                  codec_type,
                                  profile_level,
                                  gamma_level,
                                  enc_info,
                                  pixel_buffer_info,
                                  &vtctx->session);

init_cleanup:
    if (gamma_level)
        CFRelease(gamma_level);

    if (pixel_buffer_info)
        CFRelease(pixel_buffer_info);

    CFRelease(enc_info);

    return status;
}

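/* Encoder init: resolve the optional symbols once, set up the queue lock and
 * condition variable, create the session, and query whether the session will
 * actually reorder frames (emit B-frames). */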
static av_cold int vtenc_init(AVCodecContext *avctx)
{
    VTEncContext    *vtctx = avctx->priv_data;
    CFBooleanRef    has_b_frames_cfbool;
    int             status;

    pthread_once(&once_ctrl, loadVTEncSymbols);

    pthread_mutex_init(&vtctx->lock, NULL);
    pthread_cond_init(&vtctx->cv_sample_sent, NULL);

    vtctx->session = NULL;
    status = vtenc_configure_encoder(avctx);
    if (status) return status;

    status = VTSessionCopyProperty(vtctx->session,
                                   kVTCompressionPropertyKey_AllowFrameReordering,
                                   kCFAllocatorDefault,
                                   &has_b_frames_cfbool);

    if (!status && has_b_frames_cfbool) {
        //Some devices don't output B-frames for main profile, even if requested.
        vtctx->has_b_frames = CFBooleanGetValue(has_b_frames_cfbool);
        CFRelease(has_b_frames_cfbool);
    }
    avctx->has_b_frames = vtctx->has_b_frames;

    return 0;
}

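/* Determine from the sample attachments whether the encoded sample is a sync
 * (key) frame; samples without attachments are treated as key frames. */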
static void vtenc_get_frame_info(CMSampleBufferRef buffer, bool *is_key_frame)
{
    CFArrayRef      attachments;
    CFDictionaryRef attachment;
    CFBooleanRef    not_sync;
    CFIndex         len;

    attachments = CMSampleBufferGetSampleAttachmentsArray(buffer, false);
    len = !attachments ? 0 : CFArrayGetCount(attachments);

    if (!len) {
        *is_key_frame = true;
        return;
    }

    attachment = CFArrayGetValueAtIndex(attachments, 0);

    if (CFDictionaryGetValueIfPresent(attachment,
                                      kCMSampleAttachmentKey_NotSync,
                                      (const void **)&not_sync))
    {
        *is_key_frame = !CFBooleanGetValue(not_sync);
    } else {
        *is_key_frame = true;
    }
}

static int is_post_sei_nal_type(int nal_type){
    return nal_type != H264_NAL_SEI &&
           nal_type != H264_NAL_SPS &&
           nal_type != H264_NAL_PPS &&
           nal_type != H264_NAL_AUD;
}

1487 /*
1488  * Finds the end of the SEI message payloads in an SEI NAL unit: sets *sei_end
1489  * just past the last payload; returns a positive length, 0 if not SEI, or a
1490  * negative error code if the data is malformed.
 */
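/*
 * Each SEI message begins with a coded payload type and payload size. In the
 * H.264 syntax both values are written as a run of 0xFF bytes followed by a
 * final byte below 0xFF, all summed together (e.g. a payload size of 300 is
 * coded as 0xFF 0x2D); the loops below accumulate those coded bytes before
 * skipping over the payload itself.
 */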
1491 static int find_sei_end(AVCodecContext *avctx,
1492                         uint8_t        *nal_data,
1493                         size_t          nal_size,
1494                         uint8_t       **sei_end)
1495 {
1496     int nal_type;
1497     size_t sei_payload_size = 0;
1498     int sei_payload_type = 0;
1499     *sei_end = NULL;
1500     uint8_t *nal_start = nal_data;
1501
1502     if (!nal_size)
1503         return 0;
1504
1505     nal_type = *nal_data & 0x1F;
1506     if (nal_type != H264_NAL_SEI)
1507         return 0;
1508
1509     nal_data++;
1510     nal_size--;
1511
1512     if (nal_data[nal_size - 1] == 0x80)
1513         nal_size--;
1514
1515     while (nal_size > 0 && *nal_data > 0) {
1516         do{
1517             sei_payload_type += *nal_data;
1518             nal_data++;
1519             nal_size--;
1520         } while (nal_size > 0 && *nal_data == 0xFF);
1521
1522         if (!nal_size) {
1523             av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL unit while parsing the payload type.\n");
1524             return AVERROR_INVALIDDATA;
1525         }
1526
1527         do{
1528             sei_payload_size += *nal_data;
1529             nal_data++;
1530             nal_size--;
1531         } while (nal_size > 0 && *nal_data == 0xFF);
1532
1533         if (nal_size < sei_payload_size) {
1534             av_log(avctx, AV_LOG_ERROR, "SEI payload size exceeds the remaining data in the NAL unit.\n");
1535             return AVERROR_INVALIDDATA;
1536         }
1537
1538         nal_data += sei_payload_size;
1539         nal_size -= sei_payload_size;
1540     }
1541
1542     *sei_end = nal_data;
1543
1544     return nal_data - nal_start + 1;
1545 }
1546
1547 /**
1548  * Copies the data inserting emulation prevention bytes as needed.
1549  * Existing data in the destination can be taken into account by providing
1550  * dst with a dst_offset > 0.
1551  *
1552  * @return The number of bytes copied on success. On failure, the negative of
1553  *         the number of bytes needed to copy src is returned.
1554  */
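/*
 * Within a NAL unit the byte sequences 0x000000, 0x000001 and 0x000002 must not
 * appear in the payload, so an emulation prevention byte (0x03) is inserted
 * after any two consecutive zero bytes whenever the next byte would be <= 3,
 * e.g. 00 00 01 becomes 00 00 03 01.
 */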
1555 static int copy_emulation_prev(const uint8_t *src,
1556                                size_t         src_size,
1557                                uint8_t       *dst,
1558                                ssize_t        dst_offset,
1559                                size_t         dst_size)
1560 {
1561     int zeros = 0;
1562     int wrote_bytes;
1563     uint8_t* dst_start;
1564     uint8_t* dst_end = dst + dst_size;
1565     const uint8_t* src_end = src + src_size;
1566     int start_at = dst_offset > 2 ? dst_offset - 2 : 0;
1567     int i;
1568     for (i = start_at; i < dst_offset && i < dst_size; i++) {
1569         if (!dst[i])
1570             zeros++;
1571         else
1572             zeros = 0;
1573     }
1574
1575     dst += dst_offset;
1576     dst_start = dst;
1577     for (; src < src_end; src++, dst++) {
1578         if (zeros == 2) {
1579             int insert_ep3_byte = *src <= 3;
1580             if (insert_ep3_byte) {
1581                 if (dst < dst_end)
1582                     *dst = 3;
1583                 dst++;
1584             }
1585
1586             zeros = 0;
1587         }
1588
1589         if (dst < dst_end)
1590             *dst = *src;
1591
1592         if (!*src)
1593             zeros++;
1594         else
1595             zeros = 0;
1596     }
1597
1598     wrote_bytes = dst - dst_start;
1599
1600     if (dst > dst_end)
1601         return -wrote_bytes;
1602
1603     return wrote_bytes;
1604 }
1605
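/*
 * Writes a single SEI message into dst: the coded payload type, the coded
 * payload size, then the payload itself with emulation prevention bytes added
 * by copy_emulation_prev(). The caller appends the RBSP trailing byte (0x80).
 */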
1606 static int write_sei(const ExtraSEI *sei,
1607                      int             sei_type,
1608                      uint8_t        *dst,
1609                      size_t          dst_size)
1610 {
1611     uint8_t *sei_start = dst;
1612     size_t remaining_sei_size = sei->size;
1613     size_t remaining_dst_size = dst_size;
1614     int header_bytes;
1615     int bytes_written;
1616     ssize_t offset;
1617
1618     if (!remaining_dst_size)
1619         return AVERROR_BUFFER_TOO_SMALL;
1620
1621     while (sei_type && remaining_dst_size != 0) {
1622         int sei_byte = sei_type > 255 ? 255 : sei_type;
1623         *dst = sei_byte;
1624
1625         sei_type -= sei_byte;
1626         dst++;
1627         remaining_dst_size--;
1628     }
1629
1630     if (!remaining_dst_size)
1631         return AVERROR_BUFFER_TOO_SMALL;
1632
1633     while (remaining_sei_size && remaining_dst_size != 0) {
1634         int size_byte = remaining_sei_size > 255 ? 255 : remaining_sei_size;
1635         *dst = size_byte;
1636
1637         remaining_sei_size -= size_byte;
1638         dst++;
1639         remaining_dst_size--;
1640     }
1641
1642     if (remaining_dst_size < sei->size)
1643         return AVERROR_BUFFER_TOO_SMALL;
1644
1645     header_bytes = dst - sei_start;
1646
1647     offset = header_bytes;
1648     bytes_written = copy_emulation_prev(sei->data,
1649                                         sei->size,
1650                                         sei_start,
1651                                         offset,
1652                                         dst_size);
1653     if (bytes_written < 0)
1654         return AVERROR_BUFFER_TOO_SMALL;
1655
1656     bytes_written += header_bytes;
1657     return bytes_written;
1658 }
1659
1660 /**
1661  * Copies NAL units and replaces length codes with
1662  * H.264 Annex B start codes. On failure, the contents of
1663  * dst_data may have been modified.
1664  *
1665  * @param length_code_size Byte length of each length code
1666  * @param sample_buffer NAL units prefixed with length codes.
1667  * @param sei Optional A53 closed captions SEI data.
1668  * @param dst_data Must be zeroed before calling this function.
1669  *                 Contains the copied NAL units prefixed with
1670  *                 start codes when the function returns
1671  *                 successfully.
1672  * @param dst_size Length of dst_data
1673  * @return 0 on success
1674  *         AVERROR_INVALIDDATA if length_code_size is invalid
1675  *         AVERROR_BUFFER_TOO_SMALL if dst_data is too small
1676  *         or if a length code in sample_buffer specifies data beyond
1677  *         the end of its buffer.
1678  */
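/*
 * VideoToolbox emits AVCC-style samples in which every NAL unit is preceded by
 * a big-endian length field; this function rewrites each prefix as an Annex B
 * start code (start_code, defined earlier in this file) and, when closed
 * caption side data is present, inserts or extends a user data registered SEI
 * NAL unit.
 */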
1679 static int copy_replace_length_codes(
1680     AVCodecContext *avctx,
1681     size_t        length_code_size,
1682     CMSampleBufferRef sample_buffer,
1683     ExtraSEI      *sei,
1684     uint8_t       *dst_data,
1685     size_t        dst_size)
1686 {
1687     size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1688     size_t remaining_src_size = src_size;
1689     size_t remaining_dst_size = dst_size;
1690     size_t src_offset = 0;
1691     int wrote_sei = 0;
1692     int status;
1693     uint8_t size_buf[4];
1694     uint8_t nal_type;
1695     CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
1696
1697     if (length_code_size > 4) {
1698         return AVERROR_INVALIDDATA;
1699     }
1700
1701     while (remaining_src_size > 0) {
1702         size_t curr_src_len;
1703         size_t curr_dst_len;
1704         size_t box_len = 0;
1705         size_t i;
1706
1707         uint8_t       *dst_box;
1708
1709         status = CMBlockBufferCopyDataBytes(block,
1710                                             src_offset,
1711                                             length_code_size,
1712                                             size_buf);
1713         if (status) {
1714             av_log(avctx, AV_LOG_ERROR, "Cannot copy length: %d\n", status);
1715             return AVERROR_EXTERNAL;
1716         }
1717
1718         status = CMBlockBufferCopyDataBytes(block,
1719                                             src_offset + length_code_size,
1720                                             1,
1721                                             &nal_type);
1722
1723         if (status) {
1724             av_log(avctx, AV_LOG_ERROR, "Cannot copy type: %d\n", status);
1725             return AVERROR_EXTERNAL;
1726         }
1727
1728         nal_type &= 0x1F;
1729
1730         for (i = 0; i < length_code_size; i++) {
1731             box_len <<= 8;
1732             box_len |= size_buf[i];
1733         }
1734
1735         if (sei && !wrote_sei && is_post_sei_nal_type(nal_type)) {
1736             //No SEI NAL unit - insert.
1737             int wrote_bytes;
1738
1739             memcpy(dst_data, start_code, sizeof(start_code));
1740             dst_data += sizeof(start_code);
1741             remaining_dst_size -= sizeof(start_code);
1742
1743             *dst_data = H264_NAL_SEI;
1744             dst_data++;
1745             remaining_dst_size--;
1746
1747             wrote_bytes = write_sei(sei,
1748                                     H264_SEI_TYPE_USER_DATA_REGISTERED,
1749                                     dst_data,
1750                                     remaining_dst_size);
1751
1752             if (wrote_bytes < 0)
1753                 return wrote_bytes;
1754
1755             remaining_dst_size -= wrote_bytes;
1756             dst_data += wrote_bytes;
1757
1758             if (!remaining_dst_size)
1759                 return AVERROR_BUFFER_TOO_SMALL;
1760
1761             *dst_data = 0x80;
1762
1763             dst_data++;
1764             remaining_dst_size--;
1765
1766             wrote_sei = 1;
1767         }
1768
1769         curr_src_len = box_len + length_code_size;
1770         curr_dst_len = box_len + sizeof(start_code);
1771
1772         if (remaining_src_size < curr_src_len) {
1773             return AVERROR_BUFFER_TOO_SMALL;
1774         }
1775
1776         if (remaining_dst_size < curr_dst_len) {
1777             return AVERROR_BUFFER_TOO_SMALL;
1778         }
1779
1780         dst_box = dst_data + sizeof(start_code);
1781
1782         memcpy(dst_data, start_code, sizeof(start_code));
1783         status = CMBlockBufferCopyDataBytes(block,
1784                                             src_offset + length_code_size,
1785                                             box_len,
1786                                             dst_box);
1787
1788         if (status) {
1789             av_log(avctx, AV_LOG_ERROR, "Cannot copy data: %d\n", status);
1790             return AVERROR_EXTERNAL;
1791         }
1792
1793         if (sei && !wrote_sei && nal_type == H264_NAL_SEI) {
1794             //Found SEI NAL unit - append.
1795             int wrote_bytes;
1796             int old_sei_length;
1797             int extra_bytes;
1798             uint8_t *new_sei;
1799             old_sei_length = find_sei_end(avctx, dst_box, box_len, &new_sei);
1800             if (old_sei_length < 0)
1801                 return old_sei_length;
1802
1803             wrote_bytes = write_sei(sei,
1804                                     H264_SEI_TYPE_USER_DATA_REGISTERED,
1805                                     new_sei,
1806                                     remaining_dst_size - old_sei_length);
1807             if (wrote_bytes < 0)
1808                 return wrote_bytes;
1809
1810             if (new_sei + wrote_bytes >= dst_data + remaining_dst_size)
1811                 return AVERROR_BUFFER_TOO_SMALL;
1812
1813             new_sei[wrote_bytes++] = 0x80;
1814             extra_bytes = wrote_bytes - (dst_box + box_len - new_sei);
1815
1816             dst_data += extra_bytes;
1817             remaining_dst_size -= extra_bytes;
1818
1819             wrote_sei = 1;
1820         }
1821
1822         src_offset += curr_src_len;
1823         dst_data += curr_dst_len;
1824
1825         remaining_src_size -= curr_src_len;
1826         remaining_dst_size -= curr_dst_len;
1827     }
1828
1829     return 0;
1830 }
1831
1832 /**
1833  * Returns a sufficient number of bytes to contain the sei data.
1834  * It may be greater than the minimum required.
1835  */
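/*
 * For example, a 10-byte payload of type 4 that needs no emulation prevention
 * bytes yields 10 (payload) + 1 (coded size) + 1 (coded type) = 12 bytes; the
 * size/255 and type/255 terms below cover the extra 0xFF bytes of larger values.
 */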
1836 static int get_sei_msg_bytes(const ExtraSEI* sei, int type){
1837     int copied_size;
1838     if (sei->size == 0)
1839         return 0;
1840
1841     copied_size = -copy_emulation_prev(sei->data,
1842                                        sei->size,
1843                                        NULL,
1844                                        0,
1845                                        0);
1846
1847     if ((sei->size % 255) == 0) //may result in an extra byte
1848         copied_size++;
1849
1850     return copied_size + sei->size / 255 + 1 + type / 255 + 1;
1851 }
1852
1853 static int vtenc_cm_to_avpacket(
1854     AVCodecContext    *avctx,
1855     CMSampleBufferRef sample_buffer,
1856     AVPacket          *pkt,
1857     ExtraSEI          *sei)
1858 {
1859     VTEncContext *vtctx = avctx->priv_data;
1860
1861     int     status;
1862     bool    is_key_frame;
1863     bool    add_header;
1864     size_t  length_code_size;
1865     size_t  header_size = 0;
1866     size_t  in_buf_size;
1867     size_t  out_buf_size;
1868     size_t  sei_nalu_size = 0;
1869     int64_t dts_delta;
1870     int64_t time_base_num;
1871     int nalu_count;
1872     CMTime  pts;
1873     CMTime  dts;
1874     CMVideoFormatDescriptionRef vid_fmt;
1875
1876
1877     vtenc_get_frame_info(sample_buffer, &is_key_frame);
1878     status = get_length_code_size(avctx, sample_buffer, &length_code_size);
1879     if (status) return status;
1880
1881     add_header = is_key_frame && !(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER);
1882
1883     if (add_header) {
1884         vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
1885         if (!vid_fmt) {
1886             av_log(avctx, AV_LOG_ERROR, "Cannot get format description.\n");
1887             return AVERROR_EXTERNAL;
1888         }
1889
1890         status = get_params_size(avctx, vid_fmt, &header_size);
1891         if (status) return status;
1892     }
1893
1894     status = count_nalus(length_code_size, sample_buffer, &nalu_count);
1895     if(status)
1896         return status;
1897
1898     if (sei) {
1899         size_t msg_size = get_sei_msg_bytes(sei,
1900                                             H264_SEI_TYPE_USER_DATA_REGISTERED);
1901
1902         sei_nalu_size = sizeof(start_code) + 1 + msg_size + 1;
1903     }
1904
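    /*
     * Worst-case output size: parameter sets (only for key frames without
     * global headers), the raw sample data, the optional SEI NAL unit, and the
     * per-NAL difference between a start code and the length prefix it replaces.
     */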
1905     in_buf_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1906     out_buf_size = header_size +
1907                    in_buf_size +
1908                    sei_nalu_size +
1909                    nalu_count * ((int)sizeof(start_code) - (int)length_code_size);
1910
1911     status = ff_alloc_packet2(avctx, pkt, out_buf_size, out_buf_size);
1912     if (status < 0)
1913         return status;
1914
1915     if (add_header) {
1916         status = copy_param_sets(avctx, vid_fmt, pkt->data, out_buf_size);
1917         if(status) return status;
1918     }
1919
1920     status = copy_replace_length_codes(
1921         avctx,
1922         length_code_size,
1923         sample_buffer,
1924         sei,
1925         pkt->data + header_size,
1926         pkt->size - header_size
1927     );
1928
1929     if (status) {
1930         av_log(avctx, AV_LOG_ERROR, "Error copying packet data: %d\n", status);
1931         return status;
1932     }
1933
1934     if (is_key_frame) {
1935         pkt->flags |= AV_PKT_FLAG_KEY;
1936     }
1937
1938     pts = CMSampleBufferGetPresentationTimeStamp(sample_buffer);
1939     dts = CMSampleBufferGetDecodeTimeStamp      (sample_buffer);
1940
1941     if (CMTIME_IS_INVALID(dts)) {
1942         if (!vtctx->has_b_frames) {
1943             dts = pts;
1944         } else {
1945             av_log(avctx, AV_LOG_ERROR, "DTS is invalid.\n");
1946             return AVERROR_EXTERNAL;
1947         }
1948     }
1949
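    /*
     * The CMTime values were created with value pts * time_base.num and
     * timescale time_base.den (see vtenc_send_frame()), so dividing by
     * time_base.num recovers pts in AVCodecContext time_base units; dts_delta
     * shifts DTS down so it does not exceed PTS when B-frames are enabled.
     */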
1950     dts_delta = vtctx->dts_delta >= 0 ? vtctx->dts_delta : 0;
1951     time_base_num = avctx->time_base.num;
1952     pkt->pts = pts.value / time_base_num;
1953     pkt->dts = dts.value / time_base_num - dts_delta;
1954     pkt->size = out_buf_size;
1955
1956     return 0;
1957 }
1958
1959 /*
1960  * On return, *contiguous_buf_size is the total size of the buffer holding all
1961  * planes when they are laid out contiguously in memory, or 0 when they are not.
1962  */
1963 static int get_cv_pixel_info(
1964     AVCodecContext *avctx,
1965     const AVFrame  *frame,
1966     int            *color,
1967     int            *plane_count,
1968     size_t         *widths,
1969     size_t         *heights,
1970     size_t         *strides,
1971     size_t         *contiguous_buf_size)
1972 {
1973     VTEncContext *vtctx = avctx->priv_data;
1974     int av_format       = frame->format;
1975     int av_color_range  = frame->color_range;
1976     int i;
1977     int range_guessed;
1978     int status;
1979
1980     status = get_cv_pixel_format(avctx, av_format, av_color_range, color, &range_guessed);
1981     if (status) {
1982         av_log(avctx,
1983             AV_LOG_ERROR,
1984             "Could not get pixel format for color format '%s' range '%s'.\n",
1985             av_get_pix_fmt_name(av_format),
1986             av_color_range > AVCOL_RANGE_UNSPECIFIED &&
1987             av_color_range < AVCOL_RANGE_NB ?
1988                av_color_range_name(av_color_range) :
1989                "Unknown");
1990
1991         return AVERROR(EINVAL);
1992     }
1993
1994     if (range_guessed) {
1995         if (!vtctx->warned_color_range) {
1996             vtctx->warned_color_range = true;
1997             av_log(avctx,
1998                    AV_LOG_WARNING,
1999                    "Color range not set for %s. Using MPEG range.\n",
2000                    av_get_pix_fmt_name(av_format));
2001         }
2002     }
2003
2004     switch (av_format) {
2005     case AV_PIX_FMT_NV12:
2006         *plane_count = 2;
2007
2008         widths [0] = avctx->width;
2009         heights[0] = avctx->height;
2010         strides[0] = frame ? frame->linesize[0] : avctx->width;
2011
2012         widths [1] = (avctx->width  + 1) / 2;
2013         heights[1] = (avctx->height + 1) / 2;
2014         strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) & -2;
2015         break;
2016
2017     case AV_PIX_FMT_YUV420P:
2018         *plane_count = 3;
2019
2020         widths [0] = avctx->width;
2021         heights[0] = avctx->height;
2022         strides[0] = frame ? frame->linesize[0] : avctx->width;
2023
2024         widths [1] = (avctx->width  + 1) / 2;
2025         heights[1] = (avctx->height + 1) / 2;
2026         strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) / 2;
2027
2028         widths [2] = (avctx->width  + 1) / 2;
2029         heights[2] = (avctx->height + 1) / 2;
2030         strides[2] = frame ? frame->linesize[2] : (avctx->width + 1) / 2;
2031         break;
2032
2033     case AV_PIX_FMT_P010LE:
2034         *plane_count = 2;
2035         widths[0] = avctx->width;
2036         heights[0] = avctx->height;
2037         strides[0] = frame ? frame->linesize[0] : (avctx->width * 2 + 63) & -64;
2038
2039         widths[1] = (avctx->width + 1) / 2;
2040         heights[1] = (avctx->height + 1) / 2;
2041         strides[1] = frame ? frame->linesize[1] : ((avctx->width + 1) / 2 + 63) & -64;
2042         break;
2043
2044     default:
2045         av_log(
2046                avctx,
2047                AV_LOG_ERROR,
2048                "Could not get frame format info for color %d range %d.\n",
2049                av_format,
2050                av_color_range);
2051
2052         return AVERROR(EINVAL);
2053     }
2054
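    /* Planes are reported as contiguous only if each plane ends exactly where
     * the next one begins; otherwise *contiguous_buf_size is left at 0. */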
2055     *contiguous_buf_size = 0;
2056     for (i = 0; i < *plane_count; i++) {
2057         if (i < *plane_count - 1 &&
2058             frame->data[i] + strides[i] * heights[i] != frame->data[i + 1]) {
2059             *contiguous_buf_size = 0;
2060             break;
2061         }
2062
2063         *contiguous_buf_size += strides[i] * heights[i];
2064     }
2065
2066     return 0;
2067 }
2068
2069 //Copies the AVFrame planes into the CVPixelBuffer; not used when the input is already AV_PIX_FMT_VIDEOTOOLBOX.
2070 static int copy_avframe_to_pixel_buffer(AVCodecContext   *avctx,
2071                                         const AVFrame    *frame,
2072                                         CVPixelBufferRef cv_img,
2073                                         const size_t     *plane_strides,
2074                                         const size_t     *plane_rows)
2075 {
2076     int i, j;
2077     size_t plane_count;
2078     int status;
2079     int rows;
2080     int src_stride;
2081     int dst_stride;
2082     uint8_t *src_addr;
2083     uint8_t *dst_addr;
2084     size_t copy_bytes;
2085
2086     status = CVPixelBufferLockBaseAddress(cv_img, 0);
2087     if (status) {
2088         av_log(avctx,
2089                AV_LOG_ERROR,
2090                "Error: Could not lock base address of CVPixelBuffer: %d.\n",
2091                status);
2092
2093         return AVERROR_EXTERNAL;
2094     }
2095
2096     if (CVPixelBufferIsPlanar(cv_img)) {
2097         plane_count = CVPixelBufferGetPlaneCount(cv_img);
2098         for (i = 0; frame->data[i]; i++) {
2099             if (i == plane_count) {
2100                 CVPixelBufferUnlockBaseAddress(cv_img, 0);
2101                 av_log(avctx,
2102                     AV_LOG_ERROR,
2103                     "Error: different number of planes in AVFrame and CVPixelBuffer.\n"
2104                 );
2105
2106                 return AVERROR_EXTERNAL;
2107             }
2108
2109             dst_addr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cv_img, i);
2110             src_addr = (uint8_t*)frame->data[i];
2111             dst_stride = CVPixelBufferGetBytesPerRowOfPlane(cv_img, i);
2112             src_stride = plane_strides[i];
2113             rows = plane_rows[i];
2114
2115             if (dst_stride == src_stride) {
2116                 memcpy(dst_addr, src_addr, src_stride * rows);
2117             } else {
2118                 copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2119
2120                 for (j = 0; j < rows; j++) {
2121                     memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2122                 }
2123             }
2124         }
2125     } else {
2126         if (frame->data[1]) {
2127             CVPixelBufferUnlockBaseAddress(cv_img, 0);
2128             av_log(avctx,
2129                 AV_LOG_ERROR,
2130                 "Error: different number of planes in AVFrame and non-planar CVPixelBuffer.\n"
2131             );
2132
2133             return AVERROR_EXTERNAL;
2134         }
2135
2136         dst_addr = (uint8_t*)CVPixelBufferGetBaseAddress(cv_img);
2137         src_addr = (uint8_t*)frame->data[0];
2138         dst_stride = CVPixelBufferGetBytesPerRow(cv_img);
2139         src_stride = plane_strides[0];
2140         rows = plane_rows[0];
2141
2142         if (dst_stride == src_stride) {
2143             memcpy(dst_addr, src_addr, src_stride * rows);
2144         } else {
2145             copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2146
2147             for (j = 0; j < rows; j++) {
2148                 memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2149             }
2150         }
2151     }
2152
2153     status = CVPixelBufferUnlockBaseAddress(cv_img, 0);
2154     if (status) {
2155         av_log(avctx, AV_LOG_ERROR, "Error: Could not unlock CVPixelBuffer base address: %d.\n", status);
2156         return AVERROR_EXTERNAL;
2157     }
2158
2159     return 0;
2160 }
2161
2162 static int create_cv_pixel_buffer(AVCodecContext   *avctx,
2163                                   const AVFrame    *frame,
2164                                   CVPixelBufferRef *cv_img)
2165 {
2166     int plane_count;
2167     int color;
2168     size_t widths [AV_NUM_DATA_POINTERS];
2169     size_t heights[AV_NUM_DATA_POINTERS];
2170     size_t strides[AV_NUM_DATA_POINTERS];
2171     int status;
2172     size_t contiguous_buf_size;
2173     CVPixelBufferPoolRef pix_buf_pool;
2174     VTEncContext* vtctx = avctx->priv_data;
2175
2176     if (avctx->pix_fmt == AV_PIX_FMT_VIDEOTOOLBOX) {
2177         av_assert0(frame->format == AV_PIX_FMT_VIDEOTOOLBOX);
2178
2179         *cv_img = (CVPixelBufferRef)frame->data[3];
2180         av_assert0(*cv_img);
2181
2182         CFRetain(*cv_img);
2183         return 0;
2184     }
2185
2186     memset(widths,  0, sizeof(widths));
2187     memset(heights, 0, sizeof(heights));
2188     memset(strides, 0, sizeof(strides));
2189
2190     status = get_cv_pixel_info(
2191         avctx,
2192         frame,
2193         &color,
2194         &plane_count,
2195         widths,
2196         heights,
2197         strides,
2198         &contiguous_buf_size
2199     );
2200
2201     if (status) {
2202         av_log(
2203             avctx,
2204             AV_LOG_ERROR,
2205             "Error: Cannot convert format %d color_range %d: %d\n",
2206             frame->format,
2207             frame->color_range,
2208             status
2209         );
2210
2211         return AVERROR_EXTERNAL;
2212     }
2213
2214     pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2215     if (!pix_buf_pool) {
2216         /* On iOS, the VT session is invalidated when the APP switches from
2217          * foreground to background and vice versa. Fetch the actual error code
2218          * of the VT session to detect that case and restart the VT session
2219          * accordingly. */
2220         OSStatus vtstatus;
2221
2222         vtstatus = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
2223         if (vtstatus == kVTInvalidSessionErr) {
2224             CFRelease(vtctx->session);
2225             vtctx->session = NULL;
2226             status = vtenc_configure_encoder(avctx);
2227             if (status == 0)
2228                 pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2229         }
2230         if (!pix_buf_pool) {
2231             av_log(avctx, AV_LOG_ERROR, "Could not get pixel buffer pool.\n");
2232             return AVERROR_EXTERNAL;
2233         }
2234         else
2235             av_log(avctx, AV_LOG_WARNING, "VT session restarted because of a "
2236                    "kVTInvalidSessionErr error.\n");
2237     }
2238
2239     status = CVPixelBufferPoolCreatePixelBuffer(NULL,
2240                                                 pix_buf_pool,
2241                                                 cv_img);
2242
2243
2244     if (status) {
2245         av_log(avctx, AV_LOG_ERROR, "Could not create pixel buffer from pool: %d.\n", status);
2246         return AVERROR_EXTERNAL;
2247     }
2248
2249     status = copy_avframe_to_pixel_buffer(avctx, frame, *cv_img, strides, heights);
2250     if (status) {
2251         CFRelease(*cv_img);
2252         *cv_img = NULL;
2253         return status;
2254     }
2255
2256     return 0;
2257 }
2258
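/*
 * Builds the per-frame encode options: when the incoming AVFrame is flagged as
 * an I-frame, a dictionary containing kVTEncodeFrameOptionKey_ForceKeyFrame is
 * created so that VideoToolbox emits a key frame at that position.
 */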
2259 static int create_encoder_dict_h264(const AVFrame *frame,
2260                                     CFDictionaryRef* dict_out)
2261 {
2262     CFDictionaryRef dict = NULL;
2263     if (frame->pict_type == AV_PICTURE_TYPE_I) {
2264         const void *keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
2265         const void *vals[] = { kCFBooleanTrue };
2266
2267         dict = CFDictionaryCreate(NULL, keys, vals, 1, NULL, NULL);
2268         if(!dict) return AVERROR(ENOMEM);
2269     }
2270
2271     *dict_out = dict;
2272     return 0;
2273 }
2274
2275 static int vtenc_send_frame(AVCodecContext *avctx,
2276                             VTEncContext   *vtctx,
2277                             const AVFrame  *frame)
2278 {
2279     CMTime time;
2280     CFDictionaryRef frame_dict;
2281     CVPixelBufferRef cv_img = NULL;
2282     AVFrameSideData *side_data = NULL;
2283     ExtraSEI *sei = NULL;
2284     int status = create_cv_pixel_buffer(avctx, frame, &cv_img);
2285
2286     if (status) return status;
2287
2288     status = create_encoder_dict_h264(frame, &frame_dict);
2289     if (status) {
2290         CFRelease(cv_img);
2291         return status;
2292     }
2293
2294     side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
2295     if (vtctx->a53_cc && side_data && side_data->size) {
2296         sei = av_mallocz(sizeof(*sei));
2297         if (!sei) {
2298             av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
2299         } else {
2300             int ret = ff_alloc_a53_sei(frame, 0, &sei->data, &sei->size);
2301             if (ret < 0) {
2302                 av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
2303                 av_free(sei);
2304                 sei = NULL;
2305             }
2306         }
2307     }
2308
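    /*
     * The ExtraSEI pointer is passed as the sourceFrameRefcon argument of
     * VTCompressionSessionEncodeFrame() so the output callback registered at
     * session creation can queue it with the encoded sample; vtenc_q_pop()
     * later hands it back to vtenc_frame() for insertion into the packet.
     */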
2309     time = CMTimeMake(frame->pts * avctx->time_base.num, avctx->time_base.den);
2310     status = VTCompressionSessionEncodeFrame(
2311         vtctx->session,
2312         cv_img,
2313         time,
2314         kCMTimeInvalid,
2315         frame_dict,
2316         sei,
2317         NULL
2318     );
2319
2320     if (frame_dict) CFRelease(frame_dict);
2321     CFRelease(cv_img);
2322
2323     if (status) {
2324         av_log(avctx, AV_LOG_ERROR, "Error: cannot encode frame: %d\n", status);
2325         return AVERROR_EXTERNAL;
2326     }
2327
2328     return 0;
2329 }
2330
2331 static av_cold int vtenc_frame(
2332     AVCodecContext *avctx,
2333     AVPacket       *pkt,
2334     const AVFrame  *frame,
2335     int            *got_packet)
2336 {
2337     VTEncContext *vtctx = avctx->priv_data;
2338     bool get_frame;
2339     int status;
2340     CMSampleBufferRef buf = NULL;
2341     ExtraSEI *sei = NULL;
2342
2343     if (frame) {
2344         status = vtenc_send_frame(avctx, vtctx, frame);
2345
2346         if (status) {
2347             status = AVERROR_EXTERNAL;
2348             goto end_nopkt;
2349         }
2350
2351         if (vtctx->frame_ct_in == 0) {
2352             vtctx->first_pts = frame->pts;
2353         } else if(vtctx->frame_ct_in == 1 && vtctx->has_b_frames) {
2354             vtctx->dts_delta = frame->pts - vtctx->first_pts;
2355         }
2356
2357         vtctx->frame_ct_in++;
2358     } else if(!vtctx->flushing) {
2359         vtctx->flushing = true;
2360
2361         status = VTCompressionSessionCompleteFrames(vtctx->session,
2362                                                     kCMTimeIndefinite);
2363
2364         if (status) {
2365             av_log(avctx, AV_LOG_ERROR, "Error flushing frames: %d\n", status);
2366             status = AVERROR_EXTERNAL;
2367             goto end_nopkt;
2368         }
2369     }
2370
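    /*
     * Output is held back while dts_delta is still negative, i.e. while the DTS
     * offset needed for B-frame reordering has not been determined yet; a flush
     * request (frame == NULL) always tries to drain the queue.
     */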
2371     *got_packet = 0;
2372     get_frame = vtctx->dts_delta >= 0 || !frame;
2373     if (!get_frame) {
2374         status = 0;
2375         goto end_nopkt;
2376     }
2377
2378     status = vtenc_q_pop(vtctx, !frame, &buf, &sei);
2379     if (status) goto end_nopkt;
2380     if (!buf)   goto end_nopkt;
2381
2382     status = vtenc_cm_to_avpacket(avctx, buf, pkt, sei);
2383     if (sei) {
2384         if (sei->data) av_free(sei->data);
2385         av_free(sei);
2386     }
2387     CFRelease(buf);
2388     if (status) goto end_nopkt;
2389
2390     *got_packet = 1;
2391     return 0;
2392
2393 end_nopkt:
2394     av_packet_unref(pkt);
2395     return status;
2396 }
2397
2398 static int vtenc_populate_extradata(AVCodecContext   *avctx,
2399                                     CMVideoCodecType codec_type,
2400                                     CFStringRef      profile_level,
2401                                     CFNumberRef      gamma_level,
2402                                     CFDictionaryRef  enc_info,
2403                                     CFDictionaryRef  pixel_buffer_info)
2404 {
2405     VTEncContext *vtctx = avctx->priv_data;
2406     int status;
2407     CVPixelBufferPoolRef pool = NULL;
2408     CVPixelBufferRef pix_buf = NULL;
2409     CMTime time;
2410     CMSampleBufferRef buf = NULL;
2411
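    /*
     * To produce global headers, a throwaway session encodes a single frame
     * taken from its own pixel buffer pool and flushes it; the output path
     * fills avctx->extradata with the parameter sets (checked by the assert
     * below), after which the session is torn down so init can create the
     * real one.
     */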
2412     status = vtenc_create_encoder(avctx,
2413                                   codec_type,
2414                                   profile_level,
2415                                   gamma_level,
2416                                   enc_info,
2417                                   pixel_buffer_info,
2418                                   &vtctx->session);
2419     if (status)
2420         goto pe_cleanup;
2421
2422     pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2423     if (!pool) {
2424         av_log(avctx, AV_LOG_ERROR, "Error getting pixel buffer pool.\n");
2425         goto pe_cleanup;
2426     }
2427
2428     status = CVPixelBufferPoolCreatePixelBuffer(NULL,
2429                                                 pool,
2430                                                 &pix_buf);
2431
2432     if (status != kCVReturnSuccess) {
2433         av_log(avctx, AV_LOG_ERROR, "Error creating frame from pool: %d\n", status);
2434         goto pe_cleanup;
2435     }
2436
2437     time = CMTimeMake(0, avctx->time_base.den);
2438     status = VTCompressionSessionEncodeFrame(vtctx->session,
2439                                              pix_buf,
2440                                              time,
2441                                              kCMTimeInvalid,
2442                                              NULL,
2443                                              NULL,
2444                                              NULL);
2445
2446     if (status) {
2447         av_log(avctx,
2448                AV_LOG_ERROR,
2449                "Error sending frame for extradata: %d\n",
2450                status);
2451
2452         goto pe_cleanup;
2453     }
2454
2455     //Populates extradata - output frames are flushed and param sets are available.
2456     status = VTCompressionSessionCompleteFrames(vtctx->session,
2457                                                 kCMTimeIndefinite);
2458
2459     if (status)
2460         goto pe_cleanup;
2461
2462     status = vtenc_q_pop(vtctx, 0, &buf, NULL);
2463     if (status) {
2464         av_log(avctx, AV_LOG_ERROR, "Error popping the encoded extradata frame: %d\n", status);
2465         goto pe_cleanup;
2466     }
2467
2468     CFRelease(buf);
2469
2472 pe_cleanup:
2473     if(vtctx->session)
2474         CFRelease(vtctx->session);
2475
2476     vtctx->session = NULL;
2477     vtctx->frame_ct_out = 0;
2478
2479     av_assert0(status != 0 || (avctx->extradata && avctx->extradata_size > 0));
2480
2481     return status;
2482 }
2483
2484 static av_cold int vtenc_close(AVCodecContext *avctx)
2485 {
2486     VTEncContext *vtctx = avctx->priv_data;
2487
2488     pthread_cond_destroy(&vtctx->cv_sample_sent);
2489     pthread_mutex_destroy(&vtctx->lock);
2490
2491     if(!vtctx->session) return 0;
2492
2493     VTCompressionSessionCompleteFrames(vtctx->session,
2494                                        kCMTimeIndefinite);
2495     clear_frame_queue(vtctx);
2496     CFRelease(vtctx->session);
2497     vtctx->session = NULL;
2498
2499     if (vtctx->color_primaries) {
2500         CFRelease(vtctx->color_primaries);
2501         vtctx->color_primaries = NULL;
2502     }
2503
2504     if (vtctx->transfer_function) {
2505         CFRelease(vtctx->transfer_function);
2506         vtctx->transfer_function = NULL;
2507     }
2508
2509     if (vtctx->ycbcr_matrix) {
2510         CFRelease(vtctx->ycbcr_matrix);
2511         vtctx->ycbcr_matrix = NULL;
2512     }
2513
2514     return 0;
2515 }
2516
2517 static const enum AVPixelFormat avc_pix_fmts[] = {
2518     AV_PIX_FMT_VIDEOTOOLBOX,
2519     AV_PIX_FMT_NV12,
2520     AV_PIX_FMT_YUV420P,
2521     AV_PIX_FMT_NONE
2522 };
2523
2524 static const enum AVPixelFormat hevc_pix_fmts[] = {
2525     AV_PIX_FMT_VIDEOTOOLBOX,
2526     AV_PIX_FMT_NV12,
2527     AV_PIX_FMT_YUV420P,
2528     AV_PIX_FMT_P010LE,
2529     AV_PIX_FMT_NONE
2530 };
2531
2532 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
2533 #define COMMON_OPTIONS \
2534     { "allow_sw", "Allow software encoding", OFFSET(allow_sw), AV_OPT_TYPE_BOOL, \
2535         { .i64 = 0 }, 0, 1, VE }, \
2536     { "require_sw", "Require software encoding", OFFSET(require_sw), AV_OPT_TYPE_BOOL, \
2537         { .i64 = 0 }, 0, 1, VE }, \
2538     { "realtime", "Hint that encoding should happen in real-time if not faster (e.g. capturing from camera).", \
2539         OFFSET(realtime), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2540     { "frames_before", "Other frames will come before the frames in this session. This helps smooth concatenation issues.", \
2541         OFFSET(frames_before), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2542     { "frames_after", "Other frames will come after the frames in this session. This helps smooth concatenation issues.", \
2543         OFFSET(frames_after), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2544
2545 #define OFFSET(x) offsetof(VTEncContext, x)
2546 static const AVOption h264_options[] = {
2547     { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = H264_PROF_AUTO }, H264_PROF_AUTO, H264_PROF_COUNT, VE, "profile" },
2548     { "baseline", "Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_BASELINE }, INT_MIN, INT_MAX, VE, "profile" },
2549     { "main",     "Main Profile",     0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_MAIN     }, INT_MIN, INT_MAX, VE, "profile" },
2550     { "high",     "High Profile",     0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_HIGH     }, INT_MIN, INT_MAX, VE, "profile" },
2551     { "extended", "Extended Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_EXTENDED }, INT_MIN, INT_MAX, VE, "profile" },
2552
2553     { "level", "Level", OFFSET(level), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 52, VE, "level" },
2554     { "1.3", "Level 1.3, only available with Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = 13 }, INT_MIN, INT_MAX, VE, "level" },
2555     { "3.0", "Level 3.0", 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, INT_MIN, INT_MAX, VE, "level" },
2556     { "3.1", "Level 3.1", 0, AV_OPT_TYPE_CONST, { .i64 = 31 }, INT_MIN, INT_MAX, VE, "level" },
2557     { "3.2", "Level 3.2", 0, AV_OPT_TYPE_CONST, { .i64 = 32 }, INT_MIN, INT_MAX, VE, "level" },
2558     { "4.0", "Level 4.0", 0, AV_OPT_TYPE_CONST, { .i64 = 40 }, INT_MIN, INT_MAX, VE, "level" },
2559     { "4.1", "Level 4.1", 0, AV_OPT_TYPE_CONST, { .i64 = 41 }, INT_MIN, INT_MAX, VE, "level" },
2560     { "4.2", "Level 4.2", 0, AV_OPT_TYPE_CONST, { .i64 = 42 }, INT_MIN, INT_MAX, VE, "level" },
2561     { "5.0", "Level 5.0", 0, AV_OPT_TYPE_CONST, { .i64 = 50 }, INT_MIN, INT_MAX, VE, "level" },
2562     { "5.1", "Level 5.1", 0, AV_OPT_TYPE_CONST, { .i64 = 51 }, INT_MIN, INT_MAX, VE, "level" },
2563     { "5.2", "Level 5.2", 0, AV_OPT_TYPE_CONST, { .i64 = 52 }, INT_MIN, INT_MAX, VE, "level" },
2564
2565     { "coder", "Entropy coding", OFFSET(entropy), AV_OPT_TYPE_INT, { .i64 = VT_ENTROPY_NOT_SET }, VT_ENTROPY_NOT_SET, VT_CABAC, VE, "coder" },
2566     { "cavlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
2567     { "vlc",   "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
2568     { "cabac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
2569     { "ac",    "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
2570
2571     { "a53cc", "Use A53 Closed Captions (if available)", OFFSET(a53_cc), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, VE },
2572
2573     COMMON_OPTIONS
2574     { NULL },
2575 };
2576
2577 static const AVClass h264_videotoolbox_class = {
2578     .class_name = "h264_videotoolbox",
2579     .item_name  = av_default_item_name,
2580     .option     = h264_options,
2581     .version    = LIBAVUTIL_VERSION_INT,
2582 };
2583
2584 AVCodec ff_h264_videotoolbox_encoder = {
2585     .name             = "h264_videotoolbox",
2586     .long_name        = NULL_IF_CONFIG_SMALL("VideoToolbox H.264 Encoder"),
2587     .type             = AVMEDIA_TYPE_VIDEO,
2588     .id               = AV_CODEC_ID_H264,
2589     .priv_data_size   = sizeof(VTEncContext),
2590     .pix_fmts         = avc_pix_fmts,
2591     .init             = vtenc_init,
2592     .encode2          = vtenc_frame,
2593     .close            = vtenc_close,
2594     .capabilities     = AV_CODEC_CAP_DELAY,
2595     .priv_class       = &h264_videotoolbox_class,
2596     .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE |
2597                         FF_CODEC_CAP_INIT_CLEANUP,
2598 };
2599
2600 static const AVOption hevc_options[] = {
2601     { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = HEVC_PROF_AUTO }, HEVC_PROF_AUTO, HEVC_PROF_COUNT, VE, "profile" },
2602     { "main",     "Main Profile",     0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN   }, INT_MIN, INT_MAX, VE, "profile" },
2603     { "main10",   "Main10 Profile",   0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN10 }, INT_MIN, INT_MAX, VE, "profile" },
2604
2605     COMMON_OPTIONS
2606     { NULL },
2607 };
2608
2609 static const AVClass hevc_videotoolbox_class = {
2610     .class_name = "hevc_videotoolbox",
2611     .item_name  = av_default_item_name,
2612     .option     = hevc_options,
2613     .version    = LIBAVUTIL_VERSION_INT,
2614 };
2615
2616 AVCodec ff_hevc_videotoolbox_encoder = {
2617     .name             = "hevc_videotoolbox",
2618     .long_name        = NULL_IF_CONFIG_SMALL("VideoToolbox H.265 Encoder"),
2619     .type             = AVMEDIA_TYPE_VIDEO,
2620     .id               = AV_CODEC_ID_HEVC,
2621     .priv_data_size   = sizeof(VTEncContext),
2622     .pix_fmts         = hevc_pix_fmts,
2623     .init             = vtenc_init,
2624     .encode2          = vtenc_frame,
2625     .close            = vtenc_close,
2626     .capabilities     = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE,
2627     .priv_class       = &hevc_videotoolbox_class,
2628     .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE |
2629                         FF_CODEC_CAP_INIT_CLEANUP,
2630     .wrapper_name     = "videotoolbox",
2631 };