/*
 * copyright (c) 2015 Rick Kern <kernrj@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <VideoToolbox/VideoToolbox.h>
#include <CoreVideo/CoreVideo.h>
#include <CoreMedia/CoreMedia.h>
#include <TargetConditionals.h>
#include <Availability.h>
#include "avcodec.h"
#include "libavutil/opt.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavcodec/avcodec.h"
#include "libavutil/pixdesc.h"
#include "internal.h"
#include <pthread.h>
#include "atsc_a53.h"
#include "h264.h"
#include "h264_sei.h"
#include <dlfcn.h>

#if !HAVE_KCMVIDEOCODECTYPE_HEVC
enum { kCMVideoCodecType_HEVC = 'hvc1' };
#endif

#if !HAVE_KCVPIXELFORMATTYPE_420YPCBCR10BIPLANARVIDEORANGE
enum { kCVPixelFormatType_420YpCbCr10BiPlanarFullRange = 'xf20' };
enum { kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange = 'x420' };
#endif

typedef OSStatus (*getParameterSetAtIndex)(CMFormatDescriptionRef videoDesc,
                                           size_t parameterSetIndex,
                                           const uint8_t **parameterSetPointerOut,
                                           size_t *parameterSetSizeOut,
                                           size_t *parameterSetCountOut,
                                           int *NALUnitHeaderLengthOut);

//These symbols may not be present
static struct{
    CFStringRef kCVImageBufferColorPrimaries_ITU_R_2020;
    CFStringRef kCVImageBufferTransferFunction_ITU_R_2020;
    CFStringRef kCVImageBufferYCbCrMatrix_ITU_R_2020;

    CFStringRef kVTCompressionPropertyKey_H264EntropyMode;
    CFStringRef kVTH264EntropyMode_CAVLC;
    CFStringRef kVTH264EntropyMode_CABAC;

    CFStringRef kVTProfileLevel_H264_Baseline_4_0;
    CFStringRef kVTProfileLevel_H264_Baseline_4_2;
    CFStringRef kVTProfileLevel_H264_Baseline_5_0;
    CFStringRef kVTProfileLevel_H264_Baseline_5_1;
    CFStringRef kVTProfileLevel_H264_Baseline_5_2;
    CFStringRef kVTProfileLevel_H264_Baseline_AutoLevel;
    CFStringRef kVTProfileLevel_H264_Main_4_2;
    CFStringRef kVTProfileLevel_H264_Main_5_1;
    CFStringRef kVTProfileLevel_H264_Main_5_2;
    CFStringRef kVTProfileLevel_H264_Main_AutoLevel;
    CFStringRef kVTProfileLevel_H264_High_3_0;
    CFStringRef kVTProfileLevel_H264_High_3_1;
    CFStringRef kVTProfileLevel_H264_High_3_2;
    CFStringRef kVTProfileLevel_H264_High_4_0;
    CFStringRef kVTProfileLevel_H264_High_4_1;
    CFStringRef kVTProfileLevel_H264_High_4_2;
    CFStringRef kVTProfileLevel_H264_High_5_1;
    CFStringRef kVTProfileLevel_H264_High_5_2;
    CFStringRef kVTProfileLevel_H264_High_AutoLevel;
    CFStringRef kVTProfileLevel_H264_Extended_5_0;
    CFStringRef kVTProfileLevel_H264_Extended_AutoLevel;

    CFStringRef kVTProfileLevel_HEVC_Main_AutoLevel;
    CFStringRef kVTProfileLevel_HEVC_Main10_AutoLevel;

    CFStringRef kVTCompressionPropertyKey_RealTime;

    CFStringRef kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder;
    CFStringRef kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder;

    getParameterSetAtIndex CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
} compat_keys;

#define GET_SYM(symbol, defaultVal)                                     \
do{                                                                     \
    CFStringRef* handle = (CFStringRef*)dlsym(RTLD_DEFAULT, #symbol);   \
    if(!handle)                                                         \
        compat_keys.symbol = CFSTR(defaultVal);                         \
    else                                                                \
        compat_keys.symbol = *handle;                                   \
}while(0)
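/*
 * Note: GET_SYM is a weak-linking shim for constants that only exist on newer
 * OS/SDK versions. It looks the symbol up at runtime with dlsym(); when the
 * symbol is absent, the key falls back to a CFSTR() with the same string
 * value VideoToolbox would use. For illustration, an invocation such as
 * GET_SYM(kVTProfileLevel_H264_Main_5_2, "H264_Main_5_2") expands roughly to:
 *
 *     CFStringRef *handle = (CFStringRef *)dlsym(RTLD_DEFAULT,
 *                                                "kVTProfileLevel_H264_Main_5_2");
 *     compat_keys.kVTProfileLevel_H264_Main_5_2 =
 *         handle ? *handle : CFSTR("H264_Main_5_2");
 */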

static pthread_once_t once_ctrl = PTHREAD_ONCE_INIT;

static void loadVTEncSymbols(){
    compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex =
        (getParameterSetAtIndex)dlsym(
            RTLD_DEFAULT,
            "CMVideoFormatDescriptionGetHEVCParameterSetAtIndex"
        );

    GET_SYM(kCVImageBufferColorPrimaries_ITU_R_2020,   "ITU_R_2020");
    GET_SYM(kCVImageBufferTransferFunction_ITU_R_2020, "ITU_R_2020");
    GET_SYM(kCVImageBufferYCbCrMatrix_ITU_R_2020,      "ITU_R_2020");

    GET_SYM(kVTCompressionPropertyKey_H264EntropyMode, "H264EntropyMode");
    GET_SYM(kVTH264EntropyMode_CAVLC, "CAVLC");
    GET_SYM(kVTH264EntropyMode_CABAC, "CABAC");

    GET_SYM(kVTProfileLevel_H264_Baseline_4_0,       "H264_Baseline_4_0");
    GET_SYM(kVTProfileLevel_H264_Baseline_4_2,       "H264_Baseline_4_2");
    GET_SYM(kVTProfileLevel_H264_Baseline_5_0,       "H264_Baseline_5_0");
    GET_SYM(kVTProfileLevel_H264_Baseline_5_1,       "H264_Baseline_5_1");
    GET_SYM(kVTProfileLevel_H264_Baseline_5_2,       "H264_Baseline_5_2");
    GET_SYM(kVTProfileLevel_H264_Baseline_AutoLevel, "H264_Baseline_AutoLevel");
    GET_SYM(kVTProfileLevel_H264_Main_4_2,           "H264_Main_4_2");
    GET_SYM(kVTProfileLevel_H264_Main_5_1,           "H264_Main_5_1");
    GET_SYM(kVTProfileLevel_H264_Main_5_2,           "H264_Main_5_2");
    GET_SYM(kVTProfileLevel_H264_Main_AutoLevel,     "H264_Main_AutoLevel");
    GET_SYM(kVTProfileLevel_H264_High_3_0,           "H264_High_3_0");
    GET_SYM(kVTProfileLevel_H264_High_3_1,           "H264_High_3_1");
    GET_SYM(kVTProfileLevel_H264_High_3_2,           "H264_High_3_2");
    GET_SYM(kVTProfileLevel_H264_High_4_0,           "H264_High_4_0");
    GET_SYM(kVTProfileLevel_H264_High_4_1,           "H264_High_4_1");
    GET_SYM(kVTProfileLevel_H264_High_4_2,           "H264_High_4_2");
    GET_SYM(kVTProfileLevel_H264_High_5_1,           "H264_High_5_1");
    GET_SYM(kVTProfileLevel_H264_High_5_2,           "H264_High_5_2");
    GET_SYM(kVTProfileLevel_H264_High_AutoLevel,     "H264_High_AutoLevel");
    GET_SYM(kVTProfileLevel_H264_Extended_5_0,       "H264_Extended_5_0");
    GET_SYM(kVTProfileLevel_H264_Extended_AutoLevel, "H264_Extended_AutoLevel");

    GET_SYM(kVTProfileLevel_HEVC_Main_AutoLevel,     "HEVC_Main_AutoLevel");
    GET_SYM(kVTProfileLevel_HEVC_Main10_AutoLevel,   "HEVC_Main10_AutoLevel");

    GET_SYM(kVTCompressionPropertyKey_RealTime, "RealTime");

    GET_SYM(kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
            "EnableHardwareAcceleratedVideoEncoder");
    GET_SYM(kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
            "RequireHardwareAcceleratedVideoEncoder");
}

typedef enum VT_H264Profile {
    H264_PROF_AUTO,
    H264_PROF_BASELINE,
    H264_PROF_MAIN,
    H264_PROF_HIGH,
    H264_PROF_EXTENDED,
    H264_PROF_COUNT
} VT_H264Profile;

typedef enum VTH264Entropy{
    VT_ENTROPY_NOT_SET,
    VT_CAVLC,
    VT_CABAC
} VTH264Entropy;

typedef enum VT_HEVCProfile {
    HEVC_PROF_AUTO,
    HEVC_PROF_MAIN,
    HEVC_PROF_MAIN10,
    HEVC_PROF_COUNT
} VT_HEVCProfile;

static const uint8_t start_code[] = { 0, 0, 0, 1 };

typedef struct ExtraSEI {
    void *data;
    size_t size;
} ExtraSEI;

typedef struct BufNode {
    CMSampleBufferRef cm_buffer;
    ExtraSEI *sei;
    struct BufNode* next;
    int error;
} BufNode;
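/*
 * Encoded output is delivered on a VideoToolbox-owned callback thread and
 * queued as BufNode entries on a singly linked list (q_head/q_tail below),
 * guarded by a mutex and signalled through a condition variable; the encode
 * thread drains it in vtenc_q_pop().
 */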

typedef struct VTEncContext {
    AVClass *class;
    enum AVCodecID codec_id;
    VTCompressionSessionRef session;
    CFStringRef ycbcr_matrix;
    CFStringRef color_primaries;
    CFStringRef transfer_function;
    getParameterSetAtIndex get_param_set_func;

    pthread_mutex_t lock;
    pthread_cond_t  cv_sample_sent;

    int async_error;

    BufNode *q_head;
    BufNode *q_tail;

    int64_t frame_ct_out;
    int64_t frame_ct_in;

    int64_t first_pts;
    int64_t dts_delta;

    int64_t profile;
    int64_t level;
    int64_t entropy;
    int64_t realtime;
    int64_t frames_before;
    int64_t frames_after;

    int64_t allow_sw;
    int64_t require_sw;

    bool flushing;
    bool has_b_frames;
    bool warned_color_range;

    /* can't be bool type since AVOption will access it as int */
    int a53_cc;
} VTEncContext;

static int vtenc_populate_extradata(AVCodecContext   *avctx,
                                    CMVideoCodecType codec_type,
                                    CFStringRef      profile_level,
                                    CFNumberRef      gamma_level,
                                    CFDictionaryRef  enc_info,
                                    CFDictionaryRef  pixel_buffer_info);

/**
 * NULL-safe release of *refPtr, and sets value to NULL.
 */
static void vt_release_num(CFNumberRef* refPtr){
    if (!*refPtr) {
        return;
    }

    CFRelease(*refPtr);
    *refPtr = NULL;
}

static void set_async_error(VTEncContext *vtctx, int err)
{
    BufNode *info;

    pthread_mutex_lock(&vtctx->lock);

    vtctx->async_error = err;

    info = vtctx->q_head;
    vtctx->q_head = vtctx->q_tail = NULL;

    while (info) {
        BufNode *next = info->next;
        CFRelease(info->cm_buffer);
        av_free(info);
        info = next;
    }

    pthread_mutex_unlock(&vtctx->lock);
}

static void clear_frame_queue(VTEncContext *vtctx)
{
    set_async_error(vtctx, 0);
}

static int vtenc_q_pop(VTEncContext *vtctx, bool wait, CMSampleBufferRef *buf, ExtraSEI **sei)
{
    BufNode *info;

    pthread_mutex_lock(&vtctx->lock);

    if (vtctx->async_error) {
        pthread_mutex_unlock(&vtctx->lock);
        return vtctx->async_error;
    }

    if (vtctx->flushing && vtctx->frame_ct_in == vtctx->frame_ct_out) {
        *buf = NULL;

        pthread_mutex_unlock(&vtctx->lock);
        return 0;
    }

    while (!vtctx->q_head && !vtctx->async_error && wait) {
        pthread_cond_wait(&vtctx->cv_sample_sent, &vtctx->lock);
    }

    if (!vtctx->q_head) {
        pthread_mutex_unlock(&vtctx->lock);
        *buf = NULL;
        return 0;
    }

    info = vtctx->q_head;
    vtctx->q_head = vtctx->q_head->next;
    if (!vtctx->q_head) {
        vtctx->q_tail = NULL;
    }

    pthread_mutex_unlock(&vtctx->lock);

    *buf = info->cm_buffer;
    if (sei && *buf) {
        *sei = info->sei;
    } else if (info->sei) {
        if (info->sei->data) av_free(info->sei->data);
        av_free(info->sei);
    }
    av_free(info);

    vtctx->frame_ct_out++;

    return 0;
}

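/*
 * Called on the VideoToolbox callback thread: retains the sample buffer,
 * appends a node at the tail of the queue and wakes any thread blocked in
 * vtenc_q_pop(). Signalling before the node is linked is harmless here,
 * since the waiter only re-checks q_head after reacquiring the mutex.
 */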
static void vtenc_q_push(VTEncContext *vtctx, CMSampleBufferRef buffer, ExtraSEI *sei)
{
    BufNode *info = av_malloc(sizeof(BufNode));
    if (!info) {
        set_async_error(vtctx, AVERROR(ENOMEM));
        return;
    }

    CFRetain(buffer);
    info->cm_buffer = buffer;
    info->sei = sei;
    info->next = NULL;

    pthread_mutex_lock(&vtctx->lock);
    pthread_cond_signal(&vtctx->cv_sample_sent);

    if (!vtctx->q_head) {
        vtctx->q_head = info;
    } else {
        vtctx->q_tail->next = info;
    }

    vtctx->q_tail = info;

    pthread_mutex_unlock(&vtctx->lock);
}

static int count_nalus(size_t length_code_size,
                       CMSampleBufferRef sample_buffer,
                       int *count)
{
    size_t offset = 0;
    int status;
    int nalu_ct = 0;
    uint8_t size_buf[4];
    size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
    CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);

    if (length_code_size > 4)
        return AVERROR_INVALIDDATA;

    while (offset < src_size) {
        size_t curr_src_len;
        size_t box_len = 0;
        size_t i;

        status = CMBlockBufferCopyDataBytes(block,
                                            offset,
                                            length_code_size,
                                            size_buf);
        if (status)
            return AVERROR_EXTERNAL;

        for (i = 0; i < length_code_size; i++) {
            box_len <<= 8;
            box_len |= size_buf[i];
        }

        curr_src_len = box_len + length_code_size;
        offset += curr_src_len;

        nalu_ct++;
    }

    *count = nalu_ct;
    return 0;
}
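/*
 * VideoToolbox emits AVCC/HVCC-style samples: every NAL unit is preceded by
 * a big-endian length field of length_code_size bytes (at most 4). The loop
 * above simply walks those length prefixes to count the NAL units contained
 * in the sample.
 */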

static CMVideoCodecType get_cm_codec_type(enum AVCodecID id)
{
    switch (id) {
    case AV_CODEC_ID_H264: return kCMVideoCodecType_H264;
    case AV_CODEC_ID_HEVC: return kCMVideoCodecType_HEVC;
    default:               return 0;
    }
}

/**
 * Computes the combined size of all parameter sets (SPS/PPS, plus VPS for
 * HEVC) in the given format description, including one start code per
 * parameter set, and stores the result in *size.
 */
static int get_params_size(
    AVCodecContext              *avctx,
    CMVideoFormatDescriptionRef vid_fmt,
    size_t                      *size)
{
    VTEncContext *vtctx = avctx->priv_data;
    size_t total_size = 0;
    size_t ps_count;
    int is_count_bad = 0;
    size_t i;
    int status;
    status = vtctx->get_param_set_func(vid_fmt,
                                       0,
                                       NULL,
                                       NULL,
                                       &ps_count,
                                       NULL);
    if (status) {
        is_count_bad = 1;
        ps_count     = 0;
        status       = 0;
    }

    for (i = 0; i < ps_count || is_count_bad; i++) {
        const uint8_t *ps;
        size_t ps_size;
        status = vtctx->get_param_set_func(vid_fmt,
                                           i,
                                           &ps,
                                           &ps_size,
                                           NULL,
                                           NULL);
        if (status) {
            /*
             * When ps_count is invalid, status != 0 ends the loop normally
             * unless we didn't get any parameter sets.
             */
            if (i > 0 && is_count_bad) status = 0;

            break;
        }

        total_size += ps_size + sizeof(start_code);
    }

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error getting parameter set sizes: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    *size = total_size;
    return 0;
}
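/*
 * The is_count_bad dance above (and in copy_param_sets() below) works around
 * implementations whose parameter-set-count query fails: in that case the
 * loop keeps asking for sets by index until the getter errors out, and that
 * terminating error is treated as a normal end of iteration as long as at
 * least one parameter set was returned.
 */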

static int copy_param_sets(
    AVCodecContext              *avctx,
    CMVideoFormatDescriptionRef vid_fmt,
    uint8_t                     *dst,
    size_t                      dst_size)
{
    VTEncContext *vtctx = avctx->priv_data;
    size_t ps_count;
    int is_count_bad = 0;
    int status;
    size_t offset = 0;
    size_t i;

    status = vtctx->get_param_set_func(vid_fmt,
                                       0,
                                       NULL,
                                       NULL,
                                       &ps_count,
                                       NULL);
    if (status) {
        is_count_bad = 1;
        ps_count     = 0;
        status       = 0;
    }


    for (i = 0; i < ps_count || is_count_bad; i++) {
        const uint8_t *ps;
        size_t ps_size;
        size_t next_offset;

        status = vtctx->get_param_set_func(vid_fmt,
                                           i,
                                           &ps,
                                           &ps_size,
                                           NULL,
                                           NULL);
        if (status) {
            if (i > 0 && is_count_bad) status = 0;

            break;
        }

        next_offset = offset + sizeof(start_code) + ps_size;
        if (dst_size < next_offset) {
            av_log(avctx, AV_LOG_ERROR, "Error: buffer too small for parameter sets.\n");
            return AVERROR_BUFFER_TOO_SMALL;
        }

        memcpy(dst + offset, start_code, sizeof(start_code));
        offset += sizeof(start_code);

        memcpy(dst + offset, ps, ps_size);
        offset = next_offset;
    }

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error getting parameter set data: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    return 0;
}

static int set_extradata(AVCodecContext *avctx, CMSampleBufferRef sample_buffer)
{
    CMVideoFormatDescriptionRef vid_fmt;
    size_t total_size;
    int status;

    vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
    if (!vid_fmt) {
        av_log(avctx, AV_LOG_ERROR, "No video format.\n");
        return AVERROR_EXTERNAL;
    }

    status = get_params_size(avctx, vid_fmt, &total_size);
    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Could not get parameter sets.\n");
        return status;
    }

    avctx->extradata = av_mallocz(total_size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!avctx->extradata) {
        return AVERROR(ENOMEM);
    }
    avctx->extradata_size = total_size;

    status = copy_param_sets(avctx, vid_fmt, avctx->extradata, total_size);

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Could not copy param sets.\n");
        return status;
    }

    return 0;
}
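/*
 * When AV_CODEC_FLAG_GLOBAL_HEADER is set, the parameter sets of the first
 * encoded sample are exported through avctx->extradata in annex-b form
 * (each set prefixed with a 00 00 00 01 start code), so muxers that need
 * global headers can pick them up.
 */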

static void vtenc_output_callback(
    void *ctx,
    void *sourceFrameCtx,
    OSStatus status,
    VTEncodeInfoFlags flags,
    CMSampleBufferRef sample_buffer)
{
    AVCodecContext *avctx = ctx;
    VTEncContext   *vtctx = avctx->priv_data;
    ExtraSEI *sei = sourceFrameCtx;

    if (vtctx->async_error) {
        if(sample_buffer) CFRelease(sample_buffer);
        return;
    }

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error encoding frame: %d\n", (int)status);
        set_async_error(vtctx, AVERROR_EXTERNAL);
        return;
    }

    if (!sample_buffer) {
        return;
    }

    if (!avctx->extradata && (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
        int set_status = set_extradata(avctx, sample_buffer);
        if (set_status) {
            set_async_error(vtctx, set_status);
            return;
        }
    }

    vtenc_q_push(vtctx, sample_buffer, sei);
}
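/*
 * This callback runs on a VideoToolbox-owned thread, typically after
 * VTCompressionSessionEncodeFrame() has already returned on the encode
 * thread. Errors are therefore recorded via set_async_error() and reported
 * from the next call into the encoder rather than being returned directly.
 */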

static int get_length_code_size(
    AVCodecContext    *avctx,
    CMSampleBufferRef sample_buffer,
    size_t            *size)
{
    VTEncContext *vtctx = avctx->priv_data;
    CMVideoFormatDescriptionRef vid_fmt;
    int isize;
    int status;

    vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
    if (!vid_fmt) {
        av_log(avctx, AV_LOG_ERROR, "Error getting buffer format description.\n");
        return AVERROR_EXTERNAL;
    }

    status = vtctx->get_param_set_func(vid_fmt,
                                       0,
                                       NULL,
                                       NULL,
                                       NULL,
                                       &isize);
    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error getting length code size: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    *size = isize;
    return 0;
}

/*
 * Returns true on success.
 *
 * If profile_level_val is NULL and this method returns true, don't specify the
 * profile/level to the encoder.
 */
static bool get_vt_h264_profile_level(AVCodecContext *avctx,
                                      CFStringRef    *profile_level_val)
{
    VTEncContext *vtctx = avctx->priv_data;
    int64_t profile = vtctx->profile;

    if (profile == H264_PROF_AUTO && vtctx->level) {
        //Need to pick a profile if level is not auto-selected.
        profile = vtctx->has_b_frames ? H264_PROF_MAIN : H264_PROF_BASELINE;
    }
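    /*
     * vtctx->level stores the requested H.264 level times ten (e.g. 31 means
     * level 3.1); 0 selects the AutoLevel variant of the chosen profile.
     * Levels whose kVTProfileLevel_* constant may be missing from older SDKs
     * are taken from compat_keys instead of being referenced directly.
     */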

    *profile_level_val = NULL;

    switch (profile) {
        case H264_PROF_AUTO:
            return true;

        case H264_PROF_BASELINE:
            switch (vtctx->level) {
                case  0: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_AutoLevel; break;
                case 13: *profile_level_val = kVTProfileLevel_H264_Baseline_1_3;       break;
                case 30: *profile_level_val = kVTProfileLevel_H264_Baseline_3_0;       break;
                case 31: *profile_level_val = kVTProfileLevel_H264_Baseline_3_1;       break;
                case 32: *profile_level_val = kVTProfileLevel_H264_Baseline_3_2;       break;
                case 40: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_4_0;       break;
                case 41: *profile_level_val = kVTProfileLevel_H264_Baseline_4_1;       break;
                case 42: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_4_2;       break;
                case 50: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_5_0;       break;
                case 51: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_5_1;       break;
                case 52: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Baseline_5_2;       break;
            }
            break;

        case H264_PROF_MAIN:
            switch (vtctx->level) {
                case  0: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Main_AutoLevel; break;
                case 30: *profile_level_val = kVTProfileLevel_H264_Main_3_0;       break;
                case 31: *profile_level_val = kVTProfileLevel_H264_Main_3_1;       break;
                case 32: *profile_level_val = kVTProfileLevel_H264_Main_3_2;       break;
                case 40: *profile_level_val = kVTProfileLevel_H264_Main_4_0;       break;
                case 41: *profile_level_val = kVTProfileLevel_H264_Main_4_1;       break;
                case 42: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Main_4_2;       break;
                case 50: *profile_level_val = kVTProfileLevel_H264_Main_5_0;       break;
                case 51: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Main_5_1;       break;
                case 52: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Main_5_2;       break;
            }
            break;

        case H264_PROF_HIGH:
            switch (vtctx->level) {
                case  0: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_AutoLevel; break;
                case 30: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_3_0;       break;
                case 31: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_3_1;       break;
                case 32: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_3_2;       break;
                case 40: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_4_0;       break;
                case 41: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_4_1;       break;
                case 42: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_4_2;       break;
                case 50: *profile_level_val = kVTProfileLevel_H264_High_5_0;       break;
                case 51: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_5_1;       break;
                case 52: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_High_5_2;       break;
            }
            break;
        case H264_PROF_EXTENDED:
            switch (vtctx->level) {
                case  0: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Extended_AutoLevel; break;
                case 50: *profile_level_val =
                                  compat_keys.kVTProfileLevel_H264_Extended_5_0;       break;
            }
            break;
    }

    if (!*profile_level_val) {
        av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
        return false;
    }

    return true;
}

/*
 * Returns true on success.
 *
 * If profile_level_val is NULL and this method returns true, don't specify the
 * profile/level to the encoder.
 */
static bool get_vt_hevc_profile_level(AVCodecContext *avctx,
                                      CFStringRef    *profile_level_val)
{
    VTEncContext *vtctx = avctx->priv_data;
    int64_t profile = vtctx->profile;

    *profile_level_val = NULL;

    switch (profile) {
        case HEVC_PROF_AUTO:
            return true;
        case HEVC_PROF_MAIN:
            *profile_level_val =
                compat_keys.kVTProfileLevel_HEVC_Main_AutoLevel;
            break;
        case HEVC_PROF_MAIN10:
            *profile_level_val =
                compat_keys.kVTProfileLevel_HEVC_Main10_AutoLevel;
            break;
    }

    if (!*profile_level_val) {
        av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
        return false;
    }

    return true;
}

static int get_cv_pixel_format(AVCodecContext* avctx,
                               enum AVPixelFormat fmt,
                               enum AVColorRange range,
                               int* av_pixel_format,
                               int* range_guessed)
{
    if (range_guessed) *range_guessed = range != AVCOL_RANGE_MPEG &&
                                        range != AVCOL_RANGE_JPEG;

    //MPEG range is used when no range is set
    if (fmt == AV_PIX_FMT_NV12) {
        *av_pixel_format = range == AVCOL_RANGE_JPEG ?
                                        kCVPixelFormatType_420YpCbCr8BiPlanarFullRange :
                                        kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
    } else if (fmt == AV_PIX_FMT_YUV420P) {
        *av_pixel_format = range == AVCOL_RANGE_JPEG ?
                                        kCVPixelFormatType_420YpCbCr8PlanarFullRange :
                                        kCVPixelFormatType_420YpCbCr8Planar;
    } else if (fmt == AV_PIX_FMT_P010LE) {
        *av_pixel_format = range == AVCOL_RANGE_JPEG ?
                                        kCVPixelFormatType_420YpCbCr10BiPlanarFullRange :
                                        kCVPixelFormatType_420YpCbCr10BiPlanarVideoRange;
    } else {
        return AVERROR(EINVAL);
    }

    return 0;
}
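/*
 * Maps the supported AVPixelFormat/AVColorRange combinations onto
 * kCVPixelFormatType_* constants: NV12 and YUV420P map to the 8-bit
 * bi-planar/planar 4:2:0 formats and P010LE to the 10-bit bi-planar ones,
 * with the FullRange variant chosen only for an explicit JPEG (full) range.
 * An unset range is treated as MPEG (video) range.
 */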

static void add_color_attr(AVCodecContext *avctx, CFMutableDictionaryRef dict) {
    VTEncContext *vtctx = avctx->priv_data;

    if (vtctx->color_primaries) {
        CFDictionarySetValue(dict,
                             kCVImageBufferColorPrimariesKey,
                             vtctx->color_primaries);
    }

    if (vtctx->transfer_function) {
        CFDictionarySetValue(dict,
                             kCVImageBufferTransferFunctionKey,
                             vtctx->transfer_function);
    }

    if (vtctx->ycbcr_matrix) {
        CFDictionarySetValue(dict,
                             kCVImageBufferYCbCrMatrixKey,
                             vtctx->ycbcr_matrix);
    }
}

static int create_cv_pixel_buffer_info(AVCodecContext* avctx,
                                       CFMutableDictionaryRef* dict)
{
    CFNumberRef cv_color_format_num = NULL;
    CFNumberRef width_num = NULL;
    CFNumberRef height_num = NULL;
    CFMutableDictionaryRef pixel_buffer_info = NULL;
    int cv_color_format;
    int status = get_cv_pixel_format(avctx,
                                     avctx->pix_fmt,
                                     avctx->color_range,
                                     &cv_color_format,
                                     NULL);
    if (status) return status;

    pixel_buffer_info = CFDictionaryCreateMutable(
                            kCFAllocatorDefault,
                            20,
                            &kCFCopyStringDictionaryKeyCallBacks,
                            &kCFTypeDictionaryValueCallBacks);

    if (!pixel_buffer_info) goto pbinfo_nomem;

    cv_color_format_num = CFNumberCreate(kCFAllocatorDefault,
                                         kCFNumberSInt32Type,
                                         &cv_color_format);
    if (!cv_color_format_num) goto pbinfo_nomem;

    CFDictionarySetValue(pixel_buffer_info,
                         kCVPixelBufferPixelFormatTypeKey,
                         cv_color_format_num);
    vt_release_num(&cv_color_format_num);

    width_num = CFNumberCreate(kCFAllocatorDefault,
                               kCFNumberSInt32Type,
                               &avctx->width);
    if (!width_num) goto pbinfo_nomem;

    CFDictionarySetValue(pixel_buffer_info,
                         kCVPixelBufferWidthKey,
                         width_num);
    vt_release_num(&width_num);

    height_num = CFNumberCreate(kCFAllocatorDefault,
                                kCFNumberSInt32Type,
                                &avctx->height);
    if (!height_num) goto pbinfo_nomem;

    CFDictionarySetValue(pixel_buffer_info,
                         kCVPixelBufferHeightKey,
                         height_num);
    vt_release_num(&height_num);

    add_color_attr(avctx, pixel_buffer_info);

    *dict = pixel_buffer_info;
    return 0;

pbinfo_nomem:
    vt_release_num(&cv_color_format_num);
    vt_release_num(&width_num);
    vt_release_num(&height_num);
    if (pixel_buffer_info) CFRelease(pixel_buffer_info);

    return AVERROR(ENOMEM);
}

static int get_cv_color_primaries(AVCodecContext *avctx,
                                  CFStringRef *primaries)
{
    enum AVColorPrimaries pri = avctx->color_primaries;
    switch (pri) {
        case AVCOL_PRI_UNSPECIFIED:
            *primaries = NULL;
            break;

        case AVCOL_PRI_BT470BG:
            *primaries = kCVImageBufferColorPrimaries_EBU_3213;
            break;

        case AVCOL_PRI_SMPTE170M:
            *primaries = kCVImageBufferColorPrimaries_SMPTE_C;
            break;

        case AVCOL_PRI_BT709:
            *primaries = kCVImageBufferColorPrimaries_ITU_R_709_2;
            break;

        case AVCOL_PRI_BT2020:
            *primaries = compat_keys.kCVImageBufferColorPrimaries_ITU_R_2020;
            break;

        default:
            av_log(avctx, AV_LOG_ERROR, "Color primaries %s is not supported.\n", av_color_primaries_name(pri));
            *primaries = NULL;
            return -1;
    }

    return 0;
}

static int get_cv_transfer_function(AVCodecContext *avctx,
                                    CFStringRef *transfer_fnc,
                                    CFNumberRef *gamma_level)
{
    enum AVColorTransferCharacteristic trc = avctx->color_trc;
    Float32 gamma;
    *gamma_level = NULL;

    switch (trc) {
        case AVCOL_TRC_UNSPECIFIED:
            *transfer_fnc = NULL;
            break;

        case AVCOL_TRC_BT709:
            *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_709_2;
            break;

        case AVCOL_TRC_SMPTE240M:
            *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_240M_1995;
            break;

#if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_SMPTE_ST_2084_PQ
        case AVCOL_TRC_SMPTE2084:
            *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_ST_2084_PQ;
            break;
#endif
#if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_LINEAR
        case AVCOL_TRC_LINEAR:
            *transfer_fnc = kCVImageBufferTransferFunction_Linear;
            break;
#endif
#if HAVE_KCVIMAGEBUFFERTRANSFERFUNCTION_ITU_R_2100_HLG
        case AVCOL_TRC_ARIB_STD_B67:
            *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_2100_HLG;
            break;
#endif

        case AVCOL_TRC_GAMMA22:
            gamma = 2.2;
            *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
            *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
            break;

        case AVCOL_TRC_GAMMA28:
            gamma = 2.8;
            *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
            *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
            break;

        case AVCOL_TRC_BT2020_10:
        case AVCOL_TRC_BT2020_12:
            *transfer_fnc = compat_keys.kCVImageBufferTransferFunction_ITU_R_2020;
            break;

        default:
            *transfer_fnc = NULL;
            av_log(avctx, AV_LOG_ERROR, "Transfer function %s is not supported.\n", av_color_transfer_name(trc));
            return -1;
    }

    return 0;
}

static int get_cv_ycbcr_matrix(AVCodecContext *avctx, CFStringRef *matrix) {
    switch(avctx->colorspace) {
        case AVCOL_SPC_BT709:
            *matrix = kCVImageBufferYCbCrMatrix_ITU_R_709_2;
            break;

        case AVCOL_SPC_UNSPECIFIED:
            *matrix = NULL;
            break;

        case AVCOL_SPC_BT470BG:
        case AVCOL_SPC_SMPTE170M:
            *matrix = kCVImageBufferYCbCrMatrix_ITU_R_601_4;
            break;

        case AVCOL_SPC_SMPTE240M:
            *matrix = kCVImageBufferYCbCrMatrix_SMPTE_240M_1995;
            break;

        case AVCOL_SPC_BT2020_NCL:
            *matrix = compat_keys.kCVImageBufferYCbCrMatrix_ITU_R_2020;
            break;

        default:
            av_log(avctx, AV_LOG_ERROR, "Color space %s is not supported.\n", av_color_space_name(avctx->colorspace));
            return -1;
    }

    return 0;
}

static int vtenc_create_encoder(AVCodecContext   *avctx,
                                CMVideoCodecType codec_type,
                                CFStringRef      profile_level,
                                CFNumberRef      gamma_level,
                                CFDictionaryRef  enc_info,
                                CFDictionaryRef  pixel_buffer_info,
                                VTCompressionSessionRef *session)
{
    VTEncContext *vtctx = avctx->priv_data;
    SInt32       bit_rate = avctx->bit_rate;
    SInt32       max_rate = avctx->rc_max_rate;
    CFNumberRef  bit_rate_num;
    CFNumberRef  bytes_per_second;
    CFNumberRef  one_second;
    CFArrayRef   data_rate_limits;
    int64_t      bytes_per_second_value = 0;
    int64_t      one_second_value = 0;
    void         *nums[2];

    int status = VTCompressionSessionCreate(kCFAllocatorDefault,
                                            avctx->width,
                                            avctx->height,
                                            codec_type,
                                            enc_info,
                                            pixel_buffer_info,
                                            kCFAllocatorDefault,
                                            vtenc_output_callback,
                                            avctx,
                                            session);

    if (status || !vtctx->session) {
        av_log(avctx, AV_LOG_ERROR, "Error: cannot create compression session: %d\n", status);

#if !TARGET_OS_IPHONE
        if (!vtctx->allow_sw) {
            av_log(avctx, AV_LOG_ERROR, "Try -allow_sw 1. The hardware encoder may be busy, or not supported.\n");
        }
#endif

        return AVERROR_EXTERNAL;
    }

    bit_rate_num = CFNumberCreate(kCFAllocatorDefault,
                                  kCFNumberSInt32Type,
                                  &bit_rate);
    if (!bit_rate_num) return AVERROR(ENOMEM);

    status = VTSessionSetProperty(vtctx->session,
                                  kVTCompressionPropertyKey_AverageBitRate,
                                  bit_rate_num);
    CFRelease(bit_rate_num);

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error setting bitrate property: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    if (vtctx->codec_id == AV_CODEC_ID_H264 && max_rate > 0) {
        // kVTCompressionPropertyKey_DataRateLimits is not available for HEVC
        bytes_per_second_value = max_rate >> 3;
        bytes_per_second = CFNumberCreate(kCFAllocatorDefault,
                                          kCFNumberSInt64Type,
                                          &bytes_per_second_value);
        if (!bytes_per_second) {
            return AVERROR(ENOMEM);
        }
        one_second_value = 1;
        one_second = CFNumberCreate(kCFAllocatorDefault,
                                    kCFNumberSInt64Type,
                                    &one_second_value);
        if (!one_second) {
            CFRelease(bytes_per_second);
            return AVERROR(ENOMEM);
        }
        nums[0] = (void *)bytes_per_second;
        nums[1] = (void *)one_second;
        data_rate_limits = CFArrayCreate(kCFAllocatorDefault,
                                         (const void **)nums,
                                         2,
                                         &kCFTypeArrayCallBacks);

        if (!data_rate_limits) {
            CFRelease(bytes_per_second);
            CFRelease(one_second);
            return AVERROR(ENOMEM);
        }
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_DataRateLimits,
                                      data_rate_limits);

        CFRelease(bytes_per_second);
        CFRelease(one_second);
        CFRelease(data_rate_limits);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting max bitrate property: %d\n", status);
            return AVERROR_EXTERNAL;
        }
    }
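    /*
     * Note: kVTCompressionPropertyKey_DataRateLimits takes pairs of
     * [bytes, seconds]; the single [max_rate / 8, 1] pair set above caps the
     * output at roughly rc_max_rate bits over any one-second window.
     */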

    if (profile_level) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_ProfileLevel,
                                      profile_level);
        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting profile/level property: %d. Output will be encoded using a supported profile/level combination.\n", status);
        }
    }

    if (avctx->gop_size > 0) {
        CFNumberRef interval = CFNumberCreate(kCFAllocatorDefault,
                                              kCFNumberIntType,
                                              &avctx->gop_size);
        if (!interval) {
            return AVERROR(ENOMEM);
        }

        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_MaxKeyFrameInterval,
                                      interval);
        CFRelease(interval);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting 'max key-frame interval' property: %d\n", status);
            return AVERROR_EXTERNAL;
        }
    }

    if (vtctx->frames_before) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_MoreFramesBeforeStart,
                                      kCFBooleanTrue);

        if (status == kVTPropertyNotSupportedErr) {
            av_log(avctx, AV_LOG_WARNING, "frames_before property is not supported on this device. Ignoring.\n");
        } else if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting frames_before property: %d\n", status);
        }
    }

    if (vtctx->frames_after) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_MoreFramesAfterEnd,
                                      kCFBooleanTrue);

        if (status == kVTPropertyNotSupportedErr) {
            av_log(avctx, AV_LOG_WARNING, "frames_after property is not supported on this device. Ignoring.\n");
        } else if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting frames_after property: %d\n", status);
        }
    }

    if (avctx->sample_aspect_ratio.num != 0) {
        CFNumberRef num;
        CFNumberRef den;
        CFMutableDictionaryRef par;
        AVRational *avpar = &avctx->sample_aspect_ratio;

        av_reduce(&avpar->num, &avpar->den,
                   avpar->num,  avpar->den,
                  0xFFFFFFFF);

        num = CFNumberCreate(kCFAllocatorDefault,
                             kCFNumberIntType,
                             &avpar->num);

        den = CFNumberCreate(kCFAllocatorDefault,
                             kCFNumberIntType,
                             &avpar->den);



        par = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                        2,
                                        &kCFCopyStringDictionaryKeyCallBacks,
                                        &kCFTypeDictionaryValueCallBacks);

        if (!par || !num || !den) {
            if (par) CFRelease(par);
            if (num) CFRelease(num);
            if (den) CFRelease(den);

            return AVERROR(ENOMEM);
        }

        CFDictionarySetValue(
            par,
            kCMFormatDescriptionKey_PixelAspectRatioHorizontalSpacing,
            num);

        CFDictionarySetValue(
            par,
            kCMFormatDescriptionKey_PixelAspectRatioVerticalSpacing,
            den);

        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_PixelAspectRatio,
                                      par);

        CFRelease(par);
        CFRelease(num);
        CFRelease(den);

        if (status) {
            av_log(avctx,
                   AV_LOG_ERROR,
                   "Error setting pixel aspect ratio to %d:%d: %d.\n",
                   avctx->sample_aspect_ratio.num,
                   avctx->sample_aspect_ratio.den,
                   status);

            return AVERROR_EXTERNAL;
        }
    }


    if (vtctx->transfer_function) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_TransferFunction,
                                      vtctx->transfer_function);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set transfer function: %d\n", status);
        }
    }


    if (vtctx->ycbcr_matrix) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_YCbCrMatrix,
                                      vtctx->ycbcr_matrix);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set ycbcr matrix: %d\n", status);
        }
    }


    if (vtctx->color_primaries) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_ColorPrimaries,
                                      vtctx->color_primaries);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set color primaries: %d\n", status);
        }
    }

    if (gamma_level) {
        status = VTSessionSetProperty(vtctx->session,
                                      kCVImageBufferGammaLevelKey,
                                      gamma_level);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set gamma level: %d\n", status);
        }
    }

    if (!vtctx->has_b_frames) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_AllowFrameReordering,
                                      kCFBooleanFalse);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting 'allow frame reordering' property: %d\n", status);
            return AVERROR_EXTERNAL;
        }
    }

    if (vtctx->entropy != VT_ENTROPY_NOT_SET) {
        CFStringRef entropy = vtctx->entropy == VT_CABAC ?
                                compat_keys.kVTH264EntropyMode_CABAC:
                                compat_keys.kVTH264EntropyMode_CAVLC;

        status = VTSessionSetProperty(vtctx->session,
                                      compat_keys.kVTCompressionPropertyKey_H264EntropyMode,
                                      entropy);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting entropy property: %d\n", status);
        }
    }

    if (vtctx->realtime) {
        status = VTSessionSetProperty(vtctx->session,
                                      compat_keys.kVTCompressionPropertyKey_RealTime,
                                      kCFBooleanTrue);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting realtime property: %d\n", status);
        }
    }

    status = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error: cannot prepare encoder: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    return 0;
}

static int vtenc_configure_encoder(AVCodecContext *avctx)
{
    CFMutableDictionaryRef enc_info;
    CFMutableDictionaryRef pixel_buffer_info;
    CMVideoCodecType       codec_type;
    VTEncContext           *vtctx = avctx->priv_data;
    CFStringRef            profile_level;
    CFNumberRef            gamma_level = NULL;
    int                    status;

    codec_type = get_cm_codec_type(avctx->codec_id);
    if (!codec_type) {
        av_log(avctx, AV_LOG_ERROR, "Error: no mapping for AVCodecID %d\n", avctx->codec_id);
        return AVERROR(EINVAL);
    }

    vtctx->codec_id = avctx->codec_id;

    if (vtctx->codec_id == AV_CODEC_ID_H264) {
        vtctx->get_param_set_func = CMVideoFormatDescriptionGetH264ParameterSetAtIndex;

        vtctx->has_b_frames = avctx->max_b_frames > 0;
        if(vtctx->has_b_frames && vtctx->profile == H264_PROF_BASELINE){
            av_log(avctx, AV_LOG_WARNING, "Cannot use B-frames with baseline profile. Output will not contain B-frames.\n");
            vtctx->has_b_frames = false;
        }

        if (vtctx->entropy == VT_CABAC && vtctx->profile == H264_PROF_BASELINE) {
            av_log(avctx, AV_LOG_WARNING, "CABAC entropy requires 'main' or 'high' profile, but baseline was requested. Encode will not use CABAC entropy.\n");
            vtctx->entropy = VT_ENTROPY_NOT_SET;
        }

        if (!get_vt_h264_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
    } else {
        vtctx->get_param_set_func = compat_keys.CMVideoFormatDescriptionGetHEVCParameterSetAtIndex;
        if (!vtctx->get_param_set_func) return AVERROR(EINVAL);
        if (!get_vt_hevc_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);
    }

    enc_info = CFDictionaryCreateMutable(
        kCFAllocatorDefault,
        20,
        &kCFCopyStringDictionaryKeyCallBacks,
        &kCFTypeDictionaryValueCallBacks
    );

    if (!enc_info) return AVERROR(ENOMEM);

#if !TARGET_OS_IPHONE
    if(vtctx->require_sw) {
        CFDictionarySetValue(enc_info,
                             compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
                             kCFBooleanFalse);
    } else if (!vtctx->allow_sw) {
        CFDictionarySetValue(enc_info,
                             compat_keys.kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder,
                             kCFBooleanTrue);
    } else {
        CFDictionarySetValue(enc_info,
                             compat_keys.kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
                             kCFBooleanTrue);
    }
#endif

    if (avctx->pix_fmt != AV_PIX_FMT_VIDEOTOOLBOX) {
        status = create_cv_pixel_buffer_info(avctx, &pixel_buffer_info);
        if (status)
            goto init_cleanup;
    } else {
        pixel_buffer_info = NULL;
    }

    vtctx->dts_delta = vtctx->has_b_frames ? -1 : 0;
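    /*
     * When frame reordering (B-frames) is enabled, DTS must lag PTS; the -1
     * recorded here is the per-frame offset the packet-output path uses to
     * shift DTS back by one frame duration.
     */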

    get_cv_transfer_function(avctx, &vtctx->transfer_function, &gamma_level);
    get_cv_ycbcr_matrix(avctx, &vtctx->ycbcr_matrix);
    get_cv_color_primaries(avctx, &vtctx->color_primaries);


    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
        status = vtenc_populate_extradata(avctx,
                                          codec_type,
                                          profile_level,
                                          gamma_level,
                                          enc_info,
                                          pixel_buffer_info);
        if (status)
            goto init_cleanup;
    }

    status = vtenc_create_encoder(avctx,
                                  codec_type,
                                  profile_level,
                                  gamma_level,
                                  enc_info,
                                  pixel_buffer_info,
                                  &vtctx->session);

init_cleanup:
    if (gamma_level)
        CFRelease(gamma_level);

    if (pixel_buffer_info)
        CFRelease(pixel_buffer_info);

    CFRelease(enc_info);

    return status;
}

static av_cold int vtenc_init(AVCodecContext *avctx)
{
    VTEncContext    *vtctx = avctx->priv_data;
    CFBooleanRef    has_b_frames_cfbool;
    int             status;

    pthread_once(&once_ctrl, loadVTEncSymbols);

    pthread_mutex_init(&vtctx->lock, NULL);
    pthread_cond_init(&vtctx->cv_sample_sent, NULL);

    vtctx->session = NULL;
    status = vtenc_configure_encoder(avctx);
    if (status) return status;

    status = VTSessionCopyProperty(vtctx->session,
                                   kVTCompressionPropertyKey_AllowFrameReordering,
                                   kCFAllocatorDefault,
                                   &has_b_frames_cfbool);

    if (!status && has_b_frames_cfbool) {
        //Some devices don't output B-frames for main profile, even if requested.
        vtctx->has_b_frames = CFBooleanGetValue(has_b_frames_cfbool);
        CFRelease(has_b_frames_cfbool);
    }
    avctx->has_b_frames = vtctx->has_b_frames;

    return 0;
}
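/*
 * Reading kVTCompressionPropertyKey_AllowFrameReordering back from the
 * session, rather than trusting the requested value, lets avctx->has_b_frames
 * reflect what the encoder will actually produce on this device.
 */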

static void vtenc_get_frame_info(CMSampleBufferRef buffer, bool *is_key_frame)
{
    CFArrayRef      attachments;
    CFDictionaryRef attachment;
    CFBooleanRef    not_sync;
    CFIndex         len;

    attachments = CMSampleBufferGetSampleAttachmentsArray(buffer, false);
    len = !attachments ? 0 : CFArrayGetCount(attachments);

    if (!len) {
        *is_key_frame = true;
        return;
    }

    attachment = CFArrayGetValueAtIndex(attachments, 0);

    if (CFDictionaryGetValueIfPresent(attachment,
                                      kCMSampleAttachmentKey_NotSync,
                                      (const void **)&not_sync))
    {
        *is_key_frame = !CFBooleanGetValue(not_sync);
    } else {
        *is_key_frame = true;
    }
}

static int is_post_sei_nal_type(int nal_type){
    return nal_type != H264_NAL_SEI &&
           nal_type != H264_NAL_SPS &&
           nal_type != H264_NAL_PPS &&
           nal_type != H264_NAL_AUD;
}

1494 /*
1495  * Finds the end of the SEI payloads in an SEI NAL unit; *sei_end points just
1496  * past the last payload. Returns its size, 0 if not an SEI, or an AVERROR.
1497  */
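/*
 * Each SEI payload is preceded by its type and its size, each coded as a run
 * of 0xFF bytes followed by a terminating byte (e.g. a type-5 payload of 20
 * bytes starts with 0x05 0x14); the loops below accumulate those bytes.
 */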
1498 static int find_sei_end(AVCodecContext *avctx,
1499                         uint8_t        *nal_data,
1500                         size_t          nal_size,
1501                         uint8_t       **sei_end)
1502 {
1503     int nal_type;
1504     size_t sei_payload_size = 0;
1505     int sei_payload_type = 0;
1506     *sei_end = NULL;
1507     uint8_t *nal_start = nal_data;
1508
1509     if (!nal_size)
1510         return 0;
1511
1512     nal_type = *nal_data & 0x1F;
1513     if (nal_type != H264_NAL_SEI)
1514         return 0;
1515
1516     nal_data++;
1517     nal_size--;
1518
1519     if (nal_data[nal_size - 1] == 0x80)
1520         nal_size--;
1521
1522     while (nal_size > 0 && *nal_data > 0) {
1523         do{
1524             sei_payload_type += *nal_data;
1525             nal_data++;
1526             nal_size--;
1527         } while (nal_size > 0 && *nal_data == 0xFF);
1528
1529         if (!nal_size) {
1530             av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL unit while parsing payload type.\n");
1531             return AVERROR_INVALIDDATA;
1532         }
1533
1534         do{
1535             sei_payload_size += *nal_data;
1536             nal_data++;
1537             nal_size--;
1538         } while (nal_size > 0 && *nal_data == 0xFF);
1539
1540         if (nal_size < sei_payload_size) {
1541             av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL unit while parsing payload size.\n");
1542             return AVERROR_INVALIDDATA;
1543         }
1544
1545         nal_data += sei_payload_size;
1546         nal_size -= sei_payload_size;
1547     }
1548
1549     *sei_end = nal_data;
1550
1551     return nal_data - nal_start + 1;
1552 }
1553
1554 /**
1555  * Copies the data, inserting emulation prevention bytes as needed.
1556  * Existing data in the destination can be taken into account by providing
1557  * dst with a dst_offset > 0.
1558  *
1559  * @return The number of bytes copied on success. On failure, the negative of
1560  *         the number of bytes needed to copy src is returned.
1561  */
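/*
 * For example, a source run of 00 00 01 is emitted as 00 00 03 01 and
 * 00 00 00 becomes 00 00 03 00; after two zeros, source bytes greater than 3
 * are copied unchanged and the zero counter is reset.
 */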
1562 static int copy_emulation_prev(const uint8_t *src,
1563                                size_t         src_size,
1564                                uint8_t       *dst,
1565                                ssize_t        dst_offset,
1566                                size_t         dst_size)
1567 {
1568     int zeros = 0;
1569     int wrote_bytes;
1570     uint8_t* dst_start;
1571     uint8_t* dst_end = dst + dst_size;
1572     const uint8_t* src_end = src + src_size;
1573     int start_at = dst_offset > 2 ? dst_offset - 2 : 0;
1574     int i;
1575     for (i = start_at; i < dst_offset && i < dst_size; i++) {
1576         if (!dst[i])
1577             zeros++;
1578         else
1579             zeros = 0;
1580     }
1581
1582     dst += dst_offset;
1583     dst_start = dst;
1584     for (; src < src_end; src++, dst++) {
1585         if (zeros == 2) {
1586             int insert_ep3_byte = *src <= 3;
1587             if (insert_ep3_byte) {
1588                 if (dst < dst_end)
1589                     *dst = 3;
1590                 dst++;
1591             }
1592
1593             zeros = 0;
1594         }
1595
1596         if (dst < dst_end)
1597             *dst = *src;
1598
1599         if (!*src)
1600             zeros++;
1601         else
1602             zeros = 0;
1603     }
1604
1605     wrote_bytes = dst - dst_start;
1606
1607     if (dst > dst_end)
1608         return -wrote_bytes;
1609
1610     return wrote_bytes;
1611 }
1612
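/**
 * Writes a single SEI message into dst: the payload type and size are written
 * in SEI's 0xFF-run coding, followed by the payload with emulation prevention
 * bytes inserted. The NAL unit header and trailing bits are written by the
 * caller.
 *
 * @return The number of bytes written, or AVERROR_BUFFER_TOO_SMALL.
 */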
1613 static int write_sei(const ExtraSEI *sei,
1614                      int             sei_type,
1615                      uint8_t        *dst,
1616                      size_t          dst_size)
1617 {
1618     uint8_t *sei_start = dst;
1619     size_t remaining_sei_size = sei->size;
1620     size_t remaining_dst_size = dst_size;
1621     int header_bytes;
1622     int bytes_written;
1623     ssize_t offset;
1624
1625     if (!remaining_dst_size)
1626         return AVERROR_BUFFER_TOO_SMALL;
1627
1628     while (sei_type && remaining_dst_size != 0) {
1629         int sei_byte = sei_type > 255 ? 255 : sei_type;
1630         *dst = sei_byte;
1631
1632         sei_type -= sei_byte;
1633         dst++;
1634         remaining_dst_size--;
1635     }
1636
1637     if (!remaining_dst_size)
1638         return AVERROR_BUFFER_TOO_SMALL;
1639
1640     while (remaining_sei_size && remaining_dst_size != 0) {
1641         int size_byte = remaining_sei_size > 255 ? 255 : remaining_sei_size;
1642         *dst = size_byte;
1643
1644         remaining_sei_size -= size_byte;
1645         dst++;
1646         remaining_dst_size--;
1647     }
1648
1649     if (remaining_dst_size < sei->size)
1650         return AVERROR_BUFFER_TOO_SMALL;
1651
1652     header_bytes = dst - sei_start;
1653
1654     offset = header_bytes;
1655     bytes_written = copy_emulation_prev(sei->data,
1656                                         sei->size,
1657                                         sei_start,
1658                                         offset,
1659                                         dst_size);
1660     if (bytes_written < 0)
1661         return AVERROR_BUFFER_TOO_SMALL;
1662
1663     bytes_written += header_bytes;
1664     return bytes_written;
1665 }
1666
1667 /**
1668  * Copies NAL units and replaces length codes with
1669  * H.264 Annex B start codes. On failure, the contents of
1670  * dst_data may have been modified.
1671  *
1672  * @param length_code_size Byte length of each length code
1673  * @param sample_buffer NAL units prefixed with length codes.
1674  * @param sei Optional A53 closed captions SEI data.
1675  * @param dst_data Must be zeroed before calling this function.
1676  *                 Contains the copied NAL units prefixed with
1677  *                 start codes when the function returns
1678  *                 successfully.
1679  * @param dst_size Length of dst_data
1680  * @return 0 on success
1681  *         AVERROR_INVALIDDATA if length_code_size is invalid
1682  *         AVERROR_BUFFER_TOO_SMALL if dst_data is too small
1683  *         or if a length_code in src_data specifies data beyond
1684  *         or if a length code in sample_buffer specifies data beyond
1685  */
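/*
 * For example, with a 4-byte length code an input NAL unit
 *     00 00 00 05 <5 payload bytes>
 * is written out as
 *     <start_code> <5 payload bytes>
 * where start_code is the Annex B prefix defined earlier in this file
 * (typically 00 00 00 01).
 */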
1686 static int copy_replace_length_codes(
1687     AVCodecContext *avctx,
1688     size_t        length_code_size,
1689     CMSampleBufferRef sample_buffer,
1690     ExtraSEI      *sei,
1691     uint8_t       *dst_data,
1692     size_t        dst_size)
1693 {
1694     size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1695     size_t remaining_src_size = src_size;
1696     size_t remaining_dst_size = dst_size;
1697     size_t src_offset = 0;
1698     int wrote_sei = 0;
1699     int status;
1700     uint8_t size_buf[4];
1701     uint8_t nal_type;
1702     CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
1703
1704     if (length_code_size > 4) {
1705         return AVERROR_INVALIDDATA;
1706     }
1707
1708     while (remaining_src_size > 0) {
1709         size_t curr_src_len;
1710         size_t curr_dst_len;
1711         size_t box_len = 0;
1712         size_t i;
1713
1714         uint8_t       *dst_box;
1715
1716         status = CMBlockBufferCopyDataBytes(block,
1717                                             src_offset,
1718                                             length_code_size,
1719                                             size_buf);
1720         if (status) {
1721             av_log(avctx, AV_LOG_ERROR, "Cannot copy length: %d\n", status);
1722             return AVERROR_EXTERNAL;
1723         }
1724
1725         status = CMBlockBufferCopyDataBytes(block,
1726                                             src_offset + length_code_size,
1727                                             1,
1728                                             &nal_type);
1729
1730         if (status) {
1731             av_log(avctx, AV_LOG_ERROR, "Cannot copy type: %d\n", status);
1732             return AVERROR_EXTERNAL;
1733         }
1734
1735         nal_type &= 0x1F;
1736
1737         for (i = 0; i < length_code_size; i++) {
1738             box_len <<= 8;
1739             box_len |= size_buf[i];
1740         }
1741
1742         if (sei && !wrote_sei && is_post_sei_nal_type(nal_type)) {
1743             //No SEI NAL unit - insert.
1744             int wrote_bytes;
1745
1746             memcpy(dst_data, start_code, sizeof(start_code));
1747             dst_data += sizeof(start_code);
1748             remaining_dst_size -= sizeof(start_code);
1749
1750             *dst_data = H264_NAL_SEI;
1751             dst_data++;
1752             remaining_dst_size--;
1753
1754             wrote_bytes = write_sei(sei,
1755                                     H264_SEI_TYPE_USER_DATA_REGISTERED,
1756                                     dst_data,
1757                                     remaining_dst_size);
1758
1759             if (wrote_bytes < 0)
1760                 return wrote_bytes;
1761
1762             remaining_dst_size -= wrote_bytes;
1763             dst_data += wrote_bytes;
1764
1765             if (remaining_dst_size <= 0)
1766                 return AVERROR_BUFFER_TOO_SMALL;
1767
1768             *dst_data = 0x80;
1769
1770             dst_data++;
1771             remaining_dst_size--;
1772
1773             wrote_sei = 1;
1774         }
1775
1776         curr_src_len = box_len + length_code_size;
1777         curr_dst_len = box_len + sizeof(start_code);
1778
1779         if (remaining_src_size < curr_src_len) {
1780             return AVERROR_BUFFER_TOO_SMALL;
1781         }
1782
1783         if (remaining_dst_size < curr_dst_len) {
1784             return AVERROR_BUFFER_TOO_SMALL;
1785         }
1786
1787         dst_box = dst_data + sizeof(start_code);
1788
1789         memcpy(dst_data, start_code, sizeof(start_code));
1790         status = CMBlockBufferCopyDataBytes(block,
1791                                             src_offset + length_code_size,
1792                                             box_len,
1793                                             dst_box);
1794
1795         if (status) {
1796             av_log(avctx, AV_LOG_ERROR, "Cannot copy data: %d\n", status);
1797             return AVERROR_EXTERNAL;
1798         }
1799
1800         if (sei && !wrote_sei && nal_type == H264_NAL_SEI) {
1801             //Found SEI NAL unit - append.
1802             int wrote_bytes;
1803             int old_sei_length;
1804             int extra_bytes;
1805             uint8_t *new_sei;
1806             old_sei_length = find_sei_end(avctx, dst_box, box_len, &new_sei);
1807             if (old_sei_length < 0)
1808                 return old_sei_length;
1809
1810             wrote_bytes = write_sei(sei,
1811                                     H264_SEI_TYPE_USER_DATA_REGISTERED,
1812                                     new_sei,
1813                                     remaining_dst_size - old_sei_length);
1814             if (wrote_bytes < 0)
1815                 return wrote_bytes;
1816
1817             if (new_sei + wrote_bytes >= dst_data + remaining_dst_size)
1818                 return AVERROR_BUFFER_TOO_SMALL;
1819
1820             new_sei[wrote_bytes++] = 0x80;
1821             extra_bytes = wrote_bytes - (dst_box + box_len - new_sei);
1822
1823             dst_data += extra_bytes;
1824             remaining_dst_size -= extra_bytes;
1825
1826             wrote_sei = 1;
1827         }
1828
1829         src_offset += curr_src_len;
1830         dst_data += curr_dst_len;
1831
1832         remaining_src_size -= curr_src_len;
1833         remaining_dst_size -= curr_dst_len;
1834     }
1835
1836     return 0;
1837 }
1838
1839 /**
1840  * Returns a number of bytes sufficient to contain the SEI message of the
1841  * given type. It may be greater than the minimum required.
1842  */
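/*
 * For example, a 300-byte A53 payload of type 4 reserves one type byte, two
 * size bytes (0xFF + 45) and the emulation-prevention-expanded payload size.
 */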
1843 static int get_sei_msg_bytes(const ExtraSEI* sei, int type){
1844     int copied_size;
1845     if (sei->size == 0)
1846         return 0;
1847
1848     copied_size = -copy_emulation_prev(sei->data,
1849                                        sei->size,
1850                                        NULL,
1851                                        0,
1852                                        0);
1853
1854     if ((sei->size % 255) == 0) //may result in an extra byte
1855         copied_size++;
1856
1857     return copied_size + sei->size / 255 + 1 + type / 255 + 1;
1858 }
1859
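/*
 * Converts an encoded CMSampleBuffer into an AVPacket: prepends the parameter
 * sets on key frames when global headers are disabled, optionally inserts an
 * A53 closed captions SEI, rewrites length codes to Annex B start codes and
 * derives pts/dts (applying dts_delta when B-frames are in use).
 */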
1860 static int vtenc_cm_to_avpacket(
1861     AVCodecContext    *avctx,
1862     CMSampleBufferRef sample_buffer,
1863     AVPacket          *pkt,
1864     ExtraSEI          *sei)
1865 {
1866     VTEncContext *vtctx = avctx->priv_data;
1867
1868     int     status;
1869     bool    is_key_frame;
1870     bool    add_header;
1871     size_t  length_code_size;
1872     size_t  header_size = 0;
1873     size_t  in_buf_size;
1874     size_t  out_buf_size;
1875     size_t  sei_nalu_size = 0;
1876     int64_t dts_delta;
1877     int64_t time_base_num;
1878     int nalu_count;
1879     CMTime  pts;
1880     CMTime  dts;
1881     CMVideoFormatDescriptionRef vid_fmt;
1882
1883
1884     vtenc_get_frame_info(sample_buffer, &is_key_frame);
1885     status = get_length_code_size(avctx, sample_buffer, &length_code_size);
1886     if (status) return status;
1887
1888     add_header = is_key_frame && !(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER);
1889
1890     if (add_header) {
1891         vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
1892         if (!vid_fmt) {
1893             av_log(avctx, AV_LOG_ERROR, "Cannot get format description.\n");
1894             return AVERROR_EXTERNAL;
1895         }
1896
1897         int status = get_params_size(avctx, vid_fmt, &header_size);
1898         if (status) return status;
1899     }
1900
1901     status = count_nalus(length_code_size, sample_buffer, &nalu_count);
1902     if(status)
1903         return status;
1904
1905     if (sei) {
1906         size_t msg_size = get_sei_msg_bytes(sei,
1907                                             H264_SEI_TYPE_USER_DATA_REGISTERED);
1908
1909         sei_nalu_size = sizeof(start_code) + 1 + msg_size + 1;
1910     }
1911
1912     in_buf_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1913     out_buf_size = header_size +
1914                    in_buf_size +
1915                    sei_nalu_size +
1916                    nalu_count * ((int)sizeof(start_code) - (int)length_code_size);
1917
1918     status = ff_alloc_packet2(avctx, pkt, out_buf_size, out_buf_size);
1919     if (status < 0)
1920         return status;
1921
1922     if (add_header) {
1923         status = copy_param_sets(avctx, vid_fmt, pkt->data, out_buf_size);
1924         if(status) return status;
1925     }
1926
1927     status = copy_replace_length_codes(
1928         avctx,
1929         length_code_size,
1930         sample_buffer,
1931         sei,
1932         pkt->data + header_size,
1933         pkt->size - header_size
1934     );
1935
1936     if (status) {
1937         av_log(avctx, AV_LOG_ERROR, "Error copying packet data: %d\n", status);
1938         return status;
1939     }
1940
1941     if (is_key_frame) {
1942         pkt->flags |= AV_PKT_FLAG_KEY;
1943     }
1944
1945     pts = CMSampleBufferGetPresentationTimeStamp(sample_buffer);
1946     dts = CMSampleBufferGetDecodeTimeStamp      (sample_buffer);
1947
1948     if (CMTIME_IS_INVALID(dts)) {
1949         if (!vtctx->has_b_frames) {
1950             dts = pts;
1951         } else {
1952             av_log(avctx, AV_LOG_ERROR, "DTS is invalid.\n");
1953             return AVERROR_EXTERNAL;
1954         }
1955     }
1956
1957     dts_delta = vtctx->dts_delta >= 0 ? vtctx->dts_delta : 0;
1958     time_base_num = avctx->time_base.num;
1959     pkt->pts = pts.value / time_base_num;
1960     pkt->dts = dts.value / time_base_num - dts_delta;
1961     pkt->size = out_buf_size;
1962
1963     return 0;
1964 }
1965
1966 /*
1967  * contiguous_buf_size is set to 0 if the planes are not contiguous in memory,
1968  * or to the total size of the single buffer containing all planes if they are.
1969  */
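/*
 * For example, 1280x720 NV12 yields two planes: 1280x720 luma and 640x360
 * interleaved CbCr, with the strides taken from the AVFrame's linesize[].
 */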
1970 static int get_cv_pixel_info(
1971     AVCodecContext *avctx,
1972     const AVFrame  *frame,
1973     int            *color,
1974     int            *plane_count,
1975     size_t         *widths,
1976     size_t         *heights,
1977     size_t         *strides,
1978     size_t         *contiguous_buf_size)
1979 {
1980     VTEncContext *vtctx = avctx->priv_data;
1981     int av_format       = frame->format;
1982     int av_color_range  = frame->color_range;
1983     int i;
1984     int range_guessed;
1985     int status;
1986
1987     status = get_cv_pixel_format(avctx, av_format, av_color_range, color, &range_guessed);
1988     if (status) {
1989         av_log(avctx,
1990             AV_LOG_ERROR,
1991             "Could not get pixel format for color format '%s' range '%s'.\n",
1992             av_get_pix_fmt_name(av_format),
1993             av_color_range > AVCOL_RANGE_UNSPECIFIED &&
1994             av_color_range < AVCOL_RANGE_NB ?
1995                av_color_range_name(av_color_range) :
1996                "Unknown");
1997
1998         return AVERROR(EINVAL);
1999     }
2000
2001     if (range_guessed) {
2002         if (!vtctx->warned_color_range) {
2003             vtctx->warned_color_range = true;
2004             av_log(avctx,
2005                    AV_LOG_WARNING,
2006                    "Color range not set for %s. Using MPEG range.\n",
2007                    av_get_pix_fmt_name(av_format));
2008         }
2009     }
2010
2011     switch (av_format) {
2012     case AV_PIX_FMT_NV12:
2013         *plane_count = 2;
2014
2015         widths [0] = avctx->width;
2016         heights[0] = avctx->height;
2017         strides[0] = frame ? frame->linesize[0] : avctx->width;
2018
2019         widths [1] = (avctx->width  + 1) / 2;
2020         heights[1] = (avctx->height + 1) / 2;
2021         strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) & -2;
2022         break;
2023
2024     case AV_PIX_FMT_YUV420P:
2025         *plane_count = 3;
2026
2027         widths [0] = avctx->width;
2028         heights[0] = avctx->height;
2029         strides[0] = frame ? frame->linesize[0] : avctx->width;
2030
2031         widths [1] = (avctx->width  + 1) / 2;
2032         heights[1] = (avctx->height + 1) / 2;
2033         strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) / 2;
2034
2035         widths [2] = (avctx->width  + 1) / 2;
2036         heights[2] = (avctx->height + 1) / 2;
2037         strides[2] = frame ? frame->linesize[2] : (avctx->width + 1) / 2;
2038         break;
2039
2040     case AV_PIX_FMT_P010LE:
2041         *plane_count = 2;
2042         widths[0] = avctx->width;
2043         heights[0] = avctx->height;
2044         strides[0] = frame ? frame->linesize[0] : (avctx->width * 2 + 63) & -64;
2045
2046         widths[1] = (avctx->width + 1) / 2;
2047         heights[1] = (avctx->height + 1) / 2;
2048         strides[1] = frame ? frame->linesize[1] : ((avctx->width + 1) / 2 + 63) & -64;
2049         break;
2050
2051     default:
2052         av_log(
2053                avctx,
2054                AV_LOG_ERROR,
2055                "Could not get frame format info for color %d range %d.\n",
2056                av_format,
2057                av_color_range);
2058
2059         return AVERROR(EINVAL);
2060     }
2061
2062     *contiguous_buf_size = 0;
2063     for (i = 0; i < *plane_count; i++) {
2064         if (i < *plane_count - 1 &&
2065             frame->data[i] + strides[i] * heights[i] != frame->data[i + 1]) {
2066             *contiguous_buf_size = 0;
2067             break;
2068         }
2069
2070         *contiguous_buf_size += strides[i] * heights[i];
2071     }
2072
2073     return 0;
2074 }
2075
2076 //Not used on OSX - frame is never copied.
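/*
 * Copies the AVFrame planes row by row into the locked CVPixelBuffer,
 * honoring differing source and destination strides and verifying that the
 * AVFrame and the pixel buffer have the same number of planes.
 */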
2077 static int copy_avframe_to_pixel_buffer(AVCodecContext   *avctx,
2078                                         const AVFrame    *frame,
2079                                         CVPixelBufferRef cv_img,
2080                                         const size_t     *plane_strides,
2081                                         const size_t     *plane_rows)
2082 {
2083     int i, j;
2084     size_t plane_count;
2085     int status;
2086     int rows;
2087     int src_stride;
2088     int dst_stride;
2089     uint8_t *src_addr;
2090     uint8_t *dst_addr;
2091     size_t copy_bytes;
2092
2093     status = CVPixelBufferLockBaseAddress(cv_img, 0);
2094     if (status) {
2095         av_log(
2096             avctx,
2097             AV_LOG_ERROR,
2098             "Error: Could not lock base address of CVPixelBuffer: %d.\n",
2099             status
2100         );
2101     }
2102
2103     if (CVPixelBufferIsPlanar(cv_img)) {
2104         plane_count = CVPixelBufferGetPlaneCount(cv_img);
2105         for (i = 0; frame->data[i]; i++) {
2106             if (i == plane_count) {
2107                 CVPixelBufferUnlockBaseAddress(cv_img, 0);
2108                 av_log(avctx,
2109                     AV_LOG_ERROR,
2110                     "Error: different number of planes in AVFrame and CVPixelBuffer.\n"
2111                 );
2112
2113                 return AVERROR_EXTERNAL;
2114             }
2115
2116             dst_addr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cv_img, i);
2117             src_addr = (uint8_t*)frame->data[i];
2118             dst_stride = CVPixelBufferGetBytesPerRowOfPlane(cv_img, i);
2119             src_stride = plane_strides[i];
2120             rows = plane_rows[i];
2121
2122             if (dst_stride == src_stride) {
2123                 memcpy(dst_addr, src_addr, src_stride * rows);
2124             } else {
2125                 copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2126
2127                 for (j = 0; j < rows; j++) {
2128                     memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2129                 }
2130             }
2131         }
2132     } else {
2133         if (frame->data[1]) {
2134             CVPixelBufferUnlockBaseAddress(cv_img, 0);
2135             av_log(avctx,
2136                 AV_LOG_ERROR,
2137                 "Error: different number of planes in AVFrame and non-planar CVPixelBuffer.\n"
2138             );
2139
2140             return AVERROR_EXTERNAL;
2141         }
2142
2143         dst_addr = (uint8_t*)CVPixelBufferGetBaseAddress(cv_img);
2144         src_addr = (uint8_t*)frame->data[0];
2145         dst_stride = CVPixelBufferGetBytesPerRow(cv_img);
2146         src_stride = plane_strides[0];
2147         rows = plane_rows[0];
2148
2149         if (dst_stride == src_stride) {
2150             memcpy(dst_addr, src_addr, src_stride * rows);
2151         } else {
2152             copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
2153
2154             for (j = 0; j < rows; j++) {
2155                 memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
2156             }
2157         }
2158     }
2159
2160     status = CVPixelBufferUnlockBaseAddress(cv_img, 0);
2161     if (status) {
2162         av_log(avctx, AV_LOG_ERROR, "Error: Could not unlock CVPixelBuffer base address: %d.\n", status);
2163         return AVERROR_EXTERNAL;
2164     }
2165
2166     return 0;
2167 }
2168
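/*
 * Obtains a CVPixelBuffer for the frame: AV_PIX_FMT_VIDEOTOOLBOX input is
 * wrapped (and retained) directly, otherwise a buffer is taken from the
 * session's pixel buffer pool and the frame data is copied into it. If the
 * pool is gone because the session was invalidated (iOS backgrounding), the
 * session is recreated first.
 */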
2169 static int create_cv_pixel_buffer(AVCodecContext   *avctx,
2170                                   const AVFrame    *frame,
2171                                   CVPixelBufferRef *cv_img)
2172 {
2173     int plane_count;
2174     int color;
2175     size_t widths [AV_NUM_DATA_POINTERS];
2176     size_t heights[AV_NUM_DATA_POINTERS];
2177     size_t strides[AV_NUM_DATA_POINTERS];
2178     int status;
2179     size_t contiguous_buf_size;
2180     CVPixelBufferPoolRef pix_buf_pool;
2181     VTEncContext* vtctx = avctx->priv_data;
2182
2183     if (avctx->pix_fmt == AV_PIX_FMT_VIDEOTOOLBOX) {
2184         av_assert0(frame->format == AV_PIX_FMT_VIDEOTOOLBOX);
2185
2186         *cv_img = (CVPixelBufferRef)frame->data[3];
2187         av_assert0(*cv_img);
2188
2189         CFRetain(*cv_img);
2190         return 0;
2191     }
2192
2193     memset(widths,  0, sizeof(widths));
2194     memset(heights, 0, sizeof(heights));
2195     memset(strides, 0, sizeof(strides));
2196
2197     status = get_cv_pixel_info(
2198         avctx,
2199         frame,
2200         &color,
2201         &plane_count,
2202         widths,
2203         heights,
2204         strides,
2205         &contiguous_buf_size
2206     );
2207
2208     if (status) {
2209         av_log(
2210             avctx,
2211             AV_LOG_ERROR,
2212             "Error: Cannot convert format %d color_range %d: %d\n",
2213             frame->format,
2214             frame->color_range,
2215             status
2216         );
2217
2218         return AVERROR_EXTERNAL;
2219     }
2220
2221     pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2222     if (!pix_buf_pool) {
2223         /* On iOS, the VT session is invalidated when the app switches between
2224          * foreground and background. Fetch the actual error code of the
2225          * VT session to detect that case and restart the VT session
2226          * accordingly. */
2227         OSStatus vtstatus;
2228
2229         vtstatus = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
2230         if (vtstatus == kVTInvalidSessionErr) {
2231             CFRelease(vtctx->session);
2232             vtctx->session = NULL;
2233             status = vtenc_configure_encoder(avctx);
2234             if (status == 0)
2235                 pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2236         }
2237         if (!pix_buf_pool) {
2238             av_log(avctx, AV_LOG_ERROR, "Could not get pixel buffer pool.\n");
2239             return AVERROR_EXTERNAL;
2240         }
2241         else
2242             av_log(avctx, AV_LOG_WARNING, "VT session restarted because of a "
2243                    "kVTInvalidSessionErr error.\n");
2244     }
2245
2246     status = CVPixelBufferPoolCreatePixelBuffer(NULL,
2247                                                 pix_buf_pool,
2248                                                 cv_img);
2249
2250
2251     if (status) {
2252         av_log(avctx, AV_LOG_ERROR, "Could not create pixel buffer from pool: %d.\n", status);
2253         return AVERROR_EXTERNAL;
2254     }
2255
2256     status = copy_avframe_to_pixel_buffer(avctx, frame, *cv_img, strides, heights);
2257     if (status) {
2258         CFRelease(*cv_img);
2259         *cv_img = NULL;
2260         return status;
2261     }
2262
2263     return 0;
2264 }
2265
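/*
 * Builds the per-frame encode options: when the incoming frame is an I-frame,
 * a dictionary forcing a key frame is created; otherwise *dict_out is left
 * NULL.
 */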
2266 static int create_encoder_dict_h264(const AVFrame *frame,
2267                                     CFDictionaryRef* dict_out)
2268 {
2269     CFDictionaryRef dict = NULL;
2270     if (frame->pict_type == AV_PICTURE_TYPE_I) {
2271         const void *keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
2272         const void *vals[] = { kCFBooleanTrue };
2273
2274         dict = CFDictionaryCreate(NULL, keys, vals, 1, NULL, NULL);
2275         if(!dict) return AVERROR(ENOMEM);
2276     }
2277
2278     *dict_out = dict;
2279     return 0;
2280 }
2281
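/*
 * Submits one frame to the compression session: wraps or copies it into a
 * CVPixelBuffer, attaches the per-frame options and passes any A53 closed
 * captions SEI through the source frame refcon so it can be retrieved with
 * the encoded output.
 */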
2282 static int vtenc_send_frame(AVCodecContext *avctx,
2283                             VTEncContext   *vtctx,
2284                             const AVFrame  *frame)
2285 {
2286     CMTime time;
2287     CFDictionaryRef frame_dict;
2288     CVPixelBufferRef cv_img = NULL;
2289     AVFrameSideData *side_data = NULL;
2290     ExtraSEI *sei = NULL;
2291     int status = create_cv_pixel_buffer(avctx, frame, &cv_img);
2292
2293     if (status) return status;
2294
2295     status = create_encoder_dict_h264(frame, &frame_dict);
2296     if (status) {
2297         CFRelease(cv_img);
2298         return status;
2299     }
2300
2301     side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
2302     if (vtctx->a53_cc && side_data && side_data->size) {
2303         sei = av_mallocz(sizeof(*sei));
2304         if (!sei) {
2305             av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
2306         } else {
2307             int ret = ff_alloc_a53_sei(frame, 0, &sei->data, &sei->size);
2308             if (ret < 0) {
2309                 av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
2310                 av_free(sei);
2311                 sei = NULL;
2312             }
2313         }
2314     }
2315
2316     time = CMTimeMake(frame->pts * avctx->time_base.num, avctx->time_base.den);
2317     status = VTCompressionSessionEncodeFrame(
2318         vtctx->session,
2319         cv_img,
2320         time,
2321         kCMTimeInvalid,
2322         frame_dict,
2323         sei,
2324         NULL
2325     );
2326
2327     if (frame_dict) CFRelease(frame_dict);
2328     CFRelease(cv_img);
2329
2330     if (status) {
2331         av_log(avctx, AV_LOG_ERROR, "Error: cannot encode frame: %d\n", status);
2332         return AVERROR_EXTERNAL;
2333     }
2334
2335     return 0;
2336 }
2337
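/*
 * encode2 entry point: sends the frame to the session (or starts a flush when
 * frame is NULL), uses the first two pts values to derive dts_delta for
 * B-frame streams, then pops one finished sample from the output queue and
 * converts it into an AVPacket.
 */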
2338 static av_cold int vtenc_frame(
2339     AVCodecContext *avctx,
2340     AVPacket       *pkt,
2341     const AVFrame  *frame,
2342     int            *got_packet)
2343 {
2344     VTEncContext *vtctx = avctx->priv_data;
2345     bool get_frame;
2346     int status;
2347     CMSampleBufferRef buf = NULL;
2348     ExtraSEI *sei = NULL;
2349
2350     if (frame) {
2351         status = vtenc_send_frame(avctx, vtctx, frame);
2352
2353         if (status) {
2354             status = AVERROR_EXTERNAL;
2355             goto end_nopkt;
2356         }
2357
2358         if (vtctx->frame_ct_in == 0) {
2359             vtctx->first_pts = frame->pts;
2360         } else if(vtctx->frame_ct_in == 1 && vtctx->has_b_frames) {
2361             vtctx->dts_delta = frame->pts - vtctx->first_pts;
2362         }
2363
2364         vtctx->frame_ct_in++;
2365     } else if(!vtctx->flushing) {
2366         vtctx->flushing = true;
2367
2368         status = VTCompressionSessionCompleteFrames(vtctx->session,
2369                                                     kCMTimeIndefinite);
2370
2371         if (status) {
2372             av_log(avctx, AV_LOG_ERROR, "Error flushing frames: %d\n", status);
2373             status = AVERROR_EXTERNAL;
2374             goto end_nopkt;
2375         }
2376     }
2377
2378     *got_packet = 0;
2379     get_frame = vtctx->dts_delta >= 0 || !frame;
2380     if (!get_frame) {
2381         status = 0;
2382         goto end_nopkt;
2383     }
2384
2385     status = vtenc_q_pop(vtctx, !frame, &buf, &sei);
2386     if (status) goto end_nopkt;
2387     if (!buf)   goto end_nopkt;
2388
2389     status = vtenc_cm_to_avpacket(avctx, buf, pkt, sei);
2390     if (sei) {
2391         if (sei->data) av_free(sei->data);
2392         av_free(sei);
2393     }
2394     CFRelease(buf);
2395     if (status) goto end_nopkt;
2396
2397     *got_packet = 1;
2398     return 0;
2399
2400 end_nopkt:
2401     av_packet_unref(pkt);
2402     return status;
2403 }
2404
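/*
 * Creates a throwaway compression session and encodes a single blank frame
 * from the pixel buffer pool so that the parameter sets become available and
 * end up in avctx->extradata; the temporary session is released afterwards.
 */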
2405 static int vtenc_populate_extradata(AVCodecContext   *avctx,
2406                                     CMVideoCodecType codec_type,
2407                                     CFStringRef      profile_level,
2408                                     CFNumberRef      gamma_level,
2409                                     CFDictionaryRef  enc_info,
2410                                     CFDictionaryRef  pixel_buffer_info)
2411 {
2412     VTEncContext *vtctx = avctx->priv_data;
2413     int status;
2414     CVPixelBufferPoolRef pool = NULL;
2415     CVPixelBufferRef pix_buf = NULL;
2416     CMTime time;
2417     CMSampleBufferRef buf = NULL;
2418
2419     status = vtenc_create_encoder(avctx,
2420                                   codec_type,
2421                                   profile_level,
2422                                   gamma_level,
2423                                   enc_info,
2424                                   pixel_buffer_info,
2425                                   &vtctx->session);
2426     if (status)
2427         goto pe_cleanup;
2428
2429     pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
2430     if(!pool){
2431         av_log(avctx, AV_LOG_ERROR, "Error getting pixel buffer pool.\n");
2432         goto pe_cleanup;
2433     }
2434
2435     status = CVPixelBufferPoolCreatePixelBuffer(NULL,
2436                                                 pool,
2437                                                 &pix_buf);
2438
2439     if(status != kCVReturnSuccess){
2440         av_log(avctx, AV_LOG_ERROR, "Error creating frame from pool: %d\n", status);
2441         goto pe_cleanup;
2442     }
2443
2444     time = CMTimeMake(0, avctx->time_base.den);
2445     status = VTCompressionSessionEncodeFrame(vtctx->session,
2446                                              pix_buf,
2447                                              time,
2448                                              kCMTimeInvalid,
2449                                              NULL,
2450                                              NULL,
2451                                              NULL);
2452
2453     if (status) {
2454         av_log(avctx,
2455                AV_LOG_ERROR,
2456                "Error sending frame for extradata: %d\n",
2457                status);
2458
2459         goto pe_cleanup;
2460     }
2461
2462     //Populates extradata - output frames are flushed and param sets are available.
2463     status = VTCompressionSessionCompleteFrames(vtctx->session,
2464                                                 kCMTimeIndefinite);
2465
2466     if (status)
2467         goto pe_cleanup;
2468
2469     status = vtenc_q_pop(vtctx, 0, &buf, NULL);
2470     if (status) {
2471         av_log(avctx, AV_LOG_ERROR, "popping: %d\n", status);
2472         goto pe_cleanup;
2473     }
2474
2475     CFRelease(buf);
2476
2477
2478
2479 pe_cleanup:
2480     if(vtctx->session)
2481         CFRelease(vtctx->session);
2482
2483     vtctx->session = NULL;
2484     vtctx->frame_ct_out = 0;
2485
2486     av_assert0(status != 0 || (avctx->extradata && avctx->extradata_size > 0));
2487
2488     return status;
2489 }
2490
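/*
 * Flushes any pending frames, clears the output queue, destroys the
 * synchronization primitives and releases the session together with the
 * cached color metadata.
 */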
2491 static av_cold int vtenc_close(AVCodecContext *avctx)
2492 {
2493     VTEncContext *vtctx = avctx->priv_data;
2494
2495     if(!vtctx->session) {
2496         pthread_cond_destroy(&vtctx->cv_sample_sent);
2497         pthread_mutex_destroy(&vtctx->lock);
2498         return 0;
2499     }
2500
2501     VTCompressionSessionCompleteFrames(vtctx->session,
2502                                        kCMTimeIndefinite);
2503     clear_frame_queue(vtctx);
2504     pthread_cond_destroy(&vtctx->cv_sample_sent);
2505     pthread_mutex_destroy(&vtctx->lock);
2506     CFRelease(vtctx->session);
2507     vtctx->session = NULL;
2508
2509     if (vtctx->color_primaries) {
2510         CFRelease(vtctx->color_primaries);
2511         vtctx->color_primaries = NULL;
2512     }
2513
2514     if (vtctx->transfer_function) {
2515         CFRelease(vtctx->transfer_function);
2516         vtctx->transfer_function = NULL;
2517     }
2518
2519     if (vtctx->ycbcr_matrix) {
2520         CFRelease(vtctx->ycbcr_matrix);
2521         vtctx->ycbcr_matrix = NULL;
2522     }
2523
2524     return 0;
2525 }
2526
2527 static const enum AVPixelFormat avc_pix_fmts[] = {
2528     AV_PIX_FMT_VIDEOTOOLBOX,
2529     AV_PIX_FMT_NV12,
2530     AV_PIX_FMT_YUV420P,
2531     AV_PIX_FMT_NONE
2532 };
2533
2534 static const enum AVPixelFormat hevc_pix_fmts[] = {
2535     AV_PIX_FMT_VIDEOTOOLBOX,
2536     AV_PIX_FMT_NV12,
2537     AV_PIX_FMT_YUV420P,
2538     AV_PIX_FMT_P010LE,
2539     AV_PIX_FMT_NONE
2540 };
2541
2542 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
2543 #define COMMON_OPTIONS \
2544     { "allow_sw", "Allow software encoding", OFFSET(allow_sw), AV_OPT_TYPE_BOOL, \
2545         { .i64 = 0 }, 0, 1, VE }, \
2546     { "require_sw", "Require software encoding", OFFSET(require_sw), AV_OPT_TYPE_BOOL, \
2547         { .i64 = 0 }, 0, 1, VE }, \
2548     { "realtime", "Hint that encoding should happen in real-time if not faster (e.g. capturing from camera).", \
2549         OFFSET(realtime), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2550     { "frames_before", "Other frames will come before the frames in this session. This helps smooth concatenation issues.", \
2551         OFFSET(frames_before), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
2552     { "frames_after", "Other frames will come after the frames in this session. This helps smooth concatenation issues.", \
2553         OFFSET(frames_after), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
2554
2555 #define OFFSET(x) offsetof(VTEncContext, x)
2556 static const AVOption h264_options[] = {
2557     { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = H264_PROF_AUTO }, H264_PROF_AUTO, H264_PROF_COUNT, VE, "profile" },
2558     { "baseline", "Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_BASELINE }, INT_MIN, INT_MAX, VE, "profile" },
2559     { "main",     "Main Profile",     0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_MAIN     }, INT_MIN, INT_MAX, VE, "profile" },
2560     { "high",     "High Profile",     0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_HIGH     }, INT_MIN, INT_MAX, VE, "profile" },
2561     { "extended", "Extended Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_EXTENDED }, INT_MIN, INT_MAX, VE, "profile" },
2562
2563     { "level", "Level", OFFSET(level), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 52, VE, "level" },
2564     { "1.3", "Level 1.3, only available with Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = 13 }, INT_MIN, INT_MAX, VE, "level" },
2565     { "3.0", "Level 3.0", 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, INT_MIN, INT_MAX, VE, "level" },
2566     { "3.1", "Level 3.1", 0, AV_OPT_TYPE_CONST, { .i64 = 31 }, INT_MIN, INT_MAX, VE, "level" },
2567     { "3.2", "Level 3.2", 0, AV_OPT_TYPE_CONST, { .i64 = 32 }, INT_MIN, INT_MAX, VE, "level" },
2568     { "4.0", "Level 4.0", 0, AV_OPT_TYPE_CONST, { .i64 = 40 }, INT_MIN, INT_MAX, VE, "level" },
2569     { "4.1", "Level 4.1", 0, AV_OPT_TYPE_CONST, { .i64 = 41 }, INT_MIN, INT_MAX, VE, "level" },
2570     { "4.2", "Level 4.2", 0, AV_OPT_TYPE_CONST, { .i64 = 42 }, INT_MIN, INT_MAX, VE, "level" },
2571     { "5.0", "Level 5.0", 0, AV_OPT_TYPE_CONST, { .i64 = 50 }, INT_MIN, INT_MAX, VE, "level" },
2572     { "5.1", "Level 5.1", 0, AV_OPT_TYPE_CONST, { .i64 = 51 }, INT_MIN, INT_MAX, VE, "level" },
2573     { "5.2", "Level 5.2", 0, AV_OPT_TYPE_CONST, { .i64 = 52 }, INT_MIN, INT_MAX, VE, "level" },
2574
2575     { "coder", "Entropy coding", OFFSET(entropy), AV_OPT_TYPE_INT, { .i64 = VT_ENTROPY_NOT_SET }, VT_ENTROPY_NOT_SET, VT_CABAC, VE, "coder" },
2576     { "cavlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
2577     { "vlc",   "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
2578     { "cabac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
2579     { "ac",    "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
2580
2581     { "a53cc", "Use A53 Closed Captions (if available)", OFFSET(a53_cc), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, VE },
2582
2583     COMMON_OPTIONS
2584     { NULL },
2585 };
2586
2587 static const AVClass h264_videotoolbox_class = {
2588     .class_name = "h264_videotoolbox",
2589     .item_name  = av_default_item_name,
2590     .option     = h264_options,
2591     .version    = LIBAVUTIL_VERSION_INT,
2592 };
2593
2594 AVCodec ff_h264_videotoolbox_encoder = {
2595     .name             = "h264_videotoolbox",
2596     .long_name        = NULL_IF_CONFIG_SMALL("VideoToolbox H.264 Encoder"),
2597     .type             = AVMEDIA_TYPE_VIDEO,
2598     .id               = AV_CODEC_ID_H264,
2599     .priv_data_size   = sizeof(VTEncContext),
2600     .pix_fmts         = avc_pix_fmts,
2601     .init             = vtenc_init,
2602     .encode2          = vtenc_frame,
2603     .close            = vtenc_close,
2604     .capabilities     = AV_CODEC_CAP_DELAY,
2605     .priv_class       = &h264_videotoolbox_class,
2606     .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE |
2607                         FF_CODEC_CAP_INIT_CLEANUP,
2608 };
2609
2610 static const AVOption hevc_options[] = {
2611     { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = HEVC_PROF_AUTO }, HEVC_PROF_AUTO, HEVC_PROF_COUNT, VE, "profile" },
2612     { "main",     "Main Profile",     0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN   }, INT_MIN, INT_MAX, VE, "profile" },
2613     { "main10",   "Main10 Profile",   0, AV_OPT_TYPE_CONST, { .i64 = HEVC_PROF_MAIN10 }, INT_MIN, INT_MAX, VE, "profile" },
2614
2615     COMMON_OPTIONS
2616     { NULL },
2617 };
2618
2619 static const AVClass hevc_videotoolbox_class = {
2620     .class_name = "hevc_videotoolbox",
2621     .item_name  = av_default_item_name,
2622     .option     = hevc_options,
2623     .version    = LIBAVUTIL_VERSION_INT,
2624 };
2625
2626 AVCodec ff_hevc_videotoolbox_encoder = {
2627     .name             = "hevc_videotoolbox",
2628     .long_name        = NULL_IF_CONFIG_SMALL("VideoToolbox H.265 Encoder"),
2629     .type             = AVMEDIA_TYPE_VIDEO,
2630     .id               = AV_CODEC_ID_HEVC,
2631     .priv_data_size   = sizeof(VTEncContext),
2632     .pix_fmts         = hevc_pix_fmts,
2633     .init             = vtenc_init,
2634     .encode2          = vtenc_frame,
2635     .close            = vtenc_close,
2636     .capabilities     = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE,
2637     .priv_class       = &hevc_videotoolbox_class,
2638     .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE |
2639                         FF_CODEC_CAP_INIT_CLEANUP,
2640     .wrapper_name     = "videotoolbox",
2641 };