2 * Videotoolbox hardware acceleration
4 * copyright (c) 2012 Sebastien Zwickert
6 * This file is part of FFmpeg.
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 #include "videotoolbox.h"
25 #include "libavutil/hwcontext_videotoolbox.h"
26 #include "vt_internal.h"
27 #include "libavutil/avutil.h"
28 #include "libavutil/hwcontext.h"
29 #include "libavutil/pixdesc.h"
30 #include "bytestream.h"
34 #include "mpegvideo.h"
35 #include <TargetConditionals.h>
// Compatibility shims: define VT decoder-specification keys and the HEVC
// codec fourcc on SDKs that predate them, so the rest of the file can use
// the symbolic names unconditionally.
37 #ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
38 # define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
40 #ifndef kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
41 # define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder CFSTR("EnableHardwareAcceleratedVideoDecoder")
44 #if !HAVE_KCMVIDEOCODECTYPE_HEVC
45 enum { kCMVideoCodecType_HEVC = 'hvc1' };
// Extra bytes reserved past the ESDS payload written by
// videotoolbox_esds_extradata_create().
48 #define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING 12
// Backing data of a decoded AVFrame's buf[0]: the retained output
// CVPixelBuffer plus a reference to the hw frames context it belongs to.
50 typedef struct VTHWFrame {
51 CVPixelBufferRef pixbuf;
52 AVBufferRef *hw_frames_ctx;
// AVBuffer free callback for VTHWFrame: drops the frames-ctx reference and
// releases the retained pixel buffer.
55 static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
57 VTHWFrame *ref = (VTHWFrame *)data;
58 av_buffer_unref(&ref->hw_frames_ctx);
59 CVPixelBufferRelease(ref->pixbuf);
// Copy an input buffer into the context's bitstream scratch area,
// growing it with av_fast_realloc() as needed.
// Returns AVERROR(ENOMEM) if reallocation fails (original buffer stays valid).
64 static int videotoolbox_buffer_copy(VTContext *vtctx,
65 const uint8_t *buffer,
70 tmp = av_fast_realloc(vtctx->bitstream,
71 &vtctx->allocated_size,
75 return AVERROR(ENOMEM);
77 vtctx->bitstream = tmp;
78 memcpy(vtctx->bitstream, buffer, size);
79 vtctx->bitstream_size = size;
// FrameDecodeData post-process hook: exposes the decoded CVPixelBuffer via
// frame->data[3] and attaches the hw frames context. Fails with
// AVERROR_EXTERNAL when no pixel buffer was produced by the decoder callback.
84 static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
86 VTHWFrame *ref = (VTHWFrame *)frame->buf[0]->data;
89 av_log(avctx, AV_LOG_ERROR, "No frame decoded?\n");
90 av_frame_unref(frame);
91 return AVERROR_EXTERNAL;
// Cropping already applied by VideoToolbox; clear the frame's crop fields.
94 frame->crop_right = 0;
97 frame->crop_bottom = 0;
99 frame->data[3] = (uint8_t*)ref->pixbuf;
101 if (ref->hw_frames_ctx) {
102 av_buffer_unref(&frame->hw_frames_ctx);
103 frame->hw_frames_ctx = av_buffer_ref(ref->hw_frames_ctx);
104 if (!frame->hw_frames_ctx)
105 return AVERROR(ENOMEM);
// Allocate a hwaccel frame: zeroed VTHWFrame wrapped in an AVBufferRef
// (freed by videotoolbox_buffer_release), with decode data attached and
// videotoolbox_postproc_frame registered as the post-process hook.
111 int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
113 size_t size = sizeof(VTHWFrame);
114 uint8_t *data = NULL;
115 AVBufferRef *buf = NULL;
116 int ret = ff_attach_decode_data(frame);
117 FrameDecodeData *fdd;
121 data = av_mallocz(size);
123 return AVERROR(ENOMEM);
124 buf = av_buffer_create(data, size, videotoolbox_buffer_release, NULL, 0);
127 return AVERROR(ENOMEM);
131 fdd = (FrameDecodeData*)frame->private_ref->data;
132 fdd->post_process = videotoolbox_postproc_frame;
// Mirror the codec context's geometry/format onto the frame.
134 frame->width = avctx->width;
135 frame->height = avctx->height;
136 frame->format = avctx->pix_fmt;
// Write one byte at p (used below when serializing avcC/hvcC records).
141 #define AV_W8(p, v) *(p) = (v)
// Build an ISO/IEC 14496-15 avcC record (one SPS + one PPS) from the active
// H.264 parameter sets and return it as a CFData for the format description.
// Also caches the SPS profile/level bytes in vtctx->sps for reconfig detection.
143 CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
145 VTContext *vtctx = avctx->internal->hwaccel_priv_data;
146 H264Context *h = avctx->priv_data;
147 CFDataRef data = NULL;
// 6-byte header + 2-byte SPS length + SPS + (1 count + 2 length) + PPS.
149 int vt_extradata_size = 6 + 2 + h->ps.sps->data_size + 3 + h->ps.pps->data_size;
150 uint8_t *vt_extradata = av_malloc(vt_extradata_size);
156 AV_W8(p + 0, 1); /* version */
157 AV_W8(p + 1, h->ps.sps->data[1]); /* profile */
158 AV_W8(p + 2, h->ps.sps->data[2]); /* profile compat */
159 AV_W8(p + 3, h->ps.sps->data[3]); /* level */
160 AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
161 AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
162 AV_WB16(p + 6, h->ps.sps->data_size);
163 memcpy(p + 8, h->ps.sps->data, h->ps.sps->data_size);
164 p += 8 + h->ps.sps->data_size;
165 AV_W8(p + 0, 1); /* number of pps */
166 AV_WB16(p + 1, h->ps.pps->data_size);
167 memcpy(p + 3, h->ps.pps->data, h->ps.pps->data_size);
169 p += 3 + h->ps.pps->data_size;
170 av_assert0(p - vt_extradata == vt_extradata_size);
172 // save sps header (profile/level) used to create decoder session,
173 // so we can detect changes and recreate it.
175 memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
177 data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
178 av_free(vt_extradata);
// Build an ISO/IEC 14496-15 hvcC record from all active HEVC VPS/SPS/PPS
// lists and return it as a CFData for the CM format description.
182 CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx)
184 HEVCContext *h = avctx->priv_data;
185 int i, num_vps = 0, num_sps = 0, num_pps = 0;
186 const HEVCVPS *vps = h->ps.vps;
187 const HEVCSPS *sps = h->ps.sps;
188 const HEVCPPS *pps = h->ps.pps;
189 PTLCommon ptlc = vps->ptl.general_ptl;
191 uint8_t parallelismType;
192 CFDataRef data = NULL;
// 23-byte fixed header + one 3-byte array header per parameter-set type.
194 int vt_extradata_size = 23 + 3 + 3 + 3;
195 uint8_t *vt_extradata;
// Count parameter sets of type T (V/S/P) and grow the buffer size by each
// set's 2-byte length prefix plus payload.
197 #define COUNT_SIZE_PS(T, t) \
198 for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
199 if (h->ps.t##ps_list[i]) { \
200 const HEVC##T##PS *lps = (const HEVC##T##PS *)h->ps.t##ps_list[i]->data; \
201 vt_extradata_size += 2 + lps->data_size; \
210 vt_extradata = av_malloc(vt_extradata_size);
215 /* unsigned int(8) configurationVersion = 1; */
219 * unsigned int(2) general_profile_space;
220 * unsigned int(1) general_tier_flag;
221 * unsigned int(5) general_profile_idc;
223 AV_W8(p + 1, ptlc.profile_space << 6 |
224 ptlc.tier_flag << 5 |
227 /* unsigned int(32) general_profile_compatibility_flags; */
228 memcpy(p + 2, ptlc.profile_compatibility_flag, 4);
230 /* unsigned int(48) general_constraint_indicator_flags; */
231 AV_W8(p + 6, ptlc.progressive_source_flag << 7 |
232 ptlc.interlaced_source_flag << 6 |
233 ptlc.non_packed_constraint_flag << 5 |
234 ptlc.frame_only_constraint_flag << 4);
238 /* unsigned int(8) general_level_idc; */
239 AV_W8(p + 12, ptlc.level_idc);
242 * bit(4) reserved = ‘1111’b;
243 * unsigned int(12) min_spatial_segmentation_idc;
245 AV_W8(p + 13, 0xf0 | (vui.min_spatial_segmentation_idc >> 4));
246 AV_W8(p + 14, vui.min_spatial_segmentation_idc & 0xff);
249 * bit(6) reserved = ‘111111’b;
250 * unsigned int(2) parallelismType;
// parallelismType derived from the PPS entropy-coding / tiles flags.
252 if (!vui.min_spatial_segmentation_idc)
254 else if (pps->entropy_coding_sync_enabled_flag && pps->tiles_enabled_flag)
256 else if (pps->entropy_coding_sync_enabled_flag)
258 else if (pps->tiles_enabled_flag)
262 AV_W8(p + 15, 0xfc | parallelismType);
265 * bit(6) reserved = ‘111111’b;
266 * unsigned int(2) chromaFormat;
268 AV_W8(p + 16, sps->chroma_format_idc | 0xfc);
271 * bit(5) reserved = ‘11111’b;
272 * unsigned int(3) bitDepthLumaMinus8;
274 AV_W8(p + 17, (sps->bit_depth - 8) | 0xfc);
277 * bit(5) reserved = ‘11111’b;
278 * unsigned int(3) bitDepthChromaMinus8;
280 AV_W8(p + 18, (sps->bit_depth_chroma - 8) | 0xfc);
282 /* bit(16) avgFrameRate; */
286 * bit(2) constantFrameRate;
287 * bit(3) numTemporalLayers;
288 * bit(1) temporalIdNested;
289 * unsigned int(2) lengthSizeMinusOne;
291 AV_W8(p + 21, 0 << 6 |
292 sps->max_sub_layers << 3 |
293 sps->temporal_id_nesting_flag << 2 |
296 /* unsigned int(8) numOfArrays; */
// Emit one NAL-unit array (header + every stored set) for type T.
301 #define APPEND_PS(T, t) \
303 * bit(1) array_completeness; \
304 * unsigned int(1) reserved = 0; \
305 * unsigned int(6) NAL_unit_type; \
308 HEVC_NAL_##T##PS & 0x3f); \
309 /* unsigned int(16) numNalus; */ \
310 AV_WB16(p + 1, num_##t##ps); \
312 for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
313 if (h->ps.t##ps_list[i]) { \
314 const HEVC##T##PS *lps = (const HEVC##T##PS *)h->ps.t##ps_list[i]->data; \
315 /* unsigned int(16) nalUnitLength; */ \
316 AV_WB16(p, lps->data_size); \
317 /* bit(8*nalUnitLength) nalUnit; */ \
318 memcpy(p + 2, lps->data, lps->data_size); \
319 p += 2 + lps->data_size; \
327 av_assert0(p - vt_extradata == vt_extradata_size);
329 data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
330 av_free(vt_extradata);
// H.264 start_frame hook: for AVC-format (length-prefixed) input the whole
// access unit is copied into the bitstream buffer as-is.
334 int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
335 const uint8_t *buffer,
338 VTContext *vtctx = avctx->internal->hwaccel_priv_data;
339 H264Context *h = avctx->priv_data;
341 if (h->is_avc == 1) {
342 return videotoolbox_buffer_copy(vtctx, buffer, size);
// decode_params hook: track SPS profile/level bytes so a changed SPS marks
// the session for reconfiguration, then forward the parameter-set NAL to the
// slice path so it reaches the decoder in-band.
348 static int videotoolbox_h264_decode_params(AVCodecContext *avctx,
350 const uint8_t *buffer,
353 VTContext *vtctx = avctx->internal->hwaccel_priv_data;
354 H264Context *h = avctx->priv_data;
356 // save sps header (profile/level) used to create decoder session
358 memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
360 if (type == H264_NAL_SPS) {
361 if (size > 4 && memcmp(vtctx->sps, buffer + 1, 3) != 0) {
362 vtctx->reconfig_needed = true;
363 memcpy(vtctx->sps, buffer + 1, 3);
367 // pass-through SPS/PPS changes to the decoder
368 return ff_videotoolbox_h264_decode_slice(avctx, buffer, size);
// Append one NAL unit to the bitstream buffer in AVCC framing: a 4-byte
// big-endian length prefix followed by the payload.
// Returns AVERROR(ENOMEM) if the buffer cannot grow.
371 static int videotoolbox_common_decode_slice(AVCodecContext *avctx,
372 const uint8_t *buffer,
375 VTContext *vtctx = avctx->internal->hwaccel_priv_data;
378 tmp = av_fast_realloc(vtctx->bitstream,
379 &vtctx->allocated_size,
380 vtctx->bitstream_size+size+4);
382 return AVERROR(ENOMEM);
384 vtctx->bitstream = tmp;
386 AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
387 memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);
389 vtctx->bitstream_size += size + 4;
// H.264 decode_slice hook: delegates to the common length-prefixed appender.
394 int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
395 const uint8_t *buffer,
398 H264Context *h = avctx->priv_data;
403 return videotoolbox_common_decode_slice(avctx, buffer, size);
// Free the per-context bitstream buffer and release any pending output frame.
406 int ff_videotoolbox_uninit(AVCodecContext *avctx)
408 VTContext *vtctx = avctx->internal->hwaccel_priv_data;
410 av_freep(&vtctx->bitstream);
412 CVPixelBufferRelease(vtctx->frame);
418 #if CONFIG_VIDEOTOOLBOX
419 // Return the AVVideotoolboxContext that matters currently. Where it comes from
420 // depends on the API used.
// Pick the active AVVideotoolboxContext: the hwaccel-private one when the
// codec is open (new API), otherwise the user-supplied hwaccel_context
// (old API).
421 static AVVideotoolboxContext *videotoolbox_get_context(AVCodecContext *avctx)
423 // Somewhat tricky because the user can call av_videotoolbox_default_free()
424 // at any time, even when the codec is closed.
425 if (avctx->internal && avctx->internal->hwaccel_priv_data) {
426 VTContext *vtctx = avctx->internal->hwaccel_priv_data;
428 return vtctx->vt_ctx;
430 return avctx->hwaccel_context;
// Transfer the decoded pixel buffer from vtctx into the frame's VTHWFrame,
// (re)creating the cached hw frames context when the output format or
// geometry changed, and attach a frames-ctx reference to the frame.
433 static int videotoolbox_buffer_create(AVCodecContext *avctx, AVFrame *frame)
435 VTContext *vtctx = avctx->internal->hwaccel_priv_data;
436 CVPixelBufferRef pixbuf = (CVPixelBufferRef)vtctx->frame;
437 OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
438 enum AVPixelFormat sw_format = av_map_videotoolbox_format_to_pixfmt(pixel_format);
439 int width = CVPixelBufferGetWidth(pixbuf);
440 int height = CVPixelBufferGetHeight(pixbuf);
441 AVHWFramesContext *cached_frames;
// The frame must have been prepared by ff_videotoolbox_alloc_frame().
445 if (!frame->buf[0] || frame->data[3]) {
446 av_log(avctx, AV_LOG_ERROR, "videotoolbox: invalid state\n");
447 av_frame_unref(frame);
448 return AVERROR_EXTERNAL;
451 ref = (VTHWFrame *)frame->buf[0]->data;
// Ownership of vtctx->frame moves into the VTHWFrame.
454 CVPixelBufferRelease(ref->pixbuf);
455 ref->pixbuf = vtctx->frame;
458 // Old API code path.
459 if (!vtctx->cached_hw_frames_ctx)
462 cached_frames = (AVHWFramesContext*)vtctx->cached_hw_frames_ctx->data;
// Rebuild the cached frames context if the decoder's output changed.
464 if (cached_frames->sw_format != sw_format ||
465 cached_frames->width != width ||
466 cached_frames->height != height) {
467 AVBufferRef *hw_frames_ctx = av_hwframe_ctx_alloc(cached_frames->device_ref);
468 AVHWFramesContext *hw_frames;
470 return AVERROR(ENOMEM);
472 hw_frames = (AVHWFramesContext*)hw_frames_ctx->data;
473 hw_frames->format = cached_frames->format;
474 hw_frames->sw_format = sw_format;
475 hw_frames->width = width;
476 hw_frames->height = height;
478 ret = av_hwframe_ctx_init(hw_frames_ctx);
480 av_buffer_unref(&hw_frames_ctx);
484 av_buffer_unref(&vtctx->cached_hw_frames_ctx);
485 vtctx->cached_hw_frames_ctx = hw_frames_ctx;
488 av_buffer_unref(&ref->hw_frames_ctx);
489 ref->hw_frames_ctx = av_buffer_ref(vtctx->cached_hw_frames_ctx);
490 if (!ref->hw_frames_ctx)
491 return AVERROR(ENOMEM);
// Write an MPEG-4 descriptor length as four 7-bit groups, most significant
// group first (expandable-size encoding used in ESDS).
496 static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
501 for (i = 3; i >= 0; i--) {
502 b = (length >> (i * 7)) & 0x7F;
506 bytestream2_put_byteu(pb, b);
// Wrap MPEG-4 codec extradata in an ES descriptor (ESDS) and return it as
// CFData for the "esds" sample-description extension.
510 static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
513 uint8_t *rw_extradata;
515 int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
516 // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
517 int config_size = 13 + 5 + avctx->extradata_size;
520 if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
523 bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
524 bytestream2_put_byteu(&pb, 0); // version
525 bytestream2_put_ne24(&pb, 0); // flags
527 // elementary stream descriptor
528 bytestream2_put_byteu(&pb, 0x03); // ES_DescrTag
529 videotoolbox_write_mp4_descr_length(&pb, full_size);
530 bytestream2_put_ne16(&pb, 0); // esid
531 bytestream2_put_byteu(&pb, 0); // stream priority (0-32)
533 // decoder configuration descriptor
534 bytestream2_put_byteu(&pb, 0x04); // DecoderConfigDescrTag
535 videotoolbox_write_mp4_descr_length(&pb, config_size);
536 bytestream2_put_byteu(&pb, 32); // object type indication. 32 = AV_CODEC_ID_MPEG4
537 bytestream2_put_byteu(&pb, 0x11); // stream type
538 bytestream2_put_ne24(&pb, 0); // buffer size
539 bytestream2_put_ne32(&pb, 0); // max bitrate
540 bytestream2_put_ne32(&pb, 0); // avg bitrate
542 // decoder specific descriptor
543 bytestream2_put_byteu(&pb, 0x05); ///< DecSpecificInfoTag
544 videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);
546 bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);
548 // SLConfigDescriptor
549 bytestream2_put_byteu(&pb, 0x06); // SLConfigDescrTag
550 bytestream2_put_byteu(&pb, 0x01); // length
551 bytestream2_put_byteu(&pb, 0x02); //
553 s = bytestream2_size_p(&pb);
555 data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);
557 av_freep(&rw_extradata);
// Wrap an existing memory block into a CMSampleBuffer (no copy: the block
// buffer references the caller's memory via kCFAllocatorNull) so it can be
// submitted to VTDecompressionSessionDecodeFrame.
561 static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
566 CMBlockBufferRef block_buf;
567 CMSampleBufferRef sample_buf;
572 status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
573 buffer, // memoryBlock
575 kCFAllocatorNull, // blockAllocator
576 NULL, // customBlockSource
583 status = CMSampleBufferCreate(kCFAllocatorDefault, // allocator
584 block_buf, // dataBuffer
586 0, // makeDataReadyCallback
587 0, // makeDataReadyRefcon
588 fmt_desc, // formatDescription
590 0, // numSampleTimingEntries
591 NULL, // sampleTimingArray
592 0, // numSampleSizeEntries
593 NULL, // sampleSizeArray
598 CFRelease(block_buf);
// VTDecompressionSession output callback: stashes a retained reference to the
// decoded image buffer in vtctx->frame (replacing any previous one).
603 static void videotoolbox_decoder_callback(void *opaque,
604 void *sourceFrameRefCon,
606 VTDecodeInfoFlags flags,
607 CVImageBufferRef image_buffer,
611 AVCodecContext *avctx = opaque;
612 VTContext *vtctx = avctx->internal->hwaccel_priv_data;
615 CVPixelBufferRelease(vtctx->frame);
620 av_log(avctx, AV_LOG_DEBUG, "vt decoder cb: output image buffer is null\n");
624 vtctx->frame = CVPixelBufferRetain(image_buffer);
// Submit the accumulated bitstream to the decompression session and wait for
// asynchronous frames so the decoder callback has run before returning.
627 static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
630 CMSampleBufferRef sample_buf;
631 AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
632 VTContext *vtctx = avctx->internal->hwaccel_priv_data;
634 sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
636 vtctx->bitstream_size);
641 status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
644 NULL, // sourceFrameRefCon
647 status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);
649 CFRelease(sample_buf);
// Create a CMVideoFormatDescription for the given codec type, attaching the
// decoder-spec dictionary as format extensions.
654 static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
655 CFDictionaryRef decoder_spec,
659 CMFormatDescriptionRef cm_fmt_desc;
662 status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
666 decoder_spec, // Dictionary of extension
// Build the destination-image buffer attributes dictionary: pixel format,
// dimensions, IOSurface backing, and (platform-dependent) GL compatibility.
// Caller owns the returned dictionary.
675 static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
679 CFMutableDictionaryRef buffer_attributes;
680 CFMutableDictionaryRef io_surface_properties;
681 CFNumberRef cv_pix_fmt;
685 w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
686 h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
687 cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);
689 buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
691 &kCFTypeDictionaryKeyCallBacks,
692 &kCFTypeDictionaryValueCallBacks);
693 io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
695 &kCFTypeDictionaryKeyCallBacks,
696 &kCFTypeDictionaryValueCallBacks);
699 CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
700 CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
701 CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
702 CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
704 CFDictionarySetValue(buffer_attributes, kCVPixelBufferOpenGLESCompatibilityKey, kCFBooleanTrue);
706 CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfaceOpenGLTextureCompatibilityKey, kCFBooleanTrue);
// The dictionary retains its values; drop our local references.
709 CFRelease(io_surface_properties);
710 CFRelease(cv_pix_fmt);
714 return buffer_attributes;
// Build the decoder-specification dictionary: request (H.264) or enable
// (HEVC) hardware decoding and attach codec extradata as the matching
// sample-description extension atom (esds / avcC / hvcC).
717 static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
718 AVCodecContext *avctx)
720 CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
722 &kCFTypeDictionaryKeyCallBacks,
723 &kCFTypeDictionaryValueCallBacks);
// HEVC uses Enable… (hw optional); other codecs use Require… (hw mandatory).
725 CFDictionarySetValue(config_info,
726 codec_type == kCMVideoCodecType_HEVC ?
727 kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder :
728 kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder,
731 CFMutableDictionaryRef avc_info;
732 CFDataRef data = NULL;
734 avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
736 &kCFTypeDictionaryKeyCallBacks,
737 &kCFTypeDictionaryValueCallBacks);
739 switch (codec_type) {
740 case kCMVideoCodecType_MPEG4Video :
741 if (avctx->extradata_size)
742 data = videotoolbox_esds_extradata_create(avctx);
744 CFDictionarySetValue(avc_info, CFSTR("esds"), data);
746 case kCMVideoCodecType_H264 :
747 data = ff_videotoolbox_avcc_extradata_create(avctx);
749 CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
751 case kCMVideoCodecType_HEVC :
752 data = ff_videotoolbox_hvcc_extradata_create(avctx);
754 CFDictionarySetValue(avc_info, CFSTR("hvcC"), data);
760 CFDictionarySetValue(config_info,
761 kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
// Create and start a VTDecompressionSession for the current codec: map the
// codec id to a CM codec type, build decoder spec, format description and
// buffer attributes, then translate OSStatus creation errors to AVERROR codes.
771 static int videotoolbox_start(AVCodecContext *avctx)
773 AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
775 VTDecompressionOutputCallbackRecord decoder_cb;
776 CFDictionaryRef decoder_spec;
777 CFDictionaryRef buf_attr;
780 av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
784 switch( avctx->codec_id ) {
785 case AV_CODEC_ID_H263 :
786 videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
788 case AV_CODEC_ID_H264 :
789 videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
791 case AV_CODEC_ID_HEVC :
792 videotoolbox->cm_codec_type = kCMVideoCodecType_HEVC;
794 case AV_CODEC_ID_MPEG1VIDEO :
795 videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
797 case AV_CODEC_ID_MPEG2VIDEO :
798 videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
800 case AV_CODEC_ID_MPEG4 :
801 videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
807 decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);
810 av_log(avctx, AV_LOG_ERROR, "decoder specification creation failed\n");
814 videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
818 if (!videotoolbox->cm_fmt_desc) {
820 CFRelease(decoder_spec);
822 av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
826 buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
828 videotoolbox->cv_pix_fmt_type);
830 decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
831 decoder_cb.decompressionOutputRefCon = avctx;
833 status = VTDecompressionSessionCreate(NULL, // allocator
834 videotoolbox->cm_fmt_desc, // videoFormatDescription
835 decoder_spec, // videoDecoderSpecification
836 buf_attr, // destinationImageBufferAttributes
837 &decoder_cb, // outputCallback
838 &videotoolbox->session); // decompressionSessionOut
841 CFRelease(decoder_spec);
// Map session-creation OSStatus values to AVERROR codes (verbose log only:
// higher layers may fall back to software decoding).
846 case kVTVideoDecoderNotAvailableNowErr:
847 av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox session not available.\n");
848 return AVERROR(ENOSYS);
849 case kVTVideoDecoderUnsupportedDataFormatErr:
850 av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox does not support this format.\n");
851 return AVERROR(ENOSYS);
852 case kVTCouldNotFindVideoDecoderErr:
853 av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder for this format not found.\n");
854 return AVERROR(ENOSYS);
855 case kVTVideoDecoderMalfunctionErr:
856 av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox malfunction.\n");
857 return AVERROR(EINVAL);
858 case kVTVideoDecoderBadDataErr:
859 av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox reported invalid data.\n");
860 return AVERROR_INVALIDDATA;
864 av_log(avctx, AV_LOG_VERBOSE, "Unknown VideoToolbox session creation error %d\n", (int)status);
865 return AVERROR_UNKNOWN;
// Tear down the format description and the decompression session
// (invalidating the session before releasing it), clearing both pointers.
869 static void videotoolbox_stop(AVCodecContext *avctx)
871 AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
875 if (videotoolbox->cm_fmt_desc) {
876 CFRelease(videotoolbox->cm_fmt_desc);
877 videotoolbox->cm_fmt_desc = NULL;
880 if (videotoolbox->session) {
881 VTDecompressionSessionInvalidate(videotoolbox->session);
882 CFRelease(videotoolbox->session);
883 videotoolbox->session = NULL;
// Human-readable name for a few known VT decode OSStatus values (for logging).
887 static const char *videotoolbox_error_string(OSStatus status)
890 case kVTVideoDecoderBadDataErr:
892 case kVTVideoDecoderMalfunctionErr:
893 return "decoder malfunction";
894 case kVTInvalidSessionErr:
895 return "invalid session";
// Shared end_frame: restart the session if a reconfig was flagged, decode the
// accumulated bitstream, flag a reconfig on session-level failures, and hand
// the decoded pixel buffer to videotoolbox_buffer_create().
900 static int videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
903 AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
904 VTContext *vtctx = avctx->internal->hwaccel_priv_data;
906 if (vtctx->reconfig_needed == true) {
907 vtctx->reconfig_needed = false;
908 av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder needs reconfig, restarting..\n");
909 videotoolbox_stop(avctx);
910 if (videotoolbox_start(avctx) != 0) {
911 return AVERROR_EXTERNAL;
915 if (!videotoolbox->session || !vtctx->bitstream || !vtctx->bitstream_size)
916 return AVERROR_INVALIDDATA;
918 status = videotoolbox_session_decode_frame(avctx);
919 if (status != noErr) {
// Malfunction/invalid-session errors are recoverable by recreating
// the session on the next frame.
920 if (status == kVTVideoDecoderMalfunctionErr || status == kVTInvalidSessionErr)
921 vtctx->reconfig_needed = true;
922 av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%s, %d)\n", videotoolbox_error_string(status), (int)status);
923 return AVERROR_UNKNOWN;
927 vtctx->reconfig_needed = true;
928 return AVERROR_UNKNOWN;
931 return videotoolbox_buffer_create(avctx, frame);
// H.264 end_frame: decode the current picture, then reset the bitstream size
// so the next access unit starts from an empty buffer.
934 static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
936 H264Context *h = avctx->priv_data;
937 AVFrame *frame = h->cur_pic_ptr->f;
938 VTContext *vtctx = avctx->internal->hwaccel_priv_data;
939 int ret = videotoolbox_common_end_frame(avctx, frame);
940 vtctx->bitstream_size = 0;
// HEVC start_frame: no per-frame setup needed; slices are appended later.
944 static int videotoolbox_hevc_start_frame(AVCodecContext *avctx,
945 const uint8_t *buffer,
// HEVC decode_slice: append the NAL with a 4-byte length prefix.
951 static int videotoolbox_hevc_decode_slice(AVCodecContext *avctx,
952 const uint8_t *buffer,
955 return videotoolbox_common_decode_slice(avctx, buffer, size);
// HEVC decode_params: parameter-set NALs are passed through in-band like
// slices.
959 static int videotoolbox_hevc_decode_params(AVCodecContext *avctx,
961 const uint8_t *buffer,
964 return videotoolbox_common_decode_slice(avctx, buffer, size);
// HEVC end_frame: clear output-frame cropping (handled by VideoToolbox),
// decode the accumulated access unit, then reset the bitstream buffer.
967 static int videotoolbox_hevc_end_frame(AVCodecContext *avctx)
969 HEVCContext *h = avctx->priv_data;
970 AVFrame *frame = h->ref->frame;
971 VTContext *vtctx = avctx->internal->hwaccel_priv_data;
973 h->output_frame->crop_right = 0;
974 h->output_frame->crop_left = 0;
975 h->output_frame->crop_top = 0;
976 h->output_frame->crop_bottom = 0;
978 int ret = videotoolbox_common_end_frame(avctx, frame);
979 vtctx->bitstream_size = 0;
// MPEG-family start_frame: the whole frame arrives at once; copy it into the
// bitstream buffer.
983 static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
984 const uint8_t *buffer,
987 VTContext *vtctx = avctx->internal->hwaccel_priv_data;
989 return videotoolbox_buffer_copy(vtctx, buffer, size);
// MPEG-family decode_slice: nothing to do; data was copied in start_frame.
992 static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
993 const uint8_t *buffer,
// MPEG-family end_frame: decode into the current picture's frame.
999 static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
1001 MpegEncContext *s = avctx->priv_data;
1002 AVFrame *frame = s->current_picture_ptr->f;
1004 return videotoolbox_common_end_frame(avctx, frame);
// hwaccel uninit: free common state, stop the session, and drop the cached
// frames context and the internally-allocated AVVideotoolboxContext.
1007 static int videotoolbox_uninit(AVCodecContext *avctx)
1009 VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1013 ff_videotoolbox_uninit(avctx);
1016 videotoolbox_stop(avctx);
1018 av_buffer_unref(&vtctx->cached_hw_frames_ctx);
1019 av_freep(&vtctx->vt_ctx);
// Choose the output sw pixel format from the codec's pix_fmt: P010 for
// >8-bit content, NV12 otherwise (and NV12 when the descriptor is unknown,
// matching av_videotoolbox_alloc_context()).
1024 static enum AVPixelFormat videotoolbox_best_pixel_format(AVCodecContext *avctx) {
1025 const AVPixFmtDescriptor *descriptor = av_pix_fmt_desc_get(avctx->pix_fmt);
1027 return AV_PIX_FMT_NV12; // same as av_videotoolbox_alloc_context()
1029 int depth = descriptor->comp[0].depth;
1031 return AV_PIX_FMT_P010;
1034 return AV_PIX_FMT_NV12;
// New-API hwaccel init: allocate the internal AVVideotoolboxContext, obtain
// or create an AVHWFramesContext (from hw_frames_ctx or hw_device_ctx),
// cache it, resolve the CV pixel format, and start the session.
// Old API (user-set hwaccel_context) skips all of this.
1037 static int videotoolbox_common_init(AVCodecContext *avctx)
1039 VTContext *vtctx = avctx->internal->hwaccel_priv_data;
1040 AVHWFramesContext *hw_frames;
1043 // Old API - do nothing.
1044 if (avctx->hwaccel_context)
1047 if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx) {
1048 av_log(avctx, AV_LOG_ERROR,
1049 "Either hw_frames_ctx or hw_device_ctx must be set.\n");
1050 return AVERROR(EINVAL);
1053 vtctx->vt_ctx = av_videotoolbox_alloc_context();
1054 if (!vtctx->vt_ctx) {
1055 err = AVERROR(ENOMEM);
// Use the caller-provided frames context when present, otherwise derive one
// from the device context.
1059 if (avctx->hw_frames_ctx) {
1060 hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1062 avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
1063 if (!avctx->hw_frames_ctx) {
1064 err = AVERROR(ENOMEM);
1068 hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1069 hw_frames->format = AV_PIX_FMT_VIDEOTOOLBOX;
1070 hw_frames->sw_format = videotoolbox_best_pixel_format(avctx);
1071 hw_frames->width = avctx->width;
1072 hw_frames->height = avctx->height;
1074 err = av_hwframe_ctx_init(avctx->hw_frames_ctx);
1076 av_buffer_unref(&avctx->hw_frames_ctx);
1081 vtctx->cached_hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
1082 if (!vtctx->cached_hw_frames_ctx) {
1083 err = AVERROR(ENOMEM);
1087 bool full_range = avctx->color_range == AVCOL_RANGE_JPEG;
1088 vtctx->vt_ctx->cv_pix_fmt_type =
1089 av_map_videotoolbox_format_from_pixfmt2(hw_frames->sw_format, full_range);
1090 if (!vtctx->vt_ctx->cv_pix_fmt_type) {
1091 av_log(avctx, AV_LOG_ERROR, "Unknown sw_format.\n");
1092 err = AVERROR(EINVAL);
1096 err = videotoolbox_start(avctx);
1103 videotoolbox_uninit(avctx);
// frame_params hook: fill a frames context with the VT format, the coded
// dimensions, and the preferred sw format.
1107 static int videotoolbox_frame_params(AVCodecContext *avctx,
1108 AVBufferRef *hw_frames_ctx)
1110 AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ctx->data;
1112 frames_ctx->format = AV_PIX_FMT_VIDEOTOOLBOX;
1113 frames_ctx->width = avctx->coded_width;
1114 frames_ctx->height = avctx->coded_height;
1115 frames_ctx->sw_format = videotoolbox_best_pixel_format(avctx);
// H.263 hwaccel descriptor (MPEG-style callbacks: copy whole frame up front).
1120 const AVHWAccel ff_h263_videotoolbox_hwaccel = {
1121 .name = "h263_videotoolbox",
1122 .type = AVMEDIA_TYPE_VIDEO,
1123 .id = AV_CODEC_ID_H263,
1124 .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1125 .alloc_frame = ff_videotoolbox_alloc_frame,
1126 .start_frame = videotoolbox_mpeg_start_frame,
1127 .decode_slice = videotoolbox_mpeg_decode_slice,
1128 .end_frame = videotoolbox_mpeg_end_frame,
1129 .frame_params = videotoolbox_frame_params,
1130 .init = videotoolbox_common_init,
1131 .uninit = videotoolbox_uninit,
1132 .priv_data_size = sizeof(VTContext),
// HEVC hwaccel descriptor (NAL-by-NAL callbacks with decode_params).
1135 const AVHWAccel ff_hevc_videotoolbox_hwaccel = {
1136 .name = "hevc_videotoolbox",
1137 .type = AVMEDIA_TYPE_VIDEO,
1138 .id = AV_CODEC_ID_HEVC,
1139 .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1140 .alloc_frame = ff_videotoolbox_alloc_frame,
1141 .start_frame = videotoolbox_hevc_start_frame,
1142 .decode_slice = videotoolbox_hevc_decode_slice,
1143 .decode_params = videotoolbox_hevc_decode_params,
1144 .end_frame = videotoolbox_hevc_end_frame,
1145 .frame_params = videotoolbox_frame_params,
1146 .init = videotoolbox_common_init,
1147 .uninit = videotoolbox_uninit,
1148 .priv_data_size = sizeof(VTContext),
// H.264 hwaccel descriptor (NAL-by-NAL callbacks with SPS change detection).
1151 const AVHWAccel ff_h264_videotoolbox_hwaccel = {
1152 .name = "h264_videotoolbox",
1153 .type = AVMEDIA_TYPE_VIDEO,
1154 .id = AV_CODEC_ID_H264,
1155 .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1156 .alloc_frame = ff_videotoolbox_alloc_frame,
1157 .start_frame = ff_videotoolbox_h264_start_frame,
1158 .decode_slice = ff_videotoolbox_h264_decode_slice,
1159 .decode_params = videotoolbox_h264_decode_params,
1160 .end_frame = videotoolbox_h264_end_frame,
1161 .frame_params = videotoolbox_frame_params,
1162 .init = videotoolbox_common_init,
1163 .uninit = videotoolbox_uninit,
1164 .priv_data_size = sizeof(VTContext),
// MPEG-1 hwaccel descriptor.
1167 const AVHWAccel ff_mpeg1_videotoolbox_hwaccel = {
1168 .name = "mpeg1_videotoolbox",
1169 .type = AVMEDIA_TYPE_VIDEO,
1170 .id = AV_CODEC_ID_MPEG1VIDEO,
1171 .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1172 .alloc_frame = ff_videotoolbox_alloc_frame,
1173 .start_frame = videotoolbox_mpeg_start_frame,
1174 .decode_slice = videotoolbox_mpeg_decode_slice,
1175 .end_frame = videotoolbox_mpeg_end_frame,
1176 .frame_params = videotoolbox_frame_params,
1177 .init = videotoolbox_common_init,
1178 .uninit = videotoolbox_uninit,
1179 .priv_data_size = sizeof(VTContext),
// MPEG-2 hwaccel descriptor.
1182 const AVHWAccel ff_mpeg2_videotoolbox_hwaccel = {
1183 .name = "mpeg2_videotoolbox",
1184 .type = AVMEDIA_TYPE_VIDEO,
1185 .id = AV_CODEC_ID_MPEG2VIDEO,
1186 .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1187 .alloc_frame = ff_videotoolbox_alloc_frame,
1188 .start_frame = videotoolbox_mpeg_start_frame,
1189 .decode_slice = videotoolbox_mpeg_decode_slice,
1190 .end_frame = videotoolbox_mpeg_end_frame,
1191 .frame_params = videotoolbox_frame_params,
1192 .init = videotoolbox_common_init,
1193 .uninit = videotoolbox_uninit,
1194 .priv_data_size = sizeof(VTContext),
// MPEG-4 Part 2 hwaccel descriptor.
1197 const AVHWAccel ff_mpeg4_videotoolbox_hwaccel = {
1198 .name = "mpeg4_videotoolbox",
1199 .type = AVMEDIA_TYPE_VIDEO,
1200 .id = AV_CODEC_ID_MPEG4,
1201 .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1202 .alloc_frame = ff_videotoolbox_alloc_frame,
1203 .start_frame = videotoolbox_mpeg_start_frame,
1204 .decode_slice = videotoolbox_mpeg_decode_slice,
1205 .end_frame = videotoolbox_mpeg_end_frame,
1206 .frame_params = videotoolbox_frame_params,
1207 .init = videotoolbox_common_init,
1208 .uninit = videotoolbox_uninit,
1209 .priv_data_size = sizeof(VTContext),
// Allocate an AVVideotoolboxContext with the output callback set and the CV
// pixel format resolved from (pix_fmt, full_range); falls back to 8-bit
// bi-planar video-range when the mapping is unknown.
1212 static AVVideotoolboxContext *av_videotoolbox_alloc_context_with_pix_fmt(enum AVPixelFormat pix_fmt,
1215 AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));
1218 ret->output_callback = videotoolbox_decoder_callback;
1220 OSType cv_pix_fmt_type = av_map_videotoolbox_format_from_pixfmt2(pix_fmt, full_range);
1221 if (cv_pix_fmt_type == 0) {
1222 cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
1224 ret->cv_pix_fmt_type = cv_pix_fmt_type;
// Public allocator: default context with no specific pixel format request.
1230 AVVideotoolboxContext *av_videotoolbox_alloc_context(void)
1232 return av_videotoolbox_alloc_context_with_pix_fmt(AV_PIX_FMT_NONE, false);
// Old-API init without a caller-supplied context.
1235 int av_videotoolbox_default_init(AVCodecContext *avctx)
1237 return av_videotoolbox_default_init2(avctx, NULL);
// Old-API init: install the given context (or allocate one matching the
// codec's best pixel format and color range) as hwaccel_context, then start
// the decompression session.
1240 int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx)
1242 enum AVPixelFormat pix_fmt = videotoolbox_best_pixel_format(avctx);
1243 bool full_range = avctx->color_range == AVCOL_RANGE_JPEG;
1244 avctx->hwaccel_context = vtctx ?: av_videotoolbox_alloc_context_with_pix_fmt(pix_fmt, full_range);
1245 if (!avctx->hwaccel_context)
1246 return AVERROR(ENOMEM);
1247 return videotoolbox_start(avctx);
// Old-API teardown: stop the session and free the user-visible context.
1250 void av_videotoolbox_default_free(AVCodecContext *avctx)
1253 videotoolbox_stop(avctx);
1254 av_freep(&avctx->hwaccel_context);
1256 #endif /* CONFIG_VIDEOTOOLBOX */