X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Futils.c;h=bc1beee4626d339717d328c605b60bd564580447;hb=32c8359093d1ff4f45ed19518b449b3ac3769d27;hp=8cbd47b84d7bae15ce08f8717743af26b83c74bb;hpb=e3e317e0c015b164b6c2eb8913e393216d78de23;p=ffmpeg diff --git a/libavcodec/utils.c b/libavcodec/utils.c index 8cbd47b84d7..bc1beee4626 100644 --- a/libavcodec/utils.c +++ b/libavcodec/utils.c @@ -32,6 +32,7 @@ #include "libavutil/channel_layout.h" #include "libavutil/crc.h" #include "libavutil/frame.h" +#include "libavutil/hwcontext.h" #include "libavutil/internal.h" #include "libavutil/mathematics.h" #include "libavutil/pixdesc.h" @@ -59,14 +60,14 @@ static void *avformat_mutex; void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size) { void **p = ptr; - if (min_size > SIZE_MAX - FF_INPUT_BUFFER_PADDING_SIZE) { + if (min_size > SIZE_MAX - AV_INPUT_BUFFER_PADDING_SIZE) { av_freep(p); *size = 0; return; } - av_fast_malloc(p, size, min_size + FF_INPUT_BUFFER_PADDING_SIZE); + av_fast_malloc(p, size, min_size + AV_INPUT_BUFFER_PADDING_SIZE); if (*size) - memset((uint8_t *)*p + min_size, 0, FF_INPUT_BUFFER_PADDING_SIZE); + memset((uint8_t *)*p + min_size, 0, AV_INPUT_BUFFER_PADDING_SIZE); } /* encoder management */ @@ -94,12 +95,12 @@ static av_cold void avcodec_init(void) int av_codec_is_encoder(const AVCodec *codec) { - return codec && (codec->encode_sub || codec->encode2); + return codec && (codec->encode_sub || codec->encode2 ||codec->send_frame); } int av_codec_is_decoder(const AVCodec *codec) { - return codec && codec->decode; + return codec && (codec->decode || codec->send_packet); } av_cold void avcodec_register(AVCodec *codec) @@ -147,7 +148,7 @@ int ff_set_sar(AVCodecContext *avctx, AVRational sar) int ret = av_image_check_sar(avctx->width, avctx->height, sar); if (ret < 0) { - av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n", + av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %d/%d\n", sar.num, sar.den); avctx->sample_aspect_ratio = (AVRational){ 0, 1 }; return ret; @@ -199,6 +200,7 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, case AV_PIX_FMT_YUV440P: case AV_PIX_FMT_YUV444P: case AV_PIX_FMT_GBRP: + case AV_PIX_FMT_GBRAP: case AV_PIX_FMT_GRAY8: case AV_PIX_FMT_GRAY16BE: case AV_PIX_FMT_GRAY16LE: @@ -336,7 +338,8 @@ static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame) switch (avctx->codec_type) { case AVMEDIA_TYPE_VIDEO: { - AVPicture picture; + uint8_t *data[4]; + int linesize[4]; int size[4] = { 0 }; int w = frame->width; int h = frame->height; @@ -351,27 +354,27 @@ static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame) do { // NOTE: do not align linesizes individually, this breaks e.g. 
assumptions // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2 - av_image_fill_linesizes(picture.linesize, avctx->pix_fmt, w); + av_image_fill_linesizes(linesize, avctx->pix_fmt, w); // increase alignment of w for next try (rhs gives the lowest bit set in w) w += w & ~(w - 1); unaligned = 0; for (i = 0; i < 4; i++) - unaligned |= picture.linesize[i] % pool->stride_align[i]; + unaligned |= linesize[i] % pool->stride_align[i]; } while (unaligned); - tmpsize = av_image_fill_pointers(picture.data, avctx->pix_fmt, h, - NULL, picture.linesize); + tmpsize = av_image_fill_pointers(data, avctx->pix_fmt, h, + NULL, linesize); if (tmpsize < 0) return -1; - for (i = 0; i < 3 && picture.data[i + 1]; i++) - size[i] = picture.data[i + 1] - picture.data[i]; - size[i] = tmpsize - (picture.data[i] - picture.data[0]); + for (i = 0; i < 3 && data[i + 1]; i++) + size[i] = data[i + 1] - data[i]; + size[i] = tmpsize - (data[i] - data[0]); for (i = 0; i < 4; i++) { av_buffer_pool_uninit(&pool->pools[i]); - pool->linesize[i] = picture.linesize[i]; + pool->linesize[i] = linesize[i]; if (size[i]) { pool->pools[i] = av_buffer_pool_init(size[i] + 16, NULL); if (!pool->pools[i]) { @@ -510,15 +513,12 @@ int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags { int ret; + if (avctx->hw_frames_ctx) + return av_hwframe_get_buffer(avctx->hw_frames_ctx, frame, 0); + if ((ret = update_frame_pool(avctx, frame)) < 0) return ret; -#if FF_API_GET_BUFFER -FF_DISABLE_DEPRECATION_WARNINGS - frame->type = FF_BUFFER_TYPE_INTERNAL; -FF_ENABLE_DEPRECATION_WARNINGS -#endif - switch (avctx->codec_type) { case AVMEDIA_TYPE_VIDEO: return video_get_buffer(avctx, frame); @@ -529,34 +529,6 @@ FF_ENABLE_DEPRECATION_WARNINGS } } -#if FF_API_GET_BUFFER -FF_DISABLE_DEPRECATION_WARNINGS -int avcodec_default_get_buffer(AVCodecContext *avctx, AVFrame *frame) -{ - return avcodec_default_get_buffer2(avctx, frame, 0); -} - -typedef struct CompatReleaseBufPriv { - AVCodecContext avctx; - AVFrame frame; -} CompatReleaseBufPriv; - -static void compat_free_buffer(void *opaque, uint8_t *data) -{ - CompatReleaseBufPriv *priv = opaque; - if (priv->avctx.release_buffer) - priv->avctx.release_buffer(&priv->avctx, &priv->frame); - av_freep(&priv); -} - -static void compat_release_buffer(void *opaque, uint8_t *data) -{ - AVBufferRef *buf = opaque; - av_buffer_unref(&buf); -} -FF_ENABLE_DEPRECATION_WARNINGS -#endif - int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame) { AVPacket *pkt = avctx->internal->pkt; @@ -568,6 +540,7 @@ int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame) { AV_PKT_DATA_REPLAYGAIN , AV_FRAME_DATA_REPLAYGAIN }, { AV_PKT_DATA_DISPLAYMATRIX, AV_FRAME_DATA_DISPLAYMATRIX }, { AV_PKT_DATA_STEREO3D, AV_FRAME_DATA_STEREO3D }, + { AV_PKT_DATA_AUDIO_SERVICE_TYPE, AV_FRAME_DATA_AUDIO_SERVICE_TYPE }, }; frame->color_primaries = avctx->color_primaries; @@ -578,11 +551,21 @@ int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame) frame->reordered_opaque = avctx->reordered_opaque; if (!pkt) { +#if FF_API_PKT_PTS +FF_DISABLE_DEPRECATION_WARNINGS frame->pkt_pts = AV_NOPTS_VALUE; +FF_ENABLE_DEPRECATION_WARNINGS +#endif + frame->pts = AV_NOPTS_VALUE; return 0; } +#if FF_API_PKT_PTS +FF_DISABLE_DEPRECATION_WARNINGS frame->pkt_pts = pkt->pts; +FF_ENABLE_DEPRECATION_WARNINGS +#endif + frame->pts = pkt->pts; for (i = 0; i < FF_ARRAY_ELEMS(sd); i++) { int size; @@ -665,126 +648,13 @@ int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags) if (ret < 0) return ret; - if (hwaccel && 
hwaccel->alloc_frame) { - ret = hwaccel->alloc_frame(avctx, frame); - goto end; - } - -#if FF_API_GET_BUFFER -FF_DISABLE_DEPRECATION_WARNINGS - /* - * Wrap an old get_buffer()-allocated buffer in an bunch of AVBuffers. - * We wrap each plane in its own AVBuffer. Each of those has a reference to - * a dummy AVBuffer as its private data, unreffing it on free. - * When all the planes are freed, the dummy buffer's free callback calls - * release_buffer(). - */ - if (avctx->get_buffer) { - CompatReleaseBufPriv *priv = NULL; - AVBufferRef *dummy_buf = NULL; - int planes, i, ret; - - if (flags & AV_GET_BUFFER_FLAG_REF) - frame->reference = 1; - - ret = avctx->get_buffer(avctx, frame); - if (ret < 0) - return ret; - - /* return if the buffers are already set up - * this would happen e.g. when a custom get_buffer() calls - * avcodec_default_get_buffer - */ - if (frame->buf[0]) - return 0; - - priv = av_mallocz(sizeof(*priv)); - if (!priv) { - ret = AVERROR(ENOMEM); - goto fail; + if (hwaccel) { + if (hwaccel->alloc_frame) { + ret = hwaccel->alloc_frame(avctx, frame); + goto end; } - priv->avctx = *avctx; - priv->frame = *frame; - - dummy_buf = av_buffer_create(NULL, 0, compat_free_buffer, priv, 0); - if (!dummy_buf) { - ret = AVERROR(ENOMEM); - goto fail; - } - -#define WRAP_PLANE(ref_out, data, data_size) \ -do { \ - AVBufferRef *dummy_ref = av_buffer_ref(dummy_buf); \ - if (!dummy_ref) { \ - ret = AVERROR(ENOMEM); \ - goto fail; \ - } \ - ref_out = av_buffer_create(data, data_size, compat_release_buffer, \ - dummy_ref, 0); \ - if (!ref_out) { \ - av_frame_unref(frame); \ - ret = AVERROR(ENOMEM); \ - goto fail; \ - } \ -} while (0) - - if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) { - const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format); - - planes = av_pix_fmt_count_planes(frame->format); - /* workaround for AVHWAccel plane count of 0, buf[0] is used as - check for allocated buffers: make libavcodec happy */ - if (desc && desc->flags & AV_PIX_FMT_FLAG_HWACCEL) - planes = 1; - if (!desc || planes <= 0) { - ret = AVERROR(EINVAL); - goto fail; - } - - for (i = 0; i < planes; i++) { - int v_shift = (i == 1 || i == 2) ? desc->log2_chroma_h : 0; - int plane_size = (frame->height >> v_shift) * frame->linesize[i]; - - WRAP_PLANE(frame->buf[i], frame->data[i], plane_size); - } - } else { - int planar = av_sample_fmt_is_planar(frame->format); - planes = planar ? 
avctx->channels : 1; - - if (planes > FF_ARRAY_ELEMS(frame->buf)) { - frame->nb_extended_buf = planes - FF_ARRAY_ELEMS(frame->buf); - frame->extended_buf = av_malloc(sizeof(*frame->extended_buf) * - frame->nb_extended_buf); - if (!frame->extended_buf) { - ret = AVERROR(ENOMEM); - goto fail; - } - } - - for (i = 0; i < FFMIN(planes, FF_ARRAY_ELEMS(frame->buf)); i++) - WRAP_PLANE(frame->buf[i], frame->extended_data[i], frame->linesize[0]); - - for (i = 0; i < frame->nb_extended_buf; i++) - WRAP_PLANE(frame->extended_buf[i], - frame->extended_data[i + FF_ARRAY_ELEMS(frame->buf)], - frame->linesize[0]); - } - - av_buffer_unref(&dummy_buf); - - frame->width = avctx->width; - frame->height = avctx->height; - - return 0; - -fail: - avctx->release_buffer(avctx, frame); - av_freep(&priv); - av_buffer_unref(&dummy_buf); - return ret; - } -FF_ENABLE_DEPRECATION_WARNINGS -#endif + } else + avctx->sw_pix_fmt = avctx->pix_fmt; ret = avctx->get_buffer2(avctx, frame, flags); @@ -828,19 +698,6 @@ int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame) return 0; } -#if FF_API_GET_BUFFER -void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic) -{ - av_frame_unref(pic); -} - -int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic) -{ - av_assert0(0); - return AVERROR_BUG; -} -#endif - int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2), void *arg, int *ret, int count, int size) { int i; @@ -933,6 +790,10 @@ int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt) while (fmt[n] != AV_PIX_FMT_NONE) ++n; + av_assert0(n >= 1); + avctx->sw_pix_fmt = fmt[n - 1]; + av_assert2(!is_hwaccel_pix_fmt(avctx->sw_pix_fmt)); + choices = av_malloc_array(n + 1, sizeof(*choices)); if (!choices) return AV_PIX_FMT_NONE; @@ -945,6 +806,8 @@ int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt) av_freep(&avctx->internal->hwaccel_priv_data); avctx->hwaccel = NULL; + av_buffer_unref(&avctx->hw_frames_ctx); + ret = avctx->get_format(avctx, choices); desc = av_pix_fmt_desc_get(ret); @@ -956,6 +819,16 @@ int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt) if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) break; + if (avctx->hw_frames_ctx) { + AVHWFramesContext *hw_frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; + if (hw_frames_ctx->format != ret) { + av_log(avctx, AV_LOG_ERROR, "Format returned from get_buffer() " + "does not match the format of provided AVHWFramesContext\n"); + ret = AV_PIX_FMT_NONE; + break; + } + } + if (!setup_hwaccel(avctx, ret, desc->name)) break; @@ -972,41 +845,6 @@ int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt) return ret; } -#if FF_API_AVFRAME_LAVC -void avcodec_get_frame_defaults(AVFrame *frame) -{ - if (frame->extended_data != frame->data) - av_freep(&frame->extended_data); - - memset(frame, 0, sizeof(AVFrame)); - - frame->pts = AV_NOPTS_VALUE; - frame->key_frame = 1; - frame->sample_aspect_ratio = (AVRational) {0, 1 }; - frame->format = -1; /* unknown */ - frame->extended_data = frame->data; -} - -AVFrame *avcodec_alloc_frame(void) -{ - AVFrame *frame = av_mallocz(sizeof(AVFrame)); - - if (!frame) - return NULL; - -FF_DISABLE_DEPRECATION_WARNINGS - avcodec_get_frame_defaults(frame); -FF_ENABLE_DEPRECATION_WARNINGS - - return frame; -} - -void avcodec_free_frame(AVFrame **frame) -{ - av_frame_free(frame); -} -#endif - int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options) { int ret = 0; @@ -1034,16 +872,21 
@@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code av_dict_copy(&tmp, *options, 0); /* If there is a user-supplied mutex locking routine, call it. */ - if (lockmgr_cb) { - if ((*lockmgr_cb)(&codec_mutex, AV_LOCK_OBTAIN)) - return -1; - } + if (!(codec->caps_internal & FF_CODEC_CAP_INIT_THREADSAFE) && codec->init) { + if (lockmgr_cb) { + if ((*lockmgr_cb)(&codec_mutex, AV_LOCK_OBTAIN)) + return -1; + } - entangled_thread_counter++; - if (entangled_thread_counter != 1) { - av_log(avctx, AV_LOG_ERROR, "insufficient thread locking around avcodec_open/close()\n"); - ret = -1; - goto end; + entangled_thread_counter++; + if (entangled_thread_counter != 1) { + av_log(avctx, AV_LOG_ERROR, + "Insufficient thread locking. At least %d threads are " + "calling avcodec_open2() at the same time right now.\n", + entangled_thread_counter); + ret = -1; + goto end; + } } avctx->internal = av_mallocz(sizeof(AVCodecInternal)); @@ -1064,6 +907,18 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code goto free_and_end; } + avctx->internal->buffer_frame = av_frame_alloc(); + if (!avctx->internal->buffer_frame) { + ret = AVERROR(ENOMEM); + goto free_and_end; + } + + avctx->internal->buffer_pkt = av_packet_alloc(); + if (!avctx->internal->buffer_pkt) { + ret = AVERROR(ENOMEM); + goto free_and_end; + } + if (codec->priv_data_size > 0) { if (!avctx->priv_data) { avctx->priv_data = av_mallocz(codec->priv_data_size); @@ -1132,7 +987,7 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code } avctx->frame_number = 0; - if (avctx->codec->capabilities & CODEC_CAP_EXPERIMENTAL && + if ((avctx->codec->capabilities & AV_CODEC_CAP_EXPERIMENTAL) && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) { ret = AVERROR_EXPERIMENTAL; goto free_and_end; @@ -1150,11 +1005,27 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code goto free_and_end; } } - if (!HAVE_THREADS && !(codec->capabilities & CODEC_CAP_AUTO_THREADS)) + if (!HAVE_THREADS && !(codec->capabilities & AV_CODEC_CAP_AUTO_THREADS)) avctx->thread_count = 1; if (av_codec_is_encoder(avctx->codec)) { int i; +#if FF_API_CODED_FRAME +FF_DISABLE_DEPRECATION_WARNINGS + avctx->coded_frame = av_frame_alloc(); + if (!avctx->coded_frame) { + ret = AVERROR(ENOMEM); + goto free_and_end; + } +FF_ENABLE_DEPRECATION_WARNINGS +#endif + + if (avctx->time_base.num <= 0 || avctx->time_base.den <= 0) { + av_log(avctx, AV_LOG_ERROR, "The encoder timebase is not set.\n"); + ret = AVERROR(EINVAL); + goto free_and_end; + } + if (avctx->codec->sample_fmts) { for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++) { if (avctx->sample_fmt == avctx->codec->sample_fmts[i]) @@ -1223,6 +1094,26 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code if (!avctx->rc_initial_buffer_occupancy) avctx->rc_initial_buffer_occupancy = avctx->rc_buffer_size * 3 / 4; + + if (avctx->ticks_per_frame && + avctx->ticks_per_frame > INT_MAX / avctx->time_base.num) { + av_log(avctx, AV_LOG_ERROR, + "ticks_per_frame %d too large for the timebase %d/%d.", + avctx->ticks_per_frame, + avctx->time_base.num, + avctx->time_base.den); + goto free_and_end; + } + + if (avctx->hw_frames_ctx) { + AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data; + if (frames_ctx->format != avctx->pix_fmt) { + av_log(avctx, AV_LOG_ERROR, + "Mismatching AVCodecContext.pix_fmt and AVHWFramesContext.format\n"); + ret = AVERROR(EINVAL); + goto free_and_end; + 
} + } } if (avctx->codec->init && !(avctx->active_thread_type & FF_THREAD_FRAME)) { @@ -1261,12 +1152,15 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code #endif } end: - entangled_thread_counter--; + if (!(codec->caps_internal & FF_CODEC_CAP_INIT_THREADSAFE) && codec->init) { + entangled_thread_counter--; - /* Release any user-supplied mutex. */ - if (lockmgr_cb) { - (*lockmgr_cb)(&codec_mutex, AV_LOCK_RELEASE); + /* Release any user-supplied mutex. */ + if (lockmgr_cb) { + (*lockmgr_cb)(&codec_mutex, AV_LOCK_RELEASE); + } } + if (options) { av_dict_free(options); *options = tmp; @@ -1274,6 +1168,20 @@ end: return ret; free_and_end: + if (avctx->codec && + (avctx->codec->caps_internal & FF_CODEC_CAP_INIT_CLEANUP)) + avctx->codec->close(avctx); + + if (avctx->priv_data && avctx->codec && avctx->codec->priv_class) + av_opt_free(avctx->priv_data); + av_opt_free(avctx); + +#if FF_API_CODED_FRAME +FF_DISABLE_DEPRECATION_WARNINGS + av_frame_free(&avctx->coded_frame); +FF_ENABLE_DEPRECATION_WARNINGS +#endif + av_dict_free(&tmp); av_freep(&avctx->priv_data); if (avctx->internal) { @@ -1287,26 +1195,16 @@ free_and_end: int ff_alloc_packet(AVPacket *avpkt, int size) { - if (size > INT_MAX - FF_INPUT_BUFFER_PADDING_SIZE) + if (size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE) return AVERROR(EINVAL); if (avpkt->data) { AVBufferRef *buf = avpkt->buf; -#if FF_API_DESTRUCT_PACKET -FF_DISABLE_DEPRECATION_WARNINGS - void *destruct = avpkt->destruct; -FF_ENABLE_DEPRECATION_WARNINGS -#endif if (avpkt->size < size) return AVERROR(EINVAL); av_init_packet(avpkt); -#if FF_API_DESTRUCT_PACKET -FF_DISABLE_DEPRECATION_WARNINGS - avpkt->destruct = destruct; -FF_ENABLE_DEPRECATION_WARNINGS -#endif avpkt->buf = buf; avpkt->size = size; return 0; @@ -1366,8 +1264,13 @@ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx, *got_packet_ptr = 0; - if (!(avctx->codec->capabilities & CODEC_CAP_DELAY) && !frame) { - av_free_packet(avpkt); + if (!avctx->codec->encode2) { + av_log(avctx, AV_LOG_ERROR, "This encoder requires using the avcodec_send_frame() API.\n"); + return AVERROR(ENOSYS); + } + + if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) && !frame) { + av_packet_unref(avpkt); av_init_packet(avpkt); return 0; } @@ -1388,12 +1291,19 @@ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx, frame = &tmp; } + /* extract audio service type metadata */ + if (frame) { + AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_AUDIO_SERVICE_TYPE); + if (sd && sd->size >= sizeof(enum AVAudioServiceType)) + avctx->audio_service_type = *(enum AVAudioServiceType*)sd->data; + } + /* check for valid frame size */ if (frame) { - if (avctx->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) { + if (avctx->codec->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME) { if (frame->nb_samples > avctx->frame_size) return AVERROR(EINVAL); - } else if (!(avctx->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) { + } else if (!(avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) { if (frame->nb_samples < avctx->frame_size && !avctx->internal->last_audio_frame) { ret = pad_last_frame(avctx, &padded_frame, frame); @@ -1414,7 +1324,7 @@ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx, ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr); if (!ret) { if (*got_packet_ptr) { - if (!(avctx->codec->capabilities & CODEC_CAP_DELAY)) { + if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) { if (avpkt->pts == AV_NOPTS_VALUE) avpkt->pts = 
frame->pts; if (!avpkt->duration) @@ -1436,7 +1346,7 @@ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx, } if (ret < 0 || !*got_packet_ptr) { - av_free_packet(avpkt); + av_packet_unref(avpkt); av_init_packet(avpkt); goto end; } @@ -1466,8 +1376,13 @@ int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx, *got_packet_ptr = 0; - if (!(avctx->codec->capabilities & CODEC_CAP_DELAY) && !frame) { - av_free_packet(avpkt); + if (!avctx->codec->encode2) { + av_log(avctx, AV_LOG_ERROR, "This encoder requires using the avcodec_send_frame() API.\n"); + return AVERROR(ENOSYS); + } + + if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) && !frame) { + av_packet_unref(avpkt); av_init_packet(avpkt); avpkt->size = 0; return 0; @@ -1482,7 +1397,7 @@ int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx, if (!ret) { if (!*got_packet_ptr) avpkt->size = 0; - else if (!(avctx->codec->capabilities & CODEC_CAP_DELAY)) + else if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) avpkt->pts = avpkt->dts = frame->pts; if (!user_packet && avpkt->size) { @@ -1495,7 +1410,7 @@ int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx, } if (ret < 0 || !*got_packet_ptr) - av_free_packet(avpkt); + av_packet_unref(avpkt); emms_c(); return ret; @@ -1526,10 +1441,11 @@ static int apply_param_change(AVCodecContext *avctx, AVPacket *avpkt) if (!data) return 0; - if (!(avctx->codec->capabilities & CODEC_CAP_PARAM_CHANGE)) { + if (!(avctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE)) { av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter " "changes, but PARAM_CHANGE side data was sent to it.\n"); - return AVERROR(EINVAL); + ret = AVERROR(EINVAL); + goto fail2; } if (size < 4) @@ -1564,13 +1480,20 @@ static int apply_param_change(AVCodecContext *avctx, AVPacket *avpkt) size -= 8; ret = ff_set_dimensions(avctx, avctx->width, avctx->height); if (ret < 0) - return ret; + goto fail2; } return 0; fail: av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n"); - return AVERROR_INVALIDDATA; + ret = AVERROR_INVALIDDATA; +fail2: + if (ret < 0) { + av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n"); + if (avctx->err_recognition & AV_EF_EXPLODE) + return ret; + } + return 0; } static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame) @@ -1629,26 +1552,30 @@ int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *pi if ((avctx->coded_width || avctx->coded_height) && av_image_check_size(avctx->coded_width, avctx->coded_height, 0, avctx)) return -1; + if (!avctx->codec->decode) { + av_log(avctx, AV_LOG_ERROR, "This decoder requires using the avcodec_send_packet() API.\n"); + return AVERROR(ENOSYS); + } + avctx->internal->pkt = avpkt; ret = apply_param_change(avctx, avpkt); - if (ret < 0) { - av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n"); - if (avctx->err_recognition & AV_EF_EXPLODE) - return ret; - } + if (ret < 0) + return ret; av_frame_unref(picture); - if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size || (avctx->active_thread_type & FF_THREAD_FRAME)) { + if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size || + (avctx->active_thread_type & FF_THREAD_FRAME)) { if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) ret = ff_thread_decode_frame(avctx, picture, got_picture_ptr, avpkt); else { ret = avctx->codec->decode(avctx, picture, got_picture_ptr, avpkt); - picture->pkt_dts = avpkt->dts; + if (!(avctx->codec->caps_internal & 
FF_CODEC_CAP_SETS_PKT_DTS)) + picture->pkt_dts = avpkt->dts; /* get_buffer is supposed to set frame parameters */ - if (!(avctx->codec->capabilities & CODEC_CAP_DR1)) { + if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) { picture->sample_aspect_ratio = avctx->sample_aspect_ratio; picture->width = avctx->width; picture->height = avctx->height; @@ -1689,6 +1616,11 @@ int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx, *got_frame_ptr = 0; + if (!avctx->codec->decode) { + av_log(avctx, AV_LOG_ERROR, "This decoder requires using the avcodec_send_packet() API.\n"); + return AVERROR(ENOSYS); + } + avctx->internal->pkt = avpkt; if (!avpkt->data && avpkt->size) { @@ -1697,15 +1629,12 @@ int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx, } ret = apply_param_change(avctx, avpkt); - if (ret < 0) { - av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n"); - if (avctx->err_recognition & AV_EF_EXPLODE) - return ret; - } + if (ret < 0) + return ret; av_frame_unref(frame); - if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size) { + if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) { ret = avctx->codec->decode(avctx, frame, got_frame_ptr, avpkt); if (ret >= 0 && *got_frame_ptr) { avctx->frame_number++; @@ -1745,10 +1674,10 @@ void avsubtitle_free(AVSubtitle *sub) int i; for (i = 0; i < sub->num_rects; i++) { - av_freep(&sub->rects[i]->pict.data[0]); - av_freep(&sub->rects[i]->pict.data[1]); - av_freep(&sub->rects[i]->pict.data[2]); - av_freep(&sub->rects[i]->pict.data[3]); + av_freep(&sub->rects[i]->data[0]); + av_freep(&sub->rects[i]->data[1]); + av_freep(&sub->rects[i]->data[2]); + av_freep(&sub->rects[i]->data[3]); av_freep(&sub->rects[i]->text); av_freep(&sub->rects[i]->ass); av_freep(&sub->rects[i]); @@ -1759,17 +1688,257 @@ void avsubtitle_free(AVSubtitle *sub) memset(sub, 0, sizeof(AVSubtitle)); } +static int do_decode(AVCodecContext *avctx, AVPacket *pkt) +{ + int got_frame; + int ret; + + av_assert0(!avctx->internal->buffer_frame->buf[0]); + + if (!pkt) + pkt = avctx->internal->buffer_pkt; + + // This is the lesser evil. The field is for compatibility with legacy users + // of the legacy API, and users using the new API should not be forced to + // even know about this field. + avctx->refcounted_frames = 1; + + // Some codecs (at least wma lossless) will crash when feeding drain packets + // after EOF was signaled. 
+ if (avctx->internal->draining_done) + return AVERROR_EOF; + + if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) { + ret = avcodec_decode_video2(avctx, avctx->internal->buffer_frame, + &got_frame, pkt); + if (ret >= 0) + ret = pkt->size; + } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) { + ret = avcodec_decode_audio4(avctx, avctx->internal->buffer_frame, + &got_frame, pkt); + } else { + ret = AVERROR(EINVAL); + } + + if (ret < 0) + return ret; + + if (avctx->internal->draining && !got_frame) + avctx->internal->draining_done = 1; + + if (ret >= pkt->size) { + av_packet_unref(avctx->internal->buffer_pkt); + } else { + int consumed = ret; + + if (pkt != avctx->internal->buffer_pkt) { + av_packet_unref(avctx->internal->buffer_pkt); + if ((ret = av_packet_ref(avctx->internal->buffer_pkt, pkt)) < 0) + return ret; + } + + avctx->internal->buffer_pkt->data += consumed; + avctx->internal->buffer_pkt->size -= consumed; + avctx->internal->buffer_pkt->pts = AV_NOPTS_VALUE; + avctx->internal->buffer_pkt->dts = AV_NOPTS_VALUE; + } + + if (got_frame) + av_assert0(avctx->internal->buffer_frame->buf[0]); + + return 0; +} + +int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt) +{ + int ret; + + if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec)) + return AVERROR(EINVAL); + + if (avctx->internal->draining) + return AVERROR_EOF; + + if (!avpkt || !avpkt->size) { + avctx->internal->draining = 1; + avpkt = NULL; + + if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) + return 0; + } + + if (avctx->codec->send_packet) { + if (avpkt) { + ret = apply_param_change(avctx, (AVPacket *)avpkt); + if (ret < 0) + return ret; + } + return avctx->codec->send_packet(avctx, avpkt); + } + + // Emulation via old API. Assume avpkt is likely not refcounted, while + // decoder output is always refcounted, and avoid copying. + + if (avctx->internal->buffer_pkt->size || avctx->internal->buffer_frame->buf[0]) + return AVERROR(EAGAIN); + + // The goal is decoding the first frame of the packet without using memcpy, + // because the common case is having only 1 frame per packet (especially + // with video, but audio too). In other cases, it can't be avoided, unless + // the user is feeding refcounted packets. + return do_decode(avctx, (AVPacket *)avpkt); +} + +int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame) +{ + int ret; + + av_frame_unref(frame); + + if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec)) + return AVERROR(EINVAL); + + if (avctx->codec->receive_frame) { + if (avctx->internal->draining && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) + return AVERROR_EOF; + return avctx->codec->receive_frame(avctx, frame); + } + + // Emulation via old API. + + if (!avctx->internal->buffer_frame->buf[0]) { + if (!avctx->internal->buffer_pkt->size && !avctx->internal->draining) + return AVERROR(EAGAIN); + + while (1) { + if ((ret = do_decode(avctx, avctx->internal->buffer_pkt)) < 0) { + av_packet_unref(avctx->internal->buffer_pkt); + return ret; + } + // Some audio decoders may consume partial data without returning + // a frame (fate-wmapro-2ch). There is no way to make the caller + // call avcodec_receive_frame() again without returning a frame, + // so try to decode more in these cases. + if (avctx->internal->buffer_frame->buf[0] || + !avctx->internal->buffer_pkt->size) + break; + } + } + + if (!avctx->internal->buffer_frame->buf[0]) + return avctx->internal->draining ? 
AVERROR_EOF : AVERROR(EAGAIN); + + av_frame_move_ref(frame, avctx->internal->buffer_frame); + return 0; +} + +static int do_encode(AVCodecContext *avctx, const AVFrame *frame, int *got_packet) +{ + int ret; + *got_packet = 0; + + av_packet_unref(avctx->internal->buffer_pkt); + avctx->internal->buffer_pkt_valid = 0; + + if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) { + ret = avcodec_encode_video2(avctx, avctx->internal->buffer_pkt, + frame, got_packet); + } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) { + ret = avcodec_encode_audio2(avctx, avctx->internal->buffer_pkt, + frame, got_packet); + } else { + ret = AVERROR(EINVAL); + } + + if (ret >= 0 && *got_packet) { + // Encoders must always return ref-counted buffers. + // Side-data only packets have no data and can be not ref-counted. + av_assert0(!avctx->internal->buffer_pkt->data || avctx->internal->buffer_pkt->buf); + avctx->internal->buffer_pkt_valid = 1; + ret = 0; + } else { + av_packet_unref(avctx->internal->buffer_pkt); + } + + return ret; +} + +int attribute_align_arg avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame) +{ + if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec)) + return AVERROR(EINVAL); + + if (avctx->internal->draining) + return AVERROR_EOF; + + if (!frame) { + avctx->internal->draining = 1; + + if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) + return 0; + } + + if (avctx->codec->send_frame) + return avctx->codec->send_frame(avctx, frame); + + // Emulation via old API. Do it here instead of avcodec_receive_packet, because: + // 1. if the AVFrame is not refcounted, the copying will be much more + // expensive than copying the packet data + // 2. assume few users use non-refcounted AVPackets, so usually no copy is + // needed + + if (avctx->internal->buffer_pkt_valid) + return AVERROR(EAGAIN); + + return do_encode(avctx, frame, &(int){0}); +} + +int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt) +{ + av_packet_unref(avpkt); + + if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec)) + return AVERROR(EINVAL); + + if (avctx->codec->receive_packet) { + if (avctx->internal->draining && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) + return AVERROR_EOF; + return avctx->codec->receive_packet(avctx, avpkt); + } + + // Emulation via old API. 
+ + if (!avctx->internal->buffer_pkt_valid) { + int got_packet; + int ret; + if (!avctx->internal->draining) + return AVERROR(EAGAIN); + ret = do_encode(avctx, NULL, &got_packet); + if (ret < 0) + return ret; + if (ret >= 0 && !got_packet) + return AVERROR_EOF; + } + + av_packet_move_ref(avpkt, avctx->internal->buffer_pkt); + avctx->internal->buffer_pkt_valid = 0; + return 0; +} + av_cold int avcodec_close(AVCodecContext *avctx) { + int i; + if (avcodec_is_open(avctx)) { FramePool *pool = avctx->internal->pool; - int i; + if (HAVE_THREADS && avctx->internal->thread_ctx) ff_thread_free(avctx); if (avctx->codec && avctx->codec->close) avctx->codec->close(avctx); - avctx->coded_frame = NULL; av_frame_free(&avctx->internal->to_free); + av_frame_free(&avctx->internal->buffer_frame); + av_packet_free(&avctx->internal->buffer_pkt); for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++) av_buffer_pool_uninit(&pool->pools[i]); av_freep(&avctx->internal->pool); @@ -1781,12 +1950,25 @@ av_cold int avcodec_close(AVCodecContext *avctx) av_freep(&avctx->internal); } + for (i = 0; i < avctx->nb_coded_side_data; i++) + av_freep(&avctx->coded_side_data[i].data); + av_freep(&avctx->coded_side_data); + avctx->nb_coded_side_data = 0; + + av_buffer_unref(&avctx->hw_frames_ctx); + if (avctx->priv_data && avctx->codec && avctx->codec->priv_class) av_opt_free(avctx->priv_data); av_opt_free(avctx); av_freep(&avctx->priv_data); - if (av_codec_is_encoder(avctx->codec)) + if (av_codec_is_encoder(avctx->codec)) { av_freep(&avctx->extradata); +#if FF_API_CODED_FRAME +FF_DISABLE_DEPRECATION_WARNINGS + av_frame_free(&avctx->coded_frame); +FF_ENABLE_DEPRECATION_WARNINGS +#endif + } avctx->codec = NULL; avctx->active_thread_type = 0; @@ -1800,7 +1982,7 @@ static AVCodec *find_encdec(enum AVCodecID id, int encoder) while (p) { if ((encoder ? 
av_codec_is_encoder(p) : av_codec_is_decoder(p)) && p->id == id) { - if (p->capabilities & CODEC_CAP_EXPERIMENTAL && !experimental) { + if (p->capabilities & AV_CODEC_CAP_EXPERIMENTAL && !experimental) { experimental = p; } else return p; @@ -1895,22 +2077,15 @@ void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode) { const char *codec_name; const char *profile = NULL; - const AVCodec *p; char buf1[32]; int bitrate; int new_line = 0; AVRational display_aspect_ratio; + const AVCodecDescriptor *desc = avcodec_descriptor_get(enc->codec_id); - if (enc->codec) - p = enc->codec; - else if (encode) - p = avcodec_find_encoder(enc->codec_id); - else - p = avcodec_find_decoder(enc->codec_id); - - if (p) { - codec_name = p->name; - profile = av_get_profile_name(p, enc->profile); + if (desc) { + codec_name = desc->name; + profile = avcodec_profile_name(enc->codec_id, enc->profile); } else if (enc->codec_id == AV_CODEC_ID_MPEG2TS) { /* fake mpeg2 transport stream codec (currently not * registered) */ @@ -2034,10 +2209,10 @@ void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode) return; } if (encode) { - if (enc->flags & CODEC_FLAG_PASS1) + if (enc->flags & AV_CODEC_FLAG_PASS1) snprintf(buf + strlen(buf), buf_size - strlen(buf), ", pass 1"); - if (enc->flags & CODEC_FLAG_PASS2) + if (enc->flags & AV_CODEC_FLAG_PASS2) snprintf(buf + strlen(buf), buf_size - strlen(buf), ", pass 2"); } @@ -2061,6 +2236,21 @@ const char *av_get_profile_name(const AVCodec *codec, int profile) return NULL; } +const char *avcodec_profile_name(enum AVCodecID codec_id, int profile) +{ + const AVCodecDescriptor *desc = avcodec_descriptor_get(codec_id); + const AVProfile *p; + + if (profile == FF_PROFILE_UNKNOWN || !desc || !desc->profiles) + return NULL; + + for (p = desc->profiles; p->profile != FF_PROFILE_UNKNOWN; p++) + if (p->profile == profile) + return p->name; + + return NULL; +} + unsigned avcodec_version(void) { return LIBAVCODEC_VERSION_INT; @@ -2079,6 +2269,12 @@ const char *avcodec_license(void) void avcodec_flush_buffers(AVCodecContext *avctx) { + avctx->internal->draining = 0; + avctx->internal->draining_done = 0; + av_frame_unref(avctx->internal->buffer_frame); + av_packet_unref(avctx->internal->buffer_pkt); + avctx->internal->buffer_pkt_valid = 0; + if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) ff_thread_flush(avctx); else if (avctx->codec->flush) @@ -2105,6 +2301,7 @@ int av_get_exact_bits_per_sample(enum AVCodecID codec_id) case AV_CODEC_ID_PCM_ZORK: return 8; case AV_CODEC_ID_PCM_S16BE: + case AV_CODEC_ID_PCM_S16BE_PLANAR: case AV_CODEC_ID_PCM_S16LE: case AV_CODEC_ID_PCM_S16LE_PLANAR: case AV_CODEC_ID_PCM_U16BE: @@ -2151,21 +2348,15 @@ int av_get_bits_per_sample(enum AVCodecID codec_id) } } -int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes) +static int get_audio_frame_duration(enum AVCodecID id, int sr, int ch, int ba, + uint32_t tag, int bits_per_coded_sample, int frame_bytes) { - int id, sr, ch, ba, tag, bps; - - id = avctx->codec_id; - sr = avctx->sample_rate; - ch = avctx->channels; - ba = avctx->block_align; - tag = avctx->codec_tag; - bps = av_get_exact_bits_per_sample(avctx->codec_id); + int bps = av_get_exact_bits_per_sample(id); /* codecs with an exact constant bits per sample */ if (bps > 0 && ch > 0 && frame_bytes > 0) return (frame_bytes * 8) / (bps * ch); - bps = avctx->bits_per_coded_sample; + bps = bits_per_coded_sample; /* codecs with a fixed packet duration */ switch (id) { @@ -2269,7 +2460,7 @@ int 
av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes) if (ba > 0) { /* calc from frame_bytes, channels, and block_align */ int blocks = frame_bytes / ba; - switch (avctx->codec_id) { + switch (id) { case AV_CODEC_ID_ADPCM_IMA_WAV: return blocks * (1 + (ba - 4 * ch) / (4 * ch) * 8); case AV_CODEC_ID_ADPCM_IMA_DK3: @@ -2283,7 +2474,7 @@ int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes) if (bps > 0) { /* calc from frame_bytes, channels, and bits_per_coded_sample */ - switch (avctx->codec_id) { + switch (id) { case AV_CODEC_ID_PCM_DVD: return 2 * (frame_bytes / ((bps * 2 / 8) * ch)); case AV_CODEC_ID_PCM_BLURAY: @@ -2298,6 +2489,22 @@ int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes) return 0; } +int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes) +{ + return get_audio_frame_duration(avctx->codec_id, avctx->sample_rate, + avctx->channels, avctx->block_align, + avctx->codec_tag, avctx->bits_per_coded_sample, + frame_bytes); +} + +int av_get_audio_frame_duration2(AVCodecParameters *par, int frame_bytes) +{ + return get_audio_frame_duration(par->codec_id, par->sample_rate, + par->channels, par->block_align, + par->codec_tag, par->bits_per_coded_sample, + frame_bytes); +} + #if !HAVE_THREADS int ff_thread_init(AVCodecContext *s) { @@ -2477,20 +2684,6 @@ void ff_thread_await_progress(ThreadFrame *f, int progress, int field) #endif -enum AVMediaType avcodec_get_type(enum AVCodecID codec_id) -{ - if (codec_id <= AV_CODEC_ID_NONE) - return AVMEDIA_TYPE_UNKNOWN; - else if (codec_id < AV_CODEC_ID_FIRST_AUDIO) - return AVMEDIA_TYPE_VIDEO; - else if (codec_id < AV_CODEC_ID_FIRST_SUBTITLE) - return AVMEDIA_TYPE_AUDIO; - else if (codec_id < AV_CODEC_ID_FIRST_UNKNOWN) - return AVMEDIA_TYPE_SUBTITLE; - - return AVMEDIA_TYPE_UNKNOWN; -} - int avcodec_is_open(AVCodecContext *s) { return !!s->internal; @@ -2528,3 +2721,195 @@ const uint8_t *avpriv_find_start_code(const uint8_t *restrict p, return p + 4; } + +AVCPBProperties *av_cpb_properties_alloc(size_t *size) +{ + AVCPBProperties *props = av_mallocz(sizeof(AVCPBProperties)); + if (!props) + return NULL; + + if (size) + *size = sizeof(*props); + + props->vbv_delay = UINT64_MAX; + + return props; +} + +AVCPBProperties *ff_add_cpb_side_data(AVCodecContext *avctx) +{ + AVPacketSideData *tmp; + AVCPBProperties *props; + size_t size; + + props = av_cpb_properties_alloc(&size); + if (!props) + return NULL; + + tmp = av_realloc_array(avctx->coded_side_data, avctx->nb_coded_side_data + 1, sizeof(*tmp)); + if (!tmp) { + av_freep(&props); + return NULL; + } + + avctx->coded_side_data = tmp; + avctx->nb_coded_side_data++; + + avctx->coded_side_data[avctx->nb_coded_side_data - 1].type = AV_PKT_DATA_CPB_PROPERTIES; + avctx->coded_side_data[avctx->nb_coded_side_data - 1].data = (uint8_t*)props; + avctx->coded_side_data[avctx->nb_coded_side_data - 1].size = size; + + return props; +} + +static void codec_parameters_reset(AVCodecParameters *par) +{ + av_freep(&par->extradata); + + memset(par, 0, sizeof(*par)); + + par->codec_type = AVMEDIA_TYPE_UNKNOWN; + par->codec_id = AV_CODEC_ID_NONE; + par->format = -1; + par->field_order = AV_FIELD_UNKNOWN; + par->color_range = AVCOL_RANGE_UNSPECIFIED; + par->color_primaries = AVCOL_PRI_UNSPECIFIED; + par->color_trc = AVCOL_TRC_UNSPECIFIED; + par->color_space = AVCOL_SPC_UNSPECIFIED; + par->chroma_location = AVCHROMA_LOC_UNSPECIFIED; + par->sample_aspect_ratio = (AVRational){ 0, 1 }; +} + +AVCodecParameters *avcodec_parameters_alloc(void) +{ + 
AVCodecParameters *par = av_mallocz(sizeof(*par)); + + if (!par) + return NULL; + codec_parameters_reset(par); + return par; +} + +void avcodec_parameters_free(AVCodecParameters **ppar) +{ + AVCodecParameters *par = *ppar; + + if (!par) + return; + codec_parameters_reset(par); + + av_freep(ppar); +} + +int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src) +{ + codec_parameters_reset(dst); + memcpy(dst, src, sizeof(*dst)); + + dst->extradata = NULL; + dst->extradata_size = 0; + if (src->extradata) { + dst->extradata = av_mallocz(src->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE); + if (!dst->extradata) + return AVERROR(ENOMEM); + memcpy(dst->extradata, src->extradata, src->extradata_size); + dst->extradata_size = src->extradata_size; + } + + return 0; +} + +int avcodec_parameters_from_context(AVCodecParameters *par, + const AVCodecContext *codec) +{ + codec_parameters_reset(par); + + par->codec_type = codec->codec_type; + par->codec_id = codec->codec_id; + par->codec_tag = codec->codec_tag; + + par->bit_rate = codec->bit_rate; + par->bits_per_coded_sample = codec->bits_per_coded_sample; + par->profile = codec->profile; + par->level = codec->level; + + switch (par->codec_type) { + case AVMEDIA_TYPE_VIDEO: + par->format = codec->pix_fmt; + par->width = codec->width; + par->height = codec->height; + par->field_order = codec->field_order; + par->color_range = codec->color_range; + par->color_primaries = codec->color_primaries; + par->color_trc = codec->color_trc; + par->color_space = codec->colorspace; + par->chroma_location = codec->chroma_sample_location; + par->sample_aspect_ratio = codec->sample_aspect_ratio; + break; + case AVMEDIA_TYPE_AUDIO: + par->format = codec->sample_fmt; + par->channel_layout = codec->channel_layout; + par->channels = codec->channels; + par->sample_rate = codec->sample_rate; + par->block_align = codec->block_align; + par->initial_padding = codec->initial_padding; + break; + } + + if (codec->extradata) { + par->extradata = av_mallocz(codec->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE); + if (!par->extradata) + return AVERROR(ENOMEM); + memcpy(par->extradata, codec->extradata, codec->extradata_size); + par->extradata_size = codec->extradata_size; + } + + return 0; +} + +int avcodec_parameters_to_context(AVCodecContext *codec, + const AVCodecParameters *par) +{ + codec->codec_type = par->codec_type; + codec->codec_id = par->codec_id; + codec->codec_tag = par->codec_tag; + + codec->bit_rate = par->bit_rate; + codec->bits_per_coded_sample = par->bits_per_coded_sample; + codec->profile = par->profile; + codec->level = par->level; + + switch (par->codec_type) { + case AVMEDIA_TYPE_VIDEO: + codec->pix_fmt = par->format; + codec->width = par->width; + codec->height = par->height; + codec->field_order = par->field_order; + codec->color_range = par->color_range; + codec->color_primaries = par->color_primaries; + codec->color_trc = par->color_trc; + codec->colorspace = par->color_space; + codec->chroma_sample_location = par->chroma_location; + codec->sample_aspect_ratio = par->sample_aspect_ratio; + break; + case AVMEDIA_TYPE_AUDIO: + codec->sample_fmt = par->format; + codec->channel_layout = par->channel_layout; + codec->channels = par->channels; + codec->sample_rate = par->sample_rate; + codec->block_align = par->block_align; + codec->initial_padding = par->initial_padding; + break; + } + + if (par->extradata) { + av_freep(&codec->extradata); + codec->extradata = av_mallocz(par->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE); + if 
(!codec->extradata) + return AVERROR(ENOMEM); + memcpy(codec->extradata, par->extradata, par->extradata_size); + codec->extradata_size = par->extradata_size; + } + + return 0; +}
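
Note (not part of the patch): the hunks above introduce the decoupled avcodec_send_packet()/avcodec_receive_frame() and avcodec_send_frame()/avcodec_receive_packet() entry points, emulating them on top of the legacy decode2/encode2 callbacks whenever a codec does not provide its own send/receive implementations, and they wire the draining state into avcodec_flush_buffers(). For reference only, a minimal caller-side decode loop against the new API might look like the sketch below. handle_frame() is a hypothetical application callback, not anything defined by this change, and error handling is reduced to the bare minimum.

/*
 * Illustrative sketch of the send/receive decode API added by this patch.
 * Feed one packet, then drain every frame the decoder has ready before
 * supplying more input; pass pkt == NULL once to start draining at EOF.
 */
#include <libavcodec/avcodec.h>

static int decode(AVCodecContext *dec_ctx, AVFrame *frame,
                  const AVPacket *pkt, void (*handle_frame)(const AVFrame *))
{
    int ret;

    /* Submit one packet (or NULL to signal end of stream). */
    ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0)
        return ret;

    /* Pull all frames that are currently available. */
    while (ret >= 0) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;          /* decoder needs more input, or is fully drained */
        else if (ret < 0)
            return ret;        /* a real decoding error */

        handle_frame(frame);   /* hypothetical per-frame processing */
        av_frame_unref(frame);
    }
    return 0;
}

Draining mirrors the logic added in avcodec_send_packet()/do_decode() above: after the last real packet, call decode() once with pkt == NULL and keep calling avcodec_receive_frame() until it returns AVERROR_EOF.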