X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fdecode.c;h=3ef1ece25bf77c6380d2865d81ea18ac44be44ea;hb=6223d77578df8146596c878c96e243095fb9e201;hp=a32ff2fcd395c877e62eec10c3b8107d6102cc85;hpb=47e12966b75490cfa5fb8ed65a48a9a3d84a7bce;p=ffmpeg diff --git a/libavcodec/decode.c b/libavcodec/decode.c index a32ff2fcd39..3ef1ece25bf 100644 --- a/libavcodec/decode.c +++ b/libavcodec/decode.c @@ -41,13 +41,33 @@ #include "avcodec.h" #include "bytestream.h" #include "decode.h" -#include "hwaccel.h" +#include "hwconfig.h" #include "internal.h" +#include "packet_internal.h" #include "thread.h" +typedef struct FramePool { + /** + * Pools for each data plane. For audio all the planes have the same size, + * so only pools[0] is used. + */ + AVBufferPool *pools[4]; + + /* + * Pool parameters + */ + int format; + int width, height; + int stride_align[AV_NUM_DATA_POINTERS]; + int linesize[4]; + int planes; + int channels; + int samples; +} FramePool; + static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt) { - int size = 0, ret; + int size, ret; const uint8_t *data; uint32_t flags; int64_t val; @@ -123,15 +143,24 @@ fail2: return 0; } -static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt) +#define IS_EMPTY(pkt) (!(pkt)->data) + +static int extract_packet_props(AVCodecInternal *avci, AVPacket *pkt) { int ret = 0; - av_packet_unref(avci->last_pkt_props); - if (pkt) { - ret = av_packet_copy_props(avci->last_pkt_props, pkt); - if (!ret) - avci->last_pkt_props->size = pkt->size; // HACK: Needed for ff_decode_frame_props(). + ret = avpriv_packet_list_put(&avci->pkt_props, &avci->pkt_props_tail, pkt, + av_packet_copy_props, 0); + if (ret < 0) + return ret; + avci->pkt_props_tail->pkt.size = pkt->size; // HACK: Needed for ff_decode_frame_props(). + avci->pkt_props_tail->pkt.data = (void*)1; // HACK: Needed for IS_EMPTY(). + + if (IS_EMPTY(avci->last_pkt_props)) { + ret = avpriv_packet_list_get(&avci->pkt_props, + &avci->pkt_props_tail, + avci->last_pkt_props); + av_assert0(ret != AVERROR(EAGAIN)); } return ret; } @@ -185,147 +214,37 @@ static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame) int ff_decode_bsfs_init(AVCodecContext *avctx) { AVCodecInternal *avci = avctx->internal; - DecodeFilterContext *s = &avci->filter; - const char *bsfs_str; int ret; - if (s->nb_bsfs) + if (avci->bsf) return 0; - bsfs_str = avctx->codec->bsfs ? avctx->codec->bsfs : "null"; - while (bsfs_str && *bsfs_str) { - AVBSFContext **tmp; - const AVBitStreamFilter *filter; - char *bsf, *bsf_options_str, *bsf_name; - - bsf = av_get_token(&bsfs_str, ","); - if (!bsf) { - ret = AVERROR(ENOMEM); - goto fail; - } - bsf_name = av_strtok(bsf, "=", &bsf_options_str); - if (!bsf_name) { - av_freep(&bsf); - ret = AVERROR(ENOMEM); - goto fail; - } - - filter = av_bsf_get_by_name(bsf_name); - if (!filter) { - av_log(avctx, AV_LOG_ERROR, "A non-existing bitstream filter %s " - "requested by a decoder. 
This is a bug, please report it.\n", - bsf_name); - av_freep(&bsf); + ret = av_bsf_list_parse_str(avctx->codec->bsfs, &avci->bsf); + if (ret < 0) { + av_log(avctx, AV_LOG_ERROR, "Error parsing decoder bitstream filters '%s': %s\n", avctx->codec->bsfs, av_err2str(ret)); + if (ret != AVERROR(ENOMEM)) ret = AVERROR_BUG; - goto fail; - } - - tmp = av_realloc_array(s->bsfs, s->nb_bsfs + 1, sizeof(*s->bsfs)); - if (!tmp) { - av_freep(&bsf); - ret = AVERROR(ENOMEM); - goto fail; - } - s->bsfs = tmp; - s->nb_bsfs++; - - ret = av_bsf_alloc(filter, &s->bsfs[s->nb_bsfs - 1]); - if (ret < 0) { - av_freep(&bsf); - goto fail; - } - - if (s->nb_bsfs == 1) { - /* We do not currently have an API for passing the input timebase into decoders, - * but no filters used here should actually need it. - * So we make up some plausible-looking number (the MPEG 90kHz timebase) */ - s->bsfs[s->nb_bsfs - 1]->time_base_in = (AVRational){ 1, 90000 }; - ret = avcodec_parameters_from_context(s->bsfs[s->nb_bsfs - 1]->par_in, - avctx); - } else { - s->bsfs[s->nb_bsfs - 1]->time_base_in = s->bsfs[s->nb_bsfs - 2]->time_base_out; - ret = avcodec_parameters_copy(s->bsfs[s->nb_bsfs - 1]->par_in, - s->bsfs[s->nb_bsfs - 2]->par_out); - } - if (ret < 0) { - av_freep(&bsf); - goto fail; - } - - if (bsf_options_str && filter->priv_class) { - const AVOption *opt = av_opt_next(s->bsfs[s->nb_bsfs - 1]->priv_data, NULL); - const char * shorthand[2] = {NULL}; - - if (opt) - shorthand[0] = opt->name; - - ret = av_opt_set_from_string(s->bsfs[s->nb_bsfs - 1]->priv_data, bsf_options_str, shorthand, "=", ":"); - if (ret < 0) { - if (ret != AVERROR(ENOMEM)) { - av_log(avctx, AV_LOG_ERROR, "Invalid options for bitstream filter %s " - "requested by the decoder. This is a bug, please report it.\n", - bsf_name); - ret = AVERROR_BUG; - } - av_freep(&bsf); - goto fail; - } - } - av_freep(&bsf); + goto fail; + } - ret = av_bsf_init(s->bsfs[s->nb_bsfs - 1]); - if (ret < 0) - goto fail; + /* We do not currently have an API for passing the input timebase into decoders, + * but no filters used here should actually need it. + * So we make up some plausible-looking number (the MPEG 90kHz timebase) */ + avci->bsf->time_base_in = (AVRational){ 1, 90000 }; + ret = avcodec_parameters_from_context(avci->bsf->par_in, avctx); + if (ret < 0) + goto fail; - if (*bsfs_str) - bsfs_str++; - } + ret = av_bsf_init(avci->bsf); + if (ret < 0) + goto fail; return 0; fail: - ff_decode_bsfs_uninit(avctx); + av_bsf_free(&avci->bsf); return ret; } -/* try to get one output packet from the filter chain */ -static int bsfs_poll(AVCodecContext *avctx, AVPacket *pkt) -{ - DecodeFilterContext *s = &avctx->internal->filter; - int idx, ret; - - /* start with the last filter in the chain */ - idx = s->nb_bsfs - 1; - while (idx >= 0) { - /* request a packet from the currently selected filter */ - ret = av_bsf_receive_packet(s->bsfs[idx], pkt); - if (ret == AVERROR(EAGAIN)) { - /* no packets available, try the next filter up the chain */ - ret = 0; - idx--; - continue; - } else if (ret < 0 && ret != AVERROR_EOF) { - return ret; - } - - /* got a packet or EOF -- pass it to the caller or to the next filter - * down the chain */ - if (idx == s->nb_bsfs - 1) { - return ret; - } else { - idx++; - ret = av_bsf_send_packet(s->bsfs[idx], ret < 0 ? 
NULL : pkt); - if (ret < 0) { - av_log(avctx, AV_LOG_ERROR, - "Error pre-processing a packet before decoding\n"); - av_packet_unref(pkt); - return ret; - } - } - } - - return AVERROR(EAGAIN); -} - int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt) { AVCodecInternal *avci = avctx->internal; @@ -334,7 +253,7 @@ int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt) if (avci->draining) return AVERROR_EOF; - ret = bsfs_poll(avctx, pkt); + ret = av_bsf_receive_packet(avci->bsf, pkt); if (ret == AVERROR_EOF) avci->draining = 1; if (ret < 0) @@ -399,7 +318,7 @@ static int64_t guess_correct_pts(AVCodecContext *ctx, * returning any output, so this function needs to be called in a loop until it * returns EAGAIN. **/ -static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame) +static inline int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame) { AVCodecInternal *avci = avctx->internal; DecodeSimpleContext *ds = &avci->ds; @@ -480,32 +399,32 @@ static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame) side= av_packet_get_side_data(avci->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size); if(side && side_size>=10) { - avctx->internal->skip_samples = AV_RL32(side) * avctx->internal->skip_samples_multiplier; + avci->skip_samples = AV_RL32(side) * avci->skip_samples_multiplier; discard_padding = AV_RL32(side + 4); av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n", - avctx->internal->skip_samples, (int)discard_padding); + avci->skip_samples, (int)discard_padding); skip_reason = AV_RL8(side + 8); discard_reason = AV_RL8(side + 9); } if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame && !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) { - avctx->internal->skip_samples = FFMAX(0, avctx->internal->skip_samples - frame->nb_samples); + avci->skip_samples = FFMAX(0, avci->skip_samples - frame->nb_samples); got_frame = 0; } - if (avctx->internal->skip_samples > 0 && got_frame && + if (avci->skip_samples > 0 && got_frame && !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) { - if(frame->nb_samples <= avctx->internal->skip_samples){ + if(frame->nb_samples <= avci->skip_samples){ got_frame = 0; - avctx->internal->skip_samples -= frame->nb_samples; + avci->skip_samples -= frame->nb_samples; av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n", - avctx->internal->skip_samples); + avci->skip_samples); } else { - av_samples_copy(frame->extended_data, frame->extended_data, 0, avctx->internal->skip_samples, - frame->nb_samples - avctx->internal->skip_samples, avctx->channels, frame->format); + av_samples_copy(frame->extended_data, frame->extended_data, 0, avci->skip_samples, + frame->nb_samples - avci->skip_samples, avctx->channels, frame->format); if(avctx->pkt_timebase.num && avctx->sample_rate) { - int64_t diff_ts = av_rescale_q(avctx->internal->skip_samples, + int64_t diff_ts = av_rescale_q(avci->skip_samples, (AVRational){1, avctx->sample_rate}, avctx->pkt_timebase); if(frame->pts!=AV_NOPTS_VALUE) @@ -524,9 +443,9 @@ FF_ENABLE_DEPRECATION_WARNINGS av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n"); } av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n", - avctx->internal->skip_samples, frame->nb_samples); - frame->nb_samples -= avctx->internal->skip_samples; - avctx->internal->skip_samples = 0; + avci->skip_samples, frame->nb_samples); + frame->nb_samples -= avci->skip_samples; + avci->skip_samples = 0; } } @@ -552,11 +471,11 @@ FF_ENABLE_DEPRECATION_WARNINGS if ((avctx->flags2 & 
AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) { AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10); if (fside) { - AV_WL32(fside->data, avctx->internal->skip_samples); + AV_WL32(fside->data, avci->skip_samples); AV_WL32(fside->data + 4, discard_padding); AV_WL8(fside->data + 8, skip_reason); AV_WL8(fside->data + 9, discard_reason); - avctx->internal->skip_samples = 0; + avci->skip_samples = 0; } } } @@ -581,7 +500,7 @@ FF_ENABLE_DEPRECATION_WARNINGS /* do not stop draining when actual_got_frame != 0 or ret < 0 */ /* got_frame == 0 but actual_got_frame != 0 when frame is discarded */ - if (avctx->internal->draining && !actual_got_frame) { + if (avci->draining && !actual_got_frame) { if (ret < 0) { /* prevent infinite loop if a decoder wrongly always return error on draining */ /* reasonable nb_errors_max = maximum b frames + thread count */ @@ -603,6 +522,7 @@ FF_ENABLE_DEPRECATION_WARNINGS if (ret >= pkt->size || ret < 0) { av_packet_unref(pkt); + av_packet_unref(avci->last_pkt_props); } else { int consumed = ret; @@ -641,9 +561,11 @@ static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame) av_assert0(!frame->buf[0]); - if (avctx->codec->receive_frame) + if (avctx->codec->receive_frame) { ret = avctx->codec->receive_frame(avctx, frame); - else + if (ret != AVERROR(EAGAIN)) + av_packet_unref(avci->last_pkt_props); + } else ret = decode_simple_receive_frame(avctx, frame); if (ret == AVERROR_EOF) @@ -695,7 +617,7 @@ int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacke return ret; } - ret = av_bsf_send_packet(avci->filter.bsfs[0], avci->buffer_pkt); + ret = av_bsf_send_packet(avci->bsf, avci->buffer_pkt); if (ret < 0) { av_packet_unref(avci->buffer_pkt); return ret; @@ -740,7 +662,7 @@ static int apply_cropping(AVCodecContext *avctx, AVFrame *frame) int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame) { AVCodecInternal *avci = avctx->internal; - int ret; + int ret, changed; av_frame_unref(frame); @@ -765,6 +687,51 @@ int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *fr avctx->frame_number++; + if (avctx->flags & AV_CODEC_FLAG_DROPCHANGED) { + + if (avctx->frame_number == 1) { + avci->initial_format = frame->format; + switch(avctx->codec_type) { + case AVMEDIA_TYPE_VIDEO: + avci->initial_width = frame->width; + avci->initial_height = frame->height; + break; + case AVMEDIA_TYPE_AUDIO: + avci->initial_sample_rate = frame->sample_rate ? 
frame->sample_rate : + avctx->sample_rate; + avci->initial_channels = frame->channels; + avci->initial_channel_layout = frame->channel_layout; + break; + } + } + + if (avctx->frame_number > 1) { + changed = avci->initial_format != frame->format; + + switch(avctx->codec_type) { + case AVMEDIA_TYPE_VIDEO: + changed |= avci->initial_width != frame->width || + avci->initial_height != frame->height; + break; + case AVMEDIA_TYPE_AUDIO: + changed |= avci->initial_sample_rate != frame->sample_rate || + avci->initial_sample_rate != avctx->sample_rate || + avci->initial_channels != frame->channels || + avci->initial_channel_layout != frame->channel_layout; + break; + } + + if (changed) { + avci->changed_frames_dropped++; + av_log(avctx, AV_LOG_INFO, "dropped changed frame #%d pts %"PRId64 + " drop count: %d \n", + avctx->frame_number, frame->pts, + avci->changed_frames_dropped); + av_frame_unref(frame); + return AVERROR_INPUT_CHANGED; + } + } + } return 0; } @@ -782,7 +749,6 @@ static int compat_decode(AVCodecContext *avctx, AVFrame *frame, } *got_frame = 0; - avci->compat_decode = 1; if (avci->compat_decode_partial_size > 0 && avci->compat_decode_partial_size != pkt->size) { @@ -1460,23 +1426,70 @@ int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt) return ret; } +static void frame_pool_free(void *opaque, uint8_t *data) +{ + FramePool *pool = (FramePool*)data; + int i; + + for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++) + av_buffer_pool_uninit(&pool->pools[i]); + + av_freep(&data); +} + +static AVBufferRef *frame_pool_alloc(void) +{ + FramePool *pool = av_mallocz(sizeof(*pool)); + AVBufferRef *buf; + + if (!pool) + return NULL; + + buf = av_buffer_create((uint8_t*)pool, sizeof(*pool), + frame_pool_free, NULL, 0); + if (!buf) { + av_freep(&pool); + return NULL; + } + + return buf; +} + static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame) { - FramePool *pool = avctx->internal->pool; - int i, ret; + FramePool *pool = avctx->internal->pool ? + (FramePool*)avctx->internal->pool->data : NULL; + AVBufferRef *pool_buf; + int i, ret, ch, planes; + + if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) { + int planar = av_sample_fmt_is_planar(frame->format); + ch = frame->channels; + planes = planar ? 
ch : 1; + } + + if (pool && pool->format == frame->format) { + if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && + pool->width == frame->width && pool->height == frame->height) + return 0; + if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && pool->planes == planes && + pool->channels == ch && frame->nb_samples == pool->samples) + return 0; + } + + pool_buf = frame_pool_alloc(); + if (!pool_buf) + return AVERROR(ENOMEM); + pool = (FramePool*)pool_buf->data; switch (avctx->codec_type) { case AVMEDIA_TYPE_VIDEO: { - uint8_t *data[4]; int linesize[4]; - int size[4] = { 0 }; int w = frame->width; int h = frame->height; - int tmpsize, unaligned; - - if (pool->format == frame->format && - pool->width == frame->width && pool->height == frame->height) - return 0; + int unaligned; + ptrdiff_t linesize1[4]; + size_t size[4]; avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align); @@ -1485,7 +1498,7 @@ static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame) // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2 ret = av_image_fill_linesizes(linesize, avctx->pix_fmt, w); if (ret < 0) - return ret; + goto fail; // increase alignment of w for next try (rhs gives the lowest bit set in w) w += w & ~(w - 1); @@ -1494,19 +1507,19 @@ static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame) unaligned |= linesize[i] % pool->stride_align[i]; } while (unaligned); - tmpsize = av_image_fill_pointers(data, avctx->pix_fmt, h, - NULL, linesize); - if (tmpsize < 0) - return tmpsize; - - for (i = 0; i < 3 && data[i + 1]; i++) - size[i] = data[i + 1] - data[i]; - size[i] = tmpsize - (data[i] - data[0]); + for (i = 0; i < 4; i++) + linesize1[i] = linesize[i]; + ret = av_image_fill_plane_sizes(size, avctx->pix_fmt, h, linesize1); + if (ret < 0) + goto fail; for (i = 0; i < 4; i++) { - av_buffer_pool_uninit(&pool->pools[i]); pool->linesize[i] = linesize[i]; if (size[i]) { + if (size[i] > INT_MAX - (16 + STRIDE_ALIGN - 1)) { + ret = AVERROR(EINVAL); + goto fail; + } pool->pools[i] = av_buffer_pool_init(size[i] + 16 + STRIDE_ALIGN - 1, CONFIG_MEMORY_POISONING ? NULL : @@ -1524,15 +1537,6 @@ static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame) break; } case AVMEDIA_TYPE_AUDIO: { - int ch = frame->channels; //av_get_channel_layout_nb_channels(frame->channel_layout); - int planar = av_sample_fmt_is_planar(frame->format); - int planes = planar ? 
ch : 1; - - if (pool->format == frame->format && pool->planes == planes && - pool->channels == ch && frame->nb_samples == pool->samples) - return 0; - - av_buffer_pool_uninit(&pool->pools[0]); ret = av_samples_get_buffer_size(&pool->linesize[0], ch, frame->nb_samples, frame->format, 0); if (ret < 0) @@ -1552,19 +1556,19 @@ static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame) } default: av_assert0(0); } + + av_buffer_unref(&avctx->internal->pool); + avctx->internal->pool = pool_buf; + return 0; fail: - for (i = 0; i < 4; i++) - av_buffer_pool_uninit(&pool->pools[i]); - pool->format = -1; - pool->planes = pool->channels = pool->samples = 0; - pool->width = pool->height = 0; + av_buffer_unref(&pool_buf); return ret; } static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame) { - FramePool *pool = avctx->internal->pool; + FramePool *pool = (FramePool*)avctx->internal->pool->data; int planes = pool->planes; int i; @@ -1609,7 +1613,7 @@ fail: static int video_get_buffer(AVCodecContext *s, AVFrame *pic) { - FramePool *pool = s->internal->pool; + FramePool *pool = (FramePool*)s->internal->pool->data; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pic->format); int i; @@ -1692,7 +1696,7 @@ static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame) int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame) { - const AVPacket *pkt = avctx->internal->last_pkt_props; + AVPacket *pkt = avctx->internal->last_pkt_props; int i; static const struct { enum AVPacketSideDataType packet; @@ -1706,8 +1710,15 @@ int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame) { AV_PKT_DATA_MASTERING_DISPLAY_METADATA, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA }, { AV_PKT_DATA_CONTENT_LIGHT_LEVEL, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL }, { AV_PKT_DATA_A53_CC, AV_FRAME_DATA_A53_CC }, + { AV_PKT_DATA_ICC_PROFILE, AV_FRAME_DATA_ICC_PROFILE }, + { AV_PKT_DATA_S12M_TIMECODE, AV_FRAME_DATA_S12M_TIMECODE }, }; + if (IS_EMPTY(pkt)) + avpriv_packet_list_get(&avctx->internal->pkt_props, + &avctx->internal->pkt_props_tail, + pkt); + if (pkt) { frame->pts = pkt->pts; #if FF_API_PKT_PTS @@ -1858,7 +1869,7 @@ int ff_attach_decode_data(AVFrame *frame) return 0; } -static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags) +int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags) { const AVHWAccel *hwaccel = avctx->hwaccel; int override_dimensions = 1; @@ -1867,7 +1878,8 @@ static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags) if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) { if ((ret = av_image_check_size2(FFALIGN(avctx->width, STRIDE_ALIGN), avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx)) < 0 || avctx->pix_fmt<0) { av_log(avctx, AV_LOG_ERROR, "video_get_buffer: image parameters invalid\n"); - return AVERROR(EINVAL); + ret = AVERROR(EINVAL); + goto fail; } if (frame->width <= 0 || frame->height <= 0) { @@ -1878,12 +1890,19 @@ static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags) if (frame->data[0] || frame->data[1] || frame->data[2] || frame->data[3]) { av_log(avctx, AV_LOG_ERROR, "pic->data[*]!=NULL in get_buffer_internal\n"); - return AVERROR(EINVAL); + ret = AVERROR(EINVAL); + goto fail; + } + } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) { + if (frame->nb_samples * (int64_t)avctx->channels > avctx->max_samples) { + av_log(avctx, AV_LOG_ERROR, "samples per frame %d, exceeds max_samples %"PRId64"\n", frame->nb_samples, avctx->max_samples); + ret = AVERROR(EINVAL); + goto fail; 
} } ret = ff_decode_frame_props(avctx, frame); if (ret < 0) - return ret; + goto fail; if (hwaccel) { if (hwaccel->alloc_frame) { @@ -1895,13 +1914,13 @@ static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags) ret = avctx->get_buffer2(avctx, frame, flags); if (ret < 0) - goto end; + goto fail; validate_avframe_allocation(avctx, frame); ret = ff_attach_decode_data(frame); if (ret < 0) - goto end; + goto fail; end: if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions && @@ -1910,23 +1929,16 @@ end: frame->height = avctx->height; } - if (ret < 0) - av_frame_unref(frame); - - return ret; -} - -int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags) -{ - int ret = get_buffer_internal(avctx, frame, flags); +fail: if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); - frame->width = frame->height = 0; + av_frame_unref(frame); } + return ret; } -static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame) +static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags) { AVFrame *tmp; int ret; @@ -1942,7 +1954,7 @@ static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame) if (!frame->data[0]) return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF); - if (av_frame_is_writable(frame)) + if ((flags & FF_REGET_BUFFER_FLAG_READONLY) || av_frame_is_writable(frame)) return ff_decode_frame_props(avctx, frame); tmp = av_frame_alloc(); @@ -1963,55 +1975,10 @@ static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame) return 0; } -int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame) +int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags) { - int ret = reget_buffer_internal(avctx, frame); + int ret = reget_buffer_internal(avctx, frame, flags); if (ret < 0) av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); return ret; } - -static void bsfs_flush(AVCodecContext *avctx) -{ - DecodeFilterContext *s = &avctx->internal->filter; - - for (int i = 0; i < s->nb_bsfs; i++) - av_bsf_flush(s->bsfs[i]); -} - -void avcodec_flush_buffers(AVCodecContext *avctx) -{ - avctx->internal->draining = 0; - avctx->internal->draining_done = 0; - avctx->internal->nb_draining_errors = 0; - av_frame_unref(avctx->internal->buffer_frame); - av_frame_unref(avctx->internal->compat_decode_frame); - av_packet_unref(avctx->internal->buffer_pkt); - avctx->internal->buffer_pkt_valid = 0; - - av_packet_unref(avctx->internal->ds.in_pkt); - - if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) - ff_thread_flush(avctx); - else if (avctx->codec->flush) - avctx->codec->flush(avctx); - - avctx->pts_correction_last_pts = - avctx->pts_correction_last_dts = INT64_MIN; - - bsfs_flush(avctx); - - if (!avctx->refcounted_frames) - av_frame_unref(avctx->internal->to_free); -} - -void ff_decode_bsfs_uninit(AVCodecContext *avctx) -{ - DecodeFilterContext *s = &avctx->internal->filter; - int i; - - for (i = 0; i < s->nb_bsfs; i++) - av_bsf_free(&s->bsfs[i]); - av_freep(&s->bsfs); - s->nb_bsfs = 0; -}
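
The AV_CODEC_FLAG_DROPCHANGED handling added to avcodec_receive_frame() above surfaces to callers as AVERROR_INPUT_CHANGED. A minimal caller-side sketch of how that return code might be consumed (not part of this patch; only the public libavcodec/libavutil identifiers are assumed to exist, and the recovery policy shown is purely illustrative):

#include <libavcodec/avcodec.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>

/* Drain the frames currently buffered in `dec`.  If AV_CODEC_FLAG_DROPCHANGED
 * was set on the context before avcodec_open2(), frames whose format,
 * dimensions or channel layout no longer match the first decoded frame are
 * dropped inside avcodec_receive_frame(), which then returns
 * AVERROR_INPUT_CHANGED instead of a frame. */
static int drain_frames(AVCodecContext *dec, AVFrame *frame)
{
    int ret;

    while ((ret = avcodec_receive_frame(dec, frame)) >= 0) {
        /* ... consume the decoded frame ... */
        av_frame_unref(frame);
    }

    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        return 0;   /* need more input / end of stream */
    if (ret == AVERROR_INPUT_CHANGED)
        return 0;   /* changed frame was dropped; keep decoding or reinitialize */
    return ret;     /* genuine decoding error */
}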