*width = FFALIGN(*width, w_align);
*height = FFALIGN(*height, h_align);
- if (s->codec_id == AV_CODEC_ID_H264 || s->lowres)
+ if (s->codec_id == AV_CODEC_ID_H264 || s->lowres) {
// some of the optimized chroma MC reads one line too much
// which is also done in mpeg decoders with lowres > 0
*height += 2;
+ // H.264 uses edge emulation for out-of-frame motion vectors; for this
+ // it requires a temporary area large enough to hold a 21x21 block.
+ // Increasing the width ensures that the temporary area is large enough;
+ // the next rounded-up width is 32.
+ *width = FFMAX(*width, 32);
+ }
+
for (i = 0; i < 4; i++)
linesize_align[i] = STRIDE_ALIGN;
}
}
}
+/**
+ * Copy packet-level string metadata into the frame's metadata dictionary.
+ *
+ * Reads the AV_PKT_DATA_STRINGS_METADATA side data from avpkt and unpacks
+ * it into the AVDictionary attached to frame.
+ *
+ * @return 0 on success (presumably also when the packet carries no such
+ *         side data — av_packet_get_side_data() then yields NULL/0;
+ *         NOTE(review): confirm av_packet_unpack_dictionary() accepts
+ *         NULL input), a negative AVERROR code on failure.
+ */
+static int add_metadata_from_side_data(AVPacket *avpkt, AVFrame *frame)
+{
+    int size;
+    const uint8_t *side_metadata;
+
+    AVDictionary **frame_md = avpriv_frame_get_metadatap(frame);
+
+    side_metadata = av_packet_get_side_data(avpkt,
+                                            AV_PKT_DATA_STRINGS_METADATA, &size);
+    return av_packet_unpack_dictionary(side_metadata, size, frame_md);
+}
+
int ff_init_buffer_info(AVCodecContext *avctx, AVFrame *frame)
{
AVPacket *pkt = avctx->internal->pkt;
memcpy(frame_sd->data, packet_sd, size);
}
}
+ add_metadata_from_side_data(pkt, frame);
} else {
frame->pkt_pts = AV_NOPTS_VALUE;
av_frame_set_pkt_pos (frame, -1);
ret = ff_decode_frame_props(avctx, frame);
if (ret < 0)
return ret;
- if ((ret = ff_init_buffer_info(avctx, frame)) < 0)
- return ret;
if (hwaccel) {
if (hwaccel->alloc_frame) {
int size = 0, ret;
const uint8_t *data;
uint32_t flags;
+ int64_t val;
data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size);
if (!data)
if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
if (size < 4)
goto fail;
- avctx->channels = bytestream_get_le32(&data);
+ val = bytestream_get_le32(&data);
+ if (val <= 0 || val > INT_MAX) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid channel count");
+ return AVERROR_INVALIDDATA;
+ }
+ avctx->channels = val;
size -= 4;
}
if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
if (size < 4)
goto fail;
- avctx->sample_rate = bytestream_get_le32(&data);
+ val = bytestream_get_le32(&data);
+ if (val <= 0 || val > INT_MAX) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid sample rate");
+ return AVERROR_INVALIDDATA;
+ }
+ avctx->sample_rate = val;
size -= 4;
}
if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
return AVERROR_INVALIDDATA;
}
-static int add_metadata_from_side_data(AVCodecContext *avctx, AVFrame *frame)
-{
- int size;
- const uint8_t *side_metadata;
-
- AVDictionary **frame_md = avpriv_frame_get_metadatap(frame);
-
- side_metadata = av_packet_get_side_data(avctx->internal->pkt,
- AV_PKT_DATA_STRINGS_METADATA, &size);
- return av_packet_unpack_dictionary(side_metadata, size, frame_md);
-}
-
static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
{
int ret;
if (picture->format == AV_PIX_FMT_NONE) picture->format = avctx->pix_fmt;
}
}
- add_metadata_from_side_data(avctx, picture);
fail:
emms_c(); //needed to avoid an emms_c() call before every return;
frame->pkt_dts = avpkt->dts;
}
if (ret >= 0 && *got_frame_ptr) {
- add_metadata_from_side_data(avctx, frame);
avctx->frame_number++;
av_frame_set_best_effort_timestamp(frame,
guess_correct_pts(avctx,