X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fh264.c;h=86d453bba7835bedcffbbbaf4f6f37d34fb8bd7c;hb=7dc827b7091b1ca85508b536fa776b49f363b0f4;hp=38e471d998e12cdf1e591a6849ed9b4e48ecad34;hpb=fce99322b0cc0b9e3ce71be0870368e618e54ffa;p=ffmpeg diff --git a/libavcodec/h264.c b/libavcodec/h264.c index 38e471d998e..86d453bba78 100644 --- a/libavcodec/h264.c +++ b/libavcodec/h264.c @@ -25,6 +25,7 @@ * @author Michael Niedermayer */ +#include "libavutil/avassert.h" #include "libavutil/imgutils.h" #include "internal.h" #include "cabac.h" @@ -42,10 +43,7 @@ #include "rectangle.h" #include "svq3.h" #include "thread.h" -#include "vdpau_internal.h" -#include "libavutil/avassert.h" -// #undef NDEBUG #include const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 }; @@ -62,6 +60,93 @@ static const uint8_t div6[QP_MAX_NUM + 1] = { 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, }; +static const uint8_t field_scan[16] = { + 0 + 0 * 4, 0 + 1 * 4, 1 + 0 * 4, 0 + 2 * 4, + 0 + 3 * 4, 1 + 1 * 4, 1 + 2 * 4, 1 + 3 * 4, + 2 + 0 * 4, 2 + 1 * 4, 2 + 2 * 4, 2 + 3 * 4, + 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4, +}; + +static const uint8_t field_scan8x8[64] = { + 0 + 0 * 8, 0 + 1 * 8, 0 + 2 * 8, 1 + 0 * 8, + 1 + 1 * 8, 0 + 3 * 8, 0 + 4 * 8, 1 + 2 * 8, + 2 + 0 * 8, 1 + 3 * 8, 0 + 5 * 8, 0 + 6 * 8, + 0 + 7 * 8, 1 + 4 * 8, 2 + 1 * 8, 3 + 0 * 8, + 2 + 2 * 8, 1 + 5 * 8, 1 + 6 * 8, 1 + 7 * 8, + 2 + 3 * 8, 3 + 1 * 8, 4 + 0 * 8, 3 + 2 * 8, + 2 + 4 * 8, 2 + 5 * 8, 2 + 6 * 8, 2 + 7 * 8, + 3 + 3 * 8, 4 + 1 * 8, 5 + 0 * 8, 4 + 2 * 8, + 3 + 4 * 8, 3 + 5 * 8, 3 + 6 * 8, 3 + 7 * 8, + 4 + 3 * 8, 5 + 1 * 8, 6 + 0 * 8, 5 + 2 * 8, + 4 + 4 * 8, 4 + 5 * 8, 4 + 6 * 8, 4 + 7 * 8, + 5 + 3 * 8, 6 + 1 * 8, 6 + 2 * 8, 5 + 4 * 8, + 5 + 5 * 8, 5 + 6 * 8, 5 + 7 * 8, 6 + 3 * 8, + 7 + 0 * 8, 7 + 1 * 8, 6 + 4 * 8, 6 + 5 * 8, + 6 + 6 * 8, 6 + 7 * 8, 7 + 2 * 8, 7 + 3 * 8, + 7 + 4 * 8, 7 + 5 * 8, 7 + 6 * 8, 7 + 7 * 8, +}; + +static const uint8_t field_scan8x8_cavlc[64] = { + 0 + 0 * 8, 1 + 1 * 8, 2 + 0 * 8, 0 + 7 * 8, + 2 + 2 * 8, 2 + 3 * 8, 2 + 4 * 8, 3 + 3 * 8, + 3 + 4 * 8, 4 + 3 * 8, 4 + 4 * 8, 5 + 3 * 8, + 5 + 5 * 8, 7 + 0 * 8, 6 + 6 * 8, 7 + 4 * 8, + 0 + 1 * 8, 0 + 3 * 8, 1 + 3 * 8, 1 + 4 * 8, + 1 + 5 * 8, 3 + 1 * 8, 2 + 5 * 8, 4 + 1 * 8, + 3 + 5 * 8, 5 + 1 * 8, 4 + 5 * 8, 6 + 1 * 8, + 5 + 6 * 8, 7 + 1 * 8, 6 + 7 * 8, 7 + 5 * 8, + 0 + 2 * 8, 0 + 4 * 8, 0 + 5 * 8, 2 + 1 * 8, + 1 + 6 * 8, 4 + 0 * 8, 2 + 6 * 8, 5 + 0 * 8, + 3 + 6 * 8, 6 + 0 * 8, 4 + 6 * 8, 6 + 2 * 8, + 5 + 7 * 8, 6 + 4 * 8, 7 + 2 * 8, 7 + 6 * 8, + 1 + 0 * 8, 1 + 2 * 8, 0 + 6 * 8, 3 + 0 * 8, + 1 + 7 * 8, 3 + 2 * 8, 2 + 7 * 8, 4 + 2 * 8, + 3 + 7 * 8, 5 + 2 * 8, 4 + 7 * 8, 5 + 4 * 8, + 6 + 3 * 8, 6 + 5 * 8, 7 + 3 * 8, 7 + 7 * 8, +}; + +// zigzag_scan8x8_cavlc[i] = zigzag_scan8x8[(i/4) + 16*(i%4)] +static const uint8_t zigzag_scan8x8_cavlc[64] = { + 0 + 0 * 8, 1 + 1 * 8, 1 + 2 * 8, 2 + 2 * 8, + 4 + 1 * 8, 0 + 5 * 8, 3 + 3 * 8, 7 + 0 * 8, + 3 + 4 * 8, 1 + 7 * 8, 5 + 3 * 8, 6 + 3 * 8, + 2 + 7 * 8, 6 + 4 * 8, 5 + 6 * 8, 7 + 5 * 8, + 1 + 0 * 8, 2 + 0 * 8, 0 + 3 * 8, 3 + 1 * 8, + 3 + 2 * 8, 0 + 6 * 8, 4 + 2 * 8, 6 + 1 * 8, + 2 + 5 * 8, 2 + 6 * 8, 6 + 2 * 8, 5 + 4 * 8, + 3 + 7 * 8, 7 + 3 * 8, 4 + 7 * 8, 7 + 6 * 8, + 0 + 1 * 8, 3 + 0 * 8, 0 + 4 * 8, 4 + 0 * 8, + 2 + 3 * 8, 1 + 5 * 8, 5 + 1 * 8, 5 + 2 * 8, + 1 + 6 * 8, 3 + 5 * 8, 7 + 1 * 8, 4 + 5 * 8, + 4 + 6 * 8, 7 + 4 * 8, 5 + 7 * 8, 6 + 7 * 8, + 0 + 2 * 8, 2 + 1 * 8, 1 + 3 * 8, 5 + 0 * 8, + 1 + 4 * 8, 2 + 4 * 8, 6 + 0 * 8, 4 + 3 * 8, + 0 + 7 * 8, 4 + 4 * 8, 7 + 2 * 8, 3 + 6 * 8, + 5 + 5 * 8, 6 + 5 * 
8, 6 + 6 * 8, 7 + 7 * 8, +}; + +static const uint8_t dequant4_coeff_init[6][3] = { + { 10, 13, 16 }, + { 11, 14, 18 }, + { 13, 16, 20 }, + { 14, 18, 23 }, + { 16, 20, 25 }, + { 18, 23, 29 }, +}; + +static const uint8_t dequant8_coeff_init_scan[16] = { + 0, 3, 4, 3, 3, 1, 5, 1, 4, 5, 2, 5, 3, 1, 5, 1 +}; + +static const uint8_t dequant8_coeff_init[6][6] = { + { 20, 18, 32, 19, 25, 24 }, + { 22, 19, 35, 21, 28, 26 }, + { 26, 23, 42, 24, 33, 31 }, + { 28, 25, 45, 26, 35, 33 }, + { 32, 28, 51, 30, 40, 38 }, + { 36, 32, 58, 34, 46, 43 }, +}; + static const enum AVPixelFormat h264_hwaccel_pixfmt_list_420[] = { #if CONFIG_H264_DXVA2_HWACCEL AV_PIX_FMT_DXVA2_VLD, @@ -100,7 +185,7 @@ static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, int (*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped) { - H264Context *h = opaque; + H264Context *h = opaque; h->mb_x = mb_x; h->mb_y = mb_y; @@ -132,7 +217,7 @@ void ff_h264_draw_horiz_band(H264Context *h, int y, int height) const int field_pic = h->picture_structure != PICT_FRAME; if (field_pic) { height <<= 1; - y <<= 1; + y <<= 1; } height = FFMIN(height, avctx->height - y); @@ -146,7 +231,7 @@ void ff_h264_draw_horiz_band(H264Context *h, int y, int height) int i; if (cur->f.pict_type == AV_PICTURE_TYPE_B || h->low_delay || - (avctx->slice_flags & SLICE_FLAG_CODED_ORDER)) + (avctx->slice_flags & SLICE_FLAG_CODED_ORDER)) src = &cur->f; else if (last) src = &last->f; @@ -171,7 +256,7 @@ static void unref_picture(H264Context *h, Picture *pic) int off = offsetof(Picture, tf) + sizeof(pic->tf); int i; - if (!pic->f.data[0]) + if (!pic->f.buf[0]) return; ff_thread_release_buffer(h->avctx, &pic->tf); @@ -193,7 +278,7 @@ static void release_unused_pictures(H264Context *h, int remove_current) /* release non reference frames */ for (i = 0; i < MAX_PICTURE_COUNT; i++) { - if (h->DPB[i].f.data[0] && !h->DPB[i].reference && + if (h->DPB[i].f.buf[0] && !h->DPB[i].reference && (remove_current || &h->DPB[i] != h->cur_pic_ptr)) { unref_picture(h, &h->DPB[i]); } @@ -213,7 +298,6 @@ static int ref_picture(H264Context *h, Picture *dst, Picture *src) if (ret < 0) goto fail; - dst->qscale_table_buf = av_buffer_ref(src->qscale_table_buf); dst->mb_type_buf = av_buffer_ref(src->mb_type_buf); if (!dst->qscale_table_buf || !dst->mb_type_buf) @@ -221,7 +305,7 @@ static int ref_picture(H264Context *h, Picture *dst, Picture *src) dst->qscale_table = src->qscale_table; dst->mb_type = src->mb_type; - for (i = 0; i < 2; i ++) { + for (i = 0; i < 2; i++) { dst->motion_val_buf[i] = av_buffer_ref(src->motion_val_buf[i]); dst->ref_index_buf[i] = av_buffer_ref(src->ref_index_buf[i]); if (!dst->motion_val_buf[i] || !dst->ref_index_buf[i]) @@ -243,15 +327,16 @@ static int ref_picture(H264Context *h, Picture *dst, Picture *src) memcpy(dst->ref_poc, src->ref_poc, sizeof(src->ref_poc)); memcpy(dst->ref_count, src->ref_count, sizeof(src->ref_count)); - dst->poc = src->poc; - dst->frame_num = src->frame_num; - dst->mmco_reset = src->mmco_reset; - dst->pic_id = src->pic_id; - dst->long_ref = src->long_ref; - dst->mbaff = src->mbaff; - dst->field_picture = src->field_picture; - dst->needs_realloc = src->needs_realloc; - dst->reference = src->reference; + dst->poc = src->poc; + dst->frame_num = src->frame_num; + dst->mmco_reset = src->mmco_reset; + dst->pic_id = src->pic_id; + dst->long_ref = src->long_ref; + dst->mbaff = src->mbaff; + dst->field_picture = src->field_picture; + dst->needs_realloc = src->needs_realloc; + dst->reference = src->reference; + dst->recovered 
= src->recovered; return 0; fail: @@ -259,7 +344,6 @@ fail: return ret; } - static int alloc_scratch_buffers(H264Context *h, int linesize) { int alloc_size = FFALIGN(FFABS(linesize) + 32, 32); @@ -370,7 +454,7 @@ fail: static inline int pic_is_unused(H264Context *h, Picture *pic) { - if (pic->f.data[0] == NULL) + if (!pic->f.buf[0]) return 1; if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF)) return 1; @@ -417,7 +501,7 @@ int ff_h264_check_intra4x4_pred_mode(H264Context *h) av_log(h->avctx, AV_LOG_ERROR, "top block unavailable for requested intra4x4 mode %d at %d %d\n", status, h->mb_x, h->mb_y); - return -1; + return AVERROR_INVALIDDATA; } else if (status) { h->intra4x4_pred_mode_cache[scan8[0] + i] = status; } @@ -433,7 +517,7 @@ int ff_h264_check_intra4x4_pred_mode(H264Context *h) av_log(h->avctx, AV_LOG_ERROR, "left block unavailable for requested intra4x4 mode %d at %d %d\n", status, h->mb_x, h->mb_y); - return -1; + return AVERROR_INVALIDDATA; } else if (status) { h->intra4x4_pred_mode_cache[scan8[0] + 8 * i] = status; } @@ -456,7 +540,7 @@ int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma) av_log(h->avctx, AV_LOG_ERROR, "out of range intra chroma pred mode at %d %d\n", h->mb_x, h->mb_y); - return -1; + return AVERROR_INVALIDDATA; } if (!(h->top_samples_available & 0x8000)) { @@ -465,7 +549,7 @@ int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma) av_log(h->avctx, AV_LOG_ERROR, "top block unavailable for requested intra mode at %d %d\n", h->mb_x, h->mb_y); - return -1; + return AVERROR_INVALIDDATA; } } @@ -481,7 +565,7 @@ int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma) av_log(h->avctx, AV_LOG_ERROR, "left block unavailable for requested intra mode at %d %d\n", h->mb_x, h->mb_y); - return -1; + return AVERROR_INVALIDDATA; } } @@ -503,19 +587,21 @@ const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, length--; #define STARTCODE_TEST \ - if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \ - if (src[i + 2] != 3) { \ - /* startcode, so we must be past the end */ \ - length = i; \ - } \ - break; \ - } + if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \ + if (src[i + 2] != 3) { \ + /* startcode, so we must be past the end */ \ + length = i; \ + } \ + break; \ + } + #if HAVE_FAST_UNALIGNED #define FIND_FIRST_ZERO \ - if (i > 0 && !src[i]) \ - i--; \ - while (src[i]) \ - i++ + if (i > 0 && !src[i]) \ + i--; \ + while (src[i]) \ + i++ + #if HAVE_FAST_64BIT for (i = 0; i + 1 < length; i += 9) { if (!((~AV_RN64A(src + i) & @@ -583,8 +669,8 @@ const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, } while (si < length) dst[di++] = src[si++]; -nsc: +nsc: memset(dst + di, 0, FF_INPUT_BUFFER_PADDING_SIZE); *dst_length = di; @@ -616,12 +702,12 @@ static int decode_rbsp_trailing(H264Context *h, const uint8_t *src) static inline int get_lowest_part_list_y(H264Context *h, Picture *pic, int n, int height, int y_offset, int list) { - int raw_my = h->mv_cache[list][scan8[n]][1]; + int raw_my = h->mv_cache[list][scan8[n]][1]; int filter_height_up = (raw_my & 3) ? 2 : 0; int filter_height_down = (raw_my & 3) ? 
3 : 0; - int full_my = (raw_my >> 2) + y_offset; - int top = full_my - filter_height_up; - int bottom = full_my + filter_height_down + height; + int full_my = (raw_my >> 2) + y_offset; + int top = full_my - filter_height_up; + int bottom = full_my + filter_height_down + height; return FFMAX(abs(top), bottom); } @@ -641,7 +727,7 @@ static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n, // Error resilience puts the current picture in the ref list. // Don't try to wait on these as it will cause a deadlock. // Fields can wait on each other, though. - if (ref->tf.progress->data != h->cur_pic.tf.progress->data || + if (ref->tf.progress->data != h->cur_pic.tf.progress->data || (ref->reference & 3) != h->picture_structure) { my = get_lowest_part_list_y(h, ref, n, height, y_offset, 0); if (refs[0][ref_n] < 0) @@ -790,7 +876,7 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic, const int mx = h->mv_cache[list][scan8[n]][0] + src_x_offset * 8; int my = h->mv_cache[list][scan8[n]][1] + src_y_offset * 8; const int luma_xy = (mx & 3) + ((my & 3) << 2); - int offset = ((mx >> 2) << pixel_shift) + (my >> 2) * h->mb_linesize; + ptrdiff_t offset = ((mx >> 2) << pixel_shift) + (my >> 2) * h->mb_linesize; uint8_t *src_y = pic->f.data[0] + offset; uint8_t *src_cb, *src_cr; int extra_width = 0; @@ -813,7 +899,7 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic, full_my + 16 /*FIXME*/ > pic_height + extra_height) { h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_y - (2 << pixel_shift) - 2 * h->mb_linesize, - h->mb_linesize, + h->mb_linesize, h->mb_linesize, 16 + 5, 16 + 5 /*FIXME*/, full_mx - 2, full_my - 2, pic_width, pic_height); src_y = h->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize; @@ -832,7 +918,7 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic, if (emu) { h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cb - (2 << pixel_shift) - 2 * h->mb_linesize, - h->mb_linesize, + h->mb_linesize, h->mb_linesize, 16 + 5, 16 + 5 /*FIXME*/, full_mx - 2, full_my - 2, pic_width, pic_height); @@ -846,7 +932,7 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic, if (emu) { h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cr - (2 << pixel_shift) - 2 * h->mb_linesize, - h->mb_linesize, + h->mb_linesize, h->mb_linesize, 16 + 5, 16 + 5 /*FIXME*/, full_mx - 2, full_my - 2, pic_width, pic_height); @@ -871,7 +957,8 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic, (my >> ysh) * h->mb_uvlinesize; if (emu) { - h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cb, h->mb_uvlinesize, + h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cb, + h->mb_uvlinesize, h->mb_uvlinesize, 9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh), pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */)); src_cb = h->edge_emu_buffer; @@ -881,7 +968,8 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic, mx & 7, (my << (chroma_idc == 2 /* yuv422 */)) & 7); if (emu) { - h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cr, h->mb_uvlinesize, + h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src_cr, + h->mb_uvlinesize, h->mb_uvlinesize, 9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh), pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */)); src_cr = h->edge_emu_buffer; @@ -1204,8 +1292,8 @@ static void init_dequant_tables(H264Context *h) int ff_h264_alloc_tables(H264Context *h) { - const int big_mb_num = h->mb_stride * (h->mb_height + 1); - const int row_mb_num = h->mb_stride * 2 * 
h->avctx->thread_count; + const int big_mb_num = h->mb_stride * (h->mb_height + 1); + const int row_mb_num = h->mb_stride * 2 * h->avctx->thread_count; int x, y, i; FF_ALLOCZ_OR_GOTO(h->avctx, h->intra4x4_pred_mode, @@ -1260,7 +1348,7 @@ int ff_h264_alloc_tables(H264Context *h) fail: free_tables(h, 1); - return -1; + return AVERROR(ENOMEM); } /** @@ -1335,7 +1423,7 @@ static int context_init(H264Context *h) er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride; er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) * - h->mb_stride + h->mb_width; + h->mb_stride + h->mb_width; FF_ALLOCZ_OR_GOTO(h->avctx, er->error_status_table, mb_array_size * sizeof(uint8_t), fail); @@ -1359,7 +1447,7 @@ static int context_init(H264Context *h) return 0; fail: - return -1; // free_tables will clean up for us + return AVERROR(ENOMEM); // free_tables will clean up for us } static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size, @@ -1368,6 +1456,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size, int ff_h264_decode_extradata(H264Context *h) { AVCodecContext *avctx = h->avctx; + int ret; if (avctx->extradata[0] == 1) { int i, cnt, nalsize; @@ -1377,7 +1466,7 @@ int ff_h264_decode_extradata(H264Context *h) if (avctx->extradata_size < 7) { av_log(avctx, AV_LOG_ERROR, "avcC too short\n"); - return -1; + return AVERROR_INVALIDDATA; } /* sps and pps in the avcC always have length coded with 2 bytes, * so put a fake nal_length_size = 2 while parsing them */ @@ -1388,11 +1477,12 @@ int ff_h264_decode_extradata(H264Context *h) for (i = 0; i < cnt; i++) { nalsize = AV_RB16(p) + 2; if (p - avctx->extradata + nalsize > avctx->extradata_size) - return -1; - if (decode_nal_units(h, p, nalsize, 1) < 0) { + return AVERROR_INVALIDDATA; + ret = decode_nal_units(h, p, nalsize, 1); + if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "Decoding sps %d from avcC failed\n", i); - return -1; + return ret; } p += nalsize; } @@ -1401,11 +1491,12 @@ int ff_h264_decode_extradata(H264Context *h) for (i = 0; i < cnt; i++) { nalsize = AV_RB16(p) + 2; if (p - avctx->extradata + nalsize > avctx->extradata_size) - return -1; - if (decode_nal_units(h, p, nalsize, 1) < 0) { + return AVERROR_INVALIDDATA; + ret = decode_nal_units(h, p, nalsize, 1); + if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "Decoding pps %d from avcC failed\n", i); - return -1; + return ret; } p += nalsize; } @@ -1413,8 +1504,9 @@ int ff_h264_decode_extradata(H264Context *h) h->nal_length_size = (avctx->extradata[4] & 0x03) + 1; } else { h->is_avc = 0; - if (decode_nal_units(h, avctx->extradata, avctx->extradata_size, 1) < 0) - return -1; + ret = decode_nal_units(h, avctx->extradata, avctx->extradata_size, 1); + if (ret < 0) + return ret; } return 0; } @@ -1423,6 +1515,7 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx) { H264Context *h = avctx->priv_data; int i; + int ret; h->avctx = avctx; @@ -1458,7 +1551,9 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx) ff_h264_decode_init_vlc(); - h->pixel_shift = 0; + ff_init_cabac_states(); + + h->pixel_shift = 0; h->sps.bit_depth_luma = avctx->bits_per_raw_sample = 8; h->thread_context[0] = h; @@ -1468,15 +1563,19 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx) h->prev_poc_msb = 1 << 16; h->x264_build = -1; ff_h264_reset_sei(h); + h->recovery_frame = -1; + h->frame_recovered = 0; if (avctx->codec_id == AV_CODEC_ID_H264) { if (avctx->ticks_per_frame == 1) h->avctx->time_base.den *= 2; avctx->ticks_per_frame = 2; } - if (avctx->extradata_size > 0 && 
avctx->extradata && - ff_h264_decode_extradata(h)) - return -1; + if (avctx->extradata_size > 0 && avctx->extradata) { + ret = ff_h264_decode_extradata(h); + if (ret < 0) + return ret; + } if (h->sps.bitstream_restriction_flag && h->avctx->has_b_frames < h->sps.num_reorder_frames) { @@ -1493,8 +1592,8 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx) #undef REBASE_PICTURE #define REBASE_PICTURE(pic, new_ctx, old_ctx) \ ((pic && pic >= old_ctx->DPB && \ - pic < old_ctx->DPB + MAX_PICTURE_COUNT) ? \ - &new_ctx->DPB[pic - old_ctx->DPB] : NULL) + pic < old_ctx->DPB + MAX_PICTURE_COUNT) ? \ + &new_ctx->DPB[pic - old_ctx->DPB] : NULL) static void copy_picture_range(Picture **to, Picture **from, int count, H264Context *new_base, @@ -1511,19 +1610,24 @@ static void copy_picture_range(Picture **to, Picture **from, int count, } } -static void copy_parameter_set(void **to, void **from, int count, int size) +static int copy_parameter_set(void **to, void **from, int count, int size) { int i; for (i = 0; i < count; i++) { - if (to[i] && !from[i]) + if (to[i] && !from[i]) { av_freep(&to[i]); - else if (from[i] && !to[i]) + } else if (from[i] && !to[i]) { to[i] = av_malloc(size); + if (!to[i]) + return AVERROR(ENOMEM); + } if (from[i]) memcpy(to[i], from[i], size); } + + return 0; } static int decode_init_thread_copy(AVCodecContext *avctx) @@ -1560,10 +1664,10 @@ static int decode_update_thread_context(AVCodecContext *dst, return 0; if (inited && - (h->width != h1->width || - h->height != h1->height || - h->mb_width != h1->mb_width || - h->mb_height != h1->mb_height || + (h->width != h1->width || + h->height != h1->height || + h->mb_width != h1->mb_width || + h->mb_height != h1->mb_height || h->sps.bit_depth_luma != h1->sps.bit_depth_luma || h->sps.chroma_format_idc != h1->sps.chroma_format_idc || h->sps.colorspace != h1->sps.colorspace)) { @@ -1619,18 +1723,23 @@ static int decode_update_thread_context(AVCodecContext *dst, avcodec_get_frame_defaults(&h->cur_pic.f); h->cur_pic.tf.f = &h->cur_pic.f; - h->avctx = dst; - h->DPB = NULL; + h->avctx = dst; + h->DPB = NULL; h->qscale_table_pool = NULL; - h->mb_type_pool = NULL; - h->ref_index_pool = NULL; - h->motion_val_pool = NULL; + h->mb_type_pool = NULL; + h->ref_index_pool = NULL; + h->motion_val_pool = NULL; - if (ff_h264_alloc_tables(h) < 0) { + ret = ff_h264_alloc_tables(h); + if (ret < 0) { av_log(dst, AV_LOG_ERROR, "Could not allocate memory for h264\n"); - return AVERROR(ENOMEM); + return ret; + } + ret = context_init(h); + if (ret < 0) { + av_log(dst, AV_LOG_ERROR, "context_init() failed.\n"); + return ret; } - context_init(h); for (i = 0; i < 2; i++) { h->rbsp_buffer[i] = NULL; @@ -1658,12 +1767,12 @@ static int decode_update_thread_context(AVCodecContext *dst, for (i = 0; i < MAX_PICTURE_COUNT; i++) { unref_picture(h, &h->DPB[i]); - if (h1->DPB[i].f.data[0] && + if (h1->DPB[i].f.buf[0] && (ret = ref_picture(h, &h->DPB[i], &h1->DPB[i])) < 0) return ret; } - h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1); + h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1); unref_picture(h, &h->cur_pic); if ((ret = ref_picture(h, &h->cur_pic, &h1->cur_pic)) < 0) return ret; @@ -1682,11 +1791,15 @@ static int decode_update_thread_context(AVCodecContext *dst, h->is_avc = h1->is_avc; // SPS/PPS - copy_parameter_set((void **)h->sps_buffers, (void **)h1->sps_buffers, - MAX_SPS_COUNT, sizeof(SPS)); + if ((ret = copy_parameter_set((void **)h->sps_buffers, + (void **)h1->sps_buffers, + MAX_SPS_COUNT, sizeof(SPS))) < 0) + return ret; h->sps = h1->sps; 
- copy_parameter_set((void **)h->pps_buffers, (void **)h1->pps_buffers, - MAX_PPS_COUNT, sizeof(PPS)); + if ((ret = copy_parameter_set((void **)h->pps_buffers, + (void **)h1->pps_buffers, + MAX_PPS_COUNT, sizeof(PPS))) < 0) + return ret; h->pps = h1->pps; // Dequantization matrices @@ -1731,6 +1844,9 @@ static int decode_update_thread_context(AVCodecContext *dst, h->prev_frame_num = h->frame_num; h->outputed_poc = h->next_outputed_poc; + h->recovery_frame = h1->recovery_frame; + h->frame_recovered = h1->frame_recovered; + return err; } @@ -1750,7 +1866,7 @@ static int h264_frame_start(H264Context *h) } pic = &h->DPB[i]; - pic->reference = h->droppable ? 0 : h->picture_structure; + pic->reference = h->droppable ? 0 : h->picture_structure; pic->f.coded_picture_number = h->coded_picture_number++; pic->field_picture = h->picture_structure != PICT_FRAME; /* @@ -1760,6 +1876,7 @@ static int h264_frame_start(H264Context *h) */ pic->f.key_frame = 0; pic->mmco_reset = 0; + pic->recovered = 0; if ((ret = alloc_picture(h, pic)) < 0) return ret; @@ -1832,7 +1949,7 @@ static void decode_postinit(H264Context *h, int setup_finished) int i, pics, out_of_order, out_idx; int invalid = 0, cnt = 0; - h->cur_pic_ptr->f.pict_type = h->pict_type; + h->cur_pic_ptr->f.pict_type = h->pict_type; if (h->next_output_pic) return; @@ -2031,6 +2148,15 @@ static void decode_postinit(H264Context *h, int setup_finished) av_log(h->avctx, AV_LOG_DEBUG, "no picture\n"); } + if (h->next_output_pic) { + if (h->next_output_pic->recovered) { + // We have reached an recovery point and all frames after it in + // display order are "recovered". + h->frame_recovered |= FRAME_RECOVERED_SEI; + } + h->next_output_pic->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_SEI); + } + if (setup_finished && !h->avctx->hwaccel) ff_thread_finish_setup(h->avctx); } @@ -2254,8 +2380,8 @@ static av_always_inline void hl_decode_mb_predict_luma(H264Context *h, if (IS_INTRA4x4(mb_type)) { if (IS_8x8DCT(mb_type)) { if (transform_bypass) { - idct_dc_add = - idct_add = h->h264dsp.h264_add_pixels8_clear; + idct_dc_add = + idct_add = h->h264dsp.h264_add_pixels8_clear; } else { idct_dc_add = h->h264dsp.h264_idct8_dc_add; idct_add = h->h264dsp.h264_idct8_add; @@ -2338,7 +2464,8 @@ static av_always_inline void hl_decode_mb_predict_luma(H264Context *h, 0 * 16, 1 * 16, 4 * 16, 5 * 16, 2 * 16, 3 * 16, 6 * 16, 7 * 16, 8 * 16, 9 * 16, 12 * 16, 13 * 16, - 10 * 16, 11 * 16, 14 * 16, 15 * 16 }; + 10 * 16, 11 * 16, 14 * 16, 15 * 16 + }; for (i = 0; i < 16; i++) dctcoef_set(h->mb + (p * 256 << pixel_shift), pixel_shift, dc_mapping[i], @@ -2438,7 +2565,8 @@ void ff_h264_hl_decode_mb(H264Context *h) { const int mb_xy = h->mb_xy; const int mb_type = h->cur_pic.mb_type[mb_xy]; - int is_complex = CONFIG_SMALL || h->is_complex || IS_INTRA_PCM(mb_type) || h->qscale == 0; + int is_complex = CONFIG_SMALL || h->is_complex || + IS_INTRA_PCM(mb_type) || h->qscale == 0; if (CHROMA444(h)) { if (is_complex || h->pixel_shift) @@ -2453,7 +2581,7 @@ void ff_h264_hl_decode_mb(H264Context *h) hl_decode_mb_simple_8(h); } -static int pred_weight_table(H264Context *h) +int ff_pred_weight_table(H264Context *h) { int list, i; int luma_def, chroma_def; @@ -2495,7 +2623,7 @@ static int pred_weight_table(H264Context *h) h->chroma_weight[i][list][j][1] = get_se_golomb(&h->gb); if (h->chroma_weight[i][list][j][0] != chroma_def || h->chroma_weight[i][list][j][1] != 0) { - h->use_weight_chroma = 1; + h->use_weight_chroma = 1; h->chroma_weight_flag[list] = 1; } } @@ -2537,7 +2665,7 @@ static void 
implicit_weight_table(H264Context *h, int field) } if (h->ref_count[0] == 1 && h->ref_count[1] == 1 && !FRAME_MBAFF(h) && h->ref_list[0][0].poc + h->ref_list[1][0].poc == 2 * cur_poc) { - h->use_weight = 0; + h->use_weight = 0; h->use_weight_chroma = 0; return; } @@ -2599,7 +2727,7 @@ static void flush_change(H264Context *h) int i; for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) h->last_pocs[i] = INT_MIN; - h->outputed_poc = h->next_outputed_poc = INT_MIN; + h->outputed_poc = h->next_outputed_poc = INT_MIN; h->prev_interlaced_frame = 1; idr(h); if (h->cur_pic_ptr) @@ -2610,6 +2738,8 @@ static void flush_change(H264Context *h) memset(h->default_ref_list[0], 0, sizeof(h->default_ref_list[0])); memset(h->default_ref_list[1], 0, sizeof(h->default_ref_list[1])); ff_h264_reset_sei(h); + h->recovery_frame = -1; + h->frame_recovered = 0; } /* forget old pics after a seek */ @@ -2640,13 +2770,15 @@ static void flush_dpb(AVCodecContext *avctx) h->parse_context.overread_index = 0; h->parse_context.index = 0; h->parse_context.last_index = 0; + + free_tables(h, 1); + h->context_initialized = 0; } -static int init_poc(H264Context *h) +int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc) { const int max_frame_num = 1 << h->sps.log2_max_frame_num; int field_poc[2]; - Picture *cur = h->cur_pic_ptr; h->frame_num_offset = h->prev_frame_num_offset; if (h->frame_num < h->prev_frame_num) @@ -2655,9 +2787,11 @@ static int init_poc(H264Context *h) if (h->sps.poc_type == 0) { const int max_poc_lsb = 1 << h->sps.log2_max_poc_lsb; - if (h->poc_lsb < h->prev_poc_lsb && h->prev_poc_lsb - h->poc_lsb >= max_poc_lsb / 2) + if (h->poc_lsb < h->prev_poc_lsb && + h->prev_poc_lsb - h->poc_lsb >= max_poc_lsb / 2) h->poc_msb = h->prev_poc_msb + max_poc_lsb; - else if (h->poc_lsb > h->prev_poc_lsb && h->prev_poc_lsb - h->poc_lsb < -max_poc_lsb / 2) + else if (h->poc_lsb > h->prev_poc_lsb && + h->prev_poc_lsb - h->poc_lsb < -max_poc_lsb / 2) h->poc_msb = h->prev_poc_msb - max_poc_lsb; else h->poc_msb = h->prev_poc_msb; @@ -2711,10 +2845,10 @@ static int init_poc(H264Context *h) } if (h->picture_structure != PICT_BOTTOM_FIELD) - h->cur_pic_ptr->field_poc[0] = field_poc[0]; + pic_field_poc[0] = field_poc[0]; if (h->picture_structure != PICT_TOP_FIELD) - h->cur_pic_ptr->field_poc[1] = field_poc[1]; - cur->poc = FFMIN(cur->field_poc[0], cur->field_poc[1]); + pic_field_poc[1] = field_poc[1]; + *pic_poc = FFMIN(pic_field_poc[0], pic_field_poc[1]); return 0; } @@ -2766,10 +2900,6 @@ static int field_end(H264Context *h, int in_setup) ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, h->picture_structure == PICT_BOTTOM_FIELD); - if (CONFIG_H264_VDPAU_DECODER && - h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) - ff_vdpau_h264_set_reference_frames(h); - if (in_setup || !(avctx->active_thread_type & FF_THREAD_FRAME)) { if (!h->droppable) { err = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index); @@ -2787,10 +2917,6 @@ static int field_end(H264Context *h, int in_setup) "hardware accelerator failed to decode picture\n"); } - if (CONFIG_H264_VDPAU_DECODER && - h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) - ff_vdpau_h264_picture_complete(h); - /* * FIXME: Error handling code does not seem to support interlaced * when slices span multiple rows @@ -2894,13 +3020,6 @@ static int h264_set_parameter_from_sps(H264Context *h) if (h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma || h->cur_chroma_format_idc != h->sps.chroma_format_idc) { - if (h->avctx->codec && - h->avctx->codec->capabilities & 
CODEC_CAP_HWACCEL_VDPAU && - (h->sps.bit_depth_luma != 8 || h->sps.chroma_format_idc > 1)) { - av_log(h->avctx, AV_LOG_ERROR, - "VDPAU decoding does not support video colorspace.\n"); - return AVERROR_INVALIDDATA; - } if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 10) { h->avctx->bits_per_raw_sample = h->sps.bit_depth_luma; h->cur_chroma_format_idc = h->sps.chroma_format_idc; @@ -2996,7 +3115,7 @@ static int init_dimensions(H264Context *h) av_log(h->avctx, AV_LOG_WARNING, "Ignoring cropping information.\n"); h->sps.crop_bottom = h->sps.crop_top = h->sps.crop_right = h->sps.crop_left = 0; - h->sps.crop = 0; + h->sps.crop = 0; width = h->width; height = h->height; @@ -3015,7 +3134,7 @@ static int h264_slice_header_init(H264Context *h, int reinit) int nb_slices = (HAVE_THREADS && h->avctx->active_thread_type & FF_THREAD_SLICE) ? h->avctx->thread_count : 1; - int i; + int i, ret; h->avctx->sample_aspect_ratio = h->sps.sar; av_assert0(h->avctx->sample_aspect_ratio.den); @@ -3030,18 +3149,19 @@ static int h264_slice_header_init(H264Context *h, int reinit) h->sps.num_units_in_tick, den, 1 << 30); } - h->avctx->hwaccel = ff_find_hwaccel(h->avctx->codec->id, h->avctx->pix_fmt); + h->avctx->hwaccel = ff_find_hwaccel(h->avctx); if (reinit) free_tables(h, 0); - h->first_field = 0; + h->first_field = 0; h->prev_interlaced_frame = 1; init_scan_tables(h); - if (ff_h264_alloc_tables(h) < 0) { + ret = ff_h264_alloc_tables(h); + if (ret < 0) { av_log(h->avctx, AV_LOG_ERROR, "Could not allocate memory for h264\n"); - return AVERROR(ENOMEM); + return ret; } if (nb_slices > MAX_THREADS || (nb_slices > h->mb_height && h->mb_height)) { @@ -3057,40 +3177,43 @@ static int h264_slice_header_init(H264Context *h, int reinit) h->slice_context_count = nb_slices; if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_SLICE)) { - if (context_init(h) < 0) { + ret = context_init(h); + if (ret < 0) { av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n"); - return -1; + return ret; } } else { for (i = 1; i < h->slice_context_count; i++) { H264Context *c; - c = h->thread_context[i] = av_mallocz(sizeof(H264Context)); - c->avctx = h->avctx; - c->dsp = h->dsp; - c->vdsp = h->vdsp; - c->h264dsp = h->h264dsp; - c->h264qpel = h->h264qpel; - c->h264chroma = h->h264chroma; - c->sps = h->sps; - c->pps = h->pps; - c->pixel_shift = h->pixel_shift; - c->width = h->width; - c->height = h->height; - c->linesize = h->linesize; - c->uvlinesize = h->uvlinesize; - c->chroma_x_shift = h->chroma_x_shift; - c->chroma_y_shift = h->chroma_y_shift; - c->qscale = h->qscale; - c->droppable = h->droppable; + c = h->thread_context[i] = av_mallocz(sizeof(H264Context)); + if (!c) + return AVERROR(ENOMEM); + c->avctx = h->avctx; + c->dsp = h->dsp; + c->vdsp = h->vdsp; + c->h264dsp = h->h264dsp; + c->h264qpel = h->h264qpel; + c->h264chroma = h->h264chroma; + c->sps = h->sps; + c->pps = h->pps; + c->pixel_shift = h->pixel_shift; + c->width = h->width; + c->height = h->height; + c->linesize = h->linesize; + c->uvlinesize = h->uvlinesize; + c->chroma_x_shift = h->chroma_x_shift; + c->chroma_y_shift = h->chroma_y_shift; + c->qscale = h->qscale; + c->droppable = h->droppable; c->data_partitioning = h->data_partitioning; - c->low_delay = h->low_delay; - c->mb_width = h->mb_width; - c->mb_height = h->mb_height; - c->mb_stride = h->mb_stride; - c->mb_num = h->mb_num; - c->flags = h->flags; - c->workaround_bugs = h->workaround_bugs; - c->pict_type = h->pict_type; + c->low_delay = h->low_delay; + c->mb_width = h->mb_width; + c->mb_height = 
h->mb_height; + c->mb_stride = h->mb_stride; + c->mb_num = h->mb_num; + c->flags = h->flags; + c->workaround_bugs = h->workaround_bugs; + c->pict_type = h->pict_type; init_scan_tables(c); clone_tables(c, h, i); @@ -3098,9 +3221,9 @@ static int h264_slice_header_init(H264Context *h, int reinit) } for (i = 0; i < h->slice_context_count; i++) - if (context_init(h->thread_context[i]) < 0) { + if ((ret = context_init(h->thread_context[i])) < 0) { av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n"); - return -1; + return ret; } } @@ -3109,6 +3232,50 @@ static int h264_slice_header_init(H264Context *h, int reinit) return 0; } +int ff_set_ref_count(H264Context *h) +{ + int num_ref_idx_active_override_flag, max_refs; + + // set defaults, might be overridden a few lines later + h->ref_count[0] = h->pps.ref_count[0]; + h->ref_count[1] = h->pps.ref_count[1]; + + if (h->slice_type_nos != AV_PICTURE_TYPE_I) { + if (h->slice_type_nos == AV_PICTURE_TYPE_B) + h->direct_spatial_mv_pred = get_bits1(&h->gb); + num_ref_idx_active_override_flag = get_bits1(&h->gb); + + if (num_ref_idx_active_override_flag) { + h->ref_count[0] = get_ue_golomb(&h->gb) + 1; + if (h->ref_count[0] < 1) + return AVERROR_INVALIDDATA; + if (h->slice_type_nos == AV_PICTURE_TYPE_B) { + h->ref_count[1] = get_ue_golomb(&h->gb) + 1; + if (h->ref_count[1] < 1) + return AVERROR_INVALIDDATA; + } + } + + if (h->slice_type_nos == AV_PICTURE_TYPE_B) + h->list_count = 2; + else + h->list_count = 1; + } else { + h->list_count = 0; + h->ref_count[0] = h->ref_count[1] = 0; + } + + max_refs = h->picture_structure == PICT_FRAME ? 16 : 32; + + if (h->ref_count[0] > max_refs || h->ref_count[1] > max_refs) { + av_log(h->avctx, AV_LOG_ERROR, "reference overflow\n"); + h->ref_count[0] = h->ref_count[1] = 0; + return AVERROR_INVALIDDATA; + } + + return 0; +} + /** * Decode a slice header. * This will also call ff_MPV_common_init() and frame_start() as needed. 
@@ -3123,11 +3290,12 @@ static int decode_slice_header(H264Context *h, H264Context *h0) { unsigned int first_mb_in_slice; unsigned int pps_id; - int num_ref_idx_active_override_flag, max_refs, ret; + int ret; unsigned int slice_type, tmp, i, j; int default_ref_list_done = 0; int last_pic_structure, last_pic_droppable; int needs_reinit = 0; + int field_pic_flag, bottom_field_flag; h->me.qpel_put = h->h264qpel.put_h264_qpel_pixels_tab; h->me.qpel_avg = h->h264qpel.avg_h264_qpel_pixels_tab; @@ -3154,7 +3322,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0) av_log(h->avctx, AV_LOG_ERROR, "slice type too large (%d) at %d %d\n", h->slice_type, h->mb_x, h->mb_y); - return -1; + return AVERROR_INVALIDDATA; } if (slice_type > 4) { slice_type -= 5; @@ -3176,13 +3344,13 @@ static int decode_slice_header(H264Context *h, H264Context *h0) pps_id = get_ue_golomb(&h->gb); if (pps_id >= MAX_PPS_COUNT) { av_log(h->avctx, AV_LOG_ERROR, "pps_id out of range\n"); - return -1; + return AVERROR_INVALIDDATA; } if (!h0->pps_buffers[pps_id]) { av_log(h->avctx, AV_LOG_ERROR, "non-existing PPS %u referenced\n", pps_id); - return -1; + return AVERROR_INVALIDDATA; } h->pps = *h0->pps_buffers[pps_id]; @@ -3190,7 +3358,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0) av_log(h->avctx, AV_LOG_ERROR, "non-existing SPS %u referenced\n", h->pps.sps_id); - return -1; + return AVERROR_INVALIDDATA; } if (h->pps.sps_id != h->current_sps_id || @@ -3250,7 +3418,6 @@ static int decode_slice_header(H264Context *h, H264Context *h0) (h->width != h->avctx->coded_width || h->height != h->avctx->coded_height || needs_reinit)) { - if (h != h0) { av_log(h->avctx, AV_LOG_ERROR, "changing width/height on " "slice %d\n", h0->current_slice + 1); @@ -3276,7 +3443,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0) if (h != h0) { av_log(h->avctx, AV_LOG_ERROR, "Cannot (re-)initialize context during parallel decoding.\n"); - return -1; + return AVERROR_PATCHWELCOME; } if ((ret = get_pixel_format(h)) < 0) @@ -3305,8 +3472,10 @@ static int decode_slice_header(H264Context *h, H264Context *h0) if (h->sps.frame_mbs_only_flag) { h->picture_structure = PICT_FRAME; } else { - if (get_bits1(&h->gb)) { // field_pic_flag - h->picture_structure = PICT_TOP_FIELD + get_bits1(&h->gb); // bottom_field_flag + field_pic_flag = get_bits1(&h->gb); + if (field_pic_flag) { + bottom_field_flag = get_bits1(&h->gb); + h->picture_structure = PICT_TOP_FIELD + bottom_field_flag; } else { h->picture_structure = PICT_FRAME; h->mb_aff_frame = h->sps.mb_aff; @@ -3355,7 +3524,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0) * since that can modify s->current_picture_ptr. */ if (h0->first_field) { assert(h0->cur_pic_ptr); - assert(h0->cur_pic_ptr->f.data[0]); + assert(h0->cur_pic_ptr->f.buf[0]); assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF); /* figure out if we have a complementary field pair */ @@ -3404,30 +3573,36 @@ static int decode_slice_header(H264Context *h, H264Context *h0) Picture *prev = h->short_ref_count ? 
h->short_ref[0] : NULL; av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n", h->frame_num, h->prev_frame_num); - if (h264_frame_start(h) < 0) - return -1; + ret = h264_frame_start(h); + if (ret < 0) + return ret; h->prev_frame_num++; - h->prev_frame_num %= 1 << h->sps.log2_max_frame_num; + h->prev_frame_num %= 1 << h->sps.log2_max_frame_num; h->cur_pic_ptr->frame_num = h->prev_frame_num; ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0); ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1); - if ((ret = ff_generate_sliding_window_mmcos(h, 1)) < 0 && - h->avctx->err_recognition & AV_EF_EXPLODE) + ret = ff_generate_sliding_window_mmcos(h, 1); + if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) return ret; - if (ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index) < 0 && - (h->avctx->err_recognition & AV_EF_EXPLODE)) - return AVERROR_INVALIDDATA; - /* Error concealment: if a ref is missing, copy the previous ref in its place. - * FIXME: avoiding a memcpy would be nice, but ref handling makes many assumptions - * about there being no actual duplicates. - * FIXME: this doesn't copy padding for out-of-frame motion vectors. Given we're - * concealing a lost frame, this probably isn't noticeable by comparison, but it should - * be fixed. */ + ret = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index); + if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) + return ret; + /* Error concealment: If a ref is missing, copy the previous ref + * in its place. + * FIXME: Avoiding a memcpy would be nice, but ref handling makes + * many assumptions about there being no actual duplicates. + * FIXME: This does not copy padding for out-of-frame motion + * vectors. Given we are concealing a lost frame, this probably + * is not noticeable by comparison, but it should be fixed. */ if (h->short_ref_count) { if (prev) { - av_image_copy(h->short_ref[0]->f.data, h->short_ref[0]->f.linesize, - (const uint8_t **)prev->f.data, prev->f.linesize, - h->avctx->pix_fmt, h->mb_width * 16, h->mb_height * 16); + av_image_copy(h->short_ref[0]->f.data, + h->short_ref[0]->f.linesize, + (const uint8_t **)prev->f.data, + prev->f.linesize, + h->avctx->pix_fmt, + h->mb_width * 16, + h->mb_height * 16); h->short_ref[0]->poc = prev->poc + 2; } h->short_ref[0]->frame_num = h->prev_frame_num; @@ -3439,7 +3614,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0) * frame, or to allocate a new one. 
*/ if (h0->first_field) { assert(h0->cur_pic_ptr); - assert(h0->cur_pic_ptr->f.data[0]); + assert(h0->cur_pic_ptr->f.buf[0]); assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF); /* figure out if we have a complementary field pair */ @@ -3468,7 +3643,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0) if (!FIELD_PICTURE(h) || h0->first_field) { if (h264_frame_start(h) < 0) { h0->first_field = 0; - return -1; + return AVERROR_INVALIDDATA; } } else { release_unused_pictures(h, 0); @@ -3483,10 +3658,11 @@ static int decode_slice_header(H264Context *h, H264Context *h0) if (first_mb_in_slice << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num || first_mb_in_slice >= h->mb_num) { av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n"); - return -1; + return AVERROR_INVALIDDATA; } h->resync_mb_x = h->mb_x = first_mb_in_slice % h->mb_width; - h->resync_mb_y = h->mb_y = (first_mb_in_slice / h->mb_width) << FIELD_OR_MBAFF_PICTURE(h); + h->resync_mb_y = h->mb_y = (first_mb_in_slice / h->mb_width) << + FIELD_OR_MBAFF_PICTURE(h); if (h->picture_structure == PICT_BOTTOM_FIELD) h->resync_mb_y = h->mb_y = h->mb_y + 1; assert(h->mb_y < h->mb_height); @@ -3516,62 +3692,30 @@ static int decode_slice_header(H264Context *h, H264Context *h0) h->delta_poc[1] = get_se_golomb(&h->gb); } - init_poc(h); + ff_init_poc(h, h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc); if (h->pps.redundant_pic_cnt_present) h->redundant_pic_count = get_ue_golomb(&h->gb); - // set defaults, might be overridden a few lines later - h->ref_count[0] = h->pps.ref_count[0]; - h->ref_count[1] = h->pps.ref_count[1]; - - if (h->slice_type_nos != AV_PICTURE_TYPE_I) { - if (h->slice_type_nos == AV_PICTURE_TYPE_B) - h->direct_spatial_mv_pred = get_bits1(&h->gb); - num_ref_idx_active_override_flag = get_bits1(&h->gb); - - if (num_ref_idx_active_override_flag) { - h->ref_count[0] = get_ue_golomb(&h->gb) + 1; - if (h->ref_count[0] < 1) - return AVERROR_INVALIDDATA; - if (h->slice_type_nos == AV_PICTURE_TYPE_B) { - h->ref_count[1] = get_ue_golomb(&h->gb) + 1; - if (h->ref_count[1] < 1) - return AVERROR_INVALIDDATA; - } - } - - if (h->slice_type_nos == AV_PICTURE_TYPE_B) - h->list_count = 2; - else - h->list_count = 1; - } else { - h->list_count = 0; - h->ref_count[0] = h->ref_count[1] = 0; - } - - - max_refs = h->picture_structure == PICT_FRAME ? 16 : 32; - - if (h->ref_count[0] > max_refs || h->ref_count[1] > max_refs) { - av_log(h->avctx, AV_LOG_ERROR, "reference overflow\n"); - h->ref_count[0] = h->ref_count[1] = 0; - return AVERROR_INVALIDDATA; - } + ret = ff_set_ref_count(h); + if (ret < 0) + return ret; if (!default_ref_list_done) ff_h264_fill_default_ref_list(h); - if (h->slice_type_nos != AV_PICTURE_TYPE_I && - ff_h264_decode_ref_pic_list_reordering(h) < 0) { - h->ref_count[1] = h->ref_count[0] = 0; - return -1; + if (h->slice_type_nos != AV_PICTURE_TYPE_I) { + ret = ff_h264_decode_ref_pic_list_reordering(h); + if (ret < 0) { + h->ref_count[1] = h->ref_count[0] = 0; + return ret; + } } if ((h->pps.weighted_pred && h->slice_type_nos == AV_PICTURE_TYPE_P) || (h->pps.weighted_bipred_idc == 1 && h->slice_type_nos == AV_PICTURE_TYPE_B)) - pred_weight_table(h); + ff_pred_weight_table(h); else if (h->pps.weighted_bipred_idc == 2 && h->slice_type_nos == AV_PICTURE_TYPE_B) { implicit_weight_table(h, -1); @@ -3588,12 +3732,13 @@ static int decode_slice_header(H264Context *h, H264Context *h0) // or h->mmco, which will cause ref list mix-ups and decoding errors // further down the line. 
This may break decoding if the first slice is // corrupt, thus we only do this if frame-mt is enabled. - if (h->nal_ref_idc && - ff_h264_decode_ref_pic_marking(h0, &h->gb, - !(h->avctx->active_thread_type & FF_THREAD_FRAME) || - h0->current_slice == 0) < 0 && - (h->avctx->err_recognition & AV_EF_EXPLODE)) - return AVERROR_INVALIDDATA; + if (h->nal_ref_idc) { + ret = ff_h264_decode_ref_pic_marking(h0, &h->gb, + !(h->avctx->active_thread_type & FF_THREAD_FRAME) || + h0->current_slice == 0); + if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) + return AVERROR_INVALIDDATA; + } if (FRAME_MBAFF(h)) { ff_h264_fill_mbaff_ref_list(h); @@ -3612,7 +3757,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0) tmp = get_ue_golomb_31(&h->gb); if (tmp > 2) { av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc overflow\n"); - return -1; + return AVERROR_INVALIDDATA; } h->cabac_init_idc = tmp; } @@ -3621,7 +3766,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0) tmp = h->pps.init_qp + get_se_golomb(&h->gb); if (tmp > 51 + 6 * (h->sps.bit_depth_luma - 8)) { av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp); - return -1; + return AVERROR_INVALIDDATA; } h->qscale = tmp; h->chroma_qp[0] = get_chroma_qp(h, 0, h->qscale); @@ -3641,7 +3786,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0) if (tmp > 2) { av_log(h->avctx, AV_LOG_ERROR, "deblocking_filter_idc %u out of range\n", tmp); - return -1; + return AVERROR_INVALIDDATA; } h->deblocking_filter = tmp; if (h->deblocking_filter < 2) @@ -3655,7 +3800,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0) av_log(h->avctx, AV_LOG_ERROR, "deblocking filter parameters %d %d out of range\n", h->slice_alpha_c0_offset, h->slice_beta_offset); - return -1; + return AVERROR_INVALIDDATA; } } } @@ -3696,7 +3841,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0) 6 * (h->sps.bit_depth_luma - 8); h0->last_slice_type = slice_type; - h->slice_num = ++h0->current_slice; + h->slice_num = ++h0->current_slice; if (h->slice_num >= MAX_SLICES) { av_log(h->avctx, AV_LOG_ERROR, "Too many slices, increase MAX_SLICES and recompile\n"); @@ -3707,7 +3852,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0) int *ref2frm = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][j]; for (i = 0; i < 16; i++) { id_list[i] = 60; - if (j < h->list_count && i < h->ref_count[j] && h->ref_list[j][i].f.buf[0]) { + if (j < h->list_count && i < h->ref_count[j] && + h->ref_list[j][i].f.buf[0]) { int k; AVBuffer *buf = h->ref_list[j][i].f.buf[0]->buffer; for (k = 0; k < h->short_ref_count; k++) @@ -3723,13 +3869,12 @@ static int decode_slice_header(H264Context *h, H264Context *h0) } } - ref2frm[0] = - ref2frm[1] = -1; + ref2frm[0] = + ref2frm[1] = -1; for (i = 0; i < 16; i++) - ref2frm[i + 2] = 4 * id_list[i] + - (h->ref_list[j][i].reference & 3); - ref2frm[18 + 0] = - ref2frm[18 + 1] = -1; + ref2frm[i + 2] = 4 * id_list[i] + (h->ref_list[j][i].reference & 3); + ref2frm[18 + 0] = + ref2frm[18 + 1] = -1; for (i = 16; i < 48; i++) ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] + (h->ref_list[j][i].reference & 3); @@ -3773,7 +3918,7 @@ int ff_h264_get_slice_type(const H264Context *h) case AV_PICTURE_TYPE_SI: return 4; default: - return -1; + return AVERROR_INVALIDDATA; } } @@ -3884,7 +4029,7 @@ static int fill_filter_caches(H264Context *h, int mb_type) } else { if (curr_mb_field_flag) top_xy += h->mb_stride & - (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1); + (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1); if 
(left_mb_field_flag != curr_mb_field_flag) left_xy[LBOT] += h->mb_stride; } @@ -3966,18 +4111,18 @@ static int fill_filter_caches(H264Context *h, int mb_type) * from what the loop filter needs */ if (!CABAC(h) && h->pps.transform_8x8_mode) { if (IS_8x8DCT(top_type)) { - nnz_cache[4 + 8 * 0] = - nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12; - nnz_cache[6 + 8 * 0] = - nnz_cache[7 + 8 * 0] = (h->cbp_table[top_xy] & 0x8000) >> 12; + nnz_cache[4 + 8 * 0] = + nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12; + nnz_cache[6 + 8 * 0] = + nnz_cache[7 + 8 * 0] = (h->cbp_table[top_xy] & 0x8000) >> 12; } if (IS_8x8DCT(left_type[LTOP])) { - nnz_cache[3 + 8 * 1] = - nnz_cache[3 + 8 * 2] = (h->cbp_table[left_xy[LTOP]] & 0x2000) >> 12; // FIXME check MBAFF + nnz_cache[3 + 8 * 1] = + nnz_cache[3 + 8 * 2] = (h->cbp_table[left_xy[LTOP]] & 0x2000) >> 12; // FIXME check MBAFF } if (IS_8x8DCT(left_type[LBOT])) { - nnz_cache[3 + 8 * 3] = - nnz_cache[3 + 8 * 4] = (h->cbp_table[left_xy[LBOT]] & 0x8000) >> 12; // FIXME check MBAFF + nnz_cache[3 + 8 * 3] = + nnz_cache[3 + 8 * 4] = (h->cbp_table[left_xy[LBOT]] & 0x8000) >> 12; // FIXME check MBAFF } if (IS_8x8DCT(mb_type)) { @@ -4146,7 +4291,6 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg) align_get_bits(&h->gb); /* init cabac */ - ff_init_cabac_states(); ff_init_cabac_decoder(&h->cabac, h->gb.buffer + get_bits_count(&h->gb) / 8, (get_bits_left(&h->gb) + 7) / 8); @@ -4177,7 +4321,7 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg) if ((h->workaround_bugs & FF_BUG_TRUNCATED) && h->cabac.bytestream > h->cabac.bytestream_end + 2) { er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x - 1, - h->mb_y, ER_MB_END); + h->mb_y, ER_MB_END); if (h->mb_x >= lf_x_start) loop_filter(h, lf_x_start, h->mb_x + 1); return 0; @@ -4188,8 +4332,8 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg) h->mb_x, h->mb_y, h->cabac.bytestream_end - h->cabac.bytestream); er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x, - h->mb_y, ER_MB_ERROR); - return -1; + h->mb_y, ER_MB_ERROR); + return AVERROR_INVALIDDATA; } if (++h->mb_x >= h->mb_width) { @@ -4208,7 +4352,7 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg) tprintf(h->avctx, "slice end %d %d\n", get_bits_count(&h->gb), h->gb.size_in_bits); er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x - 1, - h->mb_y, ER_MB_END); + h->mb_y, ER_MB_END); if (h->mb_x > lf_x_start) loop_filter(h, lf_x_start, h->mb_x); return 0; @@ -4235,8 +4379,8 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg) av_log(h->avctx, AV_LOG_ERROR, "error while decoding MB %d %d\n", h->mb_x, h->mb_y); er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x, - h->mb_y, ER_MB_ERROR); - return -1; + h->mb_y, ER_MB_ERROR); + return ret; } if (++h->mb_x >= h->mb_width) { @@ -4255,16 +4399,16 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg) if (get_bits_left(&h->gb) == 0) { er_add_slice(h, h->resync_mb_x, h->resync_mb_y, - h->mb_x - 1, h->mb_y, - ER_MB_END); + h->mb_x - 1, h->mb_y, + ER_MB_END); return 0; } else { er_add_slice(h, h->resync_mb_x, h->resync_mb_y, - h->mb_x - 1, h->mb_y, - ER_MB_END); + h->mb_x - 1, h->mb_y, + ER_MB_END); - return -1; + return AVERROR_INVALIDDATA; } } } @@ -4272,19 +4416,20 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg) if (get_bits_left(&h->gb) <= 0 && h->mb_skip_run <= 0) { tprintf(h->avctx, "slice end %d %d\n", get_bits_count(&h->gb), h->gb.size_in_bits); + if (get_bits_left(&h->gb) 
== 0) { er_add_slice(h, h->resync_mb_x, h->resync_mb_y, - h->mb_x - 1, h->mb_y, - ER_MB_END); + h->mb_x - 1, h->mb_y, + ER_MB_END); if (h->mb_x > lf_x_start) loop_filter(h, lf_x_start, h->mb_x); return 0; } else { er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x, - h->mb_y, ER_MB_ERROR); + h->mb_y, ER_MB_ERROR); - return -1; + return AVERROR_INVALIDDATA; } } } @@ -4303,15 +4448,14 @@ static int execute_decode_slices(H264Context *h, int context_count) H264Context *hx; int i; - if (h->avctx->hwaccel || - h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) + if (h->avctx->hwaccel) return 0; if (context_count == 1) { return decode_slice(avctx, &h); } else { for (i = 1; i < context_count; i++) { - hx = h->thread_context[i]; - hx->er.error_count = 0; + hx = h->thread_context[i]; + hx->er.error_count = 0; } avctx->execute(avctx, decode_slice, h->thread_context, @@ -4341,6 +4485,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size, int pass = !(avctx->active_thread_type & FF_THREAD_FRAME); int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts int nal_index; + int ret = 0; h->max_contexts = h->slice_context_count; if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS)) { @@ -4399,7 +4544,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size, ptr = ff_h264_decode_nal(hx, buf + buf_index, &dst_length, &consumed, next_avc - buf_index); if (ptr == NULL || dst_length < 0) { - buf_index = -1; + ret = -1; goto end; } i = buf_index + consumed; @@ -4409,7 +4554,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size, h->workaround_bugs |= FF_BUG_TRUNCATED; if (!(h->workaround_bugs & FF_BUG_TRUNCATED)) - while (ptr[dst_length - 1] == 0 && dst_length > 0) + while (dst_length > 0 && ptr[dst_length - 1] == 0) dst_length--; bit_length = !dst_length ? 
0 : (8 * dst_length - @@ -4448,8 +4593,9 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size, continue; } - // FIXME do not discard SEI id - if (avctx->skip_frame >= AVDISCARD_NONREF && h->nal_ref_idc == 0) + if (avctx->skip_frame >= AVDISCARD_NONREF && + h->nal_ref_idc == 0 && + h->nal_unit_type != NAL_SEI) continue; again: @@ -4460,8 +4606,11 @@ again: (h->avctx->active_thread_type & FF_THREAD_FRAME) && (hx->nal_unit_type != NAL_PPS && hx->nal_unit_type != NAL_SPS)) { - av_log(avctx, AV_LOG_INFO, "Ignoring NAL unit %d during " - "extradata parsing\n", hx->nal_unit_type); + if (hx->nal_unit_type < NAL_AUD || + hx->nal_unit_type > NAL_AUXILIARY_SLICE) + av_log(avctx, AV_LOG_INFO, + "Ignoring NAL unit %d during extradata parsing\n", + hx->nal_unit_type); hx->nal_unit_type = NAL_FF_IGNORE; } err = 0; @@ -4470,33 +4619,46 @@ again: if (h->nal_unit_type != NAL_IDR_SLICE) { av_log(h->avctx, AV_LOG_ERROR, "Invalid mix of idr and non-idr slices\n"); - buf_index = -1; + ret = -1; goto end; } idr(h); // FIXME ensure we don't lose some frames if there is reordering case NAL_SLICE: init_get_bits(&hx->gb, ptr, bit_length); - hx->intra_gb_ptr = - hx->inter_gb_ptr = &hx->gb; + hx->intra_gb_ptr = + hx->inter_gb_ptr = &hx->gb; hx->data_partitioning = 0; if ((err = decode_slice_header(hx, h))) break; + if (h->sei_recovery_frame_cnt >= 0 && h->recovery_frame < 0) { + h->recovery_frame = (h->frame_num + h->sei_recovery_frame_cnt) & + ((1 << h->sps.log2_max_frame_num) - 1); + } + h->cur_pic_ptr->f.key_frame |= (hx->nal_unit_type == NAL_IDR_SLICE) || (h->sei_recovery_frame_cnt >= 0); + if (hx->nal_unit_type == NAL_IDR_SLICE || + h->recovery_frame == h->frame_num) { + h->recovery_frame = -1; + h->cur_pic_ptr->recovered = 1; + } + // If we have an IDR, all frames after it in decoded order are + // "recovered". 
+ if (hx->nal_unit_type == NAL_IDR_SLICE) + h->frame_recovered |= FRAME_RECOVERED_IDR; + h->cur_pic_ptr->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR); + if (h->current_slice == 1) { if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS)) decode_postinit(h, nal_index >= nals_needed); if (h->avctx->hwaccel && - h->avctx->hwaccel->start_frame(h->avctx, NULL, 0) < 0) - return -1; - if (CONFIG_H264_VDPAU_DECODER && - h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) - ff_vdpau_h264_picture_start(h); + (ret = h->avctx->hwaccel->start_frame(h->avctx, NULL, 0)) < 0) + return ret; } if (hx->redundant_pic_count == 0 && @@ -4508,18 +4670,11 @@ again: hx->slice_type_nos == AV_PICTURE_TYPE_I) && avctx->skip_frame < AVDISCARD_ALL) { if (avctx->hwaccel) { - if (avctx->hwaccel->decode_slice(avctx, - &buf[buf_index - consumed], - consumed) < 0) - return -1; - } else if (CONFIG_H264_VDPAU_DECODER && - h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) { - static const uint8_t start_code[] = { - 0x00, 0x00, 0x01 }; - ff_vdpau_add_data_chunk(h->cur_pic_ptr->f.data[0], start_code, - sizeof(start_code)); - ff_vdpau_add_data_chunk(h->cur_pic_ptr->f.data[0], &buf[buf_index - consumed], - consumed); + ret = avctx->hwaccel->decode_slice(avctx, + &buf[buf_index - consumed], + consumed); + if (ret < 0) + return ret; } else context_count++; } @@ -4560,8 +4715,8 @@ again: break; case NAL_SPS: init_get_bits(&h->gb, ptr, bit_length); - if (ff_h264_decode_seq_parameter_set(h) < 0 && - h->is_avc && (nalsize != consumed) && nalsize) { + ret = ff_h264_decode_seq_parameter_set(h); + if (ret < 0 && h->is_avc && (nalsize != consumed) && nalsize) { av_log(h->avctx, AV_LOG_DEBUG, "SPS decoding failure, trying again with the complete NAL\n"); init_get_bits(&h->gb, buf + buf_index + 1 - consumed, @@ -4569,10 +4724,10 @@ again: ff_h264_decode_seq_parameter_set(h); } - if (h264_set_parameter_from_sps(h) < 0) { - buf_index = -1; + ret = h264_set_parameter_from_sps(h); + if (ret < 0) goto end; - } + break; case NAL_PPS: init_get_bits(&h->gb, ptr, bit_length); @@ -4621,7 +4776,7 @@ end: h->picture_structure == PICT_BOTTOM_FIELD); } - return buf_index; + return (ret < 0) ? ret : buf_index; } /** @@ -4651,7 +4806,7 @@ static int output_frame(H264Context *h, AVFrame *dst, AVFrame *src) int hshift = (i > 0) ? h->chroma_x_shift : 0; int vshift = (i > 0) ? h->chroma_y_shift : 0; int off = ((h->sps.crop_left >> hshift) << h->pixel_shift) + - (h->sps.crop_top >> vshift) * dst->linesize[i]; + (h->sps.crop_top >> vshift) * dst->linesize[i]; dst->data[i] += off; } return 0; @@ -4667,7 +4822,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int buf_index = 0; int ret; - h->flags = avctx->flags; + h->flags = avctx->flags; /* end of stream, output what is still in the buffers */ out: @@ -4705,7 +4860,7 @@ out: buf_index = decode_nal_units(h, buf, buf_size, 0); if (buf_index < 0) - return -1; + return AVERROR_INVALIDDATA; if (!h->cur_pic_ptr && h->nal_unit_type == NAL_END_SEQUENCE) { buf_size = 0; @@ -4716,7 +4871,7 @@ out: if (avctx->skip_frame >= AVDISCARD_NONREF) return 0; av_log(avctx, AV_LOG_ERROR, "no frame!\n"); - return -1; + return AVERROR_INVALIDDATA; } if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) || @@ -4726,10 +4881,12 @@ out: field_end(h, 0); - if (!h->next_output_pic) { - /* Wait for second field. 
*/ - *got_frame = 0; - } else { + *got_frame = 0; + if (h->next_output_pic && ((avctx->flags & CODEC_FLAG_OUTPUT_CORRUPT) || + h->next_output_pic->recovered)) { + if (!h->next_output_pic->recovered) + h->next_output_pic->f.flags |= AV_FRAME_FLAG_CORRUPT; + ret = output_frame(h, pict, &h->next_output_pic->f); if (ret < 0) return ret; @@ -4737,7 +4894,7 @@ out: } } - assert(pict->data[0] || !*got_frame); + assert(pict->buf[0] || !*got_frame); return get_consumed_bytes(buf_index, buf_size); } @@ -4757,7 +4914,7 @@ av_cold void ff_h264_free_context(H264Context *h) static av_cold int h264_decode_end(AVCodecContext *avctx) { - H264Context *h = avctx->priv_data; + H264Context *h = avctx->priv_data; ff_h264_free_context(h); @@ -4785,6 +4942,7 @@ static const AVProfile profiles[] = { AVCodec ff_h264_decoder = { .name = "h264", + .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"), .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_H264, .priv_data_size = sizeof(H264Context), @@ -4795,26 +4953,7 @@ AVCodec ff_h264_decoder = { CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS | CODEC_CAP_FRAME_THREADS, .flush = flush_dpb, - .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"), .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy), .update_thread_context = ONLY_IF_THREADS_ENABLED(decode_update_thread_context), .profiles = NULL_IF_CONFIG_SMALL(profiles), }; - -#if CONFIG_H264_VDPAU_DECODER -AVCodec ff_h264_vdpau_decoder = { - .name = "h264_vdpau", - .type = AVMEDIA_TYPE_VIDEO, - .id = AV_CODEC_ID_H264, - .priv_data_size = sizeof(H264Context), - .init = ff_h264_decode_init, - .close = h264_decode_end, - .decode = decode_frame, - .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU, - .flush = flush_dpb, - .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (VDPAU acceleration)"), - .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_VDPAU_H264, - AV_PIX_FMT_NONE}, - .profiles = NULL_IF_CONFIG_SMALL(profiles), -}; -#endif
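
Among the functional changes in this patch (besides dropping the VDPAU decoder, tightening error returns, and adding the field/CAVLC scan tables) is the recovery-point tracking wired into decode_nal_units() and decode_postinit(): an IDR slice, or reaching the frame_num announced by a recovery point SEI, marks pictures as "recovered", and decode_frame() then only outputs recovered pictures unless CODEC_FLAG_OUTPUT_CORRUPT is set. The sketch below is a simplified, standalone paraphrase of the per-slice part of that logic; the struct and function names (mini_h264_state, update_recovery) are stand-ins rather than FFmpeg API, and the display-order propagation of FRAME_RECOVERED_SEI performed in decode_postinit() is omitted.

/* Simplified sketch of the per-slice recovery tracking this patch adds.
 * Field and flag names mirror the patch; the struct is a stand-in for the
 * relevant parts of H264Context, not the real context. */
#include <stdint.h>

#define FRAME_RECOVERED_IDR (1 << 0)   /* picture follows an IDR slice     */
#define FRAME_RECOVERED_SEI (1 << 1)   /* picture follows a recovery point */

struct mini_h264_state {
    int frame_num;                /* frame_num of the current slice        */
    int log2_max_frame_num;       /* from the active SPS                   */
    int sei_recovery_frame_cnt;   /* -1 if no recovery point SEI was seen  */
    int recovery_frame;           /* frame_num we are waiting for, or -1   */
    int frame_recovered;          /* bitmask of FRAME_RECOVERED_*          */
};

/* Returns nonzero if the current picture can be marked as "recovered". */
static int update_recovery(struct mini_h264_state *h, int is_idr_slice)
{
    int recovered = 0;

    /* Latch the target frame_num the first time a recovery point SEI appears. */
    if (h->sei_recovery_frame_cnt >= 0 && h->recovery_frame < 0)
        h->recovery_frame = (h->frame_num + h->sei_recovery_frame_cnt) &
                            ((1 << h->log2_max_frame_num) - 1);

    /* An IDR, or reaching the announced frame, ends the un-recovered state. */
    if (is_idr_slice || h->recovery_frame == h->frame_num) {
        h->recovery_frame = -1;
        recovered = 1;
    }

    /* After an IDR, every later picture in decoding order counts as recovered. */
    if (is_idr_slice)
        h->frame_recovered |= FRAME_RECOVERED_IDR;
    recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR);

    return recovered;
}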