X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fh264.c;h=1bfb1e2f258bdce2adc1581a426265c006cf8d83;hb=04e7f6d2d01cba4c7fb2ad84b13819fa3e4e1425;hp=23d5b10390d36d11e89b625c6c2d3954dbcbceaa;hpb=6ee0eb38c015d7bf139b0058e4d28244830d9d01;p=ffmpeg

diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index 23d5b10390d..1bfb1e2f258 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -82,7 +82,7 @@ static void filter_mb_fast( H264Context *h, int mb_x, int mb_y, uint8_t *img_y,
 static Picture * remove_long(H264Context *h, int i, int ref_mask);
 
 static av_always_inline uint32_t pack16to32(int a, int b){
-#ifdef WORDS_BIGENDIAN
+#if HAVE_BIGENDIAN
    return (b&0xFFFF) + (a<<16);
 #else
    return (a&0xFFFF) + (b<<16);
@@ -1999,6 +1999,8 @@ static void free_tables(H264Context *h){
         av_freep(&hx->s.obmc_scratchpad);
         av_freep(&hx->rbsp_buffer[1]);
         av_freep(&hx->rbsp_buffer[0]);
+        hx->rbsp_buffer_size[0] = 0;
+        hx->rbsp_buffer_size[1] = 0;
         if (i) av_freep(&h->thread_context[i]);
     }
 }
@@ -2077,22 +2079,22 @@ static int alloc_tables(H264Context *h){
     const int big_mb_num= s->mb_stride * (s->mb_height+1);
     int x,y;
 
-    CHECKED_ALLOCZ(h->intra4x4_pred_mode, big_mb_num * 8 * sizeof(uint8_t))
+    FF_ALLOCZ_OR_GOTO(h->s.avctx, h->intra4x4_pred_mode, big_mb_num * 8 * sizeof(uint8_t), fail)
 
-    CHECKED_ALLOCZ(h->non_zero_count    , big_mb_num * 16 * sizeof(uint8_t))
-    CHECKED_ALLOCZ(h->slice_table_base  , (big_mb_num+s->mb_stride) * sizeof(*h->slice_table_base))
-    CHECKED_ALLOCZ(h->cbp_table, big_mb_num * sizeof(uint16_t))
+    FF_ALLOCZ_OR_GOTO(h->s.avctx, h->non_zero_count    , big_mb_num * 16 * sizeof(uint8_t), fail)
+    FF_ALLOCZ_OR_GOTO(h->s.avctx, h->slice_table_base  , (big_mb_num+s->mb_stride) * sizeof(*h->slice_table_base), fail)
+    FF_ALLOCZ_OR_GOTO(h->s.avctx, h->cbp_table, big_mb_num * sizeof(uint16_t), fail)
 
-    CHECKED_ALLOCZ(h->chroma_pred_mode_table, big_mb_num * sizeof(uint8_t))
-    CHECKED_ALLOCZ(h->mvd_table[0], 32*big_mb_num * sizeof(uint16_t));
-    CHECKED_ALLOCZ(h->mvd_table[1], 32*big_mb_num * sizeof(uint16_t));
-    CHECKED_ALLOCZ(h->direct_table, 32*big_mb_num * sizeof(uint8_t));
+    FF_ALLOCZ_OR_GOTO(h->s.avctx, h->chroma_pred_mode_table, big_mb_num * sizeof(uint8_t), fail)
+    FF_ALLOCZ_OR_GOTO(h->s.avctx, h->mvd_table[0], 32*big_mb_num * sizeof(uint16_t), fail);
+    FF_ALLOCZ_OR_GOTO(h->s.avctx, h->mvd_table[1], 32*big_mb_num * sizeof(uint16_t), fail);
+    FF_ALLOCZ_OR_GOTO(h->s.avctx, h->direct_table, 32*big_mb_num * sizeof(uint8_t) , fail);
 
     memset(h->slice_table_base, -1, (big_mb_num+s->mb_stride) * sizeof(*h->slice_table_base));
     h->slice_table= h->slice_table_base + s->mb_stride*2 + 1;
 
-    CHECKED_ALLOCZ(h->mb2b_xy  , big_mb_num * sizeof(uint32_t));
-    CHECKED_ALLOCZ(h->mb2b8_xy , big_mb_num * sizeof(uint32_t));
+    FF_ALLOCZ_OR_GOTO(h->s.avctx, h->mb2b_xy  , big_mb_num * sizeof(uint32_t), fail);
+    FF_ALLOCZ_OR_GOTO(h->s.avctx, h->mb2b8_xy , big_mb_num * sizeof(uint32_t), fail);
     for(y=0; y<s->mb_height; y++){
         for(x=0; x<s->mb_width; x++){
             const int mb_xy= x + y*s->mb_stride;
@@ -2139,8 +2141,8 @@ static void clone_tables(H264Context *dst, H264Context *src){
  * Allocate buffers which are not shared amongst multiple threads.
  */
 static int context_init(H264Context *h){
-    CHECKED_ALLOCZ(h->top_borders[0], h->s.mb_width * (16+8+8) * sizeof(uint8_t))
-    CHECKED_ALLOCZ(h->top_borders[1], h->s.mb_width * (16+8+8) * sizeof(uint8_t))
+    FF_ALLOCZ_OR_GOTO(h->s.avctx, h->top_borders[0], h->s.mb_width * (16+8+8) * sizeof(uint8_t), fail)
+    FF_ALLOCZ_OR_GOTO(h->s.avctx, h->top_borders[1], h->s.mb_width * (16+8+8) * sizeof(uint8_t), fail)
 
     return 0;
 fail:
@@ -2196,11 +2198,6 @@ static av_cold int decode_init(AVCodecContext *avctx){
     if(!avctx->has_b_frames)
         s->low_delay= 1;
 
-    if(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
-        avctx->pix_fmt= PIX_FMT_VDPAU_H264;
-    else
-        avctx->pix_fmt= avctx->get_format(avctx, avctx->codec->pix_fmts);
-    avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
     avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
 
     decode_init_vlc();
@@ -2240,6 +2237,7 @@ static int frame_start(H264Context *h){
      * See decode_nal_units().
      */
     s->current_picture_ptr->key_frame= 0;
+    s->current_picture_ptr->mmco_reset= 0;
 
     assert(s->linesize && s->uvlinesize);
 
@@ -3369,6 +3367,7 @@ static int execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
             h->poc_msb=
             h->frame_num=
             s->current_picture_ptr->frame_num= 0;
+            s->current_picture_ptr->mmco_reset=1;
             break;
         default: assert(0);
         }
@@ -3808,6 +3807,22 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
     if (!s->context_initialized) {
         if(h != h0)
             return -1;  // we cant (re-)initialize context during parallel decoding
+
+        avcodec_set_dimensions(s->avctx, s->width, s->height);
+        s->avctx->sample_aspect_ratio= h->sps.sar;
+        if(!s->avctx->sample_aspect_ratio.den)
+            s->avctx->sample_aspect_ratio.den = 1;
+
+        if(h->sps.timing_info_present_flag){
+            s->avctx->time_base= (AVRational){h->sps.num_units_in_tick, h->sps.time_scale};
+            if(h->x264_build > 0 && h->x264_build < 44)
+                s->avctx->time_base.den *= 2;
+            av_reduce(&s->avctx->time_base.num, &s->avctx->time_base.den,
+                      s->avctx->time_base.num, s->avctx->time_base.den, 1<<30);
+        }
+        s->avctx->pix_fmt = s->avctx->get_format(s->avctx, s->avctx->codec->pix_fmts);
+        s->avctx->hwaccel = ff_find_hwaccel(s->avctx->codec->id, s->avctx->pix_fmt);
+
         if (MPV_common_init(s) < 0)
             return -1;
         s->first_field = 0;
@@ -3830,20 +3845,6 @@ static int decode_slice_header(H264Context *h, H264Context *h0){
         for(i = 0; i < s->avctx->thread_count; i++)
             if(context_init(h->thread_context[i]) < 0)
                 return -1;
-
-        s->avctx->width = s->width;
-        s->avctx->height = s->height;
-        s->avctx->sample_aspect_ratio= h->sps.sar;
-        if(!s->avctx->sample_aspect_ratio.den)
-            s->avctx->sample_aspect_ratio.den = 1;
-
-        if(h->sps.timing_info_present_flag){
-            s->avctx->time_base= (AVRational){h->sps.num_units_in_tick, h->sps.time_scale};
-            if(h->x264_build > 0 && h->x264_build < 44)
-                s->avctx->time_base.den *= 2;
-            av_reduce(&s->avctx->time_base.num, &s->avctx->time_base.den,
-                      s->avctx->time_base.num, s->avctx->time_base.den, 1<<30);
-        }
     }
 
     h->frame_num= get_bits(&s->gb, h->sps.log2_max_frame_num);
@@ -6691,7 +6692,7 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg){
         ff_init_cabac_states( &h->cabac);
         ff_init_cabac_decoder( &h->cabac,
                                s->gb.buffer + get_bits_count(&s->gb)/8,
-                               ( s->gb.size_in_bits - get_bits_count(&s->gb) + 7)/8);
+                               (get_bits_left(&s->gb) + 7)/8);
         /* calculate pre-state */
         for( i= 0; i < 460; i++ ) {
             int pre;
@@ -7085,6 +7086,10 @@ static inline int decode_vui_parameters(H264Context *h, SPS *sps){
     if(sps->timing_info_present_flag){
         sps->num_units_in_tick = get_bits_long(&s->gb, 32);
         sps->time_scale = get_bits_long(&s->gb, 32);
+        if(sps->num_units_in_tick-1 > 0x7FFFFFFEU || sps->time_scale-1 > 0x7FFFFFFEU){
+            av_log(h->s.avctx, AV_LOG_ERROR, "time_scale/num_units_in_tick invalid or unsupported (%d/%d)\n", sps->time_scale, sps->num_units_in_tick);
+            return -1;
+        }
         sps->fixed_frame_rate_flag = get_bits1(&s->gb);
     }
 
@@ -7276,7 +7281,8 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){
 
     sps->vui_parameters_present_flag= get_bits1(&s->gb);
     if( sps->vui_parameters_present_flag )
-        decode_vui_parameters(h, sps);
+        if (decode_vui_parameters(h, sps) < 0)
+            goto fail;
 
     if(s->avctx->debug&FF_DEBUG_PICT_INFO){
         av_log(h->s.avctx, AV_LOG_DEBUG, "sps:%u profile:%d/%d poc:%d ref:%d %dx%d %s %s crop:%d/%d/%d/%d %s %s %d/%d\n",
@@ -7454,7 +7460,7 @@ static void execute_decode_slices(H264Context *h, int context_count){
         }
 
         avctx->execute(avctx, (void *)decode_slice,
-                       (void **)h->thread_context, NULL, context_count, sizeof(void*));
+                       h->thread_context, NULL, context_count, sizeof(void*));
 
         /* pull back stuff from slices to master context */
         hx = h->thread_context[context_count - 1];
@@ -7503,7 +7509,7 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
             nalsize = 0;
             for(i = 0; i < h->nal_length_size; i++)
                 nalsize = (nalsize << 8) | buf[buf_index++];
-            if(nalsize <= 1 || (nalsize+buf_index > buf_size)){
+            if(nalsize <= 1 || nalsize > buf_size - buf_index){
                 if(nalsize == 1){
                     buf_index++;
                     continue;
@@ -7601,9 +7607,12 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size){
             init_get_bits(&hx->s.gb, ptr, bit_length);
             hx->intra_gb_ptr=
             hx->inter_gb_ptr= NULL;
+
+            if ((err = decode_slice_header(hx, h)) < 0)
+                break;
+
             hx->s.data_partitioning = 1;
 
-            err = decode_slice_header(hx, h);
             break;
         case NAL_DPB:
             init_get_bits(&hx->intra_gb, ptr, bit_length);
@@ -7708,7 +7717,7 @@ static int decode_frame(AVCodecContext *avctx,
 //FIXME factorize this with the output code below
         out = h->delayed_pic[0];
         out_idx = 0;
-        for(i=1; h->delayed_pic[i] && !h->delayed_pic[i]->key_frame; i++)
+        for(i=1; h->delayed_pic[i] && !h->delayed_pic[i]->key_frame && !h->delayed_pic[i]->mmco_reset; i++)
             if(h->delayed_pic[i]->poc < out->poc){
                 out = h->delayed_pic[i];
                 out_idx = i;
@@ -7785,7 +7794,7 @@ static int decode_frame(AVCodecContext *avctx,
     if(!(s->flags2 & CODEC_FLAG2_CHUNKS) || (s->mb_y >= s->mb_height && s->mb_height)){
         Picture *out = s->current_picture_ptr;
         Picture *cur = s->current_picture_ptr;
-        int i, pics, cross_idr, out_of_order, out_idx;
+        int i, pics, out_of_order, out_idx;
 
         field_end(h);
 
@@ -7884,20 +7893,20 @@ static int decode_frame(AVCodecContext *avctx,
 
         out = h->delayed_pic[0];
         out_idx = 0;
-        for(i=1; h->delayed_pic[i] && !h->delayed_pic[i]->key_frame; i++)
+        for(i=1; h->delayed_pic[i] && !h->delayed_pic[i]->key_frame && !h->delayed_pic[i]->mmco_reset; i++)
             if(h->delayed_pic[i]->poc < out->poc){
                 out = h->delayed_pic[i];
                 out_idx = i;
             }
-        cross_idr = !!h->delayed_pic[i] || h->delayed_pic[0]->key_frame;
-
-        out_of_order = !cross_idr && out->poc < h->outputed_poc;
+        if(s->avctx->has_b_frames == 0 && (h->delayed_pic[0]->key_frame || h->delayed_pic[0]->mmco_reset))
+            h->outputed_poc= INT_MIN;
+        out_of_order = out->poc < h->outputed_poc;
 
         if(h->sps.bitstream_restriction_flag && s->avctx->has_b_frames >= h->sps.num_reorder_frames)
         { }
         else if((out_of_order && pics-1 == s->avctx->has_b_frames && s->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT)
           || (s->low_delay &&
-            ((!cross_idr && out->poc > h->outputed_poc + 2)
+            ((h->outputed_poc != INT_MIN && out->poc > h->outputed_poc + 2)
              || cur->pict_type == FF_B_TYPE)))
         {
             s->low_delay= 0;
@@ -7912,7 +7921,10 @@ static int decode_frame(AVCodecContext *avctx,
 
         if(!out_of_order && pics > s->avctx->has_b_frames){
             *data_size = sizeof(AVFrame);
-            h->outputed_poc = out->poc;
+            if(out_idx==0 && h->delayed_pic[0] && (h->delayed_pic[0]->key_frame || h->delayed_pic[0]->mmco_reset)) {
+                h->outputed_poc = INT_MIN;
+            } else
+                h->outputed_poc = out->poc;
             *pict= *(AVFrame*)out;
         }else{
             av_log(avctx, AV_LOG_DEBUG, "no picture\n");
@@ -8178,6 +8190,7 @@ AVCodec h264_vdpau_decoder = {
     NULL,
     CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
     .flush= flush_dpb,
     .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (VDPAU acceleration)"),
+    .pix_fmts = (const enum PixelFormat[]){PIX_FMT_VDPAU_H264, PIX_FMT_NONE},
 };
 #endif