X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fh264.c;h=6925a8ac318a988a9c36639c40568f8d9249719b;hb=b11acd57326db6c2cc1475dd0bea2a06fbc85aa2;hp=92541f614c915ed60c3040fcad7ce1206fb80881;hpb=050324d020f843ce333276ebb6f27cc6026f37d0;p=ffmpeg diff --git a/libavcodec/h264.c b/libavcodec/h264.c index 92541f614c9..6925a8ac318 100644 --- a/libavcodec/h264.c +++ b/libavcodec/h264.c @@ -83,7 +83,7 @@ void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height) { AVCodecContext *avctx = h->avctx; - const AVFrame *src = &h->cur_pic.f; + const AVFrame *src = h->cur_pic.f; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt); int vshift = desc->log2_chroma_h; const int field_pic = h->picture_structure != PICT_FRAME; @@ -322,7 +322,7 @@ static int decode_rbsp_trailing(H264Context *h, const uint8_t *src) int v = *src; int r; - tprintf(h->avctx, "rbsp trailing %X\n", v); + ff_tlog(h->avctx, "rbsp trailing %X\n", v); for (r = 1; r < 9; r++) { if (v & 1) @@ -332,7 +332,7 @@ static int decode_rbsp_trailing(H264Context *h, const uint8_t *src) return 0; } -void ff_h264_free_tables(H264Context *h, int free_rbsp) +void ff_h264_free_tables(H264Context *h) { int i; @@ -355,17 +355,6 @@ void ff_h264_free_tables(H264Context *h, int free_rbsp) av_buffer_pool_uninit(&h->motion_val_pool); av_buffer_pool_uninit(&h->ref_index_pool); - if (free_rbsp && h->DPB) { - for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) - ff_h264_unref_picture(h, &h->DPB[i]); - av_freep(&h->DPB); - } else if (h->DPB) { - for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) - h->DPB[i].needs_realloc = 1; - } - - h->cur_pic_ptr = NULL; - for (i = 0; i < h->nb_slice_ctx; i++) { H264SliceContext *sl = &h->slice_ctx[i]; @@ -383,11 +372,6 @@ void ff_h264_free_tables(H264Context *h, int free_rbsp) sl->edge_emu_buffer_allocated = 0; sl->top_borders_allocated[0] = 0; sl->top_borders_allocated[1] = 0; - - if (free_rbsp) { - av_freep(&sl->rbsp_buffer); - sl->rbsp_buffer_size = 0; - } } } @@ -395,7 +379,7 @@ int ff_h264_alloc_tables(H264Context *h) { const int big_mb_num = h->mb_stride * (h->mb_height + 1); const int row_mb_num = h->mb_stride * 2 * h->avctx->thread_count; - int x, y, i; + int x, y; FF_ALLOCZ_OR_GOTO(h->avctx, h->intra4x4_pred_mode, row_mb_num * 8 * sizeof(uint8_t), fail) @@ -441,19 +425,10 @@ int ff_h264_alloc_tables(H264Context *h) if (!h->dequant4_coeff[0]) h264_init_dequant_tables(h); - if (!h->DPB) { - h->DPB = av_mallocz_array(H264_MAX_PICTURE_COUNT, sizeof(*h->DPB)); - if (!h->DPB) - goto fail; - for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) - av_frame_unref(&h->DPB[i].f); - av_frame_unref(&h->cur_pic.f); - } - return 0; fail: - ff_h264_free_tables(h, 1); + ff_h264_free_tables(h); return AVERROR(ENOMEM); } @@ -584,50 +559,30 @@ int ff_h264_decode_extradata(H264Context *h) return 0; } -av_cold int ff_h264_decode_init(AVCodecContext *avctx) +static int h264_init_context(AVCodecContext *avctx, H264Context *h) { - H264Context *h = avctx->priv_data; int i; - int ret; - h->avctx = avctx; + h->avctx = avctx; + h->dequant_coeff_pps = -1; - h->bit_depth_luma = 8; - h->chroma_format_idc = 1; + h->picture_structure = PICT_FRAME; + h->slice_context_count = 1; + h->workaround_bugs = avctx->workaround_bugs; + h->flags = avctx->flags; + h->prev_poc_msb = 1 << 16; + h->x264_build = -1; + h->recovery_frame = -1; + h->frame_recovered = 0; - ff_h264dsp_init(&h->h264dsp, 8, 1); - ff_h264chroma_init(&h->h264chroma, h->sps.bit_depth_chroma); - ff_h264qpel_init(&h->h264qpel, 8); - ff_h264_pred_init(&h->hpc, 
h->avctx->codec_id, 8, 1); - - h->dequant_coeff_pps = -1; - - /* needed so that IDCT permutation is known early */ - ff_videodsp_init(&h->vdsp, 8); - h->cur_chroma_format_idc = -1; - - memset(h->pps.scaling_matrix4, 16, 6 * 16 * sizeof(uint8_t)); - memset(h->pps.scaling_matrix8, 16, 2 * 64 * sizeof(uint8_t)); - - h->picture_structure = PICT_FRAME; - h->slice_context_count = 1; - h->workaround_bugs = avctx->workaround_bugs; - h->flags = avctx->flags; + h->next_outputed_poc = INT_MIN; + for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) + h->last_pocs[i] = INT_MIN; - /* set defaults */ - // s->decode_mb = ff_h263_decode_mb; - if (!avctx->has_b_frames) - h->low_delay = 1; + ff_h264_reset_sei(h); avctx->chroma_sample_location = AVCHROMA_LOC_LEFT; - ff_h264_decode_init_vlc(); - - ff_init_cabac_states(); - - h->pixel_shift = 0; - h->sps.bit_depth_luma = avctx->bits_per_raw_sample = 8; - h->nb_slice_ctx = (avctx->active_thread_type & FF_THREAD_SLICE) ? H264_MAX_THREADS : 1; h->slice_ctx = av_mallocz_array(h->nb_slice_ctx, sizeof(*h->slice_ctx)); if (!h->slice_ctx) { @@ -635,17 +590,39 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx) return AVERROR(ENOMEM); } + for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) { + h->DPB[i].f = av_frame_alloc(); + if (!h->DPB[i].f) + return AVERROR(ENOMEM); + } + + h->cur_pic.f = av_frame_alloc(); + if (!h->cur_pic.f) + return AVERROR(ENOMEM); + for (i = 0; i < h->nb_slice_ctx; i++) h->slice_ctx[i].h264 = h; - h->outputed_poc = h->next_outputed_poc = INT_MIN; - for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) - h->last_pocs[i] = INT_MIN; - h->prev_poc_msb = 1 << 16; - h->x264_build = -1; - ff_h264_reset_sei(h); - h->recovery_frame = -1; - h->frame_recovered = 0; + return 0; +} + +av_cold int ff_h264_decode_init(AVCodecContext *avctx) +{ + H264Context *h = avctx->priv_data; + int ret; + + ret = h264_init_context(avctx, h); + if (ret < 0) + return ret; + + /* set defaults */ + if (!avctx->has_b_frames) + h->low_delay = 1; + + ff_h264_decode_init_vlc(); + + ff_init_cabac_states(); + if (avctx->codec_id == AV_CODEC_ID_H264) { if (avctx->ticks_per_frame == 1) h->avctx->framerate.num *= 2; @@ -680,24 +657,17 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx) static int decode_init_thread_copy(AVCodecContext *avctx) { H264Context *h = avctx->priv_data; - int i; + int ret; if (!avctx->internal->is_copy) return 0; - memset(h->sps_buffers, 0, sizeof(h->sps_buffers)); - memset(h->pps_buffers, 0, sizeof(h->pps_buffers)); - h->nb_slice_ctx = (avctx->active_thread_type & FF_THREAD_SLICE) ? H264_MAX_THREADS : 1; - h->slice_ctx = av_mallocz_array(h->nb_slice_ctx, sizeof(*h->slice_ctx)); - if (!h->slice_ctx) { - h->nb_slice_ctx = 0; - return AVERROR(ENOMEM); - } + memset(h, 0, sizeof(*h)); - for (i = 0; i < h->nb_slice_ctx; i++) - h->slice_ctx[i].h264 = h; + ret = h264_init_context(avctx, h); + if (ret < 0) + return ret; - h->avctx = avctx; h->context_initialized = 0; return 0; @@ -718,7 +688,7 @@ static void decode_postinit(H264Context *h, int setup_finished) int i, pics, out_of_order, out_idx; int invalid = 0, cnt = 0; - h->cur_pic_ptr->f.pict_type = h->pict_type; + h->cur_pic_ptr->f->pict_type = h->pict_type; if (h->next_output_pic) return; @@ -733,8 +703,8 @@ static void decode_postinit(H264Context *h, int setup_finished) return; } - cur->f.interlaced_frame = 0; - cur->f.repeat_pict = 0; + cur->f->interlaced_frame = 0; + cur->f->repeat_pict = 0; /* Signal interlacing information externally. 
*/ /* Prioritize picture timing SEI information over used @@ -746,55 +716,55 @@ static void decode_postinit(H264Context *h, int setup_finished) break; case SEI_PIC_STRUCT_TOP_FIELD: case SEI_PIC_STRUCT_BOTTOM_FIELD: - cur->f.interlaced_frame = 1; + cur->f->interlaced_frame = 1; break; case SEI_PIC_STRUCT_TOP_BOTTOM: case SEI_PIC_STRUCT_BOTTOM_TOP: if (FIELD_OR_MBAFF_PICTURE(h)) - cur->f.interlaced_frame = 1; + cur->f->interlaced_frame = 1; else // try to flag soft telecine progressive - cur->f.interlaced_frame = h->prev_interlaced_frame; + cur->f->interlaced_frame = h->prev_interlaced_frame; break; case SEI_PIC_STRUCT_TOP_BOTTOM_TOP: case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM: /* Signal the possibility of telecined film externally * (pic_struct 5,6). From these hints, let the applications * decide if they apply deinterlacing. */ - cur->f.repeat_pict = 1; + cur->f->repeat_pict = 1; break; case SEI_PIC_STRUCT_FRAME_DOUBLING: - cur->f.repeat_pict = 2; + cur->f->repeat_pict = 2; break; case SEI_PIC_STRUCT_FRAME_TRIPLING: - cur->f.repeat_pict = 4; + cur->f->repeat_pict = 4; break; } if ((h->sei_ct_type & 3) && h->sei_pic_struct <= SEI_PIC_STRUCT_BOTTOM_TOP) - cur->f.interlaced_frame = (h->sei_ct_type & (1 << 1)) != 0; + cur->f->interlaced_frame = (h->sei_ct_type & (1 << 1)) != 0; } else { /* Derive interlacing flag from used decoding process. */ - cur->f.interlaced_frame = FIELD_OR_MBAFF_PICTURE(h); + cur->f->interlaced_frame = FIELD_OR_MBAFF_PICTURE(h); } - h->prev_interlaced_frame = cur->f.interlaced_frame; + h->prev_interlaced_frame = cur->f->interlaced_frame; if (cur->field_poc[0] != cur->field_poc[1]) { /* Derive top_field_first from field pocs. */ - cur->f.top_field_first = cur->field_poc[0] < cur->field_poc[1]; + cur->f->top_field_first = cur->field_poc[0] < cur->field_poc[1]; } else { - if (cur->f.interlaced_frame || h->sps.pic_struct_present_flag) { + if (cur->f->interlaced_frame || h->sps.pic_struct_present_flag) { /* Use picture timing SEI information. Even if it is a * information of a past frame, better than nothing. 
*/ if (h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM || h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM_TOP) - cur->f.top_field_first = 1; + cur->f->top_field_first = 1; else - cur->f.top_field_first = 0; + cur->f->top_field_first = 0; } else { /* Most likely progressive */ - cur->f.top_field_first = 0; + cur->f->top_field_first = 0; } } @@ -803,7 +773,7 @@ static void decode_postinit(H264Context *h, int setup_finished) h->frame_packing_arrangement_type <= 6 && h->content_interpretation_type > 0 && h->content_interpretation_type < 3) { - AVStereo3D *stereo = av_stereo3d_create_side_data(&cur->f); + AVStereo3D *stereo = av_stereo3d_create_side_data(cur->f); if (!stereo) return; @@ -841,7 +811,7 @@ static void decode_postinit(H264Context *h, int setup_finished) if (h->sei_display_orientation_present && (h->sei_anticlockwise_rotation || h->sei_hflip || h->sei_vflip)) { double angle = h->sei_anticlockwise_rotation * 360 / (double) (1 << 16); - AVFrameSideData *rotation = av_frame_new_side_data(&cur->f, + AVFrameSideData *rotation = av_frame_new_side_data(cur->f, AV_FRAME_DATA_DISPLAYMATRIX, sizeof(int32_t) * 9); if (!rotation) @@ -852,6 +822,16 @@ static void decode_postinit(H264Context *h, int setup_finished) h->sei_hflip, h->sei_vflip); } + if (h->sei_reguserdata_afd_present) { + AVFrameSideData *sd = av_frame_new_side_data(cur->f, AV_FRAME_DATA_AFD, + sizeof(uint8_t)); + if (!sd) + return; + + *sd->data = h->active_format_description; + h->sei_reguserdata_afd_present = 0; + } + // FIXME do something with unavailable reference frames /* Sort B-frames into display order */ @@ -895,13 +875,13 @@ static void decode_postinit(H264Context *h, int setup_finished) cnt += out->poc < h->last_pocs[i]; invalid += out->poc == INT_MIN; } - if (!h->mmco_reset && !cur->f.key_frame && + if (!h->mmco_reset && !cur->f->key_frame && cnt + invalid == MAX_DELAYED_PIC_COUNT && cnt > 0) { h->mmco_reset = 2; if (pics > 1) h->delayed_pic[pics - 2]->mmco_reset = 2; } - if (h->mmco_reset || cur->f.key_frame) { + if (h->mmco_reset || cur->f->key_frame) { for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) h->last_pocs[i] = INT_MIN; cnt = 0; @@ -912,16 +892,16 @@ static void decode_postinit(H264Context *h, int setup_finished) for (i = 1; i < MAX_DELAYED_PIC_COUNT && h->delayed_pic[i] && !h->delayed_pic[i - 1]->mmco_reset && - !h->delayed_pic[i]->f.key_frame; + !h->delayed_pic[i]->f->key_frame; i++) if (h->delayed_pic[i]->poc < out->poc) { out = h->delayed_pic[i]; out_idx = i; } if (h->avctx->has_b_frames == 0 && - (h->delayed_pic[0]->f.key_frame || h->mmco_reset)) + (h->delayed_pic[0]->f->key_frame || h->mmco_reset)) h->next_outputed_poc = INT_MIN; - out_of_order = !out->f.key_frame && !h->mmco_reset && + out_of_order = !out->f->key_frame && !h->mmco_reset && (out->poc < h->next_outputed_poc); if (h->sps.bitstream_restriction_flag && @@ -935,7 +915,7 @@ static void decode_postinit(H264Context *h, int setup_finished) } else if (h->low_delay && ((h->next_outputed_poc != INT_MIN && out->poc > h->next_outputed_poc + 2) || - cur->f.pict_type == AV_PICTURE_TYPE_B)) { + cur->f->pict_type == AV_PICTURE_TYPE_B)) { h->low_delay = 0; h->avctx->has_b_frames++; } @@ -960,7 +940,7 @@ static void decode_postinit(H264Context *h, int setup_finished) h->next_outputed_poc = INT_MIN; } } else { - if (out_idx == 0 && pics > 1 && h->delayed_pic[0]->f.key_frame) { + if (out_idx == 0 && pics > 1 && h->delayed_pic[0]->f->key_frame) { h->next_outputed_poc = INT_MIN; } else { h->next_outputed_poc = out->poc; @@ -980,8 +960,12 @@ static void 
decode_postinit(H264Context *h, int setup_finished) h->next_output_pic->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_SEI); } - if (setup_finished && !h->avctx->hwaccel) + if (setup_finished && !h->avctx->hwaccel) { ff_thread_finish_setup(h->avctx); + + if (h->avctx->active_thread_type & FF_THREAD_FRAME) + h->setup_finished = 1; + } } int ff_pred_weight_table(H264Context *h, H264SliceContext *sl) @@ -1064,7 +1048,7 @@ void ff_h264_flush_change(H264Context *h) int i; for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) h->last_pocs[i] = INT_MIN; - h->outputed_poc = h->next_outputed_poc = INT_MIN; + h->next_outputed_poc = INT_MIN; h->prev_interlaced_frame = 1; idr(h); if (h->cur_pic_ptr) @@ -1085,15 +1069,14 @@ static void flush_dpb(AVCodecContext *avctx) ff_h264_flush_change(h); - if (h->DPB) - for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) - ff_h264_unref_picture(h, &h->DPB[i]); + for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) + ff_h264_unref_picture(h, &h->DPB[i]); h->cur_pic_ptr = NULL; ff_h264_unref_picture(h, &h->cur_pic); h->mb_y = 0; - ff_h264_free_tables(h, 1); + ff_h264_free_tables(h); h->context_initialized = 0; } @@ -1202,44 +1185,6 @@ int ff_h264_get_profile(SPS *sps) return profile; } -int ff_h264_set_parameter_from_sps(H264Context *h) -{ - if (h->flags & CODEC_FLAG_LOW_DELAY || - (h->sps.bitstream_restriction_flag && - !h->sps.num_reorder_frames)) { - if (h->avctx->has_b_frames > 1 || h->delayed_pic[0]) - av_log(h->avctx, AV_LOG_WARNING, "Delayed frames seen. " - "Reenabling low delay requires a codec flush.\n"); - else - h->low_delay = 1; - } - - if (h->avctx->has_b_frames < 2) - h->avctx->has_b_frames = !h->low_delay; - - if (h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma || - h->cur_chroma_format_idc != h->sps.chroma_format_idc) { - if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 10) { - h->avctx->bits_per_raw_sample = h->sps.bit_depth_luma; - h->cur_chroma_format_idc = h->sps.chroma_format_idc; - h->pixel_shift = h->sps.bit_depth_luma > 8; - - ff_h264dsp_init(&h->h264dsp, h->sps.bit_depth_luma, - h->sps.chroma_format_idc); - ff_h264chroma_init(&h->h264chroma, h->sps.bit_depth_chroma); - ff_h264qpel_init(&h->h264qpel, h->sps.bit_depth_luma); - ff_h264_pred_init(&h->hpc, h->avctx->codec_id, h->sps.bit_depth_luma, - h->sps.chroma_format_idc); - ff_videodsp_init(&h->vdsp, h->sps.bit_depth_luma); - } else { - av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n", - h->sps.bit_depth_luma); - return AVERROR_INVALIDDATA; - } - } - return 0; -} - int ff_set_ref_count(H264Context *h, H264SliceContext *sl) { int ref_count[2], list_count; @@ -1520,7 +1465,7 @@ again: ((1 << h->sps.log2_max_frame_num) - 1); } - h->cur_pic_ptr->f.key_frame |= + h->cur_pic_ptr->f->key_frame |= (h->nal_unit_type == NAL_IDR_SLICE) || (h->sei_recovery_frame_cnt >= 0); @@ -1586,10 +1531,6 @@ again: ff_h264_decode_seq_parameter_set(h); } - ret = ff_h264_set_parameter_from_sps(h); - if (ret < 0) - goto end; - break; case NAL_PPS: init_get_bits(&h->gb, ptr, bit_length); @@ -1691,6 +1632,7 @@ static int h264_decode_frame(AVCodecContext *avctx, void *data, int ret; h->flags = avctx->flags; + h->setup_finished = 0; /* end of stream, output what is still in the buffers */ out: @@ -1705,7 +1647,7 @@ out: out_idx = 0; for (i = 1; h->delayed_pic[i] && - !h->delayed_pic[i]->f.key_frame && + !h->delayed_pic[i]->f->key_frame && !h->delayed_pic[i]->mmco_reset; i++) if (h->delayed_pic[i]->poc < out->poc) { @@ -1717,7 +1659,7 @@ out: h->delayed_pic[i] = h->delayed_pic[i + 1]; if (out) { - ret = 
output_frame(h, pict, &out->f); + ret = output_frame(h, pict, out->f); if (ret < 0) return ret; *got_frame = 1; @@ -1753,9 +1695,9 @@ out: if (h->next_output_pic && ((avctx->flags & CODEC_FLAG_OUTPUT_CORRUPT) || h->next_output_pic->recovered)) { if (!h->next_output_pic->recovered) - h->next_output_pic->f.flags |= AV_FRAME_FLAG_CORRUPT; + h->next_output_pic->f->flags |= AV_FRAME_FLAG_CORRUPT; - ret = output_frame(h, pict, &h->next_output_pic->f); + ret = output_frame(h, pict, h->next_output_pic->f); if (ret < 0) return ret; *got_frame = 1; @@ -1771,8 +1713,17 @@ av_cold void ff_h264_free_context(H264Context *h) { int i; - ff_h264_free_tables(h, 1); // FIXME cleanup init stuff perhaps + ff_h264_free_tables(h); + for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) { + ff_h264_unref_picture(h, &h->DPB[i]); + av_frame_free(&h->DPB[i].f); + } + + h->cur_pic_ptr = NULL; + + for (i = 0; i < h->nb_slice_ctx; i++) + av_freep(&h->slice_ctx[i].rbsp_buffer); av_freep(&h->slice_ctx); h->nb_slice_ctx = 0; @@ -1790,6 +1741,7 @@ static av_cold int h264_decode_end(AVCodecContext *avctx) ff_h264_free_context(h); ff_h264_unref_picture(h, &h->cur_pic); + av_frame_free(&h->cur_pic.f); return 0; }
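
The central change in this diff is that H264Picture.f and H264Context.cur_pic.f become AVFrame pointers instead of embedded AVFrame structs: each picture slot now owns a frame allocated once in h264_init_context() and freed once in ff_h264_free_context()/h264_decode_end(), with ff_h264_unref_picture()/av_frame_unref() only dropping references in between (which is also why ff_h264_free_tables() no longer needs its free_rbsp parameter). Below is a minimal sketch of that ownership pattern, not the decoder's own code: it uses the public libavutil API and illustrative stand-in names (MAX_PICS, struct Picture) in place of H264_MAX_PICTURE_COUNT and H264Picture.

#include <libavutil/error.h>
#include <libavutil/frame.h>

#define MAX_PICS 36                  /* illustrative stand-in for H264_MAX_PICTURE_COUNT */

struct Picture {
    AVFrame *f;                      /* owned: allocated at init, freed at teardown */
};

/* Allocate the AVFrame shells up front, as h264_init_context() does in the diff. */
static int init_pictures(struct Picture *dpb, struct Picture *cur)
{
    for (int i = 0; i < MAX_PICS; i++) {
        dpb[i].f = av_frame_alloc();
        if (!dpb[i].f)
            return AVERROR(ENOMEM);
    }
    cur->f = av_frame_alloc();
    if (!cur->f)
        return AVERROR(ENOMEM);
    return 0;
}

/* Tear down the way ff_h264_free_context()/h264_decode_end() do after this diff:
 * drop any remaining references, then free the AVFrame structs themselves. */
static void free_pictures(struct Picture *dpb, struct Picture *cur)
{
    for (int i = 0; i < MAX_PICS; i++) {
        if (dpb[i].f)
            av_frame_unref(dpb[i].f);
        av_frame_free(&dpb[i].f);    /* av_frame_free() tolerates a NULL frame */
    }
    if (cur->f)
        av_frame_unref(cur->f);
    av_frame_free(&cur->f);
}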