case NAL_SLICE:
sl->gb = nal->gb;
- if ((err = ff_h264_decode_slice_header(h, sl)))
+ if ((err = ff_h264_decode_slice_header(h, sl, nal)))
break;
- if (h->sei.recovery_point.recovery_frame_cnt >= 0 && h->recovery_frame < 0) {
- h->recovery_frame = (h->poc.frame_num + h->sei.recovery_point.recovery_frame_cnt) &
- ((1 << h->ps.sps->log2_max_frame_num) - 1);
+ if (h->sei.recovery_point.recovery_frame_cnt >= 0) {
+ const int sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;
+
+ if (h->poc.frame_num != sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I)
+ h->valid_recovery_point = 1;
+
+ if ( h->recovery_frame < 0
+ || av_mod_uintp2(h->recovery_frame - h->poc.frame_num, h->ps.sps->log2_max_frame_num) > sei_recovery_frame_cnt) {
+ h->recovery_frame = av_mod_uintp2(h->poc.frame_num + sei_recovery_frame_cnt, h->ps.sps->log2_max_frame_num);
+
+ if (!h->valid_recovery_point)
+ h->recovery_frame = h->poc.frame_num;
+ }
}
- h->cur_pic_ptr->f->key_frame |=
- (nal->type == NAL_IDR_SLICE) || (h->sei.recovery_point.recovery_frame_cnt >= 0);
+ h->cur_pic_ptr->f->key_frame |= (nal->type == NAL_IDR_SLICE);
- if (nal->type == NAL_IDR_SLICE || h->recovery_frame == h->poc.frame_num) {
+ if (nal->type == NAL_IDR_SLICE ||
+ (h->recovery_frame == h->poc.frame_num && nal->ref_idc)) {
h->recovery_frame = -1;
h->cur_pic_ptr->recovered = 1;
}
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height);
- int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl);
+ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl,
+ const H2645NAL *nal);
+#define SLICE_SINGLETHREAD 1
+#define SLICE_SKIPED 2
+
int ff_h264_execute_decode_slices(H264Context *h, unsigned context_count);
int ff_h264_update_thread_context(AVCodecContext *dst,
const AVCodecContext *src);
* slice in a field (or a frame). It decides whether we are decoding a new frame
* or a second field in a pair and does the necessary setup.
*/
- static int h264_field_start(H264Context *h, const H264SliceContext *sl)
+ static int h264_field_start(H264Context *h, const H264SliceContext *sl,
+ const H2645NAL *nal)
{
+ int i;
const SPS *sps;
int last_pic_structure, last_pic_droppable, ret;
h->poc.frame_num = frame_num;
sl->mb_mbaff = 0;
+ mb_aff_frame = 0;
+ last_mb_aff_frame = h->mb_aff_frame;
- droppable = h->nal_ref_idc == 0;
+ droppable = nal->ref_idc == 0;
if (sps->frame_mbs_only_flag) {
picture_structure = PICT_FRAME;
} else {
h->max_pic_num = 1 << (sps->log2_max_frame_num + 1);
}
- if (h->nal_unit_type == NAL_IDR_SLICE)
+ if (nal->type == NAL_IDR_SLICE)
- get_ue_golomb(&sl->gb); /* idr_pic_id */
+ get_ue_golomb_long(&sl->gb); /* idr_pic_id */
if (sps->poc_type == 0) {
int poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb);
(pps->weighted_bipred_idc == 1 &&
sl->slice_type_nos == AV_PICTURE_TYPE_B))
ff_h264_pred_weight_table(&sl->gb, sps, sl->ref_count,
- sl->slice_type_nos, &sl->pwt);
+ sl->slice_type_nos, &sl->pwt, h->avctx);
sl->explicit_ref_marking = 0;
- if (h->nal_ref_idc) {
+ if (nal->ref_idc) {
ret = ff_h264_decode_ref_pic_marking(h, sl, &sl->gb);
if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
return AVERROR_INVALIDDATA;
{
int i, j, ret = 0;
- ret = h264_slice_header_parse(h, sl);
+ ret = h264_slice_header_parse(h, sl, nal);
- if (ret < 0)
+ if (ret) // cannot be ret<0 because of SLICE_SKIPED, SLICE_SINGLETHREAD, ...
return ret;
if (h->current_slice == 0) {
(h->avctx->skip_loop_filter >= AVDISCARD_BIDIR &&
sl->slice_type_nos == AV_PICTURE_TYPE_B) ||
(h->avctx->skip_loop_filter >= AVDISCARD_NONREF &&
- h->nal_ref_idc == 0))
+ nal->ref_idc == 0))
sl->deblocking_filter = 0;
- if (sl->deblocking_filter == 1 && h->nb_slice_ctx > 1) {
+ if (sl->deblocking_filter == 1 && h->max_contexts > 1) {
if (h->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
/* Cheat slightly for speed:
* Do not bother to deblock across slices. */