mb_array_size * sizeof(uint8_t), fail);
FF_ALLOC_OR_GOTO(h->avctx, er->er_temp_buffer,
- h->mb_height * h->mb_stride, fail);
+ h->mb_height * h->mb_stride * (4*sizeof(int) + 1), fail);
FF_ALLOCZ_OR_GOTO(h->avctx, sl->dc_val_base,
yc_size * sizeof(int16_t), fail);
}
#endif
-/**
- * Run setup operations that must be run after slice header decoding.
- * This includes finding the next displayed frame.
- *
- * @param h h264 master context
- * @param setup_finished enough NALs have been read that we can call
- * ff_thread_finish_setup()
- */
-static void decode_postinit(H264Context *h, int setup_finished)
-{
- const SPS *sps = h->ps.sps;
- H264Picture *out = h->cur_pic_ptr;
- H264Picture *cur = h->cur_pic_ptr;
- int i, pics, out_of_order, out_idx;
-
- if (h->next_output_pic)
- return;
-
- if (cur->field_poc[0] == INT_MAX || cur->field_poc[1] == INT_MAX) {
- /* FIXME: if we have two PAFF fields in one packet, we can't start
- * the next thread here. If we have one field per packet, we can.
- * The check in decode_nal_units() is not good enough to find this
- * yet, so we assume the worst for now. */
- // if (setup_finished)
- // ff_thread_finish_setup(h->avctx);
- if (cur->field_poc[0] == INT_MAX && cur->field_poc[1] == INT_MAX)
- return;
- if (h->avctx->hwaccel || h->missing_fields <=1)
- return;
- }
-
- cur->mmco_reset = h->mmco_reset;
- h->mmco_reset = 0;
-
- // FIXME do something with unavailable reference frames
-
- /* Sort B-frames into display order */
- if (sps->bitstream_restriction_flag ||
- h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT) {
- h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
- }
-
- for (i = 0; 1; i++) {
- if(i == MAX_DELAYED_PIC_COUNT || cur->poc < h->last_pocs[i]){
- if(i)
- h->last_pocs[i-1] = cur->poc;
- break;
- } else if(i) {
- h->last_pocs[i-1]= h->last_pocs[i];
- }
- }
- out_of_order = MAX_DELAYED_PIC_COUNT - i;
- if( cur->f->pict_type == AV_PICTURE_TYPE_B
- || (h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > INT_MIN && h->last_pocs[MAX_DELAYED_PIC_COUNT-1] - h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > 2))
- out_of_order = FFMAX(out_of_order, 1);
- if (out_of_order == MAX_DELAYED_PIC_COUNT) {
- av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
- for (i = 1; i < MAX_DELAYED_PIC_COUNT; i++)
- h->last_pocs[i] = INT_MIN;
- h->last_pocs[0] = cur->poc;
- cur->mmco_reset = 1;
- } else if(h->avctx->has_b_frames < out_of_order && !sps->bitstream_restriction_flag){
- int loglevel = h->avctx->frame_number > 1 ? AV_LOG_WARNING : AV_LOG_VERBOSE;
- av_log(h->avctx, loglevel, "Increasing reorder buffer to %d\n", out_of_order);
- h->avctx->has_b_frames = out_of_order;
- }
-
- pics = 0;
- while (h->delayed_pic[pics])
- pics++;
-
- av_assert0(pics <= MAX_DELAYED_PIC_COUNT);
-
- h->delayed_pic[pics++] = cur;
- if (cur->reference == 0)
- cur->reference = DELAYED_PIC_REF;
-
- out = h->delayed_pic[0];
- out_idx = 0;
- for (i = 1; h->delayed_pic[i] &&
- !h->delayed_pic[i]->f->key_frame &&
- !h->delayed_pic[i]->mmco_reset;
- i++)
- if (h->delayed_pic[i]->poc < out->poc) {
- out = h->delayed_pic[i];
- out_idx = i;
- }
- if (h->avctx->has_b_frames == 0 &&
- (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset))
- h->next_outputed_poc = INT_MIN;
- out_of_order = out->poc < h->next_outputed_poc;
-
- if (out_of_order || pics > h->avctx->has_b_frames) {
- out->reference &= ~DELAYED_PIC_REF;
- for (i = out_idx; h->delayed_pic[i]; i++)
- h->delayed_pic[i] = h->delayed_pic[i + 1];
- }
- if (!out_of_order && pics > h->avctx->has_b_frames) {
- h->next_output_pic = out;
- if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset)) {
- h->next_outputed_poc = INT_MIN;
- } else
- h->next_outputed_poc = out->poc;
- } else {
- av_log(h->avctx, AV_LOG_DEBUG, "no picture %s\n", out_of_order ? "ooo" : "");
- }
-
- if (h->next_output_pic) {
- if (h->next_output_pic->recovered) {
- // We have reached a recovery point and all frames after it in
- // display order are "recovered".
- h->frame_recovered |= FRAME_RECOVERED_SEI;
- }
- h->next_output_pic->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_SEI);
- }
-
- if (setup_finished && !h->avctx->hwaccel) {
- ff_thread_finish_setup(h->avctx);
-
- if (h->avctx->active_thread_type & FF_THREAD_FRAME)
- h->setup_finished = 1;
- }
-}
-
/**
* instantaneous decoder refresh.
*/
{
int nals_needed = 0;
int first_slice = 0;
- int i;
- int ret;
+ int i, ret;
for (i = 0; i < h->pkt.nb_nals; i++) {
H2645NAL *nal = &h->pkt.nals[i];
case H264_NAL_DPA:
case H264_NAL_IDR_SLICE:
case H264_NAL_SLICE:
- ret = init_get_bits8(&gb, nal->data + 1, (nal->size - 1));
- if (ret < 0)
- return ret;
+ ret = init_get_bits8(&gb, nal->data + 1, nal->size - 1);
+ if (ret < 0) {
+ av_log(h->avctx, AV_LOG_ERROR, "Invalid zero-sized VCL NAL unit\n");
+ if (h->avctx->err_recognition & AV_EF_EXPLODE)
+ return ret;
+
+ break;
+ }
if (!get_ue_golomb_long(&gb) || // first_mb_in_slice
!first_slice ||
first_slice != nal->type)
static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
{
AVCodecContext *const avctx = h->avctx;
- unsigned context_count = 0;
int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts
int idr_cleared=0;
int i, ret = 0;
+ h->has_slice = 0;
h->nal_unit_type= 0;
- h->max_contexts = h->nb_slice_ctx;
if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
h->current_slice = 0;
if (!h->first_field)
for (i = 0; i < h->pkt.nb_nals; i++) {
H2645NAL *nal = &h->pkt.nals[i];
- H264SliceContext *sl = &h->slice_ctx[context_count];
- int err;
+ int max_slice_ctx, err;
if (avctx->skip_frame >= AVDISCARD_NONREF &&
nal->ref_idc == 0 && nal->type != H264_NAL_SEI)
continue;
-again:
// FIXME these should stop being context-global variables
h->nal_ref_idc = nal->ref_idc;
h->nal_unit_type = nal->type;
idr_cleared = 1;
h->has_recovery_point = 1;
case H264_NAL_SLICE:
- sl->gb = nal->gb;
+ h->has_slice = 1;
- if ((err = ff_h264_decode_slice_header(h, sl, nal)))
- break;
-
- if (sl->redundant_pic_count > 0)
+ if ((err = ff_h264_queue_decode_slice(h, nal))) {
+ H264SliceContext *sl = h->slice_ctx + h->nb_slice_ctx_queued;
+ sl->ref_count[0] = sl->ref_count[1] = 0;
break;
+ }
if (h->current_slice == 1) {
- if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS))
- decode_postinit(h, i >= nals_needed);
+ if (avctx->active_thread_type & FF_THREAD_FRAME && !h->avctx->hwaccel &&
+ i >= nals_needed && !h->setup_finished && h->cur_pic_ptr) {
+ ff_thread_finish_setup(avctx);
+ h->setup_finished = 1;
+ }
if (h->avctx->hwaccel &&
(ret = h->avctx->hwaccel->start_frame(h->avctx, buf, buf_size)) < 0)
#endif
}
- if (avctx->hwaccel) {
- ret = avctx->hwaccel->decode_slice(avctx,
- nal->raw_data,
- nal->raw_size);
- if (ret < 0)
- goto end;
+ max_slice_ctx = avctx->hwaccel ? 1 : h->nb_slice_ctx;
+ if (h->nb_slice_ctx_queued == max_slice_ctx) {
+ if (h->avctx->hwaccel) {
+ ret = avctx->hwaccel->decode_slice(avctx, nal->raw_data, nal->raw_size);
+ h->nb_slice_ctx_queued = 0;
+ } else
#if FF_API_CAP_VDPAU
- } else if (CONFIG_H264_VDPAU_DECODER &&
+ if (CONFIG_H264_VDPAU_DECODER &&
h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU) {
ff_vdpau_add_data_chunk(h->cur_pic_ptr->f->data[0],
start_code,
ff_vdpau_add_data_chunk(h->cur_pic_ptr->f->data[0],
nal->raw_data,
nal->raw_size);
-#endif
+ ret = 0;
} else
- context_count++;
+#endif
+ ret = ff_h264_execute_decode_slices(h);
+ if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
+ goto end;
+ }
break;
case H264_NAL_DPA:
case H264_NAL_DPB:
nal->type, nal->size_bits);
}
- if (context_count == h->max_contexts) {
- ret = ff_h264_execute_decode_slices(h, context_count);
- if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
- goto end;
- context_count = 0;
- }
-
- if (err < 0 || err == SLICE_SKIPED) {
- if (err < 0)
- av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
- sl->ref_count[0] = sl->ref_count[1] = sl->list_count = 0;
- } else if (err == SLICE_SINGLETHREAD) {
- if (context_count > 0) {
- ret = ff_h264_execute_decode_slices(h, context_count);
- if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
- goto end;
- context_count = 0;
- }
- /* Slice could not be decoded in parallel mode, restart. */
- sl = &h->slice_ctx[0];
- goto again;
+ if (err < 0) {
+ av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
}
}
- if (context_count) {
- ret = ff_h264_execute_decode_slices(h, context_count);
- if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
- goto end;
- }
+
+ ret = ff_h264_execute_decode_slices(h);
+ if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
+ goto end;
ret = 0;
end:
}
#endif /* CONFIG_ERROR_RESILIENCE */
/* clean up */
- if (h->cur_pic_ptr && !h->droppable) {
+ if (h->cur_pic_ptr && !h->droppable && h->has_slice) {
ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
h->picture_structure == PICT_BOTTOM_FIELD);
}
return 1;
}
+static int finalize_frame(H264Context *h, AVFrame *dst, H264Picture *out, int *got_frame)
+{
+ int ret;
+
+ if (((h->avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) ||
+ (h->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL) ||
+ out->recovered)) {
+
+ if (!h->avctx->hwaccel &&
+ (out->field_poc[0] == INT_MAX ||
+ out->field_poc[1] == INT_MAX)
+ ) {
+ int p;
+ AVFrame *f = out->f;
+ int field = out->field_poc[0] == INT_MAX;
+ uint8_t *dst_data[4];
+ int linesizes[4];
+ const uint8_t *src_data[4];
+
+ av_log(h->avctx, AV_LOG_DEBUG, "Duplicating field %d to fill missing\n", field);
+
+ for (p = 0; p<4; p++) {
+ dst_data[p] = f->data[p] + (field^1)*f->linesize[p];
+ src_data[p] = f->data[p] + field *f->linesize[p];
+ linesizes[p] = 2*f->linesize[p];
+ }
+
+ av_image_copy(dst_data, linesizes, src_data, linesizes,
+ f->format, f->width, f->height>>1);
+ }
+
+ ret = output_frame(h, dst, out);
+ if (ret < 0)
+ return ret;
+
+ *got_frame = 1;
+
+ if (CONFIG_MPEGVIDEO) {
+ ff_print_debug_info2(h->avctx, dst, NULL,
+ out->mb_type,
+ out->qscale_table,
+ out->motion_val,
+ NULL,
+ h->mb_width, h->mb_height, h->mb_stride, 1);
+ }
+ }
+
+ return 0;
+}
+
+static int send_next_delayed_frame(H264Context *h, AVFrame *dst_frame,
+ int *got_frame, int buf_index)
+{
+ int ret, i, out_idx;
+ H264Picture *out = h->delayed_pic[0];
+
+ h->cur_pic_ptr = NULL;
+ h->first_field = 0;
+
+ out_idx = 0;
+ for (i = 1;
+ h->delayed_pic[i] &&
+ !h->delayed_pic[i]->f->key_frame &&
+ !h->delayed_pic[i]->mmco_reset;
+ i++)
+ if (h->delayed_pic[i]->poc < out->poc) {
+ out = h->delayed_pic[i];
+ out_idx = i;
+ }
+
+ for (i = out_idx; h->delayed_pic[i]; i++)
+ h->delayed_pic[i] = h->delayed_pic[i + 1];
+
+ if (out) {
+ out->reference &= ~DELAYED_PIC_REF;
+ ret = finalize_frame(h, dst_frame, out, got_frame);
+ if (ret < 0)
+ return ret;
+ }
+
+ return buf_index;
+}
+
static int h264_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
int buf_size = avpkt->size;
H264Context *h = avctx->priv_data;
AVFrame *pict = data;
- int buf_index = 0;
- H264Picture *out;
- int i, out_idx;
+ int buf_index;
int ret;
h->flags = avctx->flags;
h->setup_finished = 0;
+ h->nb_slice_ctx_queued = 0;
if (h->backup_width != -1) {
avctx->width = h->backup_width;
ff_h264_unref_picture(h, &h->last_pic_for_ec);
/* end of stream, output what is still in the buffers */
- if (buf_size == 0) {
- out:
-
- h->cur_pic_ptr = NULL;
- h->first_field = 0;
-
- // FIXME factorize this with the output code below
- out = h->delayed_pic[0];
- out_idx = 0;
- for (i = 1;
- h->delayed_pic[i] &&
- !h->delayed_pic[i]->f->key_frame &&
- !h->delayed_pic[i]->mmco_reset;
- i++)
- if (h->delayed_pic[i]->poc < out->poc) {
- out = h->delayed_pic[i];
- out_idx = i;
- }
-
- for (i = out_idx; h->delayed_pic[i]; i++)
- h->delayed_pic[i] = h->delayed_pic[i + 1];
-
- if (out) {
- out->reference &= ~DELAYED_PIC_REF;
- ret = output_frame(h, pict, out);
- if (ret < 0)
- return ret;
- *got_frame = 1;
- }
+ if (buf_size == 0)
+ return send_next_delayed_frame(h, pict, got_frame, 0);
- return buf_index;
- }
if (h->is_avc && av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, NULL)) {
int side_size;
uint8_t *side = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size);
if (!h->cur_pic_ptr && h->nal_unit_type == H264_NAL_END_SEQUENCE) {
av_assert0(buf_index <= buf_size);
- goto out;
+ return send_next_delayed_frame(h, pict, got_frame, buf_index);
}
- if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) {
+ if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) && (!h->cur_pic_ptr || !h->has_slice)) {
if (avctx->skip_frame >= AVDISCARD_NONREF ||
buf_size >= 4 && !memcmp("Q264", buf, 4))
return buf_size;
if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) ||
(h->mb_y >= h->mb_height && h->mb_height)) {
- if (avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)
- decode_postinit(h, 1);
-
if ((ret = ff_h264_field_end(h, &h->slice_ctx[0], 0)) < 0)
return ret;
/* Wait for second field. */
- *got_frame = 0;
- if (h->next_output_pic && ((avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) ||
- (avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL) ||
- h->next_output_pic->recovered)) {
- if (!h->next_output_pic->recovered)
- h->next_output_pic->f->flags |= AV_FRAME_FLAG_CORRUPT;
-
- if (!h->avctx->hwaccel &&
- (h->next_output_pic->field_poc[0] == INT_MAX ||
- h->next_output_pic->field_poc[1] == INT_MAX)
- ) {
- int p;
- AVFrame *f = h->next_output_pic->f;
- int field = h->next_output_pic->field_poc[0] == INT_MAX;
- uint8_t *dst_data[4];
- int linesizes[4];
- const uint8_t *src_data[4];
-
- av_log(h->avctx, AV_LOG_DEBUG, "Duplicating field %d to fill missing\n", field);
-
- for (p = 0; p<4; p++) {
- dst_data[p] = f->data[p] + (field^1)*f->linesize[p];
- src_data[p] = f->data[p] + field *f->linesize[p];
- linesizes[p] = 2*f->linesize[p];
- }
-
- av_image_copy(dst_data, linesizes, src_data, linesizes,
- f->format, f->width, f->height>>1);
- }
-
- ret = output_frame(h, pict, h->next_output_pic);
+ if (h->next_output_pic) {
+ ret = finalize_frame(h, pict, h->next_output_pic, got_frame);
if (ret < 0)
return ret;
- *got_frame = 1;
- if (CONFIG_MPEGVIDEO) {
- ff_print_debug_info2(h->avctx, pict, NULL,
- h->next_output_pic->mb_type,
- h->next_output_pic->qscale_table,
- h->next_output_pic->motion_val,
- NULL,
- h->mb_width, h->mb_height, h->mb_stride, 1);
- }
}
}