- if(IS_INTRA4x4(mb_type)){
- if(simple || !s->encoding){
- if(IS_8x8DCT(mb_type)){
- if(transform_bypass){
- idct_dc_add =
- idct_add = s->dsp.add_pixels8;
- }else{
- idct_dc_add = h->h264dsp.h264_idct8_dc_add;
- idct_add = h->h264dsp.h264_idct8_add;
- }
- for(i=0; i<16; i+=4){
- uint8_t * const ptr= dest_y + block_offset[i];
- const int dir= h->intra4x4_pred_mode_cache[ scan8[i] ];
- if(transform_bypass && h->sps.profile_idc==244 && dir<=1){
- h->hpc.pred8x8l_add[dir](ptr, h->mb + i*16, linesize);
- }else{
- const int nnz = h->non_zero_count_cache[ scan8[i] ];
- h->hpc.pred8x8l[ dir ](ptr, (h->topleft_samples_available<<i)&0x8000,
- (h->topright_samples_available<<i)&0x4000, linesize);
- if(nnz){
- if(nnz == 1 && h->mb[i*16])
- idct_dc_add(ptr, h->mb + i*16, linesize);
- else
- idct_add (ptr, h->mb + i*16, linesize);
- }
- }
- }
- }else{
- if(transform_bypass){
- idct_dc_add =
- idct_add = s->dsp.add_pixels4;
- }else{
- idct_dc_add = h->h264dsp.h264_idct_dc_add;
- idct_add = h->h264dsp.h264_idct_add;
- }
- for(i=0; i<16; i++){
- uint8_t * const ptr= dest_y + block_offset[i];
- const int dir= h->intra4x4_pred_mode_cache[ scan8[i] ];
-
- if(transform_bypass && h->sps.profile_idc==244 && dir<=1){
- h->hpc.pred4x4_add[dir](ptr, h->mb + i*16, linesize);
- }else{
- uint8_t *topright;
- int nnz, tr;
- if(dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED){
- const int topright_avail= (h->topright_samples_available<<i)&0x8000;
- assert(mb_y || linesize <= block_offset[i]);
- if(!topright_avail){
- tr= ptr[3 - linesize]*0x01010101;
- topright= (uint8_t*) &tr;
- }else
- topright= ptr + 4 - linesize;
- }else
- topright= NULL;
-
- h->hpc.pred4x4[ dir ](ptr, topright, linesize);
- nnz = h->non_zero_count_cache[ scan8[i] ];
- if(nnz){
- if(is_h264){
- if(nnz == 1 && h->mb[i*16])
- idct_dc_add(ptr, h->mb + i*16, linesize);
- else
- idct_add (ptr, h->mb + i*16, linesize);
- }else
- ff_svq3_add_idct_c(ptr, h->mb + i*16, linesize, s->qscale, 0);
- }
- }
- }
- }
- }
- }else{
- h->hpc.pred16x16[ h->intra16x16_pred_mode ](dest_y , linesize);
- if(is_h264){
- if(!transform_bypass)
- h264_luma_dc_dequant_idct_c(h->mb, s->qscale, h->dequant4_coeff[0][s->qscale][0]);
- }else
- ff_svq3_luma_dc_dequant_idct_c(h->mb, s->qscale);
+ // POC timing
+ copy_fields(h, h1, poc_lsb, redundant_pic_count);
+
+ // reference lists
+ copy_fields(h, h1, ref_count, list_count);
+ copy_fields(h, h1, ref2frm, intra_gb);
+ copy_fields(h, h1, short_ref, cabac_init_idc);
+
+ copy_picture_range(h->short_ref, h1->short_ref, 32, s, s1);
+ copy_picture_range(h->long_ref, h1->long_ref, 32, s, s1);
+ copy_picture_range(h->delayed_pic, h1->delayed_pic,
+ MAX_DELAYED_PIC_COUNT + 2, s, s1);
+
+ h->last_slice_type = h1->last_slice_type;
+
+ if (!s->current_picture_ptr)
+ return 0;
+
+ if (!s->droppable) {
+ err = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index);
+ h->prev_poc_msb = h->poc_msb;
+ h->prev_poc_lsb = h->poc_lsb;
+ }
+ h->prev_frame_num_offset = h->frame_num_offset;
+ h->prev_frame_num = h->frame_num;
+ h->outputed_poc = h->next_outputed_poc;
+
+ return err;
+}
+
+int ff_h264_frame_start(H264Context *h)
+{
+    MpegEncContext *const s = &h->s;
+    int i;
+    const int pixel_shift = h->pixel_shift; /* bytes-per-sample shift: 0 for 8-bit, 1 for high bit depth — TODO confirm */
+
+    if (ff_MPV_frame_start(s, s->avctx) < 0) /* allocates/rotates current_picture; bail out on failure */
+        return -1;
+    ff_er_frame_start(s); /* reset error-resilience bookkeeping for the new frame */
+    /*
+     * ff_MPV_frame_start uses pict_type to derive key_frame.
+     * This is incorrect for H.264; IDR markings must be used.
+     * Zero here; IDR markings per slice in frame or fields are ORed in later.
+     * See decode_nal_units().
+     */
+    s->current_picture_ptr->f.key_frame = 0;
+    s->current_picture_ptr->mmco_reset = 0; /* cleared per frame; set by ref-pic marking when an MMCO reset occurs */
+
+    assert(s->linesize && s->uvlinesize);
+
+    for (i = 0; i < 16; i++) { /* 4x4 luma block offsets within the macroblock */
+        h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * s->linesize * ((scan8[i] - scan8[0]) >> 3);
+        h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * s->linesize * ((scan8[i] - scan8[0]) >> 3);
+    }
+    for (i = 0; i < 16; i++) { /* chroma (cb at 16+, cr at 32+) share the same offsets */
+        h->block_offset[16 + i] =
+        h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * s->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
+        h->block_offset[48 + 16 + i] =
+        h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * s->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
+    }
+    /* NOTE(review): entries 48+ use a doubled row stride (8*linesize vs 4*) —
+     * presumably the interlaced/MBAFF variant of the table; confirm against users. */
+
+    /* can't be in alloc_tables because linesize isn't known there.
+     * FIXME: redo bipred weight to not require extra buffer? */
+    for (i = 0; i < s->slice_context_count; i++) /* one scratchpad per slice-decoding thread context */
+        if (h->thread_context[i] && !h->thread_context[i]->bipred_scratchpad)
+            h->thread_context[i]->bipred_scratchpad = av_malloc(16 * 6 * s->linesize);
+
+    /* Some macroblocks can be accessed before they're available in case
+     * of lost slices, MBAFF or threading. */
+    memset(h->slice_table, -1,
+           (s->mb_height * s->mb_stride - 1) * sizeof(*h->slice_table)); /* -1 marks "no slice decoded here yet" */
+
+    // s->decode = (s->flags & CODEC_FLAG_PSNR) || !s->encoding ||
+    //             s->current_picture.f.reference /* || h->contains_intra */ || 1;
+
+    /* We mark the current picture as non-reference after allocating it, so
+     * that if we break out due to an error it can be released automatically
+     * in the next ff_MPV_frame_start().
+     * SVQ3 as well as most other codecs have only last/next/current and thus
+     * get released even with set reference, besides SVQ3 and others do not
+     * mark frames as reference later "naturally". */
+    if (s->codec_id != AV_CODEC_ID_SVQ3)
+        s->current_picture_ptr->f.reference = 0;
+
+    s->current_picture_ptr->field_poc[0] =
+    s->current_picture_ptr->field_poc[1] = INT_MAX; /* sentinel: real field POCs filled in during slice decoding */
+
+    h->next_output_pic = NULL; /* no frame selected for output yet; decode_postinit() picks one */
+
+    assert(s->current_picture_ptr->long_ref == 0);
+
+    return 0; /* 0 on success, -1 on ff_MPV_frame_start failure */
+}
+
+/**
+ * Run setup operations that must be run after slice header decoding.
+ * This includes finding the next displayed frame.
+ *
+ * @param h h264 master context
+ * @param setup_finished enough NALs have been read that we can call
+ * ff_thread_finish_setup()
+ */
+static void decode_postinit(H264Context *h, int setup_finished)
+{
+ MpegEncContext *const s = &h->s;
+ Picture *out = s->current_picture_ptr;
+ Picture *cur = s->current_picture_ptr;
+ int i, pics, out_of_order, out_idx;
+ int invalid = 0, cnt = 0;
+
+ s->current_picture_ptr->f.qscale_type = FF_QSCALE_TYPE_H264;
+ s->current_picture_ptr->f.pict_type = s->pict_type;
+
+ if (h->next_output_pic)
+ return;
+
+ if (cur->field_poc[0] == INT_MAX || cur->field_poc[1] == INT_MAX) {
+ /* FIXME: if we have two PAFF fields in one packet, we can't start
+ * the next thread here. If we have one field per packet, we can.
+ * The check in decode_nal_units() is not good enough to find this
+ * yet, so we assume the worst for now. */
+ // if (setup_finished)
+ // ff_thread_finish_setup(s->avctx);
+ return;
+ }
+
+ cur->f.interlaced_frame = 0;
+ cur->f.repeat_pict = 0;
+
+ /* Signal interlacing information externally. */
+ /* Prioritize picture timing SEI information over used
+ * decoding process if it exists. */
+
+ if (h->sps.pic_struct_present_flag) {
+ switch (h->sei_pic_struct) {
+ case SEI_PIC_STRUCT_FRAME:
+ break;
+ case SEI_PIC_STRUCT_TOP_FIELD:
+ case SEI_PIC_STRUCT_BOTTOM_FIELD:
+ cur->f.interlaced_frame = 1;
+ break;
+ case SEI_PIC_STRUCT_TOP_BOTTOM:
+ case SEI_PIC_STRUCT_BOTTOM_TOP:
+ if (FIELD_OR_MBAFF_PICTURE)
+ cur->f.interlaced_frame = 1;
+ else
+ // try to flag soft telecine progressive
+ cur->f.interlaced_frame = h->prev_interlaced_frame;
+ break;
+ case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
+ case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
+ /* Signal the possibility of telecined film externally
+ * (pic_struct 5,6). From these hints, let the applications
+ * decide if they apply deinterlacing. */
+ cur->f.repeat_pict = 1;
+ break;
+ case SEI_PIC_STRUCT_FRAME_DOUBLING:
+ // Force progressive here, doubling interlaced frame is a bad idea.
+ cur->f.repeat_pict = 2;
+ break;
+ case SEI_PIC_STRUCT_FRAME_TRIPLING:
+ cur->f.repeat_pict = 4;
+ break;
+ }
+
+ if ((h->sei_ct_type & 3) &&
+ h->sei_pic_struct <= SEI_PIC_STRUCT_BOTTOM_TOP)
+ cur->f.interlaced_frame = (h->sei_ct_type & (1 << 1)) != 0;
+ } else {
+ /* Derive interlacing flag from used decoding process. */
+ cur->f.interlaced_frame = FIELD_OR_MBAFF_PICTURE;
+ }
+ h->prev_interlaced_frame = cur->f.interlaced_frame;
+
+ if (cur->field_poc[0] != cur->field_poc[1]) {
+ /* Derive top_field_first from field pocs. */
+ cur->f.top_field_first = cur->field_poc[0] < cur->field_poc[1];
+ } else {
+ if (cur->f.interlaced_frame || h->sps.pic_struct_present_flag) {
+ /* Use picture timing SEI information. Even if it is a
+ * information of a past frame, better than nothing. */
+ if (h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM ||
+ h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
+ cur->f.top_field_first = 1;
+ else
+ cur->f.top_field_first = 0;
+ } else {
+ /* Most likely progressive */
+ cur->f.top_field_first = 0;
+ }
+ }
+
+ // FIXME do something with unavailable reference frames
+
+ /* Sort B-frames into display order */
+
+ if (h->sps.bitstream_restriction_flag &&
+ s->avctx->has_b_frames < h->sps.num_reorder_frames) {
+ s->avctx->has_b_frames = h->sps.num_reorder_frames;
+ s->low_delay = 0;
+ }
+
+ if (s->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT &&
+ !h->sps.bitstream_restriction_flag) {
+ s->avctx->has_b_frames = MAX_DELAYED_PIC_COUNT - 1;
+ s->low_delay = 0;
+ }
+
+ pics = 0;
+ while (h->delayed_pic[pics])
+ pics++;
+
+ assert(pics <= MAX_DELAYED_PIC_COUNT);
+
+ h->delayed_pic[pics++] = cur;
+ if (cur->f.reference == 0)
+ cur->f.reference = DELAYED_PIC_REF;
+
+ /* Frame reordering. This code takes pictures from coding order and sorts
+ * them by their incremental POC value into display order. It supports POC
+ * gaps, MMCO reset codes and random resets.
+ * A "display group" can start either with a IDR frame (f.key_frame = 1),
+ * and/or can be closed down with a MMCO reset code. In sequences where
+ * there is no delay, we can't detect that (since the frame was already
+ * output to the user), so we also set h->mmco_reset to detect the MMCO
+ * reset code.
+ * FIXME: if we detect insufficient delays (as per s->avctx->has_b_frames),
+ * we increase the delay between input and output. All frames affected by
+ * the lag (e.g. those that should have been output before another frame
+ * that we already returned to the user) will be dropped. This is a bug
+ * that we will fix later. */
+ for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) {
+ cnt += out->poc < h->last_pocs[i];
+ invalid += out->poc == INT_MIN;
+ }
+ if (!h->mmco_reset && !cur->f.key_frame &&
+ cnt + invalid == MAX_DELAYED_PIC_COUNT && cnt > 0) {
+ h->mmco_reset = 2;
+ if (pics > 1)
+ h->delayed_pic[pics - 2]->mmco_reset = 2;
+ }
+ if (h->mmco_reset || cur->f.key_frame) {
+ for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
+ h->last_pocs[i] = INT_MIN;
+ cnt = 0;
+ invalid = MAX_DELAYED_PIC_COUNT;
+ }
+ out = h->delayed_pic[0];
+ out_idx = 0;
+ for (i = 1; i < MAX_DELAYED_PIC_COUNT &&
+ h->delayed_pic[i] &&
+ !h->delayed_pic[i - 1]->mmco_reset &&
+ !h->delayed_pic[i]->f.key_frame;
+ i++)
+ if (h->delayed_pic[i]->poc < out->poc) {
+ out = h->delayed_pic[i];
+ out_idx = i;
+ }
+ if (s->avctx->has_b_frames == 0 &&
+ (h->delayed_pic[0]->f.key_frame || h->mmco_reset))
+ h->next_outputed_poc = INT_MIN;
+ out_of_order = !out->f.key_frame && !h->mmco_reset &&
+ (out->poc < h->next_outputed_poc);
+
+ if (h->sps.bitstream_restriction_flag &&
+ s->avctx->has_b_frames >= h->sps.num_reorder_frames) {
+ } else if (out_of_order && pics - 1 == s->avctx->has_b_frames &&
+ s->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT) {
+ if (invalid + cnt < MAX_DELAYED_PIC_COUNT) {
+ s->avctx->has_b_frames = FFMAX(s->avctx->has_b_frames, cnt);
+ }
+ s->low_delay = 0;
+ } else if (s->low_delay &&
+ ((h->next_outputed_poc != INT_MIN &&
+ out->poc > h->next_outputed_poc + 2) ||
+ cur->f.pict_type == AV_PICTURE_TYPE_B)) {
+ s->low_delay = 0;
+ s->avctx->has_b_frames++;
+ }
+
+ if (pics > s->avctx->has_b_frames) {
+ out->f.reference &= ~DELAYED_PIC_REF;
+ // for frame threading, the owner must be the second field's thread or
+ // else the first thread can release the picture and reuse it unsafely
+ out->owner2 = s;
+ for (i = out_idx; h->delayed_pic[i]; i++)
+ h->delayed_pic[i] = h->delayed_pic[i + 1];
+ }
+ memmove(h->last_pocs, &h->last_pocs[1],
+ sizeof(*h->last_pocs) * (MAX_DELAYED_PIC_COUNT - 1));
+ h->last_pocs[MAX_DELAYED_PIC_COUNT - 1] = cur->poc;
+ if (!out_of_order && pics > s->avctx->has_b_frames) {
+ h->next_output_pic = out;
+ if (out->mmco_reset) {
+ if (out_idx > 0) {
+ h->next_outputed_poc = out->poc;
+ h->delayed_pic[out_idx - 1]->mmco_reset = out->mmco_reset;
+ } else {
+ h->next_outputed_poc = INT_MIN;