X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fvc1dec.c;h=d9797d53159add0515225b6786245b57f1151393;hb=03b078721c8e42c413da75f553e740d235092dad;hp=3f62201979db5b568ef4c78280479bb7890589fd;hpb=e80cbdbc57292a4687339f6a7d30e0dcff42f9e8;p=ffmpeg

diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c
index 3f62201979d..d9797d53159 100644
--- a/libavcodec/vc1dec.c
+++ b/libavcodec/vc1dec.c
@@ -395,6 +395,9 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
         }
     }
 
+    if(!srcY)
+        return;
+
     src_x   = s->mb_x * 16 + (mx   >> 2);
     src_y   = s->mb_y * 16 + (my   >> 2);
     uvsrc_x = s->mb_x *  8 + (uvmx >> 2);
@@ -431,7 +434,7 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
     if (v->rangeredfrm || (v->mv_mode == MV_PMODE_INTENSITY_COMP)
         || s->h_edge_pos < 22 || v_edge_pos < 22
         || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
-        || (unsigned)(src_y - s->mspel) > v_edge_pos - (my&3) - 16 - s->mspel * 3) {
+        || (unsigned)(src_y - 1) > v_edge_pos - (my&3) - 16 - 3) {
         uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
 
         srcY -= s->mspel * (1 + s->linesize);
@@ -570,6 +573,9 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
     } else
         srcY = s->next_picture.f.data[0];
 
+    if(!srcY)
+        return;
+
     if (v->field_mode) {
         if (v->cur_field_type != v->ref_field_type[dir])
             my = my - 2 + 4 * v->cur_field_type;
@@ -602,6 +608,8 @@
             tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
             ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
             break;
+        default:
+            av_assert2(0);
         }
         s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
         s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
@@ -795,6 +803,7 @@ static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
     /* calculate chroma MV vector from four luma MVs */
     if (!v->field_mode || (v->field_mode && !v->numref)) {
         valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
+        chroma_ref_type = v->reffield;
         if (!valid_count) {
             s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
             s->current_picture.f.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
@@ -841,21 +850,27 @@
     if (!dir) {
         if (v->field_mode) {
             if ((v->cur_field_type != chroma_ref_type) && v->cur_field_type) {
-                srcU = s->current_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
-                srcV = s->current_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
+                srcU = s->current_picture.f.data[1];
+                srcV = s->current_picture.f.data[2];
             } else {
-                srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
-                srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
+                srcU = s->last_picture.f.data[1];
+                srcV = s->last_picture.f.data[2];
             }
         } else {
-            srcU = s->last_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
-            srcV = s->last_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
+            srcU = s->last_picture.f.data[1];
+            srcV = s->last_picture.f.data[2];
         }
     } else {
-        srcU = s->next_picture.f.data[1] + uvsrc_y * s->uvlinesize + uvsrc_x;
-        srcV = s->next_picture.f.data[2] + uvsrc_y * s->uvlinesize + uvsrc_x;
+        srcU = s->next_picture.f.data[1];
+        srcV = s->next_picture.f.data[2];
     }
 
+    if(!srcU)
+        return;
+
+    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
+    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
+
     if (v->field_mode) {
         if (chroma_ref_type) {
             srcU += s->current_picture_ptr->f.linesize[1];
@@ -1351,7 +1366,7 @@ static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
     int px, py;
     int sum;
     int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
-    int opposit, a_f, b_f, c_f;
+    int opposite, a_f, b_f, c_f;
     int16_t field_predA[2];
     int16_t field_predB[2];
     int16_t field_predC[2];
@@ -1459,13 +1474,19 @@
     }
 
     if (v->field_mode) {
-        if (num_samefield <= num_oppfield)
-            opposit = 1 - pred_flag;
-        else
-            opposit = pred_flag;
+        if (!v->numref)
+            // REFFIELD determines if the last field or the second-last field is
+            // to be used as reference
+            opposite = 1 - v->reffield;
+        else {
+            if (num_samefield <= num_oppfield)
+                opposite = 1 - pred_flag;
+            else
+                opposite = pred_flag;
+        }
     } else
-        opposit = 0;
-    if (opposit) {
+        opposite = 0;
+    if (opposite) {
         if (a_valid && !a_f) {
             field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
             field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
@@ -1568,10 +1589,6 @@
         }
     }
 
-    if (v->field_mode && !s->quarter_sample) {
-        r_x <<= 1;
-        r_y <<= 1;
-    }
     if (v->field_mode && v->numref)
         r_y >>= 1;
     if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
@@ -5151,7 +5168,7 @@ static av_cold int vc1_decode_init(AVCodecContext *avctx)
     if (!(avctx->flags & CODEC_FLAG_GRAY))
         avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
     else
-        avctx->pix_fmt = PIX_FMT_GRAY8;
+        avctx->pix_fmt = AV_PIX_FMT_GRAY8;
     avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
     v->s.avctx = avctx;
     avctx->flags |= CODEC_FLAG_EMU_EDGE;
@@ -5315,7 +5332,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
     MpegEncContext *s = &v->s;
     AVFrame *pict = data;
     uint8_t *buf2 = NULL;
-    const uint8_t *buf_start = buf;
+    const uint8_t *buf_start = buf, *buf_start_second_field = NULL;
    int mb_height, n_slices1=-1;
     struct {
         uint8_t *buf;
@@ -5323,6 +5340,8 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
         int mby_start;
     } *slices = NULL, *tmp;
 
+    v->second_field = 0;
+
     if(s->flags & CODEC_FLAG_LOW_DELAY)
         s->low_delay = 1;
 
@@ -5341,9 +5360,9 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
 
     if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
         if (v->profile < PROFILE_ADVANCED)
-            avctx->pix_fmt = PIX_FMT_VDPAU_WMV3;
+            avctx->pix_fmt = AV_PIX_FMT_VDPAU_WMV3;
         else
-            avctx->pix_fmt = PIX_FMT_VDPAU_VC1;
+            avctx->pix_fmt = AV_PIX_FMT_VDPAU_VC1;
     }
 
     //for advanced profile we may need to parse and unescape data
@@ -5369,9 +5388,13 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
                 break;
             case VC1_CODE_FIELD: {
                 int buf_size3;
-                slices = av_realloc(slices, sizeof(*slices) * (n_slices+1));
-                if (!slices)
+                if (avctx->hwaccel ||
+                    s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
+                    buf_start_second_field = start;
+                tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
+                if (!tmp)
                     goto err;
+                slices = tmp;
                 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
                 if (!slices[n_slices].buf)
                     goto err;
@@ -5393,9 +5416,10 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
                 break;
             case VC1_CODE_SLICE: {
                 int buf_size3;
-                slices = av_realloc(slices, sizeof(*slices) * (n_slices+1));
-                if (!slices)
+                tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
+                if (!tmp)
                     goto err;
+                slices = tmp;
                 slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
                 if (!slices[n_slices].buf)
                     goto err;
@@ -5418,6 +5442,9 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
                 av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
                 goto err;
             } else { // found field marker, unescape second field
+                if (avctx->hwaccel ||
+                    s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
+                    buf_start_second_field = divider;
                 tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
                 if (!tmp)
                     goto err;
@@ -5466,7 +5493,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
     if (!s->context_initialized) {
         if (ff_msmpeg4_decode_init(avctx) < 0 ||
             ff_vc1_decode_init_alloc_tables(v) < 0)
-            return -1;
+            goto err;
 
         s->low_delay = !avctx->has_b_frames || v->res_sprite;
 
@@ -5506,6 +5533,11 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
         goto err;
     }
 
+    if ((s->mb_height >> v->field_mode) == 0) {
+        av_log(v->s.avctx, AV_LOG_ERROR, "image too short\n");
+        goto err;
+    }
+
     // process pulldown flags
     s->current_picture_ptr->f.repeat_pict = 0;
     // Pulldown flags are only valid when 'broadcast' has been set.
@@ -5553,13 +5585,46 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
         &&s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
         ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
     else if (avctx->hwaccel) {
-        if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
-            goto err;
-        if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
-            goto err;
-        if (avctx->hwaccel->end_frame(avctx) < 0)
-            goto err;
+        if (v->field_mode && buf_start_second_field) {
+            // decode first field
+            s->picture_structure = PICT_BOTTOM_FIELD - v->tff;
+            if (avctx->hwaccel->start_frame(avctx, buf_start, buf_start_second_field - buf_start) < 0)
+                goto err;
+            if (avctx->hwaccel->decode_slice(avctx, buf_start, buf_start_second_field - buf_start) < 0)
+                goto err;
+            if (avctx->hwaccel->end_frame(avctx) < 0)
+                goto err;
+
+            // decode second field
+            s->gb = slices[n_slices1 + 1].gb;
+            s->picture_structure = PICT_TOP_FIELD + v->tff;
+            v->second_field = 1;
+            v->pic_header_flag = 0;
+            if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
+                av_log(avctx, AV_LOG_ERROR, "parsing header for second field failed");
+                goto err;
+            }
+            v->s.current_picture_ptr->f.pict_type = v->s.pict_type;
+
+            if (avctx->hwaccel->start_frame(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
+                goto err;
+            if (avctx->hwaccel->decode_slice(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
+                goto err;
+            if (avctx->hwaccel->end_frame(avctx) < 0)
+                goto err;
+        } else {
+            s->picture_structure = PICT_FRAME;
+            if (avctx->hwaccel->start_frame(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
+                goto err;
+            if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
+                goto err;
+            if (avctx->hwaccel->end_frame(avctx) < 0)
+                goto err;
+        }
     } else {
+        if (v->fcm == ILACE_FRAME && s->pict_type == AV_PICTURE_TYPE_B)
+            goto err; // This codepath is still incomplete thus it is disabled
+
         ff_er_frame_start(s);
 
         v->bits = buf_size * 8;
@@ -5637,7 +5702,8 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
             s->linesize   >>= 1;
             s->uvlinesize >>= 1;
         }
-//av_log(s->avctx, AV_LOG_INFO, "Consumed %i/%i bits\n", get_bits_count(&s->gb), s->gb.size_in_bits);
+    av_dlog(s->avctx, "Consumed %i/%i bits\n",
+            get_bits_count(&s->gb), s->gb.size_in_bits);
 //  if (get_bits_count(&s->gb) > buf_size * 8)
 //      return -1;
     if(s->error_occurred && s->pict_type == AV_PICTURE_TYPE_B)
@@ -5704,6 +5770,7 @@ AVCodec ff_vc1_decoder = {
     .init           = vc1_decode_init,
     .close          = ff_vc1_decode_end,
     .decode         = vc1_decode_frame,
+    .flush          = ff_mpeg_flush,
     .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
     .long_name      = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
     .pix_fmts       = ff_hwaccel_pixfmt_list_420,
@@ -5719,6 +5786,7 @@ AVCodec ff_wmv3_decoder = {
     .init           = vc1_decode_init,
     .close          = ff_vc1_decode_end,
     .decode         = vc1_decode_frame,
+    .flush          = ff_mpeg_flush,
     .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
     .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
     .pix_fmts       = ff_hwaccel_pixfmt_list_420,
@@ -5737,7 +5805,7 @@ AVCodec ff_wmv3_vdpau_decoder = {
     .decode         = vc1_decode_frame,
     .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
     .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
-    .pix_fmts       = (const enum PixelFormat[]){ PIX_FMT_VDPAU_WMV3, PIX_FMT_NONE },
+    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_WMV3, AV_PIX_FMT_NONE },
     .profiles       = NULL_IF_CONFIG_SMALL(profiles)
 };
 #endif
@@ -5753,7 +5821,7 @@ AVCodec ff_vc1_vdpau_decoder = {
     .decode         = vc1_decode_frame,
     .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
     .long_name      = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
-    .pix_fmts       = (const enum PixelFormat[]){ PIX_FMT_VDPAU_VC1, PIX_FMT_NONE },
+    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_VC1, AV_PIX_FMT_NONE },
     .profiles       = NULL_IF_CONFIG_SMALL(profiles)
 };
 #endif
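
A note on the av_realloc() changes in vc1_decode_frame() above: writing the result of a reallocation straight back into `slices` loses the only pointer to the old block when the call fails and returns NULL, so the original array is leaked. The patch instead stores the result in `tmp`, tests it, and only then commits it to `slices`, which keeps the array valid and freeable through the common `err:` path on failure. Below is a minimal standalone sketch of the same pattern using plain realloc(); `SliceDesc` and `append_slice` are illustrative names, not identifiers from vc1dec.c:

    #include <stdint.h>
    #include <stdlib.h>

    typedef struct SliceDesc {
        uint8_t *buf;       /* unescaped slice payload */
        int      mby_start; /* first macroblock row of the slice */
    } SliceDesc;

    /* Grow the slice array by one entry. Returns 0 on success, -1 on
     * failure; on failure the caller's array is untouched and can still
     * be freed normally (the "goto err" path in the patch). */
    static int append_slice(SliceDesc **slices, int *n_slices)
    {
        /* Assigning realloc()'s result directly to *slices would leak
         * the old block whenever realloc() fails and returns NULL. */
        SliceDesc *tmp = realloc(*slices, sizeof(**slices) * (*n_slices + 1));
        if (!tmp)
            return -1;      /* *slices is still valid here */
        *slices = tmp;      /* commit only after the NULL check */
        (*n_slices)++;
        return 0;
    }

The patch applies this same tmp/check/commit sequence in all three growth sites: the VC1_CODE_FIELD case, the VC1_CODE_SLICE case, and the WVC1 field-marker handling.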