X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fmpeg12.c;h=d3ff54a7ab75fdf7d1544aa04cd06235efffc5e8;hb=25ef43bb289a3a3a717a684902c0a310e292beba;hp=9fbb29ef9d4f020fcf51e1e3788412fd6ccfc021;hpb=8d52ec7eb0cb18ee142c756af64c17f350b661b0;p=ffmpeg diff --git a/libavcodec/mpeg12.c b/libavcodec/mpeg12.c index 9fbb29ef9d4..d3ff54a7ab7 100644 --- a/libavcodec/mpeg12.c +++ b/libavcodec/mpeg12.c @@ -242,7 +242,7 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s) { unsigned int vbv_buffer_size; unsigned int fps, v; - int n, i; + int i; uint64_t time_code; float best_aspect_error= 1E10; float aspect_ratio= av_q2d(s->avctx->sample_aspect_ratio); @@ -358,8 +358,14 @@ static inline void encode_mb_skip_run(MpegEncContext *s, int run){ static void common_init(MpegEncContext *s) { +int i; + s->y_dc_scale_table= s->c_dc_scale_table= ff_mpeg1_dc_scale_table; + + if(!s->encoding) + for(i=0;i<64;i++) + s->dsp.idct_permutation[i]=i; } void ff_mpeg1_clean_buffers(MpegEncContext *s){ @@ -493,8 +499,9 @@ void mpeg1_encode_mb(MpegEncContext *s, cbp |= 1 << (5 - i); } - if (cbp == 0 && !first_mb && (mb_x != s->mb_width - 1 || (mb_y != s->mb_height - 1 && s->codec_id == CODEC_ID_MPEG1VIDEO)) && - ((s->pict_type == P_TYPE && s->mv_type == MV_TYPE_16X16 && (motion_x | motion_y) == 0) || + if (cbp == 0 && !first_mb && s->mv_type == MV_TYPE_16X16 && + (mb_x != s->mb_width - 1 || (mb_y != s->mb_height - 1 && s->codec_id == CODEC_ID_MPEG1VIDEO)) && + ((s->pict_type == P_TYPE && (motion_x | motion_y) == 0) || (s->pict_type == B_TYPE && s->mv_dir == s->last_mv_dir && (((s->mv_dir & MV_DIR_FORWARD) ? ((s->mv[0][0][0] - s->last_mv[0][0][0])|(s->mv[0][0][1] - s->last_mv[0][0][1])) : 0) | ((s->mv_dir & MV_DIR_BACKWARD) ? ((s->mv[1][0][0] - s->last_mv[1][0][0])|(s->mv[1][0][1] - s->last_mv[1][0][1])) : 0)) == 0))) { s->mb_skip_run++; @@ -596,7 +603,7 @@ void mpeg1_encode_mb(MpegEncContext *s, s->mv_bits+= get_bits_diff(s); } if(cbp) - put_bits(&s->pb, mbPatTable[cbp - 1][1], mbPatTable[cbp - 1][0]); + put_bits(&s->pb, mbPatTable[cbp][1], mbPatTable[cbp][0]); s->f_count++; } else{ static const int mb_type_len[4]={0,3,4,2}; //bak,for,bi @@ -675,7 +682,7 @@ void mpeg1_encode_mb(MpegEncContext *s, } s->mv_bits += get_bits_diff(s); if(cbp) - put_bits(&s->pb, mbPatTable[cbp - 1][1], mbPatTable[cbp - 1][0]); + put_bits(&s->pb, mbPatTable[cbp][1], mbPatTable[cbp][0]); } for(i=0;i<6;i++) { if (cbp & (1 << (5 - i))) { @@ -791,7 +798,7 @@ void ff_mpeg1_encode_init(MpegEncContext *s) else{ int val, bit_size, range, code; - bit_size = s->f_code - 1; + bit_size = f_code - 1; range = 1 << bit_size; val=mv; @@ -948,7 +955,7 @@ static VLC mb_ptype_vlc; static VLC mb_btype_vlc; static VLC mb_pat_vlc; -static void init_vlcs() +static void init_vlcs(void) { static int done = 0; @@ -967,7 +974,7 @@ static void init_vlcs() init_vlc(&mbincr_vlc, MBINCR_VLC_BITS, 36, &mbAddrIncrTable[0][1], 2, 1, &mbAddrIncrTable[0][0], 2, 1); - init_vlc(&mb_pat_vlc, MB_PAT_VLC_BITS, 63, + init_vlc(&mb_pat_vlc, MB_PAT_VLC_BITS, 64, &mbPatTable[0][1], 2, 1, &mbPatTable[0][0], 2, 1); @@ -1010,7 +1017,7 @@ static inline int get_qscale(MpegEncContext *s) #define MT_DMV 3 static int mpeg_decode_mb(MpegEncContext *s, - DCTELEM block[6][64]) + DCTELEM block[12][64]) { int i, j, k, cbp, val, mb_type, motion_type; @@ -1026,15 +1033,19 @@ static int mpeg_decode_mb(MpegEncContext *s, /* skip mb */ s->mb_intra = 0; - for(i=0;i<6;i++) + for(i=0;i<12;i++) s->block_last_index[i] = -1; - s->mv_type = MV_TYPE_16X16; + if(s->picture_structure == PICT_FRAME) + 
s->mv_type = MV_TYPE_16X16; + else + s->mv_type = MV_TYPE_FIELD; if (s->pict_type == P_TYPE) { /* if P type, zero motion vector is implied */ s->mv_dir = MV_DIR_FORWARD; s->mv[0][0][0] = s->mv[0][0][1] = 0; s->last_mv[0][0][0] = s->last_mv[0][0][1] = 0; s->last_mv[0][1][0] = s->last_mv[0][1][1] = 0; + s->field_select[0][0]= s->picture_structure - 1; s->mb_skiped = 1; s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16; } else { @@ -1122,7 +1133,7 @@ static int mpeg_decode_mb(MpegEncContext *s, #endif if (s->codec_id == CODEC_ID_MPEG2VIDEO) { - for(i=0;i<6;i++) { + for(i=0;i<4+(1<chroma_format);i++) { if (mpeg2_decode_block_intra(s, s->pblocks[i], i) < 0) return -1; } @@ -1146,7 +1157,13 @@ static int mpeg_decode_mb(MpegEncContext *s, s->qscale = get_qscale(s); s->mv_dir = MV_DIR_FORWARD; - s->mv_type = MV_TYPE_16X16; + if(s->picture_structure == PICT_FRAME) + s->mv_type = MV_TYPE_16X16; + else{ + s->mv_type = MV_TYPE_FIELD; + mb_type |= MB_TYPE_INTERLACED; + s->field_select[0][0]= s->picture_structure - 1; + } s->last_mv[0][0][0] = 0; s->last_mv[0][0][1] = 0; s->last_mv[0][1][0] = 0; @@ -1296,11 +1313,16 @@ static int mpeg_decode_mb(MpegEncContext *s, if (HAS_CBP(mb_type)) { cbp = get_vlc2(&s->gb, mb_pat_vlc.table, MB_PAT_VLC_BITS, 1); - if (cbp < 0){ + if (cbp < 0 || ((cbp == 0) && (s->chroma_format < 2)) ){ av_log(s->avctx, AV_LOG_ERROR, "invalid cbp at %d %d\n", s->mb_x, s->mb_y); return -1; } - cbp++; + if(s->chroma_format == 2){//CHROMA422 + cbp|= ( get_bits(&s->gb,2) ) << 6; + }else + if(s->chroma_format > 2){//CHROMA444 + cbp|= ( get_bits(&s->gb,6) ) << 6; + } #ifdef HAVE_XVMC //on 1 we memcpy blocks in xvmcvideo @@ -1314,13 +1336,33 @@ static int mpeg_decode_mb(MpegEncContext *s, if (s->codec_id == CODEC_ID_MPEG2VIDEO) { for(i=0;i<6;i++) { - if (cbp & 32) { + if (cbp & (1<<(5-i)) ) { if (mpeg2_decode_block_non_intra(s, s->pblocks[i], i) < 0) return -1; } else { s->block_last_index[i] = -1; } - cbp+=cbp; + } + if (s->chroma_format >= 2) { + if (s->chroma_format == 2) {//CHROMA_422) + for(i=6;i<8;i++) { + if (cbp & (1<<(6+7-i)) ) { + if (mpeg2_decode_block_non_intra(s, s->pblocks[i], i) < 0) + return -1; + } else { + s->block_last_index[i] = -1; + } + } + }else{ /*CHROMA_444*/ + for(i=6;i<12;i++) { + if (cbp & (1<<(6+11-i)) ) { + if (mpeg2_decode_block_non_intra(s, s->pblocks[i], i) < 0) + return -1; + } else { + s->block_last_index[i] = -1; + } + } + } } } else { for(i=0;i<6;i++) { @@ -1644,7 +1686,7 @@ static inline int mpeg2_decode_block_intra(MpegEncContext *s, component = 0; }else{ quant_matrix = s->chroma_intra_matrix; - component = n - 3; + component = (n&1) + 1; } diff = decode_dc(&s->gb, component); if (diff >= 0xffff) @@ -1712,11 +1754,17 @@ typedef struct Mpeg1Context { int repeat_field; /* true if we must repeat the field */ AVPanScan pan_scan; /** some temporary storage for the panscan */ int slice_count; + int swap_uv;//indicate VCR2 + int save_aspect_info; + } Mpeg1Context; static int mpeg_decode_init(AVCodecContext *avctx) { Mpeg1Context *s = avctx->priv_data; + MpegEncContext *s2 = &s->mpeg_enc_ctx; + + MPV_decode_defaults(s2); s->mpeg_enc_ctx.avctx= avctx; s->mpeg_enc_ctx.flags= avctx->flags; @@ -1731,37 +1779,159 @@ static int mpeg_decode_init(AVCodecContext *avctx) return 0; } +static void quant_matrix_rebuild(uint16_t *matrix, const uint8_t *old_perm, + const uint8_t *new_perm){ +uint16_t temp_matrix[64]; +int i; + + memcpy(temp_matrix,matrix,64*sizeof(uint16_t)); + + for(i=0;i<64;i++){ + matrix[new_perm[i]] = 
temp_matrix[old_perm[i]]; + } +} + +//Call this function when we know all parameters +//it may be called in different places for mpeg1 and mpeg2 +static int mpeg_decode_postinit(AVCodecContext *avctx){ +Mpeg1Context *s1 = avctx->priv_data; +MpegEncContext *s = &s1->mpeg_enc_ctx; +uint8_t old_permutation[64]; + + + if ( + (s1->mpeg_enc_ctx_allocated == 0)|| + avctx->width != s->width || + avctx->height != s->height|| +// s1->save_aspect_info != avctx->aspect_ratio_info|| + 0) + { + + if (s1->mpeg_enc_ctx_allocated) { + MPV_common_end(s); + } + + if( (s->width == 0 )||(s->height == 0)) + return -2; + + avctx->width = s->width; + avctx->height = s->height; + avctx->bit_rate = s->bit_rate; + s1->save_aspect_info = s->aspect_ratio_info; + + //low_delay may be forced, in this case we will have B frames + //that behave like P frames + avctx->has_b_frames = !(s->low_delay); + + if(avctx->sub_id==1){//s->codec_id==avctx->codec_id==CODEC_ID + //mpeg1 fps + avctx->frame_rate = frame_rate_tab[s->frame_rate_index].num; + avctx->frame_rate_base= frame_rate_tab[s->frame_rate_index].den; + //mpeg1 aspect + avctx->sample_aspect_ratio= av_d2q( + 1.0/mpeg1_aspect[s->aspect_ratio_info], 255); + + }else{//mpeg2 + //mpeg2 fps + av_reduce( + &s->avctx->frame_rate, + &s->avctx->frame_rate_base, + frame_rate_tab[s->frame_rate_index].num * (s->frame_rate_ext_n+1), + frame_rate_tab[s->frame_rate_index].den * (s->frame_rate_ext_d+1), + 1<<30); + //mpeg2 aspect + if(s->aspect_ratio_info > 1){ + if( (s1->pan_scan.width == 0 )||(s1->pan_scan.height == 0) ){ + s->avctx->sample_aspect_ratio= + av_div_q( + mpeg2_aspect[s->aspect_ratio_info], + (AVRational){s->width, s->height} + ); + }else{ + s->avctx->sample_aspect_ratio= + av_div_q( + mpeg2_aspect[s->aspect_ratio_info], + (AVRational){s1->pan_scan.width, s1->pan_scan.height} + ); + } + }else{ + s->avctx->sample_aspect_ratio= + mpeg2_aspect[s->aspect_ratio_info]; + } + }//mpeg2 + + if(avctx->xvmc_acceleration){ + avctx->pix_fmt = avctx->get_format(avctx,pixfmt_xvmc_mpg2_420); + }else{ + if(s->chroma_format < 2){ + avctx->pix_fmt = avctx->get_format(avctx,pixfmt_yuv_420); + }else + if(s->chroma_format == 2){ + avctx->pix_fmt = avctx->get_format(avctx,pixfmt_yuv_422); + }else + if(s->chroma_format > 2){ + avctx->pix_fmt = avctx->get_format(avctx,pixfmt_yuv_444); + } + } + //until then pix_fmt may be changed right after codec init + if( avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT ) + if( avctx->idct_algo == FF_IDCT_AUTO ) + avctx->idct_algo = FF_IDCT_SIMPLE; + + //quantization matrixes may need reordering + //if dct permutation is changed + memcpy(old_permutation,s->dsp.idct_permutation,64*sizeof(uint8_t)); + + if (MPV_common_init(s) < 0) + return -2; + + quant_matrix_rebuild(s->intra_matrix, old_permutation,s->dsp.idct_permutation); + quant_matrix_rebuild(s->inter_matrix, old_permutation,s->dsp.idct_permutation); + quant_matrix_rebuild(s->chroma_intra_matrix,old_permutation,s->dsp.idct_permutation); + quant_matrix_rebuild(s->chroma_inter_matrix,old_permutation,s->dsp.idct_permutation); + + s1->mpeg_enc_ctx_allocated = 1; + } + return 0; +} + /* return the 8 bit start code value and update the search state. 
Return -1 if no start code found */ -static int find_start_code(uint8_t **pbuf_ptr, uint8_t *buf_end) +static int find_start_code(const uint8_t **pbuf_ptr, const uint8_t *buf_end) { - uint8_t *buf_ptr; - unsigned int state=0xFFFFFFFF, v; - int val; + const uint8_t *buf_ptr= *pbuf_ptr; + + buf_ptr++; //gurantees that -1 is within the array + buf_end -= 2; // gurantees that +2 is within the array - buf_ptr = *pbuf_ptr; while (buf_ptr < buf_end) { - v = *buf_ptr++; - if (state == 0x000001) { - state = ((state << 8) | v) & 0xffffff; - val = state; - goto found; + if(*buf_ptr==0){ + while(buf_ptr < buf_end && buf_ptr[1]==0) + buf_ptr++; + + if(buf_ptr[-1] == 0 && buf_ptr[1] == 1){ + *pbuf_ptr = buf_ptr+3; + return buf_ptr[2] + 0x100; + } } - state = ((state << 8) | v) & 0xffffff; + buf_ptr += 2; } - val = -1; - found: - *pbuf_ptr = buf_ptr; - return val; + buf_end += 2; //undo the hack above + + *pbuf_ptr = buf_end; + return -1; } static int mpeg1_decode_picture(AVCodecContext *avctx, - uint8_t *buf, int buf_size) + const uint8_t *buf, int buf_size) { Mpeg1Context *s1 = avctx->priv_data; MpegEncContext *s = &s1->mpeg_enc_ctx; int ref, f_code, vbv_delay; + if(mpeg_decode_postinit(s->avctx) < 0) + return -2; + init_get_bits(&s->gb, buf, buf_size*8); ref = get_bits(&s->gb, 10); /* temporal ref */ @@ -1800,52 +1970,36 @@ static void mpeg_decode_sequence_extension(MpegEncContext *s) { int horiz_size_ext, vert_size_ext; int bit_rate_ext; - int frame_rate_ext_n, frame_rate_ext_d; int level, profile; skip_bits(&s->gb, 1); /* profil and level esc*/ profile= get_bits(&s->gb, 3); level= get_bits(&s->gb, 4); s->progressive_sequence = get_bits1(&s->gb); /* progressive_sequence */ - skip_bits(&s->gb, 2); /* chroma_format */ + s->chroma_format = get_bits(&s->gb, 2); /* chroma_format 1=420, 2=422, 3=444 */ horiz_size_ext = get_bits(&s->gb, 2); vert_size_ext = get_bits(&s->gb, 2); s->width |= (horiz_size_ext << 12); s->height |= (vert_size_ext << 12); bit_rate_ext = get_bits(&s->gb, 12); /* XXX: handle it */ - s->bit_rate = ((s->bit_rate / 400) | (bit_rate_ext << 12)) * 400; + s->bit_rate += (bit_rate_ext << 12) * 400; skip_bits1(&s->gb); /* marker */ s->avctx->rc_buffer_size += get_bits(&s->gb, 8)*1024*16<<10; s->low_delay = get_bits1(&s->gb); if(s->flags & CODEC_FLAG_LOW_DELAY) s->low_delay=1; - frame_rate_ext_n = get_bits(&s->gb, 2); - frame_rate_ext_d = get_bits(&s->gb, 5); - av_reduce( - &s->avctx->frame_rate, - &s->avctx->frame_rate_base, - frame_rate_tab[s->frame_rate_index].num * (frame_rate_ext_n+1), - frame_rate_tab[s->frame_rate_index].den * (frame_rate_ext_d+1), - 1<<30); + s->frame_rate_ext_n = get_bits(&s->gb, 2); + s->frame_rate_ext_d = get_bits(&s->gb, 5); dprintf("sequence extension\n"); s->codec_id= s->avctx->codec_id= CODEC_ID_MPEG2VIDEO; s->avctx->sub_id = 2; /* indicates mpeg2 found */ - if(s->aspect_ratio_info <= 1) - s->avctx->sample_aspect_ratio= mpeg2_aspect[s->aspect_ratio_info]; - else{ - s->avctx->sample_aspect_ratio= - av_div_q( - mpeg2_aspect[s->aspect_ratio_info], - (AVRational){s->width, s->height} - ); - } - if(s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_DEBUG, "profile: %d, level: %d vbv buffer: %d, bitrate:%d\n", profile, level, s->avctx->rc_buffer_size, s->bit_rate); + } static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1) @@ -1867,14 +2021,7 @@ static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1) s1->pan_scan.width= 16*w; s1->pan_scan.height=16*h; - - if(s->aspect_ratio_info > 1) - s->avctx->sample_aspect_ratio= - av_div_q( - 
mpeg2_aspect[s->aspect_ratio_info], - (AVRational){w, h} - ); - + if(s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_DEBUG, "sde w:%d, h:%d\n", w, h); } @@ -1882,9 +2029,23 @@ static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1) static void mpeg_decode_picture_display_extension(Mpeg1Context *s1) { MpegEncContext *s= &s1->mpeg_enc_ctx; - int i; - - for(i=0; i<1; i++){ //FIXME count + int i,nofco; + + nofco = 1; + if(s->progressive_sequence){ + if(s->repeat_first_field){ + nofco++; + if(s->top_field_first) + nofco++; + } + }else{ + if(s->picture_structure == PICT_FRAME){ + nofco++; + if(s->repeat_first_field) + nofco++; + } + } + for(i=0; ipan_scan.position[i][0]= get_sbits(&s->gb, 16); skip_bits(&s->gb, 1); //marker s1->pan_scan.position[i][1]= get_sbits(&s->gb, 16); @@ -1984,7 +2145,7 @@ static void mpeg_decode_picture_coding_extension(MpegEncContext *s) } static void mpeg_decode_extension(AVCodecContext *avctx, - uint8_t *buf, int buf_size) + const uint8_t *buf, int buf_size) { Mpeg1Context *s1 = avctx->priv_data; MpegEncContext *s = &s1->mpeg_enc_ctx; @@ -2079,7 +2240,7 @@ static int mpeg_field_start(MpegEncContext *s){ * DECODE_SLICE_OK if this slice is ok
*/ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y, - uint8_t **buf, int buf_size) + const uint8_t **buf, int buf_size) { MpegEncContext *s = &s1->mpeg_enc_ctx; AVCodecContext *avctx= s->avctx; @@ -2089,8 +2250,8 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y, s->resync_mb_x= s->resync_mb_y= -1; - if (mb_y >= s->mb_height){ - av_log(s->avctx, AV_LOG_ERROR, "slice below image (%d >= %d)\n", s->mb_y, s->mb_height); + if (mb_y<= s->mb_height){ + av_log(s->avctx, AV_LOG_ERROR, "slice below image (%d >= %d)\n", mb_y, s->mb_height); return -1; } @@ -2163,53 +2324,33 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y, return -1; if(s->current_picture.motion_val[0] && !s->encoding){ //note motion_val is normally NULL unless we want to extract the MVs - const int wrap = field_pic ? 2*s->block_wrap[0] : s->block_wrap[0]; - int xy = s->mb_x*2 + 1 + (s->mb_y*2 +1)*wrap; - int motion_for_top_x, motion_for_top_y, motion_back_top_x, motion_back_top_y; - int motion_for_bottom_x, motion_for_bottom_y, motion_back_bottom_x, motion_back_bottom_y; + const int wrap = field_pic ? 2*s->b8_stride : s->b8_stride; + int xy = s->mb_x*2 + s->mb_y*2*wrap; + int motion_x, motion_y, dir, i; if(field_pic && !s->first_field) xy += wrap/2; - if (s->mb_intra) { - motion_for_top_x = motion_for_top_y = motion_back_top_x = motion_back_top_y = - motion_for_bottom_x = motion_for_bottom_y = motion_back_bottom_x = motion_back_bottom_y = 0; - }else if (s->mv_type == MV_TYPE_16X16){ - motion_for_top_x = motion_for_bottom_x = s->mv[0][0][0]; - motion_for_top_y = motion_for_bottom_y = s->mv[0][0][1]; - motion_back_top_x = motion_back_bottom_x = s->mv[1][0][0]; - motion_back_top_y = motion_back_bottom_y = s->mv[1][0][1]; - } else /*if ((s->mv_type == MV_TYPE_FIELD) || (s->mv_type == MV_TYPE_16X8))*/ { - motion_for_top_x = s->mv[0][0][0]; - motion_for_top_y = s->mv[0][0][1]; - motion_for_bottom_x = s->mv[0][1][0]; - motion_for_bottom_y = s->mv[0][1][1]; - motion_back_top_x = s->mv[1][0][0]; - motion_back_top_y = s->mv[1][0][1]; - motion_back_bottom_x = s->mv[1][1][0]; - motion_back_bottom_y = s->mv[1][1][1]; - } + for(i=0; i<2; i++){ + for(dir=0; dir<2; dir++){ + if (s->mb_intra || (dir==1 && s->pict_type != B_TYPE)) { + motion_x = motion_y = 0; + }else if (s->mv_type == MV_TYPE_16X16 || (s->mv_type == MV_TYPE_FIELD && field_pic)){ + motion_x = s->mv[dir][0][0]; + motion_y = s->mv[dir][0][1]; + } else /*if ((s->mv_type == MV_TYPE_FIELD) || (s->mv_type == MV_TYPE_16X8))*/ { + motion_x = s->mv[dir][i][0]; + motion_y = s->mv[dir][i][1]; + } - s->current_picture.motion_val[0][xy][0] = motion_for_top_x; - s->current_picture.motion_val[0][xy][1] = motion_for_top_y; - s->current_picture.motion_val[0][xy + 1][0] = motion_for_top_x; - s->current_picture.motion_val[0][xy + 1][1] = motion_for_top_y; - s->current_picture.motion_val[0][xy + wrap][0] = motion_for_bottom_x; - s->current_picture.motion_val[0][xy + wrap][1] = motion_for_bottom_y; - s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_for_bottom_x; - s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_for_bottom_y; - - if(s->pict_type != B_TYPE){ - motion_back_top_x = motion_back_top_y = motion_back_bottom_x = motion_back_bottom_y = 0; + s->current_picture.motion_val[dir][xy ][0] = motion_x; + s->current_picture.motion_val[dir][xy ][1] = motion_y; + s->current_picture.motion_val[dir][xy + 1][0] = motion_x; + s->current_picture.motion_val[dir][xy + 1][1] = motion_y; + s->current_picture.ref_index [dir][xy ]= + s->current_picture.ref_index [dir][xy 
+ 1]= s->field_select[dir][i]; + } + xy += wrap; } - - s->current_picture.motion_val[1][xy][0] = motion_back_top_x; - s->current_picture.motion_val[1][xy][1] = motion_back_top_y; - s->current_picture.motion_val[1][xy + 1][0] = motion_back_top_x; - s->current_picture.motion_val[1][xy + 1][1] = motion_back_top_y; - s->current_picture.motion_val[1][xy + wrap][0] = motion_back_bottom_x; - s->current_picture.motion_val[1][xy + wrap][1] = motion_back_bottom_y; - s->current_picture.motion_val[1][xy + 1 + wrap][0] = motion_back_bottom_x; - s->current_picture.motion_val[1][xy + 1 + wrap][1] = motion_back_bottom_y; } s->dest[0] += 16; @@ -2275,7 +2416,7 @@ eos: // end of slice static int slice_decode_thread(AVCodecContext *c, void *arg){ MpegEncContext *s= arg; - uint8_t *buf= s->gb.buffer; + const uint8_t *buf= s->gb.buffer; int mb_y= s->start_mb_y; s->error_count= 3*(s->end_mb_y - s->start_mb_y)*s->mb_width; @@ -2352,63 +2493,31 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict) } static int mpeg1_decode_sequence(AVCodecContext *avctx, - uint8_t *buf, int buf_size) + const uint8_t *buf, int buf_size) { Mpeg1Context *s1 = avctx->priv_data; MpegEncContext *s = &s1->mpeg_enc_ctx; - int width, height, i, v, j; - float aspect; + int width,height; + int i, v, j; init_get_bits(&s->gb, buf, buf_size*8); width = get_bits(&s->gb, 12); height = get_bits(&s->gb, 12); + if (width <= 0 || height <= 0 || + (width % 2) != 0 || (height % 2) != 0) + return -1; s->aspect_ratio_info= get_bits(&s->gb, 4); if (s->aspect_ratio_info == 0) return -1; - aspect= 1.0/mpeg1_aspect[s->aspect_ratio_info]; - avctx->sample_aspect_ratio= av_d2q(aspect, 255); - s->frame_rate_index = get_bits(&s->gb, 4); if (s->frame_rate_index == 0 || s->frame_rate_index > 13) return -1; s->bit_rate = get_bits(&s->gb, 18) * 400; if (get_bits1(&s->gb) == 0) /* marker */ return -1; - if (width <= 0 || height <= 0 || - (width % 2) != 0 || (height % 2) != 0) - return -1; - if (width != s->width || - height != s->height) { - /* start new mpeg1 context decoding */ - s->out_format = FMT_MPEG1; - if (s1->mpeg_enc_ctx_allocated) { - MPV_common_end(s); - } - s->width = width; - s->height = height; - avctx->has_b_frames= 1; - avctx->width = width; - avctx->height = height; - avctx->frame_rate = frame_rate_tab[s->frame_rate_index].num; - avctx->frame_rate_base= frame_rate_tab[s->frame_rate_index].den; - avctx->bit_rate = s->bit_rate; - - if(avctx->xvmc_acceleration){ - avctx->pix_fmt = avctx->get_format(avctx,pixfmt_xvmc_mpg2_420); - }else{ - avctx->pix_fmt = avctx->get_format(avctx,pixfmt_yuv_420); - } - - if( avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT ) - if( avctx->idct_algo == FF_IDCT_AUTO ) - avctx->idct_algo = FF_IDCT_SIMPLE; - - if (MPV_common_init(s) < 0) - return -1; - s1->mpeg_enc_ctx_allocated = 1; - s->swap_uv = 0;//just in case vcr2 and mpeg2 stream have been concatinated - } + s->width = width; + s->height = height; s->avctx->rc_buffer_size= get_bits(&s->gb, 10) * 1024*16; skip_bits(&s->gb, 1); @@ -2421,19 +2530,19 @@ static int mpeg1_decode_sequence(AVCodecContext *avctx, av_log(s->avctx, AV_LOG_ERROR, "intra matrix damaged\n"); return -1; } - j = s->intra_scantable.permutated[i]; + j = s->dsp.idct_permutation[ ff_zigzag_direct[i] ]; s->intra_matrix[j] = v; s->chroma_intra_matrix[j] = v; } #ifdef DEBUG dprintf("intra matrix present\n"); for(i=0;i<64;i++) - dprintf(" %d", s->intra_matrix[s->intra_scantable.permutated[i]]); + dprintf(" %d", s->intra_matrix[s->dsp.idct_permutation[i]); printf("\n"); #endif } else { for(i=0;i<64;i++) { - 
int j= s->dsp.idct_permutation[i]; + j = s->dsp.idct_permutation[i]; v = ff_mpeg1_default_intra_matrix[i]; s->intra_matrix[j] = v; s->chroma_intra_matrix[j] = v; @@ -2446,14 +2555,14 @@ static int mpeg1_decode_sequence(AVCodecContext *avctx, av_log(s->avctx, AV_LOG_ERROR, "inter matrix damaged\n"); return -1; } - j = s->intra_scantable.permutated[i]; + j = s->dsp.idct_permutation[ ff_zigzag_direct[i] ]; s->inter_matrix[j] = v; s->chroma_inter_matrix[j] = v; } #ifdef DEBUG dprintf("non intra matrix present\n"); for(i=0;i<64;i++) - dprintf(" %d", s->inter_matrix[s->intra_scantable.permutated[i]]); + dprintf(" %d", s->inter_matrix[s->dsp.idct_permutation[i]); printf("\n"); #endif } else { @@ -2475,8 +2584,11 @@ static int mpeg1_decode_sequence(AVCodecContext *avctx, s->progressive_frame = 1; s->picture_structure = PICT_FRAME; s->frame_pred_frame_dct = 1; + s->chroma_format = 1; s->codec_id= s->avctx->codec_id= CODEC_ID_MPEG1VIDEO; avctx->sub_id = 1; /* indicates mpeg1 */ + s->out_format = FMT_MPEG1; + s->swap_uv = 0;//AFAIK VCR2 don't have SEQ_HEADER if(s->flags & CODEC_FLAG_LOW_DELAY) s->low_delay=1; if(s->avctx->debug & FF_DEBUG_PICT_INFO) @@ -2533,6 +2645,7 @@ static int vcr2_init_sequence(AVCodecContext *avctx) s->progressive_frame = 1; s->picture_structure = PICT_FRAME; s->frame_pred_frame_dct = 1; + s->chroma_format = 1; s->codec_id= s->avctx->codec_id= CODEC_ID_MPEG2VIDEO; avctx->sub_id = 2; /* indicates mpeg2 */ return 0; @@ -2568,6 +2681,36 @@ static void mpeg_decode_user_data(AVCodecContext *avctx, } } +static void mpeg_decode_gop(AVCodecContext *avctx, + const uint8_t *buf, int buf_size){ + Mpeg1Context *s1 = avctx->priv_data; + MpegEncContext *s = &s1->mpeg_enc_ctx; + + int drop_frame_flag; + int time_code_hours, time_code_minutes; + int time_code_seconds, time_code_pictures; + int broken_link; + + init_get_bits(&s->gb, buf, buf_size*8); + + drop_frame_flag = get_bits1(&s->gb); + + time_code_hours=get_bits(&s->gb,5); + time_code_minutes = get_bits(&s->gb,6); + skip_bits1(&s->gb);//marker bit + time_code_seconds = get_bits(&s->gb,6); + time_code_pictures = get_bits(&s->gb,6); + + /*broken_link indicate that after editing the + reference frames of the first B-Frames after GOP I-Frame + are missing (open gop)*/ + broken_link = get_bits1(&s->gb); + + if(s->avctx->debug & FF_DEBUG_PICT_INFO) + av_log(s->avctx, AV_LOG_DEBUG, "GOP (%2d:%02d:%02d.[%02d]) broken_link=%d\n", + time_code_hours, time_code_minutes, time_code_seconds, + time_code_pictures, broken_link); +} /** * finds the end of the current frame in the bitstream. 
* @return the position of the first byte of the next frame, or -1 @@ -2613,7 +2756,8 @@ static int mpeg_decode_frame(AVCodecContext *avctx, uint8_t *buf, int buf_size) { Mpeg1Context *s = avctx->priv_data; - uint8_t *buf_end, *buf_ptr; + const uint8_t *buf_end; + const uint8_t *buf_ptr; int ret, start_code, input_size; AVFrame *picture = data; MpegEncContext *s2 = &s->mpeg_enc_ctx; @@ -2680,20 +2824,20 @@ static int mpeg_decode_frame(AVCodecContext *avctx, input_size = buf_end - buf_ptr; if(avctx->debug & FF_DEBUG_STARTCODE){ - av_log(avctx, AV_LOG_DEBUG, "%3X at %d left %d\n", start_code, buf_ptr-buf, input_size); + av_log(avctx, AV_LOG_DEBUG, "%3X at %zd left %d\n", start_code, buf_ptr-buf, input_size); } /* prepare data for next start code */ switch(start_code) { case SEQ_START_CODE: mpeg1_decode_sequence(avctx, buf_ptr, - input_size); + input_size); break; case PICTURE_START_CODE: /* we have a complete image : we try to decompress it */ mpeg1_decode_picture(avctx, - buf_ptr, input_size); + buf_ptr, input_size); break; case EXT_START_CODE: mpeg_decode_extension(avctx, @@ -2705,6 +2849,8 @@ static int mpeg_decode_frame(AVCodecContext *avctx, break; case GOP_START_CODE: s2->first_field=0; + mpeg_decode_gop(avctx, + buf_ptr, input_size); break; default: if (start_code >= SLICE_MIN_START_CODE && @@ -2862,7 +3008,8 @@ AVCodec mpeg_xvmc_decoder = { NULL, mpeg_decode_end, mpeg_decode_frame, - CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED, + CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED| CODEC_CAP_HWACCEL, + .flush= ff_mpeg_flush, }; #endif
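For readers skimming this diff, the rewritten find_start_code() above replaces the old byte-at-a-time state machine with a scan that advances two bytes whenever the current byte is non-zero, since the two-zero prefix of a 00 00 01 start code cannot end on either of those positions. The snippet below is a minimal, self-contained sketch of that idea under stated assumptions: next_start_code(), the test buffer and the main() driver are illustrative inventions for this note, not code from mpeg12.c, and the returned value mirrors the diff's convention of 0x100 plus the start-code byte.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-alone sketch of the 00 00 01 start-code scan used by
 * the new find_start_code() in this diff. If *p != 0, the second zero of a
 * 00 00 01 prefix cannot sit at p or p+1, so the scan may skip two bytes. */
static int next_start_code(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
{
    const uint8_t *p = *pbuf_ptr;

    p++;            /* so p[-1] is always readable */
    buf_end -= 2;   /* so p[1] and p[2] are always readable */

    while (p < buf_end) {
        if (*p == 0) {
            while (p < buf_end && p[1] == 0)
                p++;
            if (p[-1] == 0 && p[1] == 1) {
                *pbuf_ptr = p + 3;          /* first byte after the code */
                return p[2] + 0x100;        /* 0x1xx start-code value */
            }
        }
        p += 2;
    }
    *pbuf_ptr = buf_end + 2;
    return -1;
}

int main(void)
{
    /* sequence header (0x1B3) followed by a picture start code (0x100) */
    static const uint8_t buf[] = {
        0x00, 0x00, 0x01, 0xB3, 0x12, 0x34,
        0x00, 0x00, 0x01, 0x00, 0xFF
    };
    const uint8_t *p = buf;
    int code;

    while ((code = next_start_code(&p, buf + sizeof(buf))) >= 0)
        printf("start code 0x%X, code byte at offset %td\n",
               code, (p - 1) - buf);
    return 0;
}

Run against the sample buffer this prints 0x1B3 and 0x100 in order, which matches how mpeg_decode_frame() walks the bitstream from one start code to the next.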