return 0;
} //FIXME cleanup like ff_h264_check_intra_pred_mode
-static int check_intra_pred_mode(H264Context *h, int mode, int is_chroma){
+int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma){
MpegEncContext * const s = &h->s;
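+ // Remap tables for the DC prediction modes: top[] is consulted when the
+ // top samples are unavailable, left[] when the left ones are; -1 marks
+ // mode/neighbour combinations that have no valid substitute.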
static const int8_t top [7]= {LEFT_DC_PRED8x8, 1,-1,-1};
static const int8_t left[7]= { TOP_DC_PRED8x8,-1, 2,-1,DC_128_PRED8x8};
return mode;
}
-/**
- * checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
- */
-int ff_h264_check_intra16x16_pred_mode(H264Context *h, int mode)
-{
- return check_intra_pred_mode(h, mode, 0);
-}
-
-/**
- * checks if the top & left blocks are available if needed & changes the dc mode so it only uses the available blocks.
- */
-int ff_h264_check_intra_chroma_pred_mode(H264Context *h, int mode)
-{
- return check_intra_pred_mode(h, mode, 1);
-}
-
const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_length, int *consumed, int length){
int i, si, di;
bufidx = h->nal_unit_type == NAL_DPC ? 1 : 0; // use second escape buffer for inter data
- si=h->rbsp_buffer_size[bufidx];
- av_fast_malloc(&h->rbsp_buffer[bufidx], &h->rbsp_buffer_size[bufidx], length+FF_INPUT_BUFFER_PADDING_SIZE+MAX_MBPAIR_SIZE);
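+ // av_fast_padded_malloc() allocates FF_INPUT_BUFFER_PADDING_SIZE extra
+ // bytes and zeroes them itself, so the buffer-size snapshot in si and
+ // the manual memset of the tail below become redundant.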
+ av_fast_padded_malloc(&h->rbsp_buffer[bufidx], &h->rbsp_buffer_size[bufidx], length+MAX_MBPAIR_SIZE);
dst= h->rbsp_buffer[bufidx];
- if(si != h->rbsp_buffer_size[bufidx])
- memset(dst + length, 0, FF_INPUT_BUFFER_PADDING_SIZE+MAX_MBPAIR_SIZE);
if (dst == NULL){
return NULL;
nrefs[list]--;
if(!FIELD_PICTURE && ref_field_picture){ // frame referencing two fields
- ff_thread_await_progress((AVFrame*)ref_pic, FFMIN((row >> 1) - !(row&1), pic_height-1), 1);
- ff_thread_await_progress((AVFrame*)ref_pic, FFMIN((row >> 1) , pic_height-1), 0);
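+ // Picture's embedded AVFrame is now reached through its f member rather
+ // than by casting. A frame referencing two fields must wait on both:
+ // frame row `row` maps to field row row >> 1, with the parity term
+ // ensuring the second field has progressed far enough.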
+ ff_thread_await_progress(&ref_pic->f, FFMIN((row >> 1) - !(row & 1), pic_height - 1), 1);
+ ff_thread_await_progress(&ref_pic->f, FFMIN((row >> 1), pic_height - 1), 0);
}else if(FIELD_PICTURE && !ref_field_picture){ // field referencing one field of a frame
- ff_thread_await_progress((AVFrame*)ref_pic, FFMIN(row*2 + ref_field , pic_height-1), 0);
+ ff_thread_await_progress(&ref_pic->f, FFMIN(row * 2 + ref_field, pic_height - 1), 0);
}else if(FIELD_PICTURE){
- ff_thread_await_progress((AVFrame*)ref_pic, FFMIN(row, pic_height-1), ref_field);
+ ff_thread_await_progress(&ref_pic->f, FFMIN(row, pic_height - 1), ref_field);
}else{
- ff_thread_await_progress((AVFrame*)ref_pic, FFMIN(row, pic_height-1), 0);
+ ff_thread_await_progress(&ref_pic->f, FFMIN(row, pic_height - 1), 0);
}
}
}
s->unrestricted_mv=1;
s->dsp.dct_bits = 16;
- dsputil_init(&s->dsp, s->avctx); // needed so that idct permutation is known early
+ ff_dsputil_init(&s->dsp, s->avctx); // needed so that idct permutation is known early
memset(h->pps.scaling_matrix4, 16, 6*16*sizeof(uint8_t));
memset(h->pps.scaling_matrix8, 16, 2*64*sizeof(uint8_t));
MpegEncContext * const s = &h->s;
int i;
- MPV_decode_defaults(s);
+ ff_MPV_decode_defaults(s);
s->avctx = avctx;
common_init(h);
int i;
const int pixel_shift = h->pixel_shift;
- if(MPV_frame_start(s, s->avctx) < 0)
+ if(ff_MPV_frame_start(s, s->avctx) < 0)
return -1;
ff_er_frame_start(s);
/*
- * MPV_frame_start uses pict_type to derive key_frame.
+ * ff_MPV_frame_start uses pict_type to derive key_frame.
* This is incorrect for H.264; IDR markings must be used.
* Zero here; IDR markings per slice in frame or fields are ORed in later.
* See decode_nal_units().
// We mark the current picture as non-reference after allocating it, so
// that if we break out due to an error it can be released automatically
- // in the next MPV_frame_start().
+ // in the next ff_MPV_frame_start().
// SVQ3 as well as most other codecs have only last/next/current and thus
// get released even when marked as reference; moreover, SVQ3 and the
// others do not mark frames as reference later "naturally".
s->mb_y= 0;
if (!in_setup && !s->dropable)
- ff_thread_report_progress((AVFrame*)s->current_picture_ptr, (16*s->mb_height >> FIELD_PICTURE) - 1,
- s->picture_structure==PICT_BOTTOM_FIELD);
+ ff_thread_report_progress(&s->current_picture_ptr->f,
+ (16 * s->mb_height >> FIELD_PICTURE) - 1,
+ s->picture_structure == PICT_BOTTOM_FIELD);
if (CONFIG_H264_VDPAU_DECODER && s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
ff_vdpau_h264_set_reference_frames(s);
if (!FIELD_PICTURE)
ff_er_frame_end(s);
- MPV_frame_end(s);
+ ff_MPV_frame_end(s);
h->current_slice=0;
/**
* Decode a slice header.
- * This will also call MPV_common_init() and frame_start() as needed.
+ * This will also call ff_MPV_common_init() and frame_start() as needed.
*
* @param h H264Context
* @param h0 H264 master context (differs from 'h' when doing slice-based parallel decoding)
s->avctx->level = h->sps.level_idc;
s->avctx->refs = h->sps.ref_frame_count;
- if(h == h0 && h->dequant_coeff_pps != pps_id){
- h->dequant_coeff_pps = pps_id;
- init_dequant_tables(h);
- }
-
s->mb_width= h->sps.mb_width;
s->mb_height= h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
}
free_tables(h, 0);
flush_dpb(s->avctx);
- MPV_common_end(s);
+ ff_MPV_common_end(s);
h->list_count = 0;
}
if (!s->context_initialized) {
ff_h264dsp_init(&h->h264dsp, h->sps.bit_depth_luma, h->sps.chroma_format_idc);
ff_h264_pred_init(&h->hpc, s->codec_id, h->sps.bit_depth_luma, h->sps.chroma_format_idc);
s->dsp.dct_bits = h->sps.bit_depth_luma > 8 ? 32 : 16;
- dsputil_init(&s->dsp, s->avctx);
+ ff_dsputil_init(&s->dsp, s->avctx);
} else {
av_log(s->avctx, AV_LOG_ERROR, "Unsupported bit depth: %d chroma_idc: %d\n",
h->sps.bit_depth_luma, h->sps.chroma_format_idc);
else
s->avctx->pix_fmt = PIX_FMT_YUV420P10;
break;
- default:
+ case 8:
if (CHROMA444){
s->avctx->pix_fmt = s->avctx->color_range == AVCOL_RANGE_JPEG ? PIX_FMT_YUVJ444P : PIX_FMT_YUV444P;
if (s->avctx->colorspace == AVCOL_SPC_RGB) {
hwaccel_pixfmt_list_h264_jpeg_420 :
ff_hwaccel_pixfmt_list_420);
}
+ break;
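+ // Previously any unlisted bit depth silently took the 8-bit path;
+ // reject unknown depths explicitly instead of decoding with the
+ // wrong pixel format.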
+ default:
+ av_log(s->avctx, AV_LOG_ERROR,
+ "Unsupported bit depth: %d\n", h->sps.bit_depth_luma);
+ return AVERROR_INVALIDDATA;
}
s->avctx->hwaccel = ff_find_hwaccel(s->avctx->codec->id, s->avctx->pix_fmt);
- if (MPV_common_init(s) < 0) {
- av_log(h->s.avctx, AV_LOG_ERROR, "MPV_common_init() failed.\n");
+ if (ff_MPV_common_init(s) < 0) {
+ av_log(h->s.avctx, AV_LOG_ERROR, "ff_MPV_common_init() failed.\n");
return -1;
}
s->first_field = 0;
}
}
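+ // The dequant-table setup is moved below context initialisation,
+ // presumably so the tables are built against the now-current SPS/PPS
+ // state (bit depth, scaling matrices) rather than stale values.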
+ if(h == h0 && h->dequant_coeff_pps != pps_id){
+ h->dequant_coeff_pps = pps_id;
+ init_dequant_tables(h);
+ }
+
h->frame_num= get_bits(&s->gb, h->sps.log2_max_frame_num);
h->mb_mbaff = 0;
h->prev_frame_num++;
h->prev_frame_num %= 1<<h->sps.log2_max_frame_num;
s->current_picture_ptr->frame_num= h->prev_frame_num;
- ff_thread_report_progress((AVFrame*)s->current_picture_ptr, INT_MAX, 0);
- ff_thread_report_progress((AVFrame*)s->current_picture_ptr, INT_MAX, 1);
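+ // INT_MAX marks this gap-concealment frame as fully decoded for both
+ // fields, so no other decode thread can block waiting on it.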
+ ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 0);
+ ff_thread_report_progress(&s->current_picture_ptr->f, INT_MAX, 1);
ff_generate_sliding_window_mmcos(h);
if (ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index) < 0 &&
(s->avctx->err_recognition & AV_EF_EXPLODE))
} else {
if (s0->current_picture_ptr->frame_num != h->frame_num) {
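+ // The pairing field never arrived; report the previous picture as
+ // complete so waiting threads are released before this field is
+ // treated as the first of a new frame.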
+ ff_thread_report_progress(&s0->current_picture_ptr->f, INT_MAX,
+ s0->picture_structure==PICT_BOTTOM_FIELD);
/*
* This and previous field had
* different frame_nums. Consider this field first in
h->ref_count[1]= h->pps.ref_count[1];
if(h->slice_type_nos != AV_PICTURE_TYPE_I){
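+ // H.264 limits reference indices to 0..15 for frame coding and 0..31
+ // for field coding; write the limit directly instead of shifting.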
- unsigned max= (16<<(s->picture_structure != PICT_FRAME))-1;
+ unsigned max= s->picture_structure == PICT_FRAME ? 15 : 31;
+
if(h->slice_type_nos == AV_PICTURE_TYPE_B){
h->direct_spatial_mv_pred= get_bits1(&s->gb);
}
h->ref_count[0]= get_ue_golomb(&s->gb) + 1;
if(h->slice_type_nos==AV_PICTURE_TYPE_B)
h->ref_count[1]= get_ue_golomb(&s->gb) + 1;
-
}
- if(h->ref_count[0]-1 > max || h->ref_count[1]-1 > max){
+
+ if (h->ref_count[0]-1 > max || h->ref_count[1]-1 > max){
av_log(h->s.avctx, AV_LOG_ERROR, "reference overflow\n");
- h->ref_count[0]= h->ref_count[1]= 1;
- return -1;
+ h->ref_count[0] = h->ref_count[1] = 1;
+ return AVERROR_INVALIDDATA;
}
+
if(h->slice_type_nos == AV_PICTURE_TYPE_B)
h->list_count= 2;
else
if (s->dropable) return;
- ff_thread_report_progress((AVFrame*)s->current_picture_ptr, top + height - 1,
- s->picture_structure==PICT_BOTTOM_FIELD);
+ ff_thread_report_progress(&s->current_picture_ptr->f, top + height - 1,
+ s->picture_structure == PICT_BOTTOM_FIELD);
}
static int decode_slice(struct AVCodecContext *avctx, void *arg){
if(s->mb_y >= s->mb_height){
tprintf(s->avctx, "slice end %d %d\n", get_bits_count(&s->gb), s->gb.size_in_bits);
- if( get_bits_count(&s->gb) == s->gb.size_in_bits
- || get_bits_count(&s->gb) < s->gb.size_in_bits && !(s->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
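+ // get_bits_left() states the end-of-slice checks directly, replacing
+ // the repeated get_bits_count()/size_in_bits comparisons.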
+ if (get_bits_left(&s->gb) == 0
+ || (get_bits_left(&s->gb) > 0 && !(s->avctx->err_recognition & AV_EF_AGGRESSIVE))) {
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END&part_mask);
return 0;
}
}
- if(get_bits_count(&s->gb) >= s->gb.size_in_bits && s->mb_skip_run<=0){
+ if (get_bits_left(&s->gb) <= 0 && s->mb_skip_run <= 0){
tprintf(s->avctx, "slice end %d %d\n", get_bits_count(&s->gb), s->gb.size_in_bits);
- if(get_bits_count(&s->gb) == s->gb.size_in_bits ){
+ if (get_bits_left(&s->gb) == 0) {
ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END&part_mask);
if (s->mb_x > lf_x_start) loop_filter(h, lf_x_start, s->mb_x);
int consumed;
int dst_length;
int bit_length;
- uint8_t *ptr;
+ const uint8_t *ptr;
int i, nalsize = 0;
int err;
break;
case NAL_SPS:
init_get_bits(&s->gb, ptr, bit_length);
- if(ff_h264_decode_seq_parameter_set(h) < 0 && (h->is_avc ? (nalsize != consumed) && nalsize : 1)){
+ if (ff_h264_decode_seq_parameter_set(h) < 0 && (h->is_avc ? (nalsize != consumed) && nalsize : 1)){
av_log(h->s.avctx, AV_LOG_DEBUG, "SPS decoding failure, trying alternative mode\n");
if(h->is_avc) av_assert0(next_avc - buf_index + consumed == nalsize);
- init_get_bits(&s->gb, &buf[buf_index + 1 - consumed], 8*(next_avc - buf_index + consumed));
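+ // The retry starts one byte earlier (buf_index + 1 - consumed), so only
+ // next_avc - buf_index + consumed - 1 bytes remain before next_avc.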
+ init_get_bits(&s->gb, &buf[buf_index + 1 - consumed], 8*(next_avc - buf_index + consumed - 1));
ff_h264_decode_seq_parameter_set(h);
}
if(out){
*data_size = sizeof(AVFrame);
- *pict= *(AVFrame*)out;
+ *pict = out->f;
}
return buf_index;
}
if(h->is_avc && buf_size >= 9 && buf[0]==1 && buf[2]==0 && (buf[4]&0xFC)==0xFC && (buf[5]&0x1F) && buf[8]==0x67){
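+ // Looks like avcC extradata sent in-band: buf[0]==1 is the
+ // configurationVersion, buf[5]&0x1f the SPS count, and 0x67 the first
+ // SPS NAL; walk the embedded parameter-set records.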
int cnt= buf[5]&0x1f;
- uint8_t *p= buf+6;
+ const uint8_t *p= buf+6;
while(cnt--){
int nalsize= AV_RB16(p) + 2;
if(nalsize > buf_size - (p-buf) || p[2]!=0x67)
*data_size = 0; /* Wait for second field. */
if (h->next_output_pic && (h->next_output_pic->sync || h->sync>1)) {
*data_size = sizeof(AVFrame);
- *pict = *(AVFrame*)h->next_output_pic;
+ *pict = h->next_output_pic->f;
}
}
ff_h264_remove_all_refs(h);
ff_h264_free_context(h);
- MPV_common_end(s);
+ ff_MPV_common_end(s);
// memset(h, 0, sizeof(H264Context));