DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
DCTELEM *block, int n, int qscale);
-static void dct_unquantize_h261_intra_c(MpegEncContext *s,
- DCTELEM *block, int n, int qscale);
-static void dct_unquantize_h261_inter_c(MpegEncContext *s,
- DCTELEM *block, int n, int qscale);
static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w);
#ifdef CONFIG_ENCODERS
static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
enum PixelFormat ff_yuv420p_list[2]= {PIX_FMT_YUV420P, -1};
static void convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][64],
- const uint16_t *quant_matrix, int bias, int qmin, int qmax)
+ const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
{
int qscale;
+ int shift=0;
for(qscale=qmin; qscale<=qmax; qscale++){
int i;
qmat16[qscale][1][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][0][i]);
}
}
+
+ for(i=intra; i<64; i++){
+ int64_t max= 8191;
+ if (dsp->fdct == fdct_ifast
+#ifndef FAAN_POSTSCALE
+ || dsp->fdct == ff_faandct
+#endif
+ ) {
+ max= (8191LL*aanscales[i]) >> 14;
+ }
+ while(((max * qmat[qscale][i]) >> shift) > INT_MAX){
+ shift++;
+ }
+ }
+ }
+ if(shift){
+ av_log(NULL, AV_LOG_INFO, "Warning, QMAT_SHIFT is larger then %d, overflows possible\n", QMAT_SHIFT - shift);
}
}
{
s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
- s->dct_unquantize_h261_intra = dct_unquantize_h261_intra_c;
- s->dct_unquantize_h261_inter = dct_unquantize_h261_inter_c;
s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
#ifdef CONFIG_ENCODERS
s->dct_quantize= dct_quantize_c;
s->denoise_dct= denoise_dct_c;
-#endif
+#endif //CONFIG_ENCODERS
#ifdef HAVE_MMX
MPV_common_init_mmx(s);
#endif //CONFIG_ENCODERS
/* load & permutate scantables
- note: only wmv uses differnt ones
+ note: only wmv uses different ones
*/
if(s->alternate_scan){
ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
if(!src->ref_index[0])
av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
if(src->motion_subsample_log2 != dst->motion_subsample_log2)
- av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_subsample_log2 doesnt match! (%d!=%d)\n",
+ av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_subsample_log2 doesn't match! (%d!=%d)\n",
src->motion_subsample_log2, dst->motion_subsample_log2);
memcpy(dst->mb_type, src->mb_type, s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));
}
if(pic->linesize[1] != pic->linesize[2]){
- av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride missmatch)\n");
+ av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
return -1;
}
pic->mb_type= pic->mb_type_base + s->mb_stride+1;
if(s->out_format == FMT_H264){
for(i=0; i<2; i++){
- CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b4_array_size+2) * sizeof(int16_t))
- pic->motion_val[i]= pic->motion_val_base[i]+2;
+ CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t))
+ pic->motion_val[i]= pic->motion_val_base[i]+4;
CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t))
}
pic->motion_subsample_log2= 2;
}else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
for(i=0; i<2; i++){
- CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+2) * sizeof(int16_t))
- pic->motion_val[i]= pic->motion_val_base[i]+2;
+ CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t))
+ pic->motion_val[i]= pic->motion_val_base[i]+4;
CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t))
}
pic->motion_subsample_log2= 3;
memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
s->prev_pict_types[0]= s->pict_type;
if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == B_TYPE)
- pic->age= INT_MAX; // skiped MBs in b frames are quite rare in mpeg1/2 and its a bit tricky to skip them anyway
+ pic->age= INT_MAX; // skipped MBs in b frames are quite rare in mpeg1/2 and its a bit tricky to skip them anyway
return 0;
fail: //for the CHECKED_ALLOCZ macro
COPY(lambda2);
COPY(picture_in_gop_number);
COPY(gop_picture_number);
- COPY(frame_pred_frame_dct); //FIXME dont set in encode_header
- COPY(progressive_frame); //FIXME dont set in encode_header
- COPY(partitioned_frame); //FIXME dont set in encode_header
+ COPY(frame_pred_frame_dct); //FIXME don't set in encode_header
+ COPY(progressive_frame); //FIXME don't set in encode_header
+ COPY(partitioned_frame); //FIXME don't set in encode_header
#undef COPY
}
done=1;
default_mv_penalty= av_mallocz( sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1) );
- memset(default_mv_penalty, 0, sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1));
memset(default_fcode_tab , 0, sizeof(uint8_t)*(2*MAX_MV+1));
for(i=-16; i<16; i++){
return -1;
}
+ if((s->width || s->height) && avcodec_check_dimensions(s->avctx, s->width, s->height))
+ return -1;
+
dsputil_init(&s->dsp, s->avctx);
DCT_common_init(s);
CHECKED_ALLOCZ(s->coded_block_base, y_size);
s->coded_block= s->coded_block_base + s->b8_stride + 1;
- /* divx501 bitstream reorder buffer */
- CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE);
-
/* cbp, ac_pred, pred_dir */
CHECKED_ALLOCZ(s->cbp_table , mb_array_size * sizeof(uint8_t))
CHECKED_ALLOCZ(s->pred_dir_table, mb_array_size * sizeof(uint8_t))
av_freep(&s->mbskip_table);
av_freep(&s->prev_pict_types);
av_freep(&s->bitstream_buffer);
+ s->allocated_bitstream_buffer_size=0;
+
av_freep(&s->avctx->stats_out);
av_freep(&s->ac_stats);
av_freep(&s->error_status_table);
int MPV_encode_init(AVCodecContext *avctx)
{
MpegEncContext *s = avctx->priv_data;
- int i, dummy;
+ int i;
int chroma_h_shift, chroma_v_shift;
MPV_encode_defaults(s);
- avctx->pix_fmt = PIX_FMT_YUV420P; // FIXME
+ if(avctx->pix_fmt != PIX_FMT_YUVJ420P && avctx->pix_fmt != PIX_FMT_YUV420P){
+ av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
+ return -1;
+ }
+
+ if(avctx->codec_id == CODEC_ID_MJPEG || avctx->codec_id == CODEC_ID_LJPEG){
+ if(avctx->strict_std_compliance>FF_COMPLIANCE_INOFFICIAL && avctx->pix_fmt != PIX_FMT_YUVJ420P){
+ av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
+ return -1;
+ }
+ }else{
+ if(avctx->strict_std_compliance>FF_COMPLIANCE_INOFFICIAL && avctx->pix_fmt != PIX_FMT_YUV420P){
+ av_log(avctx, AV_LOG_ERROR, "colorspace not supported\n");
+ return -1;
+ }
+ }
s->bit_rate = avctx->bit_rate;
s->width = avctx->width;
|| s->avctx->temporal_cplx_masking
|| s->avctx->spatial_cplx_masking
|| s->avctx->p_masking
+ || s->avctx->border_masking
|| (s->flags&CODEC_FLAG_QP_RD))
&& !s->fixed_qscale;
}
if(avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate){
- av_log(avctx, AV_LOG_INFO, "Warning min_rate > 0 but min_rate != max_rate isnt recommanded!\n");
+ av_log(avctx, AV_LOG_INFO, "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
}
if(avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate){
}
if(s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE){
- av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with simple mb decission\n");
+ av_log(avctx, AV_LOG_ERROR, "OBMC is only supported with simple mb decision\n");
return -1;
}
}
if(s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4){ //FIXME mpeg2 uses that too
- av_log(avctx, AV_LOG_ERROR, "mpeg2 style quantization not supporetd by codec\n");
+ av_log(avctx, AV_LOG_ERROR, "mpeg2 style quantization not supported by codec\n");
return -1;
}
if(s->avctx->thread_count > 1)
s->rtp_mode= 1;
- i= ff_gcd(avctx->frame_rate, avctx->frame_rate_base);
+ if(!avctx->time_base.den || !avctx->time_base.num){
+ av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
+ return -1;
+ }
+
+ i= (INT_MAX/2+128)>>8;
+ if(avctx->me_threshold >= i){
+ av_log(avctx, AV_LOG_ERROR, "me_threshold too large, max is %d\n", i - 1);
+ return -1;
+ }
+ if(avctx->mb_threshold >= i){
+ av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n", i - 1);
+ return -1;
+ }
+
+ if(avctx->b_frame_strategy && (avctx->flags&CODEC_FLAG_PASS2)){
+ av_log(avctx, AV_LOG_ERROR, "b_frame_strategy must be 0 on the second pass");
+ return -1;
+ }
+
+ i= ff_gcd(avctx->time_base.den, avctx->time_base.num);
if(i > 1){
av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
- avctx->frame_rate /= i;
- avctx->frame_rate_base /= i;
+ avctx->time_base.den /= i;
+ avctx->time_base.num /= i;
// return -1;
}
avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);
- av_reduce(&s->time_increment_resolution, &dummy, s->avctx->frame_rate, s->avctx->frame_rate_base, (1<<16)-1);
- s->time_increment_bits = av_log2(s->time_increment_resolution - 1) + 1;
+ if(avctx->codec_id == CODEC_ID_MPEG4 && s->avctx->time_base.den > (1<<16)-1){
+ av_log(avctx, AV_LOG_ERROR, "timebase not supported by mpeg 4 standard\n");
+ return -1;
+ }
+ s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
switch(avctx->codec->id) {
case CODEC_ID_MPEG1VIDEO:
avctx->delay=0;
s->low_delay=1;
break;
-#ifdef CONFIG_RISKY
+ case CODEC_ID_H261:
+ s->out_format = FMT_H261;
+ avctx->delay=0;
+ s->low_delay=1;
+ break;
case CODEC_ID_H263:
if (h263_get_picture_format(s->width, s->height) == 7) {
av_log(avctx, AV_LOG_INFO, "Input picture size isn't suitable for h263 codec! try h263+\n");
avctx->delay=0;
s->low_delay=1;
break;
+ case CODEC_ID_RV20:
+ s->out_format = FMT_H263;
+ avctx->delay=0;
+ s->low_delay=1;
+ s->modified_quant=1;
+ s->h263_aic=1;
+ s->h263_plus=1;
+ s->loop_filter=1;
+ s->unrestricted_mv= s->obmc || s->loop_filter || s->umvplus;
+ break;
case CODEC_ID_MPEG4:
s->out_format = FMT_H263;
s->h263_pred = 1;
avctx->delay=0;
s->low_delay=1;
break;
-#endif
default:
return -1;
}
s->quant_precision=5;
ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
+ ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
-#ifdef CONFIG_ENCODERS
-#ifdef CONFIG_RISKY
+#ifdef CONFIG_H261_ENCODER
+ if (s->out_format == FMT_H261)
+ ff_h261_encode_init(s);
+#endif
if (s->out_format == FMT_H263)
h263_encode_init(s);
if(s->msmpeg4_version)
ff_msmpeg4_encode_init(s);
-#endif
if (s->out_format == FMT_MPEG1)
ff_mpeg1_encode_init(s);
-#endif
/* init q matrix */
for(i=0;i<64;i++) {
int j= s->dsp.idct_permutation[i];
-#ifdef CONFIG_RISKY
if(s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
- }else if(s->out_format == FMT_H263){
+ }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
s->intra_matrix[j] =
s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
}else
-#endif
{ /* mpeg1/2 */
s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
/* for mjpeg, we do include qscale in the matrix */
if (s->out_format != FMT_MJPEG) {
convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
- s->intra_matrix, s->intra_quant_bias, 1, 31);
+ s->intra_matrix, s->intra_quant_bias, avctx->qmin, 31, 1);
convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
- s->inter_matrix, s->inter_quant_bias, 1, 31);
+ s->inter_matrix, s->inter_quant_bias, avctx->qmin, 31, 0);
}
if(ff_rate_control_init(s) < 0)
#endif //CONFIG_ENCODERS
-void init_rl(RLTable *rl)
+void init_rl(RLTable *rl, int use_static)
{
int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
uint8_t index_run[MAX_RUN+1];
int last, run, level, start, end, i;
+ /* If table is static, we can quit if rl->max_level[0] is not NULL */
+ if(use_static && rl->max_level[0])
+ return;
+
/* compute max_level[], max_run[] and index_run[] */
for(last=0;last<2;last++) {
if (last == 0) {
if (run > max_run[level])
max_run[level] = run;
}
- rl->max_level[last] = av_malloc(MAX_RUN + 1);
+ if(use_static)
+ rl->max_level[last] = av_mallocz_static(MAX_RUN + 1);
+ else
+ rl->max_level[last] = av_malloc(MAX_RUN + 1);
memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
- rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
+ if(use_static)
+ rl->max_run[last] = av_mallocz_static(MAX_LEVEL + 1);
+ else
+ rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
- rl->index_run[last] = av_malloc(MAX_RUN + 1);
+ if(use_static)
+ rl->index_run[last] = av_mallocz_static(MAX_RUN + 1);
+ else
+ rl->index_run[last] = av_malloc(MAX_RUN + 1);
memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
}
}
{
int i;
AVFrame *pic;
- s->mb_skiped = 0;
+ s->mb_skipped = 0;
assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
}
alloc:
if(!s->encoding){
- /* release non refernce frames */
+ /* release non reference frames */
for(i=0; i<MAX_PICTURE_COUNT; i++){
if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
pic= (AVFrame*)&s->picture[i];
}
- pic->reference= s->pict_type != B_TYPE && !s->dropable ? 3 : 0;
+ pic->reference= (s->pict_type != B_TYPE || s->codec_id == CODEC_ID_H264)
+ && !s->dropable ? 3 : 0;
pic->coded_picture_number= s->coded_picture_number++;
if(s->pict_type != I_TYPE && (s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL)){
av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
- assert(s->pict_type != B_TYPE); //these should have been dropped if we dont have a reference
+ assert(s->pict_type != B_TYPE); //these should have been dropped if we don't have a reference
goto alloc;
}
s->hurry_up= s->avctx->hurry_up;
s->error_resilience= avctx->error_resilience;
- /* set dequantizer, we cant do it during init as it might change for mpeg4
- and we cant do it in the header decode as init isnt called for mpeg4 there yet */
+ /* set dequantizer, we can't do it during init as it might change for mpeg4
+ and we can't do it in the header decode as init isn't called for mpeg4 there yet */
if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
- }else if(s->out_format == FMT_H263){
+ }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
- }else if(s->out_format == FMT_H261){
- s->dct_unquantize_intra = s->dct_unquantize_h261_intra;
- s->dct_unquantize_inter = s->dct_unquantize_h261_inter;
}else{
s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
XVMC_field_end(s);
}else
#endif
- if(s->unrestricted_mv && s->pict_type != B_TYPE && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
+ if(s->unrestricted_mv && s->current_picture.reference && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
draw_edges(s->current_picture.data[0], s->linesize , s->h_edge_pos , s->v_edge_pos , EDGE_WIDTH );
draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
#endif
if(s->encoding){
- /* release non refernce frames */
+ /* release non-reference frames */
for(i=0; i<MAX_PICTURE_COUNT; i++){
if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
else if(IS_16X8(mb_type))
av_log(s->avctx, AV_LOG_DEBUG, "-");
else if(IS_8X16(mb_type))
- av_log(s->avctx, AV_LOG_DEBUG, "¦");
+ av_log(s->avctx, AV_LOG_DEBUG, "|");
else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
av_log(s->avctx, AV_LOG_DEBUG, " ");
else
uint8_t *ptr;
int i;
int h_chroma_shift, v_chroma_shift;
+ const int width = s->avctx->width;
+ const int height= s->avctx->height;
+ const int mv_sample_log2= 4 - pict->motion_subsample_log2;
+ const int mv_stride= (s->mb_width << mv_sample_log2) + 1;
s->low_delay=0; //needed to see the vectors without trashing the buffers
avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
for(i=0; i<3; i++){
- memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*s->height:pict->linesize[i]*s->height >> v_chroma_shift);
+ memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
pict->data[i]= s->visualization_buffer[i];
}
pict->type= FF_BUFFER_TYPE_COPY;
if(!USES_LIST(pict->mb_type[mb_index], direction))
continue;
- //FIXME for h264
if(IS_8X8(pict->mb_type[mb_index])){
int i;
for(i=0; i<4; i++){
int sx= mb_x*16 + 4 + 8*(i&1);
int sy= mb_y*16 + 4 + 8*(i>>1);
- int xy= mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*s->b8_stride;
+ int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
- draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
+ draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
}
}else if(IS_16X8(pict->mb_type[mb_index])){
int i;
for(i=0; i<2; i++){
int sx=mb_x*16 + 8;
int sy=mb_y*16 + 4 + 8*i;
- int xy= mb_x*2 + (mb_y*2 + i)*s->b8_stride;
+ int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
int mx=(pict->motion_val[direction][xy][0]>>shift);
int my=(pict->motion_val[direction][xy][1]>>shift);
if(IS_INTERLACED(pict->mb_type[mb_index]))
my*=2;
- draw_arrow(ptr, sx, sy, mx+sx, my+sy, s->width, s->height, s->linesize, 100);
+ draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
+ }
+ }else if(IS_8X16(pict->mb_type[mb_index])){
+ int i;
+ for(i=0; i<2; i++){
+ int sx=mb_x*16 + 4 + 8*i;
+ int sy=mb_y*16 + 8;
+ int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
+ int mx=(pict->motion_val[direction][xy][0]>>shift);
+ int my=(pict->motion_val[direction][xy][1]>>shift);
+
+ if(IS_INTERLACED(pict->mb_type[mb_index]))
+ my*=2;
+
+ draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
}
}else{
int sx= mb_x*16 + 8;
int sy= mb_y*16 + 8;
- int xy= mb_x*2 + mb_y*2*s->b8_stride;
+ int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
- draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
+ draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
}
}
}
for(y=0; y<16; y++)
pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
}
+ if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
+ int dm= 1 << (mv_sample_log2-2);
+ for(i=0; i<4; i++){
+ int sx= mb_x*16 + 8*(i&1);
+ int sy= mb_y*16 + 8*(i>>1);
+ int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
+ //FIXME bidir
+ int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
+ if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
+ for(y=0; y<8; y++)
+ pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
+ if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
+ *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
+ }
+ }
if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
// hmm
static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
AVFrame *pic=NULL;
+ int64_t pts;
int i;
const int encoding_delay= s->max_b_frames;
int direct=1;
+ if(pic_arg){
+ pts= pic_arg->pts;
+ pic_arg->display_picture_number= s->input_picture_number++;
+
+ if(pts != AV_NOPTS_VALUE){
+ if(s->user_specified_pts != AV_NOPTS_VALUE){
+ int64_t time= pts;
+ int64_t last= s->user_specified_pts;
+
+ if(time <= last){
+ av_log(s->avctx, AV_LOG_ERROR, "Error, Invalid timestamp=%Ld, last=%Ld\n", pts, s->user_specified_pts);
+ return -1;
+ }
+ }
+ s->user_specified_pts= pts;
+ }else{
+ if(s->user_specified_pts != AV_NOPTS_VALUE){
+ s->user_specified_pts=
+ pts= s->user_specified_pts + 1;
+ av_log(s->avctx, AV_LOG_INFO, "Warning: AVFrame.pts=? trying to guess (%Ld)\n", pts);
+ }else{
+ pts= pic_arg->display_picture_number;
+ }
+ }
+ }
+
if(pic_arg){
if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
if(pic_arg->linesize[0] != s->linesize) direct=0;
}
}
copy_picture_attributes(s, pic, pic_arg);
-
- pic->display_picture_number= s->input_picture_number++;
-
- if(pic->pts != AV_NOPTS_VALUE){
- if(s->user_specified_pts != AV_NOPTS_VALUE){
- int64_t time= av_rescale(pic->pts, s->avctx->frame_rate, s->avctx->frame_rate_base*(int64_t)AV_TIME_BASE);
- int64_t last= av_rescale(s->user_specified_pts, s->avctx->frame_rate, s->avctx->frame_rate_base*(int64_t)AV_TIME_BASE);
-
- if(time <= last){
- av_log(s->avctx, AV_LOG_ERROR, "Error, Invalid timestamp=%Ld, last=%Ld\n", pic->pts, s->user_specified_pts);
- return -1;
- }
- }
- s->user_specified_pts= pic->pts;
- }else{
- if(s->user_specified_pts != AV_NOPTS_VALUE){
- s->user_specified_pts=
- pic->pts= s->user_specified_pts + AV_TIME_BASE*(int64_t)s->avctx->frame_rate_base / s->avctx->frame_rate;
- av_log(s->avctx, AV_LOG_INFO, "Warning: AVFrame.pts=? trying to guess (%Ld)\n", pic->pts);
- }else{
- pic->pts= av_rescale(pic->display_picture_number*(int64_t)s->avctx->frame_rate_base, AV_TIME_BASE, s->avctx->frame_rate);
- }
- }
+ pic->pts= pts; //we set this here to avoid modifiying pic_arg
}
/* shift buffer entries */
return 0;
}
+/* Frame-skip decision for the encoder: compares the candidate input frame p
+ * against the reference frame ref and returns 1 if the difference is small
+ * enough that the frame may be skipped, 0 otherwise. */
+static int skip_check(MpegEncContext *s, Picture *p, Picture *ref){
+ int x, y, plane;
+ int score=0;
+ int64_t score64=0;
+
+ /* walk Y, Cb and Cr; luma (plane 0) has twice as many 8x8 blocks per MB edge */
+ for(plane=0; plane<3; plane++){
+ const int stride= p->linesize[plane];
+ const int bw= plane ? 1 : 2;
+ for(y=0; y<s->mb_height*bw; y++){
+ for(x=0; x<s->mb_width*bw; x++){
+ /* per-8x8-block difference metric chosen via frame_skip_cmp */
+ int v= s->dsp.frame_skip_cmp[1](s, p->data[plane] + 8*(x + y*stride), ref->data[plane] + 8*(x + y*stride), stride, 8);
+
+ /* aggregate block scores; frame_skip_exp selects max / |v| / v^2 / |v^3| / v^4,
+ the two highest exponents accumulate in 64 bits to avoid overflow */
+ switch(s->avctx->frame_skip_exp){
+ case 0: score= FFMAX(score, v); break;
+ case 1: score+= ABS(v);break;
+ case 2: score+= v*v;break;
+ case 3: score64+= ABS(v*v*(int64_t)v);break;
+ case 4: score64+= v*v*(int64_t)(v*v);break;
+ }
+ }
+ }
+ }
+
+ /* fold the 32-bit accumulator into the 64-bit one if it was used */
+ if(score) score64= score;
+
+ /* skip if below the absolute threshold or below the lambda-scaled factor */
+ if(score64 < s->avctx->frame_skip_threshold)
+ return 1;
+ if(score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda)>>8))
+ return 1;
+ return 0;
+}
+
static void select_input_picture(MpegEncContext *s){
int i;
s->reordered_input_picture[i-1]= s->reordered_input_picture[i];
s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL;
- /* set next picture types & ordering */
+ /* set next picture type & ordering */
if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
s->reordered_input_picture[0]= s->input_picture[0];
s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++;
}else{
int b_frames;
+
+ if(s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor){
+ if(skip_check(s, s->input_picture[0], s->next_picture_ptr)){
+//av_log(NULL, AV_LOG_DEBUG, "skip %p %Ld\n", s->input_picture[0]->data[0], s->input_picture[0]->pts);
+
+ if(s->input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
+ for(i=0; i<4; i++)
+ s->input_picture[0]->data[i]= NULL;
+ s->input_picture[0]->type= 0;
+ }else{
+ assert( s->input_picture[0]->type==FF_BUFFER_TYPE_USER
+ || s->input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);
+ s->avctx->release_buffer(s->avctx, (AVFrame*)s->input_picture[0]);
+ }
+
+ goto no_output_pic;
+ }
+ }
+
if(s->flags&CODEC_FLAG_PASS2){
for(i=0; i<s->max_b_frames+1; i++){
int pict_num= s->input_picture[0]->display_picture_number + i;
- int pict_type= s->rc_context.entry[pict_num].new_pict_type;
- s->input_picture[i]->pict_type= pict_type;
-
- if(i + 1 >= s->rc_context.num_entries) break;
+
+ if(pict_num >= s->rc_context.num_entries)
+ break;
+ if(!s->input_picture[i]){
+ s->rc_context.entry[pict_num-1].new_pict_type = P_TYPE;
+ break;
+ }
+
+ s->input_picture[i]->pict_type=
+ s->rc_context.entry[pict_num].new_pict_type;
}
}
- if(s->input_picture[0]->pict_type){
- /* user selected pict_type */
- for(b_frames=0; b_frames<s->max_b_frames+1; b_frames++){
- if(s->input_picture[b_frames]->pict_type!=B_TYPE) break;
- }
-
- if(b_frames > s->max_b_frames){
- av_log(s->avctx, AV_LOG_ERROR, "warning, too many bframes in a row\n");
- b_frames = s->max_b_frames;
- }
- }else if(s->avctx->b_frame_strategy==0){
+ if(s->avctx->b_frame_strategy==0){
b_frames= s->max_b_frames;
while(b_frames && !s->input_picture[b_frames]) b_frames--;
}else if(s->avctx->b_frame_strategy==1){
s->input_picture[i-1]->data[0], s->linesize) + 1;
}
}
- for(i=0; i<s->max_b_frames; i++){
+ for(i=0; i<s->max_b_frames+1; i++){
if(s->input_picture[i]==NULL || s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break;
}
//static int b_count=0;
//b_count+= b_frames;
//av_log(s->avctx, AV_LOG_DEBUG, "b_frames: %d\n", b_count);
+
+ for(i= b_frames - 1; i>=0; i--){
+ int type= s->input_picture[i]->pict_type;
+ if(type && type != B_TYPE)
+ b_frames= i;
+ }
+ if(s->input_picture[b_frames]->pict_type == B_TYPE && b_frames == s->max_b_frames){
+ av_log(s->avctx, AV_LOG_ERROR, "warning, too many b frames in a row\n");
+ }
+
if(s->picture_in_gop_number + b_frames >= s->gop_size){
+ if((s->flags2 & CODEC_FLAG2_STRICT_GOP) && s->gop_size > s->picture_in_gop_number){
+ b_frames= s->gop_size - s->picture_in_gop_number - 1;
+ }else{
if(s->flags & CODEC_FLAG_CLOSED_GOP)
b_frames=0;
s->input_picture[b_frames]->pict_type= I_TYPE;
+ }
}
if( (s->flags & CODEC_FLAG_CLOSED_GOP)
}
}
}
-
+no_output_pic:
if(s->reordered_input_picture[0]){
s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE ? 3 : 0;
copy_picture(&s->new_picture, s->reordered_input_picture[0]);
if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
- // input is a shared pix, so we cant modifiy it -> alloc a new one & ensure that the shared one is reuseable
+ // input is a shared pix, so we can't modify it -> alloc a new one & ensure that the shared one is reusable
int i= ff_find_unused_picture(s, 0);
Picture *pic= &s->picture[i];
AVFrame *pic_arg = data;
int i, stuffing_count;
- if(avctx->pix_fmt != PIX_FMT_YUV420P){
+ if(avctx->pix_fmt != PIX_FMT_YUV420P && avctx->pix_fmt != PIX_FMT_YUVJ420P){
av_log(avctx, AV_LOG_ERROR, "this codec supports only YUV420P\n");
return -1;
}
int start_y= s->thread_context[i]->start_mb_y;
int end_y= s->thread_context[i]-> end_mb_y;
int h= s->mb_height;
- uint8_t *start= buf + buf_size*start_y/h;
- uint8_t *end = buf + buf_size* end_y/h;
+ uint8_t *start= buf + (size_t)(((int64_t) buf_size)*start_y/h);
+ uint8_t *end = buf + (size_t)(((int64_t) buf_size)* end_y/h);
init_put_bits(&s->thread_context[i]->pb, start, end - start);
}
avctx->error[i] += s->current_picture_ptr->error[i];
}
+ if(s->flags&CODEC_FLAG_PASS1)
+ assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits + avctx->i_tex_bits + avctx->p_tex_bits == put_bits_count(&s->pb));
flush_put_bits(&s->pb);
s->frame_bits = put_bits_count(&s->pb);
stuffing_count= ff_vbv_update(s, s->frame_bits);
if(stuffing_count){
+ if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < stuffing_count + 50){
+ av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
+ return -1;
+ }
+
switch(s->codec_id){
case CODEC_ID_MPEG1VIDEO:
case CODEC_ID_MPEG2VIDEO:
return emu;
}
+/* Half-pel motion compensation of a single w x h block when decoding at
+ * reduced resolution (avctx->lowres). Applies the motion vector
+ * (motion_x, motion_y) to src, handling picture-edge emulation and field
+ * selection, and writes the prediction to dest via pix_op.
+ * Returns 1 if the emulated-edge buffer was used, 0 otherwise. */
+static inline int hpel_motion_lowres(MpegEncContext *s,
+ uint8_t *dest, uint8_t *src,
+ int field_based, int field_select,
+ int src_x, int src_y,
+ int width, int height, int stride,
+ int h_edge_pos, int v_edge_pos,
+ int w, int h, h264_chroma_mc_func *pix_op,
+ int motion_x, int motion_y)
+{
+ const int lowres= s->avctx->lowres;
+ /* mask extracting the sub-pel fraction at this lowres level */
+ const int s_mask= (2<<lowres)-1;
+ int emu=0;
+ int sx, sy;
+
+ /* quarter-pel vectors are halved to half-pel precision here */
+ if(s->quarter_sample){
+ motion_x/=2;
+ motion_y/=2;
+ }
+
+ /* split the vector into integer position and sub-pel fraction */
+ sx= motion_x & s_mask;
+ sy= motion_y & s_mask;
+ src_x += motion_x >> (lowres+1);
+ src_y += motion_y >> (lowres+1);
+
+ src += src_y * stride + src_x;
+
+ /* if the (w+1)x(h+1) read area crosses the picture edge, build an
+ edge-replicated copy in edge_emu_buffer and read from that instead */
+ if( (unsigned)src_x > h_edge_pos - (!!sx) - w
+ || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
+ ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
+ src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
+ src= s->edge_emu_buffer;
+ emu=1;
+ }
+
+ /* rescale the fraction to the fixed 1/8-pel range expected by pix_op */
+ sx <<= 2 - lowres;
+ sy <<= 2 - lowres;
+ if(field_select)
+ src += s->linesize;
+ pix_op[lowres](dest, src, stride, h, sx, sy);
+ return emu;
+}
+
/* apply one mpeg motion vector to the three components */
static always_inline void mpeg_motion(MpegEncContext *s,
uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
pix_op[s->chroma_x_shift][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
pix_op[s->chroma_x_shift][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
}
+#if defined(CONFIG_H261_ENCODER) || defined(CONFIG_H261_DECODER)
+ if(s->out_format == FMT_H261){
+ ff_h261_loop_filter(s);
+ }
+#endif
}
/* apply one mpeg motion vector to the three components */
linesize = s->current_picture.linesize[0] << field_based;
uvlinesize = s->current_picture.linesize[1] << field_based;
+ if(s->quarter_sample){ //FIXME obviously not perfect but qpel wont work in lowres anyway
+ motion_x/=2;
+ motion_y/=2;
+ }
+
+ if(field_based){
+ motion_y += (bottom_field - field_select)*((1<<lowres)-1);
+ }
+
sx= motion_x & s_mask;
sy= motion_y & s_mask;
src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
- src_y = s->mb_y*2*block_s + (motion_y >> (lowres+1));
+ src_y =(s->mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
if (s->out_format == FMT_H263) {
- uvsx = sx | ((motion_x & 2)>>1);
- uvsy = sy | ((motion_y & 2)>>1);
+ uvsx = ((motion_x>>1) & s_mask) | (sx&1);
+ uvsy = ((motion_y>>1) & s_mask) | (sy&1);
uvsrc_x = src_x>>1;
uvsrc_y = src_y>>1;
}else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
uvsx = mx & s_mask;
uvsy = my & s_mask;
uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
- uvsrc_y = s->mb_y*block_s + (my >> (lowres+1));
+ uvsrc_y =(s->mb_y*block_s>>field_based) + (my >> (lowres+1));
}
ptr_y = ref_picture[0] + src_y * linesize + src_x;
}
}
+ if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
+ dest_y += s->linesize;
+ dest_cb+= s->uvlinesize;
+ dest_cr+= s->uvlinesize;
+ }
+
+ if(field_select){
+ ptr_y += s->linesize;
+ ptr_cb+= s->uvlinesize;
+ ptr_cr+= s->uvlinesize;
+ }
+
sx <<= 2 - lowres;
sy <<= 2 - lowres;
pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
pix_op[lowres](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
pix_op[lowres](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
}
+ //FIXME h261 lowres loop filter
}
//FIXME move to dsputil, avg variant, 16x16 version
pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
}
+/* Chroma motion compensation for 8x8 (4MV) macroblocks when decoding at
+ * reduced resolution. mx/my are the summed luma vectors of the four 8x8
+ * blocks; they are reduced to one chroma vector with the H.263 rounding
+ * rule and applied to both Cb and Cr. */
+static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
+ uint8_t *dest_cb, uint8_t *dest_cr,
+ uint8_t **ref_picture,
+ h264_chroma_mc_func *pix_op,
+ int mx, int my){
+ const int lowres= s->avctx->lowres;
+ const int block_s= 8>>lowres;
+ /* mask extracting the sub-pel fraction at this lowres level */
+ const int s_mask= (2<<lowres)-1;
+ const int h_edge_pos = s->h_edge_pos >> (lowres+1);
+ const int v_edge_pos = s->v_edge_pos >> (lowres+1);
+ int emu=0, src_x, src_y, offset, sx, sy;
+ uint8_t *ptr;
+
+ /* quarter-pel vectors are halved to half-pel precision here */
+ if(s->quarter_sample){
+ mx/=2;
+ my/=2;
+ }
+
+ /* In case of 8X8, we construct a single chroma motion vector
+ with a special rounding */
+ mx= ff_h263_round_chroma(mx);
+ my= ff_h263_round_chroma(my);
+
+ /* split the vector into integer position and sub-pel fraction */
+ sx= mx & s_mask;
+ sy= my & s_mask;
+ src_x = s->mb_x*block_s + (mx >> (lowres+1));
+ src_y = s->mb_y*block_s + (my >> (lowres+1));
+
+ offset = src_y * s->uvlinesize + src_x;
+ ptr = ref_picture[1] + offset;
+ if(s->flags&CODEC_FLAG_EMU_EDGE){
+ if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
+ || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
+ /* NOTE(review): the 9x9 emulation size matches block_s+1 only for
+ lowres==0 — confirm this is intended for higher lowres levels */
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
+ ptr= s->edge_emu_buffer;
+ emu=1;
+ }
+ }
+ /* rescale the fraction to the fixed 1/8-pel range expected by pix_op */
+ sx <<= 2 - lowres;
+ sy <<= 2 - lowres;
+ pix_op[lowres](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
+
+ /* Cr uses the same offset; reuse the edge-emulation decision made for Cb */
+ ptr = ref_picture[2] + offset;
+ if(emu){
+ ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
+ ptr= s->edge_emu_buffer;
+ }
+ pix_op[lowres](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
+}
+
/**
- * motion compesation of a single macroblock
+ * motion compensation of a single macroblock
* @param s context
* @param dest_y luma destination pointer
* @param dest_cb chroma cb/u destination pointer
const int mot_stride= s->b8_stride;
const int mot_xy= mb_x*2 + mb_y*2*mot_stride;
- assert(!s->mb_skiped);
+ assert(!s->mb_skipped);
memcpy(mv_cache[1][1], s->current_picture.motion_val[0][mot_xy ], sizeof(int16_t)*4);
memcpy(mv_cache[2][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);
switch(s->mv_type) {
case MV_TYPE_16X16:
-#ifdef CONFIG_RISKY
if(s->mcsel){
if(s->real_sprite_warping_points==1){
gmc1_motion(s, dest_y, dest_cb, dest_cr,
ref_picture, pix_op,
s->mv[dir][0][0], s->mv[dir][0][1], 16);
}else
-#endif
{
mpeg_motion(s, dest_y, dest_cb, dest_cr,
0, 0, 0,
}
/**
- * motion compesation of a single macroblock
+ * motion compensation of a single macroblock
* @param s context
* @param dest_y luma destination pointer
* @param dest_cb chroma cb/u destination pointer
int dir, uint8_t **ref_picture,
h264_chroma_mc_func *pix_op)
{
- assert(s->mv_type == MV_TYPE_16X16);
- mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
- 0, 0, 0,
- ref_picture, pix_op,
- s->mv[dir][0][0], s->mv[dir][0][1], 16>>s->avctx->lowres);
+ int mx, my;
+ int mb_x, mb_y, i;
+ const int lowres= s->avctx->lowres;
+ const int block_s= 8>>lowres;
+
+ mb_x = s->mb_x;
+ mb_y = s->mb_y;
+
+ switch(s->mv_type) {
+ case MV_TYPE_16X16:
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 0, 0, 0,
+ ref_picture, pix_op,
+ s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s);
+ break;
+ case MV_TYPE_8X8:
+ mx = 0;
+ my = 0;
+ for(i=0;i<4;i++) {
+ hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
+ ref_picture[0], 0, 0,
+ (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
+ s->width, s->height, s->linesize,
+ s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
+ block_s, block_s, pix_op,
+ s->mv[dir][i][0], s->mv[dir][i][1]);
+
+ mx += s->mv[dir][i][0];
+ my += s->mv[dir][i][1];
+ }
+
+ if(!(s->flags&CODEC_FLAG_GRAY))
+ chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
+ break;
+ case MV_TYPE_FIELD:
+ if (s->picture_structure == PICT_FRAME) {
+ /* top field */
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 1, 0, s->field_select[dir][0],
+ ref_picture, pix_op,
+ s->mv[dir][0][0], s->mv[dir][0][1], block_s);
+ /* bottom field */
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 1, 1, s->field_select[dir][1],
+ ref_picture, pix_op,
+ s->mv[dir][1][0], s->mv[dir][1][1], block_s);
+ } else {
+ if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != B_TYPE && !s->first_field){
+ ref_picture= s->current_picture_ptr->data;
+ }
+
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 0, 0, s->field_select[dir][0],
+ ref_picture, pix_op,
+ s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s);
+ }
+ break;
+ case MV_TYPE_16X8:
+ for(i=0; i<2; i++){
+ uint8_t ** ref2picture;
+
+ if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == B_TYPE || s->first_field){
+ ref2picture= ref_picture;
+ }else{
+ ref2picture= s->current_picture_ptr->data;
+ }
+
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 0, 0, s->field_select[dir][i],
+ ref2picture, pix_op,
+ s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s);
+
+ dest_y += 2*block_s*s->linesize;
+ dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
+ dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
+ }
+ break;
+ case MV_TYPE_DMV:
+ if(s->picture_structure == PICT_FRAME){
+ for(i=0; i<2; i++){
+ int j;
+ for(j=0; j<2; j++){
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 1, j, j^i,
+ ref_picture, pix_op,
+ s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s);
+ }
+ pix_op = s->dsp.avg_h264_chroma_pixels_tab;
+ }
+ }else{
+ for(i=0; i<2; i++){
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 0, 0, s->picture_structure != i+1,
+ ref_picture, pix_op,
+ s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s);
+
+ // after put we make avg of the same block
+ pix_op = s->dsp.avg_h264_chroma_pixels_tab;
+
+ //opposite parity is always in the same frame if this is second field
+ if(!s->first_field){
+ ref_picture = s->current_picture_ptr->data;
+ }
+ }
+ }
+ break;
+ default: assert(0);
+ }
}
/* put block[] to dest[] */
int dct_linesize, dct_offset;
op_pixels_func (*op_pix)[4];
qpel_mc_func (*op_qpix)[16];
- const int linesize= s->current_picture.linesize[0]; //not s->linesize as this woulnd be wrong for field pics
+ const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
const int uvlinesize= s->current_picture.linesize[1];
const int readable= s->pict_type != B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
assert(age);
- if (s->mb_skiped) {
- s->mb_skiped= 0;
+ if (s->mb_skipped) {
+ s->mb_skipped= 0;
assert(s->pict_type!=I_TYPE);
- (*mbskip_ptr) ++; /* indicate that this time we skiped it */
+ (*mbskip_ptr) ++; /* indicate that this time we skipped it */
if(*mbskip_ptr >99) *mbskip_ptr= 99;
/* if previous was skipped too, then nothing to do ! */
if (!s->mb_intra) {
/* motion handling */
- /* decoding or more than one mb_type (MC was allready done otherwise) */
+ /* decoding or more than one mb_type (MC was already done otherwise) */
if(!s->encoding){
if(lowres_flag){
h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
}
/* skip dequant / idct if we are really late ;) */
- if(s->hurry_up>1) return;
+ if(s->hurry_up>1) goto skip_idct;
+ if(s->avctx->skip_idct){
+ if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == B_TYPE)
+ ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != I_TYPE)
+ || s->avctx->skip_idct >= AVDISCARD_ALL)
+ goto skip_idct;
+ }
/* add dct residue */
if(s->encoding || !( s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
}
}//fi gray
}
-#ifdef CONFIG_RISKY
else{
ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
}
-#endif
} else {
/* dct only in intra block */
if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
}//gray
}
}
+skip_idct:
if(!readable){
s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
}
if(overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
- av_log(s->avctx, AV_LOG_INFO, "warning, cliping %d dct coefficents to %d..%d\n", overflow, minlevel, maxlevel);
+ av_log(s->avctx, AV_LOG_INFO, "warning, clipping %d dct coefficients to %d..%d\n", overflow, minlevel, maxlevel);
}
#endif //CONFIG_ENCODERS
if(s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
}
- h= FFMIN(h, s->height - y);
+ h= FFMIN(h, s->avctx->height - y);
if(s->pict_type==B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
src= (AVFrame*)s->current_picture_ptr;
}
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
- const int linesize= s->current_picture.linesize[0]; //not s->linesize as this woulnd be wrong for field pics
+ const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
const int uvlinesize= s->current_picture.linesize[1];
const int mb_size= 4 - s->avctx->lowres;
case CODEC_ID_MPEG1VIDEO:
case CODEC_ID_MPEG2VIDEO:
mpeg1_encode_mb(s, s->block, motion_x, motion_y); break;
-#ifdef CONFIG_RISKY
case CODEC_ID_MPEG4:
mpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
case CODEC_ID_MSMPEG4V2:
msmpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
case CODEC_ID_WMV2:
ff_wmv2_encode_mb(s, s->block, motion_x, motion_y); break;
+#ifdef CONFIG_H261_ENCODER
+ case CODEC_ID_H261:
+ ff_h261_encode_mb(s, s->block, motion_x, motion_y); break;
+#endif
case CODEC_ID_H263:
case CODEC_ID_H263P:
case CODEC_ID_FLV1:
case CODEC_ID_RV10:
+ case CODEC_ID_RV20:
h263_encode_mb(s, s->block, motion_x, motion_y); break;
-#endif
case CODEC_ID_MJPEG:
mjpeg_encode_mb(s, s->block); break;
default:
}
s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
+ s->mb_x= s->mb_y= 0;
+
s->parse_context.state= -1;
s->parse_context.frame_start_found= 0;
s->parse_context.overread= 0;
d->misc_bits= s->misc_bits;
d->last_bits= 0;
- d->mb_skiped= 0;
+ d->mb_skipped= 0;
d->qscale= s->qscale;
d->dquant= s->dquant;
}
d->misc_bits= s->misc_bits;
d->mb_intra= s->mb_intra;
- d->mb_skiped= s->mb_skiped;
+ d->mb_skipped= s->mb_skipped;
d->mv_type= s->mv_type;
d->mv_dir= s->mv_dir;
d->pb= s->pb;
align_put_bits(&s->pb);
flush_put_bits(&s->pb);
+
+ if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
+ s->misc_bits+= get_bits_diff(s);
}
static int encode_thread(AVCodecContext *c, void *arg){
int mb_x, mb_y, pdif = 0;
int i, j;
MpegEncContext best_s, backup_s;
- uint8_t bit_buf[2][3000];
- uint8_t bit_buf2[2][3000];
- uint8_t bit_buf_tex[2][3000];
+ uint8_t bit_buf[2][MAX_MB_BYTES];
+ uint8_t bit_buf2[2][MAX_MB_BYTES];
+ uint8_t bit_buf_tex[2][MAX_MB_BYTES];
PutBitContext pb[2], pb2[2], tex_pb[2];
//printf("%d->%d\n", s->resync_mb_y, s->end_mb_y);
for(i=0; i<2; i++){
- init_put_bits(&pb [i], bit_buf [i], 3000);
- init_put_bits(&pb2 [i], bit_buf2 [i], 3000);
- init_put_bits(&tex_pb[i], bit_buf_tex[i], 3000);
+ init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
+ init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
+ init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
}
s->last_bits= put_bits_count(&s->pb);
s->last_mv_dir = 0;
-#ifdef CONFIG_RISKY
switch(s->codec_id){
case CODEC_ID_H263:
case CODEC_ID_H263P:
ff_mpeg4_init_partitions(s);
break;
}
-#endif
s->resync_mb_x=0;
s->resync_mb_y=0;
ff_init_block_index(s);
for(mb_x=0; mb_x < s->mb_width; mb_x++) {
- const int xy= mb_y*s->mb_stride + mb_x;
+ int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
int mb_type= s->mb_type[xy];
// int d;
int dmin= INT_MAX;
int dir;
+ if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
+ av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+ return -1;
+ }
+ if(s->data_partitioning){
+ if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
+ || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
+ av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+ return -1;
+ }
+ }
+
s->mb_x = mb_x;
+ s->mb_y = mb_y; // moved into loop, can get changed by H.261
ff_update_block_index(s);
+#ifdef CONFIG_H261_ENCODER
+ if(s->codec_id == CODEC_ID_H261){
+ ff_h261_reorder_mb_index(s);
+ xy= s->mb_y*s->mb_stride + s->mb_x;
+ mb_type= s->mb_type[xy];
+ }
+#endif
+
/* write gob / video packet header */
-#ifdef CONFIG_RISKY
if(s->rtp_mode){
int current_packet_size, is_gob_start;
current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob;
if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
- int r= put_bits_count(&s->pb)/8 + s->picture_number + s->codec_id + s->mb_x + s->mb_y;
+ int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
int d= 100 / s->avctx->error_rate;
if(r % d == 0){
current_packet_size=0;
assert(pbBufPtr(&s->pb) == s->ptr_lastgob);
}
}
-
- if (s->avctx->rtp_callback)
- s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, 0);
+
+ if (s->avctx->rtp_callback){
+ int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
+ s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
+ }
switch(s->codec_id){
case CODEC_ID_MPEG4:
s->resync_mb_y=mb_y;
}
}
-#endif
if( (s->resync_mb_x == s->mb_x)
&& s->resync_mb_y+1 == s->mb_y){
s->first_slice_line=0;
}
- s->mb_skiped=0;
+ s->mb_skipped=0;
s->dquant=0; //only for QP_RD
if(mb_type & (mb_type-1) || (s->flags & CODEC_FLAG_QP_RD)){ // more than 1 MB type possible or CODEC_FLAG_QP_RD
encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
&dmin, &next_block, 0, 0);
}
- if(mb_type&CANDIDATE_MB_TYPE_SKIPED){
+ if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
s->mb_intra= 0;
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
- encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPED, pb, pb2, tex_pb,
+ encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
&dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
}
if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
s->mb_intra= 0;
-#ifdef CONFIG_RISKY
ff_mpeg4_set_direct_mv(s, mx, my);
-#endif
encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
&dmin, &next_block, mx, my);
}
}
s->last_bits= put_bits_count(&s->pb);
-#ifdef CONFIG_RISKY
if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE)
ff_h263_update_motion_val(s);
-#endif
if(next_block==0){ //FIXME 16 vs linesize16
s->dsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
s->mb_intra= 0;
motion_x=s->b_direct_mv_table[xy][0];
motion_y=s->b_direct_mv_table[xy][1];
-#ifdef CONFIG_RISKY
ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
-#endif
break;
case CANDIDATE_MB_TYPE_BIDIR:
s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
encode_mb(s, motion_x, motion_y);
- // RAL: Update last macrobloc type
+ // RAL: Update last macroblock type
s->last_mv_dir = s->mv_dir;
-#ifdef CONFIG_RISKY
if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE)
ff_h263_update_motion_val(s);
-#endif
MPV_decode_mb(s, s->block);
}
s, s->new_picture .data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,
s->dest[2], w>>1, h>>1, s->uvlinesize);
}
- if(s->loop_filter)
- ff_h263_loop_filter(s);
+ if(s->loop_filter){
+ if(s->out_format == FMT_H263)
+ ff_h263_loop_filter(s);
+ }
//printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_stride, put_bits_count(&s->pb));
}
}
-#ifdef CONFIG_RISKY
- //not beautifull here but we must write it before flushing so it has to be here
+ //not beautiful here but we must write it before flushing so it has to be here
if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE)
msmpeg4_encode_ext_header(s);
-#endif
write_slice_end(s);
/* Send the last GOB if RTP */
if (s->avctx->rtp_callback) {
+ int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
pdif = pbBufPtr(&s->pb) - s->ptr_lastgob;
/* Call the RTP callback to send the last GOB */
emms_c();
- s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, 0);
+ s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
}
return 0;
s->me.mb_var_sum_temp =
s->me.mc_mb_var_sum_temp = 0;
-#ifdef CONFIG_RISKY
/* we need to initialize some time vars before we can encode b-frames */
// RAL: Condition added for MPEG1VIDEO
if (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->h263_msmpeg4))
ff_set_mpeg4_time(s, s->picture_number); //FIXME rename and use has_b_frames or similar
-#endif
s->me.scene_change_score=0;
s->no_rounding ^= 1;
}
- s->mb_intra=0; //for the rate distoration & bit compare functions
+ s->mb_intra=0; //for the rate distortion & bit compare functions
for(i=1; i<s->avctx->thread_count; i++){
ff_update_duplicate_context(s->thread_context[i], s);
}
/* Estimate motion for every MB */
if(s->pict_type != I_TYPE){
+ s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
+ s->lambda2= (s->lambda2* s->avctx->me_penalty_compensation + 128)>>8;
if(s->pict_type != B_TYPE && s->avctx->me_threshold==0){
if((s->avctx->pre_me && s->last_non_b_pict_type==I_TYPE) || s->avctx->pre_me==2){
s->avctx->execute(s->avctx, pre_estimate_motion_thread, (void**)&(s->thread_context[0]), NULL, s->avctx->thread_count);
s->current_picture.quality = ff_rate_estimate_qscale(s); //FIXME pic_ptr
if(s->adaptive_quant){
-#ifdef CONFIG_RISKY
switch(s->codec_id){
case CODEC_ID_MPEG4:
ff_clean_mpeg4_qscales(s);
ff_clean_h263_qscales(s);
break;
}
-#endif
s->lambda= s->lambda_table[0];
//FIXME broken
update_qscale(s);
if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==I_TYPE && !(s->flags & CODEC_FLAG_QSCALE))
- s->qscale= 3; //reduce cliping problems
+ s->qscale= 3; //reduce clipping problems
if (s->out_format == FMT_MJPEG) {
/* for mjpeg, we do include qscale in the matrix */
for(i=1;i<64;i++){
int j= s->dsp.idct_permutation[i];
- s->intra_matrix[j] = CLAMP_TO_8BIT((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
+ s->intra_matrix[j] = clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3) & 0xFF;
}
convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
- s->intra_matrix, s->intra_quant_bias, 8, 8);
+ s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
s->qscale= 8;
}
case FMT_MJPEG:
mjpeg_picture_header(s);
break;
-#ifdef CONFIG_RISKY
+#ifdef CONFIG_H261_ENCODER
+ case FMT_H261:
+ ff_h261_encode_picture_header(s, picture_number);
+ break;
+#endif
case FMT_H263:
if (s->codec_id == CODEC_ID_WMV2)
ff_wmv2_encode_picture_header(s, picture_number);
msmpeg4_encode_picture_header(s, picture_number);
else if (s->h263_pred)
mpeg4_encode_picture_header(s, picture_number);
+#ifdef CONFIG_RV10_ENCODER
else if (s->codec_id == CODEC_ID_RV10)
rv10_encode_picture_header(s, picture_number);
+#endif
+#ifdef CONFIG_RV20_ENCODER
+ else if (s->codec_id == CODEC_ID_RV20)
+ rv20_encode_picture_header(s, picture_number);
+#endif
else if (s->codec_id == CODEC_ID_FLV1)
ff_flv_encode_picture_header(s, picture_number);
else
h263_encode_picture_header(s, picture_number);
break;
-#endif
case FMT_MPEG1:
mpeg1_encode_picture_header(s, picture_number);
break;
}
}
- *overflow= s->max_qcoeff < max; //overflow might have happend
+ *overflow= s->max_qcoeff < max; //overflow might have happened
if(last_non_zero < start_i){
memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
DCTELEM *block, int16_t *weight, DCTELEM *orig,
int n, int qscale){
int16_t rem[64];
- DCTELEM d1[64];
+ DCTELEM d1[64] __align16;
const int *qmat;
const uint8_t *scantable= s->intra_scantable.scantable;
const uint8_t *perm_scantable= s->intra_scantable.permutated;
#endif
dc += (1<<(RECON_SHIFT-1));
for(i=0; i<64; i++){
- rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig dirrectly insteadof copying to rem[]
+ rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig directly instead of copying to rem[]
}
#ifdef REFINE_STATS
STOP_TIMER("memset rem[]")}
block[j]=0;
}
}
- *overflow= s->max_qcoeff < max; //overflow might have happend
+ *overflow= s->max_qcoeff < max; //overflow might have happened
/* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
}
}
-static void dct_unquantize_h261_intra_c(MpegEncContext *s,
- DCTELEM *block, int n, int qscale)
-{
- int i, level, even;
- int nCoeffs;
-
- assert(s->block_last_index[n]>=0);
-
- if (n < 4)
- block[0] = block[0] * s->y_dc_scale;
- else
- block[0] = block[0] * s->c_dc_scale;
- even = (qscale & 1)^1;
- nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
-
- for(i=1; i<=nCoeffs; i++){
- level = block[i];
- if (level){
- if (level < 0){
- level = qscale * ((level << 1) - 1) + even;
- }else{
- level = qscale * ((level << 1) + 1) - even;
- }
- }
- block[i] = level;
- }
-}
-
-static void dct_unquantize_h261_inter_c(MpegEncContext *s,
- DCTELEM *block, int n, int qscale)
-{
- int i, level, even;
- int nCoeffs;
-
- assert(s->block_last_index[n]>=0);
-
- even = (qscale & 1)^1;
-
- nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
-
- for(i=0; i<=nCoeffs; i++){
- level = block[i];
- if (level){
- if (level < 0){
- level = qscale * ((level << 1) - 1) + even;
- }else{
- level = qscale * ((level << 1) + 1) - even;
- }
- }
- block[i] = level;
- }
-}
-
-static const AVOption mpeg4_options[] =
-{
- AVOPTION_CODEC_INT("bitrate", "desired video bitrate", bit_rate, 4, 240000000, 800000),
- AVOPTION_CODEC_INT("ratetol", "number of bits the bitstream is allowed to diverge from the reference"
- "the reference can be CBR (for CBR pass1) or VBR (for pass2)",
- bit_rate_tolerance, 4, 240000000, 8000),
- AVOPTION_CODEC_INT("qmin", "minimum quantizer", qmin, 1, 31, 2),
- AVOPTION_CODEC_INT("qmax", "maximum quantizer", qmax, 1, 31, 31),
- AVOPTION_CODEC_STRING("rc_eq", "rate control equation",
- rc_eq, "tex^qComp,option1,options2", 0),
- AVOPTION_CODEC_INT("rc_minrate", "rate control minimum bitrate",
- rc_min_rate, 4, 24000000, 0),
- AVOPTION_CODEC_INT("rc_maxrate", "rate control maximum bitrate",
- rc_max_rate, 4, 24000000, 0),
- AVOPTION_CODEC_DOUBLE("rc_buf_aggresivity", "rate control buffer aggresivity",
- rc_buffer_aggressivity, 4, 24000000, 0),
- AVOPTION_CODEC_DOUBLE("rc_initial_cplx", "initial complexity for pass1 ratecontrol",
- rc_initial_cplx, 0., 9999999., 0),
- AVOPTION_CODEC_DOUBLE("i_quant_factor", "qscale factor between p and i frames",
- i_quant_factor, 0., 0., 0),
- AVOPTION_CODEC_DOUBLE("i_quant_offset", "qscale offset between p and i frames",
- i_quant_factor, -999999., 999999., 0),
- AVOPTION_CODEC_INT("dct_algo", "dct alghorithm",
- dct_algo, 0, 5, 0), // fixme - "Auto,FastInt,Int,MMX,MLib,Altivec"
- AVOPTION_CODEC_DOUBLE("lumi_masking", "luminance masking",
- lumi_masking, 0., 999999., 0),
- AVOPTION_CODEC_DOUBLE("temporal_cplx_masking", "temporary complexity masking",
- temporal_cplx_masking, 0., 999999., 0),
- AVOPTION_CODEC_DOUBLE("spatial_cplx_masking", "spatial complexity masking",
- spatial_cplx_masking, 0., 999999., 0),
- AVOPTION_CODEC_DOUBLE("p_masking", "p block masking",
- p_masking, 0., 999999., 0),
- AVOPTION_CODEC_DOUBLE("dark_masking", "darkness masking",
- dark_masking, 0., 999999., 0),
- AVOPTION_CODEC_INT("idct_algo", "idct alghorithm",
- idct_algo, 0, 8, 0), // fixme - "Auto,Int,Simple,SimpleMMX,LibMPEG2MMX,PS2,MLib,ARM,Altivec"
-
- AVOPTION_CODEC_INT("mb_qmin", "minimum MB quantizer",
- mb_qmin, 0, 8, 0),
- AVOPTION_CODEC_INT("mb_qmax", "maximum MB quantizer",
- mb_qmin, 0, 8, 0),
-
- AVOPTION_CODEC_INT("me_cmp", "ME compare function",
- me_cmp, 0, 24000000, 0),
- AVOPTION_CODEC_INT("me_sub_cmp", "subpixel ME compare function",
- me_sub_cmp, 0, 24000000, 0),
-
-
- AVOPTION_CODEC_INT("dia_size", "ME diamond size & shape",
- dia_size, 0, 24000000, 0),
- AVOPTION_CODEC_INT("last_predictor_count", "amount of previous MV predictors",
- last_predictor_count, 0, 24000000, 0),
-
- AVOPTION_CODEC_INT("pre_me", "pre pass for ME",
- pre_me, 0, 24000000, 0),
- AVOPTION_CODEC_INT("me_pre_cmp", "ME pre pass compare function",
- me_pre_cmp, 0, 24000000, 0),
-
- AVOPTION_CODEC_INT("me_range", "maximum ME search range",
- me_range, 0, 24000000, 0),
- AVOPTION_CODEC_INT("pre_dia_size", "ME pre pass diamod size & shape",
- pre_dia_size, 0, 24000000, 0),
- AVOPTION_CODEC_INT("me_subpel_quality", "subpel ME quality",
- me_subpel_quality, 0, 24000000, 0),
- AVOPTION_CODEC_INT("me_range", "maximum ME search range",
- me_range, 0, 24000000, 0),
- AVOPTION_CODEC_FLAG("psnr", "calculate PSNR of compressed frames",
- flags, CODEC_FLAG_PSNR, 0),
- AVOPTION_CODEC_RCOVERRIDE("rc_override", "ratecontrol override (=startframe,endframe,qscale,quality_factor)",
- rc_override),
- AVOPTION_SUB(avoptions_common),
- AVOPTION_END()
-};
-
#ifdef CONFIG_ENCODERS
-#ifdef CONFIG_RISKY
AVCodec h263_encoder = {
"h263",
CODEC_TYPE_VIDEO,
MPV_encode_init,
MPV_encode_picture,
MPV_encode_end,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
};
AVCodec h263p_encoder = {
MPV_encode_init,
MPV_encode_picture,
MPV_encode_end,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
};
AVCodec flv_encoder = {
MPV_encode_init,
MPV_encode_picture,
MPV_encode_end,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
};
AVCodec rv10_encoder = {
MPV_encode_init,
MPV_encode_picture,
MPV_encode_end,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
+};
+
+/* RV20 encoder: reuses the same generic MPV_encode_* entry points as the
+   other MPEG/H.263-family encoders declared in this file */
+AVCodec rv20_encoder = {
+ "rv20",
+ CODEC_TYPE_VIDEO,
+ CODEC_ID_RV20,
+ sizeof(MpegEncContext),
+ MPV_encode_init,
+ MPV_encode_picture,
+ MPV_encode_end,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
+};
AVCodec mpeg4_encoder = {
MPV_encode_init,
MPV_encode_picture,
MPV_encode_end,
- .options = mpeg4_options,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
.capabilities= CODEC_CAP_DELAY,
};
MPV_encode_init,
MPV_encode_picture,
MPV_encode_end,
- .options = mpeg4_options,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
};
AVCodec msmpeg4v2_encoder = {
MPV_encode_init,
MPV_encode_picture,
MPV_encode_end,
- .options = mpeg4_options,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
};
AVCodec msmpeg4v3_encoder = {
MPV_encode_init,
MPV_encode_picture,
MPV_encode_end,
- .options = mpeg4_options,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
};
AVCodec wmv1_encoder = {
MPV_encode_init,
MPV_encode_picture,
MPV_encode_end,
- .options = mpeg4_options,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
};
-#endif
-
AVCodec mjpeg_encoder = {
"mjpeg",
CODEC_TYPE_VIDEO,
MPV_encode_init,
MPV_encode_picture,
MPV_encode_end,
+ .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUVJ420P, -1},
};
#endif //CONFIG_ENCODERS