2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/mathematics.h"
32 #include "libavutil/opt.h"
35 #include "mpegvideo.h"
41 #include "aandcttab.h"
43 #include "mpeg4video.h"
45 #include "bytestream.h"
51 static int encode_picture(MpegEncContext *s, int picture_number);
52 static int dct_quantize_refine(MpegEncContext *s, DCTELEM *block, int16_t *weight, DCTELEM *orig, int n, int qscale);
53 static int sse_mb(MpegEncContext *s);
54 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
55 static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
57 /* enable all paranoid tests for rounding, overflows, etc... */
62 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
63 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
65 const AVOption ff_mpv_generic_options[] = {
/* Precompute per-qscale quantization multiplier tables from a quant matrix.
 * For each qscale in [qmin, qmax] this fills qmat (32-bit multipliers) and,
 * on the generic path, qmat16 ([0] = multiplier, [1] = rounding bias) so the
 * quantizer can replace a division by quant_matrix[j] * qscale with a
 * multiply + shift. Which fixed-point layout is used depends on the fdct
 * implementation selected in dsp (islow/faan vs. ifast vs. generic).
 * NOTE(review): this extract is missing interior lines (declarations,
 * closing braces, the shift/overflow bookkeeping) — verify against the
 * full file before relying on details not visible here. */
70 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
71 uint16_t (*qmat16)[2][64],
72 const uint16_t *quant_matrix,
73 int bias, int qmin, int qmax, int intra)
78 for (qscale = qmin; qscale <= qmax; qscale++) {
/* Accurate-IDCT fdcts: matrix entries are used unscaled. */
80 if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
81 dsp->fdct == ff_jpeg_fdct_islow_10 ||
82 dsp->fdct == ff_faandct) {
83 for (i = 0; i < 64; i++) {
/* j: table index after IDCT coefficient permutation. */
84 const int j = dsp->idct_permutation[i];
85 /* 16 <= qscale * quant_matrix[i] <= 7905
86 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
87 * 19952 <= x <= 249205026
88 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
89 * 3444240 >= (1 << 36) / (x) >= 275 */
91 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
92 (qscale * quant_matrix[j]));
/* AAN ifast fdct leaves ff_aanscales[] folded into the output,
 * so the reciprocal must absorb that scale (extra 14-bit shift). */
94 } else if (dsp->fdct == ff_fdct_ifast) {
95 for (i = 0; i < 64; i++) {
96 const int j = dsp->idct_permutation[i];
97 /* 16 <= qscale * quant_matrix[i] <= 7905
98 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
99 * 19952 <= x <= 249205026
100 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
101 * 3444240 >= (1 << 36) / (x) >= 275 */
103 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
104 (ff_aanscales[i] * qscale *
/* Generic path: also build the 16-bit MMX-style tables. */
108 for (i = 0; i < 64; i++) {
109 const int j = dsp->idct_permutation[i];
110 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
111 * Assume x = qscale * quant_matrix[i]
113 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
114 * so 32768 >= (1 << 19) / (x) >= 67 */
115 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
116 (qscale * quant_matrix[j]));
117 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
118 // (qscale * quant_matrix[i]);
119 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
120 (qscale * quant_matrix[j]);
/* Clamp: 0 or exactly 128*256 would break the 16-bit multiplier. */
122 if (qmat16[qscale][0][i] == 0 ||
123 qmat16[qscale][0][i] == 128 * 256)
124 qmat16[qscale][0][i] = 128 * 256 - 1;
/* [1] holds the per-coefficient rounding bias matched to [0]. */
125 qmat16[qscale][1][i] =
126 ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
127 qmat16[qscale][0][i]);
/* Overflow guard: shrink the working shift while max * multiplier
 * would exceed INT_MAX (skips the DC term for intra matrices). */
131 for (i = intra; i < 64; i++) {
133 if (dsp->fdct == ff_fdct_ifast) {
134 max = (8191LL * ff_aanscales[i]) >> 14;
136 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
142 av_log(NULL, AV_LOG_INFO,
143 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive the current quantizer from the rate-control lambda and keep the
 * squared-lambda cache in sync.  The 139 / (1<<7) factor is the fixed-point
 * lambda->qscale conversion used throughout this encoder. */
148 static inline void update_qscale(MpegEncContext *s)
150 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
151 (FF_LAMBDA_SHIFT + 7);
/* Clamp to the user-requested quantizer range. */
152 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
/* lambda2 = round(lambda^2 / FF_LAMBDA_SCALE); shift amount is on a
 * line not visible in this extract. */
154 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Emit a 64-entry quantization matrix to the bitstream, one byte per
 * coefficient, in zigzag scan order (the order the MPEG headers expect).
 * NOTE(review): the branch handling a NULL/absent matrix, if any, is not
 * visible in this extract. */
158 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
164 for (i = 0; i < 64; i++) {
165 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
172 * init s->current_picture.qscale_table from s->lambda_table
/* Converts each macroblock's lambda into a qscale value (same fixed-point
 * conversion as update_qscale) and clips it to [qmin, qmax]. Indices go
 * through mb_index2xy to map mb order -> raster position. */
174 void ff_init_qscale_tab(MpegEncContext *s)
176 int8_t * const qscale_table = s->current_picture.f.qscale_table;
179 for (i = 0; i < s->mb_num; i++) {
180 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
181 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
182 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy frame-level metadata (type, quality, numbering, interlacing flags)
 * from src to dst, and — when motion-estimation thresholding is enabled —
 * also copy the per-MB motion vectors, mb_type and ref_index arrays so a
 * later pass can reuse them. Only logs (does not fail) when src is missing
 * the expected side data. */
187 static void copy_picture_attributes(MpegEncContext *s,
193 dst->pict_type = src->pict_type;
194 dst->quality = src->quality;
195 dst->coded_picture_number = src->coded_picture_number;
196 dst->display_picture_number = src->display_picture_number;
197 //dst->reference = src->reference;
199 dst->interlaced_frame = src->interlaced_frame;
200 dst->top_field_first = src->top_field_first;
/* Motion side data only matters when me_threshold is in use. */
202 if (s->avctx->me_threshold) {
203 if (!src->motion_val[0])
204 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n");
206 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n");
207 if (!src->ref_index[0])
208 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
/* Mismatched subsample resolution would make the memcpy below wrong. */
209 if (src->motion_subsample_log2 != dst->motion_subsample_log2)
210 av_log(s->avctx, AV_LOG_ERROR,
211 "AVFrame.motion_subsample_log2 doesn't match! (%d!=%d)\n",
212 src->motion_subsample_log2, dst->motion_subsample_log2);
214 memcpy(dst->mb_type, src->mb_type,
215 s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));
/* Two motion-val/ref-index planes: forward and backward prediction. */
217 for (i = 0; i < 2; i++) {
218 int stride = ((16 * s->mb_width ) >>
219 src->motion_subsample_log2) + 1;
220 int height = ((16 * s->mb_height) >> src->motion_subsample_log2);
/* Skip the copy when src and dst already share the same buffer. */
222 if (src->motion_val[i] &&
223 src->motion_val[i] != dst->motion_val[i]) {
224 memcpy(dst->motion_val[i], src->motion_val[i],
225 2 * stride * height * sizeof(int16_t));
227 if (src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]) {
228 memcpy(dst->ref_index[i], src->ref_index[i],
229 s->mb_stride * 4 * s->mb_height * sizeof(int8_t));
/* Propagate fields that motion estimation may have changed from the master
 * context back into a per-thread duplicate context.
 * NOTE(review): more COPY() lines exist in the full file; only a subset is
 * visible in this extract. */
235 static void update_duplicate_context_after_me(MpegEncContext *dst,
238 #define COPY(a) dst->a= src->a
240 COPY(current_picture);
246 COPY(picture_in_gop_number);
247 COPY(gop_picture_number);
248 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
249 COPY(progressive_frame); // FIXME don't set in encode_header
250 COPY(partitioned_frame); // FIXME don't set in encode_header
255 * Set the given MpegEncContext to defaults for encoding.
256 * the changed fields will not depend upon the prior state of the MpegEncContext.
/* Applies the common MPV defaults, then installs the encoder-side static
 * MV-penalty and fcode tables (shared across all encoder instances; the
 * one-time init guard around the loop is not visible in this extract). */
258 static void MPV_encode_defaults(MpegEncContext *s)
261 ff_MPV_common_defaults(s);
/* fcode 1 covers the small motion-vector range [-16, 16). */
263 for (i = -16; i < 16; i++) {
264 default_fcode_tab[i + MAX_MV] = 1;
266 s->me.mv_penalty = default_mv_penalty;
267 s->fcode_tab = default_fcode_tab;
270 /* init video encoder */
/* Encoder init: validates user options against the selected codec, copies
 * AVCodecContext settings into the MpegEncContext, configures per-codec
 * output format and features, builds the quantization matrices and starts
 * rate control. Returns an error on invalid configuration (error-return
 * lines are largely elided in this extract).
 * NOTE(review): this function is heavily sampled here — many statements,
 * braces and `break;`s are missing; do not infer control flow gaps are
 * intentional fallthroughs without checking the full file. */
271 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
273 MpegEncContext *s = avctx->priv_data;
275 int chroma_h_shift, chroma_v_shift;
277 MPV_encode_defaults(s);
/* --- pixel-format validation per codec --- */
279 switch (avctx->codec_id) {
280 case AV_CODEC_ID_MPEG2VIDEO:
281 if (avctx->pix_fmt != PIX_FMT_YUV420P &&
282 avctx->pix_fmt != PIX_FMT_YUV422P) {
283 av_log(avctx, AV_LOG_ERROR,
284 "only YUV420 and YUV422 are supported\n");
288 case AV_CODEC_ID_LJPEG:
/* LJPEG: J-range formats and BGRA always allowed; plain YUV only with
 * non-strict compliance. */
289 if (avctx->pix_fmt != PIX_FMT_YUVJ420P &&
290 avctx->pix_fmt != PIX_FMT_YUVJ422P &&
291 avctx->pix_fmt != PIX_FMT_YUVJ444P &&
292 avctx->pix_fmt != PIX_FMT_BGRA &&
293 ((avctx->pix_fmt != PIX_FMT_YUV420P &&
294 avctx->pix_fmt != PIX_FMT_YUV422P &&
295 avctx->pix_fmt != PIX_FMT_YUV444P) ||
296 avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
297 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
301 case AV_CODEC_ID_MJPEG:
302 if (avctx->pix_fmt != PIX_FMT_YUVJ420P &&
303 avctx->pix_fmt != PIX_FMT_YUVJ422P &&
304 ((avctx->pix_fmt != PIX_FMT_YUV420P &&
305 avctx->pix_fmt != PIX_FMT_YUV422P) ||
306 avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
307 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
/* default: all remaining codecs require 4:2:0. */
312 if (avctx->pix_fmt != PIX_FMT_YUV420P) {
313 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
318 switch (avctx->pix_fmt) {
319 case PIX_FMT_YUVJ422P:
320 case PIX_FMT_YUV422P:
321 s->chroma_format = CHROMA_422;
323 case PIX_FMT_YUVJ420P:
324 case PIX_FMT_YUV420P:
326 s->chroma_format = CHROMA_420;
/* --- copy basic settings from AVCodecContext --- */
330 s->bit_rate = avctx->bit_rate;
331 s->width = avctx->width;
332 s->height = avctx->height;
333 if (avctx->gop_size > 600 &&
334 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
335 av_log(avctx, AV_LOG_ERROR,
336 "Warning keyframe interval too large! reducing it ...\n");
337 avctx->gop_size = 600;
339 s->gop_size = avctx->gop_size;
341 s->flags = avctx->flags;
342 s->flags2 = avctx->flags2;
343 s->max_b_frames = avctx->max_b_frames;
344 s->codec_id = avctx->codec->id;
/* Deprecated global options kept for API compatibility. */
345 #if FF_API_MPV_GLOBAL_OPTS
346 if (avctx->luma_elim_threshold)
347 s->luma_elim_threshold = avctx->luma_elim_threshold;
348 if (avctx->chroma_elim_threshold)
349 s->chroma_elim_threshold = avctx->chroma_elim_threshold;
351 s->strict_std_compliance = avctx->strict_std_compliance;
352 s->quarter_sample = (avctx->flags & CODEC_FLAG_QPEL) != 0;
353 s->mpeg_quant = avctx->mpeg_quant;
354 s->rtp_mode = !!avctx->rtp_payload_size;
355 s->intra_dc_precision = avctx->intra_dc_precision;
356 s->user_specified_pts = AV_NOPTS_VALUE;
358 if (s->gop_size <= 1) {
365 s->me_method = avctx->me_method;
368 s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
370 #if FF_API_MPV_GLOBAL_OPTS
371 if (s->flags & CODEC_FLAG_QP_RD)
372 s->mpv_flags |= FF_MPV_FLAG_QP_RD;
/* Adaptive quant when any masking option or QP-RD is requested
 * (the other operand of the final && is not visible here). */
375 s->adaptive_quant = (s->avctx->lumi_masking ||
376 s->avctx->dark_masking ||
377 s->avctx->temporal_cplx_masking ||
378 s->avctx->spatial_cplx_masking ||
379 s->avctx->p_masking ||
380 s->avctx->border_masking ||
381 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
384 s->loop_filter = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
/* --- rate-control sanity checks --- */
386 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
387 av_log(avctx, AV_LOG_ERROR,
388 "a vbv buffer size is needed, "
389 "for encoding with a maximum bitrate\n");
393 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
394 av_log(avctx, AV_LOG_INFO,
395 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
398 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
399 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
403 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
404 av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
408 if (avctx->rc_max_rate &&
409 avctx->rc_max_rate == avctx->bit_rate &&
410 avctx->rc_max_rate != avctx->rc_min_rate) {
411 av_log(avctx, AV_LOG_INFO,
412 "impossible bitrate constraints, this will fail\n");
/* VBV buffer must hold at least one frame's worth of bits. */
415 if (avctx->rc_buffer_size &&
416 avctx->bit_rate * (int64_t)avctx->time_base.num >
417 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
418 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
422 if (!s->fixed_qscale &&
423 avctx->bit_rate * av_q2d(avctx->time_base) >
424 avctx->bit_rate_tolerance) {
425 av_log(avctx, AV_LOG_ERROR,
426 "bitrate tolerance too small for bitrate\n");
/* vbv_delay is a 16-bit field in 90kHz units for MPEG-1/2 CBR. */
430 if (s->avctx->rc_max_rate &&
431 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
432 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
433 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
434 90000LL * (avctx->rc_buffer_size - 1) >
435 s->avctx->rc_max_rate * 0xFFFFLL) {
436 av_log(avctx, AV_LOG_INFO,
437 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
438 "specified vbv buffer is too large for the given bitrate!\n");
/* --- feature-vs-codec compatibility checks --- */
441 if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
442 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
443 s->codec_id != AV_CODEC_ID_FLV1) {
444 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
448 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
449 av_log(avctx, AV_LOG_ERROR,
450 "OBMC is only supported with simple mb decision\n");
454 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
455 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
459 if (s->max_b_frames &&
460 s->codec_id != AV_CODEC_ID_MPEG4 &&
461 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
462 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
463 av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
467 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
468 s->codec_id == AV_CODEC_ID_H263 ||
469 s->codec_id == AV_CODEC_ID_H263P) &&
470 (avctx->sample_aspect_ratio.num > 255 ||
471 avctx->sample_aspect_ratio.den > 255)) {
472 av_log(avctx, AV_LOG_ERROR,
473 "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
474 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
478 if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
479 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
480 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
484 // FIXME mpeg2 uses that too
485 if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
486 av_log(avctx, AV_LOG_ERROR,
487 "mpeg2 style quantization not supported by codec\n");
491 #if FF_API_MPV_GLOBAL_OPTS
492 if (s->flags & CODEC_FLAG_CBP_RD)
493 s->mpv_flags |= FF_MPV_FLAG_CBP_RD;
496 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
497 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
501 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
502 s->avctx->mb_decision != FF_MB_DECISION_RD) {
503 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
507 if (s->avctx->scenechange_threshold < 1000000000 &&
508 (s->flags & CODEC_FLAG_CLOSED_GOP)) {
509 av_log(avctx, AV_LOG_ERROR,
510 "closed gop with scene change detection are not supported yet, "
511 "set threshold to 1000000000\n");
515 if (s->flags & CODEC_FLAG_LOW_DELAY) {
516 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
517 av_log(avctx, AV_LOG_ERROR,
518 "low delay forcing is only available for mpeg2\n");
521 if (s->max_b_frames != 0) {
522 av_log(avctx, AV_LOG_ERROR,
523 "b frames cannot be used with low delay\n");
528 if (s->q_scale_type == 1) {
529 if (avctx->qmax > 12) {
530 av_log(avctx, AV_LOG_ERROR,
531 "non linear quant only supports qmax <= 12 currently\n");
/* --- threading support check --- */
536 if (s->avctx->thread_count > 1 &&
537 s->codec_id != AV_CODEC_ID_MPEG4 &&
538 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
539 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
540 (s->codec_id != AV_CODEC_ID_H263P)) {
541 av_log(avctx, AV_LOG_ERROR,
542 "multi threaded encoding not supported by codec\n");
546 if (s->avctx->thread_count < 1) {
547 av_log(avctx, AV_LOG_ERROR,
548 "automatic thread number detection not supported by codec,"
553 if (s->avctx->thread_count > 1)
556 if (!avctx->time_base.den || !avctx->time_base.num) {
557 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
/* i = largest threshold representable without overflowing later math. */
561 i = (INT_MAX / 2 + 128) >> 8;
562 if (avctx->me_threshold >= i) {
563 av_log(avctx, AV_LOG_ERROR, "me_threshold too large, max is %d\n",
567 if (avctx->mb_threshold >= i) {
568 av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
573 if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
574 av_log(avctx, AV_LOG_INFO,
575 "notice: b_frame_strategy only affects the first pass\n");
576 avctx->b_frame_strategy = 0;
/* Reduce the timebase fraction so header fields stay small. */
579 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
581 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
582 avctx->time_base.den /= i;
583 avctx->time_base.num /= i;
/* --- default quantizer rounding biases --- */
587 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
588 s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
589 // (a + x * 3 / 8) / x
590 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
591 s->inter_quant_bias = 0;
593 s->intra_quant_bias = 0;
595 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
/* User-specified biases override the codec defaults. */
598 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
599 s->intra_quant_bias = avctx->intra_quant_bias;
600 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
601 s->inter_quant_bias = avctx->inter_quant_bias;
603 avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
/* MPEG-4 stores the timebase denominator in a 16-bit field. */
606 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
607 s->avctx->time_base.den > (1 << 16) - 1) {
608 av_log(avctx, AV_LOG_ERROR,
609 "timebase %d/%d not supported by MPEG 4 standard, "
610 "the maximum admitted value for the timebase denominator "
611 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
615 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
617 #if FF_API_MPV_GLOBAL_OPTS
618 if (avctx->flags2 & CODEC_FLAG2_SKIP_RD)
619 s->mpv_flags |= FF_MPV_FLAG_SKIP_RD;
620 if (avctx->flags2 & CODEC_FLAG2_STRICT_GOP)
621 s->mpv_flags |= FF_MPV_FLAG_STRICT_GOP;
622 if (avctx->quantizer_noise_shaping)
623 s->quantizer_noise_shaping = avctx->quantizer_noise_shaping;
/* --- per-codec output format / feature configuration --- */
626 switch (avctx->codec->id) {
627 case AV_CODEC_ID_MPEG1VIDEO:
628 s->out_format = FMT_MPEG1;
629 s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
630 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
632 case AV_CODEC_ID_MPEG2VIDEO:
633 s->out_format = FMT_MPEG1;
634 s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
635 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
638 case AV_CODEC_ID_LJPEG:
639 case AV_CODEC_ID_MJPEG:
640 s->out_format = FMT_MJPEG;
641 s->intra_only = 1; /* force intra only for jpeg */
/* BGRA LJPEG is coded without chroma subsampling (1x1 everywhere). */
642 if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
643 avctx->pix_fmt == PIX_FMT_BGRA) {
644 s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
645 s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
646 s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
648 s->mjpeg_vsample[0] = 2;
649 s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
650 s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
651 s->mjpeg_hsample[0] = 2;
652 s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
653 s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
655 if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
656 ff_mjpeg_encode_init(s) < 0)
661 case AV_CODEC_ID_H261:
662 if (!CONFIG_H261_ENCODER)
664 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
665 av_log(avctx, AV_LOG_ERROR,
666 "The specified picture size of %dx%d is not valid for the "
667 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
668 s->width, s->height);
671 s->out_format = FMT_H261;
675 case AV_CODEC_ID_H263:
676 if (!CONFIG_H263_ENCODER)
/* Index 8 in ff_h263_format means "no standard size matched". */
678 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
679 s->width, s->height) == 8) {
680 av_log(avctx, AV_LOG_INFO,
681 "The specified picture size of %dx%d is not valid for "
682 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
683 "352x288, 704x576, and 1408x1152."
684 "Try H.263+.\n", s->width, s->height);
687 s->out_format = FMT_H263;
691 case AV_CODEC_ID_H263P:
692 s->out_format = FMT_H263;
695 s->h263_aic = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
696 s->modified_quant = s->h263_aic;
697 s->loop_filter = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
698 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
701 /* These are just to be sure */
705 case AV_CODEC_ID_FLV1:
706 s->out_format = FMT_H263;
707 s->h263_flv = 2; /* format = 1; 11-bit codes */
708 s->unrestricted_mv = 1;
709 s->rtp_mode = 0; /* don't allow GOB */
713 case AV_CODEC_ID_RV10:
714 s->out_format = FMT_H263;
718 case AV_CODEC_ID_RV20:
719 s->out_format = FMT_H263;
722 s->modified_quant = 1;
726 s->unrestricted_mv = 0;
728 case AV_CODEC_ID_MPEG4:
729 s->out_format = FMT_H263;
731 s->unrestricted_mv = 1;
732 s->low_delay = s->max_b_frames ? 0 : 1;
733 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
735 case AV_CODEC_ID_MSMPEG4V2:
736 s->out_format = FMT_H263;
738 s->unrestricted_mv = 1;
739 s->msmpeg4_version = 2;
743 case AV_CODEC_ID_MSMPEG4V3:
744 s->out_format = FMT_H263;
746 s->unrestricted_mv = 1;
747 s->msmpeg4_version = 3;
748 s->flipflop_rounding = 1;
752 case AV_CODEC_ID_WMV1:
753 s->out_format = FMT_H263;
755 s->unrestricted_mv = 1;
756 s->msmpeg4_version = 4;
757 s->flipflop_rounding = 1;
761 case AV_CODEC_ID_WMV2:
762 s->out_format = FMT_H263;
764 s->unrestricted_mv = 1;
765 s->msmpeg4_version = 5;
766 s->flipflop_rounding = 1;
774 avctx->has_b_frames = !s->low_delay;
/* Progressive unless any interlaced flag is set (the remaining operand
 * of the || is not visible in this extract). */
778 s->progressive_frame =
779 s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
780 CODEC_FLAG_INTERLACED_ME) ||
/* --- common init, function pointers, matrices, rate control --- */
784 if (ff_MPV_common_init(s) < 0)
788 ff_MPV_encode_init_x86(s);
790 if (!s->dct_quantize)
791 s->dct_quantize = ff_dct_quantize_c;
793 s->denoise_dct = denoise_dct_c;
/* Keep the plain quantizer reachable even when trellis replaces it. */
794 s->fast_dct_quantize = s->dct_quantize;
796 s->dct_quantize = dct_quantize_trellis_c;
798 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
799 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
801 s->quant_precision = 5;
803 ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
804 ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
806 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
807 ff_h261_encode_init(s);
808 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
809 ff_h263_encode_init(s);
810 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
811 ff_msmpeg4_encode_init(s);
812 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
813 && s->out_format == FMT_MPEG1)
814 ff_mpeg1_encode_init(s);
/* Fill intra/inter matrices in IDCT-permuted order; user matrices win. */
817 for (i = 0; i < 64; i++) {
818 int j = s->dsp.idct_permutation[i];
819 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
821 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
822 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
823 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
825 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
828 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
829 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
831 if (s->avctx->intra_matrix)
832 s->intra_matrix[j] = s->avctx->intra_matrix[i];
833 if (s->avctx->inter_matrix)
834 s->inter_matrix[j] = s->avctx->inter_matrix[i];
837 /* precompute matrix */
838 /* for mjpeg, we do include qscale in the matrix */
839 if (s->out_format != FMT_MJPEG) {
840 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
841 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
843 ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
844 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
848 if (ff_rate_control_init(s) < 0)
/* Encoder teardown: release rate control, common MPV state, the MJPEG
 * sub-encoder when it was used, and the extradata buffer. */
854 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
856 MpegEncContext *s = avctx->priv_data;
858 ff_rate_control_uninit(s);
860 ff_MPV_common_end(s);
861 if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
862 s->out_format == FMT_MJPEG)
863 ff_mjpeg_encode_close(s);
865 av_freep(&avctx->extradata);
/* Sum of absolute errors of a 16x16 block against a constant value `ref`
 * (typically the block mean) — a cheap measure of intra-coding cost. */
870 static int get_sae(uint8_t *src, int ref, int stride)
875 for (y = 0; y < 16; y++) {
876 for (x = 0; x < 16; x++) {
877 acc += FFABS(src[x + y * stride] - ref);
/* Count 16x16 blocks where intra coding looks cheaper than inter:
 * compares SAE around the block mean (intra proxy, +500 margin) against
 * the SAD versus the reference frame (inter proxy). Used by the
 * b-frame-strategy heuristics. */
884 static int get_intra_count(MpegEncContext *s, uint8_t *src,
885 uint8_t *ref, int stride)
893 for (y = 0; y < h; y += 16) {
894 for (x = 0; x < w; x += 16) {
895 int offset = x + y * stride;
896 int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
/* mean = rounded average of the 16x16 block (pix_sum / 256). */
898 int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
899 int sae = get_sae(src + offset, mean, stride);
/* Block votes "intra" when its flatness beats inter prediction. */
901 acc += sae + 500 < sad;
/* Accept one input frame from the user: validate/guess its pts, then either
 * reference the caller's buffer directly (when strides match and the data
 * can be used in place) or allocate an internal picture and copy the planes.
 * Finally append the picture to the reordering queue (input_picture[]).
 * NOTE(review): several early-return/error lines are elided in this
 * extract; the pts monotonicity check's comparison line is among them. */
908 static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg)
913 const int encoding_delay = s->max_b_frames ? s->max_b_frames :
914 (s->low_delay ? 0 : 1);
919 pic_arg->display_picture_number = s->input_picture_number++;
921 if (pts != AV_NOPTS_VALUE) {
/* With a previous user pts, enforce monotonic timestamps. */
922 if (s->user_specified_pts != AV_NOPTS_VALUE) {
924 int64_t last = s->user_specified_pts;
927 av_log(s->avctx, AV_LOG_ERROR,
928 "Error, Invalid timestamp=%"PRId64", "
929 "last=%"PRId64"\n", pts, s->user_specified_pts);
933 if (!s->low_delay && pic_arg->display_picture_number == 1)
934 s->dts_delta = time - last;
936 s->user_specified_pts = pts;
/* No pts supplied: extrapolate from the previous one, or fall back to
 * the display picture number. */
938 if (s->user_specified_pts != AV_NOPTS_VALUE) {
939 s->user_specified_pts =
940 pts = s->user_specified_pts + 1;
941 av_log(s->avctx, AV_LOG_INFO,
942 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
945 pts = pic_arg->display_picture_number;
/* direct_use = caller's buffer is usable as-is (strides must match). */
951 if (encoding_delay && !(s->flags & CODEC_FLAG_INPUT_PRESERVED))
953 if (pic_arg->linesize[0] != s->linesize)
955 if (pic_arg->linesize[1] != s->uvlinesize)
957 if (pic_arg->linesize[2] != s->uvlinesize)
960 av_dlog(s->avctx, "%d %d %d %d\n", pic_arg->linesize[0],
961 pic_arg->linesize[1], s->linesize, s->uvlinesize);
/* Direct path: wrap the user's planes in a shared Picture. */
964 i = ff_find_unused_picture(s, 1);
968 pic = &s->picture[i].f;
971 for (i = 0; i < 4; i++) {
972 pic->data[i] = pic_arg->data[i];
973 pic->linesize[i] = pic_arg->linesize[i];
975 if (ff_alloc_picture(s, (Picture *) pic, 1) < 0) {
/* Copy path: allocate an internal picture... */
979 i = ff_find_unused_picture(s, 0);
983 pic = &s->picture[i].f;
986 if (ff_alloc_picture(s, (Picture *) pic, 0) < 0) {
/* ...unless the user data already aliases our buffer at INPLACE_OFFSET. */
990 if (pic->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
991 pic->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
992 pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
995 int h_chroma_shift, v_chroma_shift;
996 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift,
/* Plane-by-plane copy honoring chroma subsampling. */
999 for (i = 0; i < 3; i++) {
1000 int src_stride = pic_arg->linesize[i];
1001 int dst_stride = i ? s->uvlinesize : s->linesize;
1002 int h_shift = i ? h_chroma_shift : 0;
1003 int v_shift = i ? v_chroma_shift : 0;
1004 int w = s->width >> h_shift;
1005 int h = s->height >> v_shift;
1006 uint8_t *src = pic_arg->data[i];
1007 uint8_t *dst = pic->data[i];
1009 if (!s->avctx->rc_buffer_size)
1010 dst += INPLACE_OFFSET;
1012 if (src_stride == dst_stride)
1013 memcpy(dst, src, src_stride * h);
1016 memcpy(dst, src, w);
1024 copy_picture_attributes(s, pic, pic_arg);
1025 pic->pts = pts; // we set this here to avoid modifiying pic_arg
1028 /* shift buffer entries */
1029 for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1030 s->input_picture[i - 1] = s->input_picture[i];
1032 s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether frame p is similar enough to ref to be skipped entirely.
 * Accumulates an 8x8-block difference score over all three planes using the
 * user-selected frame_skip_cmp metric, folded by frame_skip_exp (max, L1,
 * L2, cubic, quartic), then compares against the skip threshold and a
 * lambda-scaled skip factor. NOTE(review): the lines merging `score` into
 * `score64` and the return values are elided in this extract. */
1037 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1041 int64_t score64 = 0;
1043 for (plane = 0; plane < 3; plane++) {
1044 const int stride = p->f.linesize[plane];
/* bw: blocks-per-MB in each dimension (luma 2, chroma 1 for 4:2:0). */
1045 const int bw = plane ? 1 : 2;
1046 for (y = 0; y < s->mb_height * bw; y++) {
1047 for (x = 0; x < s->mb_width * bw; x++) {
/* Shared buffers have no INPLACE-style 16-byte offset. */
1048 int off = p->f.type == FF_BUFFER_TYPE_SHARED ? 0 : 16;
1049 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
1050 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
1051 int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1053 switch (s->avctx->frame_skip_exp) {
1054 case 0: score = FFMAX(score, v); break;
1055 case 1: score += FFABS(v); break;
1056 case 2: score += v * v; break;
1057 case 3: score64 += FFABS(v * v * (int64_t)v); break;
1058 case 4: score64 += v * v * (int64_t)(v * v); break;
1067 if (score64 < s->avctx->frame_skip_threshold)
1069 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
/* Encode one frame with a throwaway codec context (used by the B-frame
 * estimator) and return the compressed size in bits; the size/return
 * computation lines are elided in this extract. */
1074 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1076 AVPacket pkt = { 0 };
1077 int ret, got_output;
1079 av_init_packet(&pkt);
1080 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1085 av_free_packet(&pkt);
/* b_frame_strategy==2: brute-force search for the best number of B-frames.
 * Encodes downscaled (by brd_scale) copies of the queued input pictures
 * with a temporary encoder for each candidate B-count j, scoring each run
 * as bits*lambda2 plus reconstruction error, and returns the j with the
 * lowest rate-distortion cost. NOTE(review): the rd-comparison /
 * best_b_count update and avcodec_close lines are elided in this extract. */
1089 static int estimate_best_b_count(MpegEncContext *s)
1091 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1092 AVCodecContext *c = avcodec_alloc_context3(NULL);
1093 AVFrame input[FF_MAX_B_FRAMES + 2];
1094 const int scale = s->avctx->brd_scale;
1095 int i, j, out_size, p_lambda, b_lambda, lambda2;
1096 int64_t best_rd = INT64_MAX;
1097 int best_b_count = -1;
1099 assert(scale >= 0 && scale <= 3);
1102 //s->next_picture_ptr->quality;
1103 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1104 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1105 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1106 if (!b_lambda) // FIXME we should do this somewhere else
1107 b_lambda = p_lambda;
1108 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* Configure the scratch encoder to mirror the real one at reduced size. */
1111 c->width = s->width >> scale;
1112 c->height = s->height >> scale;
1113 c->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
1114 CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
1115 c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
1116 c->mb_decision = s->avctx->mb_decision;
1117 c->me_cmp = s->avctx->me_cmp;
1118 c->mb_cmp = s->avctx->mb_cmp;
1119 c->me_sub_cmp = s->avctx->me_sub_cmp;
1120 c->pix_fmt = PIX_FMT_YUV420P;
1121 c->time_base = s->avctx->time_base;
1122 c->max_b_frames = s->max_b_frames;
1124 if (avcodec_open2(c, codec, NULL) < 0)
/* Build downscaled copies: input[0] = last ref, input[1..] = queue. */
1127 for (i = 0; i < s->max_b_frames + 2; i++) {
1128 int ysize = c->width * c->height;
1129 int csize = (c->width / 2) * (c->height / 2);
1130 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1131 s->next_picture_ptr;
1133 avcodec_get_frame_defaults(&input[i]);
/* One allocation holds Y plus both chroma planes. */
1134 input[i].data[0] = av_malloc(ysize + 2 * csize);
1135 input[i].data[1] = input[i].data[0] + ysize;
1136 input[i].data[2] = input[i].data[1] + csize;
1137 input[i].linesize[0] = c->width;
1138 input[i].linesize[1] =
1139 input[i].linesize[2] = c->width / 2;
1141 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1142 pre_input = *pre_input_ptr;
/* Non-shared internal buffers carry the INPLACE offset. */
1144 if (pre_input.f.type != FF_BUFFER_TYPE_SHARED && i) {
1145 pre_input.f.data[0] += INPLACE_OFFSET;
1146 pre_input.f.data[1] += INPLACE_OFFSET;
1147 pre_input.f.data[2] += INPLACE_OFFSET;
1150 s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
1151 pre_input.f.data[0], pre_input.f.linesize[0],
1152 c->width, c->height);
1153 s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
1154 pre_input.f.data[1], pre_input.f.linesize[1],
1155 c->width >> 1, c->height >> 1);
1156 s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
1157 pre_input.f.data[2], pre_input.f.linesize[2],
1158 c->width >> 1, c->height >> 1);
/* Try each candidate B-frame count j. */
1162 for (j = 0; j < s->max_b_frames + 1; j++) {
1165 if (!s->input_picture[j])
1168 c->error[0] = c->error[1] = c->error[2] = 0;
/* Anchor: encode input[0] as a fixed-quality I frame. */
1170 input[0].pict_type = AV_PICTURE_TYPE_I;
1171 input[0].quality = 1 * FF_QP2LAMBDA;
1173 out_size = encode_frame(c, &input[0]);
1175 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
/* Every (j+1)-th frame is a P, the rest are B. */
1177 for (i = 0; i < s->max_b_frames + 1; i++) {
1178 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1180 input[i + 1].pict_type = is_p ?
1181 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1182 input[i + 1].quality = is_p ? p_lambda : b_lambda;
1184 out_size = encode_frame(c, &input[i + 1]);
1186 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1189 /* get the delayed frames */
1191 out_size = encode_frame(c, NULL);
1192 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* Add the accumulated PSNR error as the distortion term. */
1195 rd += c->error[0] + c->error[1] + c->error[2];
1206 for (i = 0; i < s->max_b_frames + 2; i++) {
1207 av_freep(&input[i].data[0]);
1210 return best_b_count;
/*
 * Pick the next picture to encode from the buffered input queue and decide
 * its coded type (I/P/B).  Shifts the reorder queue down by one slot; if no
 * picture is already scheduled, classifies input_picture[0]:
 *   - no previous reference frame or intra-only stream -> force an I-frame;
 *   - optional skip_check() may drop the frame entirely (VBV still updated);
 *   - otherwise the run of B-frames before the next reference frame is
 *     chosen per avctx->b_frame_strategy (0: fixed max, 1: score-based,
 *     2: estimate_best_b_count()), then clamped by gop_size / CLOSED_GOP.
 * The selected picture is published in s->new_picture and
 * s->current_picture_ptr.
 * NOTE(review): interior lines are elided in this excerpt; comments cover
 * only the visible code.
 */
1213 static int select_input_picture(MpegEncContext *s)
/* shift the reorder queue: slot 0 is the picture to encode next */
1217 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1218 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1219 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1221 /* set next picture type & ordering */
1222 if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
1223 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1224 s->next_picture_ptr == NULL || s->intra_only) {
/* no reference available (stream start) or intra-only: code an I-frame */
1225 s->reordered_input_picture[0] = s->input_picture[0];
1226 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
1227 s->reordered_input_picture[0]->f.coded_picture_number =
1228 s->coded_picture_number++;
/* frame-skipping enabled: possibly drop this input frame entirely */
1232 if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1233 if (s->picture_in_gop_number < s->gop_size &&
1234 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1235 // FIXME check that the gop check above is +-1 correct
1236 if (s->input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED) {
/* shared buffer: just forget the data pointers, caller owns them */
1237 for (i = 0; i < 4; i++)
1238 s->input_picture[0]->f.data[i] = NULL;
1239 s->input_picture[0]->f.type = 0;
1241 assert(s->input_picture[0]->f.type == FF_BUFFER_TYPE_USER ||
1242 s->input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL);
1244 s->avctx->release_buffer(s->avctx,
1245 &s->input_picture[0]->f);
/* frame was dropped: tell rate control that 0 bits were spent */
1249 ff_vbv_update(s, 0);
/* two-pass mode: reuse the picture types recorded by the first pass */
1255 if (s->flags & CODEC_FLAG_PASS2) {
1256 for (i = 0; i < s->max_b_frames + 1; i++) {
1257 int pict_num = s->input_picture[0]->f.display_picture_number + i;
1259 if (pict_num >= s->rc_context.num_entries)
1261 if (!s->input_picture[i]) {
1262 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1266 s->input_picture[i]->f.pict_type =
1267 s->rc_context.entry[pict_num].new_pict_type;
/* decide how many consecutive B-frames precede the next reference frame */
1271 if (s->avctx->b_frame_strategy == 0) {
1272 b_frames = s->max_b_frames;
1273 while (b_frames && !s->input_picture[b_frames])
1275 } else if (s->avctx->b_frame_strategy == 1) {
/* strategy 1: score each candidate by inter-frame difference
 * (scoring call partly elided here) and cut the B-run where the
 * score exceeds mb_num / b_sensitivity */
1276 for (i = 1; i < s->max_b_frames + 1; i++) {
1277 if (s->input_picture[i] &&
1278 s->input_picture[i]->b_frame_score == 0) {
1279 s->input_picture[i]->b_frame_score =
1281 s->input_picture[i ]->f.data[0],
1282 s->input_picture[i - 1]->f.data[0],
1286 for (i = 0; i < s->max_b_frames + 1; i++) {
1287 if (s->input_picture[i] == NULL ||
1288 s->input_picture[i]->b_frame_score - 1 >
1289 s->mb_num / s->avctx->b_sensitivity)
1293 b_frames = FFMAX(0, i - 1);
/* reset the cached scores so the next decision starts fresh */
1296 for (i = 0; i < b_frames + 1; i++) {
1297 s->input_picture[i]->b_frame_score = 0;
1299 } else if (s->avctx->b_frame_strategy == 2) {
/* strategy 2: brute-force trial encode of all B-run lengths */
1300 b_frames = estimate_best_b_count(s);
1302 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
/* honour picture types already forced on the inputs
 * (presumably user/pass-1 forced types — elided context, verify) */
1308 for (i = b_frames - 1; i >= 0; i--) {
1309 int type = s->input_picture[i]->f.pict_type;
1310 if (type && type != AV_PICTURE_TYPE_B)
1313 if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
1314 b_frames == s->max_b_frames) {
1315 av_log(s->avctx, AV_LOG_ERROR,
1316 "warning, too many b frames in a row\n");
/* GOP boundary handling: possibly shorten the run and/or force an I */
1319 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1320 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1321 s->gop_size > s->picture_in_gop_number) {
1322 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1324 if (s->flags & CODEC_FLAG_CLOSED_GOP)
1326 s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1330 if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1331 s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
/* the reference frame is coded first; the preceding display-order
 * frames follow it as B-frames */
1334 s->reordered_input_picture[0] = s->input_picture[b_frames];
1335 if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
1336 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
1337 s->reordered_input_picture[0]->f.coded_picture_number =
1338 s->coded_picture_number++;
1339 for (i = 0; i < b_frames; i++) {
1340 s->reordered_input_picture[i + 1] = s->input_picture[i];
1341 s->reordered_input_picture[i + 1]->f.pict_type =
1343 s->reordered_input_picture[i + 1]->f.coded_picture_number =
1344 s->coded_picture_number++;
1349 if (s->reordered_input_picture[0]) {
/* reference flag: 3 for I/P (both fields), 0 for B */
1350 s->reordered_input_picture[0]->f.reference =
1351 s->reordered_input_picture[0]->f.pict_type !=
1352 AV_PICTURE_TYPE_B ? 3 : 0;
1354 ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]);
1356 if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED ||
1357 s->avctx->rc_buffer_size) {
1358 // input is a shared pix, so we can't modify it -> alloc a new
1359 // one & ensure that the shared one is reusable
1362 int i = ff_find_unused_picture(s, 0);
1365 pic = &s->picture[i];
1367 pic->f.reference = s->reordered_input_picture[0]->f.reference;
1368 if (ff_alloc_picture(s, pic, 0) < 0) {
1372 /* mark us unused / free shared pic */
1373 if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL)
1374 s->avctx->release_buffer(s->avctx,
1375 &s->reordered_input_picture[0]->f);
1376 for (i = 0; i < 4; i++)
1377 s->reordered_input_picture[0]->f.data[i] = NULL;
1378 s->reordered_input_picture[0]->f.type = 0;
1380 copy_picture_attributes(s, &pic->f,
1381 &s->reordered_input_picture[0]->f);
1383 s->current_picture_ptr = pic;
1385 // input is not a shared pix -> reuse buffer for current_pix
1387 assert(s->reordered_input_picture[0]->f.type ==
1388 FF_BUFFER_TYPE_USER ||
1389 s->reordered_input_picture[0]->f.type ==
1390 FF_BUFFER_TYPE_INTERNAL);
1392 s->current_picture_ptr = s->reordered_input_picture[0];
/* shift data pointers so the encoder can write in place
 * (INPLACE_OFFSET defined elsewhere in this file) */
1393 for (i = 0; i < 4; i++) {
1394 s->new_picture.f.data[i] += INPLACE_OFFSET;
1397 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
1399 s->picture_number = s->new_picture.f.display_picture_number;
/* nothing to encode: clear new_picture so the caller sees no frame */
1401 memset(&s->new_picture, 0, sizeof(Picture));
/*
 * Encode one video frame: the main per-frame entry point of the encoder.
 *
 * Loads the user-supplied frame, runs frame-type selection/reordering,
 * allocates the output packet, splits the packet buffer across slice
 * threads, encodes, then performs rate-control post-processing (lambda
 * bump and re-encode setup on VBV overrun, stuffing bytes, MPEG-1/2
 * vbv_delay patch-up for CBR) and finally fills in pkt pts/dts/flags.
 *
 * @param avctx      codec context (priv_data is the MpegEncContext)
 * @param pkt        output packet, sized/truncated here
 * @param pic_arg    input frame (may be NULL to flush delayed frames)
 * @param got_packet set nonzero iff a packet was produced
 * NOTE(review): interior lines (returns, some braces) are elided in this
 * excerpt; comments cover only the visible code.
 */
1406 int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1407 const AVFrame *pic_arg, int *got_packet)
1409 MpegEncContext *s = avctx->priv_data;
1410 int i, stuffing_count, ret;
1411 int context_count = s->slice_context_count;
1413 s->picture_in_gop_number++;
1415 if (load_input_picture(s, pic_arg) < 0)
1418 if (select_input_picture(s) < 0) {
/* a frame was selected for encoding */
1423 if (s->new_picture.f.data[0]) {
/* worst-case packet size: MAX_MB_BYTES per macroblock */
1425 (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
/* H.263 per-MB info side data: 12 bytes per macroblock slot */
1428 s->mb_info_ptr = av_packet_new_side_data(pkt,
1429 AV_PKT_DATA_H263_MB_INFO,
1430 s->mb_width*s->mb_height*12);
1431 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* give each slice thread a proportional window of the packet buffer */
1434 for (i = 0; i < context_count; i++) {
1435 int start_y = s->thread_context[i]->start_mb_y;
1436 int end_y = s->thread_context[i]-> end_mb_y;
1437 int h = s->mb_height;
1438 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1439 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1441 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1444 s->pict_type = s->new_picture.f.pict_type;
1446 ff_MPV_frame_start(s, avctx);
1448 if (encode_picture(s, s->picture_number) < 0)
/* export per-frame bit statistics to the codec context */
1451 avctx->header_bits = s->header_bits;
1452 avctx->mv_bits = s->mv_bits;
1453 avctx->misc_bits = s->misc_bits;
1454 avctx->i_tex_bits = s->i_tex_bits;
1455 avctx->p_tex_bits = s->p_tex_bits;
1456 avctx->i_count = s->i_count;
1457 // FIXME f/b_count in avctx
1458 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1459 avctx->skip_count = s->skip_count;
1461 ff_MPV_frame_end(s);
1463 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1464 ff_mjpeg_encode_picture_trailer(s);
/* VBV overrun check: if the frame is too big, raise lambda and undo
 * per-frame state so the frame can be re-encoded (loop is elided) */
1466 if (avctx->rc_buffer_size) {
1467 RateControlContext *rcc = &s->rc_context;
1468 int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1470 if (put_bits_count(&s->pb) > max_size &&
1471 s->lambda < s->avctx->lmax) {
1472 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1473 (s->qscale + 1) / s->qscale);
1474 if (s->adaptive_quant) {
1476 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1477 s->lambda_table[i] =
1478 FFMAX(s->lambda_table[i] + 1,
1479 s->lambda_table[i] * (s->qscale + 1) /
1482 s->mb_skipped = 0; // done in MPV_frame_start()
1483 // done in encode_picture() so we must undo it
1484 if (s->pict_type == AV_PICTURE_TYPE_P) {
1485 if (s->flipflop_rounding ||
1486 s->codec_id == AV_CODEC_ID_H263P ||
1487 s->codec_id == AV_CODEC_ID_MPEG4)
1488 s->no_rounding ^= 1;
1490 if (s->pict_type != AV_PICTURE_TYPE_B) {
1491 s->time_base = s->last_time_base;
1492 s->last_non_b_time = s->time - s->pp_time;
/* rewind every slice thread's bitstream writer for the retry */
1494 for (i = 0; i < context_count; i++) {
1495 PutBitContext *pb = &s->thread_context[i]->pb;
1496 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1501 assert(s->avctx->rc_max_rate);
1504 if (s->flags & CODEC_FLAG_PASS1)
1505 ff_write_pass1_stats(s);
/* accumulate per-plane error (PSNR) statistics */
1507 for (i = 0; i < 4; i++) {
1508 s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
1509 avctx->error[i] += s->current_picture_ptr->f.error[i];
1512 if (s->flags & CODEC_FLAG_PASS1)
1513 assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1514 avctx->i_tex_bits + avctx->p_tex_bits ==
1515 put_bits_count(&s->pb));
1516 flush_put_bits(&s->pb);
1517 s->frame_bits = put_bits_count(&s->pb);
/* rate control may require stuffing bytes to keep the VBV buffer full */
1519 stuffing_count = ff_vbv_update(s, s->frame_bits);
1520 if (stuffing_count) {
1521 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1522 stuffing_count + 50) {
1523 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1527 switch (s->codec_id) {
1528 case AV_CODEC_ID_MPEG1VIDEO:
1529 case AV_CODEC_ID_MPEG2VIDEO:
/* MPEG-1/2 stuffing: plain zero bytes */
1530 while (stuffing_count--) {
1531 put_bits(&s->pb, 8, 0);
1534 case AV_CODEC_ID_MPEG4:
/* MPEG-4 stuffing: filler start code 0x000001C3 then 0xFF bytes */
1535 put_bits(&s->pb, 16, 0);
1536 put_bits(&s->pb, 16, 0x1C3);
1537 stuffing_count -= 4;
1538 while (stuffing_count--) {
1539 put_bits(&s->pb, 8, 0xFF);
1543 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1545 flush_put_bits(&s->pb);
1546 s->frame_bits = put_bits_count(&s->pb);
1549 /* update mpeg1/2 vbv_delay for CBR */
1550 if (s->avctx->rc_max_rate &&
1551 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1552 s->out_format == FMT_MPEG1 &&
1553 90000LL * (avctx->rc_buffer_size - 1) <=
1554 s->avctx->rc_max_rate * 0xFFFFLL) {
1555 int vbv_delay, min_delay;
1556 double inbits = s->avctx->rc_max_rate *
1557 av_q2d(s->avctx->time_base);
1558 int minbits = s->frame_bits - 8 *
1559 (s->vbv_delay_ptr - s->pb.buf - 1);
1560 double bits = s->rc_context.buffer_index + minbits - inbits;
1563 av_log(s->avctx, AV_LOG_ERROR,
1564 "Internal error, negative bits\n");
1566 assert(s->repeat_first_field == 0);
/* vbv_delay in 90 kHz units, clamped to the minimum feasible delay */
1568 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1569 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1570 s->avctx->rc_max_rate;
1572 vbv_delay = FFMAX(vbv_delay, min_delay);
1574 assert(vbv_delay < 0xFFFF);
/* patch the 16-bit vbv_delay field in the already-written picture
 * header; it straddles byte boundaries, hence the mask/shift dance */
1576 s->vbv_delay_ptr[0] &= 0xF8;
1577 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1578 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1579 s->vbv_delay_ptr[2] &= 0x07;
1580 s->vbv_delay_ptr[2] |= vbv_delay << 3;
1581 avctx->vbv_delay = vbv_delay * 300;
1583 s->total_bits += s->frame_bits;
1584 avctx->frame_bits = s->frame_bits;
/* timestamps: with B-frames (low_delay == 0) dts lags pts by one frame */
1586 pkt->pts = s->current_picture.f.pts;
1587 if (!s->low_delay) {
1588 if (!s->current_picture.f.coded_picture_number)
1589 pkt->dts = pkt->pts - s->dts_delta;
1591 pkt->dts = s->reordered_pts;
1592 s->reordered_pts = s->input_picture[0]->f.pts;
1594 pkt->dts = pkt->pts;
1595 if (s->current_picture.f.key_frame)
1596 pkt->flags |= AV_PKT_FLAG_KEY;
1598 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1602 assert((s->frame_bits & 7) == 0);
1604 pkt->size = s->frame_bits / 8;
1605 *got_packet = !!pkt->size;
/*
 * Zero out DCT block n entirely if its non-zero coefficients are few,
 * small, and low-frequency enough that dropping them costs less than the
 * bits to code them.  Each |coeff|==1 contributes tab[scan_pos] to a
 * score (higher weight for low frequencies, 0 beyond position 23); any
 * |coeff|>1 aborts the elimination (early-exit line elided here).  If the
 * score stays below |threshold| the block is cleared and
 * block_last_index[n] set to 0 (DC kept) or -1 (nothing kept).
 * A negative threshold means the DC coefficient may be eliminated too
 * (skip_dc is assigned on an elided line — presumably set there; verify).
 */
1609 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1610 int n, int threshold)
/* per-scan-position weight: low frequencies cost more to drop */
1612 static const char tab[64] = {
1613 3, 2, 2, 1, 1, 1, 1, 1,
1614 1, 1, 1, 1, 1, 1, 1, 1,
1615 1, 1, 1, 1, 1, 1, 1, 1,
1616 0, 0, 0, 0, 0, 0, 0, 0,
1617 0, 0, 0, 0, 0, 0, 0, 0,
1618 0, 0, 0, 0, 0, 0, 0, 0,
1619 0, 0, 0, 0, 0, 0, 0, 0,
1620 0, 0, 0, 0, 0, 0, 0, 0
1625 DCTELEM *block = s->block[n];
1626 const int last_index = s->block_last_index[n];
1629 if (threshold < 0) {
1631 threshold = -threshold;
1635 /* Are all we could set to zero already zero? */
1636 if (last_index <= skip_dc - 1)
/* accumulate the elimination score over the scan order */
1639 for (i = 0; i <= last_index; i++) {
1640 const int j = s->intra_scantable.permutated[i];
1641 const int level = FFABS(block[j]);
1643 if (skip_dc && i == 0)
1647 } else if (level > 1) {
/* block too significant: keep it as-is */
1653 if (score >= threshold)
/* clear every eliminable coefficient (in scan order) */
1655 for (i = skip_dc; i <= last_index; i++) {
1656 const int j = s->intra_scantable.permutated[i];
1660 s->block_last_index[n] = 0;
1662 s->block_last_index[n] = -1;
/*
 * Clamp quantized DCT coefficients into the codec's legal range
 * [s->min_qcoeff, s->max_qcoeff], counting how many were clipped
 * (overflow counter incremented on elided lines).  The intra DC
 * coefficient is skipped (presumably when s->mb_intra — the condition
 * line is elided here; verify).  A warning is logged only in
 * FF_MB_DECISION_SIMPLE mode, since RD modes handle overflow themselves.
 */
1665 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block,
1669 const int maxlevel = s->max_qcoeff;
1670 const int minlevel = s->min_qcoeff;
1674 i = 1; // skip clipping of intra dc
/* walk coefficients in scan order up to the last non-zero one */
1678 for (; i <= last_index; i++) {
1679 const int j = s->intra_scantable.permutated[i];
1680 int level = block[j];
1682 if (level > maxlevel) {
1685 } else if (level < minlevel) {
1693 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1694 av_log(s->avctx, AV_LOG_INFO,
1695 "warning, clipping %d dct coefficients to %d..%d\n",
1696 overflow, minlevel, maxlevel);
/*
 * Compute per-pixel noise-shaping weights for an 8x8 block: for each pixel
 * the local variance over its (up to) 3x3 neighbourhood is measured and
 * weight = 36*sqrt(count*sqr - sum^2)/count, i.e. proportional to the
 * local standard deviation — flat areas get small weights (noise is more
 * visible there).  Used by dct_quantize_refine() via encode_mb_internal().
 * count/sum/sqr are declared and accumulated on elided lines.
 *
 * @param weight  out: 64 weights, row-major (weight[x + 8*y])
 * @param ptr     top-left pixel of the source 8x8 block
 * @param stride  line size of the source image
 */
1699 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1703 for (y = 0; y < 8; y++) {
1704 for (x = 0; x < 8; x++) {
/* clamp the 3x3 neighbourhood to the block edges */
1710 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1711 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1712 int v = ptr[x2 + y2 * stride];
1718 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
/*
 * Encode one macroblock: the core per-MB pipeline.
 *
 * Steps (some connective lines elided in this excerpt):
 *  1. adaptive quantization — pick this MB's qscale/lambda;
 *  2. fetch source pixels (with edge emulation at picture borders);
 *  3. intra path: get_pixels into s->block[];  inter path: run motion
 *     compensation into s->dest[] and diff_pixels the residual;
 *  4. optional interlaced-DCT decision (field vs frame DCT);
 *  5. "pre quantization": SAD-based skip_dct decisions for cheap blocks;
 *  6. optional noise shaping (get_visual_weight + dct_quantize_refine);
 *  7. forward DCT + quantization + coefficient clipping/elimination;
 *  8. codec-specific entropy coding via the switch at the end.
 *
 * @param motion_x/motion_y  MV passed through to the MB bitstream coder
 * @param mb_block_height    chroma block height (8 for 4:2:0, 16 for 4:2:2)
 * (the block-count parameter line is elided; mb_block_count is 6 or 8,
 *  matching encode_mb() below)
 */
1723 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1724 int motion_x, int motion_y,
1725 int mb_block_height,
1728 int16_t weight[8][64];
1729 DCTELEM orig[8][64];
1730 const int mb_x = s->mb_x;
1731 const int mb_y = s->mb_y;
1734 int dct_offset = s->linesize * 8; // default for progressive frames
1735 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1738 for (i = 0; i < mb_block_count; i++)
1739 skip_dct[i] = s->skipdct;
/* step 1: per-MB quantizer selection */
1741 if (s->adaptive_quant) {
1742 const int last_qp = s->qscale;
1743 const int mb_xy = mb_x + mb_y * s->mb_stride;
1745 s->lambda = s->lambda_table[mb_xy];
1748 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1749 s->qscale = s->current_picture_ptr->f.qscale_table[mb_xy];
1750 s->dquant = s->qscale - last_qp;
1752 if (s->out_format == FMT_H263) {
/* H.263 only allows DQUANT in [-2, 2] */
1753 s->dquant = av_clip(s->dquant, -2, 2);
1755 if (s->codec_id == AV_CODEC_ID_MPEG4) {
/* MPEG-4 restrictions: some B-frame / 8x8-MV modes cannot
 * carry a quantizer change (resets elided here) */
1757 if (s->pict_type == AV_PICTURE_TYPE_B) {
1758 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1761 if (s->mv_type == MV_TYPE_8X8)
1767 ff_set_qscale(s, last_qp + s->dquant);
1768 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1769 ff_set_qscale(s, s->qscale + s->dquant);
/* step 2: locate this MB's source pixels in the input frame */
1771 wrap_y = s->linesize;
1772 wrap_c = s->uvlinesize;
1773 ptr_y = s->new_picture.f.data[0] +
1774 (mb_y * 16 * wrap_y) + mb_x * 16;
1775 ptr_cb = s->new_picture.f.data[1] +
1776 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1777 ptr_cr = s->new_picture.f.data[2] +
1778 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
/* MB sticks out of the picture: read via the edge-emulation buffer */
1780 if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
1781 uint8_t *ebuf = s->edge_emu_buffer + 32;
1782 s->dsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
1783 mb_y * 16, s->width, s->height);
1785 s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, 8,
1786 mb_block_height, mb_x * 8, mb_y * 8,
1787 s->width >> 1, s->height >> 1);
1788 ptr_cb = ebuf + 18 * wrap_y;
1789 s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, 8,
1790 mb_block_height, mb_x * 8, mb_y * 8,
1791 s->width >> 1, s->height >> 1);
1792 ptr_cr = ebuf + 18 * wrap_y + 8;
/* step 4 (intra variant): frame vs field DCT decision; the -400 bias
 * favours progressive unless interlacing clearly wins */
1796 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1797 int progressive_score, interlaced_score;
1799 s->interlaced_dct = 0;
1800 progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1802 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1803 NULL, wrap_y, 8) - 400;
1805 if (progressive_score > 0) {
1806 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1807 NULL, wrap_y * 2, 8) +
1808 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1809 NULL, wrap_y * 2, 8);
1810 if (progressive_score > interlaced_score) {
1811 s->interlaced_dct = 1;
/* field DCT: luma rows interleave, so block 2/3 start one line down */
1813 dct_offset = wrap_y;
1815 if (s->chroma_format == CHROMA_422)
/* step 3 (intra): copy the four 8x8 luma blocks */
1821 s->dsp.get_pixels(s->block[0], ptr_y , wrap_y);
1822 s->dsp.get_pixels(s->block[1], ptr_y + 8 , wrap_y);
1823 s->dsp.get_pixels(s->block[2], ptr_y + dct_offset , wrap_y);
1824 s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
1826 if (s->flags & CODEC_FLAG_GRAY) {
1830 s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1831 s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1832 if (!s->chroma_y_shift) { /* 422 */
1833 s->dsp.get_pixels(s->block[6],
1834 ptr_cb + (dct_offset >> 1), wrap_c);
1835 s->dsp.get_pixels(s->block[7],
1836 ptr_cr + (dct_offset >> 1), wrap_c);
/* --- inter path: motion compensation then residual --- */
1840 op_pixels_func (*op_pix)[4];
1841 qpel_mc_func (*op_qpix)[16];
1842 uint8_t *dest_y, *dest_cb, *dest_cr;
1844 dest_y = s->dest[0];
1845 dest_cb = s->dest[1];
1846 dest_cr = s->dest[2];
/* rounding choice must match the decoder's no_rounding state */
1848 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1849 op_pix = s->dsp.put_pixels_tab;
1850 op_qpix = s->dsp.put_qpel_pixels_tab;
1852 op_pix = s->dsp.put_no_rnd_pixels_tab;
1853 op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
1856 if (s->mv_dir & MV_DIR_FORWARD) {
1857 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
1858 s->last_picture.f.data,
/* bidirectional: second prediction is averaged onto the first */
1860 op_pix = s->dsp.avg_pixels_tab;
1861 op_qpix = s->dsp.avg_qpel_pixels_tab;
1863 if (s->mv_dir & MV_DIR_BACKWARD) {
1864 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
1865 s->next_picture.f.data,
/* step 4 (inter variant): same frame/field DCT decision on the residual */
1869 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1870 int progressive_score, interlaced_score;
1872 s->interlaced_dct = 0;
1873 progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
1876 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
1877 ptr_y + wrap_y * 8, wrap_y,
1880 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
1881 progressive_score -= 400;
1883 if (progressive_score > 0) {
1884 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
1887 s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
1891 if (progressive_score > interlaced_score) {
1892 s->interlaced_dct = 1;
1894 dct_offset = wrap_y;
1896 if (s->chroma_format == CHROMA_422)
/* residual = source - prediction, per 8x8 block */
1902 s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
1903 s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
1904 s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
1905 dest_y + dct_offset, wrap_y);
1906 s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
1907 dest_y + dct_offset + 8, wrap_y);
1909 if (s->flags & CODEC_FLAG_GRAY) {
1913 s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
1914 s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
1915 if (!s->chroma_y_shift) { /* 422 */
1916 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
1917 dest_cb + (dct_offset >> 1), wrap_c);
1918 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
1919 dest_cr + (dct_offset >> 1), wrap_c);
1922 /* pre quantization */
/* step 5: on low-variance MBs, mark blocks whose SAD is below
 * 20*qscale as skippable (skip_dct[...] assignments are elided) */
1923 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
1924 2 * s->qscale * s->qscale) {
1926 if (s->dsp.sad[1](NULL, ptr_y , dest_y,
1927 wrap_y, 8) < 20 * s->qscale)
1929 if (s->dsp.sad[1](NULL, ptr_y + 8,
1930 dest_y + 8, wrap_y, 8) < 20 * s->qscale)
1932 if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
1933 dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
1935 if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
1936 dest_y + dct_offset + 8,
1937 wrap_y, 8) < 20 * s->qscale)
1939 if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
1940 wrap_c, 8) < 20 * s->qscale)
1942 if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
1943 wrap_c, 8) < 20 * s->qscale)
1945 if (!s->chroma_y_shift) { /* 422 */
1946 if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
1947 dest_cb + (dct_offset >> 1),
1948 wrap_c, 8) < 20 * s->qscale)
1950 if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
1951 dest_cr + (dct_offset >> 1),
1952 wrap_c, 8) < 20 * s->qscale)
/* step 6: per-block visual weights for noise shaping (guard
 * conditions on the individual calls are elided) */
1958 if (s->quantizer_noise_shaping) {
1960 get_visual_weight(weight[0], ptr_y , wrap_y);
1962 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
1964 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
1966 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
1968 get_visual_weight(weight[4], ptr_cb , wrap_c);
1970 get_visual_weight(weight[5], ptr_cr , wrap_c);
1971 if (!s->chroma_y_shift) { /* 422 */
1973 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
1976 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
/* keep a pristine copy of the coefficients for the refinement pass
 * (relies on s->block[] being contiguous) */
1979 memcpy(orig[0], s->block[0], sizeof(DCTELEM) * 64 * mb_block_count);
1982 /* DCT & quantize */
1983 assert(s->out_format != FMT_MJPEG || s->qscale == 8);
/* step 7: quantize each block (trellis vs plain selection elided) */
1985 for (i = 0; i < mb_block_count; i++) {
1988 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
1989 // FIXME we could decide to change to quantizer instead of
1991 // JS: I don't think that would be a good idea it could lower
1992 // quality instead of improve it. Just INTRADC clipping
1993 // deserves changes in quantizer
1995 clip_coeffs(s, s->block[i], s->block_last_index[i]);
1997 s->block_last_index[i] = -1;
1999 if (s->quantizer_noise_shaping) {
2000 for (i = 0; i < mb_block_count; i++) {
2002 s->block_last_index[i] =
2003 dct_quantize_refine(s, s->block[i], weight[i],
2004 orig[i], i, s->qscale);
/* single-coefficient elimination, luma and chroma separately */
2009 if (s->luma_elim_threshold && !s->mb_intra)
2010 for (i = 0; i < 4; i++)
2011 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2012 if (s->chroma_elim_threshold && !s->mb_intra)
2013 for (i = 4; i < mb_block_count; i++)
2014 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2016 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2017 for (i = 0; i < mb_block_count; i++) {
2018 if (s->block_last_index[i] == -1)
2019 s->coded_score[i] = INT_MAX / 256;
/* gray mode: force neutral chroma (mid-gray DC) on intra MBs */
2024 if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
2025 s->block_last_index[4] =
2026 s->block_last_index[5] = 0;
2028 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2031 // non c quantize code returns incorrect block_last_index FIXME
2032 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
/* recompute last_index by scanning backwards for the last non-zero */
2033 for (i = 0; i < mb_block_count; i++) {
2035 if (s->block_last_index[i] > 0) {
2036 for (j = 63; j > 0; j--) {
2037 if (s->block[i][s->intra_scantable.permutated[j]])
2040 s->block_last_index[i] = j;
2045 /* huffman encode */
/* step 8: dispatch to the codec-specific MB bitstream writer */
2046 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2047 case AV_CODEC_ID_MPEG1VIDEO:
2048 case AV_CODEC_ID_MPEG2VIDEO:
2049 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2050 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2052 case AV_CODEC_ID_MPEG4:
2053 if (CONFIG_MPEG4_ENCODER)
2054 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2056 case AV_CODEC_ID_MSMPEG4V2:
2057 case AV_CODEC_ID_MSMPEG4V3:
2058 case AV_CODEC_ID_WMV1:
2059 if (CONFIG_MSMPEG4_ENCODER)
2060 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2062 case AV_CODEC_ID_WMV2:
2063 if (CONFIG_WMV2_ENCODER)
2064 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2066 case AV_CODEC_ID_H261:
2067 if (CONFIG_H261_ENCODER)
2068 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2070 case AV_CODEC_ID_H263:
2071 case AV_CODEC_ID_H263P:
2072 case AV_CODEC_ID_FLV1:
2073 case AV_CODEC_ID_RV10:
2074 case AV_CODEC_ID_RV20:
2075 if (CONFIG_H263_ENCODER)
2076 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2078 case AV_CODEC_ID_MJPEG:
2079 if (CONFIG_MJPEG_ENCODER)
2080 ff_mjpeg_encode_mb(s, s->block);
/*
 * Thin chroma-format dispatcher for encode_mb_internal():
 * 4:2:0 -> chroma block height 8, 6 blocks per MB;
 * otherwise (4:2:2) -> chroma block height 16, 8 blocks per MB.
 */
2087 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2089 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
2090 else encode_mb_internal(s, motion_x, motion_y, 16, 8);
/*
 * Snapshot the encoder state that a trial MB encode can modify, copying it
 * from s into d (d = working context, s = saved backup).  Paired with
 * copy_context_after_encode(); used by encode_mb_hq() to try several MB
 * coding modes from identical starting state.  Some copied fields
 * (MV predictors, partitioned-frame bit writers) are on elided lines.
 */
2093 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* MV prediction state */
2096 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2099 d->mb_skip_run= s->mb_skip_run;
/* DC predictors (loop header elided; presumably i over 3 planes) */
2101 d->last_dc[i] = s->last_dc[i];
/* bit-usage statistics */
2104 d->mv_bits= s->mv_bits;
2105 d->i_tex_bits= s->i_tex_bits;
2106 d->p_tex_bits= s->p_tex_bits;
2107 d->i_count= s->i_count;
2108 d->f_count= s->f_count;
2109 d->b_count= s->b_count;
2110 d->skip_count= s->skip_count;
2111 d->misc_bits= s->misc_bits;
/* quantizer state */
2115 d->qscale= s->qscale;
2116 d->dquant= s->dquant;
2118 d->esc3_level_length= s->esc3_level_length;
/*
 * Copy the post-encode state of a trial MB from s into d (d = "best"
 * context).  Superset of copy_context_before_encode(): also records the
 * winning mode's MVs, intra/skip flags, block_last_index[] and, for
 * data-partitioned streams, the auxiliary bit writers, so the winning
 * trial can later be committed.  Called from encode_mb_hq().
 */
2121 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* motion vectors of the coded mode */
2124 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2125 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2128 d->mb_skip_run= s->mb_skip_run;
/* DC predictors (loop header elided; presumably i over 3 planes) */
2130 d->last_dc[i] = s->last_dc[i];
/* bit-usage statistics */
2133 d->mv_bits= s->mv_bits;
2134 d->i_tex_bits= s->i_tex_bits;
2135 d->p_tex_bits= s->p_tex_bits;
2136 d->i_count= s->i_count;
2137 d->f_count= s->f_count;
2138 d->b_count= s->b_count;
2139 d->skip_count= s->skip_count;
2140 d->misc_bits= s->misc_bits;
/* coding decisions of the trial */
2142 d->mb_intra= s->mb_intra;
2143 d->mb_skipped= s->mb_skipped;
2144 d->mv_type= s->mv_type;
2145 d->mv_dir= s->mv_dir;
2147 if(s->data_partitioning){
2149 d->tex_pb= s->tex_pb;
/* loop header elided; copies all block_last_index entries */
2153 d->block_last_index[i]= s->block_last_index[i];
2154 d->interlaced_dct= s->interlaced_dct;
2155 d->qscale= s->qscale;
2157 d->esc3_level_length= s->esc3_level_length;
/*
 * Trial-encode one macroblock in a given coding mode ("hq" mode decision).
 * Restores state from 'backup', encodes into one of two ping-pong bit
 * buffers (pb/pb2/tex_pb indexed by *next_block), scores the result in
 * bits — or, in FF_MB_DECISION_RD mode, decodes the MB into a scratchpad
 * and scores lambda2*bits + SSE — and, if the score beats *dmin (the
 * comparison/commit lines are elided), records the mode in 'best'.
 */
2160 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2161 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2162 int *dmin, int *next_block, int motion_x, int motion_y)
2165 uint8_t *dest_backup[3];
2167 copy_context_before_encode(s, backup, type);
/* ping-pong buffers: the current candidate writes into slot *next_block */
2169 s->block= s->blocks[*next_block];
2170 s->pb= pb[*next_block];
2171 if(s->data_partitioning){
2172 s->pb2 = pb2 [*next_block];
2173 s->tex_pb= tex_pb[*next_block];
/* redirect reconstruction into the scratchpad so the real picture is
 * untouched during the trial (enabling condition elided) */
2177 memcpy(dest_backup, s->dest, sizeof(s->dest));
2178 s->dest[0] = s->rd_scratchpad;
2179 s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2180 s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2181 assert(s->linesize >= 32); //FIXME
2184 encode_mb(s, motion_x, motion_y);
/* base score: bits written by this trial */
2186 score= put_bits_count(&s->pb);
2187 if(s->data_partitioning){
2188 score+= put_bits_count(&s->pb2);
2189 score+= put_bits_count(&s->tex_pb);
2192 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
/* full rate-distortion: decode and add distortion term */
2193 ff_MPV_decode_mb(s, s->block);
2195 score *= s->lambda2;
2196 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2200 memcpy(s->dest, dest_backup, sizeof(s->dest));
/* this trial won (guard elided): remember its full state */
2207 copy_context_after_encode(best, s, type);
/*
 * Sum of squared errors between two pixel regions of size w x h.
 * Fast DSP paths for the common 16x16 and 8x8 cases; the generic
 * fallback loop (headers elided) uses the squared-difference lookup
 * table ff_squareTbl, offset by 256 so negative differences index it
 * correctly.
 */
2211 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2212 uint32_t *sq = ff_squareTbl + 256;
2217 return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2218 else if(w==8 && h==8)
2219 return s->dsp.sse[1](NULL, src1, src2, stride, 8);
/* generic path: accumulate (src1-src2)^2 via the lookup table */
2223 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/*
 * Distortion of the current macroblock: SSE (or NSSE when mb_cmp selects
 * it) between the source picture and the reconstruction in s->dest[],
 * summed over luma (16x16) and both chroma planes (8x8, assuming 4:2:0
 * subsampling here).  Edge MBs that stick out of the picture fall through
 * to the generic clipped-size sse() path.
 */
2232 static int sse_mb(MpegEncContext *s){
/* clip MB dimensions at the right/bottom picture border */
2236 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2237 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
/* full-size MB: use the fast fixed-size comparators */
2240 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2241 return s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2242 +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2243 +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2245 return s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2246 +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2247 +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
/* partial MB at the border: generic SSE on the clipped region */
2250 return sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2251 +sse(s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2252 +sse(s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/*
 * Slice-thread worker: coarse pre-pass motion estimation over this
 * thread's macroblock rows, iterating bottom-up / right-to-left and using
 * the pre_dia_size search diameter.  arg is the per-thread MpegEncContext.
 */
2255 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2256 MpegEncContext *s= *(void**)arg;
2260 s->me.dia_size= s->avctx->pre_dia_size;
2261 s->first_slice_line=1;
/* reverse scan order (bottom-up) for the pre-pass */
2262 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2263 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2264 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2266 s->first_slice_line=0;
/*
 * Slice-thread worker: full motion estimation for this thread's MB rows.
 * Runs in normal raster order, maintaining block_index[] incrementally,
 * and dispatches to the B- or P-frame estimator per picture type.
 * arg is the per-thread MpegEncContext.
 */
2274 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2275 MpegEncContext *s= *(void**)arg;
2277 ff_check_alignment();
2279 s->me.dia_size= s->avctx->dia_size;
2280 s->first_slice_line=1;
2281 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2282 s->mb_x=0; //for block init below
2283 ff_init_block_index(s);
2284 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
/* advance the four luma block indices one MB to the right */
2285 s->block_index[0]+=2;
2286 s->block_index[1]+=2;
2287 s->block_index[2]+=2;
2288 s->block_index[3]+=2;
2290 /* compute motion vector & mb_type and store in context */
2291 if(s->pict_type==AV_PICTURE_TYPE_B)
2292 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2294 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2296 s->first_slice_line=0;
/*
 * Slice-thread worker: compute per-macroblock luma variance and mean for
 * this thread's rows.  varc = E[x^2] - E[x]^2 over the 16x16 luma block
 * (with +500 rounding bias), stored in current_picture.mb_var/mb_mean and
 * accumulated into me.mb_var_sum_temp for adaptive quantization.
 */
2301 static int mb_var_thread(AVCodecContext *c, void *arg){
2302 MpegEncContext *s= *(void**)arg;
2305 ff_check_alignment();
2307 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2308 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
/* xx/yy pixel coordinates computed on elided lines */
2311 uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2313 int sum = s->dsp.pix_sum(pix, s->linesize);
/* variance of the 256 luma pixels, fixed-point with >>8 scaling */
2315 varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2317 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2318 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2319 s->me.mb_var_sum_temp += varc;
/*
 * Finish the current slice's bitstream: merge MPEG-4 data partitions and
 * write stuffing, or MJPEG stuffing, then byte-align and flush the main
 * PutBitContext.  In pass-1 mode the alignment bits are accounted as
 * misc_bits (except for partitioned frames, handled elsewhere).
 */
2325 static void write_slice_end(MpegEncContext *s){
2326 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2327 if(s->partitioned_frame){
2328 ff_mpeg4_merge_partitions(s);
2331 ff_mpeg4_stuffing(&s->pb);
2332 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2333 ff_mjpeg_encode_stuffing(&s->pb);
2336 avpriv_align_put_bits(&s->pb);
2337 flush_put_bits(&s->pb);
2339 if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2340 s->misc_bits+= get_bits_diff(s);
/*
 * Fill the most recently reserved 12-byte slot of the H.263 MB-info side
 * data (AV_PKT_DATA_H263_MB_INFO): bit offset of the MB in the packet,
 * quantizer, GOB number, MB address within the GOB, and the first motion
 * vector predictor.  The 4MV second vector is always written as zero.
 * Layout must match the corresponding parser on the decode side.
 */
2343 static void write_mb_info(MpegEncContext *s)
/* point at the last 12-byte slot reserved by update_mb_info() */
2345 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2346 int offset = put_bits_count(&s->pb);
2347 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2348 int gobn = s->mb_y / s->gob_index;
2350 if (CONFIG_H263_ENCODER)
2351 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2352 bytestream_put_le32(&ptr, offset);
2353 bytestream_put_byte(&ptr, s->qscale);
2354 bytestream_put_byte(&ptr, gobn);
2355 bytestream_put_le16(&ptr, mba);
2356 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2357 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2358 /* 4MV not implemented */
2359 bytestream_put_byte(&ptr, 0); /* hmv2 */
2360 bytestream_put_byte(&ptr, 0); /* vmv2 */
/*
 * Bookkeeping for H.263 MB-info side data.  Called per macroblock (and
 * again with startcode set after a GOB/slice start code is written):
 * reserves a new 12-byte info slot whenever mb_info bytes of bitstream
 * have accumulated since the last slot, and on the startcode path records
 * the byte position the next slot should describe.  The early-return /
 * startcode branch lines are elided in this excerpt.
 */
2363 static void update_mb_info(MpegEncContext *s, int startcode)
2367 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2368 s->mb_info_size += 12;
2369 s->prev_mb_info = s->last_mb_info;
2372 s->prev_mb_info = put_bits_count(&s->pb)/8;
2373 /* This might have incremented mb_info_size above, and we return without
2374 * actually writing any info into that slot yet. But in that case,
2375 * this will be called again after writing the start code, and the
2376 * mb info will be written then. */
2380 s->last_mb_info = put_bits_count(&s->pb)/8;
2381 if (!s->mb_info_size)
2382 s->mb_info_size += 12;
/* Per-slice encoding worker, run via avctx->execute() once per slice context.
 * Encodes every macroblock in [s->start_mb_y, s->end_mb_y) into s->pb.
 * With mb_decision==FF_MB_DECISION_BITS or FF_MPV_FLAG_QP_RD it trial-encodes
 * each candidate MB type (and qscale) into double-buffered scratch
 * PutBitContexts and keeps the cheapest variant (encode_mb_hq / dmin).
 * NOTE(review): this is a sampled excerpt — lines are missing between the
 * embedded original line numbers, so control flow shown here is incomplete. */
2386 static int encode_thread(AVCodecContext *c, void *arg){
2387 MpegEncContext *s= *(void**)arg;
2388 int mb_x, mb_y, pdif = 0;
2389 int chr_h= 16>>s->chroma_y_shift;
2391 MpegEncContext best_s, backup_s;
/* double-buffered scratch bitstreams: [next_block] holds the current trial,
 * [next_block^1] holds the best encoding found so far */
2392 uint8_t bit_buf[2][MAX_MB_BYTES];
2393 uint8_t bit_buf2[2][MAX_MB_BYTES];
2394 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2395 PutBitContext pb[2], pb2[2], tex_pb[2];
2397 ff_check_alignment();
2400 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2401 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2402 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2405 s->last_bits= put_bits_count(&s->pb);
2416 /* init last dc values */
2417 /* note: quant matrix value (8) is implied here */
2418 s->last_dc[i] = 128 << s->intra_dc_precision;
2420 s->current_picture.f.error[i] = 0;
2423 memset(s->last_mv, 0, sizeof(s->last_mv));
2427 switch(s->codec_id){
2428 case AV_CODEC_ID_H263:
2429 case AV_CODEC_ID_H263P:
2430 case AV_CODEC_ID_FLV1:
2431 if (CONFIG_H263_ENCODER)
2432 s->gob_index = ff_h263_get_gob_height(s);
2434 case AV_CODEC_ID_MPEG4:
2435 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2436 ff_mpeg4_init_partitions(s);
2442 s->first_slice_line = 1;
2443 s->ptr_lastgob = s->pb.buf;
2444 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2448 ff_set_qscale(s, s->qscale);
2449 ff_init_block_index(s);
2451 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2452 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2453 int mb_type= s->mb_type[xy];
/* bail out if fewer than MAX_MB_BYTES remain in the output buffer */
2458 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2459 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2462 if(s->data_partitioning){
2463 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2464 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2465 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2471 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2472 ff_update_block_index(s);
2474 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2475 ff_h261_reorder_mb_index(s);
2476 xy= s->mb_y*s->mb_stride + s->mb_x;
2477 mb_type= s->mb_type[xy];
2480 /* write gob / video packet header */
2482 int current_packet_size, is_gob_start;
2484 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2486 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2488 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2490 switch(s->codec_id){
2491 case AV_CODEC_ID_H263:
2492 case AV_CODEC_ID_H263P:
2493 if(!s->h263_slice_structured)
2494 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2496 case AV_CODEC_ID_MPEG2VIDEO:
2497 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2498 case AV_CODEC_ID_MPEG1VIDEO:
2499 if(s->mb_skip_run) is_gob_start=0;
2504 if(s->start_mb_y != mb_y || mb_x!=0){
2507 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2508 ff_mpeg4_init_partitions(s);
2512 assert((put_bits_count(&s->pb)&7) == 0);
2513 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
/* error_rate: deliberately corrupt (drop) some packets for robustness tests */
2515 if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
2516 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2517 int d= 100 / s->avctx->error_rate;
2519 current_packet_size=0;
2520 s->pb.buf_ptr= s->ptr_lastgob;
2521 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2525 if (s->avctx->rtp_callback){
2526 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2527 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2529 update_mb_info(s, 1);
/* codec-specific resync header (video packet / slice / GOB) */
2531 switch(s->codec_id){
2532 case AV_CODEC_ID_MPEG4:
2533 if (CONFIG_MPEG4_ENCODER) {
2534 ff_mpeg4_encode_video_packet_header(s);
2535 ff_mpeg4_clean_buffers(s);
2538 case AV_CODEC_ID_MPEG1VIDEO:
2539 case AV_CODEC_ID_MPEG2VIDEO:
2540 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2541 ff_mpeg1_encode_slice_header(s);
2542 ff_mpeg1_clean_buffers(s);
2545 case AV_CODEC_ID_H263:
2546 case AV_CODEC_ID_H263P:
2547 if (CONFIG_H263_ENCODER)
2548 ff_h263_encode_gob_header(s, mb_y);
2552 if(s->flags&CODEC_FLAG_PASS1){
2553 int bits= put_bits_count(&s->pb);
2554 s->misc_bits+= bits - s->last_bits;
2558 s->ptr_lastgob += current_packet_size;
2559 s->first_slice_line=1;
2560 s->resync_mb_x=mb_x;
2561 s->resync_mb_y=mb_y;
2565 if( (s->resync_mb_x == s->mb_x)
2566 && s->resync_mb_y+1 == s->mb_y){
2567 s->first_slice_line=0;
2571 s->dquant=0; //only for QP_RD
2573 update_mb_info(s, 0);
2575 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2577 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2579 copy_context_before_encode(&backup_s, s, -1);
2581 best_s.data_partitioning= s->data_partitioning;
2582 best_s.partitioned_frame= s->partitioned_frame;
2583 if(s->data_partitioning){
2584 backup_s.pb2= s->pb2;
2585 backup_s.tex_pb= s->tex_pb;
/* trial-encode each candidate MB type flagged in mb_type; encode_mb_hq()
 * keeps the cheapest result in best_s / dmin / next_block */
2588 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2589 s->mv_dir = MV_DIR_FORWARD;
2590 s->mv_type = MV_TYPE_16X16;
2592 s->mv[0][0][0] = s->p_mv_table[xy][0];
2593 s->mv[0][0][1] = s->p_mv_table[xy][1];
2594 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2595 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2597 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2598 s->mv_dir = MV_DIR_FORWARD;
2599 s->mv_type = MV_TYPE_FIELD;
2602 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2603 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2604 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2606 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2607 &dmin, &next_block, 0, 0);
2609 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2610 s->mv_dir = MV_DIR_FORWARD;
2611 s->mv_type = MV_TYPE_16X16;
2615 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2616 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2618 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2619 s->mv_dir = MV_DIR_FORWARD;
2620 s->mv_type = MV_TYPE_8X8;
2623 s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
2624 s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
2626 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2627 &dmin, &next_block, 0, 0);
2629 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2630 s->mv_dir = MV_DIR_FORWARD;
2631 s->mv_type = MV_TYPE_16X16;
2633 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2634 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2635 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2636 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2638 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2639 s->mv_dir = MV_DIR_BACKWARD;
2640 s->mv_type = MV_TYPE_16X16;
2642 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2643 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2644 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2645 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2647 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2648 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2649 s->mv_type = MV_TYPE_16X16;
2651 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2652 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2653 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2654 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2655 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2656 &dmin, &next_block, 0, 0);
2658 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2659 s->mv_dir = MV_DIR_FORWARD;
2660 s->mv_type = MV_TYPE_FIELD;
2663 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2664 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2665 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2667 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2668 &dmin, &next_block, 0, 0);
2670 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2671 s->mv_dir = MV_DIR_BACKWARD;
2672 s->mv_type = MV_TYPE_FIELD;
2675 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2676 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2677 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2679 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2680 &dmin, &next_block, 0, 0);
2682 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2683 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2684 s->mv_type = MV_TYPE_FIELD;
2686 for(dir=0; dir<2; dir++){
2688 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2689 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2690 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2693 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2694 &dmin, &next_block, 0, 0);
2696 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2698 s->mv_type = MV_TYPE_16X16;
2702 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2703 &dmin, &next_block, 0, 0);
2704 if(s->h263_pred || s->h263_aic){
2706 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2708 ff_clean_intra_table_entries(s); //old mode?
/* QP_RD: retry the best MB type at neighbouring qscales (dquant -1,1,-2,2) */
2712 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2713 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2714 const int last_qp= backup_s.qscale;
2717 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2718 static const int dquant_tab[4]={-1,1,-2,2};
2720 assert(backup_s.dquant == 0);
2723 s->mv_dir= best_s.mv_dir;
2724 s->mv_type = MV_TYPE_16X16;
2725 s->mb_intra= best_s.mb_intra;
2726 s->mv[0][0][0] = best_s.mv[0][0][0];
2727 s->mv[0][0][1] = best_s.mv[0][0][1];
2728 s->mv[1][0][0] = best_s.mv[1][0][0];
2729 s->mv[1][0][1] = best_s.mv[1][0][1];
2731 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2732 for(; qpi<4; qpi++){
2733 int dquant= dquant_tab[qpi];
2734 qp= last_qp + dquant;
2735 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2737 backup_s.dquant= dquant;
/* save DC/AC prediction state so a rejected trial can be rolled back */
2738 if(s->mb_intra && s->dc_val[0]){
2740 dc[i]= s->dc_val[0][ s->block_index[i] ];
2741 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(DCTELEM)*16);
2745 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2746 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2747 if(best_s.qscale != qp){
2748 if(s->mb_intra && s->dc_val[0]){
2750 s->dc_val[0][ s->block_index[i] ]= dc[i];
2751 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(DCTELEM)*16);
2758 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2759 int mx= s->b_direct_mv_table[xy][0];
2760 int my= s->b_direct_mv_table[xy][1];
2762 backup_s.dquant = 0;
2763 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2765 ff_mpeg4_set_direct_mv(s, mx, my);
2766 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2767 &dmin, &next_block, mx, my);
2769 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2770 backup_s.dquant = 0;
2771 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2773 ff_mpeg4_set_direct_mv(s, 0, 0);
2774 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2775 &dmin, &next_block, 0, 0);
/* SKIP_RD: try coding the best inter MB with all coefficients dropped */
2777 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
2780 coded |= s->block_last_index[i];
2783 memcpy(s->mv, best_s.mv, sizeof(s->mv));
2784 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2785 mx=my=0; //FIXME find the one we actually used
2786 ff_mpeg4_set_direct_mv(s, mx, my);
2787 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2795 s->mv_dir= best_s.mv_dir;
2796 s->mv_type = best_s.mv_type;
2798 /* s->mv[0][0][0] = best_s.mv[0][0][0];
2799 s->mv[0][0][1] = best_s.mv[0][0][1];
2800 s->mv[1][0][0] = best_s.mv[1][0][0];
2801 s->mv[1][0][1] = best_s.mv[1][0][1];*/
2804 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2805 &dmin, &next_block, mx, my);
2810 s->current_picture.f.qscale_table[xy] = best_s.qscale;
/* commit the winning trial: restore best context and append its bits
 * from the scratch buffer into the real bitstream(s) */
2812 copy_context_after_encode(s, &best_s, -1);
2814 pb_bits_count= put_bits_count(&s->pb);
2815 flush_put_bits(&s->pb);
2816 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2819 if(s->data_partitioning){
2820 pb2_bits_count= put_bits_count(&s->pb2);
2821 flush_put_bits(&s->pb2);
2822 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2823 s->pb2= backup_s.pb2;
2825 tex_pb_bits_count= put_bits_count(&s->tex_pb);
2826 flush_put_bits(&s->tex_pb);
2827 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2828 s->tex_pb= backup_s.tex_pb;
2830 s->last_bits= put_bits_count(&s->pb);
2832 if (CONFIG_H263_ENCODER &&
2833 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2834 ff_h263_update_motion_val(s);
2836 if(next_block==0){ //FIXME 16 vs linesize16
2837 s->dsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
2838 s->dsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
2839 s->dsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2842 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
2843 ff_MPV_decode_mb(s, s->block);
/* fast path: exactly one candidate MB type, no RD trials needed */
2845 int motion_x = 0, motion_y = 0;
2846 s->mv_type=MV_TYPE_16X16;
2847 // only one MB-Type possible
2850 case CANDIDATE_MB_TYPE_INTRA:
2853 motion_x= s->mv[0][0][0] = 0;
2854 motion_y= s->mv[0][0][1] = 0;
2856 case CANDIDATE_MB_TYPE_INTER:
2857 s->mv_dir = MV_DIR_FORWARD;
2859 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2860 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2862 case CANDIDATE_MB_TYPE_INTER_I:
2863 s->mv_dir = MV_DIR_FORWARD;
2864 s->mv_type = MV_TYPE_FIELD;
2867 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2868 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2869 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2872 case CANDIDATE_MB_TYPE_INTER4V:
2873 s->mv_dir = MV_DIR_FORWARD;
2874 s->mv_type = MV_TYPE_8X8;
2877 s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
2878 s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
2881 case CANDIDATE_MB_TYPE_DIRECT:
2882 if (CONFIG_MPEG4_ENCODER) {
2883 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2885 motion_x=s->b_direct_mv_table[xy][0];
2886 motion_y=s->b_direct_mv_table[xy][1];
2887 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
2890 case CANDIDATE_MB_TYPE_DIRECT0:
2891 if (CONFIG_MPEG4_ENCODER) {
2892 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2894 ff_mpeg4_set_direct_mv(s, 0, 0);
2897 case CANDIDATE_MB_TYPE_BIDIR:
2898 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2900 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2901 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2902 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2903 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2905 case CANDIDATE_MB_TYPE_BACKWARD:
2906 s->mv_dir = MV_DIR_BACKWARD;
2908 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2909 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2911 case CANDIDATE_MB_TYPE_FORWARD:
2912 s->mv_dir = MV_DIR_FORWARD;
2914 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2915 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2917 case CANDIDATE_MB_TYPE_FORWARD_I:
2918 s->mv_dir = MV_DIR_FORWARD;
2919 s->mv_type = MV_TYPE_FIELD;
2922 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2923 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2924 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2927 case CANDIDATE_MB_TYPE_BACKWARD_I:
2928 s->mv_dir = MV_DIR_BACKWARD;
2929 s->mv_type = MV_TYPE_FIELD;
2932 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2933 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2934 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2937 case CANDIDATE_MB_TYPE_BIDIR_I:
2938 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2939 s->mv_type = MV_TYPE_FIELD;
2941 for(dir=0; dir<2; dir++){
2943 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2944 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2945 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2950 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
2953 encode_mb(s, motion_x, motion_y);
2955 // RAL: Update last macroblock type
2956 s->last_mv_dir = s->mv_dir;
2958 if (CONFIG_H263_ENCODER &&
2959 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2960 ff_h263_update_motion_val(s);
2962 ff_MPV_decode_mb(s, s->block);
2965 /* clean the MV table in IPS frames for direct mode in B frames */
2966 if(s->mb_intra /* && I,P,S_TYPE */){
2967 s->p_mv_table[xy][0]=0;
2968 s->p_mv_table[xy][1]=0;
/* PSNR: accumulate per-plane SSE against the source picture, clipped to
 * the picture edge for partial macroblocks */
2971 if(s->flags&CODEC_FLAG_PSNR){
2975 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2976 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2978 s->current_picture.f.error[0] += sse(
2979 s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
2980 s->dest[0], w, h, s->linesize);
2981 s->current_picture.f.error[1] += sse(
2982 s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
2983 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2984 s->current_picture.f.error[2] += sse(
2985 s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
2986 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2989 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
2990 ff_h263_loop_filter(s);
2992 av_dlog(s->avctx, "MB %d %d bits\n",
2993 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
2997 //not beautiful here but we must write it before flushing so it has to be here
2998 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
2999 ff_msmpeg4_encode_ext_header(s);
3003 /* Send the last GOB if RTP */
3004 if (s->avctx->rtp_callback) {
3005 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3006 pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3007 /* Call the RTP callback to send the last GOB */
3009 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3015 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold per-slice motion-estimation statistics from a worker context (src)
 * into the main context (dst); MERGE adds src's field into dst and zeroes
 * it in src. Called once per extra slice context after motion estimation. */
3016 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3017 MERGE(me.scene_change_score);
3018 MERGE(me.mc_mb_var_sum_temp);
3019 MERGE(me.mb_var_sum_temp);
/* Merge a worker slice context back into the main context after encoding:
 * accumulates statistics (dct counts, PSNR error sums, noise-reduction
 * error sums) and appends the worker's bitstream onto the main bitstream.
 * Both bitstreams must be byte-aligned at this point (asserted below). */
3022 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3025 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3026 MERGE(dct_count[1]);
3036 MERGE(padding_bug_score);
3037 MERGE(current_picture.f.error[0]);
3038 MERGE(current_picture.f.error[1]);
3039 MERGE(current_picture.f.error[2]);
3041 if(dst->avctx->noise_reduction){
3042 for(i=0; i<64; i++){
3043 MERGE(dct_error_sum[0][i]);
3044 MERGE(dct_error_sum[1][i]);
/* slice bitstreams are byte-aligned, so a plain byte copy is safe */
3048 assert(put_bits_count(&src->pb) % 8 ==0);
3049 assert(put_bits_count(&dst->pb) % 8 ==0);
3050 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3051 flush_put_bits(&dst->pb);
/* Choose the quantiser / lambda for the current picture.
 * Priority: an explicitly queued next_lambda, otherwise the rate-control
 * estimate (unless fixed_qscale).  With adaptive quant, per-MB qscales are
 * cleaned up codec-specifically and the qscale table is initialised.
 * dry_run!=0 means probe only (next_lambda is not consumed).
 * Returns 0 on success, negative on rate-control failure (error-return
 * lines fall outside this excerpt). */
3054 static int estimate_qp(MpegEncContext *s, int dry_run){
3055 if (s->next_lambda){
3056 s->current_picture_ptr->f.quality =
3057 s->current_picture.f.quality = s->next_lambda;
3058 if(!dry_run) s->next_lambda= 0;
3059 } else if (!s->fixed_qscale) {
3060 s->current_picture_ptr->f.quality =
3061 s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
3062 if (s->current_picture.f.quality < 0)
3066 if(s->adaptive_quant){
3067 switch(s->codec_id){
3068 case AV_CODEC_ID_MPEG4:
3069 if (CONFIG_MPEG4_ENCODER)
3070 ff_clean_mpeg4_qscales(s);
3072 case AV_CODEC_ID_H263:
3073 case AV_CODEC_ID_H263P:
3074 case AV_CODEC_ID_FLV1:
3075 if (CONFIG_H263_ENCODER)
3076 ff_clean_h263_qscales(s);
3079 ff_init_qscale_tab(s);
3082 s->lambda= s->lambda_table[0];
3085 s->lambda = s->current_picture.f.quality;
3090 /* must be called before writing the header */
/* Derive temporal distances from the current picture's pts:
 * pp_time = distance between the two surrounding non-B pictures,
 * pb_time = distance from the previous non-B picture to this B picture.
 * Used for B-frame direct-mode MV scaling and time codes. */
3091 static void set_frame_distances(MpegEncContext * s){
3092 assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
3093 s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3095 if(s->pict_type==AV_PICTURE_TYPE_B){
3096 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3097 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3099 s->pp_time= s->time - s->last_non_b_time;
3100 s->last_non_b_time= s->time;
3101 assert(s->picture_number==0 || s->pp_time > 0);
/* Encode one picture: set up timing and rounding mode, run rate control,
 * perform (threaded) motion estimation, pick f_code/b_code, possibly retype
 * the picture on scene change, write the codec-specific picture header and
 * finally dispatch encode_thread() over all slice contexts.
 * Returns 0 on success, negative on error (error paths fall in gaps of
 * this excerpt). */
3105 static int encode_picture(MpegEncContext *s, int picture_number)
3109 int context_count = s->slice_context_count;
3111 s->picture_number = picture_number;
3113 /* Reset the average MB variance */
3114 s->me.mb_var_sum_temp =
3115 s->me.mc_mb_var_sum_temp = 0;
3117 /* we need to initialize some time vars before we can encode b-frames */
3118 // RAL: Condition added for MPEG1VIDEO
3119 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3120 set_frame_distances(s);
3121 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3122 ff_set_mpeg4_time(s);
3124 s->me.scene_change_score=0;
3126 //    s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* rounding mode: I frames reset it; P/S frames may toggle it (flipflop) so
 * repeated motion compensation does not drift */
3128 if(s->pict_type==AV_PICTURE_TYPE_I){
3129 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3130 else s->no_rounding=0;
3131 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3132 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3133 s->no_rounding ^= 1;
3136 if(s->flags & CODEC_FLAG_PASS2){
3137 if (estimate_qp(s,1) < 0)
3139 ff_get_2pass_fcode(s);
3140 }else if(!(s->flags & CODEC_FLAG_QSCALE)){
3141 if(s->pict_type==AV_PICTURE_TYPE_B)
3142 s->lambda= s->last_lambda_for[s->pict_type];
3144 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3148 s->mb_intra=0; //for the rate distortion & bit compare functions
3149 for(i=1; i<context_count; i++){
3150 ff_update_duplicate_context(s->thread_context[i], s);
3156 /* Estimate motion for every MB */
3157 if(s->pict_type != AV_PICTURE_TYPE_I){
3158 s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3159 s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3160 if(s->pict_type != AV_PICTURE_TYPE_B && s->avctx->me_threshold==0){
3161 if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3162 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3166 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3167 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3169 for(i=0; i<s->mb_stride*s->mb_height; i++)
3170 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3172 if(!s->fixed_qscale){
3173 /* finding spatial complexity for I-frame rate control */
3174 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3177 for(i=1; i<context_count; i++){
3178 merge_context_after_me(s, s->thread_context[i]);
3180 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3181 s->current_picture.   mb_var_sum= s->current_picture_ptr->   mb_var_sum= s->me.   mb_var_sum_temp;
/* scene-change detection: promote a P frame to I when ME says so */
3184 if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3185 s->pict_type= AV_PICTURE_TYPE_I;
3186 for(i=0; i<s->mb_stride*s->mb_height; i++)
3187 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3188 av_dlog(s, "Scene change detected, encoding as I Frame %d %d\n",
3189 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* pick MV range codes and clamp over-long vectors to the chosen range */
3193 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3194 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3196 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3198 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3199 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3200 s->f_code= FFMAX3(s->f_code, a, b);
3203 ff_fix_long_p_mvs(s);
3204 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3205 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3209 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3210 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
3215 if(s->pict_type==AV_PICTURE_TYPE_B){
3218 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3219 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3220 s->f_code = FFMAX(a, b);
3222 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3223 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3224 s->b_code = FFMAX(a, b);
3226 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3227 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3228 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3229 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3230 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3232 for(dir=0; dir<2; dir++){
3235 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3236 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3237 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3238 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3246 if (estimate_qp(s, 0) < 0)
3249 if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
3250 s->qscale= 3; //reduce clipping problems
3252 if (s->out_format == FMT_MJPEG) {
3253 /* for mjpeg, we do include qscale in the matrix */
3255 int j= s->dsp.idct_permutation[i];
3257 s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3259 s->y_dc_scale_table=
3260 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3261 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3262 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3263 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3267 //FIXME var duplication
3268 s->current_picture_ptr->f.key_frame =
3269 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3270 s->current_picture_ptr->f.pict_type =
3271 s->current_picture.f.pict_type = s->pict_type;
3273 if (s->current_picture.f.key_frame)
3274 s->picture_in_gop_number=0;
/* write the codec-specific picture header; header_bits is measured below */
3276 s->last_bits= put_bits_count(&s->pb);
3277 switch(s->out_format) {
3279 if (CONFIG_MJPEG_ENCODER)
3280 ff_mjpeg_encode_picture_header(s);
3283 if (CONFIG_H261_ENCODER)
3284 ff_h261_encode_picture_header(s, picture_number);
3287 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3288 ff_wmv2_encode_picture_header(s, picture_number);
3289 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3290 ff_msmpeg4_encode_picture_header(s, picture_number);
3291 else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3292 ff_mpeg4_encode_picture_header(s, picture_number);
3293 else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
3294 ff_rv10_encode_picture_header(s, picture_number);
3295 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3296 ff_rv20_encode_picture_header(s, picture_number);
3297 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3298 ff_flv_encode_picture_header(s, picture_number);
3299 else if (CONFIG_H263_ENCODER)
3300 ff_h263_encode_picture_header(s, picture_number);
3303 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3304 ff_mpeg1_encode_picture_header(s, picture_number);
3311 bits= put_bits_count(&s->pb);
3312 s->header_bits= bits - s->last_bits;
/* fan out to slice threads, then merge their stats and bitstreams back */
3314 for(i=1; i<context_count; i++){
3315 update_duplicate_context_after_me(s->thread_context[i], s);
3317 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3318 for(i=1; i<context_count; i++){
3319 merge_context_after_encode(s, s->thread_context[i]);
/* DCT-domain noise reduction (C reference): for each of the 64 coefficients,
 * accumulate the coefficient magnitude into dct_error_sum[intra][] and shrink
 * the coefficient towards zero by dct_offset[intra][], clamping so the sign
 * cannot flip.  Separate statistics are kept for intra and inter blocks. */
3325 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block){
3326 const int intra= s->mb_intra;
3329 s->dct_count[intra]++;
3331 for(i=0; i<64; i++){
3332 int level= block[i];
/* positive coefficient branch: subtract offset, clamp at zero */
3336 s->dct_error_sum[intra][i] += level;
3337 level -= s->dct_offset[intra][i];
3338 if(level<0) level=0;
/* negative coefficient branch: add offset, clamp at zero */
3340 s->dct_error_sum[intra][i] -= level;
3341 level += s->dct_offset[intra][i];
3342 if(level>0) level=0;
/* Rate-distortion (trellis) quantisation of one 8x8 block.
 * Runs the forward DCT, computes up to two candidate quantised levels per
 * coefficient, then performs a Viterbi-style search over (run, level) paths
 * using the VLC bit lengths (length/last_length tables) weighted by lambda
 * against the dequantisation distortion.  Writes the winning levels back
 * into block[] in scan order and returns the index of the last nonzero
 * coefficient (-1 if the block quantises to all zero).
 * *overflow is set when a level exceeds s->max_qcoeff. */
3349 static int dct_quantize_trellis_c(MpegEncContext *s,
3350 DCTELEM *block, int n,
3351 int qscale, int *overflow){
3353 const uint8_t *scantable= s->intra_scantable.scantable;
3354 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3356 unsigned int threshold1, threshold2;
3368 int coeff_count[64];
3369 int qmul, qadd, start_i, last_non_zero, i, dc;
3370 const int esc_length= s->ac_esc_length;
3372 uint8_t * last_length;
3373 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3375 s->dsp.fdct (block);
3377 if(s->dct_error_sum)
3378 s->denoise_dct(s, block);
3380 qadd= ((qscale-1)|1)*8;
3391 /* For AIC we skip quant/dequant of INTRADC */
3396 /* note: block[0] is assumed to be positive */
3397 block[0] = (block[0] + (q >> 1)) / q;
3400 qmat = s->q_intra_matrix[qscale];
3401 if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3402 bias= 1<<(QMAT_SHIFT-1);
3403 length     = s->intra_ac_vlc_length;
3404 last_length= s->intra_ac_vlc_last_length;
3408 qmat = s->q_inter_matrix[qscale];
3409 length     = s->inter_ac_vlc_length;
3410 last_length= s->inter_ac_vlc_last_length;
3414 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3415 threshold2= (threshold1<<1);
/* scan backwards to find the last coefficient that survives quantisation */
3417 for(i=63; i>=start_i; i--) {
3418 const int j = scantable[i];
3419 int level = block[j] * qmat[j];
3421 if(((unsigned)(level+threshold1))>threshold2){
/* build up to two candidate levels (level and level-1) per coefficient */
3427 for(i=start_i; i<=last_non_zero; i++) {
3428 const int j = scantable[i];
3429 int level = block[j] * qmat[j];
3431 //        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
3432 //           || bias-level >= (1<<(QMAT_SHIFT - 3))){
3433 if(((unsigned)(level+threshold1))>threshold2){
3435 level= (bias + level)>>QMAT_SHIFT;
3437 coeff[1][i]= level-1;
3438 //                coeff[2][k]= level-2;
3440 level= (bias - level)>>QMAT_SHIFT;
3441 coeff[0][i]= -level;
3442 coeff[1][i]= -level+1;
3443 //                coeff[2][k]= -level+2;
3445 coeff_count[i]= FFMIN(level, 2);
3446 assert(coeff_count[i]);
3449 coeff[0][i]= (level>>31)|1;
3454 *overflow= s->max_qcoeff < max; //overflow might have happened
3456 if(last_non_zero < start_i){
3457 memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
3458 return last_non_zero;
3461 score_tab[start_i]= 0;
3462 survivor[0]= start_i;
/* dynamic-programming sweep: for each coefficient position, extend every
 * surviving path with each candidate level and keep the cheapest */
3465 for(i=start_i; i<=last_non_zero; i++){
3466 int level_index, j, zero_distortion;
3467 int dct_coeff= FFABS(block[ scantable[i] ]);
3468 int best_score=256*256*256*120;
3470 if (s->dsp.fdct == ff_fdct_ifast)
3471 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3472 zero_distortion= dct_coeff*dct_coeff;
3474 for(level_index=0; level_index < coeff_count[i]; level_index++){
3476 int level= coeff[level_index][i];
3477 const int alevel= FFABS(level);
/* dequantise the candidate to measure its reconstruction distortion */
3482 if(s->out_format == FMT_H263){
3483 unquant_coeff= alevel*qmul + qadd;
3485 j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
3487 unquant_coeff = (int)(  alevel  * qscale * s->intra_matrix[j]) >> 3;
3488 unquant_coeff =   (unquant_coeff - 1) | 1;
3490 unquant_coeff = (((  alevel  << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3491 unquant_coeff =   (unquant_coeff - 1) | 1;
3496 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* |level| < 128: normal VLC; otherwise the escape code is needed */
3498 if((level&(~127)) == 0){
3499 for(j=survivor_count-1; j>=0; j--){
3500 int run= i - survivor[j];
3501 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3502 score += score_tab[i-run];
3504 if(score < best_score){
3507 level_tab[i+1]= level-64;
3511 if(s->out_format == FMT_H263){
3512 for(j=survivor_count-1; j>=0; j--){
3513 int run= i - survivor[j];
3514 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3515 score += score_tab[i-run];
3516 if(score < last_score){
3519 last_level= level-64;
3525 distortion += esc_length*lambda;
3526 for(j=survivor_count-1; j>=0; j--){
3527 int run= i - survivor[j];
3528 int score= distortion + score_tab[i-run];
3530 if(score < best_score){
3533 level_tab[i+1]= level-64;
3537 if(s->out_format == FMT_H263){
3538 for(j=survivor_count-1; j>=0; j--){
3539 int run= i - survivor[j];
3540 int score= distortion + score_tab[i-run];
3541 if(score < last_score){
3544 last_level= level-64;
3552 score_tab[i+1]= best_score;
/* prune paths that can no longer win; keep more slack for long runs */
3554 //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
3555 if(last_non_zero <= 27){
3556 for(; survivor_count; survivor_count--){
3557 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
3561 for(; survivor_count; survivor_count--){
3562 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
3567 survivor[ survivor_count++ ]= i+1;
/* non-H.263 formats: choose where to terminate the block */
3570 if(s->out_format != FMT_H263){
3571 last_score= 256*256*256*120;
3572 for(i= survivor[0]; i<=last_non_zero + 1; i++){
3573 int score= score_tab[i];
3574 if(i) score += lambda*2; //FIXME exacter?
3576 if(score < last_score){
3579 last_level= level_tab[i];
3580 last_run= run_tab[i];
3585 s->coded_score[n] = last_score;
3587 dc= FFABS(block[0]);
3588 last_non_zero= last_i - 1;
3589 memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
3591 if(last_non_zero < start_i)
3592 return last_non_zero;
/* special case: only the first AC coefficient survived — decide whether
 * coding it is worth the bits versus dropping it entirely */
3594 if(last_non_zero == 0 && start_i == 0){
3596 int best_score= dc * dc;
3598 for(i=0; i<coeff_count[0]; i++){
3599 int level= coeff[i][0];
3600 int alevel= FFABS(level);
3601 int unquant_coeff, score, distortion;
3603 if(s->out_format == FMT_H263){
3604 unquant_coeff= (alevel*qmul + qadd)>>3;
3606 unquant_coeff = (((  alevel  << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
3607 unquant_coeff =   (unquant_coeff - 1) | 1;
3609 unquant_coeff = (unquant_coeff + 4) >> 3;
3610 unquant_coeff<<= 3 + 3;
3612 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
3614 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
3615 else                  score= distortion + esc_length*lambda;
3617 if(score < best_score){
3619 best_level= level - 64;
3622 block[0]= best_level;
3623 s->coded_score[n] = best_score - dc*dc;
3624 if(best_level == 0) return -1;
3625 else                return last_non_zero;
/* backtrack the winning path and write levels into the permuted block */
3631 block[ perm_scantable[last_non_zero] ]= last_level;
3634 for(; i>start_i; i -= run_tab[i] + 1){
3635 block[ perm_scantable[i-1] ]= level_tab[i];
3638 return last_non_zero;
3641 //#define REFINE_STATS 1
// Scaled fixed-point 8x8 IDCT basis functions, indexed by (permuted)
// coefficient position; filled lazily by build_basis() on first use.
3642 static int16_t basis[64][64];

// Precompute the DCT basis table used by dct_quantize_refine():
// each entry holds the contribution of coefficient (i,j) to pixel
// (x,y), scaled by 2^BASIS_SHIFT / 4 and stored in the IDCT's
// permuted coefficient order.
// NOTE(review): the loop headers over i/j/x/y and the index
// computation are elided in this excerpt; the visible lines are the
// innermost loop body.
3644 static void build_basis(uint8_t *perm){
3651 double s= 0.25*(1<<BASIS_SHIFT);
// map the natural scan position to the IDCT's permuted order
3653 int perm_index= perm[index];
// DC row/column of the DCT gets the 1/sqrt(2) normalization factor
3654 if(i==0) s*= sqrt(0.5);
3655 if(j==0) s*= sqrt(0.5);
3656 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
// Rate-distortion refinement of an already-quantized 8x8 block (the
// quantizer-noise-shaping path).  Starting from the current
// quantization it repeatedly tries +/-1 changes to individual
// coefficients and keeps a change only while it lowers the combined
// distortion + bits*lambda score, measured through
// dsp.try_8x8basis()/add_8x8basis() against the basis[] table.
// Returns the (possibly changed) last-nonzero scan index of block[].
// NOTE(review): many lines of this function are elided in this
// excerpt; the comments below describe only the visible code.
3663 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
3664 DCTELEM *block, int16_t *weight, DCTELEM *orig,
3667 LOCAL_ALIGNED_16(DCTELEM, d1, [64]);
3668 const uint8_t *scantable= s->intra_scantable.scantable;
3669 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3670 // unsigned int threshold1, threshold2;
3675 int qmul, qadd, start_i, last_non_zero, i, dc;
3677 uint8_t * last_length;
3679 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
// statistics counters (REFINE_STATS builds only)
3682 static int after_last=0;
3683 static int to_zero=0;
3684 static int from_zero=0;
3687 static int messed_sign=0;
// lazily build the fixed-point IDCT basis table on first call
3690 if(basis[0][0] == 0)
3691 build_basis(s->dsp.idct_permutation);
3702 /* For AIC we skip quant/dequant of INTRADC */
3706 q <<= RECON_SHIFT-3;
3707 /* note: block[0] is assumed to be positive */
3709 // block[0] = (block[0] + (q >> 1)) / q;
3711 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3712 // bias= 1<<(QMAT_SHIFT-1);
// VLC bit-length tables used to price (run,level) pairs; intra branch
// here, the matching inter branch follows below (selector elided)
3713 length = s->intra_ac_vlc_length;
3714 last_length= s->intra_ac_vlc_last_length;
3718 length = s->inter_ac_vlc_length;
3719 last_length= s->inter_ac_vlc_last_length;
3721 last_non_zero = s->block_last_index[n];
// rem[] = reconstruction error versus orig[], in RECON_SHIFT fixed point
3726 dc += (1<<(RECON_SHIFT-1));
3727 for(i=0; i<64; i++){
3728 rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig dirrectly instead of copying to rem[]
3731 STOP_TIMER("memset rem[]")}
// derive per-coefficient noise-shaping weights, clamped into 16..63
3734 for(i=0; i<64; i++){
3739 w= FFABS(weight[i]) + qns*one;
3740 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
3743 // w=weight[i] = (63*qns + (w/2)) / w;
3749 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
// RLE pass: record run lengths and fold each dequantized
// coefficient's contribution into rem[]
3755 for(i=start_i; i<=last_non_zero; i++){
3756 int j= perm_scantable[i];
3757 const int level= block[j];
3761 if(level<0) coeff= qmul*level - qadd;
3762 else coeff= qmul*level + qadd;
3763 run_tab[rle_index++]=run;
3766 s->dsp.add_8x8basis(rem, basis[j], coeff);
3772 if(last_non_zero>0){
3773 STOP_TIMER("init rem[]")
// score of leaving the block unchanged; any accepted change must beat it
3780 int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
3783 int run2, best_unquant_change=0, analyze_gradient;
3787 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
// optional gradient pre-pass: d1[] holds the weighted residual, used
// later to filter zero->one candidates by sign
3789 if(analyze_gradient){
3793 for(i=0; i<64; i++){
3796 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
3799 STOP_TIMER("rem*w*w")}
// intra DC is handled separately with its uniform quantizer q
3809 const int level= block[0];
3810 int change, old_coeff;
3812 assert(s->mb_intra);
3816 for(change=-1; change<=1; change+=2){
3817 int new_level= level + change;
3818 int score, new_coeff;
3820 new_coeff= q*new_level;
// new DC must stay inside [0, 2048) (rejection branch elided)
3821 if(new_coeff >= 2048 || new_coeff < 0)
3824 score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
3825 if(score<best_score){
3828 best_change= change;
3829 best_unquant_change= new_coeff - old_coeff;
3836 run2= run_tab[rle_index++];
// main loop: at each scan position try lowering/raising the level by 1
3840 for(i=start_i; i<64; i++){
3841 int j= perm_scantable[i];
3842 const int level= block[j];
3843 int change, old_coeff;
// below noise-shaping level 3, don't search past last_non_zero + 1
3845 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
3849 if(level<0) old_coeff= qmul*level - qadd;
3850 else old_coeff= qmul*level + qadd;
3851 run2= run_tab[rle_index++]; //FIXME ! maybe after last
3855 assert(run2>=0 || i >= last_non_zero );
3858 for(change=-1; change<=1; change+=2){
3859 int new_level= level + change;
3860 int score, new_coeff, unquant_change;
// below noise-shaping level 2, never increase a level's magnitude
3863 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
3867 if(new_level<0) new_coeff= qmul*new_level - qadd;
3868 else new_coeff= qmul*new_level + qadd;
3869 if(new_coeff >= 2048 || new_coeff <= -2048)
3871 //FIXME check for overflow
// level stays in VLC range: price the bit-count delta of the change
3874 if(level < 63 && level > -63){
3875 if(i < last_non_zero)
3876 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
3877 - length[UNI_AC_ENC_INDEX(run, level+64)];
3879 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
3880 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
// a previously-zero coefficient becomes +/-1: a run is split in two
3883 assert(FFABS(new_level)==1);
3885 if(analyze_gradient){
3886 int g= d1[ scantable[i] ];
// sign test against the residual gradient (action elided here)
3887 if(g && (g^new_level) >= 0)
3891 if(i < last_non_zero){
3892 int next_i= i + run2 + 1;
3893 int next_level= block[ perm_scantable[next_i] ] + 64;
3895 if(next_level&(~127))
3898 if(next_i < last_non_zero)
3899 score += length[UNI_AC_ENC_INDEX(run, 65)]
3900 + length[UNI_AC_ENC_INDEX(run2, next_level)]
3901 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3903 score += length[UNI_AC_ENC_INDEX(run, 65)]
3904 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3905 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3907 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
3909 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3910 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
// a +/-1 coefficient becomes zero: two runs merge into one
3916 assert(FFABS(level)==1);
3918 if(i < last_non_zero){
3919 int next_i= i + run2 + 1;
3920 int next_level= block[ perm_scantable[next_i] ] + 64;
3922 if(next_level&(~127))
3925 if(next_i < last_non_zero)
3926 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3927 - length[UNI_AC_ENC_INDEX(run2, next_level)]
3928 - length[UNI_AC_ENC_INDEX(run, 65)];
3930 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3931 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3932 - length[UNI_AC_ENC_INDEX(run, 65)];
3934 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
3936 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3937 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
3944 unquant_change= new_coeff - old_coeff;
3945 assert((score < 100*lambda && score > -100*lambda) || lambda==0);
// add the distortion delta this unquantized change would cause
3947 score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
3948 if(score<best_score){
3951 best_change= change;
3952 best_unquant_change= unquant_change;
3956 prev_level= level + 64;
3957 if(prev_level&(~127))
3966 STOP_TIMER("iterative step")}
// apply the best change found in this iteration
3970 int j= perm_scantable[ best_coeff ];
3972 block[j] += best_change;
3974 if(best_coeff > last_non_zero){
3975 last_non_zero= best_coeff;
// REFINE_STATS bookkeeping: classify the kind of change applied
3983 if(block[j] - best_change){
3984 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
// a coefficient was zeroed: shrink last_non_zero accordingly
3996 for(; last_non_zero>=start_i; last_non_zero--){
3997 if(block[perm_scantable[last_non_zero]])
4003 if(256*256*256*64 % count == 0){
4004 printf("after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
// rebuild run_tab for the modified block and update rem[]
4009 for(i=start_i; i<=last_non_zero; i++){
4010 int j= perm_scantable[i];
4011 const int level= block[j];
4014 run_tab[rle_index++]=run;
4021 s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
4027 if(last_non_zero>0){
4028 STOP_TIMER("iterative search")
4033 return last_non_zero;
// Generic C implementation of forward DCT + quantization for one 8x8
// block: runs the fdct, optionally denoises, quantizes block[] in
// scan order using the codec's quant matrix and bias, reports
// possible coefficient saturation through *overflow, and returns the
// index of the last nonzero coefficient.
// NOTE(review): several lines are elided in this excerpt; comments
// describe only the visible code.
4036 int ff_dct_quantize_c(MpegEncContext *s,
4037 DCTELEM *block, int n,
4038 int qscale, int *overflow)
4040 int i, j, level, last_non_zero, q, start_i;
4042 const uint8_t *scantable= s->intra_scantable.scantable;
4045 unsigned int threshold1, threshold2;
4047 s->dsp.fdct (block);
// feed the DCT-noise tracker when denoising is enabled
4049 if(s->dct_error_sum)
4050 s->denoise_dct(s, block);
4060 /* For AIC we skip quant/dequant of INTRADC */
4063 /* note: block[0] is assumed to be positive */
// intra DC: simple uniform quantizer with rounding
4064 block[0] = (block[0] + (q >> 1)) / q;
// intra branch: quant matrix and bias for intra blocks ...
4067 qmat = s->q_intra_matrix[qscale];
4068 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
// ... inter branch: quant matrix and bias for inter blocks
4072 qmat = s->q_inter_matrix[qscale];
4073 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
// one unsigned compare below tests level outside [-threshold1, threshold1]
4075 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4076 threshold2= (threshold1<<1);
// scan backwards for the last coefficient that survives quantization
4077 for(i=63;i>=start_i;i--) {
4079 level = block[j] * qmat[j];
4081 if(((unsigned)(level+threshold1))>threshold2){
// quantize the surviving coefficients in scan order
4088 for(i=start_i; i<=last_non_zero; i++) {
4090 level = block[j] * qmat[j];
4092 // if( bias+level >= (1<<QMAT_SHIFT)
4093 // || bias-level >= (1<<QMAT_SHIFT)){
4094 if(((unsigned)(level+threshold1))>threshold2){
4096 level= (bias + level)>>QMAT_SHIFT;
4099 level= (bias - level)>>QMAT_SHIFT;
4107 *overflow= s->max_qcoeff < max; //overflow might have happened
4109 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4110 if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
4111 ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
4113 return last_non_zero;
4116 #define OFFSET(x) offsetof(MpegEncContext, x)
4117 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
// Private AVOptions for the plain H.263 encoder (all boolean/int,
// video+encoding scope via VE).
4118 static const AVOption h263_options[] = {
4119 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4120 { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4121 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
// AVClass exposing h263_options on the H.263 encoder's private context.
4126 static const AVClass h263_class = {
4127 .class_name = "H.263 encoder",
4128 .item_name = av_default_item_name,
4129 .option = h263_options,
4130 .version = LIBAVUTIL_VERSION_INT,
// Registration of the plain H.263 encoder; all entry points come from
// the shared MPV (mpegvideo) encoder implementation.
4133 AVCodec ff_h263_encoder = {
4135 .type = AVMEDIA_TYPE_VIDEO,
4136 .id = AV_CODEC_ID_H263,
4137 .priv_data_size = sizeof(MpegEncContext),
4138 .init = ff_MPV_encode_init,
4139 .encode2 = ff_MPV_encode_picture,
4140 .close = ff_MPV_encode_end,
4141 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
4142 .long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4143 .priv_class = &h263_class,
// Private AVOptions for the H.263+ encoder (unlimited MVs, alternative
// inter VLC, OBMC, structured slices).
4146 static const AVOption h263p_options[] = {
4147 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4148 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4149 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4150 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
// AVClass exposing h263p_options on the H.263+ encoder's private context.
4154 static const AVClass h263p_class = {
4155 .class_name = "H.263p encoder",
4156 .item_name = av_default_item_name,
4157 .option = h263p_options,
4158 .version = LIBAVUTIL_VERSION_INT,
// Registration of the H.263+ (H.263v2) encoder; shares the MPV entry
// points and additionally advertises slice-threading capability.
4161 AVCodec ff_h263p_encoder = {
4163 .type = AVMEDIA_TYPE_VIDEO,
4164 .id = AV_CODEC_ID_H263P,
4165 .priv_data_size = sizeof(MpegEncContext),
4166 .init = ff_MPV_encode_init,
4167 .encode2 = ff_MPV_encode_picture,
4168 .close = ff_MPV_encode_end,
4169 .capabilities = CODEC_CAP_SLICE_THREADS,
4170 .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
4171 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4172 .priv_class = &h263p_class,
// Generate msmpeg4v2_class via the shared MPV AVClass boilerplate macro.
4175 FF_MPV_GENERIC_CLASS(msmpeg4v2)
// Registration of the MS-MPEG4 v2 encoder; shares the MPV entry points.
4177 AVCodec ff_msmpeg4v2_encoder = {
4178 .name = "msmpeg4v2",
4179 .type = AVMEDIA_TYPE_VIDEO,
4180 .id = AV_CODEC_ID_MSMPEG4V2,
4181 .priv_data_size = sizeof(MpegEncContext),
4182 .init = ff_MPV_encode_init,
4183 .encode2 = ff_MPV_encode_picture,
4184 .close = ff_MPV_encode_end,
4185 .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
4186 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4187 .priv_class = &msmpeg4v2_class,
// Generate msmpeg4v3_class via the shared MPV AVClass boilerplate macro.
4190 FF_MPV_GENERIC_CLASS(msmpeg4v3)
// Registration of the MS-MPEG4 v3 encoder; shares the MPV entry points.
4192 AVCodec ff_msmpeg4v3_encoder = {
4194 .type = AVMEDIA_TYPE_VIDEO,
4195 .id = AV_CODEC_ID_MSMPEG4V3,
4196 .priv_data_size = sizeof(MpegEncContext),
4197 .init = ff_MPV_encode_init,
4198 .encode2 = ff_MPV_encode_picture,
4199 .close = ff_MPV_encode_end,
4200 .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
4201 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4202 .priv_class = &msmpeg4v3_class,
// Generate wmv1_class via the shared MPV AVClass boilerplate macro.
4205 FF_MPV_GENERIC_CLASS(wmv1)
4207 AVCodec ff_wmv1_encoder = {
4209 .type = AVMEDIA_TYPE_VIDEO,
4210 .id = AV_CODEC_ID_WMV1,
4211 .priv_data_size = sizeof(MpegEncContext),
4212 .init = ff_MPV_encode_init,
4213 .encode2 = ff_MPV_encode_picture,
4214 .close = ff_MPV_encode_end,
4215 .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P, PIX_FMT_NONE },
4216 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4217 .priv_class = &wmv1_class,