2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/internal.h"
31 #include "libavutil/intmath.h"
32 #include "libavutil/mathematics.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/opt.h"
38 #include "mpegvideo.h"
45 #include "aandcttab.h"
47 #include "mpeg4video.h"
49 #include "bytestream.h"
/* Forward declarations for encoder-local helpers defined later in this file
 * (all file-scope static, so they stay out of the global namespace). */
55 static int encode_picture(MpegEncContext *s, int picture_number);
56 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
57 static int sse_mb(MpegEncContext *s);
58 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
59 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
61 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default motion-vector penalty / f_code lookup tables, filled in by
 * MPV_encode_defaults() below and shared by all encoder instances. */
66 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
67 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
/* Generic AVOption table shared by the mpegvideo-family encoders.
 * NOTE(review): the option entries themselves are not visible in this
 * extract — the array body appears to have been elided. */
69 const AVOption ff_mpv_generic_options[] = {
/**
 * Build the per-qscale quantization multiplier tables from a quant matrix.
 *
 * For every qscale in [qmin, qmax] this fills qmat[qscale] (32-bit
 * fixed-point reciprocals used by the C quantizer) and, for the generic
 * path, qmat16[qscale] (16-bit reciprocal + bias pair used by the SIMD
 * quantizer).  The reciprocal form depends on which forward-DCT
 * implementation is active, because ff_fdct_ifast bakes the AAN scale
 * factors into its output.
 *
 * @param dsp          DSP context; fdct pointer and idct_permutation are read
 * @param qmat         out: per-qscale 32-bit quant multipliers
 * @param qmat16       out: per-qscale 16-bit multipliers + rounding bias
 * @param quant_matrix input quant matrix (64 entries, unpermuted order)
 * @param bias         rounding bias in QUANT_BIAS_SHIFT fixed point
 * @param qmin,qmax    inclusive qscale range to precompute
 * @param intra        1 to skip the DC coefficient in the overflow check
 *
 * NOTE(review): this extract is elided — loop bodies/closing braces and the
 * shift-adjustment logic around line 140 are missing from view.
 */
74 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
75                        uint16_t (*qmat16)[2][64],
76                        const uint16_t *quant_matrix,
77                        int bias, int qmin, int qmax, int intra)
82     for (qscale = qmin; qscale <= qmax; qscale++) {
/* jpeg-style slow DCTs and faandct produce unscaled coefficients, so the
 * multiplier is a plain reciprocal of qscale * matrix entry. */
84         if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
85             dsp->fdct == ff_jpeg_fdct_islow_10 ||
86             dsp->fdct == ff_faandct) {
87             for (i = 0; i < 64; i++) {
88                 const int j = dsp->idct_permutation[i];
89                 /* 16 <= qscale * quant_matrix[i] <= 7905
90                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
91                  * 19952 <= x <= 249205026
92                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
93                  * 3444240 >= (1 << 36) / (x) >= 275 */
95                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
96                                         (qscale * quant_matrix[j]));
/* ifast DCT output still carries the AAN scale factors; fold them into
 * the reciprocal (extra +14 bits of precision to absorb ff_aanscales). */
98         } else if (dsp->fdct == ff_fdct_ifast) {
99             for (i = 0; i < 64; i++) {
100                 const int j = dsp->idct_permutation[i];
101                 /* 16 <= qscale * quant_matrix[i] <= 7905
102                  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
103                  * 19952 <= x <= 249205026
104                  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
105                  * 3444240 >= (1 << 36) / (x) >= 275 */
107                 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
108                                         (ff_aanscales[i] * qscale *
/* generic path: also fill the 16-bit SIMD tables. */
112             for (i = 0; i < 64; i++) {
113                 const int j = dsp->idct_permutation[i];
114                 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
115                  * Assume x = qscale * quant_matrix[i]
117                  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
118                  * so 32768 >= (1 << 19) / (x) >= 67 */
119                 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
120                                         (qscale * quant_matrix[j]));
121                 //qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) /
122                 //                    (qscale * quant_matrix[i]);
123                 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
124                                        (qscale * quant_matrix[j]);
/* clamp 0 and 32768 to 32767 so the value fits the signed 16-bit SIMD
 * multiplier without overflowing. */
126                 if (qmat16[qscale][0][i] == 0 ||
127                     qmat16[qscale][0][i] == 128 * 256)
128                     qmat16[qscale][0][i] = 128 * 256 - 1;
129                 qmat16[qscale][1][i] =
130                     ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
131                                 qmat16[qscale][0][i]);
/* overflow check: starting at `intra` skips DC for intra matrices. */
135         for (i = intra; i < 64; i++) {
137             if (dsp->fdct == ff_fdct_ifast) {
138                 max = (8191LL * ff_aanscales[i]) >> 14;
140             while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
146         av_log(NULL, AV_LOG_INFO,
147                "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive the integer qscale (and lambda2) from the current rate-control
 * lambda, clipping to the user-configured [qmin, qmax] range.
 * 139/2^7 approximates 1/ff_qp2lambda scaling — presumably chosen to match
 * FF_QP2LAMBDA; TODO confirm against the rate-control code. */
152 static inline void update_qscale(MpegEncContext *s)
154     s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
155                 (FF_LAMBDA_SHIFT + 7);
156     s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
158     s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Write a 64-entry quant matrix to the bitstream, 8 bits per coefficient,
 * in zigzag scan order. */
162 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
168     for (i = 0; i < 64; i++) {
169         put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
176  * init s->current_picture.qscale_table from s->lambda_table
/* Converts each macroblock's lambda into a clipped qscale value; the same
 * 139 >> (FF_LAMBDA_SHIFT + 7) scaling as update_qscale() above. */
178 void ff_init_qscale_tab(MpegEncContext *s)
180     int8_t * const qscale_table = s->current_picture.f.qscale_table;
183     for (i = 0; i < s->mb_num; i++) {
184         unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
185         int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
186         qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy frame-level metadata (and, when two-pass ME is enabled, the motion
 * information arrays) from the user-supplied frame into the encoder's
 * internal picture.  Pixel data is NOT copied here. */
191 static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst,
196     dst->pict_type = src->pict_type;
197     dst->quality = src->quality;
198     dst->coded_picture_number = src->coded_picture_number;
199     dst->display_picture_number = src->display_picture_number;
200     //dst->reference = src->reference;
202     dst->interlaced_frame = src->interlaced_frame;
203     dst->top_field_first = src->top_field_first;
/* me_threshold > 0 means first-pass motion data is reused, so the source
 * frame must carry motion_val/mb_type/ref_index — warn if it doesn't. */
205     if (s->avctx->me_threshold) {
206         if (!src->motion_val[0])
207             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n");
209             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n");
210         if (!src->ref_index[0])
211             av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
212         if (src->motion_subsample_log2 != dst->motion_subsample_log2)
213             av_log(s->avctx, AV_LOG_ERROR,
214                    "AVFrame.motion_subsample_log2 doesn't match! (%d!=%d)\n",
215                    src->motion_subsample_log2, dst->motion_subsample_log2);
217         memcpy(dst->mb_type, src->mb_type,
218                s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));
/* two motion-vector planes: forward (0) and backward (1). */
220         for (i = 0; i < 2; i++) {
221             int stride = ((16 * s->mb_width ) >>
222                           src->motion_subsample_log2) + 1;
223             int height = ((16 * s->mb_height) >> src->motion_subsample_log2);
/* skip the copy when src and dst alias the same buffers. */
225             if (src->motion_val[i] &&
226                 src->motion_val[i] != dst->motion_val[i]) {
227                 memcpy(dst->motion_val[i], src->motion_val[i],
228                        2 * stride * height * sizeof(int16_t));
230             if (src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]) {
231                 memcpy(dst->ref_index[i], src->ref_index[i],
232                        s->mb_stride * 4 * s->mb_height * sizeof(int8_t));
/* Sync per-frame state from a slice-thread duplicate context back into the
 * main context after motion estimation (shallow member copies only). */
238 static void update_duplicate_context_after_me(MpegEncContext *dst,
241 #define COPY(a) dst->a= src->a
243     COPY(current_picture);
249     COPY(picture_in_gop_number);
250     COPY(gop_picture_number);
251     COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
252     COPY(progressive_frame);    // FIXME don't set in encode_header
253     COPY(partitioned_frame);    // FIXME don't set in encode_header
258  * Set the given MpegEncContext to defaults for encoding.
259  * the changed fields will not depend upon the prior state of the MpegEncContext.
261 static void MPV_encode_defaults(MpegEncContext *s)
264     ff_MPV_common_defaults(s);
/* Seed the shared static f_code table; MV penalty table is filled
 * elsewhere (elided in this extract). i runs over small MVs only —
 * presumably the remaining entries are initialized in missing lines. */
266     for (i = -16; i < 16; i++) {
267         default_fcode_tab[i + MAX_MV] = 1;
269     s->me.mv_penalty = default_mv_penalty;
270     s->fcode_tab = default_fcode_tab;
273 /* init video encoder */
/**
 * Initialize the mpegvideo-family encoder from user AVCodecContext settings.
 *
 * Validates codec/pixel-format combinations and rate-control parameters,
 * copies user options into the MpegEncContext, performs per-codec setup
 * (out_format, delay, sampling factors, version flags), initializes the
 * common mpegvideo state, quantizer function pointers, default quant
 * matrices, and the rate controller.
 *
 * NOTE(review): this extract is heavily elided — many returns, closing
 * braces and error paths are not visible; comments below only describe
 * what the visible lines establish.
 */
274 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
276     MpegEncContext *s = avctx->priv_data;
278     int chroma_h_shift, chroma_v_shift;
280     MPV_encode_defaults(s);
/* --- pixel-format validation per codec --- */
282     switch (avctx->codec_id) {
283     case AV_CODEC_ID_MPEG2VIDEO:
284         if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
285             avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
286             av_log(avctx, AV_LOG_ERROR,
287                    "only YUV420 and YUV422 are supported\n");
291     case AV_CODEC_ID_LJPEG:
/* non-J (limited range) variants are accepted only under
 * strict_std_compliance <= UNOFFICIAL. */
292         if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
293             avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
294             avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
295             avctx->pix_fmt != AV_PIX_FMT_BGRA &&
296             ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
297               avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
298               avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
299              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
300             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
304     case AV_CODEC_ID_MJPEG:
305         if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
306             avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
307             ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
308               avctx->pix_fmt != AV_PIX_FMT_YUV422P) ||
309              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
310             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
/* default case for the remaining codecs: 4:2:0 only. */
315         if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
316             av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
321     switch (avctx->pix_fmt) {
322     case AV_PIX_FMT_YUVJ422P:
323     case AV_PIX_FMT_YUV422P:
324         s->chroma_format = CHROMA_422;
326     case AV_PIX_FMT_YUVJ420P:
327     case AV_PIX_FMT_YUV420P:
329         s->chroma_format = CHROMA_420;
/* --- copy user options into the context --- */
333     s->bit_rate = avctx->bit_rate;
334     s->width    = avctx->width;
335     s->height   = avctx->height;
336     if (avctx->gop_size > 600 &&
337         avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
338         av_log(avctx, AV_LOG_ERROR,
339                "Warning keyframe interval too large! reducing it ...\n");
340         avctx->gop_size = 600;
342     s->gop_size     = avctx->gop_size;
344     s->flags        = avctx->flags;
345     s->flags2       = avctx->flags2;
346     s->max_b_frames = avctx->max_b_frames;
347     s->codec_id     = avctx->codec->id;
348 #if FF_API_MPV_GLOBAL_OPTS
349     if (avctx->luma_elim_threshold)
350         s->luma_elim_threshold   = avctx->luma_elim_threshold;
351     if (avctx->chroma_elim_threshold)
352         s->chroma_elim_threshold = avctx->chroma_elim_threshold;
354     s->strict_std_compliance = avctx->strict_std_compliance;
355     s->quarter_sample     = (avctx->flags & CODEC_FLAG_QPEL) != 0;
356     s->mpeg_quant         = avctx->mpeg_quant;
357     s->rtp_mode           = !!avctx->rtp_payload_size;
358     s->intra_dc_precision = avctx->intra_dc_precision;
359     s->user_specified_pts = AV_NOPTS_VALUE;
361     if (s->gop_size <= 1) {
368     s->me_method = avctx->me_method;
371     s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
373 #if FF_API_MPV_GLOBAL_OPTS
374     if (s->flags & CODEC_FLAG_QP_RD)
375         s->mpv_flags |= FF_MPV_FLAG_QP_RD;
/* adaptive quant is enabled when any masking option or QP-RD is on. */
378     s->adaptive_quant = (s->avctx->lumi_masking ||
379                          s->avctx->dark_masking ||
380                          s->avctx->temporal_cplx_masking ||
381                          s->avctx->spatial_cplx_masking ||
382                          s->avctx->p_masking ||
383                          s->avctx->border_masking ||
384                          (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
387     s->loop_filter = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
/* --- rate-control parameter sanity checks --- */
389     if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
390         av_log(avctx, AV_LOG_ERROR,
391                "a vbv buffer size is needed, "
392                "for encoding with a maximum bitrate\n");
396     if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
397         av_log(avctx, AV_LOG_INFO,
398                "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
401     if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
402         av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
406     if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
407         av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
411     if (avctx->rc_max_rate &&
412         avctx->rc_max_rate == avctx->bit_rate &&
413         avctx->rc_max_rate != avctx->rc_min_rate) {
414         av_log(avctx, AV_LOG_INFO,
415                "impossible bitrate constraints, this will fail\n");
418     if (avctx->rc_buffer_size &&
419         avctx->bit_rate * (int64_t)avctx->time_base.num >
420             avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
421         av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
425     if (!s->fixed_qscale &&
426         avctx->bit_rate * av_q2d(avctx->time_base) >
427             avctx->bit_rate_tolerance) {
428         av_log(avctx, AV_LOG_ERROR,
429                "bitrate tolerance too small for bitrate\n");
/* vbv_delay is a 16-bit field at 90 kHz in MPEG-1/2; warn when the buffer
 * cannot be represented and VBR signalling (0xFFFF) will be used. */
433     if (s->avctx->rc_max_rate &&
434         s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
435         (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
436          s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
437         90000LL * (avctx->rc_buffer_size - 1) >
438             s->avctx->rc_max_rate * 0xFFFFLL) {
439         av_log(avctx, AV_LOG_INFO,
440                "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
441                "specified vbv buffer is too large for the given bitrate!\n");
/* --- feature/codec compatibility checks --- */
444     if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
445         s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
446         s->codec_id != AV_CODEC_ID_FLV1) {
447         av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
451     if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
452         av_log(avctx, AV_LOG_ERROR,
453                "OBMC is only supported with simple mb decision\n");
457     if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
458         av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
462     if (s->max_b_frames &&
463         s->codec_id != AV_CODEC_ID_MPEG4 &&
464         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
465         s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
466         av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
470     if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
471          s->codec_id == AV_CODEC_ID_H263 ||
472          s->codec_id == AV_CODEC_ID_H263P) &&
473         (avctx->sample_aspect_ratio.num > 255 ||
474          avctx->sample_aspect_ratio.den > 255)) {
475         av_log(avctx, AV_LOG_ERROR,
476                "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
477                avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
481     if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
482         s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
483         av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
487     // FIXME mpeg2 uses that too
488     if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
489         av_log(avctx, AV_LOG_ERROR,
490                "mpeg2 style quantization not supported by codec\n");
494 #if FF_API_MPV_GLOBAL_OPTS
495     if (s->flags & CODEC_FLAG_CBP_RD)
496         s->mpv_flags |= FF_MPV_FLAG_CBP_RD;
499     if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
500         av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
504     if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
505         s->avctx->mb_decision != FF_MB_DECISION_RD) {
506         av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
510     if (s->avctx->scenechange_threshold < 1000000000 &&
511         (s->flags & CODEC_FLAG_CLOSED_GOP)) {
512         av_log(avctx, AV_LOG_ERROR,
513                "closed gop with scene change detection are not supported yet, "
514                "set threshold to 1000000000\n");
518     if (s->flags & CODEC_FLAG_LOW_DELAY) {
519         if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
520             av_log(avctx, AV_LOG_ERROR,
521                    "low delay forcing is only available for mpeg2\n");
524         if (s->max_b_frames != 0) {
525             av_log(avctx, AV_LOG_ERROR,
526                    "b frames cannot be used with low delay\n");
531     if (s->q_scale_type == 1) {
532         if (avctx->qmax > 12) {
533             av_log(avctx, AV_LOG_ERROR,
534                    "non linear quant only supports qmax <= 12 currently\n");
539     if (s->avctx->thread_count > 1 &&
540         s->codec_id != AV_CODEC_ID_MPEG4 &&
541         s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
542         s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
543         (s->codec_id != AV_CODEC_ID_H263P)) {
544         av_log(avctx, AV_LOG_ERROR,
545                "multi threaded encoding not supported by codec\n");
549     if (s->avctx->thread_count < 1) {
550         av_log(avctx, AV_LOG_ERROR,
551                "automatic thread number detection not supported by codec,"
556     if (s->avctx->thread_count > 1)
559     if (!avctx->time_base.den || !avctx->time_base.num) {
560         av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
/* thresholds are compared against (INT_MAX/2 + 128) >> 8 to avoid later
 * integer overflow — presumably in the ME cost computations; confirm. */
564     i = (INT_MAX / 2 + 128) >> 8;
565     if (avctx->me_threshold >= i) {
566         av_log(avctx, AV_LOG_ERROR, "me_threshold too large, max is %d\n",
570     if (avctx->mb_threshold >= i) {
571         av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
576     if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
577         av_log(avctx, AV_LOG_INFO,
578                "notice: b_frame_strategy only affects the first pass\n");
579         avctx->b_frame_strategy = 0;
/* reduce the timebase fraction to lowest terms. */
582     i = av_gcd(avctx->time_base.den, avctx->time_base.num);
584         av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
585         avctx->time_base.den /= i;
586         avctx->time_base.num /= i;
/* --- quantizer rounding bias defaults (QUANT_BIAS_SHIFT fixed point) --- */
590     if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
591         s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
592         // (a + x * 3 / 8) / x
593         s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
594         s->inter_quant_bias = 0;
596         s->intra_quant_bias = 0;
598         s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
601     if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
602         s->intra_quant_bias = avctx->intra_quant_bias;
603     if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
604         s->inter_quant_bias = avctx->inter_quant_bias;
606     av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
/* MPEG-4 stores the timebase denominator in a 16-bit field. */
609     if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
610         s->avctx->time_base.den > (1 << 16) - 1) {
611         av_log(avctx, AV_LOG_ERROR,
612                "timebase %d/%d not supported by MPEG 4 standard, "
613                "the maximum admitted value for the timebase denominator "
614                "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
618     s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
620 #if FF_API_MPV_GLOBAL_OPTS
621     if (avctx->flags2 & CODEC_FLAG2_SKIP_RD)
622         s->mpv_flags |= FF_MPV_FLAG_SKIP_RD;
623     if (avctx->flags2 & CODEC_FLAG2_STRICT_GOP)
624         s->mpv_flags |= FF_MPV_FLAG_STRICT_GOP;
625     if (avctx->quantizer_noise_shaping)
626         s->quantizer_noise_shaping = avctx->quantizer_noise_shaping;
/* --- per-codec output-format setup --- */
629     switch (avctx->codec->id) {
630     case AV_CODEC_ID_MPEG1VIDEO:
631         s->out_format = FMT_MPEG1;
632         s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
633         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
635     case AV_CODEC_ID_MPEG2VIDEO:
636         s->out_format = FMT_MPEG1;
637         s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
638         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
641     case AV_CODEC_ID_LJPEG:
642     case AV_CODEC_ID_MJPEG:
643         s->out_format = FMT_MJPEG;
644         s->intra_only = 1; /* force intra only for jpeg */
/* BGRA lossless JPEG: no chroma subsampling (1x1 for all components). */
645         if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
646             avctx->pix_fmt   == AV_PIX_FMT_BGRA) {
647             s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
648             s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
649             s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
651             s->mjpeg_vsample[0] = 2;
652             s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
653             s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
654             s->mjpeg_hsample[0] = 2;
655             s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
656             s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
658         if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
659             ff_mjpeg_encode_init(s) < 0)
664     case AV_CODEC_ID_H261:
665         if (!CONFIG_H261_ENCODER)
667         if (ff_h261_get_picture_format(s->width, s->height) < 0) {
668             av_log(avctx, AV_LOG_ERROR,
669                    "The specified picture size of %dx%d is not valid for the "
670                    "H.261 codec.\nValid sizes are 176x144, 352x288\n",
671                    s->width, s->height);
674         s->out_format = FMT_H261;
678     case AV_CODEC_ID_H263:
679         if (!CONFIG_H263_ENCODER)
681         if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
682                              s->width, s->height) == 8) {
683             av_log(avctx, AV_LOG_INFO,
684                    "The specified picture size of %dx%d is not valid for "
685                    "the H.263 codec.\nValid sizes are 128x96, 176x144, "
686                    "352x288, 704x576, and 1408x1152."
687                    "Try H.263+.\n", s->width, s->height);
690         s->out_format = FMT_H263;
694     case AV_CODEC_ID_H263P:
695         s->out_format = FMT_H263;
698         s->h263_aic        = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
699         s->modified_quant  = s->h263_aic;
700         s->loop_filter     = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
701         s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
704         /* These are just to be sure */
708     case AV_CODEC_ID_FLV1:
709         s->out_format      = FMT_H263;
710         s->h263_flv        = 2; /* format = 1; 11-bit codes */
711         s->unrestricted_mv = 1;
712         s->rtp_mode        = 0; /* don't allow GOB */
716     case AV_CODEC_ID_RV10:
717         s->out_format = FMT_H263;
721     case AV_CODEC_ID_RV20:
722         s->out_format      = FMT_H263;
725         s->modified_quant  = 1;
729         s->unrestricted_mv = 0;
731     case AV_CODEC_ID_MPEG4:
732         s->out_format      = FMT_H263;
734         s->unrestricted_mv = 1;
735         s->low_delay       = s->max_b_frames ? 0 : 1;
736         avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
738     case AV_CODEC_ID_MSMPEG4V2:
739         s->out_format      = FMT_H263;
741         s->unrestricted_mv = 1;
742         s->msmpeg4_version = 2;
746     case AV_CODEC_ID_MSMPEG4V3:
747         s->out_format        = FMT_H263;
749         s->unrestricted_mv   = 1;
750         s->msmpeg4_version   = 3;
751         s->flipflop_rounding = 1;
755     case AV_CODEC_ID_WMV1:
756         s->out_format        = FMT_H263;
758         s->unrestricted_mv   = 1;
759         s->msmpeg4_version   = 4;
760         s->flipflop_rounding = 1;
764     case AV_CODEC_ID_WMV2:
765         s->out_format        = FMT_H263;
767         s->unrestricted_mv   = 1;
768         s->msmpeg4_version   = 5;
769         s->flipflop_rounding = 1;
777     avctx->has_b_frames = !s->low_delay;
781     s->progressive_frame    =
782     s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
783                                                 CODEC_FLAG_INTERLACED_ME) ||
/* --- common state, quantizer hooks, matrices, rate control --- */
787     if (ff_MPV_common_init(s) < 0)
791         ff_MPV_encode_init_x86(s);
793     if (!s->dct_quantize)
794         s->dct_quantize = ff_dct_quantize_c;
796         s->denoise_dct = denoise_dct_c;
797     s->fast_dct_quantize = s->dct_quantize;
799         s->dct_quantize = dct_quantize_trellis_c;
801     if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
802         s->chroma_qscale_table = ff_h263_chroma_qscale_table;
804     s->quant_precision = 5;
806     ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
807     ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
809     if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
810         ff_h261_encode_init(s);
811     if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
812         ff_h263_encode_init(s);
813     if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
814         ff_msmpeg4_encode_init(s);
815     if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
816         && s->out_format == FMT_MPEG1)
817         ff_mpeg1_encode_init(s);
/* Pick default quant matrices (permuted for the active IDCT), then let the
 * user override via avctx->intra_matrix / inter_matrix. */
820     for (i = 0; i < 64; i++) {
821         int j = s->dsp.idct_permutation[i];
822         if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
824             s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
825             s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
826         } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
828             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
831             s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
832             s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
834         if (s->avctx->intra_matrix)
835             s->intra_matrix[j] = s->avctx->intra_matrix[i];
836         if (s->avctx->inter_matrix)
837             s->inter_matrix[j] = s->avctx->inter_matrix[i];
840     /* precompute matrix */
841     /* for mjpeg, we do include qscale in the matrix */
842     if (s->out_format != FMT_MJPEG) {
843         ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
844                           s->intra_matrix, s->intra_quant_bias, avctx->qmin,
846         ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
847                           s->inter_matrix, s->inter_quant_bias, avctx->qmin,
851     if (ff_rate_control_init(s) < 0)
/* Tear down the encoder: rate controller, common mpegvideo state,
 * mjpeg-specific state when applicable, and the extradata buffer. */
857 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
859     MpegEncContext *s = avctx->priv_data;
861     ff_rate_control_uninit(s);
863     ff_MPV_common_end(s);
864     if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
865         s->out_format == FMT_MJPEG)
866         ff_mjpeg_encode_close(s);
868     av_freep(&avctx->extradata);
/* Sum of absolute errors of a 16x16 block against a constant reference
 * value `ref` (used with the block mean to estimate texture energy). */
873 static int get_sae(uint8_t *src, int ref, int stride)
878     for (y = 0; y < 16; y++) {
879         for (x = 0; x < 16; x++) {
880             acc += FFABS(src[x + y * stride] - ref);
/* Count 16x16 blocks that look cheaper to intra-code than to predict:
 * a block votes "intra" when its deviation from its own mean (SAE + 500)
 * is still below the inter SAD against the reference frame. */
887 static int get_intra_count(MpegEncContext *s, uint8_t *src,
888                            uint8_t *ref, int stride)
896     for (y = 0; y < h; y += 16) {
897         for (x = 0; x < w; x += 16) {
898             int offset = x + y * stride;
899             int sad  = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
901             int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
902             int sae  = get_sae(src + offset, mean, stride);
/* +500 biases the decision against intra for near-ties. */
904             acc += sae + 500 < sad;
/**
 * Queue a user frame into the encoder's input picture FIFO.
 *
 * Validates/generates the pts, then either references the caller's buffers
 * directly (when strides match and the frame may be kept) or copies the
 * pixel data into an internally allocated Picture.  Finally shifts the
 * input_picture[] queue and stores the new entry at index encoding_delay.
 *
 * NOTE(review): elided extract — several branches/returns are missing.
 */
911 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
915     int i, display_picture_number = 0;
916     const int encoding_delay = s->max_b_frames ? s->max_b_frames :
917                                (s->low_delay ? 0 : 1);
922         display_picture_number = s->input_picture_number++;
/* pts validation: must be strictly increasing vs. the previous frame. */
924         if (pts != AV_NOPTS_VALUE) {
925             if (s->user_specified_pts != AV_NOPTS_VALUE) {
927                 int64_t last = s->user_specified_pts;
930                     av_log(s->avctx, AV_LOG_ERROR,
931                            "Error, Invalid timestamp=%"PRId64", "
932                            "last=%"PRId64"\n", pts, s->user_specified_pts);
936                 if (!s->low_delay && display_picture_number == 1)
937                     s->dts_delta = time - last;
939             s->user_specified_pts = pts;
/* no pts supplied: guess last+1, or fall back to the display number. */
941             if (s->user_specified_pts != AV_NOPTS_VALUE) {
942                 s->user_specified_pts =
943                 pts = s->user_specified_pts + 1;
944                 av_log(s->avctx, AV_LOG_INFO,
945                        "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
948                 pts = display_picture_number;
/* direct-reference path requires matching strides and a preservable input. */
954         if (encoding_delay && !(s->flags & CODEC_FLAG_INPUT_PRESERVED))
956         if (pic_arg->linesize[0] != s->linesize)
958         if (pic_arg->linesize[1] != s->uvlinesize)
960         if (pic_arg->linesize[2] != s->uvlinesize)
963         av_dlog(s->avctx, "%d %d %d %d\n", pic_arg->linesize[0],
964                 pic_arg->linesize[1], s->linesize, s->uvlinesize);
967             i = ff_find_unused_picture(s, 1);
971             pic = &s->picture[i].f;
974             for (i = 0; i < 4; i++) {
975                 pic->data[i]     = pic_arg->data[i];
976                 pic->linesize[i] = pic_arg->linesize[i];
978             if (ff_alloc_picture(s, (Picture *) pic, 1) < 0) {
/* copy path: allocate an internal picture and memcpy plane by plane. */
982             i = ff_find_unused_picture(s, 0);
986             pic = &s->picture[i].f;
989             if (ff_alloc_picture(s, (Picture *) pic, 0) < 0) {
993             if (pic->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
994                 pic->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
995                 pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
998                 int h_chroma_shift, v_chroma_shift;
999                 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1003                 for (i = 0; i < 3; i++) {
1004                     int src_stride = pic_arg->linesize[i];
1005                     int dst_stride = i ? s->uvlinesize : s->linesize;
1006                     int h_shift = i ? h_chroma_shift : 0;
1007                     int v_shift = i ? v_chroma_shift : 0;
1008                     int w = s->width  >> h_shift;
1009                     int h = s->height >> v_shift;
1010                     uint8_t *src = pic_arg->data[i];
1011                     uint8_t *dst = pic->data[i];
1013                     if (!s->avctx->rc_buffer_size)
1014                         dst += INPLACE_OFFSET;
1016                     if (src_stride == dst_stride)
1017                         memcpy(dst, src, src_stride * h);
1020                             memcpy(dst, src, w);
1028         copy_picture_attributes(s, pic, pic_arg);
1029         pic->display_picture_number = display_picture_number;
1030         pic->pts = pts; // we set this here to avoid modifiying pic_arg
1033     /* shift buffer entries */
1034     for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1035         s->input_picture[i - 1] = s->input_picture[i];
1037     s->input_picture[encoding_delay] = (Picture*) pic;
/* Decide whether picture p is similar enough to `ref` to be skipped.
 * Accumulates a per-8x8-block comparison score over all three planes,
 * combined according to avctx->frame_skip_exp (max / abs / squared / cubed
 * / 4th power), then tests it against the skip threshold and a
 * lambda-scaled skip factor. */
1042 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1046     int64_t score64 = 0;
1048     for (plane = 0; plane < 3; plane++) {
1049         const int stride = p->f.linesize[plane];
1050         const int bw = plane ? 1 : 2;
1051         for (y = 0; y < s->mb_height * bw; y++) {
1052             for (x = 0; x < s->mb_width * bw; x++) {
/* shared (user) buffers carry no edge padding, hence off = 0. */
1053                 int off = p->f.type == FF_BUFFER_TYPE_SHARED ? 0 : 16;
1054                 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
1055                 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
1056                 int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1058                 switch (s->avctx->frame_skip_exp) {
1059                 case 0: score    = FFMAX(score, v);          break;
1060                 case 1: score   += FFABS(v);                 break;
1061                 case 2: score   += v * v;                    break;
1062                 case 3: score64 += FFABS(v * v * (int64_t)v); break;
1063                 case 4: score64 += v * v * (int64_t)(v * v);  break;
1072     if (score64 < s->avctx->frame_skip_threshold)
1074     if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
/* Encode one frame with a secondary AVCodecContext and return the packet
 * size (used by estimate_best_b_count to measure coding cost); the packet
 * is freed before returning. */
1079 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1081     AVPacket pkt = { 0 };
1082     int ret, got_output;
1084     av_init_packet(&pkt);
1085     ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1090     av_free_packet(&pkt);
/**
 * Estimate the best number of B-frames to insert before the next P-frame
 * (b_frame_strategy == 2).
 *
 * Sets up a scaled-down throwaway encoder context, downscales the queued
 * input pictures by 2^brd_scale, encodes one I frame plus each candidate
 * I/P/B pattern, and measures rate-distortion cost as
 * bits * lambda2 + SSE error.  Returns the b-count with the lowest cost.
 *
 * NOTE(review): elided extract — loop closings, rd comparison and the
 * avcodec_close/av_freep of `c` are not visible here.
 */
1094 static int estimate_best_b_count(MpegEncContext *s)
1096     AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1097     AVCodecContext *c = avcodec_alloc_context3(NULL);
1098     AVFrame input[FF_MAX_B_FRAMES + 2];
1099     const int scale = s->avctx->brd_scale;
1100     int i, j, out_size, p_lambda, b_lambda, lambda2;
1101     int64_t best_rd  = INT64_MAX;
1102     int best_b_count = -1;
1104     assert(scale >= 0 && scale <= 3);
/* reuse the last lambda seen for each picture type; fall back to P. */
1107     //s->next_picture_ptr->quality;
1108     p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1109     //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1110     b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1111     if (!b_lambda) // FIXME we should do this somewhere else
1112         b_lambda = p_lambda;
1113     lambda2  = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* configure the probe encoder at reduced resolution. */
1116     c->width        = s->width  >> scale;
1117     c->height       = s->height >> scale;
1118     c->flags        = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
1119                       CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
1120     c->flags       |= s->avctx->flags & CODEC_FLAG_QPEL;
1121     c->mb_decision  = s->avctx->mb_decision;
1122     c->me_cmp       = s->avctx->me_cmp;
1123     c->mb_cmp       = s->avctx->mb_cmp;
1124     c->me_sub_cmp   = s->avctx->me_sub_cmp;
1125     c->pix_fmt      = AV_PIX_FMT_YUV420P;
1126     c->time_base    = s->avctx->time_base;
1127     c->max_b_frames = s->max_b_frames;
1129     if (avcodec_open2(c, codec, NULL) < 0)
/* build downscaled copies of next_picture + queued input pictures. */
1132     for (i = 0; i < s->max_b_frames + 2; i++) {
1133         int ysize = c->width * c->height;
1134         int csize = (c->width / 2) * (c->height / 2);
1135         Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1136                                             s->next_picture_ptr;
1138         avcodec_get_frame_defaults(&input[i]);
1139         input[i].data[0]     = av_malloc(ysize + 2 * csize);
1140         input[i].data[1]     = input[i].data[0] + ysize;
1141         input[i].data[2]     = input[i].data[1] + csize;
1142         input[i].linesize[0] = c->width;
1143         input[i].linesize[1] =
1144         input[i].linesize[2] = c->width / 2;
1146         if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1147             pre_input = *pre_input_ptr;
1149             if (pre_input.f.type != FF_BUFFER_TYPE_SHARED && i) {
1150                 pre_input.f.data[0] += INPLACE_OFFSET;
1151                 pre_input.f.data[1] += INPLACE_OFFSET;
1152                 pre_input.f.data[2] += INPLACE_OFFSET;
1155             s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
1156                                  pre_input.f.data[0], pre_input.f.linesize[0],
1157                                  c->width, c->height);
1158             s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
1159                                  pre_input.f.data[1], pre_input.f.linesize[1],
1160                                  c->width >> 1, c->height >> 1);
1161             s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
1162                                  pre_input.f.data[2], pre_input.f.linesize[2],
1163                                  c->width >> 1, c->height >> 1);
/* try each candidate number of consecutive B frames j. */
1167     for (j = 0; j < s->max_b_frames + 1; j++) {
1170         if (!s->input_picture[j])
1173         c->error[0] = c->error[1] = c->error[2] = 0;
1175         input[0].pict_type = AV_PICTURE_TYPE_I;
1176         input[0].quality   = 1 * FF_QP2LAMBDA;
1178         out_size = encode_frame(c, &input[0]);
1180         //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1182         for (i = 0; i < s->max_b_frames + 1; i++) {
1183             int is_p = i % (j + 1) == j || i == s->max_b_frames;
1185             input[i + 1].pict_type = is_p ?
1186                                      AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1187             input[i + 1].quality   = is_p ? p_lambda : b_lambda;
1189             out_size = encode_frame(c, &input[i + 1]);
1191             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1194         /* get the delayed frames */
1196             out_size = encode_frame(c, NULL);
1197             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* add the accumulated SSE (from CODEC_FLAG_PSNR) to the rate term. */
1200         rd += c->error[0] + c->error[1] + c->error[2];
1211     for (i = 0; i < s->max_b_frames + 2; i++) {
1212         av_freep(&input[i].data[0]);
1215     return best_b_count;
/* Choose the next picture to encode from the input queue and decide its
 * coded type (I/P/B).  Applies the configured B-frame strategy, GOP
 * boundaries, pass-2 rate-control overrides and the frame-skip check,
 * then fills s->reordered_input_picture[] in coding order and copies the
 * selected picture into s->new_picture.
 * NOTE(review): this excerpt is sampled — several original lines (braces,
 * declarations) are not visible here. */
1218 static int select_input_picture(MpegEncContext *s)
/* shift the reorder queue down by one slot */
1222     for (i = 1; i < MAX_PICTURE_COUNT; i++)
1223         s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1224     s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1226     /* set next picture type & ordering */
1227     if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
1228         if (/*s->picture_in_gop_number >= s->gop_size ||*/
1229             s->next_picture_ptr == NULL || s->intra_only) {
/* no reference yet (or intra-only coding): force an I-frame */
1230             s->reordered_input_picture[0] = s->input_picture[0];
1231             s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
1232             s->reordered_input_picture[0]->f.coded_picture_number =
1233                 s->coded_picture_number++;
/* optional frame skipping: drop the input picture when it is similar
 * enough to the previous reference */
1237             if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1238                 if (s->picture_in_gop_number < s->gop_size &&
1239                     skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1240                     // FIXME check that the gop check above is +-1 correct
1241                     if (s->input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED) {
1242                         for (i = 0; i < 4; i++)
1243                             s->input_picture[0]->f.data[i] = NULL;
1244                         s->input_picture[0]->f.type = 0;
1246                         assert(s->input_picture[0]->f.type == FF_BUFFER_TYPE_USER ||
1247                                s->input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL);
1249                         s->avctx->release_buffer(s->avctx,
1250                                                  &s->input_picture[0]->f);
/* account the skipped (zero-bit) frame in the VBV model */
1254                     ff_vbv_update(s, 0);
/* two-pass encoding: picture types come from the pass-1 stats file */
1260             if (s->flags & CODEC_FLAG_PASS2) {
1261                 for (i = 0; i < s->max_b_frames + 1; i++) {
1262                     int pict_num = s->input_picture[0]->f.display_picture_number + i;
1264                     if (pict_num >= s->rc_context.num_entries)
1266                     if (!s->input_picture[i]) {
1267                         s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1271                     s->input_picture[i]->f.pict_type =
1272                         s->rc_context.entry[pict_num].new_pict_type;
/* b_frame_strategy 0: always use the maximum number of B-frames */
1276             if (s->avctx->b_frame_strategy == 0) {
1277                 b_frames = s->max_b_frames;
1278                 while (b_frames && !s->input_picture[b_frames])
/* b_frame_strategy 1: score candidate B-frames by inter-frame SAD and
 * stop at the first frame that changes too much (b_sensitivity) */
1280             } else if (s->avctx->b_frame_strategy == 1) {
1281                 for (i = 1; i < s->max_b_frames + 1; i++) {
1282                     if (s->input_picture[i] &&
1283                         s->input_picture[i]->b_frame_score == 0) {
1284                         s->input_picture[i]->b_frame_score =
1286                                 s->input_picture[i    ]->f.data[0],
1287                                 s->input_picture[i - 1]->f.data[0],
1291                 for (i = 0; i < s->max_b_frames + 1; i++) {
1292                     if (s->input_picture[i] == NULL ||
1293                         s->input_picture[i]->b_frame_score - 1 >
1294                             s->mb_num / s->avctx->b_sensitivity)
1298                 b_frames = FFMAX(0, i - 1);
/* reset scores so the next decision starts fresh */
1301                 for (i = 0; i < b_frames + 1; i++) {
1302                     s->input_picture[i]->b_frame_score = 0;
/* b_frame_strategy 2: trial-encode to pick the best B-frame count */
1304             } else if (s->avctx->b_frame_strategy == 2) {
1305                 b_frames = estimate_best_b_count(s);
1307                 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
/* respect picture types forced by the user/pass-2: a non-B type caps the
 * number of B-frames before it */
1313             for (i = b_frames - 1; i >= 0; i--) {
1314                 int type = s->input_picture[i]->f.pict_type;
1315                 if (type && type != AV_PICTURE_TYPE_B)
1318             if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
1319                 b_frames == s->max_b_frames) {
1320                 av_log(s->avctx, AV_LOG_ERROR,
1321                        "warning, too many b frames in a row\n");
/* GOP boundary handling: end the GOP with an I-frame, optionally
 * truncating the B-frame run for strict/closed GOPs */
1324             if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1325                 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1326                     s->gop_size > s->picture_in_gop_number) {
1327                     b_frames = s->gop_size - s->picture_in_gop_number - 1;
1329                     if (s->flags & CODEC_FLAG_CLOSED_GOP)
1331                     s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1335             if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1336                 s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
/* emit the reference frame first (coding order), then the B-frames */
1339             s->reordered_input_picture[0] = s->input_picture[b_frames];
1340             if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
1341                 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
1342             s->reordered_input_picture[0]->f.coded_picture_number =
1343                 s->coded_picture_number++;
1344             for (i = 0; i < b_frames; i++) {
1345                 s->reordered_input_picture[i + 1] = s->input_picture[i];
1346                 s->reordered_input_picture[i + 1]->f.pict_type =
1348                 s->reordered_input_picture[i + 1]->f.coded_picture_number =
1349                     s->coded_picture_number++;
1354     if (s->reordered_input_picture[0]) {
/* reference=3 marks a frame used for prediction; B-frames are not refs */
1355         s->reordered_input_picture[0]->f.reference =
1356             s->reordered_input_picture[0]->f.pict_type !=
1357                 AV_PICTURE_TYPE_B ? 3 : 0;
1359         ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]);
1361         if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED ||
1362             s->avctx->rc_buffer_size) {
1363             // input is a shared pix, so we can't modify it -> alloc a new
1364             // one & ensure that the shared one is reusable
1367             int i = ff_find_unused_picture(s, 0);
1370             pic = &s->picture[i];
1372             pic->f.reference = s->reordered_input_picture[0]->f.reference;
1373             if (ff_alloc_picture(s, pic, 0) < 0) {
1377             /* mark us unused / free shared pic */
1378             if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL)
1379                 s->avctx->release_buffer(s->avctx,
1380                                          &s->reordered_input_picture[0]->f);
1381             for (i = 0; i < 4; i++)
1382                 s->reordered_input_picture[0]->f.data[i] = NULL;
1383             s->reordered_input_picture[0]->f.type = 0;
1385             copy_picture_attributes(s, &pic->f,
1386                                     &s->reordered_input_picture[0]->f);
1388             s->current_picture_ptr = pic;
1390             // input is not a shared pix -> reuse buffer for current_pix
1392             assert(s->reordered_input_picture[0]->f.type ==
1393                        FF_BUFFER_TYPE_USER ||
1394                    s->reordered_input_picture[0]->f.type ==
1395                        FF_BUFFER_TYPE_INTERNAL);
1397             s->current_picture_ptr = s->reordered_input_picture[0];
1398             for (i = 0; i < 4; i++) {
/* NOTE(review): INPLACE_OFFSET shifts the planes so in-place MC does not
 * overwrite source pixels — confirm against ff_alloc_picture */
1399                 s->new_picture.f.data[i] += INPLACE_OFFSET;
1402         ff_copy_picture(&s->current_picture, s->current_picture_ptr);
1404         s->picture_number = s->new_picture.f.display_picture_number;
1406         memset(&s->new_picture, 0, sizeof(Picture));
/* Top-level per-frame encode entry point.
 * Loads/selects the input picture, splits the output packet between slice
 * threads, runs encode_picture(), then performs rate-control bookkeeping:
 * VBV overflow handling (re-encode with a larger lambda), stuffing-byte
 * insertion, vbv_delay patching for CBR MPEG-1/2, pts/dts assignment and
 * packet finalization.
 * NOTE(review): sampled excerpt — some original lines are not visible. */
1411 int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1412                           const AVFrame *pic_arg, int *got_packet)
1414     MpegEncContext *s = avctx->priv_data;
1415     int i, stuffing_count, ret;
1416     int context_count = s->slice_context_count;
1418     s->picture_in_gop_number++;
1420     if (load_input_picture(s, pic_arg) < 0)
1423     if (select_input_picture(s) < 0) {
/* output? — only when a picture was selected for encoding */
1428     if (s->new_picture.f.data[0]) {
1430         (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
/* optional H.263 macroblock-info side data (12 bytes per entry) */
1433             s->mb_info_ptr = av_packet_new_side_data(pkt,
1434                                  AV_PKT_DATA_H263_MB_INFO,
1435                                  s->mb_width*s->mb_height*12);
1436             s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* give each slice thread a proportional share of the packet buffer */
1439         for (i = 0; i < context_count; i++) {
1440             int start_y = s->thread_context[i]->start_mb_y;
1441             int   end_y = s->thread_context[i]->  end_mb_y;
1442             int h       = s->mb_height;
1443             uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1444             uint8_t *end   = pkt->data + (size_t)(((int64_t) pkt->size) *   end_y / h);
1446             init_put_bits(&s->thread_context[i]->pb, start, end - start);
1449         s->pict_type = s->new_picture.f.pict_type;
1451         ff_MPV_frame_start(s, avctx);
1453         if (encode_picture(s, s->picture_number) < 0)
/* export per-frame statistics to the AVCodecContext */
1456         avctx->header_bits = s->header_bits;
1457         avctx->mv_bits     = s->mv_bits;
1458         avctx->misc_bits   = s->misc_bits;
1459         avctx->i_tex_bits  = s->i_tex_bits;
1460         avctx->p_tex_bits  = s->p_tex_bits;
1461         avctx->i_count     = s->i_count;
1462         // FIXME f/b_count in avctx
1463         avctx->p_count     = s->mb_num - s->i_count - s->skip_count;
1464         avctx->skip_count  = s->skip_count;
1466         ff_MPV_frame_end(s);
1468         if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1469             ff_mjpeg_encode_picture_trailer(s);
/* VBV overflow check: if the frame is too large, raise lambda and loop
 * back to re-encode the same picture */
1471         if (avctx->rc_buffer_size) {
1472             RateControlContext *rcc = &s->rc_context;
1473             int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1475             if (put_bits_count(&s->pb) > max_size &&
1476                 s->lambda < s->avctx->lmax) {
1477                 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1478                                        (s->qscale + 1) / s->qscale);
1479                 if (s->adaptive_quant) {
1481                     for (i = 0; i < s->mb_height * s->mb_stride; i++)
1482                         s->lambda_table[i] =
1483                             FFMAX(s->lambda_table[i] + 1,
1484                                   s->lambda_table[i] * (s->qscale + 1) /
1487                 s->mb_skipped = 0;        // done in MPV_frame_start()
1488                 // done in encode_picture() so we must undo it
1489                 if (s->pict_type == AV_PICTURE_TYPE_P) {
1490                     if (s->flipflop_rounding          ||
1491                         s->codec_id == AV_CODEC_ID_H263P ||
1492                         s->codec_id == AV_CODEC_ID_MPEG4)
1493                         s->no_rounding ^= 1;
1495                 if (s->pict_type != AV_PICTURE_TYPE_B) {
1496                     s->time_base       = s->last_time_base;
1497                     s->last_non_b_time = s->time - s->pp_time;
/* rewind all per-thread bitstream writers before retrying */
1499                 for (i = 0; i < context_count; i++) {
1500                     PutBitContext *pb = &s->thread_context[i]->pb;
1501                     init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1506             assert(s->avctx->rc_max_rate);
1509         if (s->flags & CODEC_FLAG_PASS1)
1510             ff_write_pass1_stats(s);
1512         for (i = 0; i < 4; i++) {
1513             s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
1514             avctx->error[i] += s->current_picture_ptr->f.error[i];
1517         if (s->flags & CODEC_FLAG_PASS1)
1518             assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1519                    avctx->i_tex_bits + avctx->p_tex_bits ==
1520                        put_bits_count(&s->pb));
1521         flush_put_bits(&s->pb);
1522         s->frame_bits  = put_bits_count(&s->pb);
/* pad with stuffing bytes when VBV requires a minimum frame size */
1524         stuffing_count = ff_vbv_update(s, s->frame_bits);
1525         if (stuffing_count) {
1526             if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1527                     stuffing_count + 50) {
1528                 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1532             switch (s->codec_id) {
1533             case AV_CODEC_ID_MPEG1VIDEO:
1534             case AV_CODEC_ID_MPEG2VIDEO:
/* MPEG-1/2 stuffing: plain zero bytes */
1535                 while (stuffing_count--) {
1536                     put_bits(&s->pb, 8, 0);
1539             case AV_CODEC_ID_MPEG4:
/* MPEG-4 stuffing: stuffing start code (0x1C3) then 0xFF bytes */
1540                 put_bits(&s->pb, 16, 0);
1541                 put_bits(&s->pb, 16, 0x1C3);
1542                 stuffing_count -= 4;
1543                 while (stuffing_count--) {
1544                     put_bits(&s->pb, 8, 0xFF);
1548                 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1550             flush_put_bits(&s->pb);
1551             s->frame_bits = put_bits_count(&s->pb);
1554         /* update mpeg1/2 vbv_delay for CBR */
1555         if (s->avctx->rc_max_rate                          &&
1556             s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1557             s->out_format == FMT_MPEG1                     &&
1558             90000LL * (avctx->rc_buffer_size - 1) <=
1559                 s->avctx->rc_max_rate * 0xFFFFLL) {
1560             int vbv_delay, min_delay;
1561             double inbits  = s->avctx->rc_max_rate *
1562                              av_q2d(s->avctx->time_base);
1563             int    minbits = s->frame_bits - 8 *
1564                              (s->vbv_delay_ptr - s->pb.buf - 1);
1565             double bits    = s->rc_context.buffer_index + minbits - inbits;
1568                 av_log(s->avctx, AV_LOG_ERROR,
1569                        "Internal error, negative bits\n");
1571             assert(s->repeat_first_field == 0);
/* vbv_delay is expressed in 90 kHz clock ticks */
1573             vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1574             min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1575                         s->avctx->rc_max_rate;
1577             vbv_delay = FFMAX(vbv_delay, min_delay);
1579             assert(vbv_delay < 0xFFFF);
/* patch the 16-bit vbv_delay field in-place in the picture header */
1581             s->vbv_delay_ptr[0] &= 0xF8;
1582             s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1583             s->vbv_delay_ptr[1]  = vbv_delay >> 5;
1584             s->vbv_delay_ptr[2] &= 0x07;
1585             s->vbv_delay_ptr[2] |= vbv_delay << 3;
1586             avctx->vbv_delay     = vbv_delay * 300;
1588         s->total_bits     += s->frame_bits;
1589         avctx->frame_bits  = s->frame_bits;
/* pts/dts: with B-frames, dts lags pts by the reorder delay */
1591         pkt->pts = s->current_picture.f.pts;
1592         if (!s->low_delay) {
1593             if (!s->current_picture.f.coded_picture_number)
1594                 pkt->dts = pkt->pts - s->dts_delta;
1596                 pkt->dts = s->reordered_pts;
1597             s->reordered_pts = s->input_picture[0]->f.pts;
1599             pkt->dts = pkt->pts;
1600         if (s->current_picture.f.key_frame)
1601             pkt->flags |= AV_PKT_FLAG_KEY;
1603             av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1607     assert((s->frame_bits & 7) == 0);
1609     pkt->size = s->frame_bits / 8;
1610     *got_packet = !!pkt->size;
/* Zero out a block whose few remaining coefficients are cheap "noise":
 * each coefficient position contributes a weight from tab[] (low
 * frequencies cost more); if the accumulated score stays below the
 * threshold the whole block (except possibly the DC term) is cleared so
 * it can be coded as empty.
 * NOTE(review): sampled excerpt — skip_dc/score setup lines not visible. */
1614 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1615                                                 int n, int threshold)
/* per-position weights in scan order: only the first ~3 rows matter */
1617     static const char tab[64] = {
1618         3, 2, 2, 1, 1, 1, 1, 1,
1619         1, 1, 1, 1, 1, 1, 1, 1,
1620         1, 1, 1, 1, 1, 1, 1, 1,
1621         0, 0, 0, 0, 0, 0, 0, 0,
1622         0, 0, 0, 0, 0, 0, 0, 0,
1623         0, 0, 0, 0, 0, 0, 0, 0,
1624         0, 0, 0, 0, 0, 0, 0, 0,
1625         0, 0, 0, 0, 0, 0, 0, 0
1630     int16_t *block = s->block[n];
1631     const int last_index = s->block_last_index[n];
/* negative threshold means "preserve the DC coefficient" */
1634     if (threshold < 0) {
1636         threshold = -threshold;
1640     /* Are all we could set to zero already zero? */
1641     if (last_index <= skip_dc - 1)
/* accumulate the elimination score over all nonzero coefficients */
1644     for (i = 0; i <= last_index; i++) {
1645         const int j = s->intra_scantable.permutated[i];
1646         const int level = FFABS(block[j]);
1648             if (skip_dc && i == 0)
1652         } else if (level > 1) {
1658     if (score >= threshold)
/* block judged insignificant: clear everything after the (kept) DC */
1660     for (i = skip_dc; i <= last_index; i++) {
1661         const int j = s->intra_scantable.permutated[i];
1665         s->block_last_index[n] = 0;
1667         s->block_last_index[n] = -1;
/* Clamp quantized coefficients into the codec's representable range
 * [min_qcoeff, max_qcoeff], counting how many overflowed; warn once per
 * macroblock in simple mb_decision mode (RD modes handle this via cost).
 * NOTE(review): sampled excerpt — the clamping assignments themselves are
 * not visible here. */
1670 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1674     const int maxlevel = s->max_qcoeff;
1675     const int minlevel = s->min_qcoeff;
1679         i = 1; // skip clipping of intra dc
1683     for (; i <= last_index; i++) {
1684         const int j = s->intra_scantable.permutated[i];
1685         int level = block[j];
1687         if (level > maxlevel) {
1690         } else if (level < minlevel) {
1698     if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1699         av_log(s->avctx, AV_LOG_INFO,
1700                "warning, clipping %d dct coefficients to %d..%d\n",
1701                overflow, minlevel, maxlevel);
/* Compute a per-pixel visual masking weight for an 8x8 block: for each
 * pixel, the local variance over its 3x3 neighbourhood (clamped at the
 * block edges) is turned into a weight — flat areas get large weights
 * (errors are visible), textured areas small ones.  Used by the
 * quantizer noise shaping. */
1704 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1708     for (y = 0; y < 8; y++) {
1709         for (x = 0; x < 8; x++) {
/* 3x3 neighbourhood, clipped to the 8x8 block */
1715             for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1716                 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1717                     int v = ptr[x2 + y2 * stride];
/* count*sqr - sum*sum is count^2 times the variance of the window */
1723             weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
/* Encode one macroblock: fetch (intra) or motion-compensate and
 * difference (inter) the pixel data, optionally decide interlaced DCT,
 * run DCT + quantization (with optional trellis/noise shaping), apply
 * coefficient elimination, then dispatch to the codec-specific
 * macroblock bitstream writer.
 * mb_block_height/mb_block_count parameterize 4:2:0 (8, 6 blocks) vs
 * 4:2:2 (16, 8 blocks) — see encode_mb() below.
 * NOTE(review): sampled excerpt — many original lines (declarations,
 * else-branches, closing braces) are not visible here. */
1728 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1729                                                 int motion_x, int motion_y,
1730                                                 int mb_block_height,
1733     int16_t weight[8][64];
1734     int16_t orig[8][64];
1735     const int mb_x = s->mb_x;
1736     const int mb_y = s->mb_y;
1739     int dct_offset = s->linesize * 8; // default for progressive frames
1740     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1743     for (i = 0; i < mb_block_count; i++)
1744         skip_dct[i] = s->skipdct;
/* adaptive quantization: per-MB qscale/lambda from the rate control */
1746     if (s->adaptive_quant) {
1747         const int last_qp = s->qscale;
1748         const int mb_xy = mb_x + mb_y * s->mb_stride;
1750         s->lambda = s->lambda_table[mb_xy];
1753         if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1754             s->qscale = s->current_picture_ptr->f.qscale_table[mb_xy];
1755             s->dquant = s->qscale - last_qp;
1757             if (s->out_format == FMT_H263) {
/* H.263 only allows qscale deltas of +-2 per MB */
1758                 s->dquant = av_clip(s->dquant, -2, 2);
1760                 if (s->codec_id == AV_CODEC_ID_MPEG4) {
1762                         if (s->pict_type == AV_PICTURE_TYPE_B) {
1763                             if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1766                         if (s->mv_type == MV_TYPE_8X8)
1772         ff_set_qscale(s, last_qp + s->dquant);
1773     } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1774         ff_set_qscale(s, s->qscale + s->dquant);
1776     wrap_y = s->linesize;
1777     wrap_c = s->uvlinesize;
1778     ptr_y  = s->new_picture.f.data[0] +
1779              (mb_y * 16 * wrap_y)              + mb_x * 16;
1780     ptr_cb = s->new_picture.f.data[1] +
1781              (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1782     ptr_cr = s->new_picture.f.data[2] +
1783              (mb_y * mb_block_height * wrap_c) + mb_x * 8;
/* MB hangs over the right/bottom picture edge: replicate edge pixels
 * into the emulation buffer and read from there instead */
1785     if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
1786         uint8_t *ebuf = s->edge_emu_buffer + 32;
1787         s->vdsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
1788                                  mb_y * 16, s->width, s->height);
1790         s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, 8,
1791                                  mb_block_height, mb_x * 8, mb_y * 8,
1792                                  s->width >> 1, s->height >> 1);
1793         ptr_cb = ebuf + 18 * wrap_y;
1794         s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, 8,
1795                                  mb_block_height, mb_x * 8, mb_y * 8,
1796                                  s->width >> 1, s->height >> 1);
1797         ptr_cr = ebuf + 18 * wrap_y + 8;
/* intra path: decide frame vs field DCT by comparing ildct_cmp scores */
1801         if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1802             int progressive_score, interlaced_score;
1804             s->interlaced_dct = 0;
1805             progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1807                                 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1808                                                     NULL, wrap_y, 8) - 400;
1810             if (progressive_score > 0) {
1811                 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1812                                                        NULL, wrap_y * 2, 8) +
1813                                    s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1814                                                        NULL, wrap_y * 2, 8);
1815                 if (progressive_score > interlaced_score) {
1816                     s->interlaced_dct = 1;
/* field DCT: second luma row pair starts one line (not 8) below */
1818                     dct_offset = wrap_y;
1820                     if (s->chroma_format == CHROMA_422)
/* fetch the four 8x8 luma blocks */
1826         s->dsp.get_pixels(s->block[0], ptr_y                  , wrap_y);
1827         s->dsp.get_pixels(s->block[1], ptr_y              + 8 , wrap_y);
1828         s->dsp.get_pixels(s->block[2], ptr_y + dct_offset     , wrap_y);
1829         s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
1831         if (s->flags & CODEC_FLAG_GRAY) {
1835             s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1836             s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1837             if (!s->chroma_y_shift) { /* 422 */
1838                 s->dsp.get_pixels(s->block[6],
1839                                   ptr_cb + (dct_offset >> 1), wrap_c);
1840                 s->dsp.get_pixels(s->block[7],
1841                                   ptr_cr + (dct_offset >> 1), wrap_c);
/* inter path: motion compensate into s->dest, then encode the residual */
1845         op_pixels_func (*op_pix)[4];
1846         qpel_mc_func (*op_qpix)[16];
1847         uint8_t *dest_y, *dest_cb, *dest_cr;
1849         dest_y  = s->dest[0];
1850         dest_cb = s->dest[1];
1851         dest_cr = s->dest[2];
1853         if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1854             op_pix  = s->dsp.put_pixels_tab;
1855             op_qpix = s->dsp.put_qpel_pixels_tab;
1857             op_pix  = s->dsp.put_no_rnd_pixels_tab;
1858             op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
1861         if (s->mv_dir & MV_DIR_FORWARD) {
1862             ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
1863                           s->last_picture.f.data,
/* bidirectional: second prediction is averaged onto the first */
1865             op_pix  = s->dsp.avg_pixels_tab;
1866             op_qpix = s->dsp.avg_qpel_pixels_tab;
1868         if (s->mv_dir & MV_DIR_BACKWARD) {
1869             ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
1870                           s->next_picture.f.data,
/* frame vs field DCT decision on the residual (source vs prediction) */
1874         if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1875             int progressive_score, interlaced_score;
1877             s->interlaced_dct = 0;
1878             progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
1881                                 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
1882                                                     ptr_y + wrap_y * 8, wrap_y,
1885             if (s->avctx->ildct_cmp == FF_CMP_VSSE)
/* bias toward progressive to avoid flip-flopping on near ties */
1886                 progressive_score -= 400;
1888             if (progressive_score > 0) {
1889                 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
1892                                    s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
1896                 if (progressive_score > interlaced_score) {
1897                     s->interlaced_dct = 1;
1899                     dct_offset = wrap_y;
1901                     if (s->chroma_format == CHROMA_422)
/* residual = source - prediction, per 8x8 block */
1907         s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
1908         s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
1909         s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
1910                            dest_y + dct_offset, wrap_y);
1911         s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
1912                            dest_y + dct_offset + 8, wrap_y);
1914         if (s->flags & CODEC_FLAG_GRAY) {
1918             s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
1919             s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
1920             if (!s->chroma_y_shift) { /* 422 */
1921                 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
1922                                    dest_cb + (dct_offset >> 1), wrap_c);
1923                 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
1924                                    dest_cr + (dct_offset >> 1), wrap_c);
1927         /* pre quantization */
/* cheap SAD test per block: residuals far below the quantization step
 * would quantize to all-zero anyway, so skip their DCT entirely */
1928         if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
1929                 2 * s->qscale * s->qscale) {
1931             if (s->dsp.sad[1](NULL, ptr_y, dest_y,
1932                               wrap_y, 8) < 20 * s->qscale)
1934             if (s->dsp.sad[1](NULL, ptr_y + 8,
1935                               dest_y + 8, wrap_y, 8) < 20 * s->qscale)
1937             if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
1938                               dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
1940             if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
1941                               dest_y + dct_offset + 8,
1942                               wrap_y, 8) < 20 * s->qscale)
1944             if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
1945                               wrap_c, 8) < 20 * s->qscale)
1947             if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
1948                               wrap_c, 8) < 20 * s->qscale)
1950             if (!s->chroma_y_shift) { /* 422 */
1951                 if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
1952                                   dest_cb + (dct_offset >> 1),
1953                                   wrap_c, 8) < 20 * s->qscale)
1955                 if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
1956                                   dest_cr + (dct_offset >> 1),
1957                                   wrap_c, 8) < 20 * s->qscale)
/* noise shaping needs visual weights and a copy of the original blocks */
1963     if (s->quantizer_noise_shaping) {
1965             get_visual_weight(weight[0], ptr_y                 , wrap_y);
1967             get_visual_weight(weight[1], ptr_y              + 8, wrap_y);
1969             get_visual_weight(weight[2], ptr_y + dct_offset    , wrap_y);
1971             get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
1973             get_visual_weight(weight[4], ptr_cb                , wrap_c);
1975             get_visual_weight(weight[5], ptr_cr                , wrap_c);
1976         if (!s->chroma_y_shift) { /* 422 */
1978                 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
1981                 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
1984         memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
1987     /* DCT & quantize */
1988     assert(s->out_format != FMT_MJPEG || s->qscale == 8);
1990         for (i = 0; i < mb_block_count; i++) {
1993                 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
1994                 // FIXME we could decide to change to quantizer instead of
1996                 // JS: I don't think that would be a good idea it could lower
1997                 //     quality instead of improve it. Just INTRADC clipping
1998                 //     deserves changes in quantizer
2000                     clip_coeffs(s, s->block[i], s->block_last_index[i]);
2002                 s->block_last_index[i] = -1;
2004         if (s->quantizer_noise_shaping) {
2005             for (i = 0; i < mb_block_count; i++) {
2007                     s->block_last_index[i] =
2008                         dct_quantize_refine(s, s->block[i], weight[i],
2009                                             orig[i], i, s->qscale);
/* drop isolated low-cost coefficients in inter blocks (luma & chroma) */
2014         if (s->luma_elim_threshold && !s->mb_intra)
2015             for (i = 0; i < 4; i++)
2016                 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2017         if (s->chroma_elim_threshold && !s->mb_intra)
2018             for (i = 4; i < mb_block_count; i++)
2019                 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2021         if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2022             for (i = 0; i < mb_block_count; i++) {
2023                 if (s->block_last_index[i] == -1)
2024                     s->coded_score[i] = INT_MAX / 256;
/* gray flag: code intra chroma blocks as DC-only neutral gray */
2029     if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
2030         s->block_last_index[4] =
2031         s->block_last_index[5] = 0;
2033             s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2036     // non c quantize code returns incorrect block_last_index FIXME
2037     if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2038         for (i = 0; i < mb_block_count; i++) {
2040             if (s->block_last_index[i] > 0) {
/* rescan backwards for the true last nonzero coefficient */
2041                 for (j = 63; j > 0; j--) {
2042                     if (s->block[i][s->intra_scantable.permutated[j]])
2045                 s->block_last_index[i] = j;
2050     /* huffman encode */
2051     switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2052     case AV_CODEC_ID_MPEG1VIDEO:
2053     case AV_CODEC_ID_MPEG2VIDEO:
2054         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2055             ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2057     case AV_CODEC_ID_MPEG4:
2058         if (CONFIG_MPEG4_ENCODER)
2059             ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2061     case AV_CODEC_ID_MSMPEG4V2:
2062     case AV_CODEC_ID_MSMPEG4V3:
2063     case AV_CODEC_ID_WMV1:
2064         if (CONFIG_MSMPEG4_ENCODER)
2065             ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2067     case AV_CODEC_ID_WMV2:
2068         if (CONFIG_WMV2_ENCODER)
2069             ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2071     case AV_CODEC_ID_H261:
2072         if (CONFIG_H261_ENCODER)
2073             ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2075     case AV_CODEC_ID_H263:
2076     case AV_CODEC_ID_H263P:
2077     case AV_CODEC_ID_FLV1:
2078     case AV_CODEC_ID_RV10:
2079     case AV_CODEC_ID_RV20:
2080         if (CONFIG_H263_ENCODER)
2081             ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2083     case AV_CODEC_ID_MJPEG:
2084         if (CONFIG_MJPEG_ENCODER)
2085             ff_mjpeg_encode_mb(s, s->block);
/* Thin dispatcher: instantiate encode_mb_internal() for 4:2:0
 * (8-line chroma blocks, 6 blocks/MB) or 4:2:2 (16-line, 8 blocks/MB). */
2092 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2094     if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y,  8, 6);
2095     else                                encode_mb_internal(s, motion_x, motion_y, 16, 8);
/* Save the encoder state that a trial macroblock encode may modify
 * (prediction state, bit counters, qscale) from s into d, so encode_mb_hq()
 * can restore it before trying an alternative coding mode.
 * NOTE(review): sampled excerpt — the mv/pb copies and loop headers are
 * not all visible here. */
2098 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2101     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2104     d->mb_skip_run= s->mb_skip_run;
2106         d->last_dc[i] = s->last_dc[i];
/* statistics counters */
2109     d->mv_bits= s->mv_bits;
2110     d->i_tex_bits= s->i_tex_bits;
2111     d->p_tex_bits= s->p_tex_bits;
2112     d->i_count= s->i_count;
2113     d->f_count= s->f_count;
2114     d->b_count= s->b_count;
2115     d->skip_count= s->skip_count;
2116     d->misc_bits= s->misc_bits;
2120     d->qscale= s->qscale;
2121     d->dquant= s->dquant;
2123     d->esc3_level_length= s->esc3_level_length;
/* Copy the encoder state resulting from a trial encode from s into d
 * (the "best so far" context).  Superset of copy_context_before_encode:
 * additionally records the chosen MB mode (intra/skip/mv_type/mv_dir),
 * the partitioned-frame bit writers and the block_last_index array. */
2126 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2129     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2130     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2133     d->mb_skip_run= s->mb_skip_run;
2135         d->last_dc[i] = s->last_dc[i];
/* statistics counters */
2138     d->mv_bits= s->mv_bits;
2139     d->i_tex_bits= s->i_tex_bits;
2140     d->p_tex_bits= s->p_tex_bits;
2141     d->i_count= s->i_count;
2142     d->f_count= s->f_count;
2143     d->b_count= s->b_count;
2144     d->skip_count= s->skip_count;
2145     d->misc_bits= s->misc_bits;
/* the macroblock coding decision itself */
2147     d->mb_intra= s->mb_intra;
2148     d->mb_skipped= s->mb_skipped;
2149     d->mv_type= s->mv_type;
2150     d->mv_dir= s->mv_dir;
2152     if(s->data_partitioning){
2154         d->tex_pb= s->tex_pb;
2158         d->block_last_index[i]= s->block_last_index[i];
2159     d->interlaced_dct= s->interlaced_dct;
2160     d->qscale= s->qscale;
2162     d->esc3_level_length= s->esc3_level_length;
/* Trial-encode one macroblock in a given coding mode and keep it if its
 * cost is lower than *dmin.  Writes into a scratch PutBitContext (and,
 * in RD mode, decodes into a scratchpad to measure SSE distortion);
 * *next_block alternates between the two scratch block/bit buffers.
 * NOTE(review): sampled excerpt — the dmin comparison/update lines are
 * not visible here. */
2165 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2166                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2167                            int *dmin, int *next_block, int motion_x, int motion_y)
2170     uint8_t *dest_backup[3];
/* restore pristine state, then point the writers at the scratch buffers */
2172     copy_context_before_encode(s, backup, type);
2174     s->block= s->blocks[*next_block];
2175     s->pb= pb[*next_block];
2176     if(s->data_partitioning){
2177         s->pb2   = pb2   [*next_block];
2178         s->tex_pb= tex_pb[*next_block];
/* RD mode reconstructs into a scratchpad instead of the real frame */
2182         memcpy(dest_backup, s->dest, sizeof(s->dest));
2183         s->dest[0] = s->rd_scratchpad;
2184         s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2185         s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2186         assert(s->linesize >= 32); //FIXME
2189     encode_mb(s, motion_x, motion_y);
/* cost = bits, or bits*lambda2 + SSE in full rate-distortion mode */
2191     score= put_bits_count(&s->pb);
2192     if(s->data_partitioning){
2193         score+= put_bits_count(&s->pb2);
2194         score+= put_bits_count(&s->tex_pb);
2197     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2198         ff_MPV_decode_mb(s, s->block);
2200         score *= s->lambda2;
2201         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2205         memcpy(s->dest, dest_backup, sizeof(s->dest));
2212         copy_context_after_encode(best, s, type);
/* Sum of squared errors between two w x h pixel regions.  Uses the
 * optimized DSP routines for the common 16x16 and 8x8 sizes, otherwise
 * a generic loop over the square-lookup table (for edge blocks). */
2216 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2217     uint32_t *sq = ff_squareTbl + 256;
2222         return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2223     else if(w==8 && h==8)
2224         return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2228             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/* Distortion (SSE, or NSSE if selected as mb_cmp) of the current
 * macroblock: reconstructed s->dest[] vs the source picture, luma plus
 * both chroma planes.  w/h are clipped for macroblocks hanging over the
 * right/bottom picture edge; the generic sse() handles those. */
2237 static int sse_mb(MpegEncContext *s){
2241     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2242     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
/* full 16x16 MB: use the fast fixed-size compare functions */
2245         if(s->avctx->mb_cmp == FF_CMP_NSSE){
2246             return  s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2247                    +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2248                    +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2250             return  s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2251                    +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2252                    +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
/* partial edge MB: generic variable-size SSE (chroma at half size) */
2255         return  sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2256                +sse(s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2257                +sse(s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/* Slice-thread worker: run the cheap pre-pass motion estimation
 * (pre_dia_size search) over this thread's macroblock rows.  Iterates
 * bottom-up / right-to-left — the opposite of the main pass, presumably
 * so predictors come from not-yet-refined neighbours (NOTE: review). */
2260 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2261     MpegEncContext *s= *(void**)arg;
2265     s->me.dia_size= s->avctx->pre_dia_size;
2266     s->first_slice_line=1;
2267     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2268         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2269             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2271         s->first_slice_line=0;
/* Slice-thread worker: full motion estimation for this thread's rows.
 * Walks macroblocks in raster order, maintaining the block_index[]
 * pointers, and dispatches to the B- or P-frame estimator. */
2279 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2280     MpegEncContext *s= *(void**)arg;
2282     ff_check_alignment();
2284     s->me.dia_size= s->avctx->dia_size;
2285     s->first_slice_line=1;
2286     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2287         s->mb_x=0; //for block init below
2288         ff_init_block_index(s);
2289         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
/* advance the four luma block indices by one MB (2 blocks each way) */
2290             s->block_index[0]+=2;
2291             s->block_index[1]+=2;
2292             s->block_index[2]+=2;
2293             s->block_index[3]+=2;
2295             /* compute motion vector & mb_type and store in context */
2296             if(s->pict_type==AV_PICTURE_TYPE_B)
2297                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2299                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2301         s->first_slice_line=0;
/* Slice-thread worker: compute per-macroblock luma variance and mean
 * (used by adaptive quantization / rate control) for this thread's rows.
 * variance = E[x^2] - E[x]^2, scaled to per-pixel units (>>8 for 256
 * pixels), with +500/+128 rounding terms. */
2306 static int mb_var_thread(AVCodecContext *c, void *arg){
2307     MpegEncContext *s= *(void**)arg;
2310     ff_check_alignment();
2312     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2313         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2316             uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2318             int sum = s->dsp.pix_sum(pix, s->linesize);
2320             varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2322             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2323             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2324             s->me.mb_var_sum_temp    += varc;
/* Finish the current slice: merge MPEG-4 data partitions, emit
 * codec-specific stuffing, byte-align and flush the bit writer, and in
 * pass-1 attribute the alignment bits to misc_bits. */
2330 static void write_slice_end(MpegEncContext *s){
2331     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2332         if(s->partitioned_frame){
2333             ff_mpeg4_merge_partitions(s);
2336         ff_mpeg4_stuffing(&s->pb);
2337     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2338         ff_mjpeg_encode_stuffing(&s->pb);
2341     avpriv_align_put_bits(&s->pb);
2342     flush_put_bits(&s->pb);
2344     if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2345         s->misc_bits+= get_bits_diff(s);
/* Append one 12-byte H.263 macroblock-info record to the
 * AV_PKT_DATA_H263_MB_INFO side data: bit offset, qscale, GOB number,
 * MB address within the GOB and the first motion-vector predictor
 * (second MV pair unused — 4MV not implemented). */
2348 static void write_mb_info(MpegEncContext *s)
2350     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2351     int offset = put_bits_count(&s->pb);
2352     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2353     int gobn = s->mb_y / s->gob_index;
2355     if (CONFIG_H263_ENCODER)
2356         ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2357     bytestream_put_le32(&ptr, offset);
2358     bytestream_put_byte(&ptr, s->qscale);
2359     bytestream_put_byte(&ptr, gobn);
2360     bytestream_put_le16(&ptr, mba);
2361     bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2362     bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2363     /* 4MV not implemented */
2364     bytestream_put_byte(&ptr, 0); /* hmv2 */
2365     bytestream_put_byte(&ptr, 0); /* vmv2 */
/* Track where the next H.263 MB-info record should go.  Called per MB:
 * reserve a new 12-byte slot whenever at least mb_info bytes of
 * bitstream have been produced since the last record; when called with
 * startcode set, remember the post-startcode position so the record
 * describes the MB right after the resync point.
 * NOTE(review): sampled excerpt — the early-return branches are not
 * visible here. */
2368 static void update_mb_info(MpegEncContext *s, int startcode)
2372     if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2373         s->mb_info_size += 12;
2374         s->prev_mb_info = s->last_mb_info;
2377         s->prev_mb_info = put_bits_count(&s->pb)/8;
2378         /* This might have incremented mb_info_size above, and we return without
2379          * actually writing any info into that slot yet. But in that case,
2380          * this will be called again at the start of the after writing the
2381          * start code, actually writing the mb info. */
2385     s->last_mb_info = put_bits_count(&s->pb)/8;
2386     if (!s->mb_info_size)
2387         s->mb_info_size += 12;
2391 static int encode_thread(AVCodecContext *c, void *arg){
// Per-slice encoding worker: encodes every macroblock row in
// [s->start_mb_y, s->end_mb_y) for one slice context, writing the bitstream
// into s->pb (plus s->pb2/s->tex_pb when data partitioning is on).
// NOTE(review): the embedded original line numbers are non-contiguous, so
// statements between the visible lines are elided in this view; comments
// below describe only what the visible code shows.
2392 MpegEncContext *s= *(void**)arg;
2393 int mb_x, mb_y, pdif = 0;
2394 int chr_h= 16>>s->chroma_y_shift;
2396 MpegEncContext best_s, backup_s;
2397 uint8_t bit_buf[2][MAX_MB_BYTES];
2398 uint8_t bit_buf2[2][MAX_MB_BYTES];
2399 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2400 PutBitContext pb[2], pb2[2], tex_pb[2];
2402 ff_check_alignment();
// Double-buffered bit writers used by the RD macroblock-mode search:
// encode_mb_hq() writes each candidate encoding into alternating buffers.
2405 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2406 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2407 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2410 s->last_bits= put_bits_count(&s->pb);
2421 /* init last dc values */
2422 /* note: quant matrix value (8) is implied here */
2423 s->last_dc[i] = 128 << s->intra_dc_precision;
2425 s->current_picture.f.error[i] = 0;
2428 memset(s->last_mv, 0, sizeof(s->last_mv));
// Codec-specific per-slice setup (GOB height for H.263 family,
// partition buffers for partitioned MPEG-4 frames).
2432 switch(s->codec_id){
2433 case AV_CODEC_ID_H263:
2434 case AV_CODEC_ID_H263P:
2435 case AV_CODEC_ID_FLV1:
2436 if (CONFIG_H263_ENCODER)
2437 s->gob_index = ff_h263_get_gob_height(s);
2439 case AV_CODEC_ID_MPEG4:
2440 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2441 ff_mpeg4_init_partitions(s);
2447 s->first_slice_line = 1;
2448 s->ptr_lastgob = s->pb.buf;
2449 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2453 ff_set_qscale(s, s->qscale);
2454 ff_init_block_index(s);
2456 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2457 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2458 int mb_type= s->mb_type[xy];
// Bail out if fewer than MAX_MB_BYTES remain in any output buffer;
// a single macroblock may need up to that many bytes.
2463 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2464 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2467 if(s->data_partitioning){
2468 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2469 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2470 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2476 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2477 ff_update_block_index(s);
2479 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2480 ff_h261_reorder_mb_index(s);
2481 xy= s->mb_y*s->mb_stride + s->mb_x;
2482 mb_type= s->mb_type[xy];
2485 /* write gob / video packet header */
2487 int current_packet_size, is_gob_start;
2489 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
// Start a new GOB/packet once the current one reaches rtp_payload_size
// (never at the very first MB of the slice).
2491 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2493 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
// Codec-specific restrictions on where a GOB/slice may start.
// NOTE(review): fall-through between the MPEG-2/MPEG-1 cases appears
// intentional but intervening lines are elided here — confirm upstream.
2495 switch(s->codec_id){
2496 case AV_CODEC_ID_H263:
2497 case AV_CODEC_ID_H263P:
2498 if(!s->h263_slice_structured)
2499 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2501 case AV_CODEC_ID_MPEG2VIDEO:
2502 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2503 case AV_CODEC_ID_MPEG1VIDEO:
2504 if(s->mb_skip_run) is_gob_start=0;
2509 if(s->start_mb_y != mb_y || mb_x!=0){
2512 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2513 ff_mpeg4_init_partitions(s);
2517 assert((put_bits_count(&s->pb)&7) == 0);
2518 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
// Optional error-rate simulation: deterministically "corrupt" packets
// by rewinding the write pointer to the start of the GOB.
2520 if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
2521 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2522 int d= 100 / s->avctx->error_rate;
2524 current_packet_size=0;
2525 s->pb.buf_ptr= s->ptr_lastgob;
2526 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2530 if (s->avctx->rtp_callback){
2531 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2532 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2534 update_mb_info(s, 1);
// Emit the codec-specific resync/slice/GOB header.
2536 switch(s->codec_id){
2537 case AV_CODEC_ID_MPEG4:
2538 if (CONFIG_MPEG4_ENCODER) {
2539 ff_mpeg4_encode_video_packet_header(s);
2540 ff_mpeg4_clean_buffers(s);
2543 case AV_CODEC_ID_MPEG1VIDEO:
2544 case AV_CODEC_ID_MPEG2VIDEO:
2545 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2546 ff_mpeg1_encode_slice_header(s);
2547 ff_mpeg1_clean_buffers(s);
2550 case AV_CODEC_ID_H263:
2551 case AV_CODEC_ID_H263P:
2552 if (CONFIG_H263_ENCODER)
2553 ff_h263_encode_gob_header(s, mb_y);
2557 if(s->flags&CODEC_FLAG_PASS1){
2558 int bits= put_bits_count(&s->pb);
2559 s->misc_bits+= bits - s->last_bits;
2563 s->ptr_lastgob += current_packet_size;
2564 s->first_slice_line=1;
2565 s->resync_mb_x=mb_x;
2566 s->resync_mb_y=mb_y;
2570 if( (s->resync_mb_x == s->mb_x)
2571 && s->resync_mb_y+1 == s->mb_y){
2572 s->first_slice_line=0;
2576 s->dquant=0; //only for QP_RD
2578 update_mb_info(s, 0);
// Rate-distortion macroblock mode decision: when more than one candidate
// MB type exists (or QP RD is enabled), encode each candidate with
// encode_mb_hq() into the alternating bit buffers and keep the cheapest.
2580 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2582 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2584 copy_context_before_encode(&backup_s, s, -1);
2586 best_s.data_partitioning= s->data_partitioning;
2587 best_s.partitioned_frame= s->partitioned_frame;
2588 if(s->data_partitioning){
2589 backup_s.pb2= s->pb2;
2590 backup_s.tex_pb= s->tex_pb;
2593 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2594 s->mv_dir = MV_DIR_FORWARD;
2595 s->mv_type = MV_TYPE_16X16;
2597 s->mv[0][0][0] = s->p_mv_table[xy][0];
2598 s->mv[0][0][1] = s->p_mv_table[xy][1];
2599 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2600 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2602 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2603 s->mv_dir = MV_DIR_FORWARD;
2604 s->mv_type = MV_TYPE_FIELD;
2607 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2608 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2609 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2611 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2612 &dmin, &next_block, 0, 0);
2614 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2615 s->mv_dir = MV_DIR_FORWARD;
2616 s->mv_type = MV_TYPE_16X16;
2620 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2621 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2623 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2624 s->mv_dir = MV_DIR_FORWARD;
2625 s->mv_type = MV_TYPE_8X8;
2628 s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
2629 s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
2631 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2632 &dmin, &next_block, 0, 0);
2634 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2635 s->mv_dir = MV_DIR_FORWARD;
2636 s->mv_type = MV_TYPE_16X16;
2638 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2639 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2640 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2641 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2643 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2644 s->mv_dir = MV_DIR_BACKWARD;
2645 s->mv_type = MV_TYPE_16X16;
2647 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2648 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2649 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2650 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2652 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2653 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2654 s->mv_type = MV_TYPE_16X16;
2656 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2657 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2658 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2659 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2660 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2661 &dmin, &next_block, 0, 0);
2663 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2664 s->mv_dir = MV_DIR_FORWARD;
2665 s->mv_type = MV_TYPE_FIELD;
2668 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2669 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2670 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2672 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2673 &dmin, &next_block, 0, 0);
2675 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2676 s->mv_dir = MV_DIR_BACKWARD;
2677 s->mv_type = MV_TYPE_FIELD;
2680 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2681 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2682 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2684 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2685 &dmin, &next_block, 0, 0);
2687 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2688 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2689 s->mv_type = MV_TYPE_FIELD;
2691 for(dir=0; dir<2; dir++){
2693 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2694 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2695 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2698 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2699 &dmin, &next_block, 0, 0);
2701 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2703 s->mv_type = MV_TYPE_16X16;
2707 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2708 &dmin, &next_block, 0, 0);
2709 if(s->h263_pred || s->h263_aic){
2711 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2713 ff_clean_intra_table_entries(s); //old mode?
// QP RD: re-encode the best mode at neighbouring qscales
// (dquant -1,+1,-2,+2) and keep whichever is cheaper.
2717 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2718 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2719 const int last_qp= backup_s.qscale;
2722 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2723 static const int dquant_tab[4]={-1,1,-2,2};
2725 assert(backup_s.dquant == 0);
2728 s->mv_dir= best_s.mv_dir;
2729 s->mv_type = MV_TYPE_16X16;
2730 s->mb_intra= best_s.mb_intra;
2731 s->mv[0][0][0] = best_s.mv[0][0][0];
2732 s->mv[0][0][1] = best_s.mv[0][0][1];
2733 s->mv[1][0][0] = best_s.mv[1][0][0];
2734 s->mv[1][0][1] = best_s.mv[1][0][1];
2736 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2737 for(; qpi<4; qpi++){
2738 int dquant= dquant_tab[qpi];
2739 qp= last_qp + dquant;
2740 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2742 backup_s.dquant= dquant;
// Intra: save DC/AC prediction state so it can be restored
// if this qscale candidate loses.
2743 if(s->mb_intra && s->dc_val[0]){
2745 dc[i]= s->dc_val[0][ s->block_index[i] ];
2746 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
2750 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2751 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2752 if(best_s.qscale != qp){
2753 if(s->mb_intra && s->dc_val[0]){
2755 s->dc_val[0][ s->block_index[i] ]= dc[i];
2756 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
2763 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2764 int mx= s->b_direct_mv_table[xy][0];
2765 int my= s->b_direct_mv_table[xy][1];
2767 backup_s.dquant = 0;
2768 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2770 ff_mpeg4_set_direct_mv(s, mx, my);
2771 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2772 &dmin, &next_block, mx, my);
2774 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2775 backup_s.dquant = 0;
2776 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2778 ff_mpeg4_set_direct_mv(s, 0, 0);
2779 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2780 &dmin, &next_block, 0, 0);
// Skip RD: if the best inter mode produced no coefficients, try
// encoding the MB as not-coded and compare.
2782 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
2785 coded |= s->block_last_index[i];
2788 memcpy(s->mv, best_s.mv, sizeof(s->mv));
2789 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2790 mx=my=0; //FIXME find the one we actually used
2791 ff_mpeg4_set_direct_mv(s, mx, my);
2792 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2800 s->mv_dir= best_s.mv_dir;
2801 s->mv_type = best_s.mv_type;
2803 /* s->mv[0][0][0] = best_s.mv[0][0][0];
2804 s->mv[0][0][1] = best_s.mv[0][0][1];
2805 s->mv[1][0][0] = best_s.mv[1][0][0];
2806 s->mv[1][0][1] = best_s.mv[1][0][1];*/
2809 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2810 &dmin, &next_block, mx, my);
2815 s->current_picture.f.qscale_table[xy] = best_s.qscale;
// Commit the winning candidate: restore its context and copy its
// bits from the scratch buffers into the real bitstreams.
2817 copy_context_after_encode(s, &best_s, -1);
2819 pb_bits_count= put_bits_count(&s->pb);
2820 flush_put_bits(&s->pb);
2821 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2824 if(s->data_partitioning){
2825 pb2_bits_count= put_bits_count(&s->pb2);
2826 flush_put_bits(&s->pb2);
2827 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2828 s->pb2= backup_s.pb2;
2830 tex_pb_bits_count= put_bits_count(&s->tex_pb);
2831 flush_put_bits(&s->tex_pb);
2832 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2833 s->tex_pb= backup_s.tex_pb;
2835 s->last_bits= put_bits_count(&s->pb);
2837 if (CONFIG_H263_ENCODER &&
2838 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2839 ff_h263_update_motion_val(s);
// Copy the winning reconstruction from the RD scratchpad into the
// current picture (needed as reference for later MBs/frames).
2841 if(next_block==0){ //FIXME 16 vs linesize16
2842 s->dsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
2843 s->dsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
2844 s->dsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2847 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
2848 ff_MPV_decode_mb(s, s->block);
// Fast path: exactly one candidate MB type — set up motion data for
// that type and encode it directly.
2850 int motion_x = 0, motion_y = 0;
2851 s->mv_type=MV_TYPE_16X16;
2852 // only one MB-Type possible
2855 case CANDIDATE_MB_TYPE_INTRA:
2858 motion_x= s->mv[0][0][0] = 0;
2859 motion_y= s->mv[0][0][1] = 0;
2861 case CANDIDATE_MB_TYPE_INTER:
2862 s->mv_dir = MV_DIR_FORWARD;
2864 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2865 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2867 case CANDIDATE_MB_TYPE_INTER_I:
2868 s->mv_dir = MV_DIR_FORWARD;
2869 s->mv_type = MV_TYPE_FIELD;
2872 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2873 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2874 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2877 case CANDIDATE_MB_TYPE_INTER4V:
2878 s->mv_dir = MV_DIR_FORWARD;
2879 s->mv_type = MV_TYPE_8X8;
2882 s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
2883 s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
2886 case CANDIDATE_MB_TYPE_DIRECT:
2887 if (CONFIG_MPEG4_ENCODER) {
2888 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2890 motion_x=s->b_direct_mv_table[xy][0];
2891 motion_y=s->b_direct_mv_table[xy][1];
2892 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
2895 case CANDIDATE_MB_TYPE_DIRECT0:
2896 if (CONFIG_MPEG4_ENCODER) {
2897 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2899 ff_mpeg4_set_direct_mv(s, 0, 0);
2902 case CANDIDATE_MB_TYPE_BIDIR:
2903 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2905 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2906 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2907 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2908 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2910 case CANDIDATE_MB_TYPE_BACKWARD:
2911 s->mv_dir = MV_DIR_BACKWARD;
2913 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2914 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2916 case CANDIDATE_MB_TYPE_FORWARD:
2917 s->mv_dir = MV_DIR_FORWARD;
2919 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2920 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2922 case CANDIDATE_MB_TYPE_FORWARD_I:
2923 s->mv_dir = MV_DIR_FORWARD;
2924 s->mv_type = MV_TYPE_FIELD;
2927 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2928 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2929 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2932 case CANDIDATE_MB_TYPE_BACKWARD_I:
2933 s->mv_dir = MV_DIR_BACKWARD;
2934 s->mv_type = MV_TYPE_FIELD;
2937 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2938 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2939 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2942 case CANDIDATE_MB_TYPE_BIDIR_I:
2943 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2944 s->mv_type = MV_TYPE_FIELD;
2946 for(dir=0; dir<2; dir++){
2948 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2949 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2950 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2955 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
2958 encode_mb(s, motion_x, motion_y);
2960 // RAL: Update last macroblock type
2961 s->last_mv_dir = s->mv_dir;
2963 if (CONFIG_H263_ENCODER &&
2964 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2965 ff_h263_update_motion_val(s);
2967 ff_MPV_decode_mb(s, s->block);
2970 /* clean the MV table in IPS frames for direct mode in B frames */
2971 if(s->mb_intra /* && I,P,S_TYPE */){
2972 s->p_mv_table[xy][0]=0;
2973 s->p_mv_table[xy][1]=0;
// PSNR bookkeeping: accumulate per-plane SSE between source and
// reconstruction, clipped to the real picture dimensions at the edges.
2976 if(s->flags&CODEC_FLAG_PSNR){
2980 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2981 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2983 s->current_picture.f.error[0] += sse(
2984 s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
2985 s->dest[0], w, h, s->linesize);
2986 s->current_picture.f.error[1] += sse(
2987 s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
2988 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2989 s->current_picture.f.error[2] += sse(
2990 s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
2991 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2994 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
2995 ff_h263_loop_filter(s);
2997 av_dlog(s->avctx, "MB %d %d bits\n",
2998 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3002 //not beautiful here but we must write it before flushing so it has to be here
3003 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3004 ff_msmpeg4_encode_ext_header(s);
3008 /* Send the last GOB if RTP */
3009 if (s->avctx->rtp_callback) {
3010 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3011 pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3012 /* Call the RTP callback to send the last GOB */
3014 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
// Move-accumulate a field from a slice thread context into the main one;
// the source field is zeroed so a later re-merge cannot double count.
3020 #define MERGE(field) dst->field += src->field; src->field=0
// Fold per-thread motion-estimation statistics into the main context
// after the ME pass.
3021 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
3022 MERGE(me.scene_change_score);
3023 MERGE(me.mc_mb_var_sum_temp);
3024 MERGE(me.mb_var_sum_temp);
// Fold per-thread encoding statistics into the main context after
// encode_thread() has run in every slice context, then append the
// thread's byte-aligned bitstream to the main bitstream.
3027 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
3030 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3031 MERGE(dct_count[1]);
3041 MERGE(padding_bug_score);
3042 MERGE(current_picture.f.error[0]);
3043 MERGE(current_picture.f.error[1]);
3044 MERGE(current_picture.f.error[2]);
3046 if(dst->avctx->noise_reduction){
3047 for(i=0; i<64; i++){
3048 MERGE(dct_error_sum[0][i]);
3049 MERGE(dct_error_sum[1][i]);
// Both streams must be byte-aligned before concatenation.
3053 assert(put_bits_count(&src->pb) % 8 ==0);
3054 assert(put_bits_count(&dst->pb) % 8 ==0);
3055 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3056 flush_put_bits(&dst->pb);
// Pick the picture-level quality (lambda/qscale) for the current frame:
// either a pre-computed next_lambda, the rate-control estimate, or the
// fixed qscale.  With adaptive quant, also cleans up the per-MB qscale
// table in a codec-specific way.  dry_run: probe only (keeps next_lambda).
3059 static int estimate_qp(MpegEncContext *s, int dry_run){
3060 if (s->next_lambda){
3061 s->current_picture_ptr->f.quality =
3062 s->current_picture.f.quality = s->next_lambda;
3063 if(!dry_run) s->next_lambda= 0;
3064 } else if (!s->fixed_qscale) {
3065 s->current_picture_ptr->f.quality =
3066 s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
3067 if (s->current_picture.f.quality < 0)
3071 if(s->adaptive_quant){
3072 switch(s->codec_id){
3073 case AV_CODEC_ID_MPEG4:
3074 if (CONFIG_MPEG4_ENCODER)
3075 ff_clean_mpeg4_qscales(s);
3077 case AV_CODEC_ID_H263:
3078 case AV_CODEC_ID_H263P:
3079 case AV_CODEC_ID_FLV1:
3080 if (CONFIG_H263_ENCODER)
3081 ff_clean_h263_qscales(s);
3084 ff_init_qscale_tab(s);
3087 s->lambda= s->lambda_table[0];
3090 s->lambda = s->current_picture.f.quality;
3095 /* must be called before writing the header */
// Derive the temporal distances used by B-frame prediction from frame pts:
// pp_time = distance between the two surrounding non-B frames,
// pb_time = distance from the previous non-B frame to this B frame.
3096 static void set_frame_distances(MpegEncContext * s){
3097 assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
3098 s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3100 if(s->pict_type==AV_PICTURE_TYPE_B){
3101 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3102 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3104 s->pp_time= s->time - s->last_non_b_time;
3105 s->last_non_b_time= s->time;
3106 assert(s->picture_number==0 || s->pp_time > 0);
// Encode one complete picture: motion estimation across all slice threads,
// scene-change handling, f_code/b_code selection, MV clamping, rate control,
// codec-specific picture header, then the per-slice encode_thread() pass and
// a final merge of the slice contexts.
// NOTE(review): interior lines are elided in this view (embedded original
// line numbers are non-contiguous); comments describe only the visible code.
3110 static int encode_picture(MpegEncContext *s, int picture_number)
3114 int context_count = s->slice_context_count;
3116 s->picture_number = picture_number;
3118 /* Reset the average MB variance */
3119 s->me.mb_var_sum_temp =
3120 s->me.mc_mb_var_sum_temp = 0;
3122 /* we need to initialize some time vars before we can encode b-frames */
3123 // RAL: Condition added for MPEG1VIDEO
3124 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3125 set_frame_distances(s);
3126 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3127 ff_set_mpeg4_time(s);
3129 s->me.scene_change_score=0;
3131 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
// no_rounding alternates on non-B frames for codecs with flipflop
// rounding (drift compensation); I frames reset it.
3133 if(s->pict_type==AV_PICTURE_TYPE_I){
3134 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3135 else s->no_rounding=0;
3136 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3137 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3138 s->no_rounding ^= 1;
3141 if(s->flags & CODEC_FLAG_PASS2){
3142 if (estimate_qp(s,1) < 0)
3144 ff_get_2pass_fcode(s);
3145 }else if(!(s->flags & CODEC_FLAG_QSCALE)){
3146 if(s->pict_type==AV_PICTURE_TYPE_B)
3147 s->lambda= s->last_lambda_for[s->pict_type];
3149 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3153 s->mb_intra=0; //for the rate distortion & bit compare functions
3154 for(i=1; i<context_count; i++){
3155 ret = ff_update_duplicate_context(s->thread_context[i], s);
3163 /* Estimate motion for every MB */
3164 if(s->pict_type != AV_PICTURE_TYPE_I){
3165 s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3166 s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3167 if(s->pict_type != AV_PICTURE_TYPE_B && s->avctx->me_threshold==0){
3168 if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3169 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3173 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3174 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3176 for(i=0; i<s->mb_stride*s->mb_height; i++)
3177 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3179 if(!s->fixed_qscale){
3180 /* finding spatial complexity for I-frame rate control */
3181 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3184 for(i=1; i<context_count; i++){
3185 merge_context_after_me(s, s->thread_context[i]);
3187 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3188 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
// Scene change: force the P frame to be coded as all-intra I frame.
3191 if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3192 s->pict_type= AV_PICTURE_TYPE_I;
3193 for(i=0; i<s->mb_stride*s->mb_height; i++)
3194 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3195 av_dlog(s, "Scene change detected, encoding as I Frame %d %d\n",
3196 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
// Choose f_code from the P-frame MV range and clamp out-of-range MVs.
3200 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3201 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3203 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3205 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3206 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3207 s->f_code= FFMAX3(s->f_code, a, b);
3210 ff_fix_long_p_mvs(s);
3211 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3212 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3216 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3217 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
// B frames: pick f_code (forward) and b_code (backward) independently
// and clamp all four MV tables accordingly.
3222 if(s->pict_type==AV_PICTURE_TYPE_B){
3225 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3226 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3227 s->f_code = FFMAX(a, b);
3229 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3230 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3231 s->b_code = FFMAX(a, b);
3233 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3234 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3235 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3236 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3237 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3239 for(dir=0; dir<2; dir++){
3242 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3243 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3244 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3245 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3253 if (estimate_qp(s, 0) < 0)
3256 if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
3257 s->qscale= 3; //reduce clipping problems
3259 if (s->out_format == FMT_MJPEG) {
3260 /* for mjpeg, we do include qscale in the matrix */
3262 int j= s->dsp.idct_permutation[i];
3264 s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3266 s->y_dc_scale_table=
3267 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3268 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3269 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3270 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3274 //FIXME var duplication
3275 s->current_picture_ptr->f.key_frame =
3276 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3277 s->current_picture_ptr->f.pict_type =
3278 s->current_picture.f.pict_type = s->pict_type;
3280 if (s->current_picture.f.key_frame)
3281 s->picture_in_gop_number=0;
// Write the codec-specific picture header and record its bit cost.
3283 s->last_bits= put_bits_count(&s->pb);
3284 switch(s->out_format) {
3286 if (CONFIG_MJPEG_ENCODER)
3287 ff_mjpeg_encode_picture_header(s);
3290 if (CONFIG_H261_ENCODER)
3291 ff_h261_encode_picture_header(s, picture_number);
3294 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3295 ff_wmv2_encode_picture_header(s, picture_number);
3296 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3297 ff_msmpeg4_encode_picture_header(s, picture_number);
3298 else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3299 ff_mpeg4_encode_picture_header(s, picture_number);
3300 else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
3301 ff_rv10_encode_picture_header(s, picture_number);
3302 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3303 ff_rv20_encode_picture_header(s, picture_number);
3304 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3305 ff_flv_encode_picture_header(s, picture_number);
3306 else if (CONFIG_H263_ENCODER)
3307 ff_h263_encode_picture_header(s, picture_number);
3310 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3311 ff_mpeg1_encode_picture_header(s, picture_number);
3318 bits= put_bits_count(&s->pb);
3319 s->header_bits= bits - s->last_bits;
// Run the slice encoders in parallel and merge their results/bitstreams.
3321 for(i=1; i<context_count; i++){
3322 update_duplicate_context_after_me(s->thread_context[i], s);
3324 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3325 for(i=1; i<context_count; i++){
3326 merge_context_after_encode(s, s->thread_context[i]);
// Noise reduction: accumulate per-coefficient DCT magnitudes into
// dct_error_sum (used to derive dct_offset) and shrink each coefficient
// towards zero by the current offset.
// NOTE(review): the branch structure separating the positive- and
// negative-coefficient paths is elided in this view — the two
// add/subtract pairs below presumably sit in opposite sign branches;
// confirm against the full source.
3332 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3333 const int intra= s->mb_intra;
3336 s->dct_count[intra]++;
3338 for(i=0; i<64; i++){
3339 int level= block[i];
3343 s->dct_error_sum[intra][i] += level;
3344 level -= s->dct_offset[intra][i];
3345 if(level<0) level=0;
3347 s->dct_error_sum[intra][i] -= level;
3348 level += s->dct_offset[intra][i];
3349 if(level>0) level=0;
// Trellis (rate-distortion optimal) quantization of one 8x8 block:
// forward DCT, per-coefficient candidate levels, then a Viterbi-style
// search over run/level survivors minimizing distortion + lambda*bits.
// Returns the index of the last non-zero coefficient (or -1/<start_i if
// the block quantizes to nothing); *overflow is set when a level exceeds
// max_qcoeff.  NOTE(review): interior lines are elided in this view.
3356 static int dct_quantize_trellis_c(MpegEncContext *s,
3357 int16_t *block, int n,
3358 int qscale, int *overflow){
3360 const uint8_t *scantable= s->intra_scantable.scantable;
3361 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3363 unsigned int threshold1, threshold2;
3375 int coeff_count[64];
3376 int qmul, qadd, start_i, last_non_zero, i, dc;
3377 const int esc_length= s->ac_esc_length;
3379 uint8_t * last_length;
3380 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3382 s->dsp.fdct (block);
3384 if(s->dct_error_sum)
3385 s->denoise_dct(s, block);
3387 qadd= ((qscale-1)|1)*8;
3398 /* For AIC we skip quant/dequant of INTRADC */
3403 /* note: block[0] is assumed to be positive */
3404 block[0] = (block[0] + (q >> 1)) / q;
// Intra vs inter: select quant matrix and the VLC length tables used
// for bit-cost estimation.
3407 qmat = s->q_intra_matrix[qscale];
3408 if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3409 bias= 1<<(QMAT_SHIFT-1);
3410 length = s->intra_ac_vlc_length;
3411 last_length= s->intra_ac_vlc_last_length;
3415 qmat = s->q_inter_matrix[qscale];
3416 length = s->inter_ac_vlc_length;
3417 last_length= s->inter_ac_vlc_last_length;
3421 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3422 threshold2= (threshold1<<1);
// Find the last coefficient that survives the dead-zone threshold.
3424 for(i=63; i>=start_i; i--) {
3425 const int j = scantable[i];
3426 int level = block[j] * qmat[j];
3428 if(((unsigned)(level+threshold1))>threshold2){
// For each surviving position, build up to two candidate quantized
// levels (level and level-1) in coeff[][]; below-threshold positions
// get a single +/-1 candidate.
3434 for(i=start_i; i<=last_non_zero; i++) {
3435 const int j = scantable[i];
3436 int level = block[j] * qmat[j];
3438 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3439 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3440 if(((unsigned)(level+threshold1))>threshold2){
3442 level= (bias + level)>>QMAT_SHIFT;
3444 coeff[1][i]= level-1;
3445 // coeff[2][k]= level-2;
3447 level= (bias - level)>>QMAT_SHIFT;
3448 coeff[0][i]= -level;
3449 coeff[1][i]= -level+1;
3450 // coeff[2][k]= -level+2;
3452 coeff_count[i]= FFMIN(level, 2);
3453 assert(coeff_count[i]);
3456 coeff[0][i]= (level>>31)|1;
3461 *overflow= s->max_qcoeff < max; //overflow might have happened
3463 if(last_non_zero < start_i){
3464 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3465 return last_non_zero;
3468 score_tab[start_i]= 0;
3469 survivor[0]= start_i;
// Main trellis pass: for each position and candidate level, try
// extending every survivor path and keep the cheapest
// (distortion + lambda * VLC bits, with escape-code fallback).
3472 for(i=start_i; i<=last_non_zero; i++){
3473 int level_index, j, zero_distortion;
3474 int dct_coeff= FFABS(block[ scantable[i] ]);
3475 int best_score=256*256*256*120;
// ifast FDCT output is AAN-prescaled; undo the scale for distortion.
3477 if (s->dsp.fdct == ff_fdct_ifast)
3478 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3479 zero_distortion= dct_coeff*dct_coeff;
3481 for(level_index=0; level_index < coeff_count[i]; level_index++){
3483 int level= coeff[level_index][i];
3484 const int alevel= FFABS(level);
// Reconstruct the coefficient exactly as the decoder would,
// so distortion is measured against the real dequantized value.
3489 if(s->out_format == FMT_H263){
3490 unquant_coeff= alevel*qmul + qadd;
3492 j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
3494 unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3;
3495 unquant_coeff = (unquant_coeff - 1) | 1;
3497 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3498 unquant_coeff = (unquant_coeff - 1) | 1;
3503 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
3505 if((level&(~127)) == 0){
3506 for(j=survivor_count-1; j>=0; j--){
3507 int run= i - survivor[j];
3508 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3509 score += score_tab[i-run];
3511 if(score < best_score){
3514 level_tab[i+1]= level-64;
3518 if(s->out_format == FMT_H263){
3519 for(j=survivor_count-1; j>=0; j--){
3520 int run= i - survivor[j];
3521 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3522 score += score_tab[i-run];
3523 if(score < last_score){
3526 last_level= level-64;
// Levels outside the regular VLC range must use the escape code.
3532 distortion += esc_length*lambda;
3533 for(j=survivor_count-1; j>=0; j--){
3534 int run= i - survivor[j];
3535 int score= distortion + score_tab[i-run];
3537 if(score < best_score){
3540 level_tab[i+1]= level-64;
3544 if(s->out_format == FMT_H263){
3545 for(j=survivor_count-1; j>=0; j--){
3546 int run= i - survivor[j];
3547 int score= distortion + score_tab[i-run];
3548 if(score < last_score){
3551 last_level= level-64;
3559 score_tab[i+1]= best_score;
// Prune survivors that can no longer win; the extra lambda slack for
// long runs accounts for an MPEG-4 VLC asymmetry (see note below).
3561 //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
3562 if(last_non_zero <= 27){
3563 for(; survivor_count; survivor_count--){
3564 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
3568 for(; survivor_count; survivor_count--){
3569 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
3574 survivor[ survivor_count++ ]= i+1;
// Non-H.263 formats: pick the best truncation point for the block.
3577 if(s->out_format != FMT_H263){
3578 last_score= 256*256*256*120;
3579 for(i= survivor[0]; i<=last_non_zero + 1; i++){
3580 int score= score_tab[i];
3581 if(i) score += lambda*2; //FIXME exacter?
3583 if(score < last_score){
3586 last_level= level_tab[i];
3587 last_run= run_tab[i];
3592 s->coded_score[n] = last_score;
3594 dc= FFABS(block[0]);
3595 last_non_zero= last_i - 1;
3596 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3598 if(last_non_zero < start_i)
3599 return last_non_zero;
// Special case: only the first coefficient survived — decide between
// coding it (at its best candidate level) and dropping the block.
3601 if(last_non_zero == 0 && start_i == 0){
3603 int best_score= dc * dc;
3605 for(i=0; i<coeff_count[0]; i++){
3606 int level= coeff[i][0];
3607 int alevel= FFABS(level);
3608 int unquant_coeff, score, distortion;
3610 if(s->out_format == FMT_H263){
3611 unquant_coeff= (alevel*qmul + qadd)>>3;
3613 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
3614 unquant_coeff = (unquant_coeff - 1) | 1;
3616 unquant_coeff = (unquant_coeff + 4) >> 3;
3617 unquant_coeff<<= 3 + 3;
3619 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
3621 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
3622 else score= distortion + esc_length*lambda;
3624 if(score < best_score){
3626 best_level= level - 64;
3629 block[0]= best_level;
3630 s->coded_score[n] = best_score - dc*dc;
3631 if(best_level == 0) return -1;
3632 else return last_non_zero;
// Backtrack the winning path, writing levels into the block in
// permuted scan order.
3638 block[ perm_scantable[last_non_zero] ]= last_level;
3641 for(; i>start_i; i -= run_tab[i] + 1){
3642 block[ perm_scantable[i-1] ]= level_tab[i];
3645 return last_non_zero;
/*
 * NOTE(review): this chunk is a decimated excerpt — each line carries its
 * original file line number and many intervening lines (loop headers over
 * i/j/x/y, braces) are elided.  Code below is preserved byte-for-byte;
 * only comments are added.
 */
3648 //#define REFINE_STATS 1
/* 8x8 IDCT cosine-basis table, indexed by (IDCT-permuted) coefficient index
 * and spatial position; filled lazily by build_basis() on first use
 * (see the basis[0][0] == 0 check in dct_quantize_refine). */
3649 static int16_t basis[64][64];
/* Build the scaled DCT basis functions, permuted to match the platform's
 * IDCT coefficient order given in 'perm'. */
3651 static void build_basis(uint8_t *perm){
3658 double s= 0.25*(1<<BASIS_SHIFT);
3660 int perm_index= perm[index];
/* DC rows/columns of the separable DCT get the 1/sqrt(2) normalisation. */
3661 if(i==0) s*= sqrt(0.5);
3662 if(j==0) s*= sqrt(0.5);
3663 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/*
 * Iteratively refine an already-quantized block to minimise a combined
 * rate/distortion cost (quantizer noise shaping).  Tries +-1 changes on
 * each coefficient, scoring distortion via dsp.try_8x8basis() and rate via
 * the AC VLC length tables, and applies the best change until no change
 * improves the score.  Returns the new last_non_zero index.
 * NOTE(review): excerpt is decimated — many original lines are elided;
 * code is preserved byte-for-byte, comments only added.
 */
3670 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
3671 int16_t *block, int16_t *weight, int16_t *orig,
3674 LOCAL_ALIGNED_16(int16_t, d1, [64]);
3675 const uint8_t *scantable= s->intra_scantable.scantable;
3676 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3677 // unsigned int threshold1, threshold2;
3682 int qmul, qadd, start_i, last_non_zero, i, dc;
3684 uint8_t * last_length;
3686 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* Statistics counters — presumably only compiled in under REFINE_STATS. */
3689 static int after_last=0;
3690 static int to_zero=0;
3691 static int from_zero=0;
3694 static int messed_sign=0;
/* Lazily initialise the shared basis table (basis[0][0] is the DC basis
 * value, which is nonzero once built). */
3697 if(basis[0][0] == 0)
3698 build_basis(s->dsp.idct_permutation);
3709 /* For AIC we skip quant/dequant of INTRADC */
3713 q <<= RECON_SHIFT-3;
3714 /* note: block[0] is assumed to be positive */
3716 // block[0] = (block[0] + (q >> 1)) / q;
3718 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3719 // bias= 1<<(QMAT_SHIFT-1);
/* Select intra vs inter AC VLC length tables (branch structure elided). */
3720 length = s->intra_ac_vlc_length;
3721 last_length= s->intra_ac_vlc_last_length;
3725 length = s->inter_ac_vlc_length;
3726 last_length= s->inter_ac_vlc_last_length;
3728 last_non_zero = s->block_last_index[n];
/* Initialise rem[] with the (negated) target: dc minus original pixels,
 * scaled by RECON_SHIFT; dequantized coefficients are added back below. */
3733 dc += (1<<(RECON_SHIFT-1));
3734 for(i=0; i<64; i++){
3735 rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig dirrectly instead of copying to rem[]
3738 STOP_TIMER("memset rem[]")}
/* Build per-coefficient perceptual weights in the 16..63 range. */
3741 for(i=0; i<64; i++){
3746 w= FFABS(weight[i]) + qns*one;
3747 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
3750 // w=weight[i] = (63*qns + (w/2)) / w;
3756 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* Add each currently-coded coefficient's dequantized contribution into
 * rem[] and record the run lengths for the RLE walk below. */
3762 for(i=start_i; i<=last_non_zero; i++){
3763 int j= perm_scantable[i];
3764 const int level= block[j];
3768 if(level<0) coeff= qmul*level - qadd;
3769 else coeff= qmul*level + qadd;
3770 run_tab[rle_index++]=run;
3773 s->dsp.add_8x8basis(rem, basis[j], coeff);
3779 if(last_non_zero>0){
3780 STOP_TIMER("init rem[]")
/* Main refinement loop: evaluate candidate +-1 changes, keep the best. */
3787 int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
3790 int run2, best_unquant_change=0, analyze_gradient;
3794 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
/* Precompute a weighted gradient (d1) used to cheaply reject changes whose
 * sign disagrees with the residual direction. */
3796 if(analyze_gradient){
3800 for(i=0; i<64; i++){
3803 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
3806 STOP_TIMER("rem*w*w")}
/* DC coefficient: only tried for intra blocks (asserted below). */
3816 const int level= block[0];
3817 int change, old_coeff;
3819 assert(s->mb_intra);
3823 for(change=-1; change<=1; change+=2){
3824 int new_level= level + change;
3825 int score, new_coeff;
3827 new_coeff= q*new_level;
/* Reject dequantized values outside the representable range. */
3828 if(new_coeff >= 2048 || new_coeff < 0)
3831 score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
3832 if(score<best_score){
3835 best_change= change;
3836 best_unquant_change= new_coeff - old_coeff;
3843 run2= run_tab[rle_index++];
/* AC coefficients: walk scan order, scoring rate delta + distortion delta
 * for each candidate +-1 change. */
3847 for(i=start_i; i<64; i++){
3848 int j= perm_scantable[i];
3849 const int level= block[j];
3850 int change, old_coeff;
3852 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
3856 if(level<0) old_coeff= qmul*level - qadd;
3857 else old_coeff= qmul*level + qadd;
3858 run2= run_tab[rle_index++]; //FIXME ! maybe after last
3862 assert(run2>=0 || i >= last_non_zero );
3865 for(change=-1; change<=1; change+=2){
3866 int new_level= level + change;
3867 int score, new_coeff, unquant_change;
/* With mild noise shaping, never grow a coefficient's magnitude. */
3870 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
3874 if(new_level<0) new_coeff= qmul*new_level - qadd;
3875 else new_coeff= qmul*new_level + qadd;
3876 if(new_coeff >= 2048 || new_coeff <= -2048)
3878 //FIXME check for overflow
/* Case: level stays nonzero — rate delta is the VLC length difference,
 * using the "last" table when this is the final coefficient. */
3881 if(level < 63 && level > -63){
3882 if(i < last_non_zero)
3883 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
3884 - length[UNI_AC_ENC_INDEX(run, level+64)];
3886 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
3887 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* Case: coefficient goes 0 -> +-1 (splits the surrounding run). */
3890 assert(FFABS(new_level)==1);
3892 if(analyze_gradient){
3893 int g= d1[ scantable[i] ];
/* Skip changes whose sign agrees with the gradient (would add error). */
3894 if(g && (g^new_level) >= 0)
3898 if(i < last_non_zero){
3899 int next_i= i + run2 + 1;
3900 int next_level= block[ perm_scantable[next_i] ] + 64;
/* Only handled when the following level fits the VLC index range. */
3902 if(next_level&(~127))
3905 if(next_i < last_non_zero)
3906 score += length[UNI_AC_ENC_INDEX(run, 65)]
3907 + length[UNI_AC_ENC_INDEX(run2, next_level)]
3908 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3910 score += length[UNI_AC_ENC_INDEX(run, 65)]
3911 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3912 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3914 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
3916 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3917 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Case: coefficient goes +-1 -> 0 (merges two runs). */
3923 assert(FFABS(level)==1);
3925 if(i < last_non_zero){
3926 int next_i= i + run2 + 1;
3927 int next_level= block[ perm_scantable[next_i] ] + 64;
3929 if(next_level&(~127))
3932 if(next_i < last_non_zero)
3933 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3934 - length[UNI_AC_ENC_INDEX(run2, next_level)]
3935 - length[UNI_AC_ENC_INDEX(run, 65)];
3937 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3938 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3939 - length[UNI_AC_ENC_INDEX(run, 65)];
3941 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
3943 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3944 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* Combine rate delta with the distortion delta from the basis trial. */
3951 unquant_change= new_coeff - old_coeff;
3952 assert((score < 100*lambda && score > -100*lambda) || lambda==0);
3954 score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
3955 if(score<best_score){
3958 best_change= change;
3959 best_unquant_change= unquant_change;
3963 prev_level= level + 64;
3964 if(prev_level&(~127))
3973 STOP_TIMER("iterative step")}
/* Apply the winning change and maintain last_non_zero / run bookkeeping. */
3977 int j= perm_scantable[ best_coeff ];
3979 block[j] += best_change;
3981 if(best_coeff > last_non_zero){
3982 last_non_zero= best_coeff;
3990 if(block[j] - best_change){
3991 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
/* If the last coefficient became zero, shrink last_non_zero. */
4003 for(; last_non_zero>=start_i; last_non_zero--){
4004 if(block[perm_scantable[last_non_zero]])
4010 if(256*256*256*64 % count == 0){
4011 printf("after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
/* Rebuild run_tab and fold the accepted change back into rem[]. */
4016 for(i=start_i; i<=last_non_zero; i++){
4017 int j= perm_scantable[i];
4018 const int level= block[j];
4021 run_tab[rle_index++]=run;
4028 s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
4034 if(last_non_zero>0){
4035 STOP_TIMER("iterative search")
4040 return last_non_zero;
/*
 * C reference DCT + quantization: forward-transforms 'block' in place,
 * quantizes it against the intra/inter matrix for 'qscale', sets *overflow
 * when a coefficient may exceed s->max_qcoeff, and returns the index of
 * the last nonzero coefficient in scan order (-1 presumably meaning none —
 * init lines are elided in this excerpt).
 * NOTE(review): decimated excerpt; code preserved byte-for-byte.
 */
4043 int ff_dct_quantize_c(MpegEncContext *s,
4044 int16_t *block, int n,
4045 int qscale, int *overflow)
4047 int i, j, level, last_non_zero, q, start_i;
4049 const uint8_t *scantable= s->intra_scantable.scantable;
4052 unsigned int threshold1, threshold2;
/* Forward DCT, then optional DCT-domain denoising. */
4054 s->dsp.fdct (block);
4056 if(s->dct_error_sum)
4057 s->denoise_dct(s, block);
4067 /* For AIC we skip quant/dequant of INTRADC */
4070 /* note: block[0] is assumed to be positive */
4071 block[0] = (block[0] + (q >> 1)) / q;
/* Intra vs inter quant matrix and rounding bias (branch elided). */
4074 qmat = s->q_intra_matrix[qscale];
4075 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4079 qmat = s->q_inter_matrix[qscale];
4080 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
/* threshold1/2 implement a branch-cheap "quantizes to zero?" test:
 * (unsigned)(level+threshold1) > threshold2 is true iff |level| is large
 * enough to survive quantization. */
4082 threshold1= (1<<QMAT_SHIFT) - bias - 1;
4083 threshold2= (threshold1<<1);
/* First pass (backwards): find last_non_zero quickly. */
4084 for(i=63;i>=start_i;i--) {
4086 level = block[j] * qmat[j];
4088 if(((unsigned)(level+threshold1))>threshold2){
/* Second pass (forwards): actually quantize up to last_non_zero. */
4095 for(i=start_i; i<=last_non_zero; i++) {
4097 level = block[j] * qmat[j];
4099 // if( bias+level >= (1<<QMAT_SHIFT)
4100 // || bias-level >= (1<<QMAT_SHIFT)){
4101 if(((unsigned)(level+threshold1))>threshold2){
4103 level= (bias + level)>>QMAT_SHIFT;
4106 level= (bias - level)>>QMAT_SHIFT;
4114 *overflow= s->max_qcoeff < max; //overflow might have happened
4116 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4117 if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
4118 ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
4120 return last_non_zero;
/* Helper macros for the AVOption tables below: OFFSET resolves a field in
 * MpegEncContext; VE marks options as video+encoding parameters. */
4123 #define OFFSET(x) offsetof(MpegEncContext, x)
4124 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options for the H.263 encoder (table terminator elided in this
 * excerpt). */
4125 static const AVOption h263_options[] = {
4126 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4127 { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4128 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass exposing h263_options via the AVOption API. */
4133 static const AVClass h263_class = {
4134 .class_name = "H.263 encoder",
4135 .item_name = av_default_item_name,
4136 .option = h263_options,
4137 .version = LIBAVUTIL_VERSION_INT,
/* H.263 encoder registration: routes init/encode/close through the shared
 * MPV entry points; YUV 4:2:0 only. (.name line and struct terminator are
 * elided in this excerpt.) */
4140 AVCodec ff_h263_encoder = {
4142 .type = AVMEDIA_TYPE_VIDEO,
4143 .id = AV_CODEC_ID_H263,
4144 .priv_data_size = sizeof(MpegEncContext),
4145 .init = ff_MPV_encode_init,
4146 .encode2 = ff_MPV_encode_picture,
4147 .close = ff_MPV_encode_end,
4148 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4149 .long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4150 .priv_class = &h263_class,
/* Private options for the H.263+ encoder (terminator elided in excerpt). */
4153 static const AVOption h263p_options[] = {
4154 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4155 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4156 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4157 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
/* AVClass exposing h263p_options via the AVOption API. */
4161 static const AVClass h263p_class = {
4162 .class_name = "H.263p encoder",
4163 .item_name = av_default_item_name,
4164 .option = h263p_options,
4165 .version = LIBAVUTIL_VERSION_INT,
/* H.263+ (H.263 version 2) encoder registration; supports slice-threaded
 * encoding via CODEC_CAP_SLICE_THREADS. (.name and terminator elided.) */
4168 AVCodec ff_h263p_encoder = {
4170 .type = AVMEDIA_TYPE_VIDEO,
4171 .id = AV_CODEC_ID_H263P,
4172 .priv_data_size = sizeof(MpegEncContext),
4173 .init = ff_MPV_encode_init,
4174 .encode2 = ff_MPV_encode_picture,
4175 .close = ff_MPV_encode_end,
4176 .capabilities = CODEC_CAP_SLICE_THREADS,
4177 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4178 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4179 .priv_class = &h263p_class,
/* Generates msmpeg4v2_class with the generic MPV options. */
4182 FF_MPV_GENERIC_CLASS(msmpeg4v2)
/* MS-MPEG4 v2 encoder registration (struct terminator elided). */
4184 AVCodec ff_msmpeg4v2_encoder = {
4185 .name = "msmpeg4v2",
4186 .type = AVMEDIA_TYPE_VIDEO,
4187 .id = AV_CODEC_ID_MSMPEG4V2,
4188 .priv_data_size = sizeof(MpegEncContext),
4189 .init = ff_MPV_encode_init,
4190 .encode2 = ff_MPV_encode_picture,
4191 .close = ff_MPV_encode_end,
4192 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4193 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4194 .priv_class = &msmpeg4v2_class,
/* Generates msmpeg4v3_class with the generic MPV options. */
4197 FF_MPV_GENERIC_CLASS(msmpeg4v3)
/* MS-MPEG4 v3 encoder registration (.name and terminator elided). */
4199 AVCodec ff_msmpeg4v3_encoder = {
4201 .type = AVMEDIA_TYPE_VIDEO,
4202 .id = AV_CODEC_ID_MSMPEG4V3,
4203 .priv_data_size = sizeof(MpegEncContext),
4204 .init = ff_MPV_encode_init,
4205 .encode2 = ff_MPV_encode_picture,
4206 .close = ff_MPV_encode_end,
4207 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4208 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4209 .priv_class = &msmpeg4v3_class,
4212 FF_MPV_GENERIC_CLASS(wmv1)
4214 AVCodec ff_wmv1_encoder = {
4216 .type = AVMEDIA_TYPE_VIDEO,
4217 .id = AV_CODEC_ID_WMV1,
4218 .priv_data_size = sizeof(MpegEncContext),
4219 .init = ff_MPV_encode_init,
4220 .encode2 = ff_MPV_encode_picture,
4221 .close = ff_MPV_encode_end,
4222 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4223 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4224 .priv_class = &wmv1_class,