2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/internal.h"
31 #include "libavutil/intmath.h"
32 #include "libavutil/mathematics.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/opt.h"
39 #include "mpegvideo.h"
47 #include "aandcttab.h"
49 #include "mpeg4video.h"
51 #include "bytestream.h"
/* Forward declarations for encoder-internal helpers defined later in this file. */
54 static int encode_picture(MpegEncContext *s, int picture_number);
55 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
56 static int sse_mb(MpegEncContext *s);
57 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
58 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Default motion-vector penalty / fcode lookup tables; default_fcode_tab is
 * filled in MPV_encode_defaults() and both are installed into the context there. */
60 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
61 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
63 const AVOption ff_mpv_generic_options[] = {
/*
 * ff_convert_matrix(): precompute quantizer multiplier tables for every
 * qscale in [qmin, qmax]. qmat holds (1 << QMAT_SHIFT)-scaled reciprocals of
 * qscale * quant_matrix[]; for the AAN "ifast" FDCT the ff_aanscales
 * post-scaling is folded in. qmat16 additionally holds 16-bit multipliers
 * plus a rounded bias term for the MMX quantizer path.
 * NOTE(review): some interior lines are elided in this excerpt; annotations
 * cover only the visible code.
 */
68 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
69 uint16_t (*qmat16)[2][64],
70 const uint16_t *quant_matrix,
71 int bias, int qmin, int qmax, int intra)
76 for (qscale = qmin; qscale <= qmax; qscale++) {
/* Branch 1: FDCTs that produce unscaled coefficients (jpeg islow, faandct). */
78 if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
79 dsp->fdct == ff_jpeg_fdct_islow_10 ||
80 dsp->fdct == ff_faandct) {
81 for (i = 0; i < 64; i++) {
82 const int j = dsp->idct_permutation[i];
83 /* 16 <= qscale * quant_matrix[i] <= 7905
84 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
85 * 19952 <= x <= 249205026
86 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
87 * 3444240 >= (1 << 36) / (x) >= 275 */
89 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
90 (qscale * quant_matrix[j]));
/* Branch 2: AAN ifast FDCT — compensate its ff_aanscales scaling here. */
92 } else if (dsp->fdct == ff_fdct_ifast) {
93 for (i = 0; i < 64; i++) {
94 const int j = dsp->idct_permutation[i];
95 /* 16 <= qscale * quant_matrix[i] <= 7905
96 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
97 * 19952 <= x <= 249205026
98 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
99 * 3444240 >= (1 << 36) / (x) >= 275 */
101 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
102 (ff_aanscales[i] * qscale *
/* Fallback branch: also fill the 16-bit qmat16 tables used by the MMX path. */
106 for (i = 0; i < 64; i++) {
107 const int j = dsp->idct_permutation[i];
108 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
109 * Assume x = qscale * quant_matrix[i]
111 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
112 * so 32768 >= (1 << 19) / (x) >= 67 */
113 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
114 (qscale * quant_matrix[j]));
115 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
116 // (qscale * quant_matrix[i]);
117 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
118 (qscale * quant_matrix[j]);
/* Clamp so the 16-bit multiplier is never 0 or 32768 (128 * 256), which
 * would overflow/underflow the signed 16-bit MMX multiply. */
120 if (qmat16[qscale][0][i] == 0 ||
121 qmat16[qscale][0][i] == 128 * 256)
122 qmat16[qscale][0][i] = 128 * 256 - 1;
123 qmat16[qscale][1][i] =
124 ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
125 qmat16[qscale][0][i]);
/* Overflow guard: shrink the shift while max * qmat would exceed INT_MAX. */
129 for (i = intra; i < 64; i++) {
131 if (dsp->fdct == ff_fdct_ifast) {
132 max = (8191LL * ff_aanscales[i]) >> 14;
134 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
140 av_log(NULL, AV_LOG_INFO,
141 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/* Derive the current qscale (and lambda2) from s->lambda; the 139 / (1 << 7)
 * factor is the fixed lambda -> qp mapping also used in ff_init_qscale_tab(). */
146 static inline void update_qscale(MpegEncContext *s)
148 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
149 (FF_LAMBDA_SHIFT + 7);
/* Clamp to the user-requested quantizer range. */
150 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
152 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* Write all 64 quant-matrix entries to the bitstream in zigzag scan order,
 * 8 bits per entry. */
156 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
162 for (i = 0; i < 64; i++) {
163 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
170 * init s->current_picture.qscale_table from s->lambda_table
172 void ff_init_qscale_tab(MpegEncContext *s)
174 int8_t * const qscale_table = s->current_picture.qscale_table;
177 for (i = 0; i < s->mb_num; i++) {
178 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
/* Same lambda -> qp mapping as update_qscale(), then clamped to [qmin, qmax]. */
179 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
180 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/* Copy back, field by field, the context state that motion estimation (or the
 * header-encoding path, per the FIXMEs) may have changed in src into the
 * duplicate slice-thread context dst. */
185 static void update_duplicate_context_after_me(MpegEncContext *dst,
188 #define COPY(a) dst->a= src->a
190 COPY(current_picture);
196 COPY(picture_in_gop_number);
197 COPY(gop_picture_number);
198 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
199 COPY(progressive_frame); // FIXME don't set in encode_header
200 COPY(partitioned_frame); // FIXME don't set in encode_header
205 * Set the given MpegEncContext to defaults for encoding.
206 * the changed fields will not depend upon the prior state of the MpegEncContext.
208 static void MPV_encode_defaults(MpegEncContext *s)
211 ff_MPV_common_defaults(s);
/* fcode 1 covers the central MV range [-16, 16) around MAX_MV. */
213 for (i = -16; i < 16; i++) {
214 default_fcode_tab[i + MAX_MV] = 1;
/* Install the file-scope default tables into this context. */
216 s->me.mv_penalty = default_mv_penalty;
217 s->fcode_tab = default_fcode_tab;
220 /* init video encoder */
221 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
223 MpegEncContext *s = avctx->priv_data;
225 int chroma_h_shift, chroma_v_shift;
227 MPV_encode_defaults(s);
/* --- Validate the input pixel format for the selected codec. --- */
229 switch (avctx->codec_id) {
230 case AV_CODEC_ID_MPEG2VIDEO:
231 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
232 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
233 av_log(avctx, AV_LOG_ERROR,
234 "only YUV420 and YUV422 are supported\n");
238 case AV_CODEC_ID_LJPEG:
239 if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
240 avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
241 avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
242 avctx->pix_fmt != AV_PIX_FMT_BGRA &&
243 ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
244 avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
245 avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
246 avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
247 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
251 case AV_CODEC_ID_MJPEG:
252 if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
253 avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
254 ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
255 avctx->pix_fmt != AV_PIX_FMT_YUV422P) ||
256 avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
257 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
262 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
263 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
/* --- Derive the internal chroma format from the pixel format. --- */
268 switch (avctx->pix_fmt) {
269 case AV_PIX_FMT_YUVJ422P:
270 case AV_PIX_FMT_YUV422P:
271 s->chroma_format = CHROMA_422;
273 case AV_PIX_FMT_YUVJ420P:
274 case AV_PIX_FMT_YUV420P:
276 s->chroma_format = CHROMA_420;
/* --- Copy user options from the AVCodecContext into the encoder context. --- */
280 s->bit_rate = avctx->bit_rate;
281 s->width = avctx->width;
282 s->height = avctx->height;
283 if (avctx->gop_size > 600 &&
284 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
285 av_log(avctx, AV_LOG_ERROR,
286 "Warning keyframe interval too large! reducing it ...\n");
287 avctx->gop_size = 600;
289 s->gop_size = avctx->gop_size;
291 s->flags = avctx->flags;
292 s->flags2 = avctx->flags2;
293 s->max_b_frames = avctx->max_b_frames;
294 s->codec_id = avctx->codec->id;
295 s->strict_std_compliance = avctx->strict_std_compliance;
296 s->quarter_sample = (avctx->flags & CODEC_FLAG_QPEL) != 0;
297 s->mpeg_quant = avctx->mpeg_quant;
298 s->rtp_mode = !!avctx->rtp_payload_size;
299 s->intra_dc_precision = avctx->intra_dc_precision;
300 s->user_specified_pts = AV_NOPTS_VALUE;
302 if (s->gop_size <= 1) {
309 s->me_method = avctx->me_method;
312 s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
/* Adaptive quantization is needed whenever any masking option or QP-RD is on. */
314 s->adaptive_quant = (s->avctx->lumi_masking ||
315 s->avctx->dark_masking ||
316 s->avctx->temporal_cplx_masking ||
317 s->avctx->spatial_cplx_masking ||
318 s->avctx->p_masking ||
319 s->avctx->border_masking ||
320 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
323 s->loop_filter = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
/* --- Rate-control parameter sanity checks. --- */
325 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
326 av_log(avctx, AV_LOG_ERROR,
327 "a vbv buffer size is needed, "
328 "for encoding with a maximum bitrate\n");
332 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
333 av_log(avctx, AV_LOG_INFO,
334 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
337 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
338 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
342 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
343 av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
347 if (avctx->rc_max_rate &&
348 avctx->rc_max_rate == avctx->bit_rate &&
349 avctx->rc_max_rate != avctx->rc_min_rate) {
350 av_log(avctx, AV_LOG_INFO,
351 "impossible bitrate constraints, this will fail\n");
354 if (avctx->rc_buffer_size &&
355 avctx->bit_rate * (int64_t)avctx->time_base.num >
356 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
357 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
361 if (!s->fixed_qscale &&
362 avctx->bit_rate * av_q2d(avctx->time_base) >
363 avctx->bit_rate_tolerance) {
364 av_log(avctx, AV_LOG_ERROR,
365 "bitrate tolerance too small for bitrate\n");
/* vbv_delay is a 16-bit field in 90 kHz units, hence the 90000/0xFFFF bound. */
369 if (s->avctx->rc_max_rate &&
370 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
371 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
372 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
373 90000LL * (avctx->rc_buffer_size - 1) >
374 s->avctx->rc_max_rate * 0xFFFFLL) {
375 av_log(avctx, AV_LOG_INFO,
376 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
377 "specified vbv buffer is too large for the given bitrate!\n");
/* --- Feature / codec compatibility checks. --- */
380 if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
381 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
382 s->codec_id != AV_CODEC_ID_FLV1) {
383 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
387 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
388 av_log(avctx, AV_LOG_ERROR,
389 "OBMC is only supported with simple mb decision\n");
393 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
394 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
398 if (s->max_b_frames &&
399 s->codec_id != AV_CODEC_ID_MPEG4 &&
400 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
401 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
402 av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
406 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
407 s->codec_id == AV_CODEC_ID_H263 ||
408 s->codec_id == AV_CODEC_ID_H263P) &&
409 (avctx->sample_aspect_ratio.num > 255 ||
410 avctx->sample_aspect_ratio.den > 255)) {
411 av_log(avctx, AV_LOG_ERROR,
412 "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
413 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
417 if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
418 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
419 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
423 // FIXME mpeg2 uses that too
424 if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
425 av_log(avctx, AV_LOG_ERROR,
426 "mpeg2 style quantization not supported by codec\n");
430 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
431 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
435 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
436 s->avctx->mb_decision != FF_MB_DECISION_RD) {
437 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
441 if (s->avctx->scenechange_threshold < 1000000000 &&
442 (s->flags & CODEC_FLAG_CLOSED_GOP)) {
443 av_log(avctx, AV_LOG_ERROR,
444 "closed gop with scene change detection are not supported yet, "
445 "set threshold to 1000000000\n");
449 if (s->flags & CODEC_FLAG_LOW_DELAY) {
450 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
451 av_log(avctx, AV_LOG_ERROR,
452 "low delay forcing is only available for mpeg2\n");
455 if (s->max_b_frames != 0) {
456 av_log(avctx, AV_LOG_ERROR,
457 "b frames cannot be used with low delay\n");
462 if (s->q_scale_type == 1) {
463 if (avctx->qmax > 12) {
464 av_log(avctx, AV_LOG_ERROR,
465 "non linear quant only supports qmax <= 12 currently\n");
470 if (s->avctx->thread_count > 1 &&
471 s->codec_id != AV_CODEC_ID_MPEG4 &&
472 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
473 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
474 (s->codec_id != AV_CODEC_ID_H263P)) {
475 av_log(avctx, AV_LOG_ERROR,
476 "multi threaded encoding not supported by codec\n");
480 if (s->avctx->thread_count < 1) {
481 av_log(avctx, AV_LOG_ERROR,
482 "automatic thread number detection not supported by codec,"
487 if (s->avctx->thread_count > 1)
490 if (!avctx->time_base.den || !avctx->time_base.num) {
491 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
495 i = (INT_MAX / 2 + 128) >> 8;
496 if (avctx->mb_threshold >= i) {
497 av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
502 if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
503 av_log(avctx, AV_LOG_INFO,
504 "notice: b_frame_strategy only affects the first pass\n");
505 avctx->b_frame_strategy = 0;
/* Reduce the time base to lowest terms. */
508 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
510 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
511 avctx->time_base.den /= i;
512 avctx->time_base.num /= i;
/* --- Choose default intra/inter quantizer biases per codec family. --- */
516 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
517 s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
518 // (a + x * 3 / 8) / x
519 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
520 s->inter_quant_bias = 0;
522 s->intra_quant_bias = 0;
524 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
/* User-specified biases override the defaults. */
527 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
528 s->intra_quant_bias = avctx->intra_quant_bias;
529 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
530 s->inter_quant_bias = avctx->inter_quant_bias;
532 av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
/* MPEG-4 stores the time base denominator in a 16-bit field. */
535 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
536 s->avctx->time_base.den > (1 << 16) - 1) {
537 av_log(avctx, AV_LOG_ERROR,
538 "timebase %d/%d not supported by MPEG 4 standard, "
539 "the maximum admitted value for the timebase denominator "
540 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
544 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
/* --- Per-codec output format / feature setup. --- */
546 switch (avctx->codec->id) {
547 case AV_CODEC_ID_MPEG1VIDEO:
548 s->out_format = FMT_MPEG1;
549 s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
550 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
552 case AV_CODEC_ID_MPEG2VIDEO:
553 s->out_format = FMT_MPEG1;
554 s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
555 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
558 case AV_CODEC_ID_LJPEG:
559 case AV_CODEC_ID_MJPEG:
560 s->out_format = FMT_MJPEG;
561 s->intra_only = 1; /* force intra only for jpeg */
562 if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
563 avctx->pix_fmt == AV_PIX_FMT_BGRA) {
564 s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
565 s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
566 s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
568 s->mjpeg_vsample[0] = 2;
569 s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
570 s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
571 s->mjpeg_hsample[0] = 2;
572 s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
573 s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
575 if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
576 ff_mjpeg_encode_init(s) < 0)
581 case AV_CODEC_ID_H261:
582 if (!CONFIG_H261_ENCODER)
584 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
585 av_log(avctx, AV_LOG_ERROR,
586 "The specified picture size of %dx%d is not valid for the "
587 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
588 s->width, s->height);
591 s->out_format = FMT_H261;
595 case AV_CODEC_ID_H263:
596 if (!CONFIG_H263_ENCODER)
598 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
599 s->width, s->height) == 8) {
600 av_log(avctx, AV_LOG_INFO,
601 "The specified picture size of %dx%d is not valid for "
602 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
603 "352x288, 704x576, and 1408x1152."
604 "Try H.263+.\n", s->width, s->height);
607 s->out_format = FMT_H263;
611 case AV_CODEC_ID_H263P:
612 s->out_format = FMT_H263;
615 s->h263_aic = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
616 s->modified_quant = s->h263_aic;
617 s->loop_filter = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
618 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
621 /* These are just to be sure */
625 case AV_CODEC_ID_FLV1:
626 s->out_format = FMT_H263;
627 s->h263_flv = 2; /* format = 1; 11-bit codes */
628 s->unrestricted_mv = 1;
629 s->rtp_mode = 0; /* don't allow GOB */
633 case AV_CODEC_ID_RV10:
634 s->out_format = FMT_H263;
638 case AV_CODEC_ID_RV20:
639 s->out_format = FMT_H263;
642 s->modified_quant = 1;
646 s->unrestricted_mv = 0;
648 case AV_CODEC_ID_MPEG4:
649 s->out_format = FMT_H263;
651 s->unrestricted_mv = 1;
652 s->low_delay = s->max_b_frames ? 0 : 1;
653 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
655 case AV_CODEC_ID_MSMPEG4V2:
656 s->out_format = FMT_H263;
658 s->unrestricted_mv = 1;
659 s->msmpeg4_version = 2;
663 case AV_CODEC_ID_MSMPEG4V3:
664 s->out_format = FMT_H263;
666 s->unrestricted_mv = 1;
667 s->msmpeg4_version = 3;
668 s->flipflop_rounding = 1;
672 case AV_CODEC_ID_WMV1:
673 s->out_format = FMT_H263;
675 s->unrestricted_mv = 1;
676 s->msmpeg4_version = 4;
677 s->flipflop_rounding = 1;
681 case AV_CODEC_ID_WMV2:
682 s->out_format = FMT_H263;
684 s->unrestricted_mv = 1;
685 s->msmpeg4_version = 5;
686 s->flipflop_rounding = 1;
694 avctx->has_b_frames = !s->low_delay;
698 s->progressive_frame =
699 s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
700 CODEC_FLAG_INTERLACED_ME) ||
/* --- Allocate common state and wire up DSP/quantizer function pointers. --- */
704 if (ff_MPV_common_init(s) < 0)
708 ff_MPV_encode_init_x86(s);
710 ff_h263dsp_init(&s->h263dsp);
711 if (!s->dct_quantize)
712 s->dct_quantize = ff_dct_quantize_c;
714 s->denoise_dct = denoise_dct_c;
715 s->fast_dct_quantize = s->dct_quantize;
717 s->dct_quantize = dct_quantize_trellis_c;
719 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
720 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
722 s->quant_precision = 5;
724 ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
725 ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
/* Per-format sub-encoder initialization. */
727 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
728 ff_h261_encode_init(s);
729 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
730 ff_h263_encode_init(s);
731 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
732 ff_msmpeg4_encode_init(s);
733 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
734 && s->out_format == FMT_MPEG1)
735 ff_mpeg1_encode_init(s);
/* --- Fill the default quant matrices (IDCT-permuted), user overrides last. --- */
738 for (i = 0; i < 64; i++) {
739 int j = s->dsp.idct_permutation[i];
740 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
742 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
743 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
744 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
746 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
749 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
750 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
752 if (s->avctx->intra_matrix)
753 s->intra_matrix[j] = s->avctx->intra_matrix[i];
754 if (s->avctx->inter_matrix)
755 s->inter_matrix[j] = s->avctx->inter_matrix[i];
758 /* precompute matrix */
759 /* for mjpeg, we do include qscale in the matrix */
760 if (s->out_format != FMT_MJPEG) {
761 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
762 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
764 ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
765 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
769 if (ff_rate_control_init(s) < 0)
/* Tear down the encoder: rate control, common mpegvideo state, mjpeg tables
 * (when applicable), and the extradata buffer. */
775 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
777 MpegEncContext *s = avctx->priv_data;
779 ff_rate_control_uninit(s);
781 ff_MPV_common_end(s);
782 if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
783 s->out_format == FMT_MJPEG)
784 ff_mjpeg_encode_close(s);
786 av_freep(&avctx->extradata);
/* Sum of absolute errors of a 16x16 pixel block against a constant value ref. */
791 static int get_sae(uint8_t *src, int ref, int stride)
796 for (y = 0; y < 16; y++) {
797 for (x = 0; x < 16; x++) {
798 acc += FFABS(src[x + y * stride] - ref);
/* Count 16x16 blocks for which intra coding looks cheaper: SAE against the
 * block mean (plus a margin of 500) beats the SAD against the reference. */
805 static int get_intra_count(MpegEncContext *s, uint8_t *src,
806 uint8_t *ref, int stride)
814 for (y = 0; y < h; y += 16) {
815 for (x = 0; x < w; x += 16) {
816 int offset = x + y * stride;
817 int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
/* +128 rounds the mean over the 256 pixels of the block. */
819 int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
820 int sae = get_sae(src + offset, mean, stride);
822 acc += sae + 500 < sad;
/*
 * Take a user-supplied input frame, validate/derive its pts, and queue it
 * (by reference or by copy) into s->input_picture[] at the encoding-delay slot.
 * NOTE(review): interior lines are elided in this excerpt.
 */
829 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
833 int i, display_picture_number = 0, ret;
/* With B frames the reorder delay is max_b_frames; otherwise 0 or 1. */
834 const int encoding_delay = s->max_b_frames ? s->max_b_frames :
835 (s->low_delay ? 0 : 1);
840 display_picture_number = s->input_picture_number++;
/* --- pts handling: validate against the previous pts or synthesize one. --- */
842 if (pts != AV_NOPTS_VALUE) {
843 if (s->user_specified_pts != AV_NOPTS_VALUE) {
845 int64_t last = s->user_specified_pts;
848 av_log(s->avctx, AV_LOG_ERROR,
849 "Error, Invalid timestamp=%"PRId64", "
850 "last=%"PRId64"\n", pts, s->user_specified_pts);
854 if (!s->low_delay && display_picture_number == 1)
855 s->dts_delta = time - last;
857 s->user_specified_pts = pts;
859 if (s->user_specified_pts != AV_NOPTS_VALUE) {
860 s->user_specified_pts =
861 pts = s->user_specified_pts + 1;
862 av_log(s->avctx, AV_LOG_INFO,
863 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
866 pts = display_picture_number;
/* NOTE(review): the trailing ';' below makes this an empty statement, so the
 * condition has no effect. Upstream this guards a "direct = 0;" assignment —
 * looks like a bug; verify against the original source. */
872 if (!pic_arg->buf[0]);
874 if (pic_arg->linesize[0] != s->linesize)
876 if (pic_arg->linesize[1] != s->uvlinesize)
878 if (pic_arg->linesize[2] != s->uvlinesize)
881 av_dlog(s->avctx, "%d %d %td %td\n", pic_arg->linesize[0],
882 pic_arg->linesize[1], s->linesize, s->uvlinesize);
/* --- Grab an internal Picture slot and either reference or copy the input. --- */
885 i = ff_find_unused_picture(s, 1);
889 pic = &s->picture[i];
892 if ((ret = av_frame_ref(&pic->f, pic_arg)) < 0)
894 if (ff_alloc_picture(s, pic, 1) < 0) {
898 i = ff_find_unused_picture(s, 0);
902 pic = &s->picture[i];
905 if (ff_alloc_picture(s, pic, 0) < 0) {
909 if (pic->f.data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
910 pic->f.data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
911 pic->f.data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
914 int h_chroma_shift, v_chroma_shift;
915 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
/* Plane-by-plane copy into the internal buffer (plane 0 = luma). */
919 for (i = 0; i < 3; i++) {
920 int src_stride = pic_arg->linesize[i];
921 int dst_stride = i ? s->uvlinesize : s->linesize;
922 int h_shift = i ? h_chroma_shift : 0;
923 int v_shift = i ? v_chroma_shift : 0;
924 int w = s->width >> h_shift;
925 int h = s->height >> v_shift;
926 uint8_t *src = pic_arg->data[i];
927 uint8_t *dst = pic->f.data[i];
929 if (!s->avctx->rc_buffer_size)
930 dst += INPLACE_OFFSET;
/* Fast path: identical strides allow one contiguous memcpy. */
932 if (src_stride == dst_stride)
933 memcpy(dst, src, src_stride * h);
944 ret = av_frame_copy_props(&pic->f, pic_arg);
948 pic->f.display_picture_number = display_picture_number;
949 pic->f.pts = pts; // we set this here to avoid modifying pic_arg
952 /* shift buffer entries */
953 for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
954 s->input_picture[i - 1] = s->input_picture[i];
956 s->input_picture[encoding_delay] = (Picture*) pic;
/*
 * Decide whether the candidate frame p is similar enough to ref to be skipped.
 * Accumulates an 8x8-block difference score over all three planes using the
 * metric selected by frame_skip_exp, then compares against the skip threshold
 * and a lambda-scaled skip factor.
 */
961 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
967 for (plane = 0; plane < 3; plane++) {
968 const int stride = p->f.linesize[plane];
/* Luma (plane 0) has twice as many 8x8 blocks per MB dimension as chroma. */
969 const int bw = plane ? 1 : 2;
970 for (y = 0; y < s->mb_height * bw; y++) {
971 for (x = 0; x < s->mb_width * bw; x++) {
/* Non-shared input pictures carry a 16-byte INPLACE offset. */
972 int off = p->shared ? 0 : 16;
973 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
974 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
975 int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
/* frame_skip_exp selects the accumulation metric (max, L1, L2, ...). */
977 switch (s->avctx->frame_skip_exp) {
978 case 0: score = FFMAX(score, v); break;
979 case 1: score += FFABS(v); break;
980 case 2: score += v * v; break;
981 case 3: score64 += FFABS(v * v * (int64_t)v); break;
982 case 4: score64 += v * v * (int64_t)(v * v); break;
991 if (score64 < s->avctx->frame_skip_threshold)
993 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
/* Encode one frame with the auxiliary context c (used by the B-frame-count
 * estimator) and release the produced packet. */
998 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1000 AVPacket pkt = { 0 };
1001 int ret, got_output;
1003 av_init_packet(&pkt);
1004 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
/* The packet data itself is not needed; only its size matters to the caller. */
1009 av_free_packet(&pkt);
/*
 * b_frame_strategy == 2: brute-force search for the best number of B frames.
 * Encodes downscaled (by brd_scale) versions of the queued input pictures with
 * every candidate B-frame count j and picks the one with the lowest
 * rate-distortion cost. Returns the best count (-1 if nothing was evaluated).
 * NOTE(review): interior lines are elided in this excerpt.
 */
1013 static int estimate_best_b_count(MpegEncContext *s)
1015 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1016 AVCodecContext *c = avcodec_alloc_context3(NULL);
1017 AVFrame input[FF_MAX_B_FRAMES + 2];
1018 const int scale = s->avctx->brd_scale;
1019 int i, j, out_size, p_lambda, b_lambda, lambda2;
1020 int64_t best_rd = INT64_MAX;
1021 int best_b_count = -1;
1023 assert(scale >= 0 && scale <= 3);
/* Lambdas come from the last encoded P/B frames; fall back to p_lambda. */
1026 //s->next_picture_ptr->quality;
1027 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1028 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1029 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1030 if (!b_lambda) // FIXME we should do this somewhere else
1031 b_lambda = p_lambda;
1032 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* --- Configure the scaled-down auxiliary encoder context. --- */
1035 c->width = s->width >> scale;
1036 c->height = s->height >> scale;
1037 c->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
1038 CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
1039 c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
1040 c->mb_decision = s->avctx->mb_decision;
1041 c->me_cmp = s->avctx->me_cmp;
1042 c->mb_cmp = s->avctx->mb_cmp;
1043 c->me_sub_cmp = s->avctx->me_sub_cmp;
1044 c->pix_fmt = AV_PIX_FMT_YUV420P;
1045 c->time_base = s->avctx->time_base;
1046 c->max_b_frames = s->max_b_frames;
1048 if (avcodec_open2(c, codec, NULL) < 0)
/* --- Build downscaled copies of the queued pictures (slot 0 = next P ref). --- */
1051 for (i = 0; i < s->max_b_frames + 2; i++) {
1052 int ysize = c->width * c->height;
1053 int csize = (c->width / 2) * (c->height / 2);
1054 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1055 s->next_picture_ptr;
1057 avcodec_get_frame_defaults(&input[i]);
1058 input[i].data[0] = av_malloc(ysize + 2 * csize);
1059 input[i].data[1] = input[i].data[0] + ysize;
1060 input[i].data[2] = input[i].data[1] + csize;
1061 input[i].linesize[0] = c->width;
1062 input[i].linesize[1] =
1063 input[i].linesize[2] = c->width / 2;
1065 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1066 pre_input = *pre_input_ptr;
/* Non-shared inputs carry the INPLACE offset applied at load time. */
1068 if (!pre_input.shared && i) {
1069 pre_input.f.data[0] += INPLACE_OFFSET;
1070 pre_input.f.data[1] += INPLACE_OFFSET;
1071 pre_input.f.data[2] += INPLACE_OFFSET;
1074 s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
1075 pre_input.f.data[0], pre_input.f.linesize[0],
1076 c->width, c->height);
1077 s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
1078 pre_input.f.data[1], pre_input.f.linesize[1],
1079 c->width >> 1, c->height >> 1);
1080 s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
1081 pre_input.f.data[2], pre_input.f.linesize[2],
1082 c->width >> 1, c->height >> 1);
/* --- Evaluate each candidate B-frame count j by trial encoding. --- */
1086 for (j = 0; j < s->max_b_frames + 1; j++) {
1089 if (!s->input_picture[j])
1092 c->error[0] = c->error[1] = c->error[2] = 0;
/* Slot 0 is encoded as a cheap I frame to prime the reference. */
1094 input[0].pict_type = AV_PICTURE_TYPE_I;
1095 input[0].quality = 1 * FF_QP2LAMBDA;
1097 out_size = encode_frame(c, &input[0]);
1099 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1101 for (i = 0; i < s->max_b_frames + 1; i++) {
/* Every (j+1)-th picture (and the last one) becomes a P frame. */
1102 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1104 input[i + 1].pict_type = is_p ?
1105 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1106 input[i + 1].quality = is_p ? p_lambda : b_lambda;
1108 out_size = encode_frame(c, &input[i + 1]);
1110 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1113 /* get the delayed frames */
1115 out_size = encode_frame(c, NULL);
1116 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* Add the distortion (PSNR error sums) accumulated by the trial encoder. */
1119 rd += c->error[0] + c->error[1] + c->error[2];
1130 for (i = 0; i < s->max_b_frames + 2; i++) {
1131 av_freep(&input[i].data[0]);
1134 return best_b_count;
/*
 * Pick the next picture to encode: decide its type (I/P/B) and coding order,
 * applying frame-skip, two-pass, B-frame-strategy and GOP constraints, then
 * promote it to s->new_picture / s->current_picture.
 * NOTE(review): interior lines are elided in this excerpt.
 */
1137 static int select_input_picture(MpegEncContext *s)
1141 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1142 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1143 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1145 /* set next picture type & ordering */
1146 if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
/* No reference yet (or intra-only mode): code an I frame immediately. */
1147 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1148 s->next_picture_ptr == NULL || s->intra_only) {
1149 s->reordered_input_picture[0] = s->input_picture[0];
1150 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
1151 s->reordered_input_picture[0]->f.coded_picture_number =
1152 s->coded_picture_number++;
/* --- Optional frame skipping when the picture barely differs from ref. --- */
1156 if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1157 if (s->picture_in_gop_number < s->gop_size &&
1158 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1159 // FIXME check that the gop check above is +-1 correct
1160 av_frame_unref(&s->input_picture[0]->f);
1163 ff_vbv_update(s, 0);
/* --- Two-pass mode: picture types come from the first-pass rc log. --- */
1169 if (s->flags & CODEC_FLAG_PASS2) {
1170 for (i = 0; i < s->max_b_frames + 1; i++) {
1171 int pict_num = s->input_picture[0]->f.display_picture_number + i;
1173 if (pict_num >= s->rc_context.num_entries)
1175 if (!s->input_picture[i]) {
1176 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1180 s->input_picture[i]->f.pict_type =
1181 s->rc_context.entry[pict_num].new_pict_type;
/* --- Choose the number of B frames per the selected strategy. --- */
1185 if (s->avctx->b_frame_strategy == 0) {
1186 b_frames = s->max_b_frames;
1187 while (b_frames && !s->input_picture[b_frames])
1189 } else if (s->avctx->b_frame_strategy == 1) {
/* Strategy 1: use intra-block counts as a cheap scene-activity score. */
1190 for (i = 1; i < s->max_b_frames + 1; i++) {
1191 if (s->input_picture[i] &&
1192 s->input_picture[i]->b_frame_score == 0) {
1193 s->input_picture[i]->b_frame_score =
1195 s->input_picture[i ]->f.data[0],
1196 s->input_picture[i - 1]->f.data[0],
1200 for (i = 0; i < s->max_b_frames + 1; i++) {
1201 if (s->input_picture[i] == NULL ||
1202 s->input_picture[i]->b_frame_score - 1 >
1203 s->mb_num / s->avctx->b_sensitivity)
1207 b_frames = FFMAX(0, i - 1);
/* Reset the cached scores for the pictures that will be consumed. */
1210 for (i = 0; i < b_frames + 1; i++) {
1211 s->input_picture[i]->b_frame_score = 0;
1213 } else if (s->avctx->b_frame_strategy == 2) {
1214 b_frames = estimate_best_b_count(s);
1216 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
/* Honor picture types forced by the user/first pass. */
1222 for (i = b_frames - 1; i >= 0; i--) {
1223 int type = s->input_picture[i]->f.pict_type;
1224 if (type && type != AV_PICTURE_TYPE_B)
1227 if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
1228 b_frames == s->max_b_frames) {
1229 av_log(s->avctx, AV_LOG_ERROR,
1230 "warning, too many b frames in a row\n");
/* --- GOP boundary handling: force an I frame when the GOP ends. --- */
1233 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1234 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1235 s->gop_size > s->picture_in_gop_number) {
1236 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1238 if (s->flags & CODEC_FLAG_CLOSED_GOP)
1240 s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1244 if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1245 s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
/* --- Emit the reference picture first, then the B frames after it. --- */
1248 s->reordered_input_picture[0] = s->input_picture[b_frames];
1249 if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
1250 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
1251 s->reordered_input_picture[0]->f.coded_picture_number =
1252 s->coded_picture_number++;
1253 for (i = 0; i < b_frames; i++) {
1254 s->reordered_input_picture[i + 1] = s->input_picture[i];
1255 s->reordered_input_picture[i + 1]->f.pict_type =
1257 s->reordered_input_picture[i + 1]->f.coded_picture_number =
1258 s->coded_picture_number++;
/* --- Promote the selected picture to new_picture / current_picture. --- */
1263 if (s->reordered_input_picture[0]) {
/* Reference flag: 3 for I/P (kept as reference), 0 for B. */
1264 s->reordered_input_picture[0]->reference =
1265 s->reordered_input_picture[0]->f.pict_type !=
1266 AV_PICTURE_TYPE_B ? 3 : 0;
1268 ff_mpeg_unref_picture(s, &s->new_picture);
1269 if ((ret = ff_mpeg_ref_picture(s, &s->new_picture, s->reordered_input_picture[0])))
1272 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1273 // input is a shared pix, so we can't modify it -> alloc a new
1274 // one & ensure that the shared one is reusable
1277 int i = ff_find_unused_picture(s, 0);
1280 pic = &s->picture[i];
1282 pic->reference = s->reordered_input_picture[0]->reference;
1283 if (ff_alloc_picture(s, pic, 0) < 0) {
1287 ret = av_frame_copy_props(&pic->f, &s->reordered_input_picture[0]->f);
1291 /* mark us unused / free shared pic */
1292 av_frame_unref(&s->reordered_input_picture[0]->f);
1293 s->reordered_input_picture[0]->shared = 0;
1295 s->current_picture_ptr = pic;
1297 // input is not a shared pix -> reuse buffer for current_pix
1298 s->current_picture_ptr = s->reordered_input_picture[0];
1299 for (i = 0; i < 4; i++) {
1300 s->new_picture.f.data[i] += INPLACE_OFFSET;
1303 ff_mpeg_unref_picture(s, &s->current_picture);
1304 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1305 s->current_picture_ptr)) < 0)
1308 s->picture_number = s->new_picture.f.display_picture_number;
1310 ff_mpeg_unref_picture(s, &s->new_picture);
/*
 * Top-level per-frame encoder entry point: queues the user frame, selects
 * the next picture to code (I/P/B reordering), encodes it, applies rate
 * control / VBV stuffing, patches vbv_delay for CBR MPEG-1/2, and fills
 * the output packet.
 * NOTE(review): this chunk is a lossy extraction — interior lines and
 * braces are missing and each line carries a stray leading number; code
 * is left byte-identical, only comments were added.
 */
1315 int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1316 const AVFrame *pic_arg, int *got_packet)
1318 MpegEncContext *s = avctx->priv_data;
1319 int i, stuffing_count, ret;
1320 int context_count = s->slice_context_count;
1322 s->picture_in_gop_number++;
/* Queue the incoming frame, then pick/reorder the picture to code next. */
1324 if (load_input_picture(s, pic_arg) < 0)
1327 if (select_input_picture(s) < 0) {
/* Only emit output when a picture was actually selected for coding. */
1332 if (s->new_picture.f.data[0]) {
1334 (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
/* H.263 MB-info side data: 12 bytes per macroblock (see write_mb_info()). */
1337 s->mb_info_ptr = av_packet_new_side_data(pkt,
1338 AV_PKT_DATA_H263_MB_INFO,
1339 s->mb_width*s->mb_height*12);
1340 s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
/* Give each slice thread a packet slice proportional to its MB rows. */
1343 for (i = 0; i < context_count; i++) {
1344 int start_y = s->thread_context[i]->start_mb_y;
1345 int end_y = s->thread_context[i]-> end_mb_y;
1346 int h = s->mb_height;
1347 uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1348 uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1350 init_put_bits(&s->thread_context[i]->pb, start, end - start);
1353 s->pict_type = s->new_picture.f.pict_type;
1355 ff_MPV_frame_start(s, avctx);
1357 if (encode_picture(s, s->picture_number) < 0)
/* Export bit-accounting statistics to the AVCodecContext. */
1360 avctx->header_bits = s->header_bits;
1361 avctx->mv_bits = s->mv_bits;
1362 avctx->misc_bits = s->misc_bits;
1363 avctx->i_tex_bits = s->i_tex_bits;
1364 avctx->p_tex_bits = s->p_tex_bits;
1365 avctx->i_count = s->i_count;
1366 // FIXME f/b_count in avctx
1367 avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1368 avctx->skip_count = s->skip_count;
1370 ff_MPV_frame_end(s);
1372 if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1373 ff_mjpeg_encode_picture_trailer(s);
/* VBV overflow handling: if the frame is too big, raise lambda and
 * undo per-frame state so the frame can be re-encoded (retry loop not
 * fully visible in this extraction — TODO confirm against full file). */
1375 if (avctx->rc_buffer_size) {
1376 RateControlContext *rcc = &s->rc_context;
1377 int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1379 if (put_bits_count(&s->pb) > max_size &&
1380 s->lambda < s->avctx->lmax) {
1381 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1382 (s->qscale + 1) / s->qscale);
1383 if (s->adaptive_quant) {
1385 for (i = 0; i < s->mb_height * s->mb_stride; i++)
1386 s->lambda_table[i] =
1387 FFMAX(s->lambda_table[i] + 1,
1388 s->lambda_table[i] * (s->qscale + 1) /
1391 s->mb_skipped = 0; // done in MPV_frame_start()
1392 // done in encode_picture() so we must undo it
1393 if (s->pict_type == AV_PICTURE_TYPE_P) {
1394 if (s->flipflop_rounding ||
1395 s->codec_id == AV_CODEC_ID_H263P ||
1396 s->codec_id == AV_CODEC_ID_MPEG4)
1397 s->no_rounding ^= 1;
1399 if (s->pict_type != AV_PICTURE_TYPE_B) {
1400 s->time_base = s->last_time_base;
1401 s->last_non_b_time = s->time - s->pp_time;
/* Rewind each slice thread's bit writer before the retry. */
1403 for (i = 0; i < context_count; i++) {
1404 PutBitContext *pb = &s->thread_context[i]->pb;
1405 init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1410 assert(s->avctx->rc_max_rate);
1413 if (s->flags & CODEC_FLAG_PASS1)
1414 ff_write_pass1_stats(s);
1416 for (i = 0; i < 4; i++) {
1417 s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
1418 avctx->error[i] += s->current_picture_ptr->f.error[i];
1421 if (s->flags & CODEC_FLAG_PASS1)
1422 assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1423 avctx->i_tex_bits + avctx->p_tex_bits ==
1424 put_bits_count(&s->pb));
1425 flush_put_bits(&s->pb);
1426 s->frame_bits = put_bits_count(&s->pb);
/* CBR padding: ff_vbv_update() returns how many stuffing bytes to add. */
1428 stuffing_count = ff_vbv_update(s, s->frame_bits);
1429 if (stuffing_count) {
1430 if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1431 stuffing_count + 50) {
1432 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1436 switch (s->codec_id) {
1437 case AV_CODEC_ID_MPEG1VIDEO:
1438 case AV_CODEC_ID_MPEG2VIDEO:
/* MPEG-1/2: stuffing is plain zero bytes. */
1439 while (stuffing_count--) {
1440 put_bits(&s->pb, 8, 0);
1443 case AV_CODEC_ID_MPEG4:
/* MPEG-4: stuffing via a filler startcode (0x000001C3) then 0xFF bytes. */
1444 put_bits(&s->pb, 16, 0);
1445 put_bits(&s->pb, 16, 0x1C3);
1446 stuffing_count -= 4;
1447 while (stuffing_count--) {
1448 put_bits(&s->pb, 8, 0xFF);
1452 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1454 flush_put_bits(&s->pb);
1455 s->frame_bits = put_bits_count(&s->pb);
1458 /* update mpeg1/2 vbv_delay for CBR */
1459 if (s->avctx->rc_max_rate &&
1460 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1461 s->out_format == FMT_MPEG1 &&
1462 90000LL * (avctx->rc_buffer_size - 1) <=
1463 s->avctx->rc_max_rate * 0xFFFFLL) {
1464 int vbv_delay, min_delay;
1465 double inbits = s->avctx->rc_max_rate *
1466 av_q2d(s->avctx->time_base);
1467 int minbits = s->frame_bits - 8 *
1468 (s->vbv_delay_ptr - s->pb.buf - 1);
1469 double bits = s->rc_context.buffer_index + minbits - inbits;
1472 av_log(s->avctx, AV_LOG_ERROR,
1473 "Internal error, negative bits\n");
1475 assert(s->repeat_first_field == 0);
/* vbv_delay is in 90 kHz units; clamp to the minimum feasible delay. */
1477 vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1478 min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1479 s->avctx->rc_max_rate;
1481 vbv_delay = FFMAX(vbv_delay, min_delay);
1483 assert(vbv_delay < 0xFFFF);
/* Patch the 16-bit vbv_delay field in the already-written picture
 * header in place (spans 3 bytes, bit-shifted). */
1485 s->vbv_delay_ptr[0] &= 0xF8;
1486 s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1487 s->vbv_delay_ptr[1] = vbv_delay >> 5;
1488 s->vbv_delay_ptr[2] &= 0x07;
1489 s->vbv_delay_ptr[2] |= vbv_delay << 3;
1490 avctx->vbv_delay = vbv_delay * 300;
1492 s->total_bits += s->frame_bits;
1493 avctx->frame_bits = s->frame_bits;
/* Timestamps: with B-frames, dts lags pts by the reorder delay. */
1495 pkt->pts = s->current_picture.f.pts;
1496 if (!s->low_delay) {
1497 if (!s->current_picture.f.coded_picture_number)
1498 pkt->dts = pkt->pts - s->dts_delta;
1500 pkt->dts = s->reordered_pts;
1501 s->reordered_pts = s->input_picture[0]->f.pts;
1503 pkt->dts = pkt->pts;
1504 if (s->current_picture.f.key_frame)
1505 pkt->flags |= AV_PKT_FLAG_KEY;
1507 av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1511 assert((s->frame_bits & 7) == 0);
1513 pkt->size = s->frame_bits / 8;
1514 *got_packet = !!pkt->size;
/*
 * Eliminate a whole DCT block when it holds only a handful of small
 * coefficients: per-position scores from tab[] (low frequencies weigh
 * more) are accumulated, and if the total stays below |threshold| the
 * block's coefficients are cleared and block_last_index[n] is dropped
 * to 0 (DC kept) or -1 (nothing kept).
 * NOTE(review): lines are missing from this extraction (e.g. where
 * skip_dc is set for negative thresholds) — presumably threshold < 0
 * means "keep DC"; confirm against the full file.
 */
1518 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1519 int n, int threshold)
1521 static const char tab[64] = {
1522 3, 2, 2, 1, 1, 1, 1, 1,
1523 1, 1, 1, 1, 1, 1, 1, 1,
1524 1, 1, 1, 1, 1, 1, 1, 1,
1525 0, 0, 0, 0, 0, 0, 0, 0,
1526 0, 0, 0, 0, 0, 0, 0, 0,
1527 0, 0, 0, 0, 0, 0, 0, 0,
1528 0, 0, 0, 0, 0, 0, 0, 0,
1529 0, 0, 0, 0, 0, 0, 0, 0
1534 int16_t *block = s->block[n];
1535 const int last_index = s->block_last_index[n];
1538 if (threshold < 0) {
1540 threshold = -threshold;
1544 /* Are all we could set to zero already zero? */
1545 if (last_index <= skip_dc - 1)
/* Score pass: walk coefficients in scan order, abort on any big level. */
1548 for (i = 0; i <= last_index; i++) {
1549 const int j = s->intra_scantable.permutated[i];
1550 const int level = FFABS(block[j]);
1552 if (skip_dc && i == 0)
1556 } else if (level > 1) {
1562 if (score >= threshold)
/* Elimination pass: zero everything past the (optionally kept) DC. */
1564 for (i = skip_dc; i <= last_index; i++) {
1565 const int j = s->intra_scantable.permutated[i];
1569 s->block_last_index[n] = 0;
1571 s->block_last_index[n] = -1;
/*
 * Clamp quantized coefficients of one block to the codec's legal range
 * [s->min_qcoeff, s->max_qcoeff], counting how many were clipped.
 * Intra DC (scan position 0) is exempt. Logs a warning when clipping
 * occurred and simple MB decision is in use.
 */
1574 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1578 const int maxlevel = s->max_qcoeff;
1579 const int minlevel = s->min_qcoeff;
1583 i = 1; // skip clipping of intra dc
1587 for (; i <= last_index; i++) {
1588 const int j = s->intra_scantable.permutated[i];
1589 int level = block[j];
1591 if (level > maxlevel) {
1594 } else if (level < minlevel) {
1602 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1603 av_log(s->avctx, AV_LOG_INFO,
1604 "warning, clipping %d dct coefficients to %d..%d\n",
1605 overflow, minlevel, maxlevel);
/*
 * Fill an 8x8 weight table used by noise-shaping quantization: for each
 * pixel, sum/sum-of-squares over its (clipped) 3x3 neighbourhood give a
 * local-variance-based weight 36*sqrt(count*sqr - sum*sum)/count.
 * Flat areas get low weights (noise there is more visible).
 */
1608 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1612 for (y = 0; y < 8; y++) {
1613 for (x = 0; x < 8; x++) {
/* 3x3 neighbourhood, clipped at the 8x8 block edges */
1619 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1620 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1621 int v = ptr[x2 + y2 * stride];
1627 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
/*
 * Encode one macroblock: gather source (intra) or prediction-residual
 * (inter) pixels, optionally decide interlaced DCT, DCT+quantize all
 * blocks, run coefficient elimination / noise shaping, then dispatch to
 * the codec-specific MB bitstream writer.
 * mb_block_height/mb_block_count parametrize 4:2:0 (8,6) vs 4:2:2 (16,8)
 * — see encode_mb() below.
 * NOTE(review): many interior lines are missing from this extraction;
 * code left byte-identical, comments only.
 */
1632 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1633 int motion_x, int motion_y,
1634 int mb_block_height,
1637 int16_t weight[8][64];
1638 int16_t orig[8][64];
1639 const int mb_x = s->mb_x;
1640 const int mb_y = s->mb_y;
1643 int dct_offset = s->linesize * 8; // default for progressive frames
1644 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1645 ptrdiff_t wrap_y, wrap_c;
1647 for (i = 0; i < mb_block_count; i++)
1648 skip_dct[i] = s->skipdct;
/* Adaptive quantization: pick this MB's lambda/qscale from the tables. */
1650 if (s->adaptive_quant) {
1651 const int last_qp = s->qscale;
1652 const int mb_xy = mb_x + mb_y * s->mb_stride;
1654 s->lambda = s->lambda_table[mb_xy];
1657 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1658 s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
1659 s->dquant = s->qscale - last_qp;
1661 if (s->out_format == FMT_H263) {
1662 s->dquant = av_clip(s->dquant, -2, 2);
1664 if (s->codec_id == AV_CODEC_ID_MPEG4) {
/* MPEG-4 restrictions: direct-mode B MBs and 8x8 MVs can't carry dquant. */
1666 if (s->pict_type == AV_PICTURE_TYPE_B) {
1667 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1670 if (s->mv_type == MV_TYPE_8X8)
1676 ff_set_qscale(s, last_qp + s->dquant);
1677 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1678 ff_set_qscale(s, s->qscale + s->dquant);
/* Source pointers into the frame being coded. */
1680 wrap_y = s->linesize;
1681 wrap_c = s->uvlinesize;
1682 ptr_y = s->new_picture.f.data[0] +
1683 (mb_y * 16 * wrap_y) + mb_x * 16;
1684 ptr_cb = s->new_picture.f.data[1] +
1685 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1686 ptr_cr = s->new_picture.f.data[2] +
1687 (mb_y * mb_block_height * wrap_c) + mb_x * 8;
/* MB sticking out of the picture: replicate edges into a scratch buffer. */
1689 if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
1690 uint8_t *ebuf = s->edge_emu_buffer + 32;
1691 s->vdsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
1692 mb_y * 16, s->width, s->height);
1694 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, 8,
1695 mb_block_height, mb_x * 8, mb_y * 8,
1696 s->width >> 1, s->height >> 1);
1697 ptr_cb = ebuf + 18 * wrap_y;
1698 s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, 8,
1699 mb_block_height, mb_x * 8, mb_y * 8,
1700 s->width >> 1, s->height >> 1);
1701 ptr_cr = ebuf + 18 * wrap_y + 8;
/* Intra path: interlaced-DCT decision on the source pixels. */
1705 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1706 int progressive_score, interlaced_score;
1708 s->interlaced_dct = 0;
1709 progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1711 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1712 NULL, wrap_y, 8) - 400;
1714 if (progressive_score > 0) {
1715 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1716 NULL, wrap_y * 2, 8) +
1717 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1718 NULL, wrap_y * 2, 8);
1719 if (progressive_score > interlaced_score) {
1720 s->interlaced_dct = 1;
/* Interlaced: luma blocks 2/3 come from the second field (offset 1 line). */
1722 dct_offset = wrap_y;
1724 if (s->chroma_format == CHROMA_422)
/* Load the four luma and (up to four) chroma blocks. */
1730 s->dsp.get_pixels(s->block[0], ptr_y , wrap_y);
1731 s->dsp.get_pixels(s->block[1], ptr_y + 8 , wrap_y);
1732 s->dsp.get_pixels(s->block[2], ptr_y + dct_offset , wrap_y);
1733 s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
1735 if (s->flags & CODEC_FLAG_GRAY) {
1739 s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1740 s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1741 if (!s->chroma_y_shift) { /* 422 */
1742 s->dsp.get_pixels(s->block[6],
1743 ptr_cb + (dct_offset >> 1), wrap_c);
1744 s->dsp.get_pixels(s->block[7],
1745 ptr_cr + (dct_offset >> 1), wrap_c);
/* Inter path: run motion compensation into s->dest[] first. */
1749 op_pixels_func (*op_pix)[4];
1750 qpel_mc_func (*op_qpix)[16];
1751 uint8_t *dest_y, *dest_cb, *dest_cr;
1753 dest_y = s->dest[0];
1754 dest_cb = s->dest[1];
1755 dest_cr = s->dest[2];
1757 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1758 op_pix = s->hdsp.put_pixels_tab;
1759 op_qpix = s->dsp.put_qpel_pixels_tab;
1761 op_pix = s->hdsp.put_no_rnd_pixels_tab;
1762 op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
1765 if (s->mv_dir & MV_DIR_FORWARD) {
1766 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
1767 s->last_picture.f.data,
/* After the forward MC, a backward pass averages into the same dest. */
1769 op_pix = s->hdsp.avg_pixels_tab;
1770 op_qpix = s->dsp.avg_qpel_pixels_tab;
1772 if (s->mv_dir & MV_DIR_BACKWARD) {
1773 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
1774 s->next_picture.f.data,
/* Interlaced-DCT decision on the residual (source vs prediction). */
1778 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1779 int progressive_score, interlaced_score;
1781 s->interlaced_dct = 0;
1782 progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
1785 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
1786 ptr_y + wrap_y * 8, wrap_y,
1789 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
1790 progressive_score -= 400;
1792 if (progressive_score > 0) {
1793 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
1796 s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
1800 if (progressive_score > interlaced_score) {
1801 s->interlaced_dct = 1;
1803 dct_offset = wrap_y;
1805 if (s->chroma_format == CHROMA_422)
/* Compute residual blocks = source - motion-compensated prediction. */
1811 s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
1812 s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
1813 s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
1814 dest_y + dct_offset, wrap_y);
1815 s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
1816 dest_y + dct_offset + 8, wrap_y);
1818 if (s->flags & CODEC_FLAG_GRAY) {
1822 s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
1823 s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
1824 if (!s->chroma_y_shift) { /* 422 */
1825 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
1826 dest_cb + (dct_offset >> 1), wrap_c);
1827 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
1828 dest_cr + (dct_offset >> 1), wrap_c);
1831 /* pre quantization */
/* Skip DCT for blocks whose SAD is below ~20*qscale (cheap early-out,
 * only attempted when the MB's MC variance is already low). */
1832 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
1833 2 * s->qscale * s->qscale) {
1835 if (s->dsp.sad[1](NULL, ptr_y , dest_y,
1836 wrap_y, 8) < 20 * s->qscale)
1838 if (s->dsp.sad[1](NULL, ptr_y + 8,
1839 dest_y + 8, wrap_y, 8) < 20 * s->qscale)
1841 if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
1842 dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
1844 if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
1845 dest_y + dct_offset + 8,
1846 wrap_y, 8) < 20 * s->qscale)
1848 if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
1849 wrap_c, 8) < 20 * s->qscale)
1851 if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
1852 wrap_c, 8) < 20 * s->qscale)
1854 if (!s->chroma_y_shift) { /* 422 */
1855 if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
1856 dest_cb + (dct_offset >> 1),
1857 wrap_c, 8) < 20 * s->qscale)
1859 if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
1860 dest_cr + (dct_offset >> 1),
1861 wrap_c, 8) < 20 * s->qscale)
/* Noise shaping: per-block visual weights + copy of unquantized blocks. */
1867 if (s->quantizer_noise_shaping) {
1869 get_visual_weight(weight[0], ptr_y , wrap_y);
1871 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
1873 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
1875 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
1877 get_visual_weight(weight[4], ptr_cb , wrap_c);
1879 get_visual_weight(weight[5], ptr_cr , wrap_c);
1880 if (!s->chroma_y_shift) { /* 422 */
1882 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
1885 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
1888 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
1891 /* DCT & quantize */
1892 assert(s->out_format != FMT_MJPEG || s->qscale == 8);
1894 for (i = 0; i < mb_block_count; i++) {
1897 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
1898 // FIXME we could decide to change to quantizer instead of
1900 // JS: I don't think that would be a good idea it could lower
1901 // quality instead of improve it. Just INTRADC clipping
1902 // deserves changes in quantizer
1904 clip_coeffs(s, s->block[i], s->block_last_index[i]);
1906 s->block_last_index[i] = -1;
1908 if (s->quantizer_noise_shaping) {
1909 for (i = 0; i < mb_block_count; i++) {
1911 s->block_last_index[i] =
1912 dct_quantize_refine(s, s->block[i], weight[i],
1913 orig[i], i, s->qscale);
/* Single-coefficient elimination (inter MBs only). */
1918 if (s->luma_elim_threshold && !s->mb_intra)
1919 for (i = 0; i < 4; i++)
1920 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
1921 if (s->chroma_elim_threshold && !s->mb_intra)
1922 for (i = 4; i < mb_block_count; i++)
1923 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
1925 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
1926 for (i = 0; i < mb_block_count; i++) {
1927 if (s->block_last_index[i] == -1)
1928 s->coded_score[i] = INT_MAX / 256;
/* Gray mode: force chroma to flat mid-level (DC = 1024/dc_scale). */
1933 if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
1934 s->block_last_index[4] =
1935 s->block_last_index[5] = 0;
1937 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
1940 // non c quantize code returns incorrect block_last_index FIXME
1941 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
1942 for (i = 0; i < mb_block_count; i++) {
1944 if (s->block_last_index[i] > 0) {
1945 for (j = 63; j > 0; j--) {
1946 if (s->block[i][s->intra_scantable.permutated[j]])
1949 s->block_last_index[i] = j;
1954 /* huffman encode */
1955 switch(s->codec_id){ //FIXME funct ptr could be slightly faster
1956 case AV_CODEC_ID_MPEG1VIDEO:
1957 case AV_CODEC_ID_MPEG2VIDEO:
1958 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1959 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
1961 case AV_CODEC_ID_MPEG4:
1962 if (CONFIG_MPEG4_ENCODER)
1963 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
1965 case AV_CODEC_ID_MSMPEG4V2:
1966 case AV_CODEC_ID_MSMPEG4V3:
1967 case AV_CODEC_ID_WMV1:
1968 if (CONFIG_MSMPEG4_ENCODER)
1969 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
1971 case AV_CODEC_ID_WMV2:
1972 if (CONFIG_WMV2_ENCODER)
1973 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
1975 case AV_CODEC_ID_H261:
1976 if (CONFIG_H261_ENCODER)
1977 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
1979 case AV_CODEC_ID_H263:
1980 case AV_CODEC_ID_H263P:
1981 case AV_CODEC_ID_FLV1:
1982 case AV_CODEC_ID_RV10:
1983 case AV_CODEC_ID_RV20:
1984 if (CONFIG_H263_ENCODER)
1985 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
1987 case AV_CODEC_ID_MJPEG:
1988 if (CONFIG_MJPEG_ENCODER)
1989 ff_mjpeg_encode_mb(s, s->block);
/*
 * Thin dispatcher: selects the chroma-geometry parameters for
 * encode_mb_internal() — 4:2:0 uses 8-high chroma blocks / 6 blocks per
 * MB, everything else 16-high / 8 blocks (4:2:2).
 */
1996 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
1998 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
1999 else encode_mb_internal(s, motion_x, motion_y, 16, 8);
/*
 * Snapshot the encoder state (predictors, bit counters, quantizer) from
 * s into d before trying one MB-coding candidate, so the attempt can be
 * rolled back. Counterpart of copy_context_after_encode().
 */
2002 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2005 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
/* MB-skip run-length and DC predictors */
2008 d->mb_skip_run= s->mb_skip_run;
2010 d->last_dc[i] = s->last_dc[i];
/* per-category bit counters (used for RD scoring / two-pass stats) */
2013 d->mv_bits= s->mv_bits;
2014 d->i_tex_bits= s->i_tex_bits;
2015 d->p_tex_bits= s->p_tex_bits;
2016 d->i_count= s->i_count;
2017 d->f_count= s->f_count;
2018 d->b_count= s->b_count;
2019 d->skip_count= s->skip_count;
2020 d->misc_bits= s->misc_bits;
2024 d->qscale= s->qscale;
2025 d->dquant= s->dquant;
2027 d->esc3_level_length= s->esc3_level_length;
/*
 * Commit the state produced by a winning MB-coding candidate: copies
 * MVs, predictors, bit counters, block indexes and (for partitioned
 * frames) the auxiliary bit writers from s into d.
 */
2030 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
/* full MV set is copied here, unlike copy_context_before_encode() */
2033 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2034 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2037 d->mb_skip_run= s->mb_skip_run;
2039 d->last_dc[i] = s->last_dc[i];
/* per-category bit counters */
2042 d->mv_bits= s->mv_bits;
2043 d->i_tex_bits= s->i_tex_bits;
2044 d->p_tex_bits= s->p_tex_bits;
2045 d->i_count= s->i_count;
2046 d->f_count= s->f_count;
2047 d->b_count= s->b_count;
2048 d->skip_count= s->skip_count;
2049 d->misc_bits= s->misc_bits;
/* MB mode decision results */
2051 d->mb_intra= s->mb_intra;
2052 d->mb_skipped= s->mb_skipped;
2053 d->mv_type= s->mv_type;
2054 d->mv_dir= s->mv_dir;
2056 if(s->data_partitioning){
2058 d->tex_pb= s->tex_pb;
2062 d->block_last_index[i]= s->block_last_index[i];
2063 d->interlaced_dct= s->interlaced_dct;
2064 d->qscale= s->qscale;
2066 d->esc3_level_length= s->esc3_level_length;
/*
 * Try encoding the current MB with one candidate mode: restore state
 * from backup, encode into the next_block scratch bitstream, score the
 * result (bit count, or full RD cost with decode+SSE when
 * mb_decision==FF_MB_DECISION_RD), and keep it in *best if it beats
 * *dmin. Uses double-buffered pb/pb2/tex_pb scratch writers.
 */
2069 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2070 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2071 int *dmin, int *next_block, int motion_x, int motion_y)
2074 uint8_t *dest_backup[3];
2076 copy_context_before_encode(s, backup, type);
2078 s->block= s->blocks[*next_block];
2079 s->pb= pb[*next_block];
2080 if(s->data_partitioning){
2081 s->pb2 = pb2 [*next_block];
2082 s->tex_pb= tex_pb[*next_block];
/* Redirect reconstruction output into the RD scratchpad. */
2086 memcpy(dest_backup, s->dest, sizeof(s->dest));
2087 s->dest[0] = s->rd_scratchpad;
2088 s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2089 s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2090 assert(s->linesize >= 32); //FIXME
2093 encode_mb(s, motion_x, motion_y);
/* Base score = bits spent (all three streams when partitioned). */
2095 score= put_bits_count(&s->pb);
2096 if(s->data_partitioning){
2097 score+= put_bits_count(&s->pb2);
2098 score+= put_bits_count(&s->tex_pb);
/* Full RD: decode the MB and add lambda-weighted distortion. */
2101 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2102 ff_MPV_decode_mb(s, s->block);
2104 score *= s->lambda2;
2105 score += sse_mb(s) << FF_LAMBDA_SHIFT;
2109 memcpy(s->dest, dest_backup, sizeof(s->dest));
2116 copy_context_after_encode(best, s, type);
/*
 * Sum of squared errors between two w x h pixel regions. Fast DSP paths
 * for the common 16x16 and 8x8 sizes; generic scalar loop (via the
 * shared square table) for arbitrary edge-clipped sizes.
 */
2120 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
/* ff_squareTbl is biased by 256 so negative differences index correctly */
2121 uint32_t *sq = ff_squareTbl + 256;
2126 return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2127 else if(w==8 && h==8)
2128 return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2132 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/*
 * Distortion of the current macroblock: Y + Cb + Cr error between the
 * source frame and the reconstruction in s->dest[]. Full 16x16 MBs use
 * the DSP SSE (or NSSE when mb_cmp selects it); MBs clipped at the
 * right/bottom picture edge fall back to the generic sse() helper.
 */
2141 static int sse_mb(MpegEncContext *s){
/* clip MB dimensions at the picture border */
2145 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2146 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2149 if(s->avctx->mb_cmp == FF_CMP_NSSE){
2150 return s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2151 +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2152 +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2154 return s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2155 +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2156 +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
/* edge MB: chroma region is half-size (4:2:0 assumed here) */
2159 return sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2160 +sse(s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2161 +sse(s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/*
 * Slice-thread worker for the motion-estimation pre-pass: runs
 * ff_pre_estimate_p_frame_motion() over this thread's MB rows, iterating
 * bottom-up / right-to-left (reverse raster order).
 */
2164 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2165 MpegEncContext *s= *(void**)arg;
/* pre-pass uses its own diamond size */
2169 s->me.dia_size= s->avctx->pre_dia_size;
2170 s->first_slice_line=1;
2171 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2172 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2173 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2175 s->first_slice_line=0;
/*
 * Slice-thread worker for the main motion-estimation pass: for each MB
 * in this thread's rows, computes motion vectors and MB type (B-frame
 * vs P-frame estimator) and stores them in the context tables.
 */
2183 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2184 MpegEncContext *s= *(void**)arg;
2186 ff_check_alignment();
2188 s->me.dia_size= s->avctx->dia_size;
2189 s->first_slice_line=1;
2190 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2191 s->mb_x=0; //for block init below
2192 ff_init_block_index(s);
2193 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
/* advance the 4 luma block indexes one MB to the right */
2194 s->block_index[0]+=2;
2195 s->block_index[1]+=2;
2196 s->block_index[2]+=2;
2197 s->block_index[3]+=2;
2199 /* compute motion vector & mb_type and store in context */
2200 if(s->pict_type==AV_PICTURE_TYPE_B)
2201 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2203 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2205 s->first_slice_line=0;
/*
 * Slice-thread worker computing per-MB luma variance and mean for the
 * rate-control / adaptive-quant tables: var = E[x^2] - E[x]^2 over the
 * 16x16 luma block (with +500/+128 rounding bias), mean = sum/256.
 */
2210 static int mb_var_thread(AVCodecContext *c, void *arg){
2211 MpegEncContext *s= *(void**)arg;
2214 ff_check_alignment();
2216 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2217 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2220 uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2222 int sum = s->dsp.pix_sum(pix, s->linesize);
/* (unsigned) cast avoids signed overflow in sum*sum for bright blocks */
2224 varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2226 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2227 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2228 s->me.mb_var_sum_temp += varc;
/*
 * Finish the current slice's bitstream: merge/stuff MPEG-4 partitions,
 * add MJPEG stuffing, byte-align and flush the writer, and account the
 * alignment bits as misc_bits for pass-1 statistics.
 */
2234 static void write_slice_end(MpegEncContext *s){
2235 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2236 if(s->partitioned_frame){
2237 ff_mpeg4_merge_partitions(s);
2240 ff_mpeg4_stuffing(&s->pb);
2241 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2242 ff_mjpeg_encode_stuffing(&s->pb);
2245 avpriv_align_put_bits(&s->pb);
2246 flush_put_bits(&s->pb);
2248 if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2249 s->misc_bits+= get_bits_diff(s);
/*
 * Append one 12-byte AV_PKT_DATA_H263_MB_INFO record for the current MB:
 * bit offset in the stream, quantizer, GOB number, MB address, and the
 * MV1 predictors (MV2 slots are zero — 4MV unsupported).
 */
2252 static void write_mb_info(MpegEncContext *s)
2254 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2255 int offset = put_bits_count(&s->pb);
2256 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2257 int gobn = s->mb_y / s->gob_index;
2259 if (CONFIG_H263_ENCODER)
2260 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2261 bytestream_put_le32(&ptr, offset);
2262 bytestream_put_byte(&ptr, s->qscale);
2263 bytestream_put_byte(&ptr, gobn);
2264 bytestream_put_le16(&ptr, mba);
2265 bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2266 bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2267 /* 4MV not implemented */
2268 bytestream_put_byte(&ptr, 0); /* hmv2 */
2269 bytestream_put_byte(&ptr, 0); /* vmv2 */
/*
 * Track where MB-info records should be written: called before each MB
 * (startcode=0) to open a new 12-byte slot once s->mb_info bytes have
 * been written since the last record, and after a resync/startcode
 * (startcode=1) to anchor the record at the startcode position.
 */
2272 static void update_mb_info(MpegEncContext *s, int startcode)
2276 if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2277 s->mb_info_size += 12;
2278 s->prev_mb_info = s->last_mb_info;
2281 s->prev_mb_info = put_bits_count(&s->pb)/8;
2282 /* This might have incremented mb_info_size above, and we return without
2283 * actually writing any info into that slot yet. But in that case,
2284 * this will be called again at the start of the after writing the
2285 * start code, actually writing the mb info. */
2289 s->last_mb_info = put_bits_count(&s->pb)/8;
2290 if (!s->mb_info_size)
2291 s->mb_info_size += 12;
2295 static int encode_thread(AVCodecContext *c, void *arg){
2296 MpegEncContext *s= *(void**)arg;
2297 int mb_x, mb_y, pdif = 0;
2298 int chr_h= 16>>s->chroma_y_shift;
2300 MpegEncContext best_s, backup_s;
2301 uint8_t bit_buf[2][MAX_MB_BYTES];
2302 uint8_t bit_buf2[2][MAX_MB_BYTES];
2303 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2304 PutBitContext pb[2], pb2[2], tex_pb[2];
2306 ff_check_alignment();
2309 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2310 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2311 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2314 s->last_bits= put_bits_count(&s->pb);
2325 /* init last dc values */
2326 /* note: quant matrix value (8) is implied here */
2327 s->last_dc[i] = 128 << s->intra_dc_precision;
2329 s->current_picture.f.error[i] = 0;
2332 memset(s->last_mv, 0, sizeof(s->last_mv));
2336 switch(s->codec_id){
2337 case AV_CODEC_ID_H263:
2338 case AV_CODEC_ID_H263P:
2339 case AV_CODEC_ID_FLV1:
2340 if (CONFIG_H263_ENCODER)
2341 s->gob_index = ff_h263_get_gob_height(s);
2343 case AV_CODEC_ID_MPEG4:
2344 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2345 ff_mpeg4_init_partitions(s);
2351 s->first_slice_line = 1;
2352 s->ptr_lastgob = s->pb.buf;
2353 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2357 ff_set_qscale(s, s->qscale);
2358 ff_init_block_index(s);
2360 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2361 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2362 int mb_type= s->mb_type[xy];
2367 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2368 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2371 if(s->data_partitioning){
2372 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2373 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2374 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2380 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2381 ff_update_block_index(s);
2383 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2384 ff_h261_reorder_mb_index(s);
2385 xy= s->mb_y*s->mb_stride + s->mb_x;
2386 mb_type= s->mb_type[xy];
2389 /* write gob / video packet header */
2391 int current_packet_size, is_gob_start;
2393 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2395 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2397 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2399 switch(s->codec_id){
2400 case AV_CODEC_ID_H263:
2401 case AV_CODEC_ID_H263P:
2402 if(!s->h263_slice_structured)
2403 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2405 case AV_CODEC_ID_MPEG2VIDEO:
2406 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2407 case AV_CODEC_ID_MPEG1VIDEO:
2408 if(s->mb_skip_run) is_gob_start=0;
2413 if(s->start_mb_y != mb_y || mb_x!=0){
2416 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2417 ff_mpeg4_init_partitions(s);
2421 assert((put_bits_count(&s->pb)&7) == 0);
2422 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2424 if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
2425 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2426 int d= 100 / s->avctx->error_rate;
2428 current_packet_size=0;
2429 s->pb.buf_ptr= s->ptr_lastgob;
2430 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2434 if (s->avctx->rtp_callback){
2435 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2436 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2438 update_mb_info(s, 1);
2440 switch(s->codec_id){
2441 case AV_CODEC_ID_MPEG4:
2442 if (CONFIG_MPEG4_ENCODER) {
2443 ff_mpeg4_encode_video_packet_header(s);
2444 ff_mpeg4_clean_buffers(s);
2447 case AV_CODEC_ID_MPEG1VIDEO:
2448 case AV_CODEC_ID_MPEG2VIDEO:
2449 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2450 ff_mpeg1_encode_slice_header(s);
2451 ff_mpeg1_clean_buffers(s);
2454 case AV_CODEC_ID_H263:
2455 case AV_CODEC_ID_H263P:
2456 if (CONFIG_H263_ENCODER)
2457 ff_h263_encode_gob_header(s, mb_y);
2461 if(s->flags&CODEC_FLAG_PASS1){
2462 int bits= put_bits_count(&s->pb);
2463 s->misc_bits+= bits - s->last_bits;
2467 s->ptr_lastgob += current_packet_size;
2468 s->first_slice_line=1;
2469 s->resync_mb_x=mb_x;
2470 s->resync_mb_y=mb_y;
2474 if( (s->resync_mb_x == s->mb_x)
2475 && s->resync_mb_y+1 == s->mb_y){
2476 s->first_slice_line=0;
2480 s->dquant=0; //only for QP_RD
2482 update_mb_info(s, 0);
2484 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2486 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2488 copy_context_before_encode(&backup_s, s, -1);
2490 best_s.data_partitioning= s->data_partitioning;
2491 best_s.partitioned_frame= s->partitioned_frame;
2492 if(s->data_partitioning){
2493 backup_s.pb2= s->pb2;
2494 backup_s.tex_pb= s->tex_pb;
2497 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2498 s->mv_dir = MV_DIR_FORWARD;
2499 s->mv_type = MV_TYPE_16X16;
2501 s->mv[0][0][0] = s->p_mv_table[xy][0];
2502 s->mv[0][0][1] = s->p_mv_table[xy][1];
2503 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2504 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2506 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2507 s->mv_dir = MV_DIR_FORWARD;
2508 s->mv_type = MV_TYPE_FIELD;
2511 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2512 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2513 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2515 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2516 &dmin, &next_block, 0, 0);
2518 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2519 s->mv_dir = MV_DIR_FORWARD;
2520 s->mv_type = MV_TYPE_16X16;
2524 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2525 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2527 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2528 s->mv_dir = MV_DIR_FORWARD;
2529 s->mv_type = MV_TYPE_8X8;
2532 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2533 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2535 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2536 &dmin, &next_block, 0, 0);
2538 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2539 s->mv_dir = MV_DIR_FORWARD;
2540 s->mv_type = MV_TYPE_16X16;
2542 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2543 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2544 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2545 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2547 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2548 s->mv_dir = MV_DIR_BACKWARD;
2549 s->mv_type = MV_TYPE_16X16;
2551 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2552 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2553 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2554 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2556 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2557 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2558 s->mv_type = MV_TYPE_16X16;
2560 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2561 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2562 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2563 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2564 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2565 &dmin, &next_block, 0, 0);
2567 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2568 s->mv_dir = MV_DIR_FORWARD;
2569 s->mv_type = MV_TYPE_FIELD;
2572 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2573 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2574 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2576 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2577 &dmin, &next_block, 0, 0);
2579 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2580 s->mv_dir = MV_DIR_BACKWARD;
2581 s->mv_type = MV_TYPE_FIELD;
2584 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2585 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2586 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2588 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2589 &dmin, &next_block, 0, 0);
2591 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2592 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2593 s->mv_type = MV_TYPE_FIELD;
2595 for(dir=0; dir<2; dir++){
2597 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2598 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2599 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2602 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2603 &dmin, &next_block, 0, 0);
2605 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2607 s->mv_type = MV_TYPE_16X16;
2611 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2612 &dmin, &next_block, 0, 0);
2613 if(s->h263_pred || s->h263_aic){
2615 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2617 ff_clean_intra_table_entries(s); //old mode?
2621 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2622 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2623 const int last_qp= backup_s.qscale;
2626 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2627 static const int dquant_tab[4]={-1,1,-2,2};
2629 assert(backup_s.dquant == 0);
2632 s->mv_dir= best_s.mv_dir;
2633 s->mv_type = MV_TYPE_16X16;
2634 s->mb_intra= best_s.mb_intra;
2635 s->mv[0][0][0] = best_s.mv[0][0][0];
2636 s->mv[0][0][1] = best_s.mv[0][0][1];
2637 s->mv[1][0][0] = best_s.mv[1][0][0];
2638 s->mv[1][0][1] = best_s.mv[1][0][1];
2640 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2641 for(; qpi<4; qpi++){
2642 int dquant= dquant_tab[qpi];
2643 qp= last_qp + dquant;
2644 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2646 backup_s.dquant= dquant;
2647 if(s->mb_intra && s->dc_val[0]){
2649 dc[i]= s->dc_val[0][ s->block_index[i] ];
2650 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
2654 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2655 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2656 if(best_s.qscale != qp){
2657 if(s->mb_intra && s->dc_val[0]){
2659 s->dc_val[0][ s->block_index[i] ]= dc[i];
2660 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
2667 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2668 int mx= s->b_direct_mv_table[xy][0];
2669 int my= s->b_direct_mv_table[xy][1];
2671 backup_s.dquant = 0;
2672 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2674 ff_mpeg4_set_direct_mv(s, mx, my);
2675 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2676 &dmin, &next_block, mx, my);
2678 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2679 backup_s.dquant = 0;
2680 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2682 ff_mpeg4_set_direct_mv(s, 0, 0);
2683 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2684 &dmin, &next_block, 0, 0);
2686 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
2689 coded |= s->block_last_index[i];
2692 memcpy(s->mv, best_s.mv, sizeof(s->mv));
2693 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2694 mx=my=0; //FIXME find the one we actually used
2695 ff_mpeg4_set_direct_mv(s, mx, my);
2696 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2704 s->mv_dir= best_s.mv_dir;
2705 s->mv_type = best_s.mv_type;
2707 /* s->mv[0][0][0] = best_s.mv[0][0][0];
2708 s->mv[0][0][1] = best_s.mv[0][0][1];
2709 s->mv[1][0][0] = best_s.mv[1][0][0];
2710 s->mv[1][0][1] = best_s.mv[1][0][1];*/
2713 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2714 &dmin, &next_block, mx, my);
2719 s->current_picture.qscale_table[xy] = best_s.qscale;
2721 copy_context_after_encode(s, &best_s, -1);
2723 pb_bits_count= put_bits_count(&s->pb);
2724 flush_put_bits(&s->pb);
2725 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2728 if(s->data_partitioning){
2729 pb2_bits_count= put_bits_count(&s->pb2);
2730 flush_put_bits(&s->pb2);
2731 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2732 s->pb2= backup_s.pb2;
2734 tex_pb_bits_count= put_bits_count(&s->tex_pb);
2735 flush_put_bits(&s->tex_pb);
2736 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2737 s->tex_pb= backup_s.tex_pb;
2739 s->last_bits= put_bits_count(&s->pb);
2741 if (CONFIG_H263_ENCODER &&
2742 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2743 ff_h263_update_motion_val(s);
2745 if(next_block==0){ //FIXME 16 vs linesize16
2746 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
2747 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
2748 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2751 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
2752 ff_MPV_decode_mb(s, s->block);
2754 int motion_x = 0, motion_y = 0;
2755 s->mv_type=MV_TYPE_16X16;
2756 // only one MB-Type possible
2759 case CANDIDATE_MB_TYPE_INTRA:
2762 motion_x= s->mv[0][0][0] = 0;
2763 motion_y= s->mv[0][0][1] = 0;
2765 case CANDIDATE_MB_TYPE_INTER:
2766 s->mv_dir = MV_DIR_FORWARD;
2768 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2769 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2771 case CANDIDATE_MB_TYPE_INTER_I:
2772 s->mv_dir = MV_DIR_FORWARD;
2773 s->mv_type = MV_TYPE_FIELD;
2776 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2777 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2778 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2781 case CANDIDATE_MB_TYPE_INTER4V:
2782 s->mv_dir = MV_DIR_FORWARD;
2783 s->mv_type = MV_TYPE_8X8;
2786 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2787 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2790 case CANDIDATE_MB_TYPE_DIRECT:
2791 if (CONFIG_MPEG4_ENCODER) {
2792 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2794 motion_x=s->b_direct_mv_table[xy][0];
2795 motion_y=s->b_direct_mv_table[xy][1];
2796 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
2799 case CANDIDATE_MB_TYPE_DIRECT0:
2800 if (CONFIG_MPEG4_ENCODER) {
2801 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2803 ff_mpeg4_set_direct_mv(s, 0, 0);
2806 case CANDIDATE_MB_TYPE_BIDIR:
2807 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2809 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2810 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2811 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2812 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2814 case CANDIDATE_MB_TYPE_BACKWARD:
2815 s->mv_dir = MV_DIR_BACKWARD;
2817 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2818 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2820 case CANDIDATE_MB_TYPE_FORWARD:
2821 s->mv_dir = MV_DIR_FORWARD;
2823 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2824 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2826 case CANDIDATE_MB_TYPE_FORWARD_I:
2827 s->mv_dir = MV_DIR_FORWARD;
2828 s->mv_type = MV_TYPE_FIELD;
2831 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2832 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2833 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2836 case CANDIDATE_MB_TYPE_BACKWARD_I:
2837 s->mv_dir = MV_DIR_BACKWARD;
2838 s->mv_type = MV_TYPE_FIELD;
2841 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2842 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2843 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2846 case CANDIDATE_MB_TYPE_BIDIR_I:
2847 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2848 s->mv_type = MV_TYPE_FIELD;
2850 for(dir=0; dir<2; dir++){
2852 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2853 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2854 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2859 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
2862 encode_mb(s, motion_x, motion_y);
2864 // RAL: Update last macroblock type
2865 s->last_mv_dir = s->mv_dir;
2867 if (CONFIG_H263_ENCODER &&
2868 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2869 ff_h263_update_motion_val(s);
2871 ff_MPV_decode_mb(s, s->block);
2874 /* clean the MV table in IPS frames for direct mode in B frames */
2875 if(s->mb_intra /* && I,P,S_TYPE */){
2876 s->p_mv_table[xy][0]=0;
2877 s->p_mv_table[xy][1]=0;
2880 if(s->flags&CODEC_FLAG_PSNR){
2884 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2885 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2887 s->current_picture.f.error[0] += sse(
2888 s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
2889 s->dest[0], w, h, s->linesize);
2890 s->current_picture.f.error[1] += sse(
2891 s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
2892 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2893 s->current_picture.f.error[2] += sse(
2894 s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
2895 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2898 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
2899 ff_h263_loop_filter(s);
2901 av_dlog(s->avctx, "MB %d %d bits\n",
2902 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
2906 //not beautiful here but we must write it before flushing so it has to be here
2907 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
2908 ff_msmpeg4_encode_ext_header(s);
2912 /* Send the last GOB if RTP */
2913 if (s->avctx->rtp_callback) {
2914 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
2915 pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
2916 /* Call the RTP callback to send the last GOB */
2918 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
2924 #define MERGE(field) dst->field += src->field; src->field=0
/* Fold per-slice motion-estimation statistics from a worker-thread
 * context (src) back into the main context (dst).  MERGE() adds the
 * field into dst and zeroes it in src, so a second merge of the same
 * src contributes nothing.
 * NOTE(review): MERGE expands to two statements without a do{}while(0)
 * wrapper — safe in the straight-line uses visible here, but fragile
 * inside an unbraced if/else. */
2925 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
2926 MERGE(me.scene_change_score);
2927 MERGE(me.mc_mb_var_sum_temp);
2928 MERGE(me.mb_var_sum_temp);
/* Merge a worker-thread slice context back into the main context after
 * encoding: accumulates statistics (DCT counts, error counters, PSNR
 * error sums, noise-reduction sums) and appends the slice's bitstream
 * onto the main PutBitContext.  Listing is sampled; some merged fields
 * fall in the gaps. */
2931 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
2934 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
2935 MERGE(dct_count[1]);
2944 MERGE(er.error_count);
2945 MERGE(padding_bug_score);
2946 MERGE(current_picture.f.error[0]);
2947 MERGE(current_picture.f.error[1]);
2948 MERGE(current_picture.f.error[2]);
/* noise reduction keeps a per-coefficient error sum for both the
 * intra (index 0) and inter (index 1) quantizer paths */
2950 if(dst->avctx->noise_reduction){
2951 for(i=0; i<64; i++){
2952 MERGE(dct_error_sum[0][i]);
2953 MERGE(dct_error_sum[1][i]);
/* both bitstreams must be byte-aligned before the slice payload can be
 * copied verbatim onto the main output */
2957 assert(put_bits_count(&src->pb) % 8 ==0);
2958 assert(put_bits_count(&dst->pb) % 8 ==0);
2959 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
2960 flush_put_bits(&dst->pb);
/* Choose the quantizer/lambda for the current picture.
 * Priority: an explicitly queued next_lambda wins; otherwise, unless a
 * fixed qscale was requested, ask the rate controller
 * (ff_rate_estimate_qscale).  With dry_run set, next_lambda is left
 * queued for the real pass.  Returns negative on rate-control failure
 * (the quality < 0 branch — its body falls in a listing gap; presumably
 * `return -1` — TODO confirm against full source). */
2963 static int estimate_qp(MpegEncContext *s, int dry_run){
2964 if (s->next_lambda){
2965 s->current_picture_ptr->f.quality =
2966 s->current_picture.f.quality = s->next_lambda;
2967 if(!dry_run) s->next_lambda= 0;
2968 } else if (!s->fixed_qscale) {
2969 s->current_picture_ptr->f.quality =
2970 s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
2971 if (s->current_picture.f.quality < 0)
/* with adaptive quantization, smooth the per-MB qscale table in a
 * codec-specific way before initializing it */
2975 if(s->adaptive_quant){
2976 switch(s->codec_id){
2977 case AV_CODEC_ID_MPEG4:
2978 if (CONFIG_MPEG4_ENCODER)
2979 ff_clean_mpeg4_qscales(s);
2981 case AV_CODEC_ID_H263:
2982 case AV_CODEC_ID_H263P:
2983 case AV_CODEC_ID_FLV1:
2984 if (CONFIG_H263_ENCODER)
2985 ff_clean_h263_qscales(s);
2988 ff_init_qscale_tab(s);
2991 s->lambda= s->lambda_table[0];
/* non-adaptive path: lambda comes straight from the picture quality */
2994 s->lambda = s->current_picture.f.quality;
2999 /* must be called before writing the header */
/* Update the temporal-distance bookkeeping used by B-frame prediction:
 * pp_time = distance between the two surrounding reference frames,
 * pb_time = distance from the previous reference to this B frame.
 * s->time is derived from the picture pts scaled by time_base.num. */
3000 static void set_frame_distances(MpegEncContext * s){
3001 assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
3002 s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3004 if(s->pict_type==AV_PICTURE_TYPE_B){
3005 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3006 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
/* reference (non-B) picture: advance the reference-to-reference
 * distance and remember this picture's time */
3008 s->pp_time= s->time - s->last_non_b_time;
3009 s->last_non_b_time= s->time;
3010 assert(s->picture_number==0 || s->pp_time > 0);
/* Encode one picture: motion estimation across all slice threads,
 * picture-type/f_code decisions, quantizer estimation, header writing,
 * then the per-slice encode_thread pass and the merge of slice results.
 * Listing is sampled; error-handling bodies and some statements fall in
 * the gaps. */
3014 static int encode_picture(MpegEncContext *s, int picture_number)
3018 int context_count = s->slice_context_count;
3020 s->picture_number = picture_number;
3022 /* Reset the average MB variance */
3023 s->me.mb_var_sum_temp =
3024 s->me.mc_mb_var_sum_temp = 0;
3026 /* we need to initialize some time vars before we can encode b-frames */
3027 // RAL: Condition added for MPEG1VIDEO
3028 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3029 set_frame_distances(s);
3030 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3031 ff_set_mpeg4_time(s);
3033 s->me.scene_change_score=0;
3035 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
/* rounding control: I frames reset it; P frames toggle it for codecs
 * with flip-flop rounding so halfpel rounding errors do not accumulate */
3037 if(s->pict_type==AV_PICTURE_TYPE_I){
3038 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3039 else s->no_rounding=0;
3040 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3041 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3042 s->no_rounding ^= 1;
/* two-pass: take qp/fcode from the stats file; otherwise reuse the last
 * lambda for this picture type as the starting point for ME */
3045 if(s->flags & CODEC_FLAG_PASS2){
3046 if (estimate_qp(s,1) < 0)
3048 ff_get_2pass_fcode(s);
3049 }else if(!(s->flags & CODEC_FLAG_QSCALE)){
3050 if(s->pict_type==AV_PICTURE_TYPE_B)
3051 s->lambda= s->last_lambda_for[s->pict_type];
3053 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3057 s->mb_intra=0; //for the rate distortion & bit compare functions
/* clone the main context into each slice-thread context before ME */
3058 for(i=1; i<context_count; i++){
3059 ret = ff_update_duplicate_context(s->thread_context[i], s);
3067 /* Estimate motion for every MB */
3068 if(s->pict_type != AV_PICTURE_TYPE_I){
3069 s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3070 s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3071 if (s->pict_type != AV_PICTURE_TYPE_B) {
3072 if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3073 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3077 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3078 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3080 for(i=0; i<s->mb_stride*s->mb_height; i++)
3081 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3083 if(!s->fixed_qscale){
3084 /* finding spatial complexity for I-frame rate control */
3085 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3088 for(i=1; i<context_count; i++){
3089 merge_context_after_me(s, s->thread_context[i]);
3091 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3092 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
/* scene-change detection: promote a P frame to I when ME reports a
 * score above the user threshold */
3095 if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3096 s->pict_type= AV_PICTURE_TYPE_I;
3097 for(i=0; i<s->mb_stride*s->mb_height; i++)
3098 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3099 av_dlog(s, "Scene change detected, encoding as I Frame %d %d\n",
3100 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
/* P/S frames: derive f_code from the MV tables and clip out-of-range
 * vectors back into the representable range */
3104 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3105 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3107 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3109 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3110 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3111 s->f_code= FFMAX3(s->f_code, a, b);
3114 ff_fix_long_p_mvs(s);
3115 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3116 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3120 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3121 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
/* B frames: separate forward (f_code) and backward (b_code) ranges */
3126 if(s->pict_type==AV_PICTURE_TYPE_B){
3129 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3130 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3131 s->f_code = FFMAX(a, b);
3133 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3134 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3135 s->b_code = FFMAX(a, b);
3137 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3138 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3139 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3140 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3141 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3143 for(dir=0; dir<2; dir++){
3146 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3147 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3148 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3149 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
/* real (non-dry-run) quantizer estimation for this picture */
3157 if (estimate_qp(s, 0) < 0)
3160 if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
3161 s->qscale= 3; //reduce clipping problems
3163 if (s->out_format == FMT_MJPEG) {
3164 /* for mjpeg, we do include qscale in the matrix */
3166 int j= s->dsp.idct_permutation[i];
3168 s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3170 s->y_dc_scale_table=
3171 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3172 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3173 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3174 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3178 //FIXME var duplication
3179 s->current_picture_ptr->f.key_frame =
3180 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3181 s->current_picture_ptr->f.pict_type =
3182 s->current_picture.f.pict_type = s->pict_type;
3184 if (s->current_picture.f.key_frame)
3185 s->picture_in_gop_number=0;
/* write the codec-specific picture header and account its bit cost */
3187 s->last_bits= put_bits_count(&s->pb);
3188 switch(s->out_format) {
3190 if (CONFIG_MJPEG_ENCODER)
3191 ff_mjpeg_encode_picture_header(s);
3194 if (CONFIG_H261_ENCODER)
3195 ff_h261_encode_picture_header(s, picture_number);
3198 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3199 ff_wmv2_encode_picture_header(s, picture_number);
3200 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3201 ff_msmpeg4_encode_picture_header(s, picture_number);
3202 else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3203 ff_mpeg4_encode_picture_header(s, picture_number);
3204 else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
3205 ff_rv10_encode_picture_header(s, picture_number);
3206 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3207 ff_rv20_encode_picture_header(s, picture_number);
3208 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3209 ff_flv_encode_picture_header(s, picture_number);
3210 else if (CONFIG_H263_ENCODER)
3211 ff_h263_encode_picture_header(s, picture_number);
3214 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3215 ff_mpeg1_encode_picture_header(s, picture_number);
3220 bits= put_bits_count(&s->pb);
3221 s->header_bits= bits - s->last_bits;
/* run the slice threads and merge their stats and bitstreams back */
3223 for(i=1; i<context_count; i++){
3224 update_duplicate_context_after_me(s->thread_context[i], s);
3226 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3227 for(i=1; i<context_count; i++){
3228 merge_context_after_encode(s, s->thread_context[i]);
/* Noise-reduction filter applied to a DCT block before quantization.
 * Tracks a running per-coefficient error sum (dct_error_sum) separately
 * for intra/inter blocks and shrinks each coefficient toward zero by
 * the learned offset (dct_offset), clamping so a coefficient never
 * crosses zero.  Lines for the negative-level branch structure fall in
 * listing gaps. */
3234 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3235 const int intra= s->mb_intra;
3238 s->dct_count[intra]++;
3240 for(i=0; i<64; i++){
3241 int level= block[i];
/* positive-level path: accumulate the error, subtract the offset,
 * clamp at zero */
3245 s->dct_error_sum[intra][i] += level;
3246 level -= s->dct_offset[intra][i];
3247 if(level<0) level=0;
/* negative-level path: mirror of the above */
3249 s->dct_error_sum[intra][i] -= level;
3250 level += s->dct_offset[intra][i];
3251 if(level>0) level=0;
/* Rate-distortion optimal (trellis) quantization of one 8x8 block.
 * Runs the forward DCT, builds up to two candidate quantized levels per
 * coefficient, then uses dynamic programming over (run, level) survivor
 * paths — weighing squared reconstruction error against VLC bit cost
 * scaled by lambda — to pick the cheapest coding of the block.
 * Returns the index of the last non-zero coefficient (or -1 for an
 * all-zero AC block), and sets *overflow when a level exceeds the
 * codec's max coefficient.  Listing is sampled: several declarations,
 * braces and else-branches fall in the gaps. */
3258 static int dct_quantize_trellis_c(MpegEncContext *s,
3259 int16_t *block, int n,
3260 int qscale, int *overflow){
3262 const uint8_t *scantable= s->intra_scantable.scantable;
3263 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3265 unsigned int threshold1, threshold2;
3277 int coeff_count[64];
3278 int qmul, qadd, start_i, last_non_zero, i, dc;
3279 const int esc_length= s->ac_esc_length;
3281 uint8_t * last_length;
/* lambda converts VLC bit counts into the distortion scale */
3282 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3284 s->dsp.fdct (block);
3286 if(s->dct_error_sum)
3287 s->denoise_dct(s, block);
3289 qadd= ((qscale-1)|1)*8;
3300 /* For AIC we skip quant/dequant of INTRADC */
3305 /* note: block[0] is assumed to be positive */
3306 block[0] = (block[0] + (q >> 1)) / q;
/* intra path: intra quant matrix and intra AC VLC length tables;
 * MPEG-style quant gets a 0.5 rounding bias */
3309 qmat = s->q_intra_matrix[qscale];
3310 if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3311 bias= 1<<(QMAT_SHIFT-1);
3312 length = s->intra_ac_vlc_length;
3313 last_length= s->intra_ac_vlc_last_length;
/* inter path (else-branch; brace in a listing gap) */
3317 qmat = s->q_inter_matrix[qscale];
3318 length = s->inter_ac_vlc_length;
3319 last_length= s->inter_ac_vlc_last_length;
3323 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3324 threshold2= (threshold1<<1);
/* scan backwards to find the last coefficient that quantizes non-zero */
3326 for(i=63; i>=start_i; i--) {
3327 const int j = scantable[i];
3328 int level = block[j] * qmat[j];
3330 if(((unsigned)(level+threshold1))>threshold2){
/* build candidate levels: for each surviving coefficient keep the
 * rounded level and level-1 (two trellis choices) */
3336 for(i=start_i; i<=last_non_zero; i++) {
3337 const int j = scantable[i];
3338 int level = block[j] * qmat[j];
3340 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3341 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3342 if(((unsigned)(level+threshold1))>threshold2){
3344 level= (bias + level)>>QMAT_SHIFT;
3346 coeff[1][i]= level-1;
3347 // coeff[2][k]= level-2;
3349 level= (bias - level)>>QMAT_SHIFT;
3350 coeff[0][i]= -level;
3351 coeff[1][i]= -level+1;
3352 // coeff[2][k]= -level+2;
3354 coeff_count[i]= FFMIN(level, 2);
3355 assert(coeff_count[i]);
/* below-threshold coefficient: single candidate of magnitude 1 with
 * the sign of the original level */
3358 coeff[0][i]= (level>>31)|1;
3363 *overflow= s->max_qcoeff < max; //overflow might have happened
3365 if(last_non_zero < start_i){
3366 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3367 return last_non_zero;
/* dynamic programming: score_tab[i] is the best cost of coding all
 * coefficients before scan position i; survivor[] holds the positions
 * still worth extending */
3370 score_tab[start_i]= 0;
3371 survivor[0]= start_i;
3374 for(i=start_i; i<=last_non_zero; i++){
3375 int level_index, j, zero_distortion;
3376 int dct_coeff= FFABS(block[ scantable[i] ]);
3377 int best_score=256*256*256*120;
/* the ifast FDCT leaves AAN scaling in the coefficients; undo it so
 * distortion is measured in a uniform domain */
3379 if (s->dsp.fdct == ff_fdct_ifast)
3380 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3381 zero_distortion= dct_coeff*dct_coeff;
3383 for(level_index=0; level_index < coeff_count[i]; level_index++){
3385 int level= coeff[level_index][i];
3386 const int alevel= FFABS(level);
/* reconstruct the dequantized value the decoder would see, per the
 * codec's dequant rule (H.263 linear vs MPEG matrix, intra/inter) */
3391 if(s->out_format == FMT_H263){
3392 unquant_coeff= alevel*qmul + qadd;
3394 j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
3396 unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3;
3397 unquant_coeff = (unquant_coeff - 1) | 1;
3399 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3400 unquant_coeff = (unquant_coeff - 1) | 1;
3405 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
/* level fits the VLC table (|level| < 128): try extending every
 * survivor with this (run, level) pair */
3407 if((level&(~127)) == 0){
3408 for(j=survivor_count-1; j>=0; j--){
3409 int run= i - survivor[j];
3410 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3411 score += score_tab[i-run];
3413 if(score < best_score){
3416 level_tab[i+1]= level-64;
/* H.263-style: also track the cheapest way to END the block here,
 * using the "last" VLC tables */
3420 if(s->out_format == FMT_H263){
3421 for(j=survivor_count-1; j>=0; j--){
3422 int run= i - survivor[j];
3423 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3424 score += score_tab[i-run];
3425 if(score < last_score){
3428 last_level= level-64;
/* level too large for the VLC: cost it as an escape code */
3434 distortion += esc_length*lambda;
3435 for(j=survivor_count-1; j>=0; j--){
3436 int run= i - survivor[j];
3437 int score= distortion + score_tab[i-run];
3439 if(score < best_score){
3442 level_tab[i+1]= level-64;
3446 if(s->out_format == FMT_H263){
3447 for(j=survivor_count-1; j>=0; j--){
3448 int run= i - survivor[j];
3449 int score= distortion + score_tab[i-run];
3450 if(score < last_score){
3453 last_level= level-64;
3461 score_tab[i+1]= best_score;
/* prune survivors that can no longer beat the best path; the run
 * length is capped by the VLC design (<= 27 vs 63 cases) */
3463 //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
3464 if(last_non_zero <= 27){
3465 for(; survivor_count; survivor_count--){
3466 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
3470 for(; survivor_count; survivor_count--){
3471 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
3476 survivor[ survivor_count++ ]= i+1;
/* non-H.263: choose the best block-ending position after the loop */
3479 if(s->out_format != FMT_H263){
3480 last_score= 256*256*256*120;
3481 for(i= survivor[0]; i<=last_non_zero + 1; i++){
3482 int score= score_tab[i];
3483 if(i) score += lambda*2; //FIXME exacter?
3485 if(score < last_score){
3488 last_level= level_tab[i];
3489 last_run= run_tab[i];
3494 s->coded_score[n] = last_score;
3496 dc= FFABS(block[0]);
3497 last_non_zero= last_i - 1;
3498 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3500 if(last_non_zero < start_i)
3501 return last_non_zero;
/* special case: only the DC-position coefficient survives — re-decide
 * it (or drop it) against the cost of an empty block */
3503 if(last_non_zero == 0 && start_i == 0){
3505 int best_score= dc * dc;
3507 for(i=0; i<coeff_count[0]; i++){
3508 int level= coeff[i][0];
3509 int alevel= FFABS(level);
3510 int unquant_coeff, score, distortion;
3512 if(s->out_format == FMT_H263){
3513 unquant_coeff= (alevel*qmul + qadd)>>3;
3515 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
3516 unquant_coeff = (unquant_coeff - 1) | 1;
3518 unquant_coeff = (unquant_coeff + 4) >> 3;
3519 unquant_coeff<<= 3 + 3;
3521 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
3523 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
3524 else score= distortion + esc_length*lambda;
3526 if(score < best_score){
3528 best_level= level - 64;
3531 block[0]= best_level;
3532 s->coded_score[n] = best_score - dc*dc;
3533 if(best_level == 0) return -1;
3534 else return last_non_zero;
/* backtrack the winning trellis path into the output block,
 * writing via the permuted scantable */
3540 block[ perm_scantable[last_non_zero] ]= last_level;
3543 for(; i>start_i; i -= run_tab[i] + 1){
3544 block[ perm_scantable[i-1] ]= level_tab[i];
3547 return last_non_zero;
3550 //#define REFINE_STATS 1
/* 64 fixed-point 8x8 IDCT basis functions (scaled by BASIS_SHIFT),
 * indexed through the IDCT permutation; filled lazily by build_basis()
 * and used by dct_quantize_refine(). */
3551 static int16_t basis[64][64];
/* Build the permuted DCT-II basis table: entry [perm[index]][8*x+y] is
 * the (i,j) cosine basis sampled at pixel (x,y), with the standard
 * 1/sqrt(2) normalization for the i==0 / j==0 rows.  The surrounding
 * i/j/x/y loop headers fall in listing gaps. */
3553 static void build_basis(uint8_t *perm){
3560 double s= 0.25*(1<<BASIS_SHIFT);
3562 int perm_index= perm[index];
3563 if(i==0) s*= sqrt(0.5);
3564 if(j==0) s*= sqrt(0.5);
3565 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/**
 * Noise-shaping refinement of an already-quantized 8x8 block: greedily try
 * +-1 changes on coefficients and keep the change with the best combined
 * rate-distortion score — rate from the AC VLC bit-length tables
 * (length/last_length), distortion from dsp.try_8x8basis() applied to the
 * pixel-domain residual rem[].  Returns the updated last-nonzero index.
 * NOTE(review): many lines of this function are elided in this view of the
 * file (declarations, braces, else-branches); the comments below describe
 * only what the visible statements establish.
 */
3572 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
3573 int16_t *block, int16_t *weight, int16_t *orig,
3576 LOCAL_ALIGNED_16(int16_t, d1, [64]);
3577 const uint8_t *scantable= s->intra_scantable.scantable;
3578 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3579 // unsigned int threshold1, threshold2;
3584 int qmul, qadd, start_i, last_non_zero, i, dc;
3586 uint8_t * last_length;
3588 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* debug/statistics counters — static, so NOT thread-safe; presumably only
 * compiled in for instrumentation builds (TODO confirm surrounding #ifdef) */
3591 static int after_last=0;
3592 static int to_zero=0;
3593 static int from_zero=0;
3596 static int messed_sign=0;
/* lazily build the pixel-domain DCT basis tables on first use */
3599 if(basis[0][0] == 0)
3600 build_basis(s->dsp.idct_permutation);
3611 /* For AIC we skip quant/dequant of INTRADC */
3615 q <<= RECON_SHIFT-3;
3616 /* note: block[0] is assumed to be positive */
3618 // block[0] = (block[0] + (q >> 1)) / q;
3620 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3621 // bias= 1<<(QMAT_SHIFT-1);
/* pick the VLC bit-length tables; presumably intra branch here,
 * inter branch below (the if/else lines are elided in this view) */
3622 length = s->intra_ac_vlc_length;
3623 last_length= s->intra_ac_vlc_last_length;
3627 length = s->inter_ac_vlc_length;
3628 last_length= s->inter_ac_vlc_last_length;
3630 last_non_zero = s->block_last_index[n];
/* rounding offset for the RECON_SHIFT fixed-point representation */
3635 dc += (1<<(RECON_SHIFT-1));
/* initialize rem[] with the (negated) original pixels; the dequantized
 * coefficients are added back in below so rem[] ends up holding the
 * pixel-domain reconstruction error */
3636 for(i=0; i<64; i++){
3637 rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig dirrectly instead of copying to rem[]
3640 STOP_TIMER("memset rem[]")}
/* derive per-coefficient noise-shaping weights from weight[] and the
 * quantizer_noise_shaping strength qns */
3643 for(i=0; i<64; i++){
3648 w= FFABS(weight[i]) + qns*one;
3649 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
3652 // w=weight[i] = (63*qns + (w/2)) / w;
3658 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* walk the existing nonzero coefficients in scan order: record their runs
 * in run_tab[] and add each dequantized coefficient's basis pattern into
 * rem[] */
3664 for(i=start_i; i<=last_non_zero; i++){
3665 int j= perm_scantable[i];
3666 const int level= block[j];
3670 if(level<0) coeff= qmul*level - qadd;
3671 else coeff= qmul*level + qadd;
3672 run_tab[rle_index++]=run;
3675 s->dsp.add_8x8basis(rem, basis[j], coeff);
3681 if(last_non_zero>0){
3682 STOP_TIMER("init rem[]")
/* iterative search: each pass evaluates candidate +-1 changes and applies
 * the single best one, until no change improves best_score */
3689 int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
3692 int run2, best_unquant_change=0, analyze_gradient;
3696 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
/* precompute a weighted gradient d1[] of the residual, used below to skip
 * candidate sign choices that point against the error direction */
3698 if(analyze_gradient){
3702 for(i=0; i<64; i++){
3705 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
3708 STOP_TIMER("rem*w*w")}
/* DC coefficient: tried separately (intra only), dequantized as q*level
 * and bounded to the valid reconstruction range */
3718 const int level= block[0];
3719 int change, old_coeff;
3721 assert(s->mb_intra);
3725 for(change=-1; change<=1; change+=2){
3726 int new_level= level + change;
3727 int score, new_coeff;
3729 new_coeff= q*new_level;
3730 if(new_coeff >= 2048 || new_coeff < 0)
3733 score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
3734 if(score<best_score){
3737 best_change= change;
3738 best_unquant_change= new_coeff - old_coeff;
3745 run2= run_tab[rle_index++];
/* AC coefficients: try +-1 on every scan position (past last_non_zero too
 * when noise shaping level >= 3) */
3749 for(i=start_i; i<64; i++){
3750 int j= perm_scantable[i];
3751 const int level= block[j];
3752 int change, old_coeff;
3754 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
3758 if(level<0) old_coeff= qmul*level - qadd;
3759 else old_coeff= qmul*level + qadd;
3760 run2= run_tab[rle_index++]; //FIXME ! maybe after last
3764 assert(run2>=0 || i >= last_non_zero );
3767 for(change=-1; change<=1; change+=2){
3768 int new_level= level + change;
3769 int score, new_coeff, unquant_change;
/* at low noise-shaping strength only allow changes toward zero */
3772 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
3776 if(new_level<0) new_coeff= qmul*new_level - qadd;
3777 else new_coeff= qmul*new_level + qadd;
3778 if(new_coeff >= 2048 || new_coeff <= -2048)
3780 //FIXME check for overflow
/* simple case: level stays nonzero, so only this coefficient's own VLC
 * length changes (UNI_AC_ENC_INDEX tables are biased by +64) */
3783 if(level < 63 && level > -63){
3784 if(i < last_non_zero)
3785 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
3786 - length[UNI_AC_ENC_INDEX(run, level+64)];
3788 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
3789 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* presumably the level==0 -> +-1 creation case: inserting a coefficient
 * splits the run that previously covered this position (the branch
 * structure is elided in this view — TODO confirm) */
3792 assert(FFABS(new_level)==1);
3794 if(analyze_gradient){
3795 int g= d1[ scantable[i] ];
/* skip a candidate whose sign agrees with the residual gradient */
3796 if(g && (g^new_level) >= 0)
3800 if(i < last_non_zero){
3801 int next_i= i + run2 + 1;
3802 int next_level= block[ perm_scantable[next_i] ] + 64;
3804 if(next_level&(~127))
3807 if(next_i < last_non_zero)
3808 score += length[UNI_AC_ENC_INDEX(run, 65)]
3809 + length[UNI_AC_ENC_INDEX(run2, next_level)]
3810 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3812 score += length[UNI_AC_ENC_INDEX(run, 65)]
3813 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3814 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3816 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
3818 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3819 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* presumably the +-1 -> 0 removal case: deleting a coefficient merges the
 * runs on either side of it (branch structure elided — TODO confirm) */
3825 assert(FFABS(level)==1);
3827 if(i < last_non_zero){
3828 int next_i= i + run2 + 1;
3829 int next_level= block[ perm_scantable[next_i] ] + 64;
3831 if(next_level&(~127))
3834 if(next_i < last_non_zero)
3835 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3836 - length[UNI_AC_ENC_INDEX(run2, next_level)]
3837 - length[UNI_AC_ENC_INDEX(run, 65)];
3839 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3840 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3841 - length[UNI_AC_ENC_INDEX(run, 65)];
3843 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
3845 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3846 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* combine the rate delta (scaled by lambda above, elided here) with the
 * distortion delta from the pixel-domain basis projection */
3853 unquant_change= new_coeff - old_coeff;
3854 assert((score < 100*lambda && score > -100*lambda) || lambda==0);
3856 score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
3857 if(score<best_score){
3860 best_change= change;
3861 best_unquant_change= unquant_change;
3865 prev_level= level + 64;
3866 if(prev_level&(~127))
3875 STOP_TIMER("iterative step")}
/* apply the single best change found in this pass */
3879 int j= perm_scantable[ best_coeff ];
3881 block[j] += best_change;
3883 if(best_coeff > last_non_zero){
3884 last_non_zero= best_coeff;
/* statistics: classify the applied change (raise/lower/sign flip etc.) */
3892 if(block[j] - best_change){
3893 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
/* if the last coefficient was zeroed, rescan backwards for the new
 * last_non_zero */
3905 for(; last_non_zero>=start_i; last_non_zero--){
3906 if(block[perm_scantable[last_non_zero]])
/* periodic debug dump of the static counters */
3912 if(256*256*256*64 % count == 0){
3913 printf("after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
/* rebuild run_tab[] for the modified block and fold the applied change
 * into rem[] before the next search pass */
3918 for(i=start_i; i<=last_non_zero; i++){
3919 int j= perm_scantable[i];
3920 const int level= block[j];
3923 run_tab[rle_index++]=run;
3930 s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
3936 if(last_non_zero>0){
3937 STOP_TIMER("iterative search")
3942 return last_non_zero;
/**
 * Default C implementation of forward DCT + quantization for one 8x8 block.
 * Performs the fdct (optionally denoised), quantizes the coefficients with
 * the intra/inter matrix selected by qscale, reports via *overflow whether
 * any quantized coefficient exceeded s->max_qcoeff, and finally permutes
 * the nonzero coefficients for the IDCT.  Returns the index of the last
 * nonzero coefficient in scan order.
 * NOTE(review): several lines (branch headers, declarations, loop bodies)
 * are elided in this view of the file.
 */
3945 int ff_dct_quantize_c(MpegEncContext *s,
3946 int16_t *block, int n,
3947 int qscale, int *overflow)
3949 int i, j, level, last_non_zero, q, start_i;
3951 const uint8_t *scantable= s->intra_scantable.scantable;
3954 unsigned int threshold1, threshold2;
/* forward DCT in place */
3956 s->dsp.fdct (block);
/* optional DCT-domain denoising driven by the accumulated error sums */
3958 if(s->dct_error_sum)
3959 s->denoise_dct(s, block);
3969 /* For AIC we skip quant/dequant of INTRADC */
3972 /* note: block[0] is assumed to be positive */
/* intra DC: plain rounded division by the DC quantizer q */
3973 block[0] = (block[0] + (q >> 1)) / q;
/* select the quant matrix and rounding bias; presumably intra branch here,
 * inter branch below (the if/else lines are elided in this view) */
3976 qmat = s->q_intra_matrix[qscale];
3977 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
3981 qmat = s->q_inter_matrix[qscale];
3982 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
/* (unsigned)(level+threshold1) > threshold2 is a branch-friendly test for
 * |level| > threshold1, i.e. "coefficient survives quantization" */
3984 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3985 threshold2= (threshold1<<1);
/* first pass: scan from the highest frequency downwards to locate the last
 * coefficient that quantizes to a nonzero value */
3986 for(i=63;i>=start_i;i--) {
3988 level = block[j] * qmat[j];
3990 if(((unsigned)(level+threshold1))>threshold2){
/* second pass: quantize all coefficients up to last_non_zero */
3997 for(i=start_i; i<=last_non_zero; i++) {
3999 level = block[j] * qmat[j];
4001 // if( bias+level >= (1<<QMAT_SHIFT)
4002 // || bias-level >= (1<<QMAT_SHIFT)){
4003 if(((unsigned)(level+threshold1))>threshold2){
4005 level= (bias + level)>>QMAT_SHIFT;
/* negative levels are quantized symmetrically (negated afterwards;
 * the negation line is elided in this view) */
4008 level= (bias - level)>>QMAT_SHIFT;
4016 *overflow= s->max_qcoeff < max; //overflow might have happened
4018 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4019 if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
4020 ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
4022 return last_non_zero;
/* Helper macros for the AVOption tables below: OFFSET() locates a field
 * inside MpegEncContext, VE marks options as video+encoding parameters. */
4025 #define OFFSET(x) offsetof(MpegEncContext, x)
4026 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the plain H.263 encoder.
 * NOTE(review): the terminating { NULL } entry and closing }; are elided in
 * this view of the file. */
4027 static const AVOption h263_options[] = {
4028 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4029 { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4030 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass exposing h263_options through the AVOption API.
 * NOTE(review): the closing }; is elided in this view of the file. */
4035 static const AVClass h263_class = {
4036 .class_name = "H.263 encoder",
4037 .item_name = av_default_item_name,
4038 .option = h263_options,
4039 .version = LIBAVUTIL_VERSION_INT,
/* H.263 encoder registration; all codec callbacks are the shared
 * mpegvideo encoder entry points.
 * NOTE(review): the .name field and closing }; are elided in this view. */
4042 AVCodec ff_h263_encoder = {
4044 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4045 .type = AVMEDIA_TYPE_VIDEO,
4046 .id = AV_CODEC_ID_H263,
4047 .priv_data_size = sizeof(MpegEncContext),
4048 .init = ff_MPV_encode_init,
4049 .encode2 = ff_MPV_encode_picture,
4050 .close = ff_MPV_encode_end,
4051 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4052 .priv_class = &h263_class,
/* Private options of the H.263+ encoder (superset of the H.263 options:
 * adds unlimited MVs and alternative inter VLC).
 * NOTE(review): the terminating { NULL } entry and closing }; are elided in
 * this view of the file. */
4055 static const AVOption h263p_options[] = {
4056 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4057 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4058 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4059 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
/* AVClass exposing h263p_options through the AVOption API.
 * NOTE(review): the closing }; is elided in this view of the file. */
4063 static const AVClass h263p_class = {
4064 .class_name = "H.263p encoder",
4065 .item_name = av_default_item_name,
4066 .option = h263p_options,
4067 .version = LIBAVUTIL_VERSION_INT,
/* H.263+ (H.263 version 2) encoder registration; unlike plain H.263 it
 * advertises slice-threading capability.
 * NOTE(review): the .name field and closing }; are elided in this view. */
4070 AVCodec ff_h263p_encoder = {
4072 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4073 .type = AVMEDIA_TYPE_VIDEO,
4074 .id = AV_CODEC_ID_H263P,
4075 .priv_data_size = sizeof(MpegEncContext),
4076 .init = ff_MPV_encode_init,
4077 .encode2 = ff_MPV_encode_picture,
4078 .close = ff_MPV_encode_end,
4079 .capabilities = CODEC_CAP_SLICE_THREADS,
4080 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4081 .priv_class = &h263p_class,
/* FF_MPV_GENERIC_CLASS instantiates the generic mpegvideo AVClass
 * (msmpeg4v2_class) referenced by .priv_class below. */
4084 FF_MPV_GENERIC_CLASS(msmpeg4v2)
/* MS-MPEG4 v2 encoder registration, sharing the mpegvideo entry points.
 * NOTE(review): the closing }; is elided in this view of the file. */
4086 AVCodec ff_msmpeg4v2_encoder = {
4087 .name = "msmpeg4v2",
4088 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4089 .type = AVMEDIA_TYPE_VIDEO,
4090 .id = AV_CODEC_ID_MSMPEG4V2,
4091 .priv_data_size = sizeof(MpegEncContext),
4092 .init = ff_MPV_encode_init,
4093 .encode2 = ff_MPV_encode_picture,
4094 .close = ff_MPV_encode_end,
4095 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4096 .priv_class = &msmpeg4v2_class,
/* FF_MPV_GENERIC_CLASS instantiates the generic mpegvideo AVClass
 * (msmpeg4v3_class) referenced by .priv_class below. */
4099 FF_MPV_GENERIC_CLASS(msmpeg4v3)
/* MS-MPEG4 v3 encoder registration, sharing the mpegvideo entry points.
 * NOTE(review): the .name field and closing }; are elided in this view. */
4101 AVCodec ff_msmpeg4v3_encoder = {
4103 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4104 .type = AVMEDIA_TYPE_VIDEO,
4105 .id = AV_CODEC_ID_MSMPEG4V3,
4106 .priv_data_size = sizeof(MpegEncContext),
4107 .init = ff_MPV_encode_init,
4108 .encode2 = ff_MPV_encode_picture,
4109 .close = ff_MPV_encode_end,
4110 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4111 .priv_class = &msmpeg4v3_class,
/* FF_MPV_GENERIC_CLASS instantiates the generic mpegvideo AVClass
 * (wmv1_class) referenced by .priv_class below. */
4114 FF_MPV_GENERIC_CLASS(wmv1)
/* WMV7 encoder registration, sharing the mpegvideo entry points.
 * NOTE(review): the .name field is elided and the closing }; falls past the
 * end of this view of the file. */
4116 AVCodec ff_wmv1_encoder = {
4118 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4119 .type = AVMEDIA_TYPE_VIDEO,
4120 .id = AV_CODEC_ID_WMV1,
4121 .priv_data_size = sizeof(MpegEncContext),
4122 .init = ff_MPV_encode_init,
4123 .encode2 = ff_MPV_encode_picture,
4124 .close = ff_MPV_encode_end,
4125 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4126 .priv_class = &wmv1_class,