2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/internal.h"
31 #include "libavutil/intmath.h"
32 #include "libavutil/mathematics.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/opt.h"
39 #include "mpegvideo.h"
47 #include "aandcttab.h"
49 #include "mpeg4video.h"
51 #include "bytestream.h"
/* Forward declarations for encoder-internal helpers defined later in this file. */
54 static int encode_picture(MpegEncContext *s, int picture_number);
55 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
56 static int sse_mb(MpegEncContext *s);
57 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
58 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
/* Shared default tables for motion estimation: assigned to s->me.mv_penalty
 * and s->fcode_tab in MPV_encode_defaults(); shared by all encoder instances. */
60 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
61 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
63 const AVOption ff_mpv_generic_options[] = {
/**
 * Build per-qscale quantizer multiplier tables from a quantization matrix.
 * qmat receives 32-bit reciprocal factors; qmat16 receives 16-bit
 * factor/bias pairs (index 0 = factor, index 1 = rounding bias) for the
 * fast/MMX quantizer path.  The branch taken depends on which forward-DCT
 * implementation is active, because ff_fdct_ifast leaves the AAN scale
 * factors folded into its output and they must be divided out here.
 */
68 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
69 uint16_t (*qmat16)[2][64],
70 const uint16_t *quant_matrix,
71 int bias, int qmin, int qmax, int intra)
/* One table per qscale value in the allowed [qmin, qmax] range. */
76 for (qscale = qmin; qscale <= qmax; qscale++) {
/* Accurate integer DCTs: plain reciprocal of qscale * matrix coefficient. */
78 if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
79 dsp->fdct == ff_jpeg_fdct_islow_10 ||
80 dsp->fdct == ff_faandct) {
81 for (i = 0; i < 64; i++) {
/* j: coefficient position after the IDCT permutation. */
82 const int j = dsp->idct_permutation[i];
83 /* 16 <= qscale * quant_matrix[i] <= 7905
84 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
85 * 19952 <= x <= 249205026
86 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
87 * 3444240 >= (1 << 36) / (x) >= 275 */
89 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
90 (qscale * quant_matrix[j]));
/* ifast DCT: divide out the AAN scale factors (extra 14-bit shift). */
92 } else if (dsp->fdct == ff_fdct_ifast) {
93 for (i = 0; i < 64; i++) {
94 const int j = dsp->idct_permutation[i];
95 /* 16 <= qscale * quant_matrix[i] <= 7905
96 * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
97 * 19952 <= x <= 249205026
98 * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
99 * 3444240 >= (1 << 36) / (x) >= 275 */
101 qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
102 (ff_aanscales[i] * qscale *
/* Default path: fill both the 32-bit table and the 16-bit MMX tables. */
106 for (i = 0; i < 64; i++) {
107 const int j = dsp->idct_permutation[i];
108 /* We can safely suppose that 16 <= quant_matrix[i] <= 255
109 * Assume x = qscale * quant_matrix[i]
111 * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
112 * so 32768 >= (1 << 19) / (x) >= 67 */
113 qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
114 (qscale * quant_matrix[j]));
115 //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
116 // (qscale * quant_matrix[i]);
117 qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
118 (qscale * quant_matrix[j]);
/* Clamp the 16-bit factor away from 0 and from the 128*256 sentinel
 * so the fixed-point multiply in the quantizer cannot misbehave. */
120 if (qmat16[qscale][0][i] == 0 ||
121 qmat16[qscale][0][i] == 128 * 256)
122 qmat16[qscale][0][i] = 128 * 256 - 1;
123 qmat16[qscale][1][i] =
124 ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
125 qmat16[qscale][0][i]);
/* Overflow check: for intra matrices the DC coefficient (i == 0) is
 * skipped, hence the loop starting at 'intra'. */
129 for (i = intra; i < 64; i++) {
131 if (dsp->fdct == ff_fdct_ifast) {
132 max = (8191LL * ff_aanscales[i]) >> 14;
/* Reduce the effective shift until max * qmat fits into an int. */
134 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
140 av_log(NULL, AV_LOG_INFO,
141 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
/**
 * Derive s->qscale from the current rate-distortion lambda
 * (lambda * 139 >> (FF_LAMBDA_SHIFT + 7) with rounding), clip it to the
 * user-configured [qmin, qmax] range, and refresh the squared lambda.
 */
146 static inline void update_qscale(MpegEncContext *s)
148 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
149 (FF_LAMBDA_SHIFT + 7);
150 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
/* lambda2 = lambda^2, rounded, used by RD cost computations. */
152 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/**
 * Write all 64 coefficients of a quantization matrix to the bitstream,
 * 8 bits each, in zigzag scan order.
 */
156 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
162 for (i = 0; i < 64; i++) {
163 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
170 * init s->current_picture.qscale_table from s->lambda_table
172 void ff_init_qscale_tab(MpegEncContext *s)
174 int8_t * const qscale_table = s->current_picture.qscale_table;
/* Convert each macroblock's lambda to a qscale (same lambda->qp formula
 * as update_qscale()) and clip it to the configured [qmin, qmax] range. */
177 for (i = 0; i < s->mb_num; i++) {
178 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
179 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
180 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
/**
 * Copy the fields that motion estimation may have changed from the master
 * context back into a slice-thread duplicate context.
 */
185 static void update_duplicate_context_after_me(MpegEncContext *dst,
188 #define COPY(a) dst->a= src->a
190 COPY(current_picture);
196 COPY(picture_in_gop_number);
197 COPY(gop_picture_number);
198 COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
199 COPY(progressive_frame); // FIXME don't set in encode_header
200 COPY(partitioned_frame); // FIXME don't set in encode_header
205 * Set the given MpegEncContext to defaults for encoding.
206 * the changed fields will not depend upon the prior state of the MpegEncContext.
208 static void MPV_encode_defaults(MpegEncContext *s)
211 ff_MPV_common_defaults(s);
/* Initialize the shared default f_code table; index is mv + MAX_MV. */
213 for (i = -16; i < 16; i++) {
214 default_fcode_tab[i + MAX_MV] = 1;
/* Point this context at the file-scope shared default tables. */
216 s->me.mv_penalty = default_mv_penalty;
217 s->fcode_tab = default_fcode_tab;
220 /* init video encoder */
/**
 * Initialize the MPEG-family video encoder: validate user parameters,
 * configure per-codec output format and features, set up quantizer
 * function pointers, default quantization matrices and rate control.
 * Returns 0 on success, negative on error (error paths not all visible here).
 */
221 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
223 MpegEncContext *s = avctx->priv_data;
225 int chroma_h_shift, chroma_v_shift;
227 MPV_encode_defaults(s);
/* --- pixel-format validation, per codec --- */
229 switch (avctx->codec_id) {
230 case AV_CODEC_ID_MPEG2VIDEO:
231 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
232 avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
233 av_log(avctx, AV_LOG_ERROR,
234 "only YUV420 and YUV422 are supported\n");
238 case AV_CODEC_ID_LJPEG:
/* Non-J (limited-range) YUV formats are accepted only when the user
 * relaxes standard compliance to "unofficial" or lower. */
239 if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
240 avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
241 avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
242 avctx->pix_fmt != AV_PIX_FMT_BGRA &&
243 ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
244 avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
245 avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
246 avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
247 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
251 case AV_CODEC_ID_MJPEG:
252 if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
253 avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
254 ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
255 avctx->pix_fmt != AV_PIX_FMT_YUV422P) ||
256 avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
257 av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
/* Default case: everything else only supports 4:2:0. */
262 if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
263 av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
/* --- chroma subsampling bookkeeping --- */
268 switch (avctx->pix_fmt) {
269 case AV_PIX_FMT_YUVJ422P:
270 case AV_PIX_FMT_YUV422P:
271 s->chroma_format = CHROMA_422;
273 case AV_PIX_FMT_YUVJ420P:
274 case AV_PIX_FMT_YUV420P:
276 s->chroma_format = CHROMA_420;
/* --- copy user parameters into the encoder context --- */
280 s->bit_rate = avctx->bit_rate;
281 s->width = avctx->width;
282 s->height = avctx->height;
283 if (avctx->gop_size > 600 &&
284 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
285 av_log(avctx, AV_LOG_ERROR,
286 "Warning keyframe interval too large! reducing it ...\n");
287 avctx->gop_size = 600;
289 s->gop_size = avctx->gop_size;
291 s->flags = avctx->flags;
292 s->flags2 = avctx->flags2;
293 s->max_b_frames = avctx->max_b_frames;
294 s->codec_id = avctx->codec->id;
295 s->strict_std_compliance = avctx->strict_std_compliance;
296 s->quarter_sample = (avctx->flags & CODEC_FLAG_QPEL) != 0;
297 s->mpeg_quant = avctx->mpeg_quant;
298 s->rtp_mode = !!avctx->rtp_payload_size;
299 s->intra_dc_precision = avctx->intra_dc_precision;
300 s->user_specified_pts = AV_NOPTS_VALUE;
302 if (s->gop_size <= 1) {
309 s->me_method = avctx->me_method;
312 s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
/* Adaptive quantization is on when any masking option or QP-RD is set. */
314 s->adaptive_quant = (s->avctx->lumi_masking ||
315 s->avctx->dark_masking ||
316 s->avctx->temporal_cplx_masking ||
317 s->avctx->spatial_cplx_masking ||
318 s->avctx->p_masking ||
319 s->avctx->border_masking ||
320 (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
323 s->loop_filter = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
/* --- rate-control parameter sanity checks --- */
325 if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
326 av_log(avctx, AV_LOG_ERROR,
327 "a vbv buffer size is needed, "
328 "for encoding with a maximum bitrate\n");
332 if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
333 av_log(avctx, AV_LOG_INFO,
334 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
337 if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
338 av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
342 if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
343 av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n");
347 if (avctx->rc_max_rate &&
348 avctx->rc_max_rate == avctx->bit_rate &&
349 avctx->rc_max_rate != avctx->rc_min_rate) {
350 av_log(avctx, AV_LOG_INFO,
351 "impossible bitrate constraints, this will fail\n");
/* VBV buffer must hold at least one frame's worth of bits. */
354 if (avctx->rc_buffer_size &&
355 avctx->bit_rate * (int64_t)avctx->time_base.num >
356 avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
357 av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
361 if (!s->fixed_qscale &&
362 avctx->bit_rate * av_q2d(avctx->time_base) >
363 avctx->bit_rate_tolerance) {
364 av_log(avctx, AV_LOG_ERROR,
365 "bitrate tolerance too small for bitrate\n");
/* MPEG-1/2 CBR: warn when the vbv_delay field (16 bits, 90 kHz units)
 * cannot represent the requested buffer, forcing the VBR sentinel. */
369 if (s->avctx->rc_max_rate &&
370 s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
371 (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
372 s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
373 90000LL * (avctx->rc_buffer_size - 1) >
374 s->avctx->rc_max_rate * 0xFFFFLL) {
375 av_log(avctx, AV_LOG_INFO,
376 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
377 "specified vbv buffer is too large for the given bitrate!\n");
/* --- codec-feature compatibility checks --- */
380 if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
381 s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
382 s->codec_id != AV_CODEC_ID_FLV1) {
383 av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
387 if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
388 av_log(avctx, AV_LOG_ERROR,
389 "OBMC is only supported with simple mb decision\n");
393 if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
394 av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
398 if (s->max_b_frames &&
399 s->codec_id != AV_CODEC_ID_MPEG4 &&
400 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
401 s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
402 av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
406 if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
407 s->codec_id == AV_CODEC_ID_H263 ||
408 s->codec_id == AV_CODEC_ID_H263P) &&
409 (avctx->sample_aspect_ratio.num > 255 ||
410 avctx->sample_aspect_ratio.den > 255)) {
411 av_log(avctx, AV_LOG_ERROR,
412 "Invalid pixel aspect ratio %i/%i, limit is 255/255\n",
413 avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
417 if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
418 s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
419 av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
423 // FIXME mpeg2 uses that too
424 if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
425 av_log(avctx, AV_LOG_ERROR,
426 "mpeg2 style quantization not supported by codec\n");
430 if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
431 av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
435 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
436 s->avctx->mb_decision != FF_MB_DECISION_RD) {
437 av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
441 if (s->avctx->scenechange_threshold < 1000000000 &&
442 (s->flags & CODEC_FLAG_CLOSED_GOP)) {
443 av_log(avctx, AV_LOG_ERROR,
444 "closed gop with scene change detection are not supported yet, "
445 "set threshold to 1000000000\n");
449 if (s->flags & CODEC_FLAG_LOW_DELAY) {
450 if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
451 av_log(avctx, AV_LOG_ERROR,
452 "low delay forcing is only available for mpeg2\n");
455 if (s->max_b_frames != 0) {
456 av_log(avctx, AV_LOG_ERROR,
457 "b frames cannot be used with low delay\n");
462 if (s->q_scale_type == 1) {
463 if (avctx->qmax > 12) {
464 av_log(avctx, AV_LOG_ERROR,
465 "non linear quant only supports qmax <= 12 currently\n");
/* --- threading constraints --- */
470 if (s->avctx->thread_count > 1 &&
471 s->codec_id != AV_CODEC_ID_MPEG4 &&
472 s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
473 s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
474 (s->codec_id != AV_CODEC_ID_H263P)) {
475 av_log(avctx, AV_LOG_ERROR,
476 "multi threaded encoding not supported by codec\n");
480 if (s->avctx->thread_count < 1) {
481 av_log(avctx, AV_LOG_ERROR,
482 "automatic thread number detection not supported by codec,"
487 if (s->avctx->thread_count > 1)
/* --- timebase / misc parameter checks --- */
490 if (!avctx->time_base.den || !avctx->time_base.num) {
491 av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
495 i = (INT_MAX / 2 + 128) >> 8;
496 if (avctx->mb_threshold >= i) {
497 av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
502 if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
503 av_log(avctx, AV_LOG_INFO,
504 "notice: b_frame_strategy only affects the first pass\n");
505 avctx->b_frame_strategy = 0;
/* Reduce the timebase fraction so it fits codec header fields. */
508 i = av_gcd(avctx->time_base.den, avctx->time_base.num);
510 av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
511 avctx->time_base.den /= i;
512 avctx->time_base.num /= i;
/* --- quantizer bias defaults --- */
516 if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
517 s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG) {
518 // (a + x * 3 / 8) / x
519 s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
520 s->inter_quant_bias = 0;
522 s->intra_quant_bias = 0;
524 s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
/* User-specified biases override the defaults above. */
527 if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
528 s->intra_quant_bias = avctx->intra_quant_bias;
529 if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
530 s->inter_quant_bias = avctx->inter_quant_bias;
532 av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
/* MPEG-4 stores the timebase denominator in a 16-bit field. */
535 if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
536 s->avctx->time_base.den > (1 << 16) - 1) {
537 av_log(avctx, AV_LOG_ERROR,
538 "timebase %d/%d not supported by MPEG 4 standard, "
539 "the maximum admitted value for the timebase denominator "
540 "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
544 s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
/* --- per-codec output format and feature setup --- */
546 switch (avctx->codec->id) {
547 case AV_CODEC_ID_MPEG1VIDEO:
548 s->out_format = FMT_MPEG1;
549 s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
550 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
552 case AV_CODEC_ID_MPEG2VIDEO:
553 s->out_format = FMT_MPEG1;
554 s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
555 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
558 case AV_CODEC_ID_LJPEG:
559 case AV_CODEC_ID_MJPEG:
560 s->out_format = FMT_MJPEG;
561 s->intra_only = 1; /* force intra only for jpeg */
/* BGRA lossless JPEG uses 1:1 sampling on all planes; otherwise derive
 * the JPEG sampling factors from the chroma subsampling shifts. */
562 if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
563 avctx->pix_fmt == AV_PIX_FMT_BGRA) {
564 s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
565 s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
566 s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
568 s->mjpeg_vsample[0] = 2;
569 s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
570 s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
571 s->mjpeg_hsample[0] = 2;
572 s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
573 s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
575 if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
576 ff_mjpeg_encode_init(s) < 0)
581 case AV_CODEC_ID_H261:
582 if (!CONFIG_H261_ENCODER)
584 if (ff_h261_get_picture_format(s->width, s->height) < 0) {
585 av_log(avctx, AV_LOG_ERROR,
586 "The specified picture size of %dx%d is not valid for the "
587 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
588 s->width, s->height);
591 s->out_format = FMT_H261;
595 case AV_CODEC_ID_H263:
596 if (!CONFIG_H263_ENCODER)
/* Index 8 in ff_h263_format means "no matching standard picture size". */
598 if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
599 s->width, s->height) == 8) {
600 av_log(avctx, AV_LOG_INFO,
601 "The specified picture size of %dx%d is not valid for "
602 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
603 "352x288, 704x576, and 1408x1152."
604 "Try H.263+.\n", s->width, s->height);
607 s->out_format = FMT_H263;
611 case AV_CODEC_ID_H263P:
612 s->out_format = FMT_H263;
615 s->h263_aic = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
616 s->modified_quant = s->h263_aic;
617 s->loop_filter = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
618 s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
621 /* These are just to be sure */
625 case AV_CODEC_ID_FLV1:
626 s->out_format = FMT_H263;
627 s->h263_flv = 2; /* format = 1; 11-bit codes */
628 s->unrestricted_mv = 1;
629 s->rtp_mode = 0; /* don't allow GOB */
633 case AV_CODEC_ID_RV10:
634 s->out_format = FMT_H263;
638 case AV_CODEC_ID_RV20:
639 s->out_format = FMT_H263;
642 s->modified_quant = 1;
646 s->unrestricted_mv = 0;
648 case AV_CODEC_ID_MPEG4:
649 s->out_format = FMT_H263;
651 s->unrestricted_mv = 1;
652 s->low_delay = s->max_b_frames ? 0 : 1;
653 avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
655 case AV_CODEC_ID_MSMPEG4V2:
656 s->out_format = FMT_H263;
658 s->unrestricted_mv = 1;
659 s->msmpeg4_version = 2;
663 case AV_CODEC_ID_MSMPEG4V3:
664 s->out_format = FMT_H263;
666 s->unrestricted_mv = 1;
667 s->msmpeg4_version = 3;
668 s->flipflop_rounding = 1;
672 case AV_CODEC_ID_WMV1:
673 s->out_format = FMT_H263;
675 s->unrestricted_mv = 1;
676 s->msmpeg4_version = 4;
677 s->flipflop_rounding = 1;
681 case AV_CODEC_ID_WMV2:
682 s->out_format = FMT_H263;
684 s->unrestricted_mv = 1;
685 s->msmpeg4_version = 5;
686 s->flipflop_rounding = 1;
694 avctx->has_b_frames = !s->low_delay;
698 s->progressive_frame =
699 s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
700 CODEC_FLAG_INTERLACED_ME) ||
/* --- common init, quantizer function pointers, sub-encoders --- */
704 if (ff_MPV_common_init(s) < 0)
708 ff_MPV_encode_init_x86(s);
710 if (!s->dct_quantize)
711 s->dct_quantize = ff_dct_quantize_c;
713 s->denoise_dct = denoise_dct_c;
714 s->fast_dct_quantize = s->dct_quantize;
716 s->dct_quantize = dct_quantize_trellis_c;
718 if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
719 s->chroma_qscale_table = ff_h263_chroma_qscale_table;
721 s->quant_precision = 5;
723 ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
724 ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
726 if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
727 ff_h261_encode_init(s);
728 if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
729 ff_h263_encode_init(s);
730 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
731 ff_msmpeg4_encode_init(s);
732 if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
733 && s->out_format == FMT_MPEG1)
734 ff_mpeg1_encode_init(s);
/* --- select default quant matrices (user matrices override) --- */
737 for (i = 0; i < 64; i++) {
738 int j = s->dsp.idct_permutation[i];
739 if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
741 s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
742 s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
743 } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
745 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
748 s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
749 s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
751 if (s->avctx->intra_matrix)
752 s->intra_matrix[j] = s->avctx->intra_matrix[i];
753 if (s->avctx->inter_matrix)
754 s->inter_matrix[j] = s->avctx->inter_matrix[i];
757 /* precompute matrix */
758 /* for mjpeg, we do include qscale in the matrix */
759 if (s->out_format != FMT_MJPEG) {
760 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
761 s->intra_matrix, s->intra_quant_bias, avctx->qmin,
763 ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
764 s->inter_matrix, s->inter_quant_bias, avctx->qmin,
768 if (ff_rate_control_init(s) < 0)
/**
 * Tear down the encoder: release rate-control state, common encoder
 * state, the MJPEG sub-encoder when active, and the extradata buffer.
 */
774 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
776 MpegEncContext *s = avctx->priv_data;
778 ff_rate_control_uninit(s);
780 ff_MPV_common_end(s);
781 if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
782 s->out_format == FMT_MJPEG)
783 ff_mjpeg_encode_close(s);
785 av_freep(&avctx->extradata);
/**
 * Sum of absolute errors of a 16x16 block against a constant reference
 * value 'ref' (typically the block mean); used as a flatness measure.
 */
790 static int get_sae(uint8_t *src, int ref, int stride)
795 for (y = 0; y < 16; y++) {
796 for (x = 0; x < 16; x++) {
797 acc += FFABS(src[x + y * stride] - ref);
/**
 * Count 16x16 blocks that look cheaper to code as intra: for each block,
 * compare inter SAD against 'ref' with the block's deviation from its own
 * mean (SAE) plus a fixed threshold of 500.
 */
804 static int get_intra_count(MpegEncContext *s, uint8_t *src,
805 uint8_t *ref, int stride)
813 for (y = 0; y < h; y += 16) {
814 for (x = 0; x < w; x += 16) {
815 int offset = x + y * stride;
816 int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
/* mean = rounded average of the 16x16 block (pix_sum / 256). */
818 int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
819 int sae = get_sae(src + offset, mean, stride);
/* Block counts as intra when self-similarity beats inter prediction. */
821 acc += sae + 500 < sad;
/**
 * Take a user-supplied input frame, validate/derive its pts, copy or
 * reference its data into an internal Picture, and append it to the
 * s->input_picture reordering queue at position 'encoding_delay'.
 */
828 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
832 int i, display_picture_number = 0, ret;
833 const int encoding_delay = s->max_b_frames ? s->max_b_frames :
834 (s->low_delay ? 0 : 1);
839 display_picture_number = s->input_picture_number++;
/* pts handling: require strictly increasing user timestamps; when the
 * user gave none, synthesize one from the previous pts or the picture
 * number. */
841 if (pts != AV_NOPTS_VALUE) {
842 if (s->user_specified_pts != AV_NOPTS_VALUE) {
844 int64_t last = s->user_specified_pts;
847 av_log(s->avctx, AV_LOG_ERROR,
848 "Error, Invalid timestamp=%"PRId64", "
849 "last=%"PRId64"\n", pts, s->user_specified_pts);
853 if (!s->low_delay && display_picture_number == 1)
854 s->dts_delta = time - last;
856 s->user_specified_pts = pts;
858 if (s->user_specified_pts != AV_NOPTS_VALUE) {
859 s->user_specified_pts =
860 pts = s->user_specified_pts + 1;
861 av_log(s->avctx, AV_LOG_INFO,
862 "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
865 pts = display_picture_number;
/* NOTE(review): the trailing ';' below makes this 'if' an empty
 * statement, so the linesize checks that follow run unconditionally —
 * this looks like a stray-semicolon bug (the check presumably should
 * gate the direct-reference path); confirm against upstream. */
871 if (!pic_arg->buf[0]);
873 if (pic_arg->linesize[0] != s->linesize)
875 if (pic_arg->linesize[1] != s->uvlinesize)
877 if (pic_arg->linesize[2] != s->uvlinesize)
880 av_dlog(s->avctx, "%d %d %d %d\n", pic_arg->linesize[0],
881 pic_arg->linesize[1], s->linesize, s->uvlinesize);
/* Direct path: reference the caller's buffers without copying. */
884 i = ff_find_unused_picture(s, 1);
888 pic = &s->picture[i];
891 if ((ret = av_frame_ref(&pic->f, pic_arg)) < 0)
893 if (ff_alloc_picture(s, pic, 1) < 0) {
/* Copy path: allocate an internal picture and copy plane data. */
897 i = ff_find_unused_picture(s, 0);
901 pic = &s->picture[i];
904 if (ff_alloc_picture(s, pic, 0) < 0) {
908 if (pic->f.data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
909 pic->f.data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
910 pic->f.data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
913 int h_chroma_shift, v_chroma_shift;
914 av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
/* Copy luma plus both chroma planes, respecting subsampling shifts. */
918 for (i = 0; i < 3; i++) {
919 int src_stride = pic_arg->linesize[i];
920 int dst_stride = i ? s->uvlinesize : s->linesize;
921 int h_shift = i ? h_chroma_shift : 0;
922 int v_shift = i ? v_chroma_shift : 0;
923 int w = s->width >> h_shift;
924 int h = s->height >> v_shift;
925 uint8_t *src = pic_arg->data[i];
926 uint8_t *dst = pic->f.data[i];
928 if (!s->avctx->rc_buffer_size)
929 dst += INPLACE_OFFSET;
/* Fast path: matching strides allow one bulk memcpy per plane. */
931 if (src_stride == dst_stride)
932 memcpy(dst, src, src_stride * h);
943 ret = av_frame_copy_props(&pic->f, pic_arg);
947 pic->f.display_picture_number = display_picture_number;
948 pic->f.pts = pts; // we set this here to avoid modifiying pic_arg
951 /* shift buffer entries */
952 for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
953 s->input_picture[i - 1] = s->input_picture[i];
955 s->input_picture[encoding_delay] = (Picture*) pic;
/**
 * Decide whether frame p may be skipped (coded as identical to ref):
 * accumulate a per-8x8-block comparison score over all three planes using
 * the frame_skip_cmp function, folded according to frame_skip_exp, then
 * test it against the frame-skip threshold and a lambda-scaled factor.
 */
960 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
966 for (plane = 0; plane < 3; plane++) {
967 const int stride = p->f.linesize[plane];
/* bw: blocks-per-MB in each direction — 2 for luma, 1 for chroma. */
968 const int bw = plane ? 1 : 2;
969 for (y = 0; y < s->mb_height * bw; y++) {
970 for (x = 0; x < s->mb_width * bw; x++) {
/* Non-shared pictures carry a 16-byte INPLACE-style offset. */
971 int off = p->shared ? 0 : 16;
972 uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
973 uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
974 int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
/* frame_skip_exp selects how block scores are folded together. */
976 switch (s->avctx->frame_skip_exp) {
977 case 0: score = FFMAX(score, v); break;
978 case 1: score += FFABS(v); break;
979 case 2: score += v * v; break;
980 case 3: score64 += FFABS(v * v * (int64_t)v); break;
981 case 4: score64 += v * v * (int64_t)(v * v); break;
990 if (score64 < s->avctx->frame_skip_threshold)
992 if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
/**
 * Encode one frame with a throwaway codec context (used by the B-frame
 * strategy estimator) and return a size-derived result; the temporary
 * packet is always freed.
 */
997 static int encode_frame(AVCodecContext *c, AVFrame *frame)
999 AVPacket pkt = { 0 };
1000 int ret, got_output;
1002 av_init_packet(&pkt);
1003 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1008 av_free_packet(&pkt);
/**
 * Estimate the best number of consecutive B-frames (b_frame_strategy 2):
 * encode downscaled versions of the queued input pictures with a
 * temporary encoder for each candidate B-frame count j, compute a
 * rate-distortion cost for each, and return the count with minimal cost.
 */
1012 static int estimate_best_b_count(MpegEncContext *s)
1014 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1015 AVCodecContext *c = avcodec_alloc_context3(NULL);
1016 AVFrame input[FF_MAX_B_FRAMES + 2];
/* brd_scale: downscale shift (0..3) applied to the trial encodes. */
1017 const int scale = s->avctx->brd_scale;
1018 int i, j, out_size, p_lambda, b_lambda, lambda2;
1019 int64_t best_rd = INT64_MAX;
1020 int best_b_count = -1;
1022 assert(scale >= 0 && scale <= 3);
1025 //s->next_picture_ptr->quality;
1026 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1027 //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1028 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1029 if (!b_lambda) // FIXME we should do this somewhere else
1030 b_lambda = p_lambda;
1031 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
/* Configure the scratch encoder to mirror the relevant user settings. */
1034 c->width = s->width >> scale;
1035 c->height = s->height >> scale;
1036 c->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
1037 CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
1038 c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
1039 c->mb_decision = s->avctx->mb_decision;
1040 c->me_cmp = s->avctx->me_cmp;
1041 c->mb_cmp = s->avctx->mb_cmp;
1042 c->me_sub_cmp = s->avctx->me_sub_cmp;
1043 c->pix_fmt = AV_PIX_FMT_YUV420P;
1044 c->time_base = s->avctx->time_base;
1045 c->max_b_frames = s->max_b_frames;
1047 if (avcodec_open2(c, codec, NULL) < 0)
/* Build downscaled copies of the reference + queued input pictures. */
1050 for (i = 0; i < s->max_b_frames + 2; i++) {
1051 int ysize = c->width * c->height;
1052 int csize = (c->width / 2) * (c->height / 2);
/* Slot 0 is the previous reference (next_picture_ptr); slots 1..N are
 * the queued input pictures. */
1053 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1054 s->next_picture_ptr;
1056 avcodec_get_frame_defaults(&input[i]);
1057 input[i].data[0] = av_malloc(ysize + 2 * csize);
1058 input[i].data[1] = input[i].data[0] + ysize;
1059 input[i].data[2] = input[i].data[1] + csize;
1060 input[i].linesize[0] = c->width;
1061 input[i].linesize[1] =
1062 input[i].linesize[2] = c->width / 2;
1064 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1065 pre_input = *pre_input_ptr;
1067 if (!pre_input.shared && i) {
1068 pre_input.f.data[0] += INPLACE_OFFSET;
1069 pre_input.f.data[1] += INPLACE_OFFSET;
1070 pre_input.f.data[2] += INPLACE_OFFSET;
1073 s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
1074 pre_input.f.data[0], pre_input.f.linesize[0],
1075 c->width, c->height);
1076 s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
1077 pre_input.f.data[1], pre_input.f.linesize[1],
1078 c->width >> 1, c->height >> 1);
1079 s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
1080 pre_input.f.data[2], pre_input.f.linesize[2],
1081 c->width >> 1, c->height >> 1);
/* Trial-encode each candidate B-frame count j and accumulate RD cost. */
1085 for (j = 0; j < s->max_b_frames + 1; j++) {
1088 if (!s->input_picture[j])
1091 c->error[0] = c->error[1] = c->error[2] = 0;
/* Frame 0 acts as the I-frame anchor for the trial GOP. */
1093 input[0].pict_type = AV_PICTURE_TYPE_I;
1094 input[0].quality = 1 * FF_QP2LAMBDA;
1096 out_size = encode_frame(c, &input[0]);
1098 //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1100 for (i = 0; i < s->max_b_frames + 1; i++) {
/* Every (j+1)-th frame (and the last one) is coded as P, rest as B. */
1101 int is_p = i % (j + 1) == j || i == s->max_b_frames;
1103 input[i + 1].pict_type = is_p ?
1104 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1105 input[i + 1].quality = is_p ? p_lambda : b_lambda;
1107 out_size = encode_frame(c, &input[i + 1]);
1109 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1112 /* get the delayed frames */
1114 out_size = encode_frame(c, NULL);
1115 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
/* Add the distortion (PSNR error sums) recorded by the scratch encoder. */
1118 rd += c->error[0] + c->error[1] + c->error[2];
1129 for (i = 0; i < s->max_b_frames + 2; i++) {
1130 av_freep(&input[i].data[0]);
1133 return best_b_count;
/**
 * Pick the next picture to encode: decide its coding type (I/P/B), apply
 * frame skipping and the configured B-frame strategy, reorder the input
 * queue into coding order, and set up new_picture / current_picture.
 */
1136 static int select_input_picture(MpegEncContext *s)
/* Advance the reordered queue by one slot. */
1140 for (i = 1; i < MAX_PICTURE_COUNT; i++)
1141 s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1142 s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1144 /* set next picture type & ordering */
1145 if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
/* No reference yet (or intra-only codec): force an I-frame. */
1146 if (/*s->picture_in_gop_number >= s->gop_size ||*/
1147 s->next_picture_ptr == NULL || s->intra_only) {
1148 s->reordered_input_picture[0] = s->input_picture[0];
1149 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
1150 s->reordered_input_picture[0]->f.coded_picture_number =
1151 s->coded_picture_number++;
/* Optional frame skipping against the previous reference. */
1155 if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1156 if (s->picture_in_gop_number < s->gop_size &&
1157 skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1158 // FIXME check that te gop check above is +-1 correct
1159 av_frame_unref(&s->input_picture[0]->f);
1162 ff_vbv_update(s, 0);
/* Two-pass mode: picture types come from the first-pass log. */
1168 if (s->flags & CODEC_FLAG_PASS2) {
1169 for (i = 0; i < s->max_b_frames + 1; i++) {
1170 int pict_num = s->input_picture[0]->f.display_picture_number + i;
1172 if (pict_num >= s->rc_context.num_entries)
1174 if (!s->input_picture[i]) {
1175 s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1179 s->input_picture[i]->f.pict_type =
1180 s->rc_context.entry[pict_num].new_pict_type;
/* B-frame count selection according to b_frame_strategy. */
1184 if (s->avctx->b_frame_strategy == 0) {
1185 b_frames = s->max_b_frames;
1186 while (b_frames && !s->input_picture[b_frames])
1188 } else if (s->avctx->b_frame_strategy == 1) {
/* Strategy 1: score queued frames by intra-block count, cut the run
 * where the scene complexity exceeds b_sensitivity. */
1189 for (i = 1; i < s->max_b_frames + 1; i++) {
1190 if (s->input_picture[i] &&
1191 s->input_picture[i]->b_frame_score == 0) {
1192 s->input_picture[i]->b_frame_score =
1194 s->input_picture[i ]->f.data[0],
1195 s->input_picture[i - 1]->f.data[0],
1199 for (i = 0; i < s->max_b_frames + 1; i++) {
1200 if (s->input_picture[i] == NULL ||
1201 s->input_picture[i]->b_frame_score - 1 >
1202 s->mb_num / s->avctx->b_sensitivity)
1206 b_frames = FFMAX(0, i - 1);
/* Reset the scores consumed by this decision. */
1209 for (i = 0; i < b_frames + 1; i++) {
1210 s->input_picture[i]->b_frame_score = 0;
1212 } else if (s->avctx->b_frame_strategy == 2) {
1213 b_frames = estimate_best_b_count(s);
1215 av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
/* Honor user-forced picture types inside the candidate B run. */
1221 for (i = b_frames - 1; i >= 0; i--) {
1222 int type = s->input_picture[i]->f.pict_type;
1223 if (type && type != AV_PICTURE_TYPE_B)
1226 if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
1227 b_frames == s->max_b_frames) {
1228 av_log(s->avctx, AV_LOG_ERROR,
1229 "warning, too many b frames in a row\n");
/* GOP boundary: possibly shorten the B run and force an I-frame. */
1232 if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1233 if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1234 s->gop_size > s->picture_in_gop_number) {
1235 b_frames = s->gop_size - s->picture_in_gop_number - 1;
1237 if (s->flags & CODEC_FLAG_CLOSED_GOP)
1239 s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1243 if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1244 s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
/* Emit the anchor first (coding order), then the B-frames behind it. */
1247 s->reordered_input_picture[0] = s->input_picture[b_frames];
1248 if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
1249 s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
1250 s->reordered_input_picture[0]->f.coded_picture_number =
1251 s->coded_picture_number++;
1252 for (i = 0; i < b_frames; i++) {
1253 s->reordered_input_picture[i + 1] = s->input_picture[i];
1254 s->reordered_input_picture[i + 1]->f.pict_type =
1256 s->reordered_input_picture[i + 1]->f.coded_picture_number =
1257 s->coded_picture_number++;
1262 if (s->reordered_input_picture[0]) {
/* reference = 3 for I/P (used as reference), 0 for B. */
1263 s->reordered_input_picture[0]->reference =
1264 s->reordered_input_picture[0]->f.pict_type !=
1265 AV_PICTURE_TYPE_B ? 3 : 0;
1267 ff_mpeg_unref_picture(s, &s->new_picture);
1268 if ((ret = ff_mpeg_ref_picture(s, &s->new_picture, s->reordered_input_picture[0])))
1271 if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1272 // input is a shared pix, so we can't modifiy it -> alloc a new
1273 // one & ensure that the shared one is reuseable
1276 int i = ff_find_unused_picture(s, 0);
1279 pic = &s->picture[i];
1281 pic->reference = s->reordered_input_picture[0]->reference;
1282 if (ff_alloc_picture(s, pic, 0) < 0) {
1286 ret = av_frame_copy_props(&pic->f, &s->reordered_input_picture[0]->f);
1290 /* mark us unused / free shared pic */
1291 av_frame_unref(&s->reordered_input_picture[0]->f);
1292 s->reordered_input_picture[0]->shared = 0;
1294 s->current_picture_ptr = pic;
1296 // input is not a shared pix -> reuse buffer for current_pix
1297 s->current_picture_ptr = s->reordered_input_picture[0];
1298 for (i = 0; i < 4; i++) {
1299 s->new_picture.f.data[i] += INPLACE_OFFSET;
1302 ff_mpeg_unref_picture(s, &s->current_picture);
1303 if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
1304 s->current_picture_ptr)) < 0)
1307 s->picture_number = s->new_picture.f.display_picture_number;
1309 ff_mpeg_unref_picture(s, &s->new_picture);
/*
 * Encode one input frame into pkt.
 *
 * Flow: queue pic_arg, select the next picture to code (B-frame
 * reordering), allocate the output packet, run encode_picture(),
 * then perform rate-control / VBV bookkeeping (possibly scheduling a
 * re-encode with a higher lambda), emit stuffing bits, patch the
 * MPEG-1/2 vbv_delay field for CBR, and set pts/dts/keyframe flags.
 *
 * NOTE(review): lines appear elided in this chunk of the file; some
 * braces, error paths and intermediate statements are not visible.
 */
1314 int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1315                           const AVFrame *pic_arg, int *got_packet)
1317     MpegEncContext *s = avctx->priv_data;
1318     int i, stuffing_count, ret;
1319     int context_count = s->slice_context_count;
1321     s->picture_in_gop_number++;
1323     if (load_input_picture(s, pic_arg) < 0)
1326     if (select_input_picture(s) < 0) {
// A pending coded picture exists only if select_input_picture() chose one.
1331     if (s->new_picture.f.data[0]) {
1333         (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*MAX_MB_BYTES)) < 0)
// H.263 macroblock-info side data: 12 bytes per macroblock (see write_mb_info()).
1336             s->mb_info_ptr = av_packet_new_side_data(pkt,
1337                                  AV_PKT_DATA_H263_MB_INFO,
1338                                  s->mb_width*s->mb_height*12);
1339             s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
// Split the packet buffer between slice threads proportionally to their
// macroblock-row share; 64-bit math avoids overflow of size * row.
1342         for (i = 0; i < context_count; i++) {
1343             int start_y = s->thread_context[i]->start_mb_y;
1344             int end_y = s->thread_context[i]-> end_mb_y;
1345             int h = s->mb_height;
1346             uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1347             uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1349             init_put_bits(&s->thread_context[i]->pb, start, end - start);
1352         s->pict_type = s->new_picture.f.pict_type;
1354         ff_MPV_frame_start(s, avctx);
1356         if (encode_picture(s, s->picture_number) < 0)
// Export per-frame bit statistics to the AVCodecContext.
1359         avctx->header_bits = s->header_bits;
1360         avctx->mv_bits     = s->mv_bits;
1361         avctx->misc_bits   = s->misc_bits;
1362         avctx->i_tex_bits  = s->i_tex_bits;
1363         avctx->p_tex_bits  = s->p_tex_bits;
1364         avctx->i_count     = s->i_count;
1365         // FIXME f/b_count in avctx
1366         avctx->p_count     = s->mb_num - s->i_count - s->skip_count;
1367         avctx->skip_count  = s->skip_count;
1369         ff_MPV_frame_end(s);
1371         if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1372             ff_mjpeg_encode_picture_trailer(s);
// VBV check: if the frame overshoots the buffer, raise lambda and
// undo per-frame state so the picture can be re-encoded.
1374         if (avctx->rc_buffer_size) {
1375             RateControlContext *rcc = &s->rc_context;
1376             int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1378             if (put_bits_count(&s->pb) > max_size &&
1379                 s->lambda < s->avctx->lmax) {
1380                 s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1381                                        (s->qscale + 1) / s->qscale);
1382                 if (s->adaptive_quant) {
1384                     for (i = 0; i < s->mb_height * s->mb_stride; i++)
1385                         s->lambda_table[i] =
1386                             FFMAX(s->lambda_table[i] + 1,
1387                                   s->lambda_table[i] * (s->qscale + 1) /
1390                 s->mb_skipped = 0;        // done in MPV_frame_start()
1391                 // done in encode_picture() so we must undo it
1392                 if (s->pict_type == AV_PICTURE_TYPE_P) {
1393                     if (s->flipflop_rounding ||
1394                         s->codec_id == AV_CODEC_ID_H263P ||
1395                         s->codec_id == AV_CODEC_ID_MPEG4)
1396                         s->no_rounding ^= 1;
1398                 if (s->pict_type != AV_PICTURE_TYPE_B) {
1399                     s->time_base = s->last_time_base;
1400                     s->last_non_b_time = s->time - s->pp_time;
// Rewind each slice thread's bitstream writer for the re-encode pass.
1402                 for (i = 0; i < context_count; i++) {
1403                     PutBitContext *pb = &s->thread_context[i]->pb;
1404                     init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1409             assert(s->avctx->rc_max_rate);
1412         if (s->flags & CODEC_FLAG_PASS1)
1413             ff_write_pass1_stats(s);
// Accumulate the per-plane error (PSNR) stats.
1415         for (i = 0; i < 4; i++) {
1416             s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
1417             avctx->error[i] += s->current_picture_ptr->f.error[i];
1420         if (s->flags & CODEC_FLAG_PASS1)
1421             assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1422                    avctx->i_tex_bits + avctx->p_tex_bits ==
1423                    put_bits_count(&s->pb));
1424         flush_put_bits(&s->pb);
1425         s->frame_bits = put_bits_count(&s->pb);
// Rate control may demand stuffing bytes to keep the VBV buffer from
// underflowing; emit them in the codec-specific format.
1427         stuffing_count = ff_vbv_update(s, s->frame_bits);
1428         if (stuffing_count) {
1429             if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1430                 stuffing_count + 50) {
1431                 av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1435             switch (s->codec_id) {
1436             case AV_CODEC_ID_MPEG1VIDEO:
1437             case AV_CODEC_ID_MPEG2VIDEO:
1438                 while (stuffing_count--) {
1439                     put_bits(&s->pb, 8, 0);
// MPEG-4 stuffing: a 32-bit startcode (0x000001C3) then 0xFF filler bytes.
1442             case AV_CODEC_ID_MPEG4:
1443                 put_bits(&s->pb, 16, 0);
1444                 put_bits(&s->pb, 16, 0x1C3);
1445                 stuffing_count -= 4;
1446                 while (stuffing_count--) {
1447                     put_bits(&s->pb, 8, 0xFF);
1451                 av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1453             flush_put_bits(&s->pb);
1454             s->frame_bits = put_bits_count(&s->pb);
1457         /* update mpeg1/2 vbv_delay for CBR */
1458         if (s->avctx->rc_max_rate &&
1459             s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1460             s->out_format == FMT_MPEG1 &&
1461             90000LL * (avctx->rc_buffer_size - 1) <=
1462                 s->avctx->rc_max_rate * 0xFFFFLL) {
1463             int vbv_delay, min_delay;
1464             double inbits  = s->avctx->rc_max_rate *
1465                              av_q2d(s->avctx->time_base);
1466             int    minbits = s->frame_bits - 8 *
1467                              (s->vbv_delay_ptr - s->pb.buf - 1);
1468             double bits    = s->rc_context.buffer_index + minbits - inbits;
1471                 av_log(s->avctx, AV_LOG_ERROR,
1472                        "Internal error, negative bits\n");
1474             assert(s->repeat_first_field == 0);
// vbv_delay is expressed in 90 kHz clock ticks (16-bit field).
1476             vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1477             min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1478                         s->avctx->rc_max_rate;
1480             vbv_delay = FFMAX(vbv_delay, min_delay);
1482             assert(vbv_delay < 0xFFFF);
// Patch the 16-bit vbv_delay straddling three bytes in the already
// written picture header (3 high bits, 8 middle bits, 5 low bits).
1484             s->vbv_delay_ptr[0] &= 0xF8;
1485             s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1486             s->vbv_delay_ptr[1]  = vbv_delay >> 5;
1487             s->vbv_delay_ptr[2] &= 0x07;
1488             s->vbv_delay_ptr[2] |= vbv_delay << 3;
// Export in 27 MHz units (90 kHz * 300).
1489             avctx->vbv_delay     = vbv_delay * 300;
1491         s->total_bits     += s->frame_bits;
1492         avctx->frame_bits  = s->frame_bits;
// Timestamps: with B-frame reordering dts lags pts; the very first
// coded picture uses pts - dts_delta, later ones use the queued pts.
1494         pkt->pts = s->current_picture.f.pts;
1495         if (!s->low_delay) {
1496             if (!s->current_picture.f.coded_picture_number)
1497                 pkt->dts = pkt->pts - s->dts_delta;
1499                 pkt->dts = s->reordered_pts;
1500             s->reordered_pts = s->input_picture[0]->f.pts;
1502             pkt->dts = pkt->pts;
1503         if (s->current_picture.f.key_frame)
1504             pkt->flags |= AV_PKT_FLAG_KEY;
1506             av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1510     assert((s->frame_bits & 7) == 0);
1512     pkt->size         = s->frame_bits / 8;
1513     *got_packet       = !!pkt->size;
/*
 * Zero out block n entirely when its few low-frequency coefficients are
 * not worth coding: each coefficient of magnitude 1 contributes a
 * position-dependent score from tab[], and if the total stays below
 * |threshold| the whole block (except possibly the intra DC term) is
 * discarded by setting block_last_index to 0 or -1.
 *
 * NOTE(review): lines appear elided here (score accumulation, skip_dc
 * setup and some braces are not visible).
 */
1517 static inline void dct_single_coeff_elimination(MpegEncContext *s,
1518                                                 int n, int threshold)
// Score table in scan order: coefficients near DC cost more to drop.
1520     static const char tab[64] = {
1521         3, 2, 2, 1, 1, 1, 1, 1,
1522         1, 1, 1, 1, 1, 1, 1, 1,
1523         1, 1, 1, 1, 1, 1, 1, 1,
1524         0, 0, 0, 0, 0, 0, 0, 0,
1525         0, 0, 0, 0, 0, 0, 0, 0,
1526         0, 0, 0, 0, 0, 0, 0, 0,
1527         0, 0, 0, 0, 0, 0, 0, 0,
1528         0, 0, 0, 0, 0, 0, 0, 0
1533     int16_t *block = s->block[n];
1534     const int last_index = s->block_last_index[n];
// Negative threshold selects the skip-DC variant (keep the DC term).
1537     if (threshold < 0) {
1539         threshold = -threshold;
1543     /* Are all we could set to zero already zero? */
1544     if (last_index <= skip_dc - 1)
1547     for (i = 0; i <= last_index; i++) {
1548         const int j = s->intra_scantable.permutated[i];
1549         const int level = FFABS(block[j]);
1551             if (skip_dc && i == 0)
1555         } else if (level > 1) {
// Block survives elimination: nothing to clear.
1561     if (score >= threshold)
// Clear every (non-DC when skip_dc) coefficient up to last_index.
1563     for (i = skip_dc; i <= last_index; i++) {
1564         const int j = s->intra_scantable.permutated[i];
1568         s->block_last_index[n] = 0;
1570         s->block_last_index[n] = -1;
/*
 * Clamp the quantized coefficients of one block into the codec's legal
 * range [s->min_qcoeff, s->max_qcoeff], counting how many were clipped
 * and warning once per block in simple macroblock-decision mode.
 *
 * NOTE(review): lines appear elided here (the overflow counter setup,
 * the intra check before skipping DC, and the clamping assignments are
 * not visible).
 */
1573 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1577     const int maxlevel = s->max_qcoeff;
1578     const int minlevel = s->min_qcoeff;
1582         i = 1; // skip clipping of intra dc
1586     for (; i <= last_index; i++) {
1587         const int j = s->intra_scantable.permutated[i];
1588         int level = block[j];
1590         if (level > maxlevel) {
1593         } else if (level < minlevel) {
// Only warn in FF_MB_DECISION_SIMPLE; RD modes handle this differently.
1601     if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1602         av_log(s->avctx, AV_LOG_INFO,
1603                "warning, clipping %d dct coefficients to %d..%d\n",
1604                overflow, minlevel, maxlevel);
/*
 * Compute a per-pixel perceptual weight for an 8x8 block, used by the
 * quantizer noise shaping: each weight is derived from the local
 * variance of the 3x3 neighborhood (clamped to the block edges) via
 * 36 * sqrt(count*sqr - sum*sum) / count.
 *
 * NOTE(review): lines appear elided here (the count/sum/sqr
 * accumulation statements are not visible).
 */
1607 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1611     for (y = 0; y < 8; y++) {
1612         for (x = 0; x < 8; x++) {
// 3x3 window around (x, y), clipped to the 8x8 block.
1618             for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1619                 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1620                     int v = ptr[x2 + y2 * stride];
1626             weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
/*
 * Encode a single macroblock: fetch (intra) or motion-compensate and
 * difference (inter) the pixel data, decide interlaced vs progressive
 * DCT, quantize all blocks, apply optional noise shaping and
 * coefficient elimination, then emit the bitstream via the
 * codec-specific macroblock encoder.
 *
 * mb_block_height / mb_block_count parametrize 4:2:0 (8, 6) versus
 * 4:2:2 (16, 8) chroma layouts — see encode_mb().
 *
 * NOTE(review): lines appear elided in this chunk; various braces,
 * declarations and else-branches are not visible.
 */
1631 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1632                                                 int motion_x, int motion_y,
1633                                                 int mb_block_height,
1636     int16_t weight[8][64];
1637     int16_t orig[8][64];
1638     const int mb_x = s->mb_x;
1639     const int mb_y = s->mb_y;
1642     int dct_offset = s->linesize * 8; // default for progressive frames
1643     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1646     for (i = 0; i < mb_block_count; i++)
1647         skip_dct[i] = s->skipdct;
// Adaptive quantization: pick this MB's lambda/qscale from the tables
// filled by rate control, clamping dquant to codec limits.
1649     if (s->adaptive_quant) {
1650         const int last_qp = s->qscale;
1651         const int mb_xy = mb_x + mb_y * s->mb_stride;
1653         s->lambda = s->lambda_table[mb_xy];
1656         if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1657             s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
1658             s->dquant = s->qscale - last_qp;
1660             if (s->out_format == FMT_H263) {
1661                 s->dquant = av_clip(s->dquant, -2, 2);
1663                 if (s->codec_id == AV_CODEC_ID_MPEG4) {
// MPEG-4 restrictions: direct-mode B MBs and 8x8 MBs cannot change qp.
1665                         if (s->pict_type == AV_PICTURE_TYPE_B) {
1666                             if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1669                         if (s->mv_type == MV_TYPE_8X8)
1675         ff_set_qscale(s, last_qp + s->dquant);
1676     } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1677         ff_set_qscale(s, s->qscale + s->dquant);
// Source pointers for this macroblock in the picture being coded.
1679     wrap_y = s->linesize;
1680     wrap_c = s->uvlinesize;
1681     ptr_y  = s->new_picture.f.data[0] +
1682              (mb_y * 16 * wrap_y)              + mb_x * 16;
1683     ptr_cb = s->new_picture.f.data[1] +
1684              (mb_y * mb_block_height * wrap_c) + mb_x * 8;
1685     ptr_cr = s->new_picture.f.data[2] +
1686              (mb_y * mb_block_height * wrap_c) + mb_x * 8;
// Edge macroblocks: replicate pixels into the emu buffer so the DCT
// reads valid data beyond the picture boundary.
1688     if (mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) {
1689         uint8_t *ebuf = s->edge_emu_buffer + 32;
1690         s->vdsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
1691                                  mb_y * 16, s->width, s->height);
1693         s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, 8,
1694                                  mb_block_height, mb_x * 8, mb_y * 8,
1695                                  s->width >> 1, s->height >> 1);
1696         ptr_cb = ebuf + 18 * wrap_y;
1697         s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, 8,
1698                                  mb_block_height, mb_x * 8, mb_y * 8,
1699                                  s->width >> 1, s->height >> 1);
1700         ptr_cr = ebuf + 18 * wrap_y + 8;
// Intra path: decide field vs frame DCT by comparing ildct scores of
// the progressive layout against the interlaced (every-other-line) one.
1704         if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1705             int progressive_score, interlaced_score;
1707             s->interlaced_dct = 0;
1708             progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1710                                 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1711                                                     NULL, wrap_y, 8) - 400;
1713             if (progressive_score > 0) {
1714                 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1715                                                        NULL, wrap_y * 2, 8) +
1716                                    s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1717                                                        NULL, wrap_y * 2, 8);
1718                 if (progressive_score > interlaced_score) {
1719                     s->interlaced_dct = 1;
1721                     dct_offset = wrap_y;
1723                     if (s->chroma_format == CHROMA_422)
// Fetch the four luma 8x8 blocks (and chroma below) for intra coding.
1729         s->dsp.get_pixels(s->block[0], ptr_y                  , wrap_y);
1730         s->dsp.get_pixels(s->block[1], ptr_y              + 8 , wrap_y);
1731         s->dsp.get_pixels(s->block[2], ptr_y + dct_offset     , wrap_y);
1732         s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
1734         if (s->flags & CODEC_FLAG_GRAY) {
1738             s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1739             s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1740             if (!s->chroma_y_shift) { /* 422 */
1741                 s->dsp.get_pixels(s->block[6],
1742                                   ptr_cb + (dct_offset >> 1), wrap_c);
1743                 s->dsp.get_pixels(s->block[7],
1744                                   ptr_cr + (dct_offset >> 1), wrap_c);
// Inter path: motion-compensate into s->dest, then code the residual.
1748         op_pixels_func (*op_pix)[4];
1749         qpel_mc_func (*op_qpix)[16];
1750         uint8_t *dest_y, *dest_cb, *dest_cr;
1752         dest_y  = s->dest[0];
1753         dest_cb = s->dest[1];
1754         dest_cr = s->dest[2];
// Rounding choice: B frames always use rounding prediction.
1756         if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1757             op_pix  = s->hdsp.put_pixels_tab;
1758             op_qpix = s->dsp.put_qpel_pixels_tab;
1760             op_pix  = s->hdsp.put_no_rnd_pixels_tab;
1761             op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
1764         if (s->mv_dir & MV_DIR_FORWARD) {
1765             ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
1766                           s->last_picture.f.data,
// Second direction averages into the forward prediction (bidir MBs).
1768             op_pix  = s->hdsp.avg_pixels_tab;
1769             op_qpix = s->dsp.avg_qpel_pixels_tab;
1771         if (s->mv_dir & MV_DIR_BACKWARD) {
1772             ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
1773                           s->next_picture.f.data,
// Same interlaced-DCT decision as the intra path, but on the residual.
1777         if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1778             int progressive_score, interlaced_score;
1780             s->interlaced_dct = 0;
1781             progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
1784                                 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
1785                                                     ptr_y + wrap_y * 8, wrap_y,
1788             if (s->avctx->ildct_cmp == FF_CMP_VSSE)
1789                 progressive_score -= 400;
1791             if (progressive_score > 0) {
1792                 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
1795                                    s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
1799                 if (progressive_score > interlaced_score) {
1800                     s->interlaced_dct = 1;
1802                     dct_offset = wrap_y;
1804                     if (s->chroma_format == CHROMA_422)
// Residual = source minus motion-compensated prediction, per 8x8 block.
1810         s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
1811         s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
1812         s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
1813                            dest_y + dct_offset, wrap_y);
1814         s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
1815                            dest_y + dct_offset + 8, wrap_y);
1817         if (s->flags & CODEC_FLAG_GRAY) {
1821             s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
1822             s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
1823             if (!s->chroma_y_shift) { /* 422 */
1824                 s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
1825                                    dest_cb + (dct_offset >> 1), wrap_c);
1826                 s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
1827                                    dest_cr + (dct_offset >> 1), wrap_c);
1830         /* pre quantization */
// Cheap skip test: in low-variance MBs, blocks whose SAD against the
// prediction is below 20*qscale are not worth transforming at all.
1831         if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
1832                 2 * s->qscale * s->qscale) {
1834             if (s->dsp.sad[1](NULL, ptr_y , dest_y,
1835                               wrap_y, 8) < 20 * s->qscale)
1837             if (s->dsp.sad[1](NULL, ptr_y + 8,
1838                               dest_y + 8, wrap_y, 8) < 20 * s->qscale)
1840             if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
1841                               dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
1843             if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
1844                               dest_y + dct_offset + 8,
1845                               wrap_y, 8) < 20 * s->qscale)
1847             if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
1848                               wrap_c, 8) < 20 * s->qscale)
1850             if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
1851                               wrap_c, 8) < 20 * s->qscale)
1853             if (!s->chroma_y_shift) { /* 422 */
1854                 if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
1855                                   dest_cb + (dct_offset >> 1),
1856                                   wrap_c, 8) < 20 * s->qscale)
1858                 if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
1859                                   dest_cr + (dct_offset >> 1),
1860                                   wrap_c, 8) < 20 * s->qscale)
// Noise shaping: build perceptual weights and keep a copy of the
// unquantized blocks for the refinement pass below.
1866     if (s->quantizer_noise_shaping) {
1868             get_visual_weight(weight[0], ptr_y                 , wrap_y);
1870             get_visual_weight(weight[1], ptr_y              + 8, wrap_y);
1872             get_visual_weight(weight[2], ptr_y + dct_offset    , wrap_y);
1874             get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
1876             get_visual_weight(weight[4], ptr_cb                , wrap_c);
1878             get_visual_weight(weight[5], ptr_cr                , wrap_c);
1879         if (!s->chroma_y_shift) { /* 422 */
1881                 get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
1884                 get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
1887         memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
1890     /* DCT & quantize */
1891     assert(s->out_format != FMT_MJPEG || s->qscale == 8);
1893         for (i = 0; i < mb_block_count; i++) {
1896                 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
1897                 // FIXME we could decide to change to quantizer instead of
1899                 // JS: I don't think that would be a good idea it could lower
1900                 //     quality instead of improve it. Just INTRADC clipping
1901                 //     deserves changes in quantizer
1903                     clip_coeffs(s, s->block[i], s->block_last_index[i]);
1905                 s->block_last_index[i] = -1;
1907         if (s->quantizer_noise_shaping) {
1908             for (i = 0; i < mb_block_count; i++) {
1910                     s->block_last_index[i] =
1911                         dct_quantize_refine(s, s->block[i], weight[i],
1912                                             orig[i], i, s->qscale);
// Single-coefficient elimination: drop marginal inter blocks entirely.
1917         if (s->luma_elim_threshold && !s->mb_intra)
1918             for (i = 0; i < 4; i++)
1919                 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
1920         if (s->chroma_elim_threshold && !s->mb_intra)
1921             for (i = 4; i < mb_block_count; i++)
1922                 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
1924         if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
1925             for (i = 0; i < mb_block_count; i++) {
1926                 if (s->block_last_index[i] == -1)
1927                     s->coded_score[i] = INT_MAX / 256;
// Gray (luma-only) intra MBs: force neutral chroma DC coefficients.
1932     if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
1933         s->block_last_index[4] =
1934         s->block_last_index[5] = 0;
1936         s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
1939     // non c quantize code returns incorrect block_last_index FIXME
// Recompute last_index by scanning backwards for the last nonzero coeff.
1940     if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
1941         for (i = 0; i < mb_block_count; i++) {
1943             if (s->block_last_index[i] > 0) {
1944                 for (j = 63; j > 0; j--) {
1945                     if (s->block[i][s->intra_scantable.permutated[j]])
1948                 s->block_last_index[i] = j;
1953     /* huffman encode */
1954     switch(s->codec_id){ //FIXME funct ptr could be slightly faster
1955     case AV_CODEC_ID_MPEG1VIDEO:
1956     case AV_CODEC_ID_MPEG2VIDEO:
1957         if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1958             ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
1960     case AV_CODEC_ID_MPEG4:
1961         if (CONFIG_MPEG4_ENCODER)
1962             ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
1964     case AV_CODEC_ID_MSMPEG4V2:
1965     case AV_CODEC_ID_MSMPEG4V3:
1966     case AV_CODEC_ID_WMV1:
1967         if (CONFIG_MSMPEG4_ENCODER)
1968             ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
1970     case AV_CODEC_ID_WMV2:
1971         if (CONFIG_WMV2_ENCODER)
1972             ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
1974     case AV_CODEC_ID_H261:
1975         if (CONFIG_H261_ENCODER)
1976             ff_h261_encode_mb(s, s->block, motion_x, motion_y);
1978     case AV_CODEC_ID_H263:
1979     case AV_CODEC_ID_H263P:
1980     case AV_CODEC_ID_FLV1:
1981     case AV_CODEC_ID_RV10:
1982     case AV_CODEC_ID_RV20:
1983         if (CONFIG_H263_ENCODER)
1984             ff_h263_encode_mb(s, s->block, motion_x, motion_y);
1986     case AV_CODEC_ID_MJPEG:
1987         if (CONFIG_MJPEG_ENCODER)
1988             ff_mjpeg_encode_mb(s, s->block);
/*
 * Dispatch macroblock encoding with the chroma layout parameters:
 * 4:2:0 uses 8-line chroma blocks and 6 blocks per MB, everything
 * else (4:2:2 here) uses 16-line chroma and 8 blocks per MB.
 */
1995 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
1997     if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
1998     else                                encode_mb_internal(s, motion_x, motion_y, 16, 8);
/*
 * Snapshot the encoder state that a trial macroblock encode will
 * mutate (prediction state, bit counters, quantizer) from s into d,
 * so encode_mb_hq() can restore it before trying another mode.
 *
 * NOTE(review): lines appear elided here (mv copy, loop headers and
 * the data-partitioning pb2/tex_pb handling are not fully visible).
 */
2001 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2004     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2007     d->mb_skip_run= s->mb_skip_run;
2009         d->last_dc[i] = s->last_dc[i];
// Statistics counters — restored so a discarded trial leaves no trace.
2012     d->mv_bits= s->mv_bits;
2013     d->i_tex_bits= s->i_tex_bits;
2014     d->p_tex_bits= s->p_tex_bits;
2015     d->i_count= s->i_count;
2016     d->f_count= s->f_count;
2017     d->b_count= s->b_count;
2018     d->skip_count= s->skip_count;
2019     d->misc_bits= s->misc_bits;
2023     d->qscale= s->qscale;
2024     d->dquant= s->dquant;
2026     d->esc3_level_length= s->esc3_level_length;
/*
 * Counterpart of copy_context_before_encode(): after a trial encode
 * won the mode decision, copy the resulting state (motion vectors,
 * counters, MB mode, block indexes) from s into the "best" context d.
 *
 * NOTE(review): lines appear elided here (some loop headers and pb
 * copies are not visible).
 */
2029 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2032     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2033     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2036     d->mb_skip_run= s->mb_skip_run;
2038         d->last_dc[i] = s->last_dc[i];
// Statistics counters.
2041     d->mv_bits= s->mv_bits;
2042     d->i_tex_bits= s->i_tex_bits;
2043     d->p_tex_bits= s->p_tex_bits;
2044     d->i_count= s->i_count;
2045     d->f_count= s->f_count;
2046     d->b_count= s->b_count;
2047     d->skip_count= s->skip_count;
2048     d->misc_bits= s->misc_bits;
// Winning macroblock mode.
2050     d->mb_intra= s->mb_intra;
2051     d->mb_skipped= s->mb_skipped;
2052     d->mv_type= s->mv_type;
2053     d->mv_dir= s->mv_dir;
2055     if(s->data_partitioning){
2057         d->tex_pb= s->tex_pb;
2061         d->block_last_index[i]= s->block_last_index[i];
2062     d->interlaced_dct= s->interlaced_dct;
2063     d->qscale= s->qscale;
2065     d->esc3_level_length= s->esc3_level_length;
/*
 * Trial-encode one macroblock in a given mode (type) into a scratch
 * bitstream/destination, score it (bit count, optionally RD cost with
 * SSE after decoding), and keep it as the best mode so far if it beats
 * *dmin. Alternates between two scratch block/bitstream sets via
 * *next_block so the current best is never overwritten.
 *
 * NOTE(review): lines appear elided here (the score comparison and the
 * *dmin/*next_block updates are not fully visible).
 */
2068 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2069                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2070                            int *dmin, int *next_block, int motion_x, int motion_y)
2073     uint8_t *dest_backup[3];
// Restore pristine state, then point s at the scratch buffers.
2075     copy_context_before_encode(s, backup, type);
2077     s->block= s->blocks[*next_block];
2078     s->pb= pb[*next_block];
2079     if(s->data_partitioning){
2080         s->pb2   = pb2   [*next_block];
2081         s->tex_pb= tex_pb[*next_block];
// Redirect reconstruction output into the RD scratchpad.
2085         memcpy(dest_backup, s->dest, sizeof(s->dest));
2086         s->dest[0] = s->rd_scratchpad;
2087         s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2088         s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2089         assert(s->linesize >= 32); //FIXME
2092     encode_mb(s, motion_x, motion_y);
// Score starts as the bit cost of this trial.
2094     score= put_bits_count(&s->pb);
2095     if(s->data_partitioning){
2096         score+= put_bits_count(&s->pb2);
2097         score+= put_bits_count(&s->tex_pb);
// Full RD mode: decode the trial MB and add lambda-weighted distortion.
2100     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2101         ff_MPV_decode_mb(s, s->block);
2103         score *= s->lambda2;
2104         score += sse_mb(s) << FF_LAMBDA_SHIFT;
2108         memcpy(s->dest, dest_backup, sizeof(s->dest));
2115         copy_context_after_encode(best, s, type);
/*
 * Sum of squared errors between two pixel regions. Uses the optimized
 * dsp routines for the common 16x16 and 8x8 sizes and falls back to a
 * scalar loop (via the 256-centered square table) for partial edge
 * blocks.
 *
 * NOTE(review): lines appear elided here (the 16x16 size check, loop
 * headers and the return of acc are not visible).
 */
2119 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
// ff_squareTbl is offset by 256 so negative differences index correctly.
2120     uint32_t *sq = ff_squareTbl + 256;
2125         return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2126     else if(w==8 && h==8)
2127         return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2131             acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
/*
 * Distortion of the current macroblock: compare the reconstruction in
 * s->dest against the source picture, over luma and both chroma
 * planes. Full 16x16 MBs use the fast dsp sse/nsse routines; MBs
 * clipped by the picture edge take the generic sse() fallback with the
 * reduced w/h.
 *
 * NOTE(review): lines appear elided here (w/h initialization and the
 * full-MB condition are not visible).
 */
2140 static int sse_mb(MpegEncContext *s){
// Clip the comparison window at the right/bottom picture border.
2144     if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2145     if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2148         if(s->avctx->mb_cmp == FF_CMP_NSSE){
2149             return  s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2150                    +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2151                    +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2153             return  s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2154                    +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2155                    +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
// Edge MB: chroma window is half the (clipped) luma window.
2158         return  sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2159                +sse(s, s->new_picture.f.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2160                +sse(s, s->new_picture.f.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
/*
 * Slice-thread worker for the pre-pass motion estimation: walks the
 * thread's macroblock rows bottom-up, right-to-left, running
 * ff_pre_estimate_p_frame_motion() on each MB with the pre_dia_size
 * search pattern.
 *
 * NOTE(review): lines appear elided here (the return statement and
 * some braces are not visible).
 */
2163 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
2164     MpegEncContext *s= *(void**)arg;
2168     s->me.dia_size= s->avctx->pre_dia_size;
2169     s->first_slice_line=1;
// Reverse scan order (bottom-up) for the pre pass.
2170     for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2171         for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2172             ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2174         s->first_slice_line=0;
/*
 * Slice-thread worker for the main motion estimation pass: for each
 * macroblock of the thread's rows, run B- or P-frame motion
 * estimation, storing vectors and mb_type in the context.
 *
 * NOTE(review): lines appear elided here (the return statement and
 * some braces are not visible).
 */
2182 static int estimate_motion_thread(AVCodecContext *c, void *arg){
2183     MpegEncContext *s= *(void**)arg;
2185     ff_check_alignment();
2187     s->me.dia_size= s->avctx->dia_size;
2188     s->first_slice_line=1;
2189     for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2190         s->mb_x=0; //for block init below
2191         ff_init_block_index(s);
2192         for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
// Advance the four luma block indexes one MB to the right.
2193             s->block_index[0]+=2;
2194             s->block_index[1]+=2;
2195             s->block_index[2]+=2;
2196             s->block_index[3]+=2;
2198             /* compute motion vector & mb_type and store in context */
2199             if(s->pict_type==AV_PICTURE_TYPE_B)
2200                 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2202                 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2204         s->first_slice_line=0;
/*
 * Slice-thread worker computing per-macroblock luma variance and mean
 * of the source picture (inputs to rate control / adaptive quant).
 * Accumulates the variance sum in me.mb_var_sum_temp.
 *
 * NOTE(review): lines appear elided here (xx/yy setup, the return
 * statement and some braces are not visible).
 */
2209 static int mb_var_thread(AVCodecContext *c, void *arg){
2210     MpegEncContext *s= *(void**)arg;
2213     ff_check_alignment();
2215     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2216         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2219             uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2221             int sum = s->dsp.pix_sum(pix, s->linesize);
// var = E[x^2] - E[x]^2 over the 16x16 block (256 pixels), rounded.
2223             varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2225             s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2226             s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2227             s->me.mb_var_sum_temp    += varc;
/*
 * Finish the current slice: merge MPEG-4 data partitions and emit
 * codec-specific stuffing, byte-align and flush the bitstream, and
 * account the alignment bits as misc_bits for pass-1 statistics.
 */
2233 static void write_slice_end(MpegEncContext *s){
2234     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2235         if(s->partitioned_frame){
2236             ff_mpeg4_merge_partitions(s);
2239         ff_mpeg4_stuffing(&s->pb);
2240     }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2241         ff_mjpeg_encode_stuffing(&s->pb);
2244     avpriv_align_put_bits(&s->pb);
2245     flush_put_bits(&s->pb);
2247     if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2248         s->misc_bits+= get_bits_diff(s);
/*
 * Fill one 12-byte H.263 macroblock-info record into the packet side
 * data (AV_PKT_DATA_H263_MB_INFO): bit offset, qscale, GOB number, MB
 * address, and the first motion-vector predictor. The second MV pair
 * is always zero (4MV not implemented).
 *
 * NOTE(review): the declaration of pred_x/pred_y appears elided here.
 */
2251 static void write_mb_info(MpegEncContext *s)
// Point at the record reserved by update_mb_info() (last 12 bytes).
2253     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2254     int offset = put_bits_count(&s->pb);
2255     int mba  = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2256     int gobn = s->mb_y / s->gob_index;
2258     if (CONFIG_H263_ENCODER)
2259         ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2260     bytestream_put_le32(&ptr, offset);
2261     bytestream_put_byte(&ptr, s->qscale);
2262     bytestream_put_byte(&ptr, gobn);
2263     bytestream_put_le16(&ptr, mba);
2264     bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2265     bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2266     /* 4MV not implemented */
2267     bytestream_put_byte(&ptr, 0); /* hmv2 */
2268     bytestream_put_byte(&ptr, 0); /* vmv2 */
/*
 * Decide whether a new H.263 MB-info record is due. Called before each
 * macroblock (startcode=0) and again right after writing a slice
 * startcode (startcode=1); records are spaced roughly every s->mb_info
 * bytes of output.
 *
 * NOTE(review): lines appear elided here (the early-return guard and
 * the write_mb_info() call are not visible).
 */
2271 static void update_mb_info(MpegEncContext *s, int startcode)
// A full interval has passed since the last record: reserve a new one.
2275     if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2276         s->mb_info_size += 12;
2277         s->prev_mb_info = s->last_mb_info;
2280         s->prev_mb_info = put_bits_count(&s->pb)/8;
2281         /* This might have incremented mb_info_size above, and we return without
2282          * actually writing any info into that slot yet. But in that case,
2283          * this will be called again at the start of the after writing the
2284          * start code, actually writing the mb info. */
2288     s->last_mb_info = put_bits_count(&s->pb)/8;
2289     if (!s->mb_info_size)
2290         s->mb_info_size += 12;
2294 static int encode_thread(AVCodecContext *c, void *arg){
2295 MpegEncContext *s= *(void**)arg;
2296 int mb_x, mb_y, pdif = 0;
2297 int chr_h= 16>>s->chroma_y_shift;
2299 MpegEncContext best_s, backup_s;
2300 uint8_t bit_buf[2][MAX_MB_BYTES];
2301 uint8_t bit_buf2[2][MAX_MB_BYTES];
2302 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2303 PutBitContext pb[2], pb2[2], tex_pb[2];
2305 ff_check_alignment();
2308 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2309 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2310 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2313 s->last_bits= put_bits_count(&s->pb);
2324 /* init last dc values */
2325 /* note: quant matrix value (8) is implied here */
2326 s->last_dc[i] = 128 << s->intra_dc_precision;
2328 s->current_picture.f.error[i] = 0;
2331 memset(s->last_mv, 0, sizeof(s->last_mv));
2335 switch(s->codec_id){
2336 case AV_CODEC_ID_H263:
2337 case AV_CODEC_ID_H263P:
2338 case AV_CODEC_ID_FLV1:
2339 if (CONFIG_H263_ENCODER)
2340 s->gob_index = ff_h263_get_gob_height(s);
2342 case AV_CODEC_ID_MPEG4:
2343 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2344 ff_mpeg4_init_partitions(s);
2350 s->first_slice_line = 1;
2351 s->ptr_lastgob = s->pb.buf;
2352 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2356 ff_set_qscale(s, s->qscale);
2357 ff_init_block_index(s);
2359 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2360 int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2361 int mb_type= s->mb_type[xy];
2366 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2367 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2370 if(s->data_partitioning){
2371 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2372 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2373 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2379 s->mb_y = mb_y; // moved into loop, can get changed by H.261
2380 ff_update_block_index(s);
2382 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2383 ff_h261_reorder_mb_index(s);
2384 xy= s->mb_y*s->mb_stride + s->mb_x;
2385 mb_type= s->mb_type[xy];
2388 /* write gob / video packet header */
2390 int current_packet_size, is_gob_start;
2392 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2394 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2396 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2398 switch(s->codec_id){
2399 case AV_CODEC_ID_H263:
2400 case AV_CODEC_ID_H263P:
2401 if(!s->h263_slice_structured)
2402 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2404 case AV_CODEC_ID_MPEG2VIDEO:
2405 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2406 case AV_CODEC_ID_MPEG1VIDEO:
2407 if(s->mb_skip_run) is_gob_start=0;
2412 if(s->start_mb_y != mb_y || mb_x!=0){
2415 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2416 ff_mpeg4_init_partitions(s);
2420 assert((put_bits_count(&s->pb)&7) == 0);
2421 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2423 if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
2424 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2425 int d= 100 / s->avctx->error_rate;
2427 current_packet_size=0;
2428 s->pb.buf_ptr= s->ptr_lastgob;
2429 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2433 if (s->avctx->rtp_callback){
2434 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2435 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2437 update_mb_info(s, 1);
2439 switch(s->codec_id){
2440 case AV_CODEC_ID_MPEG4:
2441 if (CONFIG_MPEG4_ENCODER) {
2442 ff_mpeg4_encode_video_packet_header(s);
2443 ff_mpeg4_clean_buffers(s);
2446 case AV_CODEC_ID_MPEG1VIDEO:
2447 case AV_CODEC_ID_MPEG2VIDEO:
2448 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2449 ff_mpeg1_encode_slice_header(s);
2450 ff_mpeg1_clean_buffers(s);
2453 case AV_CODEC_ID_H263:
2454 case AV_CODEC_ID_H263P:
2455 if (CONFIG_H263_ENCODER)
2456 ff_h263_encode_gob_header(s, mb_y);
2460 if(s->flags&CODEC_FLAG_PASS1){
2461 int bits= put_bits_count(&s->pb);
2462 s->misc_bits+= bits - s->last_bits;
2466 s->ptr_lastgob += current_packet_size;
2467 s->first_slice_line=1;
2468 s->resync_mb_x=mb_x;
2469 s->resync_mb_y=mb_y;
2473 if( (s->resync_mb_x == s->mb_x)
2474 && s->resync_mb_y+1 == s->mb_y){
2475 s->first_slice_line=0;
2479 s->dquant=0; //only for QP_RD
2481 update_mb_info(s, 0);
2483 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2485 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2487 copy_context_before_encode(&backup_s, s, -1);
2489 best_s.data_partitioning= s->data_partitioning;
2490 best_s.partitioned_frame= s->partitioned_frame;
2491 if(s->data_partitioning){
2492 backup_s.pb2= s->pb2;
2493 backup_s.tex_pb= s->tex_pb;
2496 if(mb_type&CANDIDATE_MB_TYPE_INTER){
2497 s->mv_dir = MV_DIR_FORWARD;
2498 s->mv_type = MV_TYPE_16X16;
2500 s->mv[0][0][0] = s->p_mv_table[xy][0];
2501 s->mv[0][0][1] = s->p_mv_table[xy][1];
2502 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2503 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2505 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2506 s->mv_dir = MV_DIR_FORWARD;
2507 s->mv_type = MV_TYPE_FIELD;
2510 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2511 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2512 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2514 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2515 &dmin, &next_block, 0, 0);
2517 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2518 s->mv_dir = MV_DIR_FORWARD;
2519 s->mv_type = MV_TYPE_16X16;
2523 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2524 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2526 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2527 s->mv_dir = MV_DIR_FORWARD;
2528 s->mv_type = MV_TYPE_8X8;
2531 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2532 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2534 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2535 &dmin, &next_block, 0, 0);
2537 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2538 s->mv_dir = MV_DIR_FORWARD;
2539 s->mv_type = MV_TYPE_16X16;
2541 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2542 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2543 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2544 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2546 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2547 s->mv_dir = MV_DIR_BACKWARD;
2548 s->mv_type = MV_TYPE_16X16;
2550 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2551 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2552 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2553 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2555 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2556 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2557 s->mv_type = MV_TYPE_16X16;
2559 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2560 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2561 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2562 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2563 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2564 &dmin, &next_block, 0, 0);
2566 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2567 s->mv_dir = MV_DIR_FORWARD;
2568 s->mv_type = MV_TYPE_FIELD;
2571 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2572 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2573 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2575 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2576 &dmin, &next_block, 0, 0);
2578 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2579 s->mv_dir = MV_DIR_BACKWARD;
2580 s->mv_type = MV_TYPE_FIELD;
2583 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2584 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2585 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2587 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2588 &dmin, &next_block, 0, 0);
2590 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2591 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2592 s->mv_type = MV_TYPE_FIELD;
2594 for(dir=0; dir<2; dir++){
2596 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2597 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2598 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2601 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2602 &dmin, &next_block, 0, 0);
2604 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2606 s->mv_type = MV_TYPE_16X16;
2610 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2611 &dmin, &next_block, 0, 0);
2612 if(s->h263_pred || s->h263_aic){
2614 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2616 ff_clean_intra_table_entries(s); //old mode?
2620 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2621 if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2622 const int last_qp= backup_s.qscale;
2625 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2626 static const int dquant_tab[4]={-1,1,-2,2};
2628 assert(backup_s.dquant == 0);
2631 s->mv_dir= best_s.mv_dir;
2632 s->mv_type = MV_TYPE_16X16;
2633 s->mb_intra= best_s.mb_intra;
2634 s->mv[0][0][0] = best_s.mv[0][0][0];
2635 s->mv[0][0][1] = best_s.mv[0][0][1];
2636 s->mv[1][0][0] = best_s.mv[1][0][0];
2637 s->mv[1][0][1] = best_s.mv[1][0][1];
2639 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2640 for(; qpi<4; qpi++){
2641 int dquant= dquant_tab[qpi];
2642 qp= last_qp + dquant;
2643 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2645 backup_s.dquant= dquant;
2646 if(s->mb_intra && s->dc_val[0]){
2648 dc[i]= s->dc_val[0][ s->block_index[i] ];
2649 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
2653 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2654 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2655 if(best_s.qscale != qp){
2656 if(s->mb_intra && s->dc_val[0]){
2658 s->dc_val[0][ s->block_index[i] ]= dc[i];
2659 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
2666 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2667 int mx= s->b_direct_mv_table[xy][0];
2668 int my= s->b_direct_mv_table[xy][1];
2670 backup_s.dquant = 0;
2671 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2673 ff_mpeg4_set_direct_mv(s, mx, my);
2674 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2675 &dmin, &next_block, mx, my);
2677 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2678 backup_s.dquant = 0;
2679 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
2681 ff_mpeg4_set_direct_mv(s, 0, 0);
2682 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2683 &dmin, &next_block, 0, 0);
2685 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
2688 coded |= s->block_last_index[i];
2691 memcpy(s->mv, best_s.mv, sizeof(s->mv));
2692 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2693 mx=my=0; //FIXME find the one we actually used
2694 ff_mpeg4_set_direct_mv(s, mx, my);
2695 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2703 s->mv_dir= best_s.mv_dir;
2704 s->mv_type = best_s.mv_type;
2706 /* s->mv[0][0][0] = best_s.mv[0][0][0];
2707 s->mv[0][0][1] = best_s.mv[0][0][1];
2708 s->mv[1][0][0] = best_s.mv[1][0][0];
2709 s->mv[1][0][1] = best_s.mv[1][0][1];*/
2712 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2713 &dmin, &next_block, mx, my);
2718 s->current_picture.qscale_table[xy] = best_s.qscale;
2720 copy_context_after_encode(s, &best_s, -1);
2722 pb_bits_count= put_bits_count(&s->pb);
2723 flush_put_bits(&s->pb);
2724 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2727 if(s->data_partitioning){
2728 pb2_bits_count= put_bits_count(&s->pb2);
2729 flush_put_bits(&s->pb2);
2730 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2731 s->pb2= backup_s.pb2;
2733 tex_pb_bits_count= put_bits_count(&s->tex_pb);
2734 flush_put_bits(&s->tex_pb);
2735 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2736 s->tex_pb= backup_s.tex_pb;
2738 s->last_bits= put_bits_count(&s->pb);
2740 if (CONFIG_H263_ENCODER &&
2741 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2742 ff_h263_update_motion_val(s);
2744 if(next_block==0){ //FIXME 16 vs linesize16
2745 s->hdsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
2746 s->hdsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
2747 s->hdsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2750 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
2751 ff_MPV_decode_mb(s, s->block);
2753 int motion_x = 0, motion_y = 0;
2754 s->mv_type=MV_TYPE_16X16;
2755 // only one MB-Type possible
2758 case CANDIDATE_MB_TYPE_INTRA:
2761 motion_x= s->mv[0][0][0] = 0;
2762 motion_y= s->mv[0][0][1] = 0;
2764 case CANDIDATE_MB_TYPE_INTER:
2765 s->mv_dir = MV_DIR_FORWARD;
2767 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2768 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2770 case CANDIDATE_MB_TYPE_INTER_I:
2771 s->mv_dir = MV_DIR_FORWARD;
2772 s->mv_type = MV_TYPE_FIELD;
2775 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2776 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2777 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2780 case CANDIDATE_MB_TYPE_INTER4V:
2781 s->mv_dir = MV_DIR_FORWARD;
2782 s->mv_type = MV_TYPE_8X8;
2785 s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
2786 s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
2789 case CANDIDATE_MB_TYPE_DIRECT:
2790 if (CONFIG_MPEG4_ENCODER) {
2791 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2793 motion_x=s->b_direct_mv_table[xy][0];
2794 motion_y=s->b_direct_mv_table[xy][1];
2795 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
2798 case CANDIDATE_MB_TYPE_DIRECT0:
2799 if (CONFIG_MPEG4_ENCODER) {
2800 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
2802 ff_mpeg4_set_direct_mv(s, 0, 0);
2805 case CANDIDATE_MB_TYPE_BIDIR:
2806 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2808 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2809 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2810 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2811 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2813 case CANDIDATE_MB_TYPE_BACKWARD:
2814 s->mv_dir = MV_DIR_BACKWARD;
2816 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2817 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2819 case CANDIDATE_MB_TYPE_FORWARD:
2820 s->mv_dir = MV_DIR_FORWARD;
2822 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2823 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2825 case CANDIDATE_MB_TYPE_FORWARD_I:
2826 s->mv_dir = MV_DIR_FORWARD;
2827 s->mv_type = MV_TYPE_FIELD;
2830 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2831 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2832 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2835 case CANDIDATE_MB_TYPE_BACKWARD_I:
2836 s->mv_dir = MV_DIR_BACKWARD;
2837 s->mv_type = MV_TYPE_FIELD;
2840 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2841 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2842 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2845 case CANDIDATE_MB_TYPE_BIDIR_I:
2846 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
2847 s->mv_type = MV_TYPE_FIELD;
2849 for(dir=0; dir<2; dir++){
2851 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2852 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2853 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2858 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
2861 encode_mb(s, motion_x, motion_y);
2863 // RAL: Update last macroblock type
2864 s->last_mv_dir = s->mv_dir;
2866 if (CONFIG_H263_ENCODER &&
2867 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
2868 ff_h263_update_motion_val(s);
2870 ff_MPV_decode_mb(s, s->block);
2873 /* clean the MV table in IPS frames for direct mode in B frames */
2874 if(s->mb_intra /* && I,P,S_TYPE */){
2875 s->p_mv_table[xy][0]=0;
2876 s->p_mv_table[xy][1]=0;
2879 if(s->flags&CODEC_FLAG_PSNR){
2883 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2884 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2886 s->current_picture.f.error[0] += sse(
2887 s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
2888 s->dest[0], w, h, s->linesize);
2889 s->current_picture.f.error[1] += sse(
2890 s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
2891 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2892 s->current_picture.f.error[2] += sse(
2893 s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
2894 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
2897 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
2898 ff_h263_loop_filter(s);
2900 av_dlog(s->avctx, "MB %d %d bits\n",
2901 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
2905 //not beautiful here but we must write it before flushing so it has to be here
2906 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
2907 ff_msmpeg4_encode_ext_header(s);
2911 /* Send the last GOB if RTP */
2912 if (s->avctx->rtp_callback) {
2913 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
2914 pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
2915 /* Call the RTP callback to send the last GOB */
2917 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
// MERGE(field): add the per-slice-thread accumulator src->field into
// dst->field, then zero the source so a repeated merge cannot double count.
2923 #define MERGE(field) dst->field += src->field; src->field=0
// Fold the motion-estimation statistics gathered by a slice-thread context
// back into the main encoder context after the ME pass.
2924 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
2925 MERGE(me.scene_change_score);
2926 MERGE(me.mc_mb_var_sum_temp);
2927 MERGE(me.mb_var_sum_temp);
// Merge per-slice encoding statistics and the slice bitstream written by a
// worker context into the main context after encode_thread() finishes.
// NOTE(review): several MERGE() statements appear to be elided from this
// view of the file; comments cover only the visible ones.
2930 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
2933 MERGE(dct_count[0]); //note, the other dct vars are not part of the context
2934 MERGE(dct_count[1]);
2943 MERGE(er.error_count);
2944 MERGE(padding_bug_score);
// PSNR error accumulators for the luma and the two chroma planes.
2945 MERGE(current_picture.f.error[0]);
2946 MERGE(current_picture.f.error[1]);
2947 MERGE(current_picture.f.error[2]);
// DCT denoising statistics are only maintained when noise reduction is on.
2949 if(dst->avctx->noise_reduction){
2950 for(i=0; i<64; i++){
2951 MERGE(dct_error_sum[0][i]);
2952 MERGE(dct_error_sum[1][i]);
// Both bitstreams must be byte-aligned before the slice payload can be
// appended to the main PutBitContext by plain bit copying.
2956 assert(put_bits_count(&src->pb) % 8 ==0);
2957 assert(put_bits_count(&dst->pb) % 8 ==0);
2958 avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
2959 flush_put_bits(&dst->pb);
// Choose the picture-level quality/lambda: a queued s->next_lambda takes
// precedence, otherwise the rate controller is asked (unless qscale is
// fixed). With adaptive quantisation the per-MB qscale table is sanitised
// for codecs that limit qscale deltas between macroblocks.
// dry_run: when non-zero, estimate only — do not consume s->next_lambda.
// NOTE(review): the error-return and final-return lines are elided from
// this view; presumably a negative quality yields an error return.
2962 static int estimate_qp(MpegEncContext *s, int dry_run){
2963 if (s->next_lambda){
2964 s->current_picture_ptr->f.quality =
2965 s->current_picture.f.quality = s->next_lambda;
2966 if(!dry_run) s->next_lambda= 0;
2967 } else if (!s->fixed_qscale) {
2968 s->current_picture_ptr->f.quality =
2969 s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
2970 if (s->current_picture.f.quality < 0)
2974 if(s->adaptive_quant){
2975 switch(s->codec_id){
// MPEG-4 and H.263-family codecs restrict how qscale may change per MB,
// so their qscale tables get codec-specific cleanup passes.
2976 case AV_CODEC_ID_MPEG4:
2977 if (CONFIG_MPEG4_ENCODER)
2978 ff_clean_mpeg4_qscales(s);
2980 case AV_CODEC_ID_H263:
2981 case AV_CODEC_ID_H263P:
2982 case AV_CODEC_ID_FLV1:
2983 if (CONFIG_H263_ENCODER)
2984 ff_clean_h263_qscales(s);
2987 ff_init_qscale_tab(s);
// With adaptive quant, lambda comes from the per-MB table; otherwise it
// follows the picture quality chosen above.
2990 s->lambda= s->lambda_table[0];
2993 s->lambda = s->current_picture.f.quality;
2998 /* must be called before writing the header */
// Derive the temporal distances used for B-frame prediction:
//   pp_time = distance between the two non-B frames surrounding this one,
//   pb_time = distance from the previous non-B frame to this B frame.
// For non-B frames, pp_time is updated and the frame time recorded.
2999 static void set_frame_distances(MpegEncContext * s){
3000 assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
// Time in time_base.num units; pts is assumed valid (asserted above).
3001 s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3003 if(s->pict_type==AV_PICTURE_TYPE_B){
3004 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
// A B frame must lie strictly between its two reference frames.
3005 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3007 s->pp_time= s->time - s->last_non_b_time;
3008 s->last_non_b_time= s->time;
3009 assert(s->picture_number==0 || s->pp_time > 0);
// Encode one picture: set up timing/rounding state, run (threaded) motion
// estimation, pick f_code/b_code, estimate the quantiser, write the
// codec-specific picture header, then run encode_thread over all slice
// contexts and merge their results.
// NOTE(review): numerous lines (error returns, closing braces, some
// declarations) are elided from this view; comments describe only the
// visible statements.
3013 static int encode_picture(MpegEncContext *s, int picture_number)
3017 int context_count = s->slice_context_count;
3019 s->picture_number = picture_number;
3021 /* Reset the average MB variance */
3022 s->me.mb_var_sum_temp =
3023 s->me.mc_mb_var_sum_temp = 0;
3025 /* we need to initialize some time vars before we can encode b-frames */
3026 // RAL: Condition added for MPEG1VIDEO
3027 if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3028 set_frame_distances(s);
3029 if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3030 ff_set_mpeg4_time(s);
3032 s->me.scene_change_score=0;
3034 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
// Rounding control: MSMPEG4 v3+ forces no_rounding on I frames; codecs
// with flip-flop rounding toggle it on each non-B reference frame.
3036 if(s->pict_type==AV_PICTURE_TYPE_I){
3037 if(s->msmpeg4_version >= 3) s->no_rounding=1;
3038 else s->no_rounding=0;
3039 }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3040 if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3041 s->no_rounding ^= 1;
// Lambda selection: second pass reads the stats file; otherwise reuse the
// last lambda for this picture type unless a fixed qscale was requested.
3044 if(s->flags & CODEC_FLAG_PASS2){
3045 if (estimate_qp(s,1) < 0)
3047 ff_get_2pass_fcode(s);
3048 }else if(!(s->flags & CODEC_FLAG_QSCALE)){
3049 if(s->pict_type==AV_PICTURE_TYPE_B)
3050 s->lambda= s->last_lambda_for[s->pict_type];
3052 s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3056 s->mb_intra=0; //for the rate distortion & bit compare functions
// Propagate the main context into each slice-thread duplicate context.
3057 for(i=1; i<context_count; i++){
3058 ret = ff_update_duplicate_context(s->thread_context[i], s);
3066 /* Estimate motion for every MB */
3067 if(s->pict_type != AV_PICTURE_TYPE_I){
// Scale lambda/lambda2 by me_penalty_compensation (fixed-point, /256).
3068 s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3069 s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3070 if (s->pict_type != AV_PICTURE_TYPE_B) {
3071 if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3072 s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3076 s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3077 }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
// I frame: every MB is intra; no motion estimation needed.
3079 for(i=0; i<s->mb_stride*s->mb_height; i++)
3080 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3082 if(!s->fixed_qscale){
3083 /* finding spatial complexity for I-frame rate control */
3084 s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3087 for(i=1; i<context_count; i++){
3088 merge_context_after_me(s, s->thread_context[i]);
3090 s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3091 s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
// Scene-change handling: promote a P frame to I when the ME pass scored
// the frame above the configured scene-change threshold.
3094 if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
3095 s->pict_type= AV_PICTURE_TYPE_I;
3096 for(i=0; i<s->mb_stride*s->mb_height; i++)
3097 s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3098 av_dlog(s, "Scene change detected, encoding as I Frame %d %d\n",
3099 s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
// P/S frames: pick f_code from the MV distribution, then clip/repair MVs
// that exceed the range encodable with that f_code.
3103 if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3104 s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3106 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3108 a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3109 b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3110 s->f_code= FFMAX3(s->f_code, a, b);
3113 ff_fix_long_p_mvs(s);
3114 ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
3115 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3119 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3120 s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
// B frames: f_code from forward MV tables, b_code from backward ones.
3125 if(s->pict_type==AV_PICTURE_TYPE_B){
3128 a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3129 b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3130 s->f_code = FFMAX(a, b);
3132 a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3133 b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3134 s->b_code = FFMAX(a, b);
3136 ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3137 ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3138 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3139 ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3140 if(s->flags & CODEC_FLAG_INTERLACED_ME){
3142 for(dir=0; dir<2; dir++){
3145 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
3146 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
3147 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3148 s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
// Final (non-dry-run) quantiser estimate for the picture.
3156 if (estimate_qp(s, 0) < 0)
3159 if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
3160 s->qscale= 3; //reduce clipping problems
3162 if (s->out_format == FMT_MJPEG) {
3163 /* for mjpeg, we do include qscale in the matrix */
3165 int j= s->dsp.idct_permutation[i];
3167 s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3169 s->y_dc_scale_table=
3170 s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3171 s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3172 ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
3173 s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3177 //FIXME var duplication
3178 s->current_picture_ptr->f.key_frame =
3179 s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3180 s->current_picture_ptr->f.pict_type =
3181 s->current_picture.f.pict_type = s->pict_type;
3183 if (s->current_picture.f.key_frame)
3184 s->picture_in_gop_number=0;
// Write the codec-specific picture header and record its bit cost.
3186 s->last_bits= put_bits_count(&s->pb);
3187 switch(s->out_format) {
3189 if (CONFIG_MJPEG_ENCODER)
3190 ff_mjpeg_encode_picture_header(s);
3193 if (CONFIG_H261_ENCODER)
3194 ff_h261_encode_picture_header(s, picture_number);
3197 if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3198 ff_wmv2_encode_picture_header(s, picture_number);
3199 else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3200 ff_msmpeg4_encode_picture_header(s, picture_number);
3201 else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3202 ff_mpeg4_encode_picture_header(s, picture_number);
3203 else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
3204 ff_rv10_encode_picture_header(s, picture_number);
3205 else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3206 ff_rv20_encode_picture_header(s, picture_number);
3207 else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3208 ff_flv_encode_picture_header(s, picture_number);
3209 else if (CONFIG_H263_ENCODER)
3210 ff_h263_encode_picture_header(s, picture_number);
3213 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3214 ff_mpeg1_encode_picture_header(s, picture_number);
3219 bits= put_bits_count(&s->pb);
3220 s->header_bits= bits - s->last_bits;
// Run the actual per-slice encoding and merge the worker results back.
3222 for(i=1; i<context_count; i++){
3223 update_duplicate_context_after_me(s->thread_context[i], s);
3225 s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3226 for(i=1; i<context_count; i++){
3227 merge_context_after_encode(s, s->thread_context[i]);
// DCT-domain noise reduction: per coefficient position, accumulate the
// coefficient magnitude into dct_error_sum and shrink the coefficient
// toward zero by the adaptive dct_offset (separate statistics are kept
// for intra and inter blocks).
// NOTE(review): the positive/negative branch structure is partially
// elided from this view; the visible lines show the two symmetric cases.
3233 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3234 const int intra= s->mb_intra;
3237 s->dct_count[intra]++;
3239 for(i=0; i<64; i++){
3240 int level= block[i];
// Positive coefficient: add magnitude to the error sum, subtract the
// offset, and clamp at zero so the sign never flips.
3244 s->dct_error_sum[intra][i] += level;
3245 level -= s->dct_offset[intra][i];
3246 if(level<0) level=0;
// Negative coefficient: mirror of the above.
3248 s->dct_error_sum[intra][i] -= level;
3249 level += s->dct_offset[intra][i];
3250 if(level>0) level=0;
// Trellis (rate-distortion optimal) quantisation of one 8x8 DCT block.
// Performs the forward DCT, generates up to two candidate quantised
// levels per coefficient, then runs a Viterbi-style search over
// (run, level) survivor paths minimising distortion + lambda * bits.
// Returns the index of the last nonzero coefficient (or -1 for an empty
// block); *overflow is set when a level exceeds s->max_qcoeff.
// NOTE(review): many lines (declarations of coeff[], score_tab[],
// survivor[], level_tab[], run_tab[], several braces and assignments)
// are elided from this view; comments cover only the visible code.
3257 static int dct_quantize_trellis_c(MpegEncContext *s,
3258 int16_t *block, int n,
3259 int qscale, int *overflow){
3261 const uint8_t *scantable= s->intra_scantable.scantable;
3262 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3264 unsigned int threshold1, threshold2;
3276 int coeff_count[64];
3277 int qmul, qadd, start_i, last_non_zero, i, dc;
3278 const int esc_length= s->ac_esc_length;
3280 uint8_t * last_length;
// lambda in the 6-bit fixed-point domain used by the bit-cost tables.
3281 const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3283 s->dsp.fdct (block);
// Optional DCT-domain denoising (only when noise reduction is enabled).
3285 if(s->dct_error_sum)
3286 s->denoise_dct(s, block);
3288 qadd= ((qscale-1)|1)*8;
3299 /* For AIC we skip quant/dequant of INTRADC */
3304 /* note: block[0] is assumed to be positive */
3305 block[0] = (block[0] + (q >> 1)) / q;
// Intra path: intra quant matrix and intra AC VLC length tables; MPEG
// quantisation uses a rounding bias of half the quantisation step.
3308 qmat = s->q_intra_matrix[qscale];
3309 if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3310 bias= 1<<(QMAT_SHIFT-1);
3311 length = s->intra_ac_vlc_length;
3312 last_length= s->intra_ac_vlc_last_length;
// Inter path: inter matrix and inter AC VLC tables.
3316 qmat = s->q_inter_matrix[qscale];
3317 length = s->inter_ac_vlc_length;
3318 last_length= s->inter_ac_vlc_last_length;
3322 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3323 threshold2= (threshold1<<1);
// Scan backwards to find the last coefficient that quantises to nonzero.
3325 for(i=63; i>=start_i; i--) {
3326 const int j = scantable[i];
3327 int level = block[j] * qmat[j];
3329 if(((unsigned)(level+threshold1))>threshold2){
// Forward pass: for each surviving coefficient produce up to two
// candidate levels (rounded value and the next one toward zero).
3335 for(i=start_i; i<=last_non_zero; i++) {
3336 const int j = scantable[i];
3337 int level = block[j] * qmat[j];
3339 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3340 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3341 if(((unsigned)(level+threshold1))>threshold2){
3343 level= (bias + level)>>QMAT_SHIFT;
3345 coeff[1][i]= level-1;
3346 // coeff[2][k]= level-2;
3348 level= (bias - level)>>QMAT_SHIFT;
3349 coeff[0][i]= -level;
3350 coeff[1][i]= -level+1;
3351 // coeff[2][k]= -level+2;
3353 coeff_count[i]= FFMIN(level, 2);
3354 assert(coeff_count[i]);
// Below-threshold coefficient: only candidate is +/-1 matching the sign.
3357 coeff[0][i]= (level>>31)|1;
3362 *overflow= s->max_qcoeff < max; //overflow might have happened
// All-zero AC block: clear and return early.
3364 if(last_non_zero < start_i){
3365 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3366 return last_non_zero;
3369 score_tab[start_i]= 0;
3370 survivor[0]= start_i;
// Viterbi search: for each position, try each candidate level against
// every survivor start, scoring distortion plus VLC bit cost * lambda.
3373 for(i=start_i; i<=last_non_zero; i++){
3374 int level_index, j, zero_distortion;
3375 int dct_coeff= FFABS(block[ scantable[i] ]);
3376 int best_score=256*256*256*120;
// The ifast FDCT leaves AAN scaling in the coefficients; undo it so the
// distortion is measured in the true DCT domain.
3378 if (s->dsp.fdct == ff_fdct_ifast)
3379 dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3380 zero_distortion= dct_coeff*dct_coeff;
3382 for(level_index=0; level_index < coeff_count[i]; level_index++){
3384 int level= coeff[level_index][i];
3385 const int alevel= FFABS(level);
// Reconstruct the dequantised value the decoder would see; the H.263
// family uses qmul/qadd, MPEG uses the matrix with odd-forcing.
3390 if(s->out_format == FMT_H263){
3391 unquant_coeff= alevel*qmul + qadd;
3393 j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
3395 unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3;
3396 unquant_coeff = (unquant_coeff - 1) | 1;
3398 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3399 unquant_coeff = (unquant_coeff - 1) | 1;
3404 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
// Level fits in the VLC table: use the exact per-(run,level) bit cost.
3406 if((level&(~127)) == 0){
3407 for(j=survivor_count-1; j>=0; j--){
3408 int run= i - survivor[j];
3409 int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3410 score += score_tab[i-run];
3412 if(score < best_score){
3415 level_tab[i+1]= level-64;
// H.263 uses a distinct VLC for the last coefficient of a block, so the
// "this is the last coeff" hypothesis is scored separately.
3419 if(s->out_format == FMT_H263){
3420 for(j=survivor_count-1; j>=0; j--){
3421 int run= i - survivor[j];
3422 int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3423 score += score_tab[i-run];
3424 if(score < last_score){
3427 last_level= level-64;
// Level outside the VLC table: costed as an escape code.
3433 distortion += esc_length*lambda;
3434 for(j=survivor_count-1; j>=0; j--){
3435 int run= i - survivor[j];
3436 int score= distortion + score_tab[i-run];
3438 if(score < best_score){
3441 level_tab[i+1]= level-64;
3445 if(s->out_format == FMT_H263){
3446 for(j=survivor_count-1; j>=0; j--){
3447 int run= i - survivor[j];
3448 int score= distortion + score_tab[i-run];
3449 if(score < last_score){
3452 last_level= level-64;
3460 score_tab[i+1]= best_score;
// Prune survivors whose partial score can no longer win; the looser
// bound for long runs accounts for MPEG-4's VLC quirk noted below.
3462 //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
3463 if(last_non_zero <= 27){
3464 for(; survivor_count; survivor_count--){
3465 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
3469 for(; survivor_count; survivor_count--){
3470 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
3475 survivor[ survivor_count++ ]= i+1;
// Non-H.263 formats have no dedicated "last" VLC, so pick the best end
// position by scanning the score table directly.
3478 if(s->out_format != FMT_H263){
3479 last_score= 256*256*256*120;
3480 for(i= survivor[0]; i<=last_non_zero + 1; i++){
3481 int score= score_tab[i];
3482 if(i) score += lambda*2; //FIXME exacter?
3484 if(score < last_score){
3487 last_level= level_tab[i];
3488 last_run= run_tab[i];
3493 s->coded_score[n] = last_score;
3495 dc= FFABS(block[0]);
3496 last_non_zero= last_i - 1;
3497 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3499 if(last_non_zero < start_i)
3500 return last_non_zero;
// Special case: only the first coefficient survives — decide between the
// candidate levels and dropping it entirely, by RD score against the DC.
3502 if(last_non_zero == 0 && start_i == 0){
3504 int best_score= dc * dc;
3506 for(i=0; i<coeff_count[0]; i++){
3507 int level= coeff[i][0];
3508 int alevel= FFABS(level);
3509 int unquant_coeff, score, distortion;
3511 if(s->out_format == FMT_H263){
3512 unquant_coeff= (alevel*qmul + qadd)>>3;
3514 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
3515 unquant_coeff = (unquant_coeff - 1) | 1;
3517 unquant_coeff = (unquant_coeff + 4) >> 3;
3518 unquant_coeff<<= 3 + 3;
3520 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
3522 if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
3523 else score= distortion + esc_length*lambda;
3525 if(score < best_score){
3527 best_level= level - 64;
3530 block[0]= best_level;
3531 s->coded_score[n] = best_score - dc*dc;
3532 if(best_level == 0) return -1;
3533 else return last_non_zero;
// Backtrack the winning path, writing the chosen levels into the block
// in permuted scan order.
3539 block[ perm_scantable[last_non_zero] ]= last_level;
3542 for(; i>start_i; i -= run_tab[i] + 1){
3543 block[ perm_scantable[i-1] ]= level_tab[i];
3546 return last_non_zero;
3549 //#define REFINE_STATS 1
// 64 IDCT basis images (one 8x8 pattern per coefficient position, stored
// in the IDCT's permuted order), used by dct_quantize_refine(); filled
// lazily by build_basis() below.
3550 static int16_t basis[64][64];
// Precompute the 2-D DCT-II basis functions, scaled by BASIS_SHIFT
// fixed-point precision, with the standard 1/sqrt(2) normalisation for
// the zero-frequency rows/columns. perm maps coefficient indices to the
// IDCT permutation order.
// NOTE(review): the surrounding loop headers over i/j/x/y are elided from
// this view of the file.
3552 static void build_basis(uint8_t *perm){
3559 double s= 0.25*(1<<BASIS_SHIFT);
3561 int perm_index= perm[index];
3562 if(i==0) s*= sqrt(0.5);
3563 if(j==0) s*= sqrt(0.5);
3564 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
/* Iterative rate-distortion refinement of an already-quantized 8x8 block.
 * Starting from the current quantized coefficients, it repeatedly tries +-1
 * changes to single coefficients (including the intra DC term), scoring each
 * candidate as VLC bit-cost delta (via the length/last_length tables) plus the
 * change in weighted spatial-domain error (via dsp.try_8x8basis on rem[]),
 * and keeps the best change per pass. Returns the updated last_non_zero index.
 * NOTE(review): this view of the function is elided; comments describe only
 * the visible lines. */
3571 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
3572 int16_t *block, int16_t *weight, int16_t *orig,
3575 LOCAL_ALIGNED_16(int16_t, d1, [64]);
3576 const uint8_t *scantable= s->intra_scantable.scantable;
3577 const uint8_t *perm_scantable= s->intra_scantable.permutated;
3578 // unsigned int threshold1, threshold2;
3583 int qmul, qadd, start_i, last_non_zero, i, dc;
3585 uint8_t * last_length;
3587 int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
/* debug-only counters (guarded elsewhere); they track how often the search
 * moved coefficients after the last one, to/from zero, or flipped a sign */
3590 static int after_last=0;
3591 static int to_zero=0;
3592 static int from_zero=0;
3595 static int messed_sign=0;
/* lazily build the IDCT-permuted DCT basis tables on first use */
3598 if(basis[0][0] == 0)
3599 build_basis(s->dsp.idct_permutation);
3610 /* For AIC we skip quant/dequant of INTRADC */
3614 q <<= RECON_SHIFT-3;
3615 /* note: block[0] is assumed to be positive */
3617 // block[0] = (block[0] + (q >> 1)) / q;
3619 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3620 // bias= 1<<(QMAT_SHIFT-1);
/* pick the AC VLC bit-length tables for intra blocks ... */
3621 length = s->intra_ac_vlc_length;
3622 last_length= s->intra_ac_vlc_last_length;
/* ... or for inter blocks (elided else-branch) */
3626 length = s->inter_ac_vlc_length;
3627 last_length= s->inter_ac_vlc_last_length;
3629 last_non_zero = s->block_last_index[n];
/* initialize rem[] as (DC reconstruction - original), scaled by RECON_SHIFT;
 * the +1<<(RECON_SHIFT-1) is the rounding term */
3634 dc += (1<<(RECON_SHIFT-1));
3635 for(i=0; i<64; i++){
3636 rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig dirrectly instead of copying to rem[]
3639 STOP_TIMER("memset rem[]")}
/* derive per-coefficient perceptual weights, mapped into the 16..63 range */
3642 for(i=0; i<64; i++){
3647 w= FFABS(weight[i]) + qns*one;
3648 w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
3651 // w=weight[i] = (63*qns + (w/2)) / w;
3657 lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
/* initial pass: dequantize existing coefficients, record RLE runs in
 * run_tab[], and subtract each coefficient's basis contribution from rem[] */
3663 for(i=start_i; i<=last_non_zero; i++){
3664 int j= perm_scantable[i];
3665 const int level= block[j];
3669 if(level<0) coeff= qmul*level - qadd;
3670 else coeff= qmul*level + qadd;
3671 run_tab[rle_index++]=run;
3674 s->dsp.add_8x8basis(rem, basis[j], coeff);
3680 if(last_non_zero>0){
3681 STOP_TIMER("init rem[]")
/* search loop: best_score is the current weighted error; candidates below
 * must beat it to be applied */
3688 int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
3691 int run2, best_unquant_change=0, analyze_gradient;
3695 analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
/* optionally precompute a weighted residual gradient d1[]; it is used below
 * to reject +-1 candidates whose sign disagrees with the residual */
3697 if(analyze_gradient){
3701 for(i=0; i<64; i++){
3704 d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
3707 STOP_TIMER("rem*w*w")}
/* candidate: +-1 change to the intra DC coefficient */
3717 const int level= block[0];
3718 int change, old_coeff;
3720 assert(s->mb_intra);
3724 for(change=-1; change<=1; change+=2){
3725 int new_level= level + change;
3726 int score, new_coeff;
3728 new_coeff= q*new_level;
/* reject reconstructions outside the representable DC range */
3729 if(new_coeff >= 2048 || new_coeff < 0)
3732 score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
3733 if(score<best_score){
3736 best_change= change;
3737 best_unquant_change= new_coeff - old_coeff;
3744 run2= run_tab[rle_index++];
/* candidate loop: +-1 change to each AC coefficient in scan order */
3748 for(i=start_i; i<64; i++){
3749 int j= perm_scantable[i];
3750 const int level= block[j];
3751 int change, old_coeff;
/* without strong noise shaping, do not extend past last_non_zero+1 */
3753 if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
3757 if(level<0) old_coeff= qmul*level - qadd;
3758 else old_coeff= qmul*level + qadd;
3759 run2= run_tab[rle_index++]; //FIXME ! maybe after last
3763 assert(run2>=0 || i >= last_non_zero );
3766 for(change=-1; change<=1; change+=2){
3767 int new_level= level + change;
3768 int score, new_coeff, unquant_change;
/* without noise shaping >= 2, never grow a coefficient's magnitude */
3771 if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
3775 if(new_level<0) new_coeff= qmul*new_level - qadd;
3776 else new_coeff= qmul*new_level + qadd;
3777 if(new_coeff >= 2048 || new_coeff <= -2048)
3779 //FIXME check for overflow
/* case 1: level stays non-zero -> bit cost is a simple table delta */
3782 if(level < 63 && level > -63){
3783 if(i < last_non_zero)
3784 score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
3785 - length[UNI_AC_ENC_INDEX(run, level+64)];
3787 score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
3788 - last_length[UNI_AC_ENC_INDEX(run, level+64)];
/* case 2 (elided branch): zero -> +-1, a new coefficient appears; the run
 * containing it is split, so account for the neighbouring runs too */
3791 assert(FFABS(new_level)==1);
3793 if(analyze_gradient){
3794 int g= d1[ scantable[i] ];
/* skip candidates whose sign matches the residual gradient (no gain) */
3795 if(g && (g^new_level) >= 0)
3799 if(i < last_non_zero){
3800 int next_i= i + run2 + 1;
3801 int next_level= block[ perm_scantable[next_i] ] + 64;
3803 if(next_level&(~127))
3806 if(next_i < last_non_zero)
3807 score += length[UNI_AC_ENC_INDEX(run, 65)]
3808 + length[UNI_AC_ENC_INDEX(run2, next_level)]
3809 - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3811 score += length[UNI_AC_ENC_INDEX(run, 65)]
3812 + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3813 - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
3815 score += last_length[UNI_AC_ENC_INDEX(run, 65)];
3817 score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3818 - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* case 3 (elided branch): +-1 -> zero, a coefficient disappears; the two
 * adjacent runs merge, so the cost delta is the mirror of case 2 */
3824 assert(FFABS(level)==1);
3826 if(i < last_non_zero){
3827 int next_i= i + run2 + 1;
3828 int next_level= block[ perm_scantable[next_i] ] + 64;
3830 if(next_level&(~127))
3833 if(next_i < last_non_zero)
3834 score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3835 - length[UNI_AC_ENC_INDEX(run2, next_level)]
3836 - length[UNI_AC_ENC_INDEX(run, 65)];
3838 score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
3839 - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
3840 - length[UNI_AC_ENC_INDEX(run, 65)];
3842 score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
3844 score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
3845 - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
/* add the distortion delta and keep the candidate if it wins */
3852 unquant_change= new_coeff - old_coeff;
3853 assert((score < 100*lambda && score > -100*lambda) || lambda==0);
3855 score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
3856 if(score<best_score){
3859 best_change= change;
3860 best_unquant_change= unquant_change;
3864 prev_level= level + 64;
3865 if(prev_level&(~127))
3874 STOP_TIMER("iterative step")}
/* apply the best change found: update block[], last_non_zero and rem[] */
3878 int j= perm_scantable[ best_coeff ];
3880 block[j] += best_change;
3882 if(best_coeff > last_non_zero){
3883 last_non_zero= best_coeff;
3891 if(block[j] - best_change){
3892 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
/* if a trailing coefficient became zero, shrink last_non_zero */
3904 for(; last_non_zero>=start_i; last_non_zero--){
3905 if(block[perm_scantable[last_non_zero]])
/* periodic debug dump of the search statistics */
3911 if(256*256*256*64 % count == 0){
3912 printf("after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
/* rebuild run_tab[] after the change so the next pass sees correct runs */
3917 for(i=start_i; i<=last_non_zero; i++){
3918 int j= perm_scantable[i];
3919 const int level= block[j];
3922 run_tab[rle_index++]=run;
3929 s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
3935 if(last_non_zero>0){
3936 STOP_TIMER("iterative search")
3941 return last_non_zero;
/* Default scalar quantizer for one 8x8 block: forward DCT, optional DCT-domain
 * denoising, then biased fixed-point division by the quantization matrix.
 * Returns the index of the last non-zero coefficient in scan order; *overflow
 * is set when a quantized level may exceed s->max_qcoeff.
 * NOTE(review): this view of the function is elided; comments describe only
 * the visible lines. */
3944 int ff_dct_quantize_c(MpegEncContext *s,
3945 int16_t *block, int n,
3946 int qscale, int *overflow)
3948 int i, j, level, last_non_zero, q, start_i;
3950 const uint8_t *scantable= s->intra_scantable.scantable;
3953 unsigned int threshold1, threshold2;
3955 s->dsp.fdct (block);
/* when DCT error statistics are kept, run the denoise filter on the block */
3957 if(s->dct_error_sum)
3958 s->denoise_dct(s, block);
3968 /* For AIC we skip quant/dequant of INTRADC */
3971 /* note: block[0] is assumed to be positive */
3972 block[0] = (block[0] + (q >> 1)) / q;
/* select quantization matrix and rounding bias: intra ... */
3975 qmat = s->q_intra_matrix[qscale];
3976 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
/* ... or inter (elided else-branch) */
3980 qmat = s->q_inter_matrix[qscale];
3981 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
/* threshold1/threshold2 bracket the dead zone: values whose scaled level
 * falls inside it quantize to zero (checked via one unsigned compare) */
3983 threshold1= (1<<QMAT_SHIFT) - bias - 1;
3984 threshold2= (threshold1<<1);
/* backward scan to find the last coefficient that quantizes to non-zero */
3985 for(i=63;i>=start_i;i--) {
3987 level = block[j] * qmat[j];
3989 if(((unsigned)(level+threshold1))>threshold2){
/* forward scan: quantize the surviving coefficients with rounding bias */
3996 for(i=start_i; i<=last_non_zero; i++) {
3998 level = block[j] * qmat[j];
4000 // if( bias+level >= (1<<QMAT_SHIFT)
4001 // || bias-level >= (1<<QMAT_SHIFT)){
4002 if(((unsigned)(level+threshold1))>threshold2){
4004 level= (bias + level)>>QMAT_SHIFT;
4007 level= (bias - level)>>QMAT_SHIFT;
4015 *overflow= s->max_qcoeff < max; //overflow might have happened
4017 /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4018 if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
4019 ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
4021 return last_non_zero;
/* Helper macros for the AVOption tables below: OFFSET resolves a field inside
 * MpegEncContext; VE marks an option as a video encoding parameter. */
4024 #define OFFSET(x) offsetof(MpegEncContext, x)
4025 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options for the H.263 encoder (table is elided in this view). */
4026 static const AVOption h263_options[] = {
4027 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4028 { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
4029 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
/* AVClass that exposes h263_options through the generic AVOption system. */
4034 static const AVClass h263_class = {
4035 .class_name = "H.263 encoder",
4036 .item_name = av_default_item_name,
4037 .option = h263_options,
4038 .version = LIBAVUTIL_VERSION_INT,
/* H.263 encoder registration; all entry points are the shared MPV
 * implementations from this file. */
4041 AVCodec ff_h263_encoder = {
4043 .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4044 .type = AVMEDIA_TYPE_VIDEO,
4045 .id = AV_CODEC_ID_H263,
4046 .priv_data_size = sizeof(MpegEncContext),
4047 .init = ff_MPV_encode_init,
4048 .encode2 = ff_MPV_encode_picture,
4049 .close = ff_MPV_encode_end,
4050 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4051 .priv_class = &h263_class,
/* Private options for the H.263+ encoder (table is elided in this view). */
4054 static const AVOption h263p_options[] = {
4055 { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4056 { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4057 { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4058 { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
/* AVClass that exposes h263p_options through the generic AVOption system. */
4062 static const AVClass h263p_class = {
4063 .class_name = "H.263p encoder",
4064 .item_name = av_default_item_name,
4065 .option = h263p_options,
4066 .version = LIBAVUTIL_VERSION_INT,
/* H.263+ (H.263v2) encoder registration; slice-threaded capable, otherwise
 * backed by the same shared MPV entry points as the other codecs here. */
4069 AVCodec ff_h263p_encoder = {
4071 .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4072 .type = AVMEDIA_TYPE_VIDEO,
4073 .id = AV_CODEC_ID_H263P,
4074 .priv_data_size = sizeof(MpegEncContext),
4075 .init = ff_MPV_encode_init,
4076 .encode2 = ff_MPV_encode_picture,
4077 .close = ff_MPV_encode_end,
4078 .capabilities = CODEC_CAP_SLICE_THREADS,
4079 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4080 .priv_class = &h263p_class,
/* Generic MPV AVClass (defines msmpeg4v2_class) plus the MS-MPEG4 v2
 * encoder registration using the shared MPV entry points. */
4083 FF_MPV_GENERIC_CLASS(msmpeg4v2)
4085 AVCodec ff_msmpeg4v2_encoder = {
4086 .name = "msmpeg4v2",
4087 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4088 .type = AVMEDIA_TYPE_VIDEO,
4089 .id = AV_CODEC_ID_MSMPEG4V2,
4090 .priv_data_size = sizeof(MpegEncContext),
4091 .init = ff_MPV_encode_init,
4092 .encode2 = ff_MPV_encode_picture,
4093 .close = ff_MPV_encode_end,
4094 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4095 .priv_class = &msmpeg4v2_class,
/* Generic MPV AVClass (defines msmpeg4v3_class) plus the MS-MPEG4 v3
 * encoder registration using the shared MPV entry points. */
4098 FF_MPV_GENERIC_CLASS(msmpeg4v3)
4100 AVCodec ff_msmpeg4v3_encoder = {
4102 .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4103 .type = AVMEDIA_TYPE_VIDEO,
4104 .id = AV_CODEC_ID_MSMPEG4V3,
4105 .priv_data_size = sizeof(MpegEncContext),
4106 .init = ff_MPV_encode_init,
4107 .encode2 = ff_MPV_encode_picture,
4108 .close = ff_MPV_encode_end,
4109 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4110 .priv_class = &msmpeg4v3_class,
4113 FF_MPV_GENERIC_CLASS(wmv1)
4115 AVCodec ff_wmv1_encoder = {
4117 .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4118 .type = AVMEDIA_TYPE_VIDEO,
4119 .id = AV_CODEC_ID_WMV1,
4120 .priv_data_size = sizeof(MpegEncContext),
4121 .init = ff_MPV_encode_init,
4122 .encode2 = ff_MPV_encode_picture,
4123 .close = ff_MPV_encode_end,
4124 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4125 .priv_class = &wmv1_class,