#include "libavutil/internal.h"
#include "libavutil/intmath.h"
#include "libavutil/mathematics.h"
+#include "libavutil/mem_internal.h"
#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
+#include "libavutil/thread.h"
#include "avcodec.h"
#include "dct.h"
#include "idctdsp.h"
#include "mathops.h"
#include "mpegutils.h"
#include "mjpegenc.h"
+#include "speedhqenc.h"
#include "msmpeg4.h"
#include "pixblockdsp.h"
#include "qpeldsp.h"
#include "wmv2.h"
#include "rv10.h"
#include "packet_internal.h"
-#include "libxvid.h"
#include <limits.h>
#include "sp5x.h"
#undef COPY
}
+/* One-time initialization of the static default_fcode_tab: mark motion
+ * vectors in [-16, 16) with fcode 1. The table is indexed with an offset
+ * of MAX_MV so that negative MVs map to non-negative slots. Invoked
+ * exactly once via ff_thread_once() from mpv_encode_defaults(), making
+ * the lazy init of this shared table thread-safe. */
+static void mpv_encode_init_static(void)
+{
+    for (int i = -16; i < 16; i++)
+        default_fcode_tab[i + MAX_MV] = 1;
+}
+
/**
* Set the given MpegEncContext to defaults for encoding.
* the changed fields will not depend upon the prior state of the MpegEncContext.
*/
static void mpv_encode_defaults(MpegEncContext *s)
{
- int i;
+ static AVOnce init_static_once = AV_ONCE_INIT;
+
ff_mpv_common_defaults(s);
- for (i = -16; i < 16; i++) {
- default_fcode_tab[i + MAX_MV] = 1;
- }
+ ff_thread_once(&init_static_once, mpv_encode_init_static);
+
s->me.mv_penalty = default_mv_penalty;
s->fcode_tab = default_fcode_tab;
return AVERROR(EINVAL);
}
break;
+ case AV_CODEC_ID_SPEEDHQ:
+ if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
+ avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
+ avctx->pix_fmt != AV_PIX_FMT_YUV444P) {
+ av_log(avctx, AV_LOG_ERROR,
+ "only YUV420/YUV422/YUV444 are supported (no alpha support yet)\n");
+ return AVERROR(EINVAL);
+ }
+ break;
default:
if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
//return -1;
}
- if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
+ if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
// (a + x * 3 / 8) / x
s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
s->inter_quant_bias = 0;
avctx->delay = 0;
s->low_delay = 1;
break;
+ case AV_CODEC_ID_SPEEDHQ:
+ s->out_format = FMT_SPEEDHQ;
+ s->intra_only = 1; /* force intra only for SHQ */
+ if (!CONFIG_SPEEDHQ_ENCODER)
+ return AVERROR_ENCODER_NOT_FOUND;
+ if ((ret = ff_speedhq_encode_init(s)) < 0)
+ return ret;
+ avctx->delay = 0;
+ s->low_delay = 1;
+ break;
case AV_CODEC_ID_H261:
if (!CONFIG_H261_ENCODER)
return AVERROR_ENCODER_NOT_FOUND;
} else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
s->intra_matrix[j] =
s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
+ } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
+ s->intra_matrix[j] =
+ s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
} else {
/* MPEG-1/2 */
s->chroma_intra_matrix[j] =
return 0;
}
-static int encode_frame(AVCodecContext *c, AVFrame *frame)
+static int encode_frame(AVCodecContext *c, AVFrame *frame, AVPacket *pkt)
{
- AVPacket pkt = { 0 };
int ret;
int size = 0;
- av_init_packet(&pkt);
-
ret = avcodec_send_frame(c, frame);
if (ret < 0)
return ret;
do {
- ret = avcodec_receive_packet(c, &pkt);
+ ret = avcodec_receive_packet(c, pkt);
if (ret >= 0) {
- size += pkt.size;
- av_packet_unref(&pkt);
+ size += pkt->size;
+ av_packet_unref(pkt);
} else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
return ret;
} while (ret >= 0);
static int estimate_best_b_count(MpegEncContext *s)
{
const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
+ AVPacket *pkt;
const int scale = s->brd_scale;
int width = s->width >> scale;
int height = s->height >> scale;
av_assert0(scale >= 0 && scale <= 3);
+ pkt = av_packet_alloc();
+ if (!pkt)
+ return AVERROR(ENOMEM);
+
//emms_c();
//s->next_picture_ptr->quality;
p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
break;
c = avcodec_alloc_context3(NULL);
- if (!c)
- return AVERROR(ENOMEM);
+ if (!c) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
c->width = width;
c->height = height;
if (ret < 0)
goto fail;
+
s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
- out_size = encode_frame(c, s->tmp_frames[0]);
+ out_size = encode_frame(c, s->tmp_frames[0], pkt);
if (out_size < 0) {
ret = out_size;
goto fail;
AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
- out_size = encode_frame(c, s->tmp_frames[i + 1]);
+ out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
if (out_size < 0) {
ret = out_size;
goto fail;
}
/* get the delayed frames */
- out_size = encode_frame(c, NULL);
+ out_size = encode_frame(c, NULL, pkt);
if (out_size < 0) {
ret = out_size;
goto fail;
fail:
avcodec_free_context(&c);
- if (ret < 0)
- return ret;
+ av_packet_unref(pkt);
+ if (ret < 0) {
+ best_b_count = ret;
+ break;
+ }
}
+ av_packet_free(&pkt);
+
return best_b_count;
}
if (CONFIG_MJPEG_ENCODER)
ff_mjpeg_encode_mb(s, s->block);
break;
+ case AV_CODEC_ID_SPEEDHQ:
+ if (CONFIG_SPEEDHQ_ENCODER)
+ ff_speedhq_encode_mb(s, s->block);
+ break;
default:
av_assert1(0);
}
static int estimate_motion_thread(AVCodecContext *c, void *arg){
MpegEncContext *s= *(void**)arg;
- ff_check_alignment();
-
s->me.dia_size= s->avctx->dia_size;
s->first_slice_line=1;
for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
MpegEncContext *s= *(void**)arg;
int mb_x, mb_y;
- ff_check_alignment();
-
for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
for(mb_x=0; mb_x < s->mb_width; mb_x++) {
int xx = mb_x * 16;
ff_mpeg4_stuffing(&s->pb);
}else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
ff_mjpeg_encode_stuffing(s);
+ } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
+ ff_speedhq_end_slice(s);
}
flush_put_bits(&s->pb);
static int encode_thread(AVCodecContext *c, void *arg){
MpegEncContext *s= *(void**)arg;
- int mb_x, mb_y;
+ int mb_x, mb_y, mb_y_order;
int chr_h= 16>>s->chroma_y_shift;
int i, j;
MpegEncContext best_s = { 0 }, backup_s;
uint8_t bit_buf_tex[2][MAX_MB_BYTES];
PutBitContext pb[2], pb2[2], tex_pb[2];
- ff_check_alignment();
-
for(i=0; i<2; i++){
init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
s->resync_mb_y=0;
s->first_slice_line = 1;
s->ptr_lastgob = s->pb.buf;
- for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
+ for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
+ if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
+ int first_in_slice;
+ mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
+ if (first_in_slice && mb_y_order != s->start_mb_y)
+ ff_speedhq_end_slice(s);
+ s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
+ } else {
+ mb_y = mb_y_order;
+ }
s->mb_x=0;
s->mb_y= mb_y;
pb_bits_count= put_bits_count(&s->pb);
flush_put_bits(&s->pb);
- avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
+ ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
s->pb= backup_s.pb;
if(s->data_partitioning){
pb2_bits_count= put_bits_count(&s->pb2);
flush_put_bits(&s->pb2);
- avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
+ ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
s->pb2= backup_s.pb2;
tex_pb_bits_count= put_bits_count(&s->tex_pb);
flush_put_bits(&s->tex_pb);
- avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
+ ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
s->tex_pb= backup_s.tex_pb;
}
s->last_bits= put_bits_count(&s->pb);
av_assert1(put_bits_count(&src->pb) % 8 ==0);
av_assert1(put_bits_count(&dst->pb) % 8 ==0);
- avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
+ ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
flush_put_bits(&dst->pb);
}
for(i=1;i<64;i++){
int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
- s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
- s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
+ s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
+ s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
}
s->y_dc_scale_table= y;
s->c_dc_scale_table= c;
s->qscale= 8;
}
+ if (s->out_format == FMT_SPEEDHQ) {
+ s->y_dc_scale_table=
+ s->c_dc_scale_table= ff_mpeg2_dc_scale_table[3];
+ }
+
//FIXME var duplication
s->current_picture_ptr->f->key_frame =
s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
s->pred, s->intra_matrix, s->chroma_intra_matrix);
break;
+ case FMT_SPEEDHQ:
+ if (CONFIG_SPEEDHQ_ENCODER)
+ ff_speedhq_encode_picture_header(s);
+ break;
case FMT_H261:
if (CONFIG_H261_ENCODER)
ff_h261_encode_picture_header(s, picture_number);