3 * Copyright (C) 2004 Mike Melanson <melanson@pcisys.net>
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 * Sorenson Vector Quantizer #1 (SVQ1) video codec.
25 * For more information of the SVQ1 algorithm, visit:
26 * http://www.pcisys.net/~melanson/codecs/
32 #include "mpegvideo.h"
35 #include "libavutil/avassert.h"
38 #include "svq1enc_cb.h"
/*
 * Encoder state.
 * NOTE(review): this view of the file is elided — additional members
 * (pb, picture, last_picture, scratchbuf, frame/block dimensions,
 * mb_type, dummy, rd_total, ...) and the closing "} SVQ1Context;" are
 * referenced below but not visible here.
 */
typedef struct SVQ1Context {
    /* Needed for motion estimation; should not be used for anything else.
     * The idea is to make the motion estimation eventually independent of
     * MpegEncContext, so this will be removed then. (FIXME/XXX) */
    MpegEncContext m;
    AVCodecContext *avctx;   // owning codec context, set at init time
    AVFrame current_picture; // reconstructed frame being built this call

    /* One bit writer per block-split level of encode_block(): the SVQ1
     * bitstream stores split blocks in breadth-first order, so each level
     * is buffered separately and concatenated afterwards.
     * "why ooh why this sick breadth first order, everything is slower
     * and more complex" — original author */
    PutBitContext reorder_pb[6];

    /* Y plane block dimensions */

    /* U & V plane (C planes) block dimensions */

    /* Per-plane (Y, U, V) motion vector tables used by the motion
     * estimator, at 8x8 and 16x16 granularity respectively. */
    int16_t (*motion_val8[3])[2];
    int16_t (*motion_val16[3])[2];
/**
 * Write the SVQ1 frame header into s->pb.
 *
 * @param s          encoder context; s->pb must already be initialized
 * @param frame_type AV_PICTURE_TYPE_I or AV_PICTURE_TYPE_P
 *
 * NOTE(review): this view is elided — the opening brace, the declaration
 * of 'i' and the branch handling non-table frame sizes are not visible.
 */
static void svq1_write_header(SVQ1Context *s, int frame_type)
    /* frame code; 0x20 implies no checksum and no embedded string below */
    put_bits(&s->pb, 22, 0x20);

    /* temporal reference (sure hope this is a "don't care") */
    put_bits(&s->pb, 8, 0x00);

    /* frame type: bitstream codes I=0, P=1 (AV_PICTURE_TYPE_* minus 1) */
    put_bits(&s->pb, 2, frame_type - 1);

    if (frame_type == AV_PICTURE_TYPE_I) {
        /* no checksum since frame code is 0x20 */

        /* no embedded string either */

        /* output 5 unknown bits (2 + 2 + 1) */
        put_bits(&s->pb, 5, 2); /* 2 needed by quicktime decoder */

        /* 3-bit index into the standard frame-size table; presumably a
         * non-matching size selects the explicit width/height form below
         * (the conditional is elided in this view — confirm) */
        i= ff_match_2uint16((void*)ff_svq1_frame_size_table, FF_ARRAY_ELEMS(ff_svq1_frame_size_table), s->frame_width, s->frame_height);
        put_bits(&s->pb, 3, i);

        put_bits(&s->pb, 12, s->frame_width);
        put_bits(&s->pb, 12, s->frame_height);

    /* no checksum or extra data (next 2 bits get 0) */
    put_bits(&s->pb, 2, 0);
/* distortion above which a block is considered for further splitting */
#define QUALITY_THRESHOLD    100
/* per-level decay of the split threshold (threshold[l] = 0.6*threshold[l+1]) */
#define THRESHOLD_MULTIPLIER 0.6

/**
 * Recursively encode one block with the SVQ1 multistage vector quantizer.
 *
 * Level 5 is a 16x16 block; each lower level halves one dimension
 * (16x8, 8x8, ... down to 4x2 at level 0).  A block is coded as a mean
 * plus up to 6 stages of 16-entry codebook vectors; if the best score is
 * still above 'threshold' the block may instead be split in two and each
 * half coded at level-1.  Bits go into s->reorder_pb[level].
 *
 * @param src      source pixels for this block
 * @param ref      motion-compensated prediction (inter only)
 * @param decoded  output: reconstruction, exactly as the decoder will see it
 * @param level    block size level, 5 (16x16) .. 0 (4x2)
 * @param lambda   Lagrange multiplier weighting rate against distortion
 * @param intra    nonzero for intra coding (no 'ref' subtraction)
 * @return         rate-distortion score of the chosen encoding
 *
 * NOTE(review): this view is elided — several loop headers, the
 * intra/inter 'if', declarations of size/best_vector/score, and some
 * braces are missing below; comments mark the gaps.
 */
static int encode_block(SVQ1Context *s, uint8_t *src, uint8_t *ref, uint8_t *decoded, int stride, int level, int threshold, int lambda, int intra){
    int count, y, x, i, j, split, best_mean, best_score, best_count;
    int block_sum[7]= {0, 0, 0, 0, 0, 0};   /* DC sum after each stage */
    /* block dimensions derived from the level: 16x16, 16x8, ..., 4x2 */
    int w= 2<<((level+2)>>1);
    int h= 2<<((level+1)>>1);
    int16_t block[7][256];                  /* residual after each stage */
    const int8_t *codebook_sum, *codebook;
    const uint16_t (*mean_vlc)[2];
    const uint8_t (*multistage_vlc)[2];

    //FIXME optimize, this doenst need to be done multiple times

    /* intra path: tables for intra coding; stage-0 residual is the source
     * block itself (enclosing if/else and pixel loops elided in this view) */
        codebook_sum= svq1_intra_codebook_sum[level];
        codebook= ff_svq1_intra_codebooks[level];
        mean_vlc= ff_svq1_intra_mean_vlc;
        multistage_vlc= ff_svq1_intra_multistage_vlc[level];

            int v= src[x + y*stride];
            block[0][x + w*y]= v;

    /* inter path: stage-0 residual is source minus prediction; the inter
     * mean VLC table is biased by 256 so negative means index it directly */
        codebook_sum= svq1_inter_codebook_sum[level];
        codebook= ff_svq1_inter_codebooks[level];
        mean_vlc= ff_svq1_inter_mean_vlc + 256;
        multistage_vlc= ff_svq1_inter_multistage_vlc[level];

            int v= src[x + y*stride] - ref[x + y*stride];
            block[0][x + w*y]= v;

    /* baseline candidate: mean-only coding (count == 0); subtracting the
     * DC energy converts the raw sum of squares into a variance-style
     * distortion ((level+3) is log2 of the pixel count) */
    best_score -= (int)(((unsigned)block_sum[0]*block_sum[0])>>(level+3));
    best_mean= (block_sum[0] + (size>>1)) >> (level+3);

    /* try 1..6 refinement stages; each stage greedily picks the best of
     * 16 codebook vectors against the previous stage's residual */
    for(count=1; count<7; count++){
        int best_vector_score= INT_MAX;
        int best_vector_sum=-999, best_vector_mean=-999; /* sentinels */
        const int stage= count-1;
        const int8_t *vector;

        /* per-vector search loop (header elided in this view) */
            int sum= codebook_sum[stage*16 + i];
            int sqr, diff, score;

            vector = codebook + stage*size*16 + i*size;
            sqr = s->dsp.ssd_int8_vs_int16(vector, block[stage], size);
            /* diff = DC left over after subtracting this vector; it is
             * absorbed by the coded mean, so remove its energy from score */
            diff= block_sum[stage] - sum;
            score= sqr - ((diff*(int64_t)diff)>>(level+3)); //FIXME 64bit slooow
            if(score < best_vector_score){
                int mean= (diff + (size>>1)) >> (level+3);
                av_assert2(mean >-300 && mean<300);
                mean= av_clip(mean, intra?0:-256, 255);
                best_vector_score= score;
                best_vector[stage]= i;
                best_vector_sum= sum;
                best_vector_mean= mean;

        av_assert0(best_vector_mean != -999); /* search must have chosen one */
        /* build the next stage's residual from the winning vector */
        vector= codebook + stage*size*16 + best_vector[stage]*size;
        for(j=0; j<size; j++){
            block[stage+1][j] = block[stage][j] - vector[j];
        block_sum[stage+1]= block_sum[stage] - best_vector_sum;
        /* rate term: split bit + 4 bits per stage index + VLC lengths
         * (NOTE(review): the "best_vector_score +=" this continues is
         * elided in this view) */
                   lambda*(+ 1 + 4*count
                           + multistage_vlc[1+count][1]
                           + mean_vlc[best_vector_mean][1]);

        if(best_vector_score < best_score){
            best_score= best_vector_score;
            best_mean= best_vector_mean;

    /* if still above threshold and not at the smallest size, try coding
     * the block as two halves at level-1; 'offset' picks a horizontal or
     * vertical split depending on level parity */
    if(best_score > threshold && level){
        int offset= (level&1) ? stride*h/2 : w/2;
        PutBitContext backup[6];

        /* snapshot the per-level bit writers so a rejected split can be
         * rolled back */
        for(i=level-1; i>=0; i--){
            backup[i]= s->reorder_pb[i];

        score += encode_block(s, src         , ref         , decoded         , stride, level-1, threshold>>1, lambda, intra);
        score += encode_block(s, src + offset, ref + offset, decoded + offset, stride, level-1, threshold>>1, lambda, intra);

        /* keep the split if cheaper; otherwise restore the writers
         * (the accept/else lines are elided in this view) */
        if(score < best_score){

            for(i=level-1; i>=0; i--){
                s->reorder_pb[i]= backup[i];

    /* levels above 0 carry an explicit split flag (guard elided) */
        put_bits(&s->reorder_pb[level], 1, split);

    av_assert1((best_mean >= 0 && best_mean<256) || !intra);
    av_assert1(best_mean >= -256 && best_mean<256);
    av_assert1(best_count >=0 && best_count<7);
    /* large blocks (levels 4,5) are never vector-coded directly — they
     * are either split or mean-only */
    av_assert1(level<4 || best_count==0);

    /* output the encoding */
    put_bits(&s->reorder_pb[level],
             multistage_vlc[1 + best_count][1],
             multistage_vlc[1 + best_count][0]);
    put_bits(&s->reorder_pb[level], mean_vlc[best_mean][1],
             mean_vlc[best_mean][0]);

    for (i = 0; i < best_count; i++){
        av_assert2(best_vector[i]>=0 && best_vector[i]<16);
        put_bits(&s->reorder_pb[level], 4, best_vector[i]);

    /* reconstruct into 'decoded' exactly as the decoder will: source
     * minus the final-stage residual, plus the coded mean
     * (pixel loops elided in this view) */
        decoded[x + y*stride]= src[x + y*stride] - block[best_count][x + w*y] + best_mean;
/**
 * Encode one plane (Y, U or V) of the current frame.
 *
 * For P frames, first runs MpegEncContext-based motion estimation over
 * the whole plane; then, per 16x16 macroblock, codes up to three
 * candidates (intra, inter+MV, skip), picks the cheapest by
 * rate-distortion score, and copies its reorder buffers into the main
 * bitstream.
 *
 * @param src_plane     input pixels
 * @param ref_plane     previous reconstructed plane (P frames)
 * @param decoded_plane output reconstruction
 * @param stride        stride of ref/decoded planes (also working stride)
 * @return 0 on success, negative on overflow — NOTE(review): the return
 *         statements are elided in this view; confirm against callers.
 *
 * NOTE(review): this view is elided — declarations (threshold[], level,
 * x, y, i, count[][], score init), several loop headers / braces and the
 * buffer-copy loops are missing below; comments mark the gaps.
 */
static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane, unsigned char *ref_plane, unsigned char *decoded_plane,
                             int width, int height, int src_stride, int stride)
    int block_width, block_height;
    /* scratchbuf holds two 16-line strips: [0] is temp space for candidate
     * reconstructions, [stride*16] is the padded source strip */
    uint8_t *src = s->scratchbuf + stride * 16;
    const int lambda= (s->picture.quality*s->picture.quality) >> (2*FF_LAMBDA_SHIFT);

    /* figure out the acceptable level thresholds in advance */
    threshold[5] = QUALITY_THRESHOLD;
    for (level = 4; level >= 0; level--)
        threshold[level] = threshold[level + 1] * THRESHOLD_MULTIPLIER;

    /* macroblock grid, rounded up to whole 16x16 blocks */
    block_width = (width + 15) / 16;
    block_height = (height + 15) / 16;

    /* ---- P frame: wire up the borrowed MpegEncContext and run ME ---- */
    if(s->picture.pict_type == AV_PICTURE_TYPE_P){
        s->m.avctx= s->avctx;
        s->m.current_picture_ptr= &s->m.current_picture;
        s->m.last_picture_ptr = &s->m.last_picture;
        s->m.last_picture.f.data[0] = ref_plane;
        /* all three picture slots share the caller's stride */
        s->m.last_picture.f.linesize[0] =
        s->m.new_picture.f.linesize[0] =
        s->m.current_picture.f.linesize[0] = stride;

        s->m.mb_width= block_width;
        s->m.mb_height= block_height;
        s->m.mb_stride= s->m.mb_width+1;
        s->m.b8_stride= 2*s->m.mb_width+1;

        s->m.pict_type= s->picture.pict_type;
        s->m.me_method= s->avctx->me_method;
        s->m.me.scene_change_score=0;
        s->m.flags= s->avctx->flags;
        // s->m.out_format = FMT_H263;
        // s->m.unrestricted_mv= 1;

        /* derive qscale/lambda2 from the picture-level lambda */
        s->m.lambda= s->picture.quality;
        s->m.qscale= (s->m.lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
        s->m.lambda2= (s->m.lambda*s->m.lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;

        /* lazily allocate this plane's MV tables on first P frame
         * (NOTE(review): results unchecked here) */
        if(!s->motion_val8[plane]){
            s->motion_val8 [plane]= av_mallocz((s->m.b8_stride*block_height*2 + 2)*2*sizeof(int16_t));
            s->motion_val16[plane]= av_mallocz((s->m.mb_stride*(block_height + 2) + 1)*2*sizeof(int16_t));

        s->m.mb_type= s->mb_type;

        //dummies, to avoid segfaults
        s->m.current_picture.mb_mean= (uint8_t *)s->dummy;
        s->m.current_picture.mb_var= (uint16_t*)s->dummy;
        s->m.current_picture.mc_mb_var= (uint16_t*)s->dummy;
        s->m.current_picture.f.mb_type = s->dummy;

        s->m.current_picture.f.motion_val[0] = s->motion_val8[plane] + 2;
        s->m.p_mv_table= s->motion_val16[plane] + s->m.mb_stride + 1;
        s->m.dsp= s->dsp; //move

        s->m.me.dia_size= s->avctx->dia_size;
        s->m.first_slice_line=1;
        /* ME pass: one 16-line padded strip at a time */
        for (y = 0; y < block_height; y++) {
            s->m.new_picture.f.data[0] = src - y*16*stride; //ugly

            /* copy source rows into the strip, replicating the last
             * column rightwards and the last row downwards so partial
             * edge macroblocks see valid pixels */
            for(i=0; i<16 && i + 16*y<height; i++){
                memcpy(&src[i*stride], &src_plane[(i+16*y)*src_stride], width);
                for(x=width; x<16*block_width; x++)
                    src[i*stride+x]= src[i*stride+x-1];

            for(; i<16 && i + 16*y<16*block_height; i++)
                memcpy(&src[i*stride], &src[(i-1)*stride], 16*block_width);

            for (x = 0; x < block_width; x++) {
                ff_init_block_index(&s->m);
                ff_update_block_index(&s->m);

                ff_estimate_p_frame_motion(&s->m, x, y);

            s->m.first_slice_line=0;

        /* clamp/repair long vectors produced by the estimator */
        ff_fix_long_p_mvs(&s->m);
        ff_fix_long_mvs(&s->m, NULL, 0, s->m.p_mv_table, s->m.f_code, CANDIDATE_MB_TYPE_INTER, 0);

    /* ---- encoding pass: same strip construction, then per-MB coding ---- */
    s->m.first_slice_line=1;
    for (y = 0; y < block_height; y++) {
        for(i=0; i<16 && i + 16*y<height; i++){
            memcpy(&src[i*stride], &src_plane[(i+16*y)*src_stride], width);
            for(x=width; x<16*block_width; x++)
                src[i*stride+x]= src[i*stride+x-1];

        for(; i<16 && i + 16*y<16*block_height; i++)
            memcpy(&src[i*stride], &src[(i-1)*stride], 16*block_width);

        for (x = 0; x < block_width; x++) {
            /* candidate bitstreams: [0] intra, [1] inter, [2] unused
             * here(?) — one buffer per reorder level */
            uint8_t reorder_buffer[3][6][7*32];

            int offset = y * 16 * stride + x * 16;
            uint8_t *decoded= decoded_plane + offset;
            uint8_t *ref= ref_plane + offset;
            int score[4]={0,0,0,0}, best;  /* [0]=intra [1]=inter [2]=skip */
            uint8_t *temp = s->scratchbuf;

            if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3000){ //FIXME check size
                av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");

            ff_init_block_index(&s->m);
            ff_update_block_index(&s->m);

            /* --- intra candidate (always for I frames, or when ME
             *     flagged the MB as intra) --- */
            if(s->picture.pict_type == AV_PICTURE_TYPE_I || (s->m.mb_type[x + y*s->m.mb_stride]&CANDIDATE_MB_TYPE_INTRA)){
                init_put_bits(&s->reorder_pb[i], reorder_buffer[0][i], 7*32);

                if(s->picture.pict_type == AV_PICTURE_TYPE_P){
                    /* P frames must explicitly signal an intra block */
                    const uint8_t *vlc= ff_svq1_block_type_vlc[SVQ1_BLOCK_INTRA];
                    put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);
                    score[0]= vlc[1]*lambda;

                score[0]+= encode_block(s, src+16*x, NULL, temp, stride, 5, 64, lambda, 1);

                count[0][i]= put_bits_count(&s->reorder_pb[i]);
                flush_put_bits(&s->reorder_pb[i]);

            /* --- inter + skip candidates (P frames only) --- */
            if(s->picture.pict_type == AV_PICTURE_TYPE_P){
                const uint8_t *vlc= ff_svq1_block_type_vlc[SVQ1_BLOCK_INTER];
                int mx, my, pred_x, pred_y, dxy;

                /* median MV predictor from neighbours */
                motion_ptr= ff_h263_pred_motion(&s->m, 0, 0, &pred_x, &pred_y);
                if(s->m.mb_type[x + y*s->m.mb_stride]&CANDIDATE_MB_TYPE_INTER){
                    init_put_bits(&s->reorder_pb[i], reorder_buffer[1][i], 7*32);

                    put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);

                    /* MV differences are coded through s->m.pb, so swap
                     * the writers around the two encode_motion calls */
                    s->m.pb= s->reorder_pb[5];

                    av_assert1(mx>=-32 && mx<=31);
                    av_assert1(my>=-32 && my<=31);
                    av_assert1(pred_x>=-32 && pred_x<=31);
                    av_assert1(pred_y>=-32 && pred_y<=31);
                    ff_h263_encode_motion(&s->m, mx - pred_x, 1);
                    ff_h263_encode_motion(&s->m, my - pred_y, 1);
                    s->reorder_pb[5]= s->m.pb;
                    score[1] += lambda*put_bits_count(&s->reorder_pb[5]);

                    /* half-pel motion compensation into temp+16 */
                    dxy= (mx&1) + 2*(my&1);

                    s->dsp.put_pixels_tab[0][dxy](temp+16, ref + (mx>>1) + stride*(my>>1), stride, 16);

                    score[1]+= encode_block(s, src+16*x, temp+16, decoded, stride, 5, 64, lambda, 0);
                    best= score[1] <= score[0];

                    /* skip candidate: copy reference as-is; only valid
                     * for a zero MV */
                    vlc= ff_svq1_block_type_vlc[SVQ1_BLOCK_SKIP];
                    score[2]= s->dsp.sse[0](NULL, src+16*x, ref, stride, 16);
                    score[2]+= vlc[1]*lambda;
                    if(score[2] < score[best] && mx==0 && my==0){
                        s->dsp.put_pixels_tab[0][0](decoded, ref, stride, 16);

                        put_bits(&s->pb, vlc[1], vlc[0]);

                    count[1][i]= put_bits_count(&s->reorder_pb[i]);
                    flush_put_bits(&s->reorder_pb[i]);

                /* non-inter MB: zero this MB's vectors in all four 8x8
                 * slots so later prediction reads zeros */
                    motion_ptr[0 ] = motion_ptr[1 ]=
                    motion_ptr[2 ] = motion_ptr[3 ]=
                    motion_ptr[0+2*s->m.b8_stride] = motion_ptr[1+2*s->m.b8_stride]=
                    motion_ptr[2+2*s->m.b8_stride] = motion_ptr[3+2*s->m.b8_stride]=0;

            s->rd_total += score[best];

            /* append the winning candidate's level buffers, in level
             * order, to the real bitstream (loop header elided) */
                avpriv_copy_bits(&s->pb, reorder_buffer[best][i], count[best][i]);

            /* intra chosen on a P frame: commit temp as reconstruction
             * (guard elided in this view) */
                s->dsp.put_pixels_tab[0][0](decoded, temp, stride, 16);

        s->m.first_slice_line=0;
/**
 * Encoder init: record frame/block dimensions and allocate the buffers
 * shared by all frames (ME scratchpad and maps, mb_type, dummy tables).
 *
 * NOTE(review): the av_mallocz() results are not checked before use —
 * an OOM here surfaces later as a crash; confirm whether upstream has
 * since added checks.  The opening brace and return statement are elided
 * in this view.
 */
static av_cold int svq1_encode_init(AVCodecContext *avctx)
    SVQ1Context * const s = avctx->priv_data;

    ff_dsputil_init(&s->dsp, avctx);
    avctx->coded_frame = &s->picture;

    s->frame_width = avctx->width;
    s->frame_height = avctx->height;

    /* luma macroblock grid, rounded up to whole 16x16 blocks */
    s->y_block_width = (s->frame_width + 15) / 16;
    s->y_block_height = (s->frame_height + 15) / 16;

    /* chroma planes are 4x subsampled both ways (YUV410P) */
    s->c_block_width = (s->frame_width / 4 + 15) / 16;
    s->c_block_height = (s->frame_height / 4 + 15) / 16;

    s->m.picture_structure = PICT_FRAME;

    s->m.me.scratchpad= av_mallocz((avctx->width+64)*2*16*2*sizeof(uint8_t));
    s->m.me.map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
    s->m.me.score_map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
    s->mb_type = av_mallocz((s->y_block_width+1)*s->y_block_height*sizeof(int16_t));
    s->dummy = av_mallocz((s->y_block_width+1)*s->y_block_height*sizeof(int32_t));
    ff_h263_encode_init(&s->m); //mv_penalty
500 static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
501 const AVFrame *pict, int *got_packet)
503 SVQ1Context * const s = avctx->priv_data;
504 AVFrame * const p = &s->picture;
508 if ((ret = ff_alloc_packet2(avctx, pkt, s->y_block_width*s->y_block_height*MAX_MB_BYTES*3 + FF_MIN_BUFFER_SIZE) < 0))
511 if(avctx->pix_fmt != PIX_FMT_YUV410P){
512 av_log(avctx, AV_LOG_ERROR, "unsupported pixel format\n");
516 if(!s->current_picture.data[0]){
517 avctx->get_buffer(avctx, &s->current_picture);
518 avctx->get_buffer(avctx, &s->last_picture);
519 s->scratchbuf = av_malloc(s->current_picture.linesize[0] * 16 * 2);
522 temp= s->current_picture;
523 s->current_picture= s->last_picture;
524 s->last_picture= temp;
526 init_put_bits(&s->pb, pkt->data, pkt->size);
529 p->pict_type = avctx->gop_size && avctx->frame_number % avctx->gop_size ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
530 p->key_frame = p->pict_type == AV_PICTURE_TYPE_I;
532 svq1_write_header(s, p->pict_type);
534 if(svq1_encode_plane(s, i,
535 s->picture.data[i], s->last_picture.data[i], s->current_picture.data[i],
536 s->frame_width / (i?4:1), s->frame_height / (i?4:1),
537 s->picture.linesize[i], s->current_picture.linesize[i]) < 0)
541 // avpriv_align_put_bits(&s->pb);
542 while(put_bits_count(&s->pb) & 31)
543 put_bits(&s->pb, 1, 0);
545 flush_put_bits(&s->pb);
547 pkt->size = put_bits_count(&s->pb) / 8;
548 if (p->pict_type == AV_PICTURE_TYPE_I)
549 pkt->flags |= AV_PKT_FLAG_KEY;
/**
 * Free everything allocated by init and by the per-plane/per-frame lazy
 * allocations, and release the two reference frame buffers.
 *
 * NOTE(review): the RD log divides by frame_number — if close is called
 * with zero frames encoded this is a 0.0/0.0; confirm callers guarantee
 * at least one frame.  The opening brace, the motion_val loop header and
 * the return statement are elided in this view.
 */
static av_cold int svq1_encode_end(AVCodecContext *avctx)
    SVQ1Context * const s = avctx->priv_data;

    /* average rate-distortion cost per pixel over the whole sequence */
    av_log(avctx, AV_LOG_DEBUG, "RD: %f\n", s->rd_total/(double)(avctx->width*avctx->height*avctx->frame_number));

    av_freep(&s->m.me.scratchpad);
    av_freep(&s->m.me.map);
    av_freep(&s->m.me.score_map);
    av_freep(&s->mb_type);

    av_freep(&s->scratchbuf);

    /* per-plane motion vector tables (allocated lazily per plane) */
        av_freep(&s->motion_val8[i]);
        av_freep(&s->motion_val16[i]);

    if(s->current_picture.data[0])
        avctx->release_buffer(avctx, &s->current_picture);
    if(s->last_picture.data[0])
        avctx->release_buffer(avctx, &s->last_picture);
/* Codec registration entry.
 * NOTE(review): the .name field and the closing "};" are not visible in
 * this elided view. */
AVCodec ff_svq1_encoder = {
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SVQ1,
    .priv_data_size = sizeof(SVQ1Context),
    .init           = svq1_encode_init,
    .encode2        = svq1_encode_frame,
    .close          = svq1_encode_end,
    /* only 4:1:0 planar input is supported (see pix_fmt check in
     * svq1_encode_frame) */
    .pix_fmts       = (const enum PixelFormat[]){ PIX_FMT_YUV410P, PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),