diff --git a/libavcodec/ffv1.c b/libavcodec/ffv1.c
index 3d52f7ef96685f002847286cbe0967b6b819adaf..e3b71f7bb19d291968a4e7cf1731d54b642416b6 100644
--- a/libavcodec/ffv1.c
+++ b/libavcodec/ffv1.c
@@ -22,7 +22,7 @@
 
 /**
  * @file libavcodec/ffv1.c
- * FF Video Codec 1 (an experimental lossless codec)
+ * FF Video Codec 1 (a lossless codec)
  */
 
 #include "avcodec.h"
@@ -212,7 +212,7 @@ typedef struct FFV1Context{
     int picture_number;
     AVFrame picture;
     int plane_count;
-    int ac;                              ///< 1-> CABAC 0-> golomb rice
+    int ac;                              ///< 1=range coder <-> 0=golomb rice
     PlaneContext plane[MAX_PLANES];
     int16_t quant_table[5][256];
     int run_index;
@@ -264,17 +264,17 @@ static inline void put_symbol_inline(RangeCoder *c, uint8_t *state, int v, int i
         const int e= av_log2(a);
         put_rac(c, state+0, 0);
         if(e<=9){
-        for(i=0; i<e; i++){
-            put_rac(c, state+1+i, 1);  //1..10
-        }
-        put_rac(c, state+1+i, 0);
+            for(i=0; i<e; i++){
+                put_rac(c, state+1+i, 1);  //1..10
+            }
+            put_rac(c, state+1+i, 0);
 
-        for(i=e-1; i>=0; i--){
-            put_rac(c, state+22+i, (a>>i)&1); //22..31
-        }
+            for(i=e-1; i>=0; i--){
+                put_rac(c, state+22+i, (a>>i)&1); //22..31
+            }
 
-        if(is_signed)
-            put_rac(c, state+11 + e, v < 0); //11..21
+            if(is_signed)
+                put_rac(c, state+11 + e, v < 0); //11..21
         }else{
             for(i=0; i<e; i++){
                 put_rac(c, state+1+FFMIN(i,9), 1);  //1..10
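(Aside: the reindented block above is FFV1's exp-Golomb-like symbol coder: a zero flag, a unary-coded exponent e = av_log2(|v|), then the e low mantissa bits and, for signed symbols, a sign bit, each decision using its own context state, with the ranges noted in the comments: 1..10 exponent, 11..21 sign, 22..31 mantissa. The following standalone sketch only illustrates that bit layout; show_symbol() and ilog2() are hypothetical helpers that print the decisions instead of feeding a RangeCoder.)

    /* Hypothetical sketch of the symbol layout used by put_symbol_inline():
     * prints the decision sequence for a signed value instead of coding it. */
    #include <stdio.h>
    #include <stdlib.h>

    static int ilog2(unsigned a){ int e = 0; while (a >>= 1) e++; return e; }

    static void show_symbol(int v, int is_signed)
    {
        if (!v) {                            /* state 0: "is zero" decision */
            printf("zero\n");
            return;
        }
        unsigned a = abs(v);
        int e = ilog2(a);                    /* exponent, as av_log2(a) above */
        printf("nonzero, e=%d: ", e);
        for (int i = 0; i < e; i++)          /* unary exponent, states 1..10 */
            printf("1");
        printf("0 ");                        /* terminating zero of the unary code */
        for (int i = e - 1; i >= 0; i--)     /* mantissa bits, states 22..31 */
            printf("%d", (a >> i) & 1);
        if (is_signed)                       /* sign, state 11 + e (11..21) */
            printf(" sign=%d", v < 0);
        printf("\n");
    }

    int main(void)
    {
        show_symbol(0, 1);
        show_symbol(5, 1);    /* e=2: "110 01 sign=0" */
        show_symbol(-13, 1);  /* e=3: "1110 101 sign=1" */
        return 0;
    }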
@@ -502,10 +502,10 @@ static void encode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride,
         sample[1][ w]= sample[1][w-1];
 //{START_TIMER
         if(s->avctx->bits_per_raw_sample<=8){
-        for(x=0; x<w; x++){
-            sample[0][x]= src[x + stride*y];
-        }
-        encode_line(s, w, sample, plane_index, 8);
+            for(x=0; x<w; x++){
+                sample[0][x]= src[x + stride*y];
+            }
+            encode_line(s, w, sample, plane_index, 8);
         }else{
             for(x=0; x<w; x++){
                 sample[0][x]= ((uint16_t*)(src + stride*y))[x] >> (16 - s->avctx->bits_per_raw_sample);
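(Aside: the >8 bit path above reads each pixel from a 16-bit container and shifts it down by (16 - bits_per_raw_sample), apparently assuming MSB-aligned samples. A tiny hypothetical illustration of that arithmetic:)

    /* Illustration only: a 10-bit value stored left-aligned in a 16-bit word
     * is brought back into its natural [0, 1023] range by the shift. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int bits_per_raw_sample = 10;   /* e.g. 10-bit video in a 16-bit planar format */
        uint16_t packed = 0xABC0;       /* 10-bit value 0x2AF stored in the high bits */
        int sample = packed >> (16 - bits_per_raw_sample);
        printf("%d\n", sample);         /* 687 == 0x2AF */
        return 0;
    }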
@@ -622,17 +622,17 @@ static av_cold int encode_init(AVCodecContext *avctx)
     s->plane_count=2;
     for(i=0; i<256; i++){
         if(avctx->bits_per_raw_sample <=8){
-        s->quant_table[0][i]=           quant11[i];
-        s->quant_table[1][i]=        11*quant11[i];
-        if(avctx->context_model==0){
-            s->quant_table[2][i]=     11*11*quant11[i];
-            s->quant_table[3][i]=
-            s->quant_table[4][i]=0;
-        }else{
-            s->quant_table[2][i]=     11*11*quant5 [i];
-            s->quant_table[3][i]=   5*11*11*quant5 [i];
-            s->quant_table[4][i]= 5*5*11*11*quant5 [i];
-        }
+            s->quant_table[0][i]=           quant11[i];
+            s->quant_table[1][i]=        11*quant11[i];
+            if(avctx->context_model==0){
+                s->quant_table[2][i]=     11*11*quant11[i];
+                s->quant_table[3][i]=
+                s->quant_table[4][i]=0;
+            }else{
+                s->quant_table[2][i]=     11*11*quant5 [i];
+                s->quant_table[3][i]=   5*11*11*quant5 [i];
+                s->quant_table[4][i]= 5*5*11*11*quant5 [i];
+            }
         }else{
             s->quant_table[0][i]=           quant9_10bit[i];
             s->quant_table[1][i]=        11*quant9_10bit[i];
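(Aside: the pre-scaling by 11, and by 5 for the five-difference context model, in the hunk above is what lets the per-difference table lookups simply be summed into a unique context index. A rough standalone sketch of the idea; quantize11() is a hypothetical stand-in with made-up thresholds, not the real quant11[] table:)

    /* Sketch of the mixed-radix context index implied by the 1x/11x/121x scaling. */
    #include <stdio.h>
    #include <stdlib.h>

    /* map a difference to one of 11 levels, -5..5 (stand-in for quant11[]) */
    static int quantize11(int diff)
    {
        int mag = abs(diff);
        int level = mag < 1 ? 0 : mag < 2 ? 1 : mag < 4 ? 2 : mag < 8 ? 3 : mag < 16 ? 4 : 5;
        return diff < 0 ? -level : level;
    }

    int main(void)
    {
        int d0 = 3, d1 = -7, d2 = 0;   /* example neighbour differences */
        int q0 = quantize11(d0), q1 = quantize11(d1), q2 = quantize11(d2);

        /* behaves like quant_table[0][d0] + quant_table[1][d1] + quant_table[2][d2]
         * with the scaling set up in the hunk above */
        int context = q0 + 11 * q1 + 11 * 11 * q2;

        printf("q=(%d,%d,%d) -> context %d\n", q0, q1, q2, context);
        return 0;
    }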
@@ -669,13 +669,12 @@ static av_cold int encode_init(AVCodecContext *avctx)
     case PIX_FMT_YUV444P16:
     case PIX_FMT_YUV422P16:
     case PIX_FMT_YUV420P16:
-        if(avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL){
-            av_log(avctx, AV_LOG_ERROR, "More than 8 bit per component is still experimental and no gurantee is yet made for future compatibility\n"
-               "Use vstrict=-2 / -strict -2 to use it anyway.\n");
+        if(avctx->bits_per_raw_sample <=8){
+            av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample invalid\n");
             return -1;
         }
-        if(avctx->bits_per_raw_sample <=8){
-            av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample inavlid\n");
+        if(!s->ac){
+            av_log(avctx, AV_LOG_ERROR, "bits_per_raw_sample of more than 8 needs -coder 1 currently\n");
             return -1;
         }
         s->version= 1;
@@ -736,7 +735,6 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
     uint8_t keystate=128;
 
     ff_init_range_encoder(c, buf, buf_size);
-//    ff_init_cabac_states(c, ff_h264_lps_range, ff_h264_mps_state, ff_h264_lps_state, 64);
     ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
 
     *p = *pict;
@@ -875,10 +873,10 @@ static void decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride,
 
 //{START_TIMER
         if(s->avctx->bits_per_raw_sample <= 8){
-        decode_line(s, w, sample, plane_index, 8);
-        for(x=0; x<w; x++){
-            src[x + stride*y]= sample[1][x];
-        }
+            decode_line(s, w, sample, plane_index, 8);
+            for(x=0; x<w; x++){
+                src[x + stride*y]= sample[1][x];
+            }
         }else{
             decode_line(s, w, sample, plane_index, s->avctx->bits_per_raw_sample);
             for(x=0; x<w; x++){
@@ -980,16 +978,16 @@ static int read_header(FFV1Context *f){
 
     if(f->colorspace==0){
         if(f->avctx->bits_per_raw_sample<=8){
-        switch(16*f->chroma_h_shift + f->chroma_v_shift){
-        case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P; break;
-        case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P; break;
-        case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P; break;
-        case 0x20: f->avctx->pix_fmt= PIX_FMT_YUV411P; break;
-        case 0x22: f->avctx->pix_fmt= PIX_FMT_YUV410P; break;
-        default:
-            av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
-            return -1;
-        }
+            switch(16*f->chroma_h_shift + f->chroma_v_shift){
+            case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P; break;
+            case 0x10: f->avctx->pix_fmt= PIX_FMT_YUV422P; break;
+            case 0x11: f->avctx->pix_fmt= PIX_FMT_YUV420P; break;
+            case 0x20: f->avctx->pix_fmt= PIX_FMT_YUV411P; break;
+            case 0x22: f->avctx->pix_fmt= PIX_FMT_YUV410P; break;
+            default:
+                av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
+                return -1;
+            }
         }else{
             switch(16*f->chroma_h_shift + f->chroma_v_shift){
             case 0x00: f->avctx->pix_fmt= PIX_FMT_YUV444P16; break;
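(Aside: the switch keys above pack the two chroma subsampling shifts into one value, 16*chroma_h_shift + chroma_v_shift, so the hex digits read directly as (h, v); e.g. 4:2:0 gives 0x11. A trivial hypothetical check:)

    #include <stdio.h>

    int main(void)
    {
        int chroma_h_shift = 1, chroma_v_shift = 1;   /* 4:2:0 */
        printf("key=0x%02X\n", 16 * chroma_h_shift + chroma_v_shift);  /* 0x11 -> PIX_FMT_YUV420P */
        return 0;
    }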
@@ -1136,7 +1134,7 @@ AVCodec ffv1_decoder = {
     decode_frame,
     CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/,
     NULL,
-    .long_name= NULL_IF_CONFIG_SMALL("FFmpeg codec #1"),
+    .long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
 };
 
 #if CONFIG_FFV1_ENCODER
@@ -1148,7 +1146,7 @@ AVCodec ffv1_encoder = {
     encode_init,
     encode_frame,
     common_end,
-    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV444P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_RGB32, PIX_FMT_YUV420P16, PIX_FMT_YUV422P16, PIX_FMT_YUV444P16, PIX_FMT_NONE},
-    .long_name= NULL_IF_CONFIG_SMALL("FFmpeg codec #1"),
+    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV444P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_RGB32, PIX_FMT_YUV420P16, PIX_FMT_YUV422P16, PIX_FMT_YUV444P16, PIX_FMT_NONE},
+    .long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
 };
 #endif