Merge remote-tracking branch 'qatar/master'

diff --git a/libswresample/swresample.c b/libswresample/swresample.c
index 9a4f6a75974907c45092973dd4acbdc96f765e8b..ff09ea4685ac9ebd4a6ab6af08eb1704ad1ad0d9 100644
--- a/libswresample/swresample.c
+++ b/libswresample/swresample.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011 Michael Niedermayer (michaelni@gmx.at)
+ * Copyright (C) 2011-2012 Michael Niedermayer (michaelni@gmx.at)
  *
  * This file is part of libswresample
  *
@@ -24,6 +24,8 @@
 #include "libavutil/avassert.h"
 #include "libavutil/audioconvert.h"
 
+#include <float.h>
+
 #define  C30DB  M_SQRT2
 #define  C15DB  1.189207115
 #define C__0DB  1.0
@@ -32,6 +34,7 @@
 #define C_45DB  0.594603558
 #define C_60DB  0.5
 
+#define ALIGN 32
 
 //TODO split options array out?
 #define OFFSET(x) offsetof(SwrContext,x)
@@ -44,14 +47,14 @@ static const AVOption options[]={
 {"out_channel_count"    , "Output Channel Count"        , OFFSET(out.ch_count   ), AV_OPT_TYPE_INT  , {.dbl=2                     }, 0      , SWR_CH_MAX, PARAM},
 {"uch"                  ,   "Used Channel Count"        , OFFSET(used_ch_count  ), AV_OPT_TYPE_INT  , {.dbl=0                     }, 0      , SWR_CH_MAX, PARAM},
 {"used_channel_count"   ,   "Used Channel Count"        , OFFSET(used_ch_count  ), AV_OPT_TYPE_INT  , {.dbl=0                     }, 0      , SWR_CH_MAX, PARAM},
-{"isr"                  ,  "Input Sample Rate"          , OFFSET( in_sample_rate), AV_OPT_TYPE_INT  , {.dbl=48000                 }, 1      , INT_MAX   , PARAM},
-{"in_sample_rate"       ,  "Input Sample Rate"          , OFFSET( in_sample_rate), AV_OPT_TYPE_INT  , {.dbl=48000                 }, 1      , INT_MAX   , PARAM},
-{"osr"                  , "Output Sample Rate"          , OFFSET(out_sample_rate), AV_OPT_TYPE_INT  , {.dbl=48000                 }, 1      , INT_MAX   , PARAM},
-{"out_sample_rate"      , "Output Sample Rate"          , OFFSET(out_sample_rate), AV_OPT_TYPE_INT  , {.dbl=48000                 }, 1      , INT_MAX   , PARAM},
-{"isf"                  ,    "Input Sample Format"      , OFFSET( in_sample_fmt ), AV_OPT_TYPE_INT  , {.dbl=AV_SAMPLE_FMT_S16     }, 0      , AV_SAMPLE_FMT_NB-1+256, PARAM},
-{"in_sample_fmt"        ,    "Input Sample Format"      , OFFSET( in_sample_fmt ), AV_OPT_TYPE_INT  , {.dbl=AV_SAMPLE_FMT_S16     }, 0      , AV_SAMPLE_FMT_NB-1+256, PARAM},
-{"osf"                  ,   "Output Sample Format"      , OFFSET(out_sample_fmt ), AV_OPT_TYPE_INT  , {.dbl=AV_SAMPLE_FMT_S16     }, 0      , AV_SAMPLE_FMT_NB-1+256, PARAM},
-{"out_sample_fmt"       ,   "Output Sample Format"      , OFFSET(out_sample_fmt ), AV_OPT_TYPE_INT  , {.dbl=AV_SAMPLE_FMT_S16     }, 0      , AV_SAMPLE_FMT_NB-1+256, PARAM},
+{"isr"                  ,  "Input Sample Rate"          , OFFSET( in_sample_rate), AV_OPT_TYPE_INT  , {.dbl=0                     }, 0      , INT_MAX   , PARAM},
+{"in_sample_rate"       ,  "Input Sample Rate"          , OFFSET( in_sample_rate), AV_OPT_TYPE_INT  , {.dbl=0                     }, 0      , INT_MAX   , PARAM},
+{"osr"                  , "Output Sample Rate"          , OFFSET(out_sample_rate), AV_OPT_TYPE_INT  , {.dbl=0                     }, 0      , INT_MAX   , PARAM},
+{"out_sample_rate"      , "Output Sample Rate"          , OFFSET(out_sample_rate), AV_OPT_TYPE_INT  , {.dbl=0                     }, 0      , INT_MAX   , PARAM},
+{"isf"                  ,    "Input Sample Format"      , OFFSET( in_sample_fmt ), AV_OPT_TYPE_INT  , {.dbl=AV_SAMPLE_FMT_NONE    }, -1     , AV_SAMPLE_FMT_NB-1+256, PARAM},
+{"in_sample_fmt"        ,    "Input Sample Format"      , OFFSET( in_sample_fmt ), AV_OPT_TYPE_INT  , {.dbl=AV_SAMPLE_FMT_NONE    }, -1     , AV_SAMPLE_FMT_NB-1+256, PARAM},
+{"osf"                  ,   "Output Sample Format"      , OFFSET(out_sample_fmt ), AV_OPT_TYPE_INT  , {.dbl=AV_SAMPLE_FMT_NONE    }, -1     , AV_SAMPLE_FMT_NB-1+256, PARAM},
+{"out_sample_fmt"       ,   "Output Sample Format"      , OFFSET(out_sample_fmt ), AV_OPT_TYPE_INT  , {.dbl=AV_SAMPLE_FMT_NONE    }, -1     , AV_SAMPLE_FMT_NB-1+256, PARAM},
 {"tsf"                  , "Internal Sample Format"      , OFFSET(int_sample_fmt ), AV_OPT_TYPE_INT  , {.dbl=AV_SAMPLE_FMT_NONE    }, -1     , AV_SAMPLE_FMT_FLTP, PARAM},
 {"internal_sample_fmt"  , "Internal Sample Format"      , OFFSET(int_sample_fmt ), AV_OPT_TYPE_INT  , {.dbl=AV_SAMPLE_FMT_NONE    }, -1     , AV_SAMPLE_FMT_FLTP, PARAM},
 {"icl"                  ,   "Input Channel Layout"      , OFFSET( in_ch_layout  ), AV_OPT_TYPE_INT64, {.dbl=0                     }, 0      , INT64_MAX , PARAM, "channel_layout"},
@@ -62,6 +65,7 @@ static const AVOption options[]={
 {"center_mix_level"     ,    "Center Mix Level"         , OFFSET(clev           ), AV_OPT_TYPE_FLOAT, {.dbl=C_30DB                }, -32    , 32        , PARAM},
 {"slev"                 , "Sourround Mix Level"         , OFFSET(slev           ), AV_OPT_TYPE_FLOAT, {.dbl=C_30DB                }, -32    , 32        , PARAM},
 {"surround_mix_level"   , "Sourround Mix Level"         , OFFSET(slev           ), AV_OPT_TYPE_FLOAT, {.dbl=C_30DB                }, -32    , 32        , PARAM},
+{"lfe_mix_level"        , "LFE Mix Level"               , OFFSET(lfe_mix_level  ), AV_OPT_TYPE_FLOAT, {.dbl=0                     }, -32    , 32        , PARAM},
 {"rmvol"                , "Rematrix Volume"             , OFFSET(rematrix_volume), AV_OPT_TYPE_FLOAT, {.dbl=1.0                   }, -1000  , 1000      , PARAM},
 {"rematrix_volume"      , "Rematrix Volume"             , OFFSET(rematrix_volume), AV_OPT_TYPE_FLOAT, {.dbl=1.0                   }, -1000  , 1000      , PARAM},
 {"flags"                , NULL                          , OFFSET(flags          ), AV_OPT_TYPE_FLAGS, {.dbl=0                     }, 0      , UINT_MAX  , PARAM, "flags"},
@@ -76,6 +80,15 @@ static const AVOption options[]={
 {"phase_shift"          , "Resampling Phase Shift"      , OFFSET(phase_shift)    , AV_OPT_TYPE_INT  , {.dbl=10                    }, 0      , 30        , PARAM },
 {"linear_interp"        , "Use Linear Interpolation"    , OFFSET(linear_interp)  , AV_OPT_TYPE_INT  , {.dbl=0                     }, 0      , 1         , PARAM },
 {"cutoff"               , "Cutoff Frequency Ratio"      , OFFSET(cutoff)         , AV_OPT_TYPE_DOUBLE,{.dbl=0.8                   }, 0      , 1         , PARAM },
+{"min_comp"             , "Minimum difference between timestamps and audio data (in seconds) below which no timestamp compensation of either kind is applied"
+                                                        , OFFSET(min_compensation),AV_OPT_TYPE_FLOAT ,{.dbl=FLT_MAX               }, 0      , FLT_MAX   , PARAM },
+{"min_hard_comp"        , "Minimum difference between timestamps and audio data (in seconds) to trigger padding/trimming the data."
+                                                   , OFFSET(min_hard_compensation),AV_OPT_TYPE_FLOAT ,{.dbl=0.1                   }, 0      , INT_MAX   , PARAM },
+{"comp_duration"        , "Duration (in seconds) over which data is stretched/squeezed to make it match the timestamps."
+                                              , OFFSET(soft_compensation_duration),AV_OPT_TYPE_FLOAT ,{.dbl=1                     }, 0      , INT_MAX   , PARAM },
+{"max_soft_comp"        , "Maximum factor by which data is stretched/squeezed to make it match the timestamps."
+                                                   , OFFSET(max_soft_compensation),AV_OPT_TYPE_FLOAT ,{.dbl=0                     }, INT_MIN, INT_MAX   , PARAM },
+
 {0}
 };
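
As a caller-side illustration (not part of this patch), the four new thresholds can be set through the AVOption names declared above; by default min_comp is FLT_MAX, so no compensation happens until it is lowered. The values below are arbitrary examples.

    /* minimal sketch, assuming <libswresample/swresample.h> and <libavutil/opt.h>;
     * all threshold values here are arbitrary examples, not recommendations */
    struct SwrContext *swr = swr_alloc();
    if (swr) {
        av_opt_set_double(swr, "min_comp",      0.01, 0); /* compensate above 10 ms of drift      */
        av_opt_set_double(swr, "min_hard_comp", 0.1,  0); /* pad/trim above 100 ms of drift       */
        av_opt_set_double(swr, "comp_duration", 1.0,  0); /* spread soft compensation over 1 s    */
        av_opt_set_double(swr, "max_soft_comp", 0.01, 0); /* cap the stretch/squeeze factor at 1% */
        /* formats, rates and channel layouts still have to be set before swr_init() */
    }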
 
@@ -207,16 +220,22 @@ int swr_init(struct SwrContext *s){
         return AVERROR(EINVAL);
     }
 
-    //FIXME should we allow/support using FLT on material that doesnt need it ?
-    if(av_get_planar_sample_fmt(s->in_sample_fmt) <= AV_SAMPLE_FMT_S16P || s->int_sample_fmt==AV_SAMPLE_FMT_S16P){
-        s->int_sample_fmt= AV_SAMPLE_FMT_S16P;
-    }else
-        s->int_sample_fmt= AV_SAMPLE_FMT_FLTP;
+    if(s->int_sample_fmt == AV_SAMPLE_FMT_NONE){
+        if(av_get_planar_sample_fmt(s->in_sample_fmt) <= AV_SAMPLE_FMT_S16P){
+            s->int_sample_fmt= AV_SAMPLE_FMT_S16P;
+        }else if(av_get_planar_sample_fmt(s->in_sample_fmt) <= AV_SAMPLE_FMT_FLTP){
+            s->int_sample_fmt= AV_SAMPLE_FMT_FLTP;
+        }else{
+            av_log(s, AV_LOG_DEBUG, "Using double precision mode\n");
+            s->int_sample_fmt= AV_SAMPLE_FMT_DBLP;
+        }
+    }
 
     if(   s->int_sample_fmt != AV_SAMPLE_FMT_S16P
         &&s->int_sample_fmt != AV_SAMPLE_FMT_S32P
-        &&s->int_sample_fmt != AV_SAMPLE_FMT_FLTP){
-        av_log(s, AV_LOG_ERROR, "Requested sample format %s is not supported internally, S16/S32/FLT is supported\n", av_get_sample_fmt_name(s->int_sample_fmt));
+        &&s->int_sample_fmt != AV_SAMPLE_FMT_FLTP
+        &&s->int_sample_fmt != AV_SAMPLE_FMT_DBLP){
+        av_log(s, AV_LOG_ERROR, "Requested sample format %s is not supported internally, S16/S32/FLT/DBL is supported\n", av_get_sample_fmt_name(s->int_sample_fmt));
         return AVERROR(EINVAL);
     }
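
Illustrative sketch (not part of this patch), assuming an allocated SwrContext *swr and an int ret: with the auto-selection above, double input now picks the new double-precision internal path instead of being forced through fltp.

    av_opt_set_int(swr, "in_sample_fmt",  AV_SAMPLE_FMT_DBL, 0); /* planar double input gives the same result */
    av_opt_set_int(swr, "out_sample_fmt", AV_SAMPLE_FMT_DBL, 0);
    /* ...sample rates and channel layouts as usual... */
    ret = swr_init(swr); /* selects AV_SAMPLE_FMT_DBLP and logs "Using double precision mode" at debug level */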
 
@@ -230,8 +249,9 @@ int swr_init(struct SwrContext *s){
     if(    s->int_sample_fmt != AV_SAMPLE_FMT_S16P
         && s->int_sample_fmt != AV_SAMPLE_FMT_S32P
         && s->int_sample_fmt != AV_SAMPLE_FMT_FLTP
+        && s->int_sample_fmt != AV_SAMPLE_FMT_DBLP
         && s->resample){
-        av_log(s, AV_LOG_ERROR, "Resampling only supported with internal s16/s32/flt\n");
+        av_log(s, AV_LOG_ERROR, "Resampling only supported with internal s16/s32/flt/dbl\n");
         return -1;
     }
 
@@ -314,7 +334,7 @@ av_assert0(s->out.ch_count);
 
     s->dither = s->preout;
 
-    if(s->rematrix)
+    if(s->rematrix || s->dither_method)
         return swri_rematrix_init(s);
 
     return 0;
@@ -324,18 +344,21 @@ static int realloc_audio(AudioData *a, int count){
     int i, countb;
     AudioData old;
 
+    if(count < 0 || count > INT_MAX/2/a->bps/a->ch_count)
+        return AVERROR(EINVAL);
+
     if(a->count >= count)
         return 0;
 
     count*=2;
 
-    countb= FFALIGN(count*a->bps, 32);
+    countb= FFALIGN(count*a->bps, ALIGN);
     old= *a;
 
     av_assert0(a->bps);
     av_assert0(a->ch_count);
 
-    a->data= av_malloc(countb*a->ch_count);
+    a->data= av_mallocz(countb*a->ch_count);
     if(!a->data)
         return AVERROR(ENOMEM);
     for(i=0; i<a->ch_count; i++){
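
The new guard keeps the doubled, aligned allocation size from overflowing int. Below is a standalone sketch of the same arithmetic (not part of this patch), reusing FFALIGN, ALIGN and AVERROR from this file and assuming bps and ch_count are positive, as asserted in realloc_audio():

    static int audio_buffer_bytes(int count, int bps, int ch_count)
    {
        if (count < 0 || count > INT_MAX / 2 / bps / ch_count)
            return AVERROR(EINVAL);               /* 2*count*bps*ch_count would not fit in an int */
        return FFALIGN(2 * count * bps, ALIGN) * ch_count; /* same byte count as countb*ch_count  */
    }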
@@ -364,7 +387,9 @@ static void copy(AudioData *out, AudioData *in,
 
 static void fill_audiodata(AudioData *out, uint8_t *in_arg [SWR_CH_MAX]){
     int i;
-    if(out->planar){
+    if(!in_arg){
+        memset(out->ch, 0, sizeof(out->ch));
+    }else if(out->planar){
         for(i=0; i<out->ch_count; i++)
             out->ch[i]= in_arg[i];
     }else{
@@ -373,6 +398,16 @@ static void fill_audiodata(AudioData *out, uint8_t *in_arg [SWR_CH_MAX]){
     }
 }
 
+static void reversefill_audiodata(AudioData *out, uint8_t *in_arg [SWR_CH_MAX]){
+    int i;
+    if(out->planar){
+        for(i=0; i<out->ch_count; i++)
+            in_arg[i]= out->ch[i];
+    }else{
+        in_arg[0]= out->ch[0];
+    }
+}
+
 /**
  *
  * out may be equal in.
@@ -383,7 +418,7 @@ static void buf_set(AudioData *out, AudioData *in, int count){
         for(ch=0; ch<out->ch_count; ch++)
             out->ch[ch]= in->ch[ch] + count*out->bps;
     }else{
-        for(ch=0; ch<out->ch_count; ch++)
+        for(ch=out->ch_count-1; ch>=0; ch--)
             out->ch[ch]= in->ch[0] + (ch + count*out->ch_count) * out->bps;
     }
 }
@@ -398,6 +433,10 @@ static int resample(SwrContext *s, AudioData *out_param, int out_count,
     int ret_sum=0;
     int border=0;
 
+    av_assert1(s->in_buffer.ch_count == in_param->ch_count);
+    av_assert1(s->in_buffer.planar   == in_param->planar);
+    av_assert1(s->in_buffer.fmt      == in_param->fmt);
+
     tmp=out=*out_param;
     in =  *in_param;
 
@@ -554,8 +593,9 @@ static int swr_convert_internal(struct SwrContext *s, AudioData *out, int out_co
 
             if(s->dither_pos + out_count > s->dither.count)
                 s->dither_pos = 0;
+
             for(ch=0; ch<preout->ch_count; ch++)
-                swri_sum2(s->int_sample_fmt, preout->ch[ch], preout->ch[ch], s->dither.ch[ch] + s->dither.bps * s->dither_pos, 1, 1, out_count);
+                s->mix_2_1_f(preout->ch[ch], preout->ch[ch], s->dither.ch[ch] + s->dither.bps * s->dither_pos, s->native_one, 0, 0, out_count);
 
             s->dither_pos += out_count;
         }
@@ -570,6 +610,28 @@ int swr_convert(struct SwrContext *s, uint8_t *out_arg[SWR_CH_MAX], int out_coun
     AudioData * in= &s->in;
     AudioData *out= &s->out;
 
+    if(s->drop_output > 0){
+        int ret;
+        AudioData tmp = s->out;
+        uint8_t *tmp_arg[SWR_CH_MAX];
+        tmp.count = 0;
+        tmp.data  = NULL;
+        if((ret=realloc_audio(&tmp, s->drop_output))<0)
+            return ret;
+
+        reversefill_audiodata(&tmp, tmp_arg);
+        s->drop_output *= -1; //FIXME find a less hackish solution
+        ret = swr_convert(s, tmp_arg, -s->drop_output, in_arg, in_count); //FIXME optimize but this is as good as never called so maybe it doesnt matter
+        s->drop_output *= -1;
+        if(ret>0)
+            s->drop_output -= ret;
+
+        av_freep(&tmp.data);
+        if(s->drop_output || !out_arg)
+            return 0;
+        in_count = 0;
+    }
+
     if(!in_arg){
         if(s->in_buffer_count){
             if (s->resample && !s->flushed) {
@@ -597,7 +659,10 @@ int swr_convert(struct SwrContext *s, uint8_t *out_arg[SWR_CH_MAX], int out_coun
     fill_audiodata(out, out_arg);
 
     if(s->resample){
-        return swr_convert_internal(s, out, out_count, in, in_count);
+        int ret = swr_convert_internal(s, out, out_count, in, in_count);
+        if(ret>0 && !s->drop_output)
+            s->outpts += ret * (int64_t)s->in_sample_rate;
+        return ret;
     }else{
         AudioData tmp= *in;
         int ret2=0;
@@ -641,12 +706,78 @@ int swr_convert(struct SwrContext *s, uint8_t *out_arg[SWR_CH_MAX], int out_coun
                 ret2 += ret;
             }
             if(in_count){
-                buf_set(&tmp, &s->in_buffer, s->in_buffer_index);
+                buf_set(&tmp, &s->in_buffer, s->in_buffer_index + s->in_buffer_count);
                 copy(&tmp, in, in_count);
                 s->in_buffer_count += in_count;
             }
         }
+        if(ret2>0 && !s->drop_output)
+            s->outpts += ret2 * (int64_t)s->in_sample_rate;
         return ret2;
     }
 }
 
+int swr_drop_output(struct SwrContext *s, int count){
+    s->drop_output += count;
+
+    if(s->drop_output <= 0)
+        return 0;
+
+    av_log(s, AV_LOG_VERBOSE, "discarding %d audio samples\n", count);
+    return swr_convert(s, NULL, s->drop_output, NULL, 0);
+}
+
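
Caller-side sketch (not part of this patch), assuming an initialized SwrContext *swr and a hypothetical priming_samples count: swr_drop_output() queues output samples to be discarded (for example codec priming samples) and realizes the drop through a swr_convert() call with a NULL output array.

    if (swr_drop_output(swr, priming_samples) < 0)
        av_log(NULL, AV_LOG_WARNING, "failed to drop %d priming samples\n", priming_samples);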
+int swr_inject_silence(struct SwrContext *s, int count){
+    int ret, i;
+    AudioData silence = s->out;
+    uint8_t *tmp_arg[SWR_CH_MAX];
+
+    if(count <= 0)
+        return 0;
+
+    silence.count = 0;
+    silence.data  = NULL;
+    if((ret=realloc_audio(&silence, count))<0)
+        return ret;
+
+    if(silence.planar) for(i=0; i<silence.ch_count; i++) {
+        memset(silence.ch[i], silence.bps==1 ? 0x80 : 0, count*silence.bps);
+    } else
+        memset(silence.ch[0], silence.bps==1 ? 0x80 : 0, count*silence.bps*silence.ch_count);
+
+    reversefill_audiodata(&silence, tmp_arg);
+    av_log(s, AV_LOG_VERBOSE, "adding %d audio samples of silence\n", count);
+    ret = swr_convert(s, NULL, 0, (const uint8_t**)tmp_arg, count);
+    av_freep(&silence.data);
+    return ret;
+}
+
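
Caller-side sketch (not part of this patch), assuming an initialized SwrContext *swr: swr_inject_silence() allocates a silent buffer (0x80 for unsigned 8-bit, zeros otherwise) and feeds it through swr_convert(), so the count is in input samples.

    if (swr_inject_silence(swr, 480) < 0) /* e.g. 10 ms at a 48 kHz input rate; the value is only an example */
        av_log(NULL, AV_LOG_WARNING, "failed to inject silence\n");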
+int64_t swr_next_pts(struct SwrContext *s, int64_t pts){
+    if(pts == INT64_MIN)
+        return s->outpts;
+    if(s->min_compensation >= FLT_MAX) {
+        return (s->outpts = pts - swr_get_delay(s, s->in_sample_rate * (int64_t)s->out_sample_rate));
+    } else {
+        int64_t delta = pts - swr_get_delay(s, s->in_sample_rate * (int64_t)s->out_sample_rate) - s->outpts;
+        double fdelta = delta /(double)(s->in_sample_rate * (int64_t)s->out_sample_rate);
+
+        if(fabs(fdelta) > s->min_compensation) {
+            if(!s->outpts || fabs(fdelta) > s->min_hard_compensation){
+                int ret;
+                if(delta > 0) ret = swr_inject_silence(s,  delta / s->out_sample_rate);
+                else          ret = swr_drop_output   (s, -delta / s-> in_sample_rate);
+                if(ret<0){
+                    av_log(s, AV_LOG_ERROR, "Failed to compensate for timestamp delta of %f\n", fdelta);
+                }
+            } else if(s->soft_compensation_duration && s->max_soft_compensation) {
+                int duration = s->out_sample_rate * s->soft_compensation_duration;
+                double max_soft_compensation = s->max_soft_compensation / (s->max_soft_compensation < 0 ? -s->in_sample_rate : 1);
+                int comp = av_clipf(fdelta, -max_soft_compensation, max_soft_compensation) * duration ;
+                av_log(s, AV_LOG_VERBOSE, "compensating audio timestamp drift:%f compensation:%d in:%d\n", fdelta, comp, duration);
+                swr_set_compensation(s, comp, duration);
+            }
+        }
+
+        return s->outpts;
+    }
+}
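
Caller-side sketch (not part of this patch) of the new timestamp hook, using hypothetical caller variables: in_rate, out_rate, next_in_pts counted in input samples, and the usual swr_convert() buffers. swr_next_pts() works in ticks of 1/(in_rate*out_rate) seconds, returns the pts of the next output sample in the same units, and, depending on the min_comp/min_hard_comp/max_soft_comp/comp_duration options above, injects silence, drops output or adjusts the resampling speed; passing INT64_MIN only queries the current value.

    int64_t in_pts      = next_in_pts * (int64_t)out_rate;     /* input-sample ticks -> 1/(in_rate*out_rate) ticks */
    int64_t out_pts     = swr_next_pts(swr, in_pts);           /* may inject silence, drop or soft-compensate      */
    int     out_samples = swr_convert(swr, out_data, max_out_samples,
                                      in_data, in_samples);    /* samples actually written to out_data             */
    int64_t first_out_sample = out_pts / in_rate;              /* back to output-sample (1/out_rate) ticks         */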