git.sesse.net Git - ffmpeg/commitdiff
Merge remote-tracking branch 'qatar/master'
authorMichael Niedermayer <michaelni@gmx.at>
Sat, 3 Dec 2011 01:08:55 +0000 (02:08 +0100)
committerMichael Niedermayer <michaelni@gmx.at>
Sat, 3 Dec 2011 02:00:30 +0000 (03:00 +0100)
* qatar/master:
  aac_latm: reconfigure decoder on audio specific config changes
  latmdec: fix audio specific config parsing
  Add avcodec_decode_audio4().
  avcodec: change number of plane pointers from 4 to 8 at next major bump.
  Update developers documentation with coding conventions.
  svq1dec: avoid undefined get_bits(0) call
  ARM: h264dsp_neon cosmetics
  ARM: make some NEON macros reusable
  Do not memcpy raw video frames when using null muxer
  fate: update asf seektest
  vp8: flush buffers on size changes.
  doc: improve general documentation for MacOSX
  asf: use packet dts as approximation of pts
  asf: do not call av_read_frame
  rtsp: Initialize the media_type_mask in the rtp guessing demuxer
  Cleaned up alacenc.c

Conflicts:
doc/APIchanges
doc/developer.texi
libavcodec/8svx.c
libavcodec/aacdec.c
libavcodec/ac3dec.c
libavcodec/avcodec.h
libavcodec/nellymoserdec.c
libavcodec/tta.c
libavcodec/utils.c
libavcodec/version.h
libavcodec/wmadec.c
libavformat/asfdec.c
tests/ref/seek/lavf_asf

Merged-by: Michael Niedermayer <michaelni@gmx.at>
84 files changed:
avconv.c
doc/APIchanges
doc/developer.texi
doc/general.texi
ffmpeg.c
libavcodec/8svx.c
libavcodec/aac.h
libavcodec/aacdec.c
libavcodec/ac3dec.c
libavcodec/ac3dec.h
libavcodec/adpcm.c
libavcodec/adx.h
libavcodec/adxdec.c
libavcodec/alac.c
libavcodec/alacenc.c
libavcodec/alsdec.c
libavcodec/amrnbdec.c
libavcodec/amrwbdec.c
libavcodec/apedec.c
libavcodec/arm/h264dsp_neon.S
libavcodec/arm/vp8dsp_neon.S
libavcodec/atrac1.c
libavcodec/atrac3.c
libavcodec/avcodec.h
libavcodec/binkaudio.c
libavcodec/cook.c
libavcodec/dca.c
libavcodec/dpcm.c
libavcodec/dsicinav.c
libavcodec/flacdec.c
libavcodec/g726.c
libavcodec/gsmdec.c
libavcodec/gsmdec_data.h
libavcodec/huffyuv.c
libavcodec/imc.c
libavcodec/internal.h
libavcodec/libgsm.c
libavcodec/libopencore-amr.c
libavcodec/libspeexdec.c
libavcodec/mace.c
libavcodec/mlpdec.c
libavcodec/mpc.h
libavcodec/mpc7.c
libavcodec/mpc8.c
libavcodec/mpeg4audio.c
libavcodec/mpeg4audio.h
libavcodec/mpegaudiodec.c
libavcodec/mpegaudiodec_float.c
libavcodec/mpegvideo.c
libavcodec/nellymoserdec.c
libavcodec/pcm.c
libavcodec/qcelpdec.c
libavcodec/qdm2.c
libavcodec/ra144.h
libavcodec/ra144dec.c
libavcodec/ra288.c
libavcodec/s302m.c
libavcodec/shorten.c
libavcodec/sipr.c
libavcodec/smacker.c
libavcodec/svq1dec.c
libavcodec/truespeech.c
libavcodec/tta.c
libavcodec/twinvq.c
libavcodec/utils.c
libavcodec/version.h
libavcodec/vmdav.c
libavcodec/vorbisdec.c
libavcodec/vp3.c
libavcodec/vp8.c
libavcodec/wavpack.c
libavcodec/wma.h
libavcodec/wmadec.c
libavcodec/wmaprodec.c
libavcodec/wmavoice.c
libavcodec/ws-snd1.c
libavformat/adtsenc.c
libavformat/asfdec.c
libavformat/flvdec.c
libavformat/isom.c
libavformat/latmenc.c
libavformat/matroskaenc.c
libavformat/nullenc.c
libavformat/rtsp.c

diff --cc avconv.c
Simple merge
diff --cc doc/APIchanges
index 97a5c0068d6c8222f29d381ff78c893179b74371,2c43e75dba65f74bd4c95e028333202ce17bd212..23e568922c4b80e8ab8bdb618d2009ccd8583bc6
@@@ -13,15 -13,19 +13,28 @@@ libavutil:   2011-04-1
  
  API changes, most recent first:
  
 +2011-xx-xx - xxxxxxx - lavu 51.28.1
 +  Add av_get_alt_sample_fmt() to samplefmt.h.
 +
 +2011-11-03 - 96949da - lavu 51.23.0
 +  Add av_strcasecmp() and av_strncasecmp() to avstring.h.
 +
 +2011-10-20 - b35e9e1 - lavu 51.22.0
 +  Add av_strtok() to avstring.h.
 +
+ 2011-xx-xx - xxxxxxx - lavc 53.25.0
+   Add nb_samples and extended_data fields to AVFrame.
+   Deprecate AVCODEC_MAX_AUDIO_FRAME_SIZE.
+   Deprecate avcodec_decode_audio3() in favor of avcodec_decode_audio4().
+   avcodec_decode_audio4() writes output samples to an AVFrame, which allows
+   audio decoders to use get_buffer().
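As an editorial illustration of the avcodec_decode_audio4() entry above (it is not part of the commit), a caller migrating from avcodec_decode_audio3() might look roughly like the sketch below; the decoder context, packet and output file are assumed to be set up elsewhere:

    #include <stdio.h>
    #include "libavcodec/avcodec.h"

    /* Sketch only: decode one audio packet with the new lavc 53.25 API.
     * "avctx", "pkt" and "out" are assumed to come from the caller
     * (opened decoder, demuxed packet, output file). */
    static int decode_audio_packet(AVCodecContext *avctx, AVPacket *pkt, FILE *out)
    {
        AVFrame frame;
        int got_frame = 0, ret;

        avcodec_get_frame_defaults(&frame);
        ret = avcodec_decode_audio4(avctx, &frame, &got_frame, pkt);
        if (ret < 0)
            return ret;                           /* decoding error */

        if (got_frame) {
            /* frame.nb_samples and frame.extended_data replace the old
             * caller-provided AVCODEC_MAX_AUDIO_FRAME_SIZE buffer */
            int size = av_samples_get_buffer_size(NULL, avctx->channels,
                                                  frame.nb_samples,
                                                  avctx->sample_fmt, 1);
            fwrite(frame.extended_data[0], 1, size, out);
        }
        return ret;                               /* bytes of input consumed */
    }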
+ 2011-xx-xx - xxxxxxx - lavc 53.24.0
+   Change AVFrame.data[4]/base[4]/linesize[4]/error[4] to [8] at next major bump.
+   Change AVPicture.data[4]/linesize[4] to [8] at next major bump.
+   Change AVCodecContext.error[4] to [8] at next major bump.
+   Add AV_NUM_DATA_POINTERS to simplify the bump transition.
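A second editorial sketch, for the lavc 53.24.0 entry above: loops over the plane arrays should be bounded by the new AV_NUM_DATA_POINTERS constant rather than a hard-coded 4, so they keep working when the arrays grow to 8. The helper name is invented:

    #include "libavcodec/avcodec.h"

    /* Reset all plane pointers; AV_NUM_DATA_POINTERS tracks the array size
     * across the 4 -> 8 bump described above. */
    static void clear_planes(AVFrame *frame)
    {
        int i;
        for (i = 0; i < AV_NUM_DATA_POINTERS; i++) {
            frame->data[i]     = NULL;
            frame->linesize[i] = 0;
        }
    }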
  2011-11-23 - bbb46f3 - lavu 51.18.0
    Add av_samples_get_buffer_size(), av_samples_fill_arrays(), and
    av_samples_alloc(), to samplefmt.h.
diff --cc doc/developer.texi
index 2052854a40daf4b5f1044d6f6c68bcb1ffb0c02a,128b46e830d27409e351c99cf7edb2231dabb820..800ca7d0455ceeaf159da6e4c534483e5819cda1
@@@ -53,48 -45,61 +53,26 @@@ and should try to fix issues their comm
  @anchor{Coding Rules}
  @section Coding Rules
  
- FFmpeg is programmed in the ISO C90 language with a few additional
- features from ISO C99, namely:
- @itemize @bullet
- @item
- the @samp{inline} keyword;
- @item
- @samp{//} comments;
- @item
- designated struct initializers (@samp{struct s x = @{ .i = 17 @};})
- @item
- compound literals (@samp{x = (struct s) @{ 17, 23 @};})
- @end itemize
- These features are supported by all compilers we care about, so we will not
- accept patches to remove their use unless they absolutely do not impair
- clarity and performance.
+ @subsection Code formatting conventions
 -The code is written in K&R C style. That means the following:
 -@itemize @bullet
 -@item
 -The control statements are formatted by putting space betwen the statement and parenthesis
 -in the following way:
 -@example
 -for (i = 0; i < filter->input_count; i ++) @{
 -@end example
 -@item
 -The case statement is always located at the same level as the switch itself:
 -@example
 -switch (link->init_state) @{
 -case AVLINK_INIT:
 -    continue;
 -case AVLINK_STARTINIT:
 -    av_log(filter, AV_LOG_INFO, "circular filter chain detected");
 -    return 0;
 -@end example
 -@item
 -Braces in function declarations are written on the new line:
 -@example
 -const char *avfilter_configuration(void)
 -@{
 -    return LIBAV_CONFIGURATION;
 -@}
 -@end example
 -@item
 -In case of a single-statement if, no curly braces are required:
 -@example
 -if (!pic || !picref)
 -    goto fail;
 -@end example
 -@item
 -Do not put spaces immediately inside parenthesis. @samp{if (ret)} is a valid style; @samp{if ( ret )} is not.
 -@end itemize
  
- All code must compile with recent versions of GCC and a number of other
- currently supported compilers. To ensure compatibility, please do not use
- additional C99 features or GCC extensions. Especially watch out for:
+ There are the following guidelines regarding the indentation in files:
  @itemize @bullet
  @item
- mixing statements and declarations;
- @item
- @samp{long long} (use @samp{int64_t} instead);
- @item
- @samp{__attribute__} not protected by @samp{#ifdef __GNUC__} or similar;
- @item
- GCC statement expressions (@samp{(x = (@{ int y = 4; y; @})}).
- @end itemize
  Indent size is 4.
- The presentation is one inspired by 'indent -i4 -kr -nut'.
+ @item
  The TAB character is forbidden outside of Makefiles as is any
  form of trailing whitespace. Commits containing either will be
  rejected by the git repository.
+ @item
+ You should try to limit your code lines to 80 characters; however, do so if and only if this improves readability.
+ @end itemize
+ The presentation is one inspired by 'indent -i4 -kr -nut'.
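As an editorial illustration only (not text from the commit), a small invented function that follows the formatting rules above: 4-space indentation, no TABs, no trailing whitespace, lines well under 80 characters:

    static int count_positive(const int *values, int nb_values)
    {
        int i, n = 0;

        for (i = 0; i < nb_values; i++) {
            if (values[i] > 0)
                n++;
        }
        return n;
    }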
  
 -The main priority in Libav is simplicity and small code size in order to
 +The main priority in FFmpeg is simplicity and small code size in order to
  minimize the bug count.
  
- Comments: Use the JavaDoc/Doxygen
- format (see examples below) so that code documentation
+ @subsection Comments
+ Use the JavaDoc/Doxygen format (see examples below) so that code documentation
  can be generated automatically. All nontrivial functions should have a comment
  above them explaining what the function does, even if it is just one sentence.
  All structures and their member variables should be documented, too.
@@@ -128,11 -133,69 +106,69 @@@ int myfunc(int my_parameter
  ...
  @end example
  
 -Libav is programmed in the ISO C90 language with a few additional
+ @subsection C language features
++FFmpeg is programmed in the ISO C90 language with a few additional
+ features from ISO C99, namely:
+ @itemize @bullet
+ @item
+ the @samp{inline} keyword;
+ @item
+ @samp{//} comments;
+ @item
+ designated struct initializers (@samp{struct s x = @{ .i = 17 @};})
+ @item
+ compound literals (@samp{x = (struct s) @{ 17, 23 @};})
+ @end itemize
+ These features are supported by all compilers we care about, so we will not
+ accept patches to remove their use unless they absolutely do not impair
+ clarity and performance.
+ All code must compile with recent versions of GCC and a number of other
+ currently supported compilers. To ensure compatibility, please do not use
+ additional C99 features or GCC extensions. Especially watch out for:
+ @itemize @bullet
+ @item
+ mixing statements and declarations;
+ @item
+ @samp{long long} (use @samp{int64_t} instead);
+ @item
+ @samp{__attribute__} not protected by @samp{#ifdef __GNUC__} or similar;
+ @item
+ GCC statement expressions (@samp{(x = (@{ int y = 4; y; @})}).
+ @end itemize
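An editorial sketch (invented struct and functions) showing the permitted C99 features listed above while staying clear of the forbidden constructs:

    struct point { int x, y; };

    // '//' comments and the inline keyword are permitted
    static inline struct point mirror(struct point p)
    {
        struct point m = { .x = -p.x, .y = -p.y };   /* designated initializer */
        return m;
    }

    static struct point origin(void)
    {
        /* compound literal; note: no long long (use int64_t instead),
         * no unguarded __attribute__, no GCC statement expressions */
        return (struct point) { 0, 0 };
    }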
+ @subsection Naming conventions
+ All names use underscores (_), not CamelCase. For example, @samp{avfilter_get_video_buffer} is
+ a valid function name and @samp{AVFilterGetVideo} is not. The only exceptions to this are structure names;
+ they should always be in CamelCase.
+ There are the following conventions for naming variables and functions:
+ @itemize @bullet
+ @item
+ For local variables no prefix is required.
+ @item
+ For variables and functions declared as @code{static} no prefixes are required.
+ @item
+ For variables and functions used internally by the library, @code{ff_} prefix should be used.
+ For example, @samp{ff_w64_demuxer}.
+ @item
+ For variables and functions used internally across multiple libraries, use @code{avpriv_}. For example,
+ @samp{avpriv_aac_parse_header}.
+ @item
+ For exported names, each library has its own prefixes. Just check the existing code and name accordingly.
+ @end itemize
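Editorial illustration of the naming rules above; every identifier here is invented:

    #include <stdint.h>
    #include "libavformat/avformat.h"

    /* file-local (static): no prefix required */
    static int parse_header(const uint8_t *buf, int size);

    /* internal to a single library: ff_ prefix */
    int ff_example_read_packet(AVFormatContext *s, AVPacket *pkt);

    /* internal but shared across the libraries: avpriv_ prefix */
    int avpriv_example_parse_config(const uint8_t *buf, int size);

    /* structure names use CamelCase */
    typedef struct ExampleParseContext {
        int frame_size;
    } ExampleParseContext;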
+ @subsection Miscellaneous conventions
+ @itemize @bullet
+ @item
  fprintf and printf are forbidden in libavformat and libavcodec,
  please use av_log() instead.
+ @item
  Casts should be used only when necessary. Unneeded parentheses
  should also be avoided if they don't make the code easier to understand.
+ @end itemize
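For the logging rule above, an editorial sketch of the usual av_log() replacement for fprintf(); the function and message are invented:

    #include "libavcodec/avcodec.h"

    static int check_packet_size(AVCodecContext *avctx, AVPacket *pkt)
    {
        if (pkt->size < 2) {
            av_log(avctx, AV_LOG_ERROR, "packet size %d is too small\n", pkt->size);
            return AVERROR(EINVAL);
        }
        return 0;
    }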
  
  @section Development Policy
  
diff --cc doc/general.texi
index 120b7160c25e3c9c57ae5a91c610c68975a62891,ca9731ef55c222694d02849b05b0163b6487510a..04ca71db916de0205349e17168ec4fbea5c3d75c
@@@ -840,27 -809,32 +840,36 @@@ bash directly to work around this
  bash ./configure
  @end example
  
- @subsection Darwin (MacOS X, iPhone)
+ @anchor{Darwin}
+ @subsection Darwin (OSX, iPhone)
  
- MacOS X on PowerPC or ARM (iPhone) requires a preprocessor from
+ The toolchain provided with Xcode is sufficient to build the basic
+ unaccelerated code.
+ OSX on PowerPC or ARM (iPhone) requires a preprocessor from
  @url{http://github.com/yuvi/gas-preprocessor} to build the optimized
  assembler functions. Just download the Perl script and put it somewhere
 -in your PATH, Libav's configure will pick it up automatically.
 +in your PATH, FFmpeg's configure will pick it up automatically.
  
+ OSX on amd64 and x86 requires @command{yasm} to build most of the
+ optimized assembler functions. @url{http://mxcl.github.com/homebrew/, Homebrew},
+ @url{http://www.gentoo.org/proj/en/gentoo-alt/prefix/bootstrap-macos.xml, Gentoo Prefix}
+ or @url{http://www.macports.org, MacPorts} can easily provide it.
  @section Windows
  
 +To get help and instructions for building FFmpeg under Windows, check out
 +the FFmpeg Windows Help Forum at
 +@url{http://ffmpeg.arrozcru.org/}.
 +
  @subsection Native Windows compilation
  
 -Libav can be built to run natively on Windows using the MinGW tools. Install
 +FFmpeg can be built to run natively on Windows using the MinGW tools. Install
  the latest versions of MSYS and MinGW from @url{http://www.mingw.org/}.
 -You can find detailed installation
 -instructions in the download section and the FAQ.
 +You can find detailed installation instructions in the download
 +section and the FAQ.
  
 -Libav does not build out-of-the-box with the packages the automated MinGW
 +FFmpeg does not build out-of-the-box with the packages the automated MinGW
  installer provides. It also requires coreutils to be installed and many other
  packages updated to the latest version. The minimum version for some packages
  are listed below:
diff --cc ffmpeg.c
index e81936de8cc6c5c9cf9c3a847f5953db49b26b23,c731cec53b4b5d0f8ae0de84ba76fa5a64e3b613..3158bb4874e06fe4ca5951261ad99b39171bf8ad
+++ b/ffmpeg.c
@@@ -1295,12 -1202,12 +1295,13 @@@ static void do_video_out(AVFormatContex
          av_init_packet(&pkt);
          pkt.stream_index= ost->index;
  
--        if (s->oformat->flags & AVFMT_RAWPICTURE) {
++        if (s->oformat->flags & AVFMT_RAWPICTURE &&
++            enc->codec->id == CODEC_ID_RAWVIDEO) {
              /* raw pictures are written as AVPicture structure to
 -               avoid any copies. We support temorarily the older
 +               avoid any copies. We support temporarily the older
                 method. */
 -            AVFrame* old_frame = enc->coded_frame;
 -            enc->coded_frame = dec->coded_frame; //FIXME/XXX remove this hack
 +            enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
 +            enc->coded_frame->top_field_first  = in_picture->top_field_first;
              pkt.data= (uint8_t *)final_picture;
              pkt.size=  sizeof(AVPicture);
              pkt.pts= av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
@@@ -1546,530 -1441,462 +1547,530 @@@ static void print_report(OutputFile *ou
      }
  }
  
 -static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_t size)
 +static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
  {
 -    int fill_char = 0x00;
 -    if (sample_fmt == AV_SAMPLE_FMT_U8)
 -        fill_char = 0x80;
 -    memset(buf, fill_char, size);
 -}
 +    int i, ret;
  
 -/* pkt = NULL means EOF (needed to flush decoder buffers) */
 -static int output_packet(InputStream *ist, int ist_index,
 -                         OutputStream **ost_table, int nb_ostreams,
 -                         const AVPacket *pkt)
 -{
 -    AVFormatContext *os;
 -    OutputStream *ost;
 -    int ret, i;
 -    int got_output;
 -    AVFrame picture;
 -    void *buffer_to_free = NULL;
 -    static unsigned int samples_size= 0;
 -    AVSubtitle subtitle, *subtitle_to_free;
 -    int64_t pkt_pts = AV_NOPTS_VALUE;
 -#if CONFIG_AVFILTER
 -    int frame_available;
 -#endif
 -    float quality;
 +    for (i = 0; i < nb_ostreams; i++) {
 +        OutputStream   *ost = &ost_table[i];
 +        AVCodecContext *enc = ost->st->codec;
 +        AVFormatContext *os = output_files[ost->file_index].ctx;
  
 -    AVPacket avpkt;
 -    int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
 +        if (!ost->encoding_needed)
 +            continue;
  
 -    if(ist->next_pts == AV_NOPTS_VALUE)
 -        ist->next_pts= ist->pts;
 -
 -    if (pkt == NULL) {
 -        /* EOF handling */
 -        av_init_packet(&avpkt);
 -        avpkt.data = NULL;
 -        avpkt.size = 0;
 -        goto handle_eof;
 -    } else {
 -        avpkt = *pkt;
 -    }
 -
 -    if(pkt->dts != AV_NOPTS_VALUE)
 -        ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
 -    if(pkt->pts != AV_NOPTS_VALUE)
 -        pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
 +        if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <=1)
 +            continue;
-         if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE))
++        if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == CODEC_ID_RAWVIDEO)
 +            continue;
  
 -    //while we have more to decode or while the decoder did output something on EOF
 -    while (avpkt.size > 0 || (!pkt && got_output)) {
 -        uint8_t *data_buf, *decoded_data_buf;
 -        int data_size, decoded_data_size;
 -    handle_eof:
 -        ist->pts= ist->next_pts;
 +        for(;;) {
 +            AVPacket pkt;
 +            int fifo_bytes;
 +            av_init_packet(&pkt);
 +            pkt.stream_index= ost->index;
  
 -        if(avpkt.size && avpkt.size != pkt->size &&
 -           ((!ist->showed_multi_packet_warning && verbose>0) || verbose>1)){
 -            fprintf(stderr, "Multiple frames in a packet from stream %d\n", pkt->stream_index);
 -            ist->showed_multi_packet_warning=1;
 -        }
 +            switch (ost->st->codec->codec_type) {
 +            case AVMEDIA_TYPE_AUDIO:
 +                fifo_bytes = av_fifo_size(ost->fifo);
 +                ret = 0;
 +                /* encode any samples remaining in fifo */
 +                if (fifo_bytes > 0) {
 +                    int osize = av_get_bytes_per_sample(enc->sample_fmt);
 +                    int fs_tmp = enc->frame_size;
 +
 +                    av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL);
 +                    if (enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
 +                        enc->frame_size = fifo_bytes / (osize * enc->channels);
 +                    } else { /* pad */
 +                        int frame_bytes = enc->frame_size*osize*enc->channels;
 +                        if (allocated_audio_buf_size < frame_bytes)
 +                            exit_program(1);
 +                        generate_silence(audio_buf+fifo_bytes, enc->sample_fmt, frame_bytes - fifo_bytes);
 +                    }
  
 -        /* decode the packet if needed */
 -        decoded_data_buf = NULL; /* fail safe */
 -        decoded_data_size= 0;
 -        data_buf  = avpkt.data;
 -        data_size = avpkt.size;
 -        subtitle_to_free = NULL;
 -        if (ist->decoding_needed) {
 -            switch(ist->st->codec->codec_type) {
 -            case AVMEDIA_TYPE_AUDIO:{
 -                if(pkt && samples_size < FFMAX(pkt->size*sizeof(*samples), AVCODEC_MAX_AUDIO_FRAME_SIZE)) {
 -                    samples_size = FFMAX(pkt->size*sizeof(*samples), AVCODEC_MAX_AUDIO_FRAME_SIZE);
 -                    av_free(samples);
 -                    samples= av_malloc(samples_size);
 +                    ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, (short *)audio_buf);
 +                    pkt.duration = av_rescale((int64_t)enc->frame_size*ost->st->time_base.den,
 +                                              ost->st->time_base.num, enc->sample_rate);
 +                    enc->frame_size = fs_tmp;
                  }
 -                decoded_data_size= samples_size;
 -                    /* XXX: could avoid copy if PCM 16 bits with same
 -                       endianness as CPU */
 -                ret = avcodec_decode_audio3(ist->st->codec, samples, &decoded_data_size,
 -                                            &avpkt);
 -                if (ret < 0)
 -                    return ret;
 -                avpkt.data += ret;
 -                avpkt.size -= ret;
 -                data_size   = ret;
 -                got_output  = decoded_data_size > 0;
 -                /* Some bug in mpeg audio decoder gives */
 -                /* decoded_data_size < 0, it seems they are overflows */
 -                if (!got_output) {
 -                    /* no audio frame */
 -                    continue;
 +                if (ret <= 0) {
 +                    ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, NULL);
                  }
 -                decoded_data_buf = (uint8_t *)samples;
 -                ist->next_pts += ((int64_t)AV_TIME_BASE/bps * decoded_data_size) /
 -                    (ist->st->codec->sample_rate * ist->st->codec->channels);
 -                break;}
 -            case AVMEDIA_TYPE_VIDEO:
 -                    decoded_data_size = (ist->st->codec->width * ist->st->codec->height * 3) / 2;
 -                    /* XXX: allocate picture correctly */
 -                    avcodec_get_frame_defaults(&picture);
 -                    avpkt.pts = pkt_pts;
 -                    avpkt.dts = ist->pts;
 -                    pkt_pts = AV_NOPTS_VALUE;
 -
 -                    ret = avcodec_decode_video2(ist->st->codec,
 -                                                &picture, &got_output, &avpkt);
 -                    quality = same_quality ? picture.quality : 0;
 -                    if (ret < 0)
 -                        return ret;
 -                    if (!got_output) {
 -                        /* no picture yet */
 -                        goto discard_packet;
 -                    }
 -                    ist->next_pts = ist->pts = guess_correct_pts(&ist->pts_ctx, picture.pkt_pts, picture.pkt_dts);
 -                    if (ist->st->codec->time_base.num != 0) {
 -                        int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
 -                        ist->next_pts += ((int64_t)AV_TIME_BASE *
 -                                          ist->st->codec->time_base.num * ticks) /
 -                            ist->st->codec->time_base.den;
 -                    }
 -                    avpkt.size = 0;
 -                    buffer_to_free = NULL;
 -                    pre_process_video_frame(ist, (AVPicture *)&picture, &buffer_to_free);
 -                    break;
 -            case AVMEDIA_TYPE_SUBTITLE:
 -                ret = avcodec_decode_subtitle2(ist->st->codec,
 -                                               &subtitle, &got_output, &avpkt);
 -                if (ret < 0)
 -                    return ret;
 -                if (!got_output) {
 -                    goto discard_packet;
 +                if (ret < 0) {
 +                    av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
 +                    exit_program(1);
                  }
 -                subtitle_to_free = &subtitle;
 -                avpkt.size = 0;
 -                break;
 -            default:
 -                return -1;
 -            }
 -        } else {
 -            switch(ist->st->codec->codec_type) {
 -            case AVMEDIA_TYPE_AUDIO:
 -                ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
 -                    ist->st->codec->sample_rate;
 +                audio_size += ret;
 +                pkt.flags |= AV_PKT_FLAG_KEY;
                  break;
              case AVMEDIA_TYPE_VIDEO:
 -                if (ist->st->codec->time_base.num != 0) {
 -                    int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
 -                    ist->next_pts += ((int64_t)AV_TIME_BASE *
 -                                      ist->st->codec->time_base.num * ticks) /
 -                        ist->st->codec->time_base.den;
 +                ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
 +                if (ret < 0) {
 +                    av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
 +                    exit_program(1);
 +                }
 +                video_size += ret;
 +                if(enc->coded_frame && enc->coded_frame->key_frame)
 +                    pkt.flags |= AV_PKT_FLAG_KEY;
 +                if (ost->logfile && enc->stats_out) {
 +                    fprintf(ost->logfile, "%s", enc->stats_out);
                  }
                  break;
 +            default:
 +                ret=-1;
              }
 -            ret = avpkt.size;
 -            avpkt.size = 0;
 +
 +            if (ret <= 0)
 +                break;
 +            pkt.data = bit_buffer;
 +            pkt.size = ret;
 +            if (enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
 +                pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
 +            write_frame(os, &pkt, ost->st->codec, ost->bitstream_filters);
          }
 +    }
 +}
  
 -#if CONFIG_AVFILTER
 -        if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
 -            for (i = 0; i < nb_ostreams; i++) {
 -                ost = ost_table[i];
 -                if (ost->input_video_filter && ost->source_index == ist_index) {
 -                    AVRational sar;
 -                    if (ist->st->sample_aspect_ratio.num)
 -                        sar = ist->st->sample_aspect_ratio;
 -                    else
 -                        sar = ist->st->codec->sample_aspect_ratio;
 -                    // add it to be filtered
 -                    av_vsrc_buffer_add_frame(ost->input_video_filter, &picture,
 -                                             ist->pts,
 -                                             sar);
 -                }
 +/*
 + * Check whether a packet from ist should be written into ost at this time
 + */
 +static int check_output_constraints(InputStream *ist, OutputStream *ost)
 +{
 +    OutputFile *of = &output_files[ost->file_index];
 +    int ist_index  = ist - input_streams;
 +
 +    if (ost->source_index != ist_index)
 +        return 0;
 +
 +    if (of->start_time && ist->pts < of->start_time)
 +        return 0;
 +
 +    if (of->recording_time != INT64_MAX &&
 +        av_compare_ts(ist->pts, AV_TIME_BASE_Q, of->recording_time + of->start_time,
 +                      (AVRational){1, 1000000}) >= 0) {
 +        ost->is_past_recording_time = 1;
 +        return 0;
 +    }
 +
 +    return 1;
 +}
 +
 +static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
 +{
 +    OutputFile *of = &output_files[ost->file_index];
 +    int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
 +    AVPicture pict;
 +    AVPacket opkt;
 +
 +    av_init_packet(&opkt);
 +
 +    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
 +        !ost->copy_initial_nonkeyframes)
 +        return;
 +
 +    /* force the input stream PTS */
 +    if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
 +        audio_size += pkt->size;
 +    else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
 +        video_size += pkt->size;
 +        ost->sync_opts++;
 +    }
 +
 +    opkt.stream_index = ost->index;
 +    if (pkt->pts != AV_NOPTS_VALUE)
 +        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
 +    else
 +        opkt.pts = AV_NOPTS_VALUE;
 +
 +    if (pkt->dts == AV_NOPTS_VALUE)
 +        opkt.dts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->st->time_base);
 +    else
 +        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
 +    opkt.dts -= ost_tb_start_time;
 +
 +    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
 +    opkt.flags    = pkt->flags;
 +
 +    //FIXME remove the following 2 lines they shall be replaced by the bitstream filters
 +    if(   ost->st->codec->codec_id != CODEC_ID_H264
 +       && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
 +       && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
 +       ) {
 +        if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
 +            opkt.destruct = av_destruct_packet;
 +    } else {
 +        opkt.data = pkt->data;
 +        opkt.size = pkt->size;
 +    }
 +    if (of->ctx->oformat->flags & AVFMT_RAWPICTURE) {
 +        /* store AVPicture in AVPacket, as expected by the output format */
 +        avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
 +        opkt.data = (uint8_t *)&pict;
 +        opkt.size = sizeof(AVPicture);
 +        opkt.flags |= AV_PKT_FLAG_KEY;
 +    }
 +
 +    write_frame(of->ctx, &opkt, ost->st->codec, ost->bitstream_filters);
 +    ost->st->codec->frame_number++;
 +    ost->frame_number++;
 +    av_free_packet(&opkt);
 +}
 +
 +static void rate_emu_sleep(InputStream *ist)
 +{
 +    if (input_files[ist->file_index].rate_emu) {
 +        int64_t pts = av_rescale(ist->pts, 1000000, AV_TIME_BASE);
 +        int64_t now = av_gettime() - ist->start;
 +        if (pts > now)
 +            usleep(pts - now);
 +    }
 +}
 +
 +static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
 +{
 +    static unsigned int samples_size = 0;
 +    int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
 +    uint8_t *decoded_data_buf  = NULL;
 +    int      decoded_data_size = 0;
 +    int i, ret;
 +
 +    if (pkt && samples_size < FFMAX(pkt->size * bps, AVCODEC_MAX_AUDIO_FRAME_SIZE)) {
 +        av_free(samples);
 +        samples_size = FFMAX(pkt->size * bps, AVCODEC_MAX_AUDIO_FRAME_SIZE);
 +        samples      = av_malloc(samples_size);
 +    }
 +    decoded_data_size = samples_size;
 +
 +    ret = avcodec_decode_audio3(ist->st->codec, samples, &decoded_data_size,
 +                                pkt);
 +    if (ret < 0)
 +        return ret;
 +    *got_output  = decoded_data_size > 0;
 +
 +    /* Some bug in mpeg audio decoder gives */
 +    /* decoded_data_size < 0, it seems they are overflows */
 +    if (!*got_output) {
 +        /* no audio frame */
 +        return ret;
 +    }
 +
 +    decoded_data_buf = (uint8_t *)samples;
 +    ist->next_pts   += ((int64_t)AV_TIME_BASE/bps * decoded_data_size) /
 +                       (ist->st->codec->sample_rate * ist->st->codec->channels);
 +
 +    // preprocess audio (volume)
 +    if (audio_volume != 256) {
 +        switch (ist->st->codec->sample_fmt) {
 +        case AV_SAMPLE_FMT_U8:
 +        {
 +            uint8_t *volp = samples;
 +            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
 +                int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
 +                *volp++ = av_clip_uint8(v);
              }
 +            break;
          }
 -#endif
 -
 -        // preprocess audio (volume)
 -        if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
 -            if (audio_volume != 256) {
 -                short *volp;
 -                volp = samples;
 -                for(i=0;i<(decoded_data_size / sizeof(short));i++) {
 -                    int v = ((*volp) * audio_volume + 128) >> 8;
 -                    if (v < -32768) v = -32768;
 -                    if (v >  32767) v = 32767;
 -                    *volp++ = v;
 -                }
 +        case AV_SAMPLE_FMT_S16:
 +        {
 +            int16_t *volp = samples;
 +            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
 +                int v = ((*volp) * audio_volume + 128) >> 8;
 +                *volp++ = av_clip_int16(v);
              }
 +            break;
          }
 -
 -        /* frame rate emulation */
 -        if (rate_emu) {
 -            int64_t pts = av_rescale(ist->pts, 1000000, AV_TIME_BASE);
 -            int64_t now = av_gettime() - ist->start;
 -            if (pts > now)
 -                usleep(pts - now);
 +        case AV_SAMPLE_FMT_S32:
 +        {
 +            int32_t *volp = samples;
 +            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
 +                int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
 +                *volp++ = av_clipl_int32(v);
 +            }
 +            break;
          }
 -        /* if output time reached then transcode raw format,
 -           encode packets and output them */
 -        if (start_time == 0 || ist->pts >= start_time)
 -            for(i=0;i<nb_ostreams;i++) {
 -                int frame_size;
 -
 -                ost = ost_table[i];
 -                if (ost->source_index == ist_index) {
 -#if CONFIG_AVFILTER
 -                frame_available = ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO ||
 -                    !ost->output_video_filter || avfilter_poll_frame(ost->output_video_filter->inputs[0]);
 -                while (frame_available) {
 -                    AVRational ist_pts_tb;
 -                    if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ost->output_video_filter)
 -                        get_filtered_video_frame(ost->output_video_filter, &picture, &ost->picref, &ist_pts_tb);
 -                    if (ost->picref)
 -                        ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
 -#endif
 -                    os = output_files[ost->file_index];
 -
 -                    /* set the input output pts pairs */
 -                    //ost->sync_ipts = (double)(ist->pts + input_files[ist->file_index].ts_offset - start_time)/ AV_TIME_BASE;
 -
 -                    if (ost->encoding_needed) {
 -                        av_assert0(ist->decoding_needed);
 -                        switch(ost->st->codec->codec_type) {
 -                        case AVMEDIA_TYPE_AUDIO:
 -                            do_audio_out(os, ost, ist, decoded_data_buf, decoded_data_size);
 -                            break;
 -                        case AVMEDIA_TYPE_VIDEO:
 +        case AV_SAMPLE_FMT_FLT:
 +        {
 +            float *volp = samples;
 +            float scale = audio_volume / 256.f;
 +            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
 +                *volp++ *= scale;
 +            }
 +            break;
 +        }
 +        case AV_SAMPLE_FMT_DBL:
 +        {
 +            double *volp = samples;
 +            double scale = audio_volume / 256.;
 +            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
 +                *volp++ *= scale;
 +            }
 +            break;
 +        }
 +        default:
 +            av_log(NULL, AV_LOG_FATAL,
 +                   "Audio volume adjustment on sample format %s is not supported.\n",
 +                   av_get_sample_fmt_name(ist->st->codec->sample_fmt));
 +            exit_program(1);
 +        }
 +    }
 +
 +    rate_emu_sleep(ist);
 +
 +    for (i = 0; i < nb_output_streams; i++) {
 +        OutputStream *ost = &output_streams[i];
 +
 +        if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
 +            continue;
 +        do_audio_out(output_files[ost->file_index].ctx, ost, ist,
 +                     decoded_data_buf, decoded_data_size);
 +    }
 +    return ret;
 +}
 +
 +static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *pkt_pts, int64_t *pkt_dts)
 +{
 +    AVFrame *decoded_frame, *filtered_frame = NULL;
 +    void *buffer_to_free = NULL;
 +    int i, ret = 0;
 +    float quality = 0;
  #if CONFIG_AVFILTER
 -                            if (ost->picref->video && !ost->frame_aspect_ratio)
 -                                ost->st->codec->sample_aspect_ratio = ost->picref->video->pixel_aspect;
 +    int frame_available = 1;
  #endif
 -                            do_video_out(os, ost, ist, &picture, &frame_size,
 -                                         same_quality ? quality : ost->st->codec->global_quality);
 -                            if (vstats_filename && frame_size)
 -                                do_video_stats(os, ost, frame_size);
 -                            break;
 -                        case AVMEDIA_TYPE_SUBTITLE:
 -                            do_subtitle_out(os, ost, ist, &subtitle,
 -                                            pkt->pts);
 -                            break;
 -                        default:
 -                            abort();
 -                        }
 -                    } else {
 -                        AVFrame avframe; //FIXME/XXX remove this
 -                        AVPacket opkt;
 -                        int64_t ost_tb_start_time= av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
 +    int duration=0;
 +
 +    if (!(decoded_frame = avcodec_alloc_frame()))
 +        return AVERROR(ENOMEM);
 +    pkt->pts  = *pkt_pts;
 +    pkt->dts  = *pkt_dts;
 +    *pkt_pts  = AV_NOPTS_VALUE;
 +
 +    if (pkt->duration) {
 +        duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
 +    } else if(ist->st->codec->time_base.num != 0) {
 +        int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
 +        duration = ((int64_t)AV_TIME_BASE *
 +                          ist->st->codec->time_base.num * ticks) /
 +                          ist->st->codec->time_base.den;
 +    }
  
 -                        av_init_packet(&opkt);
 +    if(*pkt_dts != AV_NOPTS_VALUE && duration) {
 +        *pkt_dts += duration;
 +    }else
 +        *pkt_dts = AV_NOPTS_VALUE;
  
 -                        if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) && !copy_initial_nonkeyframes)
 -#if !CONFIG_AVFILTER
 -                            continue;
 -#else
 -                            goto cont;
 -#endif
 +    ret = avcodec_decode_video2(ist->st->codec,
 +                                decoded_frame, got_output, pkt);
 +    if (ret < 0)
 +        goto fail;
  
 -                        /* no reencoding needed : output the packet directly */
 -                        /* force the input stream PTS */
 +    quality = same_quant ? decoded_frame->quality : 0;
 +    if (!*got_output) {
 +        /* no picture yet */
 +        av_freep(&decoded_frame);
 +        return ret;
 +    }
  
 -                        avcodec_get_frame_defaults(&avframe);
 -                        ost->st->codec->coded_frame= &avframe;
 -                        avframe.key_frame = pkt->flags & AV_PKT_FLAG_KEY;
 +    if(decoded_frame->best_effort_timestamp != AV_NOPTS_VALUE)
 +        ist->next_pts = ist->pts = decoded_frame->best_effort_timestamp;
  
 -                        if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
 -                            audio_size += data_size;
 -                        else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
 -                            video_size += data_size;
 -                            ost->sync_opts++;
 -                        }
 +    ist->next_pts += duration;
 +    pkt->size = 0;
  
 -                        opkt.stream_index= ost->index;
 -                        if(pkt->pts != AV_NOPTS_VALUE)
 -                            opkt.pts= av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
 -                        else
 -                            opkt.pts= AV_NOPTS_VALUE;
 -
 -                        if (pkt->dts == AV_NOPTS_VALUE)
 -                            opkt.dts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->st->time_base);
 -                        else
 -                            opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
 -                        opkt.dts -= ost_tb_start_time;
 -
 -                        opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
 -                        opkt.flags= pkt->flags;
 -
 -                        //FIXME remove the following 2 lines they shall be replaced by the bitstream filters
 -                        if(   ost->st->codec->codec_id != CODEC_ID_H264
 -                           && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
 -                           && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
 -                           ) {
 -                            if(av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, data_buf, data_size, pkt->flags & AV_PKT_FLAG_KEY))
 -                                opkt.destruct= av_destruct_packet;
 -                        } else {
 -                            opkt.data = data_buf;
 -                            opkt.size = data_size;
 -                        }
 +    pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
  
 -                        write_frame(os, &opkt, ost->st->codec, ost->bitstream_filters);
 -                        ost->st->codec->frame_number++;
 -                        ost->frame_number++;
 -                        av_free_packet(&opkt);
 -                    }
  #if CONFIG_AVFILTER
 -                    cont:
 -                    frame_available = (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) &&
 -                                       ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]);
 -                    if (ost->picref)
 -                        avfilter_unref_buffer(ost->picref);
 -                }
 -#endif
 -                }
 -            }
 -
 -        av_free(buffer_to_free);
 -        /* XXX: allocate the subtitles in the codec ? */
 -        if (subtitle_to_free) {
 -            avsubtitle_free(subtitle_to_free);
 -            subtitle_to_free = NULL;
 +    for(i=0;i<nb_output_streams;i++) {
 +        OutputStream *ost = ost = &output_streams[i];
 +        if(check_output_constraints(ist, ost)){
 +            if (!decoded_frame->sample_aspect_ratio.num)
 +                decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
 +            decoded_frame->pts = ist->pts;
 +
 +            av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame, AV_VSRC_BUF_FLAG_OVERWRITE);
          }
      }
 - discard_packet:
 -    if (pkt == NULL) {
 -        /* EOF handling */
 +#endif
  
 -        for(i=0;i<nb_ostreams;i++) {
 -            ost = ost_table[i];
 -            if (ost->source_index == ist_index) {
 -                AVCodecContext *enc= ost->st->codec;
 -                os = output_files[ost->file_index];
 -
 -                if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <=1)
 -                    continue;
 -                if(ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE))
 -                    continue;
 -
 -                if (ost->encoding_needed) {
 -                    for(;;) {
 -                        AVPacket pkt;
 -                        int fifo_bytes;
 -                        av_init_packet(&pkt);
 -                        pkt.stream_index= ost->index;
 -
 -                        switch(ost->st->codec->codec_type) {
 -                        case AVMEDIA_TYPE_AUDIO:
 -                            fifo_bytes = av_fifo_size(ost->fifo);
 -                            ret = 0;
 -                            /* encode any samples remaining in fifo */
 -                            if (fifo_bytes > 0) {
 -                                int osize = av_get_bytes_per_sample(enc->sample_fmt);
 -                                int fs_tmp = enc->frame_size;
 -
 -                                av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL);
 -                                if (enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
 -                                    enc->frame_size = fifo_bytes / (osize * enc->channels);
 -                                } else { /* pad */
 -                                    int frame_bytes = enc->frame_size*osize*enc->channels;
 -                                    if (allocated_audio_buf_size < frame_bytes)
 -                                        exit_program(1);
 -                                    generate_silence(audio_buf+fifo_bytes, enc->sample_fmt, frame_bytes - fifo_bytes);
 -                                }
 -
 -                                ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, (short *)audio_buf);
 -                                pkt.duration = av_rescale((int64_t)enc->frame_size*ost->st->time_base.den,
 -                                                          ost->st->time_base.num, enc->sample_rate);
 -                                enc->frame_size = fs_tmp;
 -                            }
 -                            if(ret <= 0) {
 -                                ret = avcodec_encode_audio(enc, bit_buffer, bit_buffer_size, NULL);
 -                            }
 -                            if (ret < 0) {
 -                                fprintf(stderr, "Audio encoding failed\n");
 -                                exit_program(1);
 -                            }
 -                            audio_size += ret;
 -                            pkt.flags |= AV_PKT_FLAG_KEY;
 -                            break;
 -                        case AVMEDIA_TYPE_VIDEO:
 -                            ret = avcodec_encode_video(enc, bit_buffer, bit_buffer_size, NULL);
 -                            if (ret < 0) {
 -                                fprintf(stderr, "Video encoding failed\n");
 -                                exit_program(1);
 -                            }
 -                            video_size += ret;
 -                            if(enc->coded_frame && enc->coded_frame->key_frame)
 -                                pkt.flags |= AV_PKT_FLAG_KEY;
 -                            if (ost->logfile && enc->stats_out) {
 -                                fprintf(ost->logfile, "%s", enc->stats_out);
 -                            }
 -                            break;
 -                        default:
 -                            ret=-1;
 -                        }
 +    rate_emu_sleep(ist);
  
 -                        if(ret<=0)
 -                            break;
 -                        pkt.data= bit_buffer;
 -                        pkt.size= ret;
 -                        if(enc->coded_frame && enc->coded_frame->pts != AV_NOPTS_VALUE)
 -                            pkt.pts= av_rescale_q(enc->coded_frame->pts, enc->time_base, ost->st->time_base);
 -                        write_frame(os, &pkt, ost->st->codec, ost->bitstream_filters);
 -                    }
 +    for (i = 0; i < nb_output_streams; i++) {
 +        OutputStream *ost = &output_streams[i];
 +        int frame_size;
 +
 +        if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
 +            continue;
 +
 +#if CONFIG_AVFILTER
 +        if (ost->input_video_filter) {
 +            frame_available = avfilter_poll_frame(ost->output_video_filter->inputs[0]);
 +        }
 +        while (frame_available) {
 +            if (ost->output_video_filter) {
 +                AVRational ist_pts_tb = ost->output_video_filter->inputs[0]->time_base;
 +                if (av_buffersink_get_buffer_ref(ost->output_video_filter, &ost->picref, 0) < 0)
 +                    goto cont;
 +                if (!filtered_frame && !(filtered_frame = avcodec_alloc_frame())) {
 +                    ret = AVERROR(ENOMEM);
 +                    goto fail;
 +                }
 +                *filtered_frame= *decoded_frame; //for me_threshold
 +                if (ost->picref) {
 +                    avfilter_fill_frame_from_video_buffer_ref(filtered_frame, ost->picref);
 +                    ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
                  }
              }
 +            if (ost->picref->video && !ost->frame_aspect_ratio)
 +                ost->st->codec->sample_aspect_ratio = ost->picref->video->sample_aspect_ratio;
 +#else
 +            filtered_frame = decoded_frame;
 +#endif
 +
 +            do_video_out(output_files[ost->file_index].ctx, ost, ist, filtered_frame, &frame_size,
 +                         same_quant ? quality : ost->st->codec->global_quality);
 +            if (vstats_filename && frame_size)
 +                do_video_stats(output_files[ost->file_index].ctx, ost, frame_size);
 +#if CONFIG_AVFILTER
 +            cont:
 +            frame_available = ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]);
 +            avfilter_unref_buffer(ost->picref);
          }
 +        av_freep(&filtered_frame);
 +#endif
      }
  
 -    return 0;
 +fail:
 +    av_free(buffer_to_free);
 +    av_freep(&decoded_frame);
 +    return ret;
  }
  
 -static void print_sdp(AVFormatContext **avc, int n)
 +static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
  {
 -    char sdp[2048];
 +    AVSubtitle subtitle;
 +    int i, ret = avcodec_decode_subtitle2(ist->st->codec,
 +                                          &subtitle, got_output, pkt);
 +    if (ret < 0)
 +        return ret;
 +    if (!*got_output)
 +        return ret;
  
 -    av_sdp_create(avc, n, sdp, sizeof(sdp));
 -    printf("SDP:\n%s\n", sdp);
 -    fflush(stdout);
 +    rate_emu_sleep(ist);
 +
 +    for (i = 0; i < nb_output_streams; i++) {
 +        OutputStream *ost = &output_streams[i];
 +
 +        if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
 +            continue;
 +
 +        do_subtitle_out(output_files[ost->file_index].ctx, ost, ist, &subtitle, pkt->pts);
 +    }
 +
 +    avsubtitle_free(&subtitle);
 +    return ret;
  }
  
 -static int copy_chapters(int infile, int outfile)
 +/* pkt = NULL means EOF (needed to flush decoder buffers) */
 +static int output_packet(InputStream *ist,
 +                         OutputStream *ost_table, int nb_ostreams,
 +                         const AVPacket *pkt)
  {
 -    AVFormatContext *is = input_files[infile].ctx;
 -    AVFormatContext *os = output_files[outfile];
 -    int i;
 +    int ret = 0, i;
 +    int got_output;
 +    int64_t pkt_dts = AV_NOPTS_VALUE;
 +    int64_t pkt_pts = AV_NOPTS_VALUE;
  
 -    for (i = 0; i < is->nb_chapters; i++) {
 -        AVChapter *in_ch = is->chapters[i], *out_ch;
 -        int64_t ts_off   = av_rescale_q(start_time - input_files[infile].ts_offset,
 -                                      AV_TIME_BASE_Q, in_ch->time_base);
 -        int64_t rt       = (recording_time == INT64_MAX) ? INT64_MAX :
 -                           av_rescale_q(recording_time, AV_TIME_BASE_Q, in_ch->time_base);
 +    AVPacket avpkt;
  
 +    if (ist->next_pts == AV_NOPTS_VALUE)
 +        ist->next_pts = ist->pts;
  
 -        if (in_ch->end < ts_off)
 -            continue;
 -        if (rt != INT64_MAX && in_ch->start > rt + ts_off)
 +    if (pkt == NULL) {
 +        /* EOF handling */
 +        av_init_packet(&avpkt);
 +        avpkt.data = NULL;
 +        avpkt.size = 0;
 +        goto handle_eof;
 +    } else {
 +        avpkt = *pkt;
 +    }
 +
 +    if(pkt->dts != AV_NOPTS_VALUE){
 +        if(ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
 +            ist->next_pts = ist->pts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
 +        pkt_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
 +    }
 +    if(pkt->pts != AV_NOPTS_VALUE)
 +        pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
 +
 +    //while we have more to decode or while the decoder did output something on EOF
 +    while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
 +    handle_eof:
 +
 +        ist->pts = ist->next_pts;
 +
 +        if (avpkt.size && avpkt.size != pkt->size) {
 +            av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
 +                   "Multiple frames in a packet from stream %d\n", pkt->stream_index);
 +            ist->showed_multi_packet_warning = 1;
 +        }
 +
 +        switch(ist->st->codec->codec_type) {
 +        case AVMEDIA_TYPE_AUDIO:
 +            ret = transcode_audio    (ist, &avpkt, &got_output);
 +            break;
 +        case AVMEDIA_TYPE_VIDEO:
 +            ret = transcode_video    (ist, &avpkt, &got_output, &pkt_pts, &pkt_dts);
              break;
 +        case AVMEDIA_TYPE_SUBTITLE:
 +            ret = transcode_subtitles(ist, &avpkt, &got_output);
 +            break;
 +        default:
 +            return -1;
 +        }
  
 -        out_ch = av_mallocz(sizeof(AVChapter));
 -        if (!out_ch)
 -            return AVERROR(ENOMEM);
 +        if (ret < 0)
 +            return ret;
 +        // touch data and size only if not EOF
 +        if (pkt) {
 +            if(ist->st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
 +                ret = avpkt.size;
 +            avpkt.data += ret;
 +            avpkt.size -= ret;
 +        }
 +        if (!got_output) {
 +            continue;
 +        }
 +    }
  
 -        out_ch->id        = in_ch->id;
 -        out_ch->time_base = in_ch->time_base;
 -        out_ch->start     = FFMAX(0,  in_ch->start - ts_off);
 -        out_ch->end       = FFMIN(rt, in_ch->end   - ts_off);
 +    /* handle stream copy */
 +    if (!ist->decoding_needed) {
 +        rate_emu_sleep(ist);
 +        ist->pts = ist->next_pts;
 +        switch (ist->st->codec->codec_type) {
 +        case AVMEDIA_TYPE_AUDIO:
 +            ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
 +                             ist->st->codec->sample_rate;
 +            break;
 +        case AVMEDIA_TYPE_VIDEO:
 +            if (ist->st->codec->time_base.num != 0) {
 +                int ticks = ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
 +                ist->next_pts += ((int64_t)AV_TIME_BASE *
 +                                  ist->st->codec->time_base.num * ticks) /
 +                                  ist->st->codec->time_base.den;
 +            }
 +            break;
 +        }
 +    }
 +    for (i = 0; pkt && i < nb_ostreams; i++) {
 +        OutputStream *ost = &ost_table[i];
  
 -        if (metadata_chapters_autocopy)
 -            av_dict_copy(&out_ch->metadata, in_ch->metadata, 0);
 +        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
 +            continue;
  
 -        os->nb_chapters++;
 -        os->chapters = av_realloc(os->chapters, sizeof(AVChapter)*os->nb_chapters);
 -        if (!os->chapters)
 -            return AVERROR(ENOMEM);
 -        os->chapters[os->nb_chapters - 1] = out_ch;
 +        do_streamcopy(ist, ost, pkt);
      }
 +
      return 0;
  }
  
diff --cc libavcodec/8svx.c
index efe554adc298b4aa131c2003354f111af4d26975,4f11b8bec4c40dacf3218bb872e6ed74c958e9e6..b4fc899a2297bfdb2a53e814ed49cb560f57bc2f
  
  /** decoder context */
  typedef struct EightSvxContext {
 -    uint8_t fib_acc[2];
+     AVFrame frame;
      const int8_t *table;
  
 -    /* buffer used to store the whole first packet.
 -       data is only sent as one large packet */
 -    uint8_t *data[2];
 -    int data_size;
 -    int data_idx;
 +    /* buffer used to store the whole audio decoded/interleaved chunk,
 +     * which is sent with the first packet */
 +    uint8_t *samples;
 +    size_t samples_size;
 +    int samples_idx;
  } EightSvxContext;
  
 -static const int8_t fibonacci[16]   = { -34, -21, -13,  -8, -5, -3, -2, -1,
 -                                          0,   1,   2,   3,  5,  8, 13, 21 };
 -static const int8_t exponential[16] = { -128, -64, -32, -16, -8, -4, -2, -1,
 -                                           0,   1,   2,   4,  8, 16, 32, 64 };
 +static const int8_t fibonacci[16]   = { -34,  -21, -13,  -8, -5, -3, -2, -1, 0, 1, 2, 3, 5, 8,  13, 21 };
 +static const int8_t exponential[16] = { -128, -64, -32, -16, -8, -4, -2, -1, 0, 1, 2, 4, 8, 16, 32, 64 };
  
 -#define MAX_FRAME_SIZE 32768
 +#define MAX_FRAME_SIZE 2048
  
  /**
 - * Delta decode the compressed values in src, and put the resulting
 - * decoded samples in dst.
 + * Interleave samples in buffer containing all left channel samples
 + * at the beginning, and right channel samples at the end.
 + * Each sample is assumed to be in signed 8-bit format.
   *
 - * @param[in,out] state starting value. it is saved for use in the next call.
 + * @param size the size in bytes of the dst and src buffer
   */
 -static void delta_decode(uint8_t *dst, const uint8_t *src, int src_size,
 -                         uint8_t *state, const int8_t *table, int channels)
 +static void interleave_stereo(uint8_t *dst, const uint8_t *src, int size)
  {
 -    uint8_t val = *state;
 +    uint8_t *dst_end = dst + size;
 +    size = size>>1;
  
 -    while (src_size--) {
 -        uint8_t d = *src++;
 -        val = av_clip_uint8(val + table[d & 0xF]);
 -        *dst = val;
 -        dst += channels;
 -        val = av_clip_uint8(val + table[d >> 4]);
 -        *dst = val;
 -        dst += channels;
 +    while (dst < dst_end) {
 +        *dst++ = *src;
 +        *dst++ = *(src+size);
 +        src++;
      }
 -
 -    *state = val;
  }
  
 -static void raw_decode(uint8_t *dst, const int8_t *src, int src_size,
 -                       int channels)
 +/**
 + * Delta decode the compressed values in src, and put the resulting
 + * decoded n samples in dst.
 + *
 + * @param val starting value assumed by the delta sequence
 + * @param table delta sequence table
 + * @return size in bytes of the decoded data, must be src_size*2
 + */
 +static int delta_decode(int8_t *dst, const uint8_t *src, int src_size,
 +                        int8_t val, const int8_t *table)
  {
 -    while (src_size--) {
 -        *dst = *src++ + 128;
 -        dst += channels;
 +    int n = src_size;
 +    int8_t *dst0 = dst;
 +
 +    while (n--) {
 +        uint8_t d = *src++;
 +        val = av_clip(val + table[d & 0x0f], -127, 128);
 +        *dst++ = val;
 +        val = av_clip(val + table[d >> 4]  , -127, 128);
 +        *dst++ = val;
      }
 +
 +    return dst-dst0;
  }
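
The rewritten delta_decode() above consumes two 4-bit deltas per input byte, low nibble first, and carries a running signed 8-bit value between samples. A minimal standalone sketch of the same idea; fib_delta_decode and clamp8 are illustrative names that are not in the tree, and the clamp here uses the plain int8_t range rather than the patch's exact clip bounds:

    #include <stdint.h>

    static const int8_t fib[16] = { -34, -21, -13, -8, -5, -3, -2, -1,
                                      0,   1,   2,  3,  5,  8, 13, 21 };

    /* keep the running value inside the signed 8-bit range */
    static int8_t clamp8(int v)
    {
        return v < -128 ? -128 : v > 127 ? 127 : (int8_t)v;
    }

    /* two 4-bit deltas per input byte, low nibble first, starting from val;
     * writes 2*src_size signed 8-bit samples to dst */
    static void fib_delta_decode(int8_t *dst, const uint8_t *src,
                                 int src_size, int8_t val)
    {
        while (src_size--) {
            uint8_t d = *src++;
            *dst++ = val = clamp8(val + fib[d & 0x0f]);
            *dst++ = val = clamp8(val + fib[d >> 4]);
        }
    }

The frame-decoding path further down then biases each sample by +128 when copying into the output buffer, which is what produces the unsigned 8-bit samples the codec advertises.
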
  
- static int eightsvx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
-                                  AVPacket *avpkt)
+ /** decode a frame */
+ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
+                                  int *got_frame_ptr, AVPacket *avpkt)
  {
      EightSvxContext *esc = avctx->priv_data;
-     int out_data_size, n;
 -    int buf_size;
 -    uint8_t *out_data;
 -    int ret;
 -    int is_compr = (avctx->codec_id != CODEC_ID_PCM_S8_PLANAR);
 -
 -    /* for the first packet, copy data to buffer */
 -    if (avpkt->data) {
 -        int hdr_size  = is_compr ? 2 : 0;
 -        int chan_size = (avpkt->size - hdr_size * avctx->channels) / avctx->channels;
 -
 -        if (avpkt->size < hdr_size * avctx->channels) {
 -            av_log(avctx, AV_LOG_ERROR, "packet size is too small\n");
 -            return AVERROR(EINVAL);
 -        }
 -        if (esc->data[0]) {
 -            av_log(avctx, AV_LOG_ERROR, "unexpected data after first packet\n");
 -            return AVERROR(EINVAL);
 -        }
++    int n, out_data_size, ret;
++    uint8_t *out_data;
 +    uint8_t *src, *dst;
  
 -        if (is_compr) {
 -        esc->fib_acc[0] = avpkt->data[1] + 128;
 -        if (avctx->channels == 2)
 -            esc->fib_acc[1] = avpkt->data[2+chan_size+1] + 128;
 -        }
 +    /* decode and interleave the first packet */
 +    if (!esc->samples && avpkt) {
 +        uint8_t *deinterleaved_samples;
  
 -        esc->data_idx  = 0;
 -        esc->data_size = chan_size;
 -        if (!(esc->data[0] = av_malloc(chan_size)))
 +        esc->samples_size = avctx->codec->id == CODEC_ID_8SVX_RAW || avctx->codec->id ==CODEC_ID_PCM_S8_PLANAR?
 +            avpkt->size : avctx->channels + (avpkt->size-avctx->channels) * 2;
 +        if (!(esc->samples = av_malloc(esc->samples_size)))
              return AVERROR(ENOMEM);
 -        if (avctx->channels == 2) {
 -            if (!(esc->data[1] = av_malloc(chan_size))) {
 -                av_freep(&esc->data[0]);
 -                return AVERROR(ENOMEM);
 +
 +        /* decompress */
 +        if (avctx->codec->id == CODEC_ID_8SVX_FIB || avctx->codec->id == CODEC_ID_8SVX_EXP) {
 +            const uint8_t *buf = avpkt->data;
 +            int buf_size = avpkt->size;
 +            int n = esc->samples_size;
 +
 +            if (buf_size < 2) {
 +                av_log(avctx, AV_LOG_ERROR, "packet size is too small\n");
 +                return AVERROR(EINVAL);
              }
 +            if (!(deinterleaved_samples = av_mallocz(n)))
 +                return AVERROR(ENOMEM);
 +
 +            /* the uncompressed starting value is contained in the first byte */
 +            if (avctx->channels == 2) {
 +                delta_decode(deinterleaved_samples      , buf+1, buf_size/2-1, buf[0], esc->table);
 +                buf += buf_size/2;
 +                delta_decode(deinterleaved_samples+n/2-1, buf+1, buf_size/2-1, buf[0], esc->table);
 +            } else
 +                delta_decode(deinterleaved_samples      , buf+1, buf_size-1  , buf[0], esc->table);
 +        } else {
 +            deinterleaved_samples = avpkt->data;
          }
 -        memcpy(esc->data[0], &avpkt->data[hdr_size], chan_size);
 -        if (avctx->channels == 2)
 -            memcpy(esc->data[1], &avpkt->data[2*hdr_size+chan_size], chan_size);
 -    }
 -    if (!esc->data[0]) {
 -        av_log(avctx, AV_LOG_ERROR, "unexpected empty packet\n");
 -        return AVERROR(EINVAL);
 -    }
  
 -    /* decode next piece of data from the buffer */
 -    buf_size = FFMIN(MAX_FRAME_SIZE, esc->data_size - esc->data_idx);
 -    if (buf_size <= 0) {
 -        *got_frame_ptr = 0;
 -        return avpkt->size;
 +        if (avctx->channels == 2)
 +            interleave_stereo(esc->samples, deinterleaved_samples, esc->samples_size);
 +        else
 +            memcpy(esc->samples, deinterleaved_samples, esc->samples_size);
      }
  
-     /* return single packed with fixed size */
-     out_data_size = FFMIN(MAX_FRAME_SIZE, esc->samples_size - esc->samples_idx);
-     if (*data_size < out_data_size) {
-         av_log(avctx, AV_LOG_ERROR, "Provided buffer with size %d is too small.\n", *data_size);
-         return AVERROR(EINVAL);
+     /* get output buffer */
 -    esc->frame.nb_samples = buf_size * (is_compr + 1);
++    esc->frame.nb_samples = (FFMIN(MAX_FRAME_SIZE, esc->samples_size - esc->samples_idx) +avctx->channels-1)  / avctx->channels;
+     if ((ret = avctx->get_buffer(avctx, &esc->frame)) < 0) {
+         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+         return ret;
      }
 -    out_data = esc->frame.data[0];
 -
 -    if (is_compr) {
 -    delta_decode(out_data, &esc->data[0][esc->data_idx], buf_size,
 -                 &esc->fib_acc[0], esc->table, avctx->channels);
 -    if (avctx->channels == 2) {
 -        delta_decode(&out_data[1], &esc->data[1][esc->data_idx], buf_size,
 -                    &esc->fib_acc[1], esc->table, avctx->channels);
 -    }
 -    } else {
 -        int ch;
 -        for (ch = 0; ch < avctx->channels; ch++) {
 -            raw_decode((int8_t *)&out_data[ch], &esc->data[ch][esc->data_idx],
 -                       buf_size, avctx->channels);
 -        }
 -    }
 -    esc->data_idx += buf_size;
  
-     *data_size = out_data_size;
-     dst = data;
+     *got_frame_ptr   = 1;
+     *(AVFrame *)data = esc->frame;
 -    return avpkt->size;
++    dst = esc->frame.data[0];
 +    src = esc->samples + esc->samples_idx;
++    out_data_size = esc->frame.nb_samples * avctx->channels;
 +    for (n = out_data_size; n > 0; n--)
 +        *dst++ = *src++ + 128;
-     esc->samples_idx += *data_size;
++    esc->samples_idx += out_data_size;
 +
 +    return avctx->codec->id == CODEC_ID_8SVX_FIB || avctx->codec->id == CODEC_ID_8SVX_EXP ?
 +        (avctx->frame_number == 0)*2 + out_data_size / 2 :
 +        out_data_size;
  }
  
 -/** initialize 8svx decoder */
  static av_cold int eightsvx_decode_init(AVCodecContext *avctx)
  {
      EightSvxContext *esc = avctx->priv_data;
@@@ -204,8 -215,9 +213,9 @@@ AVCodec ff_eightsvx_fib_decoder = 
    .id             = CODEC_ID_8SVX_FIB,
    .priv_data_size = sizeof (EightSvxContext),
    .init           = eightsvx_decode_init,
 -  .close          = eightsvx_decode_close,
    .decode         = eightsvx_decode_frame,
 -  .capabilities   = CODEC_CAP_DELAY | CODEC_CAP_DR1,
 +  .close          = eightsvx_decode_close,
++  .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("8SVX fibonacci"),
  };
  
@@@ -215,8 -227,9 +225,9 @@@ AVCodec ff_eightsvx_exp_decoder = 
    .id             = CODEC_ID_8SVX_EXP,
    .priv_data_size = sizeof (EightSvxContext),
    .init           = eightsvx_decode_init,
 -  .close          = eightsvx_decode_close,
    .decode         = eightsvx_decode_frame,
 -  .capabilities   = CODEC_CAP_DELAY | CODEC_CAP_DR1,
 +  .close          = eightsvx_decode_close,
++  .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("8SVX exponential"),
  };
  
@@@ -228,5 -241,6 +239,6 @@@ AVCodec ff_pcm_s8_planar_decoder = 
      .init           = eightsvx_decode_init,
      .close          = eightsvx_decode_close,
      .decode         = eightsvx_decode_frame,
 -    .capabilities   = CODEC_CAP_DELAY | CODEC_CAP_DR1,
++    .capabilities   = CODEC_CAP_DR1,
      .long_name      = NULL_IF_CONFIG_SMALL("PCM signed 8-bit planar"),
  };
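
Beyond 8svx.c, every audio decoder touched by this merge follows the same shape as eightsvx_decode_frame() above: size the embedded AVFrame, ask avctx->get_buffer() for sample storage, decode into it, then hand the frame back through got_frame_ptr and the opaque data pointer. A condensed sketch of that skeleton; MyContext and samples_in_packet() are placeholders, not symbols from the tree:

    #include <libavcodec/avcodec.h>

    typedef struct { AVFrame frame; } MyContext;            /* placeholder context */
    static int samples_in_packet(const AVPacket *pkt);      /* placeholder helper */

    static int sketch_decode_frame(AVCodecContext *avctx, void *data,
                                   int *got_frame_ptr, AVPacket *avpkt)
    {
        MyContext *s = avctx->priv_data;
        int ret;

        /* size the frame, then let get_buffer() provide the sample storage */
        s->frame.nb_samples = samples_in_packet(avpkt);
        if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return ret;
        }

        /* ... decode avpkt->data into s->frame.data[0] ... */

        *got_frame_ptr   = 1;
        *(AVFrame *)data = s->frame;
        return avpkt->size;                                  /* input bytes consumed */
    }

The amrnb, dca, nellymoser, tta and wma hunks below are instances of this pattern with codec-specific sample counts.
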
Simple merge
index 5a2b230d24a70c97aca62a8bda779bae3b596010,8e4b510354be350c8281d2fe46b48ecb968d7ee6..a046d991e662baa831d883f64d3022eb6c70599e
@@@ -2604,7 -2592,6 +2620,7 @@@ AVCodec ff_aac_latm_decoder = 
      .sample_fmts = (const enum AVSampleFormat[]) {
          AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
      },
-     .capabilities = CODEC_CAP_CHANNEL_CONF,
+     .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
      .channel_layouts = aac_channel_layout,
 +    .flush = flush,
  };
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index c399471c1dc7f3ac5fb64baddaa2fb8631bee232,7e29a24c6ff3fc9ee739dff53f14dfdb9f14c884..305a5b825bd5eb9e403466a26cb78bdb10bf926d
@@@ -389,13 -391,8 +391,13 @@@ static av_cold int alac_encode_init(AVC
          return -1;
      }
  
 +    if(avctx->channels > 2) {
 +        av_log(avctx, AV_LOG_ERROR, "channels > 2 not supported\n");
 +        return AVERROR_PATCHWELCOME;
 +    }
 +
      // Set default compression level
-     if(avctx->compression_level == FF_COMPRESSION_DEFAULT)
+     if (avctx->compression_level == FF_COMPRESSION_DEFAULT)
          s->compression_level = 2;
      else
          s->compression_level = av_clip(avctx->compression_level, 0, 2);
Simple merge
index b8d826e13903db934c2f2330b50f989e6dcf6003,b594af760a11a7a2a29d8ce4cea1a0064239e301..0a4a7e6dda569615d663cd6b2e656cadc66f2fee
@@@ -934,10 -938,17 +938,18 @@@ static int amrnb_decode_frame(AVCodecCo
      float synth_fixed_gain;                  // the fixed gain that synthesis should use
      const float *synth_fixed_vector;         // pointer to the fixed vector that synthesis should use
  
+     /* get output buffer */
+     p->avframe.nb_samples = AMR_BLOCK_SIZE;
+     if ((ret = avctx->get_buffer(avctx, &p->avframe)) < 0) {
+         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+         return ret;
+     }
+     buf_out = (float *)p->avframe.data[0];
      p->cur_frame_mode = unpack_bitstream(p, buf, buf_size);
      if (p->cur_frame_mode == MODE_DTX) {
 -        av_log_missing_feature(avctx, "dtx mode", 1);
 +        av_log_missing_feature(avctx, "dtx mode", 0);
 +        av_log(avctx, AV_LOG_INFO, "Note: libopencore_amrnb supports dtx\n");
          return -1;
      }
  
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 1381a3e0181e429bc46c617648fce0fff509feff,83fb39b99e6d10456e599212cf7ac9a2d2dd06ad..dfd15e568337fca94880b769be4471a4735fdf77
@@@ -1206,43 -1204,32 +1224,70 @@@ typedef struct AVFrame 
       */
      void *thread_opaque;
  
+     /**
+      * number of audio samples (per channel) described by this frame
+      * - encoding: unused
+      * - decoding: Set by libavcodec
+      */
+     int nb_samples;
+     /**
+      * pointers to the data planes/channels.
+      *
+      * For video, this should simply point to data[].
+      *
+      * For planar audio, each channel has a separate data pointer, and
+      * linesize[0] contains the size of each channel buffer.
+      * For packed audio, there is just one data pointer, and linesize[0]
+      * contains the total size of the buffer for all channels.
+      *
+      * Note: Both data and extended_data will always be set by get_buffer(),
+      * but for planar audio with more channels than can fit in data,
+      * extended_data must be used by the decoder in order to access all
+      * channels.
+      *
+      * - encoding: unused
+      * - decoding: set by AVCodecContext.get_buffer()
+      */
+     uint8_t **extended_data;
++
 +    /**
 +     * frame timestamp estimated using various heuristics, in stream time base
 +     * - encoding: unused
 +     * - decoding: set by libavcodec, read by user.
 +     */
 +    int64_t best_effort_timestamp;
 +
 +    /**
 +     * reordered pos from the last AVPacket that has been input into the decoder
 +     * - encoding: unused
 +     * - decoding: Read by user.
 +     */
 +    int64_t pkt_pos;
 +
 +    /**
 +     * reordered sample aspect ratio for the video frame, 0/1 if unknown/unspecified
 +     * - encoding: unused
 +     * - decoding: Read by user.
 +     */
 +    AVRational sample_aspect_ratio;
 +
 +    /**
 +     * width and height of the video frame
 +     * - encoding: unused
 +     * - decoding: Read by user.
 +     */
 +    int width, height;
 +
 +    /**
 +     * format of the frame, -1 if unknown or unset
 +     * It should be cast to the corresponding enum (enum PixelFormat
 +     * for video, enum AVSampleFormat for audio)
 +     * - encoding: unused
 +     * - decoding: Read by user.
 +     */
 +    int format;
 +
  } AVFrame;
  
  struct AVCodecInternal;
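
The extended_data documentation added above is easiest to read next to a consumer: for packed formats everything sits in data[0], while planar formats expose one plane per channel, with extended_data covering channels that do not fit in data[]. A hedged sketch of walking a decoded frame either way; handle_plane is a stand-in for whatever the caller does with the bytes:

    #include <libavcodec/avcodec.h>
    #include <libavutil/samplefmt.h>

    void handle_plane(const uint8_t *buf, int size);   /* caller-defined sink (assumed) */

    static void consume_audio_frame(AVCodecContext *avctx, AVFrame *frame)
    {
        int bps = av_get_bytes_per_sample(avctx->sample_fmt);
        int ch;

        if (av_sample_fmt_is_planar(avctx->sample_fmt)) {
            /* one buffer per channel; always go through extended_data */
            for (ch = 0; ch < avctx->channels; ch++)
                handle_plane(frame->extended_data[ch], frame->nb_samples * bps);
        } else {
            /* packed/interleaved: a single buffer holds all channels */
            handle_plane(frame->data[0],
                         frame->nb_samples * avctx->channels * bps);
        }
    }
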
Simple merge
Simple merge
index edafb967b401c2d10c229f422418e215c77ff4ef,e3f87b92eba22135c0628f376d0d6386997d25a5..7c43b922b14a24275b68505f5b6c51b53938281c
@@@ -1826,17 -1822,30 +1825,20 @@@ static int dca_decode_frame(AVCodecCont
          return AVERROR_INVALIDDATA;
      }
  
 -
 -    /* There is nothing that prevents a dts frame to change channel configuration
 -       but Libav doesn't support that so only set the channels if it is previously
 -       unset. Ideally during the first probe for channels the crc should be checked
 -       and only set avctx->channels when the crc is ok. Right now the decoder could
 -       set the channels based on a broken first frame.*/
 -    if (s->is_channels_set == 0) {
 -        s->is_channels_set = 1;
 -        avctx->channels = channels;
 -    }
      if (avctx->channels != channels) {
 -        av_log(avctx, AV_LOG_ERROR, "DCA decoder does not support number of "
 -               "channels changing in stream. Skipping frame.\n");
 -        return AVERROR_PATCHWELCOME;
 +        if (avctx->channels)
 +            av_log(avctx, AV_LOG_INFO, "Number of channels changed in DCA decoder (%d -> %d)\n", avctx->channels, channels);
 +        avctx->channels = channels;
      }
  
-     out_size = 256 / 8 * s->sample_blocks * channels *
-                av_get_bytes_per_sample(avctx->sample_fmt);
-     if (*data_size < out_size)
-         return AVERROR(EINVAL);
-     *data_size = out_size;
+     /* get output buffer */
+     s->frame.nb_samples = 256 * (s->sample_blocks / 8);
+     if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+         return ret;
+     }
+     samples_flt = (float   *)s->frame.data[0];
+     samples_s16 = (int16_t *)s->frame.data[0];
  
      /* filter to get final output */
      for (i = 0; i < (s->sample_blocks / 8); i++) {
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index cf73121ef880395b075a37f68eb5dcb2fd8831ff,7723c5827ba5a3229a5c1f19dae71193f9f4fdb3..3386b8241e094495f465e1b4481d9213b648bfa9
@@@ -145,30 -150,21 +150,24 @@@ static av_cold int decode_init(AVCodecC
      return 0;
  }
  
- static int decode_tag(AVCodecContext * avctx,
-                       void *data, int *data_size,
-                       AVPacket *avpkt) {
+ static int decode_tag(AVCodecContext *avctx, void *data,
+                       int *got_frame_ptr, AVPacket *avpkt)
+ {
      const uint8_t *buf = avpkt->data;
 +    const uint8_t *side=av_packet_get_side_data(avpkt, 'F', NULL);
      int buf_size = avpkt->size;
      NellyMoserDecodeContext *s = avctx->priv_data;
-     int data_max = *data_size;
-     int blocks, i, block_size;
-     int16_t *samples_s16 = data;
-     float   *samples_flt = data;
-     *data_size = 0;
+     int blocks, i, ret;
+     int16_t *samples_s16;
+     float   *samples_flt;
  
-     block_size = NELLY_SAMPLES * av_get_bytes_per_sample(avctx->sample_fmt);
      blocks     = buf_size / NELLY_BLOCK_LEN;
 +
      if (blocks <= 0) {
          av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
          return AVERROR_INVALIDDATA;
      }
-     if (data_max < blocks * block_size) {
-         av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
-         return AVERROR(EINVAL);
-     }
++
      if (buf_size % NELLY_BLOCK_LEN) {
          av_log(avctx, AV_LOG_WARNING, "Leftover bytes: %d.\n",
                 buf_size % NELLY_BLOCK_LEN);
       * 22050 Hz - 4
       * 44100 Hz - 8
       */
 +    if(side && blocks>1 && avctx->sample_rate%11025==0 && (1<<((side[0]>>2)&3)) == blocks)
 +        avctx->sample_rate= 11025*(blocks/2);
  
+     /* get output buffer */
+     s->frame.nb_samples = NELLY_SAMPLES * blocks;
+     if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
+         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+         return ret;
+     }
+     samples_s16 = (int16_t *)s->frame.data[0];
+     samples_flt = (float   *)s->frame.data[0];
      for (i=0 ; i<blocks ; i++) {
          if (avctx->sample_fmt == SAMPLE_FMT_FLT) {
              nelly_decode_block(s, buf, samples_flt);
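
The sample-rate guess added to nellymoserdec.c only fires when the packet carries the 'F' side data, the current rate is a multiple of 11025 and the block count implied by the side byte matches the packet. The arithmetic it relies on is small; in the worked values below, side0 stands in for the first side-data byte:

    /* worked example for one side-data byte (values only, no new API) */
    int code   = (side0 >> 2) & 3;      /* e.g. side0 = 0x08 -> code = 2  */
    int blocks = 1 << code;             /* 1 << 2 = 4 blocks per packet   */
    int rate   = 11025 * (blocks / 2);  /* 11025 * 2 = 22050 Hz           */

With 8 blocks the same formula gives 44100 Hz, matching the block counts listed in the comment above.
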
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 7ff5116ffbbb10efac3df7338b296ca3c4e84859,6b76f527c48635919a62dd08b168d2236a7be9dc..3c4c152213ca914afadf9c4b5988d7fa11b6645d
@@@ -406,34 -398,22 +410,35 @@@ static int tta_decode_frame(AVCodecCont
          return -1;
      skip_bits(&s->gb, 32); // frame crc
  
 -    // convert to output buffer
 -    if (s->bps == 2) {
 -        int16_t *samples = (int16_t *)s->frame.data[0];
 -        for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++)
 -            *samples++ = *p;
 -    } else {
 -        // shift samples for 24-bit sample format
 -        int32_t *samples = (int32_t *)s->frame.data[0];
 -        for (i = 0; i < framelen * s->channels; i++)
 -            *samples++ <<= 8;
 -        // reset decode buffer
 -        s->decode_buffer = NULL;
 -    }
 +        // convert to output buffer
 +        switch(s->bps) {
 +            case 1: {
-                 uint8_t *samples = data;
++                uint8_t *samples = (uint8_t *)s->frame.data[0];
 +                for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++)
 +                    *samples++ = *p + 0x80;
 +                break;
 +            }
 +            case 2: {
-                 uint16_t *samples = data;
++                uint16_t *samples = (uint16_t *)s->frame.data[0];
 +                for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++)
 +                    *samples++ = *p;
 +                break;
 +            }
 +            case 3: {
 +                // shift samples for 24-bit sample format
-                 int32_t *samples = data;
++                int32_t *samples = (int32_t *)s->frame.data[0];
 +                for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++)
 +                    *samples++ <<= 8;
 +                // reset decode buffer
 +                s->decode_buffer = NULL;
 +                break;
 +            }
 +            default:
 +                av_log(s->avctx, AV_LOG_ERROR, "Error, only 8, 16 and 24 bit samples are supported!\n");
 +        }
  
-     *data_size = out_size;
+     *got_frame_ptr   = 1;
+     *(AVFrame *)data = s->frame;
  
      return buf_size;
  }
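
The new switch in tta.c maps the decoder's 32-bit working buffer onto the output width: 8-bit output is biased to unsigned with +0x80, 16-bit is a straight copy, and 24-bit values are shifted up by 8 to fill their 32-bit slots. A tiny self-contained illustration of those conversions, using arbitrary sample values and nothing TTA-specific:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        int32_t decoded = 12345;                 /* one sample from the working buffer */

        uint8_t u8  = (uint8_t)(-100 + 0x80);    /* bps == 1: bias signed 8-bit to unsigned */
        int16_t s16 = (int16_t)decoded;          /* bps == 2: direct copy                   */
        int32_t s24 = decoded << 8;              /* bps == 3: 24-bit value scaled into 32   */

        assert(u8  == 28);
        assert(s16 == 12345);
        assert(s24 == 12345 * 256);
        return 0;
    }
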
Simple merge
index f023a8967c8b127b24911b33d8c1462a993a4de9,c84439972cc381a295a5095115d3e9789218a86e..fa94b7c124e84c35fc0a17b4b61bcf02e5e6f7fa
@@@ -224,10 -221,9 +225,9 @@@ void avcodec_align_dimensions2(AVCodecC
  #if HAVE_MMX
      if(s->codec_id == CODEC_ID_SVQ1 || s->codec_id == CODEC_ID_VP5 ||
         s->codec_id == CODEC_ID_VP6 || s->codec_id == CODEC_ID_VP6F ||
 -       s->codec_id == CODEC_ID_VP6A) {
 +       s->codec_id == CODEC_ID_VP6A || s->codec_id == CODEC_ID_DIRAC) {
-         linesize_align[0] =
-         linesize_align[1] =
-         linesize_align[2] = 16;
+         for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
+             linesize_align[i] = 16;
      }
  #endif
  }
@@@ -244,23 -240,108 +244,124 @@@ void avcodec_align_dimensions(AVCodecCo
      *width=FFALIGN(*width, align);
  }
  
- int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
 +void ff_init_buffer_info(AVCodecContext *s, AVFrame *pic)
 +{
 +    if (s->pkt) {
 +        pic->pkt_pts = s->pkt->pts;
 +        pic->pkt_pos = s->pkt->pos;
 +    } else {
 +        pic->pkt_pts = AV_NOPTS_VALUE;
 +        pic->pkt_pos = -1;
 +    }
 +    pic->reordered_opaque= s->reordered_opaque;
 +    pic->sample_aspect_ratio = s->sample_aspect_ratio;
 +    pic->width               = s->width;
 +    pic->height              = s->height;
 +    pic->format              = s->pix_fmt;
 +}
 +
+ static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
+ {
+     AVCodecInternal *avci = avctx->internal;
+     InternalBuffer *buf;
+     int buf_size, ret, i, needs_extended_data;
+     buf_size = av_samples_get_buffer_size(NULL, avctx->channels,
+                                           frame->nb_samples, avctx->sample_fmt,
+                                           32);
+     if (buf_size < 0)
+         return AVERROR(EINVAL);
+     needs_extended_data = av_sample_fmt_is_planar(avctx->sample_fmt) &&
+                           avctx->channels > AV_NUM_DATA_POINTERS;
+     /* allocate InternalBuffer if needed */
+     if (!avci->buffer) {
+         avci->buffer = av_mallocz(sizeof(InternalBuffer));
+         if (!avci->buffer)
+             return AVERROR(ENOMEM);
+     }
+     buf = avci->buffer;
+     /* if there is a previously-used internal buffer, check its size and
+        channel count to see if we can reuse it */
+     if (buf->extended_data) {
+         /* if current buffer is too small, free it */
+         if (buf->extended_data[0] && buf_size > buf->audio_data_size) {
+             av_free(buf->extended_data[0]);
+             if (buf->extended_data != buf->data)
+                 av_freep(&buf->extended_data);
+             buf->extended_data = NULL;
+             buf->data[0] = NULL;
+         }
+         /* if number of channels has changed, reset and/or free extended data
+            pointers but leave data buffer in buf->data[0] for reuse */
+         if (buf->nb_channels != avctx->channels) {
+             if (buf->extended_data != buf->data)
+                 av_free(buf->extended_data);
+             buf->extended_data = NULL;
+         }
+     }
+     /* if there is no previous buffer or the previous buffer cannot be used
+        as-is, allocate a new buffer and/or rearrange the channel pointers */
+     if (!buf->extended_data) {
+         /* if the channel pointers will fit, just set extended_data to data,
+            otherwise allocate the extended_data channel pointers */
+         if (needs_extended_data) {
+             buf->extended_data = av_mallocz(avctx->channels *
+                                             sizeof(*buf->extended_data));
+             if (!buf->extended_data)
+                 return AVERROR(ENOMEM);
+         } else {
+             buf->extended_data = buf->data;
+         }
+         /* if there is a previous buffer and it is large enough, reuse it and
+            just fill-in new channel pointers and linesize, otherwise allocate
+            a new buffer */
+         if (buf->extended_data[0]) {
+             ret = av_samples_fill_arrays(buf->extended_data, &buf->linesize[0],
+                                          buf->extended_data[0], avctx->channels,
+                                          frame->nb_samples, avctx->sample_fmt,
+                                          32);
+         } else {
+             ret = av_samples_alloc(buf->extended_data, &buf->linesize[0],
+                                    avctx->channels, frame->nb_samples,
+                                    avctx->sample_fmt, 32);
+         }
+         if (ret)
+             return ret;
+         /* if data was not used for extended_data, we need to copy as many of
+            the extended_data channel pointers as will fit */
+         if (needs_extended_data) {
+             for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
+                 buf->data[i] = buf->extended_data[i];
+         }
+         buf->audio_data_size = buf_size;
+         buf->nb_channels     = avctx->channels;
+     }
+     /* copy InternalBuffer info to the AVFrame */
+     frame->type          = FF_BUFFER_TYPE_INTERNAL;
+     frame->extended_data = buf->extended_data;
+     frame->linesize[0]   = buf->linesize[0];
+     memcpy(frame->data, buf->data, sizeof(frame->data));
+     if (avctx->pkt) frame->pkt_pts = avctx->pkt->pts;
+     else            frame->pkt_pts = AV_NOPTS_VALUE;
+     frame->reordered_opaque = avctx->reordered_opaque;
+     if (avctx->debug & FF_DEBUG_BUFFERS)
+         av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p, "
+                "internal audio buffer used\n", frame);
+     return 0;
+ }
+ static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
+ {
      int i;
      int w= s->width;
      int h= s->height;
          pic->data[i]= buf->data[i];
          pic->linesize[i]= buf->linesize[i];
      }
+     pic->extended_data = pic->data;
      avci->buffer_count++;
  
 -    if(s->pkt) pic->pkt_pts= s->pkt->pts;
 -    else       pic->pkt_pts= AV_NOPTS_VALUE;
 +    if (s->pkt) {
 +        pic->pkt_pts = s->pkt->pts;
 +        pic->pkt_pos = s->pkt->pos;
 +    } else {
 +        pic->pkt_pts = AV_NOPTS_VALUE;
 +        pic->pkt_pos = -1;
 +    }
      pic->reordered_opaque= s->reordered_opaque;
 +    pic->sample_aspect_ratio = s->sample_aspect_ratio;
 +    pic->width               = s->width;
 +    pic->height              = s->height;
 +    pic->format              = s->pix_fmt;
  
      if(s->debug&FF_DEBUG_BUFFERS)
          av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p, %d "
@@@ -866,32 -883,68 +989,68 @@@ int attribute_align_arg avcodec_decode_
                           int *frame_size_ptr,
                           AVPacket *avpkt)
  {
-     int ret;
+     AVFrame frame;
+     int ret, got_frame = 0;
+     if (avctx->get_buffer != avcodec_default_get_buffer) {
+         av_log(avctx, AV_LOG_ERROR, "A custom get_buffer() cannot be used with "
+                "avcodec_decode_audio3()\n");
+         return AVERROR(EINVAL);
+     }
+     ret = avcodec_decode_audio4(avctx, &frame, &got_frame, avpkt);
+     if (ret >= 0 && got_frame) {
+         int ch, plane_size;
+         int planar = av_sample_fmt_is_planar(avctx->sample_fmt);
+         int data_size = av_samples_get_buffer_size(&plane_size, avctx->channels,
+                                                    frame.nb_samples,
+                                                    avctx->sample_fmt, 1);
+         if (*frame_size_ptr < data_size) {
+             av_log(avctx, AV_LOG_ERROR, "output buffer size is too small for "
+                    "the current frame (%d < %d)\n", *frame_size_ptr, data_size);
+             return AVERROR(EINVAL);
+         }
+         memcpy(samples, frame.extended_data[0], plane_size);
+         if (planar && avctx->channels > 1) {
+             uint8_t *out = ((uint8_t *)samples) + plane_size;
+             for (ch = 1; ch < avctx->channels; ch++) {
+                 memcpy(out, frame.extended_data[ch], plane_size);
+                 out += plane_size;
+             }
+         }
+         *frame_size_ptr = data_size;
+     } else {
+         *frame_size_ptr = 0;
+     }
+     return ret;
+ }
+ #endif
+ int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
+                                               AVFrame *frame,
+                                               int *got_frame_ptr,
+                                               AVPacket *avpkt)
+ {
+     int ret = 0;
+     *got_frame_ptr = 0;
  
 -    avctx->pkt = avpkt;
 -
      if (!avpkt->data && avpkt->size) {
          av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
          return AVERROR(EINVAL);
      }
  
-     if((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size){
+     if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size) {
 +        av_packet_split_side_data(avpkt);
 +        avctx->pkt = avpkt;
-         //FIXME remove the check below _after_ ensuring that all audio check that the available space is enough
-         if(*frame_size_ptr < AVCODEC_MAX_AUDIO_FRAME_SIZE){
-             av_log(avctx, AV_LOG_ERROR, "buffer smaller than AVCODEC_MAX_AUDIO_FRAME_SIZE\n");
-             return -1;
-         }
-         if(*frame_size_ptr < FF_MIN_BUFFER_SIZE ||
-         *frame_size_ptr < avctx->channels * avctx->frame_size * sizeof(int16_t)){
-             av_log(avctx, AV_LOG_ERROR, "buffer %d too small\n", *frame_size_ptr);
-             return -1;
+         ret = avctx->codec->decode(avctx, frame, got_frame_ptr, avpkt);
+         if (ret >= 0 && *got_frame_ptr) {
+             avctx->frame_number++;
+             frame->pkt_dts = avpkt->dts;
          }
-         ret = avctx->codec->decode(avctx, samples, frame_size_ptr, avpkt);
-         avctx->frame_number++;
-     }else{
-         ret= 0;
-         *frame_size_ptr=0;
      }
      return ret;
  }
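
utils.c now routes the old avcodec_decode_audio3() through the new avcodec_decode_audio4(), which returns samples in a codec-owned AVFrame instead of a caller-provided buffer. A minimal caller-side sketch of the new entry point, assuming avctx was opened elsewhere and pkt comes from the demuxer; error paths trimmed:

    #include <libavcodec/avcodec.h>
    #include <libavutil/samplefmt.h>

    static int decode_one_packet(AVCodecContext *avctx, AVPacket *pkt)
    {
        AVFrame frame;
        int got_frame = 0, ret;

        avcodec_get_frame_defaults(&frame);
        ret = avcodec_decode_audio4(avctx, &frame, &got_frame, pkt);
        if (ret < 0)
            return ret;                          /* decode error */

        if (got_frame) {
            /* frame.nb_samples samples per channel are available through
             * frame.data[0] (packed) or frame.extended_data[ch] (planar) */
            int size = av_samples_get_buffer_size(NULL, avctx->channels,
                                                  frame.nb_samples,
                                                  avctx->sample_fmt, 1);
            av_log(avctx, AV_LOG_DEBUG, "decoded %d bytes of audio\n", size);
        }
        return ret;                              /* input bytes consumed */
    }
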
index b955116c6a2704b2a24e2de898feadf4a04528d9,6faf793ea1884bc9d7c053b7b916c175255ed84f..70dbd0001e6d0b5a1d3930439b3619781b2b2acf
@@@ -21,8 -21,8 +21,8 @@@
  #define AVCODEC_VERSION_H
  
  #define LIBAVCODEC_VERSION_MAJOR 53
- #define LIBAVCODEC_VERSION_MINOR 39
- #define LIBAVCODEC_VERSION_MICRO  1
 -#define LIBAVCODEC_VERSION_MINOR 25
++#define LIBAVCODEC_VERSION_MINOR 40
+ #define LIBAVCODEC_VERSION_MICRO  0
  
  #define LIBAVCODEC_VERSION_INT  AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
                                                 LIBAVCODEC_VERSION_MINOR, \
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 40315d429117ab86737ea181f2ba0e6f6d899227,5600f9ba90d176ed94f5223063396c2a91cf8d6b..0b2e49981d50c5d40bfd12e54acb7e6bab398dc6
@@@ -832,12 -818,9 +835,10 @@@ static int wma_decode_superframe(AVCode
          return 0;
      }
      if (buf_size < s->block_align)
 -        return 0;
 -    buf_size = s->block_align;
 +        return AVERROR(EINVAL);
 +    if(s->block_align)
 +        buf_size = s->block_align;
  
-     samples = data;
      init_get_bits(&s->gb, buf, buf_size*8);
  
      if (s->use_bit_reservoir) {
      }
  
  //av_log(NULL, AV_LOG_ERROR, "%d %d %d %d outbytes:%d eaten:%d\n", s->frame_len_bits, s->block_len_bits, s->frame_len, s->block_len,        (int8_t *)samples - (int8_t *)data, s->block_align);
-     *data_size = out_size;
+     *got_frame_ptr   = 1;
+     *(AVFrame *)data = s->frame;
 -    return s->block_align;
 +    return buf_size;
   fail:
      /* when error, we reset the bit reservoir */
      s->last_superframe_len = 0;
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge