avconv: use avcodec_decode_audio4() instead of avcodec_decode_audio3()
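The heart of the change is the switch to the AVFrame-based audio decoding API. Below is a minimal sketch of the new calling convention, not part of the patch itself (the helper name and the out_bytes parameter are illustrative only): the decoder fills an AVFrame, and the number of output bytes is derived from nb_samples, the channel count and the sample format, instead of being returned through a buffer-size pointer as avcodec_decode_audio3() did.

    #include <libavcodec/avcodec.h>
    #include <libavutil/samplefmt.h>

    /* Sketch of the avcodec_decode_audio4() calling convention this commit
     * adopts. The caller supplies an AVFrame (e.g. from avcodec_alloc_frame());
     * the decoder fills it and reports via got_frame whether output is ready. */
    static int decode_audio_packet(AVCodecContext *avctx, AVPacket *pkt,
                                   AVFrame *frame, int *out_bytes)
    {
        int got_frame = 0;
        int ret = avcodec_decode_audio4(avctx, frame, &got_frame, pkt);

        if (ret < 0)
            return ret;                     /* decode error */

        if (got_frame)
            *out_bytes = frame->nb_samples * avctx->channels *
                         av_get_bytes_per_sample(avctx->sample_fmt);
        else
            *out_bytes = 0;                 /* decoder needs more input */

        return ret;                         /* bytes of pkt consumed */
    }

The new transcode_audio() introduced in the hunks below follows this same pattern, and additionally carries over the per-format volume scaling and the silence generation that previously lived elsewhere in output_packet().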
index 6ad26b486540c78aafd7c1e94e9ca32c7f9e791a..371a1183de4f74c9faac1e6898d5a730ff829070 100644
--- a/avconv.c
+++ b/avconv.c
@@ -113,7 +113,7 @@ static int video_sync_method= -1;
 static int audio_sync_method= 0;
 static float audio_drift_threshold= 0.1;
 static int copy_ts= 0;
-static int copy_tb;
+static int copy_tb = 1;
 static int opt_shortest = 0;
 static char *vstats_filename;
 static FILE *vstats_file;
@@ -137,8 +137,6 @@ static uint8_t *audio_buf;
 static uint8_t *audio_out;
 static unsigned int allocated_audio_out_size, allocated_audio_buf_size;
 
-static void *samples;
-
 #define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
 
 typedef struct InputStream {
@@ -541,7 +539,6 @@ void exit_program(int ret)
     av_free(audio_buf);
     av_free(audio_out);
     allocated_audio_buf_size= allocated_audio_out_size= 0;
-    av_free(samples);
 
 #if CONFIG_AVFILTER
     avfilter_uninit();
@@ -673,11 +670,11 @@ static void choose_pixel_fmt(AVStream *st, AVCodec *codec)
                 p= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_BGRA, PIX_FMT_NONE};
             }
         }
-        for(; *p!=-1; p++){
+        for (; *p != PIX_FMT_NONE; p++) {
             if(*p == st->codec->pix_fmt)
                 break;
         }
-        if (*p == -1) {
+        if (*p == PIX_FMT_NONE) {
             if(st->codec->pix_fmt != PIX_FMT_NONE)
                 av_log(NULL, AV_LOG_WARNING,
                         "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
@@ -729,14 +726,19 @@ static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx
     }
 }
 
-static void do_audio_out(AVFormatContext *s,
-                         OutputStream *ost,
-                         InputStream *ist,
-                         unsigned char *buf, int size)
+static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_t size)
+{
+    int fill_char = 0x00;
+    if (sample_fmt == AV_SAMPLE_FMT_U8)
+        fill_char = 0x80;
+    memset(buf, fill_char, size);
+}
+
+static void do_audio_out(AVFormatContext *s, OutputStream *ost,
+                         InputStream *ist, AVFrame *decoded_frame)
 {
     uint8_t *buftmp;
     int64_t audio_out_size, audio_buf_size;
-    int64_t allocated_for_size= size;
 
     int size_out, frame_bytes, ret, resample_changed;
     AVCodecContext *enc= ost->st->codec;
@@ -744,6 +746,9 @@ static void do_audio_out(AVFormatContext *s,
     int osize = av_get_bytes_per_sample(enc->sample_fmt);
     int isize = av_get_bytes_per_sample(dec->sample_fmt);
     const int coded_bps = av_get_bits_per_sample(enc->codec->id);
+    uint8_t *buf = decoded_frame->data[0];
+    int size     = decoded_frame->nb_samples * dec->channels * isize;
+    int64_t allocated_for_size = size;
 
 need_realloc:
     audio_buf_size= (allocated_for_size + isize*dec->channels - 1) / (isize*dec->channels);
@@ -829,9 +834,9 @@ need_realloc:
 
     if(audio_sync_method){
         double delta = get_sync_ipts(ost) * enc->sample_rate - ost->sync_opts
-                - av_fifo_size(ost->fifo)/(enc->channels * 2);
-        double idelta= delta*dec->sample_rate / enc->sample_rate;
-        int byte_delta= ((int)idelta)*2*dec->channels;
+                - av_fifo_size(ost->fifo)/(enc->channels * osize);
+        int idelta = delta * dec->sample_rate / enc->sample_rate;
+        int byte_delta = idelta * isize * dec->channels;
 
         //FIXME resample delay
         if(fabs(delta) > 50){
@@ -840,7 +845,8 @@ need_realloc:
                     byte_delta= FFMAX(byte_delta, -size);
                     size += byte_delta;
                     buf  -= byte_delta;
-                    av_log(NULL, AV_LOG_VERBOSE, "discarding %d audio samples\n", (int)-delta);
+                    av_log(NULL, AV_LOG_VERBOSE, "discarding %d audio samples\n",
+                           -byte_delta / (isize * dec->channels));
                     if(!size)
                         return;
                     ist->is_start=0;
@@ -854,11 +860,11 @@ need_realloc:
                     }
                     ist->is_start=0;
 
-                    memset(input_tmp, 0, byte_delta);
+                    generate_silence(input_tmp, dec->sample_fmt, byte_delta);
                     memcpy(input_tmp + byte_delta, buf, size);
                     buf= input_tmp;
                     size += byte_delta;
-                    av_log(NULL, AV_LOG_VERBOSE, "adding %d audio samples of silence\n", (int)delta);
+                    av_log(NULL, AV_LOG_VERBOSE, "adding %d audio samples of silence\n", idelta);
                 }
             }else if(audio_sync_method>1){
                 int comp= av_clip(delta, -audio_sync_method, audio_sync_method);
@@ -871,7 +877,7 @@ need_realloc:
         }
     }else
         ost->sync_opts= lrintf(get_sync_ipts(ost) * enc->sample_rate)
-                        - av_fifo_size(ost->fifo)/(enc->channels * 2); //FIXME wrong
+                        - av_fifo_size(ost->fifo)/(enc->channels * osize); //FIXME wrong
 
     if (ost->audio_resample) {
         buftmp = audio_buf;
@@ -1160,7 +1166,8 @@ static void do_video_out(AVFormatContext *s,
 
     format_video_sync = video_sync_method;
     if (format_video_sync < 0)
-        format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? 2 : 1;
+        format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? 0 :
+                            (s->oformat->flags & AVFMT_VARIABLE_FPS) ? 2 : 1;
 
     if (format_video_sync) {
         double vdelta = sync_ipts - ost->sync_opts;
@@ -1197,7 +1204,8 @@ static void do_video_out(AVFormatContext *s,
         av_init_packet(&pkt);
         pkt.stream_index= ost->index;
 
-        if (s->oformat->flags & AVFMT_RAWPICTURE) {
+        if (s->oformat->flags & AVFMT_RAWPICTURE &&
+            enc->codec->id == CODEC_ID_RAWVIDEO) {
             /* raw pictures are written as AVPicture structure to
                avoid any copies. We support temporarily the older
                method. */
@@ -1435,14 +1443,6 @@ static void print_report(OutputFile *output_files,
     }
 }
 
-static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_t size)
-{
-    int fill_char = 0x00;
-    if (sample_fmt == AV_SAMPLE_FMT_U8)
-        fill_char = 0x80;
-    memset(buf, fill_char, size);
-}
-
 static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
 {
     int i, ret;
@@ -1457,7 +1457,7 @@ static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
 
         if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <=1)
             continue;
-        if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE))
+        if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == CODEC_ID_RAWVIDEO)
             continue;
 
         for(;;) {
@@ -1615,29 +1615,239 @@ static void rate_emu_sleep(InputStream *ist)
     }
 }
 
+static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
+{
+    AVFrame *decoded_frame;
+    AVCodecContext *avctx = ist->st->codec;
+    int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
+    int i, ret;
+
+    if (!(decoded_frame = avcodec_alloc_frame()))
+        return AVERROR(ENOMEM);
+
+    ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
+    if (ret < 0) {
+        av_freep(&decoded_frame);
+        return ret;
+    }
+
+    if (!*got_output) {
+        /* no audio frame */
+        return ret;
+    }
+
+    /* if the decoder provides a pts, use it instead of the last packet pts.
+       the decoder could be delaying output by a packet or more. */
+    if (decoded_frame->pts != AV_NOPTS_VALUE)
+        ist->next_pts = decoded_frame->pts;
+
+    /* increment next_pts to use for the case where the input stream does not
+       have timestamps or there are multiple frames in the packet */
+    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
+                     avctx->sample_rate;
+
+    // preprocess audio (volume)
+    if (audio_volume != 256) {
+        int decoded_data_size = decoded_frame->nb_samples * avctx->channels * bps;
+        void *samples = decoded_frame->data[0];
+        switch (avctx->sample_fmt) {
+        case AV_SAMPLE_FMT_U8:
+        {
+            uint8_t *volp = samples;
+            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
+                int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
+                *volp++ = av_clip_uint8(v);
+            }
+            break;
+        }
+        case AV_SAMPLE_FMT_S16:
+        {
+            int16_t *volp = samples;
+            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
+                int v = ((*volp) * audio_volume + 128) >> 8;
+                *volp++ = av_clip_int16(v);
+            }
+            break;
+        }
+        case AV_SAMPLE_FMT_S32:
+        {
+            int32_t *volp = samples;
+            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
+                int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
+                *volp++ = av_clipl_int32(v);
+            }
+            break;
+        }
+        case AV_SAMPLE_FMT_FLT:
+        {
+            float *volp = samples;
+            float scale = audio_volume / 256.f;
+            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
+                *volp++ *= scale;
+            }
+            break;
+        }
+        case AV_SAMPLE_FMT_DBL:
+        {
+            double *volp = samples;
+            double scale = audio_volume / 256.;
+            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
+                *volp++ *= scale;
+            }
+            break;
+        }
+        default:
+            av_log(NULL, AV_LOG_FATAL,
+                   "Audio volume adjustment on sample format %s is not supported.\n",
+                   av_get_sample_fmt_name(ist->st->codec->sample_fmt));
+            exit_program(1);
+        }
+    }
+
+    rate_emu_sleep(ist);
+
+    for (i = 0; i < nb_output_streams; i++) {
+        OutputStream *ost = &output_streams[i];
+
+        if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
+            continue;
+        do_audio_out(output_files[ost->file_index].ctx, ost, ist, decoded_frame);
+    }
+    return ret;
+}
+
+static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *pkt_pts)
+{
+    AVFrame *decoded_frame, *filtered_frame = NULL;
+    void *buffer_to_free = NULL;
+    int i, ret = 0;
+    float quality;
+#if CONFIG_AVFILTER
+    int frame_available = 1;
+#endif
+
+    if (!(decoded_frame = avcodec_alloc_frame()))
+        return AVERROR(ENOMEM);
+    pkt->pts  = *pkt_pts;
+    pkt->dts  = ist->pts;
+    *pkt_pts  = AV_NOPTS_VALUE;
+
+    ret = avcodec_decode_video2(ist->st->codec,
+                                decoded_frame, got_output, pkt);
+    if (ret < 0)
+        goto fail;
+
+    quality = same_quant ? decoded_frame->quality : 0;
+    if (!*got_output) {
+        /* no picture yet */
+        av_freep(&decoded_frame);
+        return ret;
+    }
+    ist->next_pts = ist->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
+                                                 decoded_frame->pkt_dts);
+    if (pkt->duration)
+        ist->next_pts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
+    else if (ist->st->codec->time_base.num != 0) {
+        int ticks      = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
+                                           ist->st->codec->ticks_per_frame;
+        ist->next_pts += ((int64_t)AV_TIME_BASE *
+                          ist->st->codec->time_base.num * ticks) /
+                          ist->st->codec->time_base.den;
+    }
+    pkt->size = 0;
+    pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
+
+    rate_emu_sleep(ist);
+
+    for (i = 0; i < nb_output_streams; i++) {
+        OutputStream *ost = &output_streams[i];
+        int frame_size;
+
+        if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
+            continue;
+
+#if CONFIG_AVFILTER
+        if (ost->input_video_filter) {
+            AVRational sar;
+            if (ist->st->sample_aspect_ratio.num)
+                sar = ist->st->sample_aspect_ratio;
+            else
+                sar = ist->st->codec->sample_aspect_ratio;
+            av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame, ist->pts, sar);
+            if (!(filtered_frame = avcodec_alloc_frame())) {
+                ret = AVERROR(ENOMEM);
+                goto fail;
+            }
+            frame_available = avfilter_poll_frame(ost->output_video_filter->inputs[0]);
+        }
+        while (frame_available) {
+            AVRational ist_pts_tb;
+            if (ost->output_video_filter)
+                get_filtered_video_frame(ost->output_video_filter, filtered_frame, &ost->picref, &ist_pts_tb);
+            if (ost->picref)
+                ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
+            if (ost->picref->video && !ost->frame_aspect_ratio)
+                ost->st->codec->sample_aspect_ratio = ost->picref->video->pixel_aspect;
+#else
+            filtered_frame = decoded_frame;
+#endif
+
+            do_video_out(output_files[ost->file_index].ctx, ost, ist, filtered_frame, &frame_size,
+                         same_quant ? quality : ost->st->codec->global_quality);
+            if (vstats_filename && frame_size)
+                do_video_stats(output_files[ost->file_index].ctx, ost, frame_size);
+#if CONFIG_AVFILTER
+            frame_available = ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]);
+            if (ost->picref)
+                avfilter_unref_buffer(ost->picref);
+        }
+        av_freep(&filtered_frame);
+#endif
+    }
+
+fail:
+    av_free(buffer_to_free);
+    av_freep(&decoded_frame);
+    return ret;
+}
+
+static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
+{
+    AVSubtitle subtitle;
+    int i, ret = avcodec_decode_subtitle2(ist->st->codec,
+                                          &subtitle, got_output, pkt);
+    if (ret < 0)
+        return ret;
+    if (!*got_output)
+        return ret;
+
+    rate_emu_sleep(ist);
+
+    for (i = 0; i < nb_output_streams; i++) {
+        OutputStream *ost = &output_streams[i];
+
+        if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
+            continue;
+
+        do_subtitle_out(output_files[ost->file_index].ctx, ost, ist, &subtitle, pkt->pts);
+    }
+
+    avsubtitle_free(&subtitle);
+    return ret;
+}
+
 /* pkt = NULL means EOF (needed to flush decoder buffers) */
-static int output_packet(InputStream *ist, int ist_index,
+static int output_packet(InputStream *ist,
                          OutputStream *ost_table, int nb_ostreams,
                          const AVPacket *pkt)
 {
-    AVFormatContext *os;
-    OutputStream *ost;
-    int ret = 0, i;
+    int i;
     int got_output;
-    void *buffer_to_free = NULL;
-    static unsigned int samples_size= 0;
-    AVSubtitle subtitle, *subtitle_to_free;
     int64_t pkt_pts = AV_NOPTS_VALUE;
-#if CONFIG_AVFILTER
-    int frame_available;
-#endif
-    float quality;
-
     AVPacket avpkt;
-    int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
 
-    if(ist->next_pts == AV_NOPTS_VALUE)
-        ist->next_pts= ist->pts;
+    if (ist->next_pts == AV_NOPTS_VALUE)
+        ist->next_pts = ist->pts;
 
     if (pkt == NULL) {
         /* EOF handling */
@@ -1656,242 +1866,47 @@ static int output_packet(InputStream *ist, int ist_index,
 
     //while we have more to decode or while the decoder did output something on EOF
     while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
-        uint8_t *decoded_data_buf;
-        int decoded_data_size;
-        AVFrame *decoded_frame, *filtered_frame;
+        int ret = 0;
     handle_eof:
-        ist->pts= ist->next_pts;
 
-        if(avpkt.size && avpkt.size != pkt->size)
+        ist->pts = ist->next_pts;
+
+        if (avpkt.size && avpkt.size != pkt->size) {
             av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
                    "Multiple frames in a packet from stream %d\n", pkt->stream_index);
-            ist->showed_multi_packet_warning=1;
-
-        /* decode the packet if needed */
-        decoded_frame    = filtered_frame = NULL;
-        decoded_data_buf = NULL; /* fail safe */
-        decoded_data_size= 0;
-        subtitle_to_free = NULL;
-            switch(ist->st->codec->codec_type) {
-            case AVMEDIA_TYPE_AUDIO:{
-                if(pkt && samples_size < FFMAX(pkt->size * bps, AVCODEC_MAX_AUDIO_FRAME_SIZE)) {
-                    samples_size = FFMAX(pkt->size * bps, AVCODEC_MAX_AUDIO_FRAME_SIZE);
-                    av_free(samples);
-                    samples= av_malloc(samples_size);
-                }
-                decoded_data_size= samples_size;
-                    /* XXX: could avoid copy if PCM 16 bits with same
-                       endianness as CPU */
-                ret = avcodec_decode_audio3(ist->st->codec, samples, &decoded_data_size,
-                                            &avpkt);
-                if (ret < 0)
-                    return ret;
-                avpkt.data += ret;
-                avpkt.size -= ret;
-                got_output  = decoded_data_size > 0;
-                /* Some bug in mpeg audio decoder gives */
-                /* decoded_data_size < 0, it seems they are overflows */
-                if (!got_output) {
-                    /* no audio frame */
-                    continue;
-                }
-                decoded_data_buf = (uint8_t *)samples;
-                ist->next_pts += ((int64_t)AV_TIME_BASE/bps * decoded_data_size) /
-                    (ist->st->codec->sample_rate * ist->st->codec->channels);
-                break;}
-            case AVMEDIA_TYPE_VIDEO:
-                    if (!(decoded_frame = avcodec_alloc_frame()))
-                        return AVERROR(ENOMEM);
-                    avpkt.pts = pkt_pts;
-                    avpkt.dts = ist->pts;
-                    pkt_pts = AV_NOPTS_VALUE;
-
-                    ret = avcodec_decode_video2(ist->st->codec,
-                                                decoded_frame, &got_output, &avpkt);
-                    quality = same_quant ? decoded_frame->quality : 0;
-                    if (ret < 0)
-                        goto fail;
-                    if (!got_output) {
-                        /* no picture yet */
-                        av_freep(&decoded_frame);
-                        goto discard_packet;
-                    }
-                    ist->next_pts = ist->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
-                                                                 decoded_frame->pkt_dts);
-                    if (ist->st->codec->time_base.num != 0) {
-                        int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
-                        ist->next_pts += ((int64_t)AV_TIME_BASE *
-                                          ist->st->codec->time_base.num * ticks) /
-                            ist->st->codec->time_base.den;
-                    }
-                    avpkt.size = 0;
-                    buffer_to_free = NULL;
-                    pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
-                    break;
-            case AVMEDIA_TYPE_SUBTITLE:
-                ret = avcodec_decode_subtitle2(ist->st->codec,
-                                               &subtitle, &got_output, &avpkt);
-                if (ret < 0)
-                    return ret;
-                if (!got_output) {
-                    goto discard_packet;
-                }
-                subtitle_to_free = &subtitle;
-                avpkt.size = 0;
-                break;
-            default:
-                return -1;
-            }
-
-        // preprocess audio (volume)
-        if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
-            if (audio_volume != 256) {
-                switch (ist->st->codec->sample_fmt) {
-                case AV_SAMPLE_FMT_U8:
-                {
-                    uint8_t *volp = samples;
-                    for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
-                        int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
-                        *volp++ = av_clip_uint8(v);
-                    }
-                    break;
-                }
-                case AV_SAMPLE_FMT_S16:
-                {
-                    int16_t *volp = samples;
-                    for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
-                        int v = ((*volp) * audio_volume + 128) >> 8;
-                        *volp++ = av_clip_int16(v);
-                    }
-                    break;
-                }
-                case AV_SAMPLE_FMT_S32:
-                {
-                    int32_t *volp = samples;
-                    for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
-                        int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
-                        *volp++ = av_clipl_int32(v);
-                    }
-                    break;
-                }
-                case AV_SAMPLE_FMT_FLT:
-                {
-                    float *volp = samples;
-                    float scale = audio_volume / 256.f;
-                    for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
-                        *volp++ *= scale;
-                    }
-                    break;
-                }
-                case AV_SAMPLE_FMT_DBL:
-                {
-                    double *volp = samples;
-                    double scale = audio_volume / 256.;
-                    for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
-                        *volp++ *= scale;
-                    }
-                    break;
-                }
-                default:
-                    av_log(NULL, AV_LOG_FATAL,
-                           "Audio volume adjustment on sample format %s is not supported.\n",
-                           av_get_sample_fmt_name(ist->st->codec->sample_fmt));
-                    exit_program(1);
-                }
-            }
+            ist->showed_multi_packet_warning = 1;
         }
 
-        /* frame rate emulation */
-        rate_emu_sleep(ist);
-
-        /* if output time reached then transcode raw format,
-           encode packets and output them */
-        for (i = 0; i < nb_ostreams; i++) {
-            int frame_size;
-
-            ost = &ost_table[i];
-
-            if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
-                continue;
-
-#if CONFIG_AVFILTER
-            if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
-                ost->input_video_filter) {
-                AVRational sar;
-                if (ist->st->sample_aspect_ratio.num)
-                    sar = ist->st->sample_aspect_ratio;
-                else
-                    sar = ist->st->codec->sample_aspect_ratio;
-                av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame, ist->pts, sar);
-                if (!(filtered_frame = avcodec_alloc_frame())) {
-                    ret = AVERROR(ENOMEM);
-                    goto fail;
-                }
-            }
-            frame_available = ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO ||
-                !ost->output_video_filter || avfilter_poll_frame(ost->output_video_filter->inputs[0]);
-            while (frame_available) {
-                AVRational ist_pts_tb;
-                if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ost->output_video_filter)
-                    get_filtered_video_frame(ost->output_video_filter, filtered_frame, &ost->picref, &ist_pts_tb);
-                if (ost->picref)
-                    ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
-#else
-                filtered_frame = decoded_frame;
-#endif
-                os = output_files[ost->file_index].ctx;
-
-                /* set the input output pts pairs */
-                //ost->sync_ipts = (double)(ist->pts + input_files[ist->file_index].ts_offset - start_time)/ AV_TIME_BASE;
-
-                av_assert0(ist->decoding_needed);
-                switch(ost->st->codec->codec_type) {
-                case AVMEDIA_TYPE_AUDIO:
-                    do_audio_out(os, ost, ist, decoded_data_buf, decoded_data_size);
-                    break;
-                case AVMEDIA_TYPE_VIDEO:
-#if CONFIG_AVFILTER
-                    if (ost->picref->video && !ost->frame_aspect_ratio)
-                        ost->st->codec->sample_aspect_ratio = ost->picref->video->pixel_aspect;
-#endif
-                    do_video_out(os, ost, ist, filtered_frame, &frame_size,
-                                 same_quant ? quality : ost->st->codec->global_quality);
-                    if (vstats_filename && frame_size)
-                        do_video_stats(os, ost, frame_size);
-                    break;
-                case AVMEDIA_TYPE_SUBTITLE:
-                    do_subtitle_out(os, ost, ist, &subtitle,
-                                    pkt->pts);
-                    break;
-                default:
-                    abort();
-                }
-#if CONFIG_AVFILTER
-                frame_available = (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) &&
-                                   ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]);
-                if (ost->picref)
-                    avfilter_unref_buffer(ost->picref);
-            }
-            av_freep(&filtered_frame);
-#endif
-            }
-
-fail:
-        av_free(buffer_to_free);
-        /* XXX: allocate the subtitles in the codec ? */
-        if (subtitle_to_free) {
-            avsubtitle_free(subtitle_to_free);
-            subtitle_to_free = NULL;
+        switch(ist->st->codec->codec_type) {
+        case AVMEDIA_TYPE_AUDIO:
+            ret = transcode_audio    (ist, &avpkt, &got_output);
+            break;
+        case AVMEDIA_TYPE_VIDEO:
+            ret = transcode_video    (ist, &avpkt, &got_output, &pkt_pts);
+            break;
+        case AVMEDIA_TYPE_SUBTITLE:
+            ret = transcode_subtitles(ist, &avpkt, &got_output);
+            break;
+        default:
+            return -1;
         }
-        av_freep(&decoded_frame);
+
         if (ret < 0)
             return ret;
+        // touch data and size only if not EOF
+        if (pkt) {
+            avpkt.data += ret;
+            avpkt.size -= ret;
+        }
+        if (!got_output) {
+            continue;
+        }
     }
- discard_packet:
 
     /* handle stream copy */
     if (!ist->decoding_needed) {
         rate_emu_sleep(ist);
+        ist->pts = ist->next_pts;
         switch (ist->st->codec->codec_type) {
         case AVMEDIA_TYPE_AUDIO:
             ist->next_pts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
@@ -1908,7 +1923,7 @@ fail:
         }
     }
     for (i = 0; pkt && i < nb_ostreams; i++) {
-        ost = &ost_table[i];
+        OutputStream *ost = &ost_table[i];
 
         if (!check_output_constraints(ist, ost) || ost->encoding_needed)
             continue;
@@ -2050,9 +2065,7 @@ static int transcode_init(OutputFile *output_files,
             }
             memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
             codec->extradata_size = icodec->extradata_size;
-            if (!copy_tb &&
-                av_q2d(icodec->time_base)*icodec->ticks_per_frame > av_q2d(ist->st->time_base) &&
-                av_q2d(ist->st->time_base) < 1.0/500) {
+            if (!copy_tb) {
                 codec->time_base      = icodec->time_base;
                 codec->time_base.num *= icodec->ticks_per_frame;
                 av_reduce(&codec->time_base.num, &codec->time_base.den,
@@ -2503,7 +2516,7 @@ static int transcode(OutputFile *output_files,
         }
 
         //fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
-        if (output_packet(ist, ist_index, output_streams, nb_output_streams, &pkt) < 0) {
+        if (output_packet(ist, output_streams, nb_output_streams, &pkt) < 0) {
 
             av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
                    ist->file_index, ist->st->index);
@@ -2524,7 +2537,7 @@ static int transcode(OutputFile *output_files,
     for (i = 0; i < nb_input_streams; i++) {
         ist = &input_streams[i];
         if (ist->decoding_needed) {
-            output_packet(ist, i, output_streams, nb_output_streams, NULL);
+            output_packet(ist, output_streams, nb_output_streams, NULL);
         }
     }
     flush_encoders(output_streams, nb_output_streams);