X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=avplay.c;h=47701320bcddff3bcdaf83d34db9850ec2664119;hb=8a62d2c28fbacd1ae20c35887a1eecba2be14371;hp=5af7f01cd70f16c0d28df665074e8ba7a46f9dad;hpb=eef9f06508354d1c7d5624c1c18997e7974288f1;p=ffmpeg diff --git a/avplay.c b/avplay.c index 5af7f01cd70..47701320bcd 100644 --- a/avplay.c +++ b/avplay.c @@ -37,16 +37,13 @@ #include "libavutil/time.h" #include "libavformat/avformat.h" #include "libavdevice/avdevice.h" -#include "libswscale/swscale.h" #include "libavresample/avresample.h" #include "libavutil/opt.h" #include "libavcodec/avfft.h" -#if CONFIG_AVFILTER -# include "libavfilter/avfilter.h" -# include "libavfilter/buffersink.h" -# include "libavfilter/buffersrc.h" -#endif +#include "libavfilter/avfilter.h" +#include "libavfilter/buffersink.h" +#include "libavfilter/buffersrc.h" #include "cmdutils.h" @@ -152,6 +149,7 @@ typedef struct PlayerState { double audio_diff_threshold; int audio_diff_avg_count; AVStream *audio_st; + AVCodecContext *audio_dec; PacketQueue audioq; int audio_hw_buf_size; uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE]; @@ -184,6 +182,7 @@ typedef struct PlayerState { int subtitle_stream; int subtitle_stream_changed; AVStream *subtitle_st; + AVCodecContext *subtitle_dec; PacketQueue subtitleq; SubPicture subpq[SUBPICTURE_QUEUE_SIZE]; int subpq_size, subpq_rindex, subpq_windex; @@ -196,6 +195,7 @@ typedef struct PlayerState { double video_clock; // pts of last decoded frame / predicted pts of next decoded frame int video_stream; AVStream *video_st; + AVCodecContext *video_dec; PacketQueue videoq; double video_current_pts; // current displayed pts (different from video_clock if frame fifos are used) double video_current_pts_drift; // video_current_pts - time (av_gettime_relative) at which we updated video_current_pts - used to have running video pts @@ -204,9 +204,6 @@ typedef struct PlayerState { int pictq_size, pictq_rindex, pictq_windex; SDL_mutex *pictq_mutex; SDL_cond *pictq_cond; -#if !CONFIG_AVFILTER - struct SwsContext *img_convert_ctx; -#endif // QETimer *video_timer; char filename[1024]; @@ -214,10 +211,8 @@ typedef struct PlayerState { PtsCorrectionContext pts_ctx; -#if CONFIG_AVFILTER AVFilterContext *in_video_filter; // the first filter in the video chain AVFilterContext *out_video_filter; // the last filter in the video chain -#endif float skip_frames; float skip_frames_index; @@ -266,9 +261,7 @@ static int framedrop = 1; static int infinite_buffer = 0; static int rdftspeed = 20; -#if CONFIG_AVFILTER static char *vfilters = NULL; -#endif static int autorotate = 1; /* current context */ @@ -653,21 +646,10 @@ static void video_image_display(PlayerState *is) vp = &is->pictq[is->pictq_rindex]; if (vp->bmp) { -#if CONFIG_AVFILTER if (!vp->sar.num) aspect_ratio = 0; else aspect_ratio = av_q2d(vp->sar); -#else - - /* XXX: use variable in the frame */ - if (is->video_st->sample_aspect_ratio.num) - aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio); - else if (is->video_st->codec->sample_aspect_ratio.num) - aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio); - else - aspect_ratio = 0; -#endif if (aspect_ratio <= 0.0) aspect_ratio = 1.0; aspect_ratio *= (float)vp->width / (float)vp->height; @@ -880,15 +862,9 @@ static int video_open(PlayerState *is) } else if (!is_full_screen && screen_width) { w = screen_width; h = screen_height; -#if CONFIG_AVFILTER } else if (is->out_video_filter && is->out_video_filter->inputs[0]) { w = is->out_video_filter->inputs[0]->w; h = is->out_video_filter->inputs[0]->h; 
-#else - } else if (is->video_st && is->video_st->codec->width) { - w = is->video_st->codec->width; - h = is->video_st->codec->height; -#endif } else { w = 640; h = 480; @@ -1229,10 +1205,6 @@ static void player_close(PlayerState *is) SDL_DestroyCond(is->pictq_cond); SDL_DestroyMutex(is->subpq_mutex); SDL_DestroyCond(is->subpq_cond); -#if !CONFIG_AVFILTER - if (is->img_convert_ctx) - sws_freeContext(is->img_convert_ctx); -#endif } static void do_exit(void) @@ -1262,15 +1234,9 @@ static void alloc_picture(void *opaque) if (vp->bmp) SDL_FreeYUVOverlay(vp->bmp); -#if CONFIG_AVFILTER vp->width = is->out_video_filter->inputs[0]->w; vp->height = is->out_video_filter->inputs[0]->h; vp->pix_fmt = is->out_video_filter->inputs[0]->format; -#else - vp->width = is->video_st->codec->width; - vp->height = is->video_st->codec->height; - vp->pix_fmt = is->video_st->codec->pix_fmt; -#endif vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height, SDL_YV12_OVERLAY, @@ -1295,9 +1261,7 @@ static void alloc_picture(void *opaque) static int queue_picture(PlayerState *is, AVFrame *src_frame, double pts, int64_t pos) { VideoPicture *vp; -#if !CONFIG_AVFILTER - int dst_pix_fmt = AV_PIX_FMT_YUV420P; -#endif + /* wait until we have space to put a new picture */ SDL_LockMutex(is->pictq_mutex); @@ -1319,13 +1283,8 @@ static int queue_picture(PlayerState *is, AVFrame *src_frame, double pts, int64_ /* alloc or resize hardware picture buffer */ if (!vp->bmp || vp->reallocate || -#if CONFIG_AVFILTER vp->width != is->out_video_filter->inputs[0]->w || vp->height != is->out_video_filter->inputs[0]->h) { -#else - vp->width != is->video_st->codec->width || - vp->height != is->video_st->codec->height) { -#endif SDL_Event event; vp->allocated = 0; @@ -1364,22 +1323,10 @@ static int queue_picture(PlayerState *is, AVFrame *src_frame, double pts, int64_ linesize[1] = vp->bmp->pitches[2]; linesize[2] = vp->bmp->pitches[1]; -#if CONFIG_AVFILTER // FIXME use direct rendering av_image_copy(data, linesize, src_frame->data, src_frame->linesize, vp->pix_fmt, vp->width, vp->height); -#else - av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags); - is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx, - vp->width, vp->height, vp->pix_fmt, vp->width, vp->height, - dst_pix_fmt, sws_flags, NULL, NULL, NULL); - if (!is->img_convert_ctx) { - fprintf(stderr, "Cannot initialize the conversion context\n"); - exit(1); - } - sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize, - 0, vp->height, data, linesize); -#endif + /* update the bitmap content */ SDL_UnlockYUVOverlay(vp->bmp); @@ -1414,8 +1361,8 @@ static int output_picture2(PlayerState *is, AVFrame *src_frame, double pts1, int pts = is->video_clock; } /* update video clock for next frame */ - frame_delay = av_q2d(is->video_st->codec->time_base); - /* for MPEG2, the frame can be repeated, so we update the + frame_delay = av_q2d(is->video_dec->time_base); + /* For MPEG-2, the frame can be repeated, so we update the clock accordingly */ frame_delay += src_frame->repeat_pict * (frame_delay * 0.5); is->video_clock += frame_delay; @@ -1433,7 +1380,7 @@ static int get_video_frame(PlayerState *is, AVFrame *frame, int64_t *pts, AVPack return -1; if (pkt->data == flush_pkt.data) { - avcodec_flush_buffers(is->video_st->codec); + avcodec_flush_buffers(is->video_dec); SDL_LockMutex(is->pictq_mutex); // Make sure there are no long delay timers (ideally we should just flush the que but thats harder) @@ -1455,13 +1402,13 @@ static int get_video_frame(PlayerState *is, AVFrame *frame, 
int64_t *pts, AVPack return 0; } - avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt); + avcodec_decode_video2(is->video_dec, frame, &got_picture, pkt); if (got_picture) { if (decoder_reorder_pts == -1) { - *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts); + *pts = guess_correct_pts(&is->pts_ctx, frame->pts, frame->pkt_dts); } else if (decoder_reorder_pts) { - *pts = frame->pkt_pts; + *pts = frame->pts; } else { *pts = frame->pkt_dts; } @@ -1483,14 +1430,13 @@ static int get_video_frame(PlayerState *is, AVFrame *frame, int64_t *pts, AVPack return 0; } -#if CONFIG_AVFILTER static int configure_video_filters(AVFilterGraph *graph, PlayerState *is, const char *vfilters) { char sws_flags_str[128]; char buffersrc_args[256]; int ret; AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter; - AVCodecContext *codec = is->video_st->codec; + AVCodecContext *codec = is->video_dec; snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags); graph->scale_sws_opts = av_strdup(sws_flags_str); @@ -1579,8 +1525,6 @@ static int configure_video_filters(AVFilterGraph *graph, PlayerState *is, const return ret; } -#endif /* CONFIG_AVFILTER */ - static int video_thread(void *arg) { AVPacket pkt = { 0 }; @@ -1590,11 +1534,10 @@ static int video_thread(void *arg) double pts; int ret; -#if CONFIG_AVFILTER AVFilterGraph *graph = avfilter_graph_alloc(); AVFilterContext *filt_out = NULL, *filt_in = NULL; - int last_w = is->video_st->codec->width; - int last_h = is->video_st->codec->height; + int last_w = is->video_dec->width; + int last_h = is->video_dec->height; if (!graph) { av_frame_free(&frame); return AVERROR(ENOMEM); @@ -1604,19 +1547,14 @@ static int video_thread(void *arg) goto the_end; filt_in = is->in_video_filter; filt_out = is->out_video_filter; -#endif if (!frame) { -#if CONFIG_AVFILTER avfilter_graph_free(&graph); -#endif return AVERROR(ENOMEM); } for (;;) { -#if CONFIG_AVFILTER AVRational tb; -#endif while (is->paused && !is->videoq.abort_request) SDL_Delay(10); @@ -1629,19 +1567,18 @@ static int video_thread(void *arg) if (!ret) continue; -#if CONFIG_AVFILTER - if ( last_w != is->video_st->codec->width - || last_h != is->video_st->codec->height) { + if ( last_w != is->video_dec->width + || last_h != is->video_dec->height) { av_log(NULL, AV_LOG_TRACE, "Changing size %dx%d -> %dx%d\n", last_w, last_h, - is->video_st->codec->width, is->video_st->codec->height); + is->video_dec->width, is->video_dec->height); avfilter_graph_free(&graph); graph = avfilter_graph_alloc(); if ((ret = configure_video_filters(graph, is, vfilters)) < 0) goto the_end; filt_in = is->in_video_filter; filt_out = is->out_video_filter; - last_w = is->video_st->codec->width; - last_h = is->video_st->codec->height; + last_w = is->video_dec->width; + last_h = is->video_dec->height; } frame->pts = pts_int; @@ -1669,10 +1606,6 @@ static int video_thread(void *arg) pts = pts_int * av_q2d(is->video_st->time_base); ret = output_picture2(is, frame, pts, 0); } -#else - pts = pts_int * av_q2d(is->video_st->time_base); - ret = output_picture2(is, frame, pts, pkt.pos); -#endif if (ret < 0) goto the_end; @@ -1683,10 +1616,8 @@ static int video_thread(void *arg) stream_pause(player); } the_end: -#if CONFIG_AVFILTER av_freep(&vfilters); avfilter_graph_free(&graph); -#endif av_packet_unref(&pkt); av_frame_free(&frame); return 0; @@ -1710,7 +1641,7 @@ static int subtitle_thread(void *arg) break; if (pkt->data == flush_pkt.data) { - avcodec_flush_buffers(is->subtitle_st->codec); + 
avcodec_flush_buffers(is->subtitle_dec); continue; } SDL_LockMutex(is->subpq_mutex); @@ -1729,9 +1660,9 @@ static int subtitle_thread(void *arg) this packet, if any */ pts = 0; if (pkt->pts != AV_NOPTS_VALUE) - pts = av_q2d(is->subtitle_st->time_base) * pkt->pts; + pts = av_q2d(is->subtitle_dec->time_base) * pkt->pts; - avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub, + avcodec_decode_subtitle2(is->subtitle_dec, &sp->sub, &got_subtitle, pkt); if (got_subtitle && sp->sub.format == 0) { @@ -1860,7 +1791,7 @@ static int audio_decode_frame(PlayerState *is, double *pts_ptr) { AVPacket *pkt_temp = &is->audio_pkt_temp; AVPacket *pkt = &is->audio_pkt; - AVCodecContext *dec = is->audio_st->codec; + AVCodecContext *dec = is->audio_dec; int n, len1, data_size, got_frame; double pts; int new_packet = 0; @@ -2086,11 +2017,11 @@ static AVCodec *choose_decoder(PlayerState *is, AVFormatContext *ic, AVStream *s } if (codec_name) { - AVCodec *codec = find_codec_or_die(codec_name, st->codec->codec_type); - st->codec->codec_id = codec->id; + AVCodec *codec = find_codec_or_die(codec_name, st->codecpar->codec_type); + st->codecpar->codec_id = codec->id; return codec; } else - return avcodec_find_decoder(st->codec->codec_id); + return avcodec_find_decoder(st->codecpar->codec_id); } /* open a given stream. Return 0 if OK */ @@ -2106,7 +2037,16 @@ static int stream_component_open(PlayerState *is, int stream_index) if (stream_index < 0 || stream_index >= ic->nb_streams) return -1; - avctx = ic->streams[stream_index]->codec; + + avctx = avcodec_alloc_context3(NULL); + if (!avctx) + return AVERROR(ENOMEM); + + ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar); + if (ret < 0) { + avcodec_free_context(&avctx); + return ret; + } opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], NULL); @@ -2176,13 +2116,14 @@ static int stream_component_open(PlayerState *is, int stream_index) case AVMEDIA_TYPE_AUDIO: is->audio_stream = stream_index; is->audio_st = ic->streams[stream_index]; + is->audio_dec = avctx; is->audio_buf_size = 0; is->audio_buf_index = 0; /* init averaging filter */ is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB); is->audio_diff_avg_count = 0; - /* since we do not have a precise anough audio fifo fullness, + /* since we do not have a precise enough audio FIFO fullness, we correct audio sync only if larger than this threshold */ is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate; @@ -2193,6 +2134,7 @@ static int stream_component_open(PlayerState *is, int stream_index) case AVMEDIA_TYPE_VIDEO: is->video_stream = stream_index; is->video_st = ic->streams[stream_index]; + is->video_dec = avctx; packet_queue_init(&is->videoq); is->video_tid = SDL_CreateThread(video_thread, is); @@ -2200,6 +2142,7 @@ static int stream_component_open(PlayerState *is, int stream_index) case AVMEDIA_TYPE_SUBTITLE: is->subtitle_stream = stream_index; is->subtitle_st = ic->streams[stream_index]; + is->subtitle_dec = avctx; packet_queue_init(&is->subtitleq); is->subtitle_tid = SDL_CreateThread(subtitle_thread, is); @@ -2217,13 +2160,13 @@ fail: static void stream_component_close(PlayerState *is, int stream_index) { AVFormatContext *ic = is->ic; - AVCodecContext *avctx; + AVCodecParameters *par; if (stream_index < 0 || stream_index >= ic->nb_streams) return; - avctx = ic->streams[stream_index]->codec; + par = ic->streams[stream_index]->codecpar; - switch (avctx->codec_type) { + switch (par->codec_type) { case AVMEDIA_TYPE_AUDIO: 
packet_queue_abort(&is->audioq); @@ -2277,17 +2220,19 @@ static void stream_component_close(PlayerState *is, int stream_index) } ic->streams[stream_index]->discard = AVDISCARD_ALL; - avcodec_close(avctx); - switch (avctx->codec_type) { + switch (par->codec_type) { case AVMEDIA_TYPE_AUDIO: + avcodec_free_context(&is->audio_dec); is->audio_st = NULL; is->audio_stream = -1; break; case AVMEDIA_TYPE_VIDEO: + avcodec_free_context(&is->video_dec); is->video_st = NULL; is->video_stream = -1; break; case AVMEDIA_TYPE_SUBTITLE: + avcodec_free_context(&is->subtitle_dec); is->subtitle_st = NULL; is->subtitle_stream = -1; break; @@ -2366,7 +2311,7 @@ static int stream_setup(PlayerState *is) orig_nb_streams = ic->nb_streams; for (i = 0; i < ic->nb_streams; i++) - ic->streams[i]->codec->codec = choose_decoder(is, ic, ic->streams[i]); + choose_decoder(is, ic, ic->streams[i]); err = avformat_find_stream_info(ic, opts); @@ -2529,7 +2474,7 @@ static int decode_thread(void *arg) packet_queue_put(&is->videoq, pkt); } if (is->audio_stream >= 0 && - (is->audio_st->codec->codec->capabilities & AV_CODEC_CAP_DELAY)) { + (is->audio_dec->codec->capabilities & AV_CODEC_CAP_DELAY)) { av_init_packet(pkt); pkt->data = NULL; pkt->size = 0; @@ -2651,12 +2596,12 @@ static void stream_cycle_channel(PlayerState *is, int codec_type) if (stream_index == start_index) return; st = ic->streams[stream_index]; - if (st->codec->codec_type == codec_type) { + if (st->codecpar->codec_type == codec_type) { /* check that parameters are OK */ switch (codec_type) { case AVMEDIA_TYPE_AUDIO: - if (st->codec->sample_rate != 0 && - st->codec->channels != 0) + if (st->codecpar->sample_rate != 0 && + st->codecpar->channels != 0) goto the_end; break; case AVMEDIA_TYPE_VIDEO: @@ -2993,9 +2938,7 @@ static const OptionDef options[] = { { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" }, { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" }, { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" }, -#if CONFIG_AVFILTER { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" }, -#endif { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" }, { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { opt_default }, "generic catch all option", "" }, { "i", 0, { NULL }, "avconv compatibility dummy option", ""}, @@ -3022,9 +2965,6 @@ void show_help_default(const char *opt, const char *arg) printf("\n"); show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM); show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM); -#if !CONFIG_AVFILTER - show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM); -#endif printf("\nWhile playing:\n" "q, ESC quit\n" "f toggle full screen\n" @@ -3065,9 +3005,7 @@ int main(int argc, char **argv) #if CONFIG_AVDEVICE avdevice_register_all(); #endif -#if CONFIG_AVFILTER avfilter_register_all(); -#endif av_register_all(); avformat_network_init();
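
The heart of this patch is the move away from the deprecated AVStream->codec field: stream_component_open() now allocates a private AVCodecContext per stream, fills it from the stream's AVCodecParameters, and stores it in PlayerState (audio_dec/video_dec/subtitle_dec), while stream_component_close() releases it with avcodec_free_context(). A minimal sketch of that open pattern follows, using only the public lavf/lavc API; the helper name and the simplified error handling are illustrative and not taken from the patch:

    #include <libavformat/avformat.h>
    #include <libavcodec/avcodec.h>

    /* Build a private decoder context from the demuxer's AVCodecParameters
     * instead of touching the deprecated AVStream->codec field. */
    static int open_decoder_from_stream(AVFormatContext *ic, int stream_index,
                                        AVCodecContext **out_dec)
    {
        AVStream *st = ic->streams[stream_index];
        const AVCodec *codec = avcodec_find_decoder(st->codecpar->codec_id);
        AVCodecContext *avctx;
        int ret;

        if (!codec)
            return AVERROR_DECODER_NOT_FOUND;

        avctx = avcodec_alloc_context3(codec);
        if (!avctx)
            return AVERROR(ENOMEM);

        /* Copy width/height, sample_rate, extradata, ... from the demuxer. */
        ret = avcodec_parameters_to_context(avctx, st->codecpar);
        if (ret < 0)
            goto fail;

        ret = avcodec_open2(avctx, codec, NULL);
        if (ret < 0)
            goto fail;

        *out_dec = avctx;   /* caller releases it with avcodec_free_context() */
        return 0;

    fail:
        avcodec_free_context(&avctx);
        return ret;
    }

In avplay itself the decoder is still picked by choose_decoder() and per-stream options from filter_codec_opts() are applied before opening, but the allocate -> avcodec_parameters_to_context() -> open -> avcodec_free_context() lifecycle is the same.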