X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=doc%2Fexamples%2Fdemuxing_decoding.c;h=69a31a893519cdb91bd1f39cad6b7df02b160e59;hb=626535f6a169e2d821b969e0ea77125ba7482113;hp=b1a216abb4cb72f448b4fe215d0ab3692be72a1e;hpb=68de778ccc35bea885a989e47358089da006a8b6;p=ffmpeg

diff --git a/doc/examples/demuxing_decoding.c b/doc/examples/demuxing_decoding.c
index b1a216abb4c..55fdb2555c7 100644
--- a/doc/examples/demuxing_decoding.c
+++ b/doc/examples/demuxing_decoding.c
@@ -51,99 +51,97 @@ static int video_dst_bufsize;
 static int video_stream_idx = -1, audio_stream_idx = -1;
 static AVFrame *frame = NULL;
-static AVPacket pkt;
+static AVPacket *pkt = NULL;
 static int video_frame_count = 0;
 static int audio_frame_count = 0;
 
-/* Enable or disable frame reference counting. You are not supposed to support
- * both paths in your application but pick the one most appropriate to your
- * needs. Look for the use of refcount in this example to see what are the
- * differences of API usage between them. */
-static int refcount = 0;
+static int output_video_frame(AVFrame *frame)
+{
+    if (frame->width != width || frame->height != height ||
+        frame->format != pix_fmt) {
+        /* To handle this change, one could call av_image_alloc again and
+         * decode the following frames into another rawvideo file. */
+        fprintf(stderr, "Error: Width, height and pixel format have to be "
+                "constant in a rawvideo file, but the width, height or "
+                "pixel format of the input video changed:\n"
+                "old: width = %d, height = %d, format = %s\n"
+                "new: width = %d, height = %d, format = %s\n",
+                width, height, av_get_pix_fmt_name(pix_fmt),
+                frame->width, frame->height,
+                av_get_pix_fmt_name(frame->format));
+        return -1;
+    }
+
+    printf("video_frame n:%d coded_n:%d\n",
+           video_frame_count++, frame->coded_picture_number);
+
+    /* copy decoded frame to destination buffer:
+     * this is required since rawvideo expects non aligned data */
+    av_image_copy(video_dst_data, video_dst_linesize,
+                  (const uint8_t **)(frame->data), frame->linesize,
+                  pix_fmt, width, height);
+
+    /* write to rawvideo file */
+    fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
+    return 0;
+}
+
+static int output_audio_frame(AVFrame *frame)
+{
+    size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
+    printf("audio_frame n:%d nb_samples:%d pts:%s\n",
+           audio_frame_count++, frame->nb_samples,
+           av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
+
+    /* Write the raw audio data samples of the first plane. This works
+     * fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
+     * most audio decoders output planar audio, which uses a separate
+     * plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
+     * In other words, this code will write only the first audio channel
+     * in these cases.
+     * You should use libswresample or libavfilter to convert the frame
+     * to packed data. */
+    fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
+
+    return 0;
+}
 
-static int decode_packet(int *got_frame, int cached)
+static int decode_packet(AVCodecContext *dec, const AVPacket *pkt)
 {
     int ret = 0;
-    int decoded = pkt.size;
 
-    *got_frame = 0;
+    // submit the packet to the decoder
+    ret = avcodec_send_packet(dec, pkt);
+    if (ret < 0) {
+        fprintf(stderr, "Error submitting a packet for decoding (%s)\n", av_err2str(ret));
+        return ret;
+    }
 
-    if (pkt.stream_index == video_stream_idx) {
-        /* decode video frame */
-        ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
+    // get all the available frames from the decoder
+    while (ret >= 0) {
+        ret = avcodec_receive_frame(dec, frame);
         if (ret < 0) {
-            fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
-            return ret;
-        }
+            // those two return values are special and mean there is no output
+            // frame available, but there were no errors during decoding
+            if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
+                return 0;
 
-        if (*got_frame) {
-
-            if (frame->width != width || frame->height != height ||
-                frame->format != pix_fmt) {
-                /* To handle this change, one could call av_image_alloc again and
-                 * decode the following frames into another rawvideo file. */
-                fprintf(stderr, "Error: Width, height and pixel format have to be "
-                        "constant in a rawvideo file, but the width, height or "
-                        "pixel format of the input video changed:\n"
-                        "old: width = %d, height = %d, format = %s\n"
-                        "new: width = %d, height = %d, format = %s\n",
-                        width, height, av_get_pix_fmt_name(pix_fmt),
-                        frame->width, frame->height,
-                        av_get_pix_fmt_name(frame->format));
-                return -1;
-            }
-
-            printf("video_frame%s n:%d coded_n:%d\n",
-                   cached ? "(cached)" : "",
-                   video_frame_count++, frame->coded_picture_number);
-
-            /* copy decoded frame to destination buffer:
-             * this is required since rawvideo expects non aligned data */
-            av_image_copy(video_dst_data, video_dst_linesize,
-                          (const uint8_t **)(frame->data), frame->linesize,
-                          pix_fmt, width, height);
-
-            /* write to rawvideo file */
-            fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
-        }
-    } else if (pkt.stream_index == audio_stream_idx) {
-        /* decode audio frame */
-        ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
-        if (ret < 0) {
-            fprintf(stderr, "Error decoding audio frame (%s)\n", av_err2str(ret));
+            fprintf(stderr, "Error during decoding (%s)\n", av_err2str(ret));
             return ret;
         }
-        /* Some audio decoders decode only part of the packet, and have to be
-         * called again with the remainder of the packet data.
-         * Sample: fate-suite/lossless-audio/luckynight-partial.shn
-         * Also, some decoders might over-read the packet. */
-        decoded = FFMIN(ret, pkt.size);
-
-        if (*got_frame) {
-            size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
-            printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
-                   cached ? "(cached)" : "",
-                   audio_frame_count++, frame->nb_samples,
-                   av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
-
-            /* Write the raw audio data samples of the first plane. This works
-             * fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
-             * most audio decoders output planar audio, which uses a separate
-             * plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
-             * In other words, this code will write only the first audio channel
-             * in these cases.
-             * You should use libswresample or libavfilter to convert the frame
-             * to packed data. */
-            fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
-        }
-    }
 
-    /* If we use frame reference counting, we own the data and need
-     * to de-reference it when we don't use it anymore */
-    if (*got_frame && refcount)
+        // write the frame data to output file
+        if (dec->codec->type == AVMEDIA_TYPE_VIDEO)
+            ret = output_video_frame(frame);
+        else
+            ret = output_audio_frame(frame);
+
         av_frame_unref(frame);
+        if (ret < 0)
+            return ret;
+    }
 
-    return decoded;
+    return 0;
 }
 
 static int open_codec_context(int *stream_idx,
@@ -151,7 +149,7 @@ static int open_codec_context(int *stream_idx,
 {
     int ret, stream_index;
     AVStream *st;
-    AVCodec *dec = NULL;
+    const AVCodec *dec = NULL;
     AVDictionary *opts = NULL;
 
     ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
@@ -186,8 +184,7 @@ static int open_codec_context(int *stream_idx,
             return ret;
         }
 
-        /* Init the decoders, with or without reference counting */
-        av_dict_set(&opts, "refcounted_frames", refcount ? "1" : "0", 0);
+        /* Init the decoders */
         if ((ret = avcodec_open2(*dec_ctx, dec, &opts)) < 0) {
             fprintf(stderr, "Failed to open %s codec\n",
                     av_get_media_type_string(type));
@@ -230,31 +227,21 @@ static int get_format_from_sample_fmt(const char **fmt,
 
 int main (int argc, char **argv)
 {
-    int ret = 0, got_frame;
+    int ret = 0;
 
-    if (argc != 4 && argc != 5) {
-        fprintf(stderr, "usage: %s [-refcount] input_file video_output_file audio_output_file\n"
+    if (argc != 4) {
+        fprintf(stderr, "usage: %s input_file video_output_file audio_output_file\n"
                 "API example program to show how to read frames from an input file.\n"
                 "This program reads frames from a file, decodes them, and writes decoded\n"
                 "video frames to a rawvideo file named video_output_file, and decoded\n"
-                "audio frames to a rawaudio file named audio_output_file.\n\n"
-                "If the -refcount option is specified, the program use the\n"
-                "reference counting frame system which allows keeping a copy of\n"
-                "the data for longer than one decode call.\n"
-                "\n", argv[0]);
+                "audio frames to a rawaudio file named audio_output_file.\n",
+                argv[0]);
         exit(1);
     }
-    if (argc == 5 && !strcmp(argv[1], "-refcount")) {
-        refcount = 1;
-        argv++;
-    }
     src_filename = argv[1];
     video_dst_filename = argv[2];
     audio_dst_filename = argv[3];
 
-    /* register all formats and codecs */
-    av_register_all();
-
     /* open input file, and allocate format context */
     if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
         fprintf(stderr, "Could not open source file %s\n", src_filename);
@@ -316,10 +303,12 @@ int main (int argc, char **argv)
         goto end;
     }
 
-    /* initialize packet, set data to NULL, let the demuxer fill it */
-    av_init_packet(&pkt);
-    pkt.data = NULL;
-    pkt.size = 0;
+    pkt = av_packet_alloc();
+    if (!pkt) {
+        fprintf(stderr, "Could not allocate packet\n");
+        ret = AVERROR(ENOMEM);
+        goto end;
+    }
 
     if (video_stream)
         printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
@@ -327,24 +316,23 @@ int main (int argc, char **argv)
         printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);
 
     /* read frames from the file */
-    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
-        AVPacket orig_pkt = pkt;
-        do {
-            ret = decode_packet(&got_frame, 0);
-            if (ret < 0)
-                break;
-            pkt.data += ret;
-            pkt.size -= ret;
-        } while (pkt.size > 0);
-        av_packet_unref(&orig_pkt);
+    while (av_read_frame(fmt_ctx, pkt) >= 0) {
+        // check if the packet belongs to a stream we are interested in, otherwise
+        // skip it
+        if (pkt->stream_index == video_stream_idx)
+            ret = decode_packet(video_dec_ctx, pkt);
+        else if (pkt->stream_index == audio_stream_idx)
+            ret = decode_packet(audio_dec_ctx, pkt);
+        av_packet_unref(pkt);
+        if (ret < 0)
+            break;
     }
 
-    /* flush cached frames */
-    pkt.data = NULL;
-    pkt.size = 0;
-    do {
-        decode_packet(&got_frame, 1);
-    } while (got_frame);
+    /* flush the decoders */
+    if (video_dec_ctx)
+        decode_packet(video_dec_ctx, NULL);
+    if (audio_dec_ctx)
+        decode_packet(audio_dec_ctx, NULL);
 
     printf("Demuxing succeeded.\n");
 
@@ -386,6 +374,7 @@ end:
         fclose(video_dst_file);
     if (audio_dst_file)
         fclose(audio_dst_file);
+    av_packet_free(&pkt);
     av_frame_free(&frame);
     av_free(video_dst_data[0]);
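
Note (not part of the patch above): the comment kept in output_audio_frame() still leaves the planar-to-packed conversion to the reader, so the example continues to write only the first channel of planar audio. Below is a rough, hypothetical sketch of that conversion with libswresample. It assumes the pre-5.1 integer channel-layout API (frame->channel_layout, frame->channels) and, for brevity, sets up a throwaway resampler per frame; a real program would create one SwrContext next to audio_dec_ctx, configure it from the decoder parameters, and reuse it.

#include <stdio.h>

#include <libavutil/channel_layout.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/mem.h>
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>

/* hypothetical helper: convert one decoded (possibly planar) frame to packed
 * signed 16-bit samples and write them to 'out' */
static int write_packed_s16(const AVFrame *frame, FILE *out)
{
    /* some decoders leave channel_layout unset; fall back to the channel count */
    int64_t layout = frame->channel_layout ? (int64_t)frame->channel_layout
                                           : av_get_default_channel_layout(frame->channels);
    int nb_ch = av_get_channel_layout_nb_channels(layout);
    uint8_t *buf[1] = { NULL };
    int linesize, converted;
    int ret = AVERROR(ENOMEM);
    struct SwrContext *swr = swr_alloc_set_opts(NULL,
            layout, AV_SAMPLE_FMT_S16, frame->sample_rate,                  /* output */
            layout, (enum AVSampleFormat)frame->format, frame->sample_rate, /* input  */
            0, NULL);

    if (!swr)
        goto end;
    if ((ret = swr_init(swr)) < 0)
        goto end;

    /* packed output occupies a single data plane */
    if ((ret = av_samples_alloc(buf, &linesize, nb_ch, frame->nb_samples,
                                AV_SAMPLE_FMT_S16, 0)) < 0)
        goto end;

    converted = swr_convert(swr, buf, frame->nb_samples,
                            (const uint8_t **)frame->extended_data, frame->nb_samples);
    ret = converted;
    if (converted > 0)
        fwrite(buf[0], 1, (size_t)converted * nb_ch *
                              av_get_bytes_per_sample(AV_SAMPLE_FMT_S16), out);

end:
    av_freep(&buf[0]);
    swr_free(&swr);
    return ret < 0 ? ret : 0;
}

The per-frame context setup is only what keeps the sketch self-contained; the obvious refinement is to allocate and initialize the SwrContext once and leave only the convert-and-write step in the helper.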