X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Flibdavs2.c;h=28cad9c5f7a971d44163c99d9e3632c437364d6f;hb=fe95a37144077d1f652088fcb25b2949e4a3da19;hp=7fdafdab81c20ec2ed58dbf391bbb3b6b9a0de1a;hpb=701cbbb58c76dbaa5c4e346e575cc3021d78fb02;p=ffmpeg

diff --git a/libavcodec/libdavs2.c b/libavcodec/libdavs2.c
index 7fdafdab81c..28cad9c5f7a 100644
--- a/libavcodec/libdavs2.c
+++ b/libavcodec/libdavs2.c
@@ -23,6 +23,7 @@
  */
 
 #include "avcodec.h"
+#include "internal.h"
 #include "davs2.h"
 
 typedef struct DAVS2Context {
@@ -45,9 +46,9 @@ static av_cold int davs2_init(AVCodecContext *avctx)
     /* init the decoder */
     cad->param.threads      = avctx->thread_count;
     cad->param.info_level   = 0;
-    cad->decoder = davs2_decoder_open(&cad->param);
     cad->param.disable_avx  = !(cpu_flags & AV_CPU_FLAG_AVX &&
                                 cpu_flags & AV_CPU_FLAG_AVX2);
+    cad->decoder = davs2_decoder_open(&cad->param);
 
     if (!cad->decoder) {
         av_log(avctx, AV_LOG_ERROR, "decoder created error.");
@@ -58,7 +59,7 @@ static av_cold int davs2_init(AVCodecContext *avctx)
     return 0;
 }
 
-static int davs2_dump_frames(AVCodecContext *avctx, davs2_picture_t *pic,
+static int davs2_dump_frames(AVCodecContext *avctx, davs2_picture_t *pic, int *got_frame,
                              davs2_seq_info_t *headerset, int ret_type, AVFrame *frame)
 {
     DAVS2Context *cad    = avctx->priv_data;
@@ -66,8 +67,10 @@ static int davs2_dump_frames(AVCodecContext *avctx, davs2_picture_t *pic,
     int plane = 0;
     int line  = 0;
 
-    if (!headerset)
+    if (!headerset) {
+        *got_frame = 0;
         return 0;
+    }
 
     if (!pic || ret_type == DAVS2_GOT_HEADER) {
         avctx->width     = headerset->width;
@@ -76,27 +79,28 @@ static int davs2_dump_frames(AVCodecContext *avctx, davs2_picture_t *pic,
                            AV_PIX_FMT_YUV420P10 : AV_PIX_FMT_YUV420P;
         avctx->framerate = av_d2q(headerset->frame_rate,4096);
+        *got_frame = 0;
         return 0;
     }
 
     switch (pic->type) {
-        case DAVS2_PIC_I:
-        case DAVS2_PIC_G:
-            frame->pict_type = AV_PICTURE_TYPE_I;
-            break;
-        case DAVS2_PIC_P:
-        case DAVS2_PIC_S:
-            frame->pict_type = AV_PICTURE_TYPE_P;
-            break;
-        case DAVS2_PIC_B:
-            frame->pict_type = AV_PICTURE_TYPE_B;
-            break;
-        case DAVS2_PIC_F:
-            frame->pict_type = AV_PICTURE_TYPE_S;
-            break;
-        default:
-            av_log(avctx, AV_LOG_ERROR, "Decoder error: unknown frame type\n");
-            return AVERROR_EXTERNAL;
+    case DAVS2_PIC_I:
+    case DAVS2_PIC_G:
+        frame->pict_type = AV_PICTURE_TYPE_I;
+        break;
+    case DAVS2_PIC_P:
+    case DAVS2_PIC_S:
+        frame->pict_type = AV_PICTURE_TYPE_P;
+        break;
+    case DAVS2_PIC_B:
+        frame->pict_type = AV_PICTURE_TYPE_B;
+        break;
+    case DAVS2_PIC_F:
+        frame->pict_type = AV_PICTURE_TYPE_S;
+        break;
+    default:
+        av_log(avctx, AV_LOG_ERROR, "Decoder error: unknown frame type\n");
+        return AVERROR_EXTERNAL;
     }
 
     for (plane = 0; plane < 3; ++plane) {
         int size_line = pic->widths[plane] * bytes_per_sample;
@@ -104,7 +108,7 @@ static int davs2_dump_frames(AVCodecContext *avctx, davs2_picture_t *pic,
         frame->buf[plane] = av_buffer_alloc(size_line * pic->lines[plane]);
 
         if (!frame->buf[plane]){
-            av_log(avctx, AV_LOG_ERROR, "dump error: alloc failed.\n");
+            av_log(avctx, AV_LOG_ERROR, "Decoder error: allocation failure, can't dump frames.\n");
             return AVERROR(ENOMEM);
         }
 
@@ -122,7 +126,40 @@ static int davs2_dump_frames(AVCodecContext *avctx, davs2_picture_t *pic,
     frame->pts    = cad->out_frame.pts;
     frame->format = avctx->pix_fmt;
 
-    return 1;
+    *got_frame = 1;
+    return 0;
+}
+
+static void davs2_flush(AVCodecContext *avctx)
+{
+    DAVS2Context *cad = avctx->priv_data;
+    int ret = DAVS2_GOT_FRAME;
+
+    while (ret == DAVS2_GOT_FRAME) {
+        ret = davs2_decoder_flush(cad->decoder, &cad->headerset, &cad->out_frame);
+        davs2_decoder_frame_unref(cad->decoder, &cad->out_frame);
+    }
+
+    if (ret == DAVS2_ERROR) {
+        av_log(avctx, AV_LOG_WARNING, "Decoder flushing failed.\n");
+    }
+}
+
+static int send_delayed_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame)
+{
+    DAVS2Context *cad = avctx->priv_data;
+    int ret = DAVS2_DEFAULT;
+
+    ret = davs2_decoder_flush(cad->decoder, &cad->headerset, &cad->out_frame);
+    if (ret == DAVS2_ERROR) {
+        av_log(avctx, AV_LOG_ERROR, "Decoder error: can't flush delayed frame\n");
+        return AVERROR_EXTERNAL;
+    }
+    if (ret == DAVS2_GOT_FRAME) {
+        ret = davs2_dump_frames(avctx, &cad->out_frame, got_frame, &cad->headerset, ret, frame);
+        davs2_decoder_frame_unref(cad->decoder, &cad->out_frame);
+    }
+    return ret;
 }
 
 static av_cold int davs2_end(AVCodecContext *avctx)
@@ -147,8 +184,9 @@ static int davs2_decode_frame(AVCodecContext *avctx, void *data,
     AVFrame *frame = data;
     int ret = DAVS2_DEFAULT;
 
+    /* end of stream, output what is still in the buffers */
     if (!buf_size) {
-        return 0;
+        return send_delayed_frame(avctx, frame, got_frame);
     }
 
     cad->packet.data = buf_ptr;
@@ -167,14 +205,14 @@ static int davs2_decode_frame(AVCodecContext *avctx, void *data,
 
     ret = davs2_decoder_recv_frame(cad->decoder, &cad->headerset, &cad->out_frame);
     if (ret != DAVS2_DEFAULT) {
-        *got_frame = davs2_dump_frames(avctx, &cad->out_frame, &cad->headerset, ret, frame);
+        ret = davs2_dump_frames(avctx, &cad->out_frame, got_frame, &cad->headerset, ret, frame);
         davs2_decoder_frame_unref(cad->decoder, &cad->out_frame);
     }
 
-    return buf_size;
+    return ret == 0 ? buf_size : ret;
 }
 
-AVCodec ff_libdavs2_decoder = {
+const AVCodec ff_libdavs2_decoder = {
     .name           = "libdavs2",
     .long_name      = NULL_IF_CONFIG_SMALL("libdavs2 AVS2-P2/IEEE1857.4"),
     .type           = AVMEDIA_TYPE_VIDEO,
@@ -183,7 +221,9 @@ AVCodec ff_libdavs2_decoder = {
     .init           = davs2_init,
     .close          = davs2_end,
     .decode         = davs2_decode_frame,
-    .capabilities   = AV_CODEC_CAP_DELAY,//AV_CODEC_CAP_DR1 |
+    .flush          = davs2_flush,
+    .capabilities   = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_OTHER_THREADS,
+    .caps_internal  = FF_CODEC_CAP_AUTO_THREADS,
     .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
                                                      AV_PIX_FMT_NONE },
     .wrapper_name   = "libdavs2",
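
Usage note (illustrative, not part of the patch): with AV_CODEC_CAP_DELAY set, libdavs2 may hold reordered frames internally, and the empty-packet path added above (send_delayed_frame()) is what returns them once the caller drains the codec at end of stream. The sketch below shows one way an application might drive that drain through the public avcodec API; the helper name drain_decoder() and its error handling are assumptions for illustration, not code from this patch.

/* Illustrative sketch: drain buffered frames from a delayed decoder at EOF. */
#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>

static int drain_decoder(AVCodecContext *dec_ctx, AVFrame *frame)
{
    int ret;

    /* A NULL packet switches the decoder into draining mode; for the
     * libdavs2 wrapper this reaches the zero-size-buffer branch that
     * calls send_delayed_frame(). */
    ret = avcodec_send_packet(dec_ctx, NULL);
    if (ret < 0)
        return ret;

    /* Collect frames until the decoder reports it is fully drained. */
    while ((ret = avcodec_receive_frame(dec_ctx, frame)) >= 0) {
        /* ... consume the frame here ... */
        av_frame_unref(frame);
    }

    return ret == AVERROR_EOF ? 0 : ret;
}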