static av_cold int davs2_init(AVCodecContext *avctx)
{
DAVS2Context *cad = avctx->priv_data;
+ int cpu_flags = av_get_cpu_flags();
/* init the decoder */
cad->param.threads = avctx->thread_count;
cad->param.info_level = 0;
+ /* disable_avx must be set BEFORE davs2_decoder_open(), otherwise the
+  * decoder is created without ever seeing the flag. */
+ cad->param.disable_avx = !(cpu_flags & AV_CPU_FLAG_AVX &&
+ cpu_flags & AV_CPU_FLAG_AVX2);
cad->decoder = davs2_decoder_open(&cad->param);
if (!cad->decoder) {
- av_log(avctx, AV_LOG_ERROR, "decoder created error.");
- return 0;
+ av_log(avctx, AV_LOG_ERROR, "decoder created error.\n");
+ return AVERROR_EXTERNAL;
}
-static int davs2_dump_frames(AVCodecContext *avctx, davs2_picture_t *pic,
+static int davs2_dump_frames(AVCodecContext *avctx, davs2_picture_t *pic, int *got_frame,
davs2_seq_info_t *headerset, int ret_type, AVFrame *frame)
{
DAVS2Context *cad = avctx->priv_data;
int plane = 0;
int line = 0;
- if (!headerset)
+ if (!headerset) {
+ *got_frame = 0;
return 0;
+ }
if (!pic || ret_type == DAVS2_GOT_HEADER) {
avctx->width = headerset->width;
AV_PIX_FMT_YUV420P10 : AV_PIX_FMT_YUV420P;
avctx->framerate = av_d2q(headerset->frame_rate,4096);
+ *got_frame = 0;
return 0;
}
}
frame->data[plane] = frame->buf[plane]->data;
- frame->linesize[plane] = pic->widths[plane];
+ frame->linesize[plane] = size_line;
for (line = 0; line < pic->lines[plane]; ++line)
memcpy(frame->data[plane] + line * size_line,
frame->pts = cad->out_frame.pts;
frame->format = avctx->pix_fmt;
- return 1;
+ *got_frame = 1;
+ return 0;
+}
+
+static int send_delayed_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame)
+{
+ DAVS2Context *cad = avctx->priv_data;
+ int ret = DAVS2_DEFAULT;
+
+ ret = davs2_decoder_flush(cad->decoder, &cad->headerset, &cad->out_frame);
+ if (ret == DAVS2_ERROR) {
+ av_log(avctx, AV_LOG_ERROR, "Decoder error: can't flush delayed frame\n");
+ return AVERROR_EXTERNAL;
+ }
+ if (ret == DAVS2_GOT_FRAME) {
+ ret = davs2_dump_frames(avctx, &cad->out_frame, got_frame, &cad->headerset, ret, frame);
+ davs2_decoder_frame_unref(cad->decoder, &cad->out_frame);
+ }
+ return ret;
}
static av_cold int davs2_end(AVCodecContext *avctx)
AVFrame *frame = data;
int ret = DAVS2_DEFAULT;
+ /* end of stream, output what is still in the buffers */
if (!buf_size) {
- return 0;
+ return send_delayed_frame(avctx, frame, got_frame);
}
cad->packet.data = buf_ptr;
ret = davs2_decoder_recv_frame(cad->decoder, &cad->headerset, &cad->out_frame);
if (ret != DAVS2_DEFAULT) {
- *got_frame = davs2_dump_frames(avctx, &cad->out_frame, &cad->headerset, ret, frame);
+ ret = davs2_dump_frames(avctx, &cad->out_frame, got_frame, &cad->headerset, ret, frame);
davs2_decoder_frame_unref(cad->decoder, &cad->out_frame);
}
- return buf_size;
+ return ret == 0 ? buf_size : ret;
}
AVCodec ff_libdavs2_decoder = {
.init = davs2_init,
.close = davs2_end,
.decode = davs2_decode_frame,
- .capabilities = AV_CODEC_CAP_DELAY,//AV_CODEC_CAP_DR1 |
+ .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AUTO_THREADS,
.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
AV_PIX_FMT_NONE },
.wrapper_name = "libdavs2",