* is not just a function of time, but also one of the dependency on additional
* frames being fed into the decoder to satisfy the b-frame dependencies.
*
- * As such, a pipeline will build up that is roughly equivalent to the required
- * DPB for the file being played. If that was all it took, things would still
- * be simple - so, of course, it isn't.
- *
- * The hardware has a way of indicating that a picture is ready to be copied out,
- * but this is unreliable - and sometimes the attempt will still fail so, based
- * on testing, the code will wait until 3 pictures are ready before starting
- * to copy out - and this has the effect of extending the pipeline.
- *
- * Finally, while it is tempting to say that once the decoder starts outputting
- * frames, the software should never fail to return a frame from a decode(),
- * this is a hard assertion to make, because the stream may switch between
- * differently encoded content (number of b-frames, interlacing, etc) which
- * might require a longer pipeline than before. If that happened, you could
- * deadlock trying to retrieve a frame that can't be decoded without feeding
- * in additional packets.
- *
- * As such, the code will return in the event that a picture cannot be copied
- * out, leading to an increase in the length of the pipeline. This in turn,
- * means we have to be sensitive to the time it takes to decode a picture;
- * We do not want to give up just because the hardware needed a little more
- * time to prepare the picture! For this reason, there are delays included
- * in the decode() path that ensure that, under normal conditions, the hardware
- * will only fail to return a frame if it really needs additional packets to
- * complete the decoding.
- *
- * Finally, to be explicit, we do not want the pipeline to grow without bound
- * for two reasons: 1) The hardware can only buffer a finite number of packets,
- * and 2) The client application may not be able to cope with arbitrarily long
- * delays in the video path relative to the audio path. For example, MPlayer
- * can only handle a 20 picture delay (although this is arbitrary, and needs
- * to be extended to fully support the CrystalHD where the delay could be up
- * to 32 pictures - consider PAFF H.264 content with 16 b-frames).
+ * As such, the hardware can only be used effectively with a decode API that
+ * doesn't assume a 1:1 relationship between input packets and output frames.
+ * The new avcodec decode API is such an API (an m:n API), while the old one is
+ * 1:1. Consequently, we no longer support the old API, which lets us drop the
+ * vicious hacks that were previously required to approximate 1:1 operation.
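+ *
+ * As a rough illustration (this is just the generic usage pattern of the new
+ * API, not code from this file): the caller submits a packet once and then
+ * drains frames until the decoder asks for more input, so a single packet may
+ * yield zero, one or several frames:
+ *
+ *   avcodec_send_packet(avctx, pkt);
+ *   while (avcodec_receive_frame(avctx, frame) >= 0) {
+ *       // use the frame; AVERROR(EAGAIN) means "send another packet",
+ *       // AVERROR_EOF means the decoder has been fully drained
+ *   }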
*/
/*****************************************************************************
typedef enum {
RET_ERROR = -1,
RET_OK = 0,
+ RET_COPY_AGAIN = 1, /* no output frame produced yet; poll the decoder again */
} CopyRet;
typedef struct OpaqueList {
typedef struct {
AVClass *av_class;
AVCodecContext *avctx;
- AVFrame *pic;
HANDLE dev;
uint8_t *orig_extradata;
uint32_t sps_pps_size;
uint8_t is_nal;
uint8_t need_second_field;
+ uint8_t draining; /* input exhausted; flushing out remaining frames */
OpaqueList *head;
OpaqueList *tail;
CHDContext *priv = avctx->priv_data;
priv->need_second_field = 0;
-
- av_frame_unref (priv->pic);
+ priv->draining = 0;
/* Flush mode 4 flushes all software and hardware buffers. */
DtsFlushInput(priv->dev, 4);
av_freep(&priv->sps_pps_buf);
- av_frame_free (&priv->pic);
-
if (priv->head) {
OpaqueList *node = priv->head;
while (node) {
priv = avctx->priv_data;
priv->avctx = avctx;
priv->is_nal = avctx->extradata_size > 0 && *(avctx->extradata) == 1;
- priv->pic = av_frame_alloc();
+ priv->draining = 0;
subtype = id2subtype(priv, avctx->codec->id);
switch (subtype) {
static inline CopyRet copy_frame(AVCodecContext *avctx,
BC_DTS_PROC_OUT *output,
- void *data, int *got_frame)
+ AVFrame *frame, int *got_frame)
{
BC_STATUS ret;
BC_DTS_STATUS decoder_status = { 0, };
av_log(avctx, AV_LOG_VERBOSE, "Interlaced state: %d\n",
interlaced);
- if (priv->pic->data[0] && !priv->need_second_field)
- av_frame_unref(priv->pic);
-
priv->need_second_field = interlaced && !priv->need_second_field;
- if (!priv->pic->data[0]) {
- if (ff_get_buffer(avctx, priv->pic, AV_GET_BUFFER_FLAG_REF) < 0)
+ if (!frame->data[0]) {
+ if (ff_get_buffer(avctx, frame, 0) < 0)
return RET_ERROR;
}
sStride = bwidth;
}
- dStride = priv->pic->linesize[0];
- dst = priv->pic->data[0];
+ dStride = frame->linesize[0];
+ dst = frame->data[0];
av_log(priv->avctx, AV_LOG_VERBOSE, "CrystalHD: Copying out frame\n");
av_image_copy_plane(dst, dStride, src, sStride, bwidth, height);
}
- priv->pic->interlaced_frame = interlaced;
+ frame->interlaced_frame = interlaced;
if (interlaced)
- priv->pic->top_field_first = !bottom_first;
+ frame->top_field_first = !bottom_first;
if (pkt_pts != AV_NOPTS_VALUE) {
- priv->pic->pts = pkt_pts;
+ frame->pts = pkt_pts;
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
- priv->pic->pkt_pts = pkt_pts;
+ frame->pkt_pts = pkt_pts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
}
- av_frame_set_pkt_pos(priv->pic, -1);
- av_frame_set_pkt_duration(priv->pic, 0);
- av_frame_set_pkt_size(priv->pic, -1);
+ av_frame_set_pkt_pos(frame, -1);
+ av_frame_set_pkt_duration(frame, 0);
+ av_frame_set_pkt_size(frame, -1);
if (!priv->need_second_field) {
*got_frame = 1;
- if ((ret = av_frame_ref(data, priv->pic)) < 0) {
- return ret;
- }
+ } else {
+ return RET_COPY_AGAIN;
}
return RET_OK;
static inline CopyRet receive_frame(AVCodecContext *avctx,
- void *data, int *got_frame)
+ AVFrame *frame, int *got_frame)
{
BC_STATUS ret;
BC_DTS_PROC_OUT output = {
avctx->sample_aspect_ratio = (AVRational) {221, 1};
break;
}
- return RET_OK;
+ return RET_COPY_AGAIN;
} else if (ret == BC_STS_SUCCESS) {
int copy_ret = -1;
if (output.PoutFlags & BC_POUT_FLAGS_PIB_VALID) {
print_frame_info(priv, &output);
- copy_ret = copy_frame(avctx, &output, data, got_frame);
+ copy_ret = copy_frame(avctx, &output, frame, got_frame);
} else {
/*
* An invalid frame has been consumed.
*/
av_log(avctx, AV_LOG_ERROR, "CrystalHD: ProcOutput succeeded with "
"invalid PIB\n");
- copy_ret = RET_OK;
+ copy_ret = RET_COPY_AGAIN;
}
DtsReleaseOutputBuffs(dev, NULL, FALSE);
return copy_ret;
} else if (ret == BC_STS_BUSY) {
- return RET_OK;
+ return RET_COPY_AGAIN;
} else {
av_log(avctx, AV_LOG_ERROR, "CrystalHD: ProcOutput failed %d\n", ret);
return RET_ERROR;
}
} else {
av_log(avctx, AV_LOG_INFO, "CrystalHD: No more input data\n");
+ priv->draining = 1;
ret = AVERROR_EOF;
goto exit;
}
av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: receive_frame\n");
- bc_ret = DtsGetDriverStatus(dev, &decoder_status);
- if (bc_ret != BC_STS_SUCCESS) {
- av_log(avctx, AV_LOG_ERROR, "CrystalHD: GetDriverStatus failed\n");
- return -1;
- }
+ do {
+ bc_ret = DtsGetDriverStatus(dev, &decoder_status);
+ if (bc_ret != BC_STS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "CrystalHD: GetDriverStatus failed\n");
+ return -1;
+ }
- if (decoder_status.ReadyListCount == 0) {
- av_log(avctx, AV_LOG_INFO, "CrystalHD: Insufficient frames ready. Returning\n");
- return AVERROR(EAGAIN);
- }
+ if (decoder_status.ReadyListCount == 0) {
+ av_log(avctx, AV_LOG_INFO, "CrystalHD: Insufficient frames ready. Returning\n");
+ got_frame = 0;
+ rec_ret = RET_OK;
+ break;
+ }
+
+ rec_ret = receive_frame(avctx, frame, &got_frame);
+ } while (rec_ret == RET_COPY_AGAIN);
- rec_ret = receive_frame(avctx, frame, &got_frame);
if (rec_ret == RET_ERROR) {
return -1;
} else if (got_frame == 0) {
- return AVERROR(EAGAIN);
+ return priv->draining ? AVERROR_EOF : AVERROR(EAGAIN);
} else {
return 0;
}
.send_packet = crystalhd_decode_packet, \
.receive_frame = crystalhd_receive_frame, \
.flush = flush, \
- .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING, \
+ .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING, \
.pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE}, \
};