* is not just a function of time, but also one of the dependency on additional
* frames being fed into the decoder to satisfy the b-frame dependencies.
*
- * As such, a pipeline will build up that is roughly equivalent to the required
- * DPB for the file being played. If that was all it took, things would still
- * be simple - so, of course, it isn't.
- *
- * The hardware has a way of indicating that a picture is ready to be copied out,
- * but this is unreliable - and sometimes the attempt will still fail so, based
- * on testing, the code will wait until 3 pictures are ready before starting
- * to copy out - and this has the effect of extending the pipeline.
- *
- * Finally, while it is tempting to say that once the decoder starts outputting
- * frames, the software should never fail to return a frame from a decode(),
- * this is a hard assertion to make, because the stream may switch between
- * differently encoded content (number of b-frames, interlacing, etc) which
- * might require a longer pipeline than before. If that happened, you could
- * deadlock trying to retrieve a frame that can't be decoded without feeding
- * in additional packets.
- *
- * As such, the code will return in the event that a picture cannot be copied
- * out, leading to an increase in the length of the pipeline. This in turn,
- * means we have to be sensitive to the time it takes to decode a picture;
- * We do not want to give up just because the hardware needed a little more
- * time to prepare the picture! For this reason, there are delays included
- * in the decode() path that ensure that, under normal conditions, the hardware
- * will only fail to return a frame if it really needs additional packets to
- * complete the decoding.
- *
- * Finally, to be explicit, we do not want the pipeline to grow without bound
- * for two reasons: 1) The hardware can only buffer a finite number of packets,
- * and 2) The client application may not be able to cope with arbitrarily long
- * delays in the video path relative to the audio path. For example. MPlayer
- * can only handle a 20 picture delay (although this is arbitrary, and needs
- * to be extended to fully support the CrystalHD where the delay could be up
- * to 32 pictures - consider PAFF H.264 content with 16 b-frames).
+ * As such, the hardware can only be used effectively with a decode API that
+ * doesn't assume a 1:1 relationship between input packets and output frames.
+ * The new avcodec decode API is such an API (an m:n API) while the old one is
+ * 1:1. Consequently, we no longer support the old API, which allows us to avoid
+ * the vicious hacks that are required to approximate 1:1 operation.
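+ *
+ * For orientation only - this is a rough sketch of the caller side using
+ * the public avcodec API, not code from this file - the m:n model looks
+ * roughly like:
+ *
+ *     avcodec_send_packet(avctx, pkt);           // pkt == NULL starts draining
+ *     while (avcodec_receive_frame(avctx, frame) == 0) {
+ *         // 0..n frames may be produced per packet sent in
+ *     }
+ *
+ * and maps onto the send_packet/receive_frame callbacks implemented below.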
*/
/*****************************************************************************
#include <libcrystalhd/libcrystalhd_if.h>
#include "avcodec.h"
-#include "h264dec.h"
#include "internal.h"
#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
#define OUTPUT_PROC_TIMEOUT 50
/** Step between fake timestamps passed to hardware in units of 100ns */
#define TIMESTAMP_UNIT 100000
-/** Initial value in us of the wait in decode() */
-#define BASE_WAIT 10000
-/** Increment in us to adjust wait in decode() */
-#define WAIT_UNIT 1000
/*****************************************************************************
RET_ERROR = -1,
RET_OK = 0,
RET_COPY_AGAIN = 1,
- RET_SKIP_NEXT_COPY = 2,
- RET_COPY_NEXT_FIELD = 3,
} CopyRet;
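+/*
+ * The hardware can mangle pts values passed through it (see the comment in
+ * crystalhd_decode_packet), so each input packet's pts is stashed in this
+ * list, keyed by a fake timestamp (stepped by TIMESTAMP_UNIT) that is known
+ * to survive the round trip, and looked up again when a picture is returned.
+ */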
typedef struct OpaqueList {
struct OpaqueList *next;
uint64_t fake_timestamp;
uint64_t reordered_opaque;
- uint8_t pic_type;
} OpaqueList;
typedef struct {
AVClass *av_class;
AVCodecContext *avctx;
- AVFrame *pic;
HANDLE dev;
uint8_t *orig_extradata;
uint32_t orig_extradata_size;
AVBSFContext *bsfc;
- AVCodecParserContext *parser;
uint8_t is_70012;
uint8_t *sps_pps_buf;
uint32_t sps_pps_size;
uint8_t is_nal;
- uint8_t output_ready;
uint8_t need_second_field;
- uint8_t skip_next_output;
- uint64_t decode_wait;
-
- uint64_t last_picture;
+ uint8_t draining;
OpaqueList *head;
OpaqueList *tail;
static inline void print_frame_info(CHDContext *priv, BC_DTS_PROC_OUT *output)
{
- av_log(priv->avctx, AV_LOG_VERBOSE, "\tYBuffSz: %u\n", output->YbuffSz);
- av_log(priv->avctx, AV_LOG_VERBOSE, "\tYBuffDoneSz: %u\n",
+ av_log(priv->avctx, AV_LOG_TRACE, "\tYBuffSz: %u\n", output->YbuffSz);
+ av_log(priv->avctx, AV_LOG_TRACE, "\tYBuffDoneSz: %u\n",
output->YBuffDoneSz);
- av_log(priv->avctx, AV_LOG_VERBOSE, "\tUVBuffDoneSz: %u\n",
+ av_log(priv->avctx, AV_LOG_TRACE, "\tUVBuffDoneSz: %u\n",
output->UVBuffDoneSz);
- av_log(priv->avctx, AV_LOG_VERBOSE, "\tTimestamp: %"PRIu64"\n",
+ av_log(priv->avctx, AV_LOG_TRACE, "\tTimestamp: %"PRIu64"\n",
output->PicInfo.timeStamp);
- av_log(priv->avctx, AV_LOG_VERBOSE, "\tPicture Number: %u\n",
+ av_log(priv->avctx, AV_LOG_TRACE, "\tPicture Number: %u\n",
output->PicInfo.picture_number);
- av_log(priv->avctx, AV_LOG_VERBOSE, "\tWidth: %u\n",
+ av_log(priv->avctx, AV_LOG_TRACE, "\tWidth: %u\n",
output->PicInfo.width);
- av_log(priv->avctx, AV_LOG_VERBOSE, "\tHeight: %u\n",
+ av_log(priv->avctx, AV_LOG_TRACE, "\tHeight: %u\n",
output->PicInfo.height);
- av_log(priv->avctx, AV_LOG_VERBOSE, "\tChroma: 0x%03x\n",
+ av_log(priv->avctx, AV_LOG_TRACE, "\tChroma: 0x%03x\n",
output->PicInfo.chroma_format);
- av_log(priv->avctx, AV_LOG_VERBOSE, "\tPulldown: %u\n",
+ av_log(priv->avctx, AV_LOG_TRACE, "\tPulldown: %u\n",
output->PicInfo.pulldown);
- av_log(priv->avctx, AV_LOG_VERBOSE, "\tFlags: 0x%08x\n",
+ av_log(priv->avctx, AV_LOG_TRACE, "\tFlags: 0x%08x\n",
output->PicInfo.flags);
- av_log(priv->avctx, AV_LOG_VERBOSE, "\tFrame Rate/Res: %u\n",
+ av_log(priv->avctx, AV_LOG_TRACE, "\tFrame Rate/Res: %u\n",
output->PicInfo.frame_rate);
- av_log(priv->avctx, AV_LOG_VERBOSE, "\tAspect Ratio: %u\n",
+ av_log(priv->avctx, AV_LOG_TRACE, "\tAspect Ratio: %u\n",
output->PicInfo.aspect_ratio);
- av_log(priv->avctx, AV_LOG_VERBOSE, "\tColor Primaries: %u\n",
+ av_log(priv->avctx, AV_LOG_TRACE, "\tColor Primaries: %u\n",
output->PicInfo.colour_primaries);
- av_log(priv->avctx, AV_LOG_VERBOSE, "\tMetaData: %u\n",
+ av_log(priv->avctx, AV_LOG_TRACE, "\tMetaData: %u\n",
output->PicInfo.picture_meta_payload);
- av_log(priv->avctx, AV_LOG_VERBOSE, "\tSession Number: %u\n",
+ av_log(priv->avctx, AV_LOG_TRACE, "\tSession Number: %u\n",
output->PicInfo.sess_num);
- av_log(priv->avctx, AV_LOG_VERBOSE, "\tycom: %u\n",
+ av_log(priv->avctx, AV_LOG_TRACE, "\tycom: %u\n",
output->PicInfo.ycom);
- av_log(priv->avctx, AV_LOG_VERBOSE, "\tCustom Aspect: %u\n",
+ av_log(priv->avctx, AV_LOG_TRACE, "\tCustom Aspect: %u\n",
output->PicInfo.custom_aspect_ratio_width_height);
- av_log(priv->avctx, AV_LOG_VERBOSE, "\tFrames to Drop: %u\n",
+ av_log(priv->avctx, AV_LOG_TRACE, "\tFrames to Drop: %u\n",
output->PicInfo.n_drop);
- av_log(priv->avctx, AV_LOG_VERBOSE, "\tH264 Valid Fields: 0x%08x\n",
+ av_log(priv->avctx, AV_LOG_TRACE, "\tH264 Valid Fields: 0x%08x\n",
output->PicInfo.other.h264.valid);
}
* OpaqueList functions
****************************************************************************/
-static uint64_t opaque_list_push(CHDContext *priv, uint64_t reordered_opaque,
- uint8_t pic_type)
+static uint64_t opaque_list_push(CHDContext *priv, uint64_t reordered_opaque)
{
OpaqueList *newNode = av_mallocz(sizeof (OpaqueList));
if (!newNode) {
}
priv->tail = newNode;
newNode->reordered_opaque = reordered_opaque;
- newNode->pic_type = pic_type;
return newNode->fake_timestamp;
}
{
CHDContext *priv = avctx->priv_data;
- avctx->has_b_frames = 0;
- priv->last_picture = -1;
- priv->output_ready = 0;
priv->need_second_field = 0;
- priv->skip_next_output = 0;
- priv->decode_wait = BASE_WAIT;
-
- av_frame_unref (priv->pic);
+ priv->draining = 0;
/* Flush mode 4 flushes all software and hardware buffers. */
DtsFlushInput(priv->dev, 4);
priv->orig_extradata_size = 0;
}
- av_parser_close(priv->parser);
if (priv->bsfc) {
av_bsf_free(&priv->bsfc);
}
av_freep(&priv->sps_pps_buf);
- av_frame_free (&priv->pic);
-
if (priv->head) {
OpaqueList *node = priv->head;
while (node) {
}
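+/*
+ * Initialize the named bitstream filter and replace avctx->extradata with
+ * the filter's output extradata. The original pointer and size are backed
+ * up in priv->orig_extradata so they can be restored at close time.
+ */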
+static av_cold int init_bsf(AVCodecContext *avctx, const char *bsf_name)
+{
+ CHDContext *priv = avctx->priv_data;
+ const AVBitStreamFilter *bsf;
+ int avret;
+ void *extradata = NULL;
+ size_t size = 0;
+
+ bsf = av_bsf_get_by_name(bsf_name);
+ if (!bsf) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Cannot open the %s BSF!\n", bsf_name);
+ return AVERROR_BSF_NOT_FOUND;
+ }
+
+ avret = av_bsf_alloc(bsf, &priv->bsfc);
+ if (avret != 0) {
+ return avret;
+ }
+
+ avret = avcodec_parameters_from_context(priv->bsfc->par_in, avctx);
+ if (avret != 0) {
+ return avret;
+ }
+
+ avret = av_bsf_init(priv->bsfc);
+ if (avret != 0) {
+ return avret;
+ }
+
+ /* Back up the extradata so it can be restored at close time. */
+ priv->orig_extradata = avctx->extradata;
+ priv->orig_extradata_size = avctx->extradata_size;
+
+ size = priv->bsfc->par_out->extradata_size;
+ extradata = av_malloc(size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (!extradata) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Failed to allocate copy of extradata\n");
+ return AVERROR(ENOMEM);
+ }
+ memcpy(extradata, priv->bsfc->par_out->extradata, size);
+
+ avctx->extradata = extradata;
+ avctx->extradata_size = size;
+
+ return 0;
+}
+
static av_cold int init(AVCodecContext *avctx)
{
CHDContext* priv;
+ int avret;
BC_STATUS ret;
BC_INFO_CRYSTAL version;
BC_INPUT_FORMAT format = {
priv = avctx->priv_data;
priv->avctx = avctx;
priv->is_nal = avctx->extradata_size > 0 && *(avctx->extradata) == 1;
- priv->last_picture = -1;
- priv->decode_wait = BASE_WAIT;
- priv->pic = av_frame_alloc();
+ priv->draining = 0;
subtype = id2subtype(priv, avctx->codec->id);
switch (subtype) {
case BC_MSUBTYPE_AVC1:
- {
- const AVBitStreamFilter *bsf;
- int avret;
-
- bsf = av_bsf_get_by_name("h264_mp4toannexb");
- if (!bsf) {
- av_log(avctx, AV_LOG_ERROR,
- "Cannot open the h264_mp4toannexb BSF!\n");
- return AVERROR_BSF_NOT_FOUND;
- }
- avret = av_bsf_alloc(bsf, &priv->bsfc);
- if (avret != 0) {
- return AVERROR(ENOMEM);
- }
- avret = avcodec_parameters_from_context(priv->bsfc->par_in, avctx);
- if (avret != 0) {
- return AVERROR(ENOMEM);
- }
- avret = av_bsf_init(priv->bsfc);
- if (avret != 0) {
- return AVERROR(ENOMEM);
- }
-
- format.metaDataSz = priv->bsfc->par_out->extradata_size;
- format.pMetaData = av_malloc(format.metaDataSz + AV_INPUT_BUFFER_PADDING_SIZE);
- if (!format.pMetaData) {
- av_log(avctx, AV_LOG_ERROR,
- "Failed to allocate copy of extradata\n");
- return AVERROR(ENOMEM);
- }
- memcpy(format.pMetaData, priv->bsfc->par_out->extradata, format.metaDataSz);
-
- /* Back up the extradata so it can be restored at close time. */
- priv->orig_extradata = avctx->extradata;
- priv->orig_extradata_size = avctx->extradata_size;
- avctx->extradata = format.pMetaData;
- avctx->extradata_size = format.metaDataSz;
+ avret = init_bsf(avctx, "h264_mp4toannexb");
+ if (avret != 0) {
+ return avret;
}
subtype = BC_MSUBTYPE_H264;
format.startCodeSz = 4;
+ format.pMetaData = avctx->extradata;
+ format.metaDataSz = avctx->extradata_size;
break;
case BC_MSUBTYPE_H264:
format.startCodeSz = 4;
goto fail;
}
- if (avctx->codec->id == AV_CODEC_ID_H264) {
- priv->parser = av_parser_init(avctx->codec->id);
- if (!priv->parser)
- av_log(avctx, AV_LOG_WARNING,
- "Cannot open the h.264 parser! Interlaced h.264 content "
- "will not be detected reliably.\n");
- priv->parser->flags = PARSER_FLAG_COMPLETE_FRAMES;
- }
av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Init complete.\n");
return 0;
static inline CopyRet copy_frame(AVCodecContext *avctx,
BC_DTS_PROC_OUT *output,
- void *data, int *got_frame)
+ AVFrame *frame, int *got_frame)
{
BC_STATUS ret;
BC_DTS_STATUS decoder_status = { 0, };
- uint8_t trust_interlaced;
uint8_t interlaced;
CHDContext *priv = avctx->priv_data;
int64_t pkt_pts = AV_NOPTS_VALUE;
- uint8_t pic_type = 0;
uint8_t bottom_field = (output->PicInfo.flags & VDEC_FLAG_BOTTOMFIELD) ==
VDEC_FLAG_BOTTOMFIELD;
OpaqueList *node = opaque_list_pop(priv, output->PicInfo.timeStamp);
if (node) {
pkt_pts = node->reordered_opaque;
- pic_type = node->pic_type;
av_free(node);
} else {
/*
* We will encounter a situation where a timestamp cannot be
* popped if a second field is being returned. In this case,
* each field has the same timestamp and the first one will
- * cause it to be popped. To keep subsequent calculations
- * simple, pic_type should be set a FIELD value - doesn't
- * matter which, but I chose BOTTOM.
+ * cause it to be popped. We'll avoid overwriting the valid
+ * timestamp below.
*/
- pic_type = PICT_BOTTOM_FIELD;
}
av_log(avctx, AV_LOG_VERBOSE, "output \"pts\": %"PRIu64"\n",
output->PicInfo.timeStamp);
- av_log(avctx, AV_LOG_VERBOSE, "output picture type %d\n",
- pic_type);
}
ret = DtsGetDriverStatus(priv->dev, &decoder_status);
return RET_ERROR;
}
- /*
- * For most content, we can trust the interlaced flag returned
- * by the hardware, but sometimes we can't. These are the
- * conditions under which we can trust the flag:
- *
- * 1) It's not h.264 content
- * 2) The UNKNOWN_SRC flag is not set
- * 3) We know we're expecting a second field
- * 4) The hardware reports this picture and the next picture
- * have the same picture number.
- *
- * Note that there can still be interlaced content that will
- * fail this check, if the hardware hasn't decoded the next
- * picture or if there is a corruption in the stream. (In either
- * case a 0 will be returned for the next picture number)
- */
- trust_interlaced = avctx->codec->id != AV_CODEC_ID_H264 ||
- !(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) ||
- priv->need_second_field ||
- (decoder_status.picNumFlags & ~0x40000000) ==
- output->PicInfo.picture_number;
-
- /*
- * If we got a false negative for trust_interlaced on the first field,
- * we will realise our mistake here when we see that the picture number is that
- * of the previous picture. We cannot recover the frame and should discard the
- * second field to keep the correct number of output frames.
- */
- if (output->PicInfo.picture_number == priv->last_picture && !priv->need_second_field) {
- av_log(avctx, AV_LOG_WARNING,
- "Incorrectly guessed progressive frame. Discarding second field\n");
- /* Returning without providing a picture. */
- return RET_OK;
- }
+ interlaced = output->PicInfo.flags & VDEC_FLAG_INTERLACED_SRC;
- interlaced = (output->PicInfo.flags & VDEC_FLAG_INTERLACED_SRC) &&
- trust_interlaced;
-
- if (!trust_interlaced && (decoder_status.picNumFlags & ~0x40000000) == 0) {
- av_log(avctx, AV_LOG_VERBOSE,
- "Next picture number unknown. Assuming progressive frame.\n");
- }
-
- av_log(avctx, AV_LOG_VERBOSE, "Interlaced state: %d | trust_interlaced %d\n",
- interlaced, trust_interlaced);
-
- if (priv->pic->data[0] && !priv->need_second_field)
- av_frame_unref(priv->pic);
+ av_log(avctx, AV_LOG_VERBOSE, "Interlaced state: %d\n",
+ interlaced);
priv->need_second_field = interlaced && !priv->need_second_field;
- if (!priv->pic->data[0]) {
- if (ff_get_buffer(avctx, priv->pic, AV_GET_BUFFER_FLAG_REF) < 0)
+ if (!frame->data[0]) {
+ if (ff_get_buffer(avctx, frame, 0) < 0)
return RET_ERROR;
}
sStride = bwidth;
}
- dStride = priv->pic->linesize[0];
- dst = priv->pic->data[0];
+ dStride = frame->linesize[0];
+ dst = frame->data[0];
av_log(priv->avctx, AV_LOG_VERBOSE, "CrystalHD: Copying out frame\n");
+ /*
+ * The hardware doesn't return the first sample of a picture.
+ * Ignoring why it behaves this way, it's better to copy the sample from
+ * the second line, rather than the next sample across because the chroma
+ * values should be correct (assuming the decoded video was 4:2:0, which
+ * it was).
+ */
+ *((uint32_t *)src) = *((uint32_t *)(src + sStride));
+
if (interlaced) {
int dY = 0;
int sY = 0;
av_image_copy_plane(dst, dStride, src, sStride, bwidth, height);
}
- priv->pic->interlaced_frame = interlaced;
+ frame->interlaced_frame = interlaced;
if (interlaced)
- priv->pic->top_field_first = !bottom_first;
+ frame->top_field_first = !bottom_first;
- priv->pic->pts = pkt_pts;
+ if (pkt_pts != AV_NOPTS_VALUE) {
+ frame->pts = pkt_pts;
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
- priv->pic->pkt_pts = pkt_pts;
+ frame->pkt_pts = pkt_pts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
+ }
+ av_frame_set_pkt_pos(frame, -1);
+ av_frame_set_pkt_duration(frame, 0);
+ av_frame_set_pkt_size(frame, -1);
if (!priv->need_second_field) {
*got_frame = 1;
- if ((ret = av_frame_ref(data, priv->pic)) < 0) {
- return ret;
- }
- }
-
- /*
- * Two types of PAFF content have been observed. One form causes the
- * hardware to return a field pair and the other individual fields,
- * even though the input is always individual fields. We must skip
- * copying on the next decode() call to maintain pipeline length in
- * the first case.
- */
- if (!interlaced && (output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) &&
- (pic_type == PICT_TOP_FIELD || pic_type == PICT_BOTTOM_FIELD)) {
- av_log(priv->avctx, AV_LOG_VERBOSE, "Fieldpair from two packets.\n");
- return RET_SKIP_NEXT_COPY;
+ } else {
+ return RET_COPY_AGAIN;
}
- /*
- * The logic here is purely based on empirical testing with samples.
- * If we need a second field, it could come from a second input packet,
- * or it could come from the same field-pair input packet at the current
- * field. In the first case, we should return and wait for the next time
- * round to get the second field, while in the second case, we should
- * ask the decoder for it immediately.
- *
- * Testing has shown that we are dealing with the fieldpair -> two fields
- * case if the VDEC_FLAG_UNKNOWN_SRC is not set or if the input picture
- * type was PICT_FRAME (in this second case, the flag might still be set)
- */
- return priv->need_second_field &&
- (!(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) ||
- pic_type == PICT_FRAME) ?
- RET_COPY_NEXT_FIELD : RET_OK;
+ return RET_OK;
}
static inline CopyRet receive_frame(AVCodecContext *avctx,
- void *data, int *got_frame)
+ AVFrame *frame, int *got_frame)
{
BC_STATUS ret;
BC_DTS_PROC_OUT output = {
} else if (ret == BC_STS_SUCCESS) {
int copy_ret = -1;
if (output.PoutFlags & BC_POUT_FLAGS_PIB_VALID) {
- if (priv->last_picture == -1) {
- /*
- * Init to one less, so that the incrementing code doesn't
- * need to be special-cased.
- */
- priv->last_picture = output.PicInfo.picture_number - 1;
- }
-
if (avctx->codec->id == AV_CODEC_ID_MPEG4 &&
output.PicInfo.timeStamp == 0 && priv->bframe_bug) {
- av_log(avctx, AV_LOG_VERBOSE,
- "CrystalHD: Not returning packed frame twice.\n");
- priv->last_picture++;
+ av_log(avctx, AV_LOG_VERBOSE,
+ "CrystalHD: Not returning packed frame twice.\n");
DtsReleaseOutputBuffs(dev, NULL, FALSE);
return RET_COPY_AGAIN;
}
print_frame_info(priv, &output);
- if (priv->last_picture + 1 < output.PicInfo.picture_number) {
- av_log(avctx, AV_LOG_WARNING,
- "CrystalHD: Picture Number discontinuity\n");
- /*
- * Have we lost frames? If so, we need to shrink the
- * pipeline length appropriately.
- *
- * XXX: I have no idea what the semantics of this situation
- * are so I don't even know if we've lost frames or which
- * ones.
- *
- * In any case, only warn the first time.
- */
- priv->last_picture = output.PicInfo.picture_number - 1;
- }
-
- copy_ret = copy_frame(avctx, &output, data, got_frame);
- if (*got_frame > 0) {
- avctx->has_b_frames--;
- priv->last_picture++;
- av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Pipeline length: %u\n",
- avctx->has_b_frames);
- }
+ copy_ret = copy_frame(avctx, &output, frame, got_frame);
} else {
/*
* An invalid frame has been consumed.
*/
av_log(avctx, AV_LOG_ERROR, "CrystalHD: ProcOutput succeeded with "
"invalid PIB\n");
- avctx->has_b_frames--;
- copy_ret = RET_OK;
+ copy_ret = RET_COPY_AGAIN;
}
DtsReleaseOutputBuffs(dev, NULL, FALSE);
}
}
-
-static int decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
+static int crystalhd_decode_packet(AVCodecContext *avctx, const AVPacket *avpkt)
{
- BC_STATUS ret;
- BC_DTS_STATUS decoder_status = { 0, };
- CopyRet rec_ret;
+ BC_STATUS bc_ret;
CHDContext *priv = avctx->priv_data;
HANDLE dev = priv->dev;
- uint8_t *in_data = avpkt->data;
- int len = avpkt->size;
- int free_data = 0;
- uint8_t pic_type = 0;
-
- av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: decode_frame\n");
-
- if (avpkt->size == 7 && !priv->bframe_bug) {
- /*
- * The use of a drop frame triggers the bug
- */
- av_log(avctx, AV_LOG_INFO,
- "CrystalHD: Enabling work-around for packed b-frame bug\n");
- priv->bframe_bug = 1;
- } else if (avpkt->size == 8 && priv->bframe_bug) {
- /*
- * Delay frames don't trigger the bug
- */
- av_log(avctx, AV_LOG_INFO,
- "CrystalHD: Disabling work-around for packed b-frame bug\n");
- priv->bframe_bug = 0;
- }
+ AVPacket filtered_packet = { 0 };
+ int ret = 0;
+
+ av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: decode_packet\n");
- if (len) {
+ if (avpkt && avpkt->size) {
int32_t tx_free = (int32_t)DtsTxFreeSize(dev);
+ if (!priv->bframe_bug && (avpkt->size == 6 || avpkt->size == 7)) {
+ /*
+ * Drop frames trigger the bug
+ */
+ av_log(avctx, AV_LOG_WARNING,
+ "CrystalHD: Enabling work-around for packed b-frame bug\n");
+ priv->bframe_bug = 1;
+ } else if (priv->bframe_bug && avpkt->size == 8) {
+ /*
+ * Delay frames don't trigger the bug
+ */
+ av_log(avctx, AV_LOG_WARNING,
+ "CrystalHD: Disabling work-around for packed b-frame bug\n");
+ priv->bframe_bug = 0;
+ }
+
if (priv->bsfc) {
- int ret = 0;
AVPacket filter_packet = { 0 };
- AVPacket filtered_packet = { 0 };
ret = av_packet_ref(&filter_packet, avpkt);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "CrystalHD: mpv4toannexb filter "
"failed to ref input packet\n");
- return ret;
+ goto exit;
}
- ret = av_bsf_send_packet(priv->bsfc, &filter_packet);
- if (ret < 0) {
+ ret = av_bsf_send_packet(priv->bsfc, &filter_packet);
+ if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "CrystalHD: mpv4toannexb filter "
"failed to send input packet\n");
- return ret;
+ goto exit;
}
ret = av_bsf_receive_packet(priv->bsfc, &filtered_packet);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "CrystalHD: mpv4toannexb filter "
"failed to receive output packet\n");
- return ret;
+ goto exit;
}
- in_data = filtered_packet.data;
- len = filtered_packet.size;
-
+ avpkt = &filtered_packet;
av_packet_unref(&filter_packet);
}
- if (priv->parser) {
- int ret = 0;
-
- free_data = ret > 0;
-
- if (ret >= 0) {
- uint8_t *pout;
- int psize;
- int index;
- H264Context *h = priv->parser->priv_data;
-
- index = av_parser_parse2(priv->parser, avctx, &pout, &psize,
- in_data, len, avctx->internal->pkt->pts,
- avctx->internal->pkt->dts, 0);
- if (index < 0) {
- av_log(avctx, AV_LOG_WARNING,
- "CrystalHD: Failed to parse h.264 packet to "
- "detect interlacing.\n");
- } else if (index != len) {
- av_log(avctx, AV_LOG_WARNING,
- "CrystalHD: Failed to parse h.264 packet "
- "completely. Interlaced frames may be "
- "incorrectly detected.\n");
- } else {
- av_log(avctx, AV_LOG_VERBOSE,
- "CrystalHD: parser picture type %d\n",
- h->picture_structure);
- pic_type = h->picture_structure;
- }
- } else {
- av_log(avctx, AV_LOG_WARNING,
- "CrystalHD: mp4toannexb filter failed to filter "
- "packet. Interlaced frames may be incorrectly "
- "detected.\n");
- }
- }
-
- if (len < tx_free - 1024) {
+ if (avpkt->size < tx_free) {
/*
* Despite being notionally opaque, either libcrystalhd or
* the hardware itself will mangle pts values that are too
* avoiding mangling so we need to build a mapping to values
* we know will not be mangled.
*/
- uint64_t pts = opaque_list_push(priv, avctx->internal->pkt->pts, pic_type);
+ uint64_t pts = opaque_list_push(priv, avpkt->pts);
if (!pts) {
- if (free_data) {
- av_freep(&in_data);
- }
- return AVERROR(ENOMEM);
+ ret = AVERROR(ENOMEM);
+ goto exit;
}
av_log(priv->avctx, AV_LOG_VERBOSE,
"input \"pts\": %"PRIu64"\n", pts);
- ret = DtsProcInput(dev, in_data, len, pts, 0);
- if (free_data) {
- av_freep(&in_data);
- }
- if (ret == BC_STS_BUSY) {
+ bc_ret = DtsProcInput(dev, avpkt->data, avpkt->size, pts, 0);
+ if (bc_ret == BC_STS_BUSY) {
av_log(avctx, AV_LOG_WARNING,
"CrystalHD: ProcInput returned busy\n");
- usleep(BASE_WAIT);
- return AVERROR(EBUSY);
- } else if (ret != BC_STS_SUCCESS) {
+ ret = AVERROR(EAGAIN);
+ goto exit;
+ } else if (bc_ret != BC_STS_SUCCESS) {
av_log(avctx, AV_LOG_ERROR,
"CrystalHD: ProcInput failed: %u\n", ret);
- return -1;
+ ret = -1;
+ goto exit;
}
- avctx->has_b_frames++;
} else {
- av_log(avctx, AV_LOG_WARNING, "CrystalHD: Input buffer full\n");
- len = 0; // We didn't consume any bytes.
+ av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Input buffer full\n");
+ ret = AVERROR(EAGAIN);
+ goto exit;
}
} else {
av_log(avctx, AV_LOG_INFO, "CrystalHD: No more input data\n");
+ priv->draining = 1;
+ ret = AVERROR_EOF;
+ goto exit;
}
+ exit:
+ av_packet_unref(&filtered_packet);
+ return ret;
+}
- if (priv->skip_next_output) {
- av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Skipping next output.\n");
- priv->skip_next_output = 0;
- avctx->has_b_frames--;
- return len;
- }
-
- ret = DtsGetDriverStatus(dev, &decoder_status);
- if (ret != BC_STS_SUCCESS) {
- av_log(avctx, AV_LOG_ERROR, "CrystalHD: GetDriverStatus failed\n");
- return -1;
- }
+static int crystalhd_receive_frame(AVCodecContext *avctx, AVFrame *frame)
+{
+ BC_STATUS bc_ret;
+ BC_DTS_STATUS decoder_status = { 0, };
+ CopyRet rec_ret;
+ CHDContext *priv = avctx->priv_data;
+ HANDLE dev = priv->dev;
+ int got_frame = 0;
- /*
- * No frames ready. Don't try to extract.
- *
- * Empirical testing shows that ReadyListCount can be a damn lie,
- * and ProcOut still fails when count > 0. The same testing showed
- * that two more iterations were needed before ProcOutput would
- * succeed.
- */
- if (priv->output_ready < 2) {
- if (decoder_status.ReadyListCount != 0)
- priv->output_ready++;
- usleep(BASE_WAIT);
- av_log(avctx, AV_LOG_INFO, "CrystalHD: Filling pipeline.\n");
- return len;
- } else if (decoder_status.ReadyListCount == 0) {
- /*
- * After the pipeline is established, if we encounter a lack of frames
- * that probably means we're not giving the hardware enough time to
- * decode them, so start increasing the wait time at the end of a
- * decode call.
- */
- usleep(BASE_WAIT);
- priv->decode_wait += WAIT_UNIT;
- av_log(avctx, AV_LOG_INFO, "CrystalHD: No frames ready. Returning\n");
- return len;
- }
+ av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: receive_frame\n");
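+ /*
+  * receive_frame() returns RET_COPY_AGAIN in the cases where no user-visible
+  * frame was produced but another ProcOutput attempt may still succeed: after
+  * a format-change event, a busy status, an invalid PIB, the first field of
+  * an interlaced pair, or the duplicated packed p-frame on buggy mpeg4
+  * content. In those cases we simply go around the loop again.
+  */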
do {
- rec_ret = receive_frame(avctx, data, got_frame);
- if (rec_ret == RET_OK && *got_frame == 0) {
- /*
- * This case is for when the encoded fields are stored
- * separately and we get a separate avpkt for each one. To keep
- * the pipeline stable, we should return nothing and wait for
- * the next time round to grab the second field.
- * H.264 PAFF is an example of this.
- */
- av_log(avctx, AV_LOG_VERBOSE, "Returning after first field.\n");
- avctx->has_b_frames--;
- } else if (rec_ret == RET_COPY_NEXT_FIELD) {
- /*
- * This case is for when the encoded fields are stored in a
- * single avpkt but the hardware returns then separately. Unless
- * we grab the second field before returning, we'll slip another
- * frame in the pipeline and if that happens a lot, we're sunk.
- * So we have to get that second field now.
- * Interlaced mpeg2 and vc1 are examples of this.
- */
- av_log(avctx, AV_LOG_VERBOSE, "Trying to get second field.\n");
- while (1) {
- usleep(priv->decode_wait);
- ret = DtsGetDriverStatus(dev, &decoder_status);
- if (ret == BC_STS_SUCCESS &&
- decoder_status.ReadyListCount > 0) {
- rec_ret = receive_frame(avctx, data, got_frame);
- if ((rec_ret == RET_OK && *got_frame > 0) ||
- rec_ret == RET_ERROR)
- break;
- }
- }
- av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Got second field.\n");
- } else if (rec_ret == RET_SKIP_NEXT_COPY) {
- /*
- * Two input packets got turned into a field pair. Gawd.
- */
- av_log(avctx, AV_LOG_VERBOSE,
- "Don't output on next decode call.\n");
- priv->skip_next_output = 1;
+ bc_ret = DtsGetDriverStatus(dev, &decoder_status);
+ if (bc_ret != BC_STS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "CrystalHD: GetDriverStatus failed\n");
+ return -1;
+ }
+
+ if (decoder_status.ReadyListCount == 0) {
+ av_log(avctx, AV_LOG_INFO, "CrystalHD: Insufficient frames ready. Returning\n");
+ got_frame = 0;
+ rec_ret = RET_OK;
+ break;
}
- /*
- * If rec_ret == RET_COPY_AGAIN, that means that either we just handled
- * a FMT_CHANGE event and need to go around again for the actual frame,
- * we got a busy status and need to try again, or we're dealing with
- * packed b-frames, where the hardware strangely returns the packed
- * p-frame twice. We choose to keep the second copy as it carries the
- * valid pts.
- */
+
+ rec_ret = receive_frame(avctx, frame, &got_frame);
} while (rec_ret == RET_COPY_AGAIN);
- usleep(priv->decode_wait);
- return len;
+
+ if (rec_ret == RET_ERROR) {
+ return -1;
+ } else if (got_frame == 0) {
+ return priv->draining ? AVERROR_EOF : AVERROR(EAGAIN);
+ } else {
+ return 0;
+ }
}
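+/*
+ * Each use of this macro defines one CrystalHD decoder. For example,
+ * DEFINE_CRYSTALHD_DECODER(h264, H264) below expands to the AVClass
+ * h264_crystalhd_class and the AVCodec ff_h264_crystalhd_decoder,
+ * named "h264_crystalhd" and bound to AV_CODEC_ID_H264.
+ */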
+#define DEFINE_CRYSTALHD_DECODER(x, X) \
+ static const AVClass x##_crystalhd_class = { \
+ .class_name = #x "_crystalhd", \
+ .item_name = av_default_item_name, \
+ .option = options, \
+ .version = LIBAVUTIL_VERSION_INT, \
+ }; \
+ AVCodec ff_##x##_crystalhd_decoder = { \
+ .name = #x "_crystalhd", \
+ .long_name = NULL_IF_CONFIG_SMALL("CrystalHD " #X " decoder"), \
+ .type = AVMEDIA_TYPE_VIDEO, \
+ .id = AV_CODEC_ID_##X, \
+ .priv_data_size = sizeof(CHDContext), \
+ .priv_class = &x##_crystalhd_class, \
+ .init = init, \
+ .close = uninit, \
+ .send_packet = crystalhd_decode_packet, \
+ .receive_frame = crystalhd_receive_frame, \
+ .flush = flush, \
+ .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING, \
+ .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE}, \
+ };
#if CONFIG_H264_CRYSTALHD_DECODER
-static AVClass h264_class = {
- "h264_crystalhd",
- av_default_item_name,
- options,
- LIBAVUTIL_VERSION_INT,
-};
-
-AVCodec ff_h264_crystalhd_decoder = {
- .name = "h264_crystalhd",
- .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (CrystalHD acceleration)"),
- .type = AVMEDIA_TYPE_VIDEO,
- .id = AV_CODEC_ID_H264,
- .priv_data_size = sizeof(CHDContext),
- .init = init,
- .close = uninit,
- .decode = decode,
- .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
- .flush = flush,
- .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
- .priv_class = &h264_class,
-};
+DEFINE_CRYSTALHD_DECODER(h264, H264)
#endif
#if CONFIG_MPEG2_CRYSTALHD_DECODER
-static AVClass mpeg2_class = {
- "mpeg2_crystalhd",
- av_default_item_name,
- options,
- LIBAVUTIL_VERSION_INT,
-};
-
-AVCodec ff_mpeg2_crystalhd_decoder = {
- .name = "mpeg2_crystalhd",
- .long_name = NULL_IF_CONFIG_SMALL("MPEG-2 Video (CrystalHD acceleration)"),
- .type = AVMEDIA_TYPE_VIDEO,
- .id = AV_CODEC_ID_MPEG2VIDEO,
- .priv_data_size = sizeof(CHDContext),
- .init = init,
- .close = uninit,
- .decode = decode,
- .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
- .flush = flush,
- .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
- .priv_class = &mpeg2_class,
-};
+DEFINE_CRYSTALHD_DECODER(mpeg2, MPEG2VIDEO)
#endif
#if CONFIG_MPEG4_CRYSTALHD_DECODER
-static AVClass mpeg4_class = {
- "mpeg4_crystalhd",
- av_default_item_name,
- options,
- LIBAVUTIL_VERSION_INT,
-};
-
-AVCodec ff_mpeg4_crystalhd_decoder = {
- .name = "mpeg4_crystalhd",
- .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 Part 2 (CrystalHD acceleration)"),
- .type = AVMEDIA_TYPE_VIDEO,
- .id = AV_CODEC_ID_MPEG4,
- .priv_data_size = sizeof(CHDContext),
- .init = init,
- .close = uninit,
- .decode = decode,
- .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
- .flush = flush,
- .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
- .priv_class = &mpeg4_class,
-};
+DEFINE_CRYSTALHD_DECODER(mpeg4, MPEG4)
#endif
#if CONFIG_MSMPEG4_CRYSTALHD_DECODER
-static AVClass msmpeg4_class = {
- "msmpeg4_crystalhd",
- av_default_item_name,
- options,
- LIBAVUTIL_VERSION_INT,
-};
-
-AVCodec ff_msmpeg4_crystalhd_decoder = {
- .name = "msmpeg4_crystalhd",
- .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 Part 2 Microsoft variant version 3 (CrystalHD acceleration)"),
- .type = AVMEDIA_TYPE_VIDEO,
- .id = AV_CODEC_ID_MSMPEG4V3,
- .priv_data_size = sizeof(CHDContext),
- .init = init,
- .close = uninit,
- .decode = decode,
- .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_EXPERIMENTAL,
- .flush = flush,
- .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
- .priv_class = &msmpeg4_class,
-};
+DEFINE_CRYSTALHD_DECODER(msmpeg4, MSMPEG4V3)
#endif
#if CONFIG_VC1_CRYSTALHD_DECODER
-static AVClass vc1_class = {
- "vc1_crystalhd",
- av_default_item_name,
- options,
- LIBAVUTIL_VERSION_INT,
-};
-
-AVCodec ff_vc1_crystalhd_decoder = {
- .name = "vc1_crystalhd",
- .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 (CrystalHD acceleration)"),
- .type = AVMEDIA_TYPE_VIDEO,
- .id = AV_CODEC_ID_VC1,
- .priv_data_size = sizeof(CHDContext),
- .init = init,
- .close = uninit,
- .decode = decode,
- .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
- .flush = flush,
- .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
- .priv_class = &vc1_class,
-};
+DEFINE_CRYSTALHD_DECODER(vc1, VC1)
#endif
#if CONFIG_WMV3_CRYSTALHD_DECODER
-static AVClass wmv3_class = {
- "wmv3_crystalhd",
- av_default_item_name,
- options,
- LIBAVUTIL_VERSION_INT,
-};
-
-AVCodec ff_wmv3_crystalhd_decoder = {
- .name = "wmv3_crystalhd",
- .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 (CrystalHD acceleration)"),
- .type = AVMEDIA_TYPE_VIDEO,
- .id = AV_CODEC_ID_WMV3,
- .priv_data_size = sizeof(CHDContext),
- .init = init,
- .close = uninit,
- .decode = decode,
- .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
- .flush = flush,
- .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
- .priv_class = &wmv3_class,
-};
+DEFINE_CRYSTALHD_DECODER(wmv3, WMV3)
#endif