#include <interface/mmal/util/mmal_util_params.h>
#include <interface/mmal/util/mmal_default_components.h>
#include <interface/mmal/vc/mmal_vc_api.h>
+#include <stdatomic.h>
#include "avcodec.h"
#include "internal.h"
-#include "libavutil/atomic.h"
#include "libavutil/avassert.h"
#include "libavutil/buffer.h"
#include "libavutil/common.h"
+#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/log.h"
// refcounting for AVFrames, we can free the MMAL_POOL_T only after all AVFrames
// have been unreferenced.
typedef struct FFPoolRef {
-    volatile int refcount;
+    atomic_int refcount; // C11 atomic: counts outstanding AVFrames referencing the pool
    MMAL_POOL_T *pool;
} FFPoolRef;
typedef struct MMALDecodeContext {
    AVClass *av_class;
    int extra_buffers;
+    int extra_decoder_buffers; // user option: extra frames the MMAL decoder may buffer internally
    MMAL_COMPONENT_T *decoder;
    MMAL_QUEUE_T *queue_decoded_frames;
    FFBufferEntry *waiting_buffers, *waiting_buffers_tail;
    int64_t packets_sent;
+    atomic_int packets_buffered; // FRAME_END packets in flight on the input port; decremented from the MMAL callback thread, hence atomic
    int64_t frames_output;
    int eos_received;
    int eos_sent;
static void ffmmal_poolref_unref(FFPoolRef *ref)
{
-    if (ref && avpriv_atomic_int_add_and_fetch(&ref->refcount, -1) == 0) {
+    if (ref &&
+        atomic_fetch_add_explicit(&ref->refcount, -1, memory_order_acq_rel) == 1) { // fetch_add returns the PREVIOUS value: 1 means this was the last reference
        mmal_pool_destroy(ref->pool);
        av_free(ref);
    }
return AVERROR(ENOMEM);
}
- avpriv_atomic_int_add_and_fetch(&ref->pool->refcount, 1);
+ atomic_fetch_add_explicit(&ref->pool->refcount, 1, memory_order_relaxed);
mmal_buffer_header_acquire(buffer);
frame->format = AV_PIX_FMT_MMAL;
ctx->waiting_buffers = buffer->next;
+ if (buffer->flags & MMAL_BUFFER_HEADER_FLAG_FRAME_END)
+ atomic_fetch_add(&ctx->packets_buffered, -1);
+
av_buffer_unref(&buffer->ref);
av_free(buffer);
}
ctx->waiting_buffers_tail = NULL;
+ av_assert0(atomic_load(&ctx->packets_buffered) == 0);
+
ctx->frames_output = ctx->eos_received = ctx->eos_sent = ctx->packets_sent = ctx->extradata_sent = 0;
}
static void input_callback(MMAL_PORT_T *port, MMAL_BUFFER_HEADER_T *buffer)
{
+    AVCodecContext *avctx = (AVCodecContext*)port->userdata; // NOTE(review): assumes userdata was set to avctx when the port was enabled — confirm at setup site
+    MMALDecodeContext *ctx = avctx->priv_data;
+
    if (!buffer->cmd) {
-        AVBufferRef *buf = buffer->user_data;
-        av_buffer_unref(&buf);
+        FFBufferEntry *entry = buffer->user_data; // user_data now carries the whole FFBufferEntry, not just its AVBufferRef
+        av_buffer_unref(&entry->ref);
+        if (entry->flags & MMAL_BUFFER_HEADER_FLAG_FRAME_END)
+            atomic_fetch_add(&ctx->packets_buffered, -1); // callback runs on an MMAL thread, hence the atomic
+        av_free(entry);
    }
    mmal_buffer_header_release(buffer);
}
ret = AVERROR(ENOMEM);
goto fail;
}
- ctx->pool_out->refcount = 1;
+ atomic_init(&ctx->pool_out->refcount, 1);
if (!format_out)
goto fail;
MMAL_STATUS_T status;
MMAL_ES_FORMAT_T *format_in;
MMAL_COMPONENT_T *decoder;
+ char tmp[32];
int ret = 0;
bcm_host_init();
format_in = decoder->input[0]->format;
format_in->type = MMAL_ES_TYPE_VIDEO;
- format_in->encoding = MMAL_ENCODING_H264;
+ switch (avctx->codec_id) {
+ case AV_CODEC_ID_MPEG2VIDEO:
+ format_in->encoding = MMAL_ENCODING_MP2V;
+ break;
+ case AV_CODEC_ID_VC1:
+ format_in->encoding = MMAL_ENCODING_WVC1;
+ break;
+ case AV_CODEC_ID_H264:
+ default:
+ format_in->encoding = MMAL_ENCODING_H264;
+ break;
+ }
format_in->es->video.width = FFALIGN(avctx->width, 32);
format_in->es->video.height = FFALIGN(avctx->height, 16);
format_in->es->video.crop.width = avctx->width;
format_in->es->video.par.den = avctx->sample_aspect_ratio.den;
format_in->flags = MMAL_ES_FORMAT_FLAG_FRAMED;
+ av_get_codec_tag_string(tmp, sizeof(tmp), format_in->encoding);
+ av_log(avctx, AV_LOG_DEBUG, "Using MMAL %s encoding.\n", tmp);
+
+#if HAVE_MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS
+ if (mmal_port_parameter_set_uint32(decoder->input[0], MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS,
+ -1 - ctx->extra_decoder_buffers)) {
+ av_log(avctx, AV_LOG_WARNING, "Could not set input buffering limit.\n");
+ }
+#endif
+
if ((status = mmal_port_format_commit(decoder->input[0])))
goto fail;
if (!is_extradata)
ctx->packets_sent++;
} else {
+ if (ctx->eos_sent)
+ goto done;
if (!ctx->packets_sent) {
// Short-cut the flush logic to avoid upsetting MMAL.
ctx->eos_sent = 1;
buffer->pts = avpkt->pts == AV_NOPTS_VALUE ? MMAL_TIME_UNKNOWN : avpkt->pts;
buffer->dts = avpkt->dts == AV_NOPTS_VALUE ? MMAL_TIME_UNKNOWN : avpkt->dts;
- if (!size)
+ if (!size) {
buffer->flags |= MMAL_BUFFER_HEADER_FLAG_FRAME_END;
+ atomic_fetch_add(&ctx->packets_buffered, 1);
+ }
if (!buffer->length) {
buffer->flags |= MMAL_BUFFER_HEADER_FLAG_EOS;
mbuffer->flags = buffer->flags;
mbuffer->data = buffer->data;
mbuffer->length = buffer->length;
- mbuffer->user_data = buffer->ref;
+ mbuffer->user_data = buffer;
mbuffer->alloc_size = ctx->decoder->input[0]->buffer_size;
- if ((status = mmal_port_send_buffer(ctx->decoder->input[0], mbuffer))) {
- mmal_buffer_header_release(mbuffer);
- av_buffer_unref(&buffer->ref);
- }
-
// Remove from start of the list
ctx->waiting_buffers = buffer->next;
if (ctx->waiting_buffers_tail == buffer)
ctx->waiting_buffers_tail = NULL;
- av_free(buffer);
+
+ if ((status = mmal_port_send_buffer(ctx->decoder->input[0], mbuffer))) {
+ mmal_buffer_header_release(mbuffer);
+ av_buffer_unref(&buffer->ref);
+ if (buffer->flags & MMAL_BUFFER_HEADER_FLAG_FRAME_END)
+ atomic_fetch_add(&ctx->packets_buffered, -1);
+ av_free(buffer);
+ }
if (status) {
av_log(avctx, AV_LOG_ERROR, "MMAL error %d when sending input\n", (int)status);
} else {
int w = FFALIGN(avctx->width, 32);
int h = FFALIGN(avctx->height, 16);
- char *ptr;
- int plane;
- int i;
+ uint8_t *src[4];
+ int linesize[4];
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
goto done;
- ptr = buffer->data + buffer->type->video.offset[0];
- for (i = 0; i < avctx->height; i++)
- memcpy(frame->data[0] + frame->linesize[0] * i, ptr + w * i, avctx->width);
-
- ptr += w * h;
-
- for (plane = 1; plane < 3; plane++) {
- for (i = 0; i < avctx->height / 2; i++)
- memcpy(frame->data[plane] + frame->linesize[plane] * i, ptr + w / 2 * i, (avctx->width + 1) / 2);
- ptr += w / 2 * h / 2;
- }
+ av_image_fill_arrays(src, linesize,
+ buffer->data + buffer->type->video.offset[0],
+ avctx->pix_fmt, w, h, 1);
+ av_image_copy(frame->data, frame->linesize, src, linesize,
+ avctx->pix_fmt, avctx->width, avctx->height);
}
- frame->pkt_pts = buffer->pts == MMAL_TIME_UNKNOWN ? AV_NOPTS_VALUE : buffer->pts;
+ frame->pts = buffer->pts == MMAL_TIME_UNKNOWN ? AV_NOPTS_VALUE : buffer->pts;
+#if FF_API_PKT_PTS
+FF_DISABLE_DEPRECATION_WARNINGS
+ frame->pkt_pts = frame->pts;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
frame->pkt_dts = AV_NOPTS_VALUE;
done:
// being busy from decoder waiting for input. So just poll at the start and
// keep feeding new data to the buffer.
// We are pretty sure the decoder will produce output if we sent more input
- // frames than what a h264 decoder could logically delay. This avoids too
+ // frames than what a H.264 decoder could logically delay. This avoids too
// excessive buffering.
// We also wait if we sent eos, but didn't receive it yet (think of decoding
// stream with a very low number of frames).
- if (ctx->frames_output || ctx->packets_sent > MAX_DELAYED_FRAMES ||
+ if (atomic_load(&ctx->packets_buffered) > MAX_DELAYED_FRAMES ||
(ctx->packets_sent && ctx->eos_sent)) {
// MMAL will ignore broken input packets, which means the frame we
// expect here may never arrive. Dealing with this correctly is
.pix_fmt = AV_PIX_FMT_MMAL,
};
-static const AVOption options[]={
- {"extra_buffers", "extra buffers", offsetof(MMALDecodeContext, extra_buffers), AV_OPT_TYPE_INT, {.i64 = 10}, 0, 256, 0},
- {NULL}
+// Hwaccel descriptor pairing MPEG-2 with the MMAL pixel format.
+AVHWAccel ff_mpeg2_mmal_hwaccel = {
+    .name       = "mpeg2_mmal",
+    .type       = AVMEDIA_TYPE_VIDEO,
+    .id         = AV_CODEC_ID_MPEG2VIDEO,
+    .pix_fmt    = AV_PIX_FMT_MMAL,
};
-static const AVClass ffmmaldec_class = {
- .class_name = "mmaldec",
- .option = options,
- .version = LIBAVUTIL_VERSION_INT,
+// Hwaccel descriptor pairing VC-1 with the MMAL pixel format.
+AVHWAccel ff_vc1_mmal_hwaccel = {
+    .name       = "vc1_mmal",
+    .type       = AVMEDIA_TYPE_VIDEO,
+    .id         = AV_CODEC_ID_VC1,
+    .pix_fmt    = AV_PIX_FMT_MMAL,
};
-AVCodec ff_h264_mmal_decoder = {
- .name = "h264_mmal",
- .long_name = NULL_IF_CONFIG_SMALL("h264 (mmal)"),
- .type = AVMEDIA_TYPE_VIDEO,
- .id = AV_CODEC_ID_H264,
- .priv_data_size = sizeof(MMALDecodeContext),
- .init = ffmmal_init_decoder,
- .close = ffmmal_close_decoder,
- .decode = ffmmal_decode,
- .flush = ffmmal_flush,
- .priv_class = &ffmmaldec_class,
- .capabilities = AV_CODEC_CAP_DELAY,
- .caps_internal = FF_CODEC_CAP_SETS_PKT_DTS,
- .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_MMAL,
- AV_PIX_FMT_YUV420P,
- AV_PIX_FMT_NONE},
+// Private options shared by every MMAL decoder class (both default to 10, range 0..256).
+static const AVOption options[]={
+    {"extra_buffers", "extra buffers", offsetof(MMALDecodeContext, extra_buffers), AV_OPT_TYPE_INT, {.i64 = 10}, 0, 256, 0},
+    {"extra_decoder_buffers", "extra MMAL internal buffered frames", offsetof(MMALDecodeContext, extra_decoder_buffers), AV_OPT_TYPE_INT, {.i64 = 10}, 0, 256, 0},
+    {NULL}
};
+
+// Expands to a per-codec AVClass named ffmmal_<NAME>_dec_class, wired to the shared options table.
+#define FFMMAL_DEC_CLASS(NAME) \
+    static const AVClass ffmmal_##NAME##_dec_class = { \
+        .class_name = "mmal_" #NAME "_dec", \
+        .option     = options, \
+        .version    = LIBAVUTIL_VERSION_INT, \
+    };
+
+// Expands to the AVClass plus the AVCodec definition for one MMAL-backed decoder.
+// All instances share the same init/close/decode/flush callbacks and pixel formats.
+#define FFMMAL_DEC(NAME, ID) \
+    FFMMAL_DEC_CLASS(NAME) \
+    AVCodec ff_##NAME##_mmal_decoder = { \
+        .name           = #NAME "_mmal", \
+        .long_name      = NULL_IF_CONFIG_SMALL(#NAME " (mmal)"), \
+        .type           = AVMEDIA_TYPE_VIDEO, \
+        .id             = ID, \
+        .priv_data_size = sizeof(MMALDecodeContext), \
+        .init           = ffmmal_init_decoder, \
+        .close          = ffmmal_close_decoder, \
+        .decode         = ffmmal_decode, \
+        .flush          = ffmmal_flush, \
+        .priv_class     = &ffmmal_##NAME##_dec_class, \
+        .capabilities   = AV_CODEC_CAP_DELAY, \
+        .caps_internal  = FF_CODEC_CAP_SETS_PKT_DTS, \
+        .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_MMAL, \
+                                                         AV_PIX_FMT_YUV420P, \
+                                                         AV_PIX_FMT_NONE}, \
+    };
+
+// Instantiate the three supported MMAL decoders.
+FFMMAL_DEC(h264, AV_CODEC_ID_H264)
+FFMMAL_DEC(mpeg2, AV_CODEC_ID_MPEG2VIDEO)
+FFMMAL_DEC(vc1, AV_CODEC_ID_VC1)