/*
 * Intel MediaSDK QSV based H.264 decoder
 *
 * copyright (c) 2013 Luca Barbato
 * copyright (c) 2015 Anton Khirnov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <limits.h>
#include <stdint.h>
#include <string.h>

#include <mfx/mfxvideo.h>

#include "libavutil/common.h"
#include "libavutil/fifo.h"
#include "libavutil/opt.h"

#include "qsv_internal.h"
40 typedef struct QSVH264Context {
44 // the internal parser and codec context for parsing the data
45 AVCodecParserContext *parser;
46 AVCodecContext *avctx_internal;
47 enum AVPixelFormat orig_pix_fmt;
49 // the filter for converting to Annex B
50 AVBitStreamFilterContext *bsf;
52 AVFifoBuffer *packet_fifo;
55 AVPacket pkt_filtered;
56 uint8_t *filtered_data;
59 static void qsv_clear_buffers(QSVH264Context *s)
62 while (av_fifo_size(s->packet_fifo) >= sizeof(pkt)) {
63 av_fifo_generic_read(s->packet_fifo, &pkt, sizeof(pkt), NULL);
64 av_packet_unref(&pkt);
67 if (s->filtered_data != s->input_ref.data)
68 av_freep(&s->filtered_data);
69 s->filtered_data = NULL;
70 av_packet_unref(&s->input_ref);
73 static av_cold int qsv_decode_close(AVCodecContext *avctx)
75 QSVH264Context *s = avctx->priv_data;
77 ff_qsv_decode_close(&s->qsv);
81 av_fifo_free(s->packet_fifo);
83 av_bitstream_filter_close(s->bsf);
84 av_parser_close(s->parser);
85 avcodec_free_context(&s->avctx_internal);
90 static av_cold int qsv_decode_init(AVCodecContext *avctx)
92 QSVH264Context *s = avctx->priv_data;
95 s->orig_pix_fmt = AV_PIX_FMT_NONE;
97 s->packet_fifo = av_fifo_alloc(sizeof(AVPacket));
98 if (!s->packet_fifo) {
99 ret = AVERROR(ENOMEM);
103 s->bsf = av_bitstream_filter_init("h264_mp4toannexb");
105 ret = AVERROR(ENOMEM);
109 s->avctx_internal = avcodec_alloc_context3(NULL);
110 if (!s->avctx_internal) {
111 ret = AVERROR(ENOMEM);
115 if (avctx->extradata) {
116 s->avctx_internal->extradata = av_mallocz(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
117 if (!s->avctx_internal->extradata) {
118 ret = AVERROR(ENOMEM);
121 memcpy(s->avctx_internal->extradata, avctx->extradata,
122 avctx->extradata_size);
123 s->avctx_internal->extradata_size = avctx->extradata_size;
126 s->parser = av_parser_init(AV_CODEC_ID_H264);
128 ret = AVERROR(ENOMEM);
131 s->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
133 s->qsv.iopattern = MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
137 qsv_decode_close(avctx);
141 static int qsv_process_data(AVCodecContext *avctx, AVFrame *frame,
142 int *got_frame, AVPacket *pkt)
144 QSVH264Context *s = avctx->priv_data;
149 /* we assume the packets are already split properly and want
150 * just the codec parameters here */
151 av_parser_parse2(s->parser, s->avctx_internal,
152 &dummy_data, &dummy_size,
153 pkt->data, pkt->size, pkt->pts, pkt->dts,
156 /* TODO: flush delayed frames on reinit */
157 if (s->parser->format != s->orig_pix_fmt ||
158 s->parser->coded_width != avctx->coded_width ||
159 s->parser->coded_height != avctx->coded_height) {
160 mfxSession session = NULL;
162 enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_QSV,
165 enum AVPixelFormat qsv_format;
167 qsv_format = ff_qsv_map_pixfmt(s->parser->format);
168 if (qsv_format < 0) {
169 av_log(avctx, AV_LOG_ERROR,
170 "Only 8-bit YUV420 streams are supported.\n");
171 ret = AVERROR(ENOSYS);
175 s->orig_pix_fmt = s->parser->format;
176 avctx->pix_fmt = pix_fmts[1] = qsv_format;
177 avctx->width = s->parser->width;
178 avctx->height = s->parser->height;
179 avctx->coded_width = s->parser->coded_width;
180 avctx->coded_height = s->parser->coded_height;
181 avctx->level = s->avctx_internal->level;
182 avctx->profile = s->avctx_internal->profile;
184 ret = ff_get_format(avctx, pix_fmts);
188 avctx->pix_fmt = ret;
190 if (avctx->hwaccel_context) {
191 AVQSVContext *user_ctx = avctx->hwaccel_context;
192 session = user_ctx->session;
193 s->qsv.iopattern = user_ctx->iopattern;
194 s->qsv.ext_buffers = user_ctx->ext_buffers;
195 s->qsv.nb_ext_buffers = user_ctx->nb_ext_buffers;
198 ret = ff_qsv_decode_init(avctx, &s->qsv, session);
203 return ff_qsv_decode(avctx, &s->qsv, frame, got_frame, &s->pkt_filtered);
206 s->orig_pix_fmt = s->parser->format = avctx->pix_fmt = AV_PIX_FMT_NONE;
210 static int qsv_decode_frame(AVCodecContext *avctx, void *data,
211 int *got_frame, AVPacket *avpkt)
213 QSVH264Context *s = avctx->priv_data;
214 AVFrame *frame = data;
217 /* buffer the input packet */
219 AVPacket input_ref = { 0 };
221 if (av_fifo_space(s->packet_fifo) < sizeof(input_ref)) {
222 ret = av_fifo_realloc2(s->packet_fifo,
223 av_fifo_size(s->packet_fifo) + sizeof(input_ref));
228 ret = av_packet_ref(&input_ref, avpkt);
231 av_fifo_generic_write(s->packet_fifo, &input_ref, sizeof(input_ref), NULL);
234 /* process buffered data */
235 while (!*got_frame) {
236 /* prepare the input data -- convert to Annex B if needed */
237 if (s->pkt_filtered.size <= 0) {
241 if (av_fifo_size(s->packet_fifo) < sizeof(AVPacket))
242 return avpkt->size ? avpkt->size : ff_qsv_decode(avctx, &s->qsv, frame, got_frame, avpkt);
244 if (s->filtered_data != s->input_ref.data)
245 av_freep(&s->filtered_data);
246 s->filtered_data = NULL;
247 av_packet_unref(&s->input_ref);
249 av_fifo_generic_read(s->packet_fifo, &s->input_ref, sizeof(s->input_ref), NULL);
250 ret = av_bitstream_filter_filter(s->bsf, avctx, NULL,
251 &s->filtered_data, &size,
252 s->input_ref.data, s->input_ref.size, 0);
254 s->filtered_data = s->input_ref.data;
255 size = s->input_ref.size;
257 s->pkt_filtered = s->input_ref;
258 s->pkt_filtered.data = s->filtered_data;
259 s->pkt_filtered.size = size;
262 ret = qsv_process_data(avctx, frame, got_frame, &s->pkt_filtered);
266 s->pkt_filtered.size -= ret;
267 s->pkt_filtered.data += ret;
273 static void qsv_decode_flush(AVCodecContext *avctx)
275 QSVH264Context *s = avctx->priv_data;
277 qsv_clear_buffers(s);
278 s->orig_pix_fmt = AV_PIX_FMT_NONE;
281 AVHWAccel ff_h264_qsv_hwaccel = {
283 .type = AVMEDIA_TYPE_VIDEO,
284 .id = AV_CODEC_ID_H264,
285 .pix_fmt = AV_PIX_FMT_QSV,
288 #define OFFSET(x) offsetof(QSVH264Context, x)
289 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
290 static const AVOption options[] = {
291 { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 0, INT_MAX, VD },
295 static const AVClass class = {
296 .class_name = "h264_qsv",
297 .item_name = av_default_item_name,
299 .version = LIBAVUTIL_VERSION_INT,
302 AVCodec ff_h264_qsv_decoder = {
304 .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (Intel Quick Sync Video acceleration)"),
305 .priv_data_size = sizeof(QSVH264Context),
306 .type = AVMEDIA_TYPE_VIDEO,
307 .id = AV_CODEC_ID_H264,
308 .init = qsv_decode_init,
309 .decode = qsv_decode_frame,
310 .flush = qsv_decode_flush,
311 .close = qsv_decode_close,
312 .capabilities = CODEC_CAP_DELAY,
313 .priv_class = &class,