}
c = avcodec_alloc_context3(codec);
- picture = av_frame_alloc();
+ if (!c) {
+ fprintf(stderr, "Could not allocate video codec context\n");
+ exit(1);
+ }
if(codec->capabilities&CODEC_CAP_TRUNCATED)
- c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */
+ c->flags |= AV_CODEC_FLAG_TRUNCATED; // we do not send complete frames
/* For some codecs, such as msmpeg4 and mpeg4, width and height
   MUST be initialized there because this information is not
   available in the bitstream. */
exit(1);
}
- ost->st = avformat_new_stream(oc, codec);
+ ost->st = avformat_new_stream(oc, *codec);
if (!ost->st) {
- fprintf(stderr, "Could not alloc stream\n");
+ fprintf(stderr, "Could not allocate stream\n");
exit(1);
}
-
+ ost->st->id = oc->nb_streams-1;
c = ost->st->codec;
- /* put sample parameters */
- c->sample_fmt = codec->sample_fmts ? codec->sample_fmts[0] : AV_SAMPLE_FMT_S16;
- c->sample_rate = codec->supported_samplerates ? codec->supported_samplerates[0] : 44100;
- c->channel_layout = codec->channel_layouts ? codec->channel_layouts[0] : AV_CH_LAYOUT_STEREO;
- c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
- c->bit_rate = 64000;
+ switch ((*codec)->type) {
+ case AVMEDIA_TYPE_AUDIO:
+ c->sample_fmt = (*codec)->sample_fmts ?
+ (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
+ c->bit_rate = 64000;
+ c->sample_rate = 44100;
+ if ((*codec)->supported_samplerates) {
+ c->sample_rate = (*codec)->supported_samplerates[0];
+ for (i = 0; (*codec)->supported_samplerates[i]; i++) {
+ if ((*codec)->supported_samplerates[i] == 44100)
+ c->sample_rate = 44100;
+ }
+ }
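+ /* i.e. prefer 44100 Hz when the encoder supports it, otherwise fall back to the first rate it advertises */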
+ c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
+ c->channel_layout = AV_CH_LAYOUT_STEREO;
+ if ((*codec)->channel_layouts) {
+ c->channel_layout = (*codec)->channel_layouts[0];
+ for (i = 0; (*codec)->channel_layouts[i]; i++) {
+ if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
+ c->channel_layout = AV_CH_LAYOUT_STEREO;
+ }
+ }
+ c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
+ ost->st->time_base = (AVRational){ 1, c->sample_rate };
+ break;
+
+ case AVMEDIA_TYPE_VIDEO:
+ c->codec_id = codec_id;
+
+ c->bit_rate = 400000;
+ /* Resolution must be a multiple of two. */
+ c->width = 352;
+ c->height = 288;
+ /* timebase: This is the fundamental unit of time (in seconds) in terms
+ * of which frame timestamps are represented. For fixed-fps content,
+ * timebase should be 1/framerate and timestamp increments should be
+ * identical to 1. */
+ ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
+ c->time_base = ost->st->time_base;
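+ /* e.g. with STREAM_FRAME_RATE == 25 the time_base is 1/25, so a frame with pts == 50 is presented at 50 * 1/25 = 2 seconds */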
+
+ c->gop_size = 12; /* emit one intra frame every twelve frames at most */
+ c->pix_fmt = STREAM_PIX_FMT;
+ if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
+ /* just for testing, we also add B frames */
+ c->max_b_frames = 2;
+ }
+ if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
+ /* Needed to avoid using macroblocks in which some coeffs overflow.
+ * This does not happen with normal video, it just happens here as
+ * the motion of the chroma plane does not match the luma plane. */
+ c->mb_decision = 2;
+ }
+ break;
- ost->st->time_base = (AVRational){ 1, c->sample_rate };
+ default:
+ break;
+ }
- // some formats want stream headers to be separate
+ /* Some formats want stream headers to be separate. */
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
- c->flags |= CODEC_FLAG_GLOBAL_HEADER;
+ c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
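+ /* e.g. MP4/Matroska keep codec extradata (such as H.264 SPS/PPS) once in the container header instead of repeating it inside the bitstream */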
-
- /* initialize sample format conversion;
- * to simplify the code, we always pass the data through lavr, even
- * if the encoder supports the generated format directly -- the price is
- * some extra data copying;
- */
- ost->avr = avresample_alloc_context();
- if (!ost->avr) {
- fprintf(stderr, "Error allocating the resampling context\n");
- exit(1);
- }
-
- av_opt_set_int(ost->avr, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
- av_opt_set_int(ost->avr, "in_sample_rate", 44100, 0);
- av_opt_set_int(ost->avr, "in_channel_layout", AV_CH_LAYOUT_STEREO, 0);
- av_opt_set_int(ost->avr, "out_sample_fmt", c->sample_fmt, 0);
- av_opt_set_int(ost->avr, "out_sample_rate", c->sample_rate, 0);
- av_opt_set_int(ost->avr, "out_channel_layout", c->channel_layout, 0);
-
- ret = avresample_open(ost->avr);
- if (ret < 0) {
- fprintf(stderr, "Error opening the resampling context\n");
- exit(1);
- }
}
+/**************************************************************/
+/* audio output */
+
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
uint64_t channel_layout,
int sample_rate, int nb_samples)
--- /dev/null
+++ b/doc/examples/remuxing.c
- out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
+/*
+ * Copyright (c) 2013 Stefano Sabatini
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * libavformat/libavcodec demuxing and muxing API example.
+ *
+ * Remux streams from one container format to another.
+ * @example remuxing.c
+ */
+
+#include <libavutil/timestamp.h>
+#include <libavformat/avformat.h>
+
+static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
+{
+ AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
+
+ printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
+ tag,
+ av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
+ av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
+ av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
+ pkt->stream_index);
+}
+
+int main(int argc, char **argv)
+{
+ AVOutputFormat *ofmt = NULL;
+ AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
+ AVPacket pkt;
+ const char *in_filename, *out_filename;
+ int ret, i;
+
+ if (argc < 3) {
+ printf("usage: %s input output\n"
+ "API example program to remux a media file with libavformat and libavcodec.\n"
+ "The output format is guessed according to the file extension.\n"
+ "\n", argv[0]);
+ return 1;
+ }
+
+ in_filename = argv[1];
+ out_filename = argv[2];
+
+ av_register_all();
+
+ if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
+ fprintf(stderr, "Could not open input file '%s'", in_filename);
+ goto end;
+ }
+
+ if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
+ fprintf(stderr, "Failed to retrieve input stream information");
+ goto end;
+ }
+
+ av_dump_format(ifmt_ctx, 0, in_filename, 0);
+
+ avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
+ if (!ofmt_ctx) {
+ fprintf(stderr, "Could not create output context\n");
+ ret = AVERROR_UNKNOWN;
+ goto end;
+ }
+
+ ofmt = ofmt_ctx->oformat;
+
+ for (i = 0; i < ifmt_ctx->nb_streams; i++) {
+ AVStream *in_stream = ifmt_ctx->streams[i];
+ AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
+ if (!out_stream) {
+ fprintf(stderr, "Failed allocating output stream\n");
+ ret = AVERROR_UNKNOWN;
+ goto end;
+ }
+
+ ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
+ goto end;
+ }
+ out_stream->codec->codec_tag = 0;
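+ /* clear the codec tag: the fourcc copied from the input container may not be valid in the output container; 0 lets the muxer pick a suitable tag */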
+ if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
++ out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+ }
+ av_dump_format(ofmt_ctx, 0, out_filename, 1);
+
+ if (!(ofmt->flags & AVFMT_NOFILE)) {
+ ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
+ if (ret < 0) {
+ fprintf(stderr, "Could not open output file '%s'", out_filename);
+ goto end;
+ }
+ }
+
+ ret = avformat_write_header(ofmt_ctx, NULL);
+ if (ret < 0) {
+ fprintf(stderr, "Error occurred when opening output file\n");
+ goto end;
+ }
+
+ while (1) {
+ AVStream *in_stream, *out_stream;
+
+ ret = av_read_frame(ifmt_ctx, &pkt);
+ if (ret < 0)
+ break;
+
+ in_stream = ifmt_ctx->streams[pkt.stream_index];
+ out_stream = ofmt_ctx->streams[pkt.stream_index];
+
+ log_packet(ifmt_ctx, &pkt, "in");
+
+ /* copy packet */
+ pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+ pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+ pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
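+ /* av_rescale_q_rnd() converts timestamps from the input stream's time base to the output stream's:
+ e.g. pts 90000 in a 1/90000 (MPEG-TS) time base becomes 1000 in Matroska's 1/1000 time base, both one second;
+ AV_ROUND_PASS_MINMAX lets AV_NOPTS_VALUE pass through unchanged */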
+ pkt.pos = -1;
+ log_packet(ofmt_ctx, &pkt, "out");
+
+ ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
+ if (ret < 0) {
+ fprintf(stderr, "Error muxing packet\n");
+ break;
+ }
+ av_free_packet(&pkt);
+ }
+
+ av_write_trailer(ofmt_ctx);
+end:
+
+ avformat_close_input(&ifmt_ctx);
+
+ /* close output */
+ if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
+ avio_closep(&ofmt_ctx->pb);
+ avformat_free_context(ofmt_ctx);
+
+ if (ret < 0 && ret != AVERROR_EOF) {
+ fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
+ return 1;
+ }
+
+ return 0;
+}
--- /dev/null
+++ b/doc/examples/transcoding.c
- enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
+/*
+ * Copyright (c) 2010 Nicolas George
+ * Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2014 Andrey Utkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * API example for demuxing, decoding, filtering, encoding and muxing
+ * @example transcoding.c
+ */
+
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
+#include <libavfilter/avfiltergraph.h>
+#include <libavfilter/avcodec.h>
+#include <libavfilter/buffersink.h>
+#include <libavfilter/buffersrc.h>
+#include <libavutil/opt.h>
+#include <libavutil/pixdesc.h>
+
+static AVFormatContext *ifmt_ctx;
+static AVFormatContext *ofmt_ctx;
+typedef struct FilteringContext {
+ AVFilterContext *buffersink_ctx;
+ AVFilterContext *buffersrc_ctx;
+ AVFilterGraph *filter_graph;
+} FilteringContext;
+static FilteringContext *filter_ctx;
+
+static int open_input_file(const char *filename)
+{
+ int ret;
+ unsigned int i;
+
+ ifmt_ctx = NULL;
+ if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
+ return ret;
+ }
+
+ if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
+ return ret;
+ }
+
+ for (i = 0; i < ifmt_ctx->nb_streams; i++) {
+ AVStream *stream;
+ AVCodecContext *codec_ctx;
+ stream = ifmt_ctx->streams[i];
+ codec_ctx = stream->codec;
+ /* Reencode video & audio and remux subtitles etc. */
+ if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
+ || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
+ /* Open decoder */
+ ret = avcodec_open2(codec_ctx,
+ avcodec_find_decoder(codec_ctx->codec_id), NULL);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
+ return ret;
+ }
+ }
+ }
+
+ av_dump_format(ifmt_ctx, 0, filename, 0);
+ return 0;
+}
+
+static int open_output_file(const char *filename)
+{
+ AVStream *out_stream;
+ AVStream *in_stream;
+ AVCodecContext *dec_ctx, *enc_ctx;
+ AVCodec *encoder;
+ int ret;
+ unsigned int i;
+
+ ofmt_ctx = NULL;
+ avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
+ if (!ofmt_ctx) {
+ av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
+ return AVERROR_UNKNOWN;
+ }
+
+
+ for (i = 0; i < ifmt_ctx->nb_streams; i++) {
+ out_stream = avformat_new_stream(ofmt_ctx, NULL);
+ if (!out_stream) {
+ av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
+ return AVERROR_UNKNOWN;
+ }
+
+ in_stream = ifmt_ctx->streams[i];
+ dec_ctx = in_stream->codec;
+ enc_ctx = out_stream->codec;
+
+ if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
+ || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
+ /* in this example, we choose transcoding to same codec */
+ encoder = avcodec_find_encoder(dec_ctx->codec_id);
+ if (!encoder) {
+ av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* In this example, we transcode to same properties (picture size,
+ * sample rate etc.). These properties can be changed for output
+ * streams easily using filters */
+ if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+ enc_ctx->height = dec_ctx->height;
+ enc_ctx->width = dec_ctx->width;
+ enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
+ /* take first format from list of supported formats */
+ enc_ctx->pix_fmt = encoder->pix_fmts[0];
+ /* video time_base can be set to whatever is handy and supported by encoder */
+ enc_ctx->time_base = dec_ctx->time_base;
+ } else {
+ enc_ctx->sample_rate = dec_ctx->sample_rate;
+ enc_ctx->channel_layout = dec_ctx->channel_layout;
+ enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
+ /* take first format from list of supported formats */
+ enc_ctx->sample_fmt = encoder->sample_fmts[0];
+ enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
+ }
+
+ /* Third parameter can be used to pass settings to encoder */
+ ret = avcodec_open2(enc_ctx, encoder, NULL);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
+ return ret;
+ }
+ } else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
+ av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
+ return AVERROR_INVALIDDATA;
+ } else {
+ /* if this stream must be remuxed */
+ ret = avcodec_copy_context(ofmt_ctx->streams[i]->codec,
+ ifmt_ctx->streams[i]->codec);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n");
+ return ret;
+ }
+ }
+
+ if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
++ enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+
+ }
+ av_dump_format(ofmt_ctx, 0, filename, 1);
+
+ if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
+ ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
+ return ret;
+ }
+ }
+
+ /* init muxer, write output file header */
+ ret = avformat_write_header(ofmt_ctx, NULL);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
+ AVCodecContext *enc_ctx, const char *filter_spec)
+{
+ char args[512];
+ int ret = 0;
+ AVFilter *buffersrc = NULL;
+ AVFilter *buffersink = NULL;
+ AVFilterContext *buffersrc_ctx = NULL;
+ AVFilterContext *buffersink_ctx = NULL;
+ AVFilterInOut *outputs = avfilter_inout_alloc();
+ AVFilterInOut *inputs = avfilter_inout_alloc();
+ AVFilterGraph *filter_graph = avfilter_graph_alloc();
+
+ if (!outputs || !inputs || !filter_graph) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+ buffersrc = avfilter_get_by_name("buffer");
+ buffersink = avfilter_get_by_name("buffersink");
+ if (!buffersrc || !buffersink) {
+ av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
+ ret = AVERROR_UNKNOWN;
+ goto end;
+ }
+
+ snprintf(args, sizeof(args),
+ "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
+ dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
+ dec_ctx->time_base.num, dec_ctx->time_base.den,
+ dec_ctx->sample_aspect_ratio.num,
+ dec_ctx->sample_aspect_ratio.den);
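+ /* e.g. for a 1280x720 yuv420p 25 fps stream this yields
+ "video_size=1280x720:pix_fmt=0:time_base=1/25:pixel_aspect=1/1"
+ (pix_fmt is the numeric AV_PIX_FMT_* value; 0 is AV_PIX_FMT_YUV420P) */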
+
+ ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
+ args, NULL, filter_graph);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
+ goto end;
+ }
+
+ ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
+ NULL, NULL, filter_graph);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
+ goto end;
+ }
+
+ ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
+ (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
+ AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
+ goto end;
+ }
+ } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
+ buffersrc = avfilter_get_by_name("abuffer");
+ buffersink = avfilter_get_by_name("abuffersink");
+ if (!buffersrc || !buffersink) {
+ av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
+ ret = AVERROR_UNKNOWN;
+ goto end;
+ }
+
+ if (!dec_ctx->channel_layout)
+ dec_ctx->channel_layout =
+ av_get_default_channel_layout(dec_ctx->channels);
+ snprintf(args, sizeof(args),
+ "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
+ dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
+ av_get_sample_fmt_name(dec_ctx->sample_fmt),
+ dec_ctx->channel_layout);
+ ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
+ args, NULL, filter_graph);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
+ goto end;
+ }
+
+ ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
+ NULL, NULL, filter_graph);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
+ goto end;
+ }
+
+ ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
+ (uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
+ AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
+ goto end;
+ }
+
+ ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
+ (uint8_t*)&enc_ctx->channel_layout,
+ sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
+ goto end;
+ }
+
+ ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
+ (uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
+ AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
+ goto end;
+ }
+ } else {
+ ret = AVERROR_UNKNOWN;
+ goto end;
+ }
+
+ /* Endpoints for the filter graph. */
+ outputs->name = av_strdup("in");
+ outputs->filter_ctx = buffersrc_ctx;
+ outputs->pad_idx = 0;
+ outputs->next = NULL;
+
+ inputs->name = av_strdup("out");
+ inputs->filter_ctx = buffersink_ctx;
+ inputs->pad_idx = 0;
+ inputs->next = NULL;
+
+ if (!outputs->name || !inputs->name) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
+ &inputs, &outputs, NULL)) < 0)
+ goto end;
+
+ if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
+ goto end;
+
+ /* Fill FilteringContext */
+ fctx->buffersrc_ctx = buffersrc_ctx;
+ fctx->buffersink_ctx = buffersink_ctx;
+ fctx->filter_graph = filter_graph;
+
+end:
+ avfilter_inout_free(&inputs);
+ avfilter_inout_free(&outputs);
+
+ return ret;
+}
+
+static int init_filters(void)
+{
+ const char *filter_spec;
+ unsigned int i;
+ int ret;
+ filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
+ if (!filter_ctx)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < ifmt_ctx->nb_streams; i++) {
+ filter_ctx[i].buffersrc_ctx = NULL;
+ filter_ctx[i].buffersink_ctx = NULL;
+ filter_ctx[i].filter_graph = NULL;
+ if (!(ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
+ || ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO))
+ continue;
+
+
+ if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
+ filter_spec = "null"; /* passthrough (dummy) filter for video */
+ else
+ filter_spec = "anull"; /* passthrough (dummy) filter for audio */
+ ret = init_filter(&filter_ctx[i], ifmt_ctx->streams[i]->codec,
+ ofmt_ctx->streams[i]->codec, filter_spec);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
+ int ret;
+ int got_frame_local;
+ AVPacket enc_pkt;
+ int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
+ (ifmt_ctx->streams[stream_index]->codec->codec_type ==
+ AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;
+
+ if (!got_frame)
+ got_frame = &got_frame_local;
+
+ av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
+ /* encode filtered frame */
+ enc_pkt.data = NULL;
+ enc_pkt.size = 0;
+ av_init_packet(&enc_pkt);
+ ret = enc_func(ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
+ filt_frame, got_frame);
+ av_frame_free(&filt_frame);
+ if (ret < 0)
+ return ret;
+ if (!(*got_frame))
+ return 0;
+
+ /* prepare packet for muxing */
+ enc_pkt.stream_index = stream_index;
+ av_packet_rescale_ts(&enc_pkt,
+ ofmt_ctx->streams[stream_index]->codec->time_base,
+ ofmt_ctx->streams[stream_index]->time_base);
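+ /* the encoder produced timestamps in the codec context's time base; the muxer expects them in the stream's time base, hence the rescale */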
+
+ av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
+ /* mux encoded frame */
+ ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
+ return ret;
+}
+
+static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
+{
+ int ret;
+ AVFrame *filt_frame;
+
+ av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
+ /* push the decoded frame into the filtergraph */
+ ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
+ frame, 0);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
+ return ret;
+ }
+
+ /* pull filtered frames from the filtergraph */
+ while (1) {
+ filt_frame = av_frame_alloc();
+ if (!filt_frame) {
+ ret = AVERROR(ENOMEM);
+ break;
+ }
+ av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
+ ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
+ filt_frame);
+ if (ret < 0) {
+ /* if no more frames for output - returns AVERROR(EAGAIN)
+ * if flushed and no more frames for output - returns AVERROR_EOF
+ * rewrite retcode to 0 to show it as normal procedure completion
+ */
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+ ret = 0;
+ av_frame_free(&filt_frame);
+ break;
+ }
+
+ filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
+ ret = encode_write_frame(filt_frame, stream_index, NULL);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+static int flush_encoder(unsigned int stream_index)
+{
+ int ret;
+ int got_frame;
+
+ if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities &
+ CODEC_CAP_DELAY))
+ return 0;
+
+ while (1) {
+ av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
+ ret = encode_write_frame(NULL, stream_index, &got_frame);
+ if (ret < 0)
+ break;
+ if (!got_frame)
+ return 0;
+ }
+ return ret;
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+ AVPacket packet = { .data = NULL, .size = 0 };
+ AVFrame *frame = NULL;
+ enum AVMediaType type;
+ unsigned int stream_index;
+ unsigned int i;
+ int got_frame;
+ int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
+
+ if (argc != 3) {
+ av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
+ return 1;
+ }
+
+ av_register_all();
+ avfilter_register_all();
+
+ if ((ret = open_input_file(argv[1])) < 0)
+ goto end;
+ if ((ret = open_output_file(argv[2])) < 0)
+ goto end;
+ if ((ret = init_filters()) < 0)
+ goto end;
+
+ /* read all packets */
+ while (1) {
+ if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
+ break;
+ stream_index = packet.stream_index;
+ type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
+ av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
+ stream_index);
+
+ if (filter_ctx[stream_index].filter_graph) {
+ av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
+ frame = av_frame_alloc();
+ if (!frame) {
+ ret = AVERROR(ENOMEM);
+ break;
+ }
+ av_packet_rescale_ts(&packet,
+ ifmt_ctx->streams[stream_index]->time_base,
+ ifmt_ctx->streams[stream_index]->codec->time_base);
+ dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
+ avcodec_decode_audio4;
+ ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
+ &got_frame, &packet);
+ if (ret < 0) {
+ av_frame_free(&frame);
+ av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
+ break;
+ }
+
+ if (got_frame) {
+ frame->pts = av_frame_get_best_effort_timestamp(frame);
+ ret = filter_encode_write_frame(frame, stream_index);
+ av_frame_free(&frame);
+ if (ret < 0)
+ goto end;
+ } else {
+ av_frame_free(&frame);
+ }
+ } else {
+ /* remux this frame without reencoding */
+ av_packet_rescale_ts(&packet,
+ ifmt_ctx->streams[stream_index]->time_base,
+ ofmt_ctx->streams[stream_index]->time_base);
+
+ ret = av_interleaved_write_frame(ofmt_ctx, &packet);
+ if (ret < 0)
+ goto end;
+ }
+ av_free_packet(&packet);
+ }
+
+ /* flush filters and encoders */
+ for (i = 0; i < ifmt_ctx->nb_streams; i++) {
+ /* flush filter */
+ if (!filter_ctx[i].filter_graph)
+ continue;
+ ret = filter_encode_write_frame(NULL, i);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
+ goto end;
+ }
+
+ /* flush encoder */
+ ret = flush_encoder(i);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
+ goto end;
+ }
+ }
+
+ av_write_trailer(ofmt_ctx);
+end:
+ av_free_packet(&packet);
+ av_frame_free(&frame);
+ for (i = 0; i < ifmt_ctx->nb_streams; i++) {
+ avcodec_close(ifmt_ctx->streams[i]->codec);
+ if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
+ avcodec_close(ofmt_ctx->streams[i]->codec);
+ if (filter_ctx && filter_ctx[i].filter_graph)
+ avfilter_graph_free(&filter_ctx[i].filter_graph);
+ }
+ av_free(filter_ctx);
+ avformat_close_input(&ifmt_ctx);
+ if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
+ avio_closep(&ofmt_ctx->pb);
+ avformat_free_context(ofmt_ctx);
+
+ if (ret < 0)
+ av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));
+
+ return ret ? 1 : 0;
+}
--- /dev/null
+++ b/ffmpeg.c
- if (enc->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME) &&
+/*
+ * Copyright (c) 2000-2003 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * multimedia converter based on the FFmpeg libraries
+ */
+
+#include "config.h"
+#include <ctype.h>
+#include <string.h>
+#include <math.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <limits.h>
+#include <stdint.h>
+
+#if HAVE_ISATTY
+#if HAVE_IO_H
+#include <io.h>
+#endif
+#if HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#endif
+
+#include "libavformat/avformat.h"
+#include "libavdevice/avdevice.h"
+#include "libswresample/swresample.h"
+#include "libavutil/opt.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/samplefmt.h"
+#include "libavutil/fifo.h"
+#include "libavutil/internal.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/dict.h"
+#include "libavutil/mathematics.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/avstring.h"
+#include "libavutil/libm.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/timestamp.h"
+#include "libavutil/bprint.h"
+#include "libavutil/time.h"
+#include "libavutil/threadmessage.h"
+#include "libavcodec/mathops.h"
+#include "libavformat/os_support.h"
+
+# include "libavfilter/avcodec.h"
+# include "libavfilter/avfilter.h"
+# include "libavfilter/buffersrc.h"
+# include "libavfilter/buffersink.h"
+
+#if HAVE_SYS_RESOURCE_H
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/resource.h>
+#elif HAVE_GETPROCESSTIMES
+#include <windows.h>
+#endif
+#if HAVE_GETPROCESSMEMORYINFO
+#include <windows.h>
+#include <psapi.h>
+#endif
+#if HAVE_SETCONSOLECTRLHANDLER
+#include <windows.h>
+#endif
+
+
+#if HAVE_SYS_SELECT_H
+#include <sys/select.h>
+#endif
+
+#if HAVE_TERMIOS_H
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <termios.h>
+#elif HAVE_KBHIT
+#include <conio.h>
+#endif
+
+#if HAVE_PTHREADS
+#include <pthread.h>
+#endif
+
+#include <time.h>
+
+#include "ffmpeg.h"
+#include "cmdutils.h"
+
+#include "libavutil/avassert.h"
+
+const char program_name[] = "ffmpeg";
+const int program_birth_year = 2000;
+
+static FILE *vstats_file;
+
+const char *const forced_keyframes_const_names[] = {
+ "n",
+ "n_forced",
+ "prev_forced_n",
+ "prev_forced_t",
+ "t",
+ NULL
+};
+
+static void do_video_stats(OutputStream *ost, int frame_size);
+static int64_t getutime(void);
+static int64_t getmaxrss(void);
+
+static int run_as_daemon = 0;
+static int nb_frames_dup = 0;
+static int nb_frames_drop = 0;
+static int64_t decode_error_stat[2];
+
+static int current_time;
+AVIOContext *progress_avio = NULL;
+
+static uint8_t *subtitle_out;
+
+InputStream **input_streams = NULL;
+int nb_input_streams = 0;
+InputFile **input_files = NULL;
+int nb_input_files = 0;
+
+OutputStream **output_streams = NULL;
+int nb_output_streams = 0;
+OutputFile **output_files = NULL;
+int nb_output_files = 0;
+
+FilterGraph **filtergraphs;
+int nb_filtergraphs;
+
+#if HAVE_TERMIOS_H
+
+/* init terminal so that we can grab keys */
+static struct termios oldtty;
+static int restore_tty;
+#endif
+
+#if HAVE_PTHREADS
+static void free_input_threads(void);
+#endif
+
+/* sub2video hack:
+ Convert subtitles to video with alpha to insert them in filter graphs.
+ This is a temporary solution until libavfilter gets real subtitles support.
+ */
+
+static int sub2video_get_blank_frame(InputStream *ist)
+{
+ int ret;
+ AVFrame *frame = ist->sub2video.frame;
+
+ av_frame_unref(frame);
+ ist->sub2video.frame->width = ist->sub2video.w;
+ ist->sub2video.frame->height = ist->sub2video.h;
+ ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
+ if ((ret = av_frame_get_buffer(frame, 32)) < 0)
+ return ret;
+ memset(frame->data[0], 0, frame->height * frame->linesize[0]);
+ return 0;
+}
+
+static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
+ AVSubtitleRect *r)
+{
+ uint32_t *pal, *dst2;
+ uint8_t *src, *src2;
+ int x, y;
+
+ if (r->type != SUBTITLE_BITMAP) {
+ av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
+ return;
+ }
+ if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
+ av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle overflowing\n");
+ return;
+ }
+
+ dst += r->y * dst_linesize + r->x * 4;
+ src = r->pict.data[0];
+ pal = (uint32_t *)r->pict.data[1];
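+ /* the bitmap rectangle is paletted: data[0] holds 8-bit palette indices, data[1] the 32-bit ARGB palette;
+ expand each index into a full pixel of the RGB32 canvas */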
+ for (y = 0; y < r->h; y++) {
+ dst2 = (uint32_t *)dst;
+ src2 = src;
+ for (x = 0; x < r->w; x++)
+ *(dst2++) = pal[*(src2++)];
+ dst += dst_linesize;
+ src += r->pict.linesize[0];
+ }
+}
+
+static void sub2video_push_ref(InputStream *ist, int64_t pts)
+{
+ AVFrame *frame = ist->sub2video.frame;
+ int i;
+
+ av_assert1(frame->data[0]);
+ ist->sub2video.last_pts = frame->pts = pts;
+ for (i = 0; i < ist->nb_filters; i++)
+ av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
+ AV_BUFFERSRC_FLAG_KEEP_REF |
+ AV_BUFFERSRC_FLAG_PUSH);
+}
+
+static void sub2video_update(InputStream *ist, AVSubtitle *sub)
+{
+ int w = ist->sub2video.w, h = ist->sub2video.h;
+ AVFrame *frame = ist->sub2video.frame;
+ uint8_t *dst;
+ int dst_linesize;
+ int num_rects, i;
+ int64_t pts, end_pts;
+
+ if (!frame)
+ return;
+ if (sub) {
+ pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
+ AV_TIME_BASE_Q, ist->st->time_base);
+ end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
+ AV_TIME_BASE_Q, ist->st->time_base);
+ num_rects = sub->num_rects;
+ } else {
+ pts = ist->sub2video.end_pts;
+ end_pts = INT64_MAX;
+ num_rects = 0;
+ }
+ if (sub2video_get_blank_frame(ist) < 0) {
+ av_log(ist->dec_ctx, AV_LOG_ERROR,
+ "Impossible to get a blank canvas.\n");
+ return;
+ }
+ dst = frame->data [0];
+ dst_linesize = frame->linesize[0];
+ for (i = 0; i < num_rects; i++)
+ sub2video_copy_rect(dst, dst_linesize, w, h, sub->rects[i]);
+ sub2video_push_ref(ist, pts);
+ ist->sub2video.end_pts = end_pts;
+}
+
+static void sub2video_heartbeat(InputStream *ist, int64_t pts)
+{
+ InputFile *infile = input_files[ist->file_index];
+ int i, j, nb_reqs;
+ int64_t pts2;
+
+ /* When a frame is read from a file, examine all sub2video streams in
+ the same file and send the sub2video frame again. Otherwise, decoded
+ video frames could be accumulating in the filter graph while a filter
+ (possibly overlay) is desperately waiting for a subtitle frame. */
+ for (i = 0; i < infile->nb_streams; i++) {
+ InputStream *ist2 = input_streams[infile->ist_index + i];
+ if (!ist2->sub2video.frame)
+ continue;
+ /* subtitles seem to be usually muxed ahead of other streams;
+ if not, subtracting a larger time here is necessary */
+ pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
+ /* do not send the heartbeat frame if the subtitle is already ahead */
+ if (pts2 <= ist2->sub2video.last_pts)
+ continue;
+ if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
+ sub2video_update(ist2, NULL);
+ for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
+ nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
+ if (nb_reqs)
+ sub2video_push_ref(ist2, pts2);
+ }
+}
+
+static void sub2video_flush(InputStream *ist)
+{
+ int i;
+
+ if (ist->sub2video.end_pts < INT64_MAX)
+ sub2video_update(ist, NULL);
+ for (i = 0; i < ist->nb_filters; i++)
+ av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
+}
+
+/* end of sub2video hack */
+
+static void term_exit_sigsafe(void)
+{
+#if HAVE_TERMIOS_H
+ if(restore_tty)
+ tcsetattr (0, TCSANOW, &oldtty);
+#endif
+}
+
+void term_exit(void)
+{
+ av_log(NULL, AV_LOG_QUIET, "%s", "");
+ term_exit_sigsafe();
+}
+
+static volatile int received_sigterm = 0;
+static volatile int received_nb_signals = 0;
+static volatile int transcode_init_done = 0;
+static volatile int ffmpeg_exited = 0;
+static int main_return_code = 0;
+
+static void
+sigterm_handler(int sig)
+{
+ received_sigterm = sig;
+ received_nb_signals++;
+ term_exit_sigsafe();
+ if(received_nb_signals > 3) {
+ write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
+ strlen("Received > 3 system signals, hard exiting\n"));
+
+ exit(123);
+ }
+}
+
+#if HAVE_SETCONSOLECTRLHANDLER
+static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
+{
+ av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
+
+ switch (fdwCtrlType)
+ {
+ case CTRL_C_EVENT:
+ case CTRL_BREAK_EVENT:
+ sigterm_handler(SIGINT);
+ return TRUE;
+
+ case CTRL_CLOSE_EVENT:
+ case CTRL_LOGOFF_EVENT:
+ case CTRL_SHUTDOWN_EVENT:
+ sigterm_handler(SIGTERM);
+ /* Basically, with these 3 events, when we return from this method the
+ process is hard terminated, so stall as long as we need to
+ to try and let the main thread(s) clean up and gracefully terminate
+ (we have at most 5 seconds, but should be done far before that). */
+ while (!ffmpeg_exited) {
+ Sleep(0);
+ }
+ return TRUE;
+
+ default:
+ av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
+ return FALSE;
+ }
+}
+#endif
+
+void term_init(void)
+{
+#if HAVE_TERMIOS_H
+ if(!run_as_daemon){
+ struct termios tty;
+ int istty = 1;
+#if HAVE_ISATTY
+ istty = isatty(0) && isatty(2);
+#endif
+ if (istty && tcgetattr (0, &tty) == 0) {
+ oldtty = tty;
+ restore_tty = 1;
+
+ tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
+ |INLCR|IGNCR|ICRNL|IXON);
+ tty.c_oflag |= OPOST;
+ tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
+ tty.c_cflag &= ~(CSIZE|PARENB);
+ tty.c_cflag |= CS8;
+ tty.c_cc[VMIN] = 1;
+ tty.c_cc[VTIME] = 0;
+
+ tcsetattr (0, TCSANOW, &tty);
+ }
+ signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
+ }
+#endif
+
+ signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
+ signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
+#ifdef SIGXCPU
+ signal(SIGXCPU, sigterm_handler);
+#endif
+#if HAVE_SETCONSOLECTRLHANDLER
+ SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
+#endif
+}
+
+/* read a key without blocking */
+static int read_key(void)
+{
+ unsigned char ch;
+#if HAVE_TERMIOS_H
+ int n = 1;
+ struct timeval tv;
+ fd_set rfds;
+
+ FD_ZERO(&rfds);
+ FD_SET(0, &rfds);
+ tv.tv_sec = 0;
+ tv.tv_usec = 0;
+ n = select(1, &rfds, NULL, NULL, &tv);
+ if (n > 0) {
+ n = read(0, &ch, 1);
+ if (n == 1)
+ return ch;
+
+ return n;
+ }
+#elif HAVE_KBHIT
+# if HAVE_PEEKNAMEDPIPE
+ static int is_pipe;
+ static HANDLE input_handle;
+ DWORD dw, nchars;
+ if(!input_handle){
+ input_handle = GetStdHandle(STD_INPUT_HANDLE);
+ is_pipe = !GetConsoleMode(input_handle, &dw);
+ }
+
+ if (stdin->_cnt > 0) {
+ read(0, &ch, 1);
+ return ch;
+ }
+ if (is_pipe) {
+ /* When running under a GUI, you will end here. */
+ if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
+ // input pipe may have been closed by the program that ran ffmpeg
+ return -1;
+ }
+ //Read it
+ if(nchars != 0) {
+ read(0, &ch, 1);
+ return ch;
+ }else{
+ return -1;
+ }
+ }
+# endif
+ if(kbhit())
+ return(getch());
+#endif
+ return -1;
+}
+
+static int decode_interrupt_cb(void *ctx)
+{
+ return received_nb_signals > transcode_init_done;
+}
+
+const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
+
+static void ffmpeg_cleanup(int ret)
+{
+ int i, j;
+
+ if (do_benchmark) {
+ int maxrss = getmaxrss() / 1024;
+ av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
+ }
+
+ for (i = 0; i < nb_filtergraphs; i++) {
+ FilterGraph *fg = filtergraphs[i];
+ avfilter_graph_free(&fg->graph);
+ for (j = 0; j < fg->nb_inputs; j++) {
+ av_freep(&fg->inputs[j]->name);
+ av_freep(&fg->inputs[j]);
+ }
+ av_freep(&fg->inputs);
+ for (j = 0; j < fg->nb_outputs; j++) {
+ av_freep(&fg->outputs[j]->name);
+ av_freep(&fg->outputs[j]);
+ }
+ av_freep(&fg->outputs);
+ av_freep(&fg->graph_desc);
+
+ av_freep(&filtergraphs[i]);
+ }
+ av_freep(&filtergraphs);
+
+ av_freep(&subtitle_out);
+
+ /* close files */
+ for (i = 0; i < nb_output_files; i++) {
+ OutputFile *of = output_files[i];
+ AVFormatContext *s;
+ if (!of)
+ continue;
+ s = of->ctx;
+ if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
+ avio_closep(&s->pb);
+ avformat_free_context(s);
+ av_dict_free(&of->opts);
+
+ av_freep(&output_files[i]);
+ }
+ for (i = 0; i < nb_output_streams; i++) {
+ OutputStream *ost = output_streams[i];
+ AVBitStreamFilterContext *bsfc;
+
+ if (!ost)
+ continue;
+
+ bsfc = ost->bitstream_filters;
+ while (bsfc) {
+ AVBitStreamFilterContext *next = bsfc->next;
+ av_bitstream_filter_close(bsfc);
+ bsfc = next;
+ }
+ ost->bitstream_filters = NULL;
+ av_frame_free(&ost->filtered_frame);
+ av_frame_free(&ost->last_frame);
+
+ av_parser_close(ost->parser);
+
+ av_freep(&ost->forced_keyframes);
+ av_expr_free(ost->forced_keyframes_pexpr);
+ av_freep(&ost->avfilter);
+ av_freep(&ost->logfile_prefix);
+
+ av_freep(&ost->audio_channels_map);
+ ost->audio_channels_mapped = 0;
+
+ avcodec_free_context(&ost->enc_ctx);
+
+ av_freep(&output_streams[i]);
+ }
+#if HAVE_PTHREADS
+ free_input_threads();
+#endif
+ for (i = 0; i < nb_input_files; i++) {
+ avformat_close_input(&input_files[i]->ctx);
+ av_freep(&input_files[i]);
+ }
+ for (i = 0; i < nb_input_streams; i++) {
+ InputStream *ist = input_streams[i];
+
+ av_frame_free(&ist->decoded_frame);
+ av_frame_free(&ist->filter_frame);
+ av_dict_free(&ist->decoder_opts);
+ avsubtitle_free(&ist->prev_sub.subtitle);
+ av_frame_free(&ist->sub2video.frame);
+ av_freep(&ist->filters);
+ av_freep(&ist->hwaccel_device);
+
+ avcodec_free_context(&ist->dec_ctx);
+
+ av_freep(&input_streams[i]);
+ }
+
+ if (vstats_file)
+ fclose(vstats_file);
+ av_freep(&vstats_filename);
+
+ av_freep(&input_streams);
+ av_freep(&input_files);
+ av_freep(&output_streams);
+ av_freep(&output_files);
+
+ uninit_opts();
+
+ avformat_network_deinit();
+
+ if (received_sigterm) {
+ av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
+ (int) received_sigterm);
+ } else if (ret && transcode_init_done) {
+ av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
+ }
+ term_exit();
+ ffmpeg_exited = 1;
+}
+
+void remove_avoptions(AVDictionary **a, AVDictionary *b)
+{
+ AVDictionaryEntry *t = NULL;
+
+ while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
+ av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
+ }
+}
+
+void assert_avoptions(AVDictionary *m)
+{
+ AVDictionaryEntry *t;
+ if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
+ av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
+ exit_program(1);
+ }
+}
+
+static void abort_codec_experimental(AVCodec *c, int encoder)
+{
+ exit_program(1);
+}
+
+static void update_benchmark(const char *fmt, ...)
+{
+ if (do_benchmark_all) {
+ int64_t t = getutime();
+ va_list va;
+ char buf[1024];
+
+ if (fmt) {
+ va_start(va, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, va);
+ va_end(va);
+ av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
+ }
+ current_time = t;
+ }
+}
+
+static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
+{
+ int i;
+ for (i = 0; i < nb_output_streams; i++) {
+ OutputStream *ost2 = output_streams[i];
+ ost2->finished |= ost == ost2 ? this_stream : others;
+ }
+}
+
+static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
+{
+ AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
+ AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
+ int ret;
+
+ if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
+ ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (ost->st->codec->extradata) {
+ memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
+ ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
+ }
+ }
+
+ if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
+ (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
+ pkt->pts = pkt->dts = AV_NOPTS_VALUE;
+
+ /*
+ * Audio encoders may split the packets -- #frames in != #packets out.
+ * But there is no reordering, so we can limit the number of output packets
+ * by simply dropping them here.
+ * Counting encoded video frames needs to be done separately because of
+ * reordering, see do_video_out()
+ */
+ if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
+ if (ost->frame_number >= ost->max_frames) {
+ av_free_packet(pkt);
+ return;
+ }
+ ost->frame_number++;
+ }
+ if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+ uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
+ NULL);
+ ost->quality = sd ? AV_RL32(sd) : -1;
+ }
+
+ if (bsfc)
+ av_packet_split_side_data(pkt);
+
+ while (bsfc) {
+ AVPacket new_pkt = *pkt;
+ AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
+ bsfc->filter->name,
+ NULL, 0);
+ int a = av_bitstream_filter_filter(bsfc, avctx,
+ bsf_arg ? bsf_arg->value : NULL,
+ &new_pkt.data, &new_pkt.size,
+ pkt->data, pkt->size,
+ pkt->flags & AV_PKT_FLAG_KEY);
+ if(a == 0 && new_pkt.data != pkt->data && new_pkt.destruct) {
+ uint8_t *t = av_malloc(new_pkt.size + FF_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
+ if(t) {
+ memcpy(t, new_pkt.data, new_pkt.size);
+ memset(t + new_pkt.size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
+ new_pkt.data = t;
+ new_pkt.buf = NULL;
+ a = 1;
+ } else
+ a = AVERROR(ENOMEM);
+ }
+ if (a > 0) {
+ pkt->side_data = NULL;
+ pkt->side_data_elems = 0;
+ av_free_packet(pkt);
+ new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
+ av_buffer_default_free, NULL, 0);
+ if (!new_pkt.buf)
+ exit_program(1);
+ } else if (a < 0) {
+ new_pkt = *pkt;
+ av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
+ bsfc->filter->name, pkt->stream_index,
+ avctx->codec ? avctx->codec->name : "copy");
+ print_error("", a);
+ if (exit_on_error)
+ exit_program(1);
+ }
+ *pkt = new_pkt;
+
+ bsfc = bsfc->next;
+ }
+
+ if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
+ if (pkt->dts != AV_NOPTS_VALUE &&
+ pkt->pts != AV_NOPTS_VALUE &&
+ pkt->dts > pkt->pts) {
+ av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
+ pkt->dts, pkt->pts,
+ ost->file_index, ost->st->index);
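+ /* sum of the three candidates minus their min and max leaves the median of
+ {pts, dts, last_mux_dts + 1}, which is used for both timestamps */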
+ pkt->pts =
+ pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
+ - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
+ - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
+ }
+ if(
+ (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
+ pkt->dts != AV_NOPTS_VALUE &&
+ ost->last_mux_dts != AV_NOPTS_VALUE) {
+ int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
+ if (pkt->dts < max) {
+ int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
+ av_log(s, loglevel, "Non-monotonous DTS in output stream "
+ "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
+ ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
+ if (exit_on_error) {
+ av_log(NULL, AV_LOG_FATAL, "aborting.\n");
+ exit_program(1);
+ }
+ av_log(s, loglevel, "changing to %"PRId64". This may result "
+ "in incorrect timestamps in the output file.\n",
+ max);
+ if(pkt->pts >= pkt->dts)
+ pkt->pts = FFMAX(pkt->pts, max);
+ pkt->dts = max;
+ }
+ }
+ }
+ ost->last_mux_dts = pkt->dts;
+
+ ost->data_size += pkt->size;
+ ost->packets_written++;
+
+ pkt->stream_index = ost->index;
+
+ if (debug_ts) {
+ av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
+ "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
+ av_get_media_type_string(ost->enc_ctx->codec_type),
+ av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
+ av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
+ pkt->size
+ );
+ }
+
+ ret = av_interleaved_write_frame(s, pkt);
+ if (ret < 0) {
+ print_error("av_interleaved_write_frame()", ret);
+ main_return_code = 1;
+ close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
+ }
+ av_free_packet(pkt);
+}
+
+static void close_output_stream(OutputStream *ost)
+{
+ OutputFile *of = output_files[ost->file_index];
+
+ ost->finished |= ENCODER_FINISHED;
+ if (of->shortest) {
+ int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
+ of->recording_time = FFMIN(of->recording_time, end);
+ }
+}
+
+static int check_recording_time(OutputStream *ost)
+{
+ OutputFile *of = output_files[ost->file_index];
+
+ if (of->recording_time != INT64_MAX &&
+ av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
+ AV_TIME_BASE_Q) >= 0) {
+ close_output_stream(ost);
+ return 0;
+ }
+ return 1;
+}
+
+static void do_audio_out(AVFormatContext *s, OutputStream *ost,
+ AVFrame *frame)
+{
+ AVCodecContext *enc = ost->enc_ctx;
+ AVPacket pkt;
+ int got_packet = 0;
+
+ av_init_packet(&pkt);
+ pkt.data = NULL;
+ pkt.size = 0;
+
+ if (!check_recording_time(ost))
+ return;
+
+ if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
+ frame->pts = ost->sync_opts;
+ ost->sync_opts = frame->pts + frame->nb_samples;
+ ost->samples_encoded += frame->nb_samples;
+ ost->frames_encoded++;
+
+ av_assert0(pkt.size || !pkt.data);
+ update_benchmark(NULL);
+ if (debug_ts) {
+ av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
+ "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
+ av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
+ enc->time_base.num, enc->time_base.den);
+ }
+
+ if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
+ av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
+ exit_program(1);
+ }
+ update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
+
+ if (got_packet) {
+ av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
+
+ if (debug_ts) {
+ av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
+ "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
+ av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
+ av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
+ }
+
+ write_frame(s, &pkt, ost);
+ }
+}
+
+static void do_subtitle_out(AVFormatContext *s,
+ OutputStream *ost,
+ InputStream *ist,
+ AVSubtitle *sub)
+{
+ int subtitle_out_max_size = 1024 * 1024;
+ int subtitle_out_size, nb, i;
+ AVCodecContext *enc;
+ AVPacket pkt;
+ int64_t pts;
+
+ if (sub->pts == AV_NOPTS_VALUE) {
+ av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
+ if (exit_on_error)
+ exit_program(1);
+ return;
+ }
+
+ enc = ost->enc_ctx;
+
+ if (!subtitle_out) {
+ subtitle_out = av_malloc(subtitle_out_max_size);
+ if (!subtitle_out) {
+ av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
+ exit_program(1);
+ }
+ }
+
+ /* Note: DVB subtitles need one packet to draw them and another
+ packet to clear them */
+ /* XXX: signal it in the codec context ? */
+ if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
+ nb = 2;
+ else
+ nb = 1;
+
+ /* shift timestamp to honor -ss and make check_recording_time() work with -t */
+ pts = sub->pts;
+ if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
+ pts -= output_files[ost->file_index]->start_time;
+ for (i = 0; i < nb; i++) {
+ unsigned save_num_rects = sub->num_rects;
+
+ ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
+ if (!check_recording_time(ost))
+ return;
+
+ sub->pts = pts;
+ // start_display_time is required to be 0
+ sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
+ sub->end_display_time -= sub->start_display_time;
+ sub->start_display_time = 0;
+ if (i == 1)
+ sub->num_rects = 0;
+
+ ost->frames_encoded++;
+
+ subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
+ subtitle_out_max_size, sub);
+ if (i == 1)
+ sub->num_rects = save_num_rects;
+ if (subtitle_out_size < 0) {
+ av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
+ exit_program(1);
+ }
+
+ av_init_packet(&pkt);
+ pkt.data = subtitle_out;
+ pkt.size = subtitle_out_size;
+ pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
+ pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
+ if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
+ /* XXX: the pts correction is handled here. Maybe handling
+ it in the codec would be better */
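+ /* display times are in milliseconds; multiplying by 90 converts them to the
+ 90 kHz clock that the DVB subtitle stream's time base uses here */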
+ if (i == 0)
+ pkt.pts += 90 * sub->start_display_time;
+ else
+ pkt.pts += 90 * sub->end_display_time;
+ }
+ pkt.dts = pkt.pts;
+ write_frame(s, &pkt, ost);
+ }
+}
+
+static void do_video_out(AVFormatContext *s,
+ OutputStream *ost,
+ AVFrame *next_picture,
+ double sync_ipts)
+{
+ int ret, format_video_sync;
+ AVPacket pkt;
+ AVCodecContext *enc = ost->enc_ctx;
+ AVCodecContext *mux_enc = ost->st->codec;
+ int nb_frames, nb0_frames, i;
+ double delta, delta0;
+ double duration = 0;
+ int frame_size = 0;
+ InputStream *ist = NULL;
+ AVFilterContext *filter = ost->filter->filter;
+
+ if (ost->source_index >= 0)
+ ist = input_streams[ost->source_index];
+
+ if (filter->inputs[0]->frame_rate.num > 0 &&
+ filter->inputs[0]->frame_rate.den > 0)
+ duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
+
+ if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
+ duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
+
+ if (!ost->filters_script &&
+ !ost->filters &&
+ next_picture &&
+ ist &&
+ lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
+ duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
+ }
+
+ if (!next_picture) {
+ //end, flushing
+ nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
+ ost->last_nb0_frames[1],
+ ost->last_nb0_frames[2]);
+ } else {
+ delta0 = sync_ipts - ost->sync_opts;
+ delta = delta0 + duration;
+
+ /* by default, we output a single frame */
+ nb0_frames = 0;
+ nb_frames = 1;
+
+ format_video_sync = video_sync_method;
+ if (format_video_sync == VSYNC_AUTO) {
+ if(!strcmp(s->oformat->name, "avi")) {
+ format_video_sync = VSYNC_VFR;
+ } else
+ format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
+ if ( ist
+ && format_video_sync == VSYNC_CFR
+ && input_files[ist->file_index]->ctx->nb_streams == 1
+ && input_files[ist->file_index]->input_ts_offset == 0) {
+ format_video_sync = VSYNC_VSCFR;
+ }
+ if (format_video_sync == VSYNC_CFR && copy_ts) {
+ format_video_sync = VSYNC_VSCFR;
+ }
+ }
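+ /* roughly: CFR duplicates or drops frames to hold a constant rate, VFR keeps timestamps
+ and only drops frames that would land on the same output timestamp, PASSTHROUGH/DROP emit
+ one output frame per input frame, and VSCFR is CFR anchored at the source's first timestamp */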
+
+ if (delta0 < 0 &&
+ delta > 0 &&
+ format_video_sync != VSYNC_PASSTHROUGH &&
+ format_video_sync != VSYNC_DROP) {
+ double cor = FFMIN(-delta0, duration);
+ if (delta0 < -0.6) {
+ av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
+ } else
+ av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);
+ sync_ipts += cor;
+ duration -= cor;
+ delta0 += cor;
+ }
+
+ switch (format_video_sync) {
+ case VSYNC_VSCFR:
+ if (ost->frame_number == 0 && delta - duration >= 0.5) {
+ av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
+ delta = duration;
+ delta0 = 0;
+ ost->sync_opts = lrint(sync_ipts);
+ }
+ case VSYNC_CFR:
+ // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
+ if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
+ nb_frames = 0;
+ } else if (delta < -1.1)
+ nb_frames = 0;
+ else if (delta > 1.1) {
+ nb_frames = lrintf(delta);
+ if (delta0 > 1.1)
+ nb0_frames = lrintf(delta0 - 0.6);
+ }
+ break;
+ case VSYNC_VFR:
+ if (delta <= -0.6)
+ nb_frames = 0;
+ else if (delta > 0.6)
+ ost->sync_opts = lrint(sync_ipts);
+ break;
+ case VSYNC_DROP:
+ case VSYNC_PASSTHROUGH:
+ ost->sync_opts = lrint(sync_ipts);
+ break;
+ default:
+ av_assert0(0);
+ }
+ }
+
+ nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
+ nb0_frames = FFMIN(nb0_frames, nb_frames);
+
+ memmove(ost->last_nb0_frames + 1,
+ ost->last_nb0_frames,
+ sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
+ ost->last_nb0_frames[0] = nb0_frames;
+
+ if (nb0_frames == 0 && ost->last_droped) {
+ nb_frames_drop++;
+ av_log(NULL, AV_LOG_VERBOSE,
+ "*** dropping frame %d from stream %d at ts %"PRId64"\n",
+ ost->frame_number, ost->st->index, ost->last_frame->pts);
+ }
+ if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
+ if (nb_frames > dts_error_threshold * 30) {
+ av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
+ nb_frames_drop++;
+ return;
+ }
+ nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
+ av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
+ }
+ ost->last_droped = nb_frames == nb0_frames && next_picture;
+
+ /* duplicates frame if needed */
+ for (i = 0; i < nb_frames; i++) {
+ AVFrame *in_picture;
+ av_init_packet(&pkt);
+ pkt.data = NULL;
+ pkt.size = 0;
+
+ if (i < nb0_frames && ost->last_frame) {
+ in_picture = ost->last_frame;
+ } else
+ in_picture = next_picture;
+
+ if (!in_picture)
+ return;
+
+ in_picture->pts = ost->sync_opts;
+
+#if 1
+ if (!check_recording_time(ost))
+#else
+ if (ost->frame_number >= ost->max_frames)
+#endif
+ return;
+
+ if (s->oformat->flags & AVFMT_RAWPICTURE &&
+ enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
+ /* Raw pictures are written as an AVPicture structure to avoid any
+ copies; we temporarily support the older method. */
+ if (in_picture->interlaced_frame)
+ mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
+ else
+ mux_enc->field_order = AV_FIELD_PROGRESSIVE;
+ pkt.data = (uint8_t *)in_picture;
+ pkt.size = sizeof(AVPicture);
+ pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
+ pkt.flags |= AV_PKT_FLAG_KEY;
+
+ write_frame(s, &pkt, ost);
+ } else {
+ int got_packet, forced_keyframe = 0;
+ double pts_time;
+
- if (enc->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME) &&
++ if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
+ ost->top_field_first >= 0)
+ in_picture->top_field_first = !!ost->top_field_first;
+
+ if (in_picture->interlaced_frame) {
+ if (enc->codec->id == AV_CODEC_ID_MJPEG)
+ mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
+ else
+ mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
+ } else
+ mux_enc->field_order = AV_FIELD_PROGRESSIVE;
+
+ in_picture->quality = enc->global_quality;
+ in_picture->pict_type = 0;
+
+ pts_time = in_picture->pts != AV_NOPTS_VALUE ?
+ in_picture->pts * av_q2d(enc->time_base) : NAN;
+ if (ost->forced_kf_index < ost->forced_kf_count &&
+ in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
+ ost->forced_kf_index++;
+ forced_keyframe = 1;
+ } else if (ost->forced_keyframes_pexpr) {
+ double res;
+ ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
+ res = av_expr_eval(ost->forced_keyframes_pexpr,
+ ost->forced_keyframes_expr_const_values, NULL);
+ av_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
+ ost->forced_keyframes_expr_const_values[FKF_N],
+ ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
+ ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
+ ost->forced_keyframes_expr_const_values[FKF_T],
+ ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
+ res);
+ if (res) {
+ forced_keyframe = 1;
+ ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
+ ost->forced_keyframes_expr_const_values[FKF_N];
+ ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
+ ost->forced_keyframes_expr_const_values[FKF_T];
+ ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
+ }
+
+ ost->forced_keyframes_expr_const_values[FKF_N] += 1;
+ } else if ( ost->forced_keyframes
+ && !strncmp(ost->forced_keyframes, "source", 6)
+ && in_picture->key_frame==1) {
+ forced_keyframe = 1;
+ }
+
+ if (forced_keyframe) {
+ in_picture->pict_type = AV_PICTURE_TYPE_I;
+ av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
+ }
+
+ update_benchmark(NULL);
+ if (debug_ts) {
+ av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
+ "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
+ av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
+ enc->time_base.num, enc->time_base.den);
+ }
+
+ ost->frames_encoded++;
+
+ ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
+ update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
+ exit_program(1);
+ }
+
+ if (got_packet) {
+ if (debug_ts) {
+ av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
+ "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
+ av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
+ av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
+ }
+
+ if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
+ pkt.pts = ost->sync_opts;
+
+ av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
+
+ if (debug_ts) {
+ av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
+ "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
+ av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
+ av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
+ }
+
+ frame_size = pkt.size;
+ write_frame(s, &pkt, ost);
+
+ /* if two pass, output log */
+ if (ost->logfile && enc->stats_out) {
+ fprintf(ost->logfile, "%s", enc->stats_out);
+ }
+ }
+ }
+ ost->sync_opts++;
+ /*
+ * For video, number of frames in == number of packets out.
+ * But there may be reordering, so we can't throw away frames on encoder
+ * flush, we need to limit them here, before they go into encoder.
+ */
+ ost->frame_number++;
+
+ if (vstats_filename && frame_size)
+ do_video_stats(ost, frame_size);
+ }
+
+ if (!ost->last_frame)
+ ost->last_frame = av_frame_alloc();
+ av_frame_unref(ost->last_frame);
+ if (next_picture && ost->last_frame)
+ av_frame_ref(ost->last_frame, next_picture);
+ else
+ av_frame_free(&ost->last_frame);
+}
+
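+ /* Convert a normalized mean squared error (error / (width*height*255^2))
+ * into PSNR in dB: psnr(d) = -10 * log10(d). */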
+static double psnr(double d)
+{
+ return -10.0 * log(d) / log(10.0);
+}
+
+static void do_video_stats(OutputStream *ost, int frame_size)
+{
+ AVCodecContext *enc;
+ int frame_number;
+ double ti1, bitrate, avg_bitrate;
+
+ /* this is executed just the first time do_video_stats is called */
+ if (!vstats_file) {
+ vstats_file = fopen(vstats_filename, "w");
+ if (!vstats_file) {
+ perror("fopen");
+ exit_program(1);
+ }
+ }
+
+ enc = ost->enc_ctx;
+ if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
+ frame_number = ost->st->nb_frames;
+ fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
+ ost->quality / (float)FF_QP2LAMBDA);
+
- if (enc->coded_frame && (enc->flags&CODEC_FLAG_PSNR))
++ if (enc->coded_frame && (enc->flags & AV_CODEC_FLAG_PSNR))
+ fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
+
+ fprintf(vstats_file,"f_size= %6d ", frame_size);
+ /* compute pts value */
+ ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
+ if (ti1 < 0.01)
+ ti1 = 0.01;
+
+ bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
+ avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
+ fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
+ (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
+ fprintf(vstats_file, "type= %c\n", enc->coded_frame ? av_get_picture_type_char(enc->coded_frame->pict_type) : 'I');
+ }
+}
+
+static void finish_output_stream(OutputStream *ost)
+{
+ OutputFile *of = output_files[ost->file_index];
+ int i;
+
+ ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
+
+ if (of->shortest) {
+ for (i = 0; i < of->ctx->nb_streams; i++)
+ output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
+ }
+}
+
+/**
+ * Get and encode new output from any of the filtergraphs, without causing
+ * activity.
+ *
+ * @return 0 for success, <0 for severe errors
+ */
+static int reap_filters(int flush)
+{
+ AVFrame *filtered_frame = NULL;
+ int i;
+
+ /* Reap all buffers present in the buffer sinks */
+ for (i = 0; i < nb_output_streams; i++) {
+ OutputStream *ost = output_streams[i];
+ OutputFile *of = output_files[ost->file_index];
+ AVFilterContext *filter;
+ AVCodecContext *enc = ost->enc_ctx;
+ int ret = 0;
+
+ if (!ost->filter)
+ continue;
+ filter = ost->filter->filter;
+
+ if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
+ return AVERROR(ENOMEM);
+ }
+ filtered_frame = ost->filtered_frame;
+
+ while (1) {
+ double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
+ ret = av_buffersink_get_frame_flags(filter, filtered_frame,
+ AV_BUFFERSINK_FLAG_NO_REQUEST);
+ if (ret < 0) {
+ if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
+ av_log(NULL, AV_LOG_WARNING,
+ "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
+ } else if (flush && ret == AVERROR_EOF) {
+ if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
+ do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
+ }
+ break;
+ }
+ if (ost->finished) {
+ av_frame_unref(filtered_frame);
+ continue;
+ }
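+ /* Rescale the frame pts from the filter output time base to the encoder
+ * time base; float_pts keeps up to 16 extra fractional bits so the
+ * fps/vsync code in do_video_out() sees a higher-precision timestamp. */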
+ if (filtered_frame->pts != AV_NOPTS_VALUE) {
+ int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
+ AVRational tb = enc->time_base;
+ int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
+
+ tb.den <<= extra_bits;
+ float_pts =
+ av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
+ av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
+ float_pts /= 1 << extra_bits;
+ // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
+ float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
+
+ filtered_frame->pts =
+ av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
+ av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
+ }
+ //if (ost->source_index >= 0)
+ // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
+
+ switch (filter->inputs[0]->type) {
+ case AVMEDIA_TYPE_VIDEO:
+ if (!ost->frame_aspect_ratio.num)
+ enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
+
+ if (debug_ts) {
+ av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
+ av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
+ float_pts,
+ enc->time_base.num, enc->time_base.den);
+ }
+
+ do_video_out(of->ctx, ost, filtered_frame, float_pts);
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ if (!(enc->codec->capabilities & CODEC_CAP_PARAM_CHANGE) &&
+ enc->channels != av_frame_get_channels(filtered_frame)) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
+ break;
+ }
+ do_audio_out(of->ctx, ost, filtered_frame);
+ break;
+ default:
+ // TODO support subtitle filters
+ av_assert0(0);
+ }
+
+ av_frame_unref(filtered_frame);
+ }
+ }
+
+ return 0;
+}
+
+static void print_final_stats(int64_t total_size)
+{
+ uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
+ uint64_t subtitle_size = 0;
+ uint64_t data_size = 0;
+ float percent = -1.0;
+ int i, j;
+ int pass1_used = 1;
+
+ for (i = 0; i < nb_output_streams; i++) {
+ OutputStream *ost = output_streams[i];
+ switch (ost->enc_ctx->codec_type) {
+ case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
+ case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
+ case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
+ default: other_size += ost->data_size; break;
+ }
+ extra_size += ost->enc_ctx->extradata_size;
+ data_size += ost->data_size;
- if ( (ost->enc_ctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
- != CODEC_FLAG_PASS1)
++ if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
++ != AV_CODEC_FLAG_PASS1)
+ pass1_used = 0;
+ }
+
+ if (data_size && total_size>0 && total_size >= data_size)
+ percent = 100.0 * (total_size - data_size) / data_size;
+
+ av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
+ video_size / 1024.0,
+ audio_size / 1024.0,
+ subtitle_size / 1024.0,
+ other_size / 1024.0,
+ extra_size / 1024.0);
+ if (percent >= 0.0)
+ av_log(NULL, AV_LOG_INFO, "%f%%", percent);
+ else
+ av_log(NULL, AV_LOG_INFO, "unknown");
+ av_log(NULL, AV_LOG_INFO, "\n");
+
+ /* print verbose per-stream stats */
+ for (i = 0; i < nb_input_files; i++) {
+ InputFile *f = input_files[i];
+ uint64_t total_packets = 0, total_size = 0;
+
+ av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
+ i, f->ctx->filename);
+
+ for (j = 0; j < f->nb_streams; j++) {
+ InputStream *ist = input_streams[f->ist_index + j];
+ enum AVMediaType type = ist->dec_ctx->codec_type;
+
+ total_size += ist->data_size;
+ total_packets += ist->nb_packets;
+
+ av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
+ i, j, media_type_string(type));
+ av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
+ ist->nb_packets, ist->data_size);
+
+ if (ist->decoding_needed) {
+ av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
+ ist->frames_decoded);
+ if (type == AVMEDIA_TYPE_AUDIO)
+ av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
+ av_log(NULL, AV_LOG_VERBOSE, "; ");
+ }
+
+ av_log(NULL, AV_LOG_VERBOSE, "\n");
+ }
+
+ av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
+ total_packets, total_size);
+ }
+
+ for (i = 0; i < nb_output_files; i++) {
+ OutputFile *of = output_files[i];
+ uint64_t total_packets = 0, total_size = 0;
+
+ av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
+ i, of->ctx->filename);
+
+ for (j = 0; j < of->ctx->nb_streams; j++) {
+ OutputStream *ost = output_streams[of->ost_index + j];
+ enum AVMediaType type = ost->enc_ctx->codec_type;
+
+ total_size += ost->data_size;
+ total_packets += ost->packets_written;
+
+ av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
+ i, j, media_type_string(type));
+ if (ost->encoding_needed) {
+ av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
+ ost->frames_encoded);
+ if (type == AVMEDIA_TYPE_AUDIO)
+ av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
+ av_log(NULL, AV_LOG_VERBOSE, "; ");
+ }
+
+ av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
+ ost->packets_written, ost->data_size);
+
+ av_log(NULL, AV_LOG_VERBOSE, "\n");
+ }
+
+ av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
+ total_packets, total_size);
+ }
+ if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
+ av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
+ if (pass1_used) {
+ av_log(NULL, AV_LOG_WARNING, "\n");
+ } else {
+ av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
+ }
+ }
+}
+
+static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
+{
+ char buf[1024];
+ AVBPrint buf_script;
+ OutputStream *ost;
+ AVFormatContext *oc;
+ int64_t total_size;
+ AVCodecContext *enc;
+ int frame_number, vid, i;
+ double bitrate;
+ int64_t pts = INT64_MIN;
+ static int64_t last_time = -1;
+ static int qp_histogram[52];
+ int hours, mins, secs, us;
+
+ if (!print_stats && !is_last_report && !progress_avio)
+ return;
+
+ if (!is_last_report) {
+ if (last_time == -1) {
+ last_time = cur_time;
+ return;
+ }
+ if ((cur_time - last_time) < 500000)
+ return;
+ last_time = cur_time;
+ }
+
+
+ oc = output_files[0]->ctx;
+
+ total_size = avio_size(oc->pb);
+ if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
+ total_size = avio_tell(oc->pb);
+
+ buf[0] = '\0';
+ vid = 0;
+ av_bprint_init(&buf_script, 0, 1);
+ for (i = 0; i < nb_output_streams; i++) {
+ float q = -1;
+ ost = output_streams[i];
+ enc = ost->enc_ctx;
+ if (!ost->stream_copy)
+ q = ost->quality / (float) FF_QP2LAMBDA;
+
+ if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
+ av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
+ ost->file_index, ost->index, q);
+ }
+ if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
+ float fps, t = (cur_time-timer_start) / 1000000.0;
+
+ frame_number = ost->frame_number;
+ fps = t > 1 ? frame_number / t : 0;
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
+ frame_number, fps < 9.95, fps, q);
+ av_bprintf(&buf_script, "frame=%d\n", frame_number);
+ av_bprintf(&buf_script, "fps=%.1f\n", fps);
+ av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
+ ost->file_index, ost->index, q);
+ if (is_last_report)
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
+ if (qp_hist) {
+ int j;
+ int qp = lrintf(q);
+ if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
+ qp_histogram[qp]++;
+ for (j = 0; j < 32; j++)
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
+ }
+
- if ((enc->flags&CODEC_FLAG_PSNR) && (enc->coded_frame || is_last_report)) {
++ if ((enc->flags & AV_CODEC_FLAG_PSNR) && (enc->coded_frame || is_last_report)) {
+ int j;
+ double error, error_sum = 0;
+ double scale, scale_sum = 0;
+ double p;
+ char type[3] = { 'Y','U','V' };
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
+ for (j = 0; j < 3; j++) {
+ if (is_last_report) {
+ error = enc->error[j];
+ scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
+ } else {
+ error = enc->coded_frame->error[j];
+ scale = enc->width * enc->height * 255.0 * 255.0;
+ }
+ if (j)
+ scale /= 4;
+ error_sum += error;
+ scale_sum += scale;
+ p = psnr(error / scale);
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
+ av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
+ ost->file_index, ost->index, type[j] | 32, p);
+ }
+ p = psnr(error_sum / scale_sum);
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
+ av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
+ ost->file_index, ost->index, p);
+ }
+ vid = 1;
+ }
+ /* compute min output value */
+ if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
+ pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
+ ost->st->time_base, AV_TIME_BASE_Q));
+ if (is_last_report)
+ nb_frames_drop += ost->last_droped;
+ }
+
+ secs = FFABS(pts) / AV_TIME_BASE;
+ us = FFABS(pts) % AV_TIME_BASE;
+ mins = secs / 60;
+ secs %= 60;
+ hours = mins / 60;
+ mins %= 60;
+
+ bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
+
+ if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
+ "size=N/A time=");
+ else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
+ "size=%8.0fkB time=", total_size / 1024.0);
+ if (pts < 0)
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
+ "%02d:%02d:%02d.%02d ", hours, mins, secs,
+ (100 * us) / AV_TIME_BASE);
+
+ if (bitrate < 0) {
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
+ av_bprintf(&buf_script, "bitrate=N/A\n");
+ }else{
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
+ av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
+ }
+
+ if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
+ else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
+ av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
+ av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
+ hours, mins, secs, us);
+
+ if (nb_frames_dup || nb_frames_drop)
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
+ nb_frames_dup, nb_frames_drop);
+ av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
+ av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
+
+ if (print_stats || is_last_report) {
+ const char end = is_last_report ? '\n' : '\r';
+ if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
+ fprintf(stderr, "%s %c", buf, end);
+ } else
+ av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
+
+ fflush(stderr);
+ }
+
+ if (progress_avio) {
+ av_bprintf(&buf_script, "progress=%s\n",
+ is_last_report ? "end" : "continue");
+ avio_write(progress_avio, buf_script.str,
+ FFMIN(buf_script.len, buf_script.size - 1));
+ avio_flush(progress_avio);
+ av_bprint_finalize(&buf_script, NULL);
+ if (is_last_report) {
+ avio_closep(&progress_avio);
+ }
+ }
+
+ if (is_last_report)
+ print_final_stats(total_size);
+}
+
+static void flush_encoders(void)
+{
+ int i, ret;
+
+ for (i = 0; i < nb_output_streams; i++) {
+ OutputStream *ost = output_streams[i];
+ AVCodecContext *enc = ost->enc_ctx;
+ AVFormatContext *os = output_files[ost->file_index]->ctx;
+ int stop_encoding = 0;
+
+ if (!ost->encoding_needed)
+ continue;
+
+ if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
+ continue;
+ if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
+ continue;
+
+ for (;;) {
+ int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
+ const char *desc;
+
+ switch (enc->codec_type) {
+ case AVMEDIA_TYPE_AUDIO:
+ encode = avcodec_encode_audio2;
+ desc = "Audio";
+ break;
+ case AVMEDIA_TYPE_VIDEO:
+ encode = avcodec_encode_video2;
+ desc = "Video";
+ break;
+ default:
+ stop_encoding = 1;
+ }
+
+ if (encode) {
+ AVPacket pkt;
+ int pkt_size;
+ int got_packet;
+ av_init_packet(&pkt);
+ pkt.data = NULL;
+ pkt.size = 0;
+
+ update_benchmark(NULL);
+ ret = encode(enc, &pkt, NULL, &got_packet);
+ update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
+ exit_program(1);
+ }
+ if (ost->logfile && enc->stats_out) {
+ fprintf(ost->logfile, "%s", enc->stats_out);
+ }
+ if (!got_packet) {
+ stop_encoding = 1;
+ break;
+ }
+ if (ost->finished & MUXER_FINISHED) {
+ av_free_packet(&pkt);
+ continue;
+ }
+ av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
+ pkt_size = pkt.size;
+ write_frame(os, &pkt, ost);
+ if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
+ do_video_stats(ost, pkt_size);
+ }
+ }
+
+ if (stop_encoding)
+ break;
+ }
+ }
+}
+
+/*
+ * Check whether a packet from ist should be written into ost at this time
+ */
+static int check_output_constraints(InputStream *ist, OutputStream *ost)
+{
+ OutputFile *of = output_files[ost->file_index];
+ int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
+
+ if (ost->source_index != ist_index)
+ return 0;
+
+ if (ost->finished)
+ return 0;
+
+ if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
+ return 0;
+
+ return 1;
+}
+
+static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
+{
+ OutputFile *of = output_files[ost->file_index];
+ InputFile *f = input_files [ist->file_index];
+ int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
+ int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
+ int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
+ AVPicture pict;
+ AVPacket opkt;
+
+ av_init_packet(&opkt);
+
+ if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
+ !ost->copy_initial_nonkeyframes)
+ return;
+
+ if (pkt->pts == AV_NOPTS_VALUE) {
+ if (!ost->frame_number && ist->pts < start_time &&
+ !ost->copy_prior_start)
+ return;
+ } else {
+ if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
+ !ost->copy_prior_start)
+ return;
+ }
+
+ if (of->recording_time != INT64_MAX &&
+ ist->pts >= of->recording_time + start_time) {
+ close_output_stream(ost);
+ return;
+ }
+
+ if (f->recording_time != INT64_MAX) {
+ start_time = f->ctx->start_time;
+ if (f->start_time != AV_NOPTS_VALUE)
+ start_time += f->start_time;
+ if (ist->pts >= f->recording_time + start_time) {
+ close_output_stream(ost);
+ return;
+ }
+ }
+
+ /* force the input stream PTS */
+ if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
+ ost->sync_opts++;
+
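+ /* Rescale packet timestamps from the input stream time base to the output
+ * stream time base, shifted by the output start time. */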
+ if (pkt->pts != AV_NOPTS_VALUE)
+ opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
+ else
+ opkt.pts = AV_NOPTS_VALUE;
+
+ if (pkt->dts == AV_NOPTS_VALUE)
+ opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
+ else
+ opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
+ opkt.dts -= ost_tb_start_time;
+
+ if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
+ int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
+ if(!duration)
+ duration = ist->dec_ctx->frame_size;
+ opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
+ (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
+ ost->st->time_base) - ost_tb_start_time;
+ }
+
+ opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
+ opkt.flags = pkt->flags;
+
+ // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
+ if ( ost->enc_ctx->codec_id != AV_CODEC_ID_H264
+ && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG1VIDEO
+ && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG2VIDEO
+ && ost->enc_ctx->codec_id != AV_CODEC_ID_VC1
+ ) {
+ if (av_parser_change(ost->parser, ost->st->codec,
+ &opkt.data, &opkt.size,
+ pkt->data, pkt->size,
+ pkt->flags & AV_PKT_FLAG_KEY)) {
+ opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
+ if (!opkt.buf)
+ exit_program(1);
+ }
+ } else {
+ opkt.data = pkt->data;
+ opkt.size = pkt->size;
+ }
+ av_copy_packet_side_data(&opkt, pkt);
+
+ if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
+ /* store AVPicture in AVPacket, as expected by the output format */
+ avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
+ opkt.data = (uint8_t *)&pict;
+ opkt.size = sizeof(AVPicture);
+ opkt.flags |= AV_PKT_FLAG_KEY;
+ }
+
+ write_frame(of->ctx, &opkt, ost);
+}
+
+int guess_input_channel_layout(InputStream *ist)
+{
+ AVCodecContext *dec = ist->dec_ctx;
+
+ if (!dec->channel_layout) {
+ char layout_name[256];
+
+ if (dec->channels > ist->guess_layout_max)
+ return 0;
+ dec->channel_layout = av_get_default_channel_layout(dec->channels);
+ if (!dec->channel_layout)
+ return 0;
+ av_get_channel_layout_string(layout_name, sizeof(layout_name),
+ dec->channels, dec->channel_layout);
+ av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
+ "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
+ }
+ return 1;
+}
+
+static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
+{
+ AVFrame *decoded_frame, *f;
+ AVCodecContext *avctx = ist->dec_ctx;
+ int i, ret, err = 0, resample_changed;
+ AVRational decoded_frame_tb;
+
+ if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
+ return AVERROR(ENOMEM);
+ if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
+ return AVERROR(ENOMEM);
+ decoded_frame = ist->decoded_frame;
+
+ update_benchmark(NULL);
+ ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
+ update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
+
+ if (ret >= 0 && avctx->sample_rate <= 0) {
+ av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
+ ret = AVERROR_INVALIDDATA;
+ }
+
+ if (*got_output || ret<0)
+ decode_error_stat[ret<0] ++;
+
+ if (ret < 0 && exit_on_error)
+ exit_program(1);
+
+ if (!*got_output || ret < 0)
+ return ret;
+
+ ist->samples_decoded += decoded_frame->nb_samples;
+ ist->frames_decoded++;
+
+#if 1
+ /* increment next_dts to use for the case where the input stream does not
+ have timestamps or there are multiple frames in the packet */
+ ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
+ avctx->sample_rate;
+ ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
+ avctx->sample_rate;
+#endif
+
+ resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
+ ist->resample_channels != avctx->channels ||
+ ist->resample_channel_layout != decoded_frame->channel_layout ||
+ ist->resample_sample_rate != decoded_frame->sample_rate;
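+ /* If the audio parameters changed mid-stream, every filtergraph fed by
+ * this stream has to be reconfigured for the new format. */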
+ if (resample_changed) {
+ char layout1[64], layout2[64];
+
+ if (!guess_input_channel_layout(ist)) {
+ av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
+ "layout for Input Stream #%d.%d\n", ist->file_index,
+ ist->st->index);
+ exit_program(1);
+ }
+ decoded_frame->channel_layout = avctx->channel_layout;
+
+ av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
+ ist->resample_channel_layout);
+ av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
+ decoded_frame->channel_layout);
+
+ av_log(NULL, AV_LOG_INFO,
+ "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
+ ist->file_index, ist->st->index,
+ ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
+ ist->resample_channels, layout1,
+ decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
+ avctx->channels, layout2);
+
+ ist->resample_sample_fmt = decoded_frame->format;
+ ist->resample_sample_rate = decoded_frame->sample_rate;
+ ist->resample_channel_layout = decoded_frame->channel_layout;
+ ist->resample_channels = avctx->channels;
+
+ for (i = 0; i < nb_filtergraphs; i++)
+ if (ist_in_filtergraph(filtergraphs[i], ist)) {
+ FilterGraph *fg = filtergraphs[i];
+ if (configure_filtergraph(fg) < 0) {
+ av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
+ exit_program(1);
+ }
+ }
+ }
+
+ /* if the decoder provides a pts, use it instead of the last packet pts.
+ the decoder could be delaying output by a packet or more. */
+ if (decoded_frame->pts != AV_NOPTS_VALUE) {
+ ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
+ decoded_frame_tb = avctx->time_base;
+ } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
+ decoded_frame->pts = decoded_frame->pkt_pts;
+ decoded_frame_tb = ist->st->time_base;
+ } else if (pkt->pts != AV_NOPTS_VALUE) {
+ decoded_frame->pts = pkt->pts;
+ decoded_frame_tb = ist->st->time_base;
+ }else {
+ decoded_frame->pts = ist->dts;
+ decoded_frame_tb = AV_TIME_BASE_Q;
+ }
+ pkt->pts = AV_NOPTS_VALUE;
+ if (decoded_frame->pts != AV_NOPTS_VALUE)
+ decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
+ (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
+ (AVRational){1, avctx->sample_rate});
+ for (i = 0; i < ist->nb_filters; i++) {
+ if (i < ist->nb_filters - 1) {
+ f = ist->filter_frame;
+ err = av_frame_ref(f, decoded_frame);
+ if (err < 0)
+ break;
+ } else
+ f = decoded_frame;
+ err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
+ AV_BUFFERSRC_FLAG_PUSH);
+ if (err == AVERROR_EOF)
+ err = 0; /* ignore */
+ if (err < 0)
+ break;
+ }
+ decoded_frame->pts = AV_NOPTS_VALUE;
+
+ av_frame_unref(ist->filter_frame);
+ av_frame_unref(decoded_frame);
+ return err < 0 ? err : ret;
+}
+
+static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
+{
+ AVFrame *decoded_frame, *f;
+ int i, ret = 0, err = 0, resample_changed;
+ int64_t best_effort_timestamp;
+ AVRational *frame_sample_aspect;
+
+ if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
+ return AVERROR(ENOMEM);
+ if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
+ return AVERROR(ENOMEM);
+ decoded_frame = ist->decoded_frame;
+ pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
+
+ update_benchmark(NULL);
+ ret = avcodec_decode_video2(ist->dec_ctx,
+ decoded_frame, got_output, pkt);
+ update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
+
+ // The following line may be required in some cases where there is no parser
+ // or the parser does not set has_b_frames correctly
+ if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
+ if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
+ ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
+ } else
+ av_log_ask_for_sample(
+ ist->dec_ctx,
+ "has_b_frames is larger in decoder than demuxer %d > %d ",
+ ist->dec_ctx->has_b_frames,
+ ist->st->codec->has_b_frames
+ );
+ }
+
+ if (*got_output || ret<0)
+ decode_error_stat[ret<0] ++;
+
+ if (ret < 0 && exit_on_error)
+ exit_program(1);
+
+ if (*got_output && ret >= 0) {
+ if (ist->dec_ctx->width != decoded_frame->width ||
+ ist->dec_ctx->height != decoded_frame->height ||
+ ist->dec_ctx->pix_fmt != decoded_frame->format) {
+ av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
+ decoded_frame->width,
+ decoded_frame->height,
+ decoded_frame->format,
+ ist->dec_ctx->width,
+ ist->dec_ctx->height,
+ ist->dec_ctx->pix_fmt);
+ }
+ }
+
+ if (!*got_output || ret < 0)
+ return ret;
+
+ if(ist->top_field_first>=0)
+ decoded_frame->top_field_first = ist->top_field_first;
+
+ ist->frames_decoded++;
+
+ if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
+ err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
+ if (err < 0)
+ goto fail;
+ }
+ ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
+
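+ /* Prefer the frame's best-effort timestamp (derived from pts/dts
+ * heuristics) as the reference time for this input stream. */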
+ best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
+ if(best_effort_timestamp != AV_NOPTS_VALUE)
+ ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
+
+ if (debug_ts) {
+ av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
+ "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
+ ist->st->index, av_ts2str(decoded_frame->pts),
+ av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
+ best_effort_timestamp,
+ av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
+ decoded_frame->key_frame, decoded_frame->pict_type,
+ ist->st->time_base.num, ist->st->time_base.den);
+ }
+
+ pkt->size = 0;
+
+ if (ist->st->sample_aspect_ratio.num)
+ decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
+
+ resample_changed = ist->resample_width != decoded_frame->width ||
+ ist->resample_height != decoded_frame->height ||
+ ist->resample_pix_fmt != decoded_frame->format;
+ if (resample_changed) {
+ av_log(NULL, AV_LOG_INFO,
+ "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
+ ist->file_index, ist->st->index,
+ ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
+ decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
+
+ ist->resample_width = decoded_frame->width;
+ ist->resample_height = decoded_frame->height;
+ ist->resample_pix_fmt = decoded_frame->format;
+
+ for (i = 0; i < nb_filtergraphs; i++) {
+ if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
+ configure_filtergraph(filtergraphs[i]) < 0) {
+ av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
+ exit_program(1);
+ }
+ }
+ }
+
+ frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
+ for (i = 0; i < ist->nb_filters; i++) {
+ if (!frame_sample_aspect->num)
+ *frame_sample_aspect = ist->st->sample_aspect_ratio;
+
+ if (i < ist->nb_filters - 1) {
+ f = ist->filter_frame;
+ err = av_frame_ref(f, decoded_frame);
+ if (err < 0)
+ break;
+ } else
+ f = decoded_frame;
+ ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
+ if (ret == AVERROR_EOF) {
+ ret = 0; /* ignore */
+ } else if (ret < 0) {
+ av_log(NULL, AV_LOG_FATAL,
+ "Failed to inject frame into filter network: %s\n", av_err2str(ret));
+ exit_program(1);
+ }
+ }
+
+fail:
+ av_frame_unref(ist->filter_frame);
+ av_frame_unref(decoded_frame);
+ return err < 0 ? err : ret;
+}
+
+static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
+{
+ AVSubtitle subtitle;
+ int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
+ &subtitle, got_output, pkt);
+
+ if (*got_output || ret<0)
+ decode_error_stat[ret<0] ++;
+
+ if (ret < 0 && exit_on_error)
+ exit_program(1);
+
+ if (ret < 0 || !*got_output) {
+ if (!pkt->size)
+ sub2video_flush(ist);
+ return ret;
+ }
+
+ if (ist->fix_sub_duration) {
+ int end = 1;
+ if (ist->prev_sub.got_output) {
+ end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
+ 1000, AV_TIME_BASE);
+ if (end < ist->prev_sub.subtitle.end_display_time) {
+ av_log(ist->dec_ctx, AV_LOG_DEBUG,
+ "Subtitle duration reduced from %d to %d%s\n",
+ ist->prev_sub.subtitle.end_display_time, end,
+ end <= 0 ? ", dropping it" : "");
+ ist->prev_sub.subtitle.end_display_time = end;
+ }
+ }
+ FFSWAP(int, *got_output, ist->prev_sub.got_output);
+ FFSWAP(int, ret, ist->prev_sub.ret);
+ FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
+ if (end <= 0)
+ goto out;
+ }
+
+ if (!*got_output)
+ return ret;
+
+ sub2video_update(ist, &subtitle);
+
+ if (!subtitle.num_rects)
+ goto out;
+
+ ist->frames_decoded++;
+
+ for (i = 0; i < nb_output_streams; i++) {
+ OutputStream *ost = output_streams[i];
+
+ if (!check_output_constraints(ist, ost) || !ost->encoding_needed
+ || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
+ continue;
+
+ do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
+ }
+
+out:
+ avsubtitle_free(&subtitle);
+ return ret;
+}
+
+static int send_filter_eof(InputStream *ist)
+{
+ int i, ret;
+ for (i = 0; i < ist->nb_filters; i++) {
+#if 1
+ ret = av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
+#else
+ ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
+#endif
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+/* pkt = NULL means EOF (needed to flush decoder buffers) */
+static int process_input_packet(InputStream *ist, const AVPacket *pkt)
+{
+ int ret = 0, i;
+ int got_output = 0;
+
+ AVPacket avpkt;
+ if (!ist->saw_first_ts) {
+ ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
+ ist->pts = 0;
+ if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
+ ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
+ ist->pts = ist->dts; //unused but better to set it to a value that's not totally wrong
+ }
+ ist->saw_first_ts = 1;
+ }
+
+ if (ist->next_dts == AV_NOPTS_VALUE)
+ ist->next_dts = ist->dts;
+ if (ist->next_pts == AV_NOPTS_VALUE)
+ ist->next_pts = ist->pts;
+
+ if (!pkt) {
+ /* EOF handling */
+ av_init_packet(&avpkt);
+ avpkt.data = NULL;
+ avpkt.size = 0;
+ goto handle_eof;
+ } else {
+ avpkt = *pkt;
+ }
+
+ if (pkt->dts != AV_NOPTS_VALUE) {
+ ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
+ if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
+ ist->next_pts = ist->pts = ist->dts;
+ }
+
+ // while we have more to decode or while the decoder did output something on EOF
+ while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
+ int duration;
+ handle_eof:
+
+ ist->pts = ist->next_pts;
+ ist->dts = ist->next_dts;
+
+ if (avpkt.size && avpkt.size != pkt->size &&
+ !(ist->dec->capabilities & CODEC_CAP_SUBFRAMES)) {
+ av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
+ "Multiple frames in a packet from stream %d\n", pkt->stream_index);
+ ist->showed_multi_packet_warning = 1;
+ }
+
+ switch (ist->dec_ctx->codec_type) {
+ case AVMEDIA_TYPE_AUDIO:
+ ret = decode_audio (ist, &avpkt, &got_output);
+ break;
+ case AVMEDIA_TYPE_VIDEO:
+ ret = decode_video (ist, &avpkt, &got_output);
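+ /* Estimate the decoded frame duration: prefer the packet duration,
+ * otherwise derive it from the codec frame rate and repeat_pict ticks. */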
+ if (avpkt.duration) {
+ duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
+ } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
+ int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
+ duration = ((int64_t)AV_TIME_BASE *
+ ist->dec_ctx->framerate.den * ticks) /
+ ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
+ } else
+ duration = 0;
+
+ if(ist->dts != AV_NOPTS_VALUE && duration) {
+ ist->next_dts += duration;
+ }else
+ ist->next_dts = AV_NOPTS_VALUE;
+
+ if (got_output)
+ ist->next_pts += duration; //FIXME the duration is not correct in some cases
+ break;
+ case AVMEDIA_TYPE_SUBTITLE:
+ ret = transcode_subtitles(ist, &avpkt, &got_output);
+ break;
+ default:
+ return -1;
+ }
+
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
+ ist->file_index, ist->st->index, av_err2str(ret));
+ if (exit_on_error)
+ exit_program(1);
+ break;
+ }
+
+ avpkt.dts=
+ avpkt.pts= AV_NOPTS_VALUE;
+
+ // touch data and size only if not EOF
+ if (pkt) {
+ if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
+ ret = avpkt.size;
+ avpkt.data += ret;
+ avpkt.size -= ret;
+ }
+ if (!got_output) {
+ continue;
+ }
+ if (got_output && !pkt)
+ break;
+ }
+
+ /* after flushing, send an EOF on all the filter inputs attached to the stream */
+ if (!pkt && ist->decoding_needed && !got_output) {
+ int ret = send_filter_eof(ist);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
+ exit_program(1);
+ }
+ }
+
+ /* handle stream copy */
+ if (!ist->decoding_needed) {
+ ist->dts = ist->next_dts;
+ switch (ist->dec_ctx->codec_type) {
+ case AVMEDIA_TYPE_AUDIO:
+ ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
+ ist->dec_ctx->sample_rate;
+ break;
+ case AVMEDIA_TYPE_VIDEO:
+ if (ist->framerate.num) {
+ // TODO: Remove work-around for c99-to-c89 issue 7
+ AVRational time_base_q = AV_TIME_BASE_Q;
+ int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
+ ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
+ } else if (pkt->duration) {
+ ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
+ } else if(ist->dec_ctx->framerate.num != 0) {
+ int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
+ ist->next_dts += ((int64_t)AV_TIME_BASE *
+ ist->dec_ctx->framerate.den * ticks) /
+ ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
+ }
+ break;
+ }
+ ist->pts = ist->dts;
+ ist->next_pts = ist->next_dts;
+ }
+ for (i = 0; pkt && i < nb_output_streams; i++) {
+ OutputStream *ost = output_streams[i];
+
+ if (!check_output_constraints(ist, ost) || ost->encoding_needed)
+ continue;
+
+ do_streamcopy(ist, ost, pkt);
+ }
+
+ return got_output;
+}
+
+static void print_sdp(void)
+{
+ char sdp[16384];
+ int i;
+ int j;
+ AVIOContext *sdp_pb;
+ AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
+
+ if (!avc)
+ exit_program(1);
+ for (i = 0, j = 0; i < nb_output_files; i++) {
+ if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
+ avc[j] = output_files[i]->ctx;
+ j++;
+ }
+ }
+
+ av_sdp_create(avc, j, sdp, sizeof(sdp));
+
+ if (!sdp_filename) {
+ printf("SDP:\n%s\n", sdp);
+ fflush(stdout);
+ } else {
+ if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
+ } else {
+ avio_printf(sdp_pb, "SDP:\n%s", sdp);
+ avio_closep(&sdp_pb);
+ av_freep(&sdp_filename);
+ }
+ }
+
+ av_freep(&avc);
+}
+
+static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
+{
+ int i;
+ for (i = 0; hwaccels[i].name; i++)
+ if (hwaccels[i].pix_fmt == pix_fmt)
+ return &hwaccels[i];
+ return NULL;
+}
+
+static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
+{
+ InputStream *ist = s->opaque;
+ const enum AVPixelFormat *p;
+ int ret;
+
+ for (p = pix_fmts; *p != -1; p++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
+ const HWAccel *hwaccel;
+
+ if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
+ break;
+
+ hwaccel = get_hwaccel(*p);
+ if (!hwaccel ||
+ (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
+ (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
+ continue;
+
+ ret = hwaccel->init(s);
+ if (ret < 0) {
+ if (ist->hwaccel_id == hwaccel->id) {
+ av_log(NULL, AV_LOG_FATAL,
+ "%s hwaccel requested for input stream #%d:%d, "
+ "but cannot be initialized.\n", hwaccel->name,
+ ist->file_index, ist->st->index);
+ return AV_PIX_FMT_NONE;
+ }
+ continue;
+ }
+ ist->active_hwaccel_id = hwaccel->id;
+ ist->hwaccel_pix_fmt = *p;
+ break;
+ }
+
+ return *p;
+}
+
+static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
+{
+ InputStream *ist = s->opaque;
+
+ if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
+ return ist->hwaccel_get_buffer(s, frame, flags);
+
+ return avcodec_default_get_buffer2(s, frame, flags);
+}
+
+static int init_input_stream(int ist_index, char *error, int error_len)
+{
+ int ret;
+ InputStream *ist = input_streams[ist_index];
+
+ if (ist->decoding_needed) {
+ AVCodec *codec = ist->dec;
+ if (!codec) {
+ snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
+ avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
+ return AVERROR(EINVAL);
+ }
+
+ ist->dec_ctx->opaque = ist;
+ ist->dec_ctx->get_format = get_format;
+ ist->dec_ctx->get_buffer2 = get_buffer;
+ ist->dec_ctx->thread_safe_callbacks = 1;
+
+ av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
+ if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
+ (ist->decoding_needed & DECODING_FOR_OST)) {
+ av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
+ if (ist->decoding_needed & DECODING_FOR_FILTER)
+ av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
+ }
+
+ if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
+ av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
+ if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
+ if (ret == AVERROR_EXPERIMENTAL)
+ abort_codec_experimental(codec, 0);
+
+ snprintf(error, error_len,
+ "Error while opening decoder for input stream "
+ "#%d:%d : %s",
+ ist->file_index, ist->st->index, av_err2str(ret));
+ return ret;
+ }
+ assert_avoptions(ist->decoder_opts);
+ }
+
+ ist->next_pts = AV_NOPTS_VALUE;
+ ist->next_dts = AV_NOPTS_VALUE;
+
+ return 0;
+}
+
+static InputStream *get_input_stream(OutputStream *ost)
+{
+ if (ost->source_index >= 0)
+ return input_streams[ost->source_index];
+ return NULL;
+}
+
+static int compare_int64(const void *a, const void *b)
+{
+ int64_t va = *(int64_t *)a, vb = *(int64_t *)b;
+ return va < vb ? -1 : va > vb ? +1 : 0;
+}
+
+static int init_output_stream(OutputStream *ost, char *error, int error_len)
+{
+ int ret = 0;
+
+ if (ost->encoding_needed) {
+ AVCodec *codec = ost->enc;
+ AVCodecContext *dec = NULL;
+ InputStream *ist;
+
+ if ((ist = get_input_stream(ost)))
+ dec = ist->dec_ctx;
+ if (dec && dec->subtitle_header) {
+ /* ASS code assumes this buffer is null terminated so add extra byte. */
+ ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
+ if (!ost->enc_ctx->subtitle_header)
+ return AVERROR(ENOMEM);
+ memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
+ ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
+ }
+ if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
+ av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
+ av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);
+
+ if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
+ if (ret == AVERROR_EXPERIMENTAL)
+ abort_codec_experimental(codec, 1);
+ snprintf(error, error_len,
+ "Error while opening encoder for output stream #%d:%d - "
+ "maybe incorrect parameters such as bit_rate, rate, width or height",
+ ost->file_index, ost->index);
+ return ret;
+ }
+ if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
+ !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
+ av_buffersink_set_frame_size(ost->filter->filter,
+ ost->enc_ctx->frame_size);
+ assert_avoptions(ost->encoder_opts);
+ if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
+ av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
+ " It takes bits/s as argument, not kbits/s\n");
+
+ ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_FATAL,
+ "Error initializing the output stream codec context.\n");
+ exit_program(1);
+ }
+
+ // copy timebase while removing common factors
+ ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
+ ost->st->codec->codec= ost->enc_ctx->codec;
+ } else {
+ ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_FATAL,
+ "Error setting up codec context options.\n");
+ return ret;
+ }
+ // copy timebase while removing common factors
+ ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
+ }
+
+ return ret;
+}
+
+static void parse_forced_key_frames(char *kf, OutputStream *ost,
+ AVCodecContext *avctx)
+{
+ char *p;
+ int n = 1, i, size, index = 0;
+ int64_t t, *pts;
+
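+ /* -force_key_frames is a comma-separated list of times; the special entry
+ * "chapters" (optionally followed by a time offset) expands to the chapter
+ * start times of the output file. */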
+ for (p = kf; *p; p++)
+ if (*p == ',')
+ n++;
+ size = n;
+ pts = av_malloc_array(size, sizeof(*pts));
+ if (!pts) {
+ av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
+ exit_program(1);
+ }
+
+ p = kf;
+ for (i = 0; i < n; i++) {
+ char *next = strchr(p, ',');
+
+ if (next)
+ *next++ = 0;
+
+ if (!memcmp(p, "chapters", 8)) {
+
+ AVFormatContext *avf = output_files[ost->file_index]->ctx;
+ int j;
+
+ if (avf->nb_chapters > INT_MAX - size ||
+ !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
+ sizeof(*pts)))) {
+ av_log(NULL, AV_LOG_FATAL,
+ "Could not allocate forced key frames array.\n");
+ exit_program(1);
+ }
+ t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
+ t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
+
+ for (j = 0; j < avf->nb_chapters; j++) {
+ AVChapter *c = avf->chapters[j];
+ av_assert1(index < size);
+ pts[index++] = av_rescale_q(c->start, c->time_base,
+ avctx->time_base) + t;
+ }
+
+ } else {
+
+ t = parse_time_or_die("force_key_frames", p, 1);
+ av_assert1(index < size);
+ pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
+
+ }
+
+ p = next;
+ }
+
+ av_assert0(index == size);
+ qsort(pts, size, sizeof(*pts), compare_int64);
+ ost->forced_kf_count = size;
+ ost->forced_kf_pts = pts;
+}
+
+static void report_new_stream(int input_index, AVPacket *pkt)
+{
+ InputFile *file = input_files[input_index];
+ AVStream *st = file->ctx->streams[pkt->stream_index];
+
+ if (pkt->stream_index < file->nb_streams_warn)
+ return;
+ av_log(file->ctx, AV_LOG_WARNING,
+ "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
+ av_get_media_type_string(st->codec->codec_type),
+ input_index, pkt->stream_index,
+ pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
+ file->nb_streams_warn = pkt->stream_index + 1;
+}
+
+static void set_encoder_id(OutputFile *of, OutputStream *ost)
+{
+ AVDictionaryEntry *e;
+
+ uint8_t *encoder_string;
+ int encoder_string_len;
+ int format_flags = 0;
+ int codec_flags = 0;
+
+ if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
+ return;
+
+ e = av_dict_get(of->opts, "fflags", NULL, 0);
+ if (e) {
+ const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
+ if (!o)
+ return;
+ av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
+ }
+ e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
+ if (e) {
+ const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
+ if (!o)
+ return;
+ av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
+ }
+
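+ /* Tag the stream with the encoder name; when bitexact muxing/encoding is
+ * requested only "Lavc" is written, so output files stay reproducible
+ * across library versions. */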
+ encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
+ encoder_string = av_mallocz(encoder_string_len);
+ if (!encoder_string)
+ exit_program(1);
+
- if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & CODEC_FLAG_BITEXACT))
++ if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
+ av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
+ else
+ av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
+ av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
+ av_dict_set(&ost->st->metadata, "encoder", encoder_string,
+ AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
+}
+
+static int transcode_init(void)
+{
+ int ret = 0, i, j, k;
+ AVFormatContext *oc;
+ OutputStream *ost;
+ InputStream *ist;
+ char error[1024] = {0};
+ int want_sdp = 1;
+
+ for (i = 0; i < nb_filtergraphs; i++) {
+ FilterGraph *fg = filtergraphs[i];
+ for (j = 0; j < fg->nb_outputs; j++) {
+ OutputFilter *ofilter = fg->outputs[j];
+ if (!ofilter->ost || ofilter->ost->source_index >= 0)
+ continue;
+ if (fg->nb_inputs != 1)
+ continue;
+ for (k = nb_input_streams-1; k >= 0 ; k--)
+ if (fg->inputs[0]->ist == input_streams[k])
+ break;
+ ofilter->ost->source_index = k;
+ }
+ }
+
+ /* init framerate emulation */
+ for (i = 0; i < nb_input_files; i++) {
+ InputFile *ifile = input_files[i];
+ if (ifile->rate_emu)
+ for (j = 0; j < ifile->nb_streams; j++)
+ input_streams[j + ifile->ist_index]->start = av_gettime_relative();
+ }
+
+ /* for each output stream, we compute the right encoding parameters */
+ for (i = 0; i < nb_output_streams; i++) {
+ AVCodecContext *enc_ctx;
+ AVCodecContext *dec_ctx = NULL;
+ ost = output_streams[i];
+ oc = output_files[ost->file_index]->ctx;
+ ist = get_input_stream(ost);
+
+ if (ost->attachment_filename)
+ continue;
+
+ enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
+
+ if (ist) {
+ dec_ctx = ist->dec_ctx;
+
+ ost->st->disposition = ist->st->disposition;
+ enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
+ enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
+ } else {
+ for (j=0; j<oc->nb_streams; j++) {
+ AVStream *st = oc->streams[j];
+ if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
+ break;
+ }
+ if (j == oc->nb_streams)
+ if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
+ ost->st->disposition = AV_DISPOSITION_DEFAULT;
+ }
+
+ if (ost->stream_copy) {
+ AVRational sar;
+ uint64_t extra_size;
+
+ av_assert0(ist && !ost->filter);
+
+ extra_size = (uint64_t)dec_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
+
+ if (extra_size > INT_MAX) {
+ return AVERROR(EINVAL);
+ }
+
+ /* if stream_copy is selected, no need to decode or encode */
+ enc_ctx->codec_id = dec_ctx->codec_id;
+ enc_ctx->codec_type = dec_ctx->codec_type;
+
+ if (!enc_ctx->codec_tag) {
+ unsigned int codec_tag;
+ if (!oc->oformat->codec_tag ||
+ av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
+ !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
+ enc_ctx->codec_tag = dec_ctx->codec_tag;
+ }
+
+ enc_ctx->bit_rate = dec_ctx->bit_rate;
+ enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
+ enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
+ enc_ctx->field_order = dec_ctx->field_order;
+ if (dec_ctx->extradata_size) {
+ enc_ctx->extradata = av_mallocz(extra_size);
+ if (!enc_ctx->extradata) {
+ return AVERROR(ENOMEM);
+ }
+ memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
+ }
+ enc_ctx->extradata_size= dec_ctx->extradata_size;
+ enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
+
+ enc_ctx->time_base = ist->st->time_base;
+ /*
+ * AVI is a special case here because it supports variable fps, but
+ * having the fps and timebase differ significantly adds quite some
+ * overhead
+ */
+ if(!strcmp(oc->oformat->name, "avi")) {
+ if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
+ && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
+ && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
+ && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
+ || copy_tb==2){
+ enc_ctx->time_base.num = ist->st->r_frame_rate.den;
+ enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
+ enc_ctx->ticks_per_frame = 2;
+ } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
+ && av_q2d(ist->st->time_base) < 1.0/500
+ || copy_tb==0){
+ enc_ctx->time_base = dec_ctx->time_base;
+ enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
+ enc_ctx->time_base.den *= 2;
+ enc_ctx->ticks_per_frame = 2;
+ }
+ } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
+ && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
+ && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
+ && strcmp(oc->oformat->name, "f4v")
+ ) {
+ if( copy_tb<0 && dec_ctx->time_base.den
+ && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
+ && av_q2d(ist->st->time_base) < 1.0/500
+ || copy_tb==0){
+ enc_ctx->time_base = dec_ctx->time_base;
+ enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
+ }
+ }
+ if ( enc_ctx->codec_tag == AV_RL32("tmcd")
+ && dec_ctx->time_base.num < dec_ctx->time_base.den
+ && dec_ctx->time_base.num > 0
+ && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
+ enc_ctx->time_base = dec_ctx->time_base;
+ }
+
+ if (ist && !ost->frame_rate.num)
+ ost->frame_rate = ist->framerate;
+ if(ost->frame_rate.num)
+ enc_ctx->time_base = av_inv_q(ost->frame_rate);
+
+ av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
+ enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
+
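+ /* Copy stream-level side data from the input stream, skipping the display
+ * matrix if the rotation was overridden on the command line. */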
+ if (ist->st->nb_side_data) {
+ ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
+ sizeof(*ist->st->side_data));
+ if (!ost->st->side_data)
+ return AVERROR(ENOMEM);
+
+ ost->st->nb_side_data = 0;
+ for (j = 0; j < ist->st->nb_side_data; j++) {
+ const AVPacketSideData *sd_src = &ist->st->side_data[j];
+ AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
+
+ if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
+ continue;
+
+ sd_dst->data = av_malloc(sd_src->size);
+ if (!sd_dst->data)
+ return AVERROR(ENOMEM);
+ memcpy(sd_dst->data, sd_src->data, sd_src->size);
+ sd_dst->size = sd_src->size;
+ sd_dst->type = sd_src->type;
+ ost->st->nb_side_data++;
+ }
+ }
+
+ ost->parser = av_parser_init(enc_ctx->codec_id);
+
+ switch (enc_ctx->codec_type) {
+ case AVMEDIA_TYPE_AUDIO:
+ if (audio_volume != 256) {
+ av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
+ exit_program(1);
+ }
+ enc_ctx->channel_layout = dec_ctx->channel_layout;
+ enc_ctx->sample_rate = dec_ctx->sample_rate;
+ enc_ctx->channels = dec_ctx->channels;
+ enc_ctx->frame_size = dec_ctx->frame_size;
+ enc_ctx->audio_service_type = dec_ctx->audio_service_type;
+ enc_ctx->block_align = dec_ctx->block_align;
+ enc_ctx->initial_padding = dec_ctx->delay;
+#if FF_API_AUDIOENC_DELAY
+ enc_ctx->delay = dec_ctx->delay;
+#endif
+ if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
+ enc_ctx->block_align= 0;
+ if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
+ enc_ctx->block_align= 0;
+ break;
+ case AVMEDIA_TYPE_VIDEO:
+ enc_ctx->pix_fmt = dec_ctx->pix_fmt;
+ enc_ctx->width = dec_ctx->width;
+ enc_ctx->height = dec_ctx->height;
+ enc_ctx->has_b_frames = dec_ctx->has_b_frames;
+ if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
+ sar =
+ av_mul_q(ost->frame_aspect_ratio,
+ (AVRational){ enc_ctx->height, enc_ctx->width });
+ av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
+ "with stream copy may produce invalid files\n");
+ }
+ else if (ist->st->sample_aspect_ratio.num)
+ sar = ist->st->sample_aspect_ratio;
+ else
+ sar = dec_ctx->sample_aspect_ratio;
+ ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
+ ost->st->avg_frame_rate = ist->st->avg_frame_rate;
+ ost->st->r_frame_rate = ist->st->r_frame_rate;
+ break;
+ case AVMEDIA_TYPE_SUBTITLE:
+ enc_ctx->width = dec_ctx->width;
+ enc_ctx->height = dec_ctx->height;
+ break;
+ case AVMEDIA_TYPE_UNKNOWN:
+ case AVMEDIA_TYPE_DATA:
+ case AVMEDIA_TYPE_ATTACHMENT:
+ break;
+ default:
+ abort();
+ }
+ } else {
+ if (!ost->enc)
+ ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
+ if (!ost->enc) {
+ /* should only happen when a default codec is not present. */
+ snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
+ avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
+ ret = AVERROR(EINVAL);
+ goto dump_format;
+ }
+
+ set_encoder_id(output_files[ost->file_index], ost);
+
+ if (!ost->filter &&
+ (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
+ enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
+ FilterGraph *fg;
+ fg = init_simple_filtergraph(ist, ost);
+ if (configure_filtergraph(fg)) {
+ av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
+ exit_program(1);
+ }
+ }
+
+ if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+ if (!ost->frame_rate.num)
+ ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
+ if (ist && !ost->frame_rate.num)
+ ost->frame_rate = ist->framerate;
+ if (ist && !ost->frame_rate.num)
+ ost->frame_rate = ist->st->r_frame_rate;
+ if (ist && !ost->frame_rate.num) {
+ ost->frame_rate = (AVRational){25, 1};
+ av_log(NULL, AV_LOG_WARNING,
+ "No information "
+ "about the input framerate is available. Falling "
+ "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
+ "if you want a different framerate.\n",
+ ost->file_index, ost->index);
+ }
+// ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
+ if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
+ int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
+ ost->frame_rate = ost->enc->supported_framerates[idx];
+ }
+ // reduce frame rate for mpeg4 to be within the spec limits
+ if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
+ av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
+ ost->frame_rate.num, ost->frame_rate.den, 65535);
+ }
+ }
+
+ switch (enc_ctx->codec_type) {
+ case AVMEDIA_TYPE_AUDIO:
+ enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
+ enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
+ enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
+ enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
+ enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
+ break;
+ case AVMEDIA_TYPE_VIDEO:
+ enc_ctx->time_base = av_inv_q(ost->frame_rate);
+ if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
+ enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
+ if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
+ && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
+ av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
+ "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
+ }
+ for (j = 0; j < ost->forced_kf_count; j++)
+ ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
+ AV_TIME_BASE_Q,
+ enc_ctx->time_base);
+
+ enc_ctx->width = ost->filter->filter->inputs[0]->w;
+ enc_ctx->height = ost->filter->filter->inputs[0]->h;
+ enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
+ ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
+ av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
+ ost->filter->filter->inputs[0]->sample_aspect_ratio;
+ if (!strncmp(ost->enc->name, "libx264", 7) &&
+ enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
+ ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
+ av_log(NULL, AV_LOG_WARNING,
+ "No pixel format specified, %s for H.264 encoding chosen.\n"
+ "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
+ av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
+ if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
+ enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
+ ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
+ av_log(NULL, AV_LOG_WARNING,
+ "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
+ "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
+ av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
+ enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
+
+ ost->st->avg_frame_rate = ost->frame_rate;
+
+ if (!dec_ctx ||
+ enc_ctx->width != dec_ctx->width ||
+ enc_ctx->height != dec_ctx->height ||
+ enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
+ enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
+ }
+
+ if (ost->forced_keyframes) {
+ if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
+ ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
+ forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
+ return ret;
+ }
+ ost->forced_keyframes_expr_const_values[FKF_N] = 0;
+ ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
+ ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
+ ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
+
+ // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
+ // parse it only for static kf timings
+ } else if(strncmp(ost->forced_keyframes, "source", 6)) {
+ parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
+ }
+ }
+ break;
+ case AVMEDIA_TYPE_SUBTITLE:
+ enc_ctx->time_base = (AVRational){1, 1000};
+ if (!enc_ctx->width) {
+ enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
+ enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
+ }
+ break;
+ case AVMEDIA_TYPE_DATA:
+ break;
+ default:
+ abort();
+ break;
+ }
+ }
+
+ if (ost->disposition) {
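+ /* Build a temporary option table and AVClass so that av_opt_eval_flags()
+ * can parse the user-supplied disposition string into AV_DISPOSITION_* bits. */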
+ static const AVOption opts[] = {
+ { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
+ { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
+ { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
+ { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
+ { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
+ { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
+ { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
+ { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
+ { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
+ { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
+ { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
+ { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
+ { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
+ { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
+ { NULL },
+ };
+ static const AVClass class = {
+ .class_name = "",
+ .item_name = av_default_item_name,
+ .option = opts,
+ .version = LIBAVUTIL_VERSION_INT,
+ };
+ const AVClass *pclass = &class;
+
+ ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
+ if (ret < 0)
+ goto dump_format;
+ }
+ }
+
+ /* open each encoder */
+ for (i = 0; i < nb_output_streams; i++) {
+ ret = init_output_stream(output_streams[i], error, sizeof(error));
+ if (ret < 0)
+ goto dump_format;
+ }
+
+ /* init input streams */
+ for (i = 0; i < nb_input_streams; i++)
+ if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
+ for (i = 0; i < nb_output_streams; i++) {
+ ost = output_streams[i];
+ avcodec_close(ost->enc_ctx);
+ }
+ goto dump_format;
+ }
+
+ /* discard unused programs */
+ for (i = 0; i < nb_input_files; i++) {
+ InputFile *ifile = input_files[i];
+ for (j = 0; j < ifile->ctx->nb_programs; j++) {
+ AVProgram *p = ifile->ctx->programs[j];
+ int discard = AVDISCARD_ALL;
+
+ for (k = 0; k < p->nb_stream_indexes; k++)
+ if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
+ discard = AVDISCARD_DEFAULT;
+ break;
+ }
+ p->discard = discard;
+ }
+ }
+
+ /* open files and write file headers */
+ for (i = 0; i < nb_output_files; i++) {
+ oc = output_files[i]->ctx;
+ oc->interrupt_callback = int_cb;
+ if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
+ snprintf(error, sizeof(error),
+ "Could not write header for output file #%d "
+ "(incorrect codec parameters ?): %s",
+ i, av_err2str(ret));
+ ret = AVERROR(EINVAL);
+ goto dump_format;
+ }
+// assert_avoptions(output_files[i]->opts);
+ if (strcmp(oc->oformat->name, "rtp")) {
+ want_sdp = 0;
+ }
+ }
+
+ dump_format:
+ /* dump the output file parameters - this cannot be done earlier in case
+ of stream copy */
+ for (i = 0; i < nb_output_files; i++) {
+ av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
+ }
+
+ /* dump the stream mapping */
+ av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
+ for (i = 0; i < nb_input_streams; i++) {
+ ist = input_streams[i];
+
+ for (j = 0; j < ist->nb_filters; j++) {
+ if (ist->filters[j]->graph->graph_desc) {
+ av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
+ ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
+ ist->filters[j]->name);
+ if (nb_filtergraphs > 1)
+ av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
+ av_log(NULL, AV_LOG_INFO, "\n");
+ }
+ }
+ }
+
+ for (i = 0; i < nb_output_streams; i++) {
+ ost = output_streams[i];
+
+ if (ost->attachment_filename) {
+ /* an attached file */
+ av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
+ ost->attachment_filename, ost->file_index, ost->index);
+ continue;
+ }
+
+ if (ost->filter && ost->filter->graph->graph_desc) {
+ /* output from a complex graph */
+ av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
+ if (nb_filtergraphs > 1)
+ av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
+
+ av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
+ ost->index, ost->enc ? ost->enc->name : "?");
+ continue;
+ }
+
+ av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
+ input_streams[ost->source_index]->file_index,
+ input_streams[ost->source_index]->st->index,
+ ost->file_index,
+ ost->index);
+ if (ost->sync_ist != input_streams[ost->source_index])
+ av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
+ ost->sync_ist->file_index,
+ ost->sync_ist->st->index);
+ if (ost->stream_copy)
+ av_log(NULL, AV_LOG_INFO, " (copy)");
+ else {
+ const AVCodec *in_codec = input_streams[ost->source_index]->dec;
+ const AVCodec *out_codec = ost->enc;
+ const char *decoder_name = "?";
+ const char *in_codec_name = "?";
+ const char *encoder_name = "?";
+ const char *out_codec_name = "?";
+ const AVCodecDescriptor *desc;
+
+ if (in_codec) {
+ decoder_name = in_codec->name;
+ desc = avcodec_descriptor_get(in_codec->id);
+ if (desc)
+ in_codec_name = desc->name;
+ if (!strcmp(decoder_name, in_codec_name))
+ decoder_name = "native";
+ }
+
+ if (out_codec) {
+ encoder_name = out_codec->name;
+ desc = avcodec_descriptor_get(out_codec->id);
+ if (desc)
+ out_codec_name = desc->name;
+ if (!strcmp(encoder_name, out_codec_name))
+ encoder_name = "native";
+ }
+
+ av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
+ in_codec_name, decoder_name,
+ out_codec_name, encoder_name);
+ }
+ av_log(NULL, AV_LOG_INFO, "\n");
+ }
+
+ if (ret) {
+ av_log(NULL, AV_LOG_ERROR, "%s\n", error);
+ return ret;
+ }
+
+ if (sdp_filename || want_sdp) {
+ print_sdp();
+ }
+
+ transcode_init_done = 1;
+
+ return 0;
+}
+
+/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
+static int need_output(void)
+{
+ int i;
+
+ for (i = 0; i < nb_output_streams; i++) {
+ OutputStream *ost = output_streams[i];
+ OutputFile *of = output_files[ost->file_index];
+ AVFormatContext *os = output_files[ost->file_index]->ctx;
+
+ if (ost->finished ||
+ (os->pb && avio_tell(os->pb) >= of->limit_filesize))
+ continue;
+ if (ost->frame_number >= ost->max_frames) {
+ int j;
+ for (j = 0; j < of->ctx->nb_streams; j++)
+ close_output_stream(output_streams[of->ost_index + j]);
+ continue;
+ }
+
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * Select the output stream to process.
+ *
+ * @return selected output stream, or NULL if none available
+ */
+static OutputStream *choose_output(void)
+{
+ int i;
+ int64_t opts_min = INT64_MAX;
+ OutputStream *ost_min = NULL;
+
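+ /* Pick the unfinished stream whose last muxed DTS is smallest, i.e. the one
+ * that is furthest behind, so all outputs advance roughly together; if that
+ * stream is currently unavailable, return NULL. */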
+ for (i = 0; i < nb_output_streams; i++) {
+ OutputStream *ost = output_streams[i];
+ int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
+ AV_TIME_BASE_Q);
+ if (!ost->finished && opts < opts_min) {
+ opts_min = opts;
+ ost_min = ost->unavailable ? NULL : ost;
+ }
+ }
+ return ost_min;
+}
+
+static int check_keyboard_interaction(int64_t cur_time)
+{
+ int i, ret, key;
+ static int64_t last_time;
+ if (received_nb_signals)
+ return AVERROR_EXIT;
+ /* read_key() returns 0 on EOF */
+ if(cur_time - last_time >= 100000 && !run_as_daemon){
+ key = read_key();
+ last_time = cur_time;
+ }else
+ key = -1;
+ if (key == 'q')
+ return AVERROR_EXIT;
+ if (key == '+') av_log_set_level(av_log_get_level()+10);
+ if (key == '-') av_log_set_level(av_log_get_level()-10);
+ if (key == 's') qp_hist ^= 1;
+ if (key == 'h'){
+ if (do_hex_dump){
+ do_hex_dump = do_pkt_dump = 0;
+ } else if(do_pkt_dump){
+ do_hex_dump = 1;
+ } else
+ do_pkt_dump = 1;
+ av_log_set_level(AV_LOG_DEBUG);
+ }
+ if (key == 'c' || key == 'C'){
+ char buf[4096], target[64], command[256], arg[256] = {0};
+ double time;
+ int k, n = 0;
+ fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
+ i = 0;
+ while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
+ if (k > 0)
+ buf[i++] = k;
+ buf[i] = 0;
+ if (k > 0 &&
+ (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
+ av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
+ target, time, command, arg);
+ for (i = 0; i < nb_filtergraphs; i++) {
+ FilterGraph *fg = filtergraphs[i];
+ if (fg->graph) {
+ if (time < 0) {
+ ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
+ key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
+ fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
+ } else if (key == 'c') {
+ fprintf(stderr, "Queueing commands only on filters supporting the specific command is unsupported\n");
+ ret = AVERROR_PATCHWELCOME;
+ } else {
+ ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
+ if (ret < 0)
+ fprintf(stderr, "Queueing command failed with error %s\n", av_err2str(ret));
+ }
+ }
+ }
+ } else {
+ av_log(NULL, AV_LOG_ERROR,
+ "Parse error, at least 3 arguments were expected, "
+ "only %d given in string '%s'\n", n, buf);
+ }
+ }
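+ /* 'D' cycles through the codec debug flags, 'd' reads a numeric debug value
+ * from the terminal; the value is applied to all input decoders and output
+ * encoders. */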
+ if (key == 'd' || key == 'D'){
+ int debug=0;
+ if(key == 'D') {
+ debug = input_streams[0]->st->codec->debug<<1;
+ if(!debug) debug = 1;
+ while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
+ debug += debug;
+ }else
+ if(scanf("%d", &debug)!=1)
+ fprintf(stderr,"error parsing debug value\n");
+ for(i=0;i<nb_input_streams;i++) {
+ input_streams[i]->st->codec->debug = debug;
+ }
+ for(i=0;i<nb_output_streams;i++) {
+ OutputStream *ost = output_streams[i];
+ ost->enc_ctx->debug = debug;
+ }
+ if(debug) av_log_set_level(AV_LOG_DEBUG);
+ fprintf(stderr,"debug=%d\n", debug);
+ }
+ if (key == '?'){
+ fprintf(stderr, "key function\n"
+ "? show this help\n"
+ "+ increase verbosity\n"
+ "- decrease verbosity\n"
+ "c Send command to first matching filter supporting it\n"
+ "C Send/Queue command to all matching filters\n"
+ "D cycle through available debug modes\n"
+ "h dump packets/hex; press to cycle through the 3 states\n"
+ "q quit\n"
+ "s Show QP histogram\n"
+ );
+ }
+ return 0;
+}
+
+#if HAVE_PTHREADS
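+/* Demuxing thread for one input file: reads packets with av_read_frame() and
+ * hands them to the main thread through a thread message queue; if the
+ * non-blocking send fails because the queue is full, fall back to a blocking
+ * send and warn about the queue size. */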
+static void *input_thread(void *arg)
+{
+ InputFile *f = arg;
+ unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
+ int ret = 0;
+
+ while (1) {
+ AVPacket pkt;
+ ret = av_read_frame(f->ctx, &pkt);
+
+ if (ret == AVERROR(EAGAIN)) {
+ av_usleep(10000);
+ continue;
+ }
+ if (ret < 0) {
+ av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
+ break;
+ }
+ av_dup_packet(&pkt);
+ ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
+ if (flags && ret == AVERROR(EAGAIN)) {
+ flags = 0;
+ ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
+ av_log(f->ctx, AV_LOG_WARNING,
+ "Thread message queue blocking; consider raising the "
+ "thread_queue_size option (current value: %d)\n",
+ f->thread_queue_size);
+ }
+ if (ret < 0) {
+ if (ret != AVERROR_EOF)
+ av_log(f->ctx, AV_LOG_ERROR,
+ "Unable to send packet to main thread: %s\n",
+ av_err2str(ret));
+ av_free_packet(&pkt);
+ av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
+ break;
+ }
+ }
+
+ return NULL;
+}
+
+static void free_input_threads(void)
+{
+ int i;
+
+ for (i = 0; i < nb_input_files; i++) {
+ InputFile *f = input_files[i];
+ AVPacket pkt;
+
+ if (!f || !f->in_thread_queue)
+ continue;
+ av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
+ while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
+ av_free_packet(&pkt);
+
+ pthread_join(f->thread, NULL);
+ f->joined = 1;
+ av_thread_message_queue_free(&f->in_thread_queue);
+ }
+}
+
+static int init_input_threads(void)
+{
+ int i, ret;
+
+ if (nb_input_files == 1)
+ return 0;
+
+ for (i = 0; i < nb_input_files; i++) {
+ InputFile *f = input_files[i];
+
+ if (f->ctx->pb ? !f->ctx->pb->seekable :
+ strcmp(f->ctx->iformat->name, "lavfi"))
+ f->non_blocking = 1;
+ ret = av_thread_message_queue_alloc(&f->in_thread_queue,
+ f->thread_queue_size, sizeof(AVPacket));
+ if (ret < 0)
+ return ret;
+
+ if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
+ av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
+ av_thread_message_queue_free(&f->in_thread_queue);
+ return AVERROR(ret);
+ }
+ }
+ return 0;
+}
+
+static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
+{
+ return av_thread_message_queue_recv(f->in_thread_queue, pkt,
+ f->non_blocking ?
+ AV_THREAD_MESSAGE_NONBLOCK : 0);
+}
+#endif
+
+static int get_input_packet(InputFile *f, AVPacket *pkt)
+{
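+ /* With rate emulation (-re), do not return a packet until its stream's DTS
+ * has been reached in wall-clock time. */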
+ if (f->rate_emu) {
+ int i;
+ for (i = 0; i < f->nb_streams; i++) {
+ InputStream *ist = input_streams[f->ist_index + i];
+ int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
+ int64_t now = av_gettime_relative() - ist->start;
+ if (pts > now)
+ return AVERROR(EAGAIN);
+ }
+ }
+
+#if HAVE_PTHREADS
+ if (nb_input_files > 1)
+ return get_input_packet_mt(f, pkt);
+#endif
+ return av_read_frame(f->ctx, pkt);
+}
+
+static int got_eagain(void)
+{
+ int i;
+ for (i = 0; i < nb_output_streams; i++)
+ if (output_streams[i]->unavailable)
+ return 1;
+ return 0;
+}
+
+static void reset_eagain(void)
+{
+ int i;
+ for (i = 0; i < nb_input_files; i++)
+ input_files[i]->eagain = 0;
+ for (i = 0; i < nb_output_streams; i++)
+ output_streams[i]->unavailable = 0;
+}
+
+/*
+ * Return
+ * - 0 -- one packet was read and processed
+ * - AVERROR(EAGAIN) -- no packets were available for selected file,
+ * this function should be called again
+ * - AVERROR_EOF -- this function should not be called again
+ */
+static int process_input(int file_index)
+{
+ InputFile *ifile = input_files[file_index];
+ AVFormatContext *is;
+ InputStream *ist;
+ AVPacket pkt;
+ int ret, i, j;
+
+ is = ifile->ctx;
+ ret = get_input_packet(ifile, &pkt);
+
+ if (ret == AVERROR(EAGAIN)) {
+ ifile->eagain = 1;
+ return ret;
+ }
+ if (ret < 0) {
+ if (ret != AVERROR_EOF) {
+ print_error(is->filename, ret);
+ if (exit_on_error)
+ exit_program(1);
+ }
+
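+ /* On end of input (or a non-fatal read error), flush each active decoder of
+ * this file by feeding it NULL packets, then mark the matching outputs that
+ * do not go through lavfi as finished. */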
+ for (i = 0; i < ifile->nb_streams; i++) {
+ ist = input_streams[ifile->ist_index + i];
+ if (ist->decoding_needed) {
+ ret = process_input_packet(ist, NULL);
+ if (ret>0)
+ return 0;
+ }
+
+ /* mark all outputs that don't go through lavfi as finished */
+ for (j = 0; j < nb_output_streams; j++) {
+ OutputStream *ost = output_streams[j];
+
+ if (ost->source_index == ifile->ist_index + i &&
+ (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
+ finish_output_stream(ost);
+ }
+ }
+
+ ifile->eof_reached = 1;
+ return AVERROR(EAGAIN);
+ }
+
+ reset_eagain();
+
+ if (do_pkt_dump) {
+ av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
+ is->streams[pkt.stream_index]);
+ }
+ /* the following test is needed in case new streams appear
+ dynamically in the stream: we ignore them */
+ if (pkt.stream_index >= ifile->nb_streams) {
+ report_new_stream(file_index, &pkt);
+ goto discard_packet;
+ }
+
+ ist = input_streams[ifile->ist_index + pkt.stream_index];
+
+ ist->data_size += pkt.size;
+ ist->nb_packets++;
+
+ if (ist->discard)
+ goto discard_packet;
+
+ if (debug_ts) {
+ av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
+ "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
+ ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
+ av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
+ av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
+ av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
+ av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
+ av_ts2str(input_files[ist->file_index]->ts_offset),
+ av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
+ }
+
+ if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
+ int64_t stime, stime2;
+ // Correct the start time based on the enabled streams.
+ // FIXME: This should ideally be done before the first use of the start time, but we do not know
+ // which streams are enabled at that point, so we do it here as part of discontinuity handling instead.
+ if ( ist->next_dts == AV_NOPTS_VALUE
+ && ifile->ts_offset == -is->start_time
+ && (is->iformat->flags & AVFMT_TS_DISCONT)) {
+ int64_t new_start_time = INT64_MAX;
+ for (i=0; i<is->nb_streams; i++) {
+ AVStream *st = is->streams[i];
+ if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
+ continue;
+ new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
+ }
+ if (new_start_time > is->start_time) {
+ av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
+ ifile->ts_offset = -new_start_time;
+ }
+ }
+
+ stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
+ stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
+ ist->wrap_correction_done = 1;
+
+ if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
+ pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
+ ist->wrap_correction_done = 0;
+ }
+ if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
+ pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
+ ist->wrap_correction_done = 0;
+ }
+ }
+
+ /* add the stream-global side data to the first packet */
+ if (ist->nb_packets == 1) {
+ if (ist->st->nb_side_data)
+ av_packet_split_side_data(&pkt);
+ for (i = 0; i < ist->st->nb_side_data; i++) {
+ AVPacketSideData *src_sd = &ist->st->side_data[i];
+ uint8_t *dst_data;
+
+ if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
+ continue;
+ if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
+ continue;
+
+ dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
+ if (!dst_data)
+ exit_program(1);
+
+ memcpy(dst_data, src_sd->data, src_sd->size);
+ }
+ }
+
+ if (pkt.dts != AV_NOPTS_VALUE)
+ pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
+ if (pkt.pts != AV_NOPTS_VALUE)
+ pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
+
+ if (pkt.pts != AV_NOPTS_VALUE)
+ pkt.pts *= ist->ts_scale;
+ if (pkt.dts != AV_NOPTS_VALUE)
+ pkt.dts *= ist->ts_scale;
+
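+ /* For the first packet of a stream in a format that may contain timestamp
+ * discontinuities, compare its DTS against the last timestamp seen in this
+ * file and fold any large jump into the file's ts_offset. */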
+ if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
+ ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
+ pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
+ && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
+ int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
+ int64_t delta = pkt_dts - ifile->last_ts;
+ if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
+ delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
+ ifile->ts_offset -= delta;
+ av_log(NULL, AV_LOG_DEBUG,
+ "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
+ delta, ifile->ts_offset);
+ pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
+ if (pkt.pts != AV_NOPTS_VALUE)
+ pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
+ }
+ }
+
+ if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
+ ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
+ pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
+ !copy_ts) {
+ int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
+ int64_t delta = pkt_dts - ist->next_dts;
+ if (is->iformat->flags & AVFMT_TS_DISCONT) {
+ if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
+ delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
+ pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
+ ifile->ts_offset -= delta;
+ av_log(NULL, AV_LOG_DEBUG,
+ "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
+ delta, ifile->ts_offset);
+ pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
+ if (pkt.pts != AV_NOPTS_VALUE)
+ pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
+ }
+ } else {
+ if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
+ delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
+ av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
+ pkt.dts = AV_NOPTS_VALUE;
+ }
+ if (pkt.pts != AV_NOPTS_VALUE){
+ int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
+ delta = pkt_pts - ist->next_dts;
+ if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
+ delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
+ av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
+ pkt.pts = AV_NOPTS_VALUE;
+ }
+ }
+ }
+ }
+
+ if (pkt.dts != AV_NOPTS_VALUE)
+ ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
+
+ if (debug_ts) {
+ av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
+ ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
+ av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
+ av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
+ av_ts2str(input_files[ist->file_index]->ts_offset),
+ av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
+ }
+
+ sub2video_heartbeat(ist, pkt.pts);
+
+ process_input_packet(ist, &pkt);
+
+discard_packet:
+ av_free_packet(&pkt);
+
+ return 0;
+}
+
+/**
+ * Perform a step of transcoding for the specified filter graph.
+ *
+ * @param[in] graph filter graph to consider
+ * @param[out] best_ist input stream where a frame would allow to continue
+ * @return 0 for success, <0 for error
+ */
+static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
+{
+ int i, ret;
+ int nb_requests, nb_requests_max = 0;
+ InputFilter *ifilter;
+ InputStream *ist;
+
+ *best_ist = NULL;
+ ret = avfilter_graph_request_oldest(graph->graph);
+ if (ret >= 0)
+ return reap_filters(0);
+
+ if (ret == AVERROR_EOF) {
+ ret = reap_filters(1);
+ for (i = 0; i < graph->nb_outputs; i++)
+ close_output_stream(graph->outputs[i]->ost);
+ return ret;
+ }
+ if (ret != AVERROR(EAGAIN))
+ return ret;
+
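+ /* The graph needs more input: pick the input stream whose buffersrc has
+ * rejected the most frame requests, i.e. the most starved input. */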
+ for (i = 0; i < graph->nb_inputs; i++) {
+ ifilter = graph->inputs[i];
+ ist = ifilter->ist;
+ if (input_files[ist->file_index]->eagain ||
+ input_files[ist->file_index]->eof_reached)
+ continue;
+ nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
+ if (nb_requests > nb_requests_max) {
+ nb_requests_max = nb_requests;
+ *best_ist = ist;
+ }
+ }
+
+ if (!*best_ist)
+ for (i = 0; i < graph->nb_outputs; i++)
+ graph->outputs[i]->ost->unavailable = 1;
+
+ return 0;
+}
+
+/**
+ * Run a single step of transcoding.
+ *
+ * @return 0 for success, <0 for error
+ */
+static int transcode_step(void)
+{
+ OutputStream *ost;
+ InputStream *ist;
+ int ret;
+
+ ost = choose_output();
+ if (!ost) {
+ if (got_eagain()) {
+ reset_eagain();
+ av_usleep(10000);
+ return 0;
+ }
+ av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
+ return AVERROR_EOF;
+ }
+
+ if (ost->filter) {
+ if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
+ return ret;
+ if (!ist)
+ return 0;
+ } else {
+ av_assert0(ost->source_index >= 0);
+ ist = input_streams[ost->source_index];
+ }
+
+ ret = process_input(ist->file_index);
+ if (ret == AVERROR(EAGAIN)) {
+ if (input_files[ist->file_index]->eagain)
+ ost->unavailable = 1;
+ return 0;
+ }
+
+ if (ret < 0)
+ return ret == AVERROR_EOF ? 0 : ret;
+
+ return reap_filters(0);
+}
+
+/*
+ * The following code is the main loop of the file converter
+ */
+static int transcode(void)
+{
+ int ret, i;
+ AVFormatContext *os;
+ OutputStream *ost;
+ InputStream *ist;
+ int64_t timer_start;
+
+ ret = transcode_init();
+ if (ret < 0)
+ goto fail;
+
+ if (stdin_interaction) {
+ av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
+ }
+
+ timer_start = av_gettime_relative();
+
+#if HAVE_PTHREADS
+ if ((ret = init_input_threads()) < 0)
+ goto fail;
+#endif
+
+ while (!received_sigterm) {
+ int64_t cur_time= av_gettime_relative();
+
+ /* if 'q' is pressed, exit */
+ if (stdin_interaction)
+ if (check_keyboard_interaction(cur_time) < 0)
+ break;
+
+ /* check if there's any stream where output is still needed */
+ if (!need_output()) {
+ av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
+ break;
+ }
+
+ ret = transcode_step();
+ if (ret < 0) {
+ if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
+ continue;
+ } else {
+ char errbuf[128];
+ av_strerror(ret, errbuf, sizeof(errbuf));
+
+ av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
+ break;
+ }
+ }
+
+ /* dump report by using the first output video and audio streams */
+ print_report(0, timer_start, cur_time);
+ }
+#if HAVE_PTHREADS
+ free_input_threads();
+#endif
+
+ /* at the end of stream, we must flush the decoder buffers */
+ for (i = 0; i < nb_input_streams; i++) {
+ ist = input_streams[i];
+ if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
+ process_input_packet(ist, NULL);
+ }
+ }
+ flush_encoders();
+
+ term_exit();
+
+ /* write the trailer if needed and close file */
+ for (i = 0; i < nb_output_files; i++) {
+ os = output_files[i]->ctx;
+ av_write_trailer(os);
+ }
+
+ /* dump report by using the first video and audio streams */
+ print_report(1, timer_start, av_gettime_relative());
+
+ /* close each encoder */
+ for (i = 0; i < nb_output_streams; i++) {
+ ost = output_streams[i];
+ if (ost->encoding_needed) {
+ av_freep(&ost->enc_ctx->stats_in);
+ }
+ }
+
+ /* close each decoder */
+ for (i = 0; i < nb_input_streams; i++) {
+ ist = input_streams[i];
+ if (ist->decoding_needed) {
+ avcodec_close(ist->dec_ctx);
+ if (ist->hwaccel_uninit)
+ ist->hwaccel_uninit(ist->dec_ctx);
+ }
+ }
+
+ /* finished! */
+ ret = 0;
+
+ fail:
+#if HAVE_PTHREADS
+ free_input_threads();
+#endif
+
+ if (output_streams) {
+ for (i = 0; i < nb_output_streams; i++) {
+ ost = output_streams[i];
+ if (ost) {
+ if (ost->logfile) {
+ fclose(ost->logfile);
+ ost->logfile = NULL;
+ }
+ av_freep(&ost->forced_kf_pts);
+ av_freep(&ost->apad);
+ av_freep(&ost->disposition);
+ av_dict_free(&ost->encoder_opts);
+ av_dict_free(&ost->swr_opts);
+ av_dict_free(&ost->resample_opts);
+ av_dict_free(&ost->bsf_args);
+ }
+ }
+ }
+ return ret;
+}
+
+
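+/* Return the user CPU time consumed by this process in microseconds, falling
+ * back to a wall-clock timestamp where CPU time is unavailable. */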
+static int64_t getutime(void)
+{
+#if HAVE_GETRUSAGE
+ struct rusage rusage;
+
+ getrusage(RUSAGE_SELF, &rusage);
+ return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
+#elif HAVE_GETPROCESSTIMES
+ HANDLE proc;
+ FILETIME c, e, k, u;
+ proc = GetCurrentProcess();
+ GetProcessTimes(proc, &c, &e, &k, &u);
+ return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
+#else
+ return av_gettime_relative();
+#endif
+}
+
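+/* Return the peak memory usage of this process in bytes (max RSS or peak
+ * pagefile usage), or 0 if it cannot be determined. */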
+static int64_t getmaxrss(void)
+{
+#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
+ struct rusage rusage;
+ getrusage(RUSAGE_SELF, &rusage);
+ return (int64_t)rusage.ru_maxrss * 1024;
+#elif HAVE_GETPROCESSMEMORYINFO
+ HANDLE proc;
+ PROCESS_MEMORY_COUNTERS memcounters;
+ proc = GetCurrentProcess();
+ memcounters.cb = sizeof(memcounters);
+ GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
+ return memcounters.PeakPagefileUsage;
+#else
+ return 0;
+#endif
+}
+
+static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
+{
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+ int64_t ti;
+
+ register_exit(ffmpeg_cleanup);
+
+ setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
+
+ av_log_set_flags(AV_LOG_SKIP_REPEATED);
+ parse_loglevel(argc, argv, options);
+
+ if(argc>1 && !strcmp(argv[1], "-d")){
+ run_as_daemon=1;
+ av_log_set_callback(log_callback_null);
+ argc--;
+ argv++;
+ }
+
+ avcodec_register_all();
+#if CONFIG_AVDEVICE
+ avdevice_register_all();
+#endif
+ avfilter_register_all();
+ av_register_all();
+ avformat_network_init();
+
+ show_banner(argc, argv, options);
+
+ term_init();
+
+ /* parse options and open all input/output files */
+ ret = ffmpeg_parse_options(argc, argv);
+ if (ret < 0)
+ exit_program(1);
+
+ if (nb_output_files <= 0 && nb_input_files == 0) {
+ show_usage();
+ av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
+ exit_program(1);
+ }
+
+ /* file converter / grab */
+ if (nb_output_files <= 0) {
+ av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
+ exit_program(1);
+ }
+
+// if (nb_input_files == 0) {
+// av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
+// exit_program(1);
+// }
+
+ current_time = ti = getutime();
+ if (transcode() < 0)
+ exit_program(1);
+ ti = getutime() - ti;
+ if (do_benchmark) {
+ av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
+ }
+ av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
+ decode_error_stat[0], decode_error_stat[1]);
+ if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
+ exit_program(69);
+
+ exit_program(received_nb_signals ? 255 : main_return_code);
+ return main_return_code;
+}
sar = ist->st->sample_aspect_ratio.num ?
ist->st->sample_aspect_ratio :
ist->dec_ctx->sample_aspect_ratio;
- snprintf(args, sizeof(args), "%d:%d:%d:%d:%d:%d:%d", ist->dec_ctx->width,
- ist->dec_ctx->height,
- ist->hwaccel_retrieve_data ? ist->hwaccel_retrieved_pix_fmt : ist->dec_ctx->pix_fmt,
- tb.num, tb.den, sar.num, sar.den);
+ if(!sar.den)
+ sar = (AVRational){0,1};
+ av_bprint_init(&args, 0, 1);
+ av_bprintf(&args,
+ "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
+ "pixel_aspect=%d/%d:sws_param=flags=%d", ist->resample_width,
+ ist->resample_height,
+ ist->hwaccel_retrieve_data ? ist->hwaccel_retrieved_pix_fmt : ist->resample_pix_fmt,
+ tb.num, tb.den, sar.num, sar.den,
- SWS_BILINEAR + ((ist->dec_ctx->flags&CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
++ SWS_BILINEAR + ((ist->dec_ctx->flags&AV_CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
+ if (fr.num && fr.den)
+ av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
ist->file_index, ist->st->index);
ost->enc_ctx->global_quality = FF_QP2LAMBDA * qscale;
}
+ MATCH_PER_STREAM_OPT(disposition, str, ost->disposition, oc, st);
+ ost->disposition = av_strdup(ost->disposition);
+
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
- ost->enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
+ ost->enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
av_opt_get_int(o->g->sws_opts, "sws_flags", 0, &ost->sws_flags);
if (p) p++;
}
video_enc->rc_override_count = i;
- video_enc->intra_dc_precision = intra_dc_precision - 8;
+
+ if (do_psnr)
- video_enc->flags|= CODEC_FLAG_PSNR;
++ video_enc->flags|= AV_CODEC_FLAG_PSNR;
/* two pass mode */
MATCH_PER_STREAM_OPT(pass, i, do_pass, oc, st);
if (do_pass) {
- if (do_pass == 1) {
+ if (do_pass & 1) {
- video_enc->flags |= CODEC_FLAG_PASS1;
+ video_enc->flags |= AV_CODEC_FLAG_PASS1;
- } else {
+ av_dict_set(&ost->encoder_opts, "flags", "+pass1", AV_DICT_APPEND);
+ }
+ if (do_pass & 2) {
- video_enc->flags |= CODEC_FLAG_PASS2;
+ video_enc->flags |= AV_CODEC_FLAG_PASS2;
+ av_dict_set(&ost->encoder_opts, "flags", "+pass2", AV_DICT_APPEND);
}
}
if (!strcmp(ost->enc->name, "libx264")) {
av_dict_set(&ost->encoder_opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
} else {
- if (video_enc->flags & CODEC_FLAG_PASS2) {
- if (video_enc->flags & AV_CODEC_FLAG_PASS1) {
- f = fopen(logfilename, "wb");
- if (!f) {
- av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
- logfilename, strerror(errno));
- exit_program(1);
- }
- ost->logfile = f;
- } else {
++ if (video_enc->flags & AV_CODEC_FLAG_PASS2) {
char *logbuffer = read_file(logfilename);
if (!logbuffer) {
}
video_enc->stats_in = logbuffer;
}
- if (video_enc->flags & CODEC_FLAG_PASS1) {
++ if (video_enc->flags & AV_CODEC_FLAG_PASS1) {
+ f = av_fopen_utf8(logfilename, "wb");
+ if (!f) {
+ av_log(NULL, AV_LOG_FATAL,
+ "Cannot write log file '%s' for pass-1 encoding: %s\n",
+ logfilename, strerror(errno));
+ exit_program(1);
+ }
+ ost->logfile = f;
+ }
}
}
--- /dev/null
- if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
+/*
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * simple media player based on the FFmpeg libraries
+ */
+
+#include "config.h"
+#include <inttypes.h>
+#include <math.h>
+#include <limits.h>
+#include <signal.h>
+#include <stdint.h>
+
+#include "libavutil/avstring.h"
+#include "libavutil/colorspace.h"
+#include "libavutil/eval.h"
+#include "libavutil/mathematics.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/dict.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/samplefmt.h"
+#include "libavutil/avassert.h"
+#include "libavutil/time.h"
+#include "libavformat/avformat.h"
+#include "libavdevice/avdevice.h"
+#include "libswscale/swscale.h"
+#include "libavutil/opt.h"
+#include "libavcodec/avfft.h"
+#include "libswresample/swresample.h"
+
+#if CONFIG_AVFILTER
+# include "libavfilter/avcodec.h"
+# include "libavfilter/avfilter.h"
+# include "libavfilter/buffersink.h"
+# include "libavfilter/buffersrc.h"
+#endif
+
+#include <SDL.h>
+#include <SDL_thread.h>
+
+#include "cmdutils.h"
+
+#include <assert.h>
+
+const char program_name[] = "ffplay";
+const int program_birth_year = 2003;
+
+#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
+#define MIN_FRAMES 5
+
+/* Minimum SDL audio buffer size, in samples. */
+#define SDL_AUDIO_MIN_BUFFER_SIZE 512
+/* Calculate actual buffer size, keeping in mind not to cause too frequent audio callbacks */
+#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
+
+/* no AV sync correction is done if below the minimum AV sync threshold */
+#define AV_SYNC_THRESHOLD_MIN 0.04
+/* AV sync correction is done if above the maximum AV sync threshold */
+#define AV_SYNC_THRESHOLD_MAX 0.1
+/* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
+#define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
+/* no AV correction is done if the error is too big */
+#define AV_NOSYNC_THRESHOLD 10.0
+
+/* maximum audio speed change to get correct sync */
+#define SAMPLE_CORRECTION_PERCENT_MAX 10
+
+/* external clock speed adjustment constants for realtime sources based on buffer fullness */
+#define EXTERNAL_CLOCK_SPEED_MIN 0.900
+#define EXTERNAL_CLOCK_SPEED_MAX 1.010
+#define EXTERNAL_CLOCK_SPEED_STEP 0.001
+
+/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
+#define AUDIO_DIFF_AVG_NB 20
+
+/* polls for possible required screen refresh at least this often, should be less than 1/fps */
+#define REFRESH_RATE 0.01
+
+/* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
+/* TODO: We assume that a decoded and resampled frame fits into this buffer */
+#define SAMPLE_ARRAY_SIZE (8 * 65536)
+
+#define CURSOR_HIDE_DELAY 1000000
+
+static int64_t sws_flags = SWS_BICUBIC;
+
+typedef struct MyAVPacketList {
+ AVPacket pkt;
+ struct MyAVPacketList *next;
+ int serial;
+} MyAVPacketList;
+
+typedef struct PacketQueue {
+ MyAVPacketList *first_pkt, *last_pkt;
+ int nb_packets;
+ int size;
+ int abort_request;
+ int serial;
+ SDL_mutex *mutex;
+ SDL_cond *cond;
+} PacketQueue;
+
+#define VIDEO_PICTURE_QUEUE_SIZE 3
+#define SUBPICTURE_QUEUE_SIZE 16
+#define SAMPLE_QUEUE_SIZE 9
+#define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
+
+typedef struct AudioParams {
+ int freq;
+ int channels;
+ int64_t channel_layout;
+ enum AVSampleFormat fmt;
+ int frame_size;
+ int bytes_per_sec;
+} AudioParams;
+
+typedef struct Clock {
+ double pts; /* clock base */
+ double pts_drift; /* clock base minus time at which we updated the clock */
+ double last_updated;
+ double speed;
+ int serial; /* clock is based on a packet with this serial */
+ int paused;
+ int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
+} Clock;
+
+/* Common struct for handling all types of decoded data and allocated render buffers. */
+typedef struct Frame {
+ AVFrame *frame;
+ AVSubtitle sub;
+ int serial;
+ double pts; /* presentation timestamp for the frame */
+ double duration; /* estimated duration of the frame */
+ int64_t pos; /* byte position of the frame in the input file */
+ SDL_Overlay *bmp;
+ int allocated;
+ int reallocate;
+ int width;
+ int height;
+ AVRational sar;
+} Frame;
+
+typedef struct FrameQueue {
+ Frame queue[FRAME_QUEUE_SIZE];
+ int rindex;
+ int windex;
+ int size;
+ int max_size;
+ int keep_last;
+ int rindex_shown;
+ SDL_mutex *mutex;
+ SDL_cond *cond;
+ PacketQueue *pktq;
+} FrameQueue;
+
+enum {
+ AV_SYNC_AUDIO_MASTER, /* default choice */
+ AV_SYNC_VIDEO_MASTER,
+ AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
+};
+
+typedef struct Decoder {
+ AVPacket pkt;
+ AVPacket pkt_temp;
+ PacketQueue *queue;
+ AVCodecContext *avctx;
+ int pkt_serial;
+ int finished;
+ int packet_pending;
+ SDL_cond *empty_queue_cond;
+ int64_t start_pts;
+ AVRational start_pts_tb;
+ int64_t next_pts;
+ AVRational next_pts_tb;
+ SDL_Thread *decoder_tid;
+} Decoder;
+
+typedef struct VideoState {
+ SDL_Thread *read_tid;
+ AVInputFormat *iformat;
+ int abort_request;
+ int force_refresh;
+ int paused;
+ int last_paused;
+ int queue_attachments_req;
+ int seek_req;
+ int seek_flags;
+ int64_t seek_pos;
+ int64_t seek_rel;
+ int read_pause_return;
+ AVFormatContext *ic;
+ int realtime;
+
+ Clock audclk;
+ Clock vidclk;
+ Clock extclk;
+
+ FrameQueue pictq;
+ FrameQueue subpq;
+ FrameQueue sampq;
+
+ Decoder auddec;
+ Decoder viddec;
+ Decoder subdec;
+
+ int audio_stream;
+
+ int av_sync_type;
+
+ double audio_clock;
+ int audio_clock_serial;
+ double audio_diff_cum; /* used for AV difference average computation */
+ double audio_diff_avg_coef;
+ double audio_diff_threshold;
+ int audio_diff_avg_count;
+ AVStream *audio_st;
+ PacketQueue audioq;
+ int audio_hw_buf_size;
+ uint8_t silence_buf[SDL_AUDIO_MIN_BUFFER_SIZE];
+ uint8_t *audio_buf;
+ uint8_t *audio_buf1;
+ unsigned int audio_buf_size; /* in bytes */
+ unsigned int audio_buf1_size;
+ int audio_buf_index; /* in bytes */
+ int audio_write_buf_size;
+ struct AudioParams audio_src;
+#if CONFIG_AVFILTER
+ struct AudioParams audio_filter_src;
+#endif
+ struct AudioParams audio_tgt;
+ struct SwrContext *swr_ctx;
+ int frame_drops_early;
+ int frame_drops_late;
+
+ enum ShowMode {
+ SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
+ } show_mode;
+ int16_t sample_array[SAMPLE_ARRAY_SIZE];
+ int sample_array_index;
+ int last_i_start;
+ RDFTContext *rdft;
+ int rdft_bits;
+ FFTSample *rdft_data;
+ int xpos;
+ double last_vis_time;
+
+ int subtitle_stream;
+ AVStream *subtitle_st;
+ PacketQueue subtitleq;
+
+ double frame_timer;
+ double frame_last_returned_time;
+ double frame_last_filter_delay;
+ int video_stream;
+ AVStream *video_st;
+ PacketQueue videoq;
+ double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
+#if !CONFIG_AVFILTER
+ struct SwsContext *img_convert_ctx;
+#endif
+ SDL_Rect last_display_rect;
+ int eof;
+
+ char filename[1024];
+ int width, height, xleft, ytop;
+ int step;
+
+#if CONFIG_AVFILTER
+ int vfilter_idx;
+ AVFilterContext *in_video_filter; // the first filter in the video chain
+ AVFilterContext *out_video_filter; // the last filter in the video chain
+ AVFilterContext *in_audio_filter; // the first filter in the audio chain
+ AVFilterContext *out_audio_filter; // the last filter in the audio chain
+ AVFilterGraph *agraph; // audio filter graph
+#endif
+
+ int last_video_stream, last_audio_stream, last_subtitle_stream;
+
+ SDL_cond *continue_read_thread;
+} VideoState;
+
+/* options specified by the user */
+static AVInputFormat *file_iformat;
+static const char *input_filename;
+static const char *window_title;
+static int fs_screen_width;
+static int fs_screen_height;
+static int default_width = 640;
+static int default_height = 480;
+static int screen_width = 0;
+static int screen_height = 0;
+static int audio_disable;
+static int video_disable;
+static int subtitle_disable;
+static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
+static int seek_by_bytes = -1;
+static int display_disable;
+static int show_status = 1;
+static int av_sync_type = AV_SYNC_AUDIO_MASTER;
+static int64_t start_time = AV_NOPTS_VALUE;
+static int64_t duration = AV_NOPTS_VALUE;
+static int fast = 0;
+static int genpts = 0;
+static int lowres = 0;
+static int decoder_reorder_pts = -1;
+static int autoexit;
+static int exit_on_keydown;
+static int exit_on_mousedown;
+static int loop = 1;
+static int framedrop = -1;
+static int infinite_buffer = -1;
+static enum ShowMode show_mode = SHOW_MODE_NONE;
+static const char *audio_codec_name;
+static const char *subtitle_codec_name;
+static const char *video_codec_name;
+double rdftspeed = 0.02;
+static int64_t cursor_last_shown;
+static int cursor_hidden = 0;
+#if CONFIG_AVFILTER
+static const char **vfilters_list = NULL;
+static int nb_vfilters = 0;
+static char *afilters = NULL;
+#endif
+static int autorotate = 1;
+
+/* current context */
+static int is_full_screen;
+static int64_t audio_callback_time;
+
+static AVPacket flush_pkt;
+
+#define FF_ALLOC_EVENT (SDL_USEREVENT)
+#define FF_QUIT_EVENT (SDL_USEREVENT + 2)
+
+static SDL_Surface *screen;
+
+#if CONFIG_AVFILTER
+static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
+{
+ GROW_ARRAY(vfilters_list, nb_vfilters);
+ vfilters_list[nb_vfilters - 1] = arg;
+ return 0;
+}
+#endif
+
+static inline
+int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
+ enum AVSampleFormat fmt2, int64_t channel_count2)
+{
+ /* If channel count == 1, planar and non-planar formats are the same */
+ if (channel_count1 == 1 && channel_count2 == 1)
+ return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
+ else
+ return channel_count1 != channel_count2 || fmt1 != fmt2;
+}
+
+static inline
+int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
+{
+ if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
+ return channel_layout;
+ else
+ return 0;
+}
+
+static void free_picture(Frame *vp);
+
+static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
+{
+ MyAVPacketList *pkt1;
+
+ if (q->abort_request)
+ return -1;
+
+ pkt1 = av_malloc(sizeof(MyAVPacketList));
+ if (!pkt1)
+ return -1;
+ pkt1->pkt = *pkt;
+ pkt1->next = NULL;
+ if (pkt == &flush_pkt)
+ q->serial++;
+ pkt1->serial = q->serial;
+
+ if (!q->last_pkt)
+ q->first_pkt = pkt1;
+ else
+ q->last_pkt->next = pkt1;
+ q->last_pkt = pkt1;
+ q->nb_packets++;
+ q->size += pkt1->pkt.size + sizeof(*pkt1);
+ /* XXX: should duplicate packet data in DV case */
+ SDL_CondSignal(q->cond);
+ return 0;
+}
+
+static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
+{
+ int ret;
+
+ /* duplicate the packet */
+ if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
+ return -1;
+
+ SDL_LockMutex(q->mutex);
+ ret = packet_queue_put_private(q, pkt);
+ SDL_UnlockMutex(q->mutex);
+
+ if (pkt != &flush_pkt && ret < 0)
+ av_free_packet(pkt);
+
+ return ret;
+}
+
+static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
+{
+ AVPacket pkt1, *pkt = &pkt1;
+ av_init_packet(pkt);
+ pkt->data = NULL;
+ pkt->size = 0;
+ pkt->stream_index = stream_index;
+ return packet_queue_put(q, pkt);
+}
+
+/* packet queue handling */
+static void packet_queue_init(PacketQueue *q)
+{
+ memset(q, 0, sizeof(PacketQueue));
+ q->mutex = SDL_CreateMutex();
+ q->cond = SDL_CreateCond();
+ q->abort_request = 1;
+}
+
+static void packet_queue_flush(PacketQueue *q)
+{
+ MyAVPacketList *pkt, *pkt1;
+
+ SDL_LockMutex(q->mutex);
+ for (pkt = q->first_pkt; pkt; pkt = pkt1) {
+ pkt1 = pkt->next;
+ av_free_packet(&pkt->pkt);
+ av_freep(&pkt);
+ }
+ q->last_pkt = NULL;
+ q->first_pkt = NULL;
+ q->nb_packets = 0;
+ q->size = 0;
+ SDL_UnlockMutex(q->mutex);
+}
+
+static void packet_queue_destroy(PacketQueue *q)
+{
+ packet_queue_flush(q);
+ SDL_DestroyMutex(q->mutex);
+ SDL_DestroyCond(q->cond);
+}
+
+static void packet_queue_abort(PacketQueue *q)
+{
+ SDL_LockMutex(q->mutex);
+
+ q->abort_request = 1;
+
+ SDL_CondSignal(q->cond);
+
+ SDL_UnlockMutex(q->mutex);
+}
+
+static void packet_queue_start(PacketQueue *q)
+{
+ SDL_LockMutex(q->mutex);
+ q->abort_request = 0;
+ packet_queue_put_private(q, &flush_pkt);
+ SDL_UnlockMutex(q->mutex);
+}
+
+/* return < 0 if aborted, 0 if no packet and > 0 if packet. */
+static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
+{
+ MyAVPacketList *pkt1;
+ int ret;
+
+ SDL_LockMutex(q->mutex);
+
+ for (;;) {
+ if (q->abort_request) {
+ ret = -1;
+ break;
+ }
+
+ pkt1 = q->first_pkt;
+ if (pkt1) {
+ q->first_pkt = pkt1->next;
+ if (!q->first_pkt)
+ q->last_pkt = NULL;
+ q->nb_packets--;
+ q->size -= pkt1->pkt.size + sizeof(*pkt1);
+ *pkt = pkt1->pkt;
+ if (serial)
+ *serial = pkt1->serial;
+ av_free(pkt1);
+ ret = 1;
+ break;
+ } else if (!block) {
+ ret = 0;
+ break;
+ } else {
+ SDL_CondWait(q->cond, q->mutex);
+ }
+ }
+ SDL_UnlockMutex(q->mutex);
+ return ret;
+}
+
+static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
+ memset(d, 0, sizeof(Decoder));
+ d->avctx = avctx;
+ d->queue = queue;
+ d->empty_queue_cond = empty_queue_cond;
+ d->start_pts = AV_NOPTS_VALUE;
+}
+
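+/* Pull packets from the decoder's queue and decode until a frame (or
+ * subtitle) is produced; returns >0 if a frame was decoded, 0 if the stream
+ * is finished, or -1 on abort. */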
+static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
+ int got_frame = 0;
+
+ do {
+ int ret = -1;
+
+ if (d->queue->abort_request)
+ return -1;
+
+ if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
+ AVPacket pkt;
+ do {
+ if (d->queue->nb_packets == 0)
+ SDL_CondSignal(d->empty_queue_cond);
+ if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
+ return -1;
+ if (pkt.data == flush_pkt.data) {
+ avcodec_flush_buffers(d->avctx);
+ d->finished = 0;
+ d->next_pts = d->start_pts;
+ d->next_pts_tb = d->start_pts_tb;
+ }
+ } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
+ av_free_packet(&d->pkt);
+ d->pkt_temp = d->pkt = pkt;
+ d->packet_pending = 1;
+ }
+
+ switch (d->avctx->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
+ if (got_frame) {
+ if (decoder_reorder_pts == -1) {
+ frame->pts = av_frame_get_best_effort_timestamp(frame);
+ } else if (decoder_reorder_pts) {
+ frame->pts = frame->pkt_pts;
+ } else {
+ frame->pts = frame->pkt_dts;
+ }
+ }
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
+ if (got_frame) {
+ AVRational tb = (AVRational){1, frame->sample_rate};
+ if (frame->pts != AV_NOPTS_VALUE)
+ frame->pts = av_rescale_q(frame->pts, d->avctx->time_base, tb);
+ else if (frame->pkt_pts != AV_NOPTS_VALUE)
+ frame->pts = av_rescale_q(frame->pkt_pts, av_codec_get_pkt_timebase(d->avctx), tb);
+ else if (d->next_pts != AV_NOPTS_VALUE)
+ frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
+ if (frame->pts != AV_NOPTS_VALUE) {
+ d->next_pts = frame->pts + frame->nb_samples;
+ d->next_pts_tb = tb;
+ }
+ }
+ break;
+ case AVMEDIA_TYPE_SUBTITLE:
+ ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &d->pkt_temp);
+ break;
+ }
+
+ if (ret < 0) {
+ d->packet_pending = 0;
+ } else {
+ d->pkt_temp.dts =
+ d->pkt_temp.pts = AV_NOPTS_VALUE;
+ if (d->pkt_temp.data) {
+ if (d->avctx->codec_type != AVMEDIA_TYPE_AUDIO)
+ ret = d->pkt_temp.size;
+ d->pkt_temp.data += ret;
+ d->pkt_temp.size -= ret;
+ if (d->pkt_temp.size <= 0)
+ d->packet_pending = 0;
+ } else {
+ if (!got_frame) {
+ d->packet_pending = 0;
+ d->finished = d->pkt_serial;
+ }
+ }
+ }
+ } while (!got_frame && !d->finished);
+
+ return got_frame;
+}
+
+static void decoder_destroy(Decoder *d) {
+ av_free_packet(&d->pkt);
+}
+
+static void frame_queue_unref_item(Frame *vp)
+{
+ av_frame_unref(vp->frame);
+ avsubtitle_free(&vp->sub);
+}
+
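+/* initialize the frame ring buffer and allocate its frames and synchronization primitives */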
+static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
+{
+ int i;
+ memset(f, 0, sizeof(FrameQueue));
+ if (!(f->mutex = SDL_CreateMutex()))
+ return AVERROR(ENOMEM);
+ if (!(f->cond = SDL_CreateCond()))
+ return AVERROR(ENOMEM);
+ f->pktq = pktq;
+ f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
+ f->keep_last = !!keep_last;
+ for (i = 0; i < f->max_size; i++)
+ if (!(f->queue[i].frame = av_frame_alloc()))
+ return AVERROR(ENOMEM);
+ return 0;
+}
+
+static void frame_queue_destory(FrameQueue *f)
+{
+ int i;
+ for (i = 0; i < f->max_size; i++) {
+ Frame *vp = &f->queue[i];
+ frame_queue_unref_item(vp);
+ av_frame_free(&vp->frame);
+ free_picture(vp);
+ }
+ SDL_DestroyMutex(f->mutex);
+ SDL_DestroyCond(f->cond);
+}
+
+static void frame_queue_signal(FrameQueue *f)
+{
+ SDL_LockMutex(f->mutex);
+ SDL_CondSignal(f->cond);
+ SDL_UnlockMutex(f->mutex);
+}
+
+static Frame *frame_queue_peek(FrameQueue *f)
+{
+ return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
+}
+
+static Frame *frame_queue_peek_next(FrameQueue *f)
+{
+ return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
+}
+
+static Frame *frame_queue_peek_last(FrameQueue *f)
+{
+ return &f->queue[f->rindex];
+}
+
+static Frame *frame_queue_peek_writable(FrameQueue *f)
+{
+ /* wait until we have space to put a new frame */
+ SDL_LockMutex(f->mutex);
+ while (f->size >= f->max_size &&
+ !f->pktq->abort_request) {
+ SDL_CondWait(f->cond, f->mutex);
+ }
+ SDL_UnlockMutex(f->mutex);
+
+ if (f->pktq->abort_request)
+ return NULL;
+
+ return &f->queue[f->windex];
+}
+
+static Frame *frame_queue_peek_readable(FrameQueue *f)
+{
+ /* wait until we have a readable new frame */
+ SDL_LockMutex(f->mutex);
+ while (f->size - f->rindex_shown <= 0 &&
+ !f->pktq->abort_request) {
+ SDL_CondWait(f->cond, f->mutex);
+ }
+ SDL_UnlockMutex(f->mutex);
+
+ if (f->pktq->abort_request)
+ return NULL;
+
+ return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
+}
+
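+/* make the frame written at windex visible to the reader and advance the write index */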
+static void frame_queue_push(FrameQueue *f)
+{
+ if (++f->windex == f->max_size)
+ f->windex = 0;
+ SDL_LockMutex(f->mutex);
+ f->size++;
+ SDL_CondSignal(f->cond);
+ SDL_UnlockMutex(f->mutex);
+}
+
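+/* drop the current read frame and advance the read index (the very first
+ * call only marks the frame as shown when keep_last is set) */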
+static void frame_queue_next(FrameQueue *f)
+{
+ if (f->keep_last && !f->rindex_shown) {
+ f->rindex_shown = 1;
+ return;
+ }
+ frame_queue_unref_item(&f->queue[f->rindex]);
+ if (++f->rindex == f->max_size)
+ f->rindex = 0;
+ SDL_LockMutex(f->mutex);
+ f->size--;
+ SDL_CondSignal(f->cond);
+ SDL_UnlockMutex(f->mutex);
+}
+
+/* jump back to the previous frame if available by resetting rindex_shown */
+static int frame_queue_prev(FrameQueue *f)
+{
+ int ret = f->rindex_shown;
+ f->rindex_shown = 0;
+ return ret;
+}
+
+/* return the number of undisplayed frames in the queue */
+static int frame_queue_nb_remaining(FrameQueue *f)
+{
+ return f->size - f->rindex_shown;
+}
+
+/* return last shown position */
+static int64_t frame_queue_last_pos(FrameQueue *f)
+{
+ Frame *fp = &f->queue[f->rindex];
+ if (f->rindex_shown && fp->serial == f->pktq->serial)
+ return fp->pos;
+ else
+ return -1;
+}
+
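+/* stop a decoder: abort its packet queue, wake up the frame queue, wait for
+ * the decoder thread to exit and flush the remaining packets */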
+static void decoder_abort(Decoder *d, FrameQueue *fq)
+{
+ packet_queue_abort(d->queue);
+ frame_queue_signal(fq);
+ SDL_WaitThread(d->decoder_tid, NULL);
+ d->decoder_tid = NULL;
+ packet_queue_flush(d->queue);
+}
+
+static inline void fill_rectangle(SDL_Surface *screen,
+ int x, int y, int w, int h, int color, int update)
+{
+ SDL_Rect rect;
+ rect.x = x;
+ rect.y = y;
+ rect.w = w;
+ rect.h = h;
+ SDL_FillRect(screen, &rect, color);
+ if (update && w > 0 && h > 0)
+ SDL_UpdateRect(screen, x, y, w, h);
+}
+
+/* draw only the border of a rectangle */
+static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
+{
+ int w1, w2, h1, h2;
+
+ /* fill the background */
+ w1 = x;
+ if (w1 < 0)
+ w1 = 0;
+ w2 = width - (x + w);
+ if (w2 < 0)
+ w2 = 0;
+ h1 = y;
+ if (h1 < 0)
+ h1 = 0;
+ h2 = height - (y + h);
+ if (h2 < 0)
+ h2 = 0;
+ fill_rectangle(screen,
+ xleft, ytop,
+ w1, height,
+ color, update);
+ fill_rectangle(screen,
+ xleft + width - w2, ytop,
+ w2, height,
+ color, update);
+ fill_rectangle(screen,
+ xleft + w1, ytop,
+ width - w1 - w2, h1,
+ color, update);
+ fill_rectangle(screen,
+ xleft + w1, ytop + height - h2,
+ width - w1 - w2, h2,
+ color, update);
+}
+
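+/* blend a new value over an old pixel: oldp is weighted by (255 - a) and
+ * newp by a; newp holds the sum of 2^s samples, so the shift also averages
+ * them */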
+#define ALPHA_BLEND(a, oldp, newp, s)\
+((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
+
+#define RGBA_IN(r, g, b, a, s)\
+{\
+ unsigned int v = ((const uint32_t *)(s))[0];\
+ a = (v >> 24) & 0xff;\
+ r = (v >> 16) & 0xff;\
+ g = (v >> 8) & 0xff;\
+ b = v & 0xff;\
+}
+
+#define YUVA_IN(y, u, v, a, s, pal)\
+{\
+ unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
+ a = (val >> 24) & 0xff;\
+ y = (val >> 16) & 0xff;\
+ u = (val >> 8) & 0xff;\
+ v = val & 0xff;\
+}
+
+#define YUVA_OUT(d, y, u, v, a)\
+{\
+ ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
+}
+
+
+#define BPP 1
+
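+/* alpha-blend a palettized subtitle rectangle onto a YUV 4:2:0 picture */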
+static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
+{
+ int wrap, wrap3, width2, skip2;
+ int y, u, v, a, u1, v1, a1, w, h;
+ uint8_t *lum, *cb, *cr;
+ const uint8_t *p;
+ const uint32_t *pal;
+ int dstx, dsty, dstw, dsth;
+
+ dstw = av_clip(rect->w, 0, imgw);
+ dsth = av_clip(rect->h, 0, imgh);
+ dstx = av_clip(rect->x, 0, imgw - dstw);
+ dsty = av_clip(rect->y, 0, imgh - dsth);
+ lum = dst->data[0] + dsty * dst->linesize[0];
+ cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
+ cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
+
+ width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
+ skip2 = dstx >> 1;
+ wrap = dst->linesize[0];
+ wrap3 = rect->pict.linesize[0];
+ p = rect->pict.data[0];
+ pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
+
+ if (dsty & 1) {
+ lum += dstx;
+ cb += skip2;
+ cr += skip2;
+
+ if (dstx & 1) {
+ YUVA_IN(y, u, v, a, p, pal);
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+ cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
+ cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
+ cb++;
+ cr++;
+ lum++;
+ p += BPP;
+ }
+ for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
+ YUVA_IN(y, u, v, a, p, pal);
+ u1 = u;
+ v1 = v;
+ a1 = a;
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+
+ YUVA_IN(y, u, v, a, p + BPP, pal);
+ u1 += u;
+ v1 += v;
+ a1 += a;
+ lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
+ cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
+ cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
+ cb++;
+ cr++;
+ p += 2 * BPP;
+ lum += 2;
+ }
+ if (w) {
+ YUVA_IN(y, u, v, a, p, pal);
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+ cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
+ cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
+ p++;
+ lum++;
+ }
+ p += wrap3 - dstw * BPP;
+ lum += wrap - dstw - dstx;
+ cb += dst->linesize[1] - width2 - skip2;
+ cr += dst->linesize[2] - width2 - skip2;
+ }
+ for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
+ lum += dstx;
+ cb += skip2;
+ cr += skip2;
+
+ if (dstx & 1) {
+ YUVA_IN(y, u, v, a, p, pal);
+ u1 = u;
+ v1 = v;
+ a1 = a;
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+ p += wrap3;
+ lum += wrap;
+ YUVA_IN(y, u, v, a, p, pal);
+ u1 += u;
+ v1 += v;
+ a1 += a;
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+ cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
+ cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
+ cb++;
+ cr++;
+ p += -wrap3 + BPP;
+ lum += -wrap + 1;
+ }
+ for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
+ YUVA_IN(y, u, v, a, p, pal);
+ u1 = u;
+ v1 = v;
+ a1 = a;
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+
+ YUVA_IN(y, u, v, a, p + BPP, pal);
+ u1 += u;
+ v1 += v;
+ a1 += a;
+ lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
+ p += wrap3;
+ lum += wrap;
+
+ YUVA_IN(y, u, v, a, p, pal);
+ u1 += u;
+ v1 += v;
+ a1 += a;
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+
+ YUVA_IN(y, u, v, a, p + BPP, pal);
+ u1 += u;
+ v1 += v;
+ a1 += a;
+ lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
+
+ cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
+ cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
+
+ cb++;
+ cr++;
+ p += -wrap3 + 2 * BPP;
+ lum += -wrap + 2;
+ }
+ if (w) {
+ YUVA_IN(y, u, v, a, p, pal);
+ u1 = u;
+ v1 = v;
+ a1 = a;
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+ p += wrap3;
+ lum += wrap;
+ YUVA_IN(y, u, v, a, p, pal);
+ u1 += u;
+ v1 += v;
+ a1 += a;
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+ cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
+ cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
+ cb++;
+ cr++;
+ p += -wrap3 + BPP;
+ lum += -wrap + 1;
+ }
+ p += wrap3 + (wrap3 - dstw * BPP);
+ lum += wrap + (wrap - dstw - dstx);
+ cb += dst->linesize[1] - width2 - skip2;
+ cr += dst->linesize[2] - width2 - skip2;
+ }
+ /* handle odd height */
+ if (h) {
+ lum += dstx;
+ cb += skip2;
+ cr += skip2;
+
+ if (dstx & 1) {
+ YUVA_IN(y, u, v, a, p, pal);
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+ cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
+ cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
+ cb++;
+ cr++;
+ lum++;
+ p += BPP;
+ }
+ for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
+ YUVA_IN(y, u, v, a, p, pal);
+ u1 = u;
+ v1 = v;
+ a1 = a;
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+
+ YUVA_IN(y, u, v, a, p + BPP, pal);
+ u1 += u;
+ v1 += v;
+ a1 += a;
+ lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
+ cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
+ cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
+ cb++;
+ cr++;
+ p += 2 * BPP;
+ lum += 2;
+ }
+ if (w) {
+ YUVA_IN(y, u, v, a, p, pal);
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+ cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
+ cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
+ }
+ }
+}
+
+static void free_picture(Frame *vp)
+{
+ if (vp->bmp) {
+ SDL_FreeYUVOverlay(vp->bmp);
+ vp->bmp = NULL;
+ }
+}
+
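+/* compute the display rectangle: the largest area with the picture's aspect
+ * ratio that fits in the given screen region, centered */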
+static void calculate_display_rect(SDL_Rect *rect,
+ int scr_xleft, int scr_ytop, int scr_width, int scr_height,
+ int pic_width, int pic_height, AVRational pic_sar)
+{
+ float aspect_ratio;
+ int width, height, x, y;
+
+ if (pic_sar.num == 0)
+ aspect_ratio = 0;
+ else
+ aspect_ratio = av_q2d(pic_sar);
+
+ if (aspect_ratio <= 0.0)
+ aspect_ratio = 1.0;
+ aspect_ratio *= (float)pic_width / (float)pic_height;
+
+ /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
+ height = scr_height;
+ width = ((int)rint(height * aspect_ratio)) & ~1;
+ if (width > scr_width) {
+ width = scr_width;
+ height = ((int)rint(width / aspect_ratio)) & ~1;
+ }
+ x = (scr_width - width) / 2;
+ y = (scr_height - height) / 2;
+ rect->x = scr_xleft + x;
+ rect->y = scr_ytop + y;
+ rect->w = FFMAX(width, 1);
+ rect->h = FFMAX(height, 1);
+}
+
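+/* display the current video frame, blending any subtitle that is due over it first */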
+static void video_image_display(VideoState *is)
+{
+ Frame *vp;
+ Frame *sp;
+ AVPicture pict;
+ SDL_Rect rect;
+ int i;
+
+ vp = frame_queue_peek(&is->pictq);
+ if (vp->bmp) {
+ if (is->subtitle_st) {
+ if (frame_queue_nb_remaining(&is->subpq) > 0) {
+ sp = frame_queue_peek(&is->subpq);
+
+ if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
+ SDL_LockYUVOverlay (vp->bmp);
+
+ pict.data[0] = vp->bmp->pixels[0];
+ pict.data[1] = vp->bmp->pixels[2];
+ pict.data[2] = vp->bmp->pixels[1];
+
+ pict.linesize[0] = vp->bmp->pitches[0];
+ pict.linesize[1] = vp->bmp->pitches[2];
+ pict.linesize[2] = vp->bmp->pitches[1];
+
+ for (i = 0; i < sp->sub.num_rects; i++)
+ blend_subrect(&pict, sp->sub.rects[i],
+ vp->bmp->w, vp->bmp->h);
+
+ SDL_UnlockYUVOverlay (vp->bmp);
+ }
+ }
+ }
+
+ calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
+
+ SDL_DisplayYUVOverlay(vp->bmp, &rect);
+
+ if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
+ int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
+ fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
+ is->last_display_rect = rect;
+ }
+ }
+}
+
+static inline int compute_mod(int a, int b)
+{
+ return a < 0 ? a%b + b : a%b;
+}
+
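+/* draw the audio visualization: an oscilloscope of the sample buffer or an
+ * RDFT-based spectrogram */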
+static void video_audio_display(VideoState *s)
+{
+ int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
+ int ch, channels, h, h2, bgcolor, fgcolor;
+ int64_t time_diff;
+ int rdft_bits, nb_freq;
+
+ for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
+ ;
+ nb_freq = 1 << (rdft_bits - 1);
+
+ /* compute the display index: center on the currently output samples */
+ channels = s->audio_tgt.channels;
+ nb_display_channels = channels;
+ if (!s->paused) {
+ int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
+ n = 2 * channels;
+ delay = s->audio_write_buf_size;
+ delay /= n;
+
+ /* to be more precise, we take into account the time spent since
+ the last buffer computation */
+ if (audio_callback_time) {
+ time_diff = av_gettime_relative() - audio_callback_time;
+ delay -= (time_diff * s->audio_tgt.freq) / 1000000;
+ }
+
+ delay += 2 * data_used;
+ if (delay < data_used)
+ delay = data_used;
+
+ i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
+ if (s->show_mode == SHOW_MODE_WAVES) {
+ h = INT_MIN;
+ for (i = 0; i < 1000; i += channels) {
+ int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
+ int a = s->sample_array[idx];
+ int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
+ int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
+ int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
+ int score = a - d;
+ if (h < score && (b ^ c) < 0) {
+ h = score;
+ i_start = idx;
+ }
+ }
+ }
+
+ s->last_i_start = i_start;
+ } else {
+ i_start = s->last_i_start;
+ }
+
+ bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
+ if (s->show_mode == SHOW_MODE_WAVES) {
+ fill_rectangle(screen,
+ s->xleft, s->ytop, s->width, s->height,
+ bgcolor, 0);
+
+ fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
+
+ /* total height for one channel */
+ h = s->height / nb_display_channels;
+ /* graph height / 2 */
+ h2 = (h * 9) / 20;
+ for (ch = 0; ch < nb_display_channels; ch++) {
+ i = i_start + ch;
+ y1 = s->ytop + ch * h + (h / 2); /* position of center line */
+ for (x = 0; x < s->width; x++) {
+ y = (s->sample_array[i] * h2) >> 15;
+ if (y < 0) {
+ y = -y;
+ ys = y1 - y;
+ } else {
+ ys = y1;
+ }
+ fill_rectangle(screen,
+ s->xleft + x, ys, 1, y,
+ fgcolor, 0);
+ i += channels;
+ if (i >= SAMPLE_ARRAY_SIZE)
+ i -= SAMPLE_ARRAY_SIZE;
+ }
+ }
+
+ fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
+
+ for (ch = 1; ch < nb_display_channels; ch++) {
+ y = s->ytop + ch * h;
+ fill_rectangle(screen,
+ s->xleft, y, s->width, 1,
+ fgcolor, 0);
+ }
+ SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
+ } else {
+ nb_display_channels= FFMIN(nb_display_channels, 2);
+ if (rdft_bits != s->rdft_bits) {
+ av_rdft_end(s->rdft);
+ av_free(s->rdft_data);
+ s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
+ s->rdft_bits = rdft_bits;
+ s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
+ }
+ if (!s->rdft || !s->rdft_data){
+ av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
+ s->show_mode = SHOW_MODE_WAVES;
+ } else {
+ FFTSample *data[2];
+ for (ch = 0; ch < nb_display_channels; ch++) {
+ data[ch] = s->rdft_data + 2 * nb_freq * ch;
+ i = i_start + ch;
+ for (x = 0; x < 2 * nb_freq; x++) {
+ double w = (x-nb_freq) * (1.0 / nb_freq);
+ data[ch][x] = s->sample_array[i] * (1.0 - w * w);
+ i += channels;
+ if (i >= SAMPLE_ARRAY_SIZE)
+ i -= SAMPLE_ARRAY_SIZE;
+ }
+ av_rdft_calc(s->rdft, data[ch]);
+ }
+ /* This is the least efficient way to do this; we could of course
+ * access the data directly, but it is more than fast enough. */
+ for (y = 0; y < s->height; y++) {
+ double w = 1 / sqrt(nb_freq);
+ int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
+ int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
+ + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
+ a = FFMIN(a, 255);
+ b = FFMIN(b, 255);
+ fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
+
+ fill_rectangle(screen,
+ s->xpos, s->height-y, 1, 1,
+ fgcolor, 0);
+ }
+ }
+ SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
+ if (!s->paused)
+ s->xpos++;
+ if (s->xpos >= s->width)
+ s->xpos= s->xleft;
+ }
+}
+
+static void stream_close(VideoState *is)
+{
+ /* XXX: use a special url_shutdown call to abort parsing cleanly */
+ is->abort_request = 1;
+ SDL_WaitThread(is->read_tid, NULL);
+ packet_queue_destroy(&is->videoq);
+ packet_queue_destroy(&is->audioq);
+ packet_queue_destroy(&is->subtitleq);
+
+ /* free all pictures */
+ frame_queue_destory(&is->pictq);
+ frame_queue_destory(&is->sampq);
+ frame_queue_destory(&is->subpq);
+ SDL_DestroyCond(is->continue_read_thread);
+#if !CONFIG_AVFILTER
+ sws_freeContext(is->img_convert_ctx);
+#endif
+ av_free(is);
+}
+
+static void do_exit(VideoState *is)
+{
+ if (is) {
+ stream_close(is);
+ }
+ av_lockmgr_register(NULL);
+ uninit_opts();
+#if CONFIG_AVFILTER
+ av_freep(&vfilters_list);
+#endif
+ avformat_network_deinit();
+ if (show_status)
+ printf("\n");
+ SDL_Quit();
+ av_log(NULL, AV_LOG_QUIET, "%s", "");
+ exit(0);
+}
+
+static void sigterm_handler(int sig)
+{
+ exit(123);
+}
+
+static void set_default_window_size(int width, int height, AVRational sar)
+{
+ SDL_Rect rect;
+ calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
+ default_width = rect.w;
+ default_height = rect.h;
+}
+
+static int video_open(VideoState *is, int force_set_video_mode, Frame *vp)
+{
+ int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
+ int w,h;
+
+ if (is_full_screen) flags |= SDL_FULLSCREEN;
+ else flags |= SDL_RESIZABLE;
+
+ if (vp && vp->width)
+ set_default_window_size(vp->width, vp->height, vp->sar);
+
+ if (is_full_screen && fs_screen_width) {
+ w = fs_screen_width;
+ h = fs_screen_height;
+ } else if (!is_full_screen && screen_width) {
+ w = screen_width;
+ h = screen_height;
+ } else {
+ w = default_width;
+ h = default_height;
+ }
+ w = FFMIN(16383, w);
+ if (screen && is->width == screen->w && screen->w == w
+ && is->height== screen->h && screen->h == h && !force_set_video_mode)
+ return 0;
+ screen = SDL_SetVideoMode(w, h, 0, flags);
+ if (!screen) {
+ av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
+ do_exit(is);
+ }
+ if (!window_title)
+ window_title = input_filename;
+ SDL_WM_SetCaption(window_title, window_title);
+
+ is->width = screen->w;
+ is->height = screen->h;
+
+ return 0;
+}
+
+/* display the current picture, if any */
+static void video_display(VideoState *is)
+{
+ if (!screen)
+ video_open(is, 0, NULL);
+ if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
+ video_audio_display(is);
+ else if (is->video_st)
+ video_image_display(is);
+}
+
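+/* The clocks store pts_drift = pts - system_time at the last update; the
+ * current value is then extrapolated from the system time and scaled by the
+ * clock speed. NAN is returned when the clock's packet serial is obsolete. */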
+static double get_clock(Clock *c)
+{
+ if (*c->queue_serial != c->serial)
+ return NAN;
+ if (c->paused) {
+ return c->pts;
+ } else {
+ double time = av_gettime_relative() / 1000000.0;
+ return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
+ }
+}
+
+static void set_clock_at(Clock *c, double pts, int serial, double time)
+{
+ c->pts = pts;
+ c->last_updated = time;
+ c->pts_drift = c->pts - time;
+ c->serial = serial;
+}
+
+static void set_clock(Clock *c, double pts, int serial)
+{
+ double time = av_gettime_relative() / 1000000.0;
+ set_clock_at(c, pts, serial, time);
+}
+
+static void set_clock_speed(Clock *c, double speed)
+{
+ set_clock(c, get_clock(c), c->serial);
+ c->speed = speed;
+}
+
+static void init_clock(Clock *c, int *queue_serial)
+{
+ c->speed = 1.0;
+ c->paused = 0;
+ c->queue_serial = queue_serial;
+ set_clock(c, NAN, -1);
+}
+
+static void sync_clock_to_slave(Clock *c, Clock *slave)
+{
+ double clock = get_clock(c);
+ double slave_clock = get_clock(slave);
+ if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
+ set_clock(c, slave_clock, slave->serial);
+}
+
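+/* return the clock that currently drives synchronization, falling back to
+ * another clock when the requested master stream is not present */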
+static int get_master_sync_type(VideoState *is) {
+ if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
+ if (is->video_st)
+ return AV_SYNC_VIDEO_MASTER;
+ else
+ return AV_SYNC_AUDIO_MASTER;
+ } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
+ if (is->audio_st)
+ return AV_SYNC_AUDIO_MASTER;
+ else
+ return AV_SYNC_EXTERNAL_CLOCK;
+ } else {
+ return AV_SYNC_EXTERNAL_CLOCK;
+ }
+}
+
+/* get the current master clock value */
+static double get_master_clock(VideoState *is)
+{
+ double val;
+
+ switch (get_master_sync_type(is)) {
+ case AV_SYNC_VIDEO_MASTER:
+ val = get_clock(&is->vidclk);
+ break;
+ case AV_SYNC_AUDIO_MASTER:
+ val = get_clock(&is->audclk);
+ break;
+ default:
+ val = get_clock(&is->extclk);
+ break;
+ }
+ return val;
+}
+
+static void check_external_clock_speed(VideoState *is) {
+ if ((is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2) ||
+ (is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2)) {
+ set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
+ } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
+ (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
+ set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
+ } else {
+ double speed = is->extclk.speed;
+ if (speed != 1.0)
+ set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
+ }
+}
+
+/* seek in the stream */
+static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
+{
+ if (!is->seek_req) {
+ is->seek_pos = pos;
+ is->seek_rel = rel;
+ is->seek_flags &= ~AVSEEK_FLAG_BYTE;
+ if (seek_by_bytes)
+ is->seek_flags |= AVSEEK_FLAG_BYTE;
+ is->seek_req = 1;
+ SDL_CondSignal(is->continue_read_thread);
+ }
+}
+
+/* pause or resume the video */
+static void stream_toggle_pause(VideoState *is)
+{
+ if (is->paused) {
+ is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
+ if (is->read_pause_return != AVERROR(ENOSYS)) {
+ is->vidclk.paused = 0;
+ }
+ set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
+ }
+ set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
+ is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
+}
+
+static void toggle_pause(VideoState *is)
+{
+ stream_toggle_pause(is);
+ is->step = 0;
+}
+
+static void step_to_next_frame(VideoState *is)
+{
+ /* if the stream is paused, unpause it, then step */
+ if (is->paused)
+ stream_toggle_pause(is);
+ is->step = 1;
+}
+
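+/* given the nominal delay (duration of the previous frame), return the delay
+ * actually used, stretched or shortened so that the video clock converges
+ * towards the master clock */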
+static double compute_target_delay(double delay, VideoState *is)
+{
+ double sync_threshold, diff = 0;
+
+ /* update delay to follow master synchronisation source */
+ if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
+ /* if video is slave, we try to correct big delays by
+ duplicating or deleting a frame */
+ diff = get_clock(&is->vidclk) - get_master_clock(is);
+
+ /* Skip or repeat the frame. We take the delay into account to
+ compute the threshold; it is still unclear whether this is the
+ best heuristic. */
+ sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
+ if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
+ if (diff <= -sync_threshold)
+ delay = FFMAX(0, delay + diff);
+ else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
+ delay = delay + diff;
+ else if (diff >= sync_threshold)
+ delay = 2 * delay;
+ }
+ }
+
+ av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
+ delay, -diff);
+
+ return delay;
+}
+
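+/* estimate how long vp should be displayed from the pts of the following
+ * frame; fall back to the frame's nominal duration, and return 0 across a
+ * serial change */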
+static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
+ if (vp->serial == nextvp->serial) {
+ double duration = nextvp->pts - vp->pts;
+ if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
+ return vp->duration;
+ else
+ return duration;
+ } else {
+ return 0.0;
+ }
+}
+
+static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
+ /* update current video pts */
+ set_clock(&is->vidclk, pts, serial);
+ sync_clock_to_slave(&is->extclk, &is->vidclk);
+}
+
+/* called to display each frame */
+static void video_refresh(void *opaque, double *remaining_time)
+{
+ VideoState *is = opaque;
+ double time;
+
+ Frame *sp, *sp2;
+
+ if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
+ check_external_clock_speed(is);
+
+ if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
+ time = av_gettime_relative() / 1000000.0;
+ if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
+ video_display(is);
+ is->last_vis_time = time;
+ }
+ *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
+ }
+
+ if (is->video_st) {
+ int redisplay = 0;
+ if (is->force_refresh)
+ redisplay = frame_queue_prev(&is->pictq);
+retry:
+ if (frame_queue_nb_remaining(&is->pictq) == 0) {
+ // nothing to do, no picture to display in the queue
+ } else {
+ double last_duration, duration, delay;
+ Frame *vp, *lastvp;
+
+ /* dequeue the picture */
+ lastvp = frame_queue_peek_last(&is->pictq);
+ vp = frame_queue_peek(&is->pictq);
+
+ if (vp->serial != is->videoq.serial) {
+ frame_queue_next(&is->pictq);
+ redisplay = 0;
+ goto retry;
+ }
+
+ if (lastvp->serial != vp->serial && !redisplay)
+ is->frame_timer = av_gettime_relative() / 1000000.0;
+
+ if (is->paused)
+ goto display;
+
+ /* compute nominal last_duration */
+ last_duration = vp_duration(is, lastvp, vp);
+ if (redisplay)
+ delay = 0.0;
+ else
+ delay = compute_target_delay(last_duration, is);
+
+ time = av_gettime_relative() / 1000000.0;
+ if (time < is->frame_timer + delay && !redisplay) {
+ *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
+ return;
+ }
+
+ is->frame_timer += delay;
+ if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
+ is->frame_timer = time;
+
+ SDL_LockMutex(is->pictq.mutex);
+ if (!redisplay && !isnan(vp->pts))
+ update_video_pts(is, vp->pts, vp->pos, vp->serial);
+ SDL_UnlockMutex(is->pictq.mutex);
+
+ if (frame_queue_nb_remaining(&is->pictq) > 1) {
+ Frame *nextvp = frame_queue_peek_next(&is->pictq);
+ duration = vp_duration(is, vp, nextvp);
+ if (!is->step && (redisplay || framedrop > 0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration) {
+ if (!redisplay)
+ is->frame_drops_late++;
+ frame_queue_next(&is->pictq);
+ redisplay = 0;
+ goto retry;
+ }
+ }
+
+ if (is->subtitle_st) {
+ while (frame_queue_nb_remaining(&is->subpq) > 0) {
+ sp = frame_queue_peek(&is->subpq);
+
+ if (frame_queue_nb_remaining(&is->subpq) > 1)
+ sp2 = frame_queue_peek_next(&is->subpq);
+ else
+ sp2 = NULL;
+
+ if (sp->serial != is->subtitleq.serial
+ || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
+ || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
+ {
+ frame_queue_next(&is->subpq);
+ } else {
+ break;
+ }
+ }
+ }
+
+display:
+ /* display picture */
+ if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
+ video_display(is);
+
+ frame_queue_next(&is->pictq);
+
+ if (is->step && !is->paused)
+ stream_toggle_pause(is);
+ }
+ }
+ is->force_refresh = 0;
+ if (show_status) {
+ static int64_t last_time;
+ int64_t cur_time;
+ int aqsize, vqsize, sqsize;
+ double av_diff;
+
+ cur_time = av_gettime_relative();
+ if (!last_time || (cur_time - last_time) >= 30000) {
+ aqsize = 0;
+ vqsize = 0;
+ sqsize = 0;
+ if (is->audio_st)
+ aqsize = is->audioq.size;
+ if (is->video_st)
+ vqsize = is->videoq.size;
+ if (is->subtitle_st)
+ sqsize = is->subtitleq.size;
+ av_diff = 0;
+ if (is->audio_st && is->video_st)
+ av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
+ else if (is->video_st)
+ av_diff = get_master_clock(is) - get_clock(&is->vidclk);
+ else if (is->audio_st)
+ av_diff = get_master_clock(is) - get_clock(&is->audclk);
+ av_log(NULL, AV_LOG_INFO,
+ "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
+ get_master_clock(is),
+ (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
+ av_diff,
+ is->frame_drops_early + is->frame_drops_late,
+ aqsize / 1024,
+ vqsize / 1024,
+ sqsize,
+ is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
+ is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
+ fflush(stdout);
+ last_time = cur_time;
+ }
+ }
+}
+
+/* allocate a picture (this must be done in the main thread to avoid
+ potential locking problems) */
+static void alloc_picture(VideoState *is)
+{
+ Frame *vp;
+ int64_t bufferdiff;
+
+ vp = &is->pictq.queue[is->pictq.windex];
+
+ free_picture(vp);
+
+ video_open(is, 0, vp);
+
+ vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
+ SDL_YV12_OVERLAY,
+ screen);
+ bufferdiff = vp->bmp ? FFMAX(vp->bmp->pixels[0], vp->bmp->pixels[1]) - FFMIN(vp->bmp->pixels[0], vp->bmp->pixels[1]) : 0;
+ if (!vp->bmp || vp->bmp->pitches[0] < vp->width || bufferdiff < (int64_t)vp->height * vp->bmp->pitches[0]) {
+ /* SDL allocates a buffer smaller than requested if the video
+ * overlay hardware is unable to support the requested size. */
+ av_log(NULL, AV_LOG_FATAL,
+ "Error: the video system does not support an image\n"
+ "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
+ "to reduce the image size.\n", vp->width, vp->height );
+ do_exit(is);
+ }
+
+ SDL_LockMutex(is->pictq.mutex);
+ vp->allocated = 1;
+ SDL_CondSignal(is->pictq.cond);
+ SDL_UnlockMutex(is->pictq.mutex);
+}
+
+static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
+ int i, width, height;
+ Uint8 *p, *maxp;
+ for (i = 0; i < 3; i++) {
+ width = bmp->w;
+ height = bmp->h;
+ if (i > 0) {
+ width >>= 1;
+ height >>= 1;
+ }
+ if (bmp->pitches[i] > width) {
+ maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
+ for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
+ *(p+1) = *p;
+ }
+ }
+}
+
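+/* upload a decoded frame into a YUV overlay (allocated in the main thread
+ * via FF_ALLOC_EVENT if necessary) and push it onto the picture queue */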
+static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
+{
+ Frame *vp;
+
+#if defined(DEBUG_SYNC) && 0
+ printf("frame_type=%c pts=%0.3f\n",
+ av_get_picture_type_char(src_frame->pict_type), pts);
+#endif
+
+ if (!(vp = frame_queue_peek_writable(&is->pictq)))
+ return -1;
+
+ vp->sar = src_frame->sample_aspect_ratio;
+
+ /* alloc or resize hardware picture buffer */
+ if (!vp->bmp || vp->reallocate || !vp->allocated ||
+ vp->width != src_frame->width ||
+ vp->height != src_frame->height) {
+ SDL_Event event;
+
+ vp->allocated = 0;
+ vp->reallocate = 0;
+ vp->width = src_frame->width;
+ vp->height = src_frame->height;
+
+ /* the allocation must be done in the main thread to avoid
+ locking problems. */
+ event.type = FF_ALLOC_EVENT;
+ event.user.data1 = is;
+ SDL_PushEvent(&event);
+
+ /* wait until the picture is allocated */
+ SDL_LockMutex(is->pictq.mutex);
+ while (!vp->allocated && !is->videoq.abort_request) {
+ SDL_CondWait(is->pictq.cond, is->pictq.mutex);
+ }
+ /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
+ if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
+ while (!vp->allocated && !is->abort_request) {
+ SDL_CondWait(is->pictq.cond, is->pictq.mutex);
+ }
+ }
+ SDL_UnlockMutex(is->pictq.mutex);
+
+ if (is->videoq.abort_request)
+ return -1;
+ }
+
+ /* if the frame is not skipped, then display it */
+ if (vp->bmp) {
+ AVPicture pict = { { 0 } };
+
+ /* get a pointer on the bitmap */
+ SDL_LockYUVOverlay (vp->bmp);
+
+ pict.data[0] = vp->bmp->pixels[0];
+ pict.data[1] = vp->bmp->pixels[2];
+ pict.data[2] = vp->bmp->pixels[1];
+
+ pict.linesize[0] = vp->bmp->pitches[0];
+ pict.linesize[1] = vp->bmp->pitches[2];
+ pict.linesize[2] = vp->bmp->pitches[1];
+
+#if CONFIG_AVFILTER
+ // FIXME use direct rendering
+ av_picture_copy(&pict, (AVPicture *)src_frame,
+ src_frame->format, vp->width, vp->height);
+#else
+ av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
+ is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
+ vp->width, vp->height, src_frame->format, vp->width, vp->height,
+ AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
+ if (!is->img_convert_ctx) {
+ av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
+ exit(1);
+ }
+ sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
+ 0, vp->height, pict.data, pict.linesize);
+#endif
+ /* workaround SDL PITCH_WORKAROUND */
+ duplicate_right_border_pixels(vp->bmp);
+ /* update the bitmap content */
+ SDL_UnlockYUVOverlay(vp->bmp);
+
+ vp->pts = pts;
+ vp->duration = duration;
+ vp->pos = pos;
+ vp->serial = serial;
+
+ /* now we can update the picture count */
+ frame_queue_push(&is->pictq);
+ }
+ return 0;
+}
+
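+/* decode one video frame; may drop it early for A-V sync when frame dropping
+ * is enabled and video is not the master clock */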
+static int get_video_frame(VideoState *is, AVFrame *frame)
+{
+ int got_picture;
+
+ if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
+ return -1;
+
+ if (got_picture) {
+ double dpts = NAN;
+
+ if (frame->pts != AV_NOPTS_VALUE)
+ dpts = av_q2d(is->video_st->time_base) * frame->pts;
+
+ frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
+
+ if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
+ if (frame->pts != AV_NOPTS_VALUE) {
+ double diff = dpts - get_master_clock(is);
+ if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
+ diff - is->frame_last_filter_delay < 0 &&
+ is->viddec.pkt_serial == is->vidclk.serial &&
+ is->videoq.nb_packets) {
+ is->frame_drops_early++;
+ av_frame_unref(frame);
+ got_picture = 0;
+ }
+ }
+ }
+ }
+
+ return got_picture;
+}
+
+#if CONFIG_AVFILTER
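+/* connect source_ctx to sink_ctx, parsing the textual filtergraph description
+ * in between when one is given */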
+static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
+ AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
+{
+ int ret, i;
+ int nb_filters = graph->nb_filters;
+ AVFilterInOut *outputs = NULL, *inputs = NULL;
+
+ if (filtergraph) {
+ outputs = avfilter_inout_alloc();
+ inputs = avfilter_inout_alloc();
+ if (!outputs || !inputs) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ outputs->name = av_strdup("in");
+ outputs->filter_ctx = source_ctx;
+ outputs->pad_idx = 0;
+ outputs->next = NULL;
+
+ inputs->name = av_strdup("out");
+ inputs->filter_ctx = sink_ctx;
+ inputs->pad_idx = 0;
+ inputs->next = NULL;
+
+ if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
+ goto fail;
+ } else {
+ if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
+ goto fail;
+ }
+
+ /* Reorder the filters to ensure that inputs of the custom filters are merged first */
+ for (i = 0; i < graph->nb_filters - nb_filters; i++)
+ FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
+
+ ret = avfilter_graph_config(graph, NULL);
+fail:
+ avfilter_inout_free(&outputs);
+ avfilter_inout_free(&inputs);
+ return ret;
+}
+
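+/* build the video filtergraph: buffer source -> user filters -> autorotation
+ * -> crop to even dimensions -> buffersink restricted to YUV420P */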
+static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
+{
+ static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
+ char sws_flags_str[128];
+ char buffersrc_args[256];
+ int ret;
+ AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
+ AVCodecContext *codec = is->video_st->codec;
+ AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
+
+ av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
+ snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
+ graph->scale_sws_opts = av_strdup(sws_flags_str);
+
+ snprintf(buffersrc_args, sizeof(buffersrc_args),
+ "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
+ frame->width, frame->height, frame->format,
+ is->video_st->time_base.num, is->video_st->time_base.den,
+ codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
+ if (fr.num && fr.den)
+ av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
+
+ if ((ret = avfilter_graph_create_filter(&filt_src,
+ avfilter_get_by_name("buffer"),
+ "ffplay_buffer", buffersrc_args, NULL,
+ graph)) < 0)
+ goto fail;
+
+ ret = avfilter_graph_create_filter(&filt_out,
+ avfilter_get_by_name("buffersink"),
+ "ffplay_buffersink", NULL, NULL, graph);
+ if (ret < 0)
+ goto fail;
+
+ if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
+ goto fail;
+
+ last_filter = filt_out;
+
+/* Note: this macro adds a filter before the last added filter, so the
+ * filters are processed in the reverse order in which they are added */
+#define INSERT_FILT(name, arg) do { \
+ AVFilterContext *filt_ctx; \
+ \
+ ret = avfilter_graph_create_filter(&filt_ctx, \
+ avfilter_get_by_name(name), \
+ "ffplay_" name, arg, NULL, graph); \
+ if (ret < 0) \
+ goto fail; \
+ \
+ ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
+ if (ret < 0) \
+ goto fail; \
+ \
+ last_filter = filt_ctx; \
+} while (0)
+
+ /* SDL YUV code does not handle odd width/height for some driver
+ * combinations, therefore we crop the picture to an even width/height. */
+ INSERT_FILT("crop", "floor(in_w/2)*2:floor(in_h/2)*2");
+
+ if (autorotate) {
+ double theta = get_rotation(is->video_st);
+
+ if (fabs(theta - 90) < 1.0) {
+ INSERT_FILT("transpose", "clock");
+ } else if (fabs(theta - 180) < 1.0) {
+ INSERT_FILT("hflip", NULL);
+ INSERT_FILT("vflip", NULL);
+ } else if (fabs(theta - 270) < 1.0) {
+ INSERT_FILT("transpose", "cclock");
+ } else if (fabs(theta) > 1.0) {
+ char rotate_buf[64];
+ snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
+ INSERT_FILT("rotate", rotate_buf);
+ }
+ }
+
+ if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
+ goto fail;
+
+ is->in_video_filter = filt_src;
+ is->out_video_filter = filt_out;
+
+fail:
+ return ret;
+}
+
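+/* build the audio filtergraph: abuffer source -> user filters -> abuffersink;
+ * when force_output_format is set, the sink is constrained to the audio
+ * device format */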
+static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
+{
+ static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
+ int sample_rates[2] = { 0, -1 };
+ int64_t channel_layouts[2] = { 0, -1 };
+ int channels[2] = { 0, -1 };
+ AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
+ char aresample_swr_opts[512] = "";
+ AVDictionaryEntry *e = NULL;
+ char asrc_args[256];
+ int ret;
+
+ avfilter_graph_free(&is->agraph);
+ if (!(is->agraph = avfilter_graph_alloc()))
+ return AVERROR(ENOMEM);
+
+ while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
+ av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
+ if (strlen(aresample_swr_opts))
+ aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
+ av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
+
+ ret = snprintf(asrc_args, sizeof(asrc_args),
+ "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
+ is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
+ is->audio_filter_src.channels,
+ 1, is->audio_filter_src.freq);
+ if (is->audio_filter_src.channel_layout)
+ snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
+ ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
+
+ ret = avfilter_graph_create_filter(&filt_asrc,
+ avfilter_get_by_name("abuffer"), "ffplay_abuffer",
+ asrc_args, NULL, is->agraph);
+ if (ret < 0)
+ goto end;
+
+
+ ret = avfilter_graph_create_filter(&filt_asink,
+ avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
+ NULL, NULL, is->agraph);
+ if (ret < 0)
+ goto end;
+
+ if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
+ goto end;
+ if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
+ goto end;
+
+ if (force_output_format) {
+ channel_layouts[0] = is->audio_tgt.channel_layout;
+ channels [0] = is->audio_tgt.channels;
+ sample_rates [0] = is->audio_tgt.freq;
+ if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
+ goto end;
+ if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
+ goto end;
+ if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
+ goto end;
+ if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
+ goto end;
+ }
+
+
+ if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
+ goto end;
+
+ is->in_audio_filter = filt_asrc;
+ is->out_audio_filter = filt_asink;
+
+end:
+ if (ret < 0)
+ avfilter_graph_free(&is->agraph);
+ return ret;
+}
+#endif /* CONFIG_AVFILTER */
+
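+/* audio decoding thread: decode audio frames, pass them through the audio
+ * filtergraph when enabled, and queue them for the SDL audio callback */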
+static int audio_thread(void *arg)
+{
+ VideoState *is = arg;
+ AVFrame *frame = av_frame_alloc();
+ Frame *af;
+#if CONFIG_AVFILTER
+ int last_serial = -1;
+ int64_t dec_channel_layout;
+ int reconfigure;
+#endif
+ int got_frame = 0;
+ AVRational tb;
+ int ret = 0;
+
+ if (!frame)
+ return AVERROR(ENOMEM);
+
+ do {
+ if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
+ goto the_end;
+
+ if (got_frame) {
+ tb = (AVRational){1, frame->sample_rate};
+
+#if CONFIG_AVFILTER
+ dec_channel_layout = get_valid_channel_layout(frame->channel_layout, av_frame_get_channels(frame));
+
+ reconfigure =
+ cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
+ frame->format, av_frame_get_channels(frame)) ||
+ is->audio_filter_src.channel_layout != dec_channel_layout ||
+ is->audio_filter_src.freq != frame->sample_rate ||
+ is->auddec.pkt_serial != last_serial;
+
+ if (reconfigure) {
+ char buf1[1024], buf2[1024];
+ av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
+ av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
+ av_log(NULL, AV_LOG_DEBUG,
+ "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
+ is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
+ frame->sample_rate, av_frame_get_channels(frame), av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
+
+ is->audio_filter_src.fmt = frame->format;
+ is->audio_filter_src.channels = av_frame_get_channels(frame);
+ is->audio_filter_src.channel_layout = dec_channel_layout;
+ is->audio_filter_src.freq = frame->sample_rate;
+ last_serial = is->auddec.pkt_serial;
+
+ if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
+ goto the_end;
+ }
+
+ if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
+ goto the_end;
+
+ while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
+ tb = is->out_audio_filter->inputs[0]->time_base;
+#endif
+ if (!(af = frame_queue_peek_writable(&is->sampq)))
+ goto the_end;
+
+ af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
+ af->pos = av_frame_get_pkt_pos(frame);
+ af->serial = is->auddec.pkt_serial;
+ af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
+
+ av_frame_move_ref(af->frame, frame);
+ frame_queue_push(&is->sampq);
+
+#if CONFIG_AVFILTER
+ if (is->audioq.serial != is->auddec.pkt_serial)
+ break;
+ }
+ if (ret == AVERROR_EOF)
+ is->auddec.finished = is->auddec.pkt_serial;
+#endif
+ }
+ } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
+ the_end:
+#if CONFIG_AVFILTER
+ avfilter_graph_free(&is->agraph);
+#endif
+ av_frame_free(&frame);
+ return ret;
+}
+
+static void decoder_start(Decoder *d, int (*fn)(void *), void *arg)
+{
+ packet_queue_start(d->queue);
+ d->decoder_tid = SDL_CreateThread(fn, arg);
+}
+
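+/* video decoding thread: fetch decoded frames, rebuild the video filtergraph
+ * whenever the frame properties or filter selection change, and queue the
+ * filtered pictures for display */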
+static int video_thread(void *arg)
+{
+ VideoState *is = arg;
+ AVFrame *frame = av_frame_alloc();
+ double pts;
+ double duration;
+ int ret;
+ AVRational tb = is->video_st->time_base;
+ AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
+
+#if CONFIG_AVFILTER
+ AVFilterGraph *graph = avfilter_graph_alloc();
+ AVFilterContext *filt_out = NULL, *filt_in = NULL;
+ int last_w = 0;
+ int last_h = 0;
+ enum AVPixelFormat last_format = -2;
+ int last_serial = -1;
+ int last_vfilter_idx = 0;
+ if (!graph) {
+ av_frame_free(&frame);
+ return AVERROR(ENOMEM);
+ }
+
+#endif
+
+ if (!frame) {
+#if CONFIG_AVFILTER
+ avfilter_graph_free(&graph);
+#endif
+ return AVERROR(ENOMEM);
+ }
+
+ for (;;) {
+ ret = get_video_frame(is, frame);
+ if (ret < 0)
+ goto the_end;
+ if (!ret)
+ continue;
+
+#if CONFIG_AVFILTER
+ if ( last_w != frame->width
+ || last_h != frame->height
+ || last_format != frame->format
+ || last_serial != is->viddec.pkt_serial
+ || last_vfilter_idx != is->vfilter_idx) {
+ av_log(NULL, AV_LOG_DEBUG,
+ "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
+ last_w, last_h,
+ (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
+ frame->width, frame->height,
+ (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
+ avfilter_graph_free(&graph);
+ graph = avfilter_graph_alloc();
+ if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
+ SDL_Event event;
+ event.type = FF_QUIT_EVENT;
+ event.user.data1 = is;
+ SDL_PushEvent(&event);
+ goto the_end;
+ }
+ filt_in = is->in_video_filter;
+ filt_out = is->out_video_filter;
+ last_w = frame->width;
+ last_h = frame->height;
+ last_format = frame->format;
+ last_serial = is->viddec.pkt_serial;
+ last_vfilter_idx = is->vfilter_idx;
+ frame_rate = filt_out->inputs[0]->frame_rate;
+ }
+
+ ret = av_buffersrc_add_frame(filt_in, frame);
+ if (ret < 0)
+ goto the_end;
+
+ while (ret >= 0) {
+ is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
+
+ ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
+ if (ret < 0) {
+ if (ret == AVERROR_EOF)
+ is->viddec.finished = is->viddec.pkt_serial;
+ ret = 0;
+ break;
+ }
+
+ is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
+ if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
+ is->frame_last_filter_delay = 0;
+ tb = filt_out->inputs[0]->time_base;
+#endif
+ duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
+ pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
+ ret = queue_picture(is, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
+ av_frame_unref(frame);
+#if CONFIG_AVFILTER
+ }
+#endif
+
+ if (ret < 0)
+ goto the_end;
+ }
+ the_end:
+#if CONFIG_AVFILTER
+ avfilter_graph_free(&graph);
+#endif
+ av_frame_free(&frame);
+ return 0;
+}
+
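+/* subtitle decoding thread: decode subtitles and convert the palette of
+ * bitmap subtitles from RGBA to YUVA before queuing them for blending */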
+static int subtitle_thread(void *arg)
+{
+ VideoState *is = arg;
+ Frame *sp;
+ int got_subtitle;
+ double pts;
+ int i, j;
+ int r, g, b, y, u, v, a;
+
+ for (;;) {
+ if (!(sp = frame_queue_peek_writable(&is->subpq)))
+ return 0;
+
+ if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
+ break;
+
+ pts = 0;
+
+ if (got_subtitle && sp->sub.format == 0) {
+ if (sp->sub.pts != AV_NOPTS_VALUE)
+ pts = sp->sub.pts / (double)AV_TIME_BASE;
+ sp->pts = pts;
+ sp->serial = is->subdec.pkt_serial;
+
+ for (i = 0; i < sp->sub.num_rects; i++)
+ {
+ for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
+ {
+ RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
+ y = RGB_TO_Y_CCIR(r, g, b);
+ u = RGB_TO_U_CCIR(r, g, b, 0);
+ v = RGB_TO_V_CCIR(r, g, b, 0);
+ YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
+ }
+ }
+
+ /* now we can update the picture count */
+ frame_queue_push(&is->subpq);
+ } else if (got_subtitle) {
+ avsubtitle_free(&sp->sub);
+ }
+ }
+ return 0;
+}
+
+/* copy samples so they can be shown in the waveform/spectrum display */
+static void update_sample_display(VideoState *is, short *samples, int samples_size)
+{
+ int size, len;
+
+ size = samples_size / sizeof(short);
+ while (size > 0) {
+ len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
+ if (len > size)
+ len = size;
+ memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
+ samples += len;
+ is->sample_array_index += len;
+ if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
+ is->sample_array_index = 0;
+ size -= len;
+ }
+}
+
+/* return the wanted number of samples to get better sync if sync_type is video
+ * or external master clock */
+static int synchronize_audio(VideoState *is, int nb_samples)
+{
+ int wanted_nb_samples = nb_samples;
+
+ /* if not master, then we try to remove or add samples to correct the clock */
+ if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
+ double diff, avg_diff;
+ int min_nb_samples, max_nb_samples;
+
+ diff = get_clock(&is->audclk) - get_master_clock(is);
+
+ if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
+ is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
+ if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
+ /* not enough measurements yet for a reliable estimate */
+ is->audio_diff_avg_count++;
+ } else {
+ /* estimate the A-V difference */
+ avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
+
+ if (fabs(avg_diff) >= is->audio_diff_threshold) {
+ wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
+ min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
+ max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
+ wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
+ }
+ av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
+ diff, avg_diff, wanted_nb_samples - nb_samples,
+ is->audio_clock, is->audio_diff_threshold);
+ }
+ } else {
+ /* the difference is too large: it is probably caused by initial PTS
+ errors, so reset the A-V filter */
+ is->audio_diff_avg_count = 0;
+ is->audio_diff_cum = 0;
+ }
+ }
+
+ return wanted_nb_samples;
+}
+
+/**
+ * Decode one audio frame and return its uncompressed size.
+ *
+ * The processed audio frame is decoded, converted if required, and
+ * stored in is->audio_buf, with size in bytes given by the return
+ * value.
+ */
+static int audio_decode_frame(VideoState *is)
+{
+ int data_size, resampled_data_size;
+ int64_t dec_channel_layout;
+ av_unused double audio_clock0;
+ int wanted_nb_samples;
+ Frame *af;
+
+ if (is->paused)
+ return -1;
+
+ do {
+ if (!(af = frame_queue_peek_readable(&is->sampq)))
+ return -1;
+ frame_queue_next(&is->sampq);
+ } while (af->serial != is->audioq.serial);
+
+ data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(af->frame),
+ af->frame->nb_samples,
+ af->frame->format, 1);
+
+ dec_channel_layout =
+ (af->frame->channel_layout && av_frame_get_channels(af->frame) == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
+ af->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(af->frame));
+ wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
+
+ if (af->frame->format != is->audio_src.fmt ||
+ dec_channel_layout != is->audio_src.channel_layout ||
+ af->frame->sample_rate != is->audio_src.freq ||
+ (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
+ swr_free(&is->swr_ctx);
+ is->swr_ctx = swr_alloc_set_opts(NULL,
+ is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
+ dec_channel_layout, af->frame->format, af->frame->sample_rate,
+ 0, NULL);
+ if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
+ av_log(NULL, AV_LOG_ERROR,
+ "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
+ af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), av_frame_get_channels(af->frame),
+ is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
+ swr_free(&is->swr_ctx);
+ return -1;
+ }
+ is->audio_src.channel_layout = dec_channel_layout;
+ is->audio_src.channels = av_frame_get_channels(af->frame);
+ is->audio_src.freq = af->frame->sample_rate;
+ is->audio_src.fmt = af->frame->format;
+ }
+
+ if (is->swr_ctx) {
+ const uint8_t **in = (const uint8_t **)af->frame->extended_data;
+ uint8_t **out = &is->audio_buf1;
+ int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
+ int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
+ int len2;
+ if (out_size < 0) {
+ av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
+ return -1;
+ }
+ if (wanted_nb_samples != af->frame->nb_samples) {
+ if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
+ wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
+ return -1;
+ }
+ }
+ av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
+ if (!is->audio_buf1)
+ return AVERROR(ENOMEM);
+ len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
+ if (len2 < 0) {
+ av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
+ return -1;
+ }
+ if (len2 == out_count) {
+ av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
+ if (swr_init(is->swr_ctx) < 0)
+ swr_free(&is->swr_ctx);
+ }
+ is->audio_buf = is->audio_buf1;
+ resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
+ } else {
+ is->audio_buf = af->frame->data[0];
+ resampled_data_size = data_size;
+ }
+
+ audio_clock0 = is->audio_clock;
+ /* update the audio clock with the pts */
+ if (!isnan(af->pts))
+ is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
+ else
+ is->audio_clock = NAN;
+ is->audio_clock_serial = af->serial;
+#ifdef DEBUG
+ {
+ static double last_clock;
+ printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
+ is->audio_clock - last_clock,
+ is->audio_clock, audio_clock0);
+ last_clock = is->audio_clock;
+ }
+#endif
+ return resampled_data_size;
+}
+
+/* prepare a new audio buffer */
+static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
+{
+ VideoState *is = opaque;
+ int audio_size, len1;
+
+ audio_callback_time = av_gettime_relative();
+
+ while (len > 0) {
+ if (is->audio_buf_index >= is->audio_buf_size) {
+ audio_size = audio_decode_frame(is);
+ if (audio_size < 0) {
+ /* if error, just output silence */
+ is->audio_buf = is->silence_buf;
+ is->audio_buf_size = sizeof(is->silence_buf) / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
+ } else {
+ if (is->show_mode != SHOW_MODE_VIDEO)
+ update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
+ is->audio_buf_size = audio_size;
+ }
+ is->audio_buf_index = 0;
+ }
+ len1 = is->audio_buf_size - is->audio_buf_index;
+ if (len1 > len)
+ len1 = len;
+ memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
+ len -= len1;
+ stream += len1;
+ is->audio_buf_index += len1;
+ }
+ is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
+ /* Let's assume the audio driver that is used by SDL has two periods. */
+ if (!isnan(is->audio_clock)) {
+ set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
+ sync_clock_to_slave(&is->extclk, &is->audclk);
+ }
+}
+
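+/* open the SDL audio device, retrying with fewer channels and lower sample
+ * rates until a supported configuration is found; on success fill
+ * audio_hw_params and return the hardware buffer size in bytes */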
+static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
+{
+ SDL_AudioSpec wanted_spec, spec;
+ const char *env;
+ static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
+ static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
+ int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
+
+ env = SDL_getenv("SDL_AUDIO_CHANNELS");
+ if (env) {
+ wanted_nb_channels = atoi(env);
+ wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
+ }
+ if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
+ wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
+ wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
+ }
+ wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
+ wanted_spec.channels = wanted_nb_channels;
+ wanted_spec.freq = wanted_sample_rate;
+ if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
+ av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
+ return -1;
+ }
+ while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
+ next_sample_rate_idx--;
+ wanted_spec.format = AUDIO_S16SYS;
+ wanted_spec.silence = 0;
+ wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
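+ /* The callback buffer covers the samples played between two callbacks,
+ rounded up to a power of two: e.g. 2048 samples at 44100 Hz assuming
+ SDL_AUDIO_MAX_CALLBACKS_PER_SEC is 30, but never less than
+ SDL_AUDIO_MIN_BUFFER_SIZE. */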
+ wanted_spec.callback = sdl_audio_callback;
+ wanted_spec.userdata = opaque;
+ while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
+ av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
+ wanted_spec.channels, wanted_spec.freq, SDL_GetError());
+ wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
+ if (!wanted_spec.channels) {
+ wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
+ wanted_spec.channels = wanted_nb_channels;
+ if (!wanted_spec.freq) {
+ av_log(NULL, AV_LOG_ERROR,
+ "No more combinations to try, audio open failed\n");
+ return -1;
+ }
+ }
+ wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
+ }
+ if (spec.format != AUDIO_S16SYS) {
+ av_log(NULL, AV_LOG_ERROR,
+ "SDL advised audio format %d is not supported!\n", spec.format);
+ return -1;
+ }
+ if (spec.channels != wanted_spec.channels) {
+ wanted_channel_layout = av_get_default_channel_layout(spec.channels);
+ if (!wanted_channel_layout) {
+ av_log(NULL, AV_LOG_ERROR,
+ "SDL advised channel count %d is not supported!\n", spec.channels);
+ return -1;
+ }
+ }
+
+ audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
+ audio_hw_params->freq = spec.freq;
+ audio_hw_params->channel_layout = wanted_channel_layout;
+ audio_hw_params->channels = spec.channels;
+ audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
+ audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
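+ /* frame_size is the byte size of one sample across all channels;
+ bytes_per_sec is the same computed for freq samples, i.e. one second
+ of audio in the target format */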
+ if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
+ av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
+ return -1;
+ }
+ return spec.size;
+}
+
+/* open a given stream. Return 0 if OK */
+static int stream_component_open(VideoState *is, int stream_index)
+{
+ AVFormatContext *ic = is->ic;
+ AVCodecContext *avctx;
+ AVCodec *codec;
+ const char *forced_codec_name = NULL;
+ AVDictionary *opts;
+ AVDictionaryEntry *t = NULL;
+ int sample_rate, nb_channels;
+ int64_t channel_layout;
+ int ret = 0;
+ int stream_lowres = lowres;
+
+ if (stream_index < 0 || stream_index >= ic->nb_streams)
+ return -1;
+ avctx = ic->streams[stream_index]->codec;
+
+ codec = avcodec_find_decoder(avctx->codec_id);
+
+ switch(avctx->codec_type){
+ case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
+ case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
+ case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
+ }
+ if (forced_codec_name)
+ codec = avcodec_find_decoder_by_name(forced_codec_name);
+ if (!codec) {
+ if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
+ "No codec could be found with name '%s'\n", forced_codec_name);
+ else av_log(NULL, AV_LOG_WARNING,
+ "No codec could be found with id %d\n", avctx->codec_id);
+ return -1;
+ }
+
+ avctx->codec_id = codec->id;
+ if(stream_lowres > av_codec_get_max_lowres(codec)){
+ av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
+ av_codec_get_max_lowres(codec));
+ stream_lowres = av_codec_get_max_lowres(codec);
+ }
+ av_codec_set_lowres(avctx, stream_lowres);
+
+ if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
+ if (fast)
+ avctx->flags2 |= AV_CODEC_FLAG2_FAST;
+ if(codec->capabilities & CODEC_CAP_DR1)
+ avctx->flags |= CODEC_FLAG_EMU_EDGE;
+
+ opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
+ if (!av_dict_get(opts, "threads", NULL, 0))
+ av_dict_set(&opts, "threads", "auto", 0);
+ if (stream_lowres)
+ av_dict_set_int(&opts, "lowres", stream_lowres, 0);
+ if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
+ av_dict_set(&opts, "refcounted_frames", "1", 0);
+ if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
+ goto fail;
+ }
+ if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
+ av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
+ ret = AVERROR_OPTION_NOT_FOUND;
+ goto fail;
+ }
+
+ is->eof = 0;
+ ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
+ switch (avctx->codec_type) {
+ case AVMEDIA_TYPE_AUDIO:
+#if CONFIG_AVFILTER
+ {
+ AVFilterLink *link;
+
+ is->audio_filter_src.freq = avctx->sample_rate;
+ is->audio_filter_src.channels = avctx->channels;
+ is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
+ is->audio_filter_src.fmt = avctx->sample_fmt;
+ if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
+ goto fail;
+ link = is->out_audio_filter->inputs[0];
+ sample_rate = link->sample_rate;
+ nb_channels = link->channels;
+ channel_layout = link->channel_layout;
+ }
+#else
+ sample_rate = avctx->sample_rate;
+ nb_channels = avctx->channels;
+ channel_layout = avctx->channel_layout;
+#endif
+
+ /* prepare audio output */
+ if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
+ goto fail;
+ is->audio_hw_buf_size = ret;
+ is->audio_src = is->audio_tgt;
+ is->audio_buf_size = 0;
+ is->audio_buf_index = 0;
+
+ /* init averaging filter */
+ is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
+ is->audio_diff_avg_count = 0;
+ /* since we do not have a precise enough measure of the audio FIFO fullness,
+ we correct audio sync only if the error is larger than this threshold */
+ is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
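+ /* (this threshold equals the playback time, in seconds, of one full
+ hardware audio buffer) */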
+
+ is->audio_stream = stream_index;
+ is->audio_st = ic->streams[stream_index];
+
+ decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
+ if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
+ is->auddec.start_pts = is->audio_st->start_time;
+ is->auddec.start_pts_tb = is->audio_st->time_base;
+ }
+ decoder_start(&is->auddec, audio_thread, is);
+ SDL_PauseAudio(0);
+ break;
+ case AVMEDIA_TYPE_VIDEO:
+ is->video_stream = stream_index;
+ is->video_st = ic->streams[stream_index];
+
+ decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
+ decoder_start(&is->viddec, video_thread, is);
+ is->queue_attachments_req = 1;
+ break;
+ case AVMEDIA_TYPE_SUBTITLE:
+ is->subtitle_stream = stream_index;
+ is->subtitle_st = ic->streams[stream_index];
+
+ decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
+ decoder_start(&is->subdec, subtitle_thread, is);
+ break;
+ default:
+ break;
+ }
+
+fail:
+ av_dict_free(&opts);
+
+ return ret;
+}
+
+static void stream_component_close(VideoState *is, int stream_index)
+{
+ AVFormatContext *ic = is->ic;
+ AVCodecContext *avctx;
+
+ if (stream_index < 0 || stream_index >= ic->nb_streams)
+ return;
+ avctx = ic->streams[stream_index]->codec;
+
+ switch (avctx->codec_type) {
+ case AVMEDIA_TYPE_AUDIO:
+ decoder_abort(&is->auddec, &is->sampq);
+ SDL_CloseAudio();
+ decoder_destroy(&is->auddec);
+ swr_free(&is->swr_ctx);
+ av_freep(&is->audio_buf1);
+ is->audio_buf1_size = 0;
+ is->audio_buf = NULL;
+
+ if (is->rdft) {
+ av_rdft_end(is->rdft);
+ av_freep(&is->rdft_data);
+ is->rdft = NULL;
+ is->rdft_bits = 0;
+ }
+ break;
+ case AVMEDIA_TYPE_VIDEO:
+ decoder_abort(&is->viddec, &is->pictq);
+ decoder_destroy(&is->viddec);
+ break;
+ case AVMEDIA_TYPE_SUBTITLE:
+ decoder_abort(&is->subdec, &is->subpq);
+ decoder_destroy(&is->subdec);
+ break;
+ default:
+ break;
+ }
+
+ ic->streams[stream_index]->discard = AVDISCARD_ALL;
+ avcodec_close(avctx);
+ switch (avctx->codec_type) {
+ case AVMEDIA_TYPE_AUDIO:
+ is->audio_st = NULL;
+ is->audio_stream = -1;
+ break;
+ case AVMEDIA_TYPE_VIDEO:
+ is->video_st = NULL;
+ is->video_stream = -1;
+ break;
+ case AVMEDIA_TYPE_SUBTITLE:
+ is->subtitle_st = NULL;
+ is->subtitle_stream = -1;
+ break;
+ default:
+ break;
+ }
+}
+
+static int decode_interrupt_cb(void *ctx)
+{
+ VideoState *is = ctx;
+ return is->abort_request;
+}
+
+static int is_realtime(AVFormatContext *s)
+{
+ if( !strcmp(s->iformat->name, "rtp")
+ || !strcmp(s->iformat->name, "rtsp")
+ || !strcmp(s->iformat->name, "sdp")
+ )
+ return 1;
+
+ if(s->pb && ( !strncmp(s->filename, "rtp:", 4)
+ || !strncmp(s->filename, "udp:", 4)
+ )
+ )
+ return 1;
+ return 0;
+}
+
+/* this thread gets the stream from the disk or the network */
+static int read_thread(void *arg)
+{
+ VideoState *is = arg;
+ AVFormatContext *ic = NULL;
+ int err, i, ret;
+ int st_index[AVMEDIA_TYPE_NB];
+ AVPacket pkt1, *pkt = &pkt1;
+ int64_t stream_start_time;
+ int pkt_in_play_range = 0;
+ AVDictionaryEntry *t;
+ AVDictionary **opts;
+ int orig_nb_streams;
+ SDL_mutex *wait_mutex = SDL_CreateMutex();
+ int scan_all_pmts_set = 0;
+ int64_t pkt_ts;
+
+ memset(st_index, -1, sizeof(st_index));
+ is->last_video_stream = is->video_stream = -1;
+ is->last_audio_stream = is->audio_stream = -1;
+ is->last_subtitle_stream = is->subtitle_stream = -1;
+ is->eof = 0;
+
+ ic = avformat_alloc_context();
+ if (!ic) {
+ av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ ic->interrupt_callback.callback = decode_interrupt_cb;
+ ic->interrupt_callback.opaque = is;
+ if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
+ av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
+ scan_all_pmts_set = 1;
+ }
+ err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
+ if (err < 0) {
+ print_error(is->filename, err);
+ ret = -1;
+ goto fail;
+ }
+ if (scan_all_pmts_set)
+ av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
+
+ if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
+ av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
+ ret = AVERROR_OPTION_NOT_FOUND;
+ goto fail;
+ }
+ is->ic = ic;
+
+ if (genpts)
+ ic->flags |= AVFMT_FLAG_GENPTS;
+
+ av_format_inject_global_side_data(ic);
+
+ opts = setup_find_stream_info_opts(ic, codec_opts);
+ orig_nb_streams = ic->nb_streams;
+
+ err = avformat_find_stream_info(ic, opts);
+
+ for (i = 0; i < orig_nb_streams; i++)
+ av_dict_free(&opts[i]);
+ av_freep(&opts);
+
+ if (err < 0) {
+ av_log(NULL, AV_LOG_WARNING,
+ "%s: could not find codec parameters\n", is->filename);
+ ret = -1;
+ goto fail;
+ }
+
+ if (ic->pb)
+ ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
+
+ if (seek_by_bytes < 0)
+ seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
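+ /* in "auto" mode, default to byte seeking for formats whose timestamps
+ may be discontinuous, with Ogg excluded as a special case */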
+
+ is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
+
+ if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
+ window_title = av_asprintf("%s - %s", t->value, input_filename);
+
+ /* if seeking requested, we execute it */
+ if (start_time != AV_NOPTS_VALUE) {
+ int64_t timestamp;
+
+ timestamp = start_time;
+ /* add the stream start time */
+ if (ic->start_time != AV_NOPTS_VALUE)
+ timestamp += ic->start_time;
+ ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
+ is->filename, (double)timestamp / AV_TIME_BASE);
+ }
+ }
+
+ is->realtime = is_realtime(ic);
+
+ if (show_status)
+ av_dump_format(ic, 0, is->filename, 0);
+
+ for (i = 0; i < ic->nb_streams; i++) {
+ AVStream *st = ic->streams[i];
+ enum AVMediaType type = st->codec->codec_type;
+ st->discard = AVDISCARD_ALL;
+ if (wanted_stream_spec[type] && st_index[type] == -1)
+ if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
+ st_index[type] = i;
+ }
+ for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
+ if (wanted_stream_spec[i] && st_index[i] == -1) {
+ av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
+ st_index[i] = INT_MAX;
+ }
+ }
+
+ if (!video_disable)
+ st_index[AVMEDIA_TYPE_VIDEO] =
+ av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
+ st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
+ if (!audio_disable)
+ st_index[AVMEDIA_TYPE_AUDIO] =
+ av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
+ st_index[AVMEDIA_TYPE_AUDIO],
+ st_index[AVMEDIA_TYPE_VIDEO],
+ NULL, 0);
+ if (!video_disable && !subtitle_disable)
+ st_index[AVMEDIA_TYPE_SUBTITLE] =
+ av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
+ st_index[AVMEDIA_TYPE_SUBTITLE],
+ (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
+ st_index[AVMEDIA_TYPE_AUDIO] :
+ st_index[AVMEDIA_TYPE_VIDEO]),
+ NULL, 0);
+
+ is->show_mode = show_mode;
+ if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
+ AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
+ AVCodecContext *avctx = st->codec;
+ AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
+ if (avctx->width)
+ set_default_window_size(avctx->width, avctx->height, sar);
+ }
+
+ /* open the streams */
+ if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
+ stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
+ }
+
+ ret = -1;
+ if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
+ ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
+ }
+ if (is->show_mode == SHOW_MODE_NONE)
+ is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
+
+ if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
+ stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
+ }
+
+ if (is->video_stream < 0 && is->audio_stream < 0) {
+ av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
+ is->filename);
+ ret = -1;
+ goto fail;
+ }
+
+ if (infinite_buffer < 0 && is->realtime)
+ infinite_buffer = 1;
+
+ for (;;) {
+ if (is->abort_request)
+ break;
+ if (is->paused != is->last_paused) {
+ is->last_paused = is->paused;
+ if (is->paused)
+ is->read_pause_return = av_read_pause(ic);
+ else
+ av_read_play(ic);
+ }
+#if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
+ if (is->paused &&
+ (!strcmp(ic->iformat->name, "rtsp") ||
+ (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
+ /* wait 10 ms to avoid trying to get another packet */
+ /* XXX: horrible */
+ SDL_Delay(10);
+ continue;
+ }
+#endif
+ if (is->seek_req) {
+ int64_t seek_target = is->seek_pos;
+ int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
+ int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
+// FIXME the +-2 compensates for the seek_pos/seek_rel variables not being
+// rounded in the correct direction when they are generated
+
+ ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR,
+ "%s: error while seeking\n", is->ic->filename);
+ } else {
+ if (is->audio_stream >= 0) {
+ packet_queue_flush(&is->audioq);
+ packet_queue_put(&is->audioq, &flush_pkt);
+ }
+ if (is->subtitle_stream >= 0) {
+ packet_queue_flush(&is->subtitleq);
+ packet_queue_put(&is->subtitleq, &flush_pkt);
+ }
+ if (is->video_stream >= 0) {
+ packet_queue_flush(&is->videoq);
+ packet_queue_put(&is->videoq, &flush_pkt);
+ }
+ if (is->seek_flags & AVSEEK_FLAG_BYTE) {
+ set_clock(&is->extclk, NAN, 0);
+ } else {
+ set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
+ }
+ }
+ is->seek_req = 0;
+ is->queue_attachments_req = 1;
+ is->eof = 0;
+ if (is->paused)
+ step_to_next_frame(is);
+ }
+ if (is->queue_attachments_req) {
+ if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
+ AVPacket copy;
+ if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
+ goto fail;
+ packet_queue_put(&is->videoq, &copy);
+ packet_queue_put_nullpacket(&is->videoq, is->video_stream);
+ }
+ is->queue_attachments_req = 0;
+ }
+
+ /* if the queue are full, no need to read more */
+ if (infinite_buffer<1 &&
+ (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
+ || ( (is->audioq .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
+ && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request
+ || (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC))
+ && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
+ /* wait 10 ms */
+ SDL_LockMutex(wait_mutex);
+ SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
+ SDL_UnlockMutex(wait_mutex);
+ continue;
+ }
+ if (!is->paused &&
+ (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
+ (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
+ if (loop != 1 && (!loop || --loop)) {
+ stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
+ } else if (autoexit) {
+ ret = AVERROR_EOF;
+ goto fail;
+ }
+ }
+ ret = av_read_frame(ic, pkt);
+ if (ret < 0) {
+ if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
+ if (is->video_stream >= 0)
+ packet_queue_put_nullpacket(&is->videoq, is->video_stream);
+ if (is->audio_stream >= 0)
+ packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
+ if (is->subtitle_stream >= 0)
+ packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
+ is->eof = 1;
+ }
+ if (ic->pb && ic->pb->error)
+ break;
+ SDL_LockMutex(wait_mutex);
+ SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
+ SDL_UnlockMutex(wait_mutex);
+ continue;
+ } else {
+ is->eof = 0;
+ }
+ /* check if packet is in play range specified by user, then queue, otherwise discard */
+ stream_start_time = ic->streams[pkt->stream_index]->start_time;
+ pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
+ pkt_in_play_range = duration == AV_NOPTS_VALUE ||
+ (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
+ av_q2d(ic->streams[pkt->stream_index]->time_base) -
+ (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
+ <= ((double)duration / 1000000);
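+ /* i.e. keep the packet if no play duration was requested, or if its
+ timestamp, taken relative to the stream start time and the requested
+ start time and converted to seconds, still falls within that duration */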
+ if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
+ packet_queue_put(&is->audioq, pkt);
+ } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
+ && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
+ packet_queue_put(&is->videoq, pkt);
+ } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
+ packet_queue_put(&is->subtitleq, pkt);
+ } else {
+ av_free_packet(pkt);
+ }
+ }
+ /* wait until the end */
+ while (!is->abort_request) {
+ SDL_Delay(100);
+ }
+
+ ret = 0;
+ fail:
+ /* close each stream */
+ if (is->audio_stream >= 0)
+ stream_component_close(is, is->audio_stream);
+ if (is->video_stream >= 0)
+ stream_component_close(is, is->video_stream);
+ if (is->subtitle_stream >= 0)
+ stream_component_close(is, is->subtitle_stream);
+ if (ic) {
+ avformat_close_input(&ic);
+ is->ic = NULL;
+ }
+
+ if (ret != 0) {
+ SDL_Event event;
+
+ event.type = FF_QUIT_EVENT;
+ event.user.data1 = is;
+ SDL_PushEvent(&event);
+ }
+ SDL_DestroyMutex(wait_mutex);
+ return 0;
+}
+
+static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
+{
+ VideoState *is;
+
+ is = av_mallocz(sizeof(VideoState));
+ if (!is)
+ return NULL;
+ av_strlcpy(is->filename, filename, sizeof(is->filename));
+ is->iformat = iformat;
+ is->ytop = 0;
+ is->xleft = 0;
+
+ /* start video display */
+ if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
+ goto fail;
+ if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
+ goto fail;
+ if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
+ goto fail;
+
+ packet_queue_init(&is->videoq);
+ packet_queue_init(&is->audioq);
+ packet_queue_init(&is->subtitleq);
+
+ is->continue_read_thread = SDL_CreateCond();
+
+ init_clock(&is->vidclk, &is->videoq.serial);
+ init_clock(&is->audclk, &is->audioq.serial);
+ init_clock(&is->extclk, &is->extclk.serial);
+ is->audio_clock_serial = -1;
+ is->av_sync_type = av_sync_type;
+ is->read_tid = SDL_CreateThread(read_thread, is);
+ if (!is->read_tid) {
+fail:
+ stream_close(is);
+ return NULL;
+ }
+ return is;
+}
+
+static void stream_cycle_channel(VideoState *is, int codec_type)
+{
+ AVFormatContext *ic = is->ic;
+ int start_index, stream_index;
+ int old_index;
+ AVStream *st;
+ AVProgram *p = NULL;
+ int nb_streams = is->ic->nb_streams;
+
+ if (codec_type == AVMEDIA_TYPE_VIDEO) {
+ start_index = is->last_video_stream;
+ old_index = is->video_stream;
+ } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
+ start_index = is->last_audio_stream;
+ old_index = is->audio_stream;
+ } else {
+ start_index = is->last_subtitle_stream;
+ old_index = is->subtitle_stream;
+ }
+ stream_index = start_index;
+
+ if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
+ p = av_find_program_from_stream(ic, NULL, is->video_stream);
+ if (p) {
+ nb_streams = p->nb_stream_indexes;
+ for (start_index = 0; start_index < nb_streams; start_index++)
+ if (p->stream_index[start_index] == stream_index)
+ break;
+ if (start_index == nb_streams)
+ start_index = -1;
+ stream_index = start_index;
+ }
+ }
+
+ for (;;) {
+ if (++stream_index >= nb_streams)
+ {
+ if (codec_type == AVMEDIA_TYPE_SUBTITLE)
+ {
+ stream_index = -1;
+ is->last_subtitle_stream = -1;
+ goto the_end;
+ }
+ if (start_index == -1)
+ return;
+ stream_index = 0;
+ }
+ if (stream_index == start_index)
+ return;
+ st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
+ if (st->codec->codec_type == codec_type) {
+ /* check that parameters are OK */
+ switch (codec_type) {
+ case AVMEDIA_TYPE_AUDIO:
+ if (st->codec->sample_rate != 0 &&
+ st->codec->channels != 0)
+ goto the_end;
+ break;
+ case AVMEDIA_TYPE_VIDEO:
+ case AVMEDIA_TYPE_SUBTITLE:
+ goto the_end;
+ default:
+ break;
+ }
+ }
+ }
+ the_end:
+ if (p && stream_index != -1)
+ stream_index = p->stream_index[stream_index];
+ av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
+ av_get_media_type_string(codec_type),
+ old_index,
+ stream_index);
+
+ stream_component_close(is, old_index);
+ stream_component_open(is, stream_index);
+}
+
+
+static void toggle_full_screen(VideoState *is)
+{
+#if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
+ /* OS X needs to reallocate the SDL overlays */
+ int i;
+ for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
+ is->pictq.queue[i].reallocate = 1;
+#endif
+ is_full_screen = !is_full_screen;
+ video_open(is, 1, NULL);
+}
+
+static void toggle_audio_display(VideoState *is)
+{
+ int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
+ int next = is->show_mode;
+ do {
+ next = (next + 1) % SHOW_MODE_NB;
+ } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
+ if (is->show_mode != next) {
+ fill_rectangle(screen,
+ is->xleft, is->ytop, is->width, is->height,
+ bgcolor, 1);
+ is->force_refresh = 1;
+ is->show_mode = next;
+ }
+}
+
+static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
+ double remaining_time = 0.0;
+ SDL_PumpEvents();
+ while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
+ if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
+ SDL_ShowCursor(0);
+ cursor_hidden = 1;
+ }
+ if (remaining_time > 0.0)
+ av_usleep((int64_t)(remaining_time * 1000000.0));
+ remaining_time = REFRESH_RATE;
+ if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
+ video_refresh(is, &remaining_time);
+ SDL_PumpEvents();
+ }
+}
+
+static void seek_chapter(VideoState *is, int incr)
+{
+ int64_t pos = get_master_clock(is) * AV_TIME_BASE;
+ int i;
+
+ if (!is->ic->nb_chapters)
+ return;
+
+ /* find the current chapter */
+ for (i = 0; i < is->ic->nb_chapters; i++) {
+ AVChapter *ch = is->ic->chapters[i];
+ if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
+ i--;
+ break;
+ }
+ }
+
+ i += incr;
+ i = FFMAX(i, 0);
+ if (i >= is->ic->nb_chapters)
+ return;
+
+ av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
+ stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
+ AV_TIME_BASE_Q), 0, 0);
+}
+
+/* handle an event sent by the GUI */
+static void event_loop(VideoState *cur_stream)
+{
+ SDL_Event event;
+ double incr, pos, frac;
+
+ for (;;) {
+ double x;
+ refresh_loop_wait_event(cur_stream, &event);
+ switch (event.type) {
+ case SDL_KEYDOWN:
+ if (exit_on_keydown) {
+ do_exit(cur_stream);
+ break;
+ }
+ switch (event.key.keysym.sym) {
+ case SDLK_ESCAPE:
+ case SDLK_q:
+ do_exit(cur_stream);
+ break;
+ case SDLK_f:
+ toggle_full_screen(cur_stream);
+ cur_stream->force_refresh = 1;
+ break;
+ case SDLK_p:
+ case SDLK_SPACE:
+ toggle_pause(cur_stream);
+ break;
+ case SDLK_s: // S: Step to next frame
+ step_to_next_frame(cur_stream);
+ break;
+ case SDLK_a:
+ stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
+ break;
+ case SDLK_v:
+ stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
+ break;
+ case SDLK_c:
+ stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
+ stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
+ stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
+ break;
+ case SDLK_t:
+ stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
+ break;
+ case SDLK_w:
+#if CONFIG_AVFILTER
+ if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
+ if (++cur_stream->vfilter_idx >= nb_vfilters)
+ cur_stream->vfilter_idx = 0;
+ } else {
+ cur_stream->vfilter_idx = 0;
+ toggle_audio_display(cur_stream);
+ }
+#else
+ toggle_audio_display(cur_stream);
+#endif
+ break;
+ case SDLK_PAGEUP:
+ if (cur_stream->ic->nb_chapters <= 1) {
+ incr = 600.0;
+ goto do_seek;
+ }
+ seek_chapter(cur_stream, 1);
+ break;
+ case SDLK_PAGEDOWN:
+ if (cur_stream->ic->nb_chapters <= 1) {
+ incr = -600.0;
+ goto do_seek;
+ }
+ seek_chapter(cur_stream, -1);
+ break;
+ case SDLK_LEFT:
+ incr = -10.0;
+ goto do_seek;
+ case SDLK_RIGHT:
+ incr = 10.0;
+ goto do_seek;
+ case SDLK_UP:
+ incr = 60.0;
+ goto do_seek;
+ case SDLK_DOWN:
+ incr = -60.0;
+ do_seek:
+ if (seek_by_bytes) {
+ pos = -1;
+ if (pos < 0 && cur_stream->video_stream >= 0)
+ pos = frame_queue_last_pos(&cur_stream->pictq);
+ if (pos < 0 && cur_stream->audio_stream >= 0)
+ pos = frame_queue_last_pos(&cur_stream->sampq);
+ if (pos < 0)
+ pos = avio_tell(cur_stream->ic->pb);
+ if (cur_stream->ic->bit_rate)
+ incr *= cur_stream->ic->bit_rate / 8.0;
+ else
+ incr *= 180000.0;
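+ /* the seek increment is converted from seconds to a byte offset using
+ the container byte rate (bit_rate / 8), or roughly 180 kB/s when the
+ bit rate is unknown */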
+ pos += incr;
+ stream_seek(cur_stream, pos, incr, 1);
+ } else {
+ pos = get_master_clock(cur_stream);
+ if (isnan(pos))
+ pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
+ pos += incr;
+ if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
+ pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
+ stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ case SDL_VIDEOEXPOSE:
+ cur_stream->force_refresh = 1;
+ break;
+ case SDL_MOUSEBUTTONDOWN:
+ if (exit_on_mousedown) {
+ do_exit(cur_stream);
+ break;
+ }
+ case SDL_MOUSEMOTION:
+ if (cursor_hidden) {
+ SDL_ShowCursor(1);
+ cursor_hidden = 0;
+ }
+ cursor_last_shown = av_gettime_relative();
+ if (event.type == SDL_MOUSEBUTTONDOWN) {
+ x = event.button.x;
+ } else {
+ if (event.motion.state != SDL_PRESSED)
+ break;
+ x = event.motion.x;
+ }
+ if (seek_by_bytes || cur_stream->ic->duration <= 0) {
+ uint64_t size = avio_size(cur_stream->ic->pb);
+ stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
+ } else {
+ int64_t ts;
+ int ns, hh, mm, ss;
+ int tns, thh, tmm, tss;
+ tns = cur_stream->ic->duration / 1000000LL;
+ thh = tns / 3600;
+ tmm = (tns % 3600) / 60;
+ tss = (tns % 60);
+ frac = x / cur_stream->width;
+ ns = frac * tns;
+ hh = ns / 3600;
+ mm = (ns % 3600) / 60;
+ ss = (ns % 60);
+ av_log(NULL, AV_LOG_INFO,
+ "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
+ hh, mm, ss, thh, tmm, tss);
+ ts = frac * cur_stream->ic->duration;
+ if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
+ ts += cur_stream->ic->start_time;
+ stream_seek(cur_stream, ts, 0, 0);
+ }
+ break;
+ case SDL_VIDEORESIZE:
+ screen = SDL_SetVideoMode(FFMIN(16383, event.resize.w), event.resize.h, 0,
+ SDL_HWSURFACE|(is_full_screen?SDL_FULLSCREEN:SDL_RESIZABLE)|SDL_ASYNCBLIT|SDL_HWACCEL);
+ if (!screen) {
+ av_log(NULL, AV_LOG_FATAL, "Failed to set video mode\n");
+ do_exit(cur_stream);
+ }
+ screen_width = cur_stream->width = screen->w;
+ screen_height = cur_stream->height = screen->h;
+ cur_stream->force_refresh = 1;
+ break;
+ case SDL_QUIT:
+ case FF_QUIT_EVENT:
+ do_exit(cur_stream);
+ break;
+ case FF_ALLOC_EVENT:
+ alloc_picture(event.user.data1);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static int opt_frame_size(void *optctx, const char *opt, const char *arg)
+{
+ av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
+ return opt_default(NULL, "video_size", arg);
+}
+
+static int opt_width(void *optctx, const char *opt, const char *arg)
+{
+ screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
+ return 0;
+}
+
+static int opt_height(void *optctx, const char *opt, const char *arg)
+{
+ screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
+ return 0;
+}
+
+static int opt_format(void *optctx, const char *opt, const char *arg)
+{
+ file_iformat = av_find_input_format(arg);
+ if (!file_iformat) {
+ av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
+ return AVERROR(EINVAL);
+ }
+ return 0;
+}
+
+static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
+{
+ av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
+ return opt_default(NULL, "pixel_format", arg);
+}
+
+static int opt_sync(void *optctx, const char *opt, const char *arg)
+{
+ if (!strcmp(arg, "audio"))
+ av_sync_type = AV_SYNC_AUDIO_MASTER;
+ else if (!strcmp(arg, "video"))
+ av_sync_type = AV_SYNC_VIDEO_MASTER;
+ else if (!strcmp(arg, "ext"))
+ av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
+ else {
+ av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
+ exit(1);
+ }
+ return 0;
+}
+
+static int opt_seek(void *optctx, const char *opt, const char *arg)
+{
+ start_time = parse_time_or_die(opt, arg, 1);
+ return 0;
+}
+
+static int opt_duration(void *optctx, const char *opt, const char *arg)
+{
+ duration = parse_time_or_die(opt, arg, 1);
+ return 0;
+}
+
+static int opt_show_mode(void *optctx, const char *opt, const char *arg)
+{
+ show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
+ !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
+ !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
+ parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
+ return 0;
+}
+
+static void opt_input_file(void *optctx, const char *filename)
+{
+ if (input_filename) {
+ av_log(NULL, AV_LOG_FATAL,
+ "Argument '%s' provided as input filename, but '%s' was already specified.\n",
+ filename, input_filename);
+ exit(1);
+ }
+ if (!strcmp(filename, "-"))
+ filename = "pipe:";
+ input_filename = filename;
+}
+
+static int opt_codec(void *optctx, const char *opt, const char *arg)
+{
+ const char *spec = strchr(opt, ':');
+ if (!spec) {
+ av_log(NULL, AV_LOG_ERROR,
+ "No media specifier was specified in '%s' in option '%s'\n",
+ arg, opt);
+ return AVERROR(EINVAL);
+ }
+ spec++;
+ switch (spec[0]) {
+ case 'a' : audio_codec_name = arg; break;
+ case 's' : subtitle_codec_name = arg; break;
+ case 'v' : video_codec_name = arg; break;
+ default:
+ av_log(NULL, AV_LOG_ERROR,
+ "Invalid media specifier '%s' in option '%s'\n", spec, opt);
+ return AVERROR(EINVAL);
+ }
+ return 0;
+}
+
+static int dummy;
+
+static const OptionDef options[] = {
+#include "cmdutils_common_opts.h"
+ { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
+ { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
+ { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
+ { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
+ { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
+ { "vn", OPT_BOOL, { &video_disable }, "disable video" },
+ { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
+ { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
+ { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
+ { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
+ { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
+ { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
+ { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
+ { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
+ { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
+ { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
+ { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
+ { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
+ { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
+ { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
+ { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
+ { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
+ { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
+ { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
+ { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
+ { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
+ { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
+ { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
+ { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
+#if CONFIG_AVFILTER
+ { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
+ { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
+#endif
+ { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
+ { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
+ { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
+ { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
+ { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
+ { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
+ { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
+ { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
+ { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
+ { NULL, },
+};
+
+static void show_usage(void)
+{
+ av_log(NULL, AV_LOG_INFO, "Simple media player\n");
+ av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
+ av_log(NULL, AV_LOG_INFO, "\n");
+}
+
+void show_help_default(const char *opt, const char *arg)
+{
+ av_log_set_callback(log_callback_help);
+ show_usage();
+ show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
+ show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
+ printf("\n");
+ show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
+ show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
+#if !CONFIG_AVFILTER
+ show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
+#else
+ show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
+#endif
+ printf("\nWhile playing:\n"
+ "q, ESC quit\n"
+ "f toggle full screen\n"
+ "p, SPC pause\n"
+ "a cycle audio channel in the current program\n"
+ "v cycle video channel\n"
+ "t cycle subtitle channel in the current program\n"
+ "c cycle program\n"
+ "w cycle video filters or show modes\n"
+ "s activate frame-step mode\n"
+ "left/right seek backward/forward 10 seconds\n"
+ "down/up seek backward/forward 1 minute\n"
+ "page down/page up seek backward/forward 10 minutes\n"
+ "mouse click seek to percentage in file corresponding to fraction of width\n"
+ );
+}
+
+static int lockmgr(void **mtx, enum AVLockOp op)
+{
+ switch(op) {
+ case AV_LOCK_CREATE:
+ *mtx = SDL_CreateMutex();
+ if(!*mtx)
+ return 1;
+ return 0;
+ case AV_LOCK_OBTAIN:
+ return !!SDL_LockMutex(*mtx);
+ case AV_LOCK_RELEASE:
+ return !!SDL_UnlockMutex(*mtx);
+ case AV_LOCK_DESTROY:
+ SDL_DestroyMutex(*mtx);
+ return 0;
+ }
+ return 1;
+}
+
+/* Called from the main */
+int main(int argc, char **argv)
+{
+ int flags;
+ VideoState *is;
+ char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
+
+ av_log_set_flags(AV_LOG_SKIP_REPEATED);
+ parse_loglevel(argc, argv, options);
+
+ /* register all codecs, demux and protocols */
+#if CONFIG_AVDEVICE
+ avdevice_register_all();
+#endif
+#if CONFIG_AVFILTER
+ avfilter_register_all();
+#endif
+ av_register_all();
+ avformat_network_init();
+
+ init_opts();
+
+ signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
+ signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
+
+ show_banner(argc, argv, options);
+
+ parse_options(NULL, argc, argv, options, opt_input_file);
+
+ if (!input_filename) {
+ show_usage();
+ av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
+ av_log(NULL, AV_LOG_FATAL,
+ "Use -h to get full help or, even better, run 'man %s'\n", program_name);
+ exit(1);
+ }
+
+ if (display_disable) {
+ video_disable = 1;
+ }
+ flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
+ if (audio_disable)
+ flags &= ~SDL_INIT_AUDIO;
+ if (display_disable)
+ SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
+#if !defined(_WIN32) && !defined(__APPLE__)
+ flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
+#endif
+ if (SDL_Init (flags)) {
+ av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
+ av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
+ exit(1);
+ }
+
+ if (!display_disable) {
+ const SDL_VideoInfo *vi = SDL_GetVideoInfo();
+ fs_screen_width = vi->current_w;
+ fs_screen_height = vi->current_h;
+ }
+
+ SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
+ SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
+ SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
+
+ if (av_lockmgr_register(lockmgr)) {
+ av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
+ do_exit(NULL);
+ }
+
+ av_init_packet(&flush_pkt);
+ flush_pkt.data = (uint8_t *)&flush_pkt;
+
+ is = stream_open(input_filename, file_iformat);
+ if (!is) {
+ av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
+ do_exit(NULL);
+ }
+
+ event_loop(is);
+
+ /* never returns */
+
+ return 0;
+}
--- /dev/null
+/*
+ * AAC decoder
+ * Copyright (c) 2005-2006 Oded Shimon ( ods15 ods15 dyndns org )
+ * Copyright (c) 2006-2007 Maxim Gavrilov ( maxim.gavrilov gmail com )
+ * Copyright (c) 2008-2013 Alex Converse <alex.converse@gmail.com>
+ *
+ * AAC LATM decoder
+ * Copyright (c) 2008-2010 Paul Kendall <paul@kcbbs.gen.nz>
+ * Copyright (c) 2010 Janne Grunau <janne-libav@jannau.net>
+ *
+ * AAC decoder fixed-point implementation
+ * Copyright (c) 2013
+ * MIPS Technologies, Inc., California.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * AAC decoder
+ * @author Oded Shimon ( ods15 ods15 dyndns org )
+ * @author Maxim Gavrilov ( maxim.gavrilov gmail com )
+ *
+ * AAC decoder fixed-point implementation
+ * @author Stanislav Ocovaj ( stanislav.ocovaj imgtec com )
+ * @author Nedeljko Babic ( nedeljko.babic imgtec com )
+ */
+
+/*
+ * supported tools
+ *
+ * Support? Name
+ * N (code in SoC repo) gain control
+ * Y block switching
+ * Y window shapes - standard
+ * N window shapes - Low Delay
+ * Y filterbank - standard
+ * N (code in SoC repo) filterbank - Scalable Sample Rate
+ * Y Temporal Noise Shaping
+ * Y Long Term Prediction
+ * Y intensity stereo
+ * Y channel coupling
+ * Y frequency domain prediction
+ * Y Perceptual Noise Substitution
+ * Y Mid/Side stereo
+ * N Scalable Inverse AAC Quantization
+ * N Frequency Selective Switch
+ * N upsampling filter
+ * Y quantization & coding - AAC
+ * N quantization & coding - TwinVQ
+ * N quantization & coding - BSAC
+ * N AAC Error Resilience tools
+ * N Error Resilience payload syntax
+ * N Error Protection tool
+ * N CELP
+ * N Silence Compression
+ * N HVXC
+ * N HVXC 4kbits/s VR
+ * N Structured Audio tools
+ * N Structured Audio Sample Bank Format
+ * N MIDI
+ * N Harmonic and Individual Lines plus Noise
+ * N Text-To-Speech Interface
+ * Y Spectral Band Replication
+ * Y (not in this code) Layer-1
+ * Y (not in this code) Layer-2
+ * Y (not in this code) Layer-3
+ * N SinuSoidal Coding (Transient, Sinusoid, Noise)
+ * Y Parametric Stereo
+ * N Direct Stream Transfer
+ * Y (not in fixed point code) Enhanced AAC Low Delay (ER AAC ELD)
+ *
+ * Note: - HE AAC v1 comprises LC AAC with Spectral Band Replication.
+ * - HE AAC v2 comprises LC AAC with Spectral Band Replication and
+ Parametric Stereo.
+ */
+
+static VLC vlc_scalefactors;
+static VLC vlc_spectral[11];
+
+static int output_configure(AACContext *ac,
+ uint8_t layout_map[MAX_ELEM_ID*4][3], int tags,
+ enum OCStatus oc_type, int get_new_frame);
+
+#define overread_err "Input buffer exhausted before END element found\n"
+
+static int count_channels(uint8_t (*layout)[3], int tags)
+{
+ int i, sum = 0;
+ for (i = 0; i < tags; i++) {
+ int syn_ele = layout[i][0];
+ int pos = layout[i][2];
+ sum += (1 + (syn_ele == TYPE_CPE)) *
+ (pos != AAC_CHANNEL_OFF && pos != AAC_CHANNEL_CC);
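+ /* a CPE contributes two channels, any other element one; elements that
+ are switched off or only used for coupling contribute none */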
+ }
+ return sum;
+}
+
+/**
+ * Check for the channel element in the current channel position configuration.
+ * If it exists, make sure the appropriate element is allocated and map the
+ * channel order to match the internal FFmpeg channel layout.
+ *
+ * @param che_pos current channel position configuration
+ * @param type channel element type
+ * @param id channel element id
+ * @param channels count of the number of channels in the configuration
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static av_cold int che_configure(AACContext *ac,
+ enum ChannelPosition che_pos,
+ int type, int id, int *channels)
+{
+ if (*channels >= MAX_CHANNELS)
+ return AVERROR_INVALIDDATA;
+ if (che_pos) {
+ if (!ac->che[type][id]) {
+ if (!(ac->che[type][id] = av_mallocz(sizeof(ChannelElement))))
+ return AVERROR(ENOMEM);
+ AAC_RENAME(ff_aac_sbr_ctx_init)(ac, &ac->che[type][id]->sbr);
+ }
+ if (type != TYPE_CCE) {
+ if (*channels >= MAX_CHANNELS - (type == TYPE_CPE || (type == TYPE_SCE && ac->oc[1].m4ac.ps == 1))) {
+ av_log(ac->avctx, AV_LOG_ERROR, "Too many channels\n");
+ return AVERROR_INVALIDDATA;
+ }
+ ac->output_element[(*channels)++] = &ac->che[type][id]->ch[0];
+ if (type == TYPE_CPE ||
+ (type == TYPE_SCE && ac->oc[1].m4ac.ps == 1)) {
+ ac->output_element[(*channels)++] = &ac->che[type][id]->ch[1];
+ }
+ }
+ } else {
+ if (ac->che[type][id])
+ AAC_RENAME(ff_aac_sbr_ctx_close)(&ac->che[type][id]->sbr);
+ av_freep(&ac->che[type][id]);
+ }
+ return 0;
+}
+
+static int frame_configure_elements(AVCodecContext *avctx)
+{
+ AACContext *ac = avctx->priv_data;
+ int type, id, ch, ret;
+
+ /* set channel pointers to internal buffers by default */
+ for (type = 0; type < 4; type++) {
+ for (id = 0; id < MAX_ELEM_ID; id++) {
+ ChannelElement *che = ac->che[type][id];
+ if (che) {
+ che->ch[0].ret = che->ch[0].ret_buf;
+ che->ch[1].ret = che->ch[1].ret_buf;
+ }
+ }
+ }
+
+ /* get output buffer */
+ av_frame_unref(ac->frame);
+ if (!avctx->channels)
+ return 1;
+
+ ac->frame->nb_samples = 2048;
+ if ((ret = ff_get_buffer(avctx, ac->frame, 0)) < 0)
+ return ret;
+
+ /* map output channel pointers to AVFrame data */
+ for (ch = 0; ch < avctx->channels; ch++) {
+ if (ac->output_element[ch])
+ ac->output_element[ch]->ret = (INTFLOAT *)ac->frame->extended_data[ch];
+ }
+
+ return 0;
+}
+
+struct elem_to_channel {
+ uint64_t av_position;
+ uint8_t syn_ele;
+ uint8_t elem_id;
+ uint8_t aac_position;
+};
+
+static int assign_pair(struct elem_to_channel e2c_vec[MAX_ELEM_ID],
+ uint8_t (*layout_map)[3], int offset, uint64_t left,
+ uint64_t right, int pos)
+{
+ if (layout_map[offset][0] == TYPE_CPE) {
+ e2c_vec[offset] = (struct elem_to_channel) {
+ .av_position = left | right,
+ .syn_ele = TYPE_CPE,
+ .elem_id = layout_map[offset][1],
+ .aac_position = pos
+ };
+ return 1;
+ } else {
+ e2c_vec[offset] = (struct elem_to_channel) {
+ .av_position = left,
+ .syn_ele = TYPE_SCE,
+ .elem_id = layout_map[offset][1],
+ .aac_position = pos
+ };
+ e2c_vec[offset + 1] = (struct elem_to_channel) {
+ .av_position = right,
+ .syn_ele = TYPE_SCE,
+ .elem_id = layout_map[offset + 1][1],
+ .aac_position = pos
+ };
+ return 2;
+ }
+}
+
+static int count_paired_channels(uint8_t (*layout_map)[3], int tags, int pos,
+ int *current)
+{
+ int num_pos_channels = 0;
+ int first_cpe = 0;
+ int sce_parity = 0;
+ int i;
+ for (i = *current; i < tags; i++) {
+ if (layout_map[i][2] != pos)
+ break;
+ if (layout_map[i][0] == TYPE_CPE) {
+ if (sce_parity) {
+ if (pos == AAC_CHANNEL_FRONT && !first_cpe) {
+ sce_parity = 0;
+ } else {
+ return -1;
+ }
+ }
+ num_pos_channels += 2;
+ first_cpe = 1;
+ } else {
+ num_pos_channels++;
+ sce_parity ^= 1;
+ }
+ }
+ if (sce_parity &&
+ ((pos == AAC_CHANNEL_FRONT && first_cpe) || pos == AAC_CHANNEL_SIDE))
+ return -1;
+ *current = i;
+ return num_pos_channels;
+}
+
+static uint64_t sniff_channel_order(uint8_t (*layout_map)[3], int tags)
+{
+ int i, n, total_non_cc_elements;
+ struct elem_to_channel e2c_vec[4 * MAX_ELEM_ID] = { { 0 } };
+ int num_front_channels, num_side_channels, num_back_channels;
+ uint64_t layout;
+
+ if (FF_ARRAY_ELEMS(e2c_vec) < tags)
+ return 0;
+
+ i = 0;
+ num_front_channels =
+ count_paired_channels(layout_map, tags, AAC_CHANNEL_FRONT, &i);
+ if (num_front_channels < 0)
+ return 0;
+ num_side_channels =
+ count_paired_channels(layout_map, tags, AAC_CHANNEL_SIDE, &i);
+ if (num_side_channels < 0)
+ return 0;
+ num_back_channels =
+ count_paired_channels(layout_map, tags, AAC_CHANNEL_BACK, &i);
+ if (num_back_channels < 0)
+ return 0;
+
+ if (num_side_channels == 0 && num_back_channels >= 4) {
+ num_side_channels = 2;
+ num_back_channels -= 2;
+ }
+
+ i = 0;
+ if (num_front_channels & 1) {
+ e2c_vec[i] = (struct elem_to_channel) {
+ .av_position = AV_CH_FRONT_CENTER,
+ .syn_ele = TYPE_SCE,
+ .elem_id = layout_map[i][1],
+ .aac_position = AAC_CHANNEL_FRONT
+ };
+ i++;
+ num_front_channels--;
+ }
+ if (num_front_channels >= 4) {
+ i += assign_pair(e2c_vec, layout_map, i,
+ AV_CH_FRONT_LEFT_OF_CENTER,
+ AV_CH_FRONT_RIGHT_OF_CENTER,
+ AAC_CHANNEL_FRONT);
+ num_front_channels -= 2;
+ }
+ if (num_front_channels >= 2) {
+ i += assign_pair(e2c_vec, layout_map, i,
+ AV_CH_FRONT_LEFT,
+ AV_CH_FRONT_RIGHT,
+ AAC_CHANNEL_FRONT);
+ num_front_channels -= 2;
+ }
+ while (num_front_channels >= 2) {
+ i += assign_pair(e2c_vec, layout_map, i,
+ UINT64_MAX,
+ UINT64_MAX,
+ AAC_CHANNEL_FRONT);
+ num_front_channels -= 2;
+ }
+
+ if (num_side_channels >= 2) {
+ i += assign_pair(e2c_vec, layout_map, i,
+ AV_CH_SIDE_LEFT,
+ AV_CH_SIDE_RIGHT,
+ AAC_CHANNEL_FRONT);
+ num_side_channels -= 2;
+ }
+ while (num_side_channels >= 2) {
+ i += assign_pair(e2c_vec, layout_map, i,
+ UINT64_MAX,
+ UINT64_MAX,
+ AAC_CHANNEL_SIDE);
+ num_side_channels -= 2;
+ }
+
+ while (num_back_channels >= 4) {
+ i += assign_pair(e2c_vec, layout_map, i,
+ UINT64_MAX,
+ UINT64_MAX,
+ AAC_CHANNEL_BACK);
+ num_back_channels -= 2;
+ }
+ if (num_back_channels >= 2) {
+ i += assign_pair(e2c_vec, layout_map, i,
+ AV_CH_BACK_LEFT,
+ AV_CH_BACK_RIGHT,
+ AAC_CHANNEL_BACK);
+ num_back_channels -= 2;
+ }
+ if (num_back_channels) {
+ e2c_vec[i] = (struct elem_to_channel) {
+ .av_position = AV_CH_BACK_CENTER,
+ .syn_ele = TYPE_SCE,
+ .elem_id = layout_map[i][1],
+ .aac_position = AAC_CHANNEL_BACK
+ };
+ i++;
+ num_back_channels--;
+ }
+
+ if (i < tags && layout_map[i][2] == AAC_CHANNEL_LFE) {
+ e2c_vec[i] = (struct elem_to_channel) {
+ .av_position = AV_CH_LOW_FREQUENCY,
+ .syn_ele = TYPE_LFE,
+ .elem_id = layout_map[i][1],
+ .aac_position = AAC_CHANNEL_LFE
+ };
+ i++;
+ }
+ while (i < tags && layout_map[i][2] == AAC_CHANNEL_LFE) {
+ e2c_vec[i] = (struct elem_to_channel) {
+ .av_position = UINT64_MAX,
+ .syn_ele = TYPE_LFE,
+ .elem_id = layout_map[i][1],
+ .aac_position = AAC_CHANNEL_LFE
+ };
+ i++;
+ }
+
+ // Must choose a stable sort
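+ // (the bubble sort below is stable: elements with equal av_position
+ // keep the order in which the PCE declared them)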
+ total_non_cc_elements = n = i;
+ do {
+ int next_n = 0;
+ for (i = 1; i < n; i++)
+ if (e2c_vec[i - 1].av_position > e2c_vec[i].av_position) {
+ FFSWAP(struct elem_to_channel, e2c_vec[i - 1], e2c_vec[i]);
+ next_n = i;
+ }
+ n = next_n;
+ } while (n > 0);
+
+ layout = 0;
+ for (i = 0; i < total_non_cc_elements; i++) {
+ layout_map[i][0] = e2c_vec[i].syn_ele;
+ layout_map[i][1] = e2c_vec[i].elem_id;
+ layout_map[i][2] = e2c_vec[i].aac_position;
+ if (e2c_vec[i].av_position != UINT64_MAX) {
+ layout |= e2c_vec[i].av_position;
+ }
+ }
+
+ return layout;
+}
+
+/**
+ * Save current output configuration if and only if it has been locked.
+ */
+static void push_output_configuration(AACContext *ac) {
+ if (ac->oc[1].status == OC_LOCKED || ac->oc[0].status == OC_NONE) {
+ ac->oc[0] = ac->oc[1];
+ }
+ ac->oc[1].status = OC_NONE;
+}
+
+/**
+ * Restore the previous output configuration if and only if the current
+ * configuration is unlocked.
+ */
+static void pop_output_configuration(AACContext *ac) {
+ if (ac->oc[1].status != OC_LOCKED && ac->oc[0].status != OC_NONE) {
+ ac->oc[1] = ac->oc[0];
+ ac->avctx->channels = ac->oc[1].channels;
+ ac->avctx->channel_layout = ac->oc[1].channel_layout;
+ output_configure(ac, ac->oc[1].layout_map, ac->oc[1].layout_map_tags,
+ ac->oc[1].status, 0);
+ }
+}
+
+/**
+ * Configure output channel order based on the current program
+ * configuration element.
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static int output_configure(AACContext *ac,
+ uint8_t layout_map[MAX_ELEM_ID * 4][3], int tags,
+ enum OCStatus oc_type, int get_new_frame)
+{
+ AVCodecContext *avctx = ac->avctx;
+ int i, channels = 0, ret;
+ uint64_t layout = 0;
+ uint8_t id_map[TYPE_END][MAX_ELEM_ID] = {{ 0 }};
+ uint8_t type_counts[TYPE_END] = { 0 };
+
+ if (ac->oc[1].layout_map != layout_map) {
+ memcpy(ac->oc[1].layout_map, layout_map, tags * sizeof(layout_map[0]));
+ ac->oc[1].layout_map_tags = tags;
+ }
+ for (i = 0; i < tags; i++) {
+ int type = layout_map[i][0];
+ int id = layout_map[i][1];
+ id_map[type][id] = type_counts[type]++;
+ }
+ // Try to sniff a reasonable channel order, otherwise output the
+ // channels in the order the PCE declared them.
+ if (avctx->request_channel_layout != AV_CH_LAYOUT_NATIVE)
+ layout = sniff_channel_order(layout_map, tags);
+ for (i = 0; i < tags; i++) {
+ int type = layout_map[i][0];
+ int id = layout_map[i][1];
+ int iid = id_map[type][id];
+ int position = layout_map[i][2];
+ // Allocate or free elements depending on if they are in the
+ // current program configuration.
+ ret = che_configure(ac, position, type, iid, &channels);
+ if (ret < 0)
+ return ret;
+ ac->tag_che_map[type][id] = ac->che[type][iid];
+ }
+ if (ac->oc[1].m4ac.ps == 1 && channels == 2) {
+ if (layout == AV_CH_FRONT_CENTER) {
+ layout = AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT;
+ } else {
+ layout = 0;
+ }
+ }
+
+ if (layout) avctx->channel_layout = layout;
+ ac->oc[1].channel_layout = layout;
+ avctx->channels = ac->oc[1].channels = channels;
+ ac->oc[1].status = oc_type;
+
+ if (get_new_frame) {
+ if ((ret = frame_configure_elements(ac->avctx)) < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void flush(AVCodecContext *avctx)
+{
+ AACContext *ac= avctx->priv_data;
+ int type, i, j;
+
+ for (type = 3; type >= 0; type--) {
+ for (i = 0; i < MAX_ELEM_ID; i++) {
+ ChannelElement *che = ac->che[type][i];
+ if (che) {
+ for (j = 0; j <= 1; j++) {
+ memset(che->ch[j].saved, 0, sizeof(che->ch[j].saved));
+ }
+ }
+ }
+ }
+}
+
+/**
+ * Set up channel positions based on a default channel configuration
+ * as specified in table 1.17.
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static int set_default_channel_config(AVCodecContext *avctx,
+ uint8_t (*layout_map)[3],
+ int *tags,
+ int channel_config)
+{
+ if (channel_config < 1 || (channel_config > 7 && channel_config < 11) ||
+ channel_config > 12) {
+ av_log(avctx, AV_LOG_ERROR,
+ "invalid default channel configuration (%d)\n",
+ channel_config);
+ return AVERROR_INVALIDDATA;
+ }
+ *tags = tags_per_config[channel_config];
+ memcpy(layout_map, aac_channel_layout_map[channel_config - 1],
+ *tags * sizeof(*layout_map));
+
+ /*
+ * AAC specification has 7.1(wide) as a default layout for 8-channel streams.
+ * However, at least Nero AAC encoder encodes 7.1 streams using the default
+ * channel config 7, mapping the side channels of the original audio stream
+ * to the second AAC_CHANNEL_FRONT pair in the AAC stream. Similarly, e.g. FAAD
+ * decodes the second AAC_CHANNEL_FRONT pair as side channels, therefore decoding
+ * the incorrect streams as if they were correct (and as the encoder intended).
+ *
+ * As actual intended 7.1(wide) streams are very rare, default to assuming a
+ * 7.1 layout was intended.
+ */
+ if (channel_config == 7 && avctx->strict_std_compliance < FF_COMPLIANCE_STRICT) {
+ av_log(avctx, AV_LOG_INFO, "Assuming an incorrectly encoded 7.1 channel layout"
+ " instead of a spec-compliant 7.1(wide) layout, use -strict %d to decode"
+ " according to the specification instead.\n", FF_COMPLIANCE_STRICT);
+ layout_map[2][2] = AAC_CHANNEL_SIDE;
+ }
+
+ return 0;
+}
+
+static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
+{
+ /* For PCE based channel configurations map the channels solely based
+ * on tags. */
+ if (!ac->oc[1].m4ac.chan_config) {
+ return ac->tag_che_map[type][elem_id];
+ }
+ // Allow single CPE stereo files to be signalled with mono configuration.
+ if (!ac->tags_mapped && type == TYPE_CPE &&
+ ac->oc[1].m4ac.chan_config == 1) {
+ uint8_t layout_map[MAX_ELEM_ID*4][3];
+ int layout_map_tags;
+ push_output_configuration(ac);
+
+ av_log(ac->avctx, AV_LOG_DEBUG, "mono with CPE\n");
+
+ if (set_default_channel_config(ac->avctx, layout_map,
+ &layout_map_tags, 2) < 0)
+ return NULL;
+ if (output_configure(ac, layout_map, layout_map_tags,
+ OC_TRIAL_FRAME, 1) < 0)
+ return NULL;
+
+ ac->oc[1].m4ac.chan_config = 2;
+ ac->oc[1].m4ac.ps = 0;
+ }
+ // And vice-versa
+ if (!ac->tags_mapped && type == TYPE_SCE &&
+ ac->oc[1].m4ac.chan_config == 2) {
+ uint8_t layout_map[MAX_ELEM_ID * 4][3];
+ int layout_map_tags;
+ push_output_configuration(ac);
+
+ av_log(ac->avctx, AV_LOG_DEBUG, "stereo with SCE\n");
+
+ if (set_default_channel_config(ac->avctx, layout_map,
+ &layout_map_tags, 1) < 0)
+ return NULL;
+ if (output_configure(ac, layout_map, layout_map_tags,
+ OC_TRIAL_FRAME, 1) < 0)
+ return NULL;
+
+ ac->oc[1].m4ac.chan_config = 1;
+ if (ac->oc[1].m4ac.sbr)
+ ac->oc[1].m4ac.ps = -1;
+ }
+ /* For indexed channel configurations map the channels solely based
+ * on position. */
+ switch (ac->oc[1].m4ac.chan_config) {
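+ /* The cases below intentionally fall through: each channel configuration
+ * reuses the mapping logic of the smaller configurations. */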
+ case 12:
+ case 7:
+ if (ac->tags_mapped == 3 && type == TYPE_CPE) {
+ ac->tags_mapped++;
+ return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][2];
+ }
+ case 11:
+ if (ac->tags_mapped == 2 &&
+ ac->oc[1].m4ac.chan_config == 11 &&
+ type == TYPE_SCE) {
+ ac->tags_mapped++;
+ return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][1];
+ }
+ case 6:
+ /* Some streams incorrectly code 5.1 audio as
+ * SCE[0] CPE[0] CPE[1] SCE[1]
+ * instead of
+ * SCE[0] CPE[0] CPE[1] LFE[0].
+ * If we seem to have encountered such a stream, transfer
+ * the LFE[0] element to the SCE[1]'s mapping */
+ if (ac->tags_mapped == tags_per_config[ac->oc[1].m4ac.chan_config] - 1 && (type == TYPE_LFE || type == TYPE_SCE)) {
+ if (!ac->warned_remapping_once && (type != TYPE_LFE || elem_id != 0)) {
+ av_log(ac->avctx, AV_LOG_WARNING,
+ "This stream seems to incorrectly report its last channel as %s[%d], mapping to LFE[0]\n",
+ type == TYPE_SCE ? "SCE" : "LFE", elem_id);
+ ac->warned_remapping_once++;
+ }
+ ac->tags_mapped++;
+ return ac->tag_che_map[type][elem_id] = ac->che[TYPE_LFE][0];
+ }
+ case 5:
+ if (ac->tags_mapped == 2 && type == TYPE_CPE) {
+ ac->tags_mapped++;
+ return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][1];
+ }
+ case 4:
+ /* Some streams incorrectly code 4.0 audio as
+ * SCE[0] CPE[0] LFE[0]
+ * instead of
+ * SCE[0] CPE[0] SCE[1].
+ * If we seem to have encountered such a stream, transfer
+ * the SCE[1] element to the LFE[0]'s mapping */
+ if (ac->tags_mapped == tags_per_config[ac->oc[1].m4ac.chan_config] - 1 && (type == TYPE_LFE || type == TYPE_SCE)) {
+ if (!ac->warned_remapping_once && (type != TYPE_SCE || elem_id != 1)) {
+ av_log(ac->avctx, AV_LOG_WARNING,
+ "This stream seems to incorrectly report its last channel as %s[%d], mapping to SCE[1]\n",
+ type == TYPE_SCE ? "SCE" : "LFE", elem_id);
+ ac->warned_remapping_once++;
+ }
+ ac->tags_mapped++;
+ return ac->tag_che_map[type][elem_id] = ac->che[TYPE_SCE][1];
+ }
+ if (ac->tags_mapped == 2 &&
+ ac->oc[1].m4ac.chan_config == 4 &&
+ type == TYPE_SCE) {
+ ac->tags_mapped++;
+ return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][1];
+ }
+ case 3:
+ case 2:
+ if (ac->tags_mapped == (ac->oc[1].m4ac.chan_config != 2) &&
+ type == TYPE_CPE) {
+ ac->tags_mapped++;
+ return ac->tag_che_map[TYPE_CPE][elem_id] = ac->che[TYPE_CPE][0];
+ } else if (ac->oc[1].m4ac.chan_config == 2) {
+ return NULL;
+ }
+ case 1:
+ if (!ac->tags_mapped && type == TYPE_SCE) {
+ ac->tags_mapped++;
+ return ac->tag_che_map[TYPE_SCE][elem_id] = ac->che[TYPE_SCE][0];
+ }
+ default:
+ return NULL;
+ }
+}
+
+/**
+ * Decode an array of 4-bit element IDs, optionally interleaved with a
+ * stereo/mono switching bit.
+ *
+ * @param type speaker type/position for these channels
+ */
+static void decode_channel_map(uint8_t layout_map[][3],
+ enum ChannelPosition type,
+ GetBitContext *gb, int n)
+{
+ while (n--) {
+ enum RawDataBlockType syn_ele;
+ switch (type) {
+ case AAC_CHANNEL_FRONT:
+ case AAC_CHANNEL_BACK:
+ case AAC_CHANNEL_SIDE:
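+ // 1 bit selects CPE (1) or SCE (0); the value maps directly onto the RawDataBlockType enum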
+ syn_ele = get_bits1(gb);
+ break;
+ case AAC_CHANNEL_CC:
+ skip_bits1(gb);
+ syn_ele = TYPE_CCE;
+ break;
+ case AAC_CHANNEL_LFE:
+ syn_ele = TYPE_LFE;
+ break;
+ default:
+ // AAC_CHANNEL_OFF has no channel map
+ av_assert0(0);
+ }
+ layout_map[0][0] = syn_ele;
+ layout_map[0][1] = get_bits(gb, 4);
+ layout_map[0][2] = type;
+ layout_map++;
+ }
+}
+
+/**
+ * Decode program configuration element; reference: table 4.2.
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static int decode_pce(AVCodecContext *avctx, MPEG4AudioConfig *m4ac,
+ uint8_t (*layout_map)[3],
+ GetBitContext *gb)
+{
+ int num_front, num_side, num_back, num_lfe, num_assoc_data, num_cc;
+ int sampling_index;
+ int comment_len;
+ int tags;
+
+ skip_bits(gb, 2); // object_type
+
+ sampling_index = get_bits(gb, 4);
+ if (m4ac->sampling_index != sampling_index)
+ av_log(avctx, AV_LOG_WARNING,
+ "Sample rate index in program config element does not "
+ "match the sample rate index configured by the container.\n");
+
+ num_front = get_bits(gb, 4);
+ num_side = get_bits(gb, 4);
+ num_back = get_bits(gb, 4);
+ num_lfe = get_bits(gb, 2);
+ num_assoc_data = get_bits(gb, 3);
+ num_cc = get_bits(gb, 4);
+
+ if (get_bits1(gb))
+ skip_bits(gb, 4); // mono_mixdown_tag
+ if (get_bits1(gb))
+ skip_bits(gb, 4); // stereo_mixdown_tag
+
+ if (get_bits1(gb))
+ skip_bits(gb, 3); // mixdown_coeff_index and pseudo_surround
+
+ if (get_bits_left(gb) < 4 * (num_front + num_side + num_back + num_lfe + num_assoc_data + num_cc)) {
+ av_log(avctx, AV_LOG_ERROR, "decode_pce: " overread_err);
+ return -1;
+ }
+ decode_channel_map(layout_map , AAC_CHANNEL_FRONT, gb, num_front);
+ tags = num_front;
+ decode_channel_map(layout_map + tags, AAC_CHANNEL_SIDE, gb, num_side);
+ tags += num_side;
+ decode_channel_map(layout_map + tags, AAC_CHANNEL_BACK, gb, num_back);
+ tags += num_back;
+ decode_channel_map(layout_map + tags, AAC_CHANNEL_LFE, gb, num_lfe);
+ tags += num_lfe;
+
+ skip_bits_long(gb, 4 * num_assoc_data);
+
+ decode_channel_map(layout_map + tags, AAC_CHANNEL_CC, gb, num_cc);
+ tags += num_cc;
+
+ align_get_bits(gb);
+
+ /* comment field, first byte is length */
+ comment_len = get_bits(gb, 8) * 8;
+ if (get_bits_left(gb) < comment_len) {
+ av_log(avctx, AV_LOG_ERROR, "decode_pce: " overread_err);
+ return AVERROR_INVALIDDATA;
+ }
+ skip_bits_long(gb, comment_len);
+ return tags;
+}
+
+/**
+ * Decode GA "General Audio" specific configuration; reference: table 4.1.
+ *
+ * @param ac pointer to AACContext, may be null
+ * @param avctx pointer to AVCodecContext, used for logging
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static int decode_ga_specific_config(AACContext *ac, AVCodecContext *avctx,
+ GetBitContext *gb,
+ MPEG4AudioConfig *m4ac,
+ int channel_config)
+{
+ int extension_flag, ret, ep_config, res_flags;
+ uint8_t layout_map[MAX_ELEM_ID*4][3];
+ int tags = 0;
+
+ if (get_bits1(gb)) { // frameLengthFlag
+ avpriv_request_sample(avctx, "960/120 MDCT window");
+ return AVERROR_PATCHWELCOME;
+ }
+ m4ac->frame_length_short = 0;
+
+ if (get_bits1(gb)) // dependsOnCoreCoder
+ skip_bits(gb, 14); // coreCoderDelay
+ extension_flag = get_bits1(gb);
+
+ if (m4ac->object_type == AOT_AAC_SCALABLE ||
+ m4ac->object_type == AOT_ER_AAC_SCALABLE)
+ skip_bits(gb, 3); // layerNr
+
+ if (channel_config == 0) {
+ skip_bits(gb, 4); // element_instance_tag
+ tags = decode_pce(avctx, m4ac, layout_map, gb);
+ if (tags < 0)
+ return tags;
+ } else {
+ if ((ret = set_default_channel_config(avctx, layout_map,
+ &tags, channel_config)))
+ return ret;
+ }
+
+ if (count_channels(layout_map, tags) > 1) {
+ m4ac->ps = 0;
+ } else if (m4ac->sbr == 1 && m4ac->ps == -1)
+ m4ac->ps = 1;
+
+ if (ac && (ret = output_configure(ac, layout_map, tags, OC_GLOBAL_HDR, 0)))
+ return ret;
+
+ if (extension_flag) {
+ switch (m4ac->object_type) {
+ case AOT_ER_BSAC:
+ skip_bits(gb, 5); // numOfSubFrame
+ skip_bits(gb, 11); // layer_length
+ break;
+ case AOT_ER_AAC_LC:
+ case AOT_ER_AAC_LTP:
+ case AOT_ER_AAC_SCALABLE:
+ case AOT_ER_AAC_LD:
+ res_flags = get_bits(gb, 3);
+ if (res_flags) {
+ avpriv_report_missing_feature(avctx,
+ "AAC data resilience (flags %x)",
+ res_flags);
+ return AVERROR_PATCHWELCOME;
+ }
+ break;
+ }
+ skip_bits1(gb); // extensionFlag3 (TBD in version 3)
+ }
+ switch (m4ac->object_type) {
+ case AOT_ER_AAC_LC:
+ case AOT_ER_AAC_LTP:
+ case AOT_ER_AAC_SCALABLE:
+ case AOT_ER_AAC_LD:
+ ep_config = get_bits(gb, 2);
+ if (ep_config) {
+ avpriv_report_missing_feature(avctx,
+ "epConfig %d", ep_config);
+ return AVERROR_PATCHWELCOME;
+ }
+ }
+ return 0;
+}
+
+static int decode_eld_specific_config(AACContext *ac, AVCodecContext *avctx,
+ GetBitContext *gb,
+ MPEG4AudioConfig *m4ac,
+ int channel_config)
+{
+ int ret, ep_config, res_flags;
+ uint8_t layout_map[MAX_ELEM_ID*4][3];
+ int tags = 0;
+ const int ELDEXT_TERM = 0;
+
+ m4ac->ps = 0;
+ m4ac->sbr = 0;
+#if USE_FIXED
+ if (get_bits1(gb)) { // frameLengthFlag
+ avpriv_request_sample(avctx, "960/120 MDCT window");
+ return AVERROR_PATCHWELCOME;
+ }
+#else
+ m4ac->frame_length_short = get_bits1(gb);
+#endif
+ res_flags = get_bits(gb, 3);
+ if (res_flags) {
+ avpriv_report_missing_feature(avctx,
+ "AAC data resilience (flags %x)",
+ res_flags);
+ return AVERROR_PATCHWELCOME;
+ }
+
+ if (get_bits1(gb)) { // ldSbrPresentFlag
+ avpriv_report_missing_feature(avctx,
+ "Low Delay SBR");
+ return AVERROR_PATCHWELCOME;
+ }
+
+ while (get_bits(gb, 4) != ELDEXT_TERM) {
+ int len = get_bits(gb, 4);
+ if (len == 15)
+ len += get_bits(gb, 8);
+ if (len == 15 + 255)
+ len += get_bits(gb, 16);
+ if (get_bits_left(gb) < len * 8 + 4) {
+ av_log(avctx, AV_LOG_ERROR, overread_err);
+ return AVERROR_INVALIDDATA;
+ }
+ skip_bits_long(gb, 8 * len);
+ }
+
+ if ((ret = set_default_channel_config(avctx, layout_map,
+ &tags, channel_config)))
+ return ret;
+
+ if (ac && (ret = output_configure(ac, layout_map, tags, OC_GLOBAL_HDR, 0)))
+ return ret;
+
+ ep_config = get_bits(gb, 2);
+ if (ep_config) {
+ avpriv_report_missing_feature(avctx,
+ "epConfig %d", ep_config);
+ return AVERROR_PATCHWELCOME;
+ }
+ return 0;
+}
+
+/**
+ * Decode audio specific configuration; reference: table 1.13.
+ *
+ * @param ac pointer to AACContext, may be null
+ * @param avctx pointer to AVCodecContext, used for logging
+ * @param m4ac pointer to MPEG4AudioConfig, used for parsing
+ * @param data pointer to buffer holding an audio specific config
+ * @param bit_size size of audio specific config or data in bits
+ * @param sync_extension look for an appended sync extension
+ *
+ * @return Returns error status or number of consumed bits. <0 - error
+ */
+static int decode_audio_specific_config(AACContext *ac,
+ AVCodecContext *avctx,
+ MPEG4AudioConfig *m4ac,
+ const uint8_t *data, int bit_size,
+ int sync_extension)
+{
+ GetBitContext gb;
+ int i, ret;
+
+ ff_dlog(avctx, "audio specific config size %d\n", bit_size >> 3);
+ for (i = 0; i < bit_size >> 3; i++)
+ ff_dlog(avctx, "%02x ", data[i]);
+ ff_dlog(avctx, "\n");
+
+ if ((ret = init_get_bits(&gb, data, bit_size)) < 0)
+ return ret;
+
+ if ((i = avpriv_mpeg4audio_get_config(m4ac, data, bit_size,
+ sync_extension)) < 0)
+ return AVERROR_INVALIDDATA;
+ if (m4ac->sampling_index > 12) {
+ av_log(avctx, AV_LOG_ERROR,
+ "invalid sampling rate index %d\n",
+ m4ac->sampling_index);
+ return AVERROR_INVALIDDATA;
+ }
+ if (m4ac->object_type == AOT_ER_AAC_LD &&
+ (m4ac->sampling_index < 3 || m4ac->sampling_index > 7)) {
+ av_log(avctx, AV_LOG_ERROR,
+ "invalid low delay sampling rate index %d\n",
+ m4ac->sampling_index);
+ return AVERROR_INVALIDDATA;
+ }
+
+ skip_bits_long(&gb, i);
+
+ switch (m4ac->object_type) {
+ case AOT_AAC_MAIN:
+ case AOT_AAC_LC:
+ case AOT_AAC_LTP:
+ case AOT_ER_AAC_LC:
+ case AOT_ER_AAC_LD:
+ if ((ret = decode_ga_specific_config(ac, avctx, &gb,
+ m4ac, m4ac->chan_config)) < 0)
+ return ret;
+ break;
+ case AOT_ER_AAC_ELD:
+ if ((ret = decode_eld_specific_config(ac, avctx, &gb,
+ m4ac, m4ac->chan_config)) < 0)
+ return ret;
+ break;
+ default:
+ avpriv_report_missing_feature(avctx,
+ "Audio object type %s%d",
+ m4ac->sbr == 1 ? "SBR+" : "",
+ m4ac->object_type);
+ return AVERROR(ENOSYS);
+ }
+
+ ff_dlog(avctx,
+ "AOT %d chan config %d sampling index %d (%d) SBR %d PS %d\n",
+ m4ac->object_type, m4ac->chan_config, m4ac->sampling_index,
+ m4ac->sample_rate, m4ac->sbr,
+ m4ac->ps);
+
+ return get_bits_count(&gb);
+}
+
+/**
+ * linear congruential pseudorandom number generator
+ *
+ * @param previous_val pointer to the current state of the generator
+ *
+ * @return Returns a 32-bit pseudorandom integer
+ */
+static av_always_inline int lcg_random(unsigned previous_val)
+{
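+ /* 32-bit LCG with the Numerical Recipes constants:
+ * state = state * 1664525 + 1013904223 (mod 2^32) */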
+ union { unsigned u; int s; } v = { previous_val * 1664525u + 1013904223 };
+ return v.s;
+}
+
+static void reset_all_predictors(PredictorState *ps)
+{
+ int i;
+ for (i = 0; i < MAX_PREDICTORS; i++)
+ reset_predict_state(&ps[i]);
+}
+
+static int sample_rate_idx(int rate)
+{
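+ /* The thresholds are (rounded) geometric means of adjacent standard MPEG-4
+ * sampling rates, so any rate maps to the index of the nearest standard rate. */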
+ if (92017 <= rate) return 0;
+ else if (75132 <= rate) return 1;
+ else if (55426 <= rate) return 2;
+ else if (46009 <= rate) return 3;
+ else if (37566 <= rate) return 4;
+ else if (27713 <= rate) return 5;
+ else if (23004 <= rate) return 6;
+ else if (18783 <= rate) return 7;
+ else if (13856 <= rate) return 8;
+ else if (11502 <= rate) return 9;
+ else if (9391 <= rate) return 10;
+ else return 11;
+}
+
+static void reset_predictor_group(PredictorState *ps, int group_num)
+{
+ int i;
+ for (i = group_num - 1; i < MAX_PREDICTORS; i += 30)
+ reset_predict_state(&ps[i]);
+}
+
+#define AAC_INIT_VLC_STATIC(num, size) \
+ INIT_VLC_STATIC(&vlc_spectral[num], 8, ff_aac_spectral_sizes[num], \
+ ff_aac_spectral_bits[num], sizeof(ff_aac_spectral_bits[num][0]), \
+ sizeof(ff_aac_spectral_bits[num][0]), \
+ ff_aac_spectral_codes[num], sizeof(ff_aac_spectral_codes[num][0]), \
+ sizeof(ff_aac_spectral_codes[num][0]), \
+ size);
+
+static void aacdec_init(AACContext *ac);
+
+static av_cold int aac_decode_init(AVCodecContext *avctx)
+{
+ AACContext *ac = avctx->priv_data;
+ int ret;
+
+ ac->avctx = avctx;
+ ac->oc[1].m4ac.sample_rate = avctx->sample_rate;
+
+ aacdec_init(ac);
+#if USE_FIXED
+ avctx->sample_fmt = AV_SAMPLE_FMT_S32P;
+#else
+ avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
+#endif /* USE_FIXED */
+
+ if (avctx->extradata_size > 0) {
+ if ((ret = decode_audio_specific_config(ac, ac->avctx, &ac->oc[1].m4ac,
+ avctx->extradata,
+ avctx->extradata_size * 8,
+ 1)) < 0)
+ return ret;
+ } else {
+ int sr, i;
+ uint8_t layout_map[MAX_ELEM_ID*4][3];
+ int layout_map_tags;
+
+ sr = sample_rate_idx(avctx->sample_rate);
+ ac->oc[1].m4ac.sampling_index = sr;
+ ac->oc[1].m4ac.channels = avctx->channels;
+ ac->oc[1].m4ac.sbr = -1;
+ ac->oc[1].m4ac.ps = -1;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(ff_mpeg4audio_channels); i++)
+ if (ff_mpeg4audio_channels[i] == avctx->channels)
+ break;
+ if (i == FF_ARRAY_ELEMS(ff_mpeg4audio_channels)) {
+ i = 0;
+ }
+ ac->oc[1].m4ac.chan_config = i;
+
+ if (ac->oc[1].m4ac.chan_config) {
+ int ret = set_default_channel_config(avctx, layout_map,
+ &layout_map_tags, ac->oc[1].m4ac.chan_config);
+ if (!ret)
+ output_configure(ac, layout_map, layout_map_tags,
+ OC_GLOBAL_HDR, 0);
+ else if (avctx->err_recognition & AV_EF_EXPLODE)
+ return AVERROR_INVALIDDATA;
+ }
+ }
+
+ if (avctx->channels > MAX_CHANNELS) {
+ av_log(avctx, AV_LOG_ERROR, "Too many channels\n");
+ return AVERROR_INVALIDDATA;
+ }
+
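+ /* The second argument below is the number of entries reserved for each static VLC table. */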
+ AAC_INIT_VLC_STATIC( 0, 304);
+ AAC_INIT_VLC_STATIC( 1, 270);
+ AAC_INIT_VLC_STATIC( 2, 550);
+ AAC_INIT_VLC_STATIC( 3, 300);
+ AAC_INIT_VLC_STATIC( 4, 328);
+ AAC_INIT_VLC_STATIC( 5, 294);
+ AAC_INIT_VLC_STATIC( 6, 306);
+ AAC_INIT_VLC_STATIC( 7, 268);
+ AAC_INIT_VLC_STATIC( 8, 510);
+ AAC_INIT_VLC_STATIC( 9, 366);
+ AAC_INIT_VLC_STATIC(10, 462);
+
+ AAC_RENAME(ff_aac_sbr_init)();
+
+#if USE_FIXED
- ac->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
++ ac->fdsp = avpriv_alloc_fixed_dsp(avctx->flags & AV_CODEC_FLAG_BITEXACT);
+#else
++ ac->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
+#endif /* USE_FIXED */
+ if (!ac->fdsp) {
+ return AVERROR(ENOMEM);
+ }
+
+ ac->random_state = 0x1f2e3d4c;
+
+ ff_aac_tableinit();
+
+ INIT_VLC_STATIC(&vlc_scalefactors, 7,
+ FF_ARRAY_ELEMS(ff_aac_scalefactor_code),
+ ff_aac_scalefactor_bits,
+ sizeof(ff_aac_scalefactor_bits[0]),
+ sizeof(ff_aac_scalefactor_bits[0]),
+ ff_aac_scalefactor_code,
+ sizeof(ff_aac_scalefactor_code[0]),
+ sizeof(ff_aac_scalefactor_code[0]),
+ 352);
+
+ AAC_RENAME_32(ff_mdct_init)(&ac->mdct, 11, 1, 1.0 / RANGE15(1024.0));
+ AAC_RENAME_32(ff_mdct_init)(&ac->mdct_ld, 10, 1, 1.0 / RANGE15(512.0));
+ AAC_RENAME_32(ff_mdct_init)(&ac->mdct_small, 8, 1, 1.0 / RANGE15(128.0));
+ AAC_RENAME_32(ff_mdct_init)(&ac->mdct_ltp, 11, 0, RANGE15(-2.0));
+#if !USE_FIXED
+ ret = ff_imdct15_init(&ac->mdct480, 5);
+ if (ret < 0)
+ return ret;
+#endif
+ // window initialization
+ AAC_RENAME(ff_kbd_window_init)(AAC_RENAME(ff_aac_kbd_long_1024), 4.0, 1024);
+ AAC_RENAME(ff_kbd_window_init)(AAC_RENAME(ff_aac_kbd_short_128), 6.0, 128);
+ AAC_RENAME(ff_init_ff_sine_windows)(10);
+ AAC_RENAME(ff_init_ff_sine_windows)( 9);
+ AAC_RENAME(ff_init_ff_sine_windows)( 7);
+
+ AAC_RENAME(cbrt_tableinit)();
+
+ return 0;
+}
+
+/**
+ * Skip data_stream_element; reference: table 4.10.
+ */
+static int skip_data_stream_element(AACContext *ac, GetBitContext *gb)
+{
+ int byte_align = get_bits1(gb);
+ int count = get_bits(gb, 8);
+ if (count == 255)
+ count += get_bits(gb, 8);
+ if (byte_align)
+ align_get_bits(gb);
+
+ if (get_bits_left(gb) < 8 * count) {
+ av_log(ac->avctx, AV_LOG_ERROR, "skip_data_stream_element: "overread_err);
+ return AVERROR_INVALIDDATA;
+ }
+ skip_bits_long(gb, 8 * count);
+ return 0;
+}
+
+static int decode_prediction(AACContext *ac, IndividualChannelStream *ics,
+ GetBitContext *gb)
+{
+ int sfb;
+ if (get_bits1(gb)) {
+ ics->predictor_reset_group = get_bits(gb, 5);
+ if (ics->predictor_reset_group == 0 ||
+ ics->predictor_reset_group > 30) {
+ av_log(ac->avctx, AV_LOG_ERROR,
+ "Invalid Predictor Reset Group.\n");
+ return AVERROR_INVALIDDATA;
+ }
+ }
+ for (sfb = 0; sfb < FFMIN(ics->max_sfb, ff_aac_pred_sfb_max[ac->oc[1].m4ac.sampling_index]); sfb++) {
+ ics->prediction_used[sfb] = get_bits1(gb);
+ }
+ return 0;
+}
+
+/**
+ * Decode Long Term Prediction data; reference: table 4.xx.
+ */
+static void decode_ltp(LongTermPrediction *ltp,
+ GetBitContext *gb, uint8_t max_sfb)
+{
+ int sfb;
+
+ ltp->lag = get_bits(gb, 11);
+ ltp->coef = ltp_coef[get_bits(gb, 3)];
+ for (sfb = 0; sfb < FFMIN(max_sfb, MAX_LTP_LONG_SFB); sfb++)
+ ltp->used[sfb] = get_bits1(gb);
+}
+
+/**
+ * Decode Individual Channel Stream info; reference: table 4.6.
+ */
+static int decode_ics_info(AACContext *ac, IndividualChannelStream *ics,
+ GetBitContext *gb)
+{
+ const MPEG4AudioConfig *const m4ac = &ac->oc[1].m4ac;
+ const int aot = m4ac->object_type;
+ const int sampling_index = m4ac->sampling_index;
+ if (aot != AOT_ER_AAC_ELD) {
+ if (get_bits1(gb)) {
+ av_log(ac->avctx, AV_LOG_ERROR, "Reserved bit set.\n");
+ if (ac->avctx->err_recognition & AV_EF_BITSTREAM)
+ return AVERROR_INVALIDDATA;
+ }
+ ics->window_sequence[1] = ics->window_sequence[0];
+ ics->window_sequence[0] = get_bits(gb, 2);
+ if (aot == AOT_ER_AAC_LD &&
+ ics->window_sequence[0] != ONLY_LONG_SEQUENCE) {
+ av_log(ac->avctx, AV_LOG_ERROR,
+ "AAC LD is only defined for ONLY_LONG_SEQUENCE but "
+ "window sequence %d found.\n", ics->window_sequence[0]);
+ ics->window_sequence[0] = ONLY_LONG_SEQUENCE;
+ return AVERROR_INVALIDDATA;
+ }
+ ics->use_kb_window[1] = ics->use_kb_window[0];
+ ics->use_kb_window[0] = get_bits1(gb);
+ }
+ ics->num_window_groups = 1;
+ ics->group_len[0] = 1;
+ if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
+ int i;
+ ics->max_sfb = get_bits(gb, 4);
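+ /* scale_factor_grouping: 7 bits, one per window boundary; a set bit merges the
+ * window into the previous group, a clear bit starts a new group. */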
+ for (i = 0; i < 7; i++) {
+ if (get_bits1(gb)) {
+ ics->group_len[ics->num_window_groups - 1]++;
+ } else {
+ ics->num_window_groups++;
+ ics->group_len[ics->num_window_groups - 1] = 1;
+ }
+ }
+ ics->num_windows = 8;
+ ics->swb_offset = ff_swb_offset_128[sampling_index];
+ ics->num_swb = ff_aac_num_swb_128[sampling_index];
+ ics->tns_max_bands = ff_tns_max_bands_128[sampling_index];
+ ics->predictor_present = 0;
+ } else {
+ ics->max_sfb = get_bits(gb, 6);
+ ics->num_windows = 1;
+ if (aot == AOT_ER_AAC_LD || aot == AOT_ER_AAC_ELD) {
+ if (m4ac->frame_length_short) {
+ ics->swb_offset = ff_swb_offset_480[sampling_index];
+ ics->num_swb = ff_aac_num_swb_480[sampling_index];
+ ics->tns_max_bands = ff_tns_max_bands_480[sampling_index];
+ } else {
+ ics->swb_offset = ff_swb_offset_512[sampling_index];
+ ics->num_swb = ff_aac_num_swb_512[sampling_index];
+ ics->tns_max_bands = ff_tns_max_bands_512[sampling_index];
+ }
+ if (!ics->num_swb || !ics->swb_offset)
+ return AVERROR_BUG;
+ } else {
+ ics->swb_offset = ff_swb_offset_1024[sampling_index];
+ ics->num_swb = ff_aac_num_swb_1024[sampling_index];
+ ics->tns_max_bands = ff_tns_max_bands_1024[sampling_index];
+ }
+ if (aot != AOT_ER_AAC_ELD) {
+ ics->predictor_present = get_bits1(gb);
+ ics->predictor_reset_group = 0;
+ }
+ if (ics->predictor_present) {
+ if (aot == AOT_AAC_MAIN) {
+ if (decode_prediction(ac, ics, gb)) {
+ goto fail;
+ }
+ } else if (aot == AOT_AAC_LC ||
+ aot == AOT_ER_AAC_LC) {
+ av_log(ac->avctx, AV_LOG_ERROR,
+ "Prediction is not allowed in AAC-LC.\n");
+ goto fail;
+ } else {
+ if (aot == AOT_ER_AAC_LD) {
+ av_log(ac->avctx, AV_LOG_ERROR,
+ "LTP in ER AAC LD not yet implemented.\n");
+ return AVERROR_PATCHWELCOME;
+ }
+ if ((ics->ltp.present = get_bits(gb, 1)))
+ decode_ltp(&ics->ltp, gb, ics->max_sfb);
+ }
+ }
+ }
+
+ if (ics->max_sfb > ics->num_swb) {
+ av_log(ac->avctx, AV_LOG_ERROR,
+ "Number of scalefactor bands in group (%d) "
+ "exceeds limit (%d).\n",
+ ics->max_sfb, ics->num_swb);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ ics->max_sfb = 0;
+ return AVERROR_INVALIDDATA;
+}
+
+/**
+ * Decode band types (section_data payload); reference: table 4.46.
+ *
+ * @param band_type array of the used band type
+ * @param band_type_run_end array of the last scalefactor band of a band type run
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static int decode_band_types(AACContext *ac, enum BandType band_type[120],
+ int band_type_run_end[120], GetBitContext *gb,
+ IndividualChannelStream *ics)
+{
+ int g, idx = 0;
+ const int bits = (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) ? 3 : 5;
+ for (g = 0; g < ics->num_window_groups; g++) {
+ int k = 0;
+ while (k < ics->max_sfb) {
+ uint8_t sect_end = k;
+ int sect_len_incr;
+ int sect_band_type = get_bits(gb, 4);
+ if (sect_band_type == 12) {
+ av_log(ac->avctx, AV_LOG_ERROR, "invalid band type\n");
+ return AVERROR_INVALIDDATA;
+ }
+ do {
+ sect_len_incr = get_bits(gb, bits);
+ sect_end += sect_len_incr;
+ if (get_bits_left(gb) < 0) {
+ av_log(ac->avctx, AV_LOG_ERROR, "decode_band_types: "overread_err);
+ return AVERROR_INVALIDDATA;
+ }
+ if (sect_end > ics->max_sfb) {
+ av_log(ac->avctx, AV_LOG_ERROR,
+ "Number of bands (%d) exceeds limit (%d).\n",
+ sect_end, ics->max_sfb);
+ return AVERROR_INVALIDDATA;
+ }
+ } while (sect_len_incr == (1 << bits) - 1);
+ for (; k < sect_end; k++) {
+ band_type [idx] = sect_band_type;
+ band_type_run_end[idx++] = sect_end;
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * Decode scalefactors; reference: table 4.47.
+ *
+ * @param global_gain first scalefactor value as scalefactors are differentially coded
+ * @param band_type array of the used band type
+ * @param band_type_run_end array of the last scalefactor band of a band type run
+ * @param sf array of scalefactors or intensity stereo positions
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static int decode_scalefactors(AACContext *ac, INTFLOAT sf[120], GetBitContext *gb,
+ unsigned int global_gain,
+ IndividualChannelStream *ics,
+ enum BandType band_type[120],
+ int band_type_run_end[120])
+{
+ int g, i, idx = 0;
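+ /* Running offsets: [0] scalefactors, [1] noise energies, [2] intensity stereo positions. */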
+ int offset[3] = { global_gain, global_gain - NOISE_OFFSET, 0 };
+ int clipped_offset;
+ int noise_flag = 1;
+ for (g = 0; g < ics->num_window_groups; g++) {
+ for (i = 0; i < ics->max_sfb;) {
+ int run_end = band_type_run_end[idx];
+ if (band_type[idx] == ZERO_BT) {
+ for (; i < run_end; i++, idx++)
+ sf[idx] = FIXR(0.);
+ } else if ((band_type[idx] == INTENSITY_BT) ||
+ (band_type[idx] == INTENSITY_BT2)) {
+ for (; i < run_end; i++, idx++) {
+ offset[2] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - SCALE_DIFF_ZERO;
+ clipped_offset = av_clip(offset[2], -155, 100);
+ if (offset[2] != clipped_offset) {
+ avpriv_request_sample(ac->avctx,
+ "If you heard an audible artifact, there may be a bug in the decoder. "
+ "Clipped intensity stereo position (%d -> %d)",
+ offset[2], clipped_offset);
+ }
+#if USE_FIXED
+ sf[idx] = 100 - clipped_offset;
+#else
+ sf[idx] = ff_aac_pow2sf_tab[-clipped_offset + POW_SF2_ZERO];
+#endif /* USE_FIXED */
+ }
+ } else if (band_type[idx] == NOISE_BT) {
+ for (; i < run_end; i++, idx++) {
+ if (noise_flag-- > 0)
+ offset[1] += get_bits(gb, NOISE_PRE_BITS) - NOISE_PRE;
+ else
+ offset[1] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - SCALE_DIFF_ZERO;
+ clipped_offset = av_clip(offset[1], -100, 155);
+ if (offset[1] != clipped_offset) {
+ avpriv_request_sample(ac->avctx,
+ "If you heard an audible artifact, there may be a bug in the decoder. "
+ "Clipped noise gain (%d -> %d)",
+ offset[1], clipped_offset);
+ }
+#if USE_FIXED
+ sf[idx] = -(100 + clipped_offset);
+#else
+ sf[idx] = -ff_aac_pow2sf_tab[clipped_offset + POW_SF2_ZERO];
+#endif /* USE_FIXED */
+ }
+ } else {
+ for (; i < run_end; i++, idx++) {
+ offset[0] += get_vlc2(gb, vlc_scalefactors.table, 7, 3) - SCALE_DIFF_ZERO;
+ if (offset[0] > 255U) {
+ av_log(ac->avctx, AV_LOG_ERROR,
+ "Scalefactor (%d) out of range.\n", offset[0]);
+ return AVERROR_INVALIDDATA;
+ }
+#if USE_FIXED
+ sf[idx] = -offset[0];
+#else
+ sf[idx] = -ff_aac_pow2sf_tab[offset[0] - 100 + POW_SF2_ZERO];
+#endif /* USE_FIXED */
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * Decode pulse data; reference: table 4.7.
+ */
+static int decode_pulses(Pulse *pulse, GetBitContext *gb,
+ const uint16_t *swb_offset, int num_swb)
+{
+ int i, pulse_swb;
+ pulse->num_pulse = get_bits(gb, 2) + 1;
+ pulse_swb = get_bits(gb, 6);
+ if (pulse_swb >= num_swb)
+ return -1;
+ pulse->pos[0] = swb_offset[pulse_swb];
+ pulse->pos[0] += get_bits(gb, 5);
+ if (pulse->pos[0] >= swb_offset[num_swb])
+ return -1;
+ pulse->amp[0] = get_bits(gb, 4);
+ for (i = 1; i < pulse->num_pulse; i++) {
+ pulse->pos[i] = get_bits(gb, 5) + pulse->pos[i - 1];
+ if (pulse->pos[i] >= swb_offset[num_swb])
+ return -1;
+ pulse->amp[i] = get_bits(gb, 4);
+ }
+ return 0;
+}
+
+/**
+ * Decode Temporal Noise Shaping data; reference: table 4.48.
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static int decode_tns(AACContext *ac, TemporalNoiseShaping *tns,
+ GetBitContext *gb, const IndividualChannelStream *ics)
+{
+ int w, filt, i, coef_len, coef_res, coef_compress;
+ const int is8 = ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE;
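+ /* TNS filter order limits: 7 for short windows, 20 for Main-profile long windows, 12 otherwise. */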
+ const int tns_max_order = is8 ? 7 : ac->oc[1].m4ac.object_type == AOT_AAC_MAIN ? 20 : 12;
+ for (w = 0; w < ics->num_windows; w++) {
+ if ((tns->n_filt[w] = get_bits(gb, 2 - is8))) {
+ coef_res = get_bits1(gb);
+
+ for (filt = 0; filt < tns->n_filt[w]; filt++) {
+ int tmp2_idx;
+ tns->length[w][filt] = get_bits(gb, 6 - 2 * is8);
+
+ if ((tns->order[w][filt] = get_bits(gb, 5 - 2 * is8)) > tns_max_order) {
+ av_log(ac->avctx, AV_LOG_ERROR,
+ "TNS filter order %d is greater than maximum %d.\n",
+ tns->order[w][filt], tns_max_order);
+ tns->order[w][filt] = 0;
+ return AVERROR_INVALIDDATA;
+ }
+ if (tns->order[w][filt]) {
+ tns->direction[w][filt] = get_bits1(gb);
+ coef_compress = get_bits1(gb);
+ coef_len = coef_res + 3 - coef_compress;
+ tmp2_idx = 2 * coef_compress + coef_res;
+
+ for (i = 0; i < tns->order[w][filt]; i++)
+ tns->coef[w][filt][i] = tns_tmp2_map[tmp2_idx][get_bits(gb, coef_len)];
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * Decode Mid/Side data; reference: table 4.54.
+ *
+ * @param ms_present Indicates mid/side stereo presence. [0] mask is all 0s;
+ * [1] mask is decoded from bitstream; [2] mask is all 1s;
+ * [3] reserved for scalable AAC
+ */
+static void decode_mid_side_stereo(ChannelElement *cpe, GetBitContext *gb,
+ int ms_present)
+{
+ int idx;
+ int max_idx = cpe->ch[0].ics.num_window_groups * cpe->ch[0].ics.max_sfb;
+ if (ms_present == 1) {
+ for (idx = 0; idx < max_idx; idx++)
+ cpe->ms_mask[idx] = get_bits1(gb);
+ } else if (ms_present == 2) {
+ memset(cpe->ms_mask, 1, max_idx * sizeof(cpe->ms_mask[0]));
+ }
+}
+
+/**
+ * Decode spectral data; reference: table 4.50.
+ * Dequantize and scale spectral data; reference: 4.6.3.3.
+ *
+ * @param coef array of dequantized, scaled spectral data
+ * @param sf array of scalefactors or intensity stereo positions
+ * @param pulse_present set if pulses are present
+ * @param pulse pointer to pulse data struct
+ * @param band_type array of the used band type
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static int decode_spectrum_and_dequant(AACContext *ac, INTFLOAT coef[1024],
+ GetBitContext *gb, const INTFLOAT sf[120],
+ int pulse_present, const Pulse *pulse,
+ const IndividualChannelStream *ics,
+ enum BandType band_type[120])
+{
+ int i, k, g, idx = 0;
+ const int c = 1024 / ics->num_windows;
+ const uint16_t *offsets = ics->swb_offset;
+ INTFLOAT *coef_base = coef;
+
+ for (g = 0; g < ics->num_windows; g++)
+ memset(coef + g * 128 + offsets[ics->max_sfb], 0,
+ sizeof(INTFLOAT) * (c - offsets[ics->max_sfb]));
+
+ for (g = 0; g < ics->num_window_groups; g++) {
+ unsigned g_len = ics->group_len[g];
+
+ for (i = 0; i < ics->max_sfb; i++, idx++) {
+ const unsigned cbt_m1 = band_type[idx] - 1;
+ INTFLOAT *cfo = coef + offsets[i];
+ int off_len = offsets[i + 1] - offsets[i];
+ int group;
+
+ if (cbt_m1 >= INTENSITY_BT2 - 1) {
+ for (group = 0; group < (AAC_SIGNE)g_len; group++, cfo+=128) {
+ memset(cfo, 0, off_len * sizeof(*cfo));
+ }
+ } else if (cbt_m1 == NOISE_BT - 1) {
+ for (group = 0; group < (AAC_SIGNE)g_len; group++, cfo+=128) {
+#if !USE_FIXED
+ float scale;
+#endif /* !USE_FIXED */
+ INTFLOAT band_energy;
+
+ for (k = 0; k < off_len; k++) {
+ ac->random_state = lcg_random(ac->random_state);
+#if USE_FIXED
+ cfo[k] = ac->random_state >> 3;
+#else
+ cfo[k] = ac->random_state;
+#endif /* USE_FIXED */
+ }
+
+#if USE_FIXED
+ band_energy = ac->fdsp->scalarproduct_fixed(cfo, cfo, off_len);
+ band_energy = fixed_sqrt(band_energy, 31);
+ noise_scale(cfo, sf[idx], band_energy, off_len);
+#else
+ band_energy = ac->fdsp->scalarproduct_float(cfo, cfo, off_len);
+ scale = sf[idx] / sqrtf(band_energy);
+ ac->fdsp->vector_fmul_scalar(cfo, cfo, scale, off_len);
+#endif /* USE_FIXED */
+ }
+ } else {
+#if !USE_FIXED
+ const float *vq = ff_aac_codebook_vector_vals[cbt_m1];
+#endif /* !USE_FIXED */
+ const uint16_t *cb_vector_idx = ff_aac_codebook_vector_idx[cbt_m1];
+ VLC_TYPE (*vlc_tab)[2] = vlc_spectral[cbt_m1].table;
+ OPEN_READER(re, gb);
+
+ switch (cbt_m1 >> 1) {
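+ /* Dispatch on the spectral codebook pair:
+ * 0: books 1-2 (signed 4-tuples), 1: books 3-4 (unsigned 4-tuples + sign bits),
+ * 2: books 5-6 (signed pairs), 3-4: books 7-10 (unsigned pairs + sign bits),
+ * default: book 11 (escape pairs). */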
+ case 0:
+ for (group = 0; group < (AAC_SIGNE)g_len; group++, cfo+=128) {
+ INTFLOAT *cf = cfo;
+ int len = off_len;
+
+ do {
+ int code;
+ unsigned cb_idx;
+
+ UPDATE_CACHE(re, gb);
+ GET_VLC(code, re, gb, vlc_tab, 8, 2);
+ cb_idx = cb_vector_idx[code];
+#if USE_FIXED
+ cf = DEC_SQUAD(cf, cb_idx);
+#else
+ cf = VMUL4(cf, vq, cb_idx, sf + idx);
+#endif /* USE_FIXED */
+ } while (len -= 4);
+ }
+ break;
+
+ case 1:
+ for (group = 0; group < (AAC_SIGNE)g_len; group++, cfo+=128) {
+ INTFLOAT *cf = cfo;
+ int len = off_len;
+
+ do {
+ int code;
+ unsigned nnz;
+ unsigned cb_idx;
+ uint32_t bits;
+
+ UPDATE_CACHE(re, gb);
+ GET_VLC(code, re, gb, vlc_tab, 8, 2);
+ cb_idx = cb_vector_idx[code];
+ nnz = cb_idx >> 8 & 15;
+ bits = nnz ? GET_CACHE(re, gb) : 0;
+ LAST_SKIP_BITS(re, gb, nnz);
+#if USE_FIXED
+ cf = DEC_UQUAD(cf, cb_idx, bits);
+#else
+ cf = VMUL4S(cf, vq, cb_idx, bits, sf + idx);
+#endif /* USE_FIXED */
+ } while (len -= 4);
+ }
+ break;
+
+ case 2:
+ for (group = 0; group < (AAC_SIGNE)g_len; group++, cfo+=128) {
+ INTFLOAT *cf = cfo;
+ int len = off_len;
+
+ do {
+ int code;
+ unsigned cb_idx;
+
+ UPDATE_CACHE(re, gb);
+ GET_VLC(code, re, gb, vlc_tab, 8, 2);
+ cb_idx = cb_vector_idx[code];
+#if USE_FIXED
+ cf = DEC_SPAIR(cf, cb_idx);
+#else
+ cf = VMUL2(cf, vq, cb_idx, sf + idx);
+#endif /* USE_FIXED */
+ } while (len -= 2);
+ }
+ break;
+
+ case 3:
+ case 4:
+ for (group = 0; group < (AAC_SIGNE)g_len; group++, cfo+=128) {
+ INTFLOAT *cf = cfo;
+ int len = off_len;
+
+ do {
+ int code;
+ unsigned nnz;
+ unsigned cb_idx;
+ unsigned sign;
+
+ UPDATE_CACHE(re, gb);
+ GET_VLC(code, re, gb, vlc_tab, 8, 2);
+ cb_idx = cb_vector_idx[code];
+ nnz = cb_idx >> 8 & 15;
+ sign = nnz ? SHOW_UBITS(re, gb, nnz) << (cb_idx >> 12) : 0;
+ LAST_SKIP_BITS(re, gb, nnz);
+#if USE_FIXED
+ cf = DEC_UPAIR(cf, cb_idx, sign);
+#else
+ cf = VMUL2S(cf, vq, cb_idx, sign, sf + idx);
+#endif /* USE_FIXED */
+ } while (len -= 2);
+ }
+ break;
+
+ default:
+ for (group = 0; group < (AAC_SIGNE)g_len; group++, cfo+=128) {
+#if USE_FIXED
+ int *icf = cfo;
+ int v;
+#else
+ float *cf = cfo;
+ uint32_t *icf = (uint32_t *) cf;
+#endif /* USE_FIXED */
+ int len = off_len;
+
+ do {
+ int code;
+ unsigned nzt, nnz;
+ unsigned cb_idx;
+ uint32_t bits;
+ int j;
+
+ UPDATE_CACHE(re, gb);
+ GET_VLC(code, re, gb, vlc_tab, 8, 2);
+
+ if (!code) {
+ *icf++ = 0;
+ *icf++ = 0;
+ continue;
+ }
+
+ cb_idx = cb_vector_idx[code];
+ nnz = cb_idx >> 12;
+ nzt = cb_idx >> 8;
+ bits = SHOW_UBITS(re, gb, nnz) << (32-nnz);
+ LAST_SKIP_BITS(re, gb, nnz);
+
+ for (j = 0; j < 2; j++) {
+ if (nzt & 1<<j) {
+ uint32_t b;
+ int n;
+ /* The total length of escape_sequence must be < 22 bits according
+ to the specification (i.e. max is 111111110xxxxxxxxxxxx). */
+ UPDATE_CACHE(re, gb);
+ b = GET_CACHE(re, gb);
+ b = 31 - av_log2(~b);
+
+ if (b > 8) {
+ av_log(ac->avctx, AV_LOG_ERROR, "error in spectral data, ESC overflow\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ SKIP_BITS(re, gb, b + 1);
+ b += 4;
+ n = (1 << b) + SHOW_UBITS(re, gb, b);
+ LAST_SKIP_BITS(re, gb, b);
+#if USE_FIXED
+ v = n;
+ if (bits & 1U<<31)
+ v = -v;
+ *icf++ = v;
+#else
+ *icf++ = cbrt_tab[n] | (bits & 1U<<31);
+#endif /* USE_FIXED */
+ bits <<= 1;
+ } else {
+#if USE_FIXED
+ v = cb_idx & 15;
+ if (bits & 1U<<31)
+ v = -v;
+ *icf++ = v;
+#else
+ unsigned v = ((const uint32_t*)vq)[cb_idx & 15];
+ *icf++ = (bits & 1U<<31) | v;
+#endif /* USE_FIXED */
+ bits <<= !!v;
+ }
+ cb_idx >>= 4;
+ }
+ } while (len -= 2);
+#if !USE_FIXED
+ ac->fdsp->vector_fmul_scalar(cfo, cfo, sf[idx], off_len);
+#endif /* !USE_FIXED */
+ }
+ }
+
+ CLOSE_READER(re, gb);
+ }
+ }
+ coef += g_len << 7;
+ }
+
+ if (pulse_present) {
+ idx = 0;
+ for (i = 0; i < pulse->num_pulse; i++) {
+ INTFLOAT co = coef_base[ pulse->pos[i] ];
+ while (offsets[idx + 1] <= pulse->pos[i])
+ idx++;
+ if (band_type[idx] != NOISE_BT && sf[idx]) {
+ INTFLOAT ico = -pulse->amp[i];
+#if USE_FIXED
+ if (co) {
+ ico = co + (co > 0 ? -ico : ico);
+ }
+ coef_base[ pulse->pos[i] ] = ico;
+#else
+ if (co) {
+ co /= sf[idx];
+ ico = co / sqrtf(sqrtf(fabsf(co))) + (co > 0 ? -ico : ico);
+ }
+ coef_base[ pulse->pos[i] ] = cbrtf(fabsf(ico)) * ico * sf[idx];
+#endif /* USE_FIXED */
+ }
+ }
+ }
+#if USE_FIXED
+ coef = coef_base;
+ idx = 0;
+ for (g = 0; g < ics->num_window_groups; g++) {
+ unsigned g_len = ics->group_len[g];
+
+ for (i = 0; i < ics->max_sfb; i++, idx++) {
+ const unsigned cbt_m1 = band_type[idx] - 1;
+ int *cfo = coef + offsets[i];
+ int off_len = offsets[i + 1] - offsets[i];
+ int group;
+
+ if (cbt_m1 < NOISE_BT - 1) {
+ for (group = 0; group < (int)g_len; group++, cfo+=128) {
+ ac->vector_pow43(cfo, off_len);
+ ac->subband_scale(cfo, cfo, sf[idx], 34, off_len);
+ }
+ }
+ }
+ coef += g_len << 7;
+ }
+#endif /* USE_FIXED */
+ return 0;
+}
+
+/**
+ * Apply AAC-Main style frequency domain prediction.
+ */
+static void apply_prediction(AACContext *ac, SingleChannelElement *sce)
+{
+ int sfb, k;
+
+ if (!sce->ics.predictor_initialized) {
+ reset_all_predictors(sce->predictor_state);
+ sce->ics.predictor_initialized = 1;
+ }
+
+ if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
+ for (sfb = 0;
+ sfb < ff_aac_pred_sfb_max[ac->oc[1].m4ac.sampling_index];
+ sfb++) {
+ for (k = sce->ics.swb_offset[sfb];
+ k < sce->ics.swb_offset[sfb + 1];
+ k++) {
+ predict(&sce->predictor_state[k], &sce->coeffs[k],
+ sce->ics.predictor_present &&
+ sce->ics.prediction_used[sfb]);
+ }
+ }
+ if (sce->ics.predictor_reset_group)
+ reset_predictor_group(sce->predictor_state,
+ sce->ics.predictor_reset_group);
+ } else
+ reset_all_predictors(sce->predictor_state);
+}
+
+/**
+ * Decode an individual_channel_stream payload; reference: table 4.44.
+ *
+ * @param common_window Channels have independent [0], or shared [1], Individual Channel Stream information.
+ * @param scale_flag scalable [1] or non-scalable [0] AAC (Unused until scalable AAC is implemented.)
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static int decode_ics(AACContext *ac, SingleChannelElement *sce,
+ GetBitContext *gb, int common_window, int scale_flag)
+{
+ Pulse pulse;
+ TemporalNoiseShaping *tns = &sce->tns;
+ IndividualChannelStream *ics = &sce->ics;
+ INTFLOAT *out = sce->coeffs;
+ int global_gain, eld_syntax, er_syntax, pulse_present = 0;
+ int ret;
+
+ eld_syntax = ac->oc[1].m4ac.object_type == AOT_ER_AAC_ELD;
+ er_syntax = ac->oc[1].m4ac.object_type == AOT_ER_AAC_LC ||
+ ac->oc[1].m4ac.object_type == AOT_ER_AAC_LTP ||
+ ac->oc[1].m4ac.object_type == AOT_ER_AAC_LD ||
+ ac->oc[1].m4ac.object_type == AOT_ER_AAC_ELD;
+
+ /* This assignment is only there to silence a GCC warning about the variable
+ * being used uninitialized; it is in fact always initialized before use.
+ */
+ pulse.num_pulse = 0;
+
+ global_gain = get_bits(gb, 8);
+
+ if (!common_window && !scale_flag) {
+ if (decode_ics_info(ac, ics, gb) < 0)
+ return AVERROR_INVALIDDATA;
+ }
+
+ if ((ret = decode_band_types(ac, sce->band_type,
+ sce->band_type_run_end, gb, ics)) < 0)
+ return ret;
+ if ((ret = decode_scalefactors(ac, sce->sf, gb, global_gain, ics,
+ sce->band_type, sce->band_type_run_end)) < 0)
+ return ret;
+
+ pulse_present = 0;
+ if (!scale_flag) {
+ if (!eld_syntax && (pulse_present = get_bits1(gb))) {
+ if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
+ av_log(ac->avctx, AV_LOG_ERROR,
+ "Pulse tool not allowed in eight short sequence.\n");
+ return AVERROR_INVALIDDATA;
+ }
+ if (decode_pulses(&pulse, gb, ics->swb_offset, ics->num_swb)) {
+ av_log(ac->avctx, AV_LOG_ERROR,
+ "Pulse data corrupt or invalid.\n");
+ return AVERROR_INVALIDDATA;
+ }
+ }
+ tns->present = get_bits1(gb);
+ if (tns->present && !er_syntax)
+ if (decode_tns(ac, tns, gb, ics) < 0)
+ return AVERROR_INVALIDDATA;
+ if (!eld_syntax && get_bits1(gb)) {
+ avpriv_request_sample(ac->avctx, "SSR");
+ return AVERROR_PATCHWELCOME;
+ }
+ // I see no textual basis in the spec for this occurring after SSR gain
+ // control, but this is what both reference and real implementations do
+ if (tns->present && er_syntax)
+ if (decode_tns(ac, tns, gb, ics) < 0)
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (decode_spectrum_and_dequant(ac, out, gb, sce->sf, pulse_present,
+ &pulse, ics, sce->band_type) < 0)
+ return AVERROR_INVALIDDATA;
+
+ if (ac->oc[1].m4ac.object_type == AOT_AAC_MAIN && !common_window)
+ apply_prediction(ac, sce);
+
+ return 0;
+}
+
+/**
+ * Mid/Side stereo decoding; reference: 4.6.8.1.3.
+ */
+static void apply_mid_side_stereo(AACContext *ac, ChannelElement *cpe)
+{
+ const IndividualChannelStream *ics = &cpe->ch[0].ics;
+ INTFLOAT *ch0 = cpe->ch[0].coeffs;
+ INTFLOAT *ch1 = cpe->ch[1].coeffs;
+ int g, i, group, idx = 0;
+ const uint16_t *offsets = ics->swb_offset;
+ for (g = 0; g < ics->num_window_groups; g++) {
+ for (i = 0; i < ics->max_sfb; i++, idx++) {
+ if (cpe->ms_mask[idx] &&
+ cpe->ch[0].band_type[idx] < NOISE_BT &&
+ cpe->ch[1].band_type[idx] < NOISE_BT) {
+#if USE_FIXED
+ for (group = 0; group < ics->group_len[g]; group++) {
+ ac->fdsp->butterflies_fixed(ch0 + group * 128 + offsets[i],
+ ch1 + group * 128 + offsets[i],
+ offsets[i+1] - offsets[i]);
+#else
+ for (group = 0; group < ics->group_len[g]; group++) {
+ ac->fdsp->butterflies_float(ch0 + group * 128 + offsets[i],
+ ch1 + group * 128 + offsets[i],
+ offsets[i+1] - offsets[i]);
+#endif /* USE_FIXED */
+ }
+ }
+ }
+ ch0 += ics->group_len[g] * 128;
+ ch1 += ics->group_len[g] * 128;
+ }
+}
+
+/**
+ * Intensity stereo decoding; reference: 4.6.8.2.3.
+ *
+ * @param ms_present Indicates mid/side stereo presence. [0] mask is all 0s;
+ * [1] mask is decoded from bitstream; [2] mask is all 1s;
+ * [3] reserved for scalable AAC
+ */
+static void apply_intensity_stereo(AACContext *ac,
+ ChannelElement *cpe, int ms_present)
+{
+ const IndividualChannelStream *ics = &cpe->ch[1].ics;
+ SingleChannelElement *sce1 = &cpe->ch[1];
+ INTFLOAT *coef0 = cpe->ch[0].coeffs, *coef1 = cpe->ch[1].coeffs;
+ const uint16_t *offsets = ics->swb_offset;
+ int g, group, i, idx = 0;
+ int c;
+ INTFLOAT scale;
+ for (g = 0; g < ics->num_window_groups; g++) {
+ for (i = 0; i < ics->max_sfb;) {
+ if (sce1->band_type[idx] == INTENSITY_BT ||
+ sce1->band_type[idx] == INTENSITY_BT2) {
+ const int bt_run_end = sce1->band_type_run_end[idx];
+ for (; i < bt_run_end; i++, idx++) {
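+ // c is +1 for INTENSITY_BT (in phase) and -1 for INTENSITY_BT2 (out of phase)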
+ c = -1 + 2 * (sce1->band_type[idx] - 14);
+ if (ms_present)
+ c *= 1 - 2 * cpe->ms_mask[idx];
+ scale = c * sce1->sf[idx];
+ for (group = 0; group < ics->group_len[g]; group++)
+#if USE_FIXED
+ ac->subband_scale(coef1 + group * 128 + offsets[i],
+ coef0 + group * 128 + offsets[i],
+ scale,
+ 23,
+ offsets[i + 1] - offsets[i]);
+#else
+ ac->fdsp->vector_fmul_scalar(coef1 + group * 128 + offsets[i],
+ coef0 + group * 128 + offsets[i],
+ scale,
+ offsets[i + 1] - offsets[i]);
+#endif /* USE_FIXED */
+ }
+ } else {
+ int bt_run_end = sce1->band_type_run_end[idx];
+ idx += bt_run_end - i;
+ i = bt_run_end;
+ }
+ }
+ coef0 += ics->group_len[g] * 128;
+ coef1 += ics->group_len[g] * 128;
+ }
+}
+
+/**
+ * Decode a channel_pair_element; reference: table 4.4.
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static int decode_cpe(AACContext *ac, GetBitContext *gb, ChannelElement *cpe)
+{
+ int i, ret, common_window, ms_present = 0;
+ int eld_syntax = ac->oc[1].m4ac.object_type == AOT_ER_AAC_ELD;
+
+ common_window = eld_syntax || get_bits1(gb);
+ if (common_window) {
+ if (decode_ics_info(ac, &cpe->ch[0].ics, gb))
+ return AVERROR_INVALIDDATA;
+ i = cpe->ch[1].ics.use_kb_window[0];
+ cpe->ch[1].ics = cpe->ch[0].ics;
+ cpe->ch[1].ics.use_kb_window[1] = i;
+ if (cpe->ch[1].ics.predictor_present &&
+ (ac->oc[1].m4ac.object_type != AOT_AAC_MAIN))
+ if ((cpe->ch[1].ics.ltp.present = get_bits(gb, 1)))
+ decode_ltp(&cpe->ch[1].ics.ltp, gb, cpe->ch[1].ics.max_sfb);
+ ms_present = get_bits(gb, 2);
+ if (ms_present == 3) {
+ av_log(ac->avctx, AV_LOG_ERROR, "ms_present = 3 is reserved.\n");
+ return AVERROR_INVALIDDATA;
+ } else if (ms_present)
+ decode_mid_side_stereo(cpe, gb, ms_present);
+ }
+ if ((ret = decode_ics(ac, &cpe->ch[0], gb, common_window, 0)))
+ return ret;
+ if ((ret = decode_ics(ac, &cpe->ch[1], gb, common_window, 0)))
+ return ret;
+
+ if (common_window) {
+ if (ms_present)
+ apply_mid_side_stereo(ac, cpe);
+ if (ac->oc[1].m4ac.object_type == AOT_AAC_MAIN) {
+ apply_prediction(ac, &cpe->ch[0]);
+ apply_prediction(ac, &cpe->ch[1]);
+ }
+ }
+
+ apply_intensity_stereo(ac, cpe, ms_present);
+ return 0;
+}
+
+static const float cce_scale[] = {
+ 1.09050773266525765921, //2^(1/8)
+ 1.18920711500272106672, //2^(1/4)
+ M_SQRT2,
+ 2,
+};
+
+/**
+ * Decode coupling_channel_element; reference: table 4.8.
+ *
+ * @return Returns error status. 0 - OK, !0 - error
+ */
+static int decode_cce(AACContext *ac, GetBitContext *gb, ChannelElement *che)
+{
+ int num_gain = 0;
+ int c, g, sfb, ret;
+ int sign;
+ INTFLOAT scale;
+ SingleChannelElement *sce = &che->ch[0];
+ ChannelCoupling *coup = &che->coup;
+
+ coup->coupling_point = 2 * get_bits1(gb);
+ coup->num_coupled = get_bits(gb, 3);
+ for (c = 0; c <= coup->num_coupled; c++) {
+ num_gain++;
+ coup->type[c] = get_bits1(gb) ? TYPE_CPE : TYPE_SCE;
+ coup->id_select[c] = get_bits(gb, 4);
+ if (coup->type[c] == TYPE_CPE) {
+ coup->ch_select[c] = get_bits(gb, 2);
+ if (coup->ch_select[c] == 3)
+ num_gain++;
+ } else
+ coup->ch_select[c] = 2;
+ }
+ coup->coupling_point += get_bits1(gb) || (coup->coupling_point >> 1);
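+ /* coupling_point is now 0 (before TNS), 1 (between TNS and IMDCT) or AFTER_IMDCT (3);
+ * an independently switched CCE always couples after the IMDCT. */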
+
+ sign = get_bits(gb, 1);
+ scale = AAC_RENAME(cce_scale)[get_bits(gb, 2)];
+
+ if ((ret = decode_ics(ac, sce, gb, 0, 0)))
+ return ret;
+
+ for (c = 0; c < num_gain; c++) {
+ int idx = 0;
+ int cge = 1;
+ int gain = 0;
+ INTFLOAT gain_cache = FIXR10(1.);
+ if (c) {
+ cge = coup->coupling_point == AFTER_IMDCT ? 1 : get_bits1(gb);
+ gain = cge ? get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60: 0;
+ gain_cache = GET_GAIN(scale, gain);
+ }
+ if (coup->coupling_point == AFTER_IMDCT) {
+ coup->gain[c][0] = gain_cache;
+ } else {
+ for (g = 0; g < sce->ics.num_window_groups; g++) {
+ for (sfb = 0; sfb < sce->ics.max_sfb; sfb++, idx++) {
+ if (sce->band_type[idx] != ZERO_BT) {
+ if (!cge) {
+ int t = get_vlc2(gb, vlc_scalefactors.table, 7, 3) - 60;
+ if (t) {
+ int s = 1;
+ t = gain += t;
+ if (sign) {
+ s -= 2 * (t & 0x1);
+ t >>= 1;
+ }
+ gain_cache = GET_GAIN(scale, t) * s;
+ }
+ }
+ coup->gain[c][idx] = gain_cache;
+ }
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * Parse whether channels are to be excluded from Dynamic Range Compression; reference: table 4.53.
+ *
+ * @return Returns number of bytes consumed.
+ */
+static int decode_drc_channel_exclusions(DynamicRangeControl *che_drc,
+ GetBitContext *gb)
+{
+ int i;
+ int num_excl_chan = 0;
+
+ do {
+ for (i = 0; i < 7; i++)
+ che_drc->exclude_mask[num_excl_chan++] = get_bits1(gb);
+ } while (num_excl_chan < MAX_CHANNELS - 7 && get_bits1(gb));
+
+ return num_excl_chan / 7;
+}
+
+/**
+ * Decode dynamic range information; reference: table 4.52.
+ *
+ * @return Returns number of bytes consumed.
+ */
+static int decode_dynamic_range(DynamicRangeControl *che_drc,
+ GetBitContext *gb)
+{
+ int n = 1;
+ int drc_num_bands = 1;
+ int i;
+
+ /* pce_tag_present? */
+ if (get_bits1(gb)) {
+ che_drc->pce_instance_tag = get_bits(gb, 4);
+ skip_bits(gb, 4); // tag_reserved_bits
+ n++;
+ }
+
+ /* excluded_chns_present? */
+ if (get_bits1(gb)) {
+ n += decode_drc_channel_exclusions(che_drc, gb);
+ }
+
+ /* drc_bands_present? */
+ if (get_bits1(gb)) {
+ che_drc->band_incr = get_bits(gb, 4);
+ che_drc->interpolation_scheme = get_bits(gb, 4);
+ n++;
+ drc_num_bands += che_drc->band_incr;
+ for (i = 0; i < drc_num_bands; i++) {
+ che_drc->band_top[i] = get_bits(gb, 8);
+ n++;
+ }
+ }
+
+ /* prog_ref_level_present? */
+ if (get_bits1(gb)) {
+ che_drc->prog_ref_level = get_bits(gb, 7);
+ skip_bits1(gb); // prog_ref_level_reserved_bits
+ n++;
+ }
+
+ for (i = 0; i < drc_num_bands; i++) {
+ che_drc->dyn_rng_sgn[i] = get_bits1(gb);
+ che_drc->dyn_rng_ctl[i] = get_bits(gb, 7);
+ n++;
+ }
+
+ return n;
+}
+
+static int decode_fill(AACContext *ac, GetBitContext *gb, int len) {
+ uint8_t buf[256];
+ int i, major, minor;
+
+ if (len < 13+7*8)
+ goto unknown;
+
+ get_bits(gb, 13); len -= 13;
+
+ for (i = 0; i + 1 < sizeof(buf) && len >= 8; i++, len -= 8)
+ buf[i] = get_bits(gb, 8);
+
+ buf[i] = 0;
+ if (ac->avctx->debug & FF_DEBUG_PICT_INFO)
+ av_log(ac->avctx, AV_LOG_DEBUG, "FILL:%s\n", buf);
+
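+ /* Streams produced by libfaac start with 1024 priming samples; if its version
+ * string is found in the fill data, ask the core to skip them. */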
+ if (sscanf(buf, "libfaac %d.%d", &major, &minor) == 2) {
+ ac->avctx->internal->skip_samples = 1024;
+ }
+
+unknown:
+ skip_bits_long(gb, len);
+
+ return 0;
+}
+
+/**
+ * Decode extension data (incomplete); reference: table 4.51.
+ *
+ * @param cnt length of TYPE_FIL syntactic element in bytes
+ *
+ * @return Returns number of bytes consumed
+ */
+static int decode_extension_payload(AACContext *ac, GetBitContext *gb, int cnt,
+ ChannelElement *che, enum RawDataBlockType elem_type)
+{
+ int crc_flag = 0;
+ int res = cnt;
+ int type = get_bits(gb, 4);
+
+ if (ac->avctx->debug & FF_DEBUG_STARTCODE)
+ av_log(ac->avctx, AV_LOG_DEBUG, "extension type: %d len:%d\n", type, cnt);
+
+ switch (type) { // extension type
+ case EXT_SBR_DATA_CRC:
+ crc_flag++;
+ case EXT_SBR_DATA:
+ if (!che) {
+ av_log(ac->avctx, AV_LOG_ERROR, "SBR was found before the first channel element.\n");
+ return res;
+ } else if (!ac->oc[1].m4ac.sbr) {
+ av_log(ac->avctx, AV_LOG_ERROR, "SBR signaled to be not-present but was found in the bitstream.\n");
+ skip_bits_long(gb, 8 * cnt - 4);
+ return res;
+ } else if (ac->oc[1].m4ac.sbr == -1 && ac->oc[1].status == OC_LOCKED) {
+ av_log(ac->avctx, AV_LOG_ERROR, "Implicit SBR was found with a first occurrence after the first frame.\n");
+ skip_bits_long(gb, 8 * cnt - 4);
+ return res;
+ } else if (ac->oc[1].m4ac.ps == -1 && ac->oc[1].status < OC_LOCKED && ac->avctx->channels == 1) {
+ ac->oc[1].m4ac.sbr = 1;
+ ac->oc[1].m4ac.ps = 1;
+ ac->avctx->profile = FF_PROFILE_AAC_HE_V2;
+ output_configure(ac, ac->oc[1].layout_map, ac->oc[1].layout_map_tags,
+ ac->oc[1].status, 1);
+ } else {
+ ac->oc[1].m4ac.sbr = 1;
+ ac->avctx->profile = FF_PROFILE_AAC_HE;
+ }
+ res = AAC_RENAME(ff_decode_sbr_extension)(ac, &che->sbr, gb, crc_flag, cnt, elem_type);
+ break;
+ case EXT_DYNAMIC_RANGE:
+ res = decode_dynamic_range(&ac->che_drc, gb);
+ break;
+ case EXT_FILL:
+ decode_fill(ac, gb, 8 * cnt - 4);
+ break;
+ case EXT_FILL_DATA:
+ case EXT_DATA_ELEMENT:
+ default:
+ skip_bits_long(gb, 8 * cnt - 4);
+ break;
+ }
+ return res;
+}
+
+/**
+ * Decode Temporal Noise Shaping filter coefficients and apply all-pole filters; reference: 4.6.9.3.
+ *
+ * @param decode 1 if tool is used normally, 0 if tool is used in LTP.
+ * @param coef spectral coefficients
+ */
+static void apply_tns(INTFLOAT coef[1024], TemporalNoiseShaping *tns,
+ IndividualChannelStream *ics, int decode)
+{
+ const int mmm = FFMIN(ics->tns_max_bands, ics->max_sfb);
+ int w, filt, m, i;
+ int bottom, top, order, start, end, size, inc;
+ INTFLOAT lpc[TNS_MAX_ORDER];
+ INTFLOAT tmp[TNS_MAX_ORDER+1];
+
+ for (w = 0; w < ics->num_windows; w++) {
+ bottom = ics->num_swb;
+ for (filt = 0; filt < tns->n_filt[w]; filt++) {
+ top = bottom;
+ bottom = FFMAX(0, top - tns->length[w][filt]);
+ order = tns->order[w][filt];
+ if (order == 0)
+ continue;
+
+ // tns_decode_coef
+ AAC_RENAME(compute_lpc_coefs)(tns->coef[w][filt], order, lpc, 0, 0, 0);
+
+ start = ics->swb_offset[FFMIN(bottom, mmm)];
+ end = ics->swb_offset[FFMIN( top, mmm)];
+ if ((size = end - start) <= 0)
+ continue;
+ if (tns->direction[w][filt]) {
+ inc = -1;
+ start = end - 1;
+ } else {
+ inc = 1;
+ }
+ start += w * 128;
+
+ if (decode) {
+ // ar filter
+ for (m = 0; m < size; m++, start += inc)
+ for (i = 1; i <= FFMIN(m, order); i++)
+ coef[start] -= AAC_MUL26(coef[start - i * inc], lpc[i - 1]);
+ } else {
+ // ma filter
+ for (m = 0; m < size; m++, start += inc) {
+ tmp[0] = coef[start];
+ for (i = 1; i <= FFMIN(m, order); i++)
+ coef[start] += AAC_MUL26(tmp[i], lpc[i - 1]);
+ for (i = order; i > 0; i--)
+ tmp[i] = tmp[i - 1];
+ }
+ }
+ }
+ }
+}
+
+/**
+ * Apply windowing and MDCT to obtain the spectral
+ * coefficient from the predicted sample by LTP.
+ */
+static void windowing_and_mdct_ltp(AACContext *ac, INTFLOAT *out,
+ INTFLOAT *in, IndividualChannelStream *ics)
+{
+ const INTFLOAT *lwindow = ics->use_kb_window[0] ? AAC_RENAME(ff_aac_kbd_long_1024) : AAC_RENAME(ff_sine_1024);
+ const INTFLOAT *swindow = ics->use_kb_window[0] ? AAC_RENAME(ff_aac_kbd_short_128) : AAC_RENAME(ff_sine_128);
+ const INTFLOAT *lwindow_prev = ics->use_kb_window[1] ? AAC_RENAME(ff_aac_kbd_long_1024) : AAC_RENAME(ff_sine_1024);
+ const INTFLOAT *swindow_prev = ics->use_kb_window[1] ? AAC_RENAME(ff_aac_kbd_short_128) : AAC_RENAME(ff_sine_128);
+
+ if (ics->window_sequence[0] != LONG_STOP_SEQUENCE) {
+ ac->fdsp->vector_fmul(in, in, lwindow_prev, 1024);
+ } else {
+ memset(in, 0, 448 * sizeof(*in));
+ ac->fdsp->vector_fmul(in + 448, in + 448, swindow_prev, 128);
+ }
+ if (ics->window_sequence[0] != LONG_START_SEQUENCE) {
+ ac->fdsp->vector_fmul_reverse(in + 1024, in + 1024, lwindow, 1024);
+ } else {
+ ac->fdsp->vector_fmul_reverse(in + 1024 + 448, in + 1024 + 448, swindow, 128);
+ memset(in + 1024 + 576, 0, 448 * sizeof(*in));
+ }
+ ac->mdct_ltp.mdct_calc(&ac->mdct_ltp, out, in);
+}
+
+/**
+ * Apply the long term prediction
+ */
+static void apply_ltp(AACContext *ac, SingleChannelElement *sce)
+{
+ const LongTermPrediction *ltp = &sce->ics.ltp;
+ const uint16_t *offsets = sce->ics.swb_offset;
+ int i, sfb;
+
+ if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
+ INTFLOAT *predTime = sce->ret;
+ INTFLOAT *predFreq = ac->buf_mdct;
+ int16_t num_samples = 2048;
+
+ if (ltp->lag < 1024)
+ num_samples = ltp->lag + 1024;
+ for (i = 0; i < num_samples; i++)
+ predTime[i] = AAC_MUL30(sce->ltp_state[i + 2048 - ltp->lag], ltp->coef);
+ memset(&predTime[i], 0, (2048 - i) * sizeof(*predTime));
+
+ ac->windowing_and_mdct_ltp(ac, predFreq, predTime, &sce->ics);
+
+ if (sce->tns.present)
+ ac->apply_tns(predFreq, &sce->tns, &sce->ics, 0);
+
+ for (sfb = 0; sfb < FFMIN(sce->ics.max_sfb, MAX_LTP_LONG_SFB); sfb++)
+ if (ltp->used[sfb])
+ for (i = offsets[sfb]; i < offsets[sfb + 1]; i++)
+ sce->coeffs[i] += predFreq[i];
+ }
+}
+
+/**
+ * Update the LTP buffer for next frame
+ */
+static void update_ltp(AACContext *ac, SingleChannelElement *sce)
+{
+ IndividualChannelStream *ics = &sce->ics;
+ INTFLOAT *saved = sce->saved;
+ INTFLOAT *saved_ltp = sce->coeffs;
+ const INTFLOAT *lwindow = ics->use_kb_window[0] ? AAC_RENAME(ff_aac_kbd_long_1024) : AAC_RENAME(ff_sine_1024);
+ const INTFLOAT *swindow = ics->use_kb_window[0] ? AAC_RENAME(ff_aac_kbd_short_128) : AAC_RENAME(ff_sine_128);
+ int i;
+
+ if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
+ memcpy(saved_ltp, saved, 512 * sizeof(*saved_ltp));
+ memset(saved_ltp + 576, 0, 448 * sizeof(*saved_ltp));
+ ac->fdsp->vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64);
+
+ for (i = 0; i < 64; i++)
+ saved_ltp[i + 512] = AAC_MUL31(ac->buf_mdct[1023 - i], swindow[63 - i]);
+ } else if (ics->window_sequence[0] == LONG_START_SEQUENCE) {
+ memcpy(saved_ltp, ac->buf_mdct + 512, 448 * sizeof(*saved_ltp));
+ memset(saved_ltp + 576, 0, 448 * sizeof(*saved_ltp));
+ ac->fdsp->vector_fmul_reverse(saved_ltp + 448, ac->buf_mdct + 960, &swindow[64], 64);
+
+ for (i = 0; i < 64; i++)
+ saved_ltp[i + 512] = AAC_MUL31(ac->buf_mdct[1023 - i], swindow[63 - i]);
+ } else { // LONG_STOP or ONLY_LONG
+ ac->fdsp->vector_fmul_reverse(saved_ltp, ac->buf_mdct + 512, &lwindow[512], 512);
+
+ for (i = 0; i < 512; i++)
+ saved_ltp[i + 512] = AAC_MUL31(ac->buf_mdct[1023 - i], lwindow[511 - i]);
+ }
+
+ memcpy(sce->ltp_state, sce->ltp_state+1024, 1024 * sizeof(*sce->ltp_state));
+ memcpy(sce->ltp_state+1024, sce->ret, 1024 * sizeof(*sce->ltp_state));
+ memcpy(sce->ltp_state+2048, saved_ltp, 1024 * sizeof(*sce->ltp_state));
+}
+
+/**
+ * Conduct IMDCT and windowing.
+ */
+static void imdct_and_windowing(AACContext *ac, SingleChannelElement *sce)
+{
+ IndividualChannelStream *ics = &sce->ics;
+ INTFLOAT *in = sce->coeffs;
+ INTFLOAT *out = sce->ret;
+ INTFLOAT *saved = sce->saved;
+ const INTFLOAT *swindow = ics->use_kb_window[0] ? AAC_RENAME(ff_aac_kbd_short_128) : AAC_RENAME(ff_sine_128);
+ const INTFLOAT *lwindow_prev = ics->use_kb_window[1] ? AAC_RENAME(ff_aac_kbd_long_1024) : AAC_RENAME(ff_sine_1024);
+ const INTFLOAT *swindow_prev = ics->use_kb_window[1] ? AAC_RENAME(ff_aac_kbd_short_128) : AAC_RENAME(ff_sine_128);
+ INTFLOAT *buf = ac->buf_mdct;
+ INTFLOAT *temp = ac->temp;
+ int i;
+
+ // imdct
+ if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
+ for (i = 0; i < 1024; i += 128)
+ ac->mdct_small.imdct_half(&ac->mdct_small, buf + i, in + i);
+ } else {
+ ac->mdct.imdct_half(&ac->mdct, buf, in);
+#if USE_FIXED
+ for (i=0; i<1024; i++)
+ buf[i] = (buf[i] + 4) >> 3;
+#endif /* USE_FIXED */
+ }
+
+ /* window overlapping
+ * NOTE: To simplify the overlapping code, all 'meaningless' short to long
+ * and long to short transitions are considered to be short to short
+ * transitions. This leaves just two cases (long to long and short to short)
+ * with a little special sauce for EIGHT_SHORT_SEQUENCE.
+ */
+ if ((ics->window_sequence[1] == ONLY_LONG_SEQUENCE || ics->window_sequence[1] == LONG_STOP_SEQUENCE) &&
+ (ics->window_sequence[0] == ONLY_LONG_SEQUENCE || ics->window_sequence[0] == LONG_START_SEQUENCE)) {
+ ac->fdsp->vector_fmul_window( out, saved, buf, lwindow_prev, 512);
+ } else {
+ memcpy( out, saved, 448 * sizeof(*out));
+
+ if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
+ ac->fdsp->vector_fmul_window(out + 448 + 0*128, saved + 448, buf + 0*128, swindow_prev, 64);
+ ac->fdsp->vector_fmul_window(out + 448 + 1*128, buf + 0*128 + 64, buf + 1*128, swindow, 64);
+ ac->fdsp->vector_fmul_window(out + 448 + 2*128, buf + 1*128 + 64, buf + 2*128, swindow, 64);
+ ac->fdsp->vector_fmul_window(out + 448 + 3*128, buf + 2*128 + 64, buf + 3*128, swindow, 64);
+ ac->fdsp->vector_fmul_window(temp, buf + 3*128 + 64, buf + 4*128, swindow, 64);
+ memcpy( out + 448 + 4*128, temp, 64 * sizeof(*out));
+ } else {
+ ac->fdsp->vector_fmul_window(out + 448, saved + 448, buf, swindow_prev, 64);
+ memcpy( out + 576, buf + 64, 448 * sizeof(*out));
+ }
+ }
+
+ // buffer update
+ if (ics->window_sequence[0] == EIGHT_SHORT_SEQUENCE) {
+ memcpy( saved, temp + 64, 64 * sizeof(*saved));
+ ac->fdsp->vector_fmul_window(saved + 64, buf + 4*128 + 64, buf + 5*128, swindow, 64);
+ ac->fdsp->vector_fmul_window(saved + 192, buf + 5*128 + 64, buf + 6*128, swindow, 64);
+ ac->fdsp->vector_fmul_window(saved + 320, buf + 6*128 + 64, buf + 7*128, swindow, 64);
+ memcpy( saved + 448, buf + 7*128 + 64, 64 * sizeof(*saved));
+ } else if (ics->window_sequence[0] == LONG_START_SEQUENCE) {
+ memcpy( saved, buf + 512, 448 * sizeof(*saved));
+ memcpy( saved + 448, buf + 7*128 + 64, 64 * sizeof(*saved));
+ } else { // LONG_STOP or ONLY_LONG
+ memcpy( saved, buf + 512, 512 * sizeof(*saved));
+ }
+}
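The long-window branch above is easiest to read against a plain 50% overlap-add model. A minimal sketch under that assumption, using a hypothetical helper overlap_add_long() with a full 2N-sample symmetric window w[] and full-length IMDCT outputs; the actual code instead uses imdct_half() plus vector_fmul_window(), which exploit the transform's symmetries:

    static void overlap_add_long(float *out, const float *prev_tail,
                                 const float *cur, const float *w, int N)
    {
        /* prev_tail: second half of the previous frame's IMDCT output,
         * cur: first half of the current one; the windowed halves are
         * summed so that the time-domain aliasing cancels. */
        for (int n = 0; n < N; n++)
            out[n] = prev_tail[n] * w[N + n] + cur[n] * w[n];
    }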
+
+static void imdct_and_windowing_ld(AACContext *ac, SingleChannelElement *sce)
+{
+ IndividualChannelStream *ics = &sce->ics;
+ INTFLOAT *in = sce->coeffs;
+ INTFLOAT *out = sce->ret;
+ INTFLOAT *saved = sce->saved;
+ INTFLOAT *buf = ac->buf_mdct;
+#if USE_FIXED
+ int i;
+#endif /* USE_FIXED */
+
+ // imdct
+ ac->mdct.imdct_half(&ac->mdct_ld, buf, in);
+
+#if USE_FIXED
+ for (i = 0; i < 1024; i++)
+ buf[i] = (buf[i] + 2) >> 2;
+#endif /* USE_FIXED */
+
+ // window overlapping
+ if (ics->use_kb_window[1]) {
+ // AAC LD uses a low overlap sine window instead of a KBD window
+ memcpy(out, saved, 192 * sizeof(*out));
+ ac->fdsp->vector_fmul_window(out + 192, saved + 192, buf, AAC_RENAME(ff_sine_128), 64);
+ memcpy( out + 320, buf + 64, 192 * sizeof(*out));
+ } else {
+ ac->fdsp->vector_fmul_window(out, saved, buf, AAC_RENAME(ff_sine_512), 256);
+ }
+
+ // buffer update
+ memcpy(saved, buf + 256, 256 * sizeof(*saved));
+}
+
+static void imdct_and_windowing_eld(AACContext *ac, SingleChannelElement *sce)
+{
+ INTFLOAT *in = sce->coeffs;
+ INTFLOAT *out = sce->ret;
+ INTFLOAT *saved = sce->saved;
+ INTFLOAT *buf = ac->buf_mdct;
+ int i;
+ const int n = ac->oc[1].m4ac.frame_length_short ? 480 : 512;
+ const int n2 = n >> 1;
+ const int n4 = n >> 2;
+ const INTFLOAT *const window = n == 480 ? AAC_RENAME(ff_aac_eld_window_480) :
+ AAC_RENAME(ff_aac_eld_window_512);
+
+ // Inverse transform, mapped to the conventional IMDCT by
+ // Chivukula, R.K.; Reznik, Y.A.; Devarajan, V.,
+ // "Efficient algorithms for MPEG-4 AAC-ELD, AAC-LD and AAC-LC filterbanks,"
+ // International Conference on Audio, Language and Image Processing, ICALIP 2008.
+ // URL: http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=4590245&isnumber=4589950
+ for (i = 0; i < n2; i+=2) {
+ INTFLOAT temp;
+ temp = in[i ]; in[i ] = -in[n - 1 - i]; in[n - 1 - i] = temp;
+ temp = -in[i + 1]; in[i + 1] = in[n - 2 - i]; in[n - 2 - i] = temp;
+ }
+#if !USE_FIXED
+ if (n == 480)
+ ac->mdct480->imdct_half(ac->mdct480, buf, in, 1, -1.f/(16*1024*960));
+ else
+#endif
+ ac->mdct.imdct_half(&ac->mdct_ld, buf, in);
+
+#if USE_FIXED
+ for (i = 0; i < 1024; i++)
+ buf[i] = (buf[i] + 1) >> 1;
+#endif /* USE_FIXED */
+
+ for (i = 0; i < n; i+=2) {
+ buf[i] = -buf[i];
+ }
+ // Like with the regular IMDCT at this point we still have the middle half
+ // of a transform but with even symmetry on the left and odd symmetry on
+ // the right
+
+ // window overlapping
+ // The spec says to use samples [0..511] but the reference decoder uses
+ // samples [128..639].
+ for (i = n4; i < n2; i ++) {
+ out[i - n4] = AAC_MUL31( buf[ n2 - 1 - i] , window[i - n4]) +
+ AAC_MUL31( saved[ i + n2] , window[i + n - n4]) +
+ AAC_MUL31(-saved[n + n2 - 1 - i] , window[i + 2*n - n4]) +
+ AAC_MUL31(-saved[ 2*n + n2 + i] , window[i + 3*n - n4]);
+ }
+ for (i = 0; i < n2; i ++) {
+ out[n4 + i] = AAC_MUL31( buf[ i] , window[i + n2 - n4]) +
+ AAC_MUL31(-saved[ n - 1 - i] , window[i + n2 + n - n4]) +
+ AAC_MUL31(-saved[ n + i] , window[i + n2 + 2*n - n4]) +
+ AAC_MUL31( saved[2*n + n - 1 - i] , window[i + n2 + 3*n - n4]);
+ }
+ for (i = 0; i < n4; i ++) {
+ out[n2 + n4 + i] = AAC_MUL31( buf[ i + n2] , window[i + n - n4]) +
+ AAC_MUL31(-saved[n2 - 1 - i] , window[i + 2*n - n4]) +
+ AAC_MUL31(-saved[n + n2 + i] , window[i + 3*n - n4]);
+ }
+
+ // buffer update
+ memmove(saved + n, saved, 2 * n * sizeof(*saved));
+ memcpy( saved, buf, n * sizeof(*saved));
+}
+
+/**
+ * channel coupling transformation interface
+ *
+ * @param apply_coupling_method pointer to (in)dependent coupling function
+ */
+static void apply_channel_coupling(AACContext *ac, ChannelElement *cc,
+ enum RawDataBlockType type, int elem_id,
+ enum CouplingPoint coupling_point,
+ void (*apply_coupling_method)(AACContext *ac, SingleChannelElement *target, ChannelElement *cce, int index))
+{
+ int i, c;
+
+ for (i = 0; i < MAX_ELEM_ID; i++) {
+ ChannelElement *cce = ac->che[TYPE_CCE][i];
+ int index = 0;
+
+ if (cce && cce->coup.coupling_point == coupling_point) {
+ ChannelCoupling *coup = &cce->coup;
+
+ for (c = 0; c <= coup->num_coupled; c++) {
+ if (coup->type[c] == type && coup->id_select[c] == elem_id) {
+ if (coup->ch_select[c] != 1) {
+ apply_coupling_method(ac, &cc->ch[0], cce, index);
+ if (coup->ch_select[c] != 0)
+ index++;
+ }
+ if (coup->ch_select[c] != 2)
+ apply_coupling_method(ac, &cc->ch[1], cce, index++);
+ } else
+ index += 1 + (coup->ch_select[c] == 3);
+ }
+ }
+ }
+}
+
+/**
+ * Convert spectral data to samples, applying all supported tools as appropriate.
+ */
+static void spectral_to_sample(AACContext *ac)
+{
+ int i, type;
+ void (*imdct_and_window)(AACContext *ac, SingleChannelElement *sce);
+ switch (ac->oc[1].m4ac.object_type) {
+ case AOT_ER_AAC_LD:
+ imdct_and_window = imdct_and_windowing_ld;
+ break;
+ case AOT_ER_AAC_ELD:
+ imdct_and_window = imdct_and_windowing_eld;
+ break;
+ default:
+ imdct_and_window = ac->imdct_and_windowing;
+ }
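+ /* Per channel element, the loop below applies the tools in order:
+  * dependent coupling before TNS, LTP prediction, TNS, dependent coupling
+  * between TNS and IMDCT, IMDCT + windowing (with LTP state update),
+  * SBR, and finally independent coupling after the IMDCT. */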
+ for (type = 3; type >= 0; type--) {
+ for (i = 0; i < MAX_ELEM_ID; i++) {
+ ChannelElement *che = ac->che[type][i];
+ if (che && che->present) {
+ if (type <= TYPE_CPE)
+ apply_channel_coupling(ac, che, type, i, BEFORE_TNS, AAC_RENAME(apply_dependent_coupling));
+ if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP) {
+ if (che->ch[0].ics.predictor_present) {
+ if (che->ch[0].ics.ltp.present)
+ ac->apply_ltp(ac, &che->ch[0]);
+ if (che->ch[1].ics.ltp.present && type == TYPE_CPE)
+ ac->apply_ltp(ac, &che->ch[1]);
+ }
+ }
+ if (che->ch[0].tns.present)
+ ac->apply_tns(che->ch[0].coeffs, &che->ch[0].tns, &che->ch[0].ics, 1);
+ if (che->ch[1].tns.present)
+ ac->apply_tns(che->ch[1].coeffs, &che->ch[1].tns, &che->ch[1].ics, 1);
+ if (type <= TYPE_CPE)
+ apply_channel_coupling(ac, che, type, i, BETWEEN_TNS_AND_IMDCT, AAC_RENAME(apply_dependent_coupling));
+ if (type != TYPE_CCE || che->coup.coupling_point == AFTER_IMDCT) {
+ imdct_and_window(ac, &che->ch[0]);
+ if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP)
+ ac->update_ltp(ac, &che->ch[0]);
+ if (type == TYPE_CPE) {
+ imdct_and_window(ac, &che->ch[1]);
+ if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP)
+ ac->update_ltp(ac, &che->ch[1]);
+ }
+ if (ac->oc[1].m4ac.sbr > 0) {
+ AAC_RENAME(ff_sbr_apply)(ac, &che->sbr, type, che->ch[0].ret, che->ch[1].ret);
+ }
+ }
+ if (type <= TYPE_CCE)
+ apply_channel_coupling(ac, che, type, i, AFTER_IMDCT, AAC_RENAME(apply_independent_coupling));
+
+#if USE_FIXED
+ {
+ int j;
+ /* preparation for resampler */
+ for(j = 0; j<2048; j++){
+ che->ch[0].ret[j] = (int32_t)av_clipl_int32((int64_t)che->ch[0].ret[j]<<7)+0x8000;
+ che->ch[1].ret[j] = (int32_t)av_clipl_int32((int64_t)che->ch[1].ret[j]<<7)+0x8000;
+ }
+ }
+#endif /* USE_FIXED */
+ che->present = 0;
+ } else if (che) {
+ av_log(ac->avctx, AV_LOG_VERBOSE, "ChannelElement %d.%d missing\n", type, i);
+ }
+ }
+ }
+}
+
+static int parse_adts_frame_header(AACContext *ac, GetBitContext *gb)
+{
+ int size;
+ AACADTSHeaderInfo hdr_info;
+ uint8_t layout_map[MAX_ELEM_ID*4][3];
+ int layout_map_tags, ret;
+
+ size = avpriv_aac_parse_header(gb, &hdr_info);
+ if (size > 0) {
+ if (!ac->warned_num_aac_frames && hdr_info.num_aac_frames != 1) {
+ // This is 2 for "VLB " audio in NSV files.
+ // See samples/nsv/vlb_audio.
+ avpriv_report_missing_feature(ac->avctx,
+ "More than one AAC RDB per ADTS frame");
+ ac->warned_num_aac_frames = 1;
+ }
+ push_output_configuration(ac);
+ if (hdr_info.chan_config) {
+ ac->oc[1].m4ac.chan_config = hdr_info.chan_config;
+ if ((ret = set_default_channel_config(ac->avctx,
+ layout_map,
+ &layout_map_tags,
+ hdr_info.chan_config)) < 0)
+ return ret;
+ if ((ret = output_configure(ac, layout_map, layout_map_tags,
+ FFMAX(ac->oc[1].status,
+ OC_TRIAL_FRAME), 0)) < 0)
+ return ret;
+ } else {
+ ac->oc[1].m4ac.chan_config = 0;
+ /**
+ * Dual mono frames in Japanese DTV can have chan_config 0
+ * without specifying a PCE, so default to dual mono here.
+ */
+ if (ac->dmono_mode && ac->oc[0].status == OC_NONE) {
+ layout_map_tags = 2;
+ layout_map[0][0] = layout_map[1][0] = TYPE_SCE;
+ layout_map[0][2] = layout_map[1][2] = AAC_CHANNEL_FRONT;
+ layout_map[0][1] = 0;
+ layout_map[1][1] = 1;
+ if (output_configure(ac, layout_map, layout_map_tags,
+ OC_TRIAL_FRAME, 0))
+ return -7;
+ }
+ }
+ ac->oc[1].m4ac.sample_rate = hdr_info.sample_rate;
+ ac->oc[1].m4ac.sampling_index = hdr_info.sampling_index;
+ ac->oc[1].m4ac.object_type = hdr_info.object_type;
+ ac->oc[1].m4ac.frame_length_short = 0;
+ if (ac->oc[0].status != OC_LOCKED ||
+ ac->oc[0].m4ac.chan_config != hdr_info.chan_config ||
+ ac->oc[0].m4ac.sample_rate != hdr_info.sample_rate) {
+ ac->oc[1].m4ac.sbr = -1;
+ ac->oc[1].m4ac.ps = -1;
+ }
+ if (!hdr_info.crc_absent)
+ skip_bits(gb, 16);
+ }
+ return size;
+}
+
+static int aac_decode_er_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, GetBitContext *gb)
+{
+ AACContext *ac = avctx->priv_data;
+ const MPEG4AudioConfig *const m4ac = &ac->oc[1].m4ac;
+ ChannelElement *che;
+ int err, i;
+ int samples = m4ac->frame_length_short ? 960 : 1024;
+ int chan_config = m4ac->chan_config;
+ int aot = m4ac->object_type;
+
+ if (aot == AOT_ER_AAC_LD || aot == AOT_ER_AAC_ELD)
+ samples >>= 1;
+
+ ac->frame = data;
+
+ if ((err = frame_configure_elements(avctx)) < 0)
+ return err;
+
+ // The FF_PROFILE_AAC_* defines are all object_type - 1
+ // This may lead to an undefined profile being signaled
+ ac->avctx->profile = aot - 1;
+
+ ac->tags_mapped = 0;
+
+ if (chan_config < 0 || (chan_config >= 8 && chan_config < 11) || chan_config >= 13) {
+ avpriv_request_sample(avctx, "Unknown ER channel configuration %d",
+ chan_config);
+ return AVERROR_INVALIDDATA;
+ }
+ for (i = 0; i < tags_per_config[chan_config]; i++) {
+ const int elem_type = aac_channel_layout_map[chan_config-1][i][0];
+ const int elem_id = aac_channel_layout_map[chan_config-1][i][1];
+ if (!(che=get_che(ac, elem_type, elem_id))) {
+ av_log(ac->avctx, AV_LOG_ERROR,
+ "channel element %d.%d is not allocated\n",
+ elem_type, elem_id);
+ return AVERROR_INVALIDDATA;
+ }
+ che->present = 1;
+ if (aot != AOT_ER_AAC_ELD)
+ skip_bits(gb, 4);
+ switch (elem_type) {
+ case TYPE_SCE:
+ err = decode_ics(ac, &che->ch[0], gb, 0, 0);
+ break;
+ case TYPE_CPE:
+ err = decode_cpe(ac, gb, che);
+ break;
+ case TYPE_LFE:
+ err = decode_ics(ac, &che->ch[0], gb, 0, 0);
+ break;
+ }
+ if (err < 0)
+ return err;
+ }
+
+ spectral_to_sample(ac);
+
+ ac->frame->nb_samples = samples;
+ ac->frame->sample_rate = avctx->sample_rate;
+ *got_frame_ptr = 1;
+
+ skip_bits_long(gb, get_bits_left(gb));
+ return 0;
+}
+
+static int aac_decode_frame_int(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, GetBitContext *gb, AVPacket *avpkt)
+{
+ AACContext *ac = avctx->priv_data;
+ ChannelElement *che = NULL, *che_prev = NULL;
+ enum RawDataBlockType elem_type, elem_type_prev = TYPE_END;
+ int err, elem_id;
+ int samples = 0, multiplier, audio_found = 0, pce_found = 0;
+ int is_dmono, sce_count = 0;
+
+ ac->frame = data;
+
+ if (show_bits(gb, 12) == 0xfff) {
+ if ((err = parse_adts_frame_header(ac, gb)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Error decoding AAC frame header.\n");
+ goto fail;
+ }
+ if (ac->oc[1].m4ac.sampling_index > 12) {
+ av_log(ac->avctx, AV_LOG_ERROR, "invalid sampling rate index %d\n", ac->oc[1].m4ac.sampling_index);
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+ }
+
+ if ((err = frame_configure_elements(avctx)) < 0)
+ goto fail;
+
+ // The FF_PROFILE_AAC_* defines are all object_type - 1
+ // This may lead to an undefined profile being signaled
+ ac->avctx->profile = ac->oc[1].m4ac.object_type - 1;
+
+ ac->tags_mapped = 0;
+ // parse
+ while ((elem_type = get_bits(gb, 3)) != TYPE_END) {
+ elem_id = get_bits(gb, 4);
+
+ if (avctx->debug & FF_DEBUG_STARTCODE)
+ av_log(avctx, AV_LOG_DEBUG, "Elem type:%x id:%x\n", elem_type, elem_id);
+
+ if (!avctx->channels && elem_type != TYPE_PCE) {
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+
+ if (elem_type < TYPE_DSE) {
+ if (!(che=get_che(ac, elem_type, elem_id))) {
+ av_log(ac->avctx, AV_LOG_ERROR, "channel element %d.%d is not allocated\n",
+ elem_type, elem_id);
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+ samples = 1024;
+ che->present = 1;
+ }
+
+ switch (elem_type) {
+
+ case TYPE_SCE:
+ err = decode_ics(ac, &che->ch[0], gb, 0, 0);
+ audio_found = 1;
+ sce_count++;
+ break;
+
+ case TYPE_CPE:
+ err = decode_cpe(ac, gb, che);
+ audio_found = 1;
+ break;
+
+ case TYPE_CCE:
+ err = decode_cce(ac, gb, che);
+ break;
+
+ case TYPE_LFE:
+ err = decode_ics(ac, &che->ch[0], gb, 0, 0);
+ audio_found = 1;
+ break;
+
+ case TYPE_DSE:
+ err = skip_data_stream_element(ac, gb);
+ break;
+
+ case TYPE_PCE: {
+ uint8_t layout_map[MAX_ELEM_ID*4][3];
+ int tags;
+ push_output_configuration(ac);
+ tags = decode_pce(avctx, &ac->oc[1].m4ac, layout_map, gb);
+ if (tags < 0) {
+ err = tags;
+ break;
+ }
+ if (pce_found) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Not evaluating a further program_config_element as this construct is dubious at best.\n");
+ } else {
+ err = output_configure(ac, layout_map, tags, OC_TRIAL_PCE, 1);
+ if (!err)
+ ac->oc[1].m4ac.chan_config = 0;
+ pce_found = 1;
+ }
+ break;
+ }
+
+ case TYPE_FIL:
+ if (elem_id == 15)
+ elem_id += get_bits(gb, 8) - 1;
+ if (get_bits_left(gb) < 8 * elem_id) {
+ av_log(avctx, AV_LOG_ERROR, "TYPE_FIL: "overread_err);
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+ while (elem_id > 0)
+ elem_id -= decode_extension_payload(ac, gb, elem_id, che_prev, elem_type_prev);
+ err = 0; /* FIXME */
+ break;
+
+ default:
+ err = AVERROR_BUG; /* should not happen, but keeps compiler happy */
+ break;
+ }
+
+ che_prev = che;
+ elem_type_prev = elem_type;
+
+ if (err)
+ goto fail;
+
+ if (get_bits_left(gb) < 3) {
+ av_log(avctx, AV_LOG_ERROR, overread_err);
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+ }
+
+ if (!avctx->channels) {
+ *got_frame_ptr = 0;
+ return 0;
+ }
+
+ spectral_to_sample(ac);
+
+ multiplier = (ac->oc[1].m4ac.sbr == 1) ? ac->oc[1].m4ac.ext_sample_rate > ac->oc[1].m4ac.sample_rate : 0;
+ samples <<= multiplier;
+
+ if (ac->oc[1].status && audio_found) {
+ avctx->sample_rate = ac->oc[1].m4ac.sample_rate << multiplier;
+ avctx->frame_size = samples;
+ ac->oc[1].status = OC_LOCKED;
+ }
+
+ if (multiplier) {
+ int side_size;
+ const uint8_t *side = av_packet_get_side_data(avpkt, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
+ if (side && side_size>=4)
+ AV_WL32(side, 2*AV_RL32(side));
+ }
+
+ if (!ac->frame->data[0] && samples) {
+ av_log(avctx, AV_LOG_ERROR, "no frame data found\n");
+ err = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+
+ if (samples) {
+ ac->frame->nb_samples = samples;
+ ac->frame->sample_rate = avctx->sample_rate;
+ } else
+ av_frame_unref(ac->frame);
+ *got_frame_ptr = !!samples;
+
+ /* for dual-mono audio (SCE + SCE) */
+ is_dmono = ac->dmono_mode && sce_count == 2 &&
+ ac->oc[1].channel_layout == (AV_CH_FRONT_LEFT | AV_CH_FRONT_RIGHT);
+ if (is_dmono) {
+ if (ac->dmono_mode == 1)
+ ((AVFrame *)data)->data[1] =((AVFrame *)data)->data[0];
+ else if (ac->dmono_mode == 2)
+ ((AVFrame *)data)->data[0] =((AVFrame *)data)->data[1];
+ }
+
+ return 0;
+fail:
+ pop_output_configuration(ac);
+ return err;
+}
+
+static int aac_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
+{
+ AACContext *ac = avctx->priv_data;
+ const uint8_t *buf = avpkt->data;
+ int buf_size = avpkt->size;
+ GetBitContext gb;
+ int buf_consumed;
+ int buf_offset;
+ int err;
+ int new_extradata_size;
+ const uint8_t *new_extradata = av_packet_get_side_data(avpkt,
+ AV_PKT_DATA_NEW_EXTRADATA,
+ &new_extradata_size);
+ int jp_dualmono_size;
+ const uint8_t *jp_dualmono = av_packet_get_side_data(avpkt,
+ AV_PKT_DATA_JP_DUALMONO,
+ &jp_dualmono_size);
+
+ if (new_extradata && 0) {
+ av_free(avctx->extradata);
+ avctx->extradata = av_mallocz(new_extradata_size +
+ FF_INPUT_BUFFER_PADDING_SIZE);
+ if (!avctx->extradata)
+ return AVERROR(ENOMEM);
+ avctx->extradata_size = new_extradata_size;
+ memcpy(avctx->extradata, new_extradata, new_extradata_size);
+ push_output_configuration(ac);
+ if (decode_audio_specific_config(ac, ac->avctx, &ac->oc[1].m4ac,
+ avctx->extradata,
+ avctx->extradata_size*8, 1) < 0) {
+ pop_output_configuration(ac);
+ return AVERROR_INVALIDDATA;
+ }
+ }
+
+ ac->dmono_mode = 0;
+ if (jp_dualmono && jp_dualmono_size > 0)
+ ac->dmono_mode = 1 + *jp_dualmono;
+ if (ac->force_dmono_mode >= 0)
+ ac->dmono_mode = ac->force_dmono_mode;
+
+ if (INT_MAX / 8 <= buf_size)
+ return AVERROR_INVALIDDATA;
+
+ if ((err = init_get_bits(&gb, buf, buf_size * 8)) < 0)
+ return err;
+
+ switch (ac->oc[1].m4ac.object_type) {
+ case AOT_ER_AAC_LC:
+ case AOT_ER_AAC_LTP:
+ case AOT_ER_AAC_LD:
+ case AOT_ER_AAC_ELD:
+ err = aac_decode_er_frame(avctx, data, got_frame_ptr, &gb);
+ break;
+ default:
+ err = aac_decode_frame_int(avctx, data, got_frame_ptr, &gb, avpkt);
+ }
+ if (err < 0)
+ return err;
+
+ buf_consumed = (get_bits_count(&gb) + 7) >> 3;
+ for (buf_offset = buf_consumed; buf_offset < buf_size; buf_offset++)
+ if (buf[buf_offset])
+ break;
+
+ return buf_size > buf_offset ? buf_consumed : buf_size;
+}
+
+static av_cold int aac_decode_close(AVCodecContext *avctx)
+{
+ AACContext *ac = avctx->priv_data;
+ int i, type;
+
+ for (i = 0; i < MAX_ELEM_ID; i++) {
+ for (type = 0; type < 4; type++) {
+ if (ac->che[type][i])
+ AAC_RENAME(ff_aac_sbr_ctx_close)(&ac->che[type][i]->sbr);
+ av_freep(&ac->che[type][i]);
+ }
+ }
+
+ ff_mdct_end(&ac->mdct);
+ ff_mdct_end(&ac->mdct_small);
+ ff_mdct_end(&ac->mdct_ld);
+ ff_mdct_end(&ac->mdct_ltp);
+#if !USE_FIXED
+ ff_imdct15_uninit(&ac->mdct480);
+#endif
+ av_freep(&ac->fdsp);
+ return 0;
+}
+
+static void aacdec_init(AACContext *c)
+{
+ c->imdct_and_windowing = imdct_and_windowing;
+ c->apply_ltp = apply_ltp;
+ c->apply_tns = apply_tns;
+ c->windowing_and_mdct_ltp = windowing_and_mdct_ltp;
+ c->update_ltp = update_ltp;
+#if USE_FIXED
+ c->vector_pow43 = vector_pow43;
+ c->subband_scale = subband_scale;
+#endif
+
+#if !USE_FIXED
+ if(ARCH_MIPS)
+ ff_aacdec_init_mips(c);
+#endif /* !USE_FIXED */
+}
+
+/**
+ * AVOptions for Japanese DTV specific extensions (ADTS only)
+ */
+#define AACDEC_FLAGS AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM
+static const AVOption options[] = {
+ {"dual_mono_mode", "Select the channel to decode for dual mono",
+ offsetof(AACContext, force_dmono_mode), AV_OPT_TYPE_INT, {.i64=-1}, -1, 2,
+ AACDEC_FLAGS, "dual_mono_mode"},
+
+ {"auto", "autoselection", 0, AV_OPT_TYPE_CONST, {.i64=-1}, INT_MIN, INT_MAX, AACDEC_FLAGS, "dual_mono_mode"},
+ {"main", "Select Main/Left channel", 0, AV_OPT_TYPE_CONST, {.i64= 1}, INT_MIN, INT_MAX, AACDEC_FLAGS, "dual_mono_mode"},
+ {"sub" , "Select Sub/Right channel", 0, AV_OPT_TYPE_CONST, {.i64= 2}, INT_MIN, INT_MAX, AACDEC_FLAGS, "dual_mono_mode"},
+ {"both", "Select both channels", 0, AV_OPT_TYPE_CONST, {.i64= 0}, INT_MIN, INT_MAX, AACDEC_FLAGS, "dual_mono_mode"},
+
+ {NULL},
+};
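A minimal usage sketch for these private options: they are passed as an options dictionary to avcodec_open2(). Here avctx and codec are assumed to come from avcodec_alloc_context3() and avcodec_find_decoder(AV_CODEC_ID_AAC); opts is local to the example.

    AVDictionary *opts = NULL;
    av_dict_set(&opts, "dual_mono_mode", "main", 0);  /* decode the Main/Left channel */
    if (avcodec_open2(avctx, codec, &opts) < 0) {
        /* handle open failure */
    }
    av_dict_free(&opts);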
+
+static const AVClass aac_decoder_class = {
+ .class_name = "AAC decoder",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+static const AVProfile profiles[] = {
+ { FF_PROFILE_AAC_MAIN, "Main" },
+ { FF_PROFILE_AAC_LOW, "LC" },
+ { FF_PROFILE_AAC_SSR, "SSR" },
+ { FF_PROFILE_AAC_LTP, "LTP" },
+ { FF_PROFILE_AAC_HE, "HE-AAC" },
+ { FF_PROFILE_AAC_HE_V2, "HE-AACv2" },
+ { FF_PROFILE_AAC_LD, "LD" },
+ { FF_PROFILE_AAC_ELD, "ELD" },
+ { FF_PROFILE_UNKNOWN },
+};
{
int ret = 0;
- s->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
- avpriv_float_dsp_init(&s->fdsp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
++ s->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
+ if (!s->fdsp)
+ return AVERROR(ENOMEM);
// window init
ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
ac3_tables_init();
ff_mdct_init(&s->imdct_256, 8, 1, 1.0);
ff_mdct_init(&s->imdct_512, 9, 1, 1.0);
- ff_kbd_window_init(s->window, 5.0, 256);
+ AC3_RENAME(ff_kbd_window_init)(s->window, 5.0, 256);
ff_bswapdsp_init(&s->bdsp);
- avpriv_float_dsp_init(&s->fdsp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
- ff_ac3dsp_init(&s->ac3dsp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
+
+#if (USE_FIXED)
- s->fdsp = avpriv_alloc_fixed_dsp(avctx->flags & CODEC_FLAG_BITEXACT);
++ s->fdsp = avpriv_alloc_fixed_dsp(avctx->flags & AV_CODEC_FLAG_BITEXACT);
+#else
- s->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
++ s->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
ff_fmt_convert_init(&s->fmt_conv, avctx);
- ff_ac3dsp_init(&s->ac3dsp, avctx->flags & CODEC_FLAG_BITEXACT);
+#endif
+
++ ff_ac3dsp_init(&s->ac3dsp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
av_lfg_init(&s->dith_state, 0);
- avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
+ if (USE_FIXED)
+ avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
+ else
+ avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
/* allow downmixing to stereo or mono */
#if FF_API_REQUEST_CHANNELS
av_cold int ff_ac3_float_encode_init(AVCodecContext *avctx)
{
AC3EncodeContext *s = avctx->priv_data;
- s->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
- avpriv_float_dsp_init(&s->fdsp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
++ s->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
+ if (!s->fdsp)
+ return AVERROR(ENOMEM);
return ff_ac3_encode_init(avctx);
}
{
int cpu_flags = av_get_cpu_flags();
- if (!high_bit_depth) {
- if (avctx->idct_algo == FF_IDCT_AUTO ||
+ if (!avctx->lowres && !high_bit_depth) {
- if ((avctx->idct_algo == FF_IDCT_AUTO && !(avctx->flags & CODEC_FLAG_BITEXACT)) ||
++ if ((avctx->idct_algo == FF_IDCT_AUTO && !(avctx->flags & AV_CODEC_FLAG_BITEXACT)) ||
avctx->idct_algo == FF_IDCT_ARM) {
c->idct_put = j_rev_dct_arm_put;
c->idct_add = j_rev_dct_arm_add;
av_cold void ff_idctdsp_init_armv6(IDCTDSPContext *c, AVCodecContext *avctx,
unsigned high_bit_depth)
{
- if (!high_bit_depth) {
- if (avctx->idct_algo == FF_IDCT_AUTO ||
+ if (!avctx->lowres && !high_bit_depth) {
- if ((avctx->idct_algo == FF_IDCT_AUTO && !(avctx->flags & CODEC_FLAG_BITEXACT)) ||
++ if ((avctx->idct_algo == FF_IDCT_AUTO && !(avctx->flags & AV_CODEC_FLAG_BITEXACT)) ||
avctx->idct_algo == FF_IDCT_SIMPLEARMV6) {
c->idct_put = ff_simple_idct_put_armv6;
c->idct_add = ff_simple_idct_add_armv6;
#include "avcodec.h"
#include "ass.h"
+#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
+#include "libavutil/bprint.h"
#include "libavutil/common.h"
-/**
- * Generate a suitable AVCodecContext.subtitle_header for SUBTITLE_ASS.
- *
- * @param avctx pointer to the AVCodecContext
- * @param font name of the default font face to use
- * @param font_size default font size to use
- * @param color default text color to use (ABGR)
- * @param back_color default background color to use (ABGR)
- * @param bold 1 for bold text, 0 for normal text
- * @param italic 1 for italic text, 0 for normal text
- * @param underline 1 for underline text, 0 for normal text
- * @param alignment position of the text (left, center, top...), defined after
- * the layout of the numpad (1-3 sub, 4-6 mid, 7-9 top)
- * @return >= 0 on success otherwise an error code <0
- */
-static int ass_subtitle_header(AVCodecContext *avctx,
- const char *font, int font_size,
- int color, int back_color,
- int bold, int italic, int underline,
- int alignment)
+int ff_ass_subtitle_header(AVCodecContext *avctx,
+ const char *font, int font_size,
+ int color, int back_color,
+ int bold, int italic, int underline,
+ int alignment)
{
- char header[512];
-
- snprintf(header, sizeof(header),
+ avctx->subtitle_header = av_asprintf(
"[Script Info]\r\n"
+ "; Script generated by FFmpeg/Lavc%s\r\n"
"ScriptType: v4.00+\r\n"
+ "PlayResX: %d\r\n"
+ "PlayResY: %d\r\n"
"\r\n"
"[V4+ Styles]\r\n"
- "Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, AlphaLevel, Encoding\r\n"
- "Style: Default,%s,%d,&H%x,&H%x,&H%x,&H%x,%d,%d,%d,1,1,0,%d,10,10,10,0,0\r\n"
+
+ /* ASSv4 header */
+ "Format: Name, "
+ "Fontname, Fontsize, "
+ "PrimaryColour, SecondaryColour, OutlineColour, BackColour, "
+ "Bold, Italic, Underline, StrikeOut, "
+ "ScaleX, ScaleY, "
+ "Spacing, Angle, "
+ "BorderStyle, Outline, Shadow, "
+ "Alignment, MarginL, MarginR, MarginV, "
+ "Encoding\r\n"
+
+ "Style: "
+ "Default," /* Name */
+ "%s,%d," /* Font{name,size} */
+ "&H%x,&H%x,&H%x,&H%x," /* {Primary,Secondary,Outline,Back}Colour */
+ "%d,%d,%d,0," /* Bold, Italic, Underline, StrikeOut */
+ "100,100," /* Scale{X,Y} */
+ "0,0," /* Spacing, Angle */
+ "1,1,0," /* BorderStyle, Outline, Shadow */
+ "%d,10,10,10," /* Alignment, Margin[LRV] */
+ "0\r\n" /* Encoding */
+
"\r\n"
"[Events]\r\n"
- "Format: Layer, Start, End, Text\r\n",
+ "Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text\r\n",
- !(avctx->flags & CODEC_FLAG_BITEXACT) ? AV_STRINGIFY(LIBAVCODEC_VERSION) : "",
++ !(avctx->flags & AV_CODEC_FLAG_BITEXACT) ? AV_STRINGIFY(LIBAVCODEC_VERSION) : "",
+ ASS_DEFAULT_PLAYRESX, ASS_DEFAULT_PLAYRESY,
font, font_size, color, color, back_color, back_color,
-bold, -italic, -underline, alignment);
ff_atrac_generate_tables();
- q->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
- avpriv_float_dsp_init(&q->fdsp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
++ q->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
q->bands[0] = q->low;
q->bands[1] = q->mid;
}
ff_atrac_init_gain_compensation(&q->gainc_ctx, 4, 3);
- q->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
- avpriv_float_dsp_init(&q->fdsp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
++ q->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
- q->units = av_mallocz(sizeof(*q->units) * avctx->channels);
- if (!q->units) {
+ q->units = av_mallocz_array(avctx->channels, sizeof(*q->units));
+ if (!q->units || !q->fdsp) {
atrac3_decode_close(avctx);
return AVERROR(ENOMEM);
}
ctx->my_channel_layout = avctx->channel_layout;
- ctx->ch_units = av_mallocz(sizeof(*ctx->ch_units) *
- ctx->num_channel_blocks);
- if (!ctx->ch_units) {
+ ctx->ch_units = av_mallocz_array(ctx->num_channel_blocks, sizeof(*ctx->ch_units));
- ctx->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
++ ctx->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
+
+ if (!ctx->ch_units || !ctx->fdsp) {
atrac3p_decode_close(avctx);
return AVERROR(ENOMEM);
}
Note: Not everything is supported yet.
*/
-#define AV_CODEC_FLAG_CLOSED_GOP (1 << 31)
+ /**
+ * Allow decoders to produce frames with data planes that are not aligned
+ * to CPU requirements (e.g. due to cropping).
+ */
+ #define AV_CODEC_FLAG_UNALIGNED (1 << 0)
+ /**
+ * Use fixed qscale.
+ */
+ #define AV_CODEC_FLAG_QSCALE (1 << 1)
+ /**
+ * 4 MV per MB allowed / advanced prediction for H.263.
+ */
+ #define AV_CODEC_FLAG_4MV (1 << 2)
+ /**
+ * Output even those frames that might be corrupted.
+ */
+ #define AV_CODEC_FLAG_OUTPUT_CORRUPT (1 << 3)
+ /**
+ * Use qpel MC.
+ */
+ #define AV_CODEC_FLAG_QPEL (1 << 4)
+ /**
+ * Use internal 2pass ratecontrol in first pass mode.
+ */
+ #define AV_CODEC_FLAG_PASS1 (1 << 9)
+ /**
+ * Use internal 2pass ratecontrol in second pass mode.
+ */
+ #define AV_CODEC_FLAG_PASS2 (1 << 10)
+ /**
+ * loop filter.
+ */
+ #define AV_CODEC_FLAG_LOOP_FILTER (1 << 11)
+ /**
+ * Only decode/encode grayscale.
+ */
+ #define AV_CODEC_FLAG_GRAY (1 << 13)
+ /**
+ * error[?] variables will be set during encoding.
+ */
+ #define AV_CODEC_FLAG_PSNR (1 << 15)
+ /**
+ * Input bitstream might be truncated at a random location
+ * instead of only at frame boundaries.
+ */
+ #define AV_CODEC_FLAG_TRUNCATED (1 << 16)
+ /**
+ * Use interlaced DCT.
+ */
+ #define AV_CODEC_FLAG_INTERLACED_DCT (1 << 18)
+ /**
+ * Force low delay.
+ */
+ #define AV_CODEC_FLAG_LOW_DELAY (1 << 19)
+ /**
+ * Place global headers in extradata instead of every keyframe.
+ */
+ #define AV_CODEC_FLAG_GLOBAL_HEADER (1 << 22)
+ /**
+ * Use only bitexact stuff (except (I)DCT).
+ */
+ #define AV_CODEC_FLAG_BITEXACT (1 << 23)
+ /* Fx : Flag for h263+ extra options */
+ /**
+ * H.263 advanced intra coding / MPEG-4 AC prediction
+ */
+ #define AV_CODEC_FLAG_AC_PRED (1 << 24)
+ /**
+ * interlaced motion estimation
+ */
+ #define AV_CODEC_FLAG_INTERLACED_ME (1 << 29)
+ /**
+ * Allow non spec compliant speedup tricks.
+ */
++#define AV_CODEC_FLAG_CLOSED_GOP (1U << 31)
+
+ #define AV_CODEC_FLAG2_FAST (1 << 0)
+ /**
+ * Skip bitstream encoding.
+ */
+ #define AV_CODEC_FLAG2_NO_OUTPUT (1 << 2)
+ /**
+ * Place global headers at every keyframe instead of in extradata.
+ */
+ #define AV_CODEC_FLAG2_LOCAL_HEADER (1 << 3)
++
++/**
++ * timecode is in drop frame format. DEPRECATED!!!!
++ */
++#define AV_CODEC_FLAG2_DROP_FRAME_TIMECODE 0x00002000
++
+ /**
+ * Input bitstream might be truncated at a packet boundaries
+ * instead of only at frame boundaries.
+ */
+ #define AV_CODEC_FLAG2_CHUNKS (1 << 15)
+ /**
+ * Discard cropping information from SPS.
+ */
+ #define AV_CODEC_FLAG2_IGNORE_CROP (1 << 16)
+
++/**
++ * Show all frames before the first keyframe
++ */
++#define AV_CODEC_FLAG2_SHOW_ALL 0x00400000
++/**
++ * Export motion vectors through frame side data
++ */
++#define AV_CODEC_FLAG2_EXPORT_MVS 0x10000000
++/**
++ * Do not skip samples and export skip information as frame side data
++ */
++#define AV_CODEC_FLAG2_SKIP_MANUAL 0x20000000
++
/**
* Allow decoders to produce frames with data planes that are not aligned
* to CPU requirements (e.g. due to cropping).
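The flags above are plain bit masks on AVCodecContext. A minimal sketch of how an application would combine them before avcodec_open2(); avctx is assumed to be an allocated AVCodecContext:

    avctx->flags  |= AV_CODEC_FLAG_GLOBAL_HEADER |  /* headers go into extradata */
                     AV_CODEC_FLAG_BITEXACT;        /* reproducible output */
    avctx->flags2 |= AV_CODEC_FLAG2_FAST;           /* allow non-compliant speedups */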
s->avctx = avctx;
dca_init_vlcs();
- s->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
- avpriv_float_dsp_init(&s->fdsp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
++ s->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
+ if (!s->fdsp)
+ return AVERROR(ENOMEM);
+
ff_mdct_init(&s->imdct, 6, 1, 1.0);
ff_synth_filter_init(&s->synth);
ff_dcadsp_init(&s->dcadsp);
for (i = 0; i < FF_ARRAY_ELEMS(ff_dnxhd_cid_table); i++) {
const CIDEntry *cid = &ff_dnxhd_cid_table[i];
if (cid->width == avctx->width && cid->height == avctx->height &&
- cid->interlaced == !!(avctx->flags & CODEC_FLAG_INTERLACED_DCT) &&
+ cid->interlaced == !!(avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) &&
cid->bit_depth == bit_depth) {
- for (j = 0; j < sizeof(cid->bit_rates); j++) {
+ for (j = 0; j < FF_ARRAY_ELEMS(cid->bit_rates); j++) {
if (cid->bit_rates[j] == mbs)
return cid->cid;
}
case 5:
if (!tmpptr) {
av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
- if (!(avctx->flags2 & CODEC_FLAG2_SHOW_ALL))
- return AVERROR_INVALIDDATA;
++ if (!(avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL))
+ return AVERROR_INVALIDDATA;
}
frame->key_frame = 0;
frame->pict_type = AV_PICTURE_TYPE_P;
int j, int mv_x, int mv_y, int add)
{
if (j < 4) {
+ unsigned offset = (mb_y*16 + ((j&2)<<2) + mv_y)*t->last_frame->linesize[0] + mb_x*16 + ((j&1)<<3) + mv_x;
+ if (offset >= (t->avctx->height - 7) * t->last_frame->linesize[0] - 7)
+ return;
comp(frame->data[0] + (mb_y*16 + ((j&2)<<2))*frame->linesize[0] + mb_x*16 + ((j&1)<<3),
frame->linesize[0],
- t->last_frame->data[0] + (mb_y*16 + ((j&2)<<2) + mv_y)*t->last_frame->linesize[0] + mb_x*16 + ((j&1)<<3) + mv_x,
+ t->last_frame->data[0] + offset,
t->last_frame->linesize[0], add);
- } else if (!(t->avctx->flags & CODEC_FLAG_GRAY)) {
+ } else if (!(t->avctx->flags & AV_CODEC_FLAG_GRAY)) {
int index = j - 3;
+ unsigned offset = (mb_y * 8 + (mv_y/2))*t->last_frame->linesize[index] + mb_x * 8 + (mv_x/2);
+ if (offset >= (t->avctx->height/2 - 7) * t->last_frame->linesize[index] - 7)
+ return;
comp(frame->data[index] + (mb_y*8)*frame->linesize[index] + mb_x * 8,
frame->linesize[index],
- t->last_frame->data[index] + (mb_y * 8 + (mv_y/2))*t->last_frame->linesize[index] + mb_x * 8 + (mv_x/2),
+ t->last_frame->data[index] + offset,
t->last_frame->linesize[index], add);
}
}
s->version = 0;
- if ((avctx->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)) || avctx->slices>1)
+ if ((avctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) ||
+ avctx->slices > 1)
s->version = FFMAX(s->version, 2);
- if (avctx->level == 3) {
+ // Level and slices unspecified: choose version 1.2+ to ensure multi-threaded decodability
+ if (avctx->slices == 0 && avctx->level < 0 && avctx->width * avctx->height > 720*576)
+ s->version = FFMAX(s->version, 2);
+
+ if (avctx->level <= 0 && s->version == 2) {
s->version = 3;
}
+ if (avctx->level >= 0 && avctx->level <= 4)
+ s->version = FFMAX(s->version, avctx->level);
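+ /* Worked example: with default options (slices and level unset) a
+  * 1920x1088 input exceeds 720*576, so the resolution check above selects
+  * version 2, which the level <= 0 branch then raises to version 3. */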
if (s->ec < 0) {
s->ec = (s->version >= 3);
if (!s->transparency)
s->plane_count = 2;
+ if (!s->chroma_planes && s->version > 3)
+ s->plane_count--;
- av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift,
- &s->chroma_v_shift);
-
+ avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift);
s->picture_number = 0;
- if (avctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2)) {
+ if (avctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) {
for (i = 0; i < s->quant_table_count; i++) {
s->rc_stat2[i] = av_mallocz(s->context_count[i] *
sizeof(*s->rc_stat2[i]));
return ret;
#define STATS_OUT_SIZE 1024 * 1024 * 6
- if (avctx->flags & CODEC_FLAG_PASS1) {
+ if (avctx->flags & AV_CODEC_FLAG_PASS1) {
avctx->stats_out = av_mallocz(STATS_OUT_SIZE);
+ if (!avctx->stats_out)
+ return AVERROR(ENOMEM);
for (i = 0; i < s->quant_table_count; i++)
for (j = 0; j < s->slice_count; j++) {
FFV1Context *sf = s->slice_context[j];
uint8_t keystate = 128;
uint8_t *buf_p;
int i, ret;
- if (avctx->flags & CODEC_FLAG_PASS1) {
+ int64_t maxsize = FF_MIN_BUFFER_SIZE
+ + avctx->width*avctx->height*35LL*4;
+
+ if(!pict) {
++ if (avctx->flags & AV_CODEC_FLAG_PASS1) {
+ int j, k, m;
+ char *p = avctx->stats_out;
+ char *end = p + STATS_OUT_SIZE;
+
+ memset(f->rc_stat, 0, sizeof(f->rc_stat));
+ for (i = 0; i < f->quant_table_count; i++)
+ memset(f->rc_stat2[i], 0, f->context_count[i] * sizeof(*f->rc_stat2[i]));
+
+ for (j = 0; j < f->slice_count; j++) {
+ FFV1Context *fs = f->slice_context[j];
+ for (i = 0; i < 256; i++) {
+ f->rc_stat[i][0] += fs->rc_stat[i][0];
+ f->rc_stat[i][1] += fs->rc_stat[i][1];
+ }
+ for (i = 0; i < f->quant_table_count; i++) {
+ for (k = 0; k < f->context_count[i]; k++)
+ for (m = 0; m < 32; m++) {
+ f->rc_stat2[i][k][m][0] += fs->rc_stat2[i][k][m][0];
+ f->rc_stat2[i][k][m][1] += fs->rc_stat2[i][k][m][1];
+ }
+ }
+ }
- f->frame = pict;
+ for (j = 0; j < 256; j++) {
+ snprintf(p, end - p, "%" PRIu64 " %" PRIu64 " ",
+ f->rc_stat[j][0], f->rc_stat[j][1]);
+ p += strlen(p);
+ }
+ snprintf(p, end - p, "\n");
- if ((ret = ff_alloc_packet(pkt, avctx->width * avctx->height *
- ((8 * 2 + 1 + 1) * 4) / 8 +
- FF_MIN_BUFFER_SIZE)) < 0) {
- av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
- return ret;
+ for (i = 0; i < f->quant_table_count; i++) {
+ for (j = 0; j < f->context_count[i]; j++)
+ for (m = 0; m < 32; m++) {
+ snprintf(p, end - p, "%" PRIu64 " %" PRIu64 " ",
+ f->rc_stat2[i][j][m][0], f->rc_stat2[i][j][m][1]);
+ p += strlen(p);
+ }
+ }
+ snprintf(p, end - p, "%d\n", f->gob_count);
+ }
+ return 0;
}
+ if (f->version > 3)
+ maxsize = FF_MIN_BUFFER_SIZE + avctx->width*avctx->height*3LL*4;
+
+ if ((ret = ff_alloc_packet2(avctx, pkt, maxsize, 0)) < 0)
+ return ret;
+
ff_init_range_encoder(c, pkt->data, pkt->size);
ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
buf_p += bytes;
}
- if (avctx->flags & CODEC_FLAG_PASS1)
- if ((avctx->flags & AV_CODEC_FLAG_PASS1) && (f->picture_number & 31) == 0) {
- int j, k, m;
- char *p = avctx->stats_out;
- char *end = p + STATS_OUT_SIZE;
-
- memset(f->rc_stat, 0, sizeof(f->rc_stat));
- for (i = 0; i < f->quant_table_count; i++)
- memset(f->rc_stat2[i], 0, f->context_count[i] * sizeof(*f->rc_stat2[i]));
-
- for (j = 0; j < f->slice_count; j++) {
- FFV1Context *fs = f->slice_context[j];
- for (i = 0; i < 256; i++) {
- f->rc_stat[i][0] += fs->rc_stat[i][0];
- f->rc_stat[i][1] += fs->rc_stat[i][1];
- }
- for (i = 0; i < f->quant_table_count; i++) {
- for (k = 0; k < f->context_count[i]; k++)
- for (m = 0; m < 32; m++) {
- f->rc_stat2[i][k][m][0] += fs->rc_stat2[i][k][m][0];
- f->rc_stat2[i][k][m][1] += fs->rc_stat2[i][k][m][1];
- }
- }
- }
-
- for (j = 0; j < 256; j++) {
- snprintf(p, end - p, "%" PRIu64 " %" PRIu64 " ",
- f->rc_stat[j][0], f->rc_stat[j][1]);
- p += strlen(p);
- }
- snprintf(p, end - p, "\n");
-
- for (i = 0; i < f->quant_table_count; i++) {
- for (j = 0; j < f->context_count[i]; j++)
- for (m = 0; m < 32; m++) {
- snprintf(p, end - p, "%" PRIu64 " %" PRIu64 " ",
- f->rc_stat2[i][j][m][0], f->rc_stat2[i][j][m][1]);
- p += strlen(p);
- }
- }
- snprintf(p, end - p, "%d\n", f->gob_count);
- } else if (avctx->flags & AV_CODEC_FLAG_PASS1)
++ if (avctx->flags & AV_CODEC_FLAG_PASS1)
avctx->stats_out[0] = '\0';
#if FF_API_CODED_FRAME
--- /dev/null
- && !(avctx->flags & CODEC_FLAG_QSCALE)) {
+/*
+ * Copyright (c) 2012 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "frame_thread_encoder.h"
+
+#include "libavutil/fifo.h"
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "avcodec.h"
+#include "internal.h"
+#include "thread.h"
+
+#if HAVE_PTHREADS
+#include <pthread.h>
+#elif HAVE_W32THREADS
+#include "compat/w32pthreads.h"
+#elif HAVE_OS2THREADS
+#include "compat/os2threads.h"
+#endif
+
+#define MAX_THREADS 64
+#define BUFFER_SIZE (2*MAX_THREADS)
+
+typedef struct{
+ void *indata;
+ void *outdata;
+ int64_t return_code;
+ unsigned index;
+} Task;
+
+typedef struct{
+ AVCodecContext *parent_avctx;
+ pthread_mutex_t buffer_mutex;
+
+ AVFifoBuffer *task_fifo;
+ pthread_mutex_t task_fifo_mutex;
+ pthread_cond_t task_fifo_cond;
+
+ Task finished_tasks[BUFFER_SIZE];
+ pthread_mutex_t finished_task_mutex;
+ pthread_cond_t finished_task_cond;
+
+ unsigned task_index;
+ unsigned finished_task_index;
+
+ pthread_t worker[MAX_THREADS];
+ int exit;
+} ThreadContext;
+
+static void * attribute_align_arg worker(void *v){
+ AVCodecContext *avctx = v;
+ ThreadContext *c = avctx->internal->frame_thread_encoder;
+ AVPacket *pkt = NULL;
+
+ while(!c->exit){
+ int got_packet, ret;
+ AVFrame *frame;
+ Task task;
+
+ if(!pkt) pkt= av_mallocz(sizeof(*pkt));
+ if(!pkt) continue;
+ av_init_packet(pkt);
+
+ pthread_mutex_lock(&c->task_fifo_mutex);
+ while (av_fifo_size(c->task_fifo) <= 0 || c->exit) {
+ if(c->exit){
+ pthread_mutex_unlock(&c->task_fifo_mutex);
+ goto end;
+ }
+ pthread_cond_wait(&c->task_fifo_cond, &c->task_fifo_mutex);
+ }
+ av_fifo_generic_read(c->task_fifo, &task, sizeof(task), NULL);
+ pthread_mutex_unlock(&c->task_fifo_mutex);
+ frame = task.indata;
+
+ ret = avcodec_encode_video2(avctx, pkt, frame, &got_packet);
+ pthread_mutex_lock(&c->buffer_mutex);
+ av_frame_unref(frame);
+ pthread_mutex_unlock(&c->buffer_mutex);
+ av_frame_free(&frame);
+ if(got_packet) {
+ av_dup_packet(pkt);
+ } else {
+ pkt->data = NULL;
+ pkt->size = 0;
+ }
+ pthread_mutex_lock(&c->finished_task_mutex);
+ c->finished_tasks[task.index].outdata = pkt; pkt = NULL;
+ c->finished_tasks[task.index].return_code = ret;
+ pthread_cond_signal(&c->finished_task_cond);
+ pthread_mutex_unlock(&c->finished_task_mutex);
+ }
+end:
+ av_free(pkt);
+ pthread_mutex_lock(&c->buffer_mutex);
+ avcodec_close(avctx);
+ pthread_mutex_unlock(&c->buffer_mutex);
+ av_freep(&avctx);
+ return NULL;
+}
+
+int ff_frame_thread_encoder_init(AVCodecContext *avctx, AVDictionary *options){
+ int i=0;
+ ThreadContext *c;
+
+
+ if( !(avctx->thread_type & FF_THREAD_FRAME)
+ || !(avctx->codec->capabilities & CODEC_CAP_INTRA_ONLY))
+ return 0;
+
+ if( !avctx->thread_count
+ && avctx->codec_id == AV_CODEC_ID_MJPEG
- && !(avctx->flags & CODEC_FLAG_QSCALE))
++ && !(avctx->flags & AV_CODEC_FLAG_QSCALE)) {
+ av_log(avctx, AV_LOG_DEBUG,
+ "Forcing thread count to 1 for MJPEG encoding, use -thread_type slice "
+ "or a constant quantizer if you want to use multiple cpu cores\n");
+ avctx->thread_count = 1;
+ }
+ if( avctx->thread_count > 1
+ && avctx->codec_id == AV_CODEC_ID_MJPEG
- if (avctx->flags & CODEC_FLAG_PASS1)
++ && !(avctx->flags & AV_CODEC_FLAG_QSCALE))
+ av_log(avctx, AV_LOG_WARNING,
+ "MJPEG CBR encoding works badly with frame multi-threading, consider "
+ "using -threads 1, -thread_type slice or a constant quantizer.\n");
+
+ if (avctx->codec_id == AV_CODEC_ID_HUFFYUV ||
+ avctx->codec_id == AV_CODEC_ID_FFVHUFF) {
+ int warn = 0;
++ if (avctx->flags & AV_CODEC_FLAG_PASS1)
+ warn = 1;
+ else if(avctx->context_model > 0) {
+ AVDictionaryEntry *t = av_dict_get(options, "non_deterministic",
+ NULL, AV_DICT_MATCH_CASE);
+ warn = !t || !t->value || !atoi(t->value) ? 1 : 0;
+ }
+ // huffyuv does not support these with multiple frame threads currently
+ if (warn) {
+ av_log(avctx, AV_LOG_WARNING,
+ "Forcing thread count to 1 for huffyuv encoding with first pass or context 1\n");
+ avctx->thread_count = 1;
+ }
+ }
+
+ if(!avctx->thread_count) {
+ avctx->thread_count = av_cpu_count();
+ avctx->thread_count = FFMIN(avctx->thread_count, MAX_THREADS);
+ }
+
+ if(avctx->thread_count <= 1)
+ return 0;
+
+ if(avctx->thread_count > MAX_THREADS)
+ return AVERROR(EINVAL);
+
+ av_assert0(!avctx->internal->frame_thread_encoder);
+ c = avctx->internal->frame_thread_encoder = av_mallocz(sizeof(ThreadContext));
+ if(!c)
+ return AVERROR(ENOMEM);
+
+ c->parent_avctx = avctx;
+
+ c->task_fifo = av_fifo_alloc_array(BUFFER_SIZE, sizeof(Task));
+ if(!c->task_fifo)
+ goto fail;
+
+ pthread_mutex_init(&c->task_fifo_mutex, NULL);
+ pthread_mutex_init(&c->finished_task_mutex, NULL);
+ pthread_mutex_init(&c->buffer_mutex, NULL);
+ pthread_cond_init(&c->task_fifo_cond, NULL);
+ pthread_cond_init(&c->finished_task_cond, NULL);
+
+ for(i=0; i<avctx->thread_count ; i++){
+ AVDictionary *tmp = NULL;
+ void *tmpv;
+ AVCodecContext *thread_avctx = avcodec_alloc_context3(avctx->codec);
+ if(!thread_avctx)
+ goto fail;
+ tmpv = thread_avctx->priv_data;
+ *thread_avctx = *avctx;
+ thread_avctx->priv_data = tmpv;
+ thread_avctx->internal = NULL;
+ memcpy(thread_avctx->priv_data, avctx->priv_data, avctx->codec->priv_data_size);
+ thread_avctx->thread_count = 1;
+ thread_avctx->active_thread_type &= ~FF_THREAD_FRAME;
+
+ av_dict_copy(&tmp, options, 0);
+ av_dict_set(&tmp, "threads", "1", 0);
+ if(avcodec_open2(thread_avctx, avctx->codec, &tmp) < 0) {
+ av_dict_free(&tmp);
+ goto fail;
+ }
+ av_dict_free(&tmp);
+ av_assert0(!thread_avctx->internal->frame_thread_encoder);
+ thread_avctx->internal->frame_thread_encoder = c;
+ if(pthread_create(&c->worker[i], NULL, worker, thread_avctx)) {
+ goto fail;
+ }
+ }
+
+ avctx->active_thread_type = FF_THREAD_FRAME;
+
+ return 0;
+fail:
+ avctx->thread_count = i;
+ av_log(avctx, AV_LOG_ERROR, "ff_frame_thread_encoder_init failed\n");
+ ff_frame_thread_encoder_free(avctx);
+ return -1;
+}
+
+void ff_frame_thread_encoder_free(AVCodecContext *avctx){
+ int i;
+ ThreadContext *c= avctx->internal->frame_thread_encoder;
+
+ pthread_mutex_lock(&c->task_fifo_mutex);
+ c->exit = 1;
+ pthread_cond_broadcast(&c->task_fifo_cond);
+ pthread_mutex_unlock(&c->task_fifo_mutex);
+
+ for (i=0; i<avctx->thread_count; i++) {
+ pthread_join(c->worker[i], NULL);
+ }
+
+ pthread_mutex_destroy(&c->task_fifo_mutex);
+ pthread_mutex_destroy(&c->finished_task_mutex);
+ pthread_mutex_destroy(&c->buffer_mutex);
+ pthread_cond_destroy(&c->task_fifo_cond);
+ pthread_cond_destroy(&c->finished_task_cond);
+ av_fifo_freep(&c->task_fifo);
+ av_freep(&avctx->internal->frame_thread_encoder);
+}
+
+int ff_thread_video_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet_ptr){
+ ThreadContext *c = avctx->internal->frame_thread_encoder;
+ Task task;
+ int ret;
+
+ av_assert1(!*got_packet_ptr);
+
+ if(frame){
+ AVFrame *new = av_frame_alloc();
+ if(!new)
+ return AVERROR(ENOMEM);
+ ret = av_frame_ref(new, frame);
+ if(ret < 0) {
+ av_frame_free(&new);
+ return ret;
+ }
+
+ task.index = c->task_index;
+ task.indata = (void*)new;
+ pthread_mutex_lock(&c->task_fifo_mutex);
+ av_fifo_generic_write(c->task_fifo, &task, sizeof(task), NULL);
+ pthread_cond_signal(&c->task_fifo_cond);
+ pthread_mutex_unlock(&c->task_fifo_mutex);
+
+ c->task_index = (c->task_index+1) % BUFFER_SIZE;
+
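+ /* Do not block while the pipeline is still filling: return without a
+  * packet if the oldest slot has no output yet and at most thread_count
+  * frames are in flight. */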
+ if(!c->finished_tasks[c->finished_task_index].outdata && (c->task_index - c->finished_task_index) % BUFFER_SIZE <= avctx->thread_count)
+ return 0;
+ }
+
+ if(c->task_index == c->finished_task_index)
+ return 0;
+
+ pthread_mutex_lock(&c->finished_task_mutex);
+ while (!c->finished_tasks[c->finished_task_index].outdata) {
+ pthread_cond_wait(&c->finished_task_cond, &c->finished_task_mutex);
+ }
+ task = c->finished_tasks[c->finished_task_index];
+ *pkt = *(AVPacket*)(task.outdata);
+ if(pkt->data)
+ *got_packet_ptr = 1;
+ av_freep(&c->finished_tasks[c->finished_task_index].outdata);
+ c->finished_task_index = (c->finished_task_index+1) % BUFFER_SIZE;
+ pthread_mutex_unlock(&c->finished_task_mutex);
+
+ return task.return_code;
+}
}
static inline void ff_h263_encode_motion_vector(MpegEncContext * s, int x, int y, int f_code){
- if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT) {
+ if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT) {
skip_put_bits(&s->pb,
- h263_get_motion_length(s, x, f_code)
- +h263_get_motion_length(s, y, f_code));
+ h263_get_motion_length(x, f_code)
+ +h263_get_motion_length(y, f_code));
}else{
- ff_h263_encode_motion(s, x, f_code);
- ff_h263_encode_motion(s, y, f_code);
+ ff_h263_encode_motion(&s->pb, x, f_code);
+ ff_h263_encode_motion(&s->pb, y, f_code);
}
}
if (avctx->codec->id == AV_CODEC_ID_MSS2)
return AV_PIX_FMT_YUV420P;
- if (CONFIG_GRAY && (avctx->flags & CODEC_FLAG_GRAY)) {
++ if (CONFIG_GRAY && (avctx->flags & AV_CODEC_FLAG_GRAY)) {
+ if (avctx->color_range == AVCOL_RANGE_UNSPECIFIED)
+ avctx->color_range = AVCOL_RANGE_MPEG;
+ return AV_PIX_FMT_GRAY8;
+ }
+
return avctx->pix_fmt = ff_get_format(avctx, avctx->codec->pix_fmts);
}
if (!dst)
return NULL;
- if(h->avctx->flags2 & CODEC_FLAG2_FAST){
+ if(i>=length-1){ //no escaped 0
+ *dst_length= length;
+ *consumed= length+1; //+1 for the header
++ if(h->avctx->flags2 & AV_CODEC_FLAG2_FAST){
+ return src;
+ }else{
+ memcpy(dst, src, length);
+ return dst;
+ }
+ }
+
memcpy(dst, src, i);
si = di = i;
while (si + 2 < length) {
int next_avc;
int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts
int nal_index;
+ int idr_cleared=0;
int ret = 0;
+ h->nal_unit_type= 0;
+
+ if(!h->slice_context_count)
+ h->slice_context_count= 1;
h->max_contexts = h->slice_context_count;
- if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS)) {
+ if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
h->current_slice = 0;
if (!h->first_field)
h->cur_pic_ptr = NULL;
// "recovered".
if (h->nal_unit_type == NAL_IDR_SLICE)
h->frame_recovered |= FRAME_RECOVERED_IDR;
- h->frame_recovered |= 3*!!(avctx->flags2 & CODEC_FLAG2_SHOW_ALL);
- h->frame_recovered |= 3*!!(avctx->flags & CODEC_FLAG_OUTPUT_CORRUPT);
++ h->frame_recovered |= 3*!!(avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL);
++ h->frame_recovered |= 3*!!(avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT);
+#if 1
+ h->cur_pic_ptr->recovered |= h->frame_recovered;
+#else
h->cur_pic_ptr->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR);
+#endif
if (h->current_slice == 1) {
- if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS))
+ if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS))
decode_postinit(h, nal_index >= nals_needed);
if (h->avctx->hwaccel &&
goto out;
}
- if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) {
+ if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) {
- if (avctx->skip_frame >= AVDISCARD_NONREF)
- return 0;
+ if (avctx->skip_frame >= AVDISCARD_NONREF ||
+ buf_size >= 4 && !memcmp("Q264", buf, 4))
+ return buf_size;
av_log(avctx, AV_LOG_ERROR, "no frame!\n");
return AVERROR_INVALIDDATA;
}
unsigned int uvlinesize,
int pixel_shift)
{
- int chroma = CHROMA(h) && !(CONFIG_GRAY && (h->flags&CODEC_FLAG_GRAY));
- int chroma = !(CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
++ int chroma = CHROMA(h) && !(CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
int chroma444 = CHROMA444(h);
int chroma422 = CHROMA422(h);
const int mb_type = h->cur_pic.mb_type[mb_xy];
const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4;
int first_vertical_edge_done = 0;
- int chroma = CHROMA(h) && !(CONFIG_GRAY && (h->flags&CODEC_FLAG_GRAY));
- int chroma = !(CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
++ int chroma = CHROMA(h) && !(CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
int a = 52 + sl->slice_alpha_c0_offset - qp_bd_offset;
int b = 52 + sl->slice_beta_offset - qp_bd_offset;
int weight1 = 64 - weight0;
luma_weight_avg(dest_y, tmp_y, sl->mb_linesize,
height, 5, weight0, weight1, 0);
- if (!CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
- chroma_weight_avg(dest_cb, tmp_cb, sl->mb_uvlinesize,
- chroma_height, 5, weight0, weight1, 0);
- chroma_weight_avg(dest_cr, tmp_cr, sl->mb_uvlinesize,
- chroma_height, 5, weight0, weight1, 0);
++ if (!CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
+ chroma_weight_avg(dest_cb, tmp_cb, sl->mb_uvlinesize,
+ chroma_height, 5, weight0, weight1, 0);
+ chroma_weight_avg(dest_cr, tmp_cr, sl->mb_uvlinesize,
+ chroma_height, 5, weight0, weight1, 0);
+ }
} else {
luma_weight_avg(dest_y, tmp_y, sl->mb_linesize, height,
sl->luma_log2_weight_denom,
sl->luma_weight[refn1][1][0],
sl->luma_weight[refn0][0][1] +
sl->luma_weight[refn1][1][1]);
- if (!CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
- chroma_weight_avg(dest_cb, tmp_cb, sl->mb_uvlinesize, chroma_height,
- sl->chroma_log2_weight_denom,
- sl->chroma_weight[refn0][0][0][0],
- sl->chroma_weight[refn1][1][0][0],
- sl->chroma_weight[refn0][0][0][1] +
- sl->chroma_weight[refn1][1][0][1]);
- chroma_weight_avg(dest_cr, tmp_cr, sl->mb_uvlinesize, chroma_height,
- sl->chroma_log2_weight_denom,
- sl->chroma_weight[refn0][0][1][0],
- sl->chroma_weight[refn1][1][1][0],
- sl->chroma_weight[refn0][0][1][1] +
- sl->chroma_weight[refn1][1][1][1]);
++ if (!CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
+ chroma_weight_avg(dest_cb, tmp_cb, sl->mb_uvlinesize, chroma_height,
+ sl->chroma_log2_weight_denom,
+ sl->chroma_weight[refn0][0][0][0],
+ sl->chroma_weight[refn1][1][0][0],
+ sl->chroma_weight[refn0][0][0][1] +
+ sl->chroma_weight[refn1][1][0][1]);
+ chroma_weight_avg(dest_cr, tmp_cr, sl->mb_uvlinesize, chroma_height,
+ sl->chroma_log2_weight_denom,
+ sl->chroma_weight[refn0][0][1][0],
+ sl->chroma_weight[refn1][1][1][0],
+ sl->chroma_weight[refn0][0][1][1] +
+ sl->chroma_weight[refn1][1][1][1]);
+ }
}
} else {
int list = list1 ? 1 : 0;
sl->luma_log2_weight_denom,
sl->luma_weight[refn][list][0],
sl->luma_weight[refn][list][1]);
- if (!CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
- if (sl->use_weight_chroma) {
- chroma_weight_op(dest_cb, sl->mb_uvlinesize, chroma_height,
- sl->chroma_log2_weight_denom,
- sl->chroma_weight[refn][list][0][0],
- sl->chroma_weight[refn][list][0][1]);
- chroma_weight_op(dest_cr, sl->mb_uvlinesize, chroma_height,
- sl->chroma_log2_weight_denom,
- sl->chroma_weight[refn][list][1][0],
- sl->chroma_weight[refn][list][1][1]);
++ if (!CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
+ if (sl->use_weight_chroma) {
+ chroma_weight_op(dest_cb, sl->mb_uvlinesize, chroma_height,
+ sl->chroma_log2_weight_denom,
+ sl->chroma_weight[refn][list][0][0],
+ sl->chroma_weight[refn][list][0][1]);
+ chroma_weight_op(dest_cr, sl->mb_uvlinesize, chroma_height,
+ sl->chroma_log2_weight_denom,
+ sl->chroma_weight[refn][list][1][0],
+ sl->chroma_weight[refn][list][1][1]);
+ }
}
}
}
XCHG(sl->top_borders[top_idx][sl->mb_x + 1],
src_y + (17 << pixel_shift), 1);
}
- if (simple || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
- }
- if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
- if (chroma444) {
- if (deblock_top) {
++ if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
+ if (chroma444) {
if (deblock_topleft) {
XCHG(top_border_m1 + (24 << pixel_shift), src_cb - (7 << pixel_shift), 1);
XCHG(top_border_m1 + (40 << pixel_shift), src_cr - (7 << pixel_shift), 1);
} else {
for (i = 0; i < 16; i++)
memcpy(dest_y + i * linesize, sl->intra_pcm_ptr + i * 16, 16);
- if (SIMPLE || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
+ if (SIMPLE || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
if (!h->sps.chroma_format_idc) {
- for (i = 0; i < block_h; i++) {
- memset(dest_cb + i * uvlinesize, 128, 8);
- memset(dest_cr + i * uvlinesize, 128, 8);
+ for (i = 0; i < 8; i++) {
+ memset(dest_cb + i * uvlinesize, 1 << (bit_depth - 1), 8);
+ memset(dest_cr + i * uvlinesize, 1 << (bit_depth - 1), 8);
}
} else {
const uint8_t *src_cb = sl->intra_pcm_ptr + 256;
unsigned int crop_right = get_ue_golomb(&h->gb);
unsigned int crop_top = get_ue_golomb(&h->gb);
unsigned int crop_bottom = get_ue_golomb(&h->gb);
+ int width = 16 * sps->mb_width;
+ int height = 16 * sps->mb_height * (2 - sps->frame_mbs_only_flag);
- if (h->avctx->flags2 & CODEC_FLAG2_IGNORE_CROP) {
+ if (h->avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP) {
av_log(h->avctx, AV_LOG_DEBUG, "discarding sps cropping, original "
"values are l:%d r:%d t:%d b:%d\n",
crop_left, crop_right, crop_top, crop_bottom);
pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
}
}
- if (CONFIG_GRAY && !h->avctx->hwaccel && h->flags & CODEC_FLAG_GRAY && pic->f->data[2]) {
++ if (CONFIG_GRAY && !h->avctx->hwaccel && h->flags & AV_CODEC_FLAG_GRAY && pic->f->data[2]) {
+ int h_chroma_shift, v_chroma_shift;
+ av_pix_fmt_get_chroma_sub_sample(pic->f->format,
+ &h_chroma_shift, &v_chroma_shift);
+
+ for(i=0; i<FF_CEIL_RSHIFT(pic->f->height, v_chroma_shift); i++) {
+ memset(pic->f->data[1] + pic->f->linesize[1]*i,
+ 0x80, FF_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
+ memset(pic->f->data[2] + pic->f->linesize[2]*i,
+ 0x80, FF_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
+ }
+ }
if (!h->qscale_table_pool) {
ret = init_table_pools(h);
sl->mb_skip_run = -1;
+ av_assert0(h->block_offset[15] == (4 * ((scan8[15] - scan8[0]) & 7) << h->pixel_shift) + 4 * sl->linesize * ((scan8[15] - scan8[0]) >> 3));
+
sl->is_complex = FRAME_MBAFF(h) || h->picture_structure != PICT_FRAME ||
avctx->codec_id != AV_CODEC_ID_H264 ||
- (CONFIG_GRAY && (h->flags & CODEC_FLAG_GRAY));
+ (CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
+ if (!(h->avctx->active_thread_type & FF_THREAD_SLICE) && h->picture_structure == PICT_FRAME && h->slice_ctx[0].er.error_status_table) {
+ const int start_i = av_clip(sl->resync_mb_x + sl->resync_mb_y * h->mb_width, 0, h->mb_num - 1);
+ if (start_i) {
+ int prev_status = h->slice_ctx[0].er.error_status_table[h->slice_ctx[0].er.mb_index2xy[start_i - 1]];
+ prev_status &= ~ VP_START;
+ if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END))
+ h->slice_ctx[0].er.error_occurred = 1;
+ }
+ }
+
if (h->pps.cabac) {
/* realign */
align_get_bits(&sl->gb);
ff_huffyuv_common_init(avctx);
ff_huffyuvencdsp_init(&s->hencdsp);
- avctx->extradata = av_mallocz(1024*30); // 256*3+4 == 772
- avctx->stats_out = av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
+ avctx->extradata = av_mallocz(3*MAX_N + 4);
- if (s->flags&CODEC_FLAG_PASS1) {
++ if (s->flags&AV_CODEC_FLAG_PASS1) {
+#define STATS_OUT_SIZE 21*MAX_N*3 + 4
+ avctx->stats_out = av_mallocz(STATS_OUT_SIZE); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
+ if (!avctx->stats_out)
+ return AVERROR(ENOMEM);
+ }
s->version = 2;
- if (!avctx->extradata || !avctx->stats_out)
+ if (!avctx->extradata)
return AVERROR(ENOMEM);
#if FF_API_CODED_FRAME
break;
default:
av_log(avctx, AV_LOG_ERROR, "format not supported\n");
- return -1;
+ return AVERROR(EINVAL);
}
+ s->n = 1<<s->bps;
+ s->vlc_n = FFMIN(s->n, MAX_VLC_N);
+
avctx->bits_per_coded_sample = s->bitstream_bpp;
- s->decorrelate = s->bitstream_bpp >= 24;
+ s->decorrelate = s->bitstream_bpp >= 24 && !s->yuv && !(desc->flags & AV_PIX_FMT_FLAG_PLANAR);
s->predictor = avctx->prediction_method;
- s->interlaced = avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
+ s->interlaced = avctx->flags & AV_CODEC_FLAG_INTERLACED_ME ? 1 : 0;
if (avctx->context_model == 1) {
s->context = avctx->context_model;
- if (s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)) {
+ if (s->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) {
av_log(avctx, AV_LOG_ERROR,
"context=1 is not compatible with "
"2 pass huffyuv encoding\n");
return 0;
}
- if (s->flags & CODEC_FLAG_PASS1) {
+static int encode_plane_bitstream(HYuvContext *s, int width, int plane)
+{
+ int i, count = width/2;
+
+ if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < count * s->bps / 2) {
+ av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+ return -1;
+ }
+
+#define LOADEND\
+ int y0 = s->temp[0][width-1];
+#define LOADEND_14\
+ int y0 = s->temp16[0][width-1] & mask;
+#define LOADEND_16\
+ int y0 = s->temp16[0][width-1];
+#define STATEND\
+ s->stats[plane][y0]++;
+#define STATEND_16\
+ s->stats[plane][y0>>2]++;
+#define WRITEEND\
+ put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);
+#define WRITEEND_16\
+ put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
+ put_bits(&s->pb, 2, y0&3);
+
+#define LOAD2\
+ int y0 = s->temp[0][2 * i];\
+ int y1 = s->temp[0][2 * i + 1];
+#define LOAD2_14\
+ int y0 = s->temp16[0][2 * i] & mask;\
+ int y1 = s->temp16[0][2 * i + 1] & mask;
+#define LOAD2_16\
+ int y0 = s->temp16[0][2 * i];\
+ int y1 = s->temp16[0][2 * i + 1];
+#define STAT2\
+ s->stats[plane][y0]++;\
+ s->stats[plane][y1]++;
+#define STAT2_16\
+ s->stats[plane][y0>>2]++;\
+ s->stats[plane][y1>>2]++;
+#define WRITE2\
+ put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);\
+ put_bits(&s->pb, s->len[plane][y1], s->bits[plane][y1]);
+#define WRITE2_16\
+ put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
+ put_bits(&s->pb, 2, y0&3);\
+ put_bits(&s->pb, s->len[plane][y1>>2], s->bits[plane][y1>>2]);\
+ put_bits(&s->pb, 2, y1&3);
+
+ if (s->bps <= 8) {
- if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
++ if (s->flags & AV_CODEC_FLAG_PASS1) {
+ for (i = 0; i < count; i++) {
+ LOAD2;
+ STAT2;
+ }
+ if (width&1) {
+ LOADEND;
+ STATEND;
+ }
+ }
- if (s->flags & CODEC_FLAG_PASS1) {
++ if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
+ return 0;
+
+ if (s->context) {
+ for (i = 0; i < count; i++) {
+ LOAD2;
+ STAT2;
+ WRITE2;
+ }
+ if (width&1) {
+ LOADEND;
+ STATEND;
+ WRITEEND;
+ }
+ } else {
+ for (i = 0; i < count; i++) {
+ LOAD2;
+ WRITE2;
+ }
+ if (width&1) {
+ LOADEND;
+ WRITEEND;
+ }
+ }
+ } else if (s->bps <= 14) {
+ int mask = s->n - 1;
- if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
++ if (s->flags & AV_CODEC_FLAG_PASS1) {
+ for (i = 0; i < count; i++) {
+ LOAD2_14;
+ STAT2;
+ }
+ if (width&1) {
+ LOADEND_14;
+ STATEND;
+ }
+ }
- if (s->flags & CODEC_FLAG_PASS1) {
++ if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
+ return 0;
+
+ if (s->context) {
+ for (i = 0; i < count; i++) {
+ LOAD2_14;
+ STAT2;
+ WRITE2;
+ }
+ if (width&1) {
+ LOADEND_14;
+ STATEND;
+ WRITEEND;
+ }
+ } else {
+ for (i = 0; i < count; i++) {
+ LOAD2_14;
+ WRITE2;
+ }
+ if (width&1) {
+ LOADEND_14;
+ WRITEEND;
+ }
+ }
+ } else {
- if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
++ if (s->flags & AV_CODEC_FLAG_PASS1) {
+ for (i = 0; i < count; i++) {
+ LOAD2_16;
+ STAT2_16;
+ }
+ if (width&1) {
+ LOADEND_16;
+ STATEND_16;
+ }
+ }
++ if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
+ return 0;
+
+ if (s->context) {
+ for (i = 0; i < count; i++) {
+ LOAD2_16;
+ STAT2_16;
+ WRITE2_16;
+ }
+ if (width&1) {
+ LOADEND_16;
+ STATEND_16;
+ WRITEEND_16;
+ }
+ } else {
+ for (i = 0; i < count; i++) {
+ LOAD2_16;
+ WRITE2_16;
+ }
+ if (width&1) {
+ LOADEND_16;
+ WRITEEND_16;
+ }
+ }
+ }
+#undef LOAD2
+#undef STAT2
+#undef WRITE2
+ return 0;
+}
+
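The >14-bit paths above never index the VLC tables with the raw sample: WRITE2_16/WRITEEND_16 code the top bits (val >> 2) through the Huffman tables and append the two low bits verbatim. A tiny sketch of that split and its lossless reassembly (the helper name is hypothetical):

    #include <assert.h>
    #include <stdint.h>

    /* Split a 16-bit sample as WRITE2_16 does: VLC symbol = val >> 2, plus 2 raw bits. */
    static void split_sample(uint16_t val, unsigned *vlc_symbol, unsigned *raw_bits)
    {
        *vlc_symbol = val >> 2;   /* coded via s->len[plane][] / s->bits[plane][] */
        *raw_bits   = val & 3;    /* written with put_bits(&s->pb, 2, val & 3)    */
    }

    int main(void)
    {
        unsigned sym, low;
        split_sample(0xABCD, &sym, &low);
        assert(((sym << 2) | low) == 0xABCD);  /* reassembly is lossless */
        return 0;
    }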
static int encode_gray_bitstream(HYuvContext *s, int count)
{
int i;
put_bits(&s->pb, 15, 0);
size /= 4;
- if ((s->flags&CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
+ if ((s->flags & AV_CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
int j;
char *p = avctx->stats_out;
- char *end = p + 1024*30;
- for (i = 0; i < 3; i++) {
- for (j = 0; j < 256; j++) {
+ char *end = p + STATS_OUT_SIZE;
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < s->vlc_n; j++) {
snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
p += strlen(p);
s->stats[i][j]= 0;
}
snprintf(p, end-p, "\n");
p++;
+ if (end <= p)
+ return AVERROR(ENOMEM);
}
- } else
+ } else if (avctx->stats_out)
avctx->stats_out[0] = '\0';
- if (!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
+ if (!(s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
flush_put_bits(&s->pb);
s->bdsp.bswap_buf((uint32_t *) pkt->data, (uint32_t *) pkt->data, size);
}
return ret;
}
ff_bswapdsp_init(&q->bdsp);
- q->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
- avpriv_float_dsp_init(&q->fdsp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
++ q->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
+ if (!q->fdsp) {
+ ff_fft_end(&q->fft);
+
+ return AVERROR(ENOMEM);
+ }
+
avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
avctx->channel_layout = avctx->channels == 1 ? AV_CH_LAYOUT_MONO
: AV_CH_LAYOUT_STEREO;
--- /dev/null
- if (s->avctx->flags & CODEC_FLAG_BITEXACT)
+/*
+ * JPEG2000 image encoder
+ * Copyright (c) 2007 Kamil Nowosad
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * JPEG2000 image encoder
+ * @file
+ * @author Kamil Nowosad
+ */
+
+#include <float.h>
+#include "avcodec.h"
+#include "internal.h"
+#include "bytestream.h"
+#include "jpeg2000.h"
+#include "libavutil/common.h"
+#include "libavutil/opt.h"
+
+#define NMSEDEC_BITS 7
+#define NMSEDEC_FRACBITS (NMSEDEC_BITS-1)
+#define WMSEDEC_SHIFT 13 ///< must be >= 13
+#define LAMBDA_SCALE (100000000LL << (WMSEDEC_SHIFT - 13))
+
+#define CODEC_JP2 1
+#define CODEC_J2K 0
+
+static int lut_nmsedec_ref [1<<NMSEDEC_BITS],
+ lut_nmsedec_ref0[1<<NMSEDEC_BITS],
+ lut_nmsedec_sig [1<<NMSEDEC_BITS],
+ lut_nmsedec_sig0[1<<NMSEDEC_BITS];
+
+static const int dwt_norms[2][4][10] = { // [dwt_type][band][rlevel] (multiplied by 10000)
+ {{10000, 19650, 41770, 84030, 169000, 338400, 676900, 1353000, 2706000, 5409000},
+ {20220, 39890, 83550, 170400, 342700, 686300, 1373000, 2746000, 5490000},
+ {20220, 39890, 83550, 170400, 342700, 686300, 1373000, 2746000, 5490000},
+ {20800, 38650, 83070, 171800, 347100, 695900, 1393000, 2786000, 5572000}},
+
+ {{10000, 15000, 27500, 53750, 106800, 213400, 426700, 853300, 1707000, 3413000},
+ {10380, 15920, 29190, 57030, 113300, 226400, 452500, 904800, 1809000},
+ {10380, 15920, 29190, 57030, 113300, 226400, 452500, 904800, 1809000},
+ { 7186, 9218, 15860, 30430, 60190, 120100, 240000, 479700, 959300}}
+};
+
+typedef struct {
+ Jpeg2000Component *comp;
+} Jpeg2000Tile;
+
+typedef struct {
+ AVClass *class;
+ AVCodecContext *avctx;
+ const AVFrame *picture;
+
+ int width, height; ///< image width and height
+ uint8_t cbps[4]; ///< bits per sample in particular components
+ int chroma_shift[2];
+ uint8_t planar;
+ int ncomponents;
+ int tile_width, tile_height; ///< tile size
+ int numXtiles, numYtiles;
+
+ uint8_t *buf_start;
+ uint8_t *buf;
+ uint8_t *buf_end;
+ int bit_index;
+
+ int64_t lambda;
+
+ Jpeg2000CodingStyle codsty;
+ Jpeg2000QuantStyle qntsty;
+
+ Jpeg2000Tile *tile;
+
+ int format;
+} Jpeg2000EncoderContext;
+
+
+/* debug */
+#if 0
+#undef ifprintf
+#undef printf
+
+static void nspaces(FILE *fd, int n)
+{
+ while(n--) putc(' ', fd);
+}
+
+static void printcomp(Jpeg2000Component *comp)
+{
+ int i;
+ for (i = 0; i < comp->y1 - comp->y0; i++)
+ ff_jpeg2000_printv(comp->i_data + i * (comp->x1 - comp->x0), comp->x1 - comp->x0);
+}
+
+static void dump(Jpeg2000EncoderContext *s, FILE *fd)
+{
+ int tileno, compno, reslevelno, bandno, precno;
+ fprintf(fd, "XSiz = %d, YSiz = %d, tile_width = %d, tile_height = %d\n"
+ "numXtiles = %d, numYtiles = %d, ncomponents = %d\n"
+ "tiles:\n",
+ s->width, s->height, s->tile_width, s->tile_height,
+ s->numXtiles, s->numYtiles, s->ncomponents);
+ for (tileno = 0; tileno < s->numXtiles * s->numYtiles; tileno++){
+ Jpeg2000Tile *tile = s->tile + tileno;
+ nspaces(fd, 2);
+ fprintf(fd, "tile %d:\n", tileno);
+ for(compno = 0; compno < s->ncomponents; compno++){
+ Jpeg2000Component *comp = tile->comp + compno;
+ nspaces(fd, 4);
+ fprintf(fd, "component %d:\n", compno);
+ nspaces(fd, 4);
+ fprintf(fd, "x0 = %d, x1 = %d, y0 = %d, y1 = %d\n",
+ comp->x0, comp->x1, comp->y0, comp->y1);
+ for(reslevelno = 0; reslevelno < s->nreslevels; reslevelno++){
+ Jpeg2000ResLevel *reslevel = comp->reslevel + reslevelno;
+ nspaces(fd, 6);
+ fprintf(fd, "reslevel %d:\n", reslevelno);
+ nspaces(fd, 6);
+ fprintf(fd, "x0 = %d, x1 = %d, y0 = %d, y1 = %d, nbands = %d\n",
+ reslevel->x0, reslevel->x1, reslevel->y0,
+ reslevel->y1, reslevel->nbands);
+ for(bandno = 0; bandno < reslevel->nbands; bandno++){
+ Jpeg2000Band *band = reslevel->band + bandno;
+ nspaces(fd, 8);
+ fprintf(fd, "band %d:\n", bandno);
+ nspaces(fd, 8);
+ fprintf(fd, "x0 = %d, x1 = %d, y0 = %d, y1 = %d,"
+ "codeblock_width = %d, codeblock_height = %d cblknx = %d cblkny = %d\n",
+ band->x0, band->x1,
+ band->y0, band->y1,
+ band->codeblock_width, band->codeblock_height,
+ band->cblknx, band->cblkny);
+ for (precno = 0; precno < reslevel->num_precincts_x * reslevel->num_precincts_y; precno++){
+ Jpeg2000Prec *prec = band->prec + precno;
+ nspaces(fd, 10);
+ fprintf(fd, "prec %d:\n", precno);
+ nspaces(fd, 10);
+ fprintf(fd, "xi0 = %d, xi1 = %d, yi0 = %d, yi1 = %d\n",
+ prec->xi0, prec->xi1, prec->yi0, prec->yi1);
+ }
+ }
+ }
+ }
+ }
+}
+#endif
+
+/* bitstream routines */
+
+/** write the bit val, n times */
+static void put_bits(Jpeg2000EncoderContext *s, int val, int n) // TODO: optimize
+{
+ while (n-- > 0){
+ if (s->bit_index == 8)
+ {
+ s->bit_index = *s->buf == 0xff;
+ *(++s->buf) = 0;
+ }
+ *s->buf |= val << (7 - s->bit_index++);
+ }
+}
+
+/** put n least significant bits of a number num */
+static void put_num(Jpeg2000EncoderContext *s, int num, int n)
+{
+ while(--n >= 0)
+ put_bits(s, (num >> n) & 1, 1);
+}
+
+/** flush the bitstream */
+static void j2k_flush(Jpeg2000EncoderContext *s)
+{
+ if (s->bit_index){
+ s->bit_index = 0;
+ s->buf++;
+ }
+}
+
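put_bits()/j2k_flush() form an MSB-first writer with the JPEG 2000 byte-stuffing rule: whenever the previous byte is 0xff, only seven bits of the next byte carry data, which is what the `*s->buf == 0xff` test enforces. A standalone sketch demonstrating the effect (buffer bounds checking omitted; names are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    typedef struct {
        uint8_t *buf;
        int      bit_index;   /* bits already used in *buf */
    } BitWriter;

    /* MSB-first writer: the top bit of a byte that follows 0xff stays 0. */
    static void put_bit(BitWriter *w, int bit)
    {
        if (w->bit_index == 8) {
            w->bit_index = (*w->buf == 0xff);   /* start at bit 6 after an 0xff byte */
            *++w->buf = 0;
        }
        *w->buf |= bit << (7 - w->bit_index++);
    }

    int main(void)
    {
        uint8_t buf[4] = { 0 };
        BitWriter w = { buf, 0 };
        for (int i = 0; i < 16; i++)
            put_bit(&w, 1);
        printf("%02x %02x %02x\n", buf[0], buf[1], buf[2]);  /* ff 7f 80 */
        return 0;
    }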
+/* tag tree routines */
+
+/** code the value stored in node */
+static void tag_tree_code(Jpeg2000EncoderContext *s, Jpeg2000TgtNode *node, int threshold)
+{
+ Jpeg2000TgtNode *stack[30];
+ int sp = 1, curval = 0;
+ stack[0] = node;
+
+ node = node->parent;
+ while(node){
+ if (node->vis){
+ curval = node->val;
+ break;
+ }
+ node->vis++;
+ stack[sp++] = node;
+ node = node->parent;
+ }
+ while(--sp >= 0){
+ if (stack[sp]->val >= threshold){
+ put_bits(s, 0, threshold - curval);
+ break;
+ }
+ put_bits(s, 0, stack[sp]->val - curval);
+ put_bits(s, 1, 1);
+ curval = stack[sp]->val;
+ }
+}
+
+/** update the value in node */
+static void tag_tree_update(Jpeg2000TgtNode *node)
+{
+ int lev = 0;
+ while (node->parent){
+ if (node->parent->val <= node->val)
+ break;
+ node->parent->val = node->val;
+ node = node->parent;
+ lev++;
+ }
+}
+
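tag_tree_update() pushes a leaf value towards the root only while the ancestors hold larger values, so every interior node ends up no larger than any value propagated through it; tag_tree_code() later emits the differences along that path. A minimal sketch of the update step alone, on a toy one-level tree (the root seed of 1000 simply stands for "larger than any leaf" in this example):

    #include <assert.h>
    #include <stddef.h>

    typedef struct Node { struct Node *parent; int val; } Node;

    /* Mirror of tag_tree_update(): push a leaf value up while ancestors are larger. */
    static void update(Node *node)
    {
        while (node->parent && node->parent->val > node->val) {
            node->parent->val = node->val;
            node = node->parent;
        }
    }

    int main(void)
    {
        Node root    = { NULL, 1000 };
        Node leaf[4] = { { &root, 3 }, { &root, 1 }, { &root, 4 }, { &root, 2 } };
        for (int i = 0; i < 4; i++)
            update(&leaf[i]);
        assert(root.val == 1);   /* the root holds the minimum leaf value */
        return 0;
    }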
+static int put_siz(Jpeg2000EncoderContext *s)
+{
+ int i;
+
+ if (s->buf_end - s->buf < 40 + 3 * s->ncomponents)
+ return -1;
+
+ bytestream_put_be16(&s->buf, JPEG2000_SIZ);
+ bytestream_put_be16(&s->buf, 38 + 3 * s->ncomponents); // Lsiz
+ bytestream_put_be16(&s->buf, 0); // Rsiz
+ bytestream_put_be32(&s->buf, s->width); // width
+ bytestream_put_be32(&s->buf, s->height); // height
+ bytestream_put_be32(&s->buf, 0); // X0Siz
+ bytestream_put_be32(&s->buf, 0); // Y0Siz
+
+ bytestream_put_be32(&s->buf, s->tile_width); // XTSiz
+ bytestream_put_be32(&s->buf, s->tile_height); // YTSiz
+ bytestream_put_be32(&s->buf, 0); // XT0Siz
+ bytestream_put_be32(&s->buf, 0); // YT0Siz
+ bytestream_put_be16(&s->buf, s->ncomponents); // CSiz
+
+ for (i = 0; i < s->ncomponents; i++){ // Ssiz_i XRsiz_i, YRsiz_i
+ bytestream_put_byte(&s->buf, 7);
+ bytestream_put_byte(&s->buf, i?1<<s->chroma_shift[0]:1);
+ bytestream_put_byte(&s->buf, i?1<<s->chroma_shift[1]:1);
+ }
+ return 0;
+}
+
+static int put_cod(Jpeg2000EncoderContext *s)
+{
+ Jpeg2000CodingStyle *codsty = &s->codsty;
+
+ if (s->buf_end - s->buf < 14)
+ return -1;
+
+ bytestream_put_be16(&s->buf, JPEG2000_COD);
+ bytestream_put_be16(&s->buf, 12); // Lcod
+ bytestream_put_byte(&s->buf, 0); // Scod
+ // SGcod
+ bytestream_put_byte(&s->buf, 0); // progression level
+ bytestream_put_be16(&s->buf, 1); // num of layers
+ if(s->avctx->pix_fmt == AV_PIX_FMT_YUV444P){
+ bytestream_put_byte(&s->buf, 0); // unspecified
+ }else{
+ bytestream_put_byte(&s->buf, 0); // unspecified
+ }
+ // SPcod
+ bytestream_put_byte(&s->buf, codsty->nreslevels - 1); // num of decomp. levels
+ bytestream_put_byte(&s->buf, codsty->log2_cblk_width-2); // cblk width
+ bytestream_put_byte(&s->buf, codsty->log2_cblk_height-2); // cblk height
+ bytestream_put_byte(&s->buf, 0); // cblk style
+ bytestream_put_byte(&s->buf, codsty->transform == FF_DWT53); // transformation
+ return 0;
+}
+
+static int put_qcd(Jpeg2000EncoderContext *s, int compno)
+{
+ int i, size;
+ Jpeg2000CodingStyle *codsty = &s->codsty;
+ Jpeg2000QuantStyle *qntsty = &s->qntsty;
+
+ if (qntsty->quantsty == JPEG2000_QSTY_NONE)
+ size = 4 + 3 * (codsty->nreslevels-1);
+ else // QSTY_SE
+ size = 5 + 6 * (codsty->nreslevels-1);
+
+ if (s->buf_end - s->buf < size + 2)
+ return -1;
+
+ bytestream_put_be16(&s->buf, JPEG2000_QCD);
+ bytestream_put_be16(&s->buf, size); // LQcd
+ bytestream_put_byte(&s->buf, (qntsty->nguardbits << 5) | qntsty->quantsty); // Sqcd
+ if (qntsty->quantsty == JPEG2000_QSTY_NONE)
+ for (i = 0; i < codsty->nreslevels * 3 - 2; i++)
+ bytestream_put_byte(&s->buf, qntsty->expn[i] << 3);
+ else // QSTY_SE
+ for (i = 0; i < codsty->nreslevels * 3 - 2; i++)
+ bytestream_put_be16(&s->buf, (qntsty->expn[i] << 11) | qntsty->mant[i]);
+ return 0;
+}
+
+static int put_com(Jpeg2000EncoderContext *s, int compno)
+{
+ int size = 4 + strlen(LIBAVCODEC_IDENT);
+
++ if (s->avctx->flags & AV_CODEC_FLAG_BITEXACT)
+ return 0;
+
+ if (s->buf_end - s->buf < size + 2)
+ return -1;
+
+ bytestream_put_be16(&s->buf, JPEG2000_COM);
+ bytestream_put_be16(&s->buf, size);
+ bytestream_put_be16(&s->buf, 1); // General use (ISO/IEC 8859-15 (Latin) values)
+
+ bytestream_put_buffer(&s->buf, LIBAVCODEC_IDENT, strlen(LIBAVCODEC_IDENT));
+
+ return 0;
+}
+
+static uint8_t *put_sot(Jpeg2000EncoderContext *s, int tileno)
+{
+ uint8_t *psotptr;
+
+ if (s->buf_end - s->buf < 12)
+ return NULL;
+
+ bytestream_put_be16(&s->buf, JPEG2000_SOT);
+ bytestream_put_be16(&s->buf, 10); // Lsot
+ bytestream_put_be16(&s->buf, tileno); // Isot
+
+ psotptr = s->buf;
+ bytestream_put_be32(&s->buf, 0); // Psot (filled in later)
+
+ bytestream_put_byte(&s->buf, 0); // TPsot
+ bytestream_put_byte(&s->buf, 1); // TNsot
+ return psotptr;
+}
+
+/**
+ * compute the sizes of tiles, resolution levels, bands, etc.
+ * allocate memory for them
+ * divide the input image into tile-components
+ */
+static int init_tiles(Jpeg2000EncoderContext *s)
+{
+ int tileno, tilex, tiley, compno;
+ Jpeg2000CodingStyle *codsty = &s->codsty;
+ Jpeg2000QuantStyle *qntsty = &s->qntsty;
+
+ s->numXtiles = ff_jpeg2000_ceildiv(s->width, s->tile_width);
+ s->numYtiles = ff_jpeg2000_ceildiv(s->height, s->tile_height);
+
+ s->tile = av_malloc_array(s->numXtiles, s->numYtiles * sizeof(Jpeg2000Tile));
+ if (!s->tile)
+ return AVERROR(ENOMEM);
+ for (tileno = 0, tiley = 0; tiley < s->numYtiles; tiley++)
+ for (tilex = 0; tilex < s->numXtiles; tilex++, tileno++){
+ Jpeg2000Tile *tile = s->tile + tileno;
+
+ tile->comp = av_mallocz_array(s->ncomponents, sizeof(Jpeg2000Component));
+ if (!tile->comp)
+ return AVERROR(ENOMEM);
+ for (compno = 0; compno < s->ncomponents; compno++){
+ Jpeg2000Component *comp = tile->comp + compno;
+ int ret, i, j;
+
+ comp->coord[0][0] = comp->coord_o[0][0] = tilex * s->tile_width;
+ comp->coord[0][1] = comp->coord_o[0][1] = FFMIN((tilex+1)*s->tile_width, s->width);
+ comp->coord[1][0] = comp->coord_o[1][0] = tiley * s->tile_height;
+ comp->coord[1][1] = comp->coord_o[1][1] = FFMIN((tiley+1)*s->tile_height, s->height);
+ if (compno > 0)
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 2; j++)
+ comp->coord[i][j] = comp->coord_o[i][j] = ff_jpeg2000_ceildivpow2(comp->coord[i][j], s->chroma_shift[i]);
+
+ if ((ret = ff_jpeg2000_init_component(comp,
+ codsty,
+ qntsty,
+ s->cbps[compno],
+ compno?1<<s->chroma_shift[0]:1,
+ compno?1<<s->chroma_shift[1]:1,
+ s->avctx
+ )) < 0)
+ return ret;
+ }
+ }
+ return 0;
+}
+
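init_tiles() sizes the grid with ceiling divisions and clips each tile-component rectangle to the image, additionally shrinking chroma components by the subsampling shifts. A worked sketch of the plain-luma arithmetic for an arbitrarily chosen 600x400 image with 256x256 tiles (ceildiv mirrors ff_jpeg2000_ceildiv):

    #include <stdio.h>

    static int ceildiv(int a, int b) { return (a + b - 1) / b; }

    int main(void)
    {
        int width = 600, height = 400, tile_w = 256, tile_h = 256;
        int numXtiles = ceildiv(width,  tile_w);   /* 3 */
        int numYtiles = ceildiv(height, tile_h);   /* 2 */
        int tilex = 2, tiley = 1;                  /* bottom-right tile */
        int x0 = tilex * tile_w;                   /* 512 */
        int x1 = (tilex + 1) * tile_w;
        int y0 = tiley * tile_h, y1 = (tiley + 1) * tile_h;
        if (x1 > width)  x1 = width;               /* clipped: 600 -> 88 columns */
        if (y1 > height) y1 = height;              /* clipped: 400 -> 144 rows   */
        printf("%dx%d tiles, last tile %dx%d\n", numXtiles, numYtiles, x1 - x0, y1 - y0);
        return 0;
    }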
+static void copy_frame(Jpeg2000EncoderContext *s)
+{
+ int tileno, compno, i, y, x;
+ uint8_t *line;
+ for (tileno = 0; tileno < s->numXtiles * s->numYtiles; tileno++){
+ Jpeg2000Tile *tile = s->tile + tileno;
+ if (s->planar){
+ for (compno = 0; compno < s->ncomponents; compno++){
+ Jpeg2000Component *comp = tile->comp + compno;
+ int *dst = comp->i_data;
+ line = s->picture->data[compno]
+ + comp->coord[1][0] * s->picture->linesize[compno]
+ + comp->coord[0][0];
+ for (y = comp->coord[1][0]; y < comp->coord[1][1]; y++){
+ uint8_t *ptr = line;
+ for (x = comp->coord[0][0]; x < comp->coord[0][1]; x++)
+ *dst++ = *ptr++ - (1 << 7);
+ line += s->picture->linesize[compno];
+ }
+ }
+ } else{
+ line = s->picture->data[0] + tile->comp[0].coord[1][0] * s->picture->linesize[0]
+ + tile->comp[0].coord[0][0] * s->ncomponents;
+
+ i = 0;
+ for (y = tile->comp[0].coord[1][0]; y < tile->comp[0].coord[1][1]; y++){
+ uint8_t *ptr = line;
+ for (x = tile->comp[0].coord[0][0]; x < tile->comp[0].coord[0][1]; x++, i++){
+ for (compno = 0; compno < s->ncomponents; compno++){
+ tile->comp[compno].i_data[i] = *ptr++ - (1 << 7);
+ }
+ }
+ line += s->picture->linesize[0];
+ }
+ }
+ }
+}
+
+static void init_quantization(Jpeg2000EncoderContext *s)
+{
+ int compno, reslevelno, bandno;
+ Jpeg2000QuantStyle *qntsty = &s->qntsty;
+ Jpeg2000CodingStyle *codsty = &s->codsty;
+
+ for (compno = 0; compno < s->ncomponents; compno++){
+ int gbandno = 0;
+ for (reslevelno = 0; reslevelno < codsty->nreslevels; reslevelno++){
+ int nbands, lev = codsty->nreslevels - reslevelno - 1;
+ nbands = reslevelno ? 3 : 1;
+ for (bandno = 0; bandno < nbands; bandno++, gbandno++){
+ int expn, mant = 0;
+
+ if (codsty->transform == FF_DWT97_INT){
+ int bandpos = bandno + (reslevelno>0),
+ ss = 81920000 / dwt_norms[0][bandpos][lev],
+ log = av_log2(ss);
+ mant = (11 - log < 0 ? ss >> log - 11 : ss << 11 - log) & 0x7ff;
+ expn = s->cbps[compno] - log + 13;
+ } else
+ expn = ((bandno&2)>>1) + (reslevelno>0) + s->cbps[compno];
+
+ qntsty->expn[gbandno] = expn;
+ qntsty->mant[gbandno] = mant;
+ }
+ }
+ }
+}
+
+static void init_luts(void)
+{
+ int i, a,
+ mask = ~((1<<NMSEDEC_FRACBITS)-1);
+
+ for (i = 0; i < (1 << NMSEDEC_BITS); i++){
+ lut_nmsedec_sig[i] = FFMAX(6*i - (9<<NMSEDEC_FRACBITS-1) << 12-NMSEDEC_FRACBITS, 0);
+ lut_nmsedec_sig0[i] = FFMAX((i*i + (1<<NMSEDEC_FRACBITS-1) & mask) << 1, 0);
+
+ a = (i >> (NMSEDEC_BITS-2)&2) + 1;
+ lut_nmsedec_ref[i] = FFMAX((-2*i + (1<<NMSEDEC_FRACBITS) + a*i - (a*a<<NMSEDEC_FRACBITS-2))
+ << 13-NMSEDEC_FRACBITS, 0);
+ lut_nmsedec_ref0[i] = FFMAX(((i*i + (1-4*i << NMSEDEC_FRACBITS-1) + (1<<2*NMSEDEC_FRACBITS)) & mask)
+ << 1, 0);
+ }
+}
+
+/* tier-1 routines */
+static int getnmsedec_sig(int x, int bpno)
+{
+ if (bpno > NMSEDEC_FRACBITS)
+ return lut_nmsedec_sig[(x >> (bpno - NMSEDEC_FRACBITS)) & ((1 << NMSEDEC_BITS) - 1)];
+ return lut_nmsedec_sig0[x & ((1 << NMSEDEC_BITS) - 1)];
+}
+
+static int getnmsedec_ref(int x, int bpno)
+{
+ if (bpno > NMSEDEC_FRACBITS)
+ return lut_nmsedec_ref[(x >> (bpno - NMSEDEC_FRACBITS)) & ((1 << NMSEDEC_BITS) - 1)];
+ return lut_nmsedec_ref0[x & ((1 << NMSEDEC_BITS) - 1)];
+}
+
+static void encode_sigpass(Jpeg2000T1Context *t1, int width, int height, int bandno, int *nmsedec, int bpno)
+{
+ int y0, x, y, mask = 1 << (bpno + NMSEDEC_FRACBITS);
+ for (y0 = 0; y0 < height; y0 += 4)
+ for (x = 0; x < width; x++)
+ for (y = y0; y < height && y < y0+4; y++){
+ if (!(t1->flags[(y+1) * t1->stride + x+1] & JPEG2000_T1_SIG) && (t1->flags[(y+1) * t1->stride + x+1] & JPEG2000_T1_SIG_NB)){
+ int ctxno = ff_jpeg2000_getsigctxno(t1->flags[(y+1) * t1->stride + x+1], bandno),
+ bit = t1->data[(y) * t1->stride + x] & mask ? 1 : 0;
+ ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + ctxno, bit);
+ if (bit){
+ int xorbit;
+ int ctxno = ff_jpeg2000_getsgnctxno(t1->flags[(y+1) * t1->stride + x+1], &xorbit);
+ ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + ctxno, (t1->flags[(y+1) * t1->stride + x+1] >> 15) ^ xorbit);
+ *nmsedec += getnmsedec_sig(t1->data[(y) * t1->stride + x], bpno + NMSEDEC_FRACBITS);
+ ff_jpeg2000_set_significance(t1, x, y, t1->flags[(y+1) * t1->stride + x+1] >> 15);
+ }
+ t1->flags[(y+1) * t1->stride + x+1] |= JPEG2000_T1_VIS;
+ }
+ }
+}
+
+static void encode_refpass(Jpeg2000T1Context *t1, int width, int height, int *nmsedec, int bpno)
+{
+ int y0, x, y, mask = 1 << (bpno + NMSEDEC_FRACBITS);
+ for (y0 = 0; y0 < height; y0 += 4)
+ for (x = 0; x < width; x++)
+ for (y = y0; y < height && y < y0+4; y++)
+ if ((t1->flags[(y+1) * t1->stride + x+1] & (JPEG2000_T1_SIG | JPEG2000_T1_VIS)) == JPEG2000_T1_SIG){
+ int ctxno = ff_jpeg2000_getrefctxno(t1->flags[(y+1) * t1->stride + x+1]);
+ *nmsedec += getnmsedec_ref(t1->data[(y) * t1->stride + x], bpno + NMSEDEC_FRACBITS);
+ ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + ctxno, t1->data[(y) * t1->stride + x] & mask ? 1:0);
+ t1->flags[(y+1) * t1->stride + x+1] |= JPEG2000_T1_REF;
+ }
+}
+
+static void encode_clnpass(Jpeg2000T1Context *t1, int width, int height, int bandno, int *nmsedec, int bpno)
+{
+ int y0, x, y, mask = 1 << (bpno + NMSEDEC_FRACBITS);
+ for (y0 = 0; y0 < height; y0 += 4)
+ for (x = 0; x < width; x++){
+ if (y0 + 3 < height && !(
+ (t1->flags[(y0+1) * t1->stride + x+1] & (JPEG2000_T1_SIG_NB | JPEG2000_T1_VIS | JPEG2000_T1_SIG)) ||
+ (t1->flags[(y0+2) * t1->stride + x+1] & (JPEG2000_T1_SIG_NB | JPEG2000_T1_VIS | JPEG2000_T1_SIG)) ||
+ (t1->flags[(y0+3) * t1->stride + x+1] & (JPEG2000_T1_SIG_NB | JPEG2000_T1_VIS | JPEG2000_T1_SIG)) ||
+ (t1->flags[(y0+4) * t1->stride + x+1] & (JPEG2000_T1_SIG_NB | JPEG2000_T1_VIS | JPEG2000_T1_SIG))))
+ {
+ // aggregation mode
+ int rlen;
+ for (rlen = 0; rlen < 4; rlen++)
+ if (t1->data[(y0+rlen) * t1->stride + x] & mask)
+ break;
+ ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + MQC_CX_RL, rlen != 4);
+ if (rlen == 4)
+ continue;
+ ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + MQC_CX_UNI, rlen >> 1);
+ ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + MQC_CX_UNI, rlen & 1);
+ for (y = y0 + rlen; y < y0 + 4; y++){
+ if (!(t1->flags[(y+1) * t1->stride + x+1] & (JPEG2000_T1_SIG | JPEG2000_T1_VIS))){
+ int ctxno = ff_jpeg2000_getsigctxno(t1->flags[(y+1) * t1->stride + x+1], bandno);
+ if (y > y0 + rlen)
+ ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + ctxno, t1->data[(y) * t1->stride + x] & mask ? 1:0);
+ if (t1->data[(y) * t1->stride + x] & mask){ // newly significant
+ int xorbit;
+ int ctxno = ff_jpeg2000_getsgnctxno(t1->flags[(y+1) * t1->stride + x+1], &xorbit);
+ *nmsedec += getnmsedec_sig(t1->data[(y) * t1->stride + x], bpno + NMSEDEC_FRACBITS);
+ ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + ctxno, (t1->flags[(y+1) * t1->stride + x+1] >> 15) ^ xorbit);
+ ff_jpeg2000_set_significance(t1, x, y, t1->flags[(y+1) * t1->stride + x+1] >> 15);
+ }
+ }
+ t1->flags[(y+1) * t1->stride + x+1] &= ~JPEG2000_T1_VIS;
+ }
+ } else{
+ for (y = y0; y < y0 + 4 && y < height; y++){
+ if (!(t1->flags[(y+1) * t1->stride + x+1] & (JPEG2000_T1_SIG | JPEG2000_T1_VIS))){
+ int ctxno = ff_jpeg2000_getsigctxno(t1->flags[(y+1) * t1->stride + x+1], bandno);
+ ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + ctxno, t1->data[(y) * t1->stride + x] & mask ? 1:0);
+ if (t1->data[(y) * t1->stride + x] & mask){ // newly significant
+ int xorbit;
+ int ctxno = ff_jpeg2000_getsgnctxno(t1->flags[(y+1) * t1->stride + x+1], &xorbit);
+ *nmsedec += getnmsedec_sig(t1->data[(y) * t1->stride + x], bpno + NMSEDEC_FRACBITS);
+ ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + ctxno, (t1->flags[(y+1) * t1->stride + x+1] >> 15) ^ xorbit);
+ ff_jpeg2000_set_significance(t1, x, y, t1->flags[(y+1) * t1->stride + x+1] >> 15);
+ }
+ }
+ t1->flags[(y+1) * t1->stride + x+1] &= ~JPEG2000_T1_VIS;
+ }
+ }
+ }
+}
+
+static void encode_cblk(Jpeg2000EncoderContext *s, Jpeg2000T1Context *t1, Jpeg2000Cblk *cblk, Jpeg2000Tile *tile,
+ int width, int height, int bandpos, int lev)
+{
+ int pass_t = 2, passno, x, y, max=0, nmsedec, bpno;
+ int64_t wmsedec = 0;
+
+ memset(t1->flags, 0, t1->stride * (height + 2) * sizeof(*t1->flags));
+
+ for (y = 0; y < height; y++){
+ for (x = 0; x < width; x++){
+ if (t1->data[(y) * t1->stride + x] < 0){
+ t1->flags[(y+1) * t1->stride + x+1] |= JPEG2000_T1_SGN;
+ t1->data[(y) * t1->stride + x] = -t1->data[(y) * t1->stride + x];
+ }
+ max = FFMAX(max, t1->data[(y) * t1->stride + x]);
+ }
+ }
+
+ if (max == 0){
+ cblk->nonzerobits = 0;
+ bpno = 0;
+ } else{
+ cblk->nonzerobits = av_log2(max) + 1 - NMSEDEC_FRACBITS;
+ bpno = cblk->nonzerobits - 1;
+ }
+
+ ff_mqc_initenc(&t1->mqc, cblk->data);
+
+ for (passno = 0; bpno >= 0; passno++){
+ nmsedec=0;
+
+ switch(pass_t){
+ case 0: encode_sigpass(t1, width, height, bandpos, &nmsedec, bpno);
+ break;
+ case 1: encode_refpass(t1, width, height, &nmsedec, bpno);
+ break;
+ case 2: encode_clnpass(t1, width, height, bandpos, &nmsedec, bpno);
+ break;
+ }
+
+ cblk->passes[passno].rate = ff_mqc_flush_to(&t1->mqc, cblk->passes[passno].flushed, &cblk->passes[passno].flushed_len);
+ wmsedec += (int64_t)nmsedec << (2*bpno);
+ cblk->passes[passno].disto = wmsedec;
+
+ if (++pass_t == 3){
+ pass_t = 0;
+ bpno--;
+ }
+ }
+ cblk->npasses = passno;
+ cblk->ninclpasses = passno;
+
+ cblk->passes[passno-1].rate = ff_mqc_flush_to(&t1->mqc, cblk->passes[passno-1].flushed, &cblk->passes[passno-1].flushed_len);
+}
+
+/* tier-2 routines: */
+
+static void putnumpasses(Jpeg2000EncoderContext *s, int n)
+{
+ if (n == 1)
+ put_num(s, 0, 1);
+ else if (n == 2)
+ put_num(s, 2, 2);
+ else if (n <= 5)
+ put_num(s, 0xc | (n-3), 4);
+ else if (n <= 36)
+ put_num(s, 0x1e0 | (n-6), 9);
+ else
+ put_num(s, 0xff80 | (n-37), 16);
+}
+
+
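putnumpasses() writes the usual JPEG 2000 variable-length code for the number of coding passes included in a packet, growing from one bit for a single pass to sixteen bits for 37 or more. For illustration only, a small helper giving the codeword length produced for each n (the actual bit patterns are emitted by put_num above):

    /* Codeword length, in bits, produced by putnumpasses() for n included passes. */
    static int numpasses_len(int n)
    {
        if (n == 1)  return 1;   /* "0"                          */
        if (n == 2)  return 2;   /* "10"                         */
        if (n <= 5)  return 4;   /* "11" + 2 bits of n-3         */
        if (n <= 36) return 9;   /* "1111" + 5 bits of n-6       */
        return 16;               /* "111111111" + 7 bits of n-37 */
    }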
+static int encode_packet(Jpeg2000EncoderContext *s, Jpeg2000ResLevel *rlevel, int precno,
+ uint8_t *expn, int numgbits)
+{
+ int bandno, empty = 1;
+
+ // init bitstream
+ *s->buf = 0;
+ s->bit_index = 0;
+
+ // header
+
+ // is the packet empty?
+ for (bandno = 0; bandno < rlevel->nbands; bandno++){
+ if (rlevel->band[bandno].coord[0][0] < rlevel->band[bandno].coord[0][1]
+ && rlevel->band[bandno].coord[1][0] < rlevel->band[bandno].coord[1][1]){
+ empty = 0;
+ break;
+ }
+ }
+
+ put_bits(s, !empty, 1);
+ if (empty){
+ j2k_flush(s);
+ return 0;
+ }
+
+ for (bandno = 0; bandno < rlevel->nbands; bandno++){
+ Jpeg2000Band *band = rlevel->band + bandno;
+ Jpeg2000Prec *prec = band->prec + precno;
+ int yi, xi, pos;
+ int cblknw = prec->nb_codeblocks_width;
+
+ if (band->coord[0][0] == band->coord[0][1]
+ || band->coord[1][0] == band->coord[1][1])
+ continue;
+
+ for (pos=0, yi = 0; yi < prec->nb_codeblocks_height; yi++){
+ for (xi = 0; xi < cblknw; xi++, pos++){
+ prec->cblkincl[pos].val = prec->cblk[yi * cblknw + xi].ninclpasses == 0;
+ tag_tree_update(prec->cblkincl + pos);
+ prec->zerobits[pos].val = expn[bandno] + numgbits - 1 - prec->cblk[yi * cblknw + xi].nonzerobits;
+ tag_tree_update(prec->zerobits + pos);
+ }
+ }
+
+ for (pos=0, yi = 0; yi < prec->nb_codeblocks_height; yi++){
+ for (xi = 0; xi < cblknw; xi++, pos++){
+ int pad = 0, llen, length;
+ Jpeg2000Cblk *cblk = prec->cblk + yi * cblknw + xi;
+
+ if (s->buf_end - s->buf < 20) // approximately
+ return -1;
+
+ // inclusion information
+ tag_tree_code(s, prec->cblkincl + pos, 1);
+ if (!cblk->ninclpasses)
+ continue;
+ // zerobits information
+ tag_tree_code(s, prec->zerobits + pos, 100);
+ // number of passes
+ putnumpasses(s, cblk->ninclpasses);
+
+ length = cblk->passes[cblk->ninclpasses-1].rate;
+ llen = av_log2(length) - av_log2(cblk->ninclpasses) - 2;
+ if (llen < 0){
+ pad = -llen;
+ llen = 0;
+ }
+ // length of code block
+ put_bits(s, 1, llen);
+ put_bits(s, 0, 1);
+ put_num(s, length, av_log2(length)+1+pad);
+ }
+ }
+ }
+ j2k_flush(s);
+ for (bandno = 0; bandno < rlevel->nbands; bandno++){
+ Jpeg2000Band *band = rlevel->band + bandno;
+ Jpeg2000Prec *prec = band->prec + precno;
+ int yi, cblknw = prec->nb_codeblocks_width;
+ for (yi =0; yi < prec->nb_codeblocks_height; yi++){
+ int xi;
+ for (xi = 0; xi < cblknw; xi++){
+ Jpeg2000Cblk *cblk = prec->cblk + yi * cblknw + xi;
+ if (cblk->ninclpasses){
+ if (s->buf_end - s->buf < cblk->passes[cblk->ninclpasses-1].rate)
+ return -1;
+ bytestream_put_buffer(&s->buf, cblk->data, cblk->passes[cblk->ninclpasses-1].rate
+ - cblk->passes[cblk->ninclpasses-1].flushed_len);
+ bytestream_put_buffer(&s->buf, cblk->passes[cblk->ninclpasses-1].flushed,
+ cblk->passes[cblk->ninclpasses-1].flushed_len);
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+static int encode_packets(Jpeg2000EncoderContext *s, Jpeg2000Tile *tile, int tileno)
+{
+ int compno, reslevelno, ret;
+ Jpeg2000CodingStyle *codsty = &s->codsty;
+ Jpeg2000QuantStyle *qntsty = &s->qntsty;
+
+ av_log(s->avctx, AV_LOG_DEBUG, "tier2\n");
+ // lay-rlevel-comp-pos progression
+ for (reslevelno = 0; reslevelno < codsty->nreslevels; reslevelno++){
+ for (compno = 0; compno < s->ncomponents; compno++){
+ int precno;
+ Jpeg2000ResLevel *reslevel = s->tile[tileno].comp[compno].reslevel + reslevelno;
+ for (precno = 0; precno < reslevel->num_precincts_x * reslevel->num_precincts_y; precno++){
+ if ((ret = encode_packet(s, reslevel, precno, qntsty->expn + (reslevelno ? 3*reslevelno-2 : 0),
+ qntsty->nguardbits)) < 0)
+ return ret;
+ }
+ }
+ }
+ av_log(s->avctx, AV_LOG_DEBUG, "after tier2\n");
+ return 0;
+}
+
+static int getcut(Jpeg2000Cblk *cblk, int64_t lambda, int dwt_norm)
+{
+ int passno, res = 0;
+ for (passno = 0; passno < cblk->npasses; passno++){
+ int dr;
+ int64_t dd;
+
+ dr = cblk->passes[passno].rate
+ - (res ? cblk->passes[res-1].rate:0);
+ dd = cblk->passes[passno].disto
+ - (res ? cblk->passes[res-1].disto:0);
+
+ if (((dd * dwt_norm) >> WMSEDEC_SHIFT) * dwt_norm >= dr * lambda)
+ res = passno+1;
+ }
+ return res;
+}
+
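getcut() is the per-code-block rate-control decision: a pass survives as long as its marginal distortion drop, weighted by the band's DWT norm squared and the WMSEDEC_SHIFT fixed-point scale, is at least lambda times its marginal rate. The same Lagrangian test restated in isolation (parameter names are illustrative; d_disto and d_rate are the per-pass deltas of the cumulative values stored above):

    #include <stdint.h>

    /* Keep a pass while  d_disto * norm^2 / 2^wmsedec_shift  >=  lambda * d_rate. */
    static int keep_pass(int64_t d_disto, int d_rate, int dwt_norm,
                         int64_t lambda, int wmsedec_shift)
    {
        return ((d_disto * dwt_norm) >> wmsedec_shift) * dwt_norm >= d_rate * lambda;
    }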
+static void truncpasses(Jpeg2000EncoderContext *s, Jpeg2000Tile *tile)
+{
+ int precno, compno, reslevelno, bandno, cblkno, lev;
+ Jpeg2000CodingStyle *codsty = &s->codsty;
+
+ for (compno = 0; compno < s->ncomponents; compno++){
+ Jpeg2000Component *comp = tile->comp + compno;
+
+ for (reslevelno = 0, lev = codsty->nreslevels-1; reslevelno < codsty->nreslevels; reslevelno++, lev--){
+ Jpeg2000ResLevel *reslevel = comp->reslevel + reslevelno;
+
+ for (precno = 0; precno < reslevel->num_precincts_x * reslevel->num_precincts_y; precno++){
+ for (bandno = 0; bandno < reslevel->nbands ; bandno++){
+ int bandpos = bandno + (reslevelno > 0);
+ Jpeg2000Band *band = reslevel->band + bandno;
+ Jpeg2000Prec *prec = band->prec + precno;
+
+ for (cblkno = 0; cblkno < prec->nb_codeblocks_height * prec->nb_codeblocks_width; cblkno++){
+ Jpeg2000Cblk *cblk = prec->cblk + cblkno;
+
+ cblk->ninclpasses = getcut(cblk, s->lambda,
+ (int64_t)dwt_norms[codsty->transform == FF_DWT53][bandpos][lev] * (int64_t)band->i_stepsize >> 15);
+ }
+ }
+ }
+ }
+ }
+}
+
+static int encode_tile(Jpeg2000EncoderContext *s, Jpeg2000Tile *tile, int tileno)
+{
+ int compno, reslevelno, bandno, ret;
+ Jpeg2000T1Context t1;
+ Jpeg2000CodingStyle *codsty = &s->codsty;
+ for (compno = 0; compno < s->ncomponents; compno++){
+ Jpeg2000Component *comp = s->tile[tileno].comp + compno;
+
+ t1.stride = (1<<codsty->log2_cblk_width) + 2;
+
+ av_log(s->avctx, AV_LOG_DEBUG,"dwt\n");
+ if ((ret = ff_dwt_encode(&comp->dwt, comp->i_data)) < 0)
+ return ret;
+ av_log(s->avctx, AV_LOG_DEBUG,"after dwt -> tier1\n");
+
+ for (reslevelno = 0; reslevelno < codsty->nreslevels; reslevelno++){
+ Jpeg2000ResLevel *reslevel = comp->reslevel + reslevelno;
+
+ for (bandno = 0; bandno < reslevel->nbands ; bandno++){
+ Jpeg2000Band *band = reslevel->band + bandno;
+ Jpeg2000Prec *prec = band->prec; // we support only 1 precinct per band ATM in the encoder
+ int cblkx, cblky, cblkno=0, xx0, x0, xx1, y0, yy0, yy1, bandpos;
+ yy0 = bandno == 0 ? 0 : comp->reslevel[reslevelno-1].coord[1][1] - comp->reslevel[reslevelno-1].coord[1][0];
+ y0 = yy0;
+ yy1 = FFMIN(ff_jpeg2000_ceildivpow2(band->coord[1][0] + 1, band->log2_cblk_height) << band->log2_cblk_height,
+ band->coord[1][1]) - band->coord[1][0] + yy0;
+
+ if (band->coord[0][0] == band->coord[0][1] || band->coord[1][0] == band->coord[1][1])
+ continue;
+
+ bandpos = bandno + (reslevelno > 0);
+
+ for (cblky = 0; cblky < prec->nb_codeblocks_height; cblky++){
+ if (reslevelno == 0 || bandno == 1)
+ xx0 = 0;
+ else
+ xx0 = comp->reslevel[reslevelno-1].coord[0][1] - comp->reslevel[reslevelno-1].coord[0][0];
+ x0 = xx0;
+ xx1 = FFMIN(ff_jpeg2000_ceildivpow2(band->coord[0][0] + 1, band->log2_cblk_width) << band->log2_cblk_width,
+ band->coord[0][1]) - band->coord[0][0] + xx0;
+
+ for (cblkx = 0; cblkx < prec->nb_codeblocks_width; cblkx++, cblkno++){
+ int y, x;
+ if (codsty->transform == FF_DWT53){
+ for (y = yy0; y < yy1; y++){
+ int *ptr = t1.data + (y-yy0)*t1.stride;
+ for (x = xx0; x < xx1; x++){
+ *ptr++ = comp->i_data[(comp->coord[0][1] - comp->coord[0][0]) * y + x] << NMSEDEC_FRACBITS;
+ }
+ }
+ } else{
+ for (y = yy0; y < yy1; y++){
+ int *ptr = t1.data + (y-yy0)*t1.stride;
+ for (x = xx0; x < xx1; x++){
+ *ptr = (comp->i_data[(comp->coord[0][1] - comp->coord[0][0]) * y + x]);
+ *ptr = (int64_t)*ptr * (int64_t)(16384 * 65536 / band->i_stepsize) >> 15 - NMSEDEC_FRACBITS;
+ ptr++;
+ }
+ }
+ }
+ encode_cblk(s, &t1, prec->cblk + cblkno, tile, xx1 - xx0, yy1 - yy0,
+ bandpos, codsty->nreslevels - reslevelno - 1);
+ xx0 = xx1;
+ xx1 = FFMIN(xx1 + (1 << band->log2_cblk_width), band->coord[0][1] - band->coord[0][0] + x0);
+ }
+ yy0 = yy1;
+ yy1 = FFMIN(yy1 + (1 << band->log2_cblk_height), band->coord[1][1] - band->coord[1][0] + y0);
+ }
+ }
+ }
+ av_log(s->avctx, AV_LOG_DEBUG, "after tier1\n");
+ }
+
+ av_log(s->avctx, AV_LOG_DEBUG, "rate control\n");
+ truncpasses(s, tile);
+ if ((ret = encode_packets(s, tile, tileno)) < 0)
+ return ret;
+ av_log(s->avctx, AV_LOG_DEBUG, "after rate control\n");
+ return 0;
+}
+
+static void cleanup(Jpeg2000EncoderContext *s)
+{
+ int tileno, compno;
+ Jpeg2000CodingStyle *codsty = &s->codsty;
+
+ for (tileno = 0; tileno < s->numXtiles * s->numYtiles; tileno++){
+ for (compno = 0; compno < s->ncomponents; compno++){
+ Jpeg2000Component *comp = s->tile[tileno].comp + compno;
+ ff_jpeg2000_cleanup(comp, codsty);
+ }
+ av_freep(&s->tile[tileno].comp);
+ }
+ av_freep(&s->tile);
+}
+
+static void reinit(Jpeg2000EncoderContext *s)
+{
+ int tileno, compno;
+ for (tileno = 0; tileno < s->numXtiles * s->numYtiles; tileno++){
+ Jpeg2000Tile *tile = s->tile + tileno;
+ for (compno = 0; compno < s->ncomponents; compno++)
+ ff_jpeg2000_reinit(tile->comp + compno, &s->codsty);
+ }
+}
+
+static void update_size(uint8_t *size, const uint8_t *end)
+{
+ AV_WB32(size, end-size);
+}
+
+static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+ const AVFrame *pict, int *got_packet)
+{
+ int tileno, ret;
+ Jpeg2000EncoderContext *s = avctx->priv_data;
+ uint8_t *chunkstart, *jp2cstart, *jp2hstart;
+
+ if ((ret = ff_alloc_packet2(avctx, pkt, avctx->width*avctx->height*9 + FF_MIN_BUFFER_SIZE, 0)) < 0)
+ return ret;
+
+ // init:
+ s->buf = s->buf_start = pkt->data;
+ s->buf_end = pkt->data + pkt->size;
+
+ s->picture = pict;
+
+ s->lambda = s->picture->quality * LAMBDA_SCALE;
+
+ copy_frame(s);
+ reinit(s);
+
+ if (s->format == CODEC_JP2) {
+ av_assert0(s->buf == pkt->data);
+
+ bytestream_put_be32(&s->buf, 0x0000000C);
+ bytestream_put_be32(&s->buf, 0x6A502020);
+ bytestream_put_be32(&s->buf, 0x0D0A870A);
+
+ chunkstart = s->buf;
+ bytestream_put_be32(&s->buf, 0);
+ bytestream_put_buffer(&s->buf, "ftyp", 4);
+ bytestream_put_buffer(&s->buf, "jp2\040\040", 4);
+ bytestream_put_be32(&s->buf, 0);
+ bytestream_put_buffer(&s->buf, "jp2\040", 4);
+ update_size(chunkstart, s->buf);
+
+ jp2hstart = s->buf;
+ bytestream_put_be32(&s->buf, 0);
+ bytestream_put_buffer(&s->buf, "jp2h", 4);
+
+ chunkstart = s->buf;
+ bytestream_put_be32(&s->buf, 0);
+ bytestream_put_buffer(&s->buf, "ihdr", 4);
+ bytestream_put_be32(&s->buf, avctx->height);
+ bytestream_put_be32(&s->buf, avctx->width);
+ bytestream_put_be16(&s->buf, s->ncomponents);
+ bytestream_put_byte(&s->buf, s->cbps[0]);
+ bytestream_put_byte(&s->buf, 7);
+ bytestream_put_byte(&s->buf, 0);
+ bytestream_put_byte(&s->buf, 0);
+ update_size(chunkstart, s->buf);
+
+ chunkstart = s->buf;
+ bytestream_put_be32(&s->buf, 0);
+ bytestream_put_buffer(&s->buf, "colr", 4);
+ bytestream_put_byte(&s->buf, 1);
+ bytestream_put_byte(&s->buf, 0);
+ bytestream_put_byte(&s->buf, 0);
+ if (s->ncomponents == 1) {
+ bytestream_put_be32(&s->buf, 17);
+ } else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) {
+ bytestream_put_be32(&s->buf, 16);
+ } else {
+ bytestream_put_be32(&s->buf, 18);
+ }
+ update_size(chunkstart, s->buf);
+ update_size(jp2hstart, s->buf);
+
+ jp2cstart = s->buf;
+ bytestream_put_be32(&s->buf, 0);
+ bytestream_put_buffer(&s->buf, "jp2c", 4);
+ }
+
+ if (s->buf_end - s->buf < 2)
+ return -1;
+ bytestream_put_be16(&s->buf, JPEG2000_SOC);
+ if ((ret = put_siz(s)) < 0)
+ return ret;
+ if ((ret = put_cod(s)) < 0)
+ return ret;
+ if ((ret = put_qcd(s, 0)) < 0)
+ return ret;
+ if ((ret = put_com(s, 0)) < 0)
+ return ret;
+
+ for (tileno = 0; tileno < s->numXtiles * s->numYtiles; tileno++){
+ uint8_t *psotptr;
+ if (!(psotptr = put_sot(s, tileno)))
+ return -1;
+ if (s->buf_end - s->buf < 2)
+ return -1;
+ bytestream_put_be16(&s->buf, JPEG2000_SOD);
+ if ((ret = encode_tile(s, s->tile + tileno, tileno)) < 0)
+ return ret;
+ bytestream_put_be32(&psotptr, s->buf - psotptr + 6);
+ }
+ if (s->buf_end - s->buf < 2)
+ return -1;
+ bytestream_put_be16(&s->buf, JPEG2000_EOC);
+
+ if (s->format == CODEC_JP2)
+ update_size(jp2cstart, s->buf);
+
+ av_log(s->avctx, AV_LOG_DEBUG, "end\n");
+ pkt->size = s->buf - s->buf_start;
+ pkt->flags |= AV_PKT_FLAG_KEY;
+ *got_packet = 1;
+
+ return 0;
+}
+
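The JP2 wrapper emitted in encode_frame() is a sequence of ISO base-media boxes: a 32-bit big-endian size, a four-character type, then the payload, with sizes that are not known up front written as 0 and patched afterwards by update_size(). A minimal sketch of that pattern using the same bytestream helpers (payload elided; the box type chosen here is arbitrary):

    #include "bytestream.h"
    #include "libavutil/intreadwrite.h"

    /* Write a box header, fill in the payload, then patch the 32-bit size field. */
    static void write_box_demo(uint8_t **buf)
    {
        uint8_t *box_start = *buf;
        bytestream_put_be32(buf, 0);              /* size, patched below  */
        bytestream_put_buffer(buf, "jp2h", 4);    /* box type             */
        /* ... payload goes here ... */
        AV_WB32(box_start, *buf - box_start);     /* same as update_size() */
    }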
+static av_cold int j2kenc_init(AVCodecContext *avctx)
+{
+ int i, ret;
+ Jpeg2000EncoderContext *s = avctx->priv_data;
+ Jpeg2000CodingStyle *codsty = &s->codsty;
+ Jpeg2000QuantStyle *qntsty = &s->qntsty;
+
+ s->avctx = avctx;
+ av_log(s->avctx, AV_LOG_DEBUG, "init\n");
+
+ // defaults:
+ // TODO: implement setting non-standard precinct size
+ memset(codsty->log2_prec_widths , 15, sizeof(codsty->log2_prec_widths ));
+ memset(codsty->log2_prec_heights, 15, sizeof(codsty->log2_prec_heights));
+ codsty->nreslevels2decode=
+ codsty->nreslevels = 7;
+ codsty->log2_cblk_width = 4;
+ codsty->log2_cblk_height = 4;
+ codsty->transform = avctx->prediction_method ? FF_DWT53 : FF_DWT97_INT;
+
+ qntsty->nguardbits = 1;
+
+ if ((s->tile_width & (s->tile_width -1)) ||
+ (s->tile_height & (s->tile_height-1))) {
+ av_log(avctx, AV_LOG_WARNING, "Tile dimension not a power of 2\n");
+ }
+
+ if (codsty->transform == FF_DWT53)
+ qntsty->quantsty = JPEG2000_QSTY_NONE;
+ else
+ qntsty->quantsty = JPEG2000_QSTY_SE;
+
+ s->width = avctx->width;
+ s->height = avctx->height;
+
+ for (i = 0; i < 3; i++)
+ s->cbps[i] = 8;
+
+ if (avctx->pix_fmt == AV_PIX_FMT_RGB24){
+ s->ncomponents = 3;
+ } else if (avctx->pix_fmt == AV_PIX_FMT_GRAY8){
+ s->ncomponents = 1;
+ } else{ // planar YUV
+ s->planar = 1;
+ s->ncomponents = 3;
+ avcodec_get_chroma_sub_sample(avctx->pix_fmt,
+ s->chroma_shift, s->chroma_shift + 1);
+ }
+
+ ff_jpeg2000_init_tier1_luts();
+ ff_mqc_init_context_tables();
+ init_luts();
+
+ init_quantization(s);
+ if ((ret=init_tiles(s)) < 0)
+ return ret;
+
+ av_log(s->avctx, AV_LOG_DEBUG, "after init\n");
+
+ return 0;
+}
+
+static int j2kenc_destroy(AVCodecContext *avctx)
+{
+ Jpeg2000EncoderContext *s = avctx->priv_data;
+
+ cleanup(s);
+ return 0;
+}
+
+// taken from the libopenjpeg wrapper so the options match

+
+#define OFFSET(x) offsetof(Jpeg2000EncoderContext, x)
+#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
+static const AVOption options[] = {
+ { "format", "Codec Format", OFFSET(format), AV_OPT_TYPE_INT, { .i64 = CODEC_JP2 }, CODEC_J2K, CODEC_JP2, VE, "format" },
+ { "j2k", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CODEC_J2K }, 0, 0, VE, "format" },
+ { "jp2", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CODEC_JP2 }, 0, 0, VE, "format" },
+ { "tile_width", "Tile Width", OFFSET(tile_width), AV_OPT_TYPE_INT, { .i64 = 256 }, 1, 1<<30, VE, },
+ { "tile_height", "Tile Height", OFFSET(tile_height), AV_OPT_TYPE_INT, { .i64 = 256 }, 1, 1<<30, VE, },
+
+ { NULL }
+};
+
+static const AVClass j2k_class = {
+ .class_name = "jpeg 2000 encoder",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+AVCodec ff_jpeg2000_encoder = {
+ .name = "jpeg2000",
+ .long_name = NULL_IF_CONFIG_SMALL("JPEG 2000"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_JPEG2000,
+ .priv_data_size = sizeof(Jpeg2000EncoderContext),
+ .init = j2kenc_init,
+ .encode2 = encode_frame,
+ .close = j2kenc_destroy,
+ .capabilities = CODEC_CAP_EXPERIMENTAL,
+ .pix_fmts = (const enum AVPixelFormat[]) {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_YUV444P, AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_NONE
+ },
+ .priv_class = &j2k_class,
+};
}
c->transform = bytestream2_get_byteu(&s->g); // DWT transformation type
/* set integer 9/7 DWT in case of BITEXACT flag */
- if ((s->avctx->flags & CODEC_FLAG_BITEXACT) && (c->transform == FF_DWT97))
+ if ((s->avctx->flags & AV_CODEC_FLAG_BITEXACT) && (c->transform == FF_DWT97))
c->transform = FF_DWT97_INT;
+ else if (c->transform == FF_DWT53) {
+ s->avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
+ }
if (c->csty & JPEG2000_CSTY_PREC) {
int i;
--- /dev/null
- aacplus_cfg->outputFormat = !(avctx->flags & CODEC_FLAG_GLOBAL_HEADER);
+/*
+ * Interface to libaacplus for aac+ (sbr+ps) encoding
+ * Copyright (c) 2010 tipok <piratfm@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Interface to libaacplus for aac+ (sbr+ps) encoding.
+ */
+
+#include <aacplus.h>
+
+#include "avcodec.h"
+#include "internal.h"
+
+typedef struct aacPlusAudioContext {
+ aacplusEncHandle aacplus_handle;
+ unsigned long max_output_bytes;
+ unsigned long samples_input;
+} aacPlusAudioContext;
+
+static av_cold int aacPlus_encode_init(AVCodecContext *avctx)
+{
+ aacPlusAudioContext *s = avctx->priv_data;
+ aacplusEncConfiguration *aacplus_cfg;
+
+ /* number of channels */
+ if (avctx->channels < 1 || avctx->channels > 2) {
+ av_log(avctx, AV_LOG_ERROR, "encoding %d channel(s) is not allowed\n", avctx->channels);
+ return AVERROR(EINVAL);
+ }
+
+ if (avctx->profile != FF_PROFILE_AAC_LOW && avctx->profile != FF_PROFILE_UNKNOWN) {
+ av_log(avctx, AV_LOG_ERROR, "invalid AAC profile: %d, only LC supported\n", avctx->profile);
+ return AVERROR(EINVAL);
+ }
+
+ s->aacplus_handle = aacplusEncOpen(avctx->sample_rate, avctx->channels,
+ &s->samples_input, &s->max_output_bytes);
+ if (!s->aacplus_handle) {
+ av_log(avctx, AV_LOG_ERROR, "can't open encoder\n");
+ return AVERROR(EINVAL);
+ }
+
+ /* check aacplus version */
+ aacplus_cfg = aacplusEncGetCurrentConfiguration(s->aacplus_handle);
+
+ aacplus_cfg->bitRate = avctx->bit_rate;
+ aacplus_cfg->bandWidth = avctx->cutoff;
- if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
++ aacplus_cfg->outputFormat = !(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER);
+ aacplus_cfg->inputFormat = avctx->sample_fmt == AV_SAMPLE_FMT_FLT ? AACPLUS_INPUT_FLOAT : AACPLUS_INPUT_16BIT;
+ if (!aacplusEncSetConfiguration(s->aacplus_handle, aacplus_cfg)) {
+ av_log(avctx, AV_LOG_ERROR, "libaacplus doesn't support this output format!\n");
+ return AVERROR(EINVAL);
+ }
+
+ avctx->frame_size = s->samples_input / avctx->channels;
+
+ /* Set decoder specific info */
+ avctx->extradata_size = 0;
++ if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
+
+ unsigned char *buffer = NULL;
+ unsigned long decoder_specific_info_size;
+
+ if (aacplusEncGetDecoderSpecificInfo(s->aacplus_handle, &buffer,
+ &decoder_specific_info_size) == 1) {
+ avctx->extradata = av_malloc(decoder_specific_info_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (!avctx->extradata) {
+ free(buffer);
+ return AVERROR(ENOMEM);
+ }
+ avctx->extradata_size = decoder_specific_info_size;
+ memcpy(avctx->extradata, buffer, avctx->extradata_size);
+ }
+ free(buffer);
+ }
+ return 0;
+}
+
+static int aacPlus_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+ const AVFrame *frame, int *got_packet)
+{
+ aacPlusAudioContext *s = avctx->priv_data;
+ int32_t *input_buffer = (int32_t *)frame->data[0];
+ int ret;
+
+ if ((ret = ff_alloc_packet2(avctx, pkt, s->max_output_bytes, 0)) < 0)
+ return ret;
+
+ pkt->size = aacplusEncEncode(s->aacplus_handle, input_buffer,
+ s->samples_input, pkt->data, pkt->size);
+ *got_packet = 1;
+ pkt->pts = frame->pts;
+ return 0;
+}
+
+static av_cold int aacPlus_encode_close(AVCodecContext *avctx)
+{
+ aacPlusAudioContext *s = avctx->priv_data;
+
+ av_freep(&avctx->extradata);
+ aacplusEncClose(s->aacplus_handle);
+
+ return 0;
+}
+
+static const AVProfile profiles[] = {
+ { FF_PROFILE_AAC_LOW, "LC" },
+ { FF_PROFILE_UNKNOWN },
+};
+
+AVCodec ff_libaacplus_encoder = {
+ .name = "libaacplus",
+ .long_name = NULL_IF_CONFIG_SMALL("libaacplus AAC+ (Advanced Audio Codec with SBR+PS)"),
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_AAC,
+ .priv_data_size = sizeof(aacPlusAudioContext),
+ .init = aacPlus_encode_init,
+ .encode2 = aacPlus_encode_frame,
+ .close = aacPlus_encode_close,
+ .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_FLT,
+ AV_SAMPLE_FMT_NONE },
+ .profiles = profiles,
+ .channel_layouts = (const uint64_t[]) { AV_CH_LAYOUT_MONO,
+ AV_CH_LAYOUT_STEREO,
+ 0 },
+};
static av_cold int dcadec_init(AVCodecContext *avctx)
{
DCADecContext *s = avctx->priv_data;
- if (avctx->flags & CODEC_FLAG_BITEXACT)
+ int flags = 0;
+
+ /* Affects only lossy DTS profiles. DTS-HD MA is always bitexact */
++ if (avctx->flags & AV_CODEC_FLAG_BITEXACT)
+ flags |= DCADEC_FLAG_CORE_BIT_EXACT;
+
+ if (avctx->request_channel_layout > 0 && avctx->request_channel_layout != AV_CH_LAYOUT_NATIVE) {
+ switch (avctx->request_channel_layout) {
+ case AV_CH_LAYOUT_STEREO:
+ case AV_CH_LAYOUT_STEREO_DOWNMIX:
+ /* libdcadec ignores the 2ch flag if used alone when no custom downmix coefficients
+ are available, silently outputting a 5.1 downmix if possible instead.
+ Using both the 2ch and 6ch flags together forces a 2ch downmix using default
+ coefficients in such cases. This matches the behavior of the 6ch flag when used
+ alone, where a 5.1 downmix is generated if possible, regardless of custom
+ coefficients being available or not. */
+ flags |= DCADEC_FLAG_KEEP_DMIX_2CH | DCADEC_FLAG_KEEP_DMIX_6CH;
+ break;
+ case AV_CH_LAYOUT_5POINT1:
+ flags |= DCADEC_FLAG_KEEP_DMIX_6CH;
+ break;
+ default:
+ av_log(avctx, AV_LOG_WARNING, "Invalid request_channel_layout\n");
+ break;
+ }
+ }
- s->ctx = dcadec_context_create(0);
+ s->ctx = dcadec_context_create(flags);
if (!s->ctx)
return AVERROR(ENOMEM);
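As the comment above notes, a stereo request has to set both KEEP_DMIX flags so libdcadec falls back to a default-coefficient 2ch downmix when the stream carries no custom coefficients. From the API user's side this is driven entirely by request_channel_layout; a hedged usage sketch (error handling omitted, helper name illustrative):

    #include "libavcodec/avcodec.h"
    #include "libavutil/channel_layout.h"

    /* Ask the libdcadec wrapper for a stereo downmix before opening the decoder. */
    static int open_dca_stereo(AVCodecContext *avctx, const AVCodec *codec)
    {
        avctx->request_channel_layout = AV_CH_LAYOUT_STEREO;
        return avcodec_open2(avctx, codec, NULL);
    }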
if (ret < 0)
goto error;
- s->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
- avpriv_float_dsp_init(&s->fdsp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
++ s->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
+ if (!s->fdsp) {
+ ret = AVERROR(ENOMEM);
+ goto error;
+ }
+
return 0;
error:
t_info.pixel_fmt = TH_PF_444;
else {
av_log(avc_context, AV_LOG_ERROR, "Unsupported pix_fmt\n");
- return -1;
+ return AVERROR(EINVAL);
}
- av_pix_fmt_get_chroma_sub_sample(avc_context->pix_fmt,
- &h->uv_hshift, &h->uv_vshift);
+ avcodec_get_chroma_sub_sample(avc_context->pix_fmt, &h->uv_hshift, &h->uv_vshift);
- if (avc_context->flags & CODEC_FLAG_QSCALE) {
+ if (avc_context->flags & AV_CODEC_FLAG_QSCALE) {
- /* to be constant with the libvorbis implementation, clip global_quality to 0 - 10
- Theora accepts a quality parameter p, which is:
- * 0 <= p <=63
- * an int value
- */
+ /* Clip global_quality in QP units to the [0 - 10] range
+ to be consistent with the libvorbis implementation.
+ Theora accepts a quality parameter which is an int value in
+ the [0 - 63] range.
+ */
t_info.quality = av_clipf(avc_context->global_quality / (float)FF_QP2LAMBDA, 0, 10) * 6.3;
t_info.target_bitrate = 0;
} else {
}
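Since global_quality is stored in lambda units (FF_QP2LAMBDA per qscale step), a request such as -q:v 8 becomes 8 on the clipped 0-10 scale and maps to 8 * 6.3 = 50 (truncated) on Theora's 0-63 scale. The same arithmetic in isolation (helper name illustrative):

    #include "libavutil/avutil.h"   /* FF_QP2LAMBDA, av_clipf */

    /* Map an FFmpeg qscale value (e.g. -q:v 8) to Theora's 0..63 quality scale. */
    static int theora_quality_from_qscale(int qscale)
    {
        int global_quality = qscale * FF_QP2LAMBDA;   /* how lavc stores -q    */
        return av_clipf(global_quality / (float)FF_QP2LAMBDA, 0, 10) * 6.3; /* 8 -> 50 */
    }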
// need to enable 2 pass (via TH_ENCCTL_2PASS_) before encoding headers
- if (avc_context->flags & CODEC_FLAG_PASS1) {
+ if (avc_context->flags & AV_CODEC_FLAG_PASS1) {
- if (get_stats(avc_context, 0))
- return -1;
+ if ((ret = get_stats(avc_context, 0)) < 0)
+ return ret;
- } else if (avc_context->flags & CODEC_FLAG_PASS2) {
+ } else if (avc_context->flags & AV_CODEC_FLAG_PASS2) {
- if (submit_stats(avc_context))
- return -1;
+ if ((ret = submit_stats(avc_context)) < 0)
+ return ret;
}
/*
// EOS, finish and get 1st pass stats if applicable
if (!frame) {
th_encode_packetout(h->t_state, 1, &o_packet);
- if (avc_context->flags & CODEC_FLAG_PASS1)
+ if (avc_context->flags & AV_CODEC_FLAG_PASS1)
- if (get_stats(avc_context, 1))
- return -1;
+ if ((ret = get_stats(avc_context, 1)) < 0)
+ return ret;
return 0;
}
t_yuv_buffer[i].data = frame->data[i];
}
- if (avc_context->flags & CODEC_FLAG_PASS2)
+ if (avc_context->flags & AV_CODEC_FLAG_PASS2)
- if (submit_stats(avc_context))
- return -1;
+ if ((ret = submit_stats(avc_context)) < 0)
+ return ret;
/* Now call into theora_encode_YUVin */
result = th_encode_ycbcr_in(h->t_state, t_yuv_buffer);
break;
}
av_log(avc_context, AV_LOG_ERROR, "theora_encode_YUVin failed (%s) [%d]\n", message, result);
- return -1;
+ return AVERROR_EXTERNAL;
}
- if (avc_context->flags & CODEC_FLAG_PASS1)
+ if (avc_context->flags & AV_CODEC_FLAG_PASS1)
- if (get_stats(avc_context, 0))
- return -1;
+ if ((ret = get_stats(avc_context, 0)) < 0)
+ return ret;
/* Pick up returned ogg_packet */
result = th_encode_packetout(h->t_state, 0, &o_packet);
twolame_set_num_channels(s->glopts, avctx->channels);
twolame_set_in_samplerate(s->glopts, avctx->sample_rate);
twolame_set_out_samplerate(s->glopts, avctx->sample_rate);
- if (avctx->flags & CODEC_FLAG_QSCALE || !avctx->bit_rate) {
+
+ if (!avctx->bit_rate)
+ avctx->bit_rate = avctx->sample_rate < 28000 ? 160000 : 384000;
+
+ if (avctx->flags & AV_CODEC_FLAG_QSCALE || !avctx->bit_rate) {
twolame_set_VBR(s->glopts, TRUE);
twolame_set_VBR_level(s->glopts,
avctx->global_quality / (float) FF_QP2LAMBDA);
}
vorbis_comment_init(&s->vc);
- if (!(avctx->flags & CODEC_FLAG_BITEXACT))
- vorbis_comment_add_tag(&s->vc, "encoder", LIBAVCODEC_IDENT);
++ if (!(avctx->flags & AV_CODEC_FLAG_BITEXACT))
+ vorbis_comment_add_tag(&s->vc, "encoder", LIBAVCODEC_IDENT);
if ((ret = vorbis_analysis_headerout(&s->vd, &s->vc, &header, &header_comm,
&header_code))) {
{
VP8Context *ctx = avctx->priv_data;
struct vpx_codec_enc_cfg enccfg = { 0 };
- vpx_codec_flags_t flags = (avctx->flags & CODEC_FLAG_PSNR) ? VPX_CODEC_USE_PSNR : 0;
+ struct vpx_codec_enc_cfg enccfg_alpha;
++ vpx_codec_flags_t flags = (avctx->flags & AV_CODEC_FLAG_PSNR) ? VPX_CODEC_USE_PSNR : 0;
int res;
+ vpx_img_fmt_t img_fmt = VPX_IMG_FMT_I420;
+#if CONFIG_LIBVPX_VP9_ENCODER
+ vpx_codec_caps_t codec_caps = vpx_codec_get_caps(iface);
+#endif
av_log(avctx, AV_LOG_INFO, "%s\n", vpx_codec_version_str());
av_log(avctx, AV_LOG_VERBOSE, "%s\n", vpx_codec_build_config());
enccfg.g_timebase.num = avctx->time_base.num;
enccfg.g_timebase.den = avctx->time_base.den;
enccfg.g_threads = avctx->thread_count;
-
- if (ctx->lag_in_frames >= 0)
- enccfg.g_lag_in_frames = ctx->lag_in_frames;
+ enccfg.g_lag_in_frames= ctx->lag_in_frames;
- if (avctx->flags & CODEC_FLAG_PASS1)
+ if (avctx->flags & AV_CODEC_FLAG_PASS1)
enccfg.g_pass = VPX_RC_FIRST_PASS;
- else if (avctx->flags & CODEC_FLAG_PASS2)
+ else if (avctx->flags & AV_CODEC_FLAG_PASS2)
enccfg.g_pass = VPX_RC_LAST_PASS;
else
enccfg.g_pass = VPX_RC_ONE_PASS;
log_encoder_error(avctx, "Error encoding frame");
return AVERROR_INVALIDDATA;
}
+
+ if (ctx->is_alpha) {
+ res = vpx_codec_encode(&ctx->encoder_alpha, rawimg_alpha, timestamp,
+ avctx->ticks_per_frame, flags, ctx->deadline);
+ if (res != VPX_CODEC_OK) {
+ log_encoder_error(avctx, "Error encoding alpha frame");
+ return AVERROR_INVALIDDATA;
+ }
+ }
+
coded_size = queue_frames(avctx, pkt);
- if (!frame && avctx->flags & CODEC_FLAG_PASS1) {
+ if (!frame && avctx->flags & AV_CODEC_FLAG_PASS1) {
unsigned int b64_size = AV_BASE64_SIZE(ctx->twopass_stats.sz);
avctx->stats_out = av_malloc(b64_size);
x4->params.b_mpeg2 = 1;
x264_param_default_mpeg2(&x4->params);
} else
-#else
- x264_param_default(&x4->params);
#endif
+ x264_param_default(&x4->params);
- x4->params.b_deblocking_filter = avctx->flags & CODEC_FLAG_LOOP_FILTER;
+ x4->params.b_deblocking_filter = avctx->flags & AV_CODEC_FLAG_LOOP_FILTER;
if (x4->preset || x4->tune)
if (x264_param_default_preset(&x4->params, x4->preset, x4->tune) < 0) {
x4->params.i_width = avctx->width;
x4->params.i_height = avctx->height;
- x4->params.vui.i_sar_width = avctx->sample_aspect_ratio.num;
- x4->params.vui.i_sar_height = avctx->sample_aspect_ratio.den;
- x4->params.i_fps_num = x4->params.i_timebase_den = avctx->time_base.den;
- x4->params.i_fps_den = x4->params.i_timebase_num = avctx->time_base.num;
+ av_reduce(&sw, &sh, avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 4096);
+ x4->params.vui.i_sar_width = sw;
+ x4->params.vui.i_sar_height = sh;
+ x4->params.i_timebase_den = avctx->time_base.den;
+ x4->params.i_timebase_num = avctx->time_base.num;
+ x4->params.i_fps_num = avctx->time_base.den;
+ x4->params.i_fps_den = avctx->time_base.num * avctx->ticks_per_frame;
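The change keeps x264's timebase (used for timestamps) identical to avctx->time_base while deriving the frame rate separately, so field-based input with ticks_per_frame = 2 no longer doubles the advertised fps; the sample aspect ratio is also reduced with av_reduce() to stay within x264's accepted range. A small sketch of the fps derivation (helper name illustrative; e.g. time_base = 1/50 with ticks_per_frame = 2 yields 50/2, i.e. 25 fps):

    #include "libavutil/rational.h"

    /* Derive the frame rate x264 should see from the codec timebase. */
    static AVRational fps_from_timebase(AVRational time_base, int ticks_per_frame)
    {
        /* time_base = 1/50, ticks_per_frame = 2  ->  50/2 == 25 fps */
        return (AVRational){ time_base.den, time_base.num * ticks_per_frame };
    }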
- x4->params.analyse.b_psnr = avctx->flags & CODEC_FLAG_PSNR;
+ x4->params.analyse.b_psnr = avctx->flags & AV_CODEC_FLAG_PSNR;
x4->params.i_threads = avctx->thread_count;
if (avctx->thread_type)
avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
avctx->color_range == AVCOL_RANGE_JPEG;
- // x264 validates the values internally
- x4->params.vui.i_colorprim = avctx->color_primaries;
- x4->params.vui.i_transfer = avctx->color_trc;
- x4->params.vui.i_colmatrix = avctx->colorspace;
+ if (avctx->colorspace != AVCOL_SPC_UNSPECIFIED)
+ x4->params.vui.i_colmatrix = avctx->colorspace;
+ if (avctx->color_primaries != AVCOL_PRI_UNSPECIFIED)
+ x4->params.vui.i_colorprim = avctx->color_primaries;
+ if (avctx->color_trc != AVCOL_TRC_UNSPECIFIED)
+ x4->params.vui.i_transfer = avctx->color_trc;
- if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER)
+ if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)
x4->params.b_repeat_headers = 0;
+ if(x4->x264opts){
+ const char *p= x4->x264opts;
+ while(p){
+ char param[256]={0}, val[256]={0};
+ if(sscanf(p, "%255[^:=]=%255[^:]", param, val) == 1){
+ OPT_STR(param, "1");
+ }else
+ OPT_STR(param, val);
+ p= strchr(p, ':');
+ p+=!!p;
+ }
+ }
+
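The restored x264opts handling accepts a ':'-separated list of key=value pairs, and a bare key with no '=' is treated as the boolean value "1" because sscanf() then matches only the first field; each pair is applied through the OPT_STR macro defined elsewhere in libx264.c. A standalone sketch of the same scan with the application step replaced by printf (parsing behaviour preserved):

    #include <stdio.h>
    #include <string.h>

    /* Echo the (key, value) pairs of an x264opts string; bare keys become "1". */
    static void parse_x264opts(const char *p)
    {
        while (p) {
            char param[256] = {0}, val[256] = {0};
            if (sscanf(p, "%255[^:=]=%255[^:]", param, val) == 1)
                printf("%s = 1\n", param);        /* flag without a value */
            else
                printf("%s = %s\n", param, val);
            p = strchr(p, ':');
            if (p)
                p++;                              /* step past ':' */
        }
    }

    int main(void)
    {
        parse_x264opts("keyint=25:bframes=3:cabac");  /* keyint=25, bframes=3, cabac=1 */
        return 0;
    }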
if (x4->x264_params) {
AVDictionary *dict = NULL;
AVDictionaryEntry *en = NULL;
x4->enc = x264_encoder_open(&x4->params);
if (!x4->enc)
- return AVERROR_UNKNOWN;
+ return AVERROR_EXTERNAL;
- if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
+ if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
x264_nal_t *nal;
uint8_t *p;
int nnal, s, i;
/* TAG: Do we have GLOBAL HEADER in AVS */
/* We Have PPS and SPS in AVS */
- if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER && 0) {
- if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
++ if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER && 0) {
xavs_nal_t *nal;
int nnal, s, i, size;
uint8_t *p;
uint16_t *intra, *inter;
int fd;
- xvid_plugin_single_t single = { 0 };
- struct xvid_ff_pass1 rc2pass1 = { 0 };
- xvid_plugin_2pass2_t rc2pass2 = { 0 };
- xvid_plugin_lumimasking_t masking_l = { 0 }; /* For lumi masking */
- xvid_plugin_lumimasking_t masking_v = { 0 }; /* For variance AQ */
- xvid_plugin_ssim_t ssim = { 0 };
- xvid_gbl_init_t xvid_gbl_init = { 0 };
- xvid_enc_create_t xvid_enc_create = { 0 };
- xvid_enc_plugin_t plugins[7];
-
- /* Bring in VOP flags from avconv command-line */
- x->vop_flags = XVID_VOP_HALFPEL; /* Bare minimum quality */
+ xvid_plugin_single_t single = { 0 };
+ struct xvid_ff_pass1 rc2pass1 = { 0 };
+ xvid_plugin_2pass2_t rc2pass2 = { 0 };
+ xvid_plugin_lumimasking_t masking_l = { 0 }; /* For lumi masking */
+ xvid_plugin_lumimasking_t masking_v = { 0 }; /* For variance AQ */
+ xvid_plugin_ssim_t ssim = { 0 };
+ xvid_gbl_init_t xvid_gbl_init = { 0 };
+ xvid_enc_create_t xvid_enc_create = { 0 };
+ xvid_enc_plugin_t plugins[4];
+
+ x->twopassfd = -1;
+
+ /* Bring in VOP flags from ffmpeg command-line */
+ x->vop_flags = XVID_VOP_HALFPEL; /* Bare minimum quality */
- if (xvid_flags & CODEC_FLAG_4MV)
+ if (xvid_flags & AV_CODEC_FLAG_4MV)
- x->vop_flags |= XVID_VOP_INTER4V; /* Level 3 */
+ x->vop_flags |= XVID_VOP_INTER4V; /* Level 3 */
if (avctx->trellis)
- x->vop_flags |= XVID_VOP_TRELLISQUANT; /* Level 5 */
+ x->vop_flags |= XVID_VOP_TRELLISQUANT; /* Level 5 */
- if (xvid_flags & CODEC_FLAG_AC_PRED)
+ if (xvid_flags & AV_CODEC_FLAG_AC_PRED)
- x->vop_flags |= XVID_VOP_HQACPRED; /* Level 6 */
+ x->vop_flags |= XVID_VOP_HQACPRED; /* Level 6 */
- if (xvid_flags & CODEC_FLAG_GRAY)
+ if (xvid_flags & AV_CODEC_FLAG_GRAY)
- x->vop_flags |= XVID_VOP_GREYSCALE;
+ x->vop_flags |= XVID_VOP_GREYSCALE;
/* Decide which ME quality setting to use */
x->me_flags = 0;
--- /dev/null
- ctx->flags |= CODEC_FLAG_BITEXACT;
+/*
+ * (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * motion test.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "config.h"
+#include "me_cmp.h"
+#include "libavutil/internal.h"
+#include "libavutil/lfg.h"
+#include "libavutil/mem.h"
+#include "libavutil/time.h"
+
+#undef printf
+
+#define WIDTH 64
+#define HEIGHT 64
+
+static uint8_t img1[WIDTH * HEIGHT];
+static uint8_t img2[WIDTH * HEIGHT];
+
+static void fill_random(uint8_t *tab, int size)
+{
+ int i;
+ AVLFG prng;
+
+ av_lfg_init(&prng, 1);
+ for(i=0;i<size;i++) {
+ tab[i] = av_lfg_get(&prng) % 256;
+ }
+}
+
+static void help(void)
+{
+ printf("motion-test [-h]\n"
+ "test motion implementations\n");
+}
+
+#define NB_ITS 500
+
+int dummy;
+
+static void test_motion(const char *name,
+ me_cmp_func test_func, me_cmp_func ref_func)
+{
+ int x, y, d1, d2, it;
+ uint8_t *ptr;
+ int64_t ti;
+ printf("testing '%s'\n", name);
+
+ /* test correctness */
+ for(it=0;it<20;it++) {
+
+ fill_random(img1, WIDTH * HEIGHT);
+ fill_random(img2, WIDTH * HEIGHT);
+
+ for(y=0;y<HEIGHT-17;y++) {
+ for(x=0;x<WIDTH-17;x++) {
+ ptr = img2 + y * WIDTH + x;
+ d1 = test_func(NULL, img1, ptr, WIDTH, 8);
+ d2 = ref_func(NULL, img1, ptr, WIDTH, 8);
+ if (d1 != d2) {
+ printf("error: mmx=%d c=%d\n", d1, d2);
+ }
+ }
+ }
+ }
+ emms_c();
+
+ /* speed test */
+ ti = av_gettime_relative();
+ d1 = 0;
+ for(it=0;it<NB_ITS;it++) {
+ for(y=0;y<HEIGHT-17;y++) {
+ for(x=0;x<WIDTH-17;x++) {
+ ptr = img2 + y * WIDTH + x;
+ d1 += test_func(NULL, img1, ptr, WIDTH, 8);
+ }
+ }
+ }
+ emms_c();
+ dummy = d1; /* avoid optimization */
+ ti = av_gettime_relative() - ti;
+
+ printf(" %0.0f kop/s\n",
+ (double)NB_ITS * (WIDTH - 16) * (HEIGHT - 16) /
+ (double)(ti / 1000.0));
+}
+
+
+int main(int argc, char **argv)
+{
+ AVCodecContext *ctx;
+ int c;
+ MECmpContext cctx, mmxctx;
+ int flags[2] = { AV_CPU_FLAG_MMX, AV_CPU_FLAG_MMXEXT };
+ int flags_size = HAVE_MMXEXT ? 2 : 1;
+
+ if (argc > 1) {
+ help();
+ return 1;
+ }
+
+ printf("ffmpeg motion test\n");
+
+ ctx = avcodec_alloc_context3(NULL);
++ ctx->flags |= AV_CODEC_FLAG_BITEXACT;
+ av_force_cpu_flags(0);
+ memset(&cctx, 0, sizeof(cctx));
+ ff_me_cmp_init(&cctx, ctx);
+ for (c = 0; c < flags_size; c++) {
+ int x;
+ av_force_cpu_flags(flags[c]);
+ memset(&mmxctx, 0, sizeof(mmxctx));
+ ff_me_cmp_init(&mmxctx, ctx);
+
+ for (x = 0; x < 2; x++) {
+ printf("%s for %dx%d pixels\n", c ? "mmx2" : "mmx",
+ x ? 8 : 16, x ? 8 : 16);
+ test_motion("mmx", mmxctx.pix_abs[x][0], cctx.pix_abs[x][0]);
+ test_motion("mmx_x2", mmxctx.pix_abs[x][1], cctx.pix_abs[x][1]);
+ test_motion("mmx_y2", mmxctx.pix_abs[x][2], cctx.pix_abs[x][2]);
+ test_motion("mmx_xy2", mmxctx.pix_abs[x][3], cctx.pix_abs[x][3]);
+ }
+ }
+ av_free(ctx);
+
+ return 0;
+}
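The new self-test follows a verify-then-benchmark pattern: av_force_cpu_flags() restricts which implementations ff_me_cmp_init() installs, so the comparison table can be filled once with the C reference and once with the MMX/MMXEXT versions, checked against each other on random 64x64 buffers and then timed over NB_ITS passes. A hedged sketch of the comparison step for any pair of me_cmp_func pointers (same call shape as the test above):

    /* Returns 1 when the optimized implementation agrees with the
     * C reference for one block position. */
    static int verify_one(me_cmp_func test, me_cmp_func ref,
                          uint8_t *blk1, uint8_t *blk2, int stride, int h)
    {
        return test(NULL, blk1, blk2, stride, h) ==
               ref(NULL, blk1, blk2, stride, h);
    }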
memset(s->last_mv, 0, sizeof(s->last_mv));
}
s->mb_intra = 1;
-#if FF_API_XVMC
-FF_DISABLE_DEPRECATION_WARNINGS
// if 1, we memcpy blocks in xvmcvideo
- if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration > 1)
+ if ((CONFIG_MPEG1_XVMC_HWACCEL || CONFIG_MPEG2_XVMC_HWACCEL) && s->pack_pblocks)
ff_xvmc_pack_pblocks(s, -1); // inter are always full blocks
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif /* FF_API_XVMC */
if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
- if (s->avctx->flags2 & CODEC_FLAG2_FAST) {
+ if (s->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
for (i = 0; i < 6; i++)
mpeg2_fast_decode_block_intra(s, *s->pblocks[i], i);
} else {
return AVERROR_INVALIDDATA;
}
-#if FF_API_XVMC
-FF_DISABLE_DEPRECATION_WARNINGS
// if 1, we memcpy blocks in xvmcvideo
- if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration > 1)
+ if ((CONFIG_MPEG1_XVMC_HWACCEL || CONFIG_MPEG2_XVMC_HWACCEL) && s->pack_pblocks)
ff_xvmc_pack_pblocks(s, cbp);
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif /* FF_API_XVMC */
if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
- if (s->avctx->flags2 & CODEC_FLAG2_FAST) {
+ if (s->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
for (i = 0; i < 6; i++) {
if (cbp & 32)
mpeg2_fast_decode_block_non_intra(s, *s->pblocks[i], i);
MpegEncContext *s = &s1->mpeg_enc_ctx;
const enum AVPixelFormat *pix_fmts;
- if (CONFIG_GRAY && (avctx->flags & CODEC_FLAG_GRAY))
-#if FF_API_XVMC
-FF_DISABLE_DEPRECATION_WARNINGS
- if (avctx->xvmc_acceleration)
- return ff_get_format(avctx, pixfmt_xvmc_mpg2_420);
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif /* FF_API_XVMC */
++ if (CONFIG_GRAY && (avctx->flags & AV_CODEC_FLAG_GRAY))
+ return AV_PIX_FMT_GRAY8;
if (s->chroma_format < 2)
- pix_fmts = mpeg12_hwaccel_pixfmt_list_420;
+ pix_fmts = avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO ?
+ mpeg1_hwaccel_pixfmt_list_420 :
+ mpeg2_hwaccel_pixfmt_list_420;
else if (s->chroma_format == 2)
pix_fmts = mpeg12_pixfmt_list_422;
else
s->codec_id =
s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
s->out_format = FMT_MPEG1;
- if (s->avctx->flags & CODEC_FLAG_LOW_DELAY)
+ s->swap_uv = 0; // AFAIK VCR2 does not have SEQ_HEADER
+ if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
s->low_delay = 1;
if (s->avctx->debug & FF_DEBUG_PICT_INFO)
}
}
}
- if (s2->pict_type == AV_PICTURE_TYPE_I || (s2->avctx->flags2 & CODEC_FLAG2_SHOW_ALL))
- if (s2->pict_type == AV_PICTURE_TYPE_I)
++ if (s2->pict_type == AV_PICTURE_TYPE_I || (s2->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL))
s->sync = 1;
if (!s2->next_picture_ptr) {
/* Skip P-frames if we do not have a reference frame or
}
}
- s->drop_frame_timecode = s->drop_frame_timecode || !!(avctx->flags2 & CODEC_FLAG2_DROP_FRAME_TIMECODE);
+ if ((avctx->width & 0xFFF) == 0 && (avctx->height & 0xFFF) == 1) {
+ av_log(avctx, AV_LOG_ERROR, "Width / Height is invalid for MPEG2\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (s->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) {
+ if ((avctx->width & 0xFFF) == 0 || (avctx->height & 0xFFF) == 0) {
+ av_log(avctx, AV_LOG_ERROR, "Width or Height are not allowed to be multiples of 4096\n"
+ "add '-strict %d' if you want to use them anyway.\n", FF_COMPLIANCE_UNOFFICIAL);
+ return AVERROR(EINVAL);
+ }
+ }
+
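The added dimension checks guard the 12-bit size fields of the MPEG-2 sequence header: only width & 0xFFF and height & 0xFFF are coded there, so dimensions whose low 12 bits come out as 0 cannot be represented and are rejected, or only admitted with '-strict unofficial'. As a worked example, 4096 & 0xFFF == 0 (not representable), while 1920 & 0xFFF == 1920 (fits as-is).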
++ s->drop_frame_timecode = s->drop_frame_timecode || !!(avctx->flags2 & AV_CODEC_FLAG2_DROP_FRAME_TIMECODE);
+ if (s->drop_frame_timecode)
+ s->tc.flags |= AV_TIMECODE_FLAG_DROPFRAME;
if (s->drop_frame_timecode && s->frame_rate_index != 4) {
av_log(avctx, AV_LOG_ERROR,
"Drop frame time code only allowed with 1001/30000 fps\n");
put_bits(&s->pb, 1, 1);
put_bits(&s->pb, 6, (uint32_t)((time_code / fps) % 60));
put_bits(&s->pb, 6, (uint32_t)((time_code % fps)));
- put_bits(&s->pb, 1, !!(s->avctx->flags & CODEC_FLAG_CLOSED_GOP) || s->intra_only || !s->gop_picture_number);
- put_bits(&s->pb, 1, !!(s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP));
++ put_bits(&s->pb, 1, !!(s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) || s->intra_only || !s->gop_picture_number);
put_bits(&s->pb, 1, 0); // broken link
}
}
put_bits(&s->pb, 1, 1);
put_bits(&s->pb, 6, seconds);
- put_bits(&s->pb, 1, !!(s->avctx->flags & CODEC_FLAG_CLOSED_GOP));
+ put_bits(&s->pb, 1, !!(s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP));
put_bits(&s->pb, 1, 0); // broken link == NO
- s->last_time_base = time / s->avctx->time_base.den;
-
ff_mpeg4_stuffing(&s->pb);
}
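Both GOP-header hunks touch the same signalling: the MPEG-1/2 GOP header and the MPEG-4 GOV each write a closed_gop bit immediately followed by a broken_link bit, and the MPEG-1/2 side now also forces the flag for intra-only encoding and for the very first GOP of the stream. A hedged restatement of the new condition:

    int closed_gop = !!(s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) ||
                     s->intra_only ||           /* every picture is intra  */
                     !s->gop_picture_number;    /* first GOP of the stream */
    put_bits(&s->pb, 1, closed_gop);
    put_bits(&s->pb, 1, 0);                     /* broken_link             */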
s->avctx = avctx;
- avpriv_float_dsp_init(&s->fdsp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
+#if USE_FLOATS
- s->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
++ s->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
+ if (!s->fdsp)
+ return AVERROR(ENOMEM);
+#endif
+
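Several decoders switch here from an embedded AVFloatDSPContext to a heap allocation, so every init path gains an ENOMEM check; the AV_CODEC_FLAG_BITEXACT bit selects bit-exact implementations for reproducible output. A hedged usage sketch outside any particular decoder (dst/src0/src1 are placeholder float buffers of 128 elements):

    #include "libavutil/float_dsp.h"
    #include "libavutil/error.h"
    #include "libavutil/mem.h"

    AVFloatDSPContext *fdsp = avpriv_float_dsp_alloc(1 /* bit exact */);
    if (!fdsp)
        return AVERROR(ENOMEM);
    fdsp->vector_fmul(dst, src0, src1, 128);   /* dst[i] = src0[i] * src1[i] */
    av_freep(&fdsp);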
ff_mpadsp_init(&s->mpadsp);
if (avctx->request_sample_fmt == OUT_FMT &&
return AVERROR(ENOMEM);
}
- if (out_format == FMT_H263 || encoding) {
+ if (out_format == FMT_H263 || encoding || avctx->debug_mv ||
- (avctx->flags2 & CODEC_FLAG2_EXPORT_MVS)) {
++ (avctx->flags2 & AV_CODEC_FLAG2_EXPORT_MVS)) {
int mv_size = 2 * (b8_array_size + 4) * sizeof(int16_t);
int ref_index_size = 4 * mb_array_size;
/**
* Print debugging info for the given picture.
*/
-void ff_print_debug_info(MpegEncContext *s, Picture *p)
+void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
+ uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
+ int *low_delay,
+ int mb_width, int mb_height, int mb_stride, int quarter_sample)
{
- if ((avctx->flags2 & CODEC_FLAG2_EXPORT_MVS) && mbtype_table && motion_val[0]) {
- AVFrame *pict;
- if (s->avctx->hwaccel || !p || !p->mb_type)
++ if ((avctx->flags2 & AV_CODEC_FLAG2_EXPORT_MVS) && mbtype_table && motion_val[0]) {
+ const int shift = 1 + quarter_sample;
+ const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
+ const int mv_stride = (mb_width << mv_sample_log2) +
+ (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
+ int mb_x, mb_y, mbcount = 0;
+
+ /* size is width * height * 2 * 4 where 2 is for directions and 4 is
+ * for the maximum number of MB (4 MB in case of IS_8x8) */
+ AVMotionVector *mvs = av_malloc_array(mb_width * mb_height, 2 * 4 * sizeof(AVMotionVector));
+ if (!mvs)
+ return;
+
+ for (mb_y = 0; mb_y < mb_height; mb_y++) {
+ for (mb_x = 0; mb_x < mb_width; mb_x++) {
+ int i, direction, mb_type = mbtype_table[mb_x + mb_y * mb_stride];
+ for (direction = 0; direction < 2; direction++) {
+ if (!USES_LIST(mb_type, direction))
+ continue;
+ if (IS_8X8(mb_type)) {
+ for (i = 0; i < 4; i++) {
+ int sx = mb_x * 16 + 4 + 8 * (i & 1);
+ int sy = mb_y * 16 + 4 + 8 * (i >> 1);
+ int xy = (mb_x * 2 + (i & 1) +
+ (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
+ int mx = (motion_val[direction][xy][0] >> shift) + sx;
+ int my = (motion_val[direction][xy][1] >> shift) + sy;
+ mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
+ }
+ } else if (IS_16X8(mb_type)) {
+ for (i = 0; i < 2; i++) {
+ int sx = mb_x * 16 + 8;
+ int sy = mb_y * 16 + 4 + 8 * i;
+ int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
+ int mx = (motion_val[direction][xy][0] >> shift);
+ int my = (motion_val[direction][xy][1] >> shift);
+
+ if (IS_INTERLACED(mb_type))
+ my *= 2;
+
+ mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
+ }
+ } else if (IS_8X16(mb_type)) {
+ for (i = 0; i < 2; i++) {
+ int sx = mb_x * 16 + 4 + 8 * i;
+ int sy = mb_y * 16 + 8;
+ int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
+ int mx = motion_val[direction][xy][0] >> shift;
+ int my = motion_val[direction][xy][1] >> shift;
+
+ if (IS_INTERLACED(mb_type))
+ my *= 2;
+
+ mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
+ }
+ } else {
+ int sx = mb_x * 16 + 8;
+ int sy = mb_y * 16 + 8;
+ int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
+ int mx = (motion_val[direction][xy][0]>>shift) + sx;
+ int my = (motion_val[direction][xy][1]>>shift) + sy;
+ mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
+ }
+ }
+ }
+ }
+
+ if (mbcount) {
+ AVFrameSideData *sd;
+
+ av_log(avctx, AV_LOG_DEBUG, "Adding %d MVs info to frame %d\n", mbcount, avctx->frame_number);
+ sd = av_frame_new_side_data(pict, AV_FRAME_DATA_MOTION_VECTORS, mbcount * sizeof(AVMotionVector));
+ if (!sd) {
+ av_freep(&mvs);
+ return;
+ }
+ memcpy(sd->data, mvs, mbcount * sizeof(AVMotionVector));
+ }
+
+ av_freep(&mvs);
+ }
+
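The exported vectors land on the decoded frame as AV_FRAME_DATA_MOTION_VECTORS side data, one AVMotionVector per block, which is why the scratch array is sized mb_width * mb_height * 2 * 4 (two directions, up to four sub-blocks per macroblock). A hedged consumer sketch, error handling omitted:

    #include <stdio.h>
    #include <libavutil/frame.h>
    #include <libavutil/motion_vector.h>

    AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
    if (sd) {
        const AVMotionVector *mv = (const AVMotionVector *)sd->data;
        int nb = sd->size / sizeof(*mv);
        for (int i = 0; i < nb; i++)
            printf("source=%d %dx%d (%d,%d) -> (%d,%d)\n",
                   mv[i].source, mv[i].w, mv[i].h,
                   mv[i].src_x, mv[i].src_y, mv[i].dst_x, mv[i].dst_y);
    }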
+ /* TODO: export all the following to make them accessible for users (and filters) */
+ if (avctx->hwaccel || !mbtype_table
+ || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
return;
- pict = p->f;
- if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
+
+ if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
int x,y;
- av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
- switch (pict->pict_type) {
- case AV_PICTURE_TYPE_I:
- av_log(s->avctx,AV_LOG_DEBUG,"I\n");
- break;
- case AV_PICTURE_TYPE_P:
- av_log(s->avctx,AV_LOG_DEBUG,"P\n");
- break;
- case AV_PICTURE_TYPE_B:
- av_log(s->avctx,AV_LOG_DEBUG,"B\n");
- break;
- case AV_PICTURE_TYPE_S:
- av_log(s->avctx,AV_LOG_DEBUG,"S\n");
- break;
- case AV_PICTURE_TYPE_SI:
- av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
- break;
- case AV_PICTURE_TYPE_SP:
- av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
- break;
- }
- for (y = 0; y < s->mb_height; y++) {
- for (x = 0; x < s->mb_width; x++) {
- if (s->avctx->debug & FF_DEBUG_SKIP) {
- int count = s->mbskip_table[x + y * s->mb_stride];
+ av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
+ av_get_picture_type_char(pict->pict_type));
+ for (y = 0; y < mb_height; y++) {
+ for (x = 0; x < mb_width; x++) {
+ if (avctx->debug & FF_DEBUG_SKIP) {
+ int count = mbskip_table ? mbskip_table[x + y * mb_stride] : 0;
if (count > 9)
count = 9;
- av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
+ av_log(avctx, AV_LOG_DEBUG, "%1d", count);
}
- if (s->avctx->debug & FF_DEBUG_QP) {
- av_log(s->avctx, AV_LOG_DEBUG, "%2d",
- p->qscale_table[x + y * s->mb_stride]);
+ if (avctx->debug & FF_DEBUG_QP) {
+ av_log(avctx, AV_LOG_DEBUG, "%2d",
+ qscale_table[x + y * mb_stride]);
}
- if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
- int mb_type = p->mb_type[x + y * s->mb_stride];
+ if (avctx->debug & FF_DEBUG_MB_TYPE) {
+ int mb_type = mbtype_table[x + y * mb_stride];
// Type & MV direction
if (IS_PCM(mb_type))
- av_log(s->avctx, AV_LOG_DEBUG, "P");
+ av_log(avctx, AV_LOG_DEBUG, "P");
else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
- av_log(s->avctx, AV_LOG_DEBUG, "A");
+ av_log(avctx, AV_LOG_DEBUG, "A");
else if (IS_INTRA4x4(mb_type))
- av_log(s->avctx, AV_LOG_DEBUG, "i");
+ av_log(avctx, AV_LOG_DEBUG, "i");
else if (IS_INTRA16x16(mb_type))
- av_log(s->avctx, AV_LOG_DEBUG, "I");
+ av_log(avctx, AV_LOG_DEBUG, "I");
else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
- av_log(s->avctx, AV_LOG_DEBUG, "d");
+ av_log(avctx, AV_LOG_DEBUG, "d");
else if (IS_DIRECT(mb_type))
- av_log(s->avctx, AV_LOG_DEBUG, "D");
+ av_log(avctx, AV_LOG_DEBUG, "D");
else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
- av_log(s->avctx, AV_LOG_DEBUG, "g");
+ av_log(avctx, AV_LOG_DEBUG, "g");
else if (IS_GMC(mb_type))
- av_log(s->avctx, AV_LOG_DEBUG, "G");
+ av_log(avctx, AV_LOG_DEBUG, "G");
else if (IS_SKIP(mb_type))
- av_log(s->avctx, AV_LOG_DEBUG, "S");
+ av_log(avctx, AV_LOG_DEBUG, "S");
else if (!USES_LIST(mb_type, 1))
- av_log(s->avctx, AV_LOG_DEBUG, ">");
+ av_log(avctx, AV_LOG_DEBUG, ">");
else if (!USES_LIST(mb_type, 0))
- av_log(s->avctx, AV_LOG_DEBUG, "<");
+ av_log(avctx, AV_LOG_DEBUG, "<");
else {
- assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
- av_log(s->avctx, AV_LOG_DEBUG, "X");
+ av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
+ av_log(avctx, AV_LOG_DEBUG, "X");
}
// segmentation
if (IS_INTERLACED(mb_type))
- av_log(s->avctx, AV_LOG_DEBUG, "=");
+ av_log(avctx, AV_LOG_DEBUG, "=");
else
- av_log(s->avctx, AV_LOG_DEBUG, " ");
+ av_log(avctx, AV_LOG_DEBUG, " ");
+ }
+ }
+ av_log(avctx, AV_LOG_DEBUG, "\n");
+ }
+ }
+
+ if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
+ (avctx->debug_mv)) {
+ int mb_y;
+ int i;
+ int h_chroma_shift, v_chroma_shift, block_height;
+#if FF_API_VISMV
+ const int shift = 1 + quarter_sample;
+ uint8_t *ptr;
+ const int width = avctx->width;
+ const int height = avctx->height;
+#endif
+ const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
+ const int mv_stride = (mb_width << mv_sample_log2) +
+ (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
+
+ *low_delay = 0; // needed to see the vectors without trashing the buffers
+
+ avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
+
+ av_frame_make_writable(pict);
+
+ pict->opaque = NULL;
+#if FF_API_VISMV
+ ptr = pict->data[0];
+#endif
+ block_height = 16 >> v_chroma_shift;
+
+ for (mb_y = 0; mb_y < mb_height; mb_y++) {
+ int mb_x;
+ for (mb_x = 0; mb_x < mb_width; mb_x++) {
+ const int mb_index = mb_x + mb_y * mb_stride;
+#if FF_API_VISMV
+ if ((avctx->debug_mv) && motion_val[0]) {
+ int type;
+ for (type = 0; type < 3; type++) {
+ int direction = 0;
+ switch (type) {
+ case 0:
+ if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
+ (pict->pict_type!= AV_PICTURE_TYPE_P))
+ continue;
+ direction = 0;
+ break;
+ case 1:
+ if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
+ (pict->pict_type!= AV_PICTURE_TYPE_B))
+ continue;
+ direction = 0;
+ break;
+ case 2:
+ if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
+ (pict->pict_type!= AV_PICTURE_TYPE_B))
+ continue;
+ direction = 1;
+ break;
+ }
+ if (!USES_LIST(mbtype_table[mb_index], direction))
+ continue;
+
+ if (IS_8X8(mbtype_table[mb_index])) {
+ int i;
+ for (i = 0; i < 4; i++) {
+ int sx = mb_x * 16 + 4 + 8 * (i & 1);
+ int sy = mb_y * 16 + 4 + 8 * (i >> 1);
+ int xy = (mb_x * 2 + (i & 1) +
+ (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
+ int mx = (motion_val[direction][xy][0] >> shift) + sx;
+ int my = (motion_val[direction][xy][1] >> shift) + sy;
+ draw_arrow(ptr, sx, sy, mx, my, width,
+ height, pict->linesize[0], 100, 0, direction);
+ }
+ } else if (IS_16X8(mbtype_table[mb_index])) {
+ int i;
+ for (i = 0; i < 2; i++) {
+ int sx = mb_x * 16 + 8;
+ int sy = mb_y * 16 + 4 + 8 * i;
+ int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
+ int mx = (motion_val[direction][xy][0] >> shift);
+ int my = (motion_val[direction][xy][1] >> shift);
+
+ if (IS_INTERLACED(mbtype_table[mb_index]))
+ my *= 2;
+
+ draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
+ height, pict->linesize[0], 100, 0, direction);
+ }
+ } else if (IS_8X16(mbtype_table[mb_index])) {
+ int i;
+ for (i = 0; i < 2; i++) {
+ int sx = mb_x * 16 + 4 + 8 * i;
+ int sy = mb_y * 16 + 8;
+ int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
+ int mx = motion_val[direction][xy][0] >> shift;
+ int my = motion_val[direction][xy][1] >> shift;
+
+ if (IS_INTERLACED(mbtype_table[mb_index]))
+ my *= 2;
+
+ draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
+ height, pict->linesize[0], 100, 0, direction);
+ }
+ } else {
+ int sx= mb_x * 16 + 8;
+ int sy= mb_y * 16 + 8;
+ int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
+ int mx= (motion_val[direction][xy][0]>>shift) + sx;
+ int my= (motion_val[direction][xy][1]>>shift) + sy;
+ draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100, 0, direction);
+ }
+ }
+ }
+#endif
+ if ((avctx->debug & FF_DEBUG_VIS_QP)) {
+ uint64_t c = (qscale_table[mb_index] * 128 / 31) *
+ 0x0101010101010101ULL;
+ int y;
+ for (y = 0; y < block_height; y++) {
+ *(uint64_t *)(pict->data[1] + 8 * mb_x +
+ (block_height * mb_y + y) *
+ pict->linesize[1]) = c;
+ *(uint64_t *)(pict->data[2] + 8 * mb_x +
+ (block_height * mb_y + y) *
+ pict->linesize[2]) = c;
+ }
+ }
+ if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
+ motion_val[0]) {
+ int mb_type = mbtype_table[mb_index];
+ uint64_t u,v;
+ int y;
+#define COLOR(theta, r) \
+ u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
+ v = (int)(128 + r * sin(theta * 3.141592 / 180));
+
+
+ u = v = 128;
+ if (IS_PCM(mb_type)) {
+ COLOR(120, 48)
+ } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
+ IS_INTRA16x16(mb_type)) {
+ COLOR(30, 48)
+ } else if (IS_INTRA4x4(mb_type)) {
+ COLOR(90, 48)
+ } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
+ // COLOR(120, 48)
+ } else if (IS_DIRECT(mb_type)) {
+ COLOR(150, 48)
+ } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
+ COLOR(170, 48)
+ } else if (IS_GMC(mb_type)) {
+ COLOR(190, 48)
+ } else if (IS_SKIP(mb_type)) {
+ // COLOR(180, 48)
+ } else if (!USES_LIST(mb_type, 1)) {
+ COLOR(240, 48)
+ } else if (!USES_LIST(mb_type, 0)) {
+ COLOR(0, 48)
+ } else {
+ av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
+ COLOR(300,48)
+ }
+
+ u *= 0x0101010101010101ULL;
+ v *= 0x0101010101010101ULL;
+ for (y = 0; y < block_height; y++) {
+ *(uint64_t *)(pict->data[1] + 8 * mb_x +
+ (block_height * mb_y + y) * pict->linesize[1]) = u;
+ *(uint64_t *)(pict->data[2] + 8 * mb_x +
+ (block_height * mb_y + y) * pict->linesize[2]) = v;
+ }
+
+ // segmentation
+ if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
+ *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
+ (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
+ *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
+ (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
+ }
+ if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
+ for (y = 0; y < 16; y++)
+ pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
+ pict->linesize[0]] ^= 0x80;
+ }
+ if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
+ int dm = 1 << (mv_sample_log2 - 2);
+ for (i = 0; i < 4; i++) {
+ int sx = mb_x * 16 + 8 * (i & 1);
+ int sy = mb_y * 16 + 8 * (i >> 1);
+ int xy = (mb_x * 2 + (i & 1) +
+ (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
+ // FIXME bidir
+ int32_t *mv = (int32_t *) &motion_val[0][xy];
+ if (mv[0] != mv[dm] ||
+ mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
+ for (y = 0; y < 8; y++)
+ pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
+ if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
+ *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
+ pict->linesize[0]) ^= 0x8080808080808080ULL;
+ }
+ }
+
+ if (IS_INTERLACED(mb_type) &&
+ avctx->codec->id == AV_CODEC_ID_H264) {
+ // hmm
+ }
+ }
+ if (mbskip_table)
+ mbskip_table[mb_index] = 0;
+ }
+ }
+ }
+}
+
+void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
+{
+ ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
+ p->qscale_table, p->motion_val, &s->low_delay,
+ s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
+}
+
+int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
+{
+ AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
+ int offset = 2*s->mb_stride + 1;
+ if(!ref)
+ return AVERROR(ENOMEM);
+ av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
+ ref->size -= offset;
+ ref->data += offset;
+ return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
+}
+
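ff_mpv_export_qp_table wraps the per-macroblock qscale buffer in a fresh AVBufferRef, skips what looks like the edge padding (offset 2 * mb_stride + 1) and attaches the rest to the output frame via av_frame_set_qp_table(). On the receiving side the table can be read back with the matching accessor; a hedged sketch:

    #include <stdio.h>
    #include <libavutil/frame.h>

    int stride, type;
    int8_t *qp = av_frame_get_qp_table(frame, &stride, &type);
    if (qp) {
        int mb_w = (frame->width  + 15) / 16;
        int mb_h = (frame->height + 15) / 16;
        for (int y = 0; y < mb_h; y++)
            for (int x = 0; x < mb_w; x++)
                printf("%2d ", qp[x + y * stride]);   /* per-MB quantiser */
    }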
+static inline int hpel_motion_lowres(MpegEncContext *s,
+ uint8_t *dest, uint8_t *src,
+ int field_based, int field_select,
+ int src_x, int src_y,
+ int width, int height, ptrdiff_t stride,
+ int h_edge_pos, int v_edge_pos,
+ int w, int h, h264_chroma_mc_func *pix_op,
+ int motion_x, int motion_y)
+{
+ const int lowres = s->avctx->lowres;
+ const int op_index = FFMIN(lowres, 3);
+ const int s_mask = (2 << lowres) - 1;
+ int emu = 0;
+ int sx, sy;
+
+ if (s->quarter_sample) {
+ motion_x /= 2;
+ motion_y /= 2;
+ }
+
+ sx = motion_x & s_mask;
+ sy = motion_y & s_mask;
+ src_x += motion_x >> lowres + 1;
+ src_y += motion_y >> lowres + 1;
+
+ src += src_y * stride + src_x;
+
+ if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
+ (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
+ s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
+ s->linesize, s->linesize,
+ w + 1, (h + 1) << field_based,
+ src_x, src_y << field_based,
+ h_edge_pos, v_edge_pos);
+ src = s->sc.edge_emu_buffer;
+ emu = 1;
+ }
+
+ sx = (sx << 2) >> lowres;
+ sy = (sy << 2) >> lowres;
+ if (field_select)
+ src += s->linesize;
+ pix_op[op_index](dest, src, stride, h, sx, sy);
+ return emu;
+}
+
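The lowres fixed-point handling is easier to follow with numbers. With lowres = 1 one output sample covers two full-resolution samples, so a half-pel vector has four phases per low-res sample: s_mask = (2 << 1) - 1 = 3, the integer step is motion_x >> (lowres + 1) = motion_x >> 2, and the fraction is rescaled with (sx << 2) >> lowres into the 0..7 phase range that the H.264 chroma MC routines expect. For motion_x = 7 this gives an integer step of 1 and a phase of (7 & 3) = 3, rescaled to (3 << 2) >> 1 = 6.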
+/* apply one mpeg motion vector to the three components */
+static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
+ uint8_t *dest_y,
+ uint8_t *dest_cb,
+ uint8_t *dest_cr,
+ int field_based,
+ int bottom_field,
+ int field_select,
+ uint8_t **ref_picture,
+ h264_chroma_mc_func *pix_op,
+ int motion_x, int motion_y,
+ int h, int mb_y)
+{
+ uint8_t *ptr_y, *ptr_cb, *ptr_cr;
+ int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
+ ptrdiff_t uvlinesize, linesize;
+ const int lowres = s->avctx->lowres;
+ const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
+ const int block_s = 8>>lowres;
+ const int s_mask = (2 << lowres) - 1;
+ const int h_edge_pos = s->h_edge_pos >> lowres;
+ const int v_edge_pos = s->v_edge_pos >> lowres;
+ linesize = s->current_picture.f->linesize[0] << field_based;
+ uvlinesize = s->current_picture.f->linesize[1] << field_based;
+
+ // FIXME obviously not perfect but qpel will not work in lowres anyway
+ if (s->quarter_sample) {
+ motion_x /= 2;
+ motion_y /= 2;
+ }
+
+ if(field_based){
+ motion_y += (bottom_field - field_select)*((1 << lowres)-1);
+ }
+
+ sx = motion_x & s_mask;
+ sy = motion_y & s_mask;
+ src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
+ src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
+
+ if (s->out_format == FMT_H263) {
+ uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
+ uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
+ uvsrc_x = src_x >> 1;
+ uvsrc_y = src_y >> 1;
+ } else if (s->out_format == FMT_H261) {
+ // even chroma mv's are full pel in H261
+ mx = motion_x / 4;
+ my = motion_y / 4;
+ uvsx = (2 * mx) & s_mask;
+ uvsy = (2 * my) & s_mask;
+ uvsrc_x = s->mb_x * block_s + (mx >> lowres);
+ uvsrc_y = mb_y * block_s + (my >> lowres);
+ } else {
+ if(s->chroma_y_shift){
+ mx = motion_x / 2;
+ my = motion_y / 2;
+ uvsx = mx & s_mask;
+ uvsy = my & s_mask;
+ uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
+ uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
+ } else {
+ if(s->chroma_x_shift){
+ //Chroma422
+ mx = motion_x / 2;
+ uvsx = mx & s_mask;
+ uvsy = motion_y & s_mask;
+ uvsrc_y = src_y;
+ uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
+ } else {
+ //Chroma444
+ uvsx = motion_x & s_mask;
+ uvsy = motion_y & s_mask;
+ uvsrc_x = src_x;
+ uvsrc_y = src_y;
+ }
+ }
+ }
+
+ ptr_y = ref_picture[0] + src_y * linesize + src_x;
+ ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
+ ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
+
+ if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
+ (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
+ s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
+ linesize >> field_based, linesize >> field_based,
+ 17, 17 + field_based,
+ src_x, src_y << field_based, h_edge_pos,
+ v_edge_pos);
+ ptr_y = s->sc.edge_emu_buffer;
- if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
++ if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
+ uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
+ uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
+ s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
+ uvlinesize >> field_based, uvlinesize >> field_based,
+ 9, 9 + field_based,
+ uvsrc_x, uvsrc_y << field_based,
+ h_edge_pos >> 1, v_edge_pos >> 1);
+ s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
+ uvlinesize >> field_based,uvlinesize >> field_based,
+ 9, 9 + field_based,
+ uvsrc_x, uvsrc_y << field_based,
+ h_edge_pos >> 1, v_edge_pos >> 1);
+ ptr_cb = ubuf;
+ ptr_cr = vbuf;
+ }
+ }
+
+ // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
+ if (bottom_field) {
+ dest_y += s->linesize;
+ dest_cb += s->uvlinesize;
+ dest_cr += s->uvlinesize;
+ }
+
+ if (field_select) {
+ ptr_y += s->linesize;
+ ptr_cb += s->uvlinesize;
+ ptr_cr += s->uvlinesize;
+ }
+
+ sx = (sx << 2) >> lowres;
+ sy = (sy << 2) >> lowres;
+ pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
+
- if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
++ if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
+ int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
+ uvsx = (uvsx << 2) >> lowres;
+ uvsy = (uvsy << 2) >> lowres;
+ if (hc) {
+ pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
+ pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
+ }
+ }
+ // FIXME h261 lowres loop filter
+}
+
+static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
+ uint8_t *dest_cb, uint8_t *dest_cr,
+ uint8_t **ref_picture,
+ h264_chroma_mc_func * pix_op,
+ int mx, int my)
+{
+ const int lowres = s->avctx->lowres;
+ const int op_index = FFMIN(lowres, 3);
+ const int block_s = 8 >> lowres;
+ const int s_mask = (2 << lowres) - 1;
+ const int h_edge_pos = s->h_edge_pos >> lowres + 1;
+ const int v_edge_pos = s->v_edge_pos >> lowres + 1;
+ int emu = 0, src_x, src_y, sx, sy;
+ ptrdiff_t offset;
+ uint8_t *ptr;
+
+ if (s->quarter_sample) {
+ mx /= 2;
+ my /= 2;
+ }
+
+ /* In case of 8X8, we construct a single chroma motion vector
+ with a special rounding */
+ mx = ff_h263_round_chroma(mx);
+ my = ff_h263_round_chroma(my);
+
+ sx = mx & s_mask;
+ sy = my & s_mask;
+ src_x = s->mb_x * block_s + (mx >> lowres + 1);
+ src_y = s->mb_y * block_s + (my >> lowres + 1);
+
+ offset = src_y * s->uvlinesize + src_x;
+ ptr = ref_picture[1] + offset;
+ if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
+ (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
+ s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
+ s->uvlinesize, s->uvlinesize,
+ 9, 9,
+ src_x, src_y, h_edge_pos, v_edge_pos);
+ ptr = s->sc.edge_emu_buffer;
+ emu = 1;
+ }
+ sx = (sx << 2) >> lowres;
+ sy = (sy << 2) >> lowres;
+ pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
+
+ ptr = ref_picture[2] + offset;
+ if (emu) {
+ s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
+ s->uvlinesize, s->uvlinesize,
+ 9, 9,
+ src_x, src_y, h_edge_pos, v_edge_pos);
+ ptr = s->sc.edge_emu_buffer;
+ }
+ pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
+}
+
+/**
+ * motion compensation of a single macroblock
+ * @param s context
+ * @param dest_y luma destination pointer
+ * @param dest_cb chroma cb/u destination pointer
+ * @param dest_cr chroma cr/v destination pointer
+ * @param dir direction (0->forward, 1->backward)
+ * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
+ * @param pix_op halfpel motion compensation function (average or put normally)
+ * the motion vectors are taken from s->mv and the MV type from s->mv_type
+ */
+static inline void MPV_motion_lowres(MpegEncContext *s,
+ uint8_t *dest_y, uint8_t *dest_cb,
+ uint8_t *dest_cr,
+ int dir, uint8_t **ref_picture,
+ h264_chroma_mc_func *pix_op)
+{
+ int mx, my;
+ int mb_x, mb_y, i;
+ const int lowres = s->avctx->lowres;
+ const int block_s = 8 >>lowres;
+
+ mb_x = s->mb_x;
+ mb_y = s->mb_y;
+
+ switch (s->mv_type) {
+ case MV_TYPE_16X16:
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 0, 0, 0,
+ ref_picture, pix_op,
+ s->mv[dir][0][0], s->mv[dir][0][1],
+ 2 * block_s, mb_y);
+ break;
+ case MV_TYPE_8X8:
+ mx = 0;
+ my = 0;
+ for (i = 0; i < 4; i++) {
+ hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
+ s->linesize) * block_s,
+ ref_picture[0], 0, 0,
+ (2 * mb_x + (i & 1)) * block_s,
+ (2 * mb_y + (i >> 1)) * block_s,
+ s->width, s->height, s->linesize,
+ s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
+ block_s, block_s, pix_op,
+ s->mv[dir][i][0], s->mv[dir][i][1]);
+
+ mx += s->mv[dir][i][0];
+ my += s->mv[dir][i][1];
+ }
+
- if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY))
++ if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
+ chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
+ pix_op, mx, my);
+ break;
+ case MV_TYPE_FIELD:
+ if (s->picture_structure == PICT_FRAME) {
+ /* top field */
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 1, 0, s->field_select[dir][0],
+ ref_picture, pix_op,
+ s->mv[dir][0][0], s->mv[dir][0][1],
+ block_s, mb_y);
+ /* bottom field */
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 1, 1, s->field_select[dir][1],
+ ref_picture, pix_op,
+ s->mv[dir][1][0], s->mv[dir][1][1],
+ block_s, mb_y);
+ } else {
+ if (s->picture_structure != s->field_select[dir][0] + 1 &&
+ s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
+ ref_picture = s->current_picture_ptr->f->data;
+
+ }
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 0, 0, s->field_select[dir][0],
+ ref_picture, pix_op,
+ s->mv[dir][0][0],
+ s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
+ }
+ break;
+ case MV_TYPE_16X8:
+ for (i = 0; i < 2; i++) {
+ uint8_t **ref2picture;
+
+ if (s->picture_structure == s->field_select[dir][i] + 1 ||
+ s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
+ ref2picture = ref_picture;
+ } else {
+ ref2picture = s->current_picture_ptr->f->data;
+ }
+
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 0, 0, s->field_select[dir][i],
+ ref2picture, pix_op,
+ s->mv[dir][i][0], s->mv[dir][i][1] +
+ 2 * block_s * i, block_s, mb_y >> 1);
+
+ dest_y += 2 * block_s * s->linesize;
+ dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
+ dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
+ }
+ break;
+ case MV_TYPE_DMV:
+ if (s->picture_structure == PICT_FRAME) {
+ for (i = 0; i < 2; i++) {
+ int j;
+ for (j = 0; j < 2; j++) {
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 1, j, j ^ i,
+ ref_picture, pix_op,
+ s->mv[dir][2 * i + j][0],
+ s->mv[dir][2 * i + j][1],
+ block_s, mb_y);
+ }
+ pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
+ }
+ } else {
+ for (i = 0; i < 2; i++) {
+ mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+ 0, 0, s->picture_structure != i + 1,
+ ref_picture, pix_op,
+ s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
+ 2 * block_s, mb_y >> 1);
+
+ // after put we make avg of the same block
+ pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
+
+ // opposite parity is always in the same
+ // frame if this is second field
+ if (!s->first_field) {
+ ref_picture = s->current_picture_ptr->f->data;
}
}
- av_log(s->avctx, AV_LOG_DEBUG, "\n");
}
+ break;
+ default:
+ av_assert2(0);
}
}
else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
s->mbintra_table[mb_xy]=1;
- if ((s->avctx->flags & CODEC_FLAG_PSNR) || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor ||
- if ((s->avctx->flags & AV_CODEC_FLAG_PSNR) ||
++ if ((s->avctx->flags & AV_CODEC_FLAG_PSNR) || s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor ||
!(s->encoding && (s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc
uint8_t *dest_y, *dest_cb, *dest_cr;
skip_idct:
if(!readable){
s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
- if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
- s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
- s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
++ if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
+ s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
+ s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
+ }
}
}
}
(s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
!s->fixed_qscale;
- s->loop_filter = !!(s->avctx->flags & CODEC_FLAG_LOOP_FILTER);
+ s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
- av_log(avctx, AV_LOG_ERROR,
- "a vbv buffer size is needed, "
- "for encoding with a maximum bitrate\n");
+ switch(avctx->codec_id) {
+ case AV_CODEC_ID_MPEG1VIDEO:
+ case AV_CODEC_ID_MPEG2VIDEO:
+ avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
+ break;
+ case AV_CODEC_ID_MPEG4:
+ case AV_CODEC_ID_MSMPEG4V1:
+ case AV_CODEC_ID_MSMPEG4V2:
+ case AV_CODEC_ID_MSMPEG4V3:
+ if (avctx->rc_max_rate >= 15000000) {
+ avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
+ } else if(avctx->rc_max_rate >= 2000000) {
+ avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
+ } else if(avctx->rc_max_rate >= 384000) {
+ avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
+ } else
+ avctx->rc_buffer_size = 40;
+ avctx->rc_buffer_size *= 16384;
+ break;
+ }
+ if (avctx->rc_buffer_size) {
+ av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
+ }
+ }
+
+ if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
+ av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
return -1;
}
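Instead of failing when only a maximum bitrate is given, the encoder now derives a VBV buffer size. For MPEG-1/2 the expression FFMAX(rc_max_rate, 15000000) * 112 / 15000000 * 16384 reproduces the classic 112 * 16384 = 1835008-bit buffer at 15 Mb/s (logged as 1835008 / 8192 = 224 kbyte) and scales linearly above that rate. The MPEG-4/MSMPEG-4 branch interpolates between fixed sizes of 40, 80, 320 and 760 units of 16384 bits at 384 kb/s, 2 Mb/s, 15 Mb/s and 38.4 Mb/s respectively. The hard error is kept only for the case where exactly one of rc_max_rate and rc_buffer_size is set.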
return -1;
}
- if ((s->avctx->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
+ if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
+ (avctx->width > 16383 ||
+ avctx->height > 16383 )) {
+ av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
+ return -1;
+ }
+
+ if (s->codec_id == AV_CODEC_ID_RV10 &&
+ (avctx->width &15 ||
+ avctx->height&15 )) {
+ av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (s->codec_id == AV_CODEC_ID_RV20 &&
+ (avctx->width &3 ||
+ avctx->height&3 )) {
+ av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
+ return AVERROR(EINVAL);
+ }
+
+ if ((s->codec_id == AV_CODEC_ID_WMV1 ||
+ s->codec_id == AV_CODEC_ID_WMV2) &&
+ avctx->width & 1) {
+ av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
+ return -1;
+ }
+
+ if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
return -1;
} else {
int b_frames;
- if (s->avctx->flags & CODEC_FLAG_PASS2) {
- if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
- if (s->picture_in_gop_number < s->gop_size &&
- skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
- // FIXME check that te gop check above is +-1 correct
- av_frame_unref(s->input_picture[0]->f);
-
- emms_c();
- ff_vbv_update(s, 0);
-
- goto no_output_pic;
- }
- }
-
+ if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
for (i = 0; i < s->max_b_frames + 1; i++) {
int pict_num = s->input_picture[0]->f->display_picture_number + i;
goto vbv_retry;
}
- assert(s->avctx->rc_max_rate);
+ av_assert0(s->avctx->rc_max_rate);
}
- if (s->avctx->flags & CODEC_FLAG_PASS1)
+ if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
ff_write_pass1_stats(s);
for (i = 0; i < 4; i++) {
s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
s->linesize, s->linesize,
17, 17 + field_based,
- src_x, src_y << field_based,
+ src_x, src_y,
s->h_edge_pos, s->v_edge_pos);
ptr_y = s->sc.edge_emu_buffer;
- if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
+ if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
- uint8_t *uvbuf = s->sc.edge_emu_buffer + 18 * s->linesize;
- s->vdsp.emulated_edge_mc(uvbuf, ptr_cb,
+ uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
+ uint8_t *vbuf = ubuf + 9 * s->uvlinesize;
+ uvsrc_y = (unsigned)uvsrc_y << field_based;
+ s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
s->uvlinesize, s->uvlinesize,
9, 9 + field_based,
- uvsrc_x, uvsrc_y << field_based,
+ uvsrc_x, uvsrc_y,
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
- s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr,
+ s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
s->uvlinesize, s->uvlinesize,
9, 9 + field_based,
- uvsrc_x, uvsrc_y << field_based,
+ uvsrc_x, uvsrc_y,
s->h_edge_pos >> 1, s->v_edge_pos >> 1);
- ptr_cb = uvbuf;
- ptr_cr = uvbuf + 16;
+ ptr_cb = ubuf;
+ ptr_cr = vbuf;
}
}
src_x, src_y << field_based,
s->h_edge_pos, s->v_edge_pos);
ptr_y = s->sc.edge_emu_buffer;
- if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
+ if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
- uint8_t *uvbuf = s->sc.edge_emu_buffer + 18 * s->linesize;
- s->vdsp.emulated_edge_mc(uvbuf, ptr_cb,
+ uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
+ uint8_t *vbuf = ubuf + 9 * s->uvlinesize;
+ s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
s->uvlinesize, s->uvlinesize,
9, 9 + field_based,
uvsrc_x, uvsrc_y << field_based,
av_lfg_init(&s->random_state, 0);
ff_mdct_init(&s->imdct_ctx, 8, 1, 1.0);
- s->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
- avpriv_float_dsp_init(&s->fdsp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
++ s->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
+ if (!s->fdsp)
+ return AVERROR(ENOMEM);
s->scale_bias = 1.0/(32768*8);
avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
s->avctx = avctx;
if ((ret = ff_mdct_init(&s->mdct_ctx, 8, 0, 32768.0)) < 0)
goto error;
- s->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
- avpriv_float_dsp_init(&s->fdsp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
++ s->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
+ if (!s->fdsp) {
+ ret = AVERROR(ENOMEM);
+ goto error;
+ }
/* Generate overlap window */
- ff_sine_window_init(ff_sine_128, 128);
+ ff_init_ff_sine_windows(7);
for (i = 0; i < POW_TABLE_SIZE; i++)
- pow_table[i] = -pow(2, -i / 2048.0 - 3.0 + POW_TABLE_OFFSET);
+ pow_table[i] = pow(2, -i / 2048.0 - 3.0 + POW_TABLE_OFFSET);
if (s->avctx->trellis) {
s->opt = av_malloc(NELLY_BANDS * OPT_SIZE * sizeof(float ));
return 0;
}
-static int nvenc_setup_hevc_config(AVCodecContext *avctx)
+static av_cold int nvenc_dyload_nvenc(AVCodecContext *avctx)
{
- NVENCContext *ctx = avctx->priv_data;
- NV_ENC_CONFIG *cc = &ctx->config;
- NV_ENC_CONFIG_HEVC *hevc = &cc->encodeCodecConfig.hevcConfig;
+ PNVENCODEAPICREATEINSTANCE nvEncodeAPICreateInstance = 0;
+ NVENCSTATUS nvstatus;
- hevc->disableSPSPPS = (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) ? 1 : 0;
- hevc->repeatSPSPPS = (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) ? 0 : 1;
+ NvencContext *ctx = avctx->priv_data;
+ NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
- hevc->maxNumRefFramesInDPB = avctx->refs;
- hevc->idrPeriod = cc->gopLength;
+ if (!nvenc_check_cuda(avctx))
+ return 0;
- /* No other profile is supported in the current SDK version 5 */
- cc->profileGUID = NV_ENC_HEVC_PROFILE_MAIN_GUID;
- avctx->profile = FF_PROFILE_HEVC_MAIN;
+ if (dl_fn->nvenc_lib)
+ return 1;
- if (ctx->level) {
- hevc->level = ctx->level;
+#if defined(_WIN32)
+ if (sizeof(void*) == 8) {
+ dl_fn->nvenc_lib = LoadLibrary(TEXT("nvEncodeAPI64.dll"));
} else {
- hevc->level = NV_ENC_LEVEL_AUTOSELECT;
+ dl_fn->nvenc_lib = LoadLibrary(TEXT("nvEncodeAPI.dll"));
}
+#else
+ dl_fn->nvenc_lib = dlopen("libnvidia-encode.so.1", RTLD_LAZY);
+#endif
- if (ctx->tier) {
- hevc->tier = ctx->tier;
+ if (!dl_fn->nvenc_lib) {
+ av_log(avctx, AV_LOG_FATAL, "Failed loading the nvenc library\n");
+ goto error;
}
- return 0;
-}
-static int nvenc_setup_codec_config(AVCodecContext *avctx)
-{
- switch (avctx->codec->id) {
- case AV_CODEC_ID_H264:
- return nvenc_setup_h264_config(avctx);
- case AV_CODEC_ID_HEVC:
- return nvenc_setup_hevc_config(avctx);
+ nvEncodeAPICreateInstance = (PNVENCODEAPICREATEINSTANCE)LOAD_FUNC(dl_fn->nvenc_lib, "NvEncodeAPICreateInstance");
+
+ if (!nvEncodeAPICreateInstance) {
+ av_log(avctx, AV_LOG_FATAL, "Failed to load nvenc entrypoint\n");
+ goto error;
}
- return 0;
-}
-static int nvenc_setup_encoder(AVCodecContext *avctx)
-{
- NVENCContext *ctx = avctx->priv_data;
- NV_ENCODE_API_FUNCTION_LIST *nv = &ctx->nvel.nvenc_funcs;
- NV_ENC_PRESET_CONFIG preset_cfg = { 0 };
- int ret;
-
- ctx->params.version = NV_ENC_INITIALIZE_PARAMS_VER;
-
- ctx->params.encodeHeight = avctx->height;
- ctx->params.encodeWidth = avctx->width;
-
- if (avctx->sample_aspect_ratio.num &&
- avctx->sample_aspect_ratio.den &&
- (avctx->sample_aspect_ratio.num != 1 ||
- avctx->sample_aspect_ratio.den != 1)) {
- av_reduce(&ctx->params.darWidth,
- &ctx->params.darHeight,
- avctx->width * avctx->sample_aspect_ratio.num,
- avctx->height * avctx->sample_aspect_ratio.den,
- INT_MAX / 8);
- } else {
- ctx->params.darHeight = avctx->height;
- ctx->params.darWidth = avctx->width;
+ dl_fn->nvenc_funcs.version = NV_ENCODE_API_FUNCTION_LIST_VER;
+
+ nvstatus = nvEncodeAPICreateInstance(&dl_fn->nvenc_funcs);
+
+ if (nvstatus != NV_ENC_SUCCESS) {
+ av_log(avctx, AV_LOG_FATAL, "Failed to create nvenc instance\n");
+ goto error;
}
- ctx->params.frameRateNum = avctx->time_base.den;
- ctx->params.frameRateDen = avctx->time_base.num * avctx->ticks_per_frame;
+ av_log(avctx, AV_LOG_VERBOSE, "Nvenc initialized successfully\n");
- ctx->params.enableEncodeAsync = 0;
- ctx->params.enablePTD = 1;
+ return 1;
- ctx->params.encodeConfig = &ctx->config;
+error:
+ if (dl_fn->nvenc_lib)
+ DL_CLOSE_FUNC(dl_fn->nvenc_lib);
- nvec_map_preset(ctx);
+ dl_fn->nvenc_lib = NULL;
- preset_cfg.version = NV_ENC_PRESET_CONFIG_VER;
- preset_cfg.presetCfg.version = NV_ENC_CONFIG_VER;
+ return 0;
+}
- ret = nv->nvEncGetEncodePresetConfig(ctx->nvenc_ctx,
- ctx->params.encodeGUID,
- ctx->params.presetGUID,
- &preset_cfg);
- if (ret != NV_ENC_SUCCESS) {
- av_log(avctx, AV_LOG_ERROR,
- "Cannot get the preset configuration\n");
- return AVERROR_UNKNOWN;
- }
+static av_cold void nvenc_unload_nvenc(AVCodecContext *avctx)
+{
+ NvencContext *ctx = avctx->priv_data;
+ NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
- memcpy(&ctx->config, &preset_cfg.presetCfg, sizeof(ctx->config));
+ DL_CLOSE_FUNC(dl_fn->nvenc_lib);
+ dl_fn->nvenc_lib = NULL;
- ctx->config.version = NV_ENC_CONFIG_VER;
+ dl_fn->nvenc_device_count = 0;
- if (avctx->gop_size > 0) {
- if (avctx->max_b_frames > 0) {
- ctx->last_dts = -2;
- /* 0 is intra-only,
- * 1 is I/P only,
- * 2 is one B Frame,
- * 3 two B frames, and so on. */
- ctx->config.frameIntervalP = avctx->max_b_frames + 1;
- } else if (avctx->max_b_frames == 0) {
- ctx->config.frameIntervalP = 1;
- }
- ctx->config.gopLength = avctx->gop_size;
- } else if (avctx->gop_size == 0) {
- ctx->config.frameIntervalP = 0;
- ctx->config.gopLength = 1;
- }
+ DL_CLOSE_FUNC(dl_fn->cuda_lib);
+ dl_fn->cuda_lib = NULL;
- if (ctx->config.frameIntervalP > 1)
- avctx->max_b_frames = ctx->config.frameIntervalP - 1;
+ dl_fn->cu_init = NULL;
+ dl_fn->cu_device_get_count = NULL;
+ dl_fn->cu_device_get = NULL;
+ dl_fn->cu_device_get_name = NULL;
+ dl_fn->cu_device_compute_capability = NULL;
+ dl_fn->cu_ctx_create = NULL;
+ dl_fn->cu_ctx_pop_current = NULL;
+ dl_fn->cu_ctx_destroy = NULL;
- nvenc_setup_rate_control(avctx);
+ av_log(avctx, AV_LOG_VERBOSE, "Nvenc unloaded\n");
+}
- if (avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
- ctx->config.frameFieldMode = NV_ENC_PARAMS_FRAME_FIELD_MODE_FIELD;
- } else {
- ctx->config.frameFieldMode = NV_ENC_PARAMS_FRAME_FIELD_MODE_FRAME;
+static av_cold int nvenc_encode_init(AVCodecContext *avctx)
+{
+ NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS encode_session_params = { 0 };
+ NV_ENC_PRESET_CONFIG preset_config = { 0 };
+ CUcontext cu_context_curr;
+ CUresult cu_res;
+ GUID encoder_preset = NV_ENC_PRESET_HQ_GUID;
+ GUID codec;
+ NVENCSTATUS nv_status = NV_ENC_SUCCESS;
+ int surfaceCount = 0;
+ int i, num_mbs;
+ int isLL = 0;
+ int lossless = 0;
+ int res = 0;
+ int dw, dh;
+
+ NvencContext *ctx = avctx->priv_data;
+ NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
+ NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &dl_fn->nvenc_funcs;
+
+ if (!nvenc_dyload_nvenc(avctx))
+ return AVERROR_EXTERNAL;
+
+ ctx->last_dts = AV_NOPTS_VALUE;
+
+ ctx->encode_config.version = NV_ENC_CONFIG_VER;
+ ctx->init_encode_params.version = NV_ENC_INITIALIZE_PARAMS_VER;
+ preset_config.version = NV_ENC_PRESET_CONFIG_VER;
+ preset_config.presetCfg.version = NV_ENC_CONFIG_VER;
+ encode_session_params.version = NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER;
+ encode_session_params.apiVersion = NVENCAPI_VERSION;
+
+ if (ctx->gpu >= dl_fn->nvenc_device_count) {
+ av_log(avctx, AV_LOG_FATAL, "Requested GPU %d, but only %d GPUs are available!\n", ctx->gpu, dl_fn->nvenc_device_count);
+ res = AVERROR(EINVAL);
+ goto error;
}
- if ((ret = nvenc_setup_codec_config(avctx)) < 0)
- return ret;
+ ctx->cu_context = NULL;
+ cu_res = dl_fn->cu_ctx_create(&ctx->cu_context, 0, dl_fn->nvenc_devices[ctx->gpu]);
- ret = nv->nvEncInitializeEncoder(ctx->nvenc_ctx, &ctx->params);
- if (ret != NV_ENC_SUCCESS) {
- av_log(avctx, AV_LOG_ERROR, "Cannot initialize the decoder");
- return AVERROR_UNKNOWN;
+ if (cu_res != CUDA_SUCCESS) {
+ av_log(avctx, AV_LOG_FATAL, "Failed creating CUDA context for NVENC: 0x%x\n", (int)cu_res);
+ res = AVERROR_EXTERNAL;
+ goto error;
}
- return 0;
-}
+ cu_res = dl_fn->cu_ctx_pop_current(&cu_context_curr);
-static int nvenc_alloc_surface(AVCodecContext *avctx, int idx)
-{
- NVENCContext *ctx = avctx->priv_data;
- NV_ENCODE_API_FUNCTION_LIST *nv = &ctx->nvel.nvenc_funcs;
- int ret;
- NV_ENC_CREATE_INPUT_BUFFER in_buffer = { 0 };
- NV_ENC_CREATE_BITSTREAM_BUFFER out_buffer = { 0 };
+ if (cu_res != CUDA_SUCCESS) {
+ av_log(avctx, AV_LOG_FATAL, "Failed popping CUDA context: 0x%x\n", (int)cu_res);
+ res = AVERROR_EXTERNAL;
+ goto error;
+ }
- in_buffer.version = NV_ENC_CREATE_INPUT_BUFFER_VER;
- out_buffer.version = NV_ENC_CREATE_BITSTREAM_BUFFER_VER;
+ encode_session_params.device = ctx->cu_context;
+ encode_session_params.deviceType = NV_ENC_DEVICE_TYPE_CUDA;
- in_buffer.width = avctx->width;
- in_buffer.height = avctx->height;
+ nv_status = p_nvenc->nvEncOpenEncodeSessionEx(&encode_session_params, &ctx->nvencoder);
+ if (nv_status != NV_ENC_SUCCESS) {
+ ctx->nvencoder = NULL;
+ av_log(avctx, AV_LOG_FATAL, "OpenEncodeSessionEx failed: 0x%x - invalid license key?\n", (int)nv_status);
+ res = AVERROR_EXTERNAL;
+ goto error;
+ }
- in_buffer.memoryHeap = NV_ENC_MEMORY_HEAP_SYSMEM_UNCACHED;
+ if (ctx->preset) {
+ if (!strcmp(ctx->preset, "hp")) {
+ encoder_preset = NV_ENC_PRESET_HP_GUID;
+ } else if (!strcmp(ctx->preset, "hq")) {
+ encoder_preset = NV_ENC_PRESET_HQ_GUID;
+ } else if (!strcmp(ctx->preset, "bd")) {
+ encoder_preset = NV_ENC_PRESET_BD_GUID;
+ } else if (!strcmp(ctx->preset, "ll")) {
+ encoder_preset = NV_ENC_PRESET_LOW_LATENCY_DEFAULT_GUID;
+ isLL = 1;
+ } else if (!strcmp(ctx->preset, "llhp")) {
+ encoder_preset = NV_ENC_PRESET_LOW_LATENCY_HP_GUID;
+ isLL = 1;
+ } else if (!strcmp(ctx->preset, "llhq")) {
+ encoder_preset = NV_ENC_PRESET_LOW_LATENCY_HQ_GUID;
+ isLL = 1;
+ } else if (!strcmp(ctx->preset, "lossless")) {
+ encoder_preset = NV_ENC_PRESET_LOSSLESS_DEFAULT_GUID;
+ lossless = 1;
+ } else if (!strcmp(ctx->preset, "losslesshp")) {
+ encoder_preset = NV_ENC_PRESET_LOSSLESS_HP_GUID;
+ lossless = 1;
+ } else if (!strcmp(ctx->preset, "default")) {
+ encoder_preset = NV_ENC_PRESET_DEFAULT_GUID;
+ } else {
+ av_log(avctx, AV_LOG_FATAL, "Preset \"%s\" is unknown! Supported presets: hp, hq, bd, ll, llhp, llhq, lossless, losslesshp, default\n", ctx->preset);
+ res = AVERROR(EINVAL);
+ goto error;
+ }
+ }
- switch (avctx->pix_fmt) {
- case AV_PIX_FMT_YUV420P:
- in_buffer.bufferFmt = NV_ENC_BUFFER_FORMAT_YV12_PL;
- break;
- case AV_PIX_FMT_NV12:
- in_buffer.bufferFmt = NV_ENC_BUFFER_FORMAT_NV12_PL;
+ switch (avctx->codec->id) {
+ case AV_CODEC_ID_H264:
+ codec = NV_ENC_CODEC_H264_GUID;
break;
- case AV_PIX_FMT_YUV444P:
- in_buffer.bufferFmt = NV_ENC_BUFFER_FORMAT_YUV444_PL;
+ case AV_CODEC_ID_H265:
+ codec = NV_ENC_CODEC_HEVC_GUID;
break;
default:
- return AVERROR_BUG;
+ av_log(avctx, AV_LOG_ERROR, "nvenc: Unknown codec name\n");
+ res = AVERROR(EINVAL);
+ goto error;
}
- ret = nv->nvEncCreateInputBuffer(ctx->nvenc_ctx, &in_buffer);
- if (ret != NV_ENC_SUCCESS) {
- av_log(avctx, AV_LOG_ERROR, "CreateInputBuffer failed\n");
- return AVERROR_UNKNOWN;
+ nv_status = p_nvenc->nvEncGetEncodePresetConfig(ctx->nvencoder, codec, encoder_preset, &preset_config);
+ if (nv_status != NV_ENC_SUCCESS) {
+ av_log(avctx, AV_LOG_FATAL, "GetEncodePresetConfig failed: 0x%x\n", (int)nv_status);
+ res = AVERROR_EXTERNAL;
+ goto error;
}
- ctx->in[idx].in = in_buffer.inputBuffer;
- ctx->in[idx].format = in_buffer.bufferFmt;
-
- /* 1MB is large enough to hold most output frames.
- * NVENC increases this automaticaly if it's not enough. */
- out_buffer.size = BITSTREAM_BUFFER_SIZE;
+ ctx->init_encode_params.encodeGUID = codec;
+ ctx->init_encode_params.encodeHeight = avctx->height;
+ ctx->init_encode_params.encodeWidth = avctx->width;
- out_buffer.memoryHeap = NV_ENC_MEMORY_HEAP_SYSMEM_UNCACHED;
+ if (avctx->sample_aspect_ratio.num && avctx->sample_aspect_ratio.den &&
+ (avctx->sample_aspect_ratio.num != 1 || avctx->sample_aspect_ratio.den != 1)) {
+ av_reduce(&dw, &dh,
+ avctx->width * avctx->sample_aspect_ratio.num,
+ avctx->height * avctx->sample_aspect_ratio.den,
+ 1024 * 1024);
+ ctx->init_encode_params.darHeight = dh;
+ ctx->init_encode_params.darWidth = dw;
+ } else {
+ ctx->init_encode_params.darHeight = avctx->height;
+ ctx->init_encode_params.darWidth = avctx->width;
+ }
- ret = nv->nvEncCreateBitstreamBuffer(ctx->nvenc_ctx, &out_buffer);
- if (ret != NV_ENC_SUCCESS) {
- av_log(avctx, AV_LOG_ERROR, "CreateBitstreamBuffer failed\n");
- return AVERROR_UNKNOWN;
+ // De-compensate for hardware, dubiously, trying to compensate for
+ // playback at 704 pixel width.
+ if (avctx->width == 720 &&
+ (avctx->height == 480 || avctx->height == 576)) {
+ av_reduce(&dw, &dh,
+ ctx->init_encode_params.darWidth * 44,
+ ctx->init_encode_params.darHeight * 45,
+ 1024 * 1024);
+ ctx->init_encode_params.darHeight = dh;
+ ctx->init_encode_params.darWidth = dw;
}
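The 720-pixel special case rescales the display aspect ratio by 44/45, which maps the 720 coded luma samples of ITU-R SD video onto the nominally displayed 704 (720 * 44 / 45 = 704). A hedged worked example for 720x480 with square input samples:

    #include <libavutil/rational.h>

    int dw, dh;
    /* 720*44 : 480*45 = 31680 : 21600, which reduces to 22 : 15 (= 704:480) */
    av_reduce(&dw, &dh, 720 * 44, 480 * 45, 1024 * 1024);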
- ctx->out[idx].out = out_buffer.bitstreamBuffer;
- ctx->out[idx].busy = 0;
+ ctx->init_encode_params.frameRateNum = avctx->time_base.den;
+ ctx->init_encode_params.frameRateDen = avctx->time_base.num * avctx->ticks_per_frame;
- return 0;
-}
+ num_mbs = ((avctx->width + 15) >> 4) * ((avctx->height + 15) >> 4);
+ ctx->max_surface_count = (num_mbs >= 8160) ? 32 : 48;
-static int nvenc_setup_surfaces(AVCodecContext *avctx)
-{
- NVENCContext *ctx = avctx->priv_data;
- int i, ret;
+ if (ctx->buffer_delay >= ctx->max_surface_count)
+ ctx->buffer_delay = ctx->max_surface_count - 1;
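The surface pool is sized from the macroblock count, and buffer_delay is clamped to stay below it. A hedged worked example for 1080p:

    int num_mbs  = ((1920 + 15) >> 4) * ((1080 + 15) >> 4);  /* 120 * 68 = 8160 */
    int surfaces = num_mbs >= 8160 ? 32 : 48;                /* -> 32 surfaces  */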
- ctx->nb_surfaces = FFMAX(4 + avctx->max_b_frames,
- ctx->nb_surfaces);
+ ctx->init_encode_params.enableEncodeAsync = 0;
+ ctx->init_encode_params.enablePTD = 1;
- ctx->in = av_mallocz(ctx->nb_surfaces * sizeof(*ctx->in));
- if (!ctx->in)
- return AVERROR(ENOMEM);
+ ctx->init_encode_params.presetGUID = encoder_preset;
- ctx->out = av_mallocz(ctx->nb_surfaces * sizeof(*ctx->out));
- if (!ctx->out)
- return AVERROR(ENOMEM);
+ ctx->init_encode_params.encodeConfig = &ctx->encode_config;
+ memcpy(&ctx->encode_config, &preset_config.presetCfg, sizeof(ctx->encode_config));
+ ctx->encode_config.version = NV_ENC_CONFIG_VER;
- ctx->timestamps = av_fifo_alloc(ctx->nb_surfaces * sizeof(int64_t));
- if (!ctx->timestamps)
- return AVERROR(ENOMEM);
- ctx->pending = av_fifo_alloc(ctx->nb_surfaces * sizeof(ctx->out));
- if (!ctx->pending)
- return AVERROR(ENOMEM);
- ctx->ready = av_fifo_alloc(ctx->nb_surfaces * sizeof(ctx->out));
- if (!ctx->ready)
- return AVERROR(ENOMEM);
+ if (avctx->refs >= 0) {
+ /* 0 means "let the hardware decide" */
+ switch (avctx->codec->id) {
+ case AV_CODEC_ID_H264:
+ ctx->encode_config.encodeCodecConfig.h264Config.maxNumRefFrames = avctx->refs;
+ break;
+ case AV_CODEC_ID_H265:
+ ctx->encode_config.encodeCodecConfig.hevcConfig.maxNumRefFramesInDPB = avctx->refs;
+ break;
+ /* Earlier switch/case will return if unknown codec is passed. */
+ }
+ }
+
+ if (avctx->gop_size > 0) {
+ if (avctx->max_b_frames >= 0) {
+ /* 0 is intra-only, 1 is I/P only, 2 is one B Frame, 3 two B frames, and so on. */
+ ctx->encode_config.frameIntervalP = avctx->max_b_frames + 1;
+ }
- for (i = 0; i < ctx->nb_surfaces; i++) {
- if ((ret = nvenc_alloc_surface(avctx, i)) < 0)
- return ret;
+ ctx->encode_config.gopLength = avctx->gop_size;
+ switch (avctx->codec->id) {
+ case AV_CODEC_ID_H264:
+ ctx->encode_config.encodeCodecConfig.h264Config.idrPeriod = avctx->gop_size;
+ break;
+ case AV_CODEC_ID_H265:
+ ctx->encode_config.encodeCodecConfig.hevcConfig.idrPeriod = avctx->gop_size;
+ break;
+ /* Earlier switch/case will return if unknown codec is passed. */
+ }
+ } else if (avctx->gop_size == 0) {
+ ctx->encode_config.frameIntervalP = 0;
+ ctx->encode_config.gopLength = 1;
+ switch (avctx->codec->id) {
+ case AV_CODEC_ID_H264:
+ ctx->encode_config.encodeCodecConfig.h264Config.idrPeriod = 1;
+ break;
+ case AV_CODEC_ID_H265:
+ ctx->encode_config.encodeCodecConfig.hevcConfig.idrPeriod = 1;
+ break;
+ /* Earlier switch/case will return if unknown codec is passed. */
+ }
}
- return 0;
-}
+ /* When there are B-frames, set the DTS offset. */
+ if (ctx->encode_config.frameIntervalP >= 2)
+ ctx->last_dts = -2;
-#define EXTRADATA_SIZE 512
+ if (avctx->bit_rate > 0)
+ ctx->encode_config.rcParams.averageBitRate = avctx->bit_rate;
-static int nvenc_setup_extradata(AVCodecContext *avctx)
-{
- NVENCContext *ctx = avctx->priv_data;
- NV_ENCODE_API_FUNCTION_LIST *nv = &ctx->nvel.nvenc_funcs;
- NV_ENC_SEQUENCE_PARAM_PAYLOAD payload = { 0 };
- int ret;
+ if (avctx->rc_max_rate > 0)
+ ctx->encode_config.rcParams.maxBitRate = avctx->rc_max_rate;
+
+ if (lossless) {
+ ctx->encode_config.encodeCodecConfig.h264Config.qpPrimeYZeroTransformBypassFlag = 1;
+ ctx->encode_config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_CONSTQP;
+ ctx->encode_config.rcParams.constQP.qpInterB = 0;
+ ctx->encode_config.rcParams.constQP.qpInterP = 0;
+ ctx->encode_config.rcParams.constQP.qpIntra = 0;
+
+ avctx->qmin = -1;
+ avctx->qmax = -1;
+ } else if (ctx->cbr) {
+ if (!ctx->twopass) {
+ ctx->encode_config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_CBR;
+ } else if (ctx->twopass == 1 || isLL) {
+ ctx->encode_config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_2_PASS_QUALITY;
+
+ if (avctx->codec->id == AV_CODEC_ID_H264) {
+ ctx->encode_config.encodeCodecConfig.h264Config.adaptiveTransformMode = NV_ENC_H264_ADAPTIVE_TRANSFORM_ENABLE;
+ ctx->encode_config.encodeCodecConfig.h264Config.fmoMode = NV_ENC_H264_FMO_DISABLE;
+ }
+ } else {
+ ctx->encode_config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_CBR;
+ }
+ } else if (avctx->global_quality > 0) {
+ ctx->encode_config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_CONSTQP;
+ ctx->encode_config.rcParams.constQP.qpInterB = avctx->global_quality;
+ ctx->encode_config.rcParams.constQP.qpInterP = avctx->global_quality;
+ ctx->encode_config.rcParams.constQP.qpIntra = avctx->global_quality;
- avctx->extradata = av_mallocz(EXTRADATA_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
- if (!avctx->extradata)
- return AVERROR(ENOMEM);
+ avctx->qmin = -1;
+ avctx->qmax = -1;
+ } else if (avctx->qmin >= 0 && avctx->qmax >= 0) {
+ ctx->encode_config.rcParams.rateControlMode = NV_ENC_PARAMS_RC_VBR;
- payload.version = NV_ENC_SEQUENCE_PARAM_PAYLOAD_VER;
- payload.spsppsBuffer = avctx->extradata;
- payload.inBufferSize = EXTRADATA_SIZE;
- payload.outSPSPPSPayloadSize = &avctx->extradata_size;
+ ctx->encode_config.rcParams.enableMinQP = 1;
+ ctx->encode_config.rcParams.enableMaxQP = 1;
- ret = nv->nvEncGetSequenceParams(ctx->nvenc_ctx, &payload);
- if (ret != NV_ENC_SUCCESS) {
- av_log(avctx, AV_LOG_ERROR, "Cannot get the extradata\n");
- return AVERROR_UNKNOWN;
+ ctx->encode_config.rcParams.minQP.qpInterB = avctx->qmin;
+ ctx->encode_config.rcParams.minQP.qpInterP = avctx->qmin;
+ ctx->encode_config.rcParams.minQP.qpIntra = avctx->qmin;
+
+ ctx->encode_config.rcParams.maxQP.qpInterB = avctx->qmax;
+ ctx->encode_config.rcParams.maxQP.qpInterP = avctx->qmax;
+ ctx->encode_config.rcParams.maxQP.qpIntra = avctx->qmax;
}
- return 0;
-}
+ if (avctx->rc_buffer_size > 0)
+ ctx->encode_config.rcParams.vbvBufferSize = avctx->rc_buffer_size;
- if (avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
-av_cold int ff_nvenc_encode_close(AVCodecContext *avctx)
-{
- NVENCContext *ctx = avctx->priv_data;
- NV_ENCODE_API_FUNCTION_LIST *nv = &ctx->nvel.nvenc_funcs;
- int i;
++ if (avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
+ ctx->encode_config.frameFieldMode = NV_ENC_PARAMS_FRAME_FIELD_MODE_FIELD;
+ } else {
+ ctx->encode_config.frameFieldMode = NV_ENC_PARAMS_FRAME_FIELD_MODE_FRAME;
+ }
- if (ctx->in) {
- for (i = 0; i < ctx->nb_surfaces; ++i) {
- nv->nvEncDestroyInputBuffer(ctx->nvenc_ctx, ctx->in[i].in);
- nv->nvEncDestroyBitstreamBuffer(ctx->nvenc_ctx, ctx->out[i].out);
+ switch (avctx->codec->id) {
+ case AV_CODEC_ID_H264:
+ ctx->encode_config.encodeCodecConfig.h264Config.h264VUIParameters.colourDescriptionPresentFlag = 1;
+ ctx->encode_config.encodeCodecConfig.h264Config.h264VUIParameters.videoSignalTypePresentFlag = 1;
+
+ ctx->encode_config.encodeCodecConfig.h264Config.h264VUIParameters.colourMatrix = avctx->colorspace;
+ ctx->encode_config.encodeCodecConfig.h264Config.h264VUIParameters.colourPrimaries = avctx->color_primaries;
+ ctx->encode_config.encodeCodecConfig.h264Config.h264VUIParameters.transferCharacteristics = avctx->color_trc;
+
+ ctx->encode_config.encodeCodecConfig.h264Config.h264VUIParameters.videoFullRangeFlag = avctx->color_range == AVCOL_RANGE_JPEG;
+
- ctx->encode_config.encodeCodecConfig.h264Config.disableSPSPPS = (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) ? 1 : 0;
- ctx->encode_config.encodeCodecConfig.h264Config.repeatSPSPPS = (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) ? 0 : 1;
++ ctx->encode_config.encodeCodecConfig.h264Config.disableSPSPPS = (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) ? 1 : 0;
++ ctx->encode_config.encodeCodecConfig.h264Config.repeatSPSPPS = (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) ? 0 : 1;
+
+ if (!ctx->profile) {
+ switch (avctx->profile) {
+ case FF_PROFILE_H264_HIGH_444_PREDICTIVE:
+ ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_HIGH_444_GUID;
+ break;
+ case FF_PROFILE_H264_BASELINE:
+ ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_BASELINE_GUID;
+ break;
+ case FF_PROFILE_H264_MAIN:
+ ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_MAIN_GUID;
+ break;
+ case FF_PROFILE_H264_HIGH:
+ case FF_PROFILE_UNKNOWN:
+ ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_HIGH_GUID;
+ break;
+ default:
+ av_log(avctx, AV_LOG_WARNING, "Unsupported profile requested, falling back to high\n");
+ ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_HIGH_GUID;
+ break;
+ }
+ } else {
+ if (!strcmp(ctx->profile, "high")) {
+ ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_HIGH_GUID;
+ avctx->profile = FF_PROFILE_H264_HIGH;
+ } else if (!strcmp(ctx->profile, "main")) {
+ ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_MAIN_GUID;
+ avctx->profile = FF_PROFILE_H264_MAIN;
+ } else if (!strcmp(ctx->profile, "baseline")) {
+ ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_BASELINE_GUID;
+ avctx->profile = FF_PROFILE_H264_BASELINE;
+ } else if (!strcmp(ctx->profile, "high444p")) {
+ ctx->encode_config.profileGUID = NV_ENC_H264_PROFILE_HIGH_444_GUID;
+ avctx->profile = FF_PROFILE_H264_HIGH_444_PREDICTIVE;
+ } else {
+ av_log(avctx, AV_LOG_FATAL, "Profile \"%s\" is unknown! Supported profiles: high, main, baseline\n", ctx->profile);
+ res = AVERROR(EINVAL);
+ goto error;
+ }
}
- }
- av_freep(&ctx->in);
- av_freep(&ctx->out);
+ ctx->encode_config.encodeCodecConfig.h264Config.chromaFormatIDC = avctx->profile == FF_PROFILE_H264_HIGH_444_PREDICTIVE ? 3 : 1;
- if (ctx->nvenc_ctx)
- nv->nvEncDestroyEncoder(ctx->nvenc_ctx);
+ if (ctx->level) {
+ res = input_string_to_uint32(avctx, nvenc_h264_level_pairs, ctx->level, &ctx->encode_config.encodeCodecConfig.h264Config.level);
- if (ctx->cu_context)
- ctx->nvel.cu_ctx_destroy(ctx->cu_context);
+ if (res) {
+ av_log(avctx, AV_LOG_FATAL, "Level \"%s\" is unknown! Supported levels: auto, 1, 1b, 1.1, 1.2, 1.3, 2, 2.1, 2.2, 3, 3.1, 3.2, 4, 4.1, 4.2, 5, 5.1\n", ctx->level);
+ goto error;
+ }
+ } else {
+ ctx->encode_config.encodeCodecConfig.h264Config.level = NV_ENC_LEVEL_AUTOSELECT;
+ }
- if (ctx->nvel.nvenc)
- dlclose(ctx->nvel.nvenc);
+ break;
+ case AV_CODEC_ID_H265:
- ctx->encode_config.encodeCodecConfig.hevcConfig.disableSPSPPS = (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) ? 1 : 0;
- ctx->encode_config.encodeCodecConfig.hevcConfig.repeatSPSPPS = (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) ? 0 : 1;
++ ctx->encode_config.encodeCodecConfig.hevcConfig.disableSPSPPS = (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) ? 1 : 0;
++ ctx->encode_config.encodeCodecConfig.hevcConfig.repeatSPSPPS = (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) ? 0 : 1;
- if (ctx->nvel.cuda)
- dlclose(ctx->nvel.cuda);
+ /* Only the Main profile is supported by the current SDK (version 5). */
+ ctx->encode_config.profileGUID = NV_ENC_HEVC_PROFILE_MAIN_GUID;
+ avctx->profile = FF_PROFILE_HEVC_MAIN;
- return 0;
-}
+ if (ctx->level) {
+ res = input_string_to_uint32(avctx, nvenc_hevc_level_pairs, ctx->level, &ctx->encode_config.encodeCodecConfig.hevcConfig.level);
-av_cold int ff_nvenc_encode_init(AVCodecContext *avctx)
-{
- int ret;
+ if (res) {
+ av_log(avctx, AV_LOG_FATAL, "Level \"%s\" is unknown! Supported levels: auto, 1, 2, 2.1, 3, 3.1, 4, 4.1, 5, 5.1, 5.2, 6, 6.1, 6.2\n", ctx->level);
+ goto error;
+ }
+ } else {
+ ctx->encode_config.encodeCodecConfig.hevcConfig.level = NV_ENC_LEVEL_AUTOSELECT;
+ }
+
+ if (ctx->tier) {
+ if (!strcmp(ctx->tier, "main")) {
+ ctx->encode_config.encodeCodecConfig.hevcConfig.tier = NV_ENC_TIER_HEVC_MAIN;
+ } else if (!strcmp(ctx->tier, "high")) {
+ ctx->encode_config.encodeCodecConfig.hevcConfig.tier = NV_ENC_TIER_HEVC_HIGH;
+ } else {
+ av_log(avctx, AV_LOG_FATAL, "Tier \"%s\" is unknown! Supported tiers: main, high\n", ctx->tier);
+ res = AVERROR(EINVAL);
+ goto error;
+ }
+ }
+
+ break;
+ /* Earlier switch/case will return if unknown codec is passed. */
+ }
- if ((ret = nvenc_load_libraries(avctx)) < 0)
- return ret;
+ nv_status = p_nvenc->nvEncInitializeEncoder(ctx->nvencoder, &ctx->init_encode_params);
+ if (nv_status != NV_ENC_SUCCESS) {
+ av_log(avctx, AV_LOG_FATAL, "InitializeEncoder failed: 0x%x\n", (int)nv_status);
+ res = AVERROR_EXTERNAL;
+ goto error;
+ }
- if ((ret = nvenc_setup_device(avctx)) < 0)
- return ret;
+ ctx->input_surfaces = av_malloc(ctx->max_surface_count * sizeof(*ctx->input_surfaces));
- if ((ret = nvenc_setup_encoder(avctx)) < 0)
- return ret;
+ if (!ctx->input_surfaces) {
+ res = AVERROR(ENOMEM);
+ goto error;
+ }
- if ((ret = nvenc_setup_surfaces(avctx)) < 0)
- return ret;
+ ctx->output_surfaces = av_malloc(ctx->max_surface_count * sizeof(*ctx->output_surfaces));
- if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
- if ((ret = nvenc_setup_extradata(avctx)) < 0)
- return ret;
+ if (!ctx->output_surfaces) {
+ res = AVERROR(ENOMEM);
+ goto error;
}
- return 0;
-}
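+ /* Allocate one system-memory input buffer and one bitstream buffer per surface. */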
+ for (surfaceCount = 0; surfaceCount < ctx->max_surface_count; ++surfaceCount) {
+ NV_ENC_CREATE_INPUT_BUFFER allocSurf = { 0 };
+ NV_ENC_CREATE_BITSTREAM_BUFFER allocOut = { 0 };
+ allocSurf.version = NV_ENC_CREATE_INPUT_BUFFER_VER;
+ allocOut.version = NV_ENC_CREATE_BITSTREAM_BUFFER_VER;
-static NVENCInputSurface *get_input_surface(NVENCContext *ctx)
-{
- int i;
+ allocSurf.width = (avctx->width + 31) & ~31;
+ allocSurf.height = (avctx->height + 31) & ~31;
- for (i = 0; i < ctx->nb_surfaces; i++) {
- if (!ctx->in[i].locked) {
- ctx->in[i].locked = 1;
- return &ctx->in[i];
- }
- }
+ allocSurf.memoryHeap = NV_ENC_MEMORY_HEAP_SYSMEM_CACHED;
- return NULL;
-}
+ switch (avctx->pix_fmt) {
+ case AV_PIX_FMT_YUV420P:
+ allocSurf.bufferFmt = NV_ENC_BUFFER_FORMAT_YV12_PL;
+ break;
-static NVENCOutputSurface *get_output_surface(NVENCContext *ctx)
-{
- int i;
+ case AV_PIX_FMT_NV12:
+ allocSurf.bufferFmt = NV_ENC_BUFFER_FORMAT_NV12_PL;
+ break;
+
+ case AV_PIX_FMT_YUV444P:
+ allocSurf.bufferFmt = NV_ENC_BUFFER_FORMAT_YUV444_PL;
+ break;
- for (i = 0; i < ctx->nb_surfaces; i++) {
- if (!ctx->out[i].busy) {
- return &ctx->out[i];
+ default:
+ av_log(avctx, AV_LOG_FATAL, "Invalid input pixel format\n");
+ res = AVERROR(EINVAL);
+ goto error;
}
- }
- return NULL;
-}
+ nv_status = p_nvenc->nvEncCreateInputBuffer(ctx->nvencoder, &allocSurf);
+ if (nv_status != NV_ENC_SUCCESS) {
+ av_log(avctx, AV_LOG_FATAL, "CreateInputBuffer failed\n");
+ res = AVERROR_EXTERNAL;
+ goto error;
+ }
-static int nvenc_copy_frame(NV_ENC_LOCK_INPUT_BUFFER *in, const AVFrame *frame)
-{
- uint8_t *buf = in->bufferDataPtr;
- int off = frame->height * in->pitch;
+ ctx->input_surfaces[surfaceCount].lockCount = 0;
+ ctx->input_surfaces[surfaceCount].input_surface = allocSurf.inputBuffer;
+ ctx->input_surfaces[surfaceCount].format = allocSurf.bufferFmt;
+ ctx->input_surfaces[surfaceCount].width = allocSurf.width;
+ ctx->input_surfaces[surfaceCount].height = allocSurf.height;
- switch (frame->format) {
- case AV_PIX_FMT_YUV420P:
- av_image_copy_plane(buf, in->pitch,
- frame->data[0], frame->linesize[0],
- frame->width, frame->height);
- buf += off;
+ /* 1 MB is large enough to hold most output frames. NVENC increases this automatically if it is not enough. */
+ allocOut.size = 1024 * 1024;
- av_image_copy_plane(buf, in->pitch >> 1,
- frame->data[2], frame->linesize[2],
- frame->width >> 1, frame->height >> 1);
+ allocOut.memoryHeap = NV_ENC_MEMORY_HEAP_SYSMEM_CACHED;
- buf += off >> 2;
+ nv_status = p_nvenc->nvEncCreateBitstreamBuffer(ctx->nvencoder, &allocOut);
+ if (nv_status != NV_ENC_SUCCESS) {
+ av_log(avctx, AV_LOG_FATAL, "CreateBitstreamBuffer failed\n");
+ ctx->output_surfaces[surfaceCount++].output_surface = NULL;
+ res = AVERROR_EXTERNAL;
+ goto error;
+ }
- av_image_copy_plane(buf, in->pitch >> 1,
- frame->data[1], frame->linesize[1],
- frame->width >> 1, frame->height >> 1);
- break;
- case AV_PIX_FMT_NV12:
- av_image_copy_plane(buf, in->pitch,
- frame->data[0], frame->linesize[0],
- frame->width, frame->height);
- buf += off;
-
- av_image_copy_plane(buf, in->pitch,
- frame->data[1], frame->linesize[1],
- frame->width, frame->height >> 1);
- break;
- case AV_PIX_FMT_YUV444P:
- av_image_copy_plane(buf, in->pitch,
- frame->data[0], frame->linesize[0],
- frame->width, frame->height);
- buf += off;
-
- av_image_copy_plane(buf, in->pitch,
- frame->data[1], frame->linesize[1],
- frame->width, frame->height);
- buf += off;
-
- av_image_copy_plane(buf, in->pitch,
- frame->data[2], frame->linesize[2],
- frame->width, frame->height);
- break;
- default:
- return AVERROR_BUG;
+ ctx->output_surfaces[surfaceCount].output_surface = allocOut.bitstreamBuffer;
+ ctx->output_surfaces[surfaceCount].size = allocOut.size;
+ ctx->output_surfaces[surfaceCount].busy = 0;
}
- if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
- return 0;
-}
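+ /* Export SPS/PPS as global extradata when the muxer requires it. */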
++ if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
+ uint32_t outSize = 0;
+ char tmpHeader[256];
+ NV_ENC_SEQUENCE_PARAM_PAYLOAD payload = { 0 };
+ payload.version = NV_ENC_SEQUENCE_PARAM_PAYLOAD_VER;
+
+ payload.spsppsBuffer = tmpHeader;
+ payload.inBufferSize = sizeof(tmpHeader);
+ payload.outSPSPPSPayloadSize = &outSize;
+
+ nv_status = p_nvenc->nvEncGetSequenceParams(ctx->nvencoder, &payload);
+ if (nv_status != NV_ENC_SUCCESS) {
+ av_log(avctx, AV_LOG_FATAL, "GetSequenceParams failed\n");
+ goto error;
+ }
-static int nvenc_enqueue_frame(AVCodecContext *avctx, const AVFrame *frame,
- NVENCInputSurface **in_surf)
-{
- NVENCContext *ctx = avctx->priv_data;
- NV_ENCODE_API_FUNCTION_LIST *nv = &ctx->nvel.nvenc_funcs;
- NV_ENC_LOCK_INPUT_BUFFER params = { 0 };
- NVENCInputSurface *in = get_input_surface(ctx);
- int ret;
+ avctx->extradata_size = outSize;
+ avctx->extradata = av_mallocz(outSize + FF_INPUT_BUFFER_PADDING_SIZE);
- if (!in)
- return AVERROR_BUG;
+ if (!avctx->extradata) {
+ res = AVERROR(ENOMEM);
+ goto error;
+ }
- params.version = NV_ENC_LOCK_INPUT_BUFFER_VER;
- params.inputBuffer = in->in;
+ memcpy(avctx->extradata, tmpHeader, outSize);
+ }
+ if (ctx->encode_config.frameIntervalP > 1)
+ avctx->has_b_frames = 2;
- ret = nv->nvEncLockInputBuffer(ctx->nvenc_ctx, &params);
- if (ret != NV_ENC_SUCCESS) {
- av_log(avctx, AV_LOG_ERROR, "Cannot lock the buffer %p.\n",
- in);
- return AVERROR_UNKNOWN;
- }
+ if (ctx->encode_config.rcParams.averageBitRate > 0)
+ avctx->bit_rate = ctx->encode_config.rcParams.averageBitRate;
+
+ return 0;
- ret = nvenc_copy_frame(&params, frame);
- if (ret < 0)
- goto fail;
+error:
- ret = nv->nvEncUnlockInputBuffer(ctx->nvenc_ctx, in->in);
- if (ret != NV_ENC_SUCCESS) {
- av_log(avctx, AV_LOG_ERROR, "Cannot unlock the buffer %p.\n",
- in);
- return AVERROR_UNKNOWN;
+ for (i = 0; i < surfaceCount; ++i) {
+ p_nvenc->nvEncDestroyInputBuffer(ctx->nvencoder, ctx->input_surfaces[i].input_surface);
+ if (ctx->output_surfaces[i].output_surface)
+ p_nvenc->nvEncDestroyBitstreamBuffer(ctx->nvencoder, ctx->output_surfaces[i].output_surface);
}
- *in_surf = in;
+ if (ctx->nvencoder)
+ p_nvenc->nvEncDestroyEncoder(ctx->nvencoder);
- return 0;
+ if (ctx->cu_context)
+ dl_fn->cu_ctx_destroy(ctx->cu_context);
-fail:
- nv->nvEncUnlockInputBuffer(ctx->nvenc_ctx, in->in);
+ nvenc_unload_nvenc(avctx);
- return ret;
+ ctx->nvencoder = NULL;
+ ctx->cu_context = NULL;
+
+ return res;
}
-static void nvenc_codec_specific_pic_params(AVCodecContext *avctx,
- NV_ENC_PIC_PARAMS *params)
+static av_cold int nvenc_encode_close(AVCodecContext *avctx)
{
- NVENCContext *ctx = avctx->priv_data;
+ NvencContext *ctx = avctx->priv_data;
+ NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
+ NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &dl_fn->nvenc_funcs;
+ int i;
- switch (avctx->codec->id) {
- case AV_CODEC_ID_H264:
- params->codecPicParams.h264PicParams.sliceMode =
- ctx->config.encodeCodecConfig.h264Config.sliceMode;
- params->codecPicParams.h264PicParams.sliceModeData =
- ctx->config.encodeCodecConfig.h264Config.sliceModeData;
- break;
- case AV_CODEC_ID_HEVC:
- params->codecPicParams.hevcPicParams.sliceMode =
- ctx->config.encodeCodecConfig.hevcConfig.sliceMode;
- params->codecPicParams.hevcPicParams.sliceModeData =
- ctx->config.encodeCodecConfig.hevcConfig.sliceModeData;
- break;
+ av_freep(&ctx->timestamp_list.data);
+ av_freep(&ctx->output_surface_ready_queue.data);
+ av_freep(&ctx->output_surface_queue.data);
+
+ for (i = 0; i < ctx->max_surface_count; ++i) {
+ p_nvenc->nvEncDestroyInputBuffer(ctx->nvencoder, ctx->input_surfaces[i].input_surface);
+ p_nvenc->nvEncDestroyBitstreamBuffer(ctx->nvencoder, ctx->output_surfaces[i].output_surface);
}
-}
+ ctx->max_surface_count = 0;
-static inline int nvenc_enqueue_timestamp(AVFifoBuffer *f, int64_t pts)
-{
- return av_fifo_generic_write(f, &pts, sizeof(pts), NULL);
-}
+ p_nvenc->nvEncDestroyEncoder(ctx->nvencoder);
+ ctx->nvencoder = NULL;
-static inline int nvenc_dequeue_timestamp(AVFifoBuffer *f, int64_t *pts)
-{
- return av_fifo_generic_read(f, pts, sizeof(*pts), NULL);
-}
+ dl_fn->cu_ctx_destroy(ctx->cu_context);
+ ctx->cu_context = NULL;
-static inline int nvenc_enqueue_surface(AVFifoBuffer *f,
- NVENCOutputSurface *surf)
-{
- surf->busy = 1;
- return av_fifo_generic_write(f, &surf, sizeof(surf), NULL);
-}
+ nvenc_unload_nvenc(avctx);
-static inline int nvenc_dequeue_surface(AVFifoBuffer *f,
- NVENCOutputSurface **surf)
-{
- return av_fifo_generic_read(f, surf, sizeof(*surf), NULL);
+ return 0;
}
-static int nvenc_set_timestamp(NVENCContext *ctx,
- NV_ENC_LOCK_BITSTREAM *params,
- AVPacket *pkt)
+static int process_output_surface(AVCodecContext *avctx, AVPacket *pkt, NvencOutputSurface *tmpoutsurf)
{
- pkt->pts = params->outputTimeStamp;
- pkt->duration = params->outputDuration;
-
- return nvenc_dequeue_timestamp(ctx->timestamps, &pkt->dts);
-}
+ NvencContext *ctx = avctx->priv_data;
+ NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
+ NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &dl_fn->nvenc_funcs;
-static int nvenc_get_frame(AVCodecContext *avctx, AVPacket *pkt)
-{
- NVENCContext *ctx = avctx->priv_data;
- NV_ENCODE_API_FUNCTION_LIST *nv = &ctx->nvel.nvenc_funcs;
- NV_ENC_LOCK_BITSTREAM params = { 0 };
- NVENCOutputSurface *out = NULL;
- int ret;
+ uint32_t slice_mode_data;
+ uint32_t *slice_offsets;
+ NV_ENC_LOCK_BITSTREAM lock_params = { 0 };
+ NVENCSTATUS nv_status;
+ int res = 0;
- ret = nvenc_dequeue_surface(ctx->pending, &out);
- if (ret)
- return ret;
+ switch (avctx->codec->id) {
+ case AV_CODEC_ID_H264:
+ slice_mode_data = ctx->encode_config.encodeCodecConfig.h264Config.sliceModeData;
+ break;
+ case AV_CODEC_ID_H265:
+ slice_mode_data = ctx->encode_config.encodeCodecConfig.hevcConfig.sliceModeData;
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "nvenc: Unknown codec name\n");
+ res = AVERROR(EINVAL);
+ goto error;
+ }
+ slice_offsets = av_mallocz(slice_mode_data * sizeof(*slice_offsets));
- params.version = NV_ENC_LOCK_BITSTREAM_VER;
- params.outputBitstream = out->out;
+ if (!slice_offsets)
+ return AVERROR(ENOMEM);
- ret = nv->nvEncLockBitstream(ctx->nvenc_ctx, &params);
- if (ret < 0)
- return AVERROR_UNKNOWN;
+ lock_params.version = NV_ENC_LOCK_BITSTREAM_VER;
- ret = ff_alloc_packet(pkt, params.bitstreamSizeInBytes);
- if (ret < 0)
- return ret;
+ lock_params.doNotWait = 0;
+ lock_params.outputBitstream = tmpoutsurf->output_surface;
+ lock_params.sliceOffsets = slice_offsets;
- memcpy(pkt->data, params.bitstreamBufferPtr, pkt->size);
+ nv_status = p_nvenc->nvEncLockBitstream(ctx->nvencoder, &lock_params);
+ if (nv_status != NV_ENC_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed locking bitstream buffer\n");
+ res = AVERROR_EXTERNAL;
+ goto error;
+ }
- ret = nv->nvEncUnlockBitstream(ctx->nvenc_ctx, out->out);
- if (ret < 0)
- return AVERROR_UNKNOWN;
+ if ((res = ff_alloc_packet2(avctx, pkt, lock_params.bitstreamSizeInBytes, 0))) {
+ p_nvenc->nvEncUnlockBitstream(ctx->nvencoder, tmpoutsurf->output_surface);
+ goto error;
+ }
- out->busy = out->in->locked = 0;
+ memcpy(pkt->data, lock_params.bitstreamBufferPtr, lock_params.bitstreamSizeInBytes);
- ret = nvenc_set_timestamp(ctx, &params, pkt);
- if (ret < 0)
- return ret;
+ nv_status = p_nvenc->nvEncUnlockBitstream(ctx->nvencoder, tmpoutsurf->output_surface);
+ if (nv_status != NV_ENC_SUCCESS)
+ av_log(avctx, AV_LOG_ERROR, "Failed unlocking bitstream buffer, expect the gates of mordor to open\n");
- switch (params.pictureType) {
+ switch (lock_params.pictureType) {
case NV_ENC_PIC_TYPE_IDR:
pkt->flags |= AV_PKT_FLAG_KEY;
#if FF_API_CODED_FRAME
#endif
}
+ pkt->pts = lock_params.outputTimeStamp;
+ pkt->dts = timestamp_queue_dequeue(&ctx->timestamp_list);
+
+ /* When there are B-frames, set the DTS offset. */
+ if (ctx->encode_config.frameIntervalP >= 2)
+ pkt->dts -= 1;
+
+ if (pkt->dts > pkt->pts)
+ pkt->dts = pkt->pts;
+
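+ /* Keep DTS strictly increasing across packets. */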
+ if (ctx->last_dts != AV_NOPTS_VALUE && pkt->dts <= ctx->last_dts)
+ pkt->dts = ctx->last_dts + 1;
+
+ ctx->last_dts = pkt->dts;
+
+ av_free(slice_offsets);
+
return 0;
+
+error:
+
+ av_free(slice_offsets);
+ timestamp_queue_dequeue(&ctx->timestamp_list);
+
+ return res;
}
-int ff_nvenc_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
- const AVFrame *frame, int *got_packet)
+static int nvenc_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+ const AVFrame *frame, int *got_packet)
{
- NVENCContext *ctx = avctx->priv_data;
- NV_ENCODE_API_FUNCTION_LIST *nv = &ctx->nvel.nvenc_funcs;
- NV_ENC_PIC_PARAMS params = { 0 };
- NVENCInputSurface *in = NULL;
- NVENCOutputSurface *out = NULL;
- int ret;
+ NVENCSTATUS nv_status;
+ NvencOutputSurface *tmpoutsurf;
+ int res, i = 0;
- params.version = NV_ENC_PIC_PARAMS_VER;
+ NvencContext *ctx = avctx->priv_data;
+ NvencDynLoadFunctions *dl_fn = &ctx->nvenc_dload_funcs;
+ NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &dl_fn->nvenc_funcs;
+
+ NV_ENC_PIC_PARAMS pic_params = { 0 };
+ pic_params.version = NV_ENC_PIC_PARAMS_VER;
if (frame) {
- ret = nvenc_enqueue_frame(avctx, frame, &in);
- if (ret < 0)
- return ret;
- out = get_output_surface(ctx);
- if (!out)
- return AVERROR_BUG;
-
- out->in = in;
-
- params.inputBuffer = in->in;
- params.bufferFmt = in->format;
- params.inputWidth = frame->width;
- params.inputHeight = frame->height;
- params.outputBitstream = out->out;
- params.inputTimeStamp = frame->pts;
+ NV_ENC_LOCK_INPUT_BUFFER lockBufferParams = { 0 };
+ NvencInputSurface *inSurf = NULL;
+
+ for (i = 0; i < ctx->max_surface_count; ++i) {
+ if (!ctx->input_surfaces[i].lockCount) {
+ inSurf = &ctx->input_surfaces[i];
+ break;
+ }
+ }
+
+ av_assert0(inSurf);
+
+ inSurf->lockCount = 1;
+
+ lockBufferParams.version = NV_ENC_LOCK_INPUT_BUFFER_VER;
+ lockBufferParams.inputBuffer = inSurf->input_surface;
+
+ nv_status = p_nvenc->nvEncLockInputBuffer(ctx->nvencoder, &lockBufferParams);
+ if (nv_status != NV_ENC_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed locking nvenc input buffer\n");
+ return 0;
+ }
+
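+ /* Copy the frame planes into the locked (pitch-aligned) input buffer. */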
+ if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
+ uint8_t *buf = lockBufferParams.bufferDataPtr;
+
+ av_image_copy_plane(buf, lockBufferParams.pitch,
+ frame->data[0], frame->linesize[0],
+ avctx->width, avctx->height);
+
+ buf += inSurf->height * lockBufferParams.pitch;
+
+ av_image_copy_plane(buf, lockBufferParams.pitch >> 1,
+ frame->data[2], frame->linesize[2],
+ avctx->width >> 1, avctx->height >> 1);
+
+ buf += (inSurf->height * lockBufferParams.pitch) >> 2;
+
+ av_image_copy_plane(buf, lockBufferParams.pitch >> 1,
+ frame->data[1], frame->linesize[1],
+ avctx->width >> 1, avctx->height >> 1);
+ } else if (avctx->pix_fmt == AV_PIX_FMT_NV12) {
+ uint8_t *buf = lockBufferParams.bufferDataPtr;
+
+ av_image_copy_plane(buf, lockBufferParams.pitch,
+ frame->data[0], frame->linesize[0],
+ avctx->width, avctx->height);
+
+ buf += inSurf->height * lockBufferParams.pitch;
+
+ av_image_copy_plane(buf, lockBufferParams.pitch,
+ frame->data[1], frame->linesize[1],
+ avctx->width, avctx->height >> 1);
+ } else if (avctx->pix_fmt == AV_PIX_FMT_YUV444P) {
+ uint8_t *buf = lockBufferParams.bufferDataPtr;
+
+ av_image_copy_plane(buf, lockBufferParams.pitch,
+ frame->data[0], frame->linesize[0],
+ avctx->width, avctx->height);
+
+ buf += inSurf->height * lockBufferParams.pitch;
+
+ av_image_copy_plane(buf, lockBufferParams.pitch,
+ frame->data[1], frame->linesize[1],
+ avctx->width, avctx->height);
+
+ buf += inSurf->height * lockBufferParams.pitch;
+
+ av_image_copy_plane(buf, lockBufferParams.pitch,
+ frame->data[2], frame->linesize[2],
+ avctx->width, avctx->height);
+ } else {
+ av_log(avctx, AV_LOG_FATAL, "Invalid pixel format!\n");
+ return AVERROR(EINVAL);
+ }
+
+ nv_status = p_nvenc->nvEncUnlockInputBuffer(ctx->nvencoder, inSurf->input_surface);
+ if (nv_status != NV_ENC_SUCCESS) {
+ av_log(avctx, AV_LOG_FATAL, "Failed unlocking input buffer!\n");
+ return AVERROR_EXTERNAL;
+ }
+
+ for (i = 0; i < ctx->max_surface_count; ++i)
+ if (!ctx->output_surfaces[i].busy)
+ break;
+
+ if (i == ctx->max_surface_count) {
+ inSurf->lockCount = 0;
+ av_log(avctx, AV_LOG_FATAL, "No free output surface found!\n");
+ return AVERROR_EXTERNAL;
+ }
+
+ ctx->output_surfaces[i].input_surface = inSurf;
+
+ pic_params.inputBuffer = inSurf->input_surface;
+ pic_params.bufferFmt = inSurf->format;
+ pic_params.inputWidth = avctx->width;
+ pic_params.inputHeight = avctx->height;
+ pic_params.outputBitstream = ctx->output_surfaces[i].output_surface;
+ pic_params.completionEvent = 0;
- if (avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
+ if (avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
- if (frame->top_field_first)
- params.pictureStruct = NV_ENC_PIC_STRUCT_FIELD_TOP_BOTTOM;
- else
- params.pictureStruct = NV_ENC_PIC_STRUCT_FIELD_BOTTOM_TOP;
+ if (frame->top_field_first) {
+ pic_params.pictureStruct = NV_ENC_PIC_STRUCT_FIELD_TOP_BOTTOM;
+ } else {
+ pic_params.pictureStruct = NV_ENC_PIC_STRUCT_FIELD_BOTTOM_TOP;
+ }
} else {
- params.pictureStruct = NV_ENC_PIC_STRUCT_FRAME;
+ pic_params.pictureStruct = NV_ENC_PIC_STRUCT_FRAME;
}
- nvenc_codec_specific_pic_params(avctx, &params);
+ pic_params.encodePicFlags = 0;
+ pic_params.inputTimeStamp = frame->pts;
+ pic_params.inputDuration = 0;
+ switch (avctx->codec->id) {
+ case AV_CODEC_ID_H264:
+ pic_params.codecPicParams.h264PicParams.sliceMode = ctx->encode_config.encodeCodecConfig.h264Config.sliceMode;
+ pic_params.codecPicParams.h264PicParams.sliceModeData = ctx->encode_config.encodeCodecConfig.h264Config.sliceModeData;
+ break;
+ case AV_CODEC_ID_H265:
+ pic_params.codecPicParams.hevcPicParams.sliceMode = ctx->encode_config.encodeCodecConfig.hevcConfig.sliceMode;
+ pic_params.codecPicParams.hevcPicParams.sliceModeData = ctx->encode_config.encodeCodecConfig.hevcConfig.sliceModeData;
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "nvenc: Unknown codec name\n");
+ return AVERROR(EINVAL);
+ }
+
+ res = timestamp_queue_enqueue(&ctx->timestamp_list, frame->pts);
- ret = nvenc_enqueue_timestamp(ctx->timestamps, frame->pts);
- if (ret < 0)
- return ret;
+ if (res)
+ return res;
} else {
- params.encodePicFlags = NV_ENC_PIC_FLAG_EOS;
+ pic_params.encodePicFlags = NV_ENC_PIC_FLAG_EOS;
}
- ret = nv->nvEncEncodePicture(ctx->nvenc_ctx, &params);
+ nv_status = p_nvenc->nvEncEncodePicture(ctx->nvencoder, &pic_params);
+
+ if (frame && nv_status == NV_ENC_ERR_NEED_MORE_INPUT) {
+ res = out_surf_queue_enqueue(&ctx->output_surface_queue, &ctx->output_surfaces[i]);
- if (ret != NV_ENC_SUCCESS &&
- ret != NV_ENC_ERR_NEED_MORE_INPUT) {
+ if (res)
+ return res;
- return AVERROR_UNKNOWN;
+ ctx->output_surfaces[i].busy = 1;
}
- if (out) {
- ret = nvenc_enqueue_surface(ctx->pending, out);
- if (ret < 0)
- return ret;
+ if (nv_status != NV_ENC_SUCCESS && nv_status != NV_ENC_ERR_NEED_MORE_INPUT) {
+ av_log(avctx, AV_LOG_ERROR, "EncodePicture failed!\n");
+ return AVERROR_EXTERNAL;
+ }
+
+ if (nv_status != NV_ENC_ERR_NEED_MORE_INPUT) {
+ while (ctx->output_surface_queue.count) {
+ tmpoutsurf = out_surf_queue_dequeue(&ctx->output_surface_queue);
+ res = out_surf_queue_enqueue(&ctx->output_surface_ready_queue, tmpoutsurf);
+
+ if (res)
+ return res;
+ }
+
+ if (frame) {
+ res = out_surf_queue_enqueue(&ctx->output_surface_ready_queue, &ctx->output_surfaces[i]);
+
+ if (res)
+ return res;
+
+ ctx->output_surfaces[i].busy = 1;
+ }
}
- if (ret != NV_ENC_ERR_NEED_MORE_INPUT &&
- av_fifo_size(ctx->pending)) {
- ret = nvenc_get_frame(avctx, pkt);
- if (ret < 0)
- return ret;
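+ /* Emit a packet only when flushing or once enough frames are queued to honour buffer_delay. */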
+ if (ctx->output_surface_ready_queue.count && (!frame || ctx->output_surface_ready_queue.count + ctx->output_surface_queue.count >= ctx->buffer_delay)) {
+ tmpoutsurf = out_surf_queue_dequeue(&ctx->output_surface_ready_queue);
+
+ res = process_output_surface(avctx, pkt, tmpoutsurf);
+
+ if (res)
+ return res;
+
+ tmpoutsurf->busy = 0;
+ av_assert0(tmpoutsurf->input_surface->lockCount);
+ tmpoutsurf->input_surface->lockCount--;
+
*got_packet = 1;
} else {
*got_packet = 0;
ff_fft_init(&c->fft256, 7, 0);
ff_fft_init(&c->fft512, 8, 1);
ff_fft_init(&c->fft1024, 9, 1);
- c->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
- avpriv_float_dsp_init(&c->fdsp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
++ c->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
+ if (!c->fdsp)
+ return AVERROR(ENOMEM);
if (init_vlc(&c->scale_diff, 9, ON2AVC_SCALE_DIFFS,
ff_on2avc_scale_diff_bits, 1, 1,
"ratecontrol is willing to deviate from the target average bitrate value. This is not related "
"to minimum/maximum bitrate. Lowering tolerance too much has an adverse effect on quality.",
OFFSET(bit_rate_tolerance), AV_OPT_TYPE_INT, {.i64 = AV_CODEC_DEFAULT_BITRATE*20 }, 1, INT_MAX, V|E},
-{"flags", NULL, OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64 = DEFAULT }, 0, UINT_MAX, V|A|E|D, "flags"},
+{"flags", NULL, OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64 = DEFAULT }, 0, UINT_MAX, V|A|S|E|D, "flags"},
- {"unaligned", "allow decoders to produce unaligned output", 0, AV_OPT_TYPE_CONST, { .i64 = CODEC_FLAG_UNALIGNED }, INT_MIN, INT_MAX, V | D, "flags" },
- {"mv4", "use four motion vectors per macroblock (MPEG-4)", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_4MV }, INT_MIN, INT_MAX, V|E, "flags"},
- {"qpel", "use 1/4-pel motion compensation", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_QPEL }, INT_MIN, INT_MAX, V|E, "flags"},
- {"loop", "use loop filter", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_LOOP_FILTER }, INT_MIN, INT_MAX, V|E, "flags"},
- {"qscale", "use fixed qscale", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_QSCALE }, INT_MIN, INT_MAX, 0, "flags"},
+ {"unaligned", "allow decoders to produce unaligned output", 0, AV_OPT_TYPE_CONST, { .i64 = AV_CODEC_FLAG_UNALIGNED }, INT_MIN, INT_MAX, V | D, "flags" },
+ {"mv4", "use four motion vectors per macroblock (MPEG-4)", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_4MV }, INT_MIN, INT_MAX, V|E, "flags"},
+ {"qpel", "use 1/4-pel motion compensation", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_QPEL }, INT_MIN, INT_MAX, V|E, "flags"},
+ {"loop", "use loop filter", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_LOOP_FILTER }, INT_MIN, INT_MAX, V|E, "flags"},
+ {"qscale", "use fixed qscale", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_QSCALE }, INT_MIN, INT_MAX, 0, "flags"},
#if FF_API_GMC
{"gmc", "use gmc", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_GMC }, INT_MIN, INT_MAX, V|E, "flags"},
#endif
#if FF_API_NORMALIZE_AQP
{"naq", "normalize adaptive quantization", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_NORMALIZE_AQP }, INT_MIN, INT_MAX, V|E, "flags"},
#endif
- {"ildct", "use interlaced DCT", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_INTERLACED_DCT }, INT_MIN, INT_MAX, V|E, "flags"},
- {"low_delay", "force low delay", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_LOW_DELAY }, INT_MIN, INT_MAX, V|D|E, "flags"},
- {"global_header", "place global headers in extradata instead of every keyframe", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_GLOBAL_HEADER }, INT_MIN, INT_MAX, V|A|E, "flags"},
- {"bitexact", "use only bitexact functions (except (I)DCT)", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_BITEXACT }, INT_MIN, INT_MAX, A|V|S|D|E, "flags"},
- {"aic", "H.263 advanced intra coding / MPEG-4 AC prediction", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_AC_PRED }, INT_MIN, INT_MAX, V|E, "flags"},
- {"ilme", "interlaced motion estimation", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_INTERLACED_ME }, INT_MIN, INT_MAX, V|E, "flags"},
- {"cgop", "closed GOP", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_CLOSED_GOP }, INT_MIN, INT_MAX, V|E, "flags"},
- {"output_corrupt", "Output even potentially corrupted frames", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG_OUTPUT_CORRUPT }, INT_MIN, INT_MAX, V|D, "flags"},
- {"fast", "allow non-spec-compliant speedup tricks", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG2_FAST }, INT_MIN, INT_MAX, V|E, "flags2"},
- {"noout", "skip bitstream encoding", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG2_NO_OUTPUT }, INT_MIN, INT_MAX, V|E, "flags2"},
- {"ignorecrop", "ignore cropping information from sps", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG2_IGNORE_CROP }, INT_MIN, INT_MAX, V|D, "flags2"},
- {"local_header", "place global headers at every keyframe instead of in extradata", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG2_LOCAL_HEADER }, INT_MIN, INT_MAX, V|E, "flags2"},
- {"chunks", "Frame data might be split into multiple chunks", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG2_CHUNKS }, INT_MIN, INT_MAX, V|D, "flags2"},
- {"showall", "Show all frames before the first keyframe", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG2_SHOW_ALL }, INT_MIN, INT_MAX, V|D, "flags2"},
- {"export_mvs", "export motion vectors through frame side data", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG2_EXPORT_MVS}, INT_MIN, INT_MAX, V|D, "flags2"},
- {"skip_manual", "do not skip samples and export skip information as frame side data", 0, AV_OPT_TYPE_CONST, {.i64 = CODEC_FLAG2_SKIP_MANUAL}, INT_MIN, INT_MAX, V|D, "flags2"},
+ {"ildct", "use interlaced DCT", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_INTERLACED_DCT }, INT_MIN, INT_MAX, V|E, "flags"},
+ {"low_delay", "force low delay", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_LOW_DELAY }, INT_MIN, INT_MAX, V|D|E, "flags"},
+ {"global_header", "place global headers in extradata instead of every keyframe", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_GLOBAL_HEADER }, INT_MIN, INT_MAX, V|A|E, "flags"},
+ {"bitexact", "use only bitexact functions (except (I)DCT)", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_BITEXACT }, INT_MIN, INT_MAX, A|V|S|D|E, "flags"},
+ {"aic", "H.263 advanced intra coding / MPEG-4 AC prediction", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_AC_PRED }, INT_MIN, INT_MAX, V|E, "flags"},
+ {"ilme", "interlaced motion estimation", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_INTERLACED_ME }, INT_MIN, INT_MAX, V|E, "flags"},
+ {"cgop", "closed GOP", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_CLOSED_GOP }, INT_MIN, INT_MAX, V|E, "flags"},
+ {"output_corrupt", "Output even potentially corrupted frames", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG_OUTPUT_CORRUPT }, INT_MIN, INT_MAX, V|D, "flags"},
+ {"fast", "allow non-spec-compliant speedup tricks", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_FAST }, INT_MIN, INT_MAX, V|E, "flags2"},
+ {"noout", "skip bitstream encoding", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_NO_OUTPUT }, INT_MIN, INT_MAX, V|E, "flags2"},
-{"ignorecrop", "ignore cropping information from sps", 1, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_IGNORE_CROP }, INT_MIN, INT_MAX, V|D, "flags2"},
++{"ignorecrop", "ignore cropping information from sps", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_IGNORE_CROP }, INT_MIN, INT_MAX, V|D, "flags2"},
+ {"local_header", "place global headers at every keyframe instead of in extradata", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_LOCAL_HEADER }, INT_MIN, INT_MAX, V|E, "flags2"},
++{"chunks", "Frame data might be split into multiple chunks", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_CHUNKS }, INT_MIN, INT_MAX, V|D, "flags2"},
++{"showall", "Show all frames before the first keyframe", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_SHOW_ALL }, INT_MIN, INT_MAX, V|D, "flags2"},
++{"export_mvs", "export motion vectors through frame side data", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_EXPORT_MVS}, INT_MIN, INT_MAX, V|D, "flags2"},
++{"skip_manual", "do not skip samples and export skip information as frame side data", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_FLAG2_SKIP_MANUAL}, INT_MIN, INT_MAX, V|D, "flags2"},
#if FF_API_MOTION_EST
{"me_method", "set motion estimation method", OFFSET(me_method), AV_OPT_TYPE_INT, {.i64 = ME_EPZS }, INT_MIN, INT_MAX, V|E, "me_method"},
{"zero", "zero motion estimation (fastest)", 0, AV_OPT_TYPE_CONST, {.i64 = ME_ZERO }, INT_MIN, INT_MAX, V|E, "me_method" },
goto fail;
}
- s->dsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
- avpriv_float_dsp_init(&s->dsp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
++ s->dsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
+ if (!s->dsp) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
ff_celt_flush(s);
if (avctx->pix_fmt == AV_PIX_FMT_MONOBLACK)
s->filter_type = PNG_FILTER_VALUE_NONE;
- s->is_progressive = !!(avctx->flags & CODEC_FLAG_INTERLACED_DCT);
+ if (s->dpi && s->dpm) {
+ av_log(avctx, AV_LOG_ERROR, "Only one of 'dpi' or 'dpm' options should be set\n");
+ return AVERROR(EINVAL);
+ } else if (s->dpi) {
+ s->dpm = s->dpi * 10000 / 254;
+ }
+
++ s->is_progressive = !!(avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT);
+ switch (avctx->pix_fmt) {
+ case AV_PIX_FMT_RGBA64BE:
+ s->bit_depth = 16;
+ s->color_type = PNG_COLOR_TYPE_RGB_ALPHA;
+ break;
+ case AV_PIX_FMT_RGB48BE:
+ s->bit_depth = 16;
+ s->color_type = PNG_COLOR_TYPE_RGB;
+ break;
+ case AV_PIX_FMT_RGBA:
+ s->bit_depth = 8;
+ s->color_type = PNG_COLOR_TYPE_RGB_ALPHA;
+ break;
+ case AV_PIX_FMT_RGB24:
+ s->bit_depth = 8;
+ s->color_type = PNG_COLOR_TYPE_RGB;
+ break;
+ case AV_PIX_FMT_GRAY16BE:
+ s->bit_depth = 16;
+ s->color_type = PNG_COLOR_TYPE_GRAY;
+ break;
+ case AV_PIX_FMT_GRAY8:
+ s->bit_depth = 8;
+ s->color_type = PNG_COLOR_TYPE_GRAY;
+ break;
+ case AV_PIX_FMT_GRAY8A:
+ s->bit_depth = 8;
+ s->color_type = PNG_COLOR_TYPE_GRAY_ALPHA;
+ break;
+ case AV_PIX_FMT_YA16BE:
+ s->bit_depth = 16;
+ s->color_type = PNG_COLOR_TYPE_GRAY_ALPHA;
+ break;
+ case AV_PIX_FMT_MONOBLACK:
+ s->bit_depth = 1;
+ s->color_type = PNG_COLOR_TYPE_GRAY;
+ break;
+ case AV_PIX_FMT_PAL8:
+ s->bit_depth = 8;
+ s->color_type = PNG_COLOR_TYPE_PALETTE;
+ break;
+ default:
+ return -1;
+ }
+ s->bits_per_pixel = ff_png_get_nb_channels(s->color_type) * s->bit_depth;
+
+ s->zstream.zalloc = ff_png_zalloc;
+ s->zstream.zfree = ff_png_zfree;
+ s->zstream.opaque = NULL;
+ compression_level = avctx->compression_level == FF_COMPRESSION_DEFAULT
+ ? Z_DEFAULT_COMPRESSION
+ : av_clip(avctx->compression_level, 0, 9);
+ if (deflateInit2(&s->zstream, compression_level, Z_DEFLATED, 15, 8, Z_DEFAULT_STRATEGY) != Z_OK)
+ return -1;
+
+ return 0;
+}
+
+static av_cold int png_enc_close(AVCodecContext *avctx)
+{
+ PNGEncContext *s = avctx->priv_data;
+
+ deflateEnd(&s->zstream);
return 0;
}
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
- if (!high_bit_depth) {
- if ((avctx->idct_algo == FF_IDCT_AUTO) ||
+ if (!high_bit_depth && avctx->lowres == 0) {
- if ((avctx->idct_algo == FF_IDCT_AUTO && !(avctx->flags & CODEC_FLAG_BITEXACT)) ||
++ if ((avctx->idct_algo == FF_IDCT_AUTO && !(avctx->flags & AV_CODEC_FLAG_BITEXACT)) ||
(avctx->idct_algo == FF_IDCT_ALTIVEC)) {
+ c->idct = idct_altivec;
c->idct_add = idct_add_altivec;
c->idct_put = idct_put_altivec;
c->perm_type = FF_IDCT_PERM_TRANSPOSE;
--- /dev/null
- if (!(avctx->flags & CODEC_FLAG_GRAY)) {
+/*
+ * Copyright (c) 2010-2011 Maxim Poliakovski
+ * Copyright (c) 2010-2011 Elvis Presley
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Known FOURCCs: 'apch' (HQ), 'apcn' (SD), 'apcs' (LT), 'apco' (Proxy), 'ap4h' (4444)
+ */
+
+//#define DEBUG
+
+#define LONG_BITSTREAM_READER
+
+#include "avcodec.h"
+#include "get_bits.h"
+#include "idctdsp.h"
+#include "internal.h"
+#include "simple_idct.h"
+#include "proresdec.h"
+#include "proresdata.h"
+
+static void permute(uint8_t *dst, const uint8_t *src, const uint8_t permutation[64])
+{
+ int i;
+ for (i = 0; i < 64; i++)
+ dst[i] = permutation[src[i]];
+}
+
+static av_cold int decode_init(AVCodecContext *avctx)
+{
+ ProresContext *ctx = avctx->priv_data;
+ uint8_t idct_permutation[64];
+
+ avctx->bits_per_raw_sample = 10;
+
+ ff_blockdsp_init(&ctx->bdsp, avctx);
+ ff_proresdsp_init(&ctx->prodsp, avctx);
+
+ ff_init_scantable_permutation(idct_permutation,
+ ctx->prodsp.idct_permutation_type);
+
+ permute(ctx->progressive_scan, ff_prores_progressive_scan, idct_permutation);
+ permute(ctx->interlaced_scan, ff_prores_interlaced_scan, idct_permutation);
+
+ return 0;
+}
+
+static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
+ const int data_size, AVCodecContext *avctx)
+{
+ int hdr_size, width, height, flags;
+ int version;
+ const uint8_t *ptr;
+
+ hdr_size = AV_RB16(buf);
+ av_dlog(avctx, "header size %d\n", hdr_size);
+ if (hdr_size > data_size) {
+ av_log(avctx, AV_LOG_ERROR, "error, wrong header size\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ version = AV_RB16(buf + 2);
+ av_dlog(avctx, "%.4s version %d\n", buf+4, version);
+ if (version > 1) {
+ av_log(avctx, AV_LOG_ERROR, "unsupported version: %d\n", version);
+ return AVERROR_PATCHWELCOME;
+ }
+
+ width = AV_RB16(buf + 8);
+ height = AV_RB16(buf + 10);
+ if (width != avctx->width || height != avctx->height) {
+ av_log(avctx, AV_LOG_ERROR, "picture resolution change: %dx%d -> %dx%d\n",
+ avctx->width, avctx->height, width, height);
+ return AVERROR_PATCHWELCOME;
+ }
+
+ ctx->frame_type = (buf[12] >> 2) & 3;
+ ctx->alpha_info = buf[17] & 0xf;
+
+ if (ctx->alpha_info > 2) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid alpha mode %d\n", ctx->alpha_info);
+ return AVERROR_INVALIDDATA;
+ }
+ if (avctx->skip_alpha) ctx->alpha_info = 0;
+
+ av_dlog(avctx, "frame type %d\n", ctx->frame_type);
+
+ if (ctx->frame_type == 0) {
+ ctx->scan = ctx->progressive_scan; // permuted
+ } else {
+ ctx->scan = ctx->interlaced_scan; // permuted
+ ctx->frame->interlaced_frame = 1;
+ ctx->frame->top_field_first = ctx->frame_type == 1;
+ }
+
+ if (ctx->alpha_info) {
+ avctx->pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUVA444P10 : AV_PIX_FMT_YUVA422P10;
+ } else {
+ avctx->pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUV444P10 : AV_PIX_FMT_YUV422P10;
+ }
+
+ ptr = buf + 20;
+ flags = buf[19];
+ av_dlog(avctx, "flags %x\n", flags);
+
+ if (flags & 2) {
+ if(buf + data_size - ptr < 64) {
+ av_log(avctx, AV_LOG_ERROR, "Header truncated\n");
+ return AVERROR_INVALIDDATA;
+ }
+ permute(ctx->qmat_luma, ctx->prodsp.idct_permutation, ptr);
+ ptr += 64;
+ } else {
+ memset(ctx->qmat_luma, 4, 64);
+ }
+
+ if (flags & 1) {
+ if(buf + data_size - ptr < 64) {
+ av_log(avctx, AV_LOG_ERROR, "Header truncated\n");
+ return AVERROR_INVALIDDATA;
+ }
+ permute(ctx->qmat_chroma, ctx->prodsp.idct_permutation, ptr);
+ } else {
+ memset(ctx->qmat_chroma, 4, 64);
+ }
+
+ return hdr_size;
+}
+
+static int decode_picture_header(AVCodecContext *avctx, const uint8_t *buf, const int buf_size)
+{
+ ProresContext *ctx = avctx->priv_data;
+ int i, hdr_size, slice_count;
+ unsigned pic_data_size;
+ int log2_slice_mb_width, log2_slice_mb_height;
+ int slice_mb_count, mb_x, mb_y;
+ const uint8_t *data_ptr, *index_ptr;
+
+ hdr_size = buf[0] >> 3;
+ if (hdr_size < 8 || hdr_size > buf_size) {
+ av_log(avctx, AV_LOG_ERROR, "error, wrong picture header size\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ pic_data_size = AV_RB32(buf + 1);
+ if (pic_data_size > buf_size) {
+ av_log(avctx, AV_LOG_ERROR, "error, wrong picture data size\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ log2_slice_mb_width = buf[7] >> 4;
+ log2_slice_mb_height = buf[7] & 0xF;
+ if (log2_slice_mb_width > 3 || log2_slice_mb_height) {
+ av_log(avctx, AV_LOG_ERROR, "unsupported slice resolution: %dx%d\n",
+ 1 << log2_slice_mb_width, 1 << log2_slice_mb_height);
+ return AVERROR_INVALIDDATA;
+ }
+
+ ctx->mb_width = (avctx->width + 15) >> 4;
+ if (ctx->frame_type)
+ ctx->mb_height = (avctx->height + 31) >> 5;
+ else
+ ctx->mb_height = (avctx->height + 15) >> 4;
+
+ slice_count = AV_RB16(buf + 5);
+
+ if (ctx->slice_count != slice_count || !ctx->slices) {
+ av_freep(&ctx->slices);
+ ctx->slice_count = 0;
+ ctx->slices = av_mallocz_array(slice_count, sizeof(*ctx->slices));
+ if (!ctx->slices)
+ return AVERROR(ENOMEM);
+ ctx->slice_count = slice_count;
+ }
+
+ if (!slice_count)
+ return AVERROR(EINVAL);
+
+ if (hdr_size + slice_count*2 > buf_size) {
+ av_log(avctx, AV_LOG_ERROR, "error, wrong slice count\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ // parse slice information
+ index_ptr = buf + hdr_size;
+ data_ptr = index_ptr + slice_count*2;
+
+ slice_mb_count = 1 << log2_slice_mb_width;
+ mb_x = 0;
+ mb_y = 0;
+
+ for (i = 0; i < slice_count; i++) {
+ SliceContext *slice = &ctx->slices[i];
+
+ slice->data = data_ptr;
+ data_ptr += AV_RB16(index_ptr + i*2);
+
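+ /* Shrink the slice width at the right edge of the picture. */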
+ while (ctx->mb_width - mb_x < slice_mb_count)
+ slice_mb_count >>= 1;
+
+ slice->mb_x = mb_x;
+ slice->mb_y = mb_y;
+ slice->mb_count = slice_mb_count;
+ slice->data_size = data_ptr - slice->data;
+
+ if (slice->data_size < 6) {
+ av_log(avctx, AV_LOG_ERROR, "error, wrong slice data size\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ mb_x += slice_mb_count;
+ if (mb_x == ctx->mb_width) {
+ slice_mb_count = 1 << log2_slice_mb_width;
+ mb_x = 0;
+ mb_y++;
+ }
+ if (data_ptr > buf + buf_size) {
+ av_log(avctx, AV_LOG_ERROR, "error, slice out of bounds\n");
+ return AVERROR_INVALIDDATA;
+ }
+ }
+
+ if (mb_x || mb_y != ctx->mb_height) {
+ av_log(avctx, AV_LOG_ERROR, "error wrong mb count y %d h %d\n",
+ mb_y, ctx->mb_height);
+ return AVERROR_INVALIDDATA;
+ }
+
+ return pic_data_size;
+}
+
+#define DECODE_CODEWORD(val, codebook) \
+ do { \
+ unsigned int rice_order, exp_order, switch_bits; \
+ unsigned int q, buf, bits; \
+ \
+ UPDATE_CACHE(re, gb); \
+ buf = GET_CACHE(re, gb); \
+ \
+ /* number of bits to switch between rice and exp golomb */ \
+ switch_bits = codebook & 3; \
+ rice_order = codebook >> 5; \
+ exp_order = (codebook >> 2) & 7; \
+ \
+ q = 31 - av_log2(buf); \
+ \
+ if (q > switch_bits) { /* exp golomb */ \
+ bits = exp_order - switch_bits + (q<<1); \
+ val = SHOW_UBITS(re, gb, bits) - (1 << exp_order) + \
+ ((switch_bits + 1) << rice_order); \
+ SKIP_BITS(re, gb, bits); \
+ } else if (rice_order) { \
+ SKIP_BITS(re, gb, q+1); \
+ val = (q << rice_order) + SHOW_UBITS(re, gb, rice_order); \
+ SKIP_BITS(re, gb, rice_order); \
+ } else { \
+ val = q; \
+ SKIP_BITS(re, gb, q+1); \
+ } \
+ } while (0)
+
+#define TOSIGNED(x) (((x) >> 1) ^ (-((x) & 1)))
+
+#define FIRST_DC_CB 0xB8
+
+static const uint8_t dc_codebook[7] = { 0x04, 0x28, 0x28, 0x4D, 0x4D, 0x70, 0x70};
+
+static av_always_inline void decode_dc_coeffs(GetBitContext *gb, int16_t *out,
+ int blocks_per_slice)
+{
+ int16_t prev_dc;
+ int code, i, sign;
+
+ OPEN_READER(re, gb);
+
+ DECODE_CODEWORD(code, FIRST_DC_CB);
+ prev_dc = TOSIGNED(code);
+ out[0] = prev_dc;
+
+ out += 64; // dc coeff for the next block
+
+ code = 5;
+ sign = 0;
+ for (i = 1; i < blocks_per_slice; i++, out += 64) {
+ DECODE_CODEWORD(code, dc_codebook[FFMIN(code, 6U)]);
+ if(code) sign ^= -(code & 1);
+ else sign = 0;
+ prev_dc += (((code + 1) >> 1) ^ sign) - sign;
+ out[0] = prev_dc;
+ }
+ CLOSE_READER(re, gb);
+}
+
+// adaptive codebook switching lut according to previous run/level values
+static const uint8_t run_to_cb[16] = { 0x06, 0x06, 0x05, 0x05, 0x04, 0x29, 0x29, 0x29, 0x29, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x4C };
+static const uint8_t lev_to_cb[10] = { 0x04, 0x0A, 0x05, 0x06, 0x04, 0x28, 0x28, 0x28, 0x28, 0x4C };
+
+static av_always_inline int decode_ac_coeffs(AVCodecContext *avctx, GetBitContext *gb,
+ int16_t *out, int blocks_per_slice)
+{
+ ProresContext *ctx = avctx->priv_data;
+ int block_mask, sign;
+ unsigned pos, run, level;
+ int max_coeffs, i, bits_left;
+ int log2_block_count = av_log2(blocks_per_slice);
+
+ OPEN_READER(re, gb);
+ UPDATE_CACHE(re, gb);
+ run = 4;
+ level = 2;
+
+ max_coeffs = 64 << log2_block_count;
+ block_mask = blocks_per_slice - 1;
+
+ for (pos = block_mask;;) {
+ bits_left = gb->size_in_bits - re_index;
+ if (!bits_left || (bits_left < 32 && !SHOW_UBITS(re, gb, bits_left)))
+ break;
+
+ DECODE_CODEWORD(run, run_to_cb[FFMIN(run, 15)]);
+ pos += run + 1;
+ if (pos >= max_coeffs) {
+ av_log(avctx, AV_LOG_ERROR, "ac tex damaged %d, %d\n", pos, max_coeffs);
+ return AVERROR_INVALIDDATA;
+ }
+
+ DECODE_CODEWORD(level, lev_to_cb[FFMIN(level, 9)]);
+ level += 1;
+
+ i = pos >> log2_block_count;
+
+ sign = SHOW_SBITS(re, gb, 1);
+ SKIP_BITS(re, gb, 1);
+ out[((pos & block_mask) << 6) + ctx->scan[i]] = ((level ^ sign) - sign);
+ }
+
+ CLOSE_READER(re, gb);
+ return 0;
+}
+
+static int decode_slice_luma(AVCodecContext *avctx, SliceContext *slice,
+ uint16_t *dst, int dst_stride,
+ const uint8_t *buf, unsigned buf_size,
+ const int16_t *qmat)
+{
+ ProresContext *ctx = avctx->priv_data;
+ LOCAL_ALIGNED_16(int16_t, blocks, [8*4*64]);
+ int16_t *block;
+ GetBitContext gb;
+ int i, blocks_per_slice = slice->mb_count<<2;
+ int ret;
+
+ for (i = 0; i < blocks_per_slice; i++)
+ ctx->bdsp.clear_block(blocks+(i<<6));
+
+ init_get_bits(&gb, buf, buf_size << 3);
+
+ decode_dc_coeffs(&gb, blocks, blocks_per_slice);
+ if ((ret = decode_ac_coeffs(avctx, &gb, blocks, blocks_per_slice)) < 0)
+ return ret;
+
+ block = blocks;
+ for (i = 0; i < slice->mb_count; i++) {
+ ctx->prodsp.idct_put(dst, dst_stride, block+(0<<6), qmat);
+ ctx->prodsp.idct_put(dst +8, dst_stride, block+(1<<6), qmat);
+ ctx->prodsp.idct_put(dst+4*dst_stride , dst_stride, block+(2<<6), qmat);
+ ctx->prodsp.idct_put(dst+4*dst_stride+8, dst_stride, block+(3<<6), qmat);
+ block += 4*64;
+ dst += 16;
+ }
+ return 0;
+}
+
+static int decode_slice_chroma(AVCodecContext *avctx, SliceContext *slice,
+ uint16_t *dst, int dst_stride,
+ const uint8_t *buf, unsigned buf_size,
+ const int16_t *qmat, int log2_blocks_per_mb)
+{
+ ProresContext *ctx = avctx->priv_data;
+ LOCAL_ALIGNED_16(int16_t, blocks, [8*4*64]);
+ int16_t *block;
+ GetBitContext gb;
+ int i, j, blocks_per_slice = slice->mb_count << log2_blocks_per_mb;
+ int ret;
+
+ for (i = 0; i < blocks_per_slice; i++)
+ ctx->bdsp.clear_block(blocks+(i<<6));
+
+ init_get_bits(&gb, buf, buf_size << 3);
+
+ decode_dc_coeffs(&gb, blocks, blocks_per_slice);
+ if ((ret = decode_ac_coeffs(avctx, &gb, blocks, blocks_per_slice)) < 0)
+ return ret;
+
+ block = blocks;
+ for (i = 0; i < slice->mb_count; i++) {
+ for (j = 0; j < log2_blocks_per_mb; j++) {
+ ctx->prodsp.idct_put(dst, dst_stride, block+(0<<6), qmat);
+ ctx->prodsp.idct_put(dst+4*dst_stride, dst_stride, block+(1<<6), qmat);
+ block += 2*64;
+ dst += 8;
+ }
+ }
+ return 0;
+}
+
+static void unpack_alpha(GetBitContext *gb, uint16_t *dst, int num_coeffs,
+ const int num_bits)
+{
+ const int mask = (1 << num_bits) - 1;
+ int i, idx, val, alpha_val;
+
+ idx = 0;
+ alpha_val = mask;
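+ /* Alternate between explicitly coded alpha deltas and run-length repeats of the last value. */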
+ do {
+ do {
+ if (get_bits1(gb)) {
+ val = get_bits(gb, num_bits);
+ } else {
+ int sign;
+ val = get_bits(gb, num_bits == 16 ? 7 : 4);
+ sign = val & 1;
+ val = (val + 2) >> 1;
+ if (sign)
+ val = -val;
+ }
+ alpha_val = (alpha_val + val) & mask;
+ if (num_bits == 16) {
+ dst[idx++] = alpha_val >> 6;
+ } else {
+ dst[idx++] = (alpha_val << 2) | (alpha_val >> 6);
+ }
+ if (idx >= num_coeffs)
+ break;
+ } while (get_bits_left(gb)>0 && get_bits1(gb));
+ val = get_bits(gb, 4);
+ if (!val)
+ val = get_bits(gb, 11);
+ if (idx + val > num_coeffs)
+ val = num_coeffs - idx;
+ if (num_bits == 16) {
+ for (i = 0; i < val; i++)
+ dst[idx++] = alpha_val >> 6;
+ } else {
+ for (i = 0; i < val; i++)
+ dst[idx++] = (alpha_val << 2) | (alpha_val >> 6);
+
+ }
+ } while (idx < num_coeffs);
+}
+
+/**
+ * Decode alpha slice plane.
+ */
+static void decode_slice_alpha(ProresContext *ctx,
+ uint16_t *dst, int dst_stride,
+ const uint8_t *buf, int buf_size,
+ int blocks_per_slice)
+{
+ GetBitContext gb;
+ int i;
+ LOCAL_ALIGNED_16(int16_t, blocks, [8*4*64]);
+ int16_t *block;
+
+ for (i = 0; i < blocks_per_slice<<2; i++)
+ ctx->bdsp.clear_block(blocks+(i<<6));
+
+ init_get_bits(&gb, buf, buf_size << 3);
+
+ if (ctx->alpha_info == 2) {
+ unpack_alpha(&gb, blocks, blocks_per_slice * 4 * 64, 16);
+ } else {
+ unpack_alpha(&gb, blocks, blocks_per_slice * 4 * 64, 8);
+ }
+
+ block = blocks;
+ for (i = 0; i < 16; i++) {
+ memcpy(dst, block, 16 * blocks_per_slice * sizeof(*dst));
+ dst += dst_stride >> 1;
+ block += 16 * blocks_per_slice;
+ }
+}
+
+static int decode_slice_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
+{
+ ProresContext *ctx = avctx->priv_data;
+ SliceContext *slice = &ctx->slices[jobnr];
+ const uint8_t *buf = slice->data;
+ AVFrame *pic = ctx->frame;
+ int i, hdr_size, qscale, log2_chroma_blocks_per_mb;
+ int luma_stride, chroma_stride;
+ int y_data_size, u_data_size, v_data_size, a_data_size;
+ uint8_t *dest_y, *dest_u, *dest_v, *dest_a;
+ int16_t qmat_luma_scaled[64];
+ int16_t qmat_chroma_scaled[64];
+ int mb_x_shift;
+ int ret;
+
+ slice->ret = -1;
+ //av_log(avctx, AV_LOG_INFO, "slice %d mb width %d mb x %d y %d\n",
+ // jobnr, slice->mb_count, slice->mb_x, slice->mb_y);
+
+ // slice header
+ hdr_size = buf[0] >> 3;
+ qscale = av_clip(buf[1], 1, 224);
+ qscale = qscale > 128 ? qscale - 96 << 2: qscale;
+ y_data_size = AV_RB16(buf + 2);
+ u_data_size = AV_RB16(buf + 4);
+ v_data_size = slice->data_size - y_data_size - u_data_size - hdr_size;
+ if (hdr_size > 7) v_data_size = AV_RB16(buf + 6);
+ a_data_size = slice->data_size - y_data_size - u_data_size -
+ v_data_size - hdr_size;
+
+ if (y_data_size < 0 || u_data_size < 0 || v_data_size < 0
+ || hdr_size+y_data_size+u_data_size+v_data_size > slice->data_size){
+ av_log(avctx, AV_LOG_ERROR, "invalid plane data size\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ buf += hdr_size;
+
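+ /* Scale the quantisation matrices by the slice qscale. */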
+ for (i = 0; i < 64; i++) {
+ qmat_luma_scaled [i] = ctx->qmat_luma [i] * qscale;
+ qmat_chroma_scaled[i] = ctx->qmat_chroma[i] * qscale;
+ }
+
+ if (ctx->frame_type == 0) {
+ luma_stride = pic->linesize[0];
+ chroma_stride = pic->linesize[1];
+ } else {
+ luma_stride = pic->linesize[0] << 1;
+ chroma_stride = pic->linesize[1] << 1;
+ }
+
+ if (avctx->pix_fmt == AV_PIX_FMT_YUV444P10 || avctx->pix_fmt == AV_PIX_FMT_YUVA444P10) {
+ mb_x_shift = 5;
+ log2_chroma_blocks_per_mb = 2;
+ } else {
+ mb_x_shift = 4;
+ log2_chroma_blocks_per_mb = 1;
+ }
+
+ dest_y = pic->data[0] + (slice->mb_y << 4) * luma_stride + (slice->mb_x << 5);
+ dest_u = pic->data[1] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift);
+ dest_v = pic->data[2] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift);
+ dest_a = pic->data[3] + (slice->mb_y << 4) * luma_stride + (slice->mb_x << 5);
+
+ if (ctx->frame_type && ctx->first_field ^ ctx->frame->top_field_first) {
+ dest_y += pic->linesize[0];
+ dest_u += pic->linesize[1];
+ dest_v += pic->linesize[2];
+ dest_a += pic->linesize[3];
+ }
+
+ ret = decode_slice_luma(avctx, slice, (uint16_t*)dest_y, luma_stride,
+ buf, y_data_size, qmat_luma_scaled);
+ if (ret < 0)
+ return ret;
+
+ if (!(avctx->flags & AV_CODEC_FLAG_GRAY)) {
+ ret = decode_slice_chroma(avctx, slice, (uint16_t*)dest_u, chroma_stride,
+ buf + y_data_size, u_data_size,
+ qmat_chroma_scaled, log2_chroma_blocks_per_mb);
+ if (ret < 0)
+ return ret;
+
+ ret = decode_slice_chroma(avctx, slice, (uint16_t*)dest_v, chroma_stride,
+ buf + y_data_size + u_data_size, v_data_size,
+ qmat_chroma_scaled, log2_chroma_blocks_per_mb);
+ if (ret < 0)
+ return ret;
+ }
+ /* decode alpha plane if available */
+ if (ctx->alpha_info && pic->data[3] && a_data_size)
+ decode_slice_alpha(ctx, (uint16_t*)dest_a, luma_stride,
+ buf + y_data_size + u_data_size + v_data_size,
+ a_data_size, slice->mb_count);
+
+ slice->ret = 0;
+ return 0;
+}
+
+static int decode_picture(AVCodecContext *avctx)
+{
+ ProresContext *ctx = avctx->priv_data;
+ int i;
+
+ avctx->execute2(avctx, decode_slice_thread, NULL, NULL, ctx->slice_count);
+
+ for (i = 0; i < ctx->slice_count; i++)
+ if (ctx->slices[i].ret < 0)
+ return ctx->slices[i].ret;
+
+ return 0;
+}
+
+static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
+ AVPacket *avpkt)
+{
+ ProresContext *ctx = avctx->priv_data;
+ AVFrame *frame = data;
+ const uint8_t *buf = avpkt->data;
+ int buf_size = avpkt->size;
+ int frame_hdr_size, pic_size, ret;
+
+ if (buf_size < 28 || AV_RL32(buf + 4) != AV_RL32("icpf")) {
+ av_log(avctx, AV_LOG_ERROR, "invalid frame header\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ ctx->frame = frame;
+ ctx->frame->pict_type = AV_PICTURE_TYPE_I;
+ ctx->frame->key_frame = 1;
+ ctx->first_field = 1;
+
+ buf += 8;
+ buf_size -= 8;
+
+ frame_hdr_size = decode_frame_header(ctx, buf, buf_size, avctx);
+ if (frame_hdr_size < 0)
+ return frame_hdr_size;
+
+ buf += frame_hdr_size;
+ buf_size -= frame_hdr_size;
+
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
+ return ret;
+
+ decode_picture:
+ pic_size = decode_picture_header(avctx, buf, buf_size);
+ if (pic_size < 0) {
+ av_log(avctx, AV_LOG_ERROR, "error decoding picture header\n");
+ return pic_size;
+ }
+
+ if ((ret = decode_picture(avctx)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "error decoding picture\n");
+ return ret;
+ }
+
+ buf += pic_size;
+ buf_size -= pic_size;
+
+ if (ctx->frame_type && buf_size > 0 && ctx->first_field) {
+ ctx->first_field = 0;
+ goto decode_picture;
+ }
+
+ *got_frame = 1;
+
+ return avpkt->size;
+}
+
+static av_cold int decode_close(AVCodecContext *avctx)
+{
+ ProresContext *ctx = avctx->priv_data;
+
+ av_freep(&ctx->slices);
+
+ return 0;
+}
+
+AVCodec ff_prores_decoder = {
+ .name = "prores",
+ .long_name = NULL_IF_CONFIG_SMALL("ProRes"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_PRORES,
+ .priv_data_size = sizeof(ProresContext),
+ .init = decode_init,
+ .close = decode_close,
+ .decode = decode_frame,
+ .capabilities = CODEC_CAP_DR1 | CODEC_CAP_SLICE_THREADS,
+};
--- /dev/null
+/*
+ * Apple ProRes encoder
+ *
+ * Copyright (c) 2011 Anatoliy Wasserman
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Apple ProRes encoder (Anatoliy Wasserman version)
+ * Known FOURCCs: 'apch' (HQ), 'apcn' (SD), 'apcs' (LT), 'apco' (Proxy)
+ */
+
+#include "avcodec.h"
+#include "dct.h"
+#include "internal.h"
+#include "put_bits.h"
+#include "bytestream.h"
+#include "fdctdsp.h"
+
+#define DEFAULT_SLICE_MB_WIDTH 8
+
+#define FF_PROFILE_PRORES_PROXY 0
+#define FF_PROFILE_PRORES_LT 1
+#define FF_PROFILE_PRORES_STANDARD 2
+#define FF_PROFILE_PRORES_HQ 3
+
+static const AVProfile profiles[] = {
+ { FF_PROFILE_PRORES_PROXY, "apco"},
+ { FF_PROFILE_PRORES_LT, "apcs"},
+ { FF_PROFILE_PRORES_STANDARD, "apcn"},
+ { FF_PROFILE_PRORES_HQ, "apch"},
+ { FF_PROFILE_UNKNOWN }
+};
+
+static const int qp_start_table[4] = { 4, 1, 1, 1 };
+static const int qp_end_table[4] = { 8, 9, 6, 6 };
+static const int bitrate_table[5] = { 1000, 2100, 3500, 5400 };
+
+static const uint8_t progressive_scan[64] = {
+ 0, 1, 8, 9, 2, 3, 10, 11,
+ 16, 17, 24, 25, 18, 19, 26, 27,
+ 4, 5, 12, 20, 13, 6, 7, 14,
+ 21, 28, 29, 22, 15, 23, 30, 31,
+ 32, 33, 40, 48, 41, 34, 35, 42,
+ 49, 56, 57, 50, 43, 36, 37, 44,
+ 51, 58, 59, 52, 45, 38, 39, 46,
+ 53, 60, 61, 54, 47, 55, 62, 63
+};
+
+static const uint8_t QMAT_LUMA[4][64] = {
+ {
+ 4, 7, 9, 11, 13, 14, 15, 63,
+ 7, 7, 11, 12, 14, 15, 63, 63,
+ 9, 11, 13, 14, 15, 63, 63, 63,
+ 11, 11, 13, 14, 63, 63, 63, 63,
+ 11, 13, 14, 63, 63, 63, 63, 63,
+ 13, 14, 63, 63, 63, 63, 63, 63,
+ 13, 63, 63, 63, 63, 63, 63, 63,
+ 63, 63, 63, 63, 63, 63, 63, 63
+ }, {
+ 4, 5, 6, 7, 9, 11, 13, 15,
+ 5, 5, 7, 8, 11, 13, 15, 17,
+ 6, 7, 9, 11, 13, 15, 15, 17,
+ 7, 7, 9, 11, 13, 15, 17, 19,
+ 7, 9, 11, 13, 14, 16, 19, 23,
+ 9, 11, 13, 14, 16, 19, 23, 29,
+ 9, 11, 13, 15, 17, 21, 28, 35,
+ 11, 13, 16, 17, 21, 28, 35, 41
+ }, {
+ 4, 4, 5, 5, 6, 7, 7, 9,
+ 4, 4, 5, 6, 7, 7, 9, 9,
+ 5, 5, 6, 7, 7, 9, 9, 10,
+ 5, 5, 6, 7, 7, 9, 9, 10,
+ 5, 6, 7, 7, 8, 9, 10, 12,
+ 6, 7, 7, 8, 9, 10, 12, 15,
+ 6, 7, 7, 9, 10, 11, 14, 17,
+ 7, 7, 9, 10, 11, 14, 17, 21
+ }, {
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 5,
+ 4, 4, 4, 4, 4, 4, 5, 5,
+ 4, 4, 4, 4, 4, 5, 5, 6,
+ 4, 4, 4, 4, 5, 5, 6, 7,
+ 4, 4, 4, 4, 5, 6, 7, 7
+ }
+};
+
+static const uint8_t QMAT_CHROMA[4][64] = {
+ {
+ 4, 7, 9, 11, 13, 14, 63, 63,
+ 7, 7, 11, 12, 14, 63, 63, 63,
+ 9, 11, 13, 14, 63, 63, 63, 63,
+ 11, 11, 13, 14, 63, 63, 63, 63,
+ 11, 13, 14, 63, 63, 63, 63, 63,
+ 13, 14, 63, 63, 63, 63, 63, 63,
+ 13, 63, 63, 63, 63, 63, 63, 63,
+ 63, 63, 63, 63, 63, 63, 63, 63
+ }, {
+ 4, 5, 6, 7, 9, 11, 13, 15,
+ 5, 5, 7, 8, 11, 13, 15, 17,
+ 6, 7, 9, 11, 13, 15, 15, 17,
+ 7, 7, 9, 11, 13, 15, 17, 19,
+ 7, 9, 11, 13, 14, 16, 19, 23,
+ 9, 11, 13, 14, 16, 19, 23, 29,
+ 9, 11, 13, 15, 17, 21, 28, 35,
+ 11, 13, 16, 17, 21, 28, 35, 41
+ }, {
+ 4, 4, 5, 5, 6, 7, 7, 9,
+ 4, 4, 5, 6, 7, 7, 9, 9,
+ 5, 5, 6, 7, 7, 9, 9, 10,
+ 5, 5, 6, 7, 7, 9, 9, 10,
+ 5, 6, 7, 7, 8, 9, 10, 12,
+ 6, 7, 7, 8, 9, 10, 12, 15,
+ 6, 7, 7, 9, 10, 11, 14, 17,
+ 7, 7, 9, 10, 11, 14, 17, 21
+ }, {
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 5,
+ 4, 4, 4, 4, 4, 4, 5, 5,
+ 4, 4, 4, 4, 4, 5, 5, 6,
+ 4, 4, 4, 4, 5, 5, 6, 7,
+ 4, 4, 4, 4, 5, 6, 7, 7
+ }
+};
+
+
+typedef struct {
+ FDCTDSPContext fdsp;
+ uint8_t* fill_y;
+ uint8_t* fill_u;
+ uint8_t* fill_v;
+
+ int qmat_luma[16][64];
+ int qmat_chroma[16][64];
+} ProresContext;
+
+static void encode_codeword(PutBitContext *pb, int val, int codebook)
+{
+ unsigned int rice_order, exp_order, switch_bits, first_exp, exp, zeros;
+
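+ /* the codebook descriptor byte packs: bits 0-1 switch threshold,
+ * bits 2-4 exp-Golomb order, bits 5-7 Rice order */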
+ /* number of bits to switch between rice and exp golomb */
+ switch_bits = codebook & 3;
+ rice_order = codebook >> 5;
+ exp_order = (codebook >> 2) & 7;
+
+ first_exp = ((switch_bits + 1) << rice_order);
+
+ if (val >= first_exp) { /* exp golomb */
+ val -= first_exp;
+ val += (1 << exp_order);
+ exp = av_log2(val);
+ zeros = exp - exp_order + switch_bits + 1;
+ put_bits(pb, zeros, 0);
+ put_bits(pb, exp + 1, val);
+ } else if (rice_order) {
+ put_bits(pb, (val >> rice_order), 0);
+ put_bits(pb, 1, 1);
+ put_sbits(pb, rice_order, val);
+ } else {
+ put_bits(pb, val, 0);
+ put_bits(pb, 1, 1);
+ }
+}
+
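+/* TO_GOLOMB zigzag-maps a signed value to an unsigned code (0, -1, 1, -2, 2, ...
+ * map to 0, 1, 2, 3, 4, ...); IS_NEGATIVE yields 1 for negative values, 0 otherwise */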
+#define QSCALE(qmat,ind,val) ((val) / ((qmat)[ind]))
+#define TO_GOLOMB(val) (((val) << 1) ^ ((val) >> 31))
+#define DIFF_SIGN(val, sign) (((val) >> 31) ^ (sign))
+#define IS_NEGATIVE(val) ((((val) >> 31) ^ -1) + 1)
+#define TO_GOLOMB2(val,sign) ((val)==0 ? 0 : ((val) << 1) + (sign))
+
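+/* branch-free absolute value */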
+static av_always_inline int get_level(int val)
+{
+ int sign = (val >> 31);
+ return (val ^ sign) - sign;
+}
+
+#define FIRST_DC_CB 0xB8
+
+static const uint8_t dc_codebook[7] = { 0x04, 0x28, 0x28, 0x4D, 0x4D, 0x70, 0x70};
+
+static void encode_dc_coeffs(PutBitContext *pb, int16_t *in,
+ int blocks_per_slice, int *qmat)
+{
+ int prev_dc, code;
+ int i, sign, idx;
+ int new_dc, delta, diff_sign, new_code;
+
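+ /* DC terms carry a fixed bias of 16384, removed before quantising; the
+ * first DC is coded directly, the rest as signed differences */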
+ prev_dc = QSCALE(qmat, 0, in[0] - 16384);
+ code = TO_GOLOMB(prev_dc);
+ encode_codeword(pb, code, FIRST_DC_CB);
+
+ code = 5; sign = 0; idx = 64;
+ for (i = 1; i < blocks_per_slice; i++, idx += 64) {
+ new_dc = QSCALE(qmat, 0, in[idx] - 16384);
+ delta = new_dc - prev_dc;
+ diff_sign = DIFF_SIGN(delta, sign);
+ new_code = TO_GOLOMB2(get_level(delta), diff_sign);
+
+ encode_codeword(pb, new_code, dc_codebook[FFMIN(code, 6)]);
+
+ code = new_code;
+ sign = delta >> 31;
+ prev_dc = new_dc;
+ }
+}
+
+static const uint8_t run_to_cb[16] = { 0x06, 0x06, 0x05, 0x05, 0x04, 0x29,
+ 0x29, 0x29, 0x29, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x4C };
+static const uint8_t lev_to_cb[10] = { 0x04, 0x0A, 0x05, 0x06, 0x04, 0x28,
+ 0x28, 0x28, 0x28, 0x4C };
+
+static void encode_ac_coeffs(AVCodecContext *avctx, PutBitContext *pb,
+ int16_t *in, int blocks_per_slice, int *qmat)
+{
+ int prev_run = 4;
+ int prev_level = 2;
+
+ int run = 0, level, code, i, j;
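+ /* AC coefficients are scanned in progressive_scan order across all blocks
+ * of the slice; zero runs and levels are coded with codebooks selected by
+ * the previous run/level value */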
+ for (i = 1; i < 64; i++) {
+ int indp = progressive_scan[i];
+ for (j = 0; j < blocks_per_slice; j++) {
+ int val = QSCALE(qmat, indp, in[(j << 6) + indp]);
+ if (val) {
+ encode_codeword(pb, run, run_to_cb[FFMIN(prev_run, 15)]);
+
+ prev_run = run;
+ run = 0;
+ level = get_level(val);
+ code = level - 1;
+
+ encode_codeword(pb, code, lev_to_cb[FFMIN(prev_level, 9)]);
+
+ prev_level = level;
+
+ put_bits(pb, 1, IS_NEGATIVE(val));
+ } else {
+ ++run;
+ }
+ }
+ }
+}
+
+static void get(uint8_t *pixels, int stride, int16_t* block)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ AV_WN64(block, AV_RN64(pixels));
+ AV_WN64(block+4, AV_RN64(pixels+8));
+ pixels += stride;
+ block += 8;
+ }
+}
+
+static void fdct_get(FDCTDSPContext *fdsp, uint8_t *pixels, int stride, int16_t* block)
+{
+ get(pixels, stride, block);
+ fdsp->fdct(block);
+}
+
+static int encode_slice_plane(AVCodecContext *avctx, int mb_count,
+ uint8_t *src, int src_stride, uint8_t *buf, unsigned buf_size,
+ int *qmat, int chroma)
+{
+ ProresContext* ctx = avctx->priv_data;
+ FDCTDSPContext *fdsp = &ctx->fdsp;
+ LOCAL_ALIGNED(16, int16_t, blocks, [DEFAULT_SLICE_MB_WIDTH << 8]);
+ int16_t *block;
+ int i, blocks_per_slice;
+ PutBitContext pb;
+
+ block = blocks;
+ for (i = 0; i < mb_count; i++) {
+ fdct_get(fdsp, src, src_stride, block + (0 << 6));
+ fdct_get(fdsp, src + 8 * src_stride, src_stride, block + ((2 - chroma) << 6));
+ if (!chroma) {
+ fdct_get(fdsp, src + 16, src_stride, block + (1 << 6));
+ fdct_get(fdsp, src + 16 + 8 * src_stride, src_stride, block + (3 << 6));
+ }
+
+ block += (256 >> chroma);
+ src += (32 >> chroma);
+ }
+
+ blocks_per_slice = mb_count << (2 - chroma);
+ init_put_bits(&pb, buf, buf_size);
+
+ encode_dc_coeffs(&pb, blocks, blocks_per_slice, qmat);
+ encode_ac_coeffs(avctx, &pb, blocks, blocks_per_slice, qmat);
+
+ flush_put_bits(&pb);
+ return put_bits_ptr(&pb) - pb.buf;
+}
+
+static av_always_inline unsigned encode_slice_data(AVCodecContext *avctx,
+ uint8_t *dest_y, uint8_t *dest_u, uint8_t *dest_v, int luma_stride,
+ int chroma_stride, unsigned mb_count, uint8_t *buf, unsigned data_size,
+ unsigned* y_data_size, unsigned* u_data_size, unsigned* v_data_size,
+ int qp)
+{
+ ProresContext* ctx = avctx->priv_data;
+
+ *y_data_size = encode_slice_plane(avctx, mb_count, dest_y, luma_stride,
+ buf, data_size, ctx->qmat_luma[qp - 1], 0);
+
- if (!(avctx->flags & CODEC_FLAG_GRAY)) {
+ if (!(avctx->flags & AV_CODEC_FLAG_GRAY)) {
+ *u_data_size = encode_slice_plane(avctx, mb_count, dest_u,
+ chroma_stride, buf + *y_data_size, data_size - *y_data_size,
+ ctx->qmat_chroma[qp - 1], 1);
+
+ *v_data_size = encode_slice_plane(avctx, mb_count, dest_v,
+ chroma_stride, buf + *y_data_size + *u_data_size,
+ data_size - *y_data_size - *u_data_size,
+ ctx->qmat_chroma[qp - 1], 1);
+ }
+
+ return *y_data_size + *u_data_size + *v_data_size;
+}
+
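+/* Copy a 16-bit sub-block and pad it to dst_width x dst_height by repeating
+ * the last pixel of each row and then the last filled row (used for slices
+ * that overlap the right/bottom picture edge). */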
+static void subimage_with_fill(uint16_t *src, unsigned x, unsigned y,
+ unsigned stride, unsigned width, unsigned height, uint16_t *dst,
+ unsigned dst_width, unsigned dst_height)
+{
+
+ int box_width = FFMIN(width - x, dst_width);
+ int box_height = FFMIN(height - y, dst_height);
+ int i, j, src_stride = stride >> 1;
+ uint16_t last_pix, *last_line;
+
+ src += y * src_stride + x;
+ for (i = 0; i < box_height; ++i) {
+ for (j = 0; j < box_width; ++j) {
+ dst[j] = src[j];
+ }
+ last_pix = dst[j - 1];
+ for (; j < dst_width; j++)
+ dst[j] = last_pix;
+ src += src_stride;
+ dst += dst_width;
+ }
+ last_line = dst - dst_width;
+ for (; i < dst_height; i++) {
+ for (j = 0; j < dst_width; ++j) {
+ dst[j] = last_line[j];
+ }
+ dst += dst_width;
+ }
+}
+
+static int encode_slice(AVCodecContext *avctx, const AVFrame *pic, int mb_x,
+ int mb_y, unsigned mb_count, uint8_t *buf, unsigned data_size,
+ int unsafe, int *qp)
+{
+ int luma_stride, chroma_stride;
+ int hdr_size = 6, slice_size;
+ uint8_t *dest_y, *dest_u, *dest_v;
+ unsigned y_data_size = 0, u_data_size = 0, v_data_size = 0;
+ ProresContext* ctx = avctx->priv_data;
+ int tgt_bits = (mb_count * bitrate_table[avctx->profile]) >> 2;
+ int low_bytes = (tgt_bits - (tgt_bits >> 3)) >> 3; // 12% bitrate fluctuation
+ int high_bytes = (tgt_bits + (tgt_bits >> 3)) >> 3;
+
+ luma_stride = pic->linesize[0];
+ chroma_stride = pic->linesize[1];
+
+ dest_y = pic->data[0] + (mb_y << 4) * luma_stride + (mb_x << 5);
+ dest_u = pic->data[1] + (mb_y << 4) * chroma_stride + (mb_x << 4);
+ dest_v = pic->data[2] + (mb_y << 4) * chroma_stride + (mb_x << 4);
+
+ if (unsafe) {
+
+ subimage_with_fill((uint16_t *) pic->data[0], mb_x << 4, mb_y << 4,
+ luma_stride, avctx->width, avctx->height,
+ (uint16_t *) ctx->fill_y, mb_count << 4, 16);
+ subimage_with_fill((uint16_t *) pic->data[1], mb_x << 3, mb_y << 4,
+ chroma_stride, avctx->width >> 1, avctx->height,
+ (uint16_t *) ctx->fill_u, mb_count << 3, 16);
+ subimage_with_fill((uint16_t *) pic->data[2], mb_x << 3, mb_y << 4,
+ chroma_stride, avctx->width >> 1, avctx->height,
+ (uint16_t *) ctx->fill_v, mb_count << 3, 16);
+
+ encode_slice_data(avctx, ctx->fill_y, ctx->fill_u, ctx->fill_v,
+ mb_count << 5, mb_count << 4, mb_count, buf + hdr_size,
+ data_size - hdr_size, &y_data_size, &u_data_size, &v_data_size,
+ *qp);
+ } else {
+ slice_size = encode_slice_data(avctx, dest_y, dest_u, dest_v,
+ luma_stride, chroma_stride, mb_count, buf + hdr_size,
+ data_size - hdr_size, &y_data_size, &u_data_size, &v_data_size,
+ *qp);
+
+ if (slice_size > high_bytes && *qp < qp_end_table[avctx->profile]) {
+ do {
+ *qp += 1;
+ slice_size = encode_slice_data(avctx, dest_y, dest_u, dest_v,
+ luma_stride, chroma_stride, mb_count, buf + hdr_size,
+ data_size - hdr_size, &y_data_size, &u_data_size,
+ &v_data_size, *qp);
+ } while (slice_size > high_bytes && *qp < qp_end_table[avctx->profile]);
+ } else if (slice_size < low_bytes && *qp
+ > qp_start_table[avctx->profile]) {
+ do {
+ *qp -= 1;
+ slice_size = encode_slice_data(avctx, dest_y, dest_u, dest_v,
+ luma_stride, chroma_stride, mb_count, buf + hdr_size,
+ data_size - hdr_size, &y_data_size, &u_data_size,
+ &v_data_size, *qp);
+ } while (slice_size < low_bytes && *qp > qp_start_table[avctx->profile]);
+ }
+ }
+
+ buf[0] = hdr_size << 3;
+ buf[1] = *qp;
+ AV_WB16(buf + 2, y_data_size);
+ AV_WB16(buf + 4, u_data_size);
+
+ return hdr_size + y_data_size + u_data_size + v_data_size;
+}
+
+static int prores_encode_picture(AVCodecContext *avctx, const AVFrame *pic,
+ uint8_t *buf, const int buf_size)
+{
+ int mb_width = (avctx->width + 15) >> 4;
+ int mb_height = (avctx->height + 15) >> 4;
+ int hdr_size, sl_size, i;
+ int mb_y, sl_data_size, qp;
+ int unsafe_bot, unsafe_right;
+ uint8_t *sl_data, *sl_data_sizes;
+ int slice_per_line = 0, rem = mb_width;
+
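+ /* count slices per macroblock row: full-width slices first, then
+ * progressively halved slices for the remaining macroblocks */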
+ for (i = av_log2(DEFAULT_SLICE_MB_WIDTH); i >= 0; --i) {
+ slice_per_line += rem >> i;
+ rem &= (1 << i) - 1;
+ }
+
+ qp = qp_start_table[avctx->profile];
+ hdr_size = 8; sl_data_size = buf_size - hdr_size;
+ sl_data_sizes = buf + hdr_size;
+ sl_data = sl_data_sizes + (slice_per_line * mb_height * 2);
+ for (mb_y = 0; mb_y < mb_height; mb_y++) {
+ int mb_x = 0;
+ int slice_mb_count = DEFAULT_SLICE_MB_WIDTH;
+ while (mb_x < mb_width) {
+ while (mb_width - mb_x < slice_mb_count)
+ slice_mb_count >>= 1;
+
+ unsafe_bot = (avctx->height & 0xf) && (mb_y == mb_height - 1);
+ unsafe_right = (avctx->width & 0xf) && (mb_x + slice_mb_count == mb_width);
+
+ sl_size = encode_slice(avctx, pic, mb_x, mb_y, slice_mb_count,
+ sl_data, sl_data_size, unsafe_bot || unsafe_right, &qp);
+
+ bytestream_put_be16(&sl_data_sizes, sl_size);
+ sl_data += sl_size;
+ sl_data_size -= sl_size;
+ mb_x += slice_mb_count;
+ }
+ }
+
+ buf[0] = hdr_size << 3;
+ AV_WB32(buf + 1, sl_data - buf);
+ AV_WB16(buf + 5, slice_per_line * mb_height);
+ buf[7] = av_log2(DEFAULT_SLICE_MB_WIDTH) << 4;
+
+ return sl_data - buf;
+}
+
+static int prores_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+ const AVFrame *pict, int *got_packet)
+{
+ int header_size = 148;
+ uint8_t *buf;
+ int pic_size, ret;
+ int frame_size = FFALIGN(avctx->width, 16) * FFALIGN(avctx->height, 16)*16 + 500 + FF_MIN_BUFFER_SIZE; //FIXME choose tighter limit
+
+
+ if ((ret = ff_alloc_packet2(avctx, pkt, frame_size + FF_MIN_BUFFER_SIZE, 0)) < 0)
+ return ret;
+
+ buf = pkt->data;
+ pic_size = prores_encode_picture(avctx, pict, buf + header_size + 8,
+ pkt->size - header_size - 8);
+
+ bytestream_put_be32(&buf, pic_size + 8 + header_size);
+ bytestream_put_buffer(&buf, "icpf", 4);
+
+ bytestream_put_be16(&buf, header_size);
+ bytestream_put_be16(&buf, 0);
+ bytestream_put_buffer(&buf, "fmpg", 4);
+ bytestream_put_be16(&buf, avctx->width);
+ bytestream_put_be16(&buf, avctx->height);
+ *buf++ = 0x83; // {10}(422){00}{00}(frame){11}
+ *buf++ = 0;
+ *buf++ = 2;
+ *buf++ = 2;
+ *buf++ = 6;
+ *buf++ = 32;
+ *buf++ = 0;
+ *buf++ = 3;
+
+ bytestream_put_buffer(&buf, QMAT_LUMA[avctx->profile], 64);
+ bytestream_put_buffer(&buf, QMAT_CHROMA[avctx->profile], 64);
+
+ pkt->flags |= AV_PKT_FLAG_KEY;
+ pkt->size = pic_size + 8 + header_size;
+ *got_packet = 1;
+
+ return 0;
+}
+
+static void scale_mat(const uint8_t* src, int* dst, int scale)
+{
+ int i;
+ for (i = 0; i < 64; i++)
+ dst[i] = src[i] * scale;
+}
+
+static av_cold int prores_encode_init(AVCodecContext *avctx)
+{
+ int i;
+ ProresContext* ctx = avctx->priv_data;
+
+ if (avctx->pix_fmt != AV_PIX_FMT_YUV422P10) {
+ av_log(avctx, AV_LOG_ERROR, "need YUV422P10\n");
+ return AVERROR_PATCHWELCOME;
+ }
+ avctx->bits_per_raw_sample = 10;
+
+ if (avctx->width & 0x1) {
+ av_log(avctx, AV_LOG_ERROR,
+ "frame width needs to be multiple of 2\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (avctx->width > 65534 || avctx->height > 65535) {
+ av_log(avctx, AV_LOG_ERROR,
+ "The maximum dimensions are 65534x65535\n");
+ return AVERROR(EINVAL);
+ }
+
+ if ((avctx->height & 0xf) || (avctx->width & 0xf)) {
+ ctx->fill_y = av_malloc(4 * (DEFAULT_SLICE_MB_WIDTH << 8));
+ if (!ctx->fill_y)
+ return AVERROR(ENOMEM);
+ ctx->fill_u = ctx->fill_y + (DEFAULT_SLICE_MB_WIDTH << 9);
+ ctx->fill_v = ctx->fill_u + (DEFAULT_SLICE_MB_WIDTH << 8);
+ }
+
+ if (avctx->profile == FF_PROFILE_UNKNOWN) {
+ avctx->profile = FF_PROFILE_PRORES_STANDARD;
+ av_log(avctx, AV_LOG_INFO,
+ "encoding with ProRes standard (apcn) profile\n");
+
+ } else if (avctx->profile < FF_PROFILE_PRORES_PROXY
+ || avctx->profile > FF_PROFILE_PRORES_HQ) {
+ av_log(
+ avctx,
+ AV_LOG_ERROR,
+ "unknown profile %d, use [0 - apco, 1 - apcs, 2 - apcn (default), 3 - apch]\n",
+ avctx->profile);
+ return AVERROR(EINVAL);
+ }
+
+ ff_fdctdsp_init(&ctx->fdsp, avctx);
+
+ avctx->codec_tag = AV_RL32((const uint8_t*)profiles[avctx->profile].name);
+
+ for (i = 1; i <= 16; i++) {
+ scale_mat(QMAT_LUMA[avctx->profile] , ctx->qmat_luma[i - 1] , i);
+ scale_mat(QMAT_CHROMA[avctx->profile], ctx->qmat_chroma[i - 1], i);
+ }
+
+ avctx->coded_frame->key_frame = 1;
+ avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+
+ return 0;
+}
+
+static av_cold int prores_encode_close(AVCodecContext *avctx)
+{
+ ProresContext* ctx = avctx->priv_data;
+ av_freep(&ctx->fill_y);
+
+ return 0;
+}
+
+AVCodec ff_prores_aw_encoder = {
+ .name = "prores_aw",
+ .long_name = NULL_IF_CONFIG_SMALL("Apple ProRes"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_PRORES,
+ .priv_data_size = sizeof(ProresContext),
+ .init = prores_encode_init,
+ .close = prores_encode_close,
+ .encode2 = prores_encode_frame,
+ .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUV422P10, AV_PIX_FMT_NONE},
+ .capabilities = CODEC_CAP_FRAME_THREADS | CODEC_CAP_INTRA_ONLY,
+ .profiles = profiles
+};
+
+AVCodec ff_prores_encoder = {
+ .name = "prores",
+ .long_name = NULL_IF_CONFIG_SMALL("Apple ProRes"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_PRORES,
+ .priv_data_size = sizeof(ProresContext),
+ .init = prores_encode_init,
+ .close = prores_encode_close,
+ .encode2 = prores_encode_frame,
+ .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUV422P10, AV_PIX_FMT_NONE},
+ .capabilities = CODEC_CAP_FRAME_THREADS | CODEC_CAP_INTRA_ONLY,
+ .profiles = profiles
+};
int mps;
int i, j;
int min_quant, max_quant;
- int interlaced = !!(avctx->flags & CODEC_FLAG_INTERLACED_DCT);
+ int interlaced = !!(avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT);
avctx->bits_per_raw_sample = 10;
+#if FF_API_CODED_FRAME
+FF_DISABLE_DEPRECATION_WARNINGS
+ avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+ avctx->coded_frame->key_frame = 1;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
ctx->fdct = prores_fdct;
ctx->scantable = interlaced ? ff_prores_interlaced_scan
q->param.mfx.FrameInfo.ChromaFormat = MFX_CHROMAFORMAT_YUV420;
q->param.mfx.FrameInfo.BitDepthLuma = 8;
q->param.mfx.FrameInfo.BitDepthChroma = 8;
+ q->param.mfx.FrameInfo.Width = FFALIGN(avctx->width, q->width_align);
+
- if (avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
+ if (avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
+ /* The true field layout (TFF or BFF) is not important here;
+ it will be specified later during frame encoding. But it is important
+ to specify whether the frame is progressive or not, because the allowed
+ height alignment depends on it.
+ */
+ q->param.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_FIELD_TFF;
+ q->height_align = 32;
+ } else {
+ q->param.mfx.FrameInfo.PicStruct = MFX_PICSTRUCT_PROGRESSIVE;
+ q->height_align = 16;
+ }
+ q->param.mfx.FrameInfo.Height = FFALIGN(avctx->height, q->height_align);
if (avctx->framerate.den > 0 && avctx->framerate.num > 0) {
q->param.mfx.FrameInfo.FrameRateExtN = avctx->framerate.num;
avctx->channel_layout = AV_CH_LAYOUT_MONO;
avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
- avpriv_float_dsp_init(&ractx->fdsp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
+ if (avctx->block_align <= 0) {
+ av_log(avctx, AV_LOG_ERROR, "unsupported block align\n");
+ return AVERROR_PATCHWELCOME;
+ }
+
- ractx->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
+ ractx->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
+ if (!ractx->fdsp)
+ return AVERROR(ENOMEM);
return 0;
}
rcc->last_qscale_for[i] = FF_QP2LAMBDA * 5;
}
rcc->buffer_index = s->avctx->rc_initial_buffer_occupancy;
+ if (!rcc->buffer_index)
+ rcc->buffer_index = s->avctx->rc_buffer_size * 3 / 4;
- if (s->avctx->flags & CODEC_FLAG_PASS2) {
+ if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
int i;
char *p;
get_qminmax(&qmin, &qmax, s, pict_type);
- fps = 1 / av_q2d(s->avctx->time_base);
+ fps = get_fps(s->avctx);
/* update predictors */
if (picture_number > 2 && !dry_run) {
- const int last_var = s->last_pict_type == AV_PICTURE_TYPE_I ? rcc->last_mb_var_sum
- : rcc->last_mc_mb_var_sum;
+ const int64_t last_var =
+ s->last_pict_type == AV_PICTURE_TYPE_I ? rcc->last_mb_var_sum
+ : rcc->last_mc_mb_var_sum;
+ av_assert1(s->frame_bits >= s->stuffing_bits);
update_predictor(&rcc->pred[s->last_pict_type],
rcc->last_qscale,
- sqrt(last_var), s->frame_bits);
+ sqrt(last_var),
+ s->frame_bits - s->stuffing_bits);
}
- if (s->avctx->flags & CODEC_FLAG_PASS2) {
+ if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
- assert(picture_number >= 0);
- assert(picture_number < rcc->num_entries);
+ av_assert0(picture_number >= 0);
+ if (picture_number >= rcc->num_entries) {
+ av_log(s, AV_LOG_ERROR, "Input is longer than 2-pass log file\n");
+ return -1;
+ }
rce = &rcc->entry[picture_number];
wanted_bits = rce->expected_bits;
} else {
var = pict_type == AV_PICTURE_TYPE_I ? pic->mb_var_sum : pic->mc_mb_var_sum;
short_term_q = 0; /* avoid warning */
- if (s->avctx->flags & CODEC_FLAG_PASS2) {
+ if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
if (pict_type != AV_PICTURE_TYPE_I)
- assert(pict_type == rce->new_pict_type);
+ av_assert0(pict_type == rce->new_pict_type);
q = rce->new_qscale / br_compensation;
- ff_dlog(s, "%f %f %f last:%d var:%d type:%d//\n", q, rce->new_qscale,
+ ff_dlog(s, "%f %f %f last:%d var:%"PRId64" type:%d//\n", q, rce->new_qscale,
br_compensation, s->frame_bits, var, pict_type);
} else {
rce->pict_type =
--- /dev/null
+/*
+ * Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/intmath.h"
+#include "libavutil/log.h"
+#include "libavutil/opt.h"
+#include "avcodec.h"
+#include "snow_dwt.h"
+#include "internal.h"
+#include "snow.h"
+
+#include "rangecoder.h"
+#include "mathops.h"
+
+#include "mpegvideo.h"
+#include "h263.h"
+
+static av_always_inline void predict_slice_buffered(SnowContext *s, slice_buffer * sb, IDWTELEM * old_buffer, int plane_index, int add, int mb_y){
+ Plane *p= &s->plane[plane_index];
+ const int mb_w= s->b_width << s->block_max_depth;
+ const int mb_h= s->b_height << s->block_max_depth;
+ int x, y, mb_x;
+ int block_size = MB_SIZE >> s->block_max_depth;
+ int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
+ int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
+ const uint8_t *obmc = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
+ int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
+ int ref_stride= s->current_picture->linesize[plane_index];
+ uint8_t *dst8= s->current_picture->data[plane_index];
+ int w= p->width;
+ int h= p->height;
+
+ if(s->keyframe || (s->avctx->debug&512)){
+ if(mb_y==mb_h)
+ return;
+
+ if(add){
+ for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
+// DWTELEM * line = slice_buffer_get_line(sb, y);
+ IDWTELEM * line = sb->line[y];
+ for(x=0; x<w; x++){
+// int v= buf[x + y*w] + (128<<FRAC_BITS) + (1<<(FRAC_BITS-1));
+ int v= line[x] + (128<<FRAC_BITS) + (1<<(FRAC_BITS-1));
+ v >>= FRAC_BITS;
+ if(v&(~255)) v= ~(v>>31);
+ dst8[x + y*ref_stride]= v;
+ }
+ }
+ }else{
+ for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
+// DWTELEM * line = slice_buffer_get_line(sb, y);
+ IDWTELEM * line = sb->line[y];
+ for(x=0; x<w; x++){
+ line[x] -= 128 << FRAC_BITS;
+// buf[x + y*w]-= 128<<FRAC_BITS;
+ }
+ }
+ }
+
+ return;
+ }
+
+ for(mb_x=0; mb_x<=mb_w; mb_x++){
+ add_yblock(s, 1, sb, old_buffer, dst8, obmc,
+ block_w*mb_x - block_w/2,
+ block_h*mb_y - block_h/2,
+ block_w, block_h,
+ w, h,
+ w, ref_stride, obmc_stride,
+ mb_x - 1, mb_y - 1,
+ add, 0, plane_index);
+ }
+
+ if(s->avmv && mb_y < mb_h && plane_index == 0)
+ for(mb_x=0; mb_x<mb_w; mb_x++){
+ AVMotionVector *avmv = s->avmv + s->avmv_index;
+ const int b_width = s->b_width << s->block_max_depth;
+ const int b_stride= b_width;
+ BlockNode *bn= &s->block[mb_x + mb_y*b_stride];
+
+ if (bn->type)
+ continue;
+
+ s->avmv_index++;
+
+ avmv->w = block_w;
+ avmv->h = block_h;
+ avmv->dst_x = block_w*mb_x - block_w/2;
+ avmv->dst_y = block_h*mb_y - block_h/2;
+ avmv->src_x = avmv->dst_x + (bn->mx * s->mv_scale)/8;
+ avmv->src_y = avmv->dst_y + (bn->my * s->mv_scale)/8;
+ avmv->source= -1 - bn->ref;
+ avmv->flags = 0;
+ }
+}
+
+static inline void decode_subband_slice_buffered(SnowContext *s, SubBand *b, slice_buffer * sb, int start_y, int h, int save_state[1]){
+ const int w= b->width;
+ int y;
+ const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
+ int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
+ int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
+ int new_index = 0;
+
+ if(b->ibuf == s->spatial_idwt_buffer || s->qlog == LOSSLESS_QLOG){
+ qadd= 0;
+ qmul= 1<<QEXPSHIFT;
+ }
+
+ /* If we are on the second or later slice, restore our index. */
+ if (start_y != 0)
+ new_index = save_state[0];
+
+
+ for(y=start_y; y<h; y++){
+ int x = 0;
+ int v;
+ IDWTELEM * line = slice_buffer_get_line(sb, y * b->stride_line + b->buf_y_offset) + b->buf_x_offset;
+ memset(line, 0, b->width*sizeof(IDWTELEM));
+ v = b->x_coeff[new_index].coeff;
+ x = b->x_coeff[new_index++].x;
+ while(x < w){
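+ /* the LSB of the coded coefficient carries the sign; (t^u) - u
+ * negates the dequantised value when that bit is set */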
+ register int t= ( (v>>1)*qmul + qadd)>>QEXPSHIFT;
+ register int u= -(v&1);
+ line[x] = (t^u) - u;
+
+ v = b->x_coeff[new_index].coeff;
+ x = b->x_coeff[new_index++].x;
+ }
+ }
+
+ /* Save our variables for the next slice. */
+ save_state[0] = new_index;
+
+ return;
+}
+
+static int decode_q_branch(SnowContext *s, int level, int x, int y){
+ const int w= s->b_width << s->block_max_depth;
+ const int rem_depth= s->block_max_depth - level;
+ const int index= (x + y*w) << rem_depth;
+ int trx= (x+1)<<rem_depth;
+ const BlockNode *left = x ? &s->block[index-1] : &null_block;
+ const BlockNode *top = y ? &s->block[index-w] : &null_block;
+ const BlockNode *tl = y && x ? &s->block[index-w-1] : left;
+ const BlockNode *tr = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
+ int s_context= 2*left->level + 2*top->level + tl->level + tr->level;
+ int res;
+
+ if(s->keyframe){
+ set_blocks(s, level, x, y, null_block.color[0], null_block.color[1], null_block.color[2], null_block.mx, null_block.my, null_block.ref, BLOCK_INTRA);
+ return 0;
+ }
+
+ if(level==s->block_max_depth || get_rac(&s->c, &s->block_state[4 + s_context])){
+ int type, mx, my;
+ int l = left->color[0];
+ int cb= left->color[1];
+ int cr= left->color[2];
+ unsigned ref = 0;
+ int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
+ int mx_context= av_log2(2*FFABS(left->mx - top->mx)) + 0*av_log2(2*FFABS(tr->mx - top->mx));
+ int my_context= av_log2(2*FFABS(left->my - top->my)) + 0*av_log2(2*FFABS(tr->my - top->my));
+
+ type= get_rac(&s->c, &s->block_state[1 + left->type + top->type]) ? BLOCK_INTRA : 0;
+
+ if(type){
+ pred_mv(s, &mx, &my, 0, left, top, tr);
+ l += get_symbol(&s->c, &s->block_state[32], 1);
+ if (s->nb_planes > 2) {
+ cb+= get_symbol(&s->c, &s->block_state[64], 1);
+ cr+= get_symbol(&s->c, &s->block_state[96], 1);
+ }
+ }else{
+ if(s->ref_frames > 1)
+ ref= get_symbol(&s->c, &s->block_state[128 + 1024 + 32*ref_context], 0);
+ if (ref >= s->ref_frames) {
+ av_log(s->avctx, AV_LOG_ERROR, "Invalid ref\n");
+ return AVERROR_INVALIDDATA;
+ }
+ pred_mv(s, &mx, &my, ref, left, top, tr);
+ mx+= get_symbol(&s->c, &s->block_state[128 + 32*(mx_context + 16*!!ref)], 1);
+ my+= get_symbol(&s->c, &s->block_state[128 + 32*(my_context + 16*!!ref)], 1);
+ }
+ set_blocks(s, level, x, y, l, cb, cr, mx, my, ref, type);
+ }else{
+ if ((res = decode_q_branch(s, level+1, 2*x+0, 2*y+0)) < 0 ||
+ (res = decode_q_branch(s, level+1, 2*x+1, 2*y+0)) < 0 ||
+ (res = decode_q_branch(s, level+1, 2*x+0, 2*y+1)) < 0 ||
+ (res = decode_q_branch(s, level+1, 2*x+1, 2*y+1)) < 0)
+ return res;
+ }
+ return 0;
+}
+
+static void dequantize_slice_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, IDWTELEM *src, int stride, int start_y, int end_y){
+ const int w= b->width;
+ const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
+ const int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
+ const int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
+ int x,y;
+
+ if(s->qlog == LOSSLESS_QLOG) return;
+
+ for(y=start_y; y<end_y; y++){
+// DWTELEM * line = slice_buffer_get_line_from_address(sb, src + (y * stride));
+ IDWTELEM * line = slice_buffer_get_line(sb, (y * b->stride_line) + b->buf_y_offset) + b->buf_x_offset;
+ for(x=0; x<w; x++){
+ int i= line[x];
+ if(i<0){
+ line[x]= -((-i*qmul + qadd)>>(QEXPSHIFT)); //FIXME try different bias
+ }else if(i>0){
+ line[x]= (( i*qmul + qadd)>>(QEXPSHIFT));
+ }
+ }
+ }
+}
+
+static void correlate_slice_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median, int start_y, int end_y){
+ const int w= b->width;
+ int x,y;
+
+ IDWTELEM * line=0; // silence silly "could be used without having been initialized" warning
+ IDWTELEM * prev;
+
+ if (start_y != 0)
+ line = slice_buffer_get_line(sb, ((start_y - 1) * b->stride_line) + b->buf_y_offset) + b->buf_x_offset;
+
+ for(y=start_y; y<end_y; y++){
+ prev = line;
+// line = slice_buffer_get_line_from_address(sb, src + (y * stride));
+ line = slice_buffer_get_line(sb, (y * b->stride_line) + b->buf_y_offset) + b->buf_x_offset;
+ for(x=0; x<w; x++){
+ if(x){
+ if(use_median){
+ if(y && x+1<w) line[x] += mid_pred(line[x - 1], prev[x], prev[x + 1]);
+ else line[x] += line[x - 1];
+ }else{
+ if(y) line[x] += mid_pred(line[x - 1], prev[x], line[x - 1] + prev[x] - prev[x - 1]);
+ else line[x] += line[x - 1];
+ }
+ }else{
+ if(y) line[x] += prev[x];
+ }
+ }
+ }
+}
+
+static void decode_qlogs(SnowContext *s){
+ int plane_index, level, orientation;
+
+ for(plane_index=0; plane_index < s->nb_planes; plane_index++){
+ for(level=0; level<s->spatial_decomposition_count; level++){
+ for(orientation=level ? 1:0; orientation<4; orientation++){
+ int q;
+ if (plane_index==2) q= s->plane[1].band[level][orientation].qlog;
+ else if(orientation==2) q= s->plane[plane_index].band[level][1].qlog;
+ else q= get_symbol(&s->c, s->header_state, 1);
+ s->plane[plane_index].band[level][orientation].qlog= q;
+ }
+ }
+ }
+}
+
+#define GET_S(dst, check) \
+ tmp= get_symbol(&s->c, s->header_state, 0);\
+ if(!(check)){\
+ av_log(s->avctx, AV_LOG_ERROR, "Error " #dst " is %d\n", tmp);\
+ return AVERROR_INVALIDDATA;\
+ }\
+ dst= tmp;
+
+static int decode_header(SnowContext *s){
+ int plane_index, tmp;
+ uint8_t kstate[32];
+
+ memset(kstate, MID_STATE, sizeof(kstate));
+
+ s->keyframe= get_rac(&s->c, kstate);
+ if(s->keyframe || s->always_reset){
+ ff_snow_reset_contexts(s);
+ s->spatial_decomposition_type=
+ s->qlog=
+ s->qbias=
+ s->mv_scale=
+ s->block_max_depth= 0;
+ }
+ if(s->keyframe){
+ GET_S(s->version, tmp <= 0U)
+ s->always_reset= get_rac(&s->c, s->header_state);
+ s->temporal_decomposition_type= get_symbol(&s->c, s->header_state, 0);
+ s->temporal_decomposition_count= get_symbol(&s->c, s->header_state, 0);
+ GET_S(s->spatial_decomposition_count, 0 < tmp && tmp <= MAX_DECOMPOSITIONS)
+ s->colorspace_type= get_symbol(&s->c, s->header_state, 0);
+ if (s->colorspace_type == 1) {
+ s->avctx->pix_fmt= AV_PIX_FMT_GRAY8;
+ s->nb_planes = 1;
+ } else if(s->colorspace_type == 0) {
+ s->chroma_h_shift= get_symbol(&s->c, s->header_state, 0);
+ s->chroma_v_shift= get_symbol(&s->c, s->header_state, 0);
+
+ if(s->chroma_h_shift == 1 && s->chroma_v_shift==1){
+ s->avctx->pix_fmt= AV_PIX_FMT_YUV420P;
+ }else if(s->chroma_h_shift == 0 && s->chroma_v_shift==0){
+ s->avctx->pix_fmt= AV_PIX_FMT_YUV444P;
+ }else if(s->chroma_h_shift == 2 && s->chroma_v_shift==2){
+ s->avctx->pix_fmt= AV_PIX_FMT_YUV410P;
+ } else {
+ av_log(s, AV_LOG_ERROR, "unsupported color subsample mode %d %d\n", s->chroma_h_shift, s->chroma_v_shift);
+ s->chroma_h_shift = s->chroma_v_shift = 1;
+ s->avctx->pix_fmt= AV_PIX_FMT_YUV420P;
+ return AVERROR_INVALIDDATA;
+ }
+ s->nb_planes = 3;
+ } else {
+ av_log(s, AV_LOG_ERROR, "unsupported color space\n");
+ s->chroma_h_shift = s->chroma_v_shift = 1;
+ s->avctx->pix_fmt= AV_PIX_FMT_YUV420P;
+ return AVERROR_INVALIDDATA;
+ }
+
+
+ s->spatial_scalability= get_rac(&s->c, s->header_state);
+// s->rate_scalability= get_rac(&s->c, s->header_state);
+ GET_S(s->max_ref_frames, tmp < (unsigned)MAX_REF_FRAMES)
+ s->max_ref_frames++;
+
+ decode_qlogs(s);
+ }
+
+ if(!s->keyframe){
+ if(get_rac(&s->c, s->header_state)){
+ for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
+ int htaps, i, sum=0;
+ Plane *p= &s->plane[plane_index];
+ p->diag_mc= get_rac(&s->c, s->header_state);
+ htaps= get_symbol(&s->c, s->header_state, 0)*2 + 2;
+ if((unsigned)htaps > HTAPS_MAX || htaps==0)
+ return AVERROR_INVALIDDATA;
+ p->htaps= htaps;
+ for(i= htaps/2; i; i--){
+ p->hcoeff[i]= get_symbol(&s->c, s->header_state, 0) * (1-2*(i&1));
+ sum += p->hcoeff[i];
+ }
+ p->hcoeff[0]= 32-sum;
+ }
+ s->plane[2].diag_mc= s->plane[1].diag_mc;
+ s->plane[2].htaps = s->plane[1].htaps;
+ memcpy(s->plane[2].hcoeff, s->plane[1].hcoeff, sizeof(s->plane[1].hcoeff));
+ }
+ if(get_rac(&s->c, s->header_state)){
+ GET_S(s->spatial_decomposition_count, 0 < tmp && tmp <= MAX_DECOMPOSITIONS)
+ decode_qlogs(s);
+ }
+ }
+
+ s->spatial_decomposition_type+= get_symbol(&s->c, s->header_state, 1);
+ if(s->spatial_decomposition_type > 1U){
+ av_log(s->avctx, AV_LOG_ERROR, "spatial_decomposition_type %d not supported\n", s->spatial_decomposition_type);
+ return AVERROR_INVALIDDATA;
+ }
+ if(FFMIN(s->avctx-> width>>s->chroma_h_shift,
+ s->avctx->height>>s->chroma_v_shift) >> (s->spatial_decomposition_count-1) <= 1){
+ av_log(s->avctx, AV_LOG_ERROR, "spatial_decomposition_count %d too large for size\n", s->spatial_decomposition_count);
+ return AVERROR_INVALIDDATA;
+ }
+
+
+ s->qlog += get_symbol(&s->c, s->header_state, 1);
+ s->mv_scale += get_symbol(&s->c, s->header_state, 1);
+ s->qbias += get_symbol(&s->c, s->header_state, 1);
+ s->block_max_depth+= get_symbol(&s->c, s->header_state, 1);
+ if(s->block_max_depth > 1 || s->block_max_depth < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "block_max_depth= %d is too large\n", s->block_max_depth);
+ s->block_max_depth= 0;
+ return AVERROR_INVALIDDATA;
+ }
+
+ return 0;
+}
+
+static av_cold int decode_init(AVCodecContext *avctx)
+{
+ int ret;
+
+ if ((ret = ff_snow_common_init(avctx)) < 0) {
+ return ret;
+ }
+
+ return 0;
+}
+
+static int decode_blocks(SnowContext *s){
+ int x, y;
+ int w= s->b_width;
+ int h= s->b_height;
+ int res;
+
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ if ((res = decode_q_branch(s, 0, x, y)) < 0)
+ return res;
+ }
+ }
+ return 0;
+}
+
+static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
+ AVPacket *avpkt)
+{
+ const uint8_t *buf = avpkt->data;
+ int buf_size = avpkt->size;
+ SnowContext *s = avctx->priv_data;
+ RangeCoder * const c= &s->c;
+ int bytes_read;
+ AVFrame *picture = data;
+ int level, orientation, plane_index;
+ int res;
+
+ ff_init_range_decoder(c, buf, buf_size);
+ ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
+
+ s->current_picture->pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
+ if ((res = decode_header(s)) < 0)
+ return res;
+ if ((res=ff_snow_common_init_after_header(avctx)) < 0)
+ return res;
+
+ // realloc slice buffer for the case that spatial_decomposition_count changed
+ ff_slice_buffer_destroy(&s->sb);
+ if ((res = ff_slice_buffer_init(&s->sb, s->plane[0].height,
+ (MB_SIZE >> s->block_max_depth) +
+ s->spatial_decomposition_count * 11 + 1,
+ s->plane[0].width,
+ s->spatial_idwt_buffer)) < 0)
+ return res;
+
+ for(plane_index=0; plane_index < s->nb_planes; plane_index++){
+ Plane *p= &s->plane[plane_index];
+ p->fast_mc= p->diag_mc && p->htaps==6 && p->hcoeff[0]==40
+ && p->hcoeff[1]==-10
+ && p->hcoeff[2]==2;
+ }
+
+ ff_snow_alloc_blocks(s);
+
+ if((res = ff_snow_frame_start(s)) < 0)
+ return res;
+
+ s->current_picture->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
+
+ //keyframe flag duplication mess FIXME
+ if(avctx->debug&FF_DEBUG_PICT_INFO)
+ av_log(avctx, AV_LOG_ERROR,
+ "keyframe:%d qlog:%d qbias: %d mvscale: %d "
+ "decomposition_type:%d decomposition_count:%d\n",
+ s->keyframe, s->qlog, s->qbias, s->mv_scale,
+ s->spatial_decomposition_type,
+ s->spatial_decomposition_count
+ );
+
+ av_assert0(!s->avmv);
- if (s->avctx->flags2 & CODEC_FLAG2_EXPORT_MVS) {
+ if (s->avctx->flags2 & AV_CODEC_FLAG2_EXPORT_MVS) {
+ s->avmv = av_malloc_array(s->b_width * s->b_height, sizeof(AVMotionVector) << (s->block_max_depth*2));
+ }
+ s->avmv_index = 0;
+
+ if ((res = decode_blocks(s)) < 0)
+ return res;
+
+ for(plane_index=0; plane_index < s->nb_planes; plane_index++){
+ Plane *p= &s->plane[plane_index];
+ int w= p->width;
+ int h= p->height;
+ int x, y;
+ int decode_state[MAX_DECOMPOSITIONS][4][1]; /* Stored state info for unpack_coeffs. 1 variable per instance. */
+
+ if(s->avctx->debug&2048){
+ memset(s->spatial_dwt_buffer, 0, sizeof(DWTELEM)*w*h);
+ predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);
+
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ int v= s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x];
+ s->mconly_picture->data[plane_index][y*s->mconly_picture->linesize[plane_index] + x]= v;
+ }
+ }
+ }
+
+ {
+ for(level=0; level<s->spatial_decomposition_count; level++){
+ for(orientation=level ? 1 : 0; orientation<4; orientation++){
+ SubBand *b= &p->band[level][orientation];
+ unpack_coeffs(s, b, b->parent, orientation);
+ }
+ }
+ }
+
+ {
+ const int mb_h= s->b_height << s->block_max_depth;
+ const int block_size = MB_SIZE >> s->block_max_depth;
+ const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
+ int mb_y;
+ DWTCompose cs[MAX_DECOMPOSITIONS];
+ int yd=0, yq=0;
+ int y;
+ int end_y;
+
+ ff_spatial_idwt_buffered_init(cs, &s->sb, w, h, 1, s->spatial_decomposition_type, s->spatial_decomposition_count);
+ for(mb_y=0; mb_y<=mb_h; mb_y++){
+
+ int slice_starty = block_h*mb_y;
+ int slice_h = block_h*(mb_y+1);
+
+ if (!(s->keyframe || s->avctx->debug&512)){
+ slice_starty = FFMAX(0, slice_starty - (block_h >> 1));
+ slice_h -= (block_h >> 1);
+ }
+
+ for(level=0; level<s->spatial_decomposition_count; level++){
+ for(orientation=level ? 1 : 0; orientation<4; orientation++){
+ SubBand *b= &p->band[level][orientation];
+ int start_y;
+ int end_y;
+ int our_mb_start = mb_y;
+ int our_mb_end = (mb_y + 1);
+ const int extra= 3;
+ start_y = (mb_y ? ((block_h * our_mb_start) >> (s->spatial_decomposition_count - level)) + s->spatial_decomposition_count - level + extra: 0);
+ end_y = (((block_h * our_mb_end) >> (s->spatial_decomposition_count - level)) + s->spatial_decomposition_count - level + extra);
+ if (!(s->keyframe || s->avctx->debug&512)){
+ start_y = FFMAX(0, start_y - (block_h >> (1+s->spatial_decomposition_count - level)));
+ end_y = FFMAX(0, end_y - (block_h >> (1+s->spatial_decomposition_count - level)));
+ }
+ start_y = FFMIN(b->height, start_y);
+ end_y = FFMIN(b->height, end_y);
+
+ if (start_y != end_y){
+ if (orientation == 0){
+ SubBand * correlate_band = &p->band[0][0];
+ int correlate_end_y = FFMIN(b->height, end_y + 1);
+ int correlate_start_y = FFMIN(b->height, (start_y ? start_y + 1 : 0));
+ decode_subband_slice_buffered(s, correlate_band, &s->sb, correlate_start_y, correlate_end_y, decode_state[0][0]);
+ correlate_slice_buffered(s, &s->sb, correlate_band, correlate_band->ibuf, correlate_band->stride, 1, 0, correlate_start_y, correlate_end_y);
+ dequantize_slice_buffered(s, &s->sb, correlate_band, correlate_band->ibuf, correlate_band->stride, start_y, end_y);
+ }
+ else
+ decode_subband_slice_buffered(s, b, &s->sb, start_y, end_y, decode_state[level][orientation]);
+ }
+ }
+ }
+
+ for(; yd<slice_h; yd+=4){
+ ff_spatial_idwt_buffered_slice(&s->dwt, cs, &s->sb, s->temp_idwt_buffer, w, h, 1, s->spatial_decomposition_type, s->spatial_decomposition_count, yd);
+ }
+
+ if(s->qlog == LOSSLESS_QLOG){
+ for(; yq<slice_h && yq<h; yq++){
+ IDWTELEM * line = slice_buffer_get_line(&s->sb, yq);
+ for(x=0; x<w; x++){
+ line[x] <<= FRAC_BITS;
+ }
+ }
+ }
+
+ predict_slice_buffered(s, &s->sb, s->spatial_idwt_buffer, plane_index, 1, mb_y);
+
+ y = FFMIN(p->height, slice_starty);
+ end_y = FFMIN(p->height, slice_h);
+ while(y < end_y)
+ ff_slice_buffer_release(&s->sb, y++);
+ }
+
+ ff_slice_buffer_flush(&s->sb);
+ }
+
+ }
+
+ emms_c();
+
+ ff_snow_release_buffer(avctx);
+
+ if(!(s->avctx->debug&2048))
+ res = av_frame_ref(picture, s->current_picture);
+ else
+ res = av_frame_ref(picture, s->mconly_picture);
+ if (res >= 0 && s->avmv_index) {
+ AVFrameSideData *sd;
+
+ sd = av_frame_new_side_data(picture, AV_FRAME_DATA_MOTION_VECTORS, s->avmv_index * sizeof(AVMotionVector));
+ if (!sd)
+ return AVERROR(ENOMEM);
+ memcpy(sd->data, s->avmv, s->avmv_index * sizeof(AVMotionVector));
+ }
+
+ av_freep(&s->avmv);
+
+ if (res < 0)
+ return res;
+
+ *got_frame = 1;
+
+ bytes_read= c->bytestream - c->bytestream_start;
+ if(bytes_read ==0) av_log(s->avctx, AV_LOG_ERROR, "error at end of frame\n"); //FIXME
+
+ return bytes_read;
+}
+
+static av_cold int decode_end(AVCodecContext *avctx)
+{
+ SnowContext *s = avctx->priv_data;
+
+ ff_slice_buffer_destroy(&s->sb);
+
+ ff_snow_common_end(s);
+
+ return 0;
+}
+
+AVCodec ff_snow_decoder = {
+ .name = "snow",
+ .long_name = NULL_IF_CONFIG_SMALL("Snow"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_SNOW,
+ .priv_data_size = sizeof(SnowContext),
+ .init = decode_init,
+ .close = decode_end,
+ .decode = decode_frame,
+ .capabilities = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/,
+ .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
+ FF_CODEC_CAP_INIT_CLEANUP,
+};
--- /dev/null
+/*
+ * Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/intmath.h"
+#include "libavutil/log.h"
+#include "libavutil/opt.h"
+#include "avcodec.h"
+#include "internal.h"
+#include "snow_dwt.h"
+#include "snow.h"
+
+#include "rangecoder.h"
+#include "mathops.h"
+
+#include "mpegvideo.h"
+#include "h263.h"
+
+static av_cold int encode_init(AVCodecContext *avctx)
+{
+ SnowContext *s = avctx->priv_data;
+ int plane_index, ret;
+ int i;
+
+ if(avctx->prediction_method == DWT_97
- && (avctx->flags & CODEC_FLAG_QSCALE)
+ && (avctx->flags & AV_CODEC_FLAG_QSCALE)
+ && avctx->global_quality == 0){
+ av_log(avctx, AV_LOG_ERROR, "The 9/7 wavelet is incompatible with lossless mode.\n");
+ return -1;
+ }
+
+ s->spatial_decomposition_type= avctx->prediction_method; //FIXME add decorrelator type r transform_type
+
- s->mv_scale = (avctx->flags & CODEC_FLAG_QPEL) ? 2 : 4;
+ s->mv_scale = (avctx->flags & AV_CODEC_FLAG_QPEL) ? 2 : 4;
- s->block_max_depth= (avctx->flags & CODEC_FLAG_4MV ) ? 1 : 0;
+ s->block_max_depth= (avctx->flags & AV_CODEC_FLAG_4MV ) ? 1 : 0;
+
+ for(plane_index=0; plane_index<3; plane_index++){
+ s->plane[plane_index].diag_mc= 1;
+ s->plane[plane_index].htaps= 6;
+ s->plane[plane_index].hcoeff[0]= 40;
+ s->plane[plane_index].hcoeff[1]= -10;
+ s->plane[plane_index].hcoeff[2]= 2;
+ s->plane[plane_index].fast_mc= 1;
+ }
+
+ if ((ret = ff_snow_common_init(avctx)) < 0) {
+ return ret;
+ }
+ ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
+
+ ff_snow_alloc_blocks(s);
+
+ s->version=0;
+
+ s->m.avctx = avctx;
+ s->m.bit_rate= avctx->bit_rate;
+
+ s->m.me.temp =
+ s->m.me.scratchpad= av_mallocz_array((avctx->width+64), 2*16*2*sizeof(uint8_t));
+ s->m.me.map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
+ s->m.me.score_map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
+ s->m.sc.obmc_scratchpad= av_mallocz(MB_SIZE*MB_SIZE*12*sizeof(uint32_t));
+ if (!s->m.me.scratchpad || !s->m.me.map || !s->m.me.score_map || !s->m.sc.obmc_scratchpad)
+ return AVERROR(ENOMEM);
+
+ ff_h263_encode_init(&s->m); //mv_penalty
+
+ s->max_ref_frames = av_clip(avctx->refs, 1, MAX_REF_FRAMES);
+
- if(avctx->flags&CODEC_FLAG_PASS1){
+ if(avctx->flags&AV_CODEC_FLAG_PASS1){
+ if(!avctx->stats_out)
+ avctx->stats_out = av_mallocz(256);
+
+ if (!avctx->stats_out)
+ return AVERROR(ENOMEM);
+ }
- if((avctx->flags&CODEC_FLAG_PASS2) || !(avctx->flags&CODEC_FLAG_QSCALE)){
+ if((avctx->flags&AV_CODEC_FLAG_PASS2) || !(avctx->flags&CODEC_FLAG_QSCALE)){
+ if(ff_rate_control_init(&s->m) < 0)
+ return -1;
+ }
- s->pass1_rc= !(avctx->flags & (CODEC_FLAG_QSCALE|CODEC_FLAG_PASS2));
+ s->pass1_rc= !(avctx->flags & (AV_CODEC_FLAG_QSCALE|CODEC_FLAG_PASS2));
+
+ switch(avctx->pix_fmt){
+ case AV_PIX_FMT_YUV444P:
+// case AV_PIX_FMT_YUV422P:
+ case AV_PIX_FMT_YUV420P:
+// case AV_PIX_FMT_YUV411P:
+ case AV_PIX_FMT_YUV410P:
+ s->nb_planes = 3;
+ s->colorspace_type= 0;
+ break;
+ case AV_PIX_FMT_GRAY8:
+ s->nb_planes = 1;
+ s->colorspace_type = 1;
+ break;
+/* case AV_PIX_FMT_RGB32:
+ s->colorspace= 1;
+ break;*/
+ default:
+ av_log(avctx, AV_LOG_ERROR, "pixel format not supported\n");
+ return -1;
+ }
+ avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_h_shift, &s->chroma_v_shift);
+
+ ff_set_cmp(&s->mecc, s->mecc.me_cmp, s->avctx->me_cmp);
+ ff_set_cmp(&s->mecc, s->mecc.me_sub_cmp, s->avctx->me_sub_cmp);
+
+ s->input_picture = av_frame_alloc();
+ if (!s->input_picture)
+ return AVERROR(ENOMEM);
+
+ if ((ret = ff_snow_get_buffer(s, s->input_picture)) < 0)
+ return ret;
+
+ if(s->avctx->me_method == ME_ITER){
+ int size= s->b_width * s->b_height << 2*s->block_max_depth;
+ for(i=0; i<s->max_ref_frames; i++){
+ s->ref_mvs[i]= av_mallocz_array(size, sizeof(int16_t[2]));
+ s->ref_scores[i]= av_mallocz_array(size, sizeof(uint32_t));
+ if (!s->ref_mvs[i] || !s->ref_scores[i])
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ return 0;
+}
+
+//near copy & paste from dsputil, FIXME
+static int pix_sum(uint8_t * pix, int line_size, int w, int h)
+{
+ int s, i, j;
+
+ s = 0;
+ for (i = 0; i < h; i++) {
+ for (j = 0; j < w; j++) {
+ s += pix[0];
+ pix ++;
+ }
+ pix += line_size - w;
+ }
+ return s;
+}
+
+//near copy & paste from dsputil, FIXME
+static int pix_norm1(uint8_t * pix, int line_size, int w)
+{
+ int s, i, j;
+ uint32_t *sq = ff_square_tab + 256;
+
+ s = 0;
+ for (i = 0; i < w; i++) {
+ for (j = 0; j < w; j ++) {
+ s += sq[pix[0]];
+ pix ++;
+ }
+ pix += line_size - w;
+ }
+ return s;
+}
+
+static inline int get_penalty_factor(int lambda, int lambda2, int type){
+ switch(type&0xFF){
+ default:
+ case FF_CMP_SAD:
+ return lambda>>FF_LAMBDA_SHIFT;
+ case FF_CMP_DCT:
+ return (3*lambda)>>(FF_LAMBDA_SHIFT+1);
+ case FF_CMP_W53:
+ return (4*lambda)>>(FF_LAMBDA_SHIFT);
+ case FF_CMP_W97:
+ return (2*lambda)>>(FF_LAMBDA_SHIFT);
+ case FF_CMP_SATD:
+ case FF_CMP_DCT264:
+ return (2*lambda)>>FF_LAMBDA_SHIFT;
+ case FF_CMP_RD:
+ case FF_CMP_PSNR:
+ case FF_CMP_SSE:
+ case FF_CMP_NSSE:
+ return lambda2>>FF_LAMBDA_SHIFT;
+ case FF_CMP_BIT:
+ return 1;
+ }
+}
+
+//FIXME copy&paste
+#define P_LEFT P[1]
+#define P_TOP P[2]
+#define P_TOPRIGHT P[3]
+#define P_MEDIAN P[4]
+#define P_MV1 P[9]
+#define FLAG_QPEL 1 //must be 1
+
+static int encode_q_branch(SnowContext *s, int level, int x, int y){
+ uint8_t p_buffer[1024];
+ uint8_t i_buffer[1024];
+ uint8_t p_state[sizeof(s->block_state)];
+ uint8_t i_state[sizeof(s->block_state)];
+ RangeCoder pc, ic;
+ uint8_t *pbbak= s->c.bytestream;
+ uint8_t *pbbak_start= s->c.bytestream_start;
+ int score, score2, iscore, i_len, p_len, block_s, sum, base_bits;
+ const int w= s->b_width << s->block_max_depth;
+ const int h= s->b_height << s->block_max_depth;
+ const int rem_depth= s->block_max_depth - level;
+ const int index= (x + y*w) << rem_depth;
+ const int block_w= 1<<(LOG2_MB_SIZE - level);
+ int trx= (x+1)<<rem_depth;
+ int try= (y+1)<<rem_depth;
+ const BlockNode *left = x ? &s->block[index-1] : &null_block;
+ const BlockNode *top = y ? &s->block[index-w] : &null_block;
+ const BlockNode *right = trx<w ? &s->block[index+1] : &null_block;
+ const BlockNode *bottom= try<h ? &s->block[index+w] : &null_block;
+ const BlockNode *tl = y && x ? &s->block[index-w-1] : left;
+ const BlockNode *tr = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
+ int pl = left->color[0];
+ int pcb= left->color[1];
+ int pcr= left->color[2];
+ int pmx, pmy;
+ int mx=0, my=0;
+ int l,cr,cb;
+ const int stride= s->current_picture->linesize[0];
+ const int uvstride= s->current_picture->linesize[1];
+ uint8_t *current_data[3]= { s->input_picture->data[0] + (x + y* stride)*block_w,
+ s->input_picture->data[1] + ((x*block_w)>>s->chroma_h_shift) + ((y*uvstride*block_w)>>s->chroma_v_shift),
+ s->input_picture->data[2] + ((x*block_w)>>s->chroma_h_shift) + ((y*uvstride*block_w)>>s->chroma_v_shift)};
+ int P[10][2];
+ int16_t last_mv[3][2];
- int qpel= !!(s->avctx->flags & CODEC_FLAG_QPEL); //unused
+ int qpel= !!(s->avctx->flags & AV_CODEC_FLAG_QPEL); //unused
+ const int shift= 1+qpel;
+ MotionEstContext *c= &s->m.me;
+ int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
+ int mx_context= av_log2(2*FFABS(left->mx - top->mx));
+ int my_context= av_log2(2*FFABS(left->my - top->my));
+ int s_context= 2*left->level + 2*top->level + tl->level + tr->level;
+ int ref, best_ref, ref_score, ref_mx, ref_my;
+
+ av_assert0(sizeof(s->block_state) >= 256);
+ if(s->keyframe){
+ set_blocks(s, level, x, y, pl, pcb, pcr, 0, 0, 0, BLOCK_INTRA);
+ return 0;
+ }
+
+// clip predictors / edge ?
+
+ P_LEFT[0]= left->mx;
+ P_LEFT[1]= left->my;
+ P_TOP [0]= top->mx;
+ P_TOP [1]= top->my;
+ P_TOPRIGHT[0]= tr->mx;
+ P_TOPRIGHT[1]= tr->my;
+
+ last_mv[0][0]= s->block[index].mx;
+ last_mv[0][1]= s->block[index].my;
+ last_mv[1][0]= right->mx;
+ last_mv[1][1]= right->my;
+ last_mv[2][0]= bottom->mx;
+ last_mv[2][1]= bottom->my;
+
+ s->m.mb_stride=2;
+ s->m.mb_x=
+ s->m.mb_y= 0;
+ c->skip= 0;
+
+ av_assert1(c-> stride == stride);
+ av_assert1(c->uvstride == uvstride);
+
+ c->penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_cmp);
+ c->sub_penalty_factor= get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_sub_cmp);
+ c->mb_penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->mb_cmp);
+ c->current_mv_penalty= c->mv_penalty[s->m.f_code=1] + MAX_MV;
+
+ c->xmin = - x*block_w - 16+3;
+ c->ymin = - y*block_w - 16+3;
+ c->xmax = - (x+1)*block_w + (w<<(LOG2_MB_SIZE - s->block_max_depth)) + 16-3;
+ c->ymax = - (y+1)*block_w + (h<<(LOG2_MB_SIZE - s->block_max_depth)) + 16-3;
+
+ if(P_LEFT[0] > (c->xmax<<shift)) P_LEFT[0] = (c->xmax<<shift);
+ if(P_LEFT[1] > (c->ymax<<shift)) P_LEFT[1] = (c->ymax<<shift);
+ if(P_TOP[0] > (c->xmax<<shift)) P_TOP[0] = (c->xmax<<shift);
+ if(P_TOP[1] > (c->ymax<<shift)) P_TOP[1] = (c->ymax<<shift);
+ if(P_TOPRIGHT[0] < (c->xmin<<shift)) P_TOPRIGHT[0]= (c->xmin<<shift);
+ if(P_TOPRIGHT[0] > (c->xmax<<shift)) P_TOPRIGHT[0]= (c->xmax<<shift); //due to pmx no clip
+ if(P_TOPRIGHT[1] > (c->ymax<<shift)) P_TOPRIGHT[1]= (c->ymax<<shift);
+
+ P_MEDIAN[0]= mid_pred(P_LEFT[0], P_TOP[0], P_TOPRIGHT[0]);
+ P_MEDIAN[1]= mid_pred(P_LEFT[1], P_TOP[1], P_TOPRIGHT[1]);
+
+ if (!y) {
+ c->pred_x= P_LEFT[0];
+ c->pred_y= P_LEFT[1];
+ } else {
+ c->pred_x = P_MEDIAN[0];
+ c->pred_y = P_MEDIAN[1];
+ }
+
+ score= INT_MAX;
+ best_ref= 0;
+ for(ref=0; ref<s->ref_frames; ref++){
+ init_ref(c, current_data, s->last_picture[ref]->data, NULL, block_w*x, block_w*y, 0);
+
+ ref_score= ff_epzs_motion_search(&s->m, &ref_mx, &ref_my, P, 0, /*ref_index*/ 0, last_mv,
+ (1<<16)>>shift, level-LOG2_MB_SIZE+4, block_w);
+
+ av_assert2(ref_mx >= c->xmin);
+ av_assert2(ref_mx <= c->xmax);
+ av_assert2(ref_my >= c->ymin);
+ av_assert2(ref_my <= c->ymax);
+
+ ref_score= c->sub_motion_search(&s->m, &ref_mx, &ref_my, ref_score, 0, 0, level-LOG2_MB_SIZE+4, block_w);
+ ref_score= ff_get_mb_score(&s->m, ref_mx, ref_my, 0, 0, level-LOG2_MB_SIZE+4, block_w, 0);
+ ref_score+= 2*av_log2(2*ref)*c->penalty_factor;
+ if(s->ref_mvs[ref]){
+ s->ref_mvs[ref][index][0]= ref_mx;
+ s->ref_mvs[ref][index][1]= ref_my;
+ s->ref_scores[ref][index]= ref_score;
+ }
+ if(score > ref_score){
+ score= ref_score;
+ best_ref= ref;
+ mx= ref_mx;
+ my= ref_my;
+ }
+ }
+ //FIXME if mb_cmp != SSE then intra cannot be compared currently and mb_penalty vs. lambda2
+
+ // subpel search
+ base_bits= get_rac_count(&s->c) - 8*(s->c.bytestream - s->c.bytestream_start);
+ pc= s->c;
+ pc.bytestream_start=
+ pc.bytestream= p_buffer; //FIXME end/start? and at the other spot too
+ memcpy(p_state, s->block_state, sizeof(s->block_state));
+
+ if(level!=s->block_max_depth)
+ put_rac(&pc, &p_state[4 + s_context], 1);
+ put_rac(&pc, &p_state[1 + left->type + top->type], 0);
+ if(s->ref_frames > 1)
+ put_symbol(&pc, &p_state[128 + 1024 + 32*ref_context], best_ref, 0);
+ pred_mv(s, &pmx, &pmy, best_ref, left, top, tr);
+ put_symbol(&pc, &p_state[128 + 32*(mx_context + 16*!!best_ref)], mx - pmx, 1);
+ put_symbol(&pc, &p_state[128 + 32*(my_context + 16*!!best_ref)], my - pmy, 1);
+ p_len= pc.bytestream - pc.bytestream_start;
+ score += (s->lambda2*(get_rac_count(&pc)-base_bits))>>FF_LAMBDA_SHIFT;
+
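+ /* Intra cost: squared deviation of the luma block from its mean (the error of coding the block as a flat colour); the intra header rate is added below. */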
+ block_s= block_w*block_w;
+ sum = pix_sum(current_data[0], stride, block_w, block_w);
+ l= (sum + block_s/2)/block_s;
+ iscore = pix_norm1(current_data[0], stride, block_w) - 2*l*sum + l*l*block_s;
+
+ if (s->nb_planes > 2) {
+ block_s= block_w*block_w>>(s->chroma_h_shift + s->chroma_v_shift);
+ sum = pix_sum(current_data[1], uvstride, block_w>>s->chroma_h_shift, block_w>>s->chroma_v_shift);
+ cb= (sum + block_s/2)/block_s;
+ // iscore += pix_norm1(&current_mb[1][0], uvstride, block_w>>1) - 2*cb*sum + cb*cb*block_s;
+ sum = pix_sum(current_data[2], uvstride, block_w>>s->chroma_h_shift, block_w>>s->chroma_v_shift);
+ cr= (sum + block_s/2)/block_s;
+ // iscore += pix_norm1(&current_mb[2][0], uvstride, block_w>>1) - 2*cr*sum + cr*cr*block_s;
+ }else
+ cb = cr = 0;
+
+ ic= s->c;
+ ic.bytestream_start=
+ ic.bytestream= i_buffer; //FIXME end/start? and at the other spot too
+ memcpy(i_state, s->block_state, sizeof(s->block_state));
+ if(level!=s->block_max_depth)
+ put_rac(&ic, &i_state[4 + s_context], 1);
+ put_rac(&ic, &i_state[1 + left->type + top->type], 1);
+ put_symbol(&ic, &i_state[32], l-pl , 1);
+ if (s->nb_planes > 2) {
+ put_symbol(&ic, &i_state[64], cb-pcb, 1);
+ put_symbol(&ic, &i_state[96], cr-pcr, 1);
+ }
+ i_len= ic.bytestream - ic.bytestream_start;
+ iscore += (s->lambda2*(get_rac_count(&ic)-base_bits))>>FF_LAMBDA_SHIFT;
+
+ av_assert1(iscore < 255*255*256 + s->lambda2*10);
+ av_assert1(iscore >= 0);
+ av_assert1(l>=0 && l<=255);
+ av_assert1(pl>=0 && pl<=255);
+
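+ /* At the root level, feed the inter vs. intra cost comparison into the scene-change score. */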
+ if(level==0){
+ int varc= iscore >> 8;
+ int vard= score >> 8;
+ if (vard <= 64 || vard < varc)
+ c->scene_change_score+= ff_sqrt(vard) - ff_sqrt(varc);
+ else
+ c->scene_change_score+= s->m.qscale;
+ }
+
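+ /* Try splitting into four child blocks; keep the split if its combined cost beats both the inter and the intra alternative. */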
+ if(level!=s->block_max_depth){
+ put_rac(&s->c, &s->block_state[4 + s_context], 0);
+ score2 = encode_q_branch(s, level+1, 2*x+0, 2*y+0);
+ score2+= encode_q_branch(s, level+1, 2*x+1, 2*y+0);
+ score2+= encode_q_branch(s, level+1, 2*x+0, 2*y+1);
+ score2+= encode_q_branch(s, level+1, 2*x+1, 2*y+1);
+ score2+= s->lambda2>>FF_LAMBDA_SHIFT; //FIXME exact split overhead
+
+ if(score2 < score && score2 < iscore)
+ return score2;
+ }
+
+ if(iscore < score){
+ pred_mv(s, &pmx, &pmy, 0, left, top, tr);
+ memcpy(pbbak, i_buffer, i_len);
+ s->c= ic;
+ s->c.bytestream_start= pbbak_start;
+ s->c.bytestream= pbbak + i_len;
+ set_blocks(s, level, x, y, l, cb, cr, pmx, pmy, 0, BLOCK_INTRA);
+ memcpy(s->block_state, i_state, sizeof(s->block_state));
+ return iscore;
+ }else{
+ memcpy(pbbak, p_buffer, p_len);
+ s->c= pc;
+ s->c.bytestream_start= pbbak_start;
+ s->c.bytestream= pbbak + p_len;
+ set_blocks(s, level, x, y, pl, pcb, pcr, mx, my, best_ref, 0);
+ memcpy(s->block_state, p_state, sizeof(s->block_state));
+ return score;
+ }
+}
+
+static void encode_q_branch2(SnowContext *s, int level, int x, int y){
+ const int w= s->b_width << s->block_max_depth;
+ const int rem_depth= s->block_max_depth - level;
+ const int index= (x + y*w) << rem_depth;
+ int trx= (x+1)<<rem_depth;
+ BlockNode *b= &s->block[index];
+ const BlockNode *left = x ? &s->block[index-1] : &null_block;
+ const BlockNode *top = y ? &s->block[index-w] : &null_block;
+ const BlockNode *tl = y && x ? &s->block[index-w-1] : left;
+ const BlockNode *tr = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
+ int pl = left->color[0];
+ int pcb= left->color[1];
+ int pcr= left->color[2];
+ int pmx, pmy;
+ int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
+ int mx_context= av_log2(2*FFABS(left->mx - top->mx)) + 16*!!b->ref;
+ int my_context= av_log2(2*FFABS(left->my - top->my)) + 16*!!b->ref;
+ int s_context= 2*left->level + 2*top->level + tl->level + tr->level;
+
+ if(s->keyframe){
+ set_blocks(s, level, x, y, pl, pcb, pcr, 0, 0, 0, BLOCK_INTRA);
+ return;
+ }
+
+ if(level!=s->block_max_depth){
+ if(same_block(b,b+1) && same_block(b,b+w) && same_block(b,b+w+1)){
+ put_rac(&s->c, &s->block_state[4 + s_context], 1);
+ }else{
+ put_rac(&s->c, &s->block_state[4 + s_context], 0);
+ encode_q_branch2(s, level+1, 2*x+0, 2*y+0);
+ encode_q_branch2(s, level+1, 2*x+1, 2*y+0);
+ encode_q_branch2(s, level+1, 2*x+0, 2*y+1);
+ encode_q_branch2(s, level+1, 2*x+1, 2*y+1);
+ return;
+ }
+ }
+ if(b->type & BLOCK_INTRA){
+ pred_mv(s, &pmx, &pmy, 0, left, top, tr);
+ put_rac(&s->c, &s->block_state[1 + (left->type&1) + (top->type&1)], 1);
+ put_symbol(&s->c, &s->block_state[32], b->color[0]-pl , 1);
+ if (s->nb_planes > 2) {
+ put_symbol(&s->c, &s->block_state[64], b->color[1]-pcb, 1);
+ put_symbol(&s->c, &s->block_state[96], b->color[2]-pcr, 1);
+ }
+ set_blocks(s, level, x, y, b->color[0], b->color[1], b->color[2], pmx, pmy, 0, BLOCK_INTRA);
+ }else{
+ pred_mv(s, &pmx, &pmy, b->ref, left, top, tr);
+ put_rac(&s->c, &s->block_state[1 + (left->type&1) + (top->type&1)], 0);
+ if(s->ref_frames > 1)
+ put_symbol(&s->c, &s->block_state[128 + 1024 + 32*ref_context], b->ref, 0);
+ put_symbol(&s->c, &s->block_state[128 + 32*mx_context], b->mx - pmx, 1);
+ put_symbol(&s->c, &s->block_state[128 + 32*my_context], b->my - pmy, 1);
+ set_blocks(s, level, x, y, pl, pcb, pcr, b->mx, b->my, b->ref, 0);
+ }
+}
+
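+ /* Weighted least-squares estimate of a block's intra DC value, accounting for the OBMC contributions of the neighbouring blocks. */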
+static int get_dc(SnowContext *s, int mb_x, int mb_y, int plane_index){
+ int i, x2, y2;
+ Plane *p= &s->plane[plane_index];
+ const int block_size = MB_SIZE >> s->block_max_depth;
+ const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
+ const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
+ const uint8_t *obmc = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
+ const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
+ const int ref_stride= s->current_picture->linesize[plane_index];
+ uint8_t *src= s-> input_picture->data[plane_index];
+ IDWTELEM *dst= (IDWTELEM*)s->m.sc.obmc_scratchpad + plane_index*block_size*block_size*4; //FIXME change to unsigned
+ const int b_stride = s->b_width << s->block_max_depth;
+ const int w= p->width;
+ const int h= p->height;
+ int index= mb_x + mb_y*b_stride;
+ BlockNode *b= &s->block[index];
+ BlockNode backup= *b;
+ int ab=0;
+ int aa=0;
+
+ av_assert2(s->chroma_h_shift == s->chroma_v_shift); //obmc stuff above
+
+ b->type|= BLOCK_INTRA;
+ b->color[plane_index]= 0;
+ memset(dst, 0, obmc_stride*obmc_stride*sizeof(IDWTELEM));
+
+ for(i=0; i<4; i++){
+ int mb_x2= mb_x + (i &1) - 1;
+ int mb_y2= mb_y + (i>>1) - 1;
+ int x= block_w*mb_x2 + block_w/2;
+ int y= block_h*mb_y2 + block_h/2;
+
+ add_yblock(s, 0, NULL, dst + (i&1)*block_w + (i>>1)*obmc_stride*block_h, NULL, obmc,
+ x, y, block_w, block_h, w, h, obmc_stride, ref_stride, obmc_stride, mb_x2, mb_y2, 0, 0, plane_index);
+
+ for(y2= FFMAX(y, 0); y2<FFMIN(h, y+block_h); y2++){
+ for(x2= FFMAX(x, 0); x2<FFMIN(w, x+block_w); x2++){
+ int index= x2-(block_w*mb_x - block_w/2) + (y2-(block_h*mb_y - block_h/2))*obmc_stride;
+ int obmc_v= obmc[index];
+ int d;
+ if(y<0) obmc_v += obmc[index + block_h*obmc_stride];
+ if(x<0) obmc_v += obmc[index + block_w];
+ if(y+block_h>h) obmc_v += obmc[index - block_h*obmc_stride];
+ if(x+block_w>w) obmc_v += obmc[index - block_w];
+ //FIXME precalculate this or simplify it some other way
+
+ d = -dst[index] + (1<<(FRAC_BITS-1));
+ dst[index] = d;
+ ab += (src[x2 + y2*ref_stride] - (d>>FRAC_BITS)) * obmc_v;
+ aa += obmc_v * obmc_v; //FIXME precalculate this
+ }
+ }
+ }
+ *b= backup;
+
+ return av_clip_uint8( ROUNDED_DIV(ab<<LOG2_OBMC_MAX, aa) ); //FIXME we should not need clipping
+}
+
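+ /* Rough per-block rate estimate in bits: colour deltas for intra blocks, motion vector residual plus reference index for inter blocks. */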
+static inline int get_block_bits(SnowContext *s, int x, int y, int w){
+ const int b_stride = s->b_width << s->block_max_depth;
+ const int b_height = s->b_height<< s->block_max_depth;
+ int index= x + y*b_stride;
+ const BlockNode *b = &s->block[index];
+ const BlockNode *left = x ? &s->block[index-1] : &null_block;
+ const BlockNode *top = y ? &s->block[index-b_stride] : &null_block;
+ const BlockNode *tl = y && x ? &s->block[index-b_stride-1] : left;
+ const BlockNode *tr = y && x+w<b_stride ? &s->block[index-b_stride+w] : tl;
+ int dmx, dmy;
+// int mx_context= av_log2(2*FFABS(left->mx - top->mx));
+// int my_context= av_log2(2*FFABS(left->my - top->my));
+
+ if(x<0 || x>=b_stride || y>=b_height)
+ return 0;
+/*
+1 0 0
+01X 1-2 1
+001XX 3-6 2-3
+0001XXX 7-14 4-7
+00001XXXX 15-30 8-15
+*/
+//FIXME try accurate rate
+//FIXME intra and inter predictors if surrounding blocks are not the same type
+ if(b->type & BLOCK_INTRA){
+ return 3+2*( av_log2(2*FFABS(left->color[0] - b->color[0]))
+ + av_log2(2*FFABS(left->color[1] - b->color[1]))
+ + av_log2(2*FFABS(left->color[2] - b->color[2])));
+ }else{
+ pred_mv(s, &dmx, &dmy, b->ref, left, top, tr);
+ dmx-= b->mx;
+ dmy-= b->my;
+ return 2*(1 + av_log2(2*FFABS(dmx)) //FIXME kill the 2* can be merged in lambda
+ + av_log2(2*FFABS(dmy))
+ + av_log2(2*b->ref));
+ }
+}
+
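+ /* Rate-distortion cost of one block: build its OBMC prediction into current_picture, compare against the input and add the estimated rate times the penalty factor. */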
+static int get_block_rd(SnowContext *s, int mb_x, int mb_y, int plane_index, uint8_t (*obmc_edged)[MB_SIZE * 2]){
+ Plane *p= &s->plane[plane_index];
+ const int block_size = MB_SIZE >> s->block_max_depth;
+ const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
+ const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
+ const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
+ const int ref_stride= s->current_picture->linesize[plane_index];
+ uint8_t *dst= s->current_picture->data[plane_index];
+ uint8_t *src= s-> input_picture->data[plane_index];
+ IDWTELEM *pred= (IDWTELEM*)s->m.sc.obmc_scratchpad + plane_index*block_size*block_size*4;
+ uint8_t *cur = s->scratchbuf;
+ uint8_t *tmp = s->emu_edge_buffer;
+ const int b_stride = s->b_width << s->block_max_depth;
+ const int b_height = s->b_height<< s->block_max_depth;
+ const int w= p->width;
+ const int h= p->height;
+ int distortion;
+ int rate= 0;
+ const int penalty_factor= get_penalty_factor(s->lambda, s->lambda2, s->avctx->me_cmp);
+ int sx= block_w*mb_x - block_w/2;
+ int sy= block_h*mb_y - block_h/2;
+ int x0= FFMAX(0,-sx);
+ int y0= FFMAX(0,-sy);
+ int x1= FFMIN(block_w*2, w-sx);
+ int y1= FFMIN(block_h*2, h-sy);
+ int i,x,y;
+
+ av_assert2(s->chroma_h_shift == s->chroma_v_shift); //obmc and square assumptions below; checking only block_w
+
+ ff_snow_pred_block(s, cur, tmp, ref_stride, sx, sy, block_w*2, block_h*2, &s->block[mb_x + mb_y*b_stride], plane_index, w, h);
+
+ for(y=y0; y<y1; y++){
+ const uint8_t *obmc1= obmc_edged[y];
+ const IDWTELEM *pred1 = pred + y*obmc_stride;
+ uint8_t *cur1 = cur + y*ref_stride;
+ uint8_t *dst1 = dst + sx + (sy+y)*ref_stride;
+ for(x=x0; x<x1; x++){
+#if FRAC_BITS >= LOG2_OBMC_MAX
+ int v = (cur1[x] * obmc1[x]) << (FRAC_BITS - LOG2_OBMC_MAX);
+#else
+ int v = (cur1[x] * obmc1[x] + (1<<(LOG2_OBMC_MAX - FRAC_BITS-1))) >> (LOG2_OBMC_MAX - FRAC_BITS);
+#endif
+ v = (v + pred1[x]) >> FRAC_BITS;
+ if(v&(~255)) v= ~(v>>31);
+ dst1[x] = v;
+ }
+ }
+
+ /* copy the regions where obmc[] = (uint8_t)256 */
+ if(LOG2_OBMC_MAX == 8
+ && (mb_x == 0 || mb_x == b_stride-1)
+ && (mb_y == 0 || mb_y == b_height-1)){
+ if(mb_x == 0)
+ x1 = block_w;
+ else
+ x0 = block_w;
+ if(mb_y == 0)
+ y1 = block_h;
+ else
+ y0 = block_h;
+ for(y=y0; y<y1; y++)
+ memcpy(dst + sx+x0 + (sy+y)*ref_stride, cur + x0 + y*ref_stride, x1-x0);
+ }
+
+ if(block_w==16){
+ /* FIXME rearrange dsputil to fit 32x32 cmp functions */
+ /* FIXME check alignment of the cmp wavelet vs the encoding wavelet */
+ /* FIXME cmps overlap but do not cover the wavelet's whole support.
+ * So improving the score of one block is not strictly guaranteed
+ * to improve the score of the whole frame, thus iterative motion
+ * estimation does not always converge. */
+ if(s->avctx->me_cmp == FF_CMP_W97)
+ distortion = ff_w97_32_c(&s->m, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, 32);
+ else if(s->avctx->me_cmp == FF_CMP_W53)
+ distortion = ff_w53_32_c(&s->m, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, 32);
+ else{
+ distortion = 0;
+ for(i=0; i<4; i++){
+ int off = sx+16*(i&1) + (sy+16*(i>>1))*ref_stride;
+ distortion += s->mecc.me_cmp[0](&s->m, src + off, dst + off, ref_stride, 16);
+ }
+ }
+ }else{
+ av_assert2(block_w==8);
+ distortion = s->mecc.me_cmp[0](&s->m, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, block_w*2);
+ }
+
+ if(plane_index==0){
+ for(i=0; i<4; i++){
+/* ..RRr
+ * .RXx.
+ * rxx..
+ */
+ rate += get_block_bits(s, mb_x + (i&1) - (i>>1), mb_y + (i>>1), 1);
+ }
+ if(mb_x == b_stride-2)
+ rate += get_block_bits(s, mb_x + 1, mb_y + 1, 1);
+ }
+ return distortion + rate*penalty_factor;
+}
+
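+ /* Rate-distortion cost of a 2x2 group of blocks, rebuilding the OBMC prediction over all nine affected block positions. */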
+static int get_4block_rd(SnowContext *s, int mb_x, int mb_y, int plane_index){
+ int i, y2;
+ Plane *p= &s->plane[plane_index];
+ const int block_size = MB_SIZE >> s->block_max_depth;
+ const int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
+ const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
+ const uint8_t *obmc = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
+ const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
+ const int ref_stride= s->current_picture->linesize[plane_index];
+ uint8_t *dst= s->current_picture->data[plane_index];
+ uint8_t *src= s-> input_picture->data[plane_index];
+ //FIXME zero_dst is const but add_yblock changes dst if add is 0 (this is never the case for dst=zero_dst)
+ // const has only been removed from zero_dst to suppress a warning
+ static IDWTELEM zero_dst[4096]; //FIXME
+ const int b_stride = s->b_width << s->block_max_depth;
+ const int w= p->width;
+ const int h= p->height;
+ int distortion= 0;
+ int rate= 0;
+ const int penalty_factor= get_penalty_factor(s->lambda, s->lambda2, s->avctx->me_cmp);
+
+ av_assert2(s->chroma_h_shift == s->chroma_v_shift); //obmc and square assumptions below
+
+ for(i=0; i<9; i++){
+ int mb_x2= mb_x + (i%3) - 1;
+ int mb_y2= mb_y + (i/3) - 1;
+ int x= block_w*mb_x2 + block_w/2;
+ int y= block_h*mb_y2 + block_h/2;
+
+ add_yblock(s, 0, NULL, zero_dst, dst, obmc,
+ x, y, block_w, block_h, w, h, /*dst_stride*/0, ref_stride, obmc_stride, mb_x2, mb_y2, 1, 1, plane_index);
+
+ //FIXME find a cleaner/simpler way to skip the outside stuff
+ for(y2= y; y2<0; y2++)
+ memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, block_w);
+ for(y2= h; y2<y+block_h; y2++)
+ memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, block_w);
+ if(x<0){
+ for(y2= y; y2<y+block_h; y2++)
+ memcpy(dst + x + y2*ref_stride, src + x + y2*ref_stride, -x);
+ }
+ if(x+block_w > w){
+ for(y2= y; y2<y+block_h; y2++)
+ memcpy(dst + w + y2*ref_stride, src + w + y2*ref_stride, x+block_w - w);
+ }
+
+ av_assert1(block_w== 8 || block_w==16);
+ distortion += s->mecc.me_cmp[block_w==8](&s->m, src + x + y*ref_stride, dst + x + y*ref_stride, ref_stride, block_h);
+ }
+
+ if(plane_index==0){
+ BlockNode *b= &s->block[mb_x+mb_y*b_stride];
+ int merged= same_block(b,b+1) && same_block(b,b+b_stride) && same_block(b,b+b_stride+1);
+
+/* ..RRRr
+ * .RXXx.
+ * .RXXx.
+ * rxxx.
+ */
+ if(merged)
+ rate = get_block_bits(s, mb_x, mb_y, 2);
+ for(i=merged?4:0; i<9; i++){
+ static const int dxy[9][2] = {{0,0},{1,0},{0,1},{1,1},{2,0},{2,1},{-1,2},{0,2},{1,2}};
+ rate += get_block_bits(s, mb_x + dxy[i][0], mb_y + dxy[i][1], 1);
+ }
+ }
+ return distortion + rate*penalty_factor;
+}
+
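+ /* Entropy-code one subband: a first pass collects zero runs where all neighbours (and the parent coefficient) are zero, a second pass writes significance, magnitude and sign with context modelling. */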
+static int encode_subband_c0run(SnowContext *s, SubBand *b, const IDWTELEM *src, const IDWTELEM *parent, int stride, int orientation){
+ const int w= b->width;
+ const int h= b->height;
+ int x, y;
+
+ if(1){
+ int run=0;
+ int *runs = s->run_buffer;
+ int run_index=0;
+ int max_index;
+
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ int v, p=0;
+ int /*ll=0, */l=0, lt=0, t=0, rt=0;
+ v= src[x + y*stride];
+
+ if(y){
+ t= src[x + (y-1)*stride];
+ if(x){
+ lt= src[x - 1 + (y-1)*stride];
+ }
+ if(x + 1 < w){
+ rt= src[x + 1 + (y-1)*stride];
+ }
+ }
+ if(x){
+ l= src[x - 1 + y*stride];
+ /*if(x > 1){
+ if(orientation==1) ll= src[y + (x-2)*stride];
+ else ll= src[x - 2 + y*stride];
+ }*/
+ }
+ if(parent){
+ int px= x>>1;
+ int py= y>>1;
+ if(px<b->parent->width && py<b->parent->height)
+ p= parent[px + py*2*stride];
+ }
+ if(!(/*ll|*/l|lt|t|rt|p)){
+ if(v){
+ runs[run_index++]= run;
+ run=0;
+ }else{
+ run++;
+ }
+ }
+ }
+ }
+ max_index= run_index;
+ runs[run_index++]= run;
+ run_index=0;
+ run= runs[run_index++];
+
+ put_symbol2(&s->c, b->state[30], max_index, 0);
+ if(run_index <= max_index)
+ put_symbol2(&s->c, b->state[1], run, 3);
+
+ for(y=0; y<h; y++){
+ if(s->c.bytestream_end - s->c.bytestream < w*40){
+ av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+ return -1;
+ }
+ for(x=0; x<w; x++){
+ int v, p=0;
+ int /*ll=0, */l=0, lt=0, t=0, rt=0;
+ v= src[x + y*stride];
+
+ if(y){
+ t= src[x + (y-1)*stride];
+ if(x){
+ lt= src[x - 1 + (y-1)*stride];
+ }
+ if(x + 1 < w){
+ rt= src[x + 1 + (y-1)*stride];
+ }
+ }
+ if(x){
+ l= src[x - 1 + y*stride];
+ /*if(x > 1){
+ if(orientation==1) ll= src[y + (x-2)*stride];
+ else ll= src[x - 2 + y*stride];
+ }*/
+ }
+ if(parent){
+ int px= x>>1;
+ int py= y>>1;
+ if(px<b->parent->width && py<b->parent->height)
+ p= parent[px + py*2*stride];
+ }
+ if(/*ll|*/l|lt|t|rt|p){
+ int context= av_log2(/*FFABS(ll) + */3*FFABS(l) + FFABS(lt) + 2*FFABS(t) + FFABS(rt) + FFABS(p));
+
+ put_rac(&s->c, &b->state[0][context], !!v);
+ }else{
+ if(!run){
+ run= runs[run_index++];
+
+ if(run_index <= max_index)
+ put_symbol2(&s->c, b->state[1], run, 3);
+ av_assert2(v);
+ }else{
+ run--;
+ av_assert2(!v);
+ }
+ }
+ if(v){
+ int context= av_log2(/*FFABS(ll) + */3*FFABS(l) + FFABS(lt) + 2*FFABS(t) + FFABS(rt) + FFABS(p));
+ int l2= 2*FFABS(l) + (l<0);
+ int t2= 2*FFABS(t) + (t<0);
+
+ put_symbol2(&s->c, b->state[context + 2], FFABS(v)-1, context-4);
+ put_rac(&s->c, &b->state[0][16 + 1 + 3 + ff_quant3bA[l2&0xFF] + 3*ff_quant3bA[t2&0xFF]], v<0);
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+static int encode_subband(SnowContext *s, SubBand *b, const IDWTELEM *src, const IDWTELEM *parent, int stride, int orientation){
+// encode_subband_qtree(s, b, src, parent, stride, orientation);
+// encode_subband_z0run(s, b, src, parent, stride, orientation);
+ return encode_subband_c0run(s, b, src, parent, stride, orientation);
+// encode_subband_dzr(s, b, src, parent, stride, orientation);
+}
+
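+ /* Try one candidate (intra colour or motion vector) for a block; keep it only if it lowers *best_rd, otherwise restore the backup. A small cache skips re-evaluating identical inter candidates. */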
+static av_always_inline int check_block(SnowContext *s, int mb_x, int mb_y, int p[3], int intra, uint8_t (*obmc_edged)[MB_SIZE * 2], int *best_rd){
+ const int b_stride= s->b_width << s->block_max_depth;
+ BlockNode *block= &s->block[mb_x + mb_y * b_stride];
+ BlockNode backup= *block;
+ unsigned value;
+ int rd, index;
+
+ av_assert2(mb_x>=0 && mb_y>=0);
+ av_assert2(mb_x<b_stride);
+
+ if(intra){
+ block->color[0] = p[0];
+ block->color[1] = p[1];
+ block->color[2] = p[2];
+ block->type |= BLOCK_INTRA;
+ }else{
+ index= (p[0] + 31*p[1]) & (ME_CACHE_SIZE-1);
+ value= s->me_cache_generation + (p[0]>>10) + (p[1]<<6) + (block->ref<<12);
+ if(s->me_cache[index] == value)
+ return 0;
+ s->me_cache[index]= value;
+
+ block->mx= p[0];
+ block->my= p[1];
+ block->type &= ~BLOCK_INTRA;
+ }
+
+ rd= get_block_rd(s, mb_x, mb_y, 0, obmc_edged) + s->intra_penalty * !!intra;
+
+//FIXME chroma
+ if(rd < *best_rd){
+ *best_rd= rd;
+ return 1;
+ }else{
+ *block= backup;
+ return 0;
+ }
+}
+
+/* special case for int[2] args we discard afterwards,
+ * fixes compilation problem with gcc 2.95 */
+static av_always_inline int check_block_inter(SnowContext *s, int mb_x, int mb_y, int p0, int p1, uint8_t (*obmc_edged)[MB_SIZE * 2], int *best_rd){
+ int p[2] = {p0, p1};
+ return check_block(s, mb_x, mb_y, p, 0, obmc_edged, best_rd);
+}
+
+static av_always_inline int check_4block_inter(SnowContext *s, int mb_x, int mb_y, int p0, int p1, int ref, int *best_rd){
+ const int b_stride= s->b_width << s->block_max_depth;
+ BlockNode *block= &s->block[mb_x + mb_y * b_stride];
+ BlockNode backup[4];
+ unsigned value;
+ int rd, index;
+
+ /* We don't initialize backup[] during variable declaration, because
+ * that fails to compile on MSVC: "cannot convert from 'BlockNode' to
+ * 'int16_t'". */
+ backup[0] = block[0];
+ backup[1] = block[1];
+ backup[2] = block[b_stride];
+ backup[3] = block[b_stride + 1];
+
+ av_assert2(mb_x>=0 && mb_y>=0);
+ av_assert2(mb_x<b_stride);
+ av_assert2(((mb_x|mb_y)&1) == 0);
+
+ index= (p0 + 31*p1) & (ME_CACHE_SIZE-1);
+ value= s->me_cache_generation + (p0>>10) + (p1<<6) + (block->ref<<12);
+ if(s->me_cache[index] == value)
+ return 0;
+ s->me_cache[index]= value;
+
+ block->mx= p0;
+ block->my= p1;
+ block->ref= ref;
+ block->type &= ~BLOCK_INTRA;
+ block[1]= block[b_stride]= block[b_stride+1]= *block;
+
+ rd= get_4block_rd(s, mb_x, mb_y, 0);
+
+//FIXME chroma
+ if(rd < *best_rd){
+ *best_rd= rd;
+ return 1;
+ }else{
+ block[0]= backup[0];
+ block[1]= backup[1];
+ block[b_stride]= backup[2];
+ block[b_stride+1]= backup[3];
+ return 0;
+ }
+}
+
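+ /* Iterative motion estimation: seed blocks via encode_q_branch (with coder state saved and restored), then refine blocks by RD cost over multiple passes until nothing changes; optionally merge 2x2 groups. */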
+static void iterative_me(SnowContext *s){
+ int pass, mb_x, mb_y;
+ const int b_width = s->b_width << s->block_max_depth;
+ const int b_height= s->b_height << s->block_max_depth;
+ const int b_stride= b_width;
+ int color[3];
+
+ {
+ RangeCoder r = s->c;
+ uint8_t state[sizeof(s->block_state)];
+ memcpy(state, s->block_state, sizeof(s->block_state));
+ for(mb_y= 0; mb_y<s->b_height; mb_y++)
+ for(mb_x= 0; mb_x<s->b_width; mb_x++)
+ encode_q_branch(s, 0, mb_x, mb_y);
+ s->c = r;
+ memcpy(s->block_state, state, sizeof(s->block_state));
+ }
+
+ for(pass=0; pass<25; pass++){
+ int change= 0;
+
+ for(mb_y= 0; mb_y<b_height; mb_y++){
+ for(mb_x= 0; mb_x<b_width; mb_x++){
+ int dia_change, i, j, ref;
+ int best_rd= INT_MAX, ref_rd;
+ BlockNode backup, ref_b;
+ const int index= mb_x + mb_y * b_stride;
+ BlockNode *block= &s->block[index];
+ BlockNode *tb = mb_y ? &s->block[index-b_stride ] : NULL;
+ BlockNode *lb = mb_x ? &s->block[index -1] : NULL;
+ BlockNode *rb = mb_x+1<b_width ? &s->block[index +1] : NULL;
+ BlockNode *bb = mb_y+1<b_height ? &s->block[index+b_stride ] : NULL;
+ BlockNode *tlb= mb_x && mb_y ? &s->block[index-b_stride-1] : NULL;
+ BlockNode *trb= mb_x+1<b_width && mb_y ? &s->block[index-b_stride+1] : NULL;
+ BlockNode *blb= mb_x && mb_y+1<b_height ? &s->block[index+b_stride-1] : NULL;
+ BlockNode *brb= mb_x+1<b_width && mb_y+1<b_height ? &s->block[index+b_stride+1] : NULL;
+ const int b_w= (MB_SIZE >> s->block_max_depth);
+ uint8_t obmc_edged[MB_SIZE * 2][MB_SIZE * 2];
+
+ if(pass && (block->type & BLOCK_OPT))
+ continue;
+ block->type |= BLOCK_OPT;
+
+ backup= *block;
+
+ if(!s->me_cache_generation)
+ memset(s->me_cache, 0, sizeof(s->me_cache));
+ s->me_cache_generation += 1<<22;
+
+ //FIXME precalculate
+ {
+ int x, y;
+ for (y = 0; y < b_w * 2; y++)
+ memcpy(obmc_edged[y], ff_obmc_tab[s->block_max_depth] + y * b_w * 2, b_w * 2);
+ if(mb_x==0)
+ for(y=0; y<b_w*2; y++)
+ memset(obmc_edged[y], obmc_edged[y][0] + obmc_edged[y][b_w-1], b_w);
+ if(mb_x==b_stride-1)
+ for(y=0; y<b_w*2; y++)
+ memset(obmc_edged[y]+b_w, obmc_edged[y][b_w] + obmc_edged[y][b_w*2-1], b_w);
+ if(mb_y==0){
+ for(x=0; x<b_w*2; x++)
+ obmc_edged[0][x] += obmc_edged[b_w-1][x];
+ for(y=1; y<b_w; y++)
+ memcpy(obmc_edged[y], obmc_edged[0], b_w*2);
+ }
+ if(mb_y==b_height-1){
+ for(x=0; x<b_w*2; x++)
+ obmc_edged[b_w*2-1][x] += obmc_edged[b_w][x];
+ for(y=b_w; y<b_w*2-1; y++)
+ memcpy(obmc_edged[y], obmc_edged[b_w*2-1], b_w*2);
+ }
+ }
+
+ //skip stuff outside the picture
+ if(mb_x==0 || mb_y==0 || mb_x==b_width-1 || mb_y==b_height-1){
+ uint8_t *src= s-> input_picture->data[0];
+ uint8_t *dst= s->current_picture->data[0];
+ const int stride= s->current_picture->linesize[0];
+ const int block_w= MB_SIZE >> s->block_max_depth;
+ const int block_h= MB_SIZE >> s->block_max_depth;
+ const int sx= block_w*mb_x - block_w/2;
+ const int sy= block_h*mb_y - block_h/2;
+ const int w= s->plane[0].width;
+ const int h= s->plane[0].height;
+ int y;
+
+ for(y=sy; y<0; y++)
+ memcpy(dst + sx + y*stride, src + sx + y*stride, block_w*2);
+ for(y=h; y<sy+block_h*2; y++)
+ memcpy(dst + sx + y*stride, src + sx + y*stride, block_w*2);
+ if(sx<0){
+ for(y=sy; y<sy+block_h*2; y++)
+ memcpy(dst + sx + y*stride, src + sx + y*stride, -sx);
+ }
+ if(sx+block_w*2 > w){
+ for(y=sy; y<sy+block_h*2; y++)
+ memcpy(dst + w + y*stride, src + w + y*stride, sx+block_w*2 - w);
+ }
+ }
+
+ // intra(black) = neighbors' contribution to the current block
+ for(i=0; i < s->nb_planes; i++)
+ color[i]= get_dc(s, mb_x, mb_y, i);
+
+ // get previous score (cannot be cached due to OBMC)
+ if(pass > 0 && (block->type&BLOCK_INTRA)){
+ int color0[3]= {block->color[0], block->color[1], block->color[2]};
+ check_block(s, mb_x, mb_y, color0, 1, obmc_edged, &best_rd);
+ }else
+ check_block_inter(s, mb_x, mb_y, block->mx, block->my, obmc_edged, &best_rd);
+
+ ref_b= *block;
+ ref_rd= best_rd;
+ for(ref=0; ref < s->ref_frames; ref++){
+ int16_t (*mvr)[2]= &s->ref_mvs[ref][index];
+ if(s->ref_scores[ref][index] > s->ref_scores[ref_b.ref][index]*3/2) //FIXME tune threshold
+ continue;
+ block->ref= ref;
+ best_rd= INT_MAX;
+
+ check_block_inter(s, mb_x, mb_y, mvr[0][0], mvr[0][1], obmc_edged, &best_rd);
+ check_block_inter(s, mb_x, mb_y, 0, 0, obmc_edged, &best_rd);
+ if(tb)
+ check_block_inter(s, mb_x, mb_y, mvr[-b_stride][0], mvr[-b_stride][1], obmc_edged, &best_rd);
+ if(lb)
+ check_block_inter(s, mb_x, mb_y, mvr[-1][0], mvr[-1][1], obmc_edged, &best_rd);
+ if(rb)
+ check_block_inter(s, mb_x, mb_y, mvr[1][0], mvr[1][1], obmc_edged, &best_rd);
+ if(bb)
+ check_block_inter(s, mb_x, mb_y, mvr[b_stride][0], mvr[b_stride][1], obmc_edged, &best_rd);
+
+ /* fullpel ME */
+ //FIXME avoid subpel interpolation / round to nearest integer
+ do{
+ int newx = block->mx;
+ int newy = block->my;
+ dia_change=0;
+ for(i=0; i<FFMAX(s->avctx->dia_size, 1); i++){
+ for(j=0; j<i; j++){
+ dia_change |= check_block_inter(s, mb_x, mb_y, newx+4*(i-j), newy+(4*j), obmc_edged, &best_rd);
+ dia_change |= check_block_inter(s, mb_x, mb_y, newx-4*(i-j), newy-(4*j), obmc_edged, &best_rd);
+ dia_change |= check_block_inter(s, mb_x, mb_y, newx-(4*j), newy+4*(i-j), obmc_edged, &best_rd);
+ dia_change |= check_block_inter(s, mb_x, mb_y, newx+(4*j), newy-4*(i-j), obmc_edged, &best_rd);
+ }
+ }
+ }while(dia_change);
+ /* subpel ME */
+ do{
+ static const int square[8][2]= {{+1, 0},{-1, 0},{ 0,+1},{ 0,-1},{+1,+1},{-1,-1},{+1,-1},{-1,+1},};
+ dia_change=0;
+ for(i=0; i<8; i++)
+ dia_change |= check_block_inter(s, mb_x, mb_y, block->mx+square[i][0], block->my+square[i][1], obmc_edged, &best_rd);
+ }while(dia_change);
+ //FIXME or try the standard 2 pass qpel or similar
+
+ mvr[0][0]= block->mx;
+ mvr[0][1]= block->my;
+ if(ref_rd > best_rd){
+ ref_rd= best_rd;
+ ref_b= *block;
+ }
+ }
+ best_rd= ref_rd;
+ *block= ref_b;
+ check_block(s, mb_x, mb_y, color, 1, obmc_edged, &best_rd);
+ //FIXME RD style color selection
+ if(!same_block(block, &backup)){
+ if(tb ) tb ->type &= ~BLOCK_OPT;
+ if(lb ) lb ->type &= ~BLOCK_OPT;
+ if(rb ) rb ->type &= ~BLOCK_OPT;
+ if(bb ) bb ->type &= ~BLOCK_OPT;
+ if(tlb) tlb->type &= ~BLOCK_OPT;
+ if(trb) trb->type &= ~BLOCK_OPT;
+ if(blb) blb->type &= ~BLOCK_OPT;
+ if(brb) brb->type &= ~BLOCK_OPT;
+ change ++;
+ }
+ }
+ }
+ av_log(s->avctx, AV_LOG_DEBUG, "pass:%d changed:%d\n", pass, change);
+ if(!change)
+ break;
+ }
+
+ if(s->block_max_depth == 1){
+ int change= 0;
+ for(mb_y= 0; mb_y<b_height; mb_y+=2){
+ for(mb_x= 0; mb_x<b_width; mb_x+=2){
+ int i;
+ int best_rd, init_rd;
+ const int index= mb_x + mb_y * b_stride;
+ BlockNode *b[4];
+
+ b[0]= &s->block[index];
+ b[1]= b[0]+1;
+ b[2]= b[0]+b_stride;
+ b[3]= b[2]+1;
+ if(same_block(b[0], b[1]) &&
+ same_block(b[0], b[2]) &&
+ same_block(b[0], b[3]))
+ continue;
+
+ if(!s->me_cache_generation)
+ memset(s->me_cache, 0, sizeof(s->me_cache));
+ s->me_cache_generation += 1<<22;
+
+ init_rd= best_rd= get_4block_rd(s, mb_x, mb_y, 0);
+
+ //FIXME more multiref search?
+ check_4block_inter(s, mb_x, mb_y,
+ (b[0]->mx + b[1]->mx + b[2]->mx + b[3]->mx + 2) >> 2,
+ (b[0]->my + b[1]->my + b[2]->my + b[3]->my + 2) >> 2, 0, &best_rd);
+
+ for(i=0; i<4; i++)
+ if(!(b[i]->type&BLOCK_INTRA))
+ check_4block_inter(s, mb_x, mb_y, b[i]->mx, b[i]->my, b[i]->ref, &best_rd);
+
+ if(init_rd != best_rd)
+ change++;
+ }
+ }
+ av_log(s->avctx, AV_LOG_ERROR, "pass:4mv changed:%d\n", change*4);
+ }
+}
+
+static void encode_blocks(SnowContext *s, int search){
+ int x, y;
+ int w= s->b_width;
+ int h= s->b_height;
+
+ if(s->avctx->me_method == ME_ITER && !s->keyframe && search)
+ iterative_me(s);
+
+ for(y=0; y<h; y++){
+ if(s->c.bytestream_end - s->c.bytestream < w*MB_SIZE*MB_SIZE*3){ //FIXME nicer limit
+ av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+ return;
+ }
+ for(x=0; x<w; x++){
+ if(s->avctx->me_method == ME_ITER || !search)
+ encode_q_branch2(s, 0, x, y);
+ else
+ encode_q_branch (s, 0, x, y);
+ }
+ }
+}
+
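+ /* Dead-zone quantizer: coefficients inside the dead zone become zero, the rest are scaled down by qmul, with an optional bias. */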
+static void quantize(SnowContext *s, SubBand *b, IDWTELEM *dst, DWTELEM *src, int stride, int bias){
+ const int w= b->width;
+ const int h= b->height;
+ const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
+ const int qmul= ff_qexp[qlog&(QROOT-1)]<<((qlog>>QSHIFT) + ENCODER_EXTRA_BITS);
+ int x,y, thres1, thres2;
+
+ if(s->qlog == LOSSLESS_QLOG){
+ for(y=0; y<h; y++)
+ for(x=0; x<w; x++)
+ dst[x + y*stride]= src[x + y*stride];
+ return;
+ }
+
+ bias= bias ? 0 : (3*qmul)>>3;
+ thres1= ((qmul - bias)>>QEXPSHIFT) - 1;
+ thres2= 2*thres1;
+
+ if(!bias){
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ int i= src[x + y*stride];
+
+ if((unsigned)(i+thres1) > thres2){
+ if(i>=0){
+ i<<= QEXPSHIFT;
+ i/= qmul; //FIXME optimize
+ dst[x + y*stride]= i;
+ }else{
+ i= -i;
+ i<<= QEXPSHIFT;
+ i/= qmul; //FIXME optimize
+ dst[x + y*stride]= -i;
+ }
+ }else
+ dst[x + y*stride]= 0;
+ }
+ }
+ }else{
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ int i= src[x + y*stride];
+
+ if((unsigned)(i+thres1) > thres2){
+ if(i>=0){
+ i<<= QEXPSHIFT;
+ i= (i + bias) / qmul; //FIXME optimize
+ dst[x + y*stride]= i;
+ }else{
+ i= -i;
+ i<<= QEXPSHIFT;
+ i= (i + bias) / qmul; //FIXME optimize
+ dst[x + y*stride]= -i;
+ }
+ }else
+ dst[x + y*stride]= 0;
+ }
+ }
+ }
+}
+
+static void dequantize(SnowContext *s, SubBand *b, IDWTELEM *src, int stride){
+ const int w= b->width;
+ const int h= b->height;
+ const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
+ const int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
+ const int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
+ int x,y;
+
+ if(s->qlog == LOSSLESS_QLOG) return;
+
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ int i= src[x + y*stride];
+ if(i<0){
+ src[x + y*stride]= -((-i*qmul + qadd)>>(QEXPSHIFT)); //FIXME try different bias
+ }else if(i>0){
+ src[x + y*stride]= (( i*qmul + qadd)>>(QEXPSHIFT));
+ }
+ }
+ }
+}
+
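+ /* decorrelate()/correlate() remove and re-apply a simple left/top (or median) prediction on the DC subband around entropy coding. */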
+static void decorrelate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median){
+ const int w= b->width;
+ const int h= b->height;
+ int x,y;
+
+ for(y=h-1; y>=0; y--){
+ for(x=w-1; x>=0; x--){
+ int i= x + y*stride;
+
+ if(x){
+ if(use_median){
+ if(y && x+1<w) src[i] -= mid_pred(src[i - 1], src[i - stride], src[i - stride + 1]);
+ else src[i] -= src[i - 1];
+ }else{
+ if(y) src[i] -= mid_pred(src[i - 1], src[i - stride], src[i - 1] + src[i - stride] - src[i - 1 - stride]);
+ else src[i] -= src[i - 1];
+ }
+ }else{
+ if(y) src[i] -= src[i - stride];
+ }
+ }
+ }
+}
+
+static void correlate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median){
+ const int w= b->width;
+ const int h= b->height;
+ int x,y;
+
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ int i= x + y*stride;
+
+ if(x){
+ if(use_median){
+ if(y && x+1<w) src[i] += mid_pred(src[i - 1], src[i - stride], src[i - stride + 1]);
+ else src[i] += src[i - 1];
+ }else{
+ if(y) src[i] += mid_pred(src[i - 1], src[i - stride], src[i - 1] + src[i - stride] - src[i - 1 - stride]);
+ else src[i] += src[i - 1];
+ }
+ }else{
+ if(y) src[i] += src[i - stride];
+ }
+ }
+ }
+}
+
+static void encode_qlogs(SnowContext *s){
+ int plane_index, level, orientation;
+
+ for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
+ for(level=0; level<s->spatial_decomposition_count; level++){
+ for(orientation=level ? 1:0; orientation<4; orientation++){
+ if(orientation==2) continue;
+ put_symbol(&s->c, s->header_state, s->plane[plane_index].band[level][orientation].qlog, 1);
+ }
+ }
+ }
+}
+
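+ /* Write the frame header: keyframe flag, global parameters on keyframes, and per-frame deltas of qlog, qbias, mv_scale, block_max_depth, etc. */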
+static void encode_header(SnowContext *s){
+ int plane_index, i;
+ uint8_t kstate[32];
+
+ memset(kstate, MID_STATE, sizeof(kstate));
+
+ put_rac(&s->c, kstate, s->keyframe);
+ if(s->keyframe || s->always_reset){
+ ff_snow_reset_contexts(s);
+ s->last_spatial_decomposition_type=
+ s->last_qlog=
+ s->last_qbias=
+ s->last_mv_scale=
+ s->last_block_max_depth= 0;
+ for(plane_index=0; plane_index<2; plane_index++){
+ Plane *p= &s->plane[plane_index];
+ p->last_htaps=0;
+ p->last_diag_mc=0;
+ memset(p->last_hcoeff, 0, sizeof(p->last_hcoeff));
+ }
+ }
+ if(s->keyframe){
+ put_symbol(&s->c, s->header_state, s->version, 0);
+ put_rac(&s->c, s->header_state, s->always_reset);
+ put_symbol(&s->c, s->header_state, s->temporal_decomposition_type, 0);
+ put_symbol(&s->c, s->header_state, s->temporal_decomposition_count, 0);
+ put_symbol(&s->c, s->header_state, s->spatial_decomposition_count, 0);
+ put_symbol(&s->c, s->header_state, s->colorspace_type, 0);
+ if (s->nb_planes > 2) {
+ put_symbol(&s->c, s->header_state, s->chroma_h_shift, 0);
+ put_symbol(&s->c, s->header_state, s->chroma_v_shift, 0);
+ }
+ put_rac(&s->c, s->header_state, s->spatial_scalability);
+// put_rac(&s->c, s->header_state, s->rate_scalability);
+ put_symbol(&s->c, s->header_state, s->max_ref_frames-1, 0);
+
+ encode_qlogs(s);
+ }
+
+ if(!s->keyframe){
+ int update_mc=0;
+ for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
+ Plane *p= &s->plane[plane_index];
+ update_mc |= p->last_htaps != p->htaps;
+ update_mc |= p->last_diag_mc != p->diag_mc;
+ update_mc |= !!memcmp(p->last_hcoeff, p->hcoeff, sizeof(p->hcoeff));
+ }
+ put_rac(&s->c, s->header_state, update_mc);
+ if(update_mc){
+ for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
+ Plane *p= &s->plane[plane_index];
+ put_rac(&s->c, s->header_state, p->diag_mc);
+ put_symbol(&s->c, s->header_state, p->htaps/2-1, 0);
+ for(i= p->htaps/2; i; i--)
+ put_symbol(&s->c, s->header_state, FFABS(p->hcoeff[i]), 0);
+ }
+ }
+ if(s->last_spatial_decomposition_count != s->spatial_decomposition_count){
+ put_rac(&s->c, s->header_state, 1);
+ put_symbol(&s->c, s->header_state, s->spatial_decomposition_count, 0);
+ encode_qlogs(s);
+ }else
+ put_rac(&s->c, s->header_state, 0);
+ }
+
+ put_symbol(&s->c, s->header_state, s->spatial_decomposition_type - s->last_spatial_decomposition_type, 1);
+ put_symbol(&s->c, s->header_state, s->qlog - s->last_qlog , 1);
+ put_symbol(&s->c, s->header_state, s->mv_scale - s->last_mv_scale, 1);
+ put_symbol(&s->c, s->header_state, s->qbias - s->last_qbias , 1);
+ put_symbol(&s->c, s->header_state, s->block_max_depth - s->last_block_max_depth, 1);
+
+}
+
+static void update_last_header_values(SnowContext *s){
+ int plane_index;
+
+ if(!s->keyframe){
+ for(plane_index=0; plane_index<2; plane_index++){
+ Plane *p= &s->plane[plane_index];
+ p->last_diag_mc= p->diag_mc;
+ p->last_htaps = p->htaps;
+ memcpy(p->last_hcoeff, p->hcoeff, sizeof(p->hcoeff));
+ }
+ }
+
+ s->last_spatial_decomposition_type = s->spatial_decomposition_type;
+ s->last_qlog = s->qlog;
+ s->last_qbias = s->qbias;
+ s->last_mv_scale = s->mv_scale;
+ s->last_block_max_depth = s->block_max_depth;
+ s->last_spatial_decomposition_count = s->spatial_decomposition_count;
+}
+
+static int qscale2qlog(int qscale){
+ return rint(QROOT*log2(qscale / (float)FF_QP2LAMBDA))
+ + 61*QROOT/8; ///< 64 > 60
+}
+
+static int ratecontrol_1pass(SnowContext *s, AVFrame *pict)
+{
+ /* Estimate the frame's complexity as a sum of weighted dwt coefficients.
+ * FIXME we know exact mv bits at this point,
+ * but ratecontrol isn't set up to include them. */
+ uint32_t coef_sum= 0;
+ int level, orientation, delta_qlog;
+
+ for(level=0; level<s->spatial_decomposition_count; level++){
+ for(orientation=level ? 1 : 0; orientation<4; orientation++){
+ SubBand *b= &s->plane[0].band[level][orientation];
+ IDWTELEM *buf= b->ibuf;
+ const int w= b->width;
+ const int h= b->height;
+ const int stride= b->stride;
+ const int qlog= av_clip(2*QROOT + b->qlog, 0, QROOT*16);
+ const int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
+ const int qdiv= (1<<16)/qmul;
+ int x, y;
+ //FIXME this is ugly
+ for(y=0; y<h; y++)
+ for(x=0; x<w; x++)
+ buf[x+y*stride]= b->buf[x+y*stride];
+ if(orientation==0)
+ decorrelate(s, b, buf, stride, 1, 0);
+ for(y=0; y<h; y++)
+ for(x=0; x<w; x++)
+ coef_sum+= abs(buf[x+y*stride]) * qdiv >> 16;
+ }
+ }
+
+ /* ugly, ratecontrol just takes a sqrt again */
+ av_assert0(coef_sum < INT_MAX);
+ coef_sum = (uint64_t)coef_sum * coef_sum >> 16;
+
+ if(pict->pict_type == AV_PICTURE_TYPE_I){
+ s->m.current_picture.mb_var_sum= coef_sum;
+ s->m.current_picture.mc_mb_var_sum= 0;
+ }else{
+ s->m.current_picture.mc_mb_var_sum= coef_sum;
+ s->m.current_picture.mb_var_sum= 0;
+ }
+
+ pict->quality= ff_rate_estimate_qscale(&s->m, 1);
+ if (pict->quality < 0)
+ return INT_MIN;
+ s->lambda= pict->quality * 3/2;
+ delta_qlog= qscale2qlog(pict->quality) - s->qlog;
+ s->qlog+= delta_qlog;
+ return delta_qlog;
+}
+
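+ /* Derive each subband's qlog offset from the energy that a unit impulse in the subband spreads into the reconstructed image. */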
+static void calculate_visual_weight(SnowContext *s, Plane *p){
+ int width = p->width;
+ int height= p->height;
+ int level, orientation, x, y;
+
+ for(level=0; level<s->spatial_decomposition_count; level++){
+ for(orientation=level ? 1 : 0; orientation<4; orientation++){
+ SubBand *b= &p->band[level][orientation];
+ IDWTELEM *ibuf= b->ibuf;
+ int64_t error=0;
+
+ memset(s->spatial_idwt_buffer, 0, sizeof(*s->spatial_idwt_buffer)*width*height);
+ ibuf[b->width/2 + b->height/2*b->stride]= 256*16;
+ ff_spatial_idwt(s->spatial_idwt_buffer, s->temp_idwt_buffer, width, height, width, s->spatial_decomposition_type, s->spatial_decomposition_count);
+ for(y=0; y<height; y++){
+ for(x=0; x<width; x++){
+ int64_t d= s->spatial_idwt_buffer[x + y*width]*16;
+ error += d*d;
+ }
+ }
+
+ b->qlog= (int)(log(352256.0/sqrt(error)) / log(pow(2.0, 1.0/QROOT))+0.5);
+ }
+ }
+}
+
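+ /* Main encode entry point: copy and pad the input, code the block/motion structure, then per plane run DWT, quantization, entropy coding and reconstruction. */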
+static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+ const AVFrame *pict, int *got_packet)
+{
+ SnowContext *s = avctx->priv_data;
+ RangeCoder * const c= &s->c;
+ AVFrame *pic = pict;
+ const int width= s->avctx->width;
+ const int height= s->avctx->height;
+ int level, orientation, plane_index, i, y, ret;
+ uint8_t rc_header_bak[sizeof(s->header_state)];
+ uint8_t rc_block_bak[sizeof(s->block_state)];
+
+ if ((ret = ff_alloc_packet2(avctx, pkt, s->b_width*s->b_height*MB_SIZE*MB_SIZE*3 + FF_MIN_BUFFER_SIZE, 0)) < 0)
+ return ret;
+
+ ff_init_range_encoder(c, pkt->data, pkt->size);
+ ff_build_rac_states(c, (1LL<<32)/20, 256-8);
+
+ for(i=0; i < s->nb_planes; i++){
+ int hshift= i ? s->chroma_h_shift : 0;
+ int vshift= i ? s->chroma_v_shift : 0;
+ for(y=0; y<FF_CEIL_RSHIFT(height, vshift); y++)
+ memcpy(&s->input_picture->data[i][y * s->input_picture->linesize[i]],
+ &pict->data[i][y * pict->linesize[i]],
+ FF_CEIL_RSHIFT(width, hshift));
+ s->mpvencdsp.draw_edges(s->input_picture->data[i], s->input_picture->linesize[i],
+ FF_CEIL_RSHIFT(width, hshift), FF_CEIL_RSHIFT(height, vshift),
+ EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
+ EDGE_TOP | EDGE_BOTTOM);
+
+ }
+ emms_c();
+ s->new_picture = pict;
+
+ s->m.picture_number= avctx->frame_number;
- if(!(avctx->flags&CODEC_FLAG_QSCALE)) {
++ if(avctx->flags&AV_CODEC_FLAG_PASS2){
+ s->m.pict_type = pic->pict_type = s->m.rc_context.entry[avctx->frame_number].new_pict_type;
+ s->keyframe = pic->pict_type == AV_PICTURE_TYPE_I;
- if (s->qlog < 0 || (!pic->quality && (avctx->flags & CODEC_FLAG_QSCALE))) {
++ if(!(avctx->flags&AV_CODEC_FLAG_QSCALE)) {
+ pic->quality = ff_rate_estimate_qscale(&s->m, 0);
+ if (pic->quality < 0)
+ return -1;
+ }
+ }else{
+ s->keyframe= avctx->gop_size==0 || avctx->frame_number % avctx->gop_size == 0;
+ s->m.pict_type = pic->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
+ }
+
+ if(s->pass1_rc && avctx->frame_number == 0)
+ pic->quality = 2*FF_QP2LAMBDA;
+ if (pic->quality) {
+ s->qlog = qscale2qlog(pic->quality);
+ s->lambda = pic->quality * 3/2;
+ }
- s->m.quarter_sample= (s->avctx->flags & CODEC_FLAG_QPEL)!=0;
++ if (s->qlog < 0 || (!pic->quality && (avctx->flags & AV_CODEC_FLAG_QSCALE))) {
+ s->qlog= LOSSLESS_QLOG;
+ s->lambda = 0;
+ }//else keep previous frame's qlog until after motion estimation
+
+ if (s->current_picture->data[0] && !(s->avctx->flags&CODEC_FLAG_EMU_EDGE)) {
+ int w = s->avctx->width;
+ int h = s->avctx->height;
+
+ s->mpvencdsp.draw_edges(s->current_picture->data[0],
+ s->current_picture->linesize[0], w , h ,
+ EDGE_WIDTH , EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
+ if (s->current_picture->data[2]) {
+ s->mpvencdsp.draw_edges(s->current_picture->data[1],
+ s->current_picture->linesize[1], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
+ EDGE_WIDTH>>s->chroma_h_shift, EDGE_WIDTH>>s->chroma_v_shift, EDGE_TOP | EDGE_BOTTOM);
+ s->mpvencdsp.draw_edges(s->current_picture->data[2],
+ s->current_picture->linesize[2], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
+ EDGE_WIDTH>>s->chroma_h_shift, EDGE_WIDTH>>s->chroma_v_shift, EDGE_TOP | EDGE_BOTTOM);
+ }
+ }
+
+ ff_snow_frame_start(s);
+ av_frame_unref(avctx->coded_frame);
+ ret = av_frame_ref(avctx->coded_frame, s->current_picture);
+ if (ret < 0)
+ return ret;
+
+ s->m.current_picture_ptr= &s->m.current_picture;
+ s->m.current_picture.f = s->current_picture;
+ s->m.current_picture.f->pts = pict->pts;
+ if(pic->pict_type == AV_PICTURE_TYPE_P){
+ int block_width = (width +15)>>4;
+ int block_height= (height+15)>>4;
+ int stride= s->current_picture->linesize[0];
+
+ av_assert0(s->current_picture->data[0]);
+ av_assert0(s->last_picture[0]->data[0]);
+
+ s->m.avctx= s->avctx;
+ s->m. last_picture.f = s->last_picture[0];
+ s->m. new_picture.f = s->input_picture;
+ s->m. last_picture_ptr= &s->m. last_picture;
+ s->m.linesize = stride;
+ s->m.uvlinesize= s->current_picture->linesize[1];
+ s->m.width = width;
+ s->m.height= height;
+ s->m.mb_width = block_width;
+ s->m.mb_height= block_height;
+ s->m.mb_stride= s->m.mb_width+1;
+ s->m.b8_stride= 2*s->m.mb_width+1;
+ s->m.f_code=1;
+ s->m.pict_type = pic->pict_type;
+ s->m.me_method= s->avctx->me_method;
+ s->m.me.scene_change_score=0;
+ s->m.me.dia_size = avctx->dia_size;
- && !(avctx->flags&CODEC_FLAG_PASS2)
++ s->m.quarter_sample= (s->avctx->flags & AV_CODEC_FLAG_QPEL)!=0;
+ s->m.out_format= FMT_H263;
+ s->m.unrestricted_mv= 1;
+
+ s->m.lambda = s->lambda;
+ s->m.qscale= (s->m.lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
+ s->lambda2= s->m.lambda2= (s->m.lambda*s->m.lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
+
+ s->m.mecc= s->mecc; //move
+ s->m.qdsp= s->qdsp; //move
+ s->m.hdsp = s->hdsp;
+ ff_init_me(&s->m);
+ s->hdsp = s->m.hdsp;
+ s->mecc= s->m.mecc;
+ }
+
+ if(s->pass1_rc){
+ memcpy(rc_header_bak, s->header_state, sizeof(s->header_state));
+ memcpy(rc_block_bak, s->block_state, sizeof(s->block_state));
+ }
+
+redo_frame:
+
+ s->spatial_decomposition_count= 5;
+
+ while( !(width >>(s->chroma_h_shift + s->spatial_decomposition_count))
+ || !(height>>(s->chroma_v_shift + s->spatial_decomposition_count)))
+ s->spatial_decomposition_count--;
+
+ if (s->spatial_decomposition_count <= 0) {
+ av_log(avctx, AV_LOG_ERROR, "Resolution too low\n");
+ return AVERROR(EINVAL);
+ }
+
+ s->m.pict_type = pic->pict_type;
+ s->qbias = pic->pict_type == AV_PICTURE_TYPE_P ? 2 : 0;
+
+ ff_snow_common_init_after_header(avctx);
+
+ if(s->last_spatial_decomposition_count != s->spatial_decomposition_count){
+ for(plane_index=0; plane_index < s->nb_planes; plane_index++){
+ calculate_visual_weight(s, &s->plane[plane_index]);
+ }
+ }
+
+ encode_header(s);
+ s->m.misc_bits = 8*(s->c.bytestream - s->c.bytestream_start);
+ encode_blocks(s, 1);
+ s->m.mv_bits = 8*(s->c.bytestream - s->c.bytestream_start) - s->m.misc_bits;
+
+ for(plane_index=0; plane_index < s->nb_planes; plane_index++){
+ Plane *p= &s->plane[plane_index];
+ int w= p->width;
+ int h= p->height;
+ int x, y;
+// int bits= put_bits_count(&s->c.pb);
+
+ if (!s->memc_only) {
+ //FIXME optimize
+ if(pict->data[plane_index]) //FIXME gray hack
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ s->spatial_idwt_buffer[y*w + x]= pict->data[plane_index][y*pict->linesize[plane_index] + x]<<FRAC_BITS;
+ }
+ }
+ predict_plane(s, s->spatial_idwt_buffer, plane_index, 0);
+
+ if( plane_index==0
+ && pic->pict_type == AV_PICTURE_TYPE_P
- if(s->avctx->flags&CODEC_FLAG_PSNR){
++ && !(avctx->flags&AV_CODEC_FLAG_PASS2)
+ && s->m.me.scene_change_score > s->avctx->scenechange_threshold){
+ ff_init_range_encoder(c, pkt->data, pkt->size);
+ ff_build_rac_states(c, (1LL<<32)/20, 256-8);
+ pic->pict_type= AV_PICTURE_TYPE_I;
+ s->keyframe=1;
+ s->current_picture->key_frame=1;
+ goto redo_frame;
+ }
+
+ if(s->qlog == LOSSLESS_QLOG){
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ s->spatial_dwt_buffer[y*w + x]= (s->spatial_idwt_buffer[y*w + x] + (1<<(FRAC_BITS-1))-1)>>FRAC_BITS;
+ }
+ }
+ }else{
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ s->spatial_dwt_buffer[y*w + x]=s->spatial_idwt_buffer[y*w + x]<<ENCODER_EXTRA_BITS;
+ }
+ }
+ }
+
+ ff_spatial_dwt(s->spatial_dwt_buffer, s->temp_dwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
+
+ if(s->pass1_rc && plane_index==0){
+ int delta_qlog = ratecontrol_1pass(s, pic);
+ if (delta_qlog <= INT_MIN)
+ return -1;
+ if(delta_qlog){
+ //reordering qlog in the bitstream would eliminate this reset
+ ff_init_range_encoder(c, pkt->data, pkt->size);
+ memcpy(s->header_state, rc_header_bak, sizeof(s->header_state));
+ memcpy(s->block_state, rc_block_bak, sizeof(s->block_state));
+ encode_header(s);
+ encode_blocks(s, 0);
+ }
+ }
+
+ for(level=0; level<s->spatial_decomposition_count; level++){
+ for(orientation=level ? 1 : 0; orientation<4; orientation++){
+ SubBand *b= &p->band[level][orientation];
+
+ quantize(s, b, b->ibuf, b->buf, b->stride, s->qbias);
+ if(orientation==0)
+ decorrelate(s, b, b->ibuf, b->stride, pic->pict_type == AV_PICTURE_TYPE_P, 0);
+ if (!s->no_bitstream)
+ encode_subband(s, b, b->ibuf, b->parent ? b->parent->ibuf : NULL, b->stride, orientation);
+ av_assert0(b->parent==NULL || b->parent->stride == b->stride*2);
+ if(orientation==0)
+ correlate(s, b, b->ibuf, b->stride, 1, 0);
+ }
+ }
+
+ for(level=0; level<s->spatial_decomposition_count; level++){
+ for(orientation=level ? 1 : 0; orientation<4; orientation++){
+ SubBand *b= &p->band[level][orientation];
+
+ dequantize(s, b, b->ibuf, b->stride);
+ }
+ }
+
+ ff_spatial_idwt(s->spatial_idwt_buffer, s->temp_idwt_buffer, w, h, w, s->spatial_decomposition_type, s->spatial_decomposition_count);
+ if(s->qlog == LOSSLESS_QLOG){
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ s->spatial_idwt_buffer[y*w + x]<<=FRAC_BITS;
+ }
+ }
+ }
+ predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);
+ }else{
+ //ME/MC only
+ if(pic->pict_type == AV_PICTURE_TYPE_I){
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x]=
+ pict->data[plane_index][y*pict->linesize[plane_index] + x];
+ }
+ }
+ }else{
+ memset(s->spatial_idwt_buffer, 0, sizeof(IDWTELEM)*w*h);
+ predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);
+ }
+ }
- if(avctx->flags&CODEC_FLAG_PASS1)
++ if(s->avctx->flags&AV_CODEC_FLAG_PSNR){
+ int64_t error= 0;
+
+ if(pict->data[plane_index]) //FIXME gray hack
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ int d= s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x] - pict->data[plane_index][y*pict->linesize[plane_index] + x];
+ error += d*d;
+ }
+ }
+ s->avctx->error[plane_index] += error;
+ s->current_picture->error[plane_index] = error;
+ }
+
+ }
+
+ update_last_header_values(s);
+
+ ff_snow_release_buffer(avctx);
+
+ s->current_picture->coded_picture_number = avctx->frame_number;
+ s->current_picture->pict_type = pict->pict_type;
+ s->current_picture->quality = pict->quality;
+ s->m.frame_bits = 8*(s->c.bytestream - s->c.bytestream_start);
+ s->m.p_tex_bits = s->m.frame_bits - s->m.misc_bits - s->m.mv_bits;
+ s->m.current_picture.f->display_picture_number =
+ s->m.current_picture.f->coded_picture_number = avctx->frame_number;
+ s->m.current_picture.f->quality = pic->quality;
+ s->m.total_bits += 8*(s->c.bytestream - s->c.bytestream_start);
+ if(s->pass1_rc)
+ if (ff_rate_estimate_qscale(&s->m, 0) < 0)
+ return -1;
++ if(avctx->flags&AV_CODEC_FLAG_PASS1)
+ ff_write_pass1_stats(&s->m);
+ s->m.last_pict_type = s->m.pict_type;
+ avctx->frame_bits = s->m.frame_bits;
+ avctx->mv_bits = s->m.mv_bits;
+ avctx->misc_bits = s->m.misc_bits;
+ avctx->p_tex_bits = s->m.p_tex_bits;
+
+ emms_c();
+
+ pkt->size = ff_rac_terminate(c);
+ if (s->current_picture->key_frame)
+ pkt->flags |= AV_PKT_FLAG_KEY;
+ *got_packet = 1;
+
+ return 0;
+}
+
+static av_cold int encode_end(AVCodecContext *avctx)
+{
+ SnowContext *s = avctx->priv_data;
+
+ ff_snow_common_end(s);
+ ff_rate_control_uninit(&s->m);
+ av_frame_free(&s->input_picture);
+ av_freep(&avctx->stats_out);
+
+ return 0;
+}
+
+#define OFFSET(x) offsetof(SnowContext, x)
+#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
+static const AVOption options[] = {
+ FF_MPV_COMMON_OPTS
+ { "memc_only", "Only do ME/MC (I frames -> ref, P frame -> ME+MC).", OFFSET(memc_only), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
+ { "no_bitstream", "Skip final bitstream writeout.", OFFSET(no_bitstream), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
+ { "intra_penalty", "Penalty for intra blocks in block decission", OFFSET(intra_penalty), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
+ { NULL },
+};
+
+static const AVClass snowenc_class = {
+ .class_name = "snow encoder",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+AVCodec ff_snow_encoder = {
+ .name = "snow",
+ .long_name = NULL_IF_CONFIG_SMALL("Snow"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_SNOW,
+ .priv_data_size = sizeof(SnowContext),
+ .init = encode_init,
+ .encode2 = encode_frame,
+ .close = encode_end,
+ .pix_fmts = (const enum AVPixelFormat[]){
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+ },
+ .priv_class = &snowenc_class,
+ .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
+ FF_CODEC_CAP_INIT_CLEANUP,
+};
+
+
+#ifdef TEST
+#undef malloc
+#undef free
+#undef printf
+
+#include "libavutil/lfg.h"
+#include "libavutil/mathematics.h"
+
+int main(void){
+#define width 256
+#define height 256
+ int buffer[2][width*height];
+ SnowContext s;
+ int i;
+ AVLFG prng;
+ s.spatial_decomposition_count=6;
+ s.spatial_decomposition_type=1;
+
+ s.temp_dwt_buffer = av_mallocz_array(width, sizeof(DWTELEM));
+ s.temp_idwt_buffer = av_mallocz_array(width, sizeof(IDWTELEM));
+
+ if (!s.temp_dwt_buffer || !s.temp_idwt_buffer) {
+ fprintf(stderr, "Failed to allocate memory\n");
+ return 1;
+ }
+
+ av_lfg_init(&prng, 1);
+
+ printf("testing 5/3 DWT\n");
+ for(i=0; i<width*height; i++)
+ buffer[0][i] = buffer[1][i] = av_lfg_get(&prng) % 54321 - 12345;
+
+ ff_spatial_dwt(buffer[0], s.temp_dwt_buffer, width, height, width, s.spatial_decomposition_type, s.spatial_decomposition_count);
+ ff_spatial_idwt((IDWTELEM*)buffer[0], s.temp_idwt_buffer, width, height, width, s.spatial_decomposition_type, s.spatial_decomposition_count);
+
+ for(i=0; i<width*height; i++)
+ if(buffer[0][i]!= buffer[1][i]) printf("fsck: %6d %12d %7d\n",i, buffer[0][i], buffer[1][i]);
+
+ printf("testing 9/7 DWT\n");
+ s.spatial_decomposition_type=0;
+ for(i=0; i<width*height; i++)
+ buffer[0][i] = buffer[1][i] = av_lfg_get(&prng) % 54321 - 12345;
+
+ ff_spatial_dwt(buffer[0], s.temp_dwt_buffer, width, height, width, s.spatial_decomposition_type, s.spatial_decomposition_count);
+ ff_spatial_idwt((IDWTELEM*)buffer[0], s.temp_idwt_buffer, width, height, width, s.spatial_decomposition_type, s.spatial_decomposition_count);
+
+ for(i=0; i<width*height; i++)
+ if(FFABS(buffer[0][i] - buffer[1][i])>20) printf("fsck: %6d %12d %7d\n",i, buffer[0][i], buffer[1][i]);
+
+ {
+ int level, orientation, x, y;
+ int64_t errors[8][4];
+ int64_t g=0;
+
+ memset(errors, 0, sizeof(errors));
+ s.spatial_decomposition_count=3;
+ s.spatial_decomposition_type=0;
+ for(level=0; level<s.spatial_decomposition_count; level++){
+ for(orientation=level ? 1 : 0; orientation<4; orientation++){
+ int w= width >> (s.spatial_decomposition_count-level);
+ int h= height >> (s.spatial_decomposition_count-level);
+ int stride= width << (s.spatial_decomposition_count-level);
+ DWTELEM *buf= buffer[0];
+ int64_t error=0;
+
+ if(orientation&1) buf+=w;
+ if(orientation>1) buf+=stride>>1;
+
+ memset(buffer[0], 0, sizeof(int)*width*height);
+ buf[w/2 + h/2*stride]= 256*256;
+ ff_spatial_idwt((IDWTELEM*)buffer[0], s.temp_idwt_buffer, width, height, width, s.spatial_decomposition_type, s.spatial_decomposition_count);
+ for(y=0; y<height; y++){
+ for(x=0; x<width; x++){
+ int64_t d= buffer[0][x + y*width];
+ error += d*d;
+ if(FFABS(width/2-x)<9 && FFABS(height/2-y)<9 && level==2) printf("%8"PRId64" ", d);
+ }
+ if(FFABS(height/2-y)<9 && level==2) printf("\n");
+ }
+ error= (int)(sqrt(error)+0.5);
+ errors[level][orientation]= error;
+ if(g) g=av_gcd(g, error);
+ else g= error;
+ }
+ }
+ printf("static int const visual_weight[][4]={\n");
+ for(level=0; level<s.spatial_decomposition_count; level++){
+ printf(" {");
+ for(orientation=0; orientation<4; orientation++){
+ printf("%8"PRId64",", errors[level][orientation]/g);
+ }
+ printf("},\n");
+ }
+ printf("};\n");
+ {
+ int level=2;
+ int w= width >> (s.spatial_decomposition_count-level);
+ //int h= height >> (s.spatial_decomposition_count-level);
+ int stride= width << (s.spatial_decomposition_count-level);
+ DWTELEM *buf= buffer[0];
+ int64_t error=0;
+
+ buf+=w;
+ buf+=stride>>1;
+
+ memset(buffer[0], 0, sizeof(int)*width*height);
+ for(y=0; y<height; y++){
+ for(x=0; x<width; x++){
+ int tab[4]={0,2,3,1};
+ buffer[0][x+width*y]= 256*256*tab[(x&1) + 2*(y&1)];
+ }
+ }
+ ff_spatial_dwt(buffer[0], s.temp_dwt_buffer, width, height, width, s.spatial_decomposition_type, s.spatial_decomposition_count);
+ for(y=0; y<height; y++){
+ for(x=0; x<width; x++){
+ int64_t d= buffer[0][x + y*width];
+ error += d*d;
+ if(FFABS(width/2-x)<9 && FFABS(height/2-y)<9) printf("%8"PRId64" ", d);
+ }
+ if(FFABS(height/2-y)<9) printf("\n");
+ }
+ }
+
+ }
+ return 0;
+}
+#endif /* TEST */
return AVERROR_INVALIDDATA;
}
- tctx->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
- avpriv_float_dsp_init(&tctx->fdsp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
++ tctx->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
+ if (!tctx->fdsp) {
+ ff_twinvq_decode_close(avctx);
+ return AVERROR(ENOMEM);
+ }
if ((ret = init_mdct_win(tctx))) {
av_log(avctx, AV_LOG_ERROR, "Error initializing MDCT\n");
ff_twinvq_decode_close(avctx);
avctx->rc_initial_buffer_occupancy = avctx->rc_buffer_size * 3 / 4;
}
- if (avctx->codec->init && !(avctx->active_thread_type & FF_THREAD_FRAME)) {
+ avctx->pts_correction_num_faulty_pts =
+ avctx->pts_correction_num_faulty_dts = 0;
+ avctx->pts_correction_last_pts =
+ avctx->pts_correction_last_dts = INT64_MIN;
+
- if ( !CONFIG_GRAY && avctx->flags & CODEC_FLAG_GRAY
++ if ( !CONFIG_GRAY && avctx->flags & AV_CODEC_FLAG_GRAY
+ && avctx->codec_descriptor->type == AVMEDIA_TYPE_VIDEO)
+ av_log(avctx, AV_LOG_WARNING,
+ "gray decoding requested but not enabled at configuration time\n");
+
+ if ( avctx->codec->init && (!(avctx->active_thread_type&FF_THREAD_FRAME)
+ || avctx->internal->frame_thread_encoder)) {
ret = avctx->codec->init(avctx);
if (ret < 0) {
goto free_and_end;
*got_packet_ptr = 0;
- if ((avctx->flags&CODEC_FLAG_PASS1) && avctx->stats_out)
+ if(CONFIG_FRAME_THREAD_ENCODER &&
+ avctx->internal->frame_thread_encoder && (avctx->active_thread_type&FF_THREAD_FRAME))
+ return ff_thread_video_encode_frame(avctx, avpkt, frame, got_packet_ptr);
+
++ if ((avctx->flags&AV_CODEC_FLAG_PASS1) && avctx->stats_out)
+ avctx->stats_out[0] = '\0';
+
if (!(avctx->codec->capabilities & CODEC_CAP_DELAY) && !frame) {
av_free_packet(avpkt);
av_init_packet(avpkt);
av_frame_unref(frame);
- if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size) {
- ret = avctx->codec->decode(avctx, frame, got_frame_ptr, avpkt);
+ if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size || (avctx->active_thread_type & FF_THREAD_FRAME)) {
+ uint8_t *side;
+ int side_size;
+ uint32_t discard_padding = 0;
+ uint8_t skip_reason = 0;
+ uint8_t discard_reason = 0;
+ // copy to ensure we do not change avpkt
+ AVPacket tmp = *avpkt;
+ int did_split = av_packet_split_side_data(&tmp);
+ ret = apply_param_change(avctx, &tmp);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n");
+ if (avctx->err_recognition & AV_EF_EXPLODE)
+ goto fail;
+ }
+
+ avctx->internal->pkt = &tmp;
+ if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
+ ret = ff_thread_decode_frame(avctx, frame, got_frame_ptr, &tmp);
+ else {
+ ret = avctx->codec->decode(avctx, frame, got_frame_ptr, &tmp);
+ av_assert0(ret <= tmp.size);
+ frame->pkt_dts = avpkt->dts;
+ }
if (ret >= 0 && *got_frame_ptr) {
avctx->frame_number++;
- frame->pkt_dts = avpkt->dts;
+ av_frame_set_best_effort_timestamp(frame,
+ guess_correct_pts(avctx,
+ frame->pkt_pts,
+ frame->pkt_dts));
if (frame->format == AV_SAMPLE_FMT_NONE)
frame->format = avctx->sample_fmt;
+ if (!frame->channel_layout)
+ frame->channel_layout = avctx->channel_layout;
+ if (!av_frame_get_channels(frame))
+ av_frame_set_channels(frame, avctx->channels);
+ if (!frame->sample_rate)
+ frame->sample_rate = avctx->sample_rate;
+ }
- !(avctx->flags2 & CODEC_FLAG2_SKIP_MANUAL)) {
+ side= av_packet_get_side_data(avctx->internal->pkt, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
+ if(side && side_size>=10) {
+ avctx->internal->skip_samples = AV_RL32(side);
+ discard_padding = AV_RL32(side + 4);
+ av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
+ avctx->internal->skip_samples, (int)discard_padding);
+ skip_reason = AV_RL8(side + 8);
+ discard_reason = AV_RL8(side + 9);
+ }
+ if (avctx->internal->skip_samples && *got_frame_ptr &&
- !(avctx->flags2 & CODEC_FLAG2_SKIP_MANUAL)) {
++ !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
+ if(frame->nb_samples <= avctx->internal->skip_samples){
+ *got_frame_ptr = 0;
+ avctx->internal->skip_samples -= frame->nb_samples;
+ av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
+ avctx->internal->skip_samples);
+ } else {
+ av_samples_copy(frame->extended_data, frame->extended_data, 0, avctx->internal->skip_samples,
+ frame->nb_samples - avctx->internal->skip_samples, avctx->channels, frame->format);
+ if(avctx->pkt_timebase.num && avctx->sample_rate) {
+ int64_t diff_ts = av_rescale_q(avctx->internal->skip_samples,
+ (AVRational){1, avctx->sample_rate},
+ avctx->pkt_timebase);
+ if(frame->pkt_pts!=AV_NOPTS_VALUE)
+ frame->pkt_pts += diff_ts;
+ if(frame->pkt_dts!=AV_NOPTS_VALUE)
+ frame->pkt_dts += diff_ts;
+ if (av_frame_get_pkt_duration(frame) >= diff_ts)
+ av_frame_set_pkt_duration(frame, av_frame_get_pkt_duration(frame) - diff_ts);
+ } else {
+ av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
+ }
+ av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
+ avctx->internal->skip_samples, frame->nb_samples);
+ frame->nb_samples -= avctx->internal->skip_samples;
+ avctx->internal->skip_samples = 0;
+ }
+ }
+
+ if (discard_padding > 0 && discard_padding <= frame->nb_samples && *got_frame_ptr &&
- if ((avctx->flags2 & CODEC_FLAG2_SKIP_MANUAL) && *got_frame_ptr) {
++ !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
+ if (discard_padding == frame->nb_samples) {
+ *got_frame_ptr = 0;
+ } else {
+ if(avctx->pkt_timebase.num && avctx->sample_rate) {
+ int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
+ (AVRational){1, avctx->sample_rate},
+ avctx->pkt_timebase);
+ if (av_frame_get_pkt_duration(frame) >= diff_ts)
+ av_frame_set_pkt_duration(frame, av_frame_get_pkt_duration(frame) - diff_ts);
+ } else {
+ av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");
+ }
+ av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n",
+ (int)discard_padding, frame->nb_samples);
+ frame->nb_samples -= discard_padding;
+ }
+ }
+
++ if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && *got_frame_ptr) {
+ AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
+ if (fside) {
+ AV_WL32(fside->data, avctx->internal->skip_samples);
+ AV_WL32(fside->data + 4, discard_padding);
+ AV_WL8(fside->data + 8, skip_reason);
+ AV_WL8(fside->data + 9, discard_reason);
+ avctx->internal->skip_samples = 0;
+ }
+ }
+fail:
+ avctx->internal->pkt = NULL;
+ if (did_split) {
+ av_packet_free_side_data(&tmp);
+ if(ret == tmp.size)
+ ret = avpkt->size;
+ }
+
+ if (ret >= 0 && *got_frame_ptr) {
if (!avctx->refcounted_frames) {
int err = unrefcount_frame(avci, frame);
if (err < 0)
}
/* Restore planes. Should be almost identical to Huffyuv's. */
- vble_restore_plane(ctx, pic, 0, offset, avctx->width, avctx->height);
+ vble_restore_plane(ctx, pic, &gb, 0, offset, avctx->width, avctx->height);
/* Chroma */
- if (!(ctx->avctx->flags & CODEC_FLAG_GRAY)) {
+ if (!(ctx->avctx->flags & AV_CODEC_FLAG_GRAY)) {
offset += avctx->width * avctx->height;
- vble_restore_plane(ctx, pic, 1, offset, width_uv, height_uv);
+ vble_restore_plane(ctx, pic, &gb, 1, offset, width_uv, height_uv);
offset += width_uv * height_uv;
- vble_restore_plane(ctx, pic, 2, offset, width_uv, height_uv);
+ vble_restore_plane(ctx, pic, &gb, 2, offset, width_uv, height_uv);
}
*got_frame = 1;
s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
s->dest[0] - v_dist * s->linesize - 8,
stride_y);
- if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
++ if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
s->idsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
s->dest[1] - 8 * s->uvlinesize - 8,
s->uvlinesize);
s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
s->dest[0] - v_dist * s->linesize + 8,
stride_y);
- if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
++ if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
s->idsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
s->dest[1] - 8 * s->uvlinesize,
s->uvlinesize);
vc1_decode_intra_block(v, s->block[i], i, val, mquant,
(i & 4) ? v->codingset2 : v->codingset);
- if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & CODEC_FLAG_GRAY))
- if ((i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
++ if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
continue;
v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
if (v->rangeredfrm)
} else if (val) {
pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
- CONFIG_GRAY && (i & 4) && (s->avctx->flags & CODEC_FLAG_GRAY), &block_tt);
- (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), &block_tt);
++ CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), &block_tt);
block_cbp |= pat << (i << 2);
if (!v->ttmbf && ttmb < 8)
ttmb = -1;
vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
(i & 4) ? v->codingset2 : v->codingset);
- if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & CODEC_FLAG_GRAY))
- if ((i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
++ if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
continue;
v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
if (v->rangeredfrm)
pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
first_block, s->dest[dst_idx] + off,
(i & 4) ? s->uvlinesize : s->linesize,
- CONFIG_GRAY && (i & 4) && (s->avctx->flags & CODEC_FLAG_GRAY),
- (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY),
++ CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY),
&block_tt);
block_cbp |= pat << (i << 2);
if (!v->ttmbf && ttmb < 8)
vc1_decode_intra_block(v, s->block[i], i, val, mquant,
(i & 4) ? v->codingset2 : v->codingset);
- if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & CODEC_FLAG_GRAY))
- if ((i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
++ if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
continue;
v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
if (i < 4) {
pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
first_block, s->dest[dst_idx] + off,
(i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
- CONFIG_GRAY && (i & 4) && (s->avctx->flags & CODEC_FLAG_GRAY), &block_tt);
- (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), &block_tt);
++ CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), &block_tt);
block_cbp |= pat << (i << 2);
if (!v->ttmbf && ttmb < 8)
ttmb = -1;
vc1_decode_intra_block(v, s->block[i], i, val, mquant,
(i & 4) ? v->codingset2 : v->codingset);
- if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & CODEC_FLAG_GRAY))
- if ((i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
++ if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
continue;
v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
first_block, s->dest[dst_idx] + off,
(i & 4) ? s->uvlinesize : s->linesize,
- CONFIG_GRAY && (i & 4) && (s->avctx->flags & CODEC_FLAG_GRAY),
- (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY),
++ CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY),
&block_tt);
block_cbp |= pat << (i << 2);
- if (!v->ttmbf && ttmb < 8) ttmb = -1;
+ if (!v->ttmbf && ttmb < 8)
+ ttmb = -1;
first_block = 0;
}
}
vc1_decode_intra_block(v, s->block[i], i, val, mquant,
(i & 4) ? v->codingset2 : v->codingset);
- if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & CODEC_FLAG_GRAY))
- if ((i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
++ if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
continue;
v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
if (v->rangeredfrm)
vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
first_block, s->dest[dst_idx] + off,
(i & 4) ? s->uvlinesize : s->linesize,
- CONFIG_GRAY && (i & 4) && (s->avctx->flags & CODEC_FLAG_GRAY), NULL);
- (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), NULL);
++ CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), NULL);
if (!v->ttmbf && ttmb < 8)
ttmb = -1;
first_block = 0;
vc1_decode_intra_block(v, s->block[i], i, val, mquant,
(i & 4) ? v->codingset2 : v->codingset);
- if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & CODEC_FLAG_GRAY))
- if ((i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
++ if (CONFIG_GRAY && (i > 3) && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
continue;
v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
if (v->rangeredfrm)
vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
first_block, s->dest[dst_idx] + off,
(i & 4) ? s->uvlinesize : s->linesize,
- CONFIG_GRAY && (i & 4) && (s->avctx->flags & CODEC_FLAG_GRAY), NULL);
- (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), NULL);
++ CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), NULL);
if (!v->ttmbf && ttmb < 8)
ttmb = -1;
first_block = 0;
vc1_decode_intra_block(v, s->block[i], i, val, mquant,
(i & 4) ? v->codingset2 : v->codingset);
- if (CONFIG_GRAY && i > 3 && (s->avctx->flags & CODEC_FLAG_GRAY))
- if (i > 3 && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
++ if (CONFIG_GRAY && i > 3 && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
continue;
v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
if (i < 4) {
pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
first_block, s->dest[dst_idx] + off,
(i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
- CONFIG_GRAY && (i & 4) && (s->avctx->flags & CODEC_FLAG_GRAY), &block_tt);
- (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), &block_tt);
++ CONFIG_GRAY && (i & 4) && (s->avctx->flags & AV_CODEC_FLAG_GRAY), &block_tt);
block_cbp |= pat << (i << 2);
if (!v->ttmbf && ttmb < 8)
ttmb = -1;
vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
- if (CONFIG_GRAY && k > 3 && (s->avctx->flags & CODEC_FLAG_GRAY))
- if (k > 3 && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
++ if (CONFIG_GRAY && k > 3 && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
continue;
v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
if (v->pq >= 9 && v->overlap) {
if (s->mb_x) {
v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
- if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
- if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
++ if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
}
if (!s->first_slice_line) {
v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
- if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
- if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
++ if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
}
vc1_decode_i_block_adv(v, block[k], k, val,
(k < 4) ? v->codingset : v->codingset2, mquant);
- if (CONFIG_GRAY && k > 3 && (s->avctx->flags & CODEC_FLAG_GRAY))
- if (k > 3 && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
++ if (CONFIG_GRAY && k > 3 && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
continue;
v->vc1dsp.vc1_inv_trans_8x8(block[k]);
}
if (s->mb_x)
v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
- if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY))
++ if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
for (j = 0; j < 2; j++) {
v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
if (s->mb_x)
if (s->mb_y == s->end_mb_y - 1) {
if (s->mb_x) {
v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
- if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
++ if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
+ }
}
v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
}
if (s->mb_x >= 2)
v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
- if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY))
++ if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
for (j = 0; j < 2; j++) {
v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
if (s->mb_x >= 2) {
if (s->mb_x)
v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
- if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY))
++ if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
for (j = 0; j < 2; j++) {
v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
if (s->mb_x >= 2) {
if (s->mb_x >= 2)
v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
- if (s->mb_x >= 2 && (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY))) {
- if (s->mb_x >= 2) {
++ if (s->mb_x >= 2 && (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))) {
for (j = 0; j < 2; j++) {
v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
}
if (s->mb_x)
v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
- if (s->mb_x && (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY))) {
- if (s->mb_x) {
++ if (s->mb_x && (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))) {
for (j = 0; j < 2; j++) {
v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
}
v->block[v->cur_blk_idx][0]);
v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][3],
v->block[v->cur_blk_idx][2]);
- if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
- if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
++ if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][4],
v->block[v->cur_blk_idx][4]);
v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][5],
v->block[v->cur_blk_idx][0]);
v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][3],
v->block[v->cur_blk_idx][1]);
- if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
- if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
++ if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][4],
v->block[v->cur_blk_idx][4]);
v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][5],
v->block[v->left_blk_idx][0]);
v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][3],
v->block[v->left_blk_idx][1]);
- if (!CONFIG_GRAY || !(s->avctx->flags & CODEC_FLAG_GRAY)) {
- if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
++ if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][4],
v->block[v->left_blk_idx][4]);
v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][5],
{
MpegEncContext *s = &v->s;
int i;
- int block_count = CONFIG_GRAY && (s->avctx->flags & CODEC_FLAG_GRAY) ? 4 : 6;
++ int block_count = CONFIG_GRAY && (s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 4 : 6;
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < block_count; i++) {
vc1_apply_p_v_loop_filter(v, i);
}
}
/* for grayscale we should not try to read from unknown area */
- if (CONFIG_GRAY && s->avctx->flags & CODEC_FLAG_GRAY) {
- if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
++ if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY) {
srcU = s->sc.edge_emu_buffer + 18 * s->linesize;
srcV = s->sc.edge_emu_buffer + 18 * s->linesize;
}
s->hdsp.put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
}
- if (CONFIG_GRAY && s->avctx->flags & CODEC_FLAG_GRAY)
- if (s->avctx->flags & AV_CODEC_FLAG_GRAY)
++ if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
return;
/* Chroma MC always uses qpel bilinear */
uvmx = (uvmx & 3) << 1;
if (!v->field_mode && !v->s.last_picture.f->data[0])
return;
- if (CONFIG_GRAY && s->avctx->flags & CODEC_FLAG_GRAY)
- if (s->avctx->flags & AV_CODEC_FLAG_GRAY)
++ if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
return;
- for (k = 0; k < 4; k++) {
- mvx[k] = s->mv[dir][k][0];
- mvy[k] = s->mv[dir][k][1];
- intra[k] = v->mb_type[0][s->block_index[k]];
- if (v->field_mode)
- mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
- }
-
/* calculate chroma MV vector from four luma MVs */
- if (!v->field_mode || (v->field_mode && !v->numref)) {
- valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
- chroma_ref_type = v->reffield;
+ if (!v->field_mode || !v->numref) {
+ int valid_count = get_chroma_mv(v, dir, &tx, &ty);
if (!valid_count) {
s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
int use_ic;
uint8_t (*lutuv)[256];
- if (CONFIG_GRAY && s->avctx->flags & CODEC_FLAG_GRAY)
- if (s->avctx->flags & AV_CODEC_FLAG_GRAY)
++ if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
return;
for (i = 0; i < 4; i++) {
}
/* for grayscale we should not try to read from unknown area */
- if (CONFIG_GRAY && s->avctx->flags & CODEC_FLAG_GRAY) {
- if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
++ if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY) {
srcU = s->sc.edge_emu_buffer + 18 * s->linesize;
srcV = s->sc.edge_emu_buffer + 18 * s->linesize;
}
dxy = (my & 2) | ((mx & 2) >> 1);
if (!v->rnd)
- s->hdsp.avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
+ s->hdsp.avg_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
else
- s->hdsp.avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
+ s->hdsp.avg_no_rnd_pixels_tab[dxy](s->dest[0], srcY, s->linesize, 16);
}
- if (CONFIG_GRAY && s->avctx->flags & CODEC_FLAG_GRAY)
- if (s->avctx->flags & AV_CODEC_FLAG_GRAY)
++ if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
return;
    /* Chroma MC always uses qpel bilinear */
uvmx = (uvmx & 3) << 1;
}
alpha = av_clip_uint16(sd->coefs[1][6]);
- for (plane = 0; plane < (CONFIG_GRAY && s->avctx->flags & CODEC_FLAG_GRAY ? 1 : 3); plane++) {
- for (plane = 0; plane < (s->avctx->flags & AV_CODEC_FLAG_GRAY ? 1 : 3); plane++) {
++ for (plane = 0; plane < (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY ? 1 : 3); plane++) {
int width = v->output_width>>!!plane;
for (row = 0; row < v->output_height>>!!plane; row++) {
wrong but it looks better than doing nothing. */
if (f && f->data[0])
- for (plane = 0; plane < (CONFIG_GRAY && s->avctx->flags & CODEC_FLAG_GRAY ? 1 : 3); plane++)
- for (plane = 0; plane < (s->avctx->flags & AV_CODEC_FLAG_GRAY ? 1 : 3); plane++)
++ for (plane = 0; plane < (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY ? 1 : 3); plane++)
for (i = 0; i < v->sprite_height>>!!plane; i++)
memset(f->data[plane] + i * f->linesize[plane],
plane ? 128 : 0, f->linesize[plane]);
if (!avctx->extradata_size || !avctx->extradata)
return -1;
- if (!CONFIG_GRAY || !(avctx->flags & CODEC_FLAG_GRAY))
- if (!(avctx->flags & AV_CODEC_FLAG_GRAY))
++ if (!CONFIG_GRAY || !(avctx->flags & AV_CODEC_FLAG_GRAY))
avctx->pix_fmt = ff_get_format(avctx, avctx->codec->pix_fmts);
- else
+ else {
avctx->pix_fmt = AV_PIX_FMT_GRAY8;
+ if (avctx->color_range == AVCOL_RANGE_UNSPECIFIED)
+ avctx->color_range = AVCOL_RANGE_MPEG;
+ }
v->s.avctx = avctx;
- if (ff_vc1_init_common(v) < 0)
- return -1;
+ if ((ret = ff_vc1_init_common(v)) < 0)
+ return ret;
+ // ensure static VLC tables are initialized
+ if ((ret = ff_msmpeg4_decode_init(avctx)) < 0)
+ return ret;
+ if ((ret = ff_vc1_decode_init_alloc_tables(v)) < 0)
+ return ret;
+ // Hack to ensure the above functions will be called
+ // again once we know all necessary settings.
+ // That this is necessary might indicate a bug.
+ ff_vc1_decode_end(avctx);
+
ff_blockdsp_init(&s->bdsp, avctx);
ff_h264chroma_init(&v->h264chroma, 8);
ff_qpeldsp_init(&s->qdsp);
int mby_start;
} *slices = NULL, *tmp;
- if(s->avctx->flags & CODEC_FLAG_LOW_DELAY)
+ v->second_field = 0;
+
++ if(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
+ s->low_delay = 1;
+
/* no supplementary picture */
if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
/* special case for last picture */
ff_mdct_init(&vc->mdct[0], bl0, 1, -1.0);
ff_mdct_init(&vc->mdct[1], bl1, 1, -1.0);
- vc->fdsp = avpriv_float_dsp_alloc(vc->avctx->flags & CODEC_FLAG_BITEXACT);
++ vc->fdsp = avpriv_float_dsp_alloc(vc->avctx->flags & AV_CODEC_FLAG_BITEXACT);
+ if (!vc->fdsp)
+ return AVERROR(ENOMEM);
ff_dlog(NULL, " vorbis version %d \n audio_channels %d \n audio_samplerate %d \n bitrate_max %d \n bitrate_nom %d \n bitrate_min %d \n blk_0 %d blk_1 %d \n ",
vc->version, vc->audio_channels, vc->audio_samplerate, vc->bitrate_maximum, vc->bitrate_nominal, vc->bitrate_minimum, vc->blocksize[0], vc->blocksize[1]);
goto error;
avctx->bit_rate = 0;
- if (avctx->flags & CODEC_FLAG_QSCALE)
+ if (avctx->flags & AV_CODEC_FLAG_QSCALE)
venc->quality = avctx->global_quality / (float)FF_QP2LAMBDA;
else
- venc->quality = 3.0;
+ venc->quality = 8;
venc->quality *= venc->quality;
if ((ret = put_main_header(venc, (uint8_t**)&avctx->extradata)) < 0)
s->avctx = avctx;
s->width = FFALIGN(avctx->coded_width, 16);
s->height = FFALIGN(avctx->coded_height, 16);
- if (avctx->pix_fmt == AV_PIX_FMT_NONE)
+ if (avctx->codec_id != AV_CODEC_ID_THEORA)
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
- ff_hpeldsp_init(&s->hdsp, avctx->flags | CODEC_FLAG_BITEXACT);
+ ff_hpeldsp_init(&s->hdsp, avctx->flags | AV_CODEC_FLAG_BITEXACT);
ff_videodsp_init(&s->vdsp, 8);
ff_vp3dsp_init(&s->vp3dsp, avctx->flags);
ret = ff_set_dimensions(avctx, s->width, s->height);
if (ret < 0)
return ret;
- if (!(avctx->flags2 & CODEC_FLAG2_IGNORE_CROP)) {
- if (!(avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP) &&
- (visible_width != s->width || visible_height != s->height)) {
++ if (!(avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP)) {
avctx->width = visible_width;
avctx->height = visible_height;
// translate offsets from theora axis ([0,0] lower left)
s->offset_x = offset_x;
s->offset_y = s->height - visible_height - offset_y;
- if ((s->offset_x & 0x1F) && !(avctx->flags & CODEC_FLAG_UNALIGNED)) {
+ if ((s->offset_x & 0x1F) && !(avctx->flags & AV_CODEC_FLAG_UNALIGNED)) {
s->offset_x &= ~0x1F;
- av_log(avctx, AV_LOG_WARNING, "Reducing offset_x from %d to %d"
- "chroma samples to preserve alignment.\n",
- offset_x, s->offset_x);
+ if (!s->offset_x_warned) {
+ s->offset_x_warned = 1;
+ av_log(avctx, AV_LOG_WARNING, "Reducing offset_x from %d to %d"
+ "chroma samples to preserve alignment.\n",
+ offset_x, s->offset_x);
+ }
}
}
#endif /* TRACE */
}
- s->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
++ s->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
+ if (!s->fdsp)
+ return AVERROR(ENOMEM);
+
/* choose the VLC tables for the coefficients */
coef_vlc_table = 2;
if (avctx->sample_rate >= 32000) {
}
s->avctx = avctx;
- s->fdsp = avpriv_float_dsp_alloc(avctx->flags & CODEC_FLAG_BITEXACT);
- avpriv_float_dsp_init(&s->fdsp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
++ s->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
+ if (!s->fdsp)
+ return AVERROR(ENOMEM);
init_put_bits(&s->pb, s->frame_data, MAX_FRAMESIZE);
w->wdsp.put_mspel_pixels_tab[dxy](dest_y + 8 * linesize, ptr + 8 * linesize, linesize);
w->wdsp.put_mspel_pixels_tab[dxy](dest_y + 8 + 8 * linesize, ptr + 8 + 8 * linesize, linesize);
- if (s->avctx->flags & CODEC_FLAG_GRAY)
+ if (s->avctx->flags & AV_CODEC_FLAG_GRAY)
return;
- if (s->out_format == FMT_H263) {
- dxy = 0;
- if ((motion_x & 3) != 0)
- dxy |= 1;
- if ((motion_y & 3) != 0)
- dxy |= 2;
- mx = motion_x >> 2;
- my = motion_y >> 2;
- } else {
- mx = motion_x / 2;
- my = motion_y / 2;
- dxy = ((my & 1) << 1) | (mx & 1);
- mx >>= 1;
- my >>= 1;
- }
+ dxy = 0;
+ if ((motion_x & 3) != 0)
+ dxy |= 1;
+ if ((motion_y & 3) != 0)
+ dxy |= 2;
+ mx = motion_x >> 2;
+ my = motion_y >> 2;
src_x = s->mb_x * 8 + mx;
src_y = s->mb_y * 8 + my;
c->avg_pixels_tab[1][0] = ff_avg_pixels8_mmxext;
c->avg_pixels_tab[1][1] = ff_avg_pixels8_x2_mmxext;
c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_mmxext;
+ c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_mmxext;
- if (!(flags & CODEC_FLAG_BITEXACT)) {
+ if (!(flags & AV_CODEC_FLAG_BITEXACT)) {
c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmxext;
c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmxext;
c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_mmxext;
c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_mmxext;
- c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmxext;
- c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_mmxext;
+ c->avg_pixels_tab[0][3] = avg_approx_pixels16_xy2_mmxext;
+ c->avg_pixels_tab[1][3] = ff_avg_approx_pixels8_xy2_mmxext;
}
- if (CONFIG_VP3_DECODER && flags & CODEC_FLAG_BITEXACT) {
- if (flags & AV_CODEC_FLAG_BITEXACT && CONFIG_VP3_DECODER) {
++ if (CONFIG_VP3_DECODER && flags & AV_CODEC_FLAG_BITEXACT) {
c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_exact_mmxext;
c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_exact_mmxext;
}
c->avg_pixels_tab[1][0] = ff_avg_pixels8_3dnow;
c->avg_pixels_tab[1][1] = ff_avg_pixels8_x2_3dnow;
c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_3dnow;
+ c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_3dnow;
- if (!(flags & CODEC_FLAG_BITEXACT)){
+ if (!(flags & AV_CODEC_FLAG_BITEXACT)){
c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_3dnow;
c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_3dnow;
- c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
- c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_3dnow;
+ c->avg_pixels_tab[0][3] = avg_approx_pixels16_xy2_3dnow;
+ c->avg_pixels_tab[1][3] = ff_avg_approx_pixels8_xy2_3dnow;
}
- if (CONFIG_VP3_DECODER && flags & CODEC_FLAG_BITEXACT) {
- if (flags & AV_CODEC_FLAG_BITEXACT && CONFIG_VP3_DECODER) {
++ if (CONFIG_VP3_DECODER && flags & AV_CODEC_FLAG_BITEXACT) {
c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_exact_3dnow;
c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_exact_3dnow;
}
c->sad[0] = sad16_mmx;
c->sad[1] = sad8_mmx;
- c->sse[0] = sse16_mmx;
- c->sse[1] = sse8_mmx;
c->vsad[4] = vsad_intra16_mmx;
- if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
- c->nsse[0] = nsse16_mmx;
- c->nsse[1] = nsse8_mmx;
-
+ if (!(avctx->flags & AV_CODEC_FLAG_BITEXACT)) {
c->vsad[0] = vsad16_mmx;
}
}
if (EXTERNAL_MMXEXT(cpu_flags)) {
c->hadamard8_diff[0] = ff_hadamard8_diff16_mmxext;
c->hadamard8_diff[1] = ff_hadamard8_diff_mmxext;
- if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
+ c->sum_abs_dctelem = ff_sum_abs_dctelem_mmxext;
+
+ c->sad[0] = ff_sad16_mmxext;
+ c->sad[1] = ff_sad8_mmxext;
+
+ c->pix_abs[0][0] = ff_sad16_mmxext;
+ c->pix_abs[0][1] = ff_sad16_x2_mmxext;
+ c->pix_abs[0][2] = ff_sad16_y2_mmxext;
+ c->pix_abs[1][0] = ff_sad8_mmxext;
+ c->pix_abs[1][1] = ff_sad8_x2_mmxext;
+ c->pix_abs[1][2] = ff_sad8_y2_mmxext;
+
+ c->vsad[4] = ff_vsad_intra16_mmxext;
+ c->vsad[5] = ff_vsad_intra8_mmxext;
+
++ if (!(avctx->flags & AV_CODEC_FLAG_BITEXACT)) {
+ c->pix_abs[0][3] = ff_sad16_approx_xy2_mmxext;
+ c->pix_abs[1][3] = ff_sad8_approx_xy2_mmxext;
+
+ c->vsad[0] = ff_vsad16_approx_mmxext;
+ c->vsad[1] = ff_vsad8_approx_mmxext;
+ }
}
if (EXTERNAL_SSE2(cpu_flags)) {
c->hadamard8_diff[0] = ff_hadamard8_diff16_sse2;
c->hadamard8_diff[1] = ff_hadamard8_diff_sse2;
#endif
- if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
+ if (!(cpu_flags & AV_CPU_FLAG_SSE2SLOW) && avctx->codec_id != AV_CODEC_ID_SNOW) {
+ c->sad[0] = ff_sad16_sse2;
+ c->pix_abs[0][0] = ff_sad16_sse2;
+ c->pix_abs[0][1] = ff_sad16_x2_sse2;
+ c->pix_abs[0][2] = ff_sad16_y2_sse2;
+
+ c->vsad[4] = ff_vsad_intra16_sse2;
++ if (!(avctx->flags & AV_CODEC_FLAG_BITEXACT)) {
+ c->pix_abs[0][3] = ff_sad16_approx_xy2_sse2;
+ c->vsad[0] = ff_vsad16_approx_sse2;
+ }
+ }
}
- if (EXTERNAL_SSSE3(cpu_flags) && HAVE_ALIGNED_STACK) {
+ if (EXTERNAL_SSSE3(cpu_flags)) {
+ c->sum_abs_dctelem = ff_sum_abs_dctelem_ssse3;
+#if HAVE_ALIGNED_STACK
c->hadamard8_diff[0] = ff_hadamard8_diff16_ssse3;
c->hadamard8_diff[1] = ff_hadamard8_diff_ssse3;
+#endif
}
}
--- /dev/null
- enc_ctx->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
+/*
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * Motion Compensation Deinterlacer
+ * Ported from MPlayer libmpcodecs/vf_mcdeint.c.
+ *
+ * Known Issues:
+ *
+ * The motion estimation is somewhat at the mercy of the input: if the
+ * input frames are created purely by spatial interpolation then, for
+ * example, a thin black line or another random, non-interpolatable
+ * pattern will cause problems.
+ * Note: completely ignoring the "unavailable" lines during motion
+ * estimation did not look any better, so the most obvious solution
+ * would be to improve tfields or penalize problematic motion vectors.
+ *
+ * If non-iterative ME is used, then snow currently ignores the OBMC
+ * window and as a result sometimes creates artifacts.
+ *
+ * Only past frames are used; we should ideally use future frames too.
+ * Something like filtering the whole movie in forward and then backward
+ * direction seems like an interesting idea, but the current filter
+ * framework is FAR from supporting such things.
+ *
+ * Combining the motion-compensated image with the input image is also
+ * not as trivial as it seems: blindly taking even lines from one and
+ * odd ones from the other does not work at all, as ME/MC sometimes has
+ * nothing in the previous frames which matches the current frame. The
+ * current algorithm has been found by trial and error and can almost
+ * certainly be improved...
+ */
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavcodec/avcodec.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+
+enum MCDeintMode {
+ MODE_FAST = 0,
+ MODE_MEDIUM,
+ MODE_SLOW,
+ MODE_EXTRA_SLOW,
+ MODE_NB,
+};
+
+enum MCDeintParity {
+ PARITY_TFF = 0, ///< top field first
+ PARITY_BFF = 1, ///< bottom field first
+};
+
+typedef struct {
+ const AVClass *class;
+ int mode; ///< MCDeintMode
+ int parity; ///< MCDeintParity
+ int qp;
+ AVCodecContext *enc_ctx;
+} MCDeintContext;
+
+#define OFFSET(x) offsetof(MCDeintContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit }
+
+static const AVOption mcdeint_options[] = {
+ { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_FAST}, 0, MODE_NB-1, FLAGS, .unit="mode" },
+ CONST("fast", NULL, MODE_FAST, "mode"),
+ CONST("medium", NULL, MODE_MEDIUM, "mode"),
+ CONST("slow", NULL, MODE_SLOW, "mode"),
+ CONST("extra_slow", NULL, MODE_EXTRA_SLOW, "mode"),
+
+ { "parity", "set the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=PARITY_BFF}, -1, 1, FLAGS, "parity" },
+ CONST("tff", "assume top field first", PARITY_TFF, "parity"),
+ CONST("bff", "assume bottom field first", PARITY_BFF, "parity"),
+
+ { "qp", "set qp", OFFSET(qp), AV_OPT_TYPE_INT, {.i64=1}, INT_MIN, INT_MAX, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(mcdeint);
+
+static int config_props(AVFilterLink *inlink)
+{
+ AVFilterContext *ctx = inlink->dst;
+ MCDeintContext *mcdeint = ctx->priv;
+ AVCodec *enc;
+ AVCodecContext *enc_ctx;
+ AVDictionary *opts = NULL;
+ int ret;
+
+ if (!(enc = avcodec_find_encoder(AV_CODEC_ID_SNOW))) {
+ av_log(ctx, AV_LOG_ERROR, "Snow encoder is not enabled in libavcodec\n");
+ return AVERROR(EINVAL);
+ }
+
+ mcdeint->enc_ctx = avcodec_alloc_context3(enc);
+ if (!mcdeint->enc_ctx)
+ return AVERROR(ENOMEM);
+ enc_ctx = mcdeint->enc_ctx;
+ enc_ctx->width = inlink->w;
+ enc_ctx->height = inlink->h;
+ enc_ctx->time_base = (AVRational){1,25}; // meaningless
+ enc_ctx->gop_size = INT_MAX;
+ enc_ctx->max_b_frames = 0;
+ enc_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
- enc_ctx->flags |= CODEC_FLAG_4MV;
++ enc_ctx->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_LOW_DELAY;
+ enc_ctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
+ enc_ctx->global_quality = 1;
+ enc_ctx->me_cmp = enc_ctx->me_sub_cmp = FF_CMP_SAD;
+ enc_ctx->mb_cmp = FF_CMP_SSE;
+ av_dict_set(&opts, "memc_only", "1", 0);
+ av_dict_set(&opts, "no_bitstream", "1", 0);
+
+ switch (mcdeint->mode) {
+ case MODE_EXTRA_SLOW:
+ enc_ctx->refs = 3;
+ case MODE_SLOW:
+ enc_ctx->me_method = ME_ITER;
+ case MODE_MEDIUM:
- enc_ctx->flags |= CODEC_FLAG_QPEL;
++ enc_ctx->flags |= AV_CODEC_FLAG_4MV;
+ enc_ctx->dia_size = 2;
+ case MODE_FAST:
++ enc_ctx->flags |= AV_CODEC_FLAG_QPEL;
+ }
+
+ ret = avcodec_open2(enc_ctx, enc, &opts);
+ av_dict_free(&opts);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ MCDeintContext *mcdeint = ctx->priv;
+
+ if (mcdeint->enc_ctx) {
+ avcodec_close(mcdeint->enc_ctx);
+ av_freep(&mcdeint->enc_ctx);
+ }
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE
+ };
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
+{
+ MCDeintContext *mcdeint = inlink->dst->priv;
+ AVFilterLink *outlink = inlink->dst->outputs[0];
+ AVFrame *outpic, *frame_dec;
+ AVPacket pkt = {0};
+ int x, y, i, ret, got_frame = 0;
+
+ outpic = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+ if (!outpic) {
+ av_frame_free(&inpic);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(outpic, inpic);
+ inpic->quality = mcdeint->qp * FF_QP2LAMBDA;
+
+ av_init_packet(&pkt);
+
+ ret = avcodec_encode_video2(mcdeint->enc_ctx, &pkt, inpic, &got_frame);
+ if (ret < 0)
+ goto end;
+
+ frame_dec = mcdeint->enc_ctx->coded_frame;
+
+ for (i = 0; i < 3; i++) {
+ int is_chroma = !!i;
+ int w = FF_CEIL_RSHIFT(inlink->w, is_chroma);
+ int h = FF_CEIL_RSHIFT(inlink->h, is_chroma);
+ int fils = frame_dec->linesize[i];
+ int srcs = inpic ->linesize[i];
+ int dsts = outpic ->linesize[i];
+
+ for (y = 0; y < h; y++) {
+ if ((y ^ mcdeint->parity) & 1) {
+ for (x = 0; x < w; x++) {
+ uint8_t *filp = &frame_dec->data[i][x + y*fils];
+ uint8_t *srcp = &inpic ->data[i][x + y*srcs];
+ uint8_t *dstp = &outpic ->data[i][x + y*dsts];
+
+ if (y > 0 && y < h-1){
+ int is_edge = x < 3 || x > w-4;
+ int diff0 = filp[-fils] - srcp[-srcs];
+ int diff1 = filp[+fils] - srcp[+srcs];
+ int temp = filp[0];
+
+#define DELTA(j) av_clip(j, -x, w-1-x)
+
+#define GET_SCORE_EDGE(j)\
+ FFABS(srcp[-srcs+DELTA(-1+(j))] - srcp[+srcs+DELTA(-1-(j))])+\
+ FFABS(srcp[-srcs+DELTA(j) ] - srcp[+srcs+DELTA( -(j))])+\
+ FFABS(srcp[-srcs+DELTA(1+(j)) ] - srcp[+srcs+DELTA( 1-(j))])
+
+#define GET_SCORE(j)\
+ FFABS(srcp[-srcs-1+(j)] - srcp[+srcs-1-(j)])+\
+ FFABS(srcp[-srcs +(j)] - srcp[+srcs -(j)])+\
+ FFABS(srcp[-srcs+1+(j)] - srcp[+srcs+1-(j)])
+
+#define CHECK_EDGE(j)\
+ { int score = GET_SCORE_EDGE(j);\
+ if (score < spatial_score){\
+ spatial_score = score;\
+ diff0 = filp[-fils+DELTA(j)] - srcp[-srcs+DELTA(j)];\
+ diff1 = filp[+fils+DELTA(-(j))] - srcp[+srcs+DELTA(-(j))];\
+
+#define CHECK(j)\
+ { int score = GET_SCORE(j);\
+ if (score < spatial_score){\
+ spatial_score= score;\
+ diff0 = filp[-fils+(j)] - srcp[-srcs+(j)];\
+ diff1 = filp[+fils-(j)] - srcp[+srcs-(j)];\
+
+ if (is_edge) {
+ int spatial_score = GET_SCORE_EDGE(0) - 1;
+ CHECK_EDGE(-1) CHECK_EDGE(-2) }} }}
+ CHECK_EDGE( 1) CHECK_EDGE( 2) }} }}
+ } else {
+ int spatial_score = GET_SCORE(0) - 1;
+ CHECK(-1) CHECK(-2) }} }}
+ CHECK( 1) CHECK( 2) }} }}
+ }
+
+
+ if (diff0 + diff1 > 0)
+ temp -= (diff0 + diff1 - FFABS(FFABS(diff0) - FFABS(diff1)) / 2) / 2;
+ else
+ temp -= (diff0 + diff1 + FFABS(FFABS(diff0) - FFABS(diff1)) / 2) / 2;
+ *filp = *dstp = temp > 255U ? ~(temp>>31) : temp;
+ } else {
+ *dstp = *filp;
+ }
+ }
+ }
+ }
+
+ for (y = 0; y < h; y++) {
+ if (!((y ^ mcdeint->parity) & 1)) {
+ for (x = 0; x < w; x++) {
+ frame_dec->data[i][x + y*fils] =
+ outpic ->data[i][x + y*dsts] = inpic->data[i][x + y*srcs];
+ }
+ }
+ }
+ }
+ mcdeint->parity ^= 1;
+
+end:
+ av_free_packet(&pkt);
+ av_frame_free(&inpic);
+ if (ret < 0) {
+ av_frame_free(&outpic);
+ return ret;
+ }
+ return ff_filter_frame(outlink, outpic);
+}
+
+static const AVFilterPad mcdeint_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ .config_props = config_props,
+ },
+ { NULL }
+};
+
+static const AVFilterPad mcdeint_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_mcdeint = {
+ .name = "mcdeint",
+ .description = NULL_IF_CONFIG_SMALL("Apply motion compensating deinterlacing."),
+ .priv_size = sizeof(MCDeintContext),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = mcdeint_inputs,
+ .outputs = mcdeint_outputs,
+ .priv_class = &mcdeint_class,
+};
--- /dev/null
- avctx_enc->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
+/*
+ * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+ * Copyright (c) 2014 Arwa Arif <arwaarif1994@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * Ultra Slow/Simple Post-processing filter.
+ *
+ * Originally written by Michael Niedermayer for the MPlayer project, and
+ * ported by Arwa Arif for FFmpeg.
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "internal.h"
+#include "avfilter.h"
+
+#define MAX_LEVEL 8 /* quality levels */
+#define BLOCK 16
+
+typedef struct {
+ const AVClass *av_class;
+ int log2_count;
+ int hsub, vsub;
+ int qp;
+ int qscale_type;
+ int temp_stride[3];
+ uint8_t *src[3];
+ uint16_t *temp[3];
+ int outbuf_size;
+ uint8_t *outbuf;
+ AVCodecContext *avctx_enc[BLOCK*BLOCK];
+ AVFrame *frame;
+ AVFrame *frame_dec;
+ uint8_t *non_b_qp_table;
+ int non_b_qp_alloc_size;
+ int use_bframe_qp;
+} USPPContext;
+
+#define OFFSET(x) offsetof(USPPContext, x)
+#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
+static const AVOption uspp_options[] = {
+ { "quality", "set quality", OFFSET(log2_count), AV_OPT_TYPE_INT, {.i64 = 3}, 0, MAX_LEVEL, FLAGS },
+ { "qp", "force a constant quantizer parameter", OFFSET(qp), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 63, FLAGS },
+ { "use_bframe_qp", "use B-frames' QP", OFFSET(use_bframe_qp), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(uspp);
+
+DECLARE_ALIGNED(8, static const uint8_t, dither)[8][8] = {
+ { 0*4, 48*4, 12*4, 60*4, 3*4, 51*4, 15*4, 63*4, },
+ { 32*4, 16*4, 44*4, 28*4, 35*4, 19*4, 47*4, 31*4, },
+ { 8*4, 56*4, 4*4, 52*4, 11*4, 59*4, 7*4, 55*4, },
+ { 40*4, 24*4, 36*4, 20*4, 43*4, 27*4, 39*4, 23*4, },
+ { 2*4, 50*4, 14*4, 62*4, 1*4, 49*4, 13*4, 61*4, },
+ { 34*4, 18*4, 46*4, 30*4, 33*4, 17*4, 45*4, 29*4, },
+ { 10*4, 58*4, 6*4, 54*4, 9*4, 57*4, 5*4, 53*4, },
+ { 42*4, 26*4, 38*4, 22*4, 41*4, 25*4, 37*4, 21*4, },
+};
+
+static const uint8_t offset[511][2] = {
+ { 0, 0},
+ { 0, 0}, { 8, 8}, // quality 1
+ { 0, 0}, { 4, 4}, {12, 8}, { 8,12}, // quality 2
+ { 0, 0}, {10, 2}, { 4, 4}, {14, 6}, { 8, 8}, { 2,10}, {12,12}, { 6,14}, // quality 3
+
+ { 0, 0}, {10, 2}, { 4, 4}, {14, 6}, { 8, 8}, { 2,10}, {12,12}, { 6,14},
+ { 5, 1}, {15, 3}, { 9, 5}, { 3, 7}, {13, 9}, { 7,11}, { 1,13}, {11,15}, // quality 4
+
+ { 0, 0}, { 8, 0}, { 0, 8}, { 8, 8}, { 5, 1}, {13, 1}, { 5, 9}, {13, 9},
+ { 2, 2}, {10, 2}, { 2,10}, {10,10}, { 7, 3}, {15, 3}, { 7,11}, {15,11},
+ { 4, 4}, {12, 4}, { 4,12}, {12,12}, { 1, 5}, { 9, 5}, { 1,13}, { 9,13},
+ { 6, 6}, {14, 6}, { 6,14}, {14,14}, { 3, 7}, {11, 7}, { 3,15}, {11,15}, // quality 5
+
+ { 0, 0}, { 8, 0}, { 0, 8}, { 8, 8}, { 4, 0}, {12, 0}, { 4, 8}, {12, 8},
+ { 1, 1}, { 9, 1}, { 1, 9}, { 9, 9}, { 5, 1}, {13, 1}, { 5, 9}, {13, 9},
+ { 3, 2}, {11, 2}, { 3,10}, {11,10}, { 7, 2}, {15, 2}, { 7,10}, {15,10},
+ { 2, 3}, {10, 3}, { 2,11}, {10,11}, { 6, 3}, {14, 3}, { 6,11}, {14,11},
+ { 0, 4}, { 8, 4}, { 0,12}, { 8,12}, { 4, 4}, {12, 4}, { 4,12}, {12,12},
+ { 1, 5}, { 9, 5}, { 1,13}, { 9,13}, { 5, 5}, {13, 5}, { 5,13}, {13,13},
+ { 3, 6}, {11, 6}, { 3,14}, {11,14}, { 7, 6}, {15, 6}, { 7,14}, {15,14},
+ { 2, 7}, {10, 7}, { 2,15}, {10,15}, { 6, 7}, {14, 7}, { 6,15}, {14,15}, // quality 6
+
+ { 0, 0}, { 8, 0}, { 0, 8}, { 8, 8}, { 0, 2}, { 8, 2}, { 0,10}, { 8,10},
+ { 0, 4}, { 8, 4}, { 0,12}, { 8,12}, { 0, 6}, { 8, 6}, { 0,14}, { 8,14},
+ { 1, 1}, { 9, 1}, { 1, 9}, { 9, 9}, { 1, 3}, { 9, 3}, { 1,11}, { 9,11},
+ { 1, 5}, { 9, 5}, { 1,13}, { 9,13}, { 1, 7}, { 9, 7}, { 1,15}, { 9,15},
+ { 2, 0}, {10, 0}, { 2, 8}, {10, 8}, { 2, 2}, {10, 2}, { 2,10}, {10,10},
+ { 2, 4}, {10, 4}, { 2,12}, {10,12}, { 2, 6}, {10, 6}, { 2,14}, {10,14},
+ { 3, 1}, {11, 1}, { 3, 9}, {11, 9}, { 3, 3}, {11, 3}, { 3,11}, {11,11},
+ { 3, 5}, {11, 5}, { 3,13}, {11,13}, { 3, 7}, {11, 7}, { 3,15}, {11,15},
+ { 4, 0}, {12, 0}, { 4, 8}, {12, 8}, { 4, 2}, {12, 2}, { 4,10}, {12,10},
+ { 4, 4}, {12, 4}, { 4,12}, {12,12}, { 4, 6}, {12, 6}, { 4,14}, {12,14},
+ { 5, 1}, {13, 1}, { 5, 9}, {13, 9}, { 5, 3}, {13, 3}, { 5,11}, {13,11},
+ { 5, 5}, {13, 5}, { 5,13}, {13,13}, { 5, 7}, {13, 7}, { 5,15}, {13,15},
+ { 6, 0}, {14, 0}, { 6, 8}, {14, 8}, { 6, 2}, {14, 2}, { 6,10}, {14,10},
+ { 6, 4}, {14, 4}, { 6,12}, {14,12}, { 6, 6}, {14, 6}, { 6,14}, {14,14},
+ { 7, 1}, {15, 1}, { 7, 9}, {15, 9}, { 7, 3}, {15, 3}, { 7,11}, {15,11},
+ { 7, 5}, {15, 5}, { 7,13}, {15,13}, { 7, 7}, {15, 7}, { 7,15}, {15,15}, // quality 7
+
+ { 0, 0}, { 8, 0}, { 0, 8}, { 8, 8}, { 4, 4}, {12, 4}, { 4,12}, {12,12},
+ { 0, 4}, { 8, 4}, { 0,12}, { 8,12}, { 4, 0}, {12, 0}, { 4, 8}, {12, 8},
+ { 2, 2}, {10, 2}, { 2,10}, {10,10}, { 6, 6}, {14, 6}, { 6,14}, {14,14},
+ { 2, 6}, {10, 6}, { 2,14}, {10,14}, { 6, 2}, {14, 2}, { 6,10}, {14,10},
+ { 0, 2}, { 8, 2}, { 0,10}, { 8,10}, { 4, 6}, {12, 6}, { 4,14}, {12,14},
+ { 0, 6}, { 8, 6}, { 0,14}, { 8,14}, { 4, 2}, {12, 2}, { 4,10}, {12,10},
+ { 2, 0}, {10, 0}, { 2, 8}, {10, 8}, { 6, 4}, {14, 4}, { 6,12}, {14,12},
+ { 2, 4}, {10, 4}, { 2,12}, {10,12}, { 6, 0}, {14, 0}, { 6, 8}, {14, 8},
+ { 1, 1}, { 9, 1}, { 1, 9}, { 9, 9}, { 5, 5}, {13, 5}, { 5,13}, {13,13},
+ { 1, 5}, { 9, 5}, { 1,13}, { 9,13}, { 5, 1}, {13, 1}, { 5, 9}, {13, 9},
+ { 3, 3}, {11, 3}, { 3,11}, {11,11}, { 7, 7}, {15, 7}, { 7,15}, {15,15},
+ { 3, 7}, {11, 7}, { 3,15}, {11,15}, { 7, 3}, {15, 3}, { 7,11}, {15,11},
+ { 1, 3}, { 9, 3}, { 1,11}, { 9,11}, { 5, 7}, {13, 7}, { 5,15}, {13,15},
+ { 1, 7}, { 9, 7}, { 1,15}, { 9,15}, { 5, 3}, {13, 3}, { 5,11}, {13,11}, // quality 8
+ { 3, 1}, {11, 1}, { 3, 9}, {11, 9}, { 7, 5}, {15, 5}, { 7,13}, {15,13},
+ { 3, 5}, {11, 5}, { 3,13}, {11,13}, { 7, 1}, {15, 1}, { 7, 9}, {15, 9},
+ { 0, 1}, { 8, 1}, { 0, 9}, { 8, 9}, { 4, 5}, {12, 5}, { 4,13}, {12,13},
+ { 0, 5}, { 8, 5}, { 0,13}, { 8,13}, { 4, 1}, {12, 1}, { 4, 9}, {12, 9},
+ { 2, 3}, {10, 3}, { 2,11}, {10,11}, { 6, 7}, {14, 7}, { 6,15}, {14,15},
+ { 2, 7}, {10, 7}, { 2,15}, {10,15}, { 6, 3}, {14, 3}, { 6,11}, {14,11},
+ { 0, 3}, { 8, 3}, { 0,11}, { 8,11}, { 4, 7}, {12, 7}, { 4,15}, {12,15},
+ { 0, 7}, { 8, 7}, { 0,15}, { 8,15}, { 4, 3}, {12, 3}, { 4,11}, {12,11},
+ { 2, 1}, {10, 1}, { 2, 9}, {10, 9}, { 6, 5}, {14, 5}, { 6,13}, {14,13},
+ { 2, 5}, {10, 5}, { 2,13}, {10,13}, { 6, 1}, {14, 1}, { 6, 9}, {14, 9},
+ { 1, 0}, { 9, 0}, { 1, 8}, { 9, 8}, { 5, 4}, {13, 4}, { 5,12}, {13,12},
+ { 1, 4}, { 9, 4}, { 1,12}, { 9,12}, { 5, 0}, {13, 0}, { 5, 8}, {13, 8},
+ { 3, 2}, {11, 2}, { 3,10}, {11,10}, { 7, 6}, {15, 6}, { 7,14}, {15,14},
+ { 3, 6}, {11, 6}, { 3,14}, {11,14}, { 7, 2}, {15, 2}, { 7,10}, {15,10},
+ { 1, 2}, { 9, 2}, { 1,10}, { 9,10}, { 5, 6}, {13, 6}, { 5,14}, {13,14},
+ { 1, 6}, { 9, 6}, { 1,14}, { 9,14}, { 5, 2}, {13, 2}, { 5,10}, {13,10},
+ { 3, 0}, {11, 0}, { 3, 8}, {11, 8}, { 7, 4}, {15, 4}, { 7,12}, {15,12},
+ { 3, 4}, {11, 4}, { 3,12}, {11,12}, { 7, 0}, {15, 0}, { 7, 8}, {15, 8},
+};
+
+static void store_slice_c(uint8_t *dst, const uint16_t *src,
+ int dst_stride, int src_stride,
+ int width, int height, int log2_scale)
+{
+ int y, x;
+
+#define STORE(pos) do { \
+ temp = ((src[x + y * src_stride + pos] << log2_scale) + d[pos]) >> 8; \
+ if (temp & 0x100) temp = ~(temp >> 31); \
+ dst[x + y * dst_stride + pos] = temp; \
+} while (0)
+
+ for (y = 0; y < height; y++) {
+ const uint8_t *d = dither[y&7];
+ for (x = 0; x < width; x += 8) {
+ int temp;
+ STORE(0);
+ STORE(1);
+ STORE(2);
+ STORE(3);
+ STORE(4);
+ STORE(5);
+ STORE(6);
+ STORE(7);
+ }
+ }
+}
+
+static void filter(USPPContext *p, uint8_t *dst[3], uint8_t *src[3],
+ int dst_stride[3], int src_stride[3], int width,
+ int height, uint8_t *qp_store, int qp_stride)
+{
+ int x, y, i, j;
+ const int count = 1<<p->log2_count;
+
+ for (i = 0; i < 3; i++) {
+ int is_chroma = !!i;
+ int w = FF_CEIL_RSHIFT(width, is_chroma ? p->hsub : 0);
+ int h = FF_CEIL_RSHIFT(height, is_chroma ? p->vsub : 0);
+ int stride = p->temp_stride[i];
+ int block = BLOCK >> (is_chroma ? p->hsub : 0);
+
+ if (!src[i] || !dst[i])
+ continue;
+ for (y = 0; y < h; y++) {
+ int index = block + block * stride + y * stride;
+
+ memcpy(p->src[i] + index, src[i] + y * src_stride[i], w );
+ for (x = 0; x < block; x++) {
+ p->src[i][index - x - 1] = p->src[i][index + x ];
+ p->src[i][index + w + x ] = p->src[i][index + w - x - 1];
+ }
+ }
+ for (y = 0; y < block; y++) {
+ memcpy(p->src[i] + ( block-1-y) * stride, p->src[i] + ( y+block ) * stride, stride);
+ memcpy(p->src[i] + (h+block +y) * stride, p->src[i] + (h-y+block-1) * stride, stride);
+ }
+
+ p->frame->linesize[i] = stride;
+ memset(p->temp[i], 0, (h + 2 * block) * stride * sizeof(int16_t));
+ }
+
+ if (p->qp)
+ p->frame->quality = p->qp * FF_QP2LAMBDA;
+ else {
+ int qpsum=0;
+ int qpcount = (height>>4) * (width>>4);
+
+ for (y = 0; y < (height>>4); y++) {
+ for (x = 0; x < (width>>4); x++)
+ qpsum += qp_store[x + y * qp_stride];
+ }
+ p->frame->quality = ff_norm_qscale((qpsum + qpcount/2) / qpcount, p->qscale_type) * FF_QP2LAMBDA;
+ }
+// init per MB qscale stuff FIXME
+ p->frame->height = height;
+ p->frame->width = width;
+
+ for (i = 0; i < count; i++) {
+ const int x1 = offset[i+count-1][0];
+ const int y1 = offset[i+count-1][1];
+ const int x1c = x1 >> p->hsub;
+ const int y1c = y1 >> p->vsub;
+ const int BLOCKc = BLOCK >> p->hsub;
+ int offset;
+ AVPacket pkt = {0};
+ int got_pkt_ptr;
+
+ av_init_packet(&pkt);
+ pkt.data = p->outbuf;
+ pkt.size = p->outbuf_size;
+
+ p->frame->data[0] = p->src[0] + x1 + y1 * p->frame->linesize[0];
+ p->frame->data[1] = p->src[1] + x1c + y1c * p->frame->linesize[1];
+ p->frame->data[2] = p->src[2] + x1c + y1c * p->frame->linesize[2];
+ p->frame->format = p->avctx_enc[i]->pix_fmt;
+
+ avcodec_encode_video2(p->avctx_enc[i], &pkt, p->frame, &got_pkt_ptr);
+ p->frame_dec = p->avctx_enc[i]->coded_frame;
+
+ offset = (BLOCK-x1) + (BLOCK-y1) * p->frame_dec->linesize[0];
+
+ for (y = 0; y < height; y++)
+ for (x = 0; x < width; x++)
+ p->temp[0][x + y * p->temp_stride[0]] += p->frame_dec->data[0][x + y * p->frame_dec->linesize[0] + offset];
+
+ if (!src[2] || !dst[2])
+ continue;
+
+ offset = (BLOCKc-x1c) + (BLOCKc-y1c) * p->frame_dec->linesize[1];
+
+ for (y = 0; y < FF_CEIL_RSHIFT(height, p->vsub); y++) {
+ for (x = 0; x < FF_CEIL_RSHIFT(width, p->hsub); x++) {
+ p->temp[1][x + y * p->temp_stride[1]] += p->frame_dec->data[1][x + y * p->frame_dec->linesize[1] + offset];
+ p->temp[2][x + y * p->temp_stride[2]] += p->frame_dec->data[2][x + y * p->frame_dec->linesize[2] + offset];
+ }
+ }
+ }
+
+ for (j = 0; j < 3; j++) {
+ int is_chroma = !!j;
+ if (!dst[j])
+ continue;
+ store_slice_c(dst[j], p->temp[j], dst_stride[j], p->temp_stride[j],
+ FF_CEIL_RSHIFT(width, is_chroma ? p->hsub : 0),
+ FF_CEIL_RSHIFT(height, is_chroma ? p->vsub : 0),
+ 8-p->log2_count);
+ }
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUVJ444P,
+ AV_PIX_FMT_YUVJ420P,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_NONE
+ };
+
+ AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
+ if (!fmts_list)
+ return AVERROR(ENOMEM);
+ return ff_set_common_formats(ctx, fmts_list);
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+
+ AVFilterContext *ctx = inlink->dst;
+ USPPContext *uspp = ctx->priv;
+ const int height = inlink->h;
+ const int width = inlink->w;
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
+ int i;
+
+ AVCodec *enc = avcodec_find_encoder(AV_CODEC_ID_SNOW);
+ if (!enc) {
+ av_log(ctx, AV_LOG_ERROR, "SNOW encoder not found.\n");
+ return AVERROR(EINVAL);
+ }
+
+ uspp->hsub = desc->log2_chroma_w;
+ uspp->vsub = desc->log2_chroma_h;
+
+ for (i = 0; i < 3; i++) {
+ int is_chroma = !!i;
+ int w = (width + 4 * BLOCK-1) & (~(2 * BLOCK-1));
+ int h = (height + 4 * BLOCK-1) & (~(2 * BLOCK-1));
+
+ if (is_chroma) {
+ w = FF_CEIL_RSHIFT(w, uspp->hsub);
+ h = FF_CEIL_RSHIFT(h, uspp->vsub);
+ }
+
+ uspp->temp_stride[i] = w;
+ if (!(uspp->temp[i] = av_malloc_array(uspp->temp_stride[i], h * sizeof(int16_t))))
+ return AVERROR(ENOMEM);
+ if (!(uspp->src [i] = av_malloc_array(uspp->temp_stride[i], h * sizeof(uint8_t))))
+ return AVERROR(ENOMEM);
+ }
+
+ for (i = 0; i < (1<<uspp->log2_count); i++) {
+ AVCodecContext *avctx_enc;
+ AVDictionary *opts = NULL;
+ int ret;
+
+ if (!(uspp->avctx_enc[i] = avcodec_alloc_context3(NULL)))
+ return AVERROR(ENOMEM);
+
+ avctx_enc = uspp->avctx_enc[i];
+ avctx_enc->width = width + BLOCK;
+ avctx_enc->height = height + BLOCK;
+ avctx_enc->time_base = (AVRational){1,25}; // meaningless
+ avctx_enc->gop_size = INT_MAX;
+ avctx_enc->max_b_frames = 0;
+ avctx_enc->pix_fmt = inlink->format;
++ avctx_enc->flags = AV_CODEC_FLAG_QSCALE | AV_CODEC_FLAG_LOW_DELAY;
+ avctx_enc->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
+ avctx_enc->global_quality = 123;
+ av_dict_set(&opts, "no_bitstream", "1", 0);
+ ret = avcodec_open2(avctx_enc, enc, &opts);
+ if (ret < 0)
+ return ret;
+ av_dict_free(&opts);
+ av_assert0(avctx_enc->codec);
+ }
+
+ uspp->outbuf_size = (width + BLOCK) * (height + BLOCK) * 10;
+ if (!(uspp->frame = av_frame_alloc()))
+ return AVERROR(ENOMEM);
+ if (!(uspp->outbuf = av_malloc(uspp->outbuf_size)))
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+ AVFilterContext *ctx = inlink->dst;
+ USPPContext *uspp = ctx->priv;
+ AVFilterLink *outlink = ctx->outputs[0];
+ AVFrame *out = in;
+
+ int qp_stride = 0;
+ uint8_t *qp_table = NULL;
+
+ /* if we are not in a constant user quantizer mode and we don't want to use
+ * the quantizers from the B-frames (B-frames often have a higher QP), we
+ * need to save the qp table from the last non B-frame; this is what the
+ * following code block does */
+ if (!uspp->qp) {
+ qp_table = av_frame_get_qp_table(in, &qp_stride, &uspp->qscale_type);
+
+ if (qp_table && !uspp->use_bframe_qp && in->pict_type != AV_PICTURE_TYPE_B) {
+ int w, h;
+
+ /* if the qp stride is not set, it means the QP are only defined on
+ * a line basis */
+ if (!qp_stride) {
+ w = FF_CEIL_RSHIFT(inlink->w, 4);
+ h = 1;
+ } else {
+ w = qp_stride;
+ h = FF_CEIL_RSHIFT(inlink->h, 4);
+ }
+
+ if (w * h > uspp->non_b_qp_alloc_size) {
+ int ret = av_reallocp_array(&uspp->non_b_qp_table, w, h);
+ if (ret < 0) {
+ uspp->non_b_qp_alloc_size = 0;
+ return ret;
+ }
+ uspp->non_b_qp_alloc_size = w * h;
+ }
+
+ av_assert0(w * h <= uspp->non_b_qp_alloc_size);
+ memcpy(uspp->non_b_qp_table, qp_table, w * h);
+ }
+ }
+
+ if (uspp->log2_count && !ctx->is_disabled) {
+ if (!uspp->use_bframe_qp && uspp->non_b_qp_table)
+ qp_table = uspp->non_b_qp_table;
+
+ if (qp_table || uspp->qp) {
+
+ /* get a new frame if in-place is not possible or if the dimensions
+ * are not a multiple of 8 */
+ if (!av_frame_is_writable(in) || (inlink->w & 7) || (inlink->h & 7)) {
+ const int aligned_w = FFALIGN(inlink->w, 8);
+ const int aligned_h = FFALIGN(inlink->h, 8);
+
+ out = ff_get_video_buffer(outlink, aligned_w, aligned_h);
+ if (!out) {
+ av_frame_free(&in);
+ return AVERROR(ENOMEM);
+ }
+ av_frame_copy_props(out, in);
+ out->width = in->width;
+ out->height = in->height;
+ }
+
+ filter(uspp, out->data, in->data, out->linesize, in->linesize,
+ inlink->w, inlink->h, qp_table, qp_stride);
+ }
+ }
+
+ if (in != out) {
+ if (in->data[3])
+ av_image_copy_plane(out->data[3], out->linesize[3],
+ in ->data[3], in ->linesize[3],
+ inlink->w, inlink->h);
+ av_frame_free(&in);
+ }
+ return ff_filter_frame(outlink, out);
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ USPPContext *uspp = ctx->priv;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ av_freep(&uspp->temp[i]);
+ av_freep(&uspp->src[i]);
+ }
+
+ for (i = 0; i < (1 << uspp->log2_count); i++) {
+ avcodec_close(uspp->avctx_enc[i]);
+ av_freep(&uspp->avctx_enc[i]);
+ }
+
+ av_freep(&uspp->non_b_qp_table);
+ av_freep(&uspp->outbuf);
+ av_frame_free(&uspp->frame);
+}
+
+static const AVFilterPad uspp_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = config_input,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad uspp_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_uspp = {
+ .name = "uspp",
+ .description = NULL_IF_CONFIG_SMALL("Apply Ultra Simple / Slow Post-processing filter."),
+ .priv_size = sizeof(USPPContext),
+ .uninit = uninit,
+ .query_formats = query_formats,
+ .inputs = uspp_inputs,
+ .outputs = uspp_outputs,
+ .priv_class = &uspp_class,
+ .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
+};
--- /dev/null
+/*
+ * FFM (ffserver live feed) demuxer
+ * Copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavutil/intreadwrite.h"
+#include "libavutil/intfloat.h"
+#include "libavutil/opt.h"
+#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
+#include "avformat.h"
+#include "internal.h"
+#include "ffm.h"
+#include "avio_internal.h"
+
+static int ffm_is_avail_data(AVFormatContext *s, int size)
+{
+ FFMContext *ffm = s->priv_data;
+ int64_t pos, avail_size;
+ int len;
+
+ len = ffm->packet_end - ffm->packet_ptr;
+ if (size <= len)
+ return 1;
+ pos = avio_tell(s->pb);
+ if (!ffm->write_index) {
+ if (pos == ffm->file_size)
+ return AVERROR_EOF;
+ avail_size = ffm->file_size - pos;
+ } else {
+ if (pos == ffm->write_index) {
+ /* exactly at the end of stream */
+ return AVERROR(EAGAIN);
+ } else if (pos < ffm->write_index) {
+ avail_size = ffm->write_index - pos;
+ } else {
+ avail_size = (ffm->file_size - pos) + (ffm->write_index - FFM_PACKET_SIZE);
+ }
+ }
+ avail_size = (avail_size / ffm->packet_size) * (ffm->packet_size - FFM_HEADER_SIZE) + len;
+ if (size <= avail_size)
+ return 1;
+ else
+ return AVERROR(EAGAIN);
+}
+
+static int ffm_resync(AVFormatContext *s, int state)
+{
+ av_log(s, AV_LOG_ERROR, "resyncing\n");
+ while (state != PACKET_ID) {
+ if (avio_feof(s->pb)) {
+ av_log(s, AV_LOG_ERROR, "cannot find FFM syncword\n");
+ return -1;
+ }
+ state = (state << 8) | avio_r8(s->pb);
+ }
+ return 0;
+}
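Aside, not part of the patch: ffm_resync() above scans the input byte by byte until the 16-bit PACKET_ID sync word reappears. A hedged sketch of the same shift-register search over an in-memory buffer; find_sync() is a hypothetical helper, and the running state is explicitly masked to 16 bits here.

    /* sketch only: return the offset just past a 16-bit big-endian sync word,
     * or -1 if it is not present in the buffer */
    static int find_sync(const uint8_t *buf, int size, unsigned sync)
    {
        unsigned state = 0;
        int i;

        for (i = 0; i < size; i++) {
            state = ((state << 8) | buf[i]) & 0xffff;
            if (state == sync)
                return i + 1;
        }
        return -1;
    }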
+
+/* 'header' is non-zero if we are reading a frame header */
+static int ffm_read_data(AVFormatContext *s,
+ uint8_t *buf, int size, int header)
+{
+ FFMContext *ffm = s->priv_data;
+ AVIOContext *pb = s->pb;
+ int len, fill_size, size1, frame_offset, id;
+ int64_t last_pos = -1;
+
+ size1 = size;
+ while (size > 0) {
+ redo:
+ len = ffm->packet_end - ffm->packet_ptr;
+ if (len < 0)
+ return -1;
+ if (len > size)
+ len = size;
+ if (len == 0) {
+ if (avio_tell(pb) == ffm->file_size)
+ avio_seek(pb, ffm->packet_size, SEEK_SET);
+ retry_read:
+ if (pb->buffer_size != ffm->packet_size) {
+ int64_t tell = avio_tell(pb);
+ int ret = ffio_set_buf_size(pb, ffm->packet_size);
+ if (ret < 0)
+ return ret;
+ avio_seek(pb, tell, SEEK_SET);
+ }
+ id = avio_rb16(pb); /* PACKET_ID */
+ if (id != PACKET_ID) {
+ if (ffm_resync(s, id) < 0)
+ return -1;
+ last_pos = avio_tell(pb);
+ }
+ fill_size = avio_rb16(pb);
+ ffm->dts = avio_rb64(pb);
+ frame_offset = avio_rb16(pb);
+ avio_read(pb, ffm->packet, ffm->packet_size - FFM_HEADER_SIZE);
+ ffm->packet_end = ffm->packet + (ffm->packet_size - FFM_HEADER_SIZE - fill_size);
+ if (ffm->packet_end < ffm->packet || frame_offset < 0)
+ return -1;
+ /* if first packet or resynchronization packet, we must
+ handle it specifically */
+ if (ffm->first_packet || (frame_offset & 0x8000)) {
+ if (!frame_offset) {
+ /* This packet has no frame headers in it */
+ if (avio_tell(pb) >= ffm->packet_size * 3LL) {
+ int64_t seekback = FFMIN(ffm->packet_size * 2LL, avio_tell(pb) - last_pos);
+ seekback = FFMAX(seekback, 0);
+ avio_seek(pb, -seekback, SEEK_CUR);
+ goto retry_read;
+ }
+ /* This is bad, we cannot find a valid frame header */
+ return 0;
+ }
+ ffm->first_packet = 0;
+ if ((frame_offset & 0x7fff) < FFM_HEADER_SIZE)
+ return -1;
+ ffm->packet_ptr = ffm->packet + (frame_offset & 0x7fff) - FFM_HEADER_SIZE;
+ if (!header)
+ break;
+ } else {
+ ffm->packet_ptr = ffm->packet;
+ }
+ goto redo;
+ }
+ memcpy(buf, ffm->packet_ptr, len);
+ buf += len;
+ ffm->packet_ptr += len;
+ size -= len;
+ header = 0;
+ }
+ return size1 - size;
+}
+
+/* ensure that actual seeking happens between FFM_PACKET_SIZE
+ and file_size - FFM_PACKET_SIZE */
+static int64_t ffm_seek1(AVFormatContext *s, int64_t pos1)
+{
+ FFMContext *ffm = s->priv_data;
+ AVIOContext *pb = s->pb;
+ int64_t pos;
+
+ pos = FFMIN(pos1, ffm->file_size - FFM_PACKET_SIZE);
+ pos = FFMAX(pos, FFM_PACKET_SIZE);
+ av_dlog(s, "seek to %"PRIx64" -> %"PRIx64"\n", pos1, pos);
+ return avio_seek(pb, pos, SEEK_SET);
+}
+
+static int64_t get_dts(AVFormatContext *s, int64_t pos)
+{
+ AVIOContext *pb = s->pb;
+ int64_t dts;
+
+ ffm_seek1(s, pos);
+ avio_skip(pb, 4);
+ dts = avio_rb64(pb);
+ av_dlog(s, "dts=%0.6f\n", dts / 1000000.0);
+ return dts;
+}
+
+static void adjust_write_index(AVFormatContext *s)
+{
+ FFMContext *ffm = s->priv_data;
+ AVIOContext *pb = s->pb;
+ int64_t pts;
+ //int64_t orig_write_index = ffm->write_index;
+ int64_t pos_min, pos_max;
+ int64_t pts_start;
+ int64_t ptr = avio_tell(pb);
+
+
+ pos_min = 0;
+ pos_max = ffm->file_size - 2 * FFM_PACKET_SIZE;
+
+ pts_start = get_dts(s, pos_min);
+
+ pts = get_dts(s, pos_max);
+
+ if (pts - 100000 > pts_start)
+ goto end;
+
+ ffm->write_index = FFM_PACKET_SIZE;
+
+ pts_start = get_dts(s, pos_min);
+
+ pts = get_dts(s, pos_max);
+
+ if (pts - 100000 <= pts_start) {
+ while (1) {
+ int64_t newpos;
+ int64_t newpts;
+
+ newpos = ((pos_max + pos_min) / (2 * FFM_PACKET_SIZE)) * FFM_PACKET_SIZE;
+
+ if (newpos == pos_min)
+ break;
+
+ newpts = get_dts(s, newpos);
+
+ if (newpts - 100000 <= pts) {
+ pos_max = newpos;
+ pts = newpts;
+ } else {
+ pos_min = newpos;
+ }
+ }
+ ffm->write_index += pos_max;
+ }
+
+ end:
+ avio_seek(pb, ptr, SEEK_SET);
+}
+
+
+static int ffm_close(AVFormatContext *s)
+{
+ int i;
+
+ for (i = 0; i < s->nb_streams; i++)
+ av_freep(&s->streams[i]->codec->rc_eq);
+
+ return 0;
+}
+
+static int ffm_append_recommended_configuration(AVStream *st, char **conf)
+{
+ int ret;
+ size_t newsize;
+ av_assert0(conf && st);
+ if (!*conf)
+ return 0;
+ if (!st->recommended_encoder_configuration) {
+ st->recommended_encoder_configuration = *conf;
+ *conf = 0;
+ return 0;
+ }
+ newsize = strlen(*conf) + strlen(st->recommended_encoder_configuration) + 2;
+ if ((ret = av_reallocp(&st->recommended_encoder_configuration, newsize)) < 0)
+ return ret;
+ av_strlcat(st->recommended_encoder_configuration, ",", newsize);
+ av_strlcat(st->recommended_encoder_configuration, *conf, newsize);
+ av_freep(conf);
+ return 0;
+}
+
+static int ffm2_read_header(AVFormatContext *s)
+{
+ FFMContext *ffm = s->priv_data;
+ AVStream *st;
+ AVIOContext *pb = s->pb;
+ AVCodecContext *codec;
+ int ret;
+ int f_main = 0, f_cprv = -1, f_stvi = -1, f_stau = -1;
+ AVCodec *enc;
+ char *buffer;
+
+ ffm->packet_size = avio_rb32(pb);
+ if (ffm->packet_size != FFM_PACKET_SIZE) {
+ av_log(s, AV_LOG_ERROR, "Invalid packet size %d, expected size was %d\n",
+ ffm->packet_size, FFM_PACKET_SIZE);
+ ret = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+
+ ffm->write_index = avio_rb64(pb);
+ /* also get the file size */
+ if (pb->seekable) {
+ ffm->file_size = avio_size(pb);
+ if (ffm->write_index && 0)
+ adjust_write_index(s);
+ } else {
+ ffm->file_size = (UINT64_C(1) << 63) - 1;
+ }
+
+ while(!avio_feof(pb)) {
+ unsigned id = avio_rb32(pb);
+ unsigned size = avio_rb32(pb);
+ int64_t next = avio_tell(pb) + size;
+ char rc_eq_buf[128];
+
+ if(!id)
+ break;
+
+ switch(id) {
+ case MKBETAG('M', 'A', 'I', 'N'):
+ if (f_main++) {
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ avio_rb32(pb); /* nb_streams */
+ avio_rb32(pb); /* total bitrate */
+ break;
+ case MKBETAG('C', 'O', 'M', 'M'):
+ f_cprv = f_stvi = f_stau = 0;
+ st = avformat_new_stream(s, NULL);
+ if (!st) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ avpriv_set_pts_info(st, 64, 1, 1000000);
+
+ codec = st->codec;
+ /* generic info */
+ codec->codec_id = avio_rb32(pb);
+ codec->codec_type = avio_r8(pb);
+ codec->bit_rate = avio_rb32(pb);
+ codec->flags = avio_rb32(pb);
+ codec->flags2 = avio_rb32(pb);
+ codec->debug = avio_rb32(pb);
- if (codec->flags & CODEC_FLAG_GLOBAL_HEADER) {
++ if (codec->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
+ if (ff_get_extradata(codec, pb, avio_rb32(pb)) < 0)
+ return AVERROR(ENOMEM);
+ }
+ break;
+ case MKBETAG('S', 'T', 'V', 'I'):
+ if (f_stvi++) {
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ codec->time_base.num = avio_rb32(pb);
+ codec->time_base.den = avio_rb32(pb);
+ if (codec->time_base.num <= 0 || codec->time_base.den <= 0) {
+ av_log(s, AV_LOG_ERROR, "Invalid time base %d/%d\n",
+ codec->time_base.num, codec->time_base.den);
+ ret = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+ codec->width = avio_rb16(pb);
+ codec->height = avio_rb16(pb);
+ codec->gop_size = avio_rb16(pb);
+ codec->pix_fmt = avio_rb32(pb);
+ codec->qmin = avio_r8(pb);
+ codec->qmax = avio_r8(pb);
+ codec->max_qdiff = avio_r8(pb);
+ codec->qcompress = avio_rb16(pb) / 10000.0;
+ codec->qblur = avio_rb16(pb) / 10000.0;
+ codec->bit_rate_tolerance = avio_rb32(pb);
+ avio_get_str(pb, INT_MAX, rc_eq_buf, sizeof(rc_eq_buf));
+ codec->rc_eq = av_strdup(rc_eq_buf);
+ codec->rc_max_rate = avio_rb32(pb);
+ codec->rc_min_rate = avio_rb32(pb);
+ codec->rc_buffer_size = avio_rb32(pb);
+ codec->i_quant_factor = av_int2double(avio_rb64(pb));
+ codec->b_quant_factor = av_int2double(avio_rb64(pb));
+ codec->i_quant_offset = av_int2double(avio_rb64(pb));
+ codec->b_quant_offset = av_int2double(avio_rb64(pb));
+ codec->dct_algo = avio_rb32(pb);
+ codec->strict_std_compliance = avio_rb32(pb);
+ codec->max_b_frames = avio_rb32(pb);
+ codec->mpeg_quant = avio_rb32(pb);
+ codec->intra_dc_precision = avio_rb32(pb);
+ codec->me_method = avio_rb32(pb);
+ codec->mb_decision = avio_rb32(pb);
+ codec->nsse_weight = avio_rb32(pb);
+ codec->frame_skip_cmp = avio_rb32(pb);
+ codec->rc_buffer_aggressivity = av_int2double(avio_rb64(pb));
+ codec->codec_tag = avio_rb32(pb);
+ codec->thread_count = avio_r8(pb);
+ codec->coder_type = avio_rb32(pb);
+ codec->me_cmp = avio_rb32(pb);
+ codec->me_subpel_quality = avio_rb32(pb);
+ codec->me_range = avio_rb32(pb);
+ codec->keyint_min = avio_rb32(pb);
+ codec->scenechange_threshold = avio_rb32(pb);
+ codec->b_frame_strategy = avio_rb32(pb);
+ codec->qcompress = av_int2double(avio_rb64(pb));
+ codec->qblur = av_int2double(avio_rb64(pb));
+ codec->max_qdiff = avio_rb32(pb);
+ codec->refs = avio_rb32(pb);
+ break;
+ case MKBETAG('S', 'T', 'A', 'U'):
+ if (f_stau++) {
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ codec->sample_rate = avio_rb32(pb);
+ codec->channels = avio_rl16(pb);
+ codec->frame_size = avio_rl16(pb);
+ break;
+ case MKBETAG('C', 'P', 'R', 'V'):
+ if (f_cprv++) {
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ enc = avcodec_find_encoder(codec->codec_id);
+ if (enc && enc->priv_data_size && enc->priv_class) {
+ buffer = av_malloc(size + 1);
+ if (!buffer) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ avio_get_str(pb, size, buffer, size + 1);
+ if ((ret = ffm_append_recommended_configuration(st, &buffer)) < 0)
+ goto fail;
+ }
+ break;
+ case MKBETAG('S', '2', 'V', 'I'):
+ if (f_stvi++) {
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ buffer = av_malloc(size);
+ if (!buffer) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ avio_get_str(pb, INT_MAX, buffer, size);
+ av_set_options_string(codec, buffer, "=", ",");
+ if ((ret = ffm_append_recommended_configuration(st, &buffer)) < 0)
+ goto fail;
+ break;
+ case MKBETAG('S', '2', 'A', 'U'):
+ if (f_stau++) {
+ ret = AVERROR(EINVAL);
+ goto fail;
+ }
+ buffer = av_malloc(size);
+ if (!buffer) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ avio_get_str(pb, INT_MAX, buffer, size);
+ av_set_options_string(codec, buffer, "=", ",");
+ if ((ret = ffm_append_recommended_configuration(st, &buffer)) < 0)
+ goto fail;
+ break;
+ }
+ avio_seek(pb, next, SEEK_SET);
+ }
+
+ /* read bytes until the end of the block is reached */
+ while ((avio_tell(pb) % ffm->packet_size) != 0 && !pb->eof_reached)
+ avio_r8(pb);
+
+ /* init packet demux */
+ ffm->packet_ptr = ffm->packet;
+ ffm->packet_end = ffm->packet;
+ ffm->frame_offset = 0;
+ ffm->dts = 0;
+ ffm->read_state = READ_HEADER;
+ ffm->first_packet = 1;
+ return 0;
+ fail:
+ ffm_close(s);
+ return ret;
+}
+
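Aside, not part of the patch: the FFM2 header parsed above is a flat sequence of chunks, each a big-endian 32-bit tag, a big-endian 32-bit payload size and the payload itself, terminated by a zero tag; write_header_chunk() in the muxer below emits exactly this framing. A sketch of a reader over an in-memory buffer, with walk_header_chunks() being a hypothetical helper:

    /* sketch only: iterate over MAIN/COMM/STVI/... chunks in a header blob */
    static void walk_header_chunks(const uint8_t *p, const uint8_t *end)
    {
        while (end - p >= 8) {
            uint32_t tag  = AV_RB32(p);
            uint32_t size = AV_RB32(p + 4);

            if (!tag || size > end - p - 8)
                break;
            /* tag is e.g. MKBETAG('M','A','I','N') or MKBETAG('C','O','M','M'),
             * handled by the switch in ffm2_read_header() */
            p += 8 + size;
        }
    }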
+static int ffm_read_header(AVFormatContext *s)
+{
+ FFMContext *ffm = s->priv_data;
+ AVStream *st;
+ AVIOContext *pb = s->pb;
+ AVCodecContext *codec;
+ int i, nb_streams;
+ uint32_t tag;
+
+ /* header */
+ tag = avio_rl32(pb);
+ if (tag == MKTAG('F', 'F', 'M', '2'))
+ return ffm2_read_header(s);
+ if (tag != MKTAG('F', 'F', 'M', '1'))
+ goto fail;
+ ffm->packet_size = avio_rb32(pb);
+ if (ffm->packet_size != FFM_PACKET_SIZE)
+ goto fail;
+ ffm->write_index = avio_rb64(pb);
+ /* also get the file size */
+ if (pb->seekable) {
+ ffm->file_size = avio_size(pb);
+ if (ffm->write_index && 0)
+ adjust_write_index(s);
+ } else {
+ ffm->file_size = (UINT64_C(1) << 63) - 1;
+ }
+
+ nb_streams = avio_rb32(pb);
+ avio_rb32(pb); /* total bitrate */
+ /* read each stream */
+ for(i=0;i<nb_streams;i++) {
+ char rc_eq_buf[128];
+
+ st = avformat_new_stream(s, NULL);
+ if (!st)
+ goto fail;
+
+ avpriv_set_pts_info(st, 64, 1, 1000000);
+
+ codec = st->codec;
+ /* generic info */
+ codec->codec_id = avio_rb32(pb);
+ codec->codec_type = avio_r8(pb); /* codec_type */
+ codec->bit_rate = avio_rb32(pb);
+ codec->flags = avio_rb32(pb);
+ codec->flags2 = avio_rb32(pb);
+ codec->debug = avio_rb32(pb);
+ /* specific info */
+ switch(codec->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ codec->time_base.num = avio_rb32(pb);
+ codec->time_base.den = avio_rb32(pb);
+ if (codec->time_base.num <= 0 || codec->time_base.den <= 0) {
+ av_log(s, AV_LOG_ERROR, "Invalid time base %d/%d\n",
+ codec->time_base.num, codec->time_base.den);
+ goto fail;
+ }
+ codec->width = avio_rb16(pb);
+ codec->height = avio_rb16(pb);
+ codec->gop_size = avio_rb16(pb);
+ codec->pix_fmt = avio_rb32(pb);
+ codec->qmin = avio_r8(pb);
+ codec->qmax = avio_r8(pb);
+ codec->max_qdiff = avio_r8(pb);
+ codec->qcompress = avio_rb16(pb) / 10000.0;
+ codec->qblur = avio_rb16(pb) / 10000.0;
+ codec->bit_rate_tolerance = avio_rb32(pb);
+ avio_get_str(pb, INT_MAX, rc_eq_buf, sizeof(rc_eq_buf));
+ codec->rc_eq = av_strdup(rc_eq_buf);
+ codec->rc_max_rate = avio_rb32(pb);
+ codec->rc_min_rate = avio_rb32(pb);
+ codec->rc_buffer_size = avio_rb32(pb);
+ codec->i_quant_factor = av_int2double(avio_rb64(pb));
+ codec->b_quant_factor = av_int2double(avio_rb64(pb));
+ codec->i_quant_offset = av_int2double(avio_rb64(pb));
+ codec->b_quant_offset = av_int2double(avio_rb64(pb));
+ codec->dct_algo = avio_rb32(pb);
+ codec->strict_std_compliance = avio_rb32(pb);
+ codec->max_b_frames = avio_rb32(pb);
+ codec->mpeg_quant = avio_rb32(pb);
+ codec->intra_dc_precision = avio_rb32(pb);
+ codec->me_method = avio_rb32(pb);
+ codec->mb_decision = avio_rb32(pb);
+ codec->nsse_weight = avio_rb32(pb);
+ codec->frame_skip_cmp = avio_rb32(pb);
+ codec->rc_buffer_aggressivity = av_int2double(avio_rb64(pb));
+ codec->codec_tag = avio_rb32(pb);
+ codec->thread_count = avio_r8(pb);
+ codec->coder_type = avio_rb32(pb);
+ codec->me_cmp = avio_rb32(pb);
+ codec->me_subpel_quality = avio_rb32(pb);
+ codec->me_range = avio_rb32(pb);
+ codec->keyint_min = avio_rb32(pb);
+ codec->scenechange_threshold = avio_rb32(pb);
+ codec->b_frame_strategy = avio_rb32(pb);
+ codec->qcompress = av_int2double(avio_rb64(pb));
+ codec->qblur = av_int2double(avio_rb64(pb));
+ codec->max_qdiff = avio_rb32(pb);
+ codec->refs = avio_rb32(pb);
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ codec->sample_rate = avio_rb32(pb);
+ codec->channels = avio_rl16(pb);
+ codec->frame_size = avio_rl16(pb);
+ break;
+ default:
+ goto fail;
+ }
++ if (codec->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
+ if (ff_get_extradata(codec, pb, avio_rb32(pb)) < 0)
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ /* read bytes until the end of the block is reached */
+ while ((avio_tell(pb) % ffm->packet_size) != 0 && !pb->eof_reached)
+ avio_r8(pb);
+
+ /* init packet demux */
+ ffm->packet_ptr = ffm->packet;
+ ffm->packet_end = ffm->packet;
+ ffm->frame_offset = 0;
+ ffm->dts = 0;
+ ffm->read_state = READ_HEADER;
+ ffm->first_packet = 1;
+ return 0;
+ fail:
+ ffm_close(s);
+ return -1;
+}
+
+/* return < 0 if eof */
+static int ffm_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ int size;
+ FFMContext *ffm = s->priv_data;
+ int duration, ret;
+
+ switch(ffm->read_state) {
+ case READ_HEADER:
+ if ((ret = ffm_is_avail_data(s, FRAME_HEADER_SIZE+4)) < 0)
+ return ret;
+
+ av_dlog(s, "pos=%08"PRIx64" spos=%"PRIx64", write_index=%"PRIx64" size=%"PRIx64"\n",
+ avio_tell(s->pb), s->pb->pos, ffm->write_index, ffm->file_size);
+ if (ffm_read_data(s, ffm->header, FRAME_HEADER_SIZE, 1) !=
+ FRAME_HEADER_SIZE)
+ return -1;
+ if (ffm->header[1] & FLAG_DTS)
+ if (ffm_read_data(s, ffm->header+16, 4, 1) != 4)
+ return -1;
+ ffm->read_state = READ_DATA;
+ /* fall through */
+ case READ_DATA:
+ size = AV_RB24(ffm->header + 2);
+ if ((ret = ffm_is_avail_data(s, size)) < 0)
+ return ret;
+
+ duration = AV_RB24(ffm->header + 5);
+
+ if (av_new_packet(pkt, size) < 0) {
+ return AVERROR(ENOMEM);
+ }
+ pkt->stream_index = ffm->header[0];
+ if ((unsigned)pkt->stream_index >= s->nb_streams) {
+ av_log(s, AV_LOG_ERROR, "invalid stream index %d\n", pkt->stream_index);
+ av_free_packet(pkt);
+ ffm->read_state = READ_HEADER;
+ return -1;
+ }
+ pkt->pos = avio_tell(s->pb);
+ if (ffm->header[1] & FLAG_KEY_FRAME)
+ pkt->flags |= AV_PKT_FLAG_KEY;
+
+ ffm->read_state = READ_HEADER;
+ if (ffm_read_data(s, pkt->data, size, 0) != size) {
+ /* bad case: desynchronized packet; abort loading this packet */
+ av_free_packet(pkt);
+ return -1;
+ }
+ pkt->pts = AV_RB64(ffm->header+8);
+ if (ffm->header[1] & FLAG_DTS)
+ pkt->dts = pkt->pts - AV_RB32(ffm->header+16);
+ else
+ pkt->dts = pkt->pts;
+ pkt->duration = duration;
+ break;
+ }
+ return 0;
+}
+
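Aside, not part of the patch: a field-by-field sketch of the per-frame header decoded by ffm_read_packet() above and filled in by ffm_write_packet() in the muxer below. FRAME_HEADER_SIZE is assumed to be 16 (1+1+3+3+8 bytes); the trailing 4 bytes exist only when FLAG_DTS is set. The struct documents field order only, it is not meant to be overlaid on the byte stream.

    /* sketch only */
    typedef struct FFMFrameHeaderSketch {
        uint8_t  stream_index;  /* header[0]                                  */
        uint8_t  flags;         /* header[1]: FLAG_KEY_FRAME and/or FLAG_DTS  */
        uint32_t size;          /* AV_RB24(header + 2), payload size          */
        uint32_t duration;      /* AV_RB24(header + 5)                        */
        uint64_t pts;           /* AV_RB64(header + 8)                        */
        uint32_t pts_dts_delta; /* AV_RB32(header + 16), only with FLAG_DTS   */
    } FFMFrameHeaderSketch;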
+/* seek to a given time in the file. The file read pointer is
+ positioned at or before pts. XXX: the following code is quite
+ approximate */
+static int ffm_seek(AVFormatContext *s, int stream_index, int64_t wanted_pts, int flags)
+{
+ FFMContext *ffm = s->priv_data;
+ int64_t pos_min, pos_max, pos;
+ int64_t pts_min, pts_max, pts;
+ double pos1;
+
+ av_dlog(s, "wanted_pts=%0.6f\n", wanted_pts / 1000000.0);
+ /* find the position using linear interpolation (better than
+ bisection in typical cases) */
+ if (ffm->write_index && ffm->write_index < ffm->file_size) {
+ if (get_dts(s, FFM_PACKET_SIZE) < wanted_pts) {
+ pos_min = FFM_PACKET_SIZE;
+ pos_max = ffm->write_index - FFM_PACKET_SIZE;
+ } else {
+ pos_min = ffm->write_index;
+ pos_max = ffm->file_size - FFM_PACKET_SIZE;
+ }
+ } else {
+ pos_min = FFM_PACKET_SIZE;
+ pos_max = ffm->file_size - FFM_PACKET_SIZE;
+ }
+ while (pos_min <= pos_max) {
+ pts_min = get_dts(s, pos_min);
+ pts_max = get_dts(s, pos_max);
+ if (pts_min > wanted_pts || pts_max <= wanted_pts) {
+ pos = pts_min > wanted_pts ? pos_min : pos_max;
+ goto found;
+ }
+ /* linear interpolation */
+ pos1 = (double)(pos_max - pos_min) * (double)(wanted_pts - pts_min) /
+ (double)(pts_max - pts_min);
+ pos = (((int64_t)pos1) / FFM_PACKET_SIZE) * FFM_PACKET_SIZE;
+ if (pos <= pos_min)
+ pos = pos_min;
+ else if (pos >= pos_max)
+ pos = pos_max;
+ pts = get_dts(s, pos);
+ /* check if we are lucky */
+ if (pts == wanted_pts) {
+ goto found;
+ } else if (pts > wanted_pts) {
+ pos_max = pos - FFM_PACKET_SIZE;
+ } else {
+ pos_min = pos + FFM_PACKET_SIZE;
+ }
+ }
+ pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
+
+ found:
+ if (ffm_seek1(s, pos) < 0)
+ return -1;
+
+ /* reset read state */
+ ffm->read_state = READ_HEADER;
+ ffm->packet_ptr = ffm->packet;
+ ffm->packet_end = ffm->packet;
+ ffm->first_packet = 1;
+
+ return 0;
+}
+
+static int ffm_probe(AVProbeData *p)
+{
+ if (p->buf[0] == 'F' && p->buf[1] == 'F' && p->buf[2] == 'M' &&
+ (p->buf[3] == '1' || p->buf[3] == '2'))
+ return AVPROBE_SCORE_MAX + 1;
+ return 0;
+}
+
+AVInputFormat ff_ffm_demuxer = {
+ .name = "ffm",
+ .long_name = NULL_IF_CONFIG_SMALL("FFM (FFserver live feed)"),
+ .priv_data_size = sizeof(FFMContext),
+ .read_probe = ffm_probe,
+ .read_header = ffm_read_header,
+ .read_packet = ffm_read_packet,
+ .read_close = ffm_close,
+ .read_seek = ffm_seek,
+};
--- /dev/null
+/*
+ * FFM (ffserver live feed) muxer
+ * Copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/intreadwrite.h"
+#include "libavutil/intfloat.h"
+#include "libavutil/avassert.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/opt.h"
+#include "avformat.h"
+#include "avio_internal.h"
+#include "internal.h"
+#include "ffm.h"
+
+static void flush_packet(AVFormatContext *s)
+{
+ FFMContext *ffm = s->priv_data;
+ int fill_size, h;
+ AVIOContext *pb = s->pb;
+
+ fill_size = ffm->packet_end - ffm->packet_ptr;
+ memset(ffm->packet_ptr, 0, fill_size);
+
+ av_assert1(avio_tell(pb) % ffm->packet_size == 0);
+
+ /* put header */
+ avio_wb16(pb, PACKET_ID);
+ avio_wb16(pb, fill_size);
+ avio_wb64(pb, ffm->dts);
+ h = ffm->frame_offset;
+ if (ffm->first_packet)
+ h |= 0x8000;
+ avio_wb16(pb, h);
+ avio_write(pb, ffm->packet, ffm->packet_end - ffm->packet);
+ avio_flush(pb);
+
+ /* prepare next packet */
+ ffm->frame_offset = 0; /* no frame header written into the new packet yet */
+ ffm->packet_ptr = ffm->packet;
+ ffm->first_packet = 0;
+}
+
+/* 'header' is non-zero for the first chunk (the frame header) of a frame */
+static void ffm_write_data(AVFormatContext *s,
+ const uint8_t *buf, int size,
+ int64_t dts, int header)
+{
+ FFMContext *ffm = s->priv_data;
+ int len;
+
+ if (header && ffm->frame_offset == 0) {
+ ffm->frame_offset = ffm->packet_ptr - ffm->packet + FFM_HEADER_SIZE;
+ ffm->dts = dts;
+ }
+
+ /* write as many packets as needed */
+ while (size > 0) {
+ len = ffm->packet_end - ffm->packet_ptr;
+ if (len > size)
+ len = size;
+ memcpy(ffm->packet_ptr, buf, len);
+
+ ffm->packet_ptr += len;
+ buf += len;
+ size -= len;
+ if (ffm->packet_ptr >= ffm->packet_end)
+ flush_packet(s);
+ }
+}
+
+static void write_header_chunk(AVIOContext *pb, AVIOContext *dpb, unsigned id)
+{
+ uint8_t *dyn_buf;
+ int dyn_size= avio_close_dyn_buf(dpb, &dyn_buf);
+ avio_wb32(pb, id);
+ avio_wb32(pb, dyn_size);
+ avio_write(pb, dyn_buf, dyn_size);
+ av_free(dyn_buf);
+}
+
+static int ffm_write_header_codec_private_ctx(AVFormatContext *s, AVCodecContext *ctx, int type)
+{
+ AVIOContext *pb = s->pb;
+ AVIOContext *tmp;
+ char *buf = NULL;
+ int ret;
+ const AVCodec *enc = ctx->codec ? ctx->codec : avcodec_find_encoder(ctx->codec_id);
+
+ if (!enc) {
+ av_log(s, AV_LOG_WARNING, "Stream codec is not found. Codec private options are not stored.\n");
+ return 0;
+ }
+ if (ctx->priv_data && enc->priv_class && enc->priv_data_size) {
+ if ((ret = av_opt_serialize(ctx->priv_data, AV_OPT_FLAG_ENCODING_PARAM | type,
+ AV_OPT_SERIALIZE_SKIP_DEFAULTS, &buf, '=', ',')) < 0)
+ return ret;
+ if (buf && strlen(buf)) {
+ if (avio_open_dyn_buf(&tmp) < 0) {
+ av_free(buf);
+ return AVERROR(ENOMEM);
+ }
+ avio_put_str(tmp, buf);
+ write_header_chunk(pb, tmp, MKBETAG('C', 'P', 'R', 'V'));
+ }
+ av_free(buf);
+ }
+ return 0;
+}
+
+static int ffm_write_header_codec_ctx(AVIOContext *pb, AVCodecContext *ctx, unsigned tag, int type)
+{
+ AVIOContext *tmp;
+ char *buf = NULL;
+ int ret, need_coma = 0;
+
+#define SKIP_DEFAULTS AV_OPT_SERIALIZE_SKIP_DEFAULTS
+#define OPT_FLAGS_EXACT AV_OPT_SERIALIZE_OPT_FLAGS_EXACT
+#define ENC AV_OPT_FLAG_ENCODING_PARAM
+
+ if (avio_open_dyn_buf(&tmp) < 0)
+ return AVERROR(ENOMEM);
+ if ((ret = av_opt_serialize(ctx, ENC | type, SKIP_DEFAULTS, &buf, '=', ',')) < 0)
+ goto fail;
+ if (buf && strlen(buf)) {
+ avio_write(tmp, buf, strlen(buf));
+ av_freep(&buf);
+ need_coma = 1;
+ }
+ if ((ret = av_opt_serialize(ctx, 0, SKIP_DEFAULTS | OPT_FLAGS_EXACT, &buf, '=', ',')) < 0)
+ goto fail;
+ if (buf && strlen(buf)) {
+ if (need_coma)
+ avio_w8(tmp, ',');
+ avio_write(tmp, buf, strlen(buf));
+ }
+ av_freep(&buf);
+ avio_w8(tmp, 0);
+ write_header_chunk(pb, tmp, tag);
+ return 0;
+ fail:
+ av_free(buf);
+ ffio_free_dyn_buf(&tmp);
+ return ret;
+
+#undef SKIP_DEFAULTS
+#undef OPT_FLAGS_EXACT
+#undef ENC
+}
+
+static int ffm_write_recommended_config(AVIOContext *pb, AVCodecContext *ctx, unsigned tag,
+ const char *configuration)
+{
+ int ret;
+ const AVCodec *enc = ctx->codec ? ctx->codec : avcodec_find_encoder(ctx->codec_id);
+ AVIOContext *tmp;
+ AVDictionaryEntry *t = NULL;
+ AVDictionary *all = NULL, *comm = NULL, *prv = NULL;
+ char *buf = NULL;
+
+ if (!enc || !enc->priv_class || !enc->priv_data_size) {
+ /* codec is not known/has no private options, so save everything as common options */
+ if (avio_open_dyn_buf(&tmp) < 0)
+ return AVERROR(ENOMEM);
+ avio_put_str(tmp, configuration);
+ write_header_chunk(pb, tmp, tag);
+ return 0;
+ }
+
+ if ((ret = av_dict_parse_string(&all, configuration, "=", ",", 0)) < 0)
+ return ret;
+
+ while ((t = av_dict_get(all, "", t, AV_DICT_IGNORE_SUFFIX))) {
+ if (av_opt_find((void *)&enc->priv_class, t->key, NULL, 0, AV_OPT_SEARCH_FAKE_OBJ)) {
+ if ((ret = av_dict_set(&prv, t->key, t->value, 0)) < 0)
+ goto fail;
+ } else if ((ret = av_dict_set(&comm, t->key, t->value, 0)) < 0)
+ goto fail;
+ }
+
+ if (comm) {
+ if ((ret = av_dict_get_string(comm, &buf, '=', ',')) < 0 ||
+ (ret = avio_open_dyn_buf(&tmp)) < 0)
+ goto fail;
+ avio_put_str(tmp, buf);
+ av_freep(&buf);
+ write_header_chunk(pb, tmp, tag);
+ }
+ if (prv) {
+ if ((ret = av_dict_get_string(prv, &buf, '=', ',')) < 0 ||
+ (ret = avio_open_dyn_buf(&tmp)) < 0)
+ goto fail;
+ avio_put_str(tmp, buf);
+ write_header_chunk(pb, tmp, MKBETAG('C', 'P', 'R', 'V'));
+ }
+
+ fail:
+ av_free(buf);
+ av_dict_free(&all);
+ av_dict_free(&comm);
+ av_dict_free(&prv);
+ return ret;
+}
+
+static int ffm_write_header(AVFormatContext *s)
+{
+ FFMContext *ffm = s->priv_data;
+ AVDictionaryEntry *t;
+ AVStream *st;
+ AVIOContext *pb = s->pb;
+ AVCodecContext *codec;
+ int bit_rate, i, ret;
+
+ if ((t = av_dict_get(s->metadata, "creation_time", NULL, 0))) {
+ ret = av_parse_time(&ffm->start_time, t->value, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ ffm->packet_size = FFM_PACKET_SIZE;
+
+ /* header */
+ avio_wl32(pb, MKTAG('F', 'F', 'M', '2'));
+ avio_wb32(pb, ffm->packet_size);
+ avio_wb64(pb, 0); /* current write position */
+
+ if(avio_open_dyn_buf(&pb) < 0)
+ return AVERROR(ENOMEM);
+
+ avio_wb32(pb, s->nb_streams);
+ bit_rate = 0;
+ for(i=0;i<s->nb_streams;i++) {
+ st = s->streams[i];
+ bit_rate += st->codec->bit_rate;
+ }
+ avio_wb32(pb, bit_rate);
+
+ write_header_chunk(s->pb, pb, MKBETAG('M', 'A', 'I', 'N'));
+
+ /* list of streams */
+ for(i=0;i<s->nb_streams;i++) {
+ st = s->streams[i];
+ avpriv_set_pts_info(st, 64, 1, 1000000);
+ if(avio_open_dyn_buf(&pb) < 0)
+ return AVERROR(ENOMEM);
+
+ codec = st->codec;
+ /* generic info */
+ avio_wb32(pb, codec->codec_id);
+ avio_w8(pb, codec->codec_type);
+ avio_wb32(pb, codec->bit_rate);
+ avio_wb32(pb, codec->flags);
+ avio_wb32(pb, codec->flags2);
+ avio_wb32(pb, codec->debug);
++ if (codec->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
+ avio_wb32(pb, codec->extradata_size);
+ avio_write(pb, codec->extradata, codec->extradata_size);
+ }
+ write_header_chunk(s->pb, pb, MKBETAG('C', 'O', 'M', 'M'));
+ /* specific info */
+ switch(codec->codec_type) {
+ case AVMEDIA_TYPE_VIDEO:
+ if (st->recommended_encoder_configuration) {
+ av_log(NULL, AV_LOG_DEBUG, "writing recommended configuration: %s\n",
+ st->recommended_encoder_configuration);
+ if ((ret = ffm_write_recommended_config(s->pb, codec, MKBETAG('S', '2', 'V', 'I'),
+ st->recommended_encoder_configuration)) < 0)
+ return ret;
+ } else if ((ret = ffm_write_header_codec_ctx(s->pb, codec, MKBETAG('S', '2', 'V', 'I'), AV_OPT_FLAG_VIDEO_PARAM)) < 0 ||
+ (ret = ffm_write_header_codec_private_ctx(s, codec, AV_OPT_FLAG_VIDEO_PARAM)) < 0)
+ return ret;
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ if (st->recommended_encoder_configuration) {
+ av_log(NULL, AV_LOG_DEBUG, "writing recommended configuration: %s\n",
+ st->recommended_encoder_configuration);
+ if ((ret = ffm_write_recommended_config(s->pb, codec, MKBETAG('S', '2', 'A', 'U'),
+ st->recommended_encoder_configuration)) < 0)
+ return ret;
+ } else if ((ret = ffm_write_header_codec_ctx(s->pb, codec, MKBETAG('S', '2', 'A', 'U'), AV_OPT_FLAG_AUDIO_PARAM)) < 0 ||
+ (ret = ffm_write_header_codec_private_ctx(s, codec, AV_OPT_FLAG_AUDIO_PARAM)) < 0)
+ return ret;
+ break;
+ default:
+ return -1;
+ }
+ }
+ pb = s->pb;
+
+ avio_wb64(pb, 0); // end of header
+
+ /* flush until end of block reached */
+ while ((avio_tell(pb) % ffm->packet_size) != 0)
+ avio_w8(pb, 0);
+
+ avio_flush(pb);
+
+ /* init packet mux */
+ ffm->packet_ptr = ffm->packet;
+ ffm->packet_end = ffm->packet + ffm->packet_size - FFM_HEADER_SIZE;
+ av_assert0(ffm->packet_end >= ffm->packet);
+ ffm->frame_offset = 0;
+ ffm->dts = 0;
+ ffm->first_packet = 1;
+
+ return 0;
+}
+
+static int ffm_write_packet(AVFormatContext *s, AVPacket *pkt)
+{
+ FFMContext *ffm = s->priv_data;
+ int64_t dts;
+ uint8_t header[FRAME_HEADER_SIZE+4];
+ int header_size = FRAME_HEADER_SIZE;
+
+ dts = ffm->start_time + pkt->dts;
+ /* packet size & key_frame */
+ header[0] = pkt->stream_index;
+ header[1] = 0;
+ if (pkt->flags & AV_PKT_FLAG_KEY)
+ header[1] |= FLAG_KEY_FRAME;
+ AV_WB24(header+2, pkt->size);
+ AV_WB24(header+5, pkt->duration);
+ AV_WB64(header+8, ffm->start_time + pkt->pts);
+ if (pkt->pts != pkt->dts) {
+ header[1] |= FLAG_DTS;
+ AV_WB32(header+16, pkt->pts - pkt->dts);
+ header_size += 4;
+ }
+ ffm_write_data(s, header, header_size, dts, 1);
+ ffm_write_data(s, pkt->data, pkt->size, dts, 0);
+
+ return 0;
+}
+
+static int ffm_write_trailer(AVFormatContext *s)
+{
+ FFMContext *ffm = s->priv_data;
+
+ /* flush packets */
+ if (ffm->packet_ptr > ffm->packet)
+ flush_packet(s);
+
+ return 0;
+}
+
+AVOutputFormat ff_ffm_muxer = {
+ .name = "ffm",
+ .long_name = NULL_IF_CONFIG_SMALL("FFM (FFserver live feed)"),
+ .extensions = "ffm",
+ .priv_data_size = sizeof(FFMContext),
+ .audio_codec = AV_CODEC_ID_MP2,
+ .video_codec = AV_CODEC_ID_MPEG1VIDEO,
+ .write_header = ffm_write_header,
+ .write_packet = ffm_write_packet,
+ .write_trailer = ffm_write_trailer,
+ .flags = AVFMT_TS_NEGATIVE,
+};
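Aside, not part of the patch: at the container level an FFM stream is a sequence of fixed-size packets (ffm->packet_size bytes each), every one starting with the small header written by flush_packet() above and read back by ffm_read_data() in the demuxer. Counting the avio_wb* calls, that header is 14 bytes, which would be FFM_HEADER_SIZE. A field-by-field sketch (field order only, not an overlayable struct):

    /* sketch only */
    typedef struct FFMPacketHeaderSketch {
        uint16_t packet_id;    /* PACKET_ID sync word                            */
        uint16_t fill_size;    /* unused, zeroed bytes at the end of the payload */
        uint64_t dts;          /* dts of the first frame header in this packet   */
        uint16_t frame_offset; /* offset of that frame header (0 if none);
                                  bit 0x8000 marks the first packet              */
        /* followed by packet_size - FFM_HEADER_SIZE payload bytes               */
    } FFMPacketHeaderSketch;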
int ret;
if (st->codec->codec_tag == MKTAG('t','m','c','d')) {
- st->codec->extradata_size = size;
- st->codec->extradata = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
- if (!st->codec->extradata)
+ if ((int)size != size)
return AVERROR(ENOMEM);
- ret = ffio_read_size(pb, st->codec->extradata, size);
+
+ ret = ff_get_extradata(st->codec, pb, size);
if (ret < 0)
return ret;
- st->codec->flags2 |= CODEC_FLAG2_DROP_FRAME_TIMECODE;
+ if (size > 16) {
+ MOVStreamContext *tmcd_ctx = st->priv_data;
+ int val;
+ val = AV_RB32(st->codec->extradata + 4);
+ tmcd_ctx->tmcd_flags = val;
+ if (val & 1)
++ st->codec->flags2 |= AV_CODEC_FLAG2_DROP_FRAME_TIMECODE;
+ st->codec->time_base.den = st->codec->extradata[16]; /* number of frames */
+ st->codec->time_base.num = 1;
+ /* adjust for per frame dur in counter mode */
+ if (tmcd_ctx->tmcd_flags & 0x0008) {
+ int timescale = AV_RB32(st->codec->extradata + 8);
+ int framedur = AV_RB32(st->codec->extradata + 12);
+ st->codec->time_base.den *= timescale;
+ st->codec->time_base.num *= framedur;
+ }
+ if (size > 30) {
+ uint32_t len = AV_RB32(st->codec->extradata + 18); /* name atom length */
+ uint32_t format = AV_RB32(st->codec->extradata + 22);
+ if (format == AV_RB32("name") && (int64_t)size >= (int64_t)len + 18) {
+ uint16_t str_size = AV_RB16(st->codec->extradata + 26); /* string length */
+ if (str_size > 0 && size >= (int)str_size + 26) {
+ char *reel_name = av_malloc(str_size + 1);
+ if (!reel_name)
+ return AVERROR(ENOMEM);
+ memcpy(reel_name, st->codec->extradata + 30, str_size);
+ reel_name[str_size] = 0; /* Add null terminator */
+ /* don't add reel_name if empty string */
+ if (*reel_name == 0) {
+ av_free(reel_name);
+ } else {
+ av_dict_set(&st->metadata, "reel_name", reel_name, AV_DICT_DONT_STRDUP_VAL);
+ }
+ }
+ }
+ }
+ }
} else {
/* other codec type, just skip (rtp, mp4s ...) */
avio_skip(pb, size);
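Aside, not part of the patch: the byte offsets used by the tmcd parsing above, collected in one place. This is a reading of the code, not a normative description of the QuickTime timecode sample description; all multi-byte fields are big-endian and offsets are into st->codec->extradata.

    /* sketch only:
     *   +4   uint32  tmcd flags (bit 0: drop frame, bit 3: counter mode)
     *   +8   uint32  timescale         (used when counter mode is set)
     *   +12  uint32  frame duration    (used when counter mode is set)
     *   +16  uint8   frames per second (initial time_base.den)
     *   +18  uint32  name atom length
     *   +22  uint32  name atom type, expected to be "name"
     *   +26  uint16  reel name length
     *   +30  ...     reel name bytes
     */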
if ((ret = av_opt_set_dict(s, &tmp)) < 0)
goto fail;
+ if (s->priv_data && s->oformat->priv_class && *(const AVClass**)s->priv_data==s->oformat->priv_class &&
+ (ret = av_opt_set_dict2(s->priv_data, &tmp, AV_OPT_SEARCH_CHILDREN)) < 0)
+ goto fail;
#if FF_API_LAVF_BITEXACT
- if (s->nb_streams && s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)
+ if (s->nb_streams && s->streams[0]->codec->flags & AV_CODEC_FLAG_BITEXACT)
s->flags |= AVFMT_FLAG_BITEXACT;
#endif
case AV_CODEC_ID_SPEEX:
av_strlcatf(buff, size, "a=rtpmap:%d speex/%d\r\n",
payload_type, c->sample_rate);
- if (c->flags & CODEC_FLAG_QSCALE)
+ if (c->codec) {
+ const char *mode;
+ uint64_t vad_option;
+
++ if (c->flags & AV_CODEC_FLAG_QSCALE)
+ mode = "on";
+ else if (!av_opt_get_int(c, "vad", AV_OPT_FLAG_ENCODING_PARAM, &vad_option) && vad_option)
+ mode = "vad";
+ else
+ mode = "off";
+
+ av_strlcatf(buff, size, "a=fmtp:%d vbr=%s\r\n",
+ payload_type, mode);
+ }
break;
case AV_CODEC_ID_OPUS:
/* The opus RTP draft says that all opus streams MUST be declared