printf("%s %s [%s]:\n", encoder ? "Encoder" : "Decoder", c->name,
c->long_name ? c->long_name : "");
- if (c->type == AVMEDIA_TYPE_VIDEO) {
+ if (c->type == AVMEDIA_TYPE_VIDEO ||
+ c->type == AVMEDIA_TYPE_AUDIO) {
printf(" Threading capabilities: ");
- switch (c->capabilities & (CODEC_CAP_FRAME_THREADS |
- CODEC_CAP_SLICE_THREADS)) {
- case CODEC_CAP_FRAME_THREADS |
- CODEC_CAP_SLICE_THREADS: printf("frame and slice"); break;
- case CODEC_CAP_FRAME_THREADS: printf("frame"); break;
- case CODEC_CAP_SLICE_THREADS: printf("slice"); break;
+ switch (c->capabilities & (AV_CODEC_CAP_FRAME_THREADS |
+ AV_CODEC_CAP_SLICE_THREADS)) {
+ case AV_CODEC_CAP_FRAME_THREADS |
+ AV_CODEC_CAP_SLICE_THREADS: printf("frame and slice"); break;
+ case AV_CODEC_CAP_FRAME_THREADS: printf("frame"); break;
+ case AV_CODEC_CAP_SLICE_THREADS: printf("slice"); break;
default: printf("no"); break;
}
printf("\n");
const AVCodec *codec = NULL;
while ((codec = next_codec_for_id(desc->id, codec, encoder))) {
- printf("%c", get_media_type_char(desc->type));
+ printf(" %c", get_media_type_char(desc->type));
- printf((codec->capabilities & CODEC_CAP_FRAME_THREADS) ? "F" : ".");
- printf((codec->capabilities & CODEC_CAP_SLICE_THREADS) ? "S" : ".");
- printf((codec->capabilities & CODEC_CAP_EXPERIMENTAL) ? "X" : ".");
- printf((codec->capabilities & CODEC_CAP_DRAW_HORIZ_BAND)?"B" : ".");
- printf((codec->capabilities & CODEC_CAP_DR1) ? "D" : ".");
+ printf((codec->capabilities & AV_CODEC_CAP_FRAME_THREADS) ? "F" : ".");
+ printf((codec->capabilities & AV_CODEC_CAP_SLICE_THREADS) ? "S" : ".");
+ printf((codec->capabilities & AV_CODEC_CAP_EXPERIMENTAL) ? "X" : ".");
+ printf((codec->capabilities & AV_CODEC_CAP_DRAW_HORIZ_BAND)?"B" : ".");
+ printf((codec->capabilities & AV_CODEC_CAP_DR1) ? "D" : ".");
printf(" %-20s %s", codec->name, codec->long_name ? codec->long_name : "");
if (strcmp(codec->name, desc->name))
}
c = avcodec_alloc_context3(codec);
- picture = av_frame_alloc();
+ if (!c) {
+ fprintf(stderr, "Could not allocate video codec context\n");
+ exit(1);
+ }
- if(codec->capabilities&CODEC_CAP_TRUNCATED)
+ if (codec->capabilities & AV_CODEC_CAP_TRUNCATED)
c->flags |= AV_CODEC_FLAG_TRUNCATED; // we do not send complete frames
/* For some codecs, such as msmpeg4 and mpeg4, width and height
--- /dev/null
- CODEC_CAP_DELAY))
+/*
+ * Copyright (c) 2010 Nicolas George
+ * Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2014 Andrey Utkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * API example for demuxing, decoding, filtering, encoding and muxing
+ * @example transcoding.c
+ */
+
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
+#include <libavfilter/avfiltergraph.h>
+#include <libavfilter/avcodec.h>
+#include <libavfilter/buffersink.h>
+#include <libavfilter/buffersrc.h>
+#include <libavutil/opt.h>
+#include <libavutil/pixdesc.h>
+
+static AVFormatContext *ifmt_ctx;
+static AVFormatContext *ofmt_ctx;
+typedef struct FilteringContext {
+ AVFilterContext *buffersink_ctx;
+ AVFilterContext *buffersrc_ctx;
+ AVFilterGraph *filter_graph;
+} FilteringContext;
+static FilteringContext *filter_ctx;
+
+static int open_input_file(const char *filename)
+{
+ int ret;
+ unsigned int i;
+
+ ifmt_ctx = NULL;
+ if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
+ return ret;
+ }
+
+ if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
+ return ret;
+ }
+
+ for (i = 0; i < ifmt_ctx->nb_streams; i++) {
+ AVStream *stream;
+ AVCodecContext *codec_ctx;
+ stream = ifmt_ctx->streams[i];
+ codec_ctx = stream->codec;
+ /* Reencode video & audio and remux subtitles etc. */
+ if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
+ || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
+ /* Open decoder */
+ ret = avcodec_open2(codec_ctx,
+ avcodec_find_decoder(codec_ctx->codec_id), NULL);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
+ return ret;
+ }
+ }
+ }
+
+ av_dump_format(ifmt_ctx, 0, filename, 0);
+ return 0;
+}
+
+static int open_output_file(const char *filename)
+{
+ AVStream *out_stream;
+ AVStream *in_stream;
+ AVCodecContext *dec_ctx, *enc_ctx;
+ AVCodec *encoder;
+ int ret;
+ unsigned int i;
+
+ ofmt_ctx = NULL;
+ avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
+ if (!ofmt_ctx) {
+ av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
+ return AVERROR_UNKNOWN;
+ }
+
+
+ for (i = 0; i < ifmt_ctx->nb_streams; i++) {
+ out_stream = avformat_new_stream(ofmt_ctx, NULL);
+ if (!out_stream) {
+ av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
+ return AVERROR_UNKNOWN;
+ }
+
+ in_stream = ifmt_ctx->streams[i];
+ dec_ctx = in_stream->codec;
+ enc_ctx = out_stream->codec;
+
+ if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
+ || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
+ /* in this example, we choose transcoding to same codec */
+ encoder = avcodec_find_encoder(dec_ctx->codec_id);
+ if (!encoder) {
+ av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* In this example, we transcode to same properties (picture size,
+ * sample rate etc.). These properties can be changed for output
+ * streams easily using filters */
+ if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+ enc_ctx->height = dec_ctx->height;
+ enc_ctx->width = dec_ctx->width;
+ enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
+ /* take first format from list of supported formats */
+ enc_ctx->pix_fmt = encoder->pix_fmts[0];
+ /* video time_base can be set to whatever is handy and supported by encoder */
+ enc_ctx->time_base = dec_ctx->time_base;
+ } else {
+ enc_ctx->sample_rate = dec_ctx->sample_rate;
+ enc_ctx->channel_layout = dec_ctx->channel_layout;
+ enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
+ /* take first format from list of supported formats */
+ enc_ctx->sample_fmt = encoder->sample_fmts[0];
+ enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
+ }
+
+ /* Third parameter can be used to pass settings to encoder */
+ ret = avcodec_open2(enc_ctx, encoder, NULL);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
+ return ret;
+ }
+ } else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
+ av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
+ return AVERROR_INVALIDDATA;
+ } else {
+ /* if this stream must be remuxed */
+ ret = avcodec_copy_context(ofmt_ctx->streams[i]->codec,
+ ifmt_ctx->streams[i]->codec);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n");
+ return ret;
+ }
+ }
+
+ if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
+ enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+
+ }
+ av_dump_format(ofmt_ctx, 0, filename, 1);
+
+ if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
+ ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
+ return ret;
+ }
+ }
+
+ /* init muxer, write output file header */
+ ret = avformat_write_header(ofmt_ctx, NULL);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
+ AVCodecContext *enc_ctx, const char *filter_spec)
+{
+ char args[512];
+ int ret = 0;
+ AVFilter *buffersrc = NULL;
+ AVFilter *buffersink = NULL;
+ AVFilterContext *buffersrc_ctx = NULL;
+ AVFilterContext *buffersink_ctx = NULL;
+ AVFilterInOut *outputs = avfilter_inout_alloc();
+ AVFilterInOut *inputs = avfilter_inout_alloc();
+ AVFilterGraph *filter_graph = avfilter_graph_alloc();
+
+ if (!outputs || !inputs || !filter_graph) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+ buffersrc = avfilter_get_by_name("buffer");
+ buffersink = avfilter_get_by_name("buffersink");
+ if (!buffersrc || !buffersink) {
+ av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
+ ret = AVERROR_UNKNOWN;
+ goto end;
+ }
+
+ snprintf(args, sizeof(args),
+ "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
+ dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
+ dec_ctx->time_base.num, dec_ctx->time_base.den,
+ dec_ctx->sample_aspect_ratio.num,
+ dec_ctx->sample_aspect_ratio.den);
+
+ ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
+ args, NULL, filter_graph);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
+ goto end;
+ }
+
+ ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
+ NULL, NULL, filter_graph);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
+ goto end;
+ }
+
+ ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
+ (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
+ AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
+ goto end;
+ }
+ } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
+ buffersrc = avfilter_get_by_name("abuffer");
+ buffersink = avfilter_get_by_name("abuffersink");
+ if (!buffersrc || !buffersink) {
+ av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
+ ret = AVERROR_UNKNOWN;
+ goto end;
+ }
+
+ if (!dec_ctx->channel_layout)
+ dec_ctx->channel_layout =
+ av_get_default_channel_layout(dec_ctx->channels);
+ snprintf(args, sizeof(args),
+ "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
+ dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
+ av_get_sample_fmt_name(dec_ctx->sample_fmt),
+ dec_ctx->channel_layout);
+ ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
+ args, NULL, filter_graph);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
+ goto end;
+ }
+
+ ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
+ NULL, NULL, filter_graph);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
+ goto end;
+ }
+
+ ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
+ (uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
+ AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
+ goto end;
+ }
+
+ ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
+ (uint8_t*)&enc_ctx->channel_layout,
+ sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
+ goto end;
+ }
+
+ ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
+ (uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
+ AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
+ goto end;
+ }
+ } else {
+ ret = AVERROR_UNKNOWN;
+ goto end;
+ }
+
+ /* Endpoints for the filter graph. */
+ outputs->name = av_strdup("in");
+ outputs->filter_ctx = buffersrc_ctx;
+ outputs->pad_idx = 0;
+ outputs->next = NULL;
+
+ inputs->name = av_strdup("out");
+ inputs->filter_ctx = buffersink_ctx;
+ inputs->pad_idx = 0;
+ inputs->next = NULL;
+
+ if (!outputs->name || !inputs->name) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
+ &inputs, &outputs, NULL)) < 0)
+ goto end;
+
+ if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
+ goto end;
+
+ /* Fill FilteringContext */
+ fctx->buffersrc_ctx = buffersrc_ctx;
+ fctx->buffersink_ctx = buffersink_ctx;
+ fctx->filter_graph = filter_graph;
+
+end:
+ avfilter_inout_free(&inputs);
+ avfilter_inout_free(&outputs);
+
+ return ret;
+}
+
+static int init_filters(void)
+{
+ const char *filter_spec;
+ unsigned int i;
+ int ret;
+ filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
+ if (!filter_ctx)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < ifmt_ctx->nb_streams; i++) {
+ filter_ctx[i].buffersrc_ctx = NULL;
+ filter_ctx[i].buffersink_ctx = NULL;
+ filter_ctx[i].filter_graph = NULL;
+ if (!(ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
+ || ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO))
+ continue;
+
+
+ if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
+ filter_spec = "null"; /* passthrough (dummy) filter for video */
+ else
+ filter_spec = "anull"; /* passthrough (dummy) filter for audio */
+ ret = init_filter(&filter_ctx[i], ifmt_ctx->streams[i]->codec,
+ ofmt_ctx->streams[i]->codec, filter_spec);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
+ int ret;
+ int got_frame_local;
+ AVPacket enc_pkt;
+ int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
+ (ifmt_ctx->streams[stream_index]->codec->codec_type ==
+ AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;
+
+ if (!got_frame)
+ got_frame = &got_frame_local;
+
+ av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
+ /* encode filtered frame */
+ enc_pkt.data = NULL;
+ enc_pkt.size = 0;
+ av_init_packet(&enc_pkt);
+ ret = enc_func(ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
+ filt_frame, got_frame);
+ av_frame_free(&filt_frame);
+ if (ret < 0)
+ return ret;
+ if (!(*got_frame))
+ return 0;
+
+ /* prepare packet for muxing */
+ enc_pkt.stream_index = stream_index;
+ av_packet_rescale_ts(&enc_pkt,
+ ofmt_ctx->streams[stream_index]->codec->time_base,
+ ofmt_ctx->streams[stream_index]->time_base);
+
+ av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
+ /* mux encoded frame */
+ ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
+ return ret;
+}
+
+static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
+{
+ int ret;
+ AVFrame *filt_frame;
+
+ av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
+ /* push the decoded frame into the filtergraph */
+ ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
+ frame, 0);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
+ return ret;
+ }
+
+ /* pull filtered frames from the filtergraph */
+ while (1) {
+ filt_frame = av_frame_alloc();
+ if (!filt_frame) {
+ ret = AVERROR(ENOMEM);
+ break;
+ }
+ av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
+ ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
+ filt_frame);
+ if (ret < 0) {
+ /* if no more frames for output - returns AVERROR(EAGAIN)
+ * if flushed and no more frames for output - returns AVERROR_EOF
+ * rewrite retcode to 0 to show it as normal procedure completion
+ */
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+ ret = 0;
+ av_frame_free(&filt_frame);
+ break;
+ }
+
+ filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
+ ret = encode_write_frame(filt_frame, stream_index, NULL);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+static int flush_encoder(unsigned int stream_index)
+{
+ int ret;
+ int got_frame;
+
+ if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities &
+ AV_CODEC_CAP_DELAY))
+ return 0;
+
+ while (1) {
+ av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
+ ret = encode_write_frame(NULL, stream_index, &got_frame);
+ if (ret < 0)
+ break;
+ if (!got_frame)
+ return 0;
+ }
+ return ret;
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+ AVPacket packet = { .data = NULL, .size = 0 };
+ AVFrame *frame = NULL;
+ enum AVMediaType type;
+ unsigned int stream_index;
+ unsigned int i;
+ int got_frame;
+ int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
+
+ if (argc != 3) {
+ av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
+ return 1;
+ }
+
+ av_register_all();
+ avfilter_register_all();
+
+ if ((ret = open_input_file(argv[1])) < 0)
+ goto end;
+ if ((ret = open_output_file(argv[2])) < 0)
+ goto end;
+ if ((ret = init_filters()) < 0)
+ goto end;
+
+ /* read all packets */
+ while (1) {
+ if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
+ break;
+ stream_index = packet.stream_index;
+ type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
+ av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
+ stream_index);
+
+ if (filter_ctx[stream_index].filter_graph) {
+ av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
+ frame = av_frame_alloc();
+ if (!frame) {
+ ret = AVERROR(ENOMEM);
+ break;
+ }
+ av_packet_rescale_ts(&packet,
+ ifmt_ctx->streams[stream_index]->time_base,
+ ifmt_ctx->streams[stream_index]->codec->time_base);
+ dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
+ avcodec_decode_audio4;
+ ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
+ &got_frame, &packet);
+ if (ret < 0) {
+ av_frame_free(&frame);
+ av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
+ break;
+ }
+
+ if (got_frame) {
+ frame->pts = av_frame_get_best_effort_timestamp(frame);
+ ret = filter_encode_write_frame(frame, stream_index);
+ av_frame_free(&frame);
+ if (ret < 0)
+ goto end;
+ } else {
+ av_frame_free(&frame);
+ }
+ } else {
+ /* remux this frame without reencoding */
+ av_packet_rescale_ts(&packet,
+ ifmt_ctx->streams[stream_index]->time_base,
+ ofmt_ctx->streams[stream_index]->time_base);
+
+ ret = av_interleaved_write_frame(ofmt_ctx, &packet);
+ if (ret < 0)
+ goto end;
+ }
+ av_free_packet(&packet);
+ }
+
+ /* flush filters and encoders */
+ for (i = 0; i < ifmt_ctx->nb_streams; i++) {
+ /* flush filter */
+ if (!filter_ctx[i].filter_graph)
+ continue;
+ ret = filter_encode_write_frame(NULL, i);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
+ goto end;
+ }
+
+ /* flush encoder */
+ ret = flush_encoder(i);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
+ goto end;
+ }
+ }
+
+ av_write_trailer(ofmt_ctx);
+end:
+ av_free_packet(&packet);
+ av_frame_free(&frame);
+ for (i = 0; i < ifmt_ctx->nb_streams; i++) {
+ avcodec_close(ifmt_ctx->streams[i]->codec);
+ if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
+ avcodec_close(ofmt_ctx->streams[i]->codec);
+ if (filter_ctx && filter_ctx[i].filter_graph)
+ avfilter_graph_free(&filter_ctx[i].filter_graph);
+ }
+ av_free(filter_ctx);
+ avformat_close_input(&ifmt_ctx);
+ if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
+ avio_closep(&ofmt_ctx->pb);
+ avformat_free_context(ofmt_ctx);
+
+ if (ret < 0)
+ av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));
+
+ return ret ? 1 : 0;
+}
If the codec allocates writable tables in its init(), add an init_thread_copy()
which re-allocates them for other threads.
-Add CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little
+Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little
speed gain at this point but it should work.
If there are inter-frame dependencies, so the codec calls
--- /dev/null
- if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
+/*
+ * Copyright (c) 2000-2003 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * multimedia converter based on the FFmpeg libraries
+ */
+
+#include "config.h"
+#include <ctype.h>
+#include <string.h>
+#include <math.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <limits.h>
+#include <stdint.h>
+
+#if HAVE_ISATTY
+#if HAVE_IO_H
+#include <io.h>
+#endif
+#if HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#endif
+
+#include "libavformat/avformat.h"
+#include "libavdevice/avdevice.h"
+#include "libswresample/swresample.h"
+#include "libavutil/opt.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/samplefmt.h"
+#include "libavutil/fifo.h"
+#include "libavutil/internal.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/dict.h"
+#include "libavutil/mathematics.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/avstring.h"
+#include "libavutil/libm.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/timestamp.h"
+#include "libavutil/bprint.h"
+#include "libavutil/time.h"
+#include "libavutil/threadmessage.h"
+#include "libavcodec/mathops.h"
+#include "libavformat/os_support.h"
+
+# include "libavfilter/avcodec.h"
+# include "libavfilter/avfilter.h"
+# include "libavfilter/buffersrc.h"
+# include "libavfilter/buffersink.h"
+
+#if HAVE_SYS_RESOURCE_H
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/resource.h>
+#elif HAVE_GETPROCESSTIMES
+#include <windows.h>
+#endif
+#if HAVE_GETPROCESSMEMORYINFO
+#include <windows.h>
+#include <psapi.h>
+#endif
+#if HAVE_SETCONSOLECTRLHANDLER
+#include <windows.h>
+#endif
+
+
+#if HAVE_SYS_SELECT_H
+#include <sys/select.h>
+#endif
+
+#if HAVE_TERMIOS_H
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <termios.h>
+#elif HAVE_KBHIT
+#include <conio.h>
+#endif
+
+#if HAVE_PTHREADS
+#include <pthread.h>
+#endif
+
+#include <time.h>
+
+#include "ffmpeg.h"
+#include "cmdutils.h"
+
+#include "libavutil/avassert.h"
+
+const char program_name[] = "ffmpeg";
+const int program_birth_year = 2000;
+
+static FILE *vstats_file;
+
+const char *const forced_keyframes_const_names[] = {
+ "n",
+ "n_forced",
+ "prev_forced_n",
+ "prev_forced_t",
+ "t",
+ NULL
+};
+
+static void do_video_stats(OutputStream *ost, int frame_size);
+static int64_t getutime(void);
+static int64_t getmaxrss(void);
+
+static int run_as_daemon = 0;
+static int nb_frames_dup = 0;
+static int nb_frames_drop = 0;
+static int64_t decode_error_stat[2];
+
+static int current_time;
+AVIOContext *progress_avio = NULL;
+
+static uint8_t *subtitle_out;
+
+InputStream **input_streams = NULL;
+int nb_input_streams = 0;
+InputFile **input_files = NULL;
+int nb_input_files = 0;
+
+OutputStream **output_streams = NULL;
+int nb_output_streams = 0;
+OutputFile **output_files = NULL;
+int nb_output_files = 0;
+
+FilterGraph **filtergraphs;
+int nb_filtergraphs;
+
+#if HAVE_TERMIOS_H
+
+/* init terminal so that we can grab keys */
+static struct termios oldtty;
+static int restore_tty;
+#endif
+
+#if HAVE_PTHREADS
+static void free_input_threads(void);
+#endif
+
+/* sub2video hack:
+ Convert subtitles to video with alpha to insert them in filter graphs.
+ This is a temporary solution until libavfilter gets real subtitles support.
+ */
+
+static int sub2video_get_blank_frame(InputStream *ist)
+{
+ int ret;
+ AVFrame *frame = ist->sub2video.frame;
+
+ av_frame_unref(frame);
+ ist->sub2video.frame->width = ist->sub2video.w;
+ ist->sub2video.frame->height = ist->sub2video.h;
+ ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
+ if ((ret = av_frame_get_buffer(frame, 32)) < 0)
+ return ret;
+ memset(frame->data[0], 0, frame->height * frame->linesize[0]);
+ return 0;
+}
+
+static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
+ AVSubtitleRect *r)
+{
+ uint32_t *pal, *dst2;
+ uint8_t *src, *src2;
+ int x, y;
+
+ if (r->type != SUBTITLE_BITMAP) {
+ av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
+ return;
+ }
+ if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
+ av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle overflowing\n");
+ return;
+ }
+
+ dst += r->y * dst_linesize + r->x * 4;
+ src = r->pict.data[0];
+ pal = (uint32_t *)r->pict.data[1];
+ for (y = 0; y < r->h; y++) {
+ dst2 = (uint32_t *)dst;
+ src2 = src;
+ for (x = 0; x < r->w; x++)
+ *(dst2++) = pal[*(src2++)];
+ dst += dst_linesize;
+ src += r->pict.linesize[0];
+ }
+}
+
+static void sub2video_push_ref(InputStream *ist, int64_t pts)
+{
+ AVFrame *frame = ist->sub2video.frame;
+ int i;
+
+ av_assert1(frame->data[0]);
+ ist->sub2video.last_pts = frame->pts = pts;
+ for (i = 0; i < ist->nb_filters; i++)
+ av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
+ AV_BUFFERSRC_FLAG_KEEP_REF |
+ AV_BUFFERSRC_FLAG_PUSH);
+}
+
+static void sub2video_update(InputStream *ist, AVSubtitle *sub)
+{
+ int w = ist->sub2video.w, h = ist->sub2video.h;
+ AVFrame *frame = ist->sub2video.frame;
+ int8_t *dst;
+ int dst_linesize;
+ int num_rects, i;
+ int64_t pts, end_pts;
+
+ if (!frame)
+ return;
+ if (sub) {
+ pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
+ AV_TIME_BASE_Q, ist->st->time_base);
+ end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
+ AV_TIME_BASE_Q, ist->st->time_base);
+ num_rects = sub->num_rects;
+ } else {
+ pts = ist->sub2video.end_pts;
+ end_pts = INT64_MAX;
+ num_rects = 0;
+ }
+ if (sub2video_get_blank_frame(ist) < 0) {
+ av_log(ist->dec_ctx, AV_LOG_ERROR,
+ "Impossible to get a blank canvas.\n");
+ return;
+ }
+ dst = frame->data [0];
+ dst_linesize = frame->linesize[0];
+ for (i = 0; i < num_rects; i++)
+ sub2video_copy_rect(dst, dst_linesize, w, h, sub->rects[i]);
+ sub2video_push_ref(ist, pts);
+ ist->sub2video.end_pts = end_pts;
+}
+
+static void sub2video_heartbeat(InputStream *ist, int64_t pts)
+{
+ InputFile *infile = input_files[ist->file_index];
+ int i, j, nb_reqs;
+ int64_t pts2;
+
+ /* When a frame is read from a file, examine all sub2video streams in
+ the same file and send the sub2video frame again. Otherwise, decoded
+ video frames could be accumulating in the filter graph while a filter
+ (possibly overlay) is desperately waiting for a subtitle frame. */
+ for (i = 0; i < infile->nb_streams; i++) {
+ InputStream *ist2 = input_streams[infile->ist_index + i];
+ if (!ist2->sub2video.frame)
+ continue;
+ /* subtitles seem to be usually muxed ahead of other streams;
+ if not, subtracting a larger time here is necessary */
+ pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
+ /* do not send the heartbeat frame if the subtitle is already ahead */
+ if (pts2 <= ist2->sub2video.last_pts)
+ continue;
+ if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
+ sub2video_update(ist2, NULL);
+ for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
+ nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
+ if (nb_reqs)
+ sub2video_push_ref(ist2, pts2);
+ }
+}
+
+static void sub2video_flush(InputStream *ist)
+{
+ int i;
+
+ if (ist->sub2video.end_pts < INT64_MAX)
+ sub2video_update(ist, NULL);
+ for (i = 0; i < ist->nb_filters; i++)
+ av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
+}
+
+/* end of sub2video hack */
+
+static void term_exit_sigsafe(void)
+{
+#if HAVE_TERMIOS_H
+ if(restore_tty)
+ tcsetattr (0, TCSANOW, &oldtty);
+#endif
+}
+
+void term_exit(void)
+{
+ av_log(NULL, AV_LOG_QUIET, "%s", "");
+ term_exit_sigsafe();
+}
+
+static volatile int received_sigterm = 0;
+static volatile int received_nb_signals = 0;
+static volatile int transcode_init_done = 0;
+static volatile int ffmpeg_exited = 0;
+static int main_return_code = 0;
+
+static void
+sigterm_handler(int sig)
+{
+ received_sigterm = sig;
+ received_nb_signals++;
+ term_exit_sigsafe();
+ if(received_nb_signals > 3) {
+ write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
+ strlen("Received > 3 system signals, hard exiting\n"));
+
+ exit(123);
+ }
+}
+
+#if HAVE_SETCONSOLECTRLHANDLER
+static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
+{
+ av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
+
+ switch (fdwCtrlType)
+ {
+ case CTRL_C_EVENT:
+ case CTRL_BREAK_EVENT:
+ sigterm_handler(SIGINT);
+ return TRUE;
+
+ case CTRL_CLOSE_EVENT:
+ case CTRL_LOGOFF_EVENT:
+ case CTRL_SHUTDOWN_EVENT:
+ sigterm_handler(SIGTERM);
+ /* Basically, with these 3 events, when we return from this method the
+ process is hard terminated, so stall as long as we need to
+ to try and let the main thread(s) clean up and gracefully terminate
+ (we have at most 5 seconds, but should be done far before that). */
+ while (!ffmpeg_exited) {
+ Sleep(0);
+ }
+ return TRUE;
+
+ default:
+ av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
+ return FALSE;
+ }
+}
+#endif
+
+void term_init(void)
+{
+#if HAVE_TERMIOS_H
+ if(!run_as_daemon){
+ struct termios tty;
+ int istty = 1;
+#if HAVE_ISATTY
+ istty = isatty(0) && isatty(2);
+#endif
+ if (istty && tcgetattr (0, &tty) == 0) {
+ oldtty = tty;
+ restore_tty = 1;
+
+ tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
+ |INLCR|IGNCR|ICRNL|IXON);
+ tty.c_oflag |= OPOST;
+ tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
+ tty.c_cflag &= ~(CSIZE|PARENB);
+ tty.c_cflag |= CS8;
+ tty.c_cc[VMIN] = 1;
+ tty.c_cc[VTIME] = 0;
+
+ tcsetattr (0, TCSANOW, &tty);
+ }
+ signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
+ }
+#endif
+
+ signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
+ signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
+#ifdef SIGXCPU
+ signal(SIGXCPU, sigterm_handler);
+#endif
+#if HAVE_SETCONSOLECTRLHANDLER
+ SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
+#endif
+}
+
+/* read a key without blocking */
+static int read_key(void)
+{
+ unsigned char ch;
+#if HAVE_TERMIOS_H
+ int n = 1;
+ struct timeval tv;
+ fd_set rfds;
+
+ FD_ZERO(&rfds);
+ FD_SET(0, &rfds);
+ tv.tv_sec = 0;
+ tv.tv_usec = 0;
+ n = select(1, &rfds, NULL, NULL, &tv);
+ if (n > 0) {
+ n = read(0, &ch, 1);
+ if (n == 1)
+ return ch;
+
+ return n;
+ }
+#elif HAVE_KBHIT
+# if HAVE_PEEKNAMEDPIPE
+ static int is_pipe;
+ static HANDLE input_handle;
+ DWORD dw, nchars;
+ if(!input_handle){
+ input_handle = GetStdHandle(STD_INPUT_HANDLE);
+ is_pipe = !GetConsoleMode(input_handle, &dw);
+ }
+
+ if (stdin->_cnt > 0) {
+ read(0, &ch, 1);
+ return ch;
+ }
+ if (is_pipe) {
+ /* When running under a GUI, you will end here. */
+ if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
+ // input pipe may have been closed by the program that ran ffmpeg
+ return -1;
+ }
+ //Read it
+ if(nchars != 0) {
+ read(0, &ch, 1);
+ return ch;
+ }else{
+ return -1;
+ }
+ }
+# endif
+ if(kbhit())
+ return(getch());
+#endif
+ return -1;
+}
+
+static int decode_interrupt_cb(void *ctx)
+{
+ return received_nb_signals > transcode_init_done;
+}
+
+const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
+
+/* Program-exit cleanup: free filtergraphs, close/free all output and input
+ * files and streams, release global buffers, and restore the terminal. */
+static void ffmpeg_cleanup(int ret)
+{
+    int i, j;
+
+    if (do_benchmark) {
+        int maxrss = getmaxrss() / 1024;
+        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
+    }
+
+    /* free each filtergraph with its per-input/per-output descriptions */
+    for (i = 0; i < nb_filtergraphs; i++) {
+        FilterGraph *fg = filtergraphs[i];
+        avfilter_graph_free(&fg->graph);
+        for (j = 0; j < fg->nb_inputs; j++) {
+            av_freep(&fg->inputs[j]->name);
+            av_freep(&fg->inputs[j]);
+        }
+        av_freep(&fg->inputs);
+        for (j = 0; j < fg->nb_outputs; j++) {
+            av_freep(&fg->outputs[j]->name);
+            av_freep(&fg->outputs[j]);
+        }
+        av_freep(&fg->outputs);
+        av_freep(&fg->graph_desc);
+
+        av_freep(&filtergraphs[i]);
+    }
+    av_freep(&filtergraphs);
+
+    av_freep(&subtitle_out);
+
+    /* close files */
+    for (i = 0; i < nb_output_files; i++) {
+        OutputFile *of = output_files[i];
+        AVFormatContext *s;
+        if (!of)
+            continue;
+        s = of->ctx;
+        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
+            avio_closep(&s->pb);
+        avformat_free_context(s);
+        av_dict_free(&of->opts);
+
+        av_freep(&output_files[i]);
+    }
+    for (i = 0; i < nb_output_streams; i++) {
+        OutputStream *ost = output_streams[i];
+        AVBitStreamFilterContext *bsfc;
+
+        if (!ost)
+            continue;
+
+        /* walk the bitstream filter chain, closing each link */
+        bsfc = ost->bitstream_filters;
+        while (bsfc) {
+            AVBitStreamFilterContext *next = bsfc->next;
+            av_bitstream_filter_close(bsfc);
+            bsfc = next;
+        }
+        ost->bitstream_filters = NULL;
+        av_frame_free(&ost->filtered_frame);
+        av_frame_free(&ost->last_frame);
+
+        av_parser_close(ost->parser);
+
+        av_freep(&ost->forced_keyframes);
+        av_expr_free(ost->forced_keyframes_pexpr);
+        av_freep(&ost->avfilter);
+        av_freep(&ost->logfile_prefix);
+
+        av_freep(&ost->audio_channels_map);
+        ost->audio_channels_mapped = 0;
+
+        avcodec_free_context(&ost->enc_ctx);
+
+        av_freep(&output_streams[i]);
+    }
+#if HAVE_PTHREADS
+    free_input_threads();
+#endif
+    for (i = 0; i < nb_input_files; i++) {
+        avformat_close_input(&input_files[i]->ctx);
+        av_freep(&input_files[i]);
+    }
+    for (i = 0; i < nb_input_streams; i++) {
+        InputStream *ist = input_streams[i];
+
+        av_frame_free(&ist->decoded_frame);
+        av_frame_free(&ist->filter_frame);
+        av_dict_free(&ist->decoder_opts);
+        avsubtitle_free(&ist->prev_sub.subtitle);
+        av_frame_free(&ist->sub2video.frame);
+        av_freep(&ist->filters);
+        av_freep(&ist->hwaccel_device);
+
+        avcodec_free_context(&ist->dec_ctx);
+
+        av_freep(&input_streams[i]);
+    }
+
+    if (vstats_file)
+        fclose(vstats_file);
+    av_freep(&vstats_filename);
+
+    av_freep(&input_streams);
+    av_freep(&input_files);
+    av_freep(&output_streams);
+    av_freep(&output_files);
+
+    uninit_opts();
+
+    avformat_network_deinit();
+
+    if (received_sigterm) {
+        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
+               (int) received_sigterm);
+    } else if (ret && transcode_init_done) {
+        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
+    }
+    term_exit();
+    ffmpeg_exited = 1;
+}
+
+/* Remove from *a every option whose key also appears in b
+ * (case-sensitive match). */
+void remove_avoptions(AVDictionary **a, AVDictionary *b)
+{
+    AVDictionaryEntry *t = NULL;
+
+    while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
+        av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
+    }
+}
+
+/* Abort the program with a fatal log message if any option in m was left
+ * unconsumed (i.e. not recognized by the component it was passed to). */
+void assert_avoptions(AVDictionary *m)
+{
+    AVDictionaryEntry *t;
+    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
+        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
+        exit_program(1);
+    }
+}
+
+/* Terminate after an experimental codec was selected without the required
+ * opt-in; both parameters are currently unused here. */
+static void abort_codec_experimental(AVCodec *c, int encoder)
+{
+    exit_program(1);
+}
+
+/* With -benchmark_all: log the time elapsed since the previous call under
+ * the given printf-style label, then reset the timer.
+ * Pass fmt == NULL to reset the timer without logging. */
+static void update_benchmark(const char *fmt, ...)
+{
+    if (do_benchmark_all) {
+        int64_t t = getutime();
+        va_list va;
+        char buf[1024];
+
+        if (fmt) {
+            va_start(va, fmt);
+            vsnprintf(buf, sizeof(buf), fmt, va);
+            va_end(va);
+            av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
+        }
+        current_time = t;
+    }
+}
+
+/* OR the finished-state of ost with this_stream and of every other output
+ * stream with others. */
+static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
+{
+    int i;
+    for (i = 0; i < nb_output_streams; i++) {
+        OutputStream *ost2 = output_streams[i];
+        ost2->finished |= ost == ost2 ? this_stream : others;
+    }
+}
+
+/* Run the stream's bitstream filters over pkt, sanitize/monotonize its
+ * timestamps, and hand it to the muxer of s.  The packet is consumed
+ * (freed) before returning, whether muxing succeeded or not. */
+static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
+{
+    AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
+    AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
+    int ret;
+
+    /* copy encoder extradata to the muxer stream context if still missing */
+    if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
+        ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+        if (ost->st->codec->extradata) {
+            memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
+            ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
+        }
+    }
+
+    if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
+        (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
+        pkt->pts = pkt->dts = AV_NOPTS_VALUE;
+
+    /*
+     * Audio encoders may split the packets --  #frames in != #packets out.
+     * But there is no reordering, so we can limit the number of output packets
+     * by simply dropping them here.
+     * Counting encoded video frames needs to be done separately because of
+     * reordering, see do_video_out()
+     */
+    if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
+        if (ost->frame_number >= ost->max_frames) {
+            av_free_packet(pkt);
+            return;
+        }
+        ost->frame_number++;
+    }
+    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+        /* pull per-packet quality from side data for the stats/report code */
+        uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
+                                              NULL);
+        ost->quality = sd ? AV_RL32(sd) : -1;
+    }
+
+    if (bsfc)
+        av_packet_split_side_data(pkt);
+
+    while (bsfc) {
+        AVPacket new_pkt = *pkt;
+        AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
+                                                 bsfc->filter->name,
+                                                 NULL, 0);
+        int a = av_bitstream_filter_filter(bsfc, avctx,
+                                           bsf_arg ? bsf_arg->value : NULL,
+                                           &new_pkt.data, &new_pkt.size,
+                                           pkt->data, pkt->size,
+                                           pkt->flags & AV_PKT_FLAG_KEY);
+        if(a == 0 && new_pkt.data != pkt->data && new_pkt.destruct) {
+            uint8_t *t = av_malloc(new_pkt.size + FF_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
+            if(t) {
+                memcpy(t, new_pkt.data, new_pkt.size);
+                memset(t + new_pkt.size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
+                new_pkt.data = t;
+                new_pkt.buf = NULL;
+                a = 1;
+            } else
+                a = AVERROR(ENOMEM);
+        }
+        if (a > 0) {
+            /* filter allocated a new payload: drop the old one and wrap the
+             * new buffer so it is reference counted */
+            pkt->side_data = NULL;
+            pkt->side_data_elems = 0;
+            av_free_packet(pkt);
+            new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
+                                           av_buffer_default_free, NULL, 0);
+            if (!new_pkt.buf)
+                exit_program(1);
+        } else if (a < 0) {
+            new_pkt = *pkt;
+            av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
+                   bsfc->filter->name, pkt->stream_index,
+                   avctx->codec ? avctx->codec->name : "copy");
+            print_error("", a);
+            if (exit_on_error)
+                exit_program(1);
+        }
+        *pkt = new_pkt;
+
+        bsfc = bsfc->next;
+    }
+
+    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
+        /* dts must never exceed pts; replace both by a guess when violated */
+        if (pkt->dts != AV_NOPTS_VALUE &&
+            pkt->pts != AV_NOPTS_VALUE &&
+            pkt->dts > pkt->pts) {
+            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
+                   pkt->dts, pkt->pts,
+                   ost->file_index, ost->st->index);
+            pkt->pts =
+            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
+                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
+                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
+        }
+        if(
+           (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
+           pkt->dts != AV_NOPTS_VALUE &&
+           ost->last_mux_dts != AV_NOPTS_VALUE) {
+            /* enforce (strictly) monotonically increasing dts per stream */
+            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
+            if (pkt->dts < max) {
+                int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
+                av_log(s, loglevel, "Non-monotonous DTS in output stream "
+                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
+                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
+                if (exit_on_error) {
+                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
+                    exit_program(1);
+                }
+                av_log(s, loglevel, "changing to %"PRId64". This may result "
+                       "in incorrect timestamps in the output file.\n",
+                       max);
+                if(pkt->pts >= pkt->dts)
+                    pkt->pts = FFMAX(pkt->pts, max);
+                pkt->dts = max;
+            }
+        }
+    }
+    ost->last_mux_dts = pkt->dts;
+
+    ost->data_size += pkt->size;
+    ost->packets_written++;
+
+    pkt->stream_index = ost->index;
+
+    if (debug_ts) {
+        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
+                "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
+                av_get_media_type_string(ost->enc_ctx->codec_type),
+                av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
+                av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
+                pkt->size
+              );
+    }
+
+    ret = av_interleaved_write_frame(s, pkt);
+    if (ret < 0) {
+        print_error("av_interleaved_write_frame()", ret);
+        main_return_code = 1;
+        close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
+    }
+    av_free_packet(pkt);
+}
+
+/* Mark the encoder side of ost as finished; with -shortest, also clamp the
+ * owning file's recording time to this stream's current end timestamp. */
+static void close_output_stream(OutputStream *ost)
+{
+    OutputFile *of = output_files[ost->file_index];
+
+    ost->finished |= ENCODER_FINISHED;
+    if (of->shortest) {
+        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
+        of->recording_time = FFMIN(of->recording_time, end);
+    }
+}
+
+/* Return 1 while encoding on ost should continue; return 0 (after closing
+ * the stream) once the output file's -t recording time is reached. */
+static int check_recording_time(OutputStream *ost)
+{
+    OutputFile *of = output_files[ost->file_index];
+
+    if (of->recording_time != INT64_MAX &&
+        av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
+                      AV_TIME_BASE_Q) >= 0) {
+        close_output_stream(ost);
+        return 0;
+    }
+    return 1;
+}
+
+/* Encode one audio frame for ost and mux whatever packet the encoder
+ * produces.  Advances ost->sync_opts by the frame's sample count. */
+static void do_audio_out(AVFormatContext *s, OutputStream *ost,
+                         AVFrame *frame)
+{
+    AVCodecContext *enc = ost->enc_ctx;
+    AVPacket pkt;
+    int got_packet = 0;
+
+    av_init_packet(&pkt);
+    pkt.data = NULL;
+    pkt.size = 0;
+
+    if (!check_recording_time(ost))
+        return;
+
+    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
+        frame->pts = ost->sync_opts;
+    ost->sync_opts = frame->pts + frame->nb_samples;
+    ost->samples_encoded += frame->nb_samples;
+    ost->frames_encoded++;
+
+    av_assert0(pkt.size || !pkt.data);
+    update_benchmark(NULL);
+    if (debug_ts) {
+        av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
+               "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
+               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
+               enc->time_base.num, enc->time_base.den);
+    }
+
+    if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
+        av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
+        exit_program(1);
+    }
+    update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
+
+    if (got_packet) {
+        av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
+
+        if (debug_ts) {
+            av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
+                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
+                   av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
+                   av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
+        }
+
+        write_frame(s, &pkt, ost);
+    }
+}
+
+/* Encode one subtitle event for ost and mux the resulting packet(s).
+ * DVB subtitles are emitted twice: once to draw, once to clear. */
+static void do_subtitle_out(AVFormatContext *s,
+                            OutputStream *ost,
+                            InputStream *ist,
+                            AVSubtitle *sub)
+{
+    int subtitle_out_max_size = 1024 * 1024;
+    int subtitle_out_size, nb, i;
+    AVCodecContext *enc;
+    AVPacket pkt;
+    int64_t pts;
+
+    if (sub->pts == AV_NOPTS_VALUE) {
+        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
+        if (exit_on_error)
+            exit_program(1);
+        return;
+    }
+
+    enc = ost->enc_ctx;
+
+    /* lazily allocate the shared 1 MiB encode buffer on first use */
+    if (!subtitle_out) {
+        subtitle_out = av_malloc(subtitle_out_max_size);
+        if (!subtitle_out) {
+            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
+            exit_program(1);
+        }
+    }
+
+    /* Note: DVB subtitle need one packet to draw them and one other
+       packet to clear them */
+    /* XXX: signal it in the codec context ? */
+    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
+        nb = 2;
+    else
+        nb = 1;
+
+    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
+    pts = sub->pts;
+    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
+        pts -= output_files[ost->file_index]->start_time;
+    for (i = 0; i < nb; i++) {
+        unsigned save_num_rects = sub->num_rects;
+
+        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
+        if (!check_recording_time(ost))
+            return;
+
+        sub->pts = pts;
+        // start_display_time is required to be 0
+        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
+        sub->end_display_time -= sub->start_display_time;
+        sub->start_display_time = 0;
+        /* second (clear) packet for DVB carries no rectangles */
+        if (i == 1)
+            sub->num_rects = 0;
+
+        ost->frames_encoded++;
+
+        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
+                                                    subtitle_out_max_size, sub);
+        if (i == 1)
+            sub->num_rects = save_num_rects;
+        if (subtitle_out_size < 0) {
+            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
+            exit_program(1);
+        }
+
+        av_init_packet(&pkt);
+        pkt.data = subtitle_out;
+        pkt.size = subtitle_out_size;
+        pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
+        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
+        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
+            /* XXX: the pts correction is handled here. Maybe handling
+               it in the codec would be better */
+            if (i == 0)
+                pkt.pts += 90 * sub->start_display_time;
+            else
+                pkt.pts += 90 * sub->end_display_time;
+        }
+        pkt.dts = pkt.pts;
+        write_frame(s, &pkt, ost);
+    }
+}
+
+/* Encode one video frame for ost, applying the configured video sync
+ * method (drop/duplicate frames as needed); next_picture == NULL flushes.
+ * NOTE(review): this hunk contains combined-diff artifact lines ("-"/"++")
+ * around the pts fallback below — verify the patch applies as intended. */
+static void do_video_out(AVFormatContext *s,
+                         OutputStream *ost,
+                         AVFrame *next_picture,
+                         double sync_ipts)
+{
+    int ret, format_video_sync;
+    AVPacket pkt;
+    AVCodecContext *enc = ost->enc_ctx;
+    AVCodecContext *mux_enc = ost->st->codec;
+    int nb_frames, nb0_frames, i;
+    double delta, delta0;
+    double duration = 0;
+    int frame_size = 0;
+    InputStream *ist = NULL;
+    AVFilterContext *filter = ost->filter->filter;
+
+    if (ost->source_index >= 0)
+        ist = input_streams[ost->source_index];
+
+    /* derive the per-frame duration (in encoder time base) from the filter
+     * frame rate, the output frame rate, or the input packet duration */
+    if (filter->inputs[0]->frame_rate.num > 0 &&
+        filter->inputs[0]->frame_rate.den > 0)
+        duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
+
+    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
+        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
+
+    if (!ost->filters_script &&
+        !ost->filters &&
+        next_picture &&
+        ist &&
+        lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
+        duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
+    }
+
+    if (!next_picture) {
+        //end, flushing
+        nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
+                                          ost->last_nb0_frames[1],
+                                          ost->last_nb0_frames[2]);
+    } else {
+        delta0 = sync_ipts - ost->sync_opts;
+        delta = delta0 + duration;
+
+        /* by default, we output a single frame */
+        nb0_frames = 0;
+        nb_frames = 1;
+
+        format_video_sync = video_sync_method;
+        if (format_video_sync == VSYNC_AUTO) {
+            if(!strcmp(s->oformat->name, "avi")) {
+                format_video_sync = VSYNC_VFR;
+            } else
+                format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
+            if (   ist
+                && format_video_sync == VSYNC_CFR
+                && input_files[ist->file_index]->ctx->nb_streams == 1
+                && input_files[ist->file_index]->input_ts_offset == 0) {
+                format_video_sync = VSYNC_VSCFR;
+            }
+            if (format_video_sync == VSYNC_CFR && copy_ts) {
+                format_video_sync = VSYNC_VSCFR;
+            }
+        }
+
+        /* frame arrived early relative to sync_opts but still overlaps the
+         * next slot: clip its duration instead of dropping it */
+        if (delta0 < 0 &&
+            delta > 0 &&
+            format_video_sync != VSYNC_PASSTHROUGH &&
+            format_video_sync != VSYNC_DROP) {
+            double cor = FFMIN(-delta0, duration);
+            if (delta0 < -0.6) {
+                av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
+            } else
+                av_log(NULL, AV_LOG_DEBUG, "Cliping frame in rate conversion by %f\n", -delta0);
+            sync_ipts += cor;
+            duration -= cor;
+            delta0 += cor;
+        }
+
+        switch (format_video_sync) {
+        case VSYNC_VSCFR:
+            if (ost->frame_number == 0 && delta - duration >= 0.5) {
+                av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
+                delta = duration;
+                delta0 = 0;
+                ost->sync_opts = lrint(sync_ipts);
+            }
+        /* fallthrough */
+        case VSYNC_CFR:
+            // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
+            if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
+                nb_frames = 0;
+            } else if (delta < -1.1)
+                nb_frames = 0;
+            else if (delta > 1.1) {
+                nb_frames = lrintf(delta);
+                if (delta0 > 1.1)
+                    nb0_frames = lrintf(delta0 - 0.6);
+            }
+            break;
+        case VSYNC_VFR:
+            if (delta <= -0.6)
+                nb_frames = 0;
+            else if (delta > 0.6)
+                ost->sync_opts = lrint(sync_ipts);
+            break;
+        case VSYNC_DROP:
+        case VSYNC_PASSTHROUGH:
+            ost->sync_opts = lrint(sync_ipts);
+            break;
+        default:
+            av_assert0(0);
+        }
+    }
+
+    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
+    nb0_frames = FFMIN(nb0_frames, nb_frames);
+
+    /* keep a short history of duplicate counts for the flush heuristic */
+    memmove(ost->last_nb0_frames + 1,
+            ost->last_nb0_frames,
+            sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
+    ost->last_nb0_frames[0] = nb0_frames;
+
+    if (nb0_frames == 0 && ost->last_droped) {
+        nb_frames_drop++;
+        av_log(NULL, AV_LOG_VERBOSE,
+               "*** dropping frame %d from stream %d at ts %"PRId64"\n",
+               ost->frame_number, ost->st->index, ost->last_frame->pts);
+    }
+    if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
+        if (nb_frames > dts_error_threshold * 30) {
+            av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
+            nb_frames_drop++;
+            return;
+        }
+        nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
+        av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
+    }
+    ost->last_droped = nb_frames == nb0_frames && next_picture;
+
+    /* duplicates frame if needed */
+    for (i = 0; i < nb_frames; i++) {
+        AVFrame *in_picture;
+        av_init_packet(&pkt);
+        pkt.data = NULL;
+        pkt.size = 0;
+
+        if (i < nb0_frames && ost->last_frame) {
+            in_picture = ost->last_frame;
+        } else
+            in_picture = next_picture;
+
+        if (!in_picture)
+            return;
+
+        in_picture->pts = ost->sync_opts;
+
+#if 1
+        if (!check_recording_time(ost))
+#else
+        if (ost->frame_number >= ost->max_frames)
+#endif
+            return;
+
+        if (s->oformat->flags & AVFMT_RAWPICTURE &&
+            enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
+            /* raw pictures are written as AVPicture structure to
+               avoid any copies. We support temporarily the older
+               method. */
+            if (in_picture->interlaced_frame)
+                mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
+            else
+                mux_enc->field_order = AV_FIELD_PROGRESSIVE;
+            pkt.data = (uint8_t *)in_picture;
+            pkt.size = sizeof(AVPicture);
+            pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
+            pkt.flags |= AV_PKT_FLAG_KEY;
+
+            write_frame(s, &pkt, ost);
+        } else {
+            int got_packet, forced_keyframe = 0;
+            double pts_time;
+
+            if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
+                ost->top_field_first >= 0)
+                in_picture->top_field_first = !!ost->top_field_first;
+
+            if (in_picture->interlaced_frame) {
+                if (enc->codec->id == AV_CODEC_ID_MJPEG)
+                    mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
+                else
+                    mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
+            } else
+                mux_enc->field_order = AV_FIELD_PROGRESSIVE;
+
+            in_picture->quality = enc->global_quality;
+            in_picture->pict_type = 0;
+
+            /* decide whether this frame must be forced to a keyframe:
+             * explicit -force_key_frames times, an expression, or "source" */
+            pts_time = in_picture->pts != AV_NOPTS_VALUE ?
+                in_picture->pts * av_q2d(enc->time_base) : NAN;
+            if (ost->forced_kf_index < ost->forced_kf_count &&
+                in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
+                ost->forced_kf_index++;
+                forced_keyframe = 1;
+            } else if (ost->forced_keyframes_pexpr) {
+                double res;
+                ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
+                res = av_expr_eval(ost->forced_keyframes_pexpr,
+                                   ost->forced_keyframes_expr_const_values, NULL);
+                av_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
+                        ost->forced_keyframes_expr_const_values[FKF_N],
+                        ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
+                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
+                        ost->forced_keyframes_expr_const_values[FKF_T],
+                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
+                        res);
+                if (res) {
+                    forced_keyframe = 1;
+                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
+                        ost->forced_keyframes_expr_const_values[FKF_N];
+                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
+                        ost->forced_keyframes_expr_const_values[FKF_T];
+                    ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
+                }
+
+                ost->forced_keyframes_expr_const_values[FKF_N] += 1;
+            } else if (   ost->forced_keyframes
+                       && !strncmp(ost->forced_keyframes, "source", 6)
+                       && in_picture->key_frame==1) {
+                forced_keyframe = 1;
+            }
+
+            if (forced_keyframe) {
+                in_picture->pict_type = AV_PICTURE_TYPE_I;
+                av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
+            }
+
+            update_benchmark(NULL);
+            if (debug_ts) {
+                av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
+                       "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
+                       av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
+                       enc->time_base.num, enc->time_base.den);
+            }
+
+            ost->frames_encoded++;
+
+            ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
+            update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
+            if (ret < 0) {
+                av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
+                exit_program(1);
+            }
+
+            if (got_packet) {
+                if (debug_ts) {
+                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
+                           "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
+                           av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
+                           av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
+                }
+
-                if (!(enc->codec->capabilities & CODEC_CAP_PARAM_CHANGE) &&
++                if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
+                    pkt.pts = ost->sync_opts;
+
+                av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
+
+                if (debug_ts) {
+                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
+                           "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
+                           av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
+                           av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
+                }
+
+                frame_size = pkt.size;
+                write_frame(s, &pkt, ost);
+
+                /* if two pass, output log */
+                if (ost->logfile && enc->stats_out) {
+                    fprintf(ost->logfile, "%s", enc->stats_out);
+                }
+            }
+        }
+        ost->sync_opts++;
+        /*
+         * For video, number of frames in == number of packets out.
+         * But there may be reordering, so we can't throw away frames on encoder
+         * flush, we need to limit them here, before they go into encoder.
+         */
+        ost->frame_number++;
+
+        if (vstats_filename && frame_size)
+            do_video_stats(ost, frame_size);
+    }
+
+    /* remember the last frame so it can be duplicated on the next call */
+    if (!ost->last_frame)
+        ost->last_frame = av_frame_alloc();
+    av_frame_unref(ost->last_frame);
+    if (next_picture && ost->last_frame)
+        av_frame_ref(ost->last_frame, next_picture);
+    else
+        av_frame_free(&ost->last_frame);
+}
+
+/* Convert a normalized mean squared error d into a PSNR value in dB. */
+static double psnr(double d)
+{
+    return -10.0 * log(d) / log(10.0);
+}
+
+/* Append one line of per-frame video statistics (frame number, q, PSNR,
+ * sizes, bitrates, picture type) to the -vstats file, opening it lazily. */
+static void do_video_stats(OutputStream *ost, int frame_size)
+{
+    AVCodecContext *enc;
+    int frame_number;
+    double ti1, bitrate, avg_bitrate;
+
+    /* this is executed just the first time do_video_stats is called */
+    if (!vstats_file) {
+        vstats_file = fopen(vstats_filename, "w");
+        if (!vstats_file) {
+            perror("fopen");
+            exit_program(1);
+        }
+    }
+
+    enc = ost->enc_ctx;
+    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
+        frame_number = ost->st->nb_frames;
+        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
+                ost->quality / (float)FF_QP2LAMBDA);
+
+        if (enc->coded_frame && (enc->flags & AV_CODEC_FLAG_PSNR))
+            fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
+
+        fprintf(vstats_file,"f_size= %6d ", frame_size);
+        /* compute pts value */
+        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
+        if (ti1 < 0.01)
+            ti1 = 0.01;
+
+        bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
+        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
+        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
+                (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
+        fprintf(vstats_file, "type= %c\n", enc->coded_frame ? av_get_picture_type_char(enc->coded_frame->pict_type) : 'I');
+    }
+}
+
+/* Mark ost fully finished (encoder and muxer); with -shortest, finish every
+ * stream of the owning output file as well. */
+static void finish_output_stream(OutputStream *ost)
+{
+    OutputFile *of = output_files[ost->file_index];
+    int i;
+
+    ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
+
+    if (of->shortest) {
+        for (i = 0; i < of->ctx->nb_streams; i++)
+            output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
+    }
+}
+
+/**
+ * Get and encode new output from any of the filtergraphs, without causing
+ * activity.
+ *
+ * NOTE(review): this hunk contains combined-diff artifact lines ("-"/"++")
+ * in the audio case below — verify the patch applies as intended.
+ *
+ * @return 0 for success, <0 for severe errors
+ */
+static int reap_filters(int flush)
+{
+    AVFrame *filtered_frame = NULL;
+    int i;
+
+    /* Reap all buffers present in the buffer sinks */
+    for (i = 0; i < nb_output_streams; i++) {
+        OutputStream *ost = output_streams[i];
+        OutputFile *of = output_files[ost->file_index];
+        AVFilterContext *filter;
+        AVCodecContext *enc = ost->enc_ctx;
+        int ret = 0;
+
+        if (!ost->filter)
+            continue;
+        filter = ost->filter->filter;
+
+        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
+            return AVERROR(ENOMEM);
+        }
+        filtered_frame = ost->filtered_frame;
+
+        while (1) {
+            double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
+            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
+                                               AV_BUFFERSINK_FLAG_NO_REQUEST);
+            if (ret < 0) {
+                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
+                    av_log(NULL, AV_LOG_WARNING,
+                           "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
+                } else if (flush && ret == AVERROR_EOF) {
+                    /* on EOF, flush the video encoder */
+                    if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
+                        do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
+                }
+                break;
+            }
+            if (ost->finished) {
+                av_frame_unref(filtered_frame);
+                continue;
+            }
+            if (filtered_frame->pts != AV_NOPTS_VALUE) {
+                int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
+                AVRational tb = enc->time_base;
+                int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
+
+                tb.den <<= extra_bits;
+                float_pts =
+                    av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
+                    av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
+                float_pts /= 1 << extra_bits;
+                // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
+                float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
+
+                filtered_frame->pts =
+                    av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
+                    av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
+            }
+            //if (ost->source_index >= 0)
+            //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
+
+            switch (filter->inputs[0]->type) {
+            case AVMEDIA_TYPE_VIDEO:
+                if (!ost->frame_aspect_ratio.num)
+                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
+
+                if (debug_ts) {
+                    av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
+                            av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
+                            float_pts,
+                            enc->time_base.num, enc->time_base.den);
+                }
+
+                do_video_out(of->ctx, ost, filtered_frame, float_pts);
+                break;
+            case AVMEDIA_TYPE_AUDIO:
-                    !(ist->dec->capabilities & CODEC_CAP_SUBFRAMES)) {
++                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
+                    enc->channels != av_frame_get_channels(filtered_frame)) {
+                    av_log(NULL, AV_LOG_ERROR,
+                           "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
+                    break;
+                }
+                do_audio_out(of->ctx, ost, filtered_frame);
+                break;
+            default:
+                // TODO support subtitle filters
+                av_assert0(0);
+            }
+
+            av_frame_unref(filtered_frame);
+        }
+    }
+
+    return 0;
+}
+
+/* Print the end-of-run summary: per-type byte totals, muxing overhead, and
+ * (at verbose level) per-input/per-output stream packet and frame counts. */
+static void print_final_stats(int64_t total_size)
+{
+    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
+    uint64_t subtitle_size = 0;
+    uint64_t data_size = 0;
+    float percent = -1.0;
+    int i, j;
+    int pass1_used = 1;
+
+    for (i = 0; i < nb_output_streams; i++) {
+        OutputStream *ost = output_streams[i];
+        switch (ost->enc_ctx->codec_type) {
+            case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
+            case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
+            case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
+            default:                 other_size += ost->data_size; break;
+        }
+        extra_size += ost->enc_ctx->extradata_size;
+        data_size += ost->data_size;
+        /* NOTE(review): mixes AV_CODEC_FLAG_PASS1 with legacy CODEC_FLAG_PASS2
+         * — confirm both names are intended here */
+        if (   (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))
+            != AV_CODEC_FLAG_PASS1)
+            pass1_used = 0;
+    }
+
+    if (data_size && total_size>0 && total_size >= data_size)
+        percent = 100.0 * (total_size - data_size) / data_size;
+
+    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
+           video_size / 1024.0,
+           audio_size / 1024.0,
+           subtitle_size / 1024.0,
+           other_size / 1024.0,
+           extra_size / 1024.0);
+    if (percent >= 0.0)
+        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
+    else
+        av_log(NULL, AV_LOG_INFO, "unknown");
+    av_log(NULL, AV_LOG_INFO, "\n");
+
+    /* print verbose per-stream stats */
+    for (i = 0; i < nb_input_files; i++) {
+        InputFile *f = input_files[i];
+        uint64_t total_packets = 0, total_size = 0;
+
+        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
+               i, f->ctx->filename);
+
+        for (j = 0; j < f->nb_streams; j++) {
+            InputStream *ist = input_streams[f->ist_index + j];
+            enum AVMediaType type = ist->dec_ctx->codec_type;
+
+            total_size += ist->data_size;
+            total_packets += ist->nb_packets;
+
+            av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
+                   i, j, media_type_string(type));
+            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
+                   ist->nb_packets, ist->data_size);
+
+            if (ist->decoding_needed) {
+                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
+                       ist->frames_decoded);
+                if (type == AVMEDIA_TYPE_AUDIO)
+                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
+                av_log(NULL, AV_LOG_VERBOSE, "; ");
+            }
+
+            av_log(NULL, AV_LOG_VERBOSE, "\n");
+        }
+
+        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
+               total_packets, total_size);
+    }
+
+    for (i = 0; i < nb_output_files; i++) {
+        OutputFile *of = output_files[i];
+        uint64_t total_packets = 0, total_size = 0;
+
+        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
+               i, of->ctx->filename);
+
+        for (j = 0; j < of->ctx->nb_streams; j++) {
+            OutputStream *ost = output_streams[of->ost_index + j];
+            enum AVMediaType type = ost->enc_ctx->codec_type;
+
+            total_size += ost->data_size;
+            total_packets += ost->packets_written;
+
+            av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
+                   i, j, media_type_string(type));
+            if (ost->encoding_needed) {
+                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
+                       ost->frames_encoded);
+                if (type == AVMEDIA_TYPE_AUDIO)
+                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
+                av_log(NULL, AV_LOG_VERBOSE, "; ");
+            }
+
+            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
+                   ost->packets_written, ost->data_size);
+
+            av_log(NULL, AV_LOG_VERBOSE, "\n");
+        }
+
+        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
+               total_packets, total_size);
+    }
+    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
+        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
+        if (pass1_used) {
+            av_log(NULL, AV_LOG_WARNING, "\n");
+        } else {
+            av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
+        }
+    }
+}
+
/* Print a one-line transcoding status report (frame count, fps, quality,
 * size, time, bitrate, dup/drop counts) to stderr, and optionally emit a
 * machine-readable key=value progress block to progress_avio.
 * Called repeatedly from the main loop; is_last_report also triggers the
 * final per-stream statistics via print_final_stats(). */
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
    char buf[1024];
    AVBPrint buf_script;
    OutputStream *ost;
    AVFormatContext *oc;
    int64_t total_size;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate;
    int64_t pts = INT64_MIN;
    /* static state: rate-limits intermediate reports and accumulates the
       QP histogram across calls */
    static int64_t last_time = -1;
    static int qp_histogram[52];
    int hours, mins, secs, us;

    if (!print_stats && !is_last_report && !progress_avio)
        return;

    /* intermediate reports are throttled to at most one per 500ms */
    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
            return;
        }
        if ((cur_time - last_time) < 500000)
            return;
        last_time = cur_time;
    }


    /* size/bitrate are taken from the first output file only */
    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    buf[0] = '\0';
    vid = 0;
    av_bprint_init(&buf_script, 0, 1);
    for (i = 0; i < nb_output_streams; i++) {
        float q = -1;
        ost = output_streams[i];
        enc = ost->enc_ctx;
        if (!ost->stream_copy)
            q = ost->quality / (float) FF_QP2LAMBDA;

        /* only the quality is reported for video streams after the first */
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        }
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float fps, t = (cur_time-timer_start) / 1000000.0;

            frame_number = ost->frame_number;
            fps = t > 1 ? frame_number / t : 0;
            /* "fps < 9.95" selects one decimal place for small rates, none
               otherwise (it is the '*' precision argument of %3.*f) */
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
                     frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%d\n", frame_number);
            av_bprintf(&buf_script, "fps=%.1f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
            if (is_last_report)
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                /* log2-compressed histogram, one hex digit per QP bucket */
                for (j = 0; j < 32; j++)
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
            }

            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (enc->coded_frame || is_last_report)) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                double p;
                char type[3] = { 'Y','U','V' };
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        /* accumulated error over the whole run */
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        /* per-frame error from the last coded frame */
                        error = enc->coded_frame->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    if (j)
                        scale /= 4;  /* chroma planes are quarter size (assumes 4:2:0) */
                    error_sum += error;
                    scale_sum += scale;
                    p = psnr(error / scale);
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                }
                p = psnr(error_sum / scale_sum);
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
            }
            vid = 1;
        }
        /* compute min output value */
        if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
            pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                          ost->st->time_base, AV_TIME_BASE_Q));
        if (is_last_report)
            nb_frames_drop += ost->last_droped;
    }

    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;
    mins = secs / 60;
    secs %= 60;
    hours = mins / 60;
    mins %= 60;

    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;

    if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                 "size=N/A time=");
    else                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                 "size=%8.0fkB time=", total_size / 1024.0);
    if (pts < 0)
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
             "%02d:%02d:%02d.%02d ", hours, mins, secs,
             (100 * us) / AV_TIME_BASE);

    if (bitrate < 0) {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
    }else{
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
    }

    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
    av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
               hours, mins, secs, us);

    if (nb_frames_dup || nb_frames_drop)
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
                nb_frames_dup, nb_frames_drop);
    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);

    if (print_stats || is_last_report) {
        /* '\r' keeps the intermediate report on one updating line */
        const char end = is_last_report ? '\n' : '\r';
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            fprintf(stderr, "%s    %c", buf, end);
        } else
            av_log(NULL, AV_LOG_INFO, "%s    %c", buf, end);

    fflush(stderr);
    }

    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
            avio_closep(&progress_avio);
        }
    }

    if (is_last_report)
        print_final_stats(total_size);
}
+
/* Drain all delayed frames out of every active encoder at end of stream by
 * repeatedly calling the encode function with a NULL frame until it stops
 * producing packets, and mux the resulting packets.  Only audio and video
 * encoders are drained; other types stop immediately. */
static void flush_encoders(void)
{
    int i, ret;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        AVFormatContext *os = output_files[ost->file_index]->ctx;
        int stop_encoding = 0;

        if (!ost->encoding_needed)
            continue;

        /* frame_size <= 1 audio encoders buffer nothing, nothing to flush */
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
            continue;
        /* raw-picture passthrough never goes through the encoder */
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
            continue;

        for (;;) {
            int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
            const char *desc;

            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                encode = avcodec_encode_audio2;
                desc   = "Audio";
                break;
            case AVMEDIA_TYPE_VIDEO:
                encode = avcodec_encode_video2;
                desc   = "Video";
                break;
            default:
                stop_encoding = 1;
            }

            if (encode) {
                AVPacket pkt;
                int pkt_size;
                int got_packet;
                av_init_packet(&pkt);
                pkt.data = NULL;
                pkt.size = 0;

                update_benchmark(NULL);
                /* NULL frame = flush request */
                ret = encode(enc, &pkt, NULL, &got_packet);
                update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
                    exit_program(1);
                }
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
                }
                /* no packet returned: encoder is fully drained */
                if (!got_packet) {
                    stop_encoding = 1;
                    break;
                }
                /* muxer already closed: keep draining but drop the packets */
                if (ost->finished & MUXER_FINISHED) {
                    av_free_packet(&pkt);
                    continue;
                }
                av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
                pkt_size = pkt.size;
                write_frame(os, &pkt, ost);
                if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
                    do_video_stats(ost, pkt_size);
                }
            }

            if (stop_encoding)
                break;
        }
    }
}
+
+/*
+ * Check whether a packet from ist should be written into ost at this time
+ */
+static int check_output_constraints(InputStream *ist, OutputStream *ost)
+{
+ OutputFile *of = output_files[ost->file_index];
+ int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
+
+ if (ost->source_index != ist_index)
+ return 0;
+
+ if (ost->finished)
+ return 0;
+
+ if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
+ return 0;
+
+ return 1;
+}
+
/* Copy one input packet to an output stream without re-encoding: apply the
 * start-time offset, rescale timestamps into the output time base, honour
 * recording-time limits, run the parser/bitstream adjustments, and mux. */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
{
    OutputFile *of = output_files[ost->file_index];
    InputFile   *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
    int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
    AVPicture pict;
    AVPacket opkt;

    av_init_packet(&opkt);

    /* drop leading non-keyframes unless explicitly asked to keep them */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        !ost->copy_initial_nonkeyframes)
        return;

    /* drop packets that precede the requested start point (-copypriorss) */
    if (pkt->pts == AV_NOPTS_VALUE) {
        if (!ost->frame_number && ist->pts < start_time &&
            !ost->copy_prior_start)
            return;
    } else {
        if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
            !ost->copy_prior_start)
            return;
    }

    /* per-output-file -t limit */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        close_output_stream(ost);
        return;
    }

    /* per-input-file -t limit */
    if (f->recording_time != INT64_MAX) {
        start_time = f->ctx->start_time;
        if (f->start_time != AV_NOPTS_VALUE)
            start_time += f->start_time;
        if (ist->pts >= f->recording_time + start_time) {
            close_output_stream(ost);
            return;
        }
    }

    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
        ost->sync_opts++;

    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
    else
        opkt.pts = AV_NOPTS_VALUE;

    /* missing DTS: fall back to the input stream's running DTS estimate */
    if (pkt->dts == AV_NOPTS_VALUE)
        opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
    else
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
    opkt.dts -= ost_tb_start_time;

    /* audio: rescale via sample duration to avoid timestamp rounding drift */
    if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
        int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
        if(!duration)
            duration = ist->dec_ctx->frame_size;
        opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                               (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
                                               ost->st->time_base) - ost_tb_start_time;
    }

    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
    opkt.flags    = pkt->flags;

    // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
    if (  ost->enc_ctx->codec_id != AV_CODEC_ID_H264
       && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG1VIDEO
       && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG2VIDEO
       && ost->enc_ctx->codec_id != AV_CODEC_ID_VC1
       ) {
        /* av_parser_change may allocate a new buffer; wrap it so the packet
           owns (and later frees) it */
        if (av_parser_change(ost->parser, ost->st->codec,
                             &opkt.data, &opkt.size,
                             pkt->data, pkt->size,
                             pkt->flags & AV_PKT_FLAG_KEY)) {
            opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
            if (!opkt.buf)
                exit_program(1);
        }
    } else {
        opkt.data = pkt->data;
        opkt.size = pkt->size;
    }
    av_copy_packet_side_data(&opkt, pkt);

    if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
        /* store AVPicture in AVPacket, as expected by the output format */
        avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
        opkt.data = (uint8_t *)&pict;
        opkt.size = sizeof(AVPicture);
        opkt.flags |= AV_PKT_FLAG_KEY;
    }

    write_frame(of->ctx, &opkt, ost);
}
+
+int guess_input_channel_layout(InputStream *ist)
+{
+ AVCodecContext *dec = ist->dec_ctx;
+
+ if (!dec->channel_layout) {
+ char layout_name[256];
+
+ if (dec->channels > ist->guess_layout_max)
+ return 0;
+ dec->channel_layout = av_get_default_channel_layout(dec->channels);
+ if (!dec->channel_layout)
+ return 0;
+ av_get_channel_layout_string(layout_name, sizeof(layout_name),
+ dec->channels, dec->channel_layout);
+ av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
+ "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
+ }
+ return 1;
+}
+
/* Decode one audio packet (or a flush packet) from ist, advance the input
 * stream's timestamp estimates, reconfigure filtergraphs when the audio
 * parameters change, and feed the decoded frame into all attached filters.
 * Returns the decoder's byte count on success or a negative error code. */
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
{
    AVFrame *decoded_frame, *f;
    AVCodecContext *avctx = ist->dec_ctx;
    int i, ret, err = 0, resample_changed;
    AVRational decoded_frame_tb;

    /* lazily allocate the reusable frames on first call */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;

    update_benchmark(NULL);
    ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);

    if (ret >= 0 && avctx->sample_rate <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
        ret = AVERROR_INVALIDDATA;
    }

    if (*got_output || ret<0)
        decode_error_stat[ret<0] ++;

    if (ret < 0 && exit_on_error)
        exit_program(1);

    if (!*got_output || ret < 0)
        return ret;

    ist->samples_decoded += decoded_frame->nb_samples;
    ist->frames_decoded++;

#if 1
    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;
#endif

    /* detect any change in sample format/rate/channels/layout; the filter
       graph was built for the old parameters and must be rebuilt */
    resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
                       ist->resample_channels != avctx->channels ||
                       ist->resample_channel_layout != decoded_frame->channel_layout ||
                       ist->resample_sample_rate != decoded_frame->sample_rate;
    if (resample_changed) {
        char layout1[64], layout2[64];

        if (!guess_input_channel_layout(ist)) {
            av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
                   "layout for Input Stream #%d.%d\n", ist->file_index,
                   ist->st->index);
            exit_program(1);
        }
        decoded_frame->channel_layout = avctx->channel_layout;

        av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
                                     ist->resample_channel_layout);
        av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
                                     decoded_frame->channel_layout);

        av_log(NULL, AV_LOG_INFO,
               "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
               ist->file_index, ist->st->index,
               ist->resample_sample_rate,  av_get_sample_fmt_name(ist->resample_sample_fmt),
               ist->resample_channels, layout1,
               decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
               avctx->channels, layout2);

        ist->resample_sample_fmt     = decoded_frame->format;
        ist->resample_sample_rate    = decoded_frame->sample_rate;
        ist->resample_channel_layout = decoded_frame->channel_layout;
        ist->resample_channels       = avctx->channels;

        for (i = 0; i < nb_filtergraphs; i++)
            if (ist_in_filtergraph(filtergraphs[i], ist)) {
                FilterGraph *fg = filtergraphs[i];
                if (configure_filtergraph(fg) < 0) {
                    av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
                    exit_program(1);
                }
            }
    }

    /* if the decoder provides a pts, use it instead of the last packet pts.
       the decoder could be delaying output by a packet or more. */
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
        ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
        decoded_frame_tb   = avctx->time_base;
    } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = decoded_frame->pkt_pts;
        decoded_frame_tb   = ist->st->time_base;
    } else if (pkt->pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = pkt->pts;
        decoded_frame_tb   = ist->st->time_base;
    }else {
        decoded_frame->pts = ist->dts;
        decoded_frame_tb   = AV_TIME_BASE_Q;
    }
    /* consume the packet pts so following frames of the same packet don't
       reuse it */
    pkt->pts           = AV_NOPTS_VALUE;
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                              (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                              (AVRational){1, avctx->sample_rate});
    /* push the frame into every attached filter; all but the last get a
       reference, the last consumes the frame itself */
    for (i = 0; i < ist->nb_filters; i++) {
        if (i < ist->nb_filters - 1) {
            f = ist->filter_frame;
            err = av_frame_ref(f, decoded_frame);
            if (err < 0)
                break;
        } else
            f = decoded_frame;
        err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
                                           AV_BUFFERSRC_FLAG_PUSH);
        if (err == AVERROR_EOF)
            err = 0; /* ignore */
        if (err < 0)
            break;
    }
    decoded_frame->pts = AV_NOPTS_VALUE;

    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
+
/* Decode one video packet (or a flush packet) from ist, retrieve hwaccel
 * frames back to system memory when needed, update the best-effort
 * timestamp, reconfigure filtergraphs on resolution/format changes, and
 * feed the decoded frame into all attached filters. */
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
{
    AVFrame *decoded_frame, *f;
    int i, ret = 0, err = 0, resample_changed;
    int64_t best_effort_timestamp;
    AVRational *frame_sample_aspect;

    /* lazily allocate the reusable frames on first call */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;
    pkt->dts  = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);

    update_benchmark(NULL);
    ret = avcodec_decode_video2(ist->dec_ctx,
                                decoded_frame, got_output, pkt);
    update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);

    // The following line may be required in some cases where there is no parser
    // or the parser does not has_b_frames correctly
    if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
            ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
        } else
            av_log_ask_for_sample(
                ist->dec_ctx,
                "has_b_frames is larger in decoder than demuxer %d > %d ",
                ist->dec_ctx->has_b_frames,
                ist->st->codec->has_b_frames
            );
    }

    if (*got_output || ret<0)
        decode_error_stat[ret<0] ++;

    if (ret < 0 && exit_on_error)
        exit_program(1);

    /* purely diagnostic: warn when the decoded frame disagrees with the
       codec context's notion of the frame geometry */
    if (*got_output && ret >= 0) {
        if (ist->dec_ctx->width  != decoded_frame->width ||
            ist->dec_ctx->height != decoded_frame->height ||
            ist->dec_ctx->pix_fmt != decoded_frame->format) {
            av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
                decoded_frame->width,
                decoded_frame->height,
                decoded_frame->format,
                ist->dec_ctx->width,
                ist->dec_ctx->height,
                ist->dec_ctx->pix_fmt);
        }
    }

    if (!*got_output || ret < 0)
        return ret;

    if(ist->top_field_first>=0)
        decoded_frame->top_field_first = ist->top_field_first;

    ist->frames_decoded++;

    /* copy a hardware-decoded frame back to system memory before filtering */
    if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
        err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
        if (err < 0)
            goto fail;
    }
    ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;

    best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
    if(best_effort_timestamp != AV_NOPTS_VALUE)
        ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
               "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
               ist->st->index, av_ts2str(decoded_frame->pts),
               av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
               best_effort_timestamp,
               av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
               decoded_frame->key_frame, decoded_frame->pict_type,
               ist->st->time_base.num, ist->st->time_base.den);
    }

    pkt->size = 0;

    if (ist->st->sample_aspect_ratio.num)
        decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;

    /* on geometry or pixel format change the filter graphs must be rebuilt */
    resample_changed = ist->resample_width   != decoded_frame->width  ||
                       ist->resample_height  != decoded_frame->height ||
                       ist->resample_pix_fmt != decoded_frame->format;
    if (resample_changed) {
        av_log(NULL, AV_LOG_INFO,
               "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
               ist->file_index, ist->st->index,
               ist->resample_width,  ist->resample_height,  av_get_pix_fmt_name(ist->resample_pix_fmt),
               decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));

        ist->resample_width   = decoded_frame->width;
        ist->resample_height  = decoded_frame->height;
        ist->resample_pix_fmt = decoded_frame->format;

        for (i = 0; i < nb_filtergraphs; i++) {
            if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
                configure_filtergraph(filtergraphs[i]) < 0) {
                av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
                exit_program(1);
            }
        }
    }

    frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
    /* push the frame into every attached filter; all but the last get a
       reference, the last consumes the frame itself */
    for (i = 0; i < ist->nb_filters; i++) {
        if (!frame_sample_aspect->num)
            *frame_sample_aspect = ist->st->sample_aspect_ratio;

        if (i < ist->nb_filters - 1) {
            f = ist->filter_frame;
            err = av_frame_ref(f, decoded_frame);
            if (err < 0)
                break;
        } else
            f = decoded_frame;
        ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
        if (ret == AVERROR_EOF) {
            ret = 0; /* ignore */
        } else if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL,
                   "Failed to inject frame into filter network: %s\n", av_err2str(ret));
            exit_program(1);
        }
    }

fail:
    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
+
/* Decode one subtitle packet, optionally fix overlapping subtitle durations
 * (-fix_sub_duration delays output by one subtitle so the previous one can
 * be shortened), render it into the sub2video stream, and pass it to every
 * subtitle encoder fed by this input stream. */
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
{
    AVSubtitle subtitle;
    int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
                                          &subtitle, got_output, pkt);

    if (*got_output || ret<0)
        decode_error_stat[ret<0] ++;

    if (ret < 0 && exit_on_error)
        exit_program(1);

    if (ret < 0 || !*got_output) {
        /* empty packet means EOF: flush the sub2video overlay */
        if (!pkt->size)
            sub2video_flush(ist);
        return ret;
    }

    if (ist->fix_sub_duration) {
        int end = 1;
        if (ist->prev_sub.got_output) {
            /* cap the previous subtitle's display time at the gap (in ms)
               between the two subtitles */
            end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
                             1000, AV_TIME_BASE);
            if (end < ist->prev_sub.subtitle.end_display_time) {
                av_log(ist->dec_ctx, AV_LOG_DEBUG,
                       "Subtitle duration reduced from %d to %d%s\n",
                       ist->prev_sub.subtitle.end_display_time, end,
                       end <= 0 ? ", dropping it" : "");
                ist->prev_sub.subtitle.end_display_time = end;
            }
        }
        /* swap current and buffered subtitle: the previous one (now fixed)
           is what gets emitted below */
        FFSWAP(int,        *got_output, ist->prev_sub.got_output);
        FFSWAP(int,        ret,         ist->prev_sub.ret);
        FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
        if (end <= 0)
            goto out;
    }

    if (!*got_output)
        return ret;

    sub2video_update(ist, &subtitle);

    if (!subtitle.num_rects)
        goto out;

    ist->frames_decoded++;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || !ost->encoding_needed
            || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
            continue;

        do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
    }

out:
    avsubtitle_free(&subtitle);
    return ret;
}
+
+static int send_filter_eof(InputStream *ist)
+{
+ int i, ret;
+ for (i = 0; i < ist->nb_filters; i++) {
+#if 1
+ ret = av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
+#else
+ ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
+#endif
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
/* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Core per-packet driver: maintain the input stream's pts/dts estimates,
 * decode the packet (possibly containing several frames) through the
 * type-specific decoder, send filter EOF after the final flush, advance
 * timestamps for stream-copied inputs, and dispatch the packet to every
 * stream-copy output.  Returns whether the decoder produced output. */
static int process_input_packet(InputStream *ist, const AVPacket *pkt)
{
    int ret = 0, i;
    int got_output = 0;

    AVPacket avpkt;
    if (!ist->saw_first_ts) {
        /* start dts slightly negative to account for decoder delay from
           B-frame reordering */
        ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
        ist->pts = 0;
        if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
            ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
        }
        ist->saw_first_ts = 1;
    }

    if (ist->next_dts == AV_NOPTS_VALUE)
        ist->next_dts = ist->dts;
    if (ist->next_pts == AV_NOPTS_VALUE)
        ist->next_pts = ist->pts;

    if (!pkt) {
        /* EOF handling */
        av_init_packet(&avpkt);
        avpkt.data = NULL;
        avpkt.size = 0;
        goto handle_eof;
    } else {
        avpkt = *pkt;
    }

    if (pkt->dts != AV_NOPTS_VALUE) {
        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
        if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
            ist->next_pts = ist->pts = ist->dts;
    }

    // while we have more to decode or while the decoder did output something on EOF
    while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
        int duration;
    handle_eof:

        ist->pts = ist->next_pts;
        ist->dts = ist->next_dts;

        /* warn once when a decoder consumes only part of the packet, i.e.
           the packet carries more than one frame */
        if (avpkt.size && avpkt.size != pkt->size &&
            !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
            av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
                   "Multiple frames in a packet from stream %d\n", pkt->stream_index);
            ist->showed_multi_packet_warning = 1;
        }

        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ret = decode_audio    (ist, &avpkt, &got_output);
            break;
        case AVMEDIA_TYPE_VIDEO:
            ret = decode_video    (ist, &avpkt, &got_output);
            /* derive the frame duration from the packet, else from the
               decoder's frame rate (honouring repeat_pict from the parser) */
            if (avpkt.duration) {
                duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
                int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
                duration = ((int64_t)AV_TIME_BASE *
                                ist->dec_ctx->framerate.den * ticks) /
                                ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
            } else
                duration = 0;

            if(ist->dts != AV_NOPTS_VALUE && duration) {
                ist->next_dts += duration;
            }else
                ist->next_dts = AV_NOPTS_VALUE;

            if (got_output)
                ist->next_pts += duration; //FIXME the duration is not correct in some cases
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            ret = transcode_subtitles(ist, &avpkt, &got_output);
            break;
        default:
            return -1;
        }

        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
                   ist->file_index, ist->st->index, av_err2str(ret));
            if (exit_on_error)
                exit_program(1);
            break;
        }

        avpkt.dts=
        avpkt.pts= AV_NOPTS_VALUE;

        // touch data and size only if not EOF
        if (pkt) {
            /* non-audio decoders always consume the whole packet */
            if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
                ret = avpkt.size;
            avpkt.data += ret;
            avpkt.size -= ret;
        }
        if (!got_output) {
            continue;
        }
        if (got_output && !pkt)
            break;
    }

    /* after flushing, send an EOF on all the filter inputs attached to the stream */
    if (!pkt && ist->decoding_needed && !got_output) {
        int ret = send_filter_eof(ist);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
            exit_program(1);
        }
    }

    /* handle stream copy */
    if (!ist->decoding_needed) {
        ist->dts = ist->next_dts;
        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
                             ist->dec_ctx->sample_rate;
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (ist->framerate.num) {
                // TODO: Remove work-around for c99-to-c89 issue 7
                AVRational time_base_q = AV_TIME_BASE_Q;
                int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
                ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
            } else if (pkt->duration) {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if(ist->dec_ctx->framerate.num != 0) {
                int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
                ist->next_dts += ((int64_t)AV_TIME_BASE *
                                  ist->dec_ctx->framerate.den * ticks) /
                                  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
            }
            break;
        }
        ist->pts = ist->dts;
        ist->next_pts = ist->next_dts;
    }
    for (i = 0; pkt && i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
            continue;

        do_streamcopy(ist, ost, pkt);
    }

    return got_output;
}
+
+static void print_sdp(void)
+{
+ char sdp[16384];
+ int i;
+ int j;
+ AVIOContext *sdp_pb;
+ AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
+
+ if (!avc)
+ exit_program(1);
+ for (i = 0, j = 0; i < nb_output_files; i++) {
+ if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
+ avc[j] = output_files[i]->ctx;
+ j++;
+ }
+ }
+
+ av_sdp_create(avc, j, sdp, sizeof(sdp));
+
+ if (!sdp_filename) {
+ printf("SDP:\n%s\n", sdp);
+ fflush(stdout);
+ } else {
+ if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
+ } else {
+ avio_printf(sdp_pb, "SDP:\n%s", sdp);
+ avio_closep(&sdp_pb);
+ av_freep(&sdp_filename);
+ }
+ }
+
+ av_freep(&avc);
+}
+
+static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
+{
+ int i;
+ for (i = 0; hwaccels[i].name; i++)
+ if (hwaccels[i].pix_fmt == pix_fmt)
+ return &hwaccels[i];
+ return NULL;
+}
+
/* AVCodecContext.get_format callback: walk the decoder's proposed pixel
 * format list and pick the first acceptable one, initializing a matching
 * hwaccel for hardware formats.  Falls through to the first non-hwaccel
 * format when no hwaccel applies or can be initialized. */
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
{
    InputStream *ist = s->opaque;
    const enum AVPixelFormat *p;
    int ret;

    for (p = pix_fmts; *p != -1; p++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
        const HWAccel *hwaccel;

        /* first software format in the list: accept it */
        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
            break;

        /* skip hwaccels that don't match what the user asked for or what
           is already active on this stream */
        hwaccel = get_hwaccel(*p);
        if (!hwaccel ||
            (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
            (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
            continue;

        ret = hwaccel->init(s);
        if (ret < 0) {
            /* init failure is fatal only when this hwaccel was explicitly
               requested; under HWACCEL_AUTO we keep trying other formats */
            if (ist->hwaccel_id == hwaccel->id) {
                av_log(NULL, AV_LOG_FATAL,
                       "%s hwaccel requested for input stream #%d:%d, "
                       "but cannot be initialized.\n", hwaccel->name,
                       ist->file_index, ist->st->index);
                return AV_PIX_FMT_NONE;
            }
            continue;
        }
        ist->active_hwaccel_id = hwaccel->id;
        ist->hwaccel_pix_fmt   = *p;
        break;
    }

    return *p;
}
+
+static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
+{
+ InputStream *ist = s->opaque;
+
+ if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
+ return ist->hwaccel_get_buffer(s, frame, flags);
+
+ return avcodec_default_get_buffer2(s, frame, flags);
+}
+
/* Open the decoder for one input stream (when decoding is needed), wiring
 * up the hwaccel callbacks and per-stream decoder options, and initialize
 * the timestamp state.  On failure, writes a message into error and
 * returns a negative error code. */
static int init_input_stream(int ist_index, char *error, int error_len)
{
    int ret;
    InputStream *ist = input_streams[ist_index];

    if (ist->decoding_needed) {
        AVCodec *codec = ist->dec;
        if (!codec) {
            snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
                    avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
            return AVERROR(EINVAL);
        }

        /* hand this InputStream to the get_format/get_buffer callbacks */
        ist->dec_ctx->opaque                = ist;
        ist->dec_ctx->get_format            = get_format;
        ist->dec_ctx->get_buffer2           = get_buffer;
        ist->dec_ctx->thread_safe_callbacks = 1;

        av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
           (ist->decoding_needed & DECODING_FOR_OST)) {
            av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
            if (ist->decoding_needed & DECODING_FOR_FILTER)
                av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
        }

        /* default to automatic thread count unless the user chose one */
        if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
            av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
        if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 0);

            snprintf(error, error_len,
                     "Error while opening decoder for input stream "
                     "#%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }
        /* any options the decoder did not consume are a user error */
        assert_avoptions(ist->decoder_opts);
    }

    ist->next_pts = AV_NOPTS_VALUE;
    ist->next_dts = AV_NOPTS_VALUE;

    return 0;
}
+
+static InputStream *get_input_stream(OutputStream *ost)
+{
+ if (ost->source_index >= 0)
+ return input_streams[ost->source_index];
+ return NULL;
+}
+
/* qsort comparator for int64_t values: -1, 0 or +1 in ascending order. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t x = *(const int64_t *)a;
    const int64_t y = *(const int64_t *)b;

    if (x < y)
        return -1;
    return x > y ? 1 : 0;
}
+
+/* Open the encoder for 'ost' (when encoding is needed) and finish setting
+ * up its codec context.  On failure, writes a message into 'error' (up to
+ * 'error_len' bytes) and returns a negative AVERROR code. */
+static int init_output_stream(OutputStream *ost, char *error, int error_len)
+{
+    int ret = 0;
+
+    if (ost->encoding_needed) {
+        AVCodec      *codec = ost->enc;
+        AVCodecContext *dec = NULL;
+        InputStream *ist;
+
+        if ((ist = get_input_stream(ost)))
+            dec = ist->dec_ctx;
+        if (dec && dec->subtitle_header) {
+            /* ASS code assumes this buffer is null terminated so add extra byte. */
+            ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
+            if (!ost->enc_ctx->subtitle_header)
+                return AVERROR(ENOMEM);
+            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
+            ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
+        }
+        /* Default to automatic thread count unless the user set one. */
+        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
+            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
+        av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);
+
+        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
+            if (ret == AVERROR_EXPERIMENTAL)
+                abort_codec_experimental(codec, 1);
+            snprintf(error, error_len,
+                     "Error while opening encoder for output stream #%d:%d - "
+                     "maybe incorrect parameters such as bit_rate, rate, width or height",
+                     ost->file_index, ost->index);
+            return ret;
+        }
+        /* Fixed-frame-size audio encoders need the buffersink to deliver
+         * frames of exactly enc_ctx->frame_size samples. */
+        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
++            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
+            av_buffersink_set_frame_size(ost->filter->filter,
+                                            ost->enc_ctx->frame_size);
+        assert_avoptions(ost->encoder_opts);
+        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
+            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
+                                         " It takes bits/s as argument, not kbits/s\n");
+
+        /* Propagate the opened encoder parameters to the muxer-visible
+         * stream codec context. */
+        ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
+        if (ret < 0) {
+            av_log(NULL, AV_LOG_FATAL,
+                   "Error initializing the output stream codec context.\n");
+            exit_program(1);
+        }
+
+        // copy timebase while removing common factors
+        ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
+        ost->st->codec->codec= ost->enc_ctx->codec;
+    } else {
+        /* Stream copy: only apply user codec options to the context. */
+        ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
+        if (ret < 0) {
+            av_log(NULL, AV_LOG_FATAL,
+                "Error setting up codec context options.\n");
+            return ret;
+        }
+        // copy timebase while removing common factors
+        ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
+    }
+
+    return ret;
+}
+
+/* Parse the -force_key_frames specification 'kf' (comma-separated times,
+ * or "chapters[+delta]" to force keyframes at chapter starts) into a
+ * sorted array of timestamps in avctx->time_base units, stored in
+ * ost->forced_kf_pts / ost->forced_kf_count.  Mutates 'kf' in place
+ * (commas are replaced by NULs).  Exits the program on allocation
+ * failure. */
+static void parse_forced_key_frames(char *kf, OutputStream *ost,
+                                    AVCodecContext *avctx)
+{
+    char *p;
+    int n = 1, i, size, index = 0;
+    int64_t t, *pts;
+
+    /* Count entries: one more than the number of commas. */
+    for (p = kf; *p; p++)
+        if (*p == ',')
+            n++;
+    size = n;
+    pts = av_malloc_array(size, sizeof(*pts));
+    if (!pts) {
+        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
+        exit_program(1);
+    }
+
+    p = kf;
+    for (i = 0; i < n; i++) {
+        char *next = strchr(p, ',');
+
+        if (next)
+            *next++ = 0;
+
+        if (!memcmp(p, "chapters", 8)) {
+
+            /* Expand "chapters" into one entry per chapter start, with an
+             * optional offset appended after the keyword (e.g. chapters-0.1). */
+            AVFormatContext *avf = output_files[ost->file_index]->ctx;
+            int j;
+
+            if (avf->nb_chapters > INT_MAX - size ||
+                !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
+                                     sizeof(*pts)))) {
+                av_log(NULL, AV_LOG_FATAL,
+                       "Could not allocate forced key frames array.\n");
+                exit_program(1);
+            }
+            t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
+            t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
+
+            for (j = 0; j < avf->nb_chapters; j++) {
+                AVChapter *c = avf->chapters[j];
+                av_assert1(index < size);
+                pts[index++] = av_rescale_q(c->start, c->time_base,
+                                            avctx->time_base) + t;
+            }
+
+        } else {
+
+            t = parse_time_or_die("force_key_frames", p, 1);
+            av_assert1(index < size);
+            pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
+
+        }
+
+        p = next;
+    }
+
+    av_assert0(index == size);
+    /* Keep the timestamps sorted so the encoder loop can scan linearly. */
+    qsort(pts, size, sizeof(*pts), compare_int64);
+    ost->forced_kf_count = size;
+    ost->forced_kf_pts   = pts;
+}
+
+/* Warn (once per stream) when a packet arrives for a stream that appeared
+ * after the input file was opened; nb_streams_warn tracks the highest
+ * stream index already reported. */
+static void report_new_stream(int input_index, AVPacket *pkt)
+{
+    InputFile *file = input_files[input_index];
+    AVStream *st = file->ctx->streams[pkt->stream_index];
+
+    if (pkt->stream_index < file->nb_streams_warn)
+        return;
+    av_log(file->ctx, AV_LOG_WARNING,
+           "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
+           av_get_media_type_string(st->codec->codec_type),
+           input_index, pkt->stream_index,
+           pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
+    file->nb_streams_warn = pkt->stream_index + 1;
+}
+
+/* Set the "encoder" metadata tag on ost's stream to the lavc identifier
+ * plus the encoder name, unless the user already provided one.  In
+ * bitexact mode (format or codec flag) only "Lavc <name>" is written so
+ * output stays reproducible across library versions. */
+static void set_encoder_id(OutputFile *of, OutputStream *ost)
+{
+    AVDictionaryEntry *e;
+
+    uint8_t *encoder_string;
+    int encoder_string_len;
+    int format_flags = 0;
+    int codec_flags = 0;
+
+    if (av_dict_get(ost->st->metadata, "encoder",  NULL, 0))
+        return;
+
+    /* Evaluate user-supplied fflags/flags strings to detect +bitexact. */
+    e = av_dict_get(of->opts, "fflags", NULL, 0);
+    if (e) {
+        const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
+        if (!o)
+            return;
+        av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
+    }
+    e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
+    if (e) {
+        const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
+        if (!o)
+            return;
+        av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
+    }
+
+    /* "+ 2": separator space and trailing NUL. */
+    encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
+    encoder_string     = av_mallocz(encoder_string_len);
+    if (!encoder_string)
+        exit_program(1);
+
+    if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
+        av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
+    else
+        av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
+    av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
+    av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
+                AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
+}
+
+/* One-time initialization of the whole transcoding pipeline:
+ *  - bind complex-filtergraph outputs to a source input stream,
+ *  - derive encoding/stream-copy parameters for every output stream,
+ *  - open all encoders and decoders,
+ *  - write all output file headers,
+ *  - and finally dump the stream mapping to the log.
+ * Returns 0 on success or a negative AVERROR code (the accumulated
+ * 'error' message is logged before returning). */
+static int transcode_init(void)
+{
+    int ret = 0, i, j, k;
+    AVFormatContext *oc;
+    OutputStream *ost;
+    InputStream *ist;
+    char error[1024] = {0};
+    int want_sdp = 1;
+
+    /* For outputs fed from a single-input complex filtergraph, record the
+     * feeding input stream as source_index (used for reporting below). */
+    for (i = 0; i < nb_filtergraphs; i++) {
+        FilterGraph *fg = filtergraphs[i];
+        for (j = 0; j < fg->nb_outputs; j++) {
+            OutputFilter *ofilter = fg->outputs[j];
+            if (!ofilter->ost || ofilter->ost->source_index >= 0)
+                continue;
+            if (fg->nb_inputs != 1)
+                continue;
+            for (k = nb_input_streams-1; k >= 0 ; k--)
+                if (fg->inputs[0]->ist == input_streams[k])
+                    break;
+            ofilter->ost->source_index = k;
+        }
+    }
+
+    /* init framerate emulation */
+    for (i = 0; i < nb_input_files; i++) {
+        InputFile *ifile = input_files[i];
+        if (ifile->rate_emu)
+            for (j = 0; j < ifile->nb_streams; j++)
+                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
+    }
+
+    /* for each output stream, we compute the right encoding parameters */
+    for (i = 0; i < nb_output_streams; i++) {
+        AVCodecContext *enc_ctx;
+        AVCodecContext *dec_ctx = NULL;
+        ost = output_streams[i];
+        oc  = output_files[ost->file_index]->ctx;
+        ist = get_input_stream(ost);
+
+        if (ost->attachment_filename)
+            continue;
+
+        enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
+
+        if (ist) {
+            dec_ctx = ist->dec_ctx;
+
+            ost->st->disposition          = ist->st->disposition;
+            enc_ctx->bits_per_raw_sample    = dec_ctx->bits_per_raw_sample;
+            enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
+        } else {
+            /* No source stream: mark as default if it is the only stream
+             * of its media type in the output file. */
+            for (j=0; j<oc->nb_streams; j++) {
+                AVStream *st = oc->streams[j];
+                if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
+                    break;
+            }
+            if (j == oc->nb_streams)
+                if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
+                    ost->st->disposition = AV_DISPOSITION_DEFAULT;
+        }
+
+        if (ost->stream_copy) {
+            AVRational sar;
+            uint64_t extra_size;
+
+            av_assert0(ist && !ost->filter);
+
+            extra_size = (uint64_t)dec_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
+
+            if (extra_size > INT_MAX) {
+                return AVERROR(EINVAL);
+            }
+
+            /* if stream_copy is selected, no need to decode or encode */
+            enc_ctx->codec_id   = dec_ctx->codec_id;
+            enc_ctx->codec_type = dec_ctx->codec_type;
+
+            if (!enc_ctx->codec_tag) {
+                unsigned int codec_tag;
+                /* Keep the input codec tag only when the muxer has no tag
+                 * table, or the tag round-trips to the same codec id. */
+                if (!oc->oformat->codec_tag ||
+                     av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
+                     !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
+                    enc_ctx->codec_tag = dec_ctx->codec_tag;
+            }
+
+            enc_ctx->bit_rate       = dec_ctx->bit_rate;
+            enc_ctx->rc_max_rate    = dec_ctx->rc_max_rate;
+            enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
+            enc_ctx->field_order    = dec_ctx->field_order;
+            if (dec_ctx->extradata_size) {
+                enc_ctx->extradata      = av_mallocz(extra_size);
+                if (!enc_ctx->extradata) {
+                    return AVERROR(ENOMEM);
+                }
+                memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
+            }
+            enc_ctx->extradata_size= dec_ctx->extradata_size;
+            enc_ctx->bits_per_coded_sample  = dec_ctx->bits_per_coded_sample;
+
+            enc_ctx->time_base = ist->st->time_base;
+            /*
+             * Avi is a special case here because it supports variable fps but
+             * having the fps and timebase differe significantly adds quite some
+             * overhead
+             */
+            if(!strcmp(oc->oformat->name, "avi")) {
+                if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
+                               && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
+                               && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
+                               && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
+                     || copy_tb==2){
+                    enc_ctx->time_base.num = ist->st->r_frame_rate.den;
+                    enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
+                    enc_ctx->ticks_per_frame = 2;
+                } else if (   copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
+                                 && av_q2d(ist->st->time_base) < 1.0/500
+                    || copy_tb==0){
+                    enc_ctx->time_base = dec_ctx->time_base;
+                    enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
+                    enc_ctx->time_base.den *= 2;
+                    enc_ctx->ticks_per_frame = 2;
+                }
+            } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
+                      && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
+                      && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
+                      && strcmp(oc->oformat->name, "f4v")
+            ) {
+                if(   copy_tb<0 && dec_ctx->time_base.den
+                                && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
+                                && av_q2d(ist->st->time_base) < 1.0/500
+                   || copy_tb==0){
+                    enc_ctx->time_base = dec_ctx->time_base;
+                    enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
+                }
+            }
+            /* tmcd (timecode) tracks keep the decoder timebase when it
+             * looks like a plausible frame rate (<= ~120 fps). */
+            if (   enc_ctx->codec_tag == AV_RL32("tmcd")
+                && dec_ctx->time_base.num < dec_ctx->time_base.den
+                && dec_ctx->time_base.num > 0
+                && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
+                enc_ctx->time_base = dec_ctx->time_base;
+            }
+
+            if (ist && !ost->frame_rate.num)
+                ost->frame_rate = ist->framerate;
+            if(ost->frame_rate.num)
+                enc_ctx->time_base = av_inv_q(ost->frame_rate);
+
+            av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
+                        enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
+
+            /* Copy stream-level side data, except a display matrix that the
+             * user overrode with -metadata rotate / autorotate. */
+            if (ist->st->nb_side_data) {
+                ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
+                                                      sizeof(*ist->st->side_data));
+                if (!ost->st->side_data)
+                    return AVERROR(ENOMEM);
+
+                ost->st->nb_side_data = 0;
+                for (j = 0; j < ist->st->nb_side_data; j++) {
+                    const AVPacketSideData *sd_src = &ist->st->side_data[j];
+                    AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
+
+                    if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
+                        continue;
+
+                    sd_dst->data = av_malloc(sd_src->size);
+                    if (!sd_dst->data)
+                        return AVERROR(ENOMEM);
+                    memcpy(sd_dst->data, sd_src->data, sd_src->size);
+                    sd_dst->size = sd_src->size;
+                    sd_dst->type = sd_src->type;
+                    ost->st->nb_side_data++;
+                }
+            }
+
+            ost->parser = av_parser_init(enc_ctx->codec_id);
+
+            switch (enc_ctx->codec_type) {
+            case AVMEDIA_TYPE_AUDIO:
+                if (audio_volume != 256) {
+                    av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
+                    exit_program(1);
+                }
+                enc_ctx->channel_layout     = dec_ctx->channel_layout;
+                enc_ctx->sample_rate        = dec_ctx->sample_rate;
+                enc_ctx->channels           = dec_ctx->channels;
+                enc_ctx->frame_size         = dec_ctx->frame_size;
+                enc_ctx->audio_service_type = dec_ctx->audio_service_type;
+                enc_ctx->block_align        = dec_ctx->block_align;
+                enc_ctx->initial_padding    = dec_ctx->delay;
+#if FF_API_AUDIOENC_DELAY
+                enc_ctx->delay              = dec_ctx->delay;
+#endif
+                /* block_align is meaningless for MP3/AC3 frames; clear
+                 * known-bogus values so muxers do not trip over them. */
+                if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
+                    enc_ctx->block_align= 0;
+                if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
+                    enc_ctx->block_align= 0;
+                break;
+            case AVMEDIA_TYPE_VIDEO:
+                enc_ctx->pix_fmt            = dec_ctx->pix_fmt;
+                enc_ctx->width              = dec_ctx->width;
+                enc_ctx->height             = dec_ctx->height;
+                enc_ctx->has_b_frames       = dec_ctx->has_b_frames;
+                if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
+                    sar =
+                        av_mul_q(ost->frame_aspect_ratio,
+                                 (AVRational){ enc_ctx->height, enc_ctx->width });
+                    av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
+                           "with stream copy may produce invalid files\n");
+                }
+                else if (ist->st->sample_aspect_ratio.num)
+                    sar = ist->st->sample_aspect_ratio;
+                else
+                    sar = dec_ctx->sample_aspect_ratio;
+                ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
+                ost->st->avg_frame_rate = ist->st->avg_frame_rate;
+                ost->st->r_frame_rate = ist->st->r_frame_rate;
+                break;
+            case AVMEDIA_TYPE_SUBTITLE:
+                enc_ctx->width  = dec_ctx->width;
+                enc_ctx->height = dec_ctx->height;
+                break;
+            case AVMEDIA_TYPE_UNKNOWN:
+            case AVMEDIA_TYPE_DATA:
+            case AVMEDIA_TYPE_ATTACHMENT:
+                break;
+            default:
+                abort();
+            }
+        } else {
+            /* Encoding path: pick an encoder, set up a simple filtergraph
+             * if none exists, then derive per-media-type parameters from
+             * the filter output. */
+            if (!ost->enc)
+                ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
+            if (!ost->enc) {
+                /* should only happen when a default codec is not present. */
+                snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
+                         avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
+                ret = AVERROR(EINVAL);
+                goto dump_format;
+            }
+
+            set_encoder_id(output_files[ost->file_index], ost);
+
+            if (!ost->filter &&
+                (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
+                 enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
+                    FilterGraph *fg;
+                    fg = init_simple_filtergraph(ist, ost);
+                    if (configure_filtergraph(fg)) {
+                        av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
+                        exit_program(1);
+                    }
+            }
+
+            if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+                /* Frame-rate fallbacks: filter output -> -r -> r_frame_rate
+                 * -> hardcoded 25 fps. */
+                if (!ost->frame_rate.num)
+                    ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
+                if (ist && !ost->frame_rate.num)
+                    ost->frame_rate = ist->framerate;
+                if (ist && !ost->frame_rate.num)
+                    ost->frame_rate = ist->st->r_frame_rate;
+                if (ist && !ost->frame_rate.num) {
+                    ost->frame_rate = (AVRational){25, 1};
+                    av_log(NULL, AV_LOG_WARNING,
+                           "No information "
+                           "about the input framerate is available. Falling "
+                           "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
+                           "if you want a different framerate.\n",
+                           ost->file_index, ost->index);
+                }
+//                    ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
+                if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
+                    int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
+                    ost->frame_rate = ost->enc->supported_framerates[idx];
+                }
+                // reduce frame rate for mpeg4 to be within the spec limits
+                if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
+                    av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
+                              ost->frame_rate.num, ost->frame_rate.den, 65535);
+                }
+            }
+
+            switch (enc_ctx->codec_type) {
+            case AVMEDIA_TYPE_AUDIO:
+                enc_ctx->sample_fmt     = ost->filter->filter->inputs[0]->format;
+                enc_ctx->sample_rate    = ost->filter->filter->inputs[0]->sample_rate;
+                enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
+                enc_ctx->channels       = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
+                enc_ctx->time_base      = (AVRational){ 1, enc_ctx->sample_rate };
+                break;
+            case AVMEDIA_TYPE_VIDEO:
+                enc_ctx->time_base = av_inv_q(ost->frame_rate);
+                if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
+                    enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
+                if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
+                   && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
+                    av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
+                                               "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
+                }
+                for (j = 0; j < ost->forced_kf_count; j++)
+                    ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
+                                                         AV_TIME_BASE_Q,
+                                                         enc_ctx->time_base);
+
+                enc_ctx->width  = ost->filter->filter->inputs[0]->w;
+                enc_ctx->height = ost->filter->filter->inputs[0]->h;
+                enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
+                    ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
+                    av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
+                    ost->filter->filter->inputs[0]->sample_aspect_ratio;
+                if (!strncmp(ost->enc->name, "libx264", 7) &&
+                    enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
+                    ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
+                    av_log(NULL, AV_LOG_WARNING,
+                           "No pixel format specified, %s for H.264 encoding chosen.\n"
+                           "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
+                           av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
+                if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
+                    enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
+                    ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
+                    av_log(NULL, AV_LOG_WARNING,
+                           "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
+                           "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
+                           av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
+                enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
+
+                ost->st->avg_frame_rate = ost->frame_rate;
+
+                if (!dec_ctx ||
+                    enc_ctx->width   != dec_ctx->width  ||
+                    enc_ctx->height  != dec_ctx->height ||
+                    enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
+                    enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
+                }
+
+                if (ost->forced_keyframes) {
+                    if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
+                        ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
+                                            forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
+                        if (ret < 0) {
+                            av_log(NULL, AV_LOG_ERROR,
+                                   "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
+                            return ret;
+                        }
+                        ost->forced_keyframes_expr_const_values[FKF_N] = 0;
+                        ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
+                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
+                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
+
+            // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
+            // parse it only for static kf timings
+                    } else if(strncmp(ost->forced_keyframes, "source", 6)) {
+                        parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
+                    }
+                }
+                break;
+            case AVMEDIA_TYPE_SUBTITLE:
+                enc_ctx->time_base = (AVRational){1, 1000};
+                if (!enc_ctx->width) {
+                    enc_ctx->width     = input_streams[ost->source_index]->st->codec->width;
+                    enc_ctx->height    = input_streams[ost->source_index]->st->codec->height;
+                }
+                break;
+            case AVMEDIA_TYPE_DATA:
+                break;
+            default:
+                abort();
+                break;
+            }
+        }
+
+        /* Parse the user's -disposition string via a throwaway AVOption
+         * flags table. */
+        if (ost->disposition) {
+            static const AVOption opts[] = {
+                { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
+                { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
+                { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
+                { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
+                { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
+                { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
+                { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
+                { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
+                { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
+                { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
+                { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
+                { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
+                { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
+                { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
+                { NULL },
+            };
+            static const AVClass class = {
+                .class_name = "",
+                .item_name  = av_default_item_name,
+                .option     = opts,
+                .version    = LIBAVUTIL_VERSION_INT,
+            };
+            const AVClass *pclass = &class;
+
+            ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
+            if (ret < 0)
+                goto dump_format;
+        }
+    }
+
+    /* open each encoder */
+    for (i = 0; i < nb_output_streams; i++) {
+        ret = init_output_stream(output_streams[i], error, sizeof(error));
+        if (ret < 0)
+            goto dump_format;
+    }
+
+    /* init input streams */
+    for (i = 0; i < nb_input_streams; i++)
+        if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
+            for (i = 0; i < nb_output_streams; i++) {
+                ost = output_streams[i];
+                avcodec_close(ost->enc_ctx);
+            }
+            goto dump_format;
+        }
+
+    /* discard unused programs */
+    for (i = 0; i < nb_input_files; i++) {
+        InputFile *ifile = input_files[i];
+        for (j = 0; j < ifile->ctx->nb_programs; j++) {
+            AVProgram *p = ifile->ctx->programs[j];
+            int discard  = AVDISCARD_ALL;
+
+            for (k = 0; k < p->nb_stream_indexes; k++)
+                if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
+                    discard = AVDISCARD_DEFAULT;
+                    break;
+                }
+            p->discard = discard;
+        }
+    }
+
+    /* open files and write file headers */
+    for (i = 0; i < nb_output_files; i++) {
+        oc = output_files[i]->ctx;
+        oc->interrupt_callback = int_cb;
+        if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
+            snprintf(error, sizeof(error),
+                     "Could not write header for output file #%d "
+                     "(incorrect codec parameters ?): %s",
+                     i, av_err2str(ret));
+            ret = AVERROR(EINVAL);
+            goto dump_format;
+        }
+//         assert_avoptions(output_files[i]->opts);
+        if (strcmp(oc->oformat->name, "rtp")) {
+            want_sdp = 0;
+        }
+    }
+
+ dump_format:
+    /* dump the file output parameters - cannot be done before in case
+       of stream copy */
+    for (i = 0; i < nb_output_files; i++) {
+        av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
+    }
+
+    /* dump the stream mapping */
+    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
+    for (i = 0; i < nb_input_streams; i++) {
+        ist = input_streams[i];
+
+        for (j = 0; j < ist->nb_filters; j++) {
+            if (ist->filters[j]->graph->graph_desc) {
+                av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
+                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
+                       ist->filters[j]->name);
+                if (nb_filtergraphs > 1)
+                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
+                av_log(NULL, AV_LOG_INFO, "\n");
+            }
+        }
+    }
+
+    for (i = 0; i < nb_output_streams; i++) {
+        ost = output_streams[i];
+
+        if (ost->attachment_filename) {
+            /* an attached file */
+            av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
+                   ost->attachment_filename, ost->file_index, ost->index);
+            continue;
+        }
+
+        if (ost->filter && ost->filter->graph->graph_desc) {
+            /* output from a complex graph */
+            av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
+            if (nb_filtergraphs > 1)
+                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
+
+            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
+                   ost->index, ost->enc ? ost->enc->name : "?");
+            continue;
+        }
+
+        av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
+               input_streams[ost->source_index]->file_index,
+               input_streams[ost->source_index]->st->index,
+               ost->file_index,
+               ost->index);
+        if (ost->sync_ist != input_streams[ost->source_index])
+            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
+                   ost->sync_ist->file_index,
+                   ost->sync_ist->st->index);
+        if (ost->stream_copy)
+            av_log(NULL, AV_LOG_INFO, " (copy)");
+        else {
+            /* Show "native" when the codec name equals the descriptor name,
+             * i.e. the built-in implementation is used. */
+            const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
+            const AVCodec *out_codec   = ost->enc;
+            const char *decoder_name   = "?";
+            const char *in_codec_name  = "?";
+            const char *encoder_name   = "?";
+            const char *out_codec_name = "?";
+            const AVCodecDescriptor *desc;
+
+            if (in_codec) {
+                decoder_name  = in_codec->name;
+                desc = avcodec_descriptor_get(in_codec->id);
+                if (desc)
+                    in_codec_name = desc->name;
+                if (!strcmp(decoder_name, in_codec_name))
+                    decoder_name = "native";
+            }
+
+            if (out_codec) {
+                encoder_name   = out_codec->name;
+                desc = avcodec_descriptor_get(out_codec->id);
+                if (desc)
+                    out_codec_name = desc->name;
+                if (!strcmp(encoder_name, out_codec_name))
+                    encoder_name = "native";
+            }
+
+            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
+                   in_codec_name, decoder_name,
+                   out_codec_name, encoder_name);
+        }
+        av_log(NULL, AV_LOG_INFO, "\n");
+    }
+
+    if (ret) {
+        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
+        return ret;
+    }
+
+    if (sdp_filename || want_sdp) {
+        print_sdp();
+    }
+
+    transcode_init_done = 1;
+
+    return 0;
+}
+
+/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
+static int need_output(void)
+{
+    int i;
+
+    for (i = 0; i < nb_output_streams; i++) {
+        OutputStream *ost    = output_streams[i];
+        OutputFile *of       = output_files[ost->file_index];
+        AVFormatContext *os  = output_files[ost->file_index]->ctx;
+
+        /* A stream is done when it is finished or its file hit -fs. */
+        if (ost->finished ||
+            (os->pb && avio_tell(os->pb) >= of->limit_filesize))
+            continue;
+        /* -frames limit reached: close every stream of this file. */
+        if (ost->frame_number >= ost->max_frames) {
+            int j;
+            for (j = 0; j < of->ctx->nb_streams; j++)
+                close_output_stream(output_streams[of->ost_index + j]);
+            continue;
+        }
+
+        return 1;
+    }
+
+    return 0;
+}
+
+/**
+ * Select the output stream to process.
+ *
+ * Picks the unfinished stream with the smallest current DTS (rescaled to
+ * AV_TIME_BASE_Q) so all outputs advance roughly in lockstep.
+ *
+ * @return selected output stream, or NULL if none available
+ */
+static OutputStream *choose_output(void)
+{
+    int i;
+    int64_t opts_min = INT64_MAX;
+    OutputStream *ost_min = NULL;
+
+    for (i = 0; i < nb_output_streams; i++) {
+        OutputStream *ost = output_streams[i];
+        int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
+                                    AV_TIME_BASE_Q);
+        /* An "unavailable" minimum forces the caller to wait (NULL). */
+        if (!ost->finished && opts < opts_min) {
+            opts_min = opts;
+            ost_min  = ost->unavailable ? NULL : ost;
+        }
+    }
+    return ost_min;
+}
+
+/* Poll the keyboard (at most every 100ms, never in daemon mode) and act
+ * on interactive single-key commands: quit, verbosity, debug modes,
+ * filtergraph commands, QP histogram, help.  Returns AVERROR_EXIT when
+ * the user asked to stop, 0 otherwise. */
+static int check_keyboard_interaction(int64_t cur_time)
+{
+    int i, ret, key;
+    static int64_t last_time;
+    if (received_nb_signals)
+        return AVERROR_EXIT;
+    /* read_key() returns 0 on EOF */
+    if(cur_time - last_time >= 100000 && !run_as_daemon){
+        key =  read_key();
+        last_time = cur_time;
+    }else
+        key = -1;
+    if (key == 'q')
+        return AVERROR_EXIT;
+    if (key == '+') av_log_set_level(av_log_get_level()+10);
+    if (key == '-') av_log_set_level(av_log_get_level()-10);
+    if (key == 's') qp_hist     ^= 1;
+    if (key == 'h'){
+        /* Cycle: off -> packet dump -> packet+hex dump -> off. */
+        if (do_hex_dump){
+            do_hex_dump = do_pkt_dump = 0;
+        } else if(do_pkt_dump){
+            do_hex_dump = 1;
+        } else
+            do_pkt_dump = 1;
+        av_log_set_level(AV_LOG_DEBUG);
+    }
+    if (key == 'c' || key == 'C'){
+        /* Send ('c') or queue ('C') a command to matching filters. */
+        char buf[4096], target[64], command[256], arg[256] = {0};
+        double time;
+        int k, n = 0;
+        fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
+        i = 0;
+        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
+            if (k > 0)
+                buf[i++] = k;
+        buf[i] = 0;
+        if (k > 0 &&
+            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
+            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
+                   target, time, command, arg);
+            for (i = 0; i < nb_filtergraphs; i++) {
+                FilterGraph *fg = filtergraphs[i];
+                if (fg->graph) {
+                    if (time < 0) {
+                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
+                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
+                        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
+                    } else if (key == 'c') {
+                        fprintf(stderr, "Queing commands only on filters supporting the specific command is unsupported\n");
+                        ret = AVERROR_PATCHWELCOME;
+                    } else {
+                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
+                        if (ret < 0)
+                            fprintf(stderr, "Queing command failed with error %s\n", av_err2str(ret));
+                    }
+                }
+            }
+        } else {
+            av_log(NULL, AV_LOG_ERROR,
+                   "Parse error, at least 3 arguments were expected, "
+                   "only %d given in string '%s'\n", n, buf);
+        }
+    }
+    if (key == 'd' || key == 'D'){
+        int debug=0;
+        if(key == 'D') {
+            /* 'D': cycle to the next debug bit, skipping modes that are
+             * unsupported and would crash. */
+            debug = input_streams[0]->st->codec->debug<<1;
+            if(!debug) debug = 1;
+            while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
+                debug += debug;
+        }else
+            if(scanf("%d", &debug)!=1)
+                fprintf(stderr,"error parsing debug value\n");
+        for(i=0;i<nb_input_streams;i++) {
+            input_streams[i]->st->codec->debug = debug;
+        }
+        for(i=0;i<nb_output_streams;i++) {
+            OutputStream *ost = output_streams[i];
+            ost->enc_ctx->debug = debug;
+        }
+        if(debug) av_log_set_level(AV_LOG_DEBUG);
+        fprintf(stderr,"debug=%d\n", debug);
+    }
+    if (key == '?'){
+        fprintf(stderr, "key    function\n"
+                        "?      show this help\n"
+                        "+      increase verbosity\n"
+                        "-      decrease verbosity\n"
+                        "c      Send command to first matching filter supporting it\n"
+                        "C      Send/Que command to all matching filters\n"
+                        "D      cycle through available debug modes\n"
+                        "h      dump packets/hex press to cycle through the 3 states\n"
+                        "q      quit\n"
+                        "s      Show QP histogram\n"
+        );
+    }
+    return 0;
+}
+
+#if HAVE_PTHREADS
+/* Per-input-file reader thread: read packets with av_read_frame() and
+ * push them into the file's thread message queue until EOF or error.
+ * Starts in non-blocking send mode (when configured) and falls back to
+ * blocking with a warning if the queue fills up. */
+static void *input_thread(void *arg)
+{
+    InputFile *f = arg;
+    unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
+    int ret = 0;
+
+    while (1) {
+        AVPacket pkt;
+        ret = av_read_frame(f->ctx, &pkt);
+
+        if (ret == AVERROR(EAGAIN)) {
+            av_usleep(10000);
+            continue;
+        }
+        if (ret < 0) {
+            /* Propagate EOF/error to the consumer side of the queue. */
+            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
+            break;
+        }
+        av_dup_packet(&pkt);
+        ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
+        if (flags && ret == AVERROR(EAGAIN)) {
+            /* Queue full in non-blocking mode: retry blocking and warn. */
+            flags = 0;
+            ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
+            av_log(f->ctx, AV_LOG_WARNING,
+                   "Thread message queue blocking; consider raising the "
+                   "thread_queue_size option (current value: %d)\n",
+                   f->thread_queue_size);
+        }
+        if (ret < 0) {
+            if (ret != AVERROR_EOF)
+                av_log(f->ctx, AV_LOG_ERROR,
+                       "Unable to send packet to main thread: %s\n",
+                       av_err2str(ret));
+            av_free_packet(&pkt);
+            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
+            break;
+        }
+    }
+
+    return NULL;
+}
+
+/* Stop and join all input reader threads: signal EOF on the send side,
+ * drain any queued packets, join the thread and free the queue. */
+static void free_input_threads(void)
+{
+    int i;
+
+    for (i = 0; i < nb_input_files; i++) {
+        InputFile *f = input_files[i];
+        AVPacket pkt;
+
+        if (!f || !f->in_thread_queue)
+            continue;
+        av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
+        while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
+            av_free_packet(&pkt);
+
+        pthread_join(f->thread, NULL);
+        f->joined = 1;
+        av_thread_message_queue_free(&f->in_thread_queue);
+    }
+}
+
+/* Create one reader thread and packet queue per input file.  Skipped
+ * entirely when there is a single input (reading inline is enough).
+ * Returns 0 on success or a negative AVERROR code. */
+static int init_input_threads(void)
+{
+    int i, ret;
+
+    if (nb_input_files == 1)
+        return 0;
+
+    for (i = 0; i < nb_input_files; i++) {
+        InputFile *f = input_files[i];
+
+        /* Seekable (or lavfi) inputs can block the reader thread safely;
+         * non-seekable live inputs use non-blocking queue sends. */
+        if (f->ctx->pb ? !f->ctx->pb->seekable :
+            strcmp(f->ctx->iformat->name, "lavfi"))
+            f->non_blocking = 1;
+        ret = av_thread_message_queue_alloc(&f->in_thread_queue,
+                                            f->thread_queue_size, sizeof(AVPacket));
+        if (ret < 0)
+            return ret;
+
+        if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
+            av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
+            av_thread_message_queue_free(&f->in_thread_queue);
+            return AVERROR(ret);
+        }
+    }
+    return 0;
+}
+
+/* Fetch the next packet for 'f' from its reader-thread queue
+ * (non-blocking when the file was flagged as such). */
+static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
+{
+    return av_thread_message_queue_recv(f->in_thread_queue, pkt,
+                                        f->non_blocking ?
+                                        AV_THREAD_MESSAGE_NONBLOCK : 0);
+}
+#endif
+
+/* Read the next packet from input file 'f', honouring -re (rate
+ * emulation: return EAGAIN until wall-clock time catches up with the
+ * stream DTS) and using the per-file reader thread when available. */
+static int get_input_packet(InputFile *f, AVPacket *pkt)
+{
+    if (f->rate_emu) {
+        int i;
+        for (i = 0; i < f->nb_streams; i++) {
+            InputStream *ist = input_streams[f->ist_index + i];
+            int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
+            int64_t now = av_gettime_relative() - ist->start;
+            if (pts > now)
+                return AVERROR(EAGAIN);
+        }
+    }
+
+#if HAVE_PTHREADS
+    if (nb_input_files > 1)
+        return get_input_packet_mt(f, pkt);
+#endif
+    return av_read_frame(f->ctx, pkt);
+}
+
+/* Return 1 if any output stream is currently starved (its input returned
+ * EAGAIN), 0 otherwise. */
+static int got_eagain(void)
+{
+    int i;
+    for (i = 0; i < nb_output_streams; i++)
+        if (output_streams[i]->unavailable)
+            return 1;
+    return 0;
+}
+
+/* Clear the EAGAIN/unavailable markers on all inputs and outputs so the
+ * next transcode_step() retries every stream. */
+static void reset_eagain(void)
+{
+    int i;
+    for (i = 0; i < nb_input_files; i++)
+        input_files[i]->eagain = 0;
+    for (i = 0; i < nb_output_streams; i++)
+        output_streams[i]->unavailable = 0;
+}
+
+/*
+ * Return
+ * - 0 -- one packet was read and processed
+ * - AVERROR(EAGAIN) -- no packets were available for selected file,
+ *   this function should be called again
+ * - AVERROR_EOF -- this function should not be called again
+ */
+static int process_input(int file_index)
+{
+    InputFile *ifile = input_files[file_index];
+    AVFormatContext *is;
+    InputStream *ist;
+    AVPacket pkt;
+    int ret, i, j;
+
+    is  = ifile->ctx;
+    ret = get_input_packet(ifile, &pkt);
+
+    if (ret == AVERROR(EAGAIN)) {
+        ifile->eagain = 1;
+        return ret;
+    }
+    if (ret < 0) {
+        /* demux error or EOF: flush the decoders of this file and mark the
+         * non-filtered outputs finished */
+        if (ret != AVERROR_EOF) {
+            print_error(is->filename, ret);
+            if (exit_on_error)
+                exit_program(1);
+        }
+
+        for (i = 0; i < ifile->nb_streams; i++) {
+            ist = input_streams[ifile->ist_index + i];
+            if (ist->decoding_needed) {
+                ret = process_input_packet(ist, NULL);
+                if (ret>0)
+                    return 0;
+            }
+
+            /* mark all outputs that don't go through lavfi as finished */
+            for (j = 0; j < nb_output_streams; j++) {
+                OutputStream *ost = output_streams[j];
+
+                if (ost->source_index == ifile->ist_index + i &&
+                    (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
+                    finish_output_stream(ost);
+            }
+        }
+
+        ifile->eof_reached = 1;
+        return AVERROR(EAGAIN);
+    }
+
+    reset_eagain();
+
+    if (do_pkt_dump) {
+        av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
+                         is->streams[pkt.stream_index]);
+    }
+    /* the following test is needed in case new streams appear
+       dynamically in stream : we ignore them */
+    if (pkt.stream_index >= ifile->nb_streams) {
+        report_new_stream(file_index, &pkt);
+        goto discard_packet;
+    }
+
+    ist = input_streams[ifile->ist_index + pkt.stream_index];
+
+    ist->data_size += pkt.size;
+    ist->nb_packets++;
+
+    if (ist->discard)
+        goto discard_packet;
+
+    if (debug_ts) {
+        av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
+               "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
+               ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
+               av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
+               av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
+               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
+               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
+               av_ts2str(input_files[ist->file_index]->ts_offset),
+               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
+    }
+
+    /* undo timestamp wrap-around for streams with a small pts_wrap_bits */
+    if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
+        int64_t stime, stime2;
+        // Correcting starttime based on the enabled streams
+        // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
+        //       so we instead do it here as part of discontinuity handling
+        if (   ist->next_dts == AV_NOPTS_VALUE
+            && ifile->ts_offset == -is->start_time
+            && (is->iformat->flags & AVFMT_TS_DISCONT)) {
+            int64_t new_start_time = INT64_MAX;
+            for (i=0; i<is->nb_streams; i++) {
+                AVStream *st = is->streams[i];
+                if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
+                    continue;
+                new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
+            }
+            if (new_start_time > is->start_time) {
+                av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
+                ifile->ts_offset = -new_start_time;
+            }
+        }
+
+        stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
+        stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
+        ist->wrap_correction_done = 1;
+
+        if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
+            pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
+            ist->wrap_correction_done = 0;
+        }
+        if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
+            pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
+            ist->wrap_correction_done = 0;
+        }
+    }
+
+    /* add the stream-global side data to the first packet */
+    if (ist->nb_packets == 1) {
+        if (ist->st->nb_side_data)
+            av_packet_split_side_data(&pkt);
+        for (i = 0; i < ist->st->nb_side_data; i++) {
+            AVPacketSideData *src_sd = &ist->st->side_data[i];
+            uint8_t *dst_data;
+
+            if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
+                continue;
+            if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
+                continue;
+
+            dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
+            if (!dst_data)
+                exit_program(1);
+
+            memcpy(dst_data, src_sd->data, src_sd->size);
+        }
+    }
+
+    /* shift timestamps by the accumulated file offset, then apply -itsscale */
+    if (pkt.dts != AV_NOPTS_VALUE)
+        pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
+    if (pkt.pts != AV_NOPTS_VALUE)
+        pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
+
+    if (pkt.pts != AV_NOPTS_VALUE)
+        pkt.pts *= ist->ts_scale;
+    if (pkt.dts != AV_NOPTS_VALUE)
+        pkt.dts *= ist->ts_scale;
+
+    /* inter-file discontinuity: realign against the last timestamp seen */
+    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
+         ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
+        pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
+        && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
+        int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
+        int64_t delta   = pkt_dts - ifile->last_ts;
+        if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
+            delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
+            ifile->ts_offset -= delta;
+            av_log(NULL, AV_LOG_DEBUG,
+                   "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
+                   delta, ifile->ts_offset);
+            pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
+            if (pkt.pts != AV_NOPTS_VALUE)
+                pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
+        }
+    }
+
+    /* intra-stream discontinuity: either re-offset (TS-like formats) or drop
+     * the implausible timestamps entirely */
+    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
+         ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
+         pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
+        !copy_ts) {
+        int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
+        int64_t delta   = pkt_dts - ist->next_dts;
+        if (is->iformat->flags & AVFMT_TS_DISCONT) {
+            if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
+                delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
+                pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
+                ifile->ts_offset -= delta;
+                av_log(NULL, AV_LOG_DEBUG,
+                       "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
+                       delta, ifile->ts_offset);
+                pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
+                if (pkt.pts != AV_NOPTS_VALUE)
+                    pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
+            }
+        } else {
+            if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
+                 delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
+                av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
+                pkt.dts = AV_NOPTS_VALUE;
+            }
+            if (pkt.pts != AV_NOPTS_VALUE){
+                int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
+                delta   = pkt_pts - ist->next_dts;
+                if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
+                     delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
+                    av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
+                    pkt.pts = AV_NOPTS_VALUE;
+                }
+            }
+        }
+    }
+
+    if (pkt.dts != AV_NOPTS_VALUE)
+        ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
+
+    if (debug_ts) {
+        av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
+               ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
+               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
+               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
+               av_ts2str(input_files[ist->file_index]->ts_offset),
+               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
+    }
+
+    sub2video_heartbeat(ist, pkt.pts);
+
+    process_input_packet(ist, &pkt);
+
+discard_packet:
+    av_free_packet(&pkt);
+
+    return 0;
+}
+
+/**
+ * Perform a step of transcoding for the specified filter graph.
+ *
+ * @param[in]  graph     filter graph to consider
+ * @param[out] best_ist  input stream where a frame would allow to continue
+ *                       (NULL if the graph cannot make progress right now)
+ * @return  0 for success, <0 for error
+ */
+static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
+{
+    int i, ret;
+    int nb_requests, nb_requests_max = 0;
+    InputFilter *ifilter;
+    InputStream *ist;
+
+    *best_ist = NULL;
+    ret = avfilter_graph_request_oldest(graph->graph);
+    if (ret >= 0)
+        return reap_filters(0);
+
+    if (ret == AVERROR_EOF) {
+        /* graph fully drained: flush outputs and close them */
+        ret = reap_filters(1);
+        for (i = 0; i < graph->nb_outputs; i++)
+            close_output_stream(graph->outputs[i]->ost);
+        return ret;
+    }
+    if (ret != AVERROR(EAGAIN))
+        return ret;
+
+    /* pick the live input whose buffersrc has starved the graph the most */
+    for (i = 0; i < graph->nb_inputs; i++) {
+        ifilter = graph->inputs[i];
+        ist = ifilter->ist;
+        if (input_files[ist->file_index]->eagain ||
+            input_files[ist->file_index]->eof_reached)
+            continue;
+        nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
+        if (nb_requests > nb_requests_max) {
+            nb_requests_max = nb_requests;
+            *best_ist = ist;
+        }
+    }
+
+    /* no input can feed the graph now; mark its outputs unavailable */
+    if (!*best_ist)
+        for (i = 0; i < graph->nb_outputs; i++)
+            graph->outputs[i]->ost->unavailable = 1;
+
+    return 0;
+}
+
+/**
+ * Run a single step of transcoding.
+ *
+ * Chooses the most urgent output stream, finds the input it needs, reads
+ * and processes one packet, then reaps any finished filtered frames.
+ *
+ * @return  0 for success, <0 for error
+ */
+static int transcode_step(void)
+{
+    OutputStream *ost;
+    InputStream  *ist;
+    int ret;
+
+    ost = choose_output();
+    if (!ost) {
+        if (got_eagain()) {
+            /* all inputs temporarily starved; back off briefly and retry */
+            reset_eagain();
+            av_usleep(10000);
+            return 0;
+        }
+        av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
+        return AVERROR_EOF;
+    }
+
+    if (ost->filter) {
+        if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
+            return ret;
+        if (!ist)
+            return 0;
+    } else {
+        av_assert0(ost->source_index >= 0);
+        ist = input_streams[ost->source_index];
+    }
+
+    ret = process_input(ist->file_index);
+    if (ret == AVERROR(EAGAIN)) {
+        if (input_files[ist->file_index]->eagain)
+            ost->unavailable = 1;
+        return 0;
+    }
+
+    if (ret < 0)
+        return ret == AVERROR_EOF ? 0 : ret;
+
+    return reap_filters(0);
+}
+
+/*
+ * The following code is the main loop of the file converter
+ */
+static int transcode(void)
+{
+    int ret, i;
+    AVFormatContext *os;
+    OutputStream *ost;
+    InputStream *ist;
+    int64_t timer_start;
+
+    ret = transcode_init();
+    if (ret < 0)
+        goto fail;
+
+    if (stdin_interaction) {
+        av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
+    }
+
+    timer_start = av_gettime_relative();
+
+#if HAVE_PTHREADS
+    if ((ret = init_input_threads()) < 0)
+        goto fail;
+#endif
+
+    while (!received_sigterm) {
+        int64_t cur_time= av_gettime_relative();
+
+        /* if 'q' pressed, exits */
+        if (stdin_interaction)
+            if (check_keyboard_interaction(cur_time) < 0)
+                break;
+
+        /* check if there's any stream where output is still needed */
+        if (!need_output()) {
+            av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
+            break;
+        }
+
+        ret = transcode_step();
+        if (ret < 0) {
+            if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
+                continue;
+            } else {
+                char errbuf[128];
+                av_strerror(ret, errbuf, sizeof(errbuf));
+
+                av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
+                break;
+            }
+        }
+
+        /* dump report by using the output first video and audio streams */
+        print_report(0, timer_start, cur_time);
+    }
+#if HAVE_PTHREADS
+    free_input_threads();
+#endif
+
+    /* at the end of stream, we must flush the decoder buffers */
+    for (i = 0; i < nb_input_streams; i++) {
+        ist = input_streams[i];
+        if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
+            process_input_packet(ist, NULL);
+        }
+    }
+    flush_encoders();
+
+    term_exit();
+
+    /* write the trailer if needed and close file */
+    for (i = 0; i < nb_output_files; i++) {
+        os = output_files[i]->ctx;
+        av_write_trailer(os);
+    }
+
+    /* dump report by using the first video and audio streams */
+    print_report(1, timer_start, av_gettime_relative());
+
+    /* close each encoder */
+    for (i = 0; i < nb_output_streams; i++) {
+        ost = output_streams[i];
+        if (ost->encoding_needed) {
+            av_freep(&ost->enc_ctx->stats_in);
+        }
+    }
+
+    /* close each decoder */
+    for (i = 0; i < nb_input_streams; i++) {
+        ist = input_streams[i];
+        if (ist->decoding_needed) {
+            avcodec_close(ist->dec_ctx);
+            if (ist->hwaccel_uninit)
+                ist->hwaccel_uninit(ist->dec_ctx);
+        }
+    }
+
+    /* finished ! */
+    ret = 0;
+
+ fail:
+#if HAVE_PTHREADS
+    /* safe to call twice: joined queues are freed and skipped */
+    free_input_threads();
+#endif
+
+    if (output_streams) {
+        for (i = 0; i < nb_output_streams; i++) {
+            ost = output_streams[i];
+            if (ost) {
+                if (ost->logfile) {
+                    fclose(ost->logfile);
+                    ost->logfile = NULL;
+                }
+                av_freep(&ost->forced_kf_pts);
+                av_freep(&ost->apad);
+                av_freep(&ost->disposition);
+                av_dict_free(&ost->encoder_opts);
+                av_dict_free(&ost->swr_opts);
+                av_dict_free(&ost->resample_opts);
+                av_dict_free(&ost->bsf_args);
+            }
+        }
+    }
+    return ret;
+}
+
+
+/* Return the user CPU time consumed by this process, in microseconds.
+ * Falls back to wall-clock time when no CPU-time API is available. */
+static int64_t getutime(void)
+{
+#if HAVE_GETRUSAGE
+    struct rusage rusage;
+
+    getrusage(RUSAGE_SELF, &rusage);
+    return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
+#elif HAVE_GETPROCESSTIMES
+    HANDLE proc;
+    FILETIME c, e, k, u;
+    proc = GetCurrentProcess();
+    GetProcessTimes(proc, &c, &e, &k, &u);
+    /* FILETIME is in 100ns units; divide by 10 to get microseconds */
+    return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
+#else
+    return av_gettime_relative();
+#endif
+}
+
+/* Return the peak memory usage of this process in bytes, or 0 when no
+ * platform API is available. */
+static int64_t getmaxrss(void)
+{
+#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
+    struct rusage rusage;
+    getrusage(RUSAGE_SELF, &rusage);
+    /* ru_maxrss is reported in kilobytes */
+    return (int64_t)rusage.ru_maxrss * 1024;
+#elif HAVE_GETPROCESSMEMORYINFO
+    HANDLE proc;
+    PROCESS_MEMORY_COUNTERS memcounters;
+    proc = GetCurrentProcess();
+    memcounters.cb = sizeof(memcounters);
+    GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
+    return memcounters.PeakPagefileUsage;
+#else
+    return 0;
+#endif
+}
+
+/* No-op log callback, installed in daemon mode to silence all av_log output. */
+static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
+{
+}
+
+/* Program entry point: register libraries, parse options, open inputs and
+ * outputs, run the transcode loop and report benchmark/error statistics. */
+int main(int argc, char **argv)
+{
+    int ret;
+    int64_t ti;
+
+    register_exit(ffmpeg_cleanup);
+
+    setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
+
+    av_log_set_flags(AV_LOG_SKIP_REPEATED);
+    parse_loglevel(argc, argv, options);
+
+    /* undocumented daemon mode: "-d" as first argument silences logging */
+    if(argc>1 && !strcmp(argv[1], "-d")){
+        run_as_daemon=1;
+        av_log_set_callback(log_callback_null);
+        argc--;
+        argv++;
+    }
+
+    avcodec_register_all();
+#if CONFIG_AVDEVICE
+    avdevice_register_all();
+#endif
+    avfilter_register_all();
+    av_register_all();
+    avformat_network_init();
+
+    show_banner(argc, argv, options);
+
+    term_init();
+
+    /* parse options and open all input/output files */
+    ret = ffmpeg_parse_options(argc, argv);
+    if (ret < 0)
+        exit_program(1);
+
+    if (nb_output_files <= 0 && nb_input_files == 0) {
+        show_usage();
+        av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
+        exit_program(1);
+    }
+
+    /* file converter / grab */
+    if (nb_output_files <= 0) {
+        av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
+        exit_program(1);
+    }
+
+//     if (nb_input_files == 0) {
+//         av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
+//         exit_program(1);
+//     }
+
+    current_time = ti = getutime();
+    if (transcode() < 0)
+        exit_program(1);
+    ti = getutime() - ti;
+    if (do_benchmark) {
+        av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
+    }
+    av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
+           decode_error_stat[0], decode_error_stat[1]);
+    /* exit 69 when the decode-error rate exceeds the -max_error_rate limit */
+    if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
+        exit_program(69);
+
+    exit_program(received_nb_signals ? 255 : main_return_code);
+    return main_return_code;
+}
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/pixfmt.h"
+#include "libavutil/imgutils.h"
#include "libavutil/samplefmt.h"
- if((codec->capabilities & CODEC_CAP_LOSSLESS) && av_get_sample_fmt_name(st->codec->sample_fmt) > av_get_sample_fmt_name(codec->sample_fmts[0]))
+/* Pick a pixel format supported by the encoder that best matches `target`.
+ * Returns `target` unchanged when the codec supports it (or lists no
+ * formats); otherwise returns the closest supported format and warns. */
+enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, AVCodec *codec, enum AVPixelFormat target)
+{
+    if (codec && codec->pix_fmts) {
+        const enum AVPixelFormat *p = codec->pix_fmts;
+        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(target);
+        /* even component count (gray+a, yuva, rgba) implies an alpha plane */
+        int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
+        enum AVPixelFormat best= AV_PIX_FMT_NONE;
+        static const enum AVPixelFormat mjpeg_formats[] =
+            { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
+        static const enum AVPixelFormat ljpeg_formats[] =
+            { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
+              AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
+
+        /* with relaxed compliance, (l)jpeg accepts a wider format set */
+        if (enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
+            if (enc_ctx->codec_id == AV_CODEC_ID_MJPEG) {
+                p = mjpeg_formats;
+            } else if (enc_ctx->codec_id == AV_CODEC_ID_LJPEG) {
+                p =ljpeg_formats;
+            }
+        }
+        for (; *p != AV_PIX_FMT_NONE; p++) {
+            best= avcodec_find_best_pix_fmt_of_2(best, *p, target, has_alpha, NULL);
+            if (*p == target)
+                break;
+        }
+        if (*p == AV_PIX_FMT_NONE) {
+            if (target != AV_PIX_FMT_NONE)
+                av_log(NULL, AV_LOG_WARNING,
+                       "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
+                       av_get_pix_fmt_name(target),
+                       codec->name,
+                       av_get_pix_fmt_name(best));
+            return best;
+        }
+    }
+    return target;
+}
+
+/* If the stream's sample format is not supported by the encoder, fall back
+ * to the encoder's first listed format, warning the user (and noting the
+ * loss of losslessness where applicable). */
+void choose_sample_fmt(AVStream *st, AVCodec *codec)
+{
+    if (codec && codec->sample_fmts) {
+        const enum AVSampleFormat *p = codec->sample_fmts;
+        for (; *p != -1; p++) {
+            if (*p == st->codec->sample_fmt)
+                break;
+        }
+        if (*p == -1) {
+            if((codec->capabilities & AV_CODEC_CAP_LOSSLESS) && av_get_sample_fmt_name(st->codec->sample_fmt) > av_get_sample_fmt_name(codec->sample_fmts[0]))
+                av_log(NULL, AV_LOG_ERROR, "Conversion will not be lossless.\n");
+            if(av_get_sample_fmt_name(st->codec->sample_fmt))
+            av_log(NULL, AV_LOG_WARNING,
+                   "Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n",
+                   av_get_sample_fmt_name(st->codec->sample_fmt),
+                   codec->name,
+                   av_get_sample_fmt_name(codec->sample_fmts[0]));
+            st->codec->sample_fmt = codec->sample_fmts[0];
+        }
+    }
+}
+
+/* Build the '|'-separated list of acceptable pixel format names for the
+ * output stream's filter chain.  Returns a heap string owned by the caller,
+ * or NULL when no constraint applies. */
+static char *choose_pix_fmts(OutputStream *ost)
+{
+    AVDictionaryEntry *strict_dict = av_dict_get(ost->encoder_opts, "strict", NULL, 0);
+    if (strict_dict)
+        // used by choose_pixel_fmt() and below
+        av_opt_set(ost->enc_ctx, "strict", strict_dict->value, 0);
+
+    if (ost->keep_pix_fmt) {
+        /* -keep_pix_fmt: disable auto-conversions and pin the exact format */
+        if (ost->filter)
+            avfilter_graph_set_auto_convert(ost->filter->graph->graph,
+                                            AVFILTER_AUTO_CONVERT_NONE);
+        if (ost->enc_ctx->pix_fmt == AV_PIX_FMT_NONE)
+            return NULL;
+        return av_strdup(av_get_pix_fmt_name(ost->enc_ctx->pix_fmt));
+    }
+    if (ost->enc_ctx->pix_fmt != AV_PIX_FMT_NONE) {
+        return av_strdup(av_get_pix_fmt_name(choose_pixel_fmt(ost->st, ost->enc_ctx, ost->enc, ost->enc_ctx->pix_fmt)));
+    } else if (ost->enc && ost->enc->pix_fmts) {
+        const enum AVPixelFormat *p;
+        AVIOContext *s = NULL;
+        uint8_t *ret;
+        int len;
+
+        if (avio_open_dyn_buf(&s) < 0)
+            exit_program(1);
+
+        p = ost->enc->pix_fmts;
+        /* with relaxed compliance, (l)jpeg accepts a wider format set */
+        if (ost->enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
+            if (ost->enc_ctx->codec_id == AV_CODEC_ID_MJPEG) {
+                p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
+            } else if (ost->enc_ctx->codec_id == AV_CODEC_ID_LJPEG) {
+                p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
+                                                    AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
+            }
+        }
+
+        for (; *p != AV_PIX_FMT_NONE; p++) {
+            const char *name = av_get_pix_fmt_name(*p);
+            avio_printf(s, "%s|", name);
+        }
+        len = avio_close_dyn_buf(s, &ret);
+        /* overwrite the trailing '|' with the terminator */
+        ret[len - 1] = 0;
+        return ret;
+    } else
+        return NULL;
+}
+
/* Define a function for building a string containing a list of
* allowed formats. */
#define DEF_CHOOSE_FORMAT(type, var, supported_list, none, get_name) \
if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
return ret;
- !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
+ fg->reconfiguration = 1;
+
+ for (i = 0; i < fg->nb_outputs; i++) {
+ OutputStream *ost = fg->outputs[i]->ost;
+ if (ost &&
+ ost->enc->type == AVMEDIA_TYPE_AUDIO &&
+            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
+ av_buffersink_set_frame_size(ost->filter->filter,
+ ost->enc_ctx->frame_size);
+ }
+
return 0;
}
--- /dev/null
- if(codec->capabilities & CODEC_CAP_DR1)
+/*
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * simple media player based on the FFmpeg libraries
+ */
+
+#include "config.h"
+#include <inttypes.h>
+#include <math.h>
+#include <limits.h>
+#include <signal.h>
+#include <stdint.h>
+
+#include "libavutil/avstring.h"
+#include "libavutil/colorspace.h"
+#include "libavutil/eval.h"
+#include "libavutil/mathematics.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/dict.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/samplefmt.h"
+#include "libavutil/avassert.h"
+#include "libavutil/time.h"
+#include "libavformat/avformat.h"
+#include "libavdevice/avdevice.h"
+#include "libswscale/swscale.h"
+#include "libavutil/opt.h"
+#include "libavcodec/avfft.h"
+#include "libswresample/swresample.h"
+
+#if CONFIG_AVFILTER
+# include "libavfilter/avcodec.h"
+# include "libavfilter/avfilter.h"
+# include "libavfilter/buffersink.h"
+# include "libavfilter/buffersrc.h"
+#endif
+
+#include <SDL.h>
+#include <SDL_thread.h>
+
+#include "cmdutils.h"
+
+#include <assert.h>
+
+const char program_name[] = "ffplay";
+const int program_birth_year = 2003;
+
+#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
+#define MIN_FRAMES 5
+
+/* Minimum SDL audio buffer size, in samples. */
+#define SDL_AUDIO_MIN_BUFFER_SIZE 512
+/* Calculate actual buffer size keeping in mind not cause too frequent audio callbacks */
+#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
+
+/* no AV sync correction is done if below the minimum AV sync threshold */
+#define AV_SYNC_THRESHOLD_MIN 0.04
+/* AV sync correction is done if above the maximum AV sync threshold */
+#define AV_SYNC_THRESHOLD_MAX 0.1
+/* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
+#define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
+/* no AV correction is done if too big error */
+#define AV_NOSYNC_THRESHOLD 10.0
+
+/* maximum audio speed change to get correct sync */
+#define SAMPLE_CORRECTION_PERCENT_MAX 10
+
+/* external clock speed adjustment constants for realtime sources based on buffer fullness */
+#define EXTERNAL_CLOCK_SPEED_MIN 0.900
+#define EXTERNAL_CLOCK_SPEED_MAX 1.010
+#define EXTERNAL_CLOCK_SPEED_STEP 0.001
+
+/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
+#define AUDIO_DIFF_AVG_NB 20
+
+/* polls for possible required screen refresh at least this often, should be less than 1/fps */
+#define REFRESH_RATE 0.01
+
+/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
+/* TODO: We assume that a decoded and resampled frame fits into this buffer */
+#define SAMPLE_ARRAY_SIZE (8 * 65536)
+
+#define CURSOR_HIDE_DELAY 1000000
+
+static int64_t sws_flags = SWS_BICUBIC;
+
+/* Singly-linked node of a PacketQueue. */
+typedef struct MyAVPacketList {
+    AVPacket pkt;
+    struct MyAVPacketList *next;
+    int serial;                 /* queue serial at enqueue time, for flush detection */
+} MyAVPacketList;
+
+/* Thread-safe FIFO of demuxed packets, guarded by an SDL mutex/cond pair. */
+typedef struct PacketQueue {
+    MyAVPacketList *first_pkt, *last_pkt;
+    int nb_packets;
+    int size;                   /* total bytes queued, packets plus node overhead */
+    int abort_request;
+    int serial;                 /* incremented on each flush packet */
+    SDL_mutex *mutex;
+    SDL_cond *cond;
+} PacketQueue;
+
+#define VIDEO_PICTURE_QUEUE_SIZE 3
+#define SUBPICTURE_QUEUE_SIZE 16
+#define SAMPLE_QUEUE_SIZE 9
+#define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
+
+/* Description of an audio format used on either side of the resampler. */
+typedef struct AudioParams {
+    int freq;
+    int channels;
+    int64_t channel_layout;
+    enum AVSampleFormat fmt;
+    int frame_size;             /* bytes per sample across all channels */
+    int bytes_per_sec;
+} AudioParams;
+
+/* A drift-compensated playback clock (audio, video or external). */
+typedef struct Clock {
+    double pts;           /* clock base */
+    double pts_drift;     /* clock base minus time at which we updated the clock */
+    double last_updated;
+    double speed;
+    int serial;           /* clock is based on a packet with this serial */
+    int paused;
+    int *queue_serial;    /* pointer to the current packet queue serial, used for obsolete clock detection */
+} Clock;
+
+/* Common struct for handling all types of decoded data and allocated render buffers. */
+typedef struct Frame {
+    AVFrame *frame;
+    AVSubtitle sub;
+    int serial;
+    double pts;           /* presentation timestamp for the frame */
+    double duration;      /* estimated duration of the frame */
+    int64_t pos;          /* byte position of the frame in the input file */
+    SDL_Overlay *bmp;     /* SDL render surface for video frames */
+    int allocated;
+    int reallocate;
+    int width;
+    int height;
+    AVRational sar;       /* sample aspect ratio */
+} Frame;
+
+/* Ring buffer of decoded Frames, synchronized against its packet queue. */
+typedef struct FrameQueue {
+    Frame queue[FRAME_QUEUE_SIZE];
+    int rindex;           /* read index */
+    int windex;           /* write index */
+    int size;
+    int max_size;
+    int keep_last;        /* keep the last shown frame for redisplay */
+    int rindex_shown;
+    SDL_mutex *mutex;
+    SDL_cond *cond;
+    PacketQueue *pktq;    /* associated packet queue, used for abort checks */
+} FrameQueue;
+
+/* Master-clock selection for A/V synchronization. */
+enum {
+    AV_SYNC_AUDIO_MASTER, /* default choice */
+    AV_SYNC_VIDEO_MASTER,
+    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
+};
+
+/* Per-stream decoder state: the codec context, its packet source and the
+ * SDL thread that drains it. */
+typedef struct Decoder {
+    AVPacket pkt;
+    AVPacket pkt_temp;    /* working copy consumed incrementally by the decoder */
+    PacketQueue *queue;
+    AVCodecContext *avctx;
+    int pkt_serial;
+    int finished;
+    int packet_pending;
+    SDL_cond *empty_queue_cond;  /* signalled to wake the read thread when starved */
+    int64_t start_pts;
+    AVRational start_pts_tb;
+    int64_t next_pts;
+    AVRational next_pts_tb;
+    SDL_Thread *decoder_tid;
+} Decoder;
+
+/* Complete player state for one opened media file: demuxer, per-type
+ * decoders, queues, clocks and display bookkeeping. */
+typedef struct VideoState {
+    SDL_Thread *read_tid;       /* demuxer (read) thread */
+    AVInputFormat *iformat;
+    int abort_request;
+    int force_refresh;
+    int paused;
+    int last_paused;
+    int queue_attachments_req;
+    int seek_req;
+    int seek_flags;
+    int64_t seek_pos;
+    int64_t seek_rel;
+    int read_pause_return;
+    AVFormatContext *ic;
+    int realtime;
+
+    Clock audclk;
+    Clock vidclk;
+    Clock extclk;
+
+    FrameQueue pictq;           /* decoded video pictures */
+    FrameQueue subpq;           /* decoded subtitles */
+    FrameQueue sampq;           /* decoded audio frames */
+
+    Decoder auddec;
+    Decoder viddec;
+    Decoder subdec;
+
+    int audio_stream;
+
+    int av_sync_type;
+
+    double audio_clock;
+    int audio_clock_serial;
+    double audio_diff_cum; /* used for AV difference average computation */
+    double audio_diff_avg_coef;
+    double audio_diff_threshold;
+    int audio_diff_avg_count;
+    AVStream *audio_st;
+    PacketQueue audioq;
+    int audio_hw_buf_size;
+    uint8_t silence_buf[SDL_AUDIO_MIN_BUFFER_SIZE];
+    uint8_t *audio_buf;
+    uint8_t *audio_buf1;
+    unsigned int audio_buf_size; /* in bytes */
+    unsigned int audio_buf1_size;
+    int audio_buf_index; /* in bytes */
+    int audio_write_buf_size;
+    struct AudioParams audio_src;
+#if CONFIG_AVFILTER
+    struct AudioParams audio_filter_src;
+#endif
+    struct AudioParams audio_tgt;
+    struct SwrContext *swr_ctx;
+    int frame_drops_early;      /* frames dropped before decoding/filtering */
+    int frame_drops_late;       /* frames dropped at display time */
+
+    enum ShowMode {
+        SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
+    } show_mode;
+    int16_t sample_array[SAMPLE_ARRAY_SIZE];   /* samples kept for visualization */
+    int sample_array_index;
+    int last_i_start;
+    RDFTContext *rdft;          /* FFT context for the spectrum display */
+    int rdft_bits;
+    FFTSample *rdft_data;
+    int xpos;
+    double last_vis_time;
+
+    int subtitle_stream;
+    AVStream *subtitle_st;
+    PacketQueue subtitleq;
+
+    double frame_timer;
+    double frame_last_returned_time;
+    double frame_last_filter_delay;
+    int video_stream;
+    AVStream *video_st;
+    PacketQueue videoq;
+    double max_frame_duration;      // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
+#if !CONFIG_AVFILTER
+    struct SwsContext *img_convert_ctx;
+#endif
+    SDL_Rect last_display_rect;
+    int eof;
+
+    char filename[1024];
+    int width, height, xleft, ytop;
+    int step;                   /* single-frame stepping while paused */
+
+#if CONFIG_AVFILTER
+    int vfilter_idx;
+    AVFilterContext *in_video_filter;   // the first filter in the video chain
+    AVFilterContext *out_video_filter;  // the last filter in the video chain
+    AVFilterContext *in_audio_filter;   // the first filter in the audio chain
+    AVFilterContext *out_audio_filter;  // the last filter in the audio chain
+    AVFilterGraph *agraph;              // audio filter graph
+#endif
+
+    int last_video_stream, last_audio_stream, last_subtitle_stream;
+
+    SDL_cond *continue_read_thread;     /* wakes the read thread after a consume */
+} VideoState;
+
+/* options specified by the user */
+static AVInputFormat *file_iformat;
+static const char *input_filename;
+static const char *window_title;
+static int fs_screen_width;
+static int fs_screen_height;
+static int default_width = 640;
+static int default_height = 480;
+static int screen_width = 0;
+static int screen_height = 0;
+static int audio_disable;
+static int video_disable;
+static int subtitle_disable;
+static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
+static int seek_by_bytes = -1;
+static int display_disable;
+static int show_status = 1;
+static int av_sync_type = AV_SYNC_AUDIO_MASTER;
+static int64_t start_time = AV_NOPTS_VALUE;
+static int64_t duration = AV_NOPTS_VALUE;
+static int fast = 0;
+static int genpts = 0;
+static int lowres = 0;
+static int decoder_reorder_pts = -1;
+static int autoexit;
+static int exit_on_keydown;
+static int exit_on_mousedown;
+static int loop = 1;
+static int framedrop = -1;
+static int infinite_buffer = -1;
+static enum ShowMode show_mode = SHOW_MODE_NONE;
+static const char *audio_codec_name;
+static const char *subtitle_codec_name;
+static const char *video_codec_name;
+double rdftspeed = 0.02;
+static int64_t cursor_last_shown;
+static int cursor_hidden = 0;
+#if CONFIG_AVFILTER
+static const char **vfilters_list = NULL;
+static int nb_vfilters = 0;
+static char *afilters = NULL;
+#endif
+static int autorotate = 1;
+
+/* current context */
+static int is_full_screen;
+static int64_t audio_callback_time;
+
+static AVPacket flush_pkt;
+
+#define FF_ALLOC_EVENT (SDL_USEREVENT)
+#define FF_QUIT_EVENT (SDL_USEREVENT + 2)
+
+static SDL_Surface *screen;
+
+#if CONFIG_AVFILTER
+/* Option handler for -vf: append one filtergraph string to vfilters_list. */
+static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
+{
+    GROW_ARRAY(vfilters_list, nb_vfilters);
+    vfilters_list[nb_vfilters - 1] = arg;
+    return 0;
+}
+#endif
+
+/* Return nonzero when the two audio formats differ in a way that requires
+ * reconfiguration; mono planar and packed variants are considered equal. */
+static inline
+int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
+                   enum AVSampleFormat fmt2, int64_t channel_count2)
+{
+    /* If channel count == 1, planar and non-planar formats are the same */
+    if (channel_count1 == 1 && channel_count2 == 1)
+        return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
+    else
+        return channel_count1 != channel_count2 || fmt1 != fmt2;
+}
+
+static inline
+int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
+{
+ if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
+ return channel_layout;
+ else
+ return 0;
+}
+
+static void free_picture(Frame *vp);
+
+/* Append a packet to the queue. Caller must hold q->mutex.
+ * The flush sentinel bumps the queue serial so stale packets can be
+ * recognised downstream. Returns 0 on success, -1 on abort/OOM. */
+static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
+{
+    MyAVPacketList *pkt1;
+
+    if (q->abort_request)
+        return -1;
+
+    pkt1 = av_malloc(sizeof(MyAVPacketList));
+    if (!pkt1)
+        return -1;
+    pkt1->pkt = *pkt;
+    pkt1->next = NULL;
+    if (pkt == &flush_pkt)
+        q->serial++;
+    pkt1->serial = q->serial;
+
+    if (!q->last_pkt)
+        q->first_pkt = pkt1;
+    else
+        q->last_pkt->next = pkt1;
+    q->last_pkt = pkt1;
+    q->nb_packets++;
+    /* account for list-node overhead so q->size reflects real memory use */
+    q->size += pkt1->pkt.size + sizeof(*pkt1);
+    /* XXX: should duplicate packet data in DV case */
+    SDL_CondSignal(q->cond);
+    return 0;
+}
+
+/* Thread-safe enqueue: duplicates non-flush packets so the queue owns
+ * the data, then frees the packet again if the enqueue failed. */
+static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
+{
+    int ret;
+
+    /* duplicate the packet */
+    if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
+        return -1;
+
+    SDL_LockMutex(q->mutex);
+    ret = packet_queue_put_private(q, pkt);
+    SDL_UnlockMutex(q->mutex);
+
+    if (pkt != &flush_pkt && ret < 0)
+        av_free_packet(pkt);
+
+    return ret;
+}
+
+/* Enqueue an empty packet: signals end-of-stream to the decoder so it
+ * can drain its internal buffers. */
+static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
+{
+    AVPacket pkt1, *pkt = &pkt1;
+    av_init_packet(pkt);
+    pkt->data = NULL;
+    pkt->size = 0;
+    pkt->stream_index = stream_index;
+    return packet_queue_put(q, pkt);
+}
+
+/* packet queue handling */
+/* Initialise a queue in the aborted state; packet_queue_start() must be
+ * called before it accepts packets. */
+static void packet_queue_init(PacketQueue *q)
+{
+    memset(q, 0, sizeof(PacketQueue));
+    q->mutex = SDL_CreateMutex();
+    q->cond = SDL_CreateCond();
+    q->abort_request = 1;
+}
+
+/* Drop and free every queued packet; the queue stays usable. */
+static void packet_queue_flush(PacketQueue *q)
+{
+    MyAVPacketList *pkt, *pkt1;
+
+    SDL_LockMutex(q->mutex);
+    for (pkt = q->first_pkt; pkt; pkt = pkt1) {
+        pkt1 = pkt->next;
+        av_free_packet(&pkt->pkt);
+        av_freep(&pkt);
+    }
+    q->last_pkt = NULL;
+    q->first_pkt = NULL;
+    q->nb_packets = 0;
+    q->size = 0;
+    SDL_UnlockMutex(q->mutex);
+}
+
+/* Free all packets and destroy the synchronisation primitives. */
+static void packet_queue_destroy(PacketQueue *q)
+{
+    packet_queue_flush(q);
+    SDL_DestroyMutex(q->mutex);
+    SDL_DestroyCond(q->cond);
+}
+
+/* Ask blocked readers to return; wakes any thread waiting in get(). */
+static void packet_queue_abort(PacketQueue *q)
+{
+    SDL_LockMutex(q->mutex);
+
+    q->abort_request = 1;
+
+    SDL_CondSignal(q->cond);
+
+    SDL_UnlockMutex(q->mutex);
+}
+
+/* Re-enable the queue and inject a flush packet to start a new serial. */
+static void packet_queue_start(PacketQueue *q)
+{
+    SDL_LockMutex(q->mutex);
+    q->abort_request = 0;
+    packet_queue_put_private(q, &flush_pkt);
+    SDL_UnlockMutex(q->mutex);
+}
+
+/* return < 0 if aborted, 0 if no packet and > 0 if packet. */
+/* Dequeue one packet; if 'block' is set, wait until one arrives or the
+ * queue is aborted. On success *pkt is owned by the caller and *serial
+ * (if non-NULL) receives the packet's queue serial. */
+static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
+{
+    MyAVPacketList *pkt1;
+    int ret;
+
+    SDL_LockMutex(q->mutex);
+
+    for (;;) {
+        if (q->abort_request) {
+            ret = -1;
+            break;
+        }
+
+        pkt1 = q->first_pkt;
+        if (pkt1) {
+            q->first_pkt = pkt1->next;
+            if (!q->first_pkt)
+                q->last_pkt = NULL;
+            q->nb_packets--;
+            q->size -= pkt1->pkt.size + sizeof(*pkt1);
+            *pkt = pkt1->pkt;
+            if (serial)
+                *serial = pkt1->serial;
+            av_free(pkt1);
+            ret = 1;
+            break;
+        } else if (!block) {
+            ret = 0;
+            break;
+        } else {
+            SDL_CondWait(q->cond, q->mutex);
+        }
+    }
+    SDL_UnlockMutex(q->mutex);
+    return ret;
+}
+
+/* Bind a decoder to its codec context, input packet queue and the
+ * condition variable used to wake the demuxer when the queue drains. */
+static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
+    memset(d, 0, sizeof(Decoder));
+    d->avctx = avctx;
+    d->queue = queue;
+    d->empty_queue_cond = empty_queue_cond;
+    d->start_pts = AV_NOPTS_VALUE;
+}
+
+/* Decode the next frame (or subtitle) from the decoder's packet queue.
+ * Handles flush packets (codec flush + serial change), end-of-stream
+ * draining via null packets, and the pts selection policy.
+ * Returns 1 when a frame was produced, 0 when the decoder finished the
+ * current serial, -1 on abort. */
+static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
+    int got_frame = 0;
+
+    do {
+        int ret = -1;
+
+        if (d->queue->abort_request)
+            return -1;
+
+        /* fetch a new packet when the previous one is fully consumed or
+         * belongs to a stale serial (pre-seek data) */
+        if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
+            AVPacket pkt;
+            do {
+                if (d->queue->nb_packets == 0)
+                    SDL_CondSignal(d->empty_queue_cond);
+                if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
+                    return -1;
+                if (pkt.data == flush_pkt.data) {
+                    avcodec_flush_buffers(d->avctx);
+                    d->finished = 0;
+                    d->next_pts = d->start_pts;
+                    d->next_pts_tb = d->start_pts_tb;
+                }
+            } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
+            av_free_packet(&d->pkt);
+            d->pkt_temp = d->pkt = pkt;
+            d->packet_pending = 1;
+        }
+
+        switch (d->avctx->codec_type) {
+            case AVMEDIA_TYPE_VIDEO:
+                ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
+                if (got_frame) {
+                    /* pts policy: -1 = best-effort, >0 = pkt_pts, 0 = pkt_dts */
+                    if (decoder_reorder_pts == -1) {
+                        frame->pts = av_frame_get_best_effort_timestamp(frame);
+                    } else if (decoder_reorder_pts) {
+                        frame->pts = frame->pkt_pts;
+                    } else {
+                        frame->pts = frame->pkt_dts;
+                    }
+                }
+                break;
+            case AVMEDIA_TYPE_AUDIO:
+                ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
+                if (got_frame) {
+                    AVRational tb = (AVRational){1, frame->sample_rate};
+                    /* pts fallback chain: frame pts -> packet pts ->
+                     * extrapolation from the previous frame's end */
+                    if (frame->pts != AV_NOPTS_VALUE)
+                        frame->pts = av_rescale_q(frame->pts, d->avctx->time_base, tb);
+                    else if (frame->pkt_pts != AV_NOPTS_VALUE)
+                        frame->pts = av_rescale_q(frame->pkt_pts, av_codec_get_pkt_timebase(d->avctx), tb);
+                    else if (d->next_pts != AV_NOPTS_VALUE)
+                        frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
+                    if (frame->pts != AV_NOPTS_VALUE) {
+                        d->next_pts = frame->pts + frame->nb_samples;
+                        d->next_pts_tb = tb;
+                    }
+                }
+                break;
+            case AVMEDIA_TYPE_SUBTITLE:
+                ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &d->pkt_temp);
+                break;
+        }
+
+        if (ret < 0) {
+            d->packet_pending = 0;
+        } else {
+            d->pkt_temp.dts =
+            d->pkt_temp.pts = AV_NOPTS_VALUE;
+            if (d->pkt_temp.data) {
+                /* audio packets may be consumed partially; other types
+                 * are always consumed whole */
+                if (d->avctx->codec_type != AVMEDIA_TYPE_AUDIO)
+                    ret = d->pkt_temp.size;
+                d->pkt_temp.data += ret;
+                d->pkt_temp.size -= ret;
+                if (d->pkt_temp.size <= 0)
+                    d->packet_pending = 0;
+            } else {
+                /* null (drain) packet with no output: decoder is done */
+                if (!got_frame) {
+                    d->packet_pending = 0;
+                    d->finished = d->pkt_serial;
+                }
+            }
+        }
+    } while (!got_frame && !d->finished);
+
+    return got_frame;
+}
+
+/* Release the packet currently held by the decoder. */
+static void decoder_destroy(Decoder *d) {
+    av_free_packet(&d->pkt);
+}
+
+/* Drop the frame/subtitle payload of a queue entry (keeps the entry). */
+static void frame_queue_unref_item(Frame *vp)
+{
+    av_frame_unref(vp->frame);
+    avsubtitle_free(&vp->sub);
+}
+
+/* Initialise a frame ring buffer backed by 'pktq' (used only for its
+ * abort flag and serial). 'keep_last' keeps the last shown frame
+ * available for redisplay. Returns 0 or AVERROR(ENOMEM). */
+static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
+{
+    int i;
+    memset(f, 0, sizeof(FrameQueue));
+    if (!(f->mutex = SDL_CreateMutex()))
+        return AVERROR(ENOMEM);
+    if (!(f->cond = SDL_CreateCond()))
+        return AVERROR(ENOMEM);
+    f->pktq = pktq;
+    f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
+    f->keep_last = !!keep_last;
+    for (i = 0; i < f->max_size; i++)
+        if (!(f->queue[i].frame = av_frame_alloc()))
+            return AVERROR(ENOMEM);
+    return 0;
+}
+
+/* Free every slot and the synchronisation primitives.
+ * NOTE: name is a historical typo ("destory"); kept for compatibility
+ * with existing callers. */
+static void frame_queue_destory(FrameQueue *f)
+{
+    int i;
+    for (i = 0; i < f->max_size; i++) {
+        Frame *vp = &f->queue[i];
+        frame_queue_unref_item(vp);
+        av_frame_free(&vp->frame);
+        free_picture(vp);
+    }
+    SDL_DestroyMutex(f->mutex);
+    SDL_DestroyCond(f->cond);
+}
+
+/* Wake any thread blocked in peek_writable/peek_readable. */
+static void frame_queue_signal(FrameQueue *f)
+{
+    SDL_LockMutex(f->mutex);
+    SDL_CondSignal(f->cond);
+    SDL_UnlockMutex(f->mutex);
+}
+
+/* Next frame to be shown (skips the already-shown slot if any). */
+static Frame *frame_queue_peek(FrameQueue *f)
+{
+    return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
+}
+
+/* Frame after the next one; only valid if nb_remaining > 1. */
+static Frame *frame_queue_peek_next(FrameQueue *f)
+{
+    return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
+}
+
+/* Most recently shown frame (the keep_last slot). */
+static Frame *frame_queue_peek_last(FrameQueue *f)
+{
+    return &f->queue[f->rindex];
+}
+
+/* Block until a slot is free for writing; NULL if aborted. */
+static Frame *frame_queue_peek_writable(FrameQueue *f)
+{
+    /* wait until we have space to put a new frame */
+    SDL_LockMutex(f->mutex);
+    while (f->size >= f->max_size &&
+           !f->pktq->abort_request) {
+        SDL_CondWait(f->cond, f->mutex);
+    }
+    SDL_UnlockMutex(f->mutex);
+
+    if (f->pktq->abort_request)
+        return NULL;
+
+    return &f->queue[f->windex];
+}
+
+/* Block until an unshown frame is available; NULL if aborted. */
+static Frame *frame_queue_peek_readable(FrameQueue *f)
+{
+    /* wait until we have a readable a new frame */
+    SDL_LockMutex(f->mutex);
+    while (f->size - f->rindex_shown <= 0 &&
+           !f->pktq->abort_request) {
+        SDL_CondWait(f->cond, f->mutex);
+    }
+    SDL_UnlockMutex(f->mutex);
+
+    if (f->pktq->abort_request)
+        return NULL;
+
+    return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
+}
+
+/* Publish the frame written at windex and advance the write index. */
+static void frame_queue_push(FrameQueue *f)
+{
+    if (++f->windex == f->max_size)
+        f->windex = 0;
+    SDL_LockMutex(f->mutex);
+    f->size++;
+    SDL_CondSignal(f->cond);
+    SDL_UnlockMutex(f->mutex);
+}
+
+/* Consume the current frame. With keep_last, the first call after a
+ * display only marks it shown; the slot is recycled on the next call. */
+static void frame_queue_next(FrameQueue *f)
+{
+    if (f->keep_last && !f->rindex_shown) {
+        f->rindex_shown = 1;
+        return;
+    }
+    frame_queue_unref_item(&f->queue[f->rindex]);
+    if (++f->rindex == f->max_size)
+        f->rindex = 0;
+    SDL_LockMutex(f->mutex);
+    f->size--;
+    SDL_CondSignal(f->cond);
+    SDL_UnlockMutex(f->mutex);
+}
+
+/* jump back to the previous frame if available by resetting rindex_shown */
+static int frame_queue_prev(FrameQueue *f)
+{
+    int ret = f->rindex_shown;
+    f->rindex_shown = 0;
+    return ret;
+}
+
+/* return the number of undisplayed frames in the queue */
+static int frame_queue_nb_remaining(FrameQueue *f)
+{
+    return f->size - f->rindex_shown;
+}
+
+/* return last shown position */
+static int64_t frame_queue_last_pos(FrameQueue *f)
+{
+    Frame *fp = &f->queue[f->rindex];
+    if (f->rindex_shown && fp->serial == f->pktq->serial)
+        return fp->pos;
+    else
+        return -1;
+}
+
+/* Stop a decoder thread: abort its input queue, unblock it if it waits
+ * on the frame queue, join the thread, then discard leftover packets. */
+static void decoder_abort(Decoder *d, FrameQueue *fq)
+{
+    packet_queue_abort(d->queue);
+    frame_queue_signal(fq);
+    SDL_WaitThread(d->decoder_tid, NULL);
+    d->decoder_tid = NULL;
+    packet_queue_flush(d->queue);
+}
+
+/* Fill a solid rectangle; optionally push it to the display at once. */
+static inline void fill_rectangle(SDL_Surface *screen,
+                                  int x, int y, int w, int h, int color, int update)
+{
+    SDL_Rect rect;
+    rect.x = x;
+    rect.y = y;
+    rect.w = w;
+    rect.h = h;
+    SDL_FillRect(screen, &rect, color);
+    if (update && w > 0 && h > 0)
+        SDL_UpdateRect(screen, x, y, w, h);
+}
+
+/* draw only the border of a rectangle */
+/* Paints the four strips of (xleft,ytop,width,height) that lie outside
+ * the inner rectangle (x,y,w,h) — used to black out letterbox bars. */
+static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
+{
+    int w1, w2, h1, h2;
+
+    /* fill the background */
+    w1 = x;
+    if (w1 < 0)
+        w1 = 0;
+    w2 = width - (x + w);
+    if (w2 < 0)
+        w2 = 0;
+    h1 = y;
+    if (h1 < 0)
+        h1 = 0;
+    h2 = height - (y + h);
+    if (h2 < 0)
+        h2 = 0;
+    fill_rectangle(screen,
+                   xleft, ytop,
+                   w1, height,
+                   color, update);
+    fill_rectangle(screen,
+                   xleft + width - w2, ytop,
+                   w2, height,
+                   color, update);
+    fill_rectangle(screen,
+                   xleft + w1, ytop,
+                   width - w1 - w2, h1,
+                   color, update);
+    fill_rectangle(screen,
+                   xleft + w1, ytop + height - h2,
+                   width - w1 - w2, h2,
+                   color, update);
+}
+
+/* Alpha-blend newp over oldp; s is the extra precision shift carried by
+ * oldp (accumulated chroma samples are pre-shifted). */
+#define ALPHA_BLEND(a, oldp, newp, s)\
+((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
+
+/* Unpack a 32-bit ARGB value from memory. */
+#define RGBA_IN(r, g, b, a, s)\
+{\
+    unsigned int v = ((const uint32_t *)(s))[0];\
+    a = (v >> 24) & 0xff;\
+    r = (v >> 16) & 0xff;\
+    g = (v >> 8) & 0xff;\
+    b = v & 0xff;\
+}
+
+/* Look up a palette index *s in pal and unpack the AYUV components. */
+#define YUVA_IN(y, u, v, a, s, pal)\
+{\
+    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
+    a = (val >> 24) & 0xff;\
+    y = (val >> 16) & 0xff;\
+    u = (val >> 8) & 0xff;\
+    v = val & 0xff;\
+}
+
+/* Pack AYUV components into a 32-bit word at d. */
+#define YUVA_OUT(d, y, u, v, a)\
+{\
+    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
+}
+
+
+/* bytes per source (palettised) subtitle pixel */
+#define BPP 1
+
+/* Alpha-blend one palettised subtitle rectangle onto a YUV420P picture.
+ * The rect is clipped to (imgw, imgh). Luma is blended per pixel; each
+ * chroma sample covers a 2x2 luma block, so u/v/alpha are accumulated
+ * into u1/v1/a1 over the covered pixels and blended with the matching
+ * extra-precision shift (ALPHA_BLEND's 's' argument). The code walks
+ * rows in pairs, with special cases for an odd first row (dsty & 1),
+ * odd first column (dstx & 1), odd trailing column and odd last row. */
+static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
+{
+    int wrap, wrap3, width2, skip2;
+    int y, u, v, a, u1, v1, a1, w, h;
+    uint8_t *lum, *cb, *cr;
+    const uint8_t *p;
+    const uint32_t *pal;
+    int dstx, dsty, dstw, dsth;
+
+    dstw = av_clip(rect->w, 0, imgw);
+    dsth = av_clip(rect->h, 0, imgh);
+    dstx = av_clip(rect->x, 0, imgw - dstw);
+    dsty = av_clip(rect->y, 0, imgh - dsth);
+    lum = dst->data[0] + dsty * dst->linesize[0];
+    cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
+    cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
+
+    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
+    skip2 = dstx >> 1;
+    wrap = dst->linesize[0];
+    wrap3 = rect->pict.linesize[0];
+    p = rect->pict.data[0];
+    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
+
+    /* odd first row: chroma row is shared with the line above, blend at
+     * half weight (shift 0 for singles, 1 for horizontal pairs) */
+    if (dsty & 1) {
+        lum += dstx;
+        cb += skip2;
+        cr += skip2;
+
+        if (dstx & 1) {
+            YUVA_IN(y, u, v, a, p, pal);
+            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
+            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
+            cb++;
+            cr++;
+            lum++;
+            p += BPP;
+        }
+        for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
+            YUVA_IN(y, u, v, a, p, pal);
+            u1 = u;
+            v1 = v;
+            a1 = a;
+            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+
+            YUVA_IN(y, u, v, a, p + BPP, pal);
+            u1 += u;
+            v1 += v;
+            a1 += a;
+            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
+            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
+            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
+            cb++;
+            cr++;
+            p += 2 * BPP;
+            lum += 2;
+        }
+        if (w) {
+            YUVA_IN(y, u, v, a, p, pal);
+            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
+            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
+            p++;
+            lum++;
+        }
+        p += wrap3 - dstw * BPP;
+        lum += wrap - dstw - dstx;
+        cb += dst->linesize[1] - width2 - skip2;
+        cr += dst->linesize[2] - width2 - skip2;
+    }
+    /* main loop: full 2x2 blocks (chroma accumulated over 4 pixels) */
+    for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
+        lum += dstx;
+        cb += skip2;
+        cr += skip2;
+
+        if (dstx & 1) {
+            YUVA_IN(y, u, v, a, p, pal);
+            u1 = u;
+            v1 = v;
+            a1 = a;
+            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+            p += wrap3;
+            lum += wrap;
+            YUVA_IN(y, u, v, a, p, pal);
+            u1 += u;
+            v1 += v;
+            a1 += a;
+            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
+            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
+            cb++;
+            cr++;
+            p += -wrap3 + BPP;
+            lum += -wrap + 1;
+        }
+        for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
+            YUVA_IN(y, u, v, a, p, pal);
+            u1 = u;
+            v1 = v;
+            a1 = a;
+            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+
+            YUVA_IN(y, u, v, a, p + BPP, pal);
+            u1 += u;
+            v1 += v;
+            a1 += a;
+            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
+            p += wrap3;
+            lum += wrap;
+
+            YUVA_IN(y, u, v, a, p, pal);
+            u1 += u;
+            v1 += v;
+            a1 += a;
+            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+
+            YUVA_IN(y, u, v, a, p + BPP, pal);
+            u1 += u;
+            v1 += v;
+            a1 += a;
+            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
+
+            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
+            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
+
+            cb++;
+            cr++;
+            p += -wrap3 + 2 * BPP;
+            lum += -wrap + 2;
+        }
+        if (w) {
+            YUVA_IN(y, u, v, a, p, pal);
+            u1 = u;
+            v1 = v;
+            a1 = a;
+            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+            p += wrap3;
+            lum += wrap;
+            YUVA_IN(y, u, v, a, p, pal);
+            u1 += u;
+            v1 += v;
+            a1 += a;
+            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
+            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
+            cb++;
+            cr++;
+            p += -wrap3 + BPP;
+            lum += -wrap + 1;
+        }
+        p += wrap3 + (wrap3 - dstw * BPP);
+        lum += wrap + (wrap - dstw - dstx);
+        cb += dst->linesize[1] - width2 - skip2;
+        cr += dst->linesize[2] - width2 - skip2;
+    }
+    /* handle odd height */
+    if (h) {
+        lum += dstx;
+        cb += skip2;
+        cr += skip2;
+
+        if (dstx & 1) {
+            YUVA_IN(y, u, v, a, p, pal);
+            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
+            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
+            cb++;
+            cr++;
+            lum++;
+            p += BPP;
+        }
+        for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
+            YUVA_IN(y, u, v, a, p, pal);
+            u1 = u;
+            v1 = v;
+            a1 = a;
+            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+
+            YUVA_IN(y, u, v, a, p + BPP, pal);
+            u1 += u;
+            v1 += v;
+            a1 += a;
+            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
+            /* FIX: blend the accumulated pair (u1/v1), not the second
+             * sample alone — matches the even-row loops above */
+            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
+            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
+            cb++;
+            cr++;
+            p += 2 * BPP;
+            lum += 2;
+        }
+        if (w) {
+            YUVA_IN(y, u, v, a, p, pal);
+            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
+            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
+        }
+    }
+}
+
+/* Release the SDL YUV overlay attached to a frame slot, if any. */
+static void free_picture(Frame *vp)
+{
+     if (vp->bmp) {
+         SDL_FreeYUVOverlay(vp->bmp);
+         vp->bmp = NULL;
+     }
+}
+
+/* Compute the largest even-width rectangle inside the screen area that
+ * preserves the picture's display aspect ratio (sample aspect * w/h),
+ * centred within (scr_xleft, scr_ytop, scr_width, scr_height). */
+static void calculate_display_rect(SDL_Rect *rect,
+                                   int scr_xleft, int scr_ytop, int scr_width, int scr_height,
+                                   int pic_width, int pic_height, AVRational pic_sar)
+{
+    float aspect_ratio;
+    int width, height, x, y;
+
+    if (pic_sar.num == 0)
+        aspect_ratio = 0;
+    else
+        aspect_ratio = av_q2d(pic_sar);
+
+    /* unknown/invalid sample aspect ratio: assume square pixels */
+    if (aspect_ratio <= 0.0)
+        aspect_ratio = 1.0;
+    aspect_ratio *= (float)pic_width / (float)pic_height;
+
+    /* XXX: we suppose the screen has a 1.0 pixel ratio */
+    height = scr_height;
+    width = ((int)rint(height * aspect_ratio)) & ~1;
+    if (width > scr_width) {
+        width = scr_width;
+        height = ((int)rint(width / aspect_ratio)) & ~1;
+    }
+    x = (scr_width - width) / 2;
+    y = (scr_height - height) / 2;
+    rect->x = scr_xleft + x;
+    rect->y = scr_ytop + y;
+    rect->w = FFMAX(width, 1);
+    rect->h = FFMAX(height, 1);
+}
+
+/* Show the current video frame: blend any due subtitle into the YUV
+ * overlay, display it at the aspect-correct rectangle, and repaint the
+ * black border when the rectangle changed. */
+static void video_image_display(VideoState *is)
+{
+    Frame *vp;
+    Frame *sp;
+    AVPicture pict;
+    SDL_Rect rect;
+    int i;
+
+    vp = frame_queue_peek(&is->pictq);
+    if (vp->bmp) {
+        if (is->subtitle_st) {
+            if (frame_queue_nb_remaining(&is->subpq) > 0) {
+                sp = frame_queue_peek(&is->subpq);
+
+                /* only blend once the subtitle's start time is reached */
+                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
+                    SDL_LockYUVOverlay (vp->bmp);
+
+                    /* SDL YV12 overlay stores V before U, hence the
+                     * deliberate plane swap (pixels[2] <-> pixels[1]) */
+                    pict.data[0] = vp->bmp->pixels[0];
+                    pict.data[1] = vp->bmp->pixels[2];
+                    pict.data[2] = vp->bmp->pixels[1];
+
+                    pict.linesize[0] = vp->bmp->pitches[0];
+                    pict.linesize[1] = vp->bmp->pitches[2];
+                    pict.linesize[2] = vp->bmp->pitches[1];
+
+                    for (i = 0; i < sp->sub.num_rects; i++)
+                        blend_subrect(&pict, sp->sub.rects[i],
+                                      vp->bmp->w, vp->bmp->h);
+
+                    SDL_UnlockYUVOverlay (vp->bmp);
+                }
+            }
+        }
+
+        calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
+
+        SDL_DisplayYUVOverlay(vp->bmp, &rect);
+
+        if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
+            int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
+            fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
+            is->last_display_rect = rect;
+        }
+    }
+}
+
+/* Mathematical modulo: result is always in [0, b) even for negative a
+ * (C's % keeps the dividend's sign). */
+static inline int compute_mod(int a, int b)
+{
+    return a < 0 ? a%b + b : a%b;
+}
+
+/* Audio visualisation: draws either a per-channel oscilloscope
+ * (SHOW_MODE_WAVES) or a scrolling RDFT spectrogram. The display is
+ * aligned to the samples currently being played by compensating for
+ * the audio buffer fill level and elapsed callback time. */
+static void video_audio_display(VideoState *s)
+{
+    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
+    int ch, channels, h, h2, bgcolor, fgcolor;
+    int64_t time_diff;
+    int rdft_bits, nb_freq;
+
+    /* smallest power of two covering twice the window height */
+    for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
+        ;
+    nb_freq = 1 << (rdft_bits - 1);
+
+    /* compute display index : center on currently output samples */
+    channels = s->audio_tgt.channels;
+    nb_display_channels = channels;
+    if (!s->paused) {
+        int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
+        n = 2 * channels;
+        delay = s->audio_write_buf_size;
+        delay /= n;
+
+        /* to be more precise, we take into account the time spent since
+           the last buffer computation */
+        if (audio_callback_time) {
+            time_diff = av_gettime_relative() - audio_callback_time;
+            delay -= (time_diff * s->audio_tgt.freq) / 1000000;
+        }
+
+        delay += 2 * data_used;
+        if (delay < data_used)
+            delay = data_used;
+
+        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
+        if (s->show_mode == SHOW_MODE_WAVES) {
+            /* search a zero crossing near the start point to stabilise
+             * the waveform between refreshes */
+            h = INT_MIN;
+            for (i = 0; i < 1000; i += channels) {
+                int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
+                int a = s->sample_array[idx];
+                int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
+                int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
+                int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
+                int score = a - d;
+                if (h < score && (b ^ c) < 0) {
+                    h = score;
+                    i_start = idx;
+                }
+            }
+        }
+
+        s->last_i_start = i_start;
+    } else {
+        i_start = s->last_i_start;
+    }
+
+    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
+    if (s->show_mode == SHOW_MODE_WAVES) {
+        fill_rectangle(screen,
+                       s->xleft, s->ytop, s->width, s->height,
+                       bgcolor, 0);
+
+        fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
+
+        /* total height for one channel */
+        h = s->height / nb_display_channels;
+        /* graph height / 2 */
+        h2 = (h * 9) / 20;
+        for (ch = 0; ch < nb_display_channels; ch++) {
+            i = i_start + ch;
+            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
+            for (x = 0; x < s->width; x++) {
+                y = (s->sample_array[i] * h2) >> 15;
+                if (y < 0) {
+                    y = -y;
+                    ys = y1 - y;
+                } else {
+                    ys = y1;
+                }
+                fill_rectangle(screen,
+                               s->xleft + x, ys, 1, y,
+                               fgcolor, 0);
+                i += channels;
+                if (i >= SAMPLE_ARRAY_SIZE)
+                    i -= SAMPLE_ARRAY_SIZE;
+            }
+        }
+
+        /* blue separator lines between channels */
+        fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
+
+        for (ch = 1; ch < nb_display_channels; ch++) {
+            y = s->ytop + ch * h;
+            fill_rectangle(screen,
+                           s->xleft, y, s->width, 1,
+                           fgcolor, 0);
+        }
+        SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
+    } else {
+        /* spectrogram mode: at most two channels, re-init RDFT on resize */
+        nb_display_channels= FFMIN(nb_display_channels, 2);
+        if (rdft_bits != s->rdft_bits) {
+            av_rdft_end(s->rdft);
+            av_free(s->rdft_data);
+            s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
+            s->rdft_bits = rdft_bits;
+            s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
+        }
+        if (!s->rdft || !s->rdft_data){
+            av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
+            s->show_mode = SHOW_MODE_WAVES;
+        } else {
+            FFTSample *data[2];
+            for (ch = 0; ch < nb_display_channels; ch++) {
+                data[ch] = s->rdft_data + 2 * nb_freq * ch;
+                i = i_start + ch;
+                for (x = 0; x < 2 * nb_freq; x++) {
+                    /* apply a parabolic (Welch) window before the RDFT */
+                    double w = (x-nb_freq) * (1.0 / nb_freq);
+                    data[ch][x] = s->sample_array[i] * (1.0 - w * w);
+                    i += channels;
+                    if (i >= SAMPLE_ARRAY_SIZE)
+                        i -= SAMPLE_ARRAY_SIZE;
+                }
+                av_rdft_calc(s->rdft, data[ch]);
+            }
+            /* Least efficient way to do this, we should of course
+             * directly access it but it is more than fast enough. */
+            for (y = 0; y < s->height; y++) {
+                double w = 1 / sqrt(nb_freq);
+                int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
+                int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
+                       + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
+                a = FFMIN(a, 255);
+                b = FFMIN(b, 255);
+                fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
+
+                fill_rectangle(screen,
+                            s->xpos, s->height-y, 1, 1,
+                            fgcolor, 0);
+            }
+        }
+        SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
+        /* advance the scroll column, wrapping at the right edge */
+        if (!s->paused)
+            s->xpos++;
+        if (s->xpos >= s->width)
+            s->xpos= s->xleft;
+    }
+}
+
+/* Tear down a playback context: stop the read thread, then free all
+ * queues and synchronisation objects. */
+static void stream_close(VideoState *is)
+{
+    /* XXX: use a special url_shutdown call to abort parse cleanly */
+    is->abort_request = 1;
+    SDL_WaitThread(is->read_tid, NULL);
+    packet_queue_destroy(&is->videoq);
+    packet_queue_destroy(&is->audioq);
+    packet_queue_destroy(&is->subtitleq);
+
+    /* free all pictures */
+    frame_queue_destory(&is->pictq);
+    frame_queue_destory(&is->sampq);
+    frame_queue_destory(&is->subpq);
+    SDL_DestroyCond(is->continue_read_thread);
+#if !CONFIG_AVFILTER
+    sws_freeContext(is->img_convert_ctx);
+#endif
+    av_free(is);
+}
+
+/* Orderly shutdown of the whole program; never returns. */
+static void do_exit(VideoState *is)
+{
+    if (is) {
+        stream_close(is);
+    }
+    av_lockmgr_register(NULL);
+    uninit_opts();
+#if CONFIG_AVFILTER
+    av_freep(&vfilters_list);
+#endif
+    avformat_network_deinit();
+    if (show_status)
+        printf("\n");
+    SDL_Quit();
+    av_log(NULL, AV_LOG_QUIET, "%s", "");
+    exit(0);
+}
+
+/* SIGINT/SIGTERM handler: exit immediately with a distinctive status. */
+static void sigterm_handler(int sig)
+{
+    exit(123);
+}
+
+/* Derive the default window size from the video dimensions and sample
+ * aspect ratio (width unconstrained, height capped at 'height'). */
+static void set_default_window_size(int width, int height, AVRational sar)
+{
+    SDL_Rect rect;
+    calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
+    default_width  = rect.w;
+    default_height = rect.h;
+}
+
+/* Create or resize the SDL output surface. Size priority: fullscreen
+ * desktop size > user-forced size > default (video) size. Exits the
+ * program if the video mode cannot be set. */
+static int video_open(VideoState *is, int force_set_video_mode, Frame *vp)
+{
+    int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
+    int w,h;
+
+    if (is_full_screen) flags |= SDL_FULLSCREEN;
+    else                flags |= SDL_RESIZABLE;
+
+    if (vp && vp->width)
+        set_default_window_size(vp->width, vp->height, vp->sar);
+
+    if (is_full_screen && fs_screen_width) {
+        w = fs_screen_width;
+        h = fs_screen_height;
+    } else if (!is_full_screen && screen_width) {
+        w = screen_width;
+        h = screen_height;
+    } else {
+        w = default_width;
+        h = default_height;
+    }
+    /* SDL 1.2 surfaces cannot be wider than 16383 pixels */
+    w = FFMIN(16383, w);
+    if (screen && is->width == screen->w && screen->w == w
+       && is->height== screen->h && screen->h == h && !force_set_video_mode)
+        return 0;
+    screen = SDL_SetVideoMode(w, h, 0, flags);
+    if (!screen) {
+        av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
+        do_exit(is);
+    }
+    if (!window_title)
+        window_title = input_filename;
+    SDL_WM_SetCaption(window_title, window_title);
+
+    is->width  = screen->w;
+    is->height = screen->h;
+
+    return 0;
+}
+
+/* display the current picture, if any */
+/* Dispatches to the audio visualisation or the video frame renderer,
+ * opening the window on first use. */
+static void video_display(VideoState *is)
+{
+    if (!screen)
+        video_open(is, 0, NULL);
+    if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
+        video_audio_display(is);
+    else if (is->video_st)
+        video_image_display(is);
+}
+
+/* Current clock value in seconds, extrapolated from the last update by
+ * wall-clock time and the clock's speed. NAN if the clock's serial no
+ * longer matches its packet queue (stale after a seek). */
+static double get_clock(Clock *c)
+{
+    if (*c->queue_serial != c->serial)
+        return NAN;
+    if (c->paused) {
+        return c->pts;
+    } else {
+        double time = av_gettime_relative() / 1000000.0;
+        return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
+    }
+}
+
+/* Set the clock to 'pts' as of the given wall-clock 'time'. */
+static void set_clock_at(Clock *c, double pts, int serial, double time)
+{
+    c->pts = pts;
+    c->last_updated = time;
+    c->pts_drift = c->pts - time;
+    c->serial = serial;
+}
+
+/* Set the clock to 'pts' as of now. */
+static void set_clock(Clock *c, double pts, int serial)
+{
+    double time = av_gettime_relative() / 1000000.0;
+    set_clock_at(c, pts, serial, time);
+}
+
+/* Change playback speed, re-anchoring first so the value is continuous. */
+static void set_clock_speed(Clock *c, double speed)
+{
+    set_clock(c, get_clock(c), c->serial);
+    c->speed = speed;
+}
+
+/* Initialise a clock in the invalid state (NAN, serial -1). */
+static void init_clock(Clock *c, int *queue_serial)
+{
+    c->speed = 1.0;
+    c->paused = 0;
+    c->queue_serial = queue_serial;
+    set_clock(c, NAN, -1);
+}
+
+/* Snap clock c to 'slave' when c is invalid or drifted too far. */
+static void sync_clock_to_slave(Clock *c, Clock *slave)
+{
+    double clock = get_clock(c);
+    double slave_clock = get_clock(slave);
+    if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
+        set_clock(c, slave_clock, slave->serial);
+}
+
+/* Effective sync master: the configured master, falling back when the
+ * required stream is absent (video->audio, audio->external). */
+static int get_master_sync_type(VideoState *is) {
+    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
+        if (is->video_st)
+            return AV_SYNC_VIDEO_MASTER;
+        else
+            return AV_SYNC_AUDIO_MASTER;
+    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
+        if (is->audio_st)
+            return AV_SYNC_AUDIO_MASTER;
+        else
+            return AV_SYNC_EXTERNAL_CLOCK;
+    } else {
+        return AV_SYNC_EXTERNAL_CLOCK;
+    }
+}
+
+/* get the current master clock value */
+static double get_master_clock(VideoState *is)
+{
+    double val;
+
+    switch (get_master_sync_type(is)) {
+        case AV_SYNC_VIDEO_MASTER:
+            val = get_clock(&is->vidclk);
+            break;
+        case AV_SYNC_AUDIO_MASTER:
+            val = get_clock(&is->audclk);
+            break;
+        default:
+            val = get_clock(&is->extclk);
+            break;
+    }
+    return val;
+}
+
+/* Nudge the external clock speed based on queue fill: slow down when a
+ * queue is nearly empty, speed up when both are full, otherwise drift
+ * back towards 1.0. */
+static void check_external_clock_speed(VideoState *is) {
+   if (is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2 ||
+       is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2) {
+       set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
+   } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
+              (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
+       set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
+   } else {
+       double speed = is->extclk.speed;
+       if (speed != 1.0)
+           set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
+   }
+}
+
+/* seek in the stream */
+/* Record a seek request for the read thread; ignored if one is already
+ * pending. */
+static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
+{
+    if (!is->seek_req) {
+        is->seek_pos = pos;
+        is->seek_rel = rel;
+        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
+        if (seek_by_bytes)
+            is->seek_flags |= AVSEEK_FLAG_BYTE;
+        is->seek_req = 1;
+        SDL_CondSignal(is->continue_read_thread);
+    }
+}
+
+/* pause or resume the video */
+static void stream_toggle_pause(VideoState *is)
+{
+    if (is->paused) {
+        /* on resume, advance the frame timer by the time spent paused */
+        is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
+        if (is->read_pause_return != AVERROR(ENOSYS)) {
+            is->vidclk.paused = 0;
+        }
+        set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
+    }
+    set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
+    is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
+}
+
+/* User pause toggle; also cancels single-frame stepping. */
+static void toggle_pause(VideoState *is)
+{
+    stream_toggle_pause(is);
+    is->step = 0;
+}
+
+static void step_to_next_frame(VideoState *is)
+{
+    /* if the stream is paused unpause it, then step */
+    if (is->paused)
+        stream_toggle_pause(is);
+    is->step = 1;
+}
+
+/* Adjust the nominal frame delay so video converges on the master
+ * clock: shrink it (drop time) when video is late, stretch or double it
+ * when video is ahead. Differences larger than max_frame_duration are
+ * treated as timestamp discontinuities and ignored. */
+static double compute_target_delay(double delay, VideoState *is)
+{
+    double sync_threshold, diff = 0;
+
+    /* update delay to follow master synchronisation source */
+    if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
+        /* if video is slave, we try to correct big delays by
+           duplicating or deleting a frame */
+        diff = get_clock(&is->vidclk) - get_master_clock(is);
+
+        /* skip or repeat frame. We take into account the
+           delay to compute the threshold. I still don't know
+           if it is the best guess */
+        sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
+        if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
+            if (diff <= -sync_threshold)
+                delay = FFMAX(0, delay + diff);
+            else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
+                delay = delay + diff;
+            else if (diff >= sync_threshold)
+                delay = 2 * delay;
+        }
+    }
+
+    av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
+            delay, -diff);
+
+    return delay;
+}
+
+/* Duration of 'vp' inferred from the next frame's pts; falls back to
+ * the frame's own duration across serials or bogus pts gaps. */
+static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
+    if (vp->serial == nextvp->serial) {
+        double duration = nextvp->pts - vp->pts;
+        if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
+            return vp->duration;
+        else
+            return duration;
+    } else {
+        return 0.0;
+    }
+}
+
+static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
+    /* update current video pts */
+    set_clock(&is->vidclk, pts, serial);
+    sync_clock_to_slave(&is->extclk, &is->vidclk);
+}
+
+/* called to display each frame */
+static void video_refresh(void *opaque, double *remaining_time)
+{
+ VideoState *is = opaque;
+ double time;
+
+ Frame *sp, *sp2;
+
+ if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
+ check_external_clock_speed(is);
+
+ if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
+ time = av_gettime_relative() / 1000000.0;
+ if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
+ video_display(is);
+ is->last_vis_time = time;
+ }
+ *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
+ }
+
+ if (is->video_st) {
+ int redisplay = 0;
+ if (is->force_refresh)
+ redisplay = frame_queue_prev(&is->pictq);
+retry:
+ if (frame_queue_nb_remaining(&is->pictq) == 0) {
+ // nothing to do, no picture to display in the queue
+ } else {
+ double last_duration, duration, delay;
+ Frame *vp, *lastvp;
+
+ /* dequeue the picture */
+ lastvp = frame_queue_peek_last(&is->pictq);
+ vp = frame_queue_peek(&is->pictq);
+
+ if (vp->serial != is->videoq.serial) {
+ frame_queue_next(&is->pictq);
+ redisplay = 0;
+ goto retry;
+ }
+
+ if (lastvp->serial != vp->serial && !redisplay)
+ is->frame_timer = av_gettime_relative() / 1000000.0;
+
+ if (is->paused)
+ goto display;
+
+ /* compute nominal last_duration */
+ last_duration = vp_duration(is, lastvp, vp);
+ if (redisplay)
+ delay = 0.0;
+ else
+ delay = compute_target_delay(last_duration, is);
+
+ time= av_gettime_relative()/1000000.0;
+ if (time < is->frame_timer + delay && !redisplay) {
+ *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
+ return;
+ }
+
+ is->frame_timer += delay;
+ if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
+ is->frame_timer = time;
+
+ SDL_LockMutex(is->pictq.mutex);
+ if (!redisplay && !isnan(vp->pts))
+ update_video_pts(is, vp->pts, vp->pos, vp->serial);
+ SDL_UnlockMutex(is->pictq.mutex);
+
+ if (frame_queue_nb_remaining(&is->pictq) > 1) {
+ Frame *nextvp = frame_queue_peek_next(&is->pictq);
+ duration = vp_duration(is, vp, nextvp);
+ if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
+ if (!redisplay)
+ is->frame_drops_late++;
+ frame_queue_next(&is->pictq);
+ redisplay = 0;
+ goto retry;
+ }
+ }
+
+ if (is->subtitle_st) {
+ while (frame_queue_nb_remaining(&is->subpq) > 0) {
+ sp = frame_queue_peek(&is->subpq);
+
+ if (frame_queue_nb_remaining(&is->subpq) > 1)
+ sp2 = frame_queue_peek_next(&is->subpq);
+ else
+ sp2 = NULL;
+
+ if (sp->serial != is->subtitleq.serial
+ || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
+ || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
+ {
+ frame_queue_next(&is->subpq);
+ } else {
+ break;
+ }
+ }
+ }
+
+display:
+ /* display picture */
+ if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
+ video_display(is);
+
+ frame_queue_next(&is->pictq);
+
+ if (is->step && !is->paused)
+ stream_toggle_pause(is);
+ }
+ }
+ is->force_refresh = 0;
+ if (show_status) {
+ static int64_t last_time;
+ int64_t cur_time;
+ int aqsize, vqsize, sqsize;
+ double av_diff;
+
+ cur_time = av_gettime_relative();
+ if (!last_time || (cur_time - last_time) >= 30000) {
+ aqsize = 0;
+ vqsize = 0;
+ sqsize = 0;
+ if (is->audio_st)
+ aqsize = is->audioq.size;
+ if (is->video_st)
+ vqsize = is->videoq.size;
+ if (is->subtitle_st)
+ sqsize = is->subtitleq.size;
+ av_diff = 0;
+ if (is->audio_st && is->video_st)
+ av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
+ else if (is->video_st)
+ av_diff = get_master_clock(is) - get_clock(&is->vidclk);
+ else if (is->audio_st)
+ av_diff = get_master_clock(is) - get_clock(&is->audclk);
+ av_log(NULL, AV_LOG_INFO,
+ "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
+ get_master_clock(is),
+ (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
+ av_diff,
+ is->frame_drops_early + is->frame_drops_late,
+ aqsize / 1024,
+ vqsize / 1024,
+ sqsize,
+ is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
+ is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
+ fflush(stdout);
+ last_time = cur_time;
+ }
+ }
+}
+
/* Allocate the SDL YUV overlay for the frame slot at the picture queue's
 * write index.  Must run in the main thread (it is triggered via the
 * FF_ALLOC_EVENT pushed by queue_picture) to avoid potential locking
 * problems with SDL. */
static void alloc_picture(VideoState *is)
{
    Frame *vp;
    int64_t bufferdiff;

    vp = &is->pictq.queue[is->pictq.windex];

    /* Release any overlay previously attached to this slot. */
    free_picture(vp);

    /* (Re)open the video output at the size queue_picture stored in vp. */
    video_open(is, 0, vp);

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    /* Distance between the first two plane pointers; used below to detect a
     * backing buffer smaller than the requested image. */
    bufferdiff = vp->bmp ? FFMAX(vp->bmp->pixels[0], vp->bmp->pixels[1]) - FFMIN(vp->bmp->pixels[0], vp->bmp->pixels[1]) : 0;
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width || bufferdiff < (int64_t)vp->height * vp->bmp->pitches[0]) {
        /* SDL allocates a buffer smaller than requested if the video
         * overlay hardware is unable to support the requested size. */
        av_log(NULL, AV_LOG_FATAL,
               "Error: the video system does not support an image\n"
                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
                        "to reduce the image size.\n", vp->width, vp->height );
        do_exit(is);
    }

    /* Wake queue_picture, which is waiting on pictq.cond for the overlay. */
    SDL_LockMutex(is->pictq.mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq.cond);
    SDL_UnlockMutex(is->pictq.mutex);
}
+
+static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
+ int i, width, height;
+ Uint8 *p, *maxp;
+ for (i = 0; i < 3; i++) {
+ width = bmp->w;
+ height = bmp->h;
+ if (i > 0) {
+ width >>= 1;
+ height >>= 1;
+ }
+ if (bmp->pitches[i] > width) {
+ maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
+ for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
+ *(p+1) = *p;
+ }
+ }
+}
+
/* Convert src_frame to YUV420 into an SDL overlay and push it onto the
 * picture queue together with its pts, duration, byte position and packet
 * serial.  Returns 0 on success, -1 if the video queue was aborted. */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
{
    Frame *vp;

#if defined(DEBUG_SYNC) && 0
    printf("frame_type=%c pts=%0.3f\n",
           av_get_picture_type_char(src_frame->pict_type), pts);
#endif

    /* Block until a writable queue slot is available (or the queue aborts). */
    if (!(vp = frame_queue_peek_writable(&is->pictq)))
        return -1;

    vp->sar = src_frame->sample_aspect_ratio;

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp || vp->reallocate || !vp->allocated ||
        vp->width != src_frame->width ||
        vp->height != src_frame->height) {
        SDL_Event event;

        vp->allocated = 0;
        vp->reallocate = 0;
        vp->width = src_frame->width;
        vp->height = src_frame->height;

        /* the allocation must be done in the main thread to avoid
           locking problems. */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq.mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq.cond, is->pictq.mutex);
        }
        /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
        if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
            while (!vp->allocated && !is->abort_request) {
                SDL_CondWait(is->pictq.cond, is->pictq.mutex);
            }
        }
        SDL_UnlockMutex(is->pictq.mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict = { { 0 } };

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        /* Note the U/V swap: the overlay was created as YV12 (Y, V, U plane
         * order), while AVPicture expects Y, U, V. */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        /* The filter graph already forced AV_PIX_FMT_YUV420P, so a plain
         * copy suffices here. */
        // FIXME use direct rendering
        av_picture_copy(&pict, (AVPicture *)src_frame,
                        src_frame->format, vp->width, vp->height);
#else
        av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, src_frame->format, vp->width, vp->height,
            AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
        if (!is->img_convert_ctx) {
            av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* workaround SDL PITCH_WORKAROUND */
        duplicate_right_border_pixels(vp->bmp);
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->duration = duration;
        vp->pos = pos;
        vp->serial = serial;

        /* now we can update the picture count */
        frame_queue_push(&is->pictq);
    }
    return 0;
}
+
/* Decode the next video frame into *frame.
 * Returns 1 when a frame was produced, 0 when no frame is available or the
 * frame was dropped for sync (early framedrop), and a negative value on
 * decoder error/abort. */
static int get_video_frame(VideoState *is, AVFrame *frame)
{
    int got_picture;

    if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
        return -1;

    if (got_picture) {
        double dpts = NAN;

        if (frame->pts != AV_NOPTS_VALUE)
            dpts = av_q2d(is->video_st->time_base) * frame->pts;

        frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);

        /* Early framedrop: discard the frame before filtering/queuing when it
         * is already behind the master clock (framedrop > 0 forces this even
         * when video is the master). */
        if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
            if (frame->pts != AV_NOPTS_VALUE) {
                double diff = dpts - get_master_clock(is);
                /* Only drop when the lag is plausible (below the no-sync
                 * threshold), the clock serial matches, and more packets are
                 * queued so playback will not stall. */
                if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
                    diff - is->frame_last_filter_delay < 0 &&
                    is->viddec.pkt_serial == is->vidclk.serial &&
                    is->videoq.nb_packets) {
                    is->frame_drops_early++;
                    av_frame_unref(frame);
                    got_picture = 0;
                }
            }
        }
    }

    return got_picture;
}
+
+#if CONFIG_AVFILTER
+static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
+ AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
+{
+ int ret, i;
+ int nb_filters = graph->nb_filters;
+ AVFilterInOut *outputs = NULL, *inputs = NULL;
+
+ if (filtergraph) {
+ outputs = avfilter_inout_alloc();
+ inputs = avfilter_inout_alloc();
+ if (!outputs || !inputs) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ outputs->name = av_strdup("in");
+ outputs->filter_ctx = source_ctx;
+ outputs->pad_idx = 0;
+ outputs->next = NULL;
+
+ inputs->name = av_strdup("out");
+ inputs->filter_ctx = sink_ctx;
+ inputs->pad_idx = 0;
+ inputs->next = NULL;
+
+ if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
+ goto fail;
+ } else {
+ if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
+ goto fail;
+ }
+
+ /* Reorder the filters to ensure that inputs of the custom filters are merged first */
+ for (i = 0; i < graph->nb_filters - nb_filters; i++)
+ FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
+
+ ret = avfilter_graph_config(graph, NULL);
+fail:
+ avfilter_inout_free(&outputs);
+ avfilter_inout_free(&inputs);
+ return ret;
+}
+
/* Build the video filter chain: buffer source (fed with the decoded frame's
 * geometry/format), the user filters in vfilters, a crop to even dimensions,
 * optional auto-rotation, and a buffersink restricted to YUV420P.
 * On success stores the endpoints in is->in_video_filter /
 * is->out_video_filter.  Returns 0 or a negative AVERROR code. */
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
{
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
    char sws_flags_str[128];
    char buffersrc_args[256];
    int ret;
    AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
    AVCodecContext *codec = is->video_st->codec;
    AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);

    /* Propagate the global sws flags to any auto-inserted scale filters. */
    av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
    graph->scale_sws_opts = av_strdup(sws_flags_str);

    snprintf(buffersrc_args, sizeof(buffersrc_args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
             frame->width, frame->height, frame->format,
             is->video_st->time_base.num, is->video_st->time_base.den,
             codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
    if (fr.num && fr.den)
        av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);

    if ((ret = avfilter_graph_create_filter(&filt_src,
                                            avfilter_get_by_name("buffer"),
                                            "ffplay_buffer", buffersrc_args, NULL,
                                            graph)) < 0)
        goto fail;

    ret = avfilter_graph_create_filter(&filt_out,
                                       avfilter_get_by_name("buffersink"),
                                       "ffplay_buffersink", NULL, NULL, graph);
    if (ret < 0)
        goto fail;

    if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
        goto fail;

    last_filter = filt_out;

/* Note: this macro adds a filter before the lastly added filter, so the
 * processing order of the filters is in reverse */
#define INSERT_FILT(name, arg) do {                                          \
    AVFilterContext *filt_ctx;                                               \
                                                                             \
    ret = avfilter_graph_create_filter(&filt_ctx,                            \
                                       avfilter_get_by_name(name),           \
                                       "ffplay_" name, arg, NULL, graph);    \
    if (ret < 0)                                                             \
        goto fail;                                                           \
                                                                             \
    ret = avfilter_link(filt_ctx, 0, last_filter, 0);                        \
    if (ret < 0)                                                             \
        goto fail;                                                           \
                                                                             \
    last_filter = filt_ctx;                                                  \
} while (0)

    /* SDL YUV code is not handling odd width/height for some driver
     * combinations, therefore we crop the picture to an even width/height. */
    INSERT_FILT("crop", "floor(in_w/2)*2:floor(in_h/2)*2");

    if (autorotate) {
        /* Undo the rotation advertised in the stream's display matrix. */
        double theta = get_rotation(is->video_st);

        if (fabs(theta - 90) < 1.0) {
            INSERT_FILT("transpose", "clock");
        } else if (fabs(theta - 180) < 1.0) {
            INSERT_FILT("hflip", NULL);
            INSERT_FILT("vflip", NULL);
        } else if (fabs(theta - 270) < 1.0) {
            INSERT_FILT("transpose", "cclock");
        } else if (fabs(theta) > 1.0) {
            char rotate_buf[64];
            snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
            INSERT_FILT("rotate", rotate_buf);
        }
    }

    if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
        goto fail;

    is->in_video_filter = filt_src;
    is->out_video_filter = filt_out;

fail:
    return ret;
}
+
/* (Re)build the audio filter graph is->agraph: abuffer source described by
 * is->audio_filter_src, the user filters in afilters, and an abuffersink
 * restricted to S16.  When force_output_format is set, the sink is further
 * constrained to the device parameters in is->audio_tgt.
 * On success stores the endpoints in is->in_audio_filter /
 * is->out_audio_filter; on failure frees the graph.  Returns 0 or a
 * negative AVERROR code. */
static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
{
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
    int sample_rates[2] = { 0, -1 };
    int64_t channel_layouts[2] = { 0, -1 };
    int channels[2] = { 0, -1 };
    AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
    char aresample_swr_opts[512] = "";
    AVDictionaryEntry *e = NULL;
    char asrc_args[256];
    int ret;

    avfilter_graph_free(&is->agraph);
    if (!(is->agraph = avfilter_graph_alloc()))
        return AVERROR(ENOMEM);

    /* Serialize the global swr options as "key=val:..." for aresample. */
    while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
        av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
    if (strlen(aresample_swr_opts))
        aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
    av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);

    ret = snprintf(asrc_args, sizeof(asrc_args),
                   "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
                   is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
                   is->audio_filter_src.channels,
                   1, is->audio_filter_src.freq);
    if (is->audio_filter_src.channel_layout)
        snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
                 ":channel_layout=0x%"PRIx64,  is->audio_filter_src.channel_layout);

    ret = avfilter_graph_create_filter(&filt_asrc,
                                       avfilter_get_by_name("abuffer"), "ffplay_abuffer",
                                       asrc_args, NULL, is->agraph);
    if (ret < 0)
        goto end;


    ret = avfilter_graph_create_filter(&filt_asink,
                                       avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
                                       NULL, NULL, is->agraph);
    if (ret < 0)
        goto end;

    if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
        goto end;
    if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
        goto end;

    if (force_output_format) {
        /* Pin the sink to the already-opened audio device's format so no
         * further conversion is needed after the graph. */
        channel_layouts[0] = is->audio_tgt.channel_layout;
        channels       [0] = is->audio_tgt.channels;
        sample_rates   [0] = is->audio_tgt.freq;
        if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
            goto end;
        if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts,  -1, AV_OPT_SEARCH_CHILDREN)) < 0)
            goto end;
        if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels       ,  -1, AV_OPT_SEARCH_CHILDREN)) < 0)
            goto end;
        if ((ret = av_opt_set_int_list(filt_asink, "sample_rates"   , sample_rates   ,  -1, AV_OPT_SEARCH_CHILDREN)) < 0)
            goto end;
    }


    if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
        goto end;

    is->in_audio_filter  = filt_asrc;
    is->out_audio_filter = filt_asink;

end:
    if (ret < 0)
        avfilter_graph_free(&is->agraph);
    return ret;
}
+#endif /* CONFIG_AVFILTER */
+
/* Audio decoder thread: decode frames, push them through the audio filter
 * graph when CONFIG_AVFILTER is set (rebuilding it whenever the input
 * format/layout/rate or packet serial changes), and queue the resulting
 * frames onto is->sampq for the SDL audio callback to consume. */
static int audio_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame = av_frame_alloc();
    Frame *af;
#if CONFIG_AVFILTER
    int last_serial = -1;
    int64_t dec_channel_layout;
    int reconfigure;
#endif
    int got_frame = 0;
    AVRational tb;
    int ret = 0;

    if (!frame)
        return AVERROR(ENOMEM);

    do {
        if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
            goto the_end;

        if (got_frame) {
                tb = (AVRational){1, frame->sample_rate};

#if CONFIG_AVFILTER
                dec_channel_layout = get_valid_channel_layout(frame->channel_layout, av_frame_get_channels(frame));

                /* Rebuild the filter graph if the decoded format no longer
                 * matches what the graph was configured for, or after a seek
                 * (serial change). */
                reconfigure =
                    cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
                                   frame->format, av_frame_get_channels(frame))    ||
                    is->audio_filter_src.channel_layout != dec_channel_layout ||
                    is->audio_filter_src.freq           != frame->sample_rate ||
                    is->auddec.pkt_serial               != last_serial;

                if (reconfigure) {
                    char buf1[1024], buf2[1024];
                    av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
                    av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
                    av_log(NULL, AV_LOG_DEBUG,
                           "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
                           is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
                           frame->sample_rate, av_frame_get_channels(frame), av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);

                    is->audio_filter_src.fmt            = frame->format;
                    is->audio_filter_src.channels       = av_frame_get_channels(frame);
                    is->audio_filter_src.channel_layout = dec_channel_layout;
                    is->audio_filter_src.freq           = frame->sample_rate;
                    last_serial                         = is->auddec.pkt_serial;

                    if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
                        goto the_end;
                }

            if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
                goto the_end;

            /* Drain every frame the graph produces for this input frame. */
            while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
                tb = is->out_audio_filter->inputs[0]->time_base;
#endif
                if (!(af = frame_queue_peek_writable(&is->sampq)))
                    goto the_end;

                af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
                af->pos = av_frame_get_pkt_pos(frame);
                af->serial = is->auddec.pkt_serial;
                af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});

                av_frame_move_ref(af->frame, frame);
                frame_queue_push(&is->sampq);

#if CONFIG_AVFILTER
                /* Stop draining after a seek so stale frames are not queued. */
                if (is->audioq.serial != is->auddec.pkt_serial)
                    break;
            }
            if (ret == AVERROR_EOF)
                is->auddec.finished = is->auddec.pkt_serial;
#endif
        }
    } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&is->agraph);
#endif
    av_frame_free(&frame);
    return ret;
}
+
/* Start the decoder's packet queue and spawn its worker thread running
 * fn(arg).  Uses the two-argument SDL 1.2 SDL_CreateThread() signature. */
static void decoder_start(Decoder *d, int (*fn)(void *), void *arg)
{
    packet_queue_start(d->queue);
    /* NOTE(review): SDL_CreateThread() can return NULL on failure; the
     * result is not checked here — confirm callers tolerate a NULL tid. */
    d->decoder_tid = SDL_CreateThread(fn, arg);
}
+
/* Video decoder thread: pull decoded frames via get_video_frame(), run them
 * through the video filter graph when CONFIG_AVFILTER is set (rebuilding it
 * on any change of size, format, packet serial or selected vfilter), and
 * queue the results with queue_picture(). */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame = av_frame_alloc();
    double pts;
    double duration;
    int ret;
    AVRational tb = is->video_st->time_base;
    AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);

#if CONFIG_AVFILTER
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *filt_out = NULL, *filt_in = NULL;
    int last_w = 0;
    int last_h = 0;
    /* -2 is an invalid pix_fmt, guaranteeing the first frame triggers a
     * (re)configuration below. */
    enum AVPixelFormat last_format = -2;
    int last_serial = -1;
    int last_vfilter_idx = 0;
    if (!graph) {
        av_frame_free(&frame);
        return AVERROR(ENOMEM);
    }

#endif

    if (!frame) {
#if CONFIG_AVFILTER
        avfilter_graph_free(&graph);
#endif
        return AVERROR(ENOMEM);
    }

    for (;;) {
        ret = get_video_frame(is, frame);
        if (ret < 0)
            goto the_end;
        if (!ret)
            continue;

#if CONFIG_AVFILTER
        if (   last_w != frame->width
            || last_h != frame->height
            || last_format != frame->format
            || last_serial != is->viddec.pkt_serial
            || last_vfilter_idx != is->vfilter_idx) {
            av_log(NULL, AV_LOG_DEBUG,
                   "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
                   last_w, last_h,
                   (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
                   frame->width, frame->height,
                   (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
            avfilter_graph_free(&graph);
            graph = avfilter_graph_alloc();
            if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
                /* Filter setup failed: ask the main loop to quit. */
                SDL_Event event;
                event.type = FF_QUIT_EVENT;
                event.user.data1 = is;
                SDL_PushEvent(&event);
                goto the_end;
            }
            filt_in  = is->in_video_filter;
            filt_out = is->out_video_filter;
            last_w = frame->width;
            last_h = frame->height;
            last_format = frame->format;
            last_serial = is->viddec.pkt_serial;
            last_vfilter_idx = is->vfilter_idx;
            frame_rate = filt_out->inputs[0]->frame_rate;
        }

        ret = av_buffersrc_add_frame(filt_in, frame);
        if (ret < 0)
            goto the_end;

        while (ret >= 0) {
            is->frame_last_returned_time = av_gettime_relative() / 1000000.0;

            ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
            if (ret < 0) {
                if (ret == AVERROR_EOF)
                    is->viddec.finished = is->viddec.pkt_serial;
                ret = 0;
                break;
            }

            /* Measure the filter delay; it feeds the early-framedrop logic
             * in get_video_frame().  Discard implausible outliers. */
            is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
            if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
                is->frame_last_filter_delay = 0;
            tb = filt_out->inputs[0]->time_base;
#endif
            duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
            pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
            ret = queue_picture(is, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
            av_frame_unref(frame);
#if CONFIG_AVFILTER
        }
#endif

        if (ret < 0)
            goto the_end;
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_frame_free(&frame);
    return 0;
}
+
/* Subtitle decoder thread: decode subtitles and queue them onto is->subpq.
 * For format 0 subtitles (the palettized/bitmap kind, given the pict.data
 * access below) every palette entry is converted from RGBA to YUVA before
 * queuing; other decoded subtitles are freed. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    Frame *sp;
    int got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for (;;) {
        if (!(sp = frame_queue_peek_writable(&is->subpq)))
            return 0;

        if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
            break;

        pts = 0;

        if (got_subtitle && sp->sub.format == 0) {
            if (sp->sub.pts != AV_NOPTS_VALUE)
                pts = sp->sub.pts / (double)AV_TIME_BASE;
            sp->pts = pts;
            sp->serial = is->subdec.pkt_serial;

            /* Convert each rect's palette (pict.data[1]) from RGBA to YUVA
             * in place, using CCIR coefficients. */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            frame_queue_push(&is->subpq);
        } else if (got_subtitle) {
            avsubtitle_free(&sp->sub);
        }
    }
    return 0;
}
+
+/* copy samples for viewing in editor window */
+static void update_sample_display(VideoState *is, short *samples, int samples_size)
+{
+ int size, len;
+
+ size = samples_size / sizeof(short);
+ while (size > 0) {
+ len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
+ if (len > size)
+ len = size;
+ memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
+ samples += len;
+ is->sample_array_index += len;
+ if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
+ is->sample_array_index = 0;
+ size -= len;
+ }
+}
+
/* return the wanted number of samples to get better sync if sync_type is video
 * or external master clock.
 * When audio is not the master clock, nudges the sample count up or down
 * (bounded by SAMPLE_CORRECTION_PERCENT_MAX) based on an exponentially
 * weighted average of the audio-vs-master clock difference; otherwise
 * returns nb_samples unchanged. */
static int synchronize_audio(VideoState *is, int nb_samples)
{
    int wanted_nb_samples = nb_samples;

    /* if not master, then we try to remove or add samples to correct the clock */
    if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
        double diff, avg_diff;
        int min_nb_samples, max_nb_samples;

        diff = get_clock(&is->audclk) - get_master_clock(is);

        if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
            /* Exponentially weighted moving sum of the clock difference. */
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
                /* not enough measures to have a correct estimate */
                is->audio_diff_avg_count++;
            } else {
                /* estimate the A-V difference */
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);

                if (fabs(avg_diff) >= is->audio_diff_threshold) {
                    /* Convert the time drift to a sample-count correction,
                     * clamped to +/- SAMPLE_CORRECTION_PERCENT_MAX percent. */
                    wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
                    min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
                    max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
                    wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
                }
                av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
                        diff, avg_diff, wanted_nb_samples - nb_samples,
                        is->audio_clock, is->audio_diff_threshold);
            }
        } else {
            /* too big difference : may be initial PTS errors, so
               reset A-V filter */
            is->audio_diff_avg_count = 0;
            is->audio_diff_cum       = 0;
        }
    }

    return wanted_nb_samples;
}
+
/**
 * Decode one audio frame and return its uncompressed size.
 *
 * The processed audio frame is decoded, converted if required, and
 * stored in is->audio_buf, with size in bytes given by the return
 * value.  Returns -1 when playback is paused, when no readable frame is
 * available, or when resampling fails.
 */
static int audio_decode_frame(VideoState *is)
{
    int data_size, resampled_data_size;
    int64_t dec_channel_layout;
    av_unused double audio_clock0;
    int wanted_nb_samples;
    Frame *af;

    if (is->paused)
        return -1;

    /* Skip frames left over from before a seek (stale serials). */
    do {
        if (!(af = frame_queue_peek_readable(&is->sampq)))
            return -1;
        frame_queue_next(&is->sampq);
    } while (af->serial != is->audioq.serial);

    data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(af->frame),
                                           af->frame->nb_samples,
                                           af->frame->format, 1);

    /* Trust the frame's channel_layout only if it agrees with the channel
     * count; otherwise derive a default layout. */
    dec_channel_layout =
        (af->frame->channel_layout && av_frame_get_channels(af->frame) == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
        af->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(af->frame));
    wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);

    /* (Re)create the resampler when the source format changed, or when sync
     * wants a different sample count and no resampler exists yet. */
    if (af->frame->format != is->audio_src.fmt ||
        dec_channel_layout != is->audio_src.channel_layout ||
        af->frame->sample_rate != is->audio_src.freq ||
        (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
        swr_free(&is->swr_ctx);
        is->swr_ctx = swr_alloc_set_opts(NULL,
                                         is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
                                         dec_channel_layout, af->frame->format, af->frame->sample_rate,
                                         0, NULL);
        if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
            av_log(NULL, AV_LOG_ERROR,
                   "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
                    af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), av_frame_get_channels(af->frame),
                    is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
            swr_free(&is->swr_ctx);
            return -1;
        }
        is->audio_src.channel_layout = dec_channel_layout;
        is->audio_src.channels       = av_frame_get_channels(af->frame);
        is->audio_src.freq = af->frame->sample_rate;
        is->audio_src.fmt = af->frame->format;
    }

    if (is->swr_ctx) {
        const uint8_t **in = (const uint8_t **)af->frame->extended_data;
        uint8_t **out = &is->audio_buf1;
        /* +256 samples of headroom for resampler delay/rounding. */
        int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
        int out_size  = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
        int len2;
        if (out_size < 0) {
            av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
            return -1;
        }
        if (wanted_nb_samples != af->frame->nb_samples) {
            /* Stretch/shrink the output by the sync correction. */
            if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
                                        wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
                av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
                return -1;
            }
        }
        av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
        if (!is->audio_buf1)
            return AVERROR(ENOMEM);
        len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
        if (len2 < 0) {
            av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
            return -1;
        }
        if (len2 == out_count) {
            av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
            if (swr_init(is->swr_ctx) < 0)
                swr_free(&is->swr_ctx);
        }
        is->audio_buf = is->audio_buf1;
        resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
    } else {
        is->audio_buf = af->frame->data[0];
        resampled_data_size = data_size;
    }

    audio_clock0 = is->audio_clock;
    /* update the audio clock with the pts */
    if (!isnan(af->pts))
        is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
    else
        is->audio_clock = NAN;
    is->audio_clock_serial = af->serial;
#ifdef DEBUG
    {
        static double last_clock;
        printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
               is->audio_clock - last_clock,
               is->audio_clock, audio_clock0);
        last_clock = is->audio_clock;
    }
#endif
    return resampled_data_size;
}
+
/* prepare a new audio buffer.
 * SDL audio callback: fill `stream` with `len` bytes, pulling decoded data
 * through audio_decode_frame() and outputting silence on decode failure,
 * then update the audio clock from the amount of data still buffered. */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;

    audio_callback_time = av_gettime_relative();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf      = is->silence_buf;
               /* largest multiple of frame_size that fits in silence_buf */
               is->audio_buf_size = sizeof(is->silence_buf) / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
           } else {
               if (is->show_mode != SHOW_MODE_VIDEO)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
    is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
    /* Let's assume the audio driver that is used by SDL has two periods. */
    if (!isnan(is->audio_clock)) {
        /* Set the clock back by the data still sitting in the hardware and
         * software buffers, timestamped at callback entry. */
        set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
        sync_clock_to_slave(&is->extclk, &is->audclk);
    }
}
+
/* Open the SDL audio device with the wanted layout/rate, falling back
 * through next_nb_channels[] and then next_sample_rates[] until SDL accepts
 * a combination.  On success fills *audio_hw_params with the parameters
 * actually obtained and returns spec.size (the hardware buffer size);
 * returns -1 on failure. */
static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
{
    SDL_AudioSpec wanted_spec, spec;
    const char *env;
    /* Fallback channel count, indexed by the failing channel count. */
    static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
    static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
    int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;

    /* SDL_AUDIO_CHANNELS overrides the requested channel count. */
    env = SDL_getenv("SDL_AUDIO_CHANNELS");
    if (env) {
        wanted_nb_channels = atoi(env);
        wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
    }
    if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
        wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
        wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
    }
    wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
    wanted_spec.channels = wanted_nb_channels;
    wanted_spec.freq = wanted_sample_rate;
    if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
        av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
        return -1;
    }
    /* Start the sample-rate fallback just below the wanted rate. */
    while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
        next_sample_rate_idx--;
    wanted_spec.format = AUDIO_S16SYS;
    wanted_spec.silence = 0;
    wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
    wanted_spec.callback = sdl_audio_callback;
    wanted_spec.userdata = opaque;
    while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
        av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
               wanted_spec.channels, wanted_spec.freq, SDL_GetError());
        /* Try the next channel count; once those are exhausted, drop to the
         * next lower sample rate and restore the wanted channel count. */
        wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
        if (!wanted_spec.channels) {
            wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
            wanted_spec.channels = wanted_nb_channels;
            if (!wanted_spec.freq) {
                av_log(NULL, AV_LOG_ERROR,
                       "No more combinations to try, audio open failed\n");
                return -1;
            }
        }
        wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
    }
    if (spec.format != AUDIO_S16SYS) {
        av_log(NULL, AV_LOG_ERROR,
               "SDL advised audio format %d is not supported!\n", spec.format);
        return -1;
    }
    if (spec.channels != wanted_spec.channels) {
        wanted_channel_layout = av_get_default_channel_layout(spec.channels);
        if (!wanted_channel_layout) {
            av_log(NULL, AV_LOG_ERROR,
                   "SDL advised channel count %d is not supported!\n", spec.channels);
            return -1;
        }
    }

    audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
    audio_hw_params->freq = spec.freq;
    audio_hw_params->channel_layout = wanted_channel_layout;
    audio_hw_params->channels =  spec.channels;
    audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
    audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
    if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
        av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
        return -1;
    }
    return spec.size;
}
+
+/* open a given stream. Return 0 if OK */
+/* Finds a decoder for stream stream_index of is->ic (honouring any user-forced
+ * codec name), opens it with per-stream options, then starts the matching
+ * decoder thread (audio/video/subtitle). Returns 0 on success, <0 on error. */
+static int stream_component_open(VideoState *is, int stream_index)
+{
+    AVFormatContext *ic = is->ic;
+    AVCodecContext *avctx;
+    AVCodec *codec;
+    const char *forced_codec_name = NULL;
+    AVDictionary *opts;
+    AVDictionaryEntry *t = NULL;
+    int sample_rate, nb_channels;
+    int64_t channel_layout;
+    int ret = 0;
+    int stream_lowres = lowres;
+
+    if (stream_index < 0 || stream_index >= ic->nb_streams)
+        return -1;
+    avctx = ic->streams[stream_index]->codec;
+
+    codec = avcodec_find_decoder(avctx->codec_id);
+
+    /* remember the last stream of each type and pick up any -acodec/-scodec/-vcodec override */
+    switch(avctx->codec_type){
+        case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; forced_codec_name =    audio_codec_name; break;
+        case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
+        case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; forced_codec_name =    video_codec_name; break;
+    }
+    if (forced_codec_name)
+        codec = avcodec_find_decoder_by_name(forced_codec_name);
+    if (!codec) {
+        if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
+                                      "No codec could be found with name '%s'\n", forced_codec_name);
+        else                   av_log(NULL, AV_LOG_WARNING,
+                                      "No codec could be found with id %d\n", avctx->codec_id);
+        return -1;
+    }
+
+    avctx->codec_id = codec->id;
+    /* clamp the requested lowres value to what the decoder supports */
+    if(stream_lowres > av_codec_get_max_lowres(codec)){
+        av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
+                av_codec_get_max_lowres(codec));
+        stream_lowres = av_codec_get_max_lowres(codec);
+    }
+    av_codec_set_lowres(avctx, stream_lowres);
+
+    if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
+    if (fast)
+        avctx->flags2 |= AV_CODEC_FLAG2_FAST;
+    if(codec->capabilities & AV_CODEC_CAP_DR1)
+        avctx->flags |= CODEC_FLAG_EMU_EDGE;
+
+    opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
+    if (!av_dict_get(opts, "threads", NULL, 0))
+        av_dict_set(&opts, "threads", "auto", 0);
+    if (stream_lowres)
+        av_dict_set_int(&opts, "lowres", stream_lowres, 0);
+    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
+        av_dict_set(&opts, "refcounted_frames", "1", 0);
+    if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
+        goto fail;
+    }
+    /* any option left in the dictionary was not consumed by the decoder */
+    if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
+        av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
+        ret =  AVERROR_OPTION_NOT_FOUND;
+        goto fail;
+    }
+
+    is->eof = 0;
+    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
+    switch (avctx->codec_type) {
+    case AVMEDIA_TYPE_AUDIO:
+#if CONFIG_AVFILTER
+        {
+            AVFilterLink *link;
+
+            /* feed the audio filtergraph and take the output parameters from its sink */
+            is->audio_filter_src.freq           = avctx->sample_rate;
+            is->audio_filter_src.channels       = avctx->channels;
+            is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
+            is->audio_filter_src.fmt            = avctx->sample_fmt;
+            if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
+                goto fail;
+            link = is->out_audio_filter->inputs[0];
+            sample_rate    = link->sample_rate;
+            nb_channels    = link->channels;
+            channel_layout = link->channel_layout;
+        }
+#else
+        sample_rate    = avctx->sample_rate;
+        nb_channels    = avctx->channels;
+        channel_layout = avctx->channel_layout;
+#endif
+
+        /* prepare audio output */
+        if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
+            goto fail;
+        is->audio_hw_buf_size = ret;
+        is->audio_src = is->audio_tgt;
+        is->audio_buf_size  = 0;
+        is->audio_buf_index = 0;
+
+        /* init averaging filter */
+        is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
+        is->audio_diff_avg_count = 0;
+        /* since we do not have a precise anough audio fifo fullness,
+           we correct audio sync only if larger than this threshold */
+        is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
+
+        is->audio_stream = stream_index;
+        is->audio_st = ic->streams[stream_index];
+
+        decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
+        /* formats that cannot seek back need the decoder to drop samples before start_time */
+        if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
+            is->auddec.start_pts = is->audio_st->start_time;
+            is->auddec.start_pts_tb = is->audio_st->time_base;
+        }
+        decoder_start(&is->auddec, audio_thread, is);
+        SDL_PauseAudio(0);
+        break;
+    case AVMEDIA_TYPE_VIDEO:
+        is->video_stream = stream_index;
+        is->video_st = ic->streams[stream_index];
+
+        decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
+        decoder_start(&is->viddec, video_thread, is);
+        is->queue_attachments_req = 1;
+        break;
+    case AVMEDIA_TYPE_SUBTITLE:
+        is->subtitle_stream = stream_index;
+        is->subtitle_st = ic->streams[stream_index];
+
+        decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
+        decoder_start(&is->subdec, subtitle_thread, is);
+        break;
+    default:
+        break;
+    }
+
+fail:
+    av_dict_free(&opts);
+
+    return ret;
+}
+
+/* Close one open stream component: abort and destroy its decoder, release
+ * per-type resources (SDL audio device, resampler, RDFT buffers), then mark
+ * the stream discarded and clear the VideoState bookkeeping fields. */
+static void stream_component_close(VideoState *is, int stream_index)
+{
+    AVFormatContext *ic = is->ic;
+    AVCodecContext *avctx;
+
+    if (stream_index < 0 || stream_index >= ic->nb_streams)
+        return;
+    avctx = ic->streams[stream_index]->codec;
+
+    switch (avctx->codec_type) {
+    case AVMEDIA_TYPE_AUDIO:
+        /* abort first so the audio callback stops pulling, then close the device */
+        decoder_abort(&is->auddec, &is->sampq);
+        SDL_CloseAudio();
+        decoder_destroy(&is->auddec);
+        swr_free(&is->swr_ctx);
+        av_freep(&is->audio_buf1);
+        is->audio_buf1_size = 0;
+        is->audio_buf = NULL;
+
+        if (is->rdft) {
+            av_rdft_end(is->rdft);
+            av_freep(&is->rdft_data);
+            is->rdft = NULL;
+            is->rdft_bits = 0;
+        }
+        break;
+    case AVMEDIA_TYPE_VIDEO:
+        decoder_abort(&is->viddec, &is->pictq);
+        decoder_destroy(&is->viddec);
+        break;
+    case AVMEDIA_TYPE_SUBTITLE:
+        decoder_abort(&is->subdec, &is->subpq);
+        decoder_destroy(&is->subdec);
+        break;
+    default:
+        break;
+    }
+
+    ic->streams[stream_index]->discard = AVDISCARD_ALL;
+    avcodec_close(avctx);
+    /* second switch: only after the codec is closed do we drop the st/stream refs */
+    switch (avctx->codec_type) {
+    case AVMEDIA_TYPE_AUDIO:
+        is->audio_st = NULL;
+        is->audio_stream = -1;
+        break;
+    case AVMEDIA_TYPE_VIDEO:
+        is->video_st = NULL;
+        is->video_stream = -1;
+        break;
+    case AVMEDIA_TYPE_SUBTITLE:
+        is->subtitle_st = NULL;
+        is->subtitle_stream = -1;
+        break;
+    default:
+        break;
+    }
+}
+
+/* AVIOInterruptCallback: returns non-zero when the VideoState has been asked
+ * to abort, so blocking libavformat I/O bails out promptly. */
+static int decode_interrupt_cb(void *ctx)
+{
+    VideoState *is = ctx;
+    return is->abort_request;
+}
+
+/* Heuristic: treat rtp/rtsp/sdp demuxers, and rtp:/udp: URLs, as realtime
+ * sources (used to enable the infinite input buffer). */
+static int is_realtime(AVFormatContext *s)
+{
+    if(   !strcmp(s->iformat->name, "rtp")
+       || !strcmp(s->iformat->name, "rtsp")
+       || !strcmp(s->iformat->name, "sdp")
+    )
+        return 1;
+
+    if(s->pb && (   !strncmp(s->filename, "rtp:", 4)
+                 || !strncmp(s->filename, "udp:", 4)
+                )
+    )
+        return 1;
+    return 0;
+}
+
+/* this thread gets the stream from the disk or the network */
+/* Demuxer thread: opens the input, selects the best audio/video/subtitle
+ * streams, opens their components, then loops reading packets into the
+ * per-stream queues, servicing pause/seek/attached-picture requests.
+ * On exit (error, EOF+autoexit, or abort) it closes all components and,
+ * on error, posts FF_QUIT_EVENT to the GUI loop. */
+static int read_thread(void *arg)
+{
+    VideoState *is = arg;
+    AVFormatContext *ic = NULL;
+    int err, i, ret;
+    int st_index[AVMEDIA_TYPE_NB];
+    AVPacket pkt1, *pkt = &pkt1;
+    int64_t stream_start_time;
+    int pkt_in_play_range = 0;
+    AVDictionaryEntry *t;
+    AVDictionary **opts;
+    int orig_nb_streams;
+    SDL_mutex *wait_mutex = SDL_CreateMutex();
+    int scan_all_pmts_set = 0;
+    int64_t pkt_ts;
+
+    memset(st_index, -1, sizeof(st_index));
+    is->last_video_stream = is->video_stream = -1;
+    is->last_audio_stream = is->audio_stream = -1;
+    is->last_subtitle_stream = is->subtitle_stream = -1;
+    is->eof = 0;
+
+    ic = avformat_alloc_context();
+    if (!ic) {
+        av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
+        ret = AVERROR(ENOMEM);
+        goto fail;
+    }
+    ic->interrupt_callback.callback = decode_interrupt_cb;
+    ic->interrupt_callback.opaque = is;
+    /* scan_all_pmts is set only for the open call, then removed again below */
+    if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
+        av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
+        scan_all_pmts_set = 1;
+    }
+    err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
+    if (err < 0) {
+        print_error(is->filename, err);
+        ret = -1;
+        goto fail;
+    }
+    if (scan_all_pmts_set)
+        av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
+
+    if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
+        av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
+        ret = AVERROR_OPTION_NOT_FOUND;
+        goto fail;
+    }
+    is->ic = ic;
+
+    if (genpts)
+        ic->flags |= AVFMT_FLAG_GENPTS;
+
+    av_format_inject_global_side_data(ic);
+
+    opts = setup_find_stream_info_opts(ic, codec_opts);
+    orig_nb_streams = ic->nb_streams;
+
+    err = avformat_find_stream_info(ic, opts);
+
+    for (i = 0; i < orig_nb_streams; i++)
+        av_dict_free(&opts[i]);
+    av_freep(&opts);
+
+    if (err < 0) {
+        av_log(NULL, AV_LOG_WARNING,
+               "%s: could not find codec parameters\n", is->filename);
+        ret = -1;
+        goto fail;
+    }
+
+    if (ic->pb)
+        ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
+
+    if (seek_by_bytes < 0)
+        seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
+
+    is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
+
+    if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
+        window_title = av_asprintf("%s - %s", t->value, input_filename);
+
+    /* if seeking requested, we execute it */
+    if (start_time != AV_NOPTS_VALUE) {
+        int64_t timestamp;
+
+        timestamp = start_time;
+        /* add the stream start time */
+        if (ic->start_time != AV_NOPTS_VALUE)
+            timestamp += ic->start_time;
+        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
+        if (ret < 0) {
+            av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
+                    is->filename, (double)timestamp / AV_TIME_BASE);
+        }
+    }
+
+    is->realtime = is_realtime(ic);
+
+    if (show_status)
+        av_dump_format(ic, 0, is->filename, 0);
+
+    /* honour explicit -ast/-vst/-sst stream specifiers before auto-selection */
+    for (i = 0; i < ic->nb_streams; i++) {
+        AVStream *st = ic->streams[i];
+        enum AVMediaType type = st->codec->codec_type;
+        st->discard = AVDISCARD_ALL;
+        if (wanted_stream_spec[type] && st_index[type] == -1)
+            if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
+                st_index[type] = i;
+    }
+    for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
+        if (wanted_stream_spec[i] && st_index[i] == -1) {
+            av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
+            st_index[i] = INT_MAX;
+        }
+    }
+
+    if (!video_disable)
+        st_index[AVMEDIA_TYPE_VIDEO] =
+            av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
+                                st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
+    if (!audio_disable)
+        st_index[AVMEDIA_TYPE_AUDIO] =
+            av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
+                                st_index[AVMEDIA_TYPE_AUDIO],
+                                st_index[AVMEDIA_TYPE_VIDEO],
+                                NULL, 0);
+    if (!video_disable && !subtitle_disable)
+        st_index[AVMEDIA_TYPE_SUBTITLE] =
+            av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
+                                st_index[AVMEDIA_TYPE_SUBTITLE],
+                                (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
+                                 st_index[AVMEDIA_TYPE_AUDIO] :
+                                 st_index[AVMEDIA_TYPE_VIDEO]),
+                                NULL, 0);
+
+    is->show_mode = show_mode;
+    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
+        AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
+        AVCodecContext *avctx = st->codec;
+        AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
+        if (avctx->width)
+            set_default_window_size(avctx->width, avctx->height, sar);
+    }
+
+    /* open the streams */
+    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
+        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
+    }
+
+    ret = -1;
+    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
+        ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
+    }
+    if (is->show_mode == SHOW_MODE_NONE)
+        is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
+
+    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
+        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
+    }
+
+    if (is->video_stream < 0 && is->audio_stream < 0) {
+        av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
+               is->filename);
+        ret = -1;
+        goto fail;
+    }
+
+    if (infinite_buffer < 0 && is->realtime)
+        infinite_buffer = 1;
+
+    /* main demux loop */
+    for (;;) {
+        if (is->abort_request)
+            break;
+        if (is->paused != is->last_paused) {
+            is->last_paused = is->paused;
+            if (is->paused)
+                is->read_pause_return = av_read_pause(ic);
+            else
+                av_read_play(ic);
+        }
+#if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
+        if (is->paused &&
+                (!strcmp(ic->iformat->name, "rtsp") ||
+                 (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
+            /* wait 10 ms to avoid trying to get another packet */
+            /* XXX: horrible */
+            SDL_Delay(10);
+            continue;
+        }
+#endif
+        if (is->seek_req) {
+            int64_t seek_target = is->seek_pos;
+            int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
+            int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
+// FIXME the +-2 is due to rounding being not done in the correct direction in generation
+//      of the seek_pos/seek_rel variables
+
+            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
+            if (ret < 0) {
+                av_log(NULL, AV_LOG_ERROR,
+                       "%s: error while seeking\n", is->ic->filename);
+            } else {
+                /* flush the queues and push a flush packet so the decoders resync */
+                if (is->audio_stream >= 0) {
+                    packet_queue_flush(&is->audioq);
+                    packet_queue_put(&is->audioq, &flush_pkt);
+                }
+                if (is->subtitle_stream >= 0) {
+                    packet_queue_flush(&is->subtitleq);
+                    packet_queue_put(&is->subtitleq, &flush_pkt);
+                }
+                if (is->video_stream >= 0) {
+                    packet_queue_flush(&is->videoq);
+                    packet_queue_put(&is->videoq, &flush_pkt);
+                }
+                if (is->seek_flags & AVSEEK_FLAG_BYTE) {
+                   set_clock(&is->extclk, NAN, 0);
+                } else {
+                   set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
+                }
+            }
+            is->seek_req = 0;
+            is->queue_attachments_req = 1;
+            is->eof = 0;
+            if (is->paused)
+                step_to_next_frame(is);
+        }
+        if (is->queue_attachments_req) {
+            if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
+                AVPacket copy;
+                if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
+                    goto fail;
+                packet_queue_put(&is->videoq, &copy);
+                packet_queue_put_nullpacket(&is->videoq, is->video_stream);
+            }
+            is->queue_attachments_req = 0;
+        }
+
+        /* if the queue are full, no need to read more */
+        if (infinite_buffer<1 &&
+              (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
+            || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
+                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request
+                    || (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC))
+                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
+            /* wait 10 ms */
+            SDL_LockMutex(wait_mutex);
+            SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
+            SDL_UnlockMutex(wait_mutex);
+            continue;
+        }
+        /* all streams fully decoded: loop or autoexit */
+        if (!is->paused &&
+            (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
+            (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
+            if (loop != 1 && (!loop || --loop)) {
+                stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
+            } else if (autoexit) {
+                ret = AVERROR_EOF;
+                goto fail;
+            }
+        }
+        ret = av_read_frame(ic, pkt);
+        if (ret < 0) {
+            /* on EOF, push one null packet per stream to drain the decoders */
+            if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
+                if (is->video_stream >= 0)
+                    packet_queue_put_nullpacket(&is->videoq, is->video_stream);
+                if (is->audio_stream >= 0)
+                    packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
+                if (is->subtitle_stream >= 0)
+                    packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
+                is->eof = 1;
+            }
+            if (ic->pb && ic->pb->error)
+                break;
+            SDL_LockMutex(wait_mutex);
+            SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
+            SDL_UnlockMutex(wait_mutex);
+            continue;
+        } else {
+            is->eof = 0;
+        }
+        /* check if packet is in play range specified by user, then queue, otherwise discard */
+        stream_start_time = ic->streams[pkt->stream_index]->start_time;
+        pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
+        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
+                (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
+                av_q2d(ic->streams[pkt->stream_index]->time_base) -
+                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
+                <= ((double)duration / 1000000);
+        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
+            packet_queue_put(&is->audioq, pkt);
+        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
+                   && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
+            packet_queue_put(&is->videoq, pkt);
+        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
+            packet_queue_put(&is->subtitleq, pkt);
+        } else {
+            av_free_packet(pkt);
+        }
+    }
+    /* wait until the end */
+    while (!is->abort_request) {
+        SDL_Delay(100);
+    }
+
+    ret = 0;
+ fail:
+    /* close each stream */
+    if (is->audio_stream >= 0)
+        stream_component_close(is, is->audio_stream);
+    if (is->video_stream >= 0)
+        stream_component_close(is, is->video_stream);
+    if (is->subtitle_stream >= 0)
+        stream_component_close(is, is->subtitle_stream);
+    if (ic) {
+        avformat_close_input(&ic);
+        is->ic = NULL;
+    }
+
+    if (ret != 0) {
+        SDL_Event event;
+
+        event.type = FF_QUIT_EVENT;
+        event.user.data1 = is;
+        SDL_PushEvent(&event);
+    }
+    SDL_DestroyMutex(wait_mutex);
+    return 0;
+}
+
+/* Allocate and initialise a VideoState for the given input (queues, clocks),
+ * then spawn the demuxer thread. Returns NULL on any allocation/thread
+ * failure (stream_close() releases whatever was set up). */
+static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
+{
+    VideoState *is;
+
+    is = av_mallocz(sizeof(VideoState));
+    if (!is)
+        return NULL;
+    av_strlcpy(is->filename, filename, sizeof(is->filename));
+    is->iformat = iformat;
+    is->ytop    = 0;
+    is->xleft   = 0;
+
+    /* start video display */
+    if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
+        goto fail;
+    if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
+        goto fail;
+    if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
+        goto fail;
+
+    packet_queue_init(&is->videoq);
+    packet_queue_init(&is->audioq);
+    packet_queue_init(&is->subtitleq);
+
+    is->continue_read_thread = SDL_CreateCond();
+
+    init_clock(&is->vidclk, &is->videoq.serial);
+    init_clock(&is->audclk, &is->audioq.serial);
+    init_clock(&is->extclk, &is->extclk.serial);
+    is->audio_clock_serial = -1;
+    is->av_sync_type = av_sync_type;
+    is->read_tid     = SDL_CreateThread(read_thread, is);
+    if (!is->read_tid) {
+/* shared cleanup label for all the failure paths above */
+fail:
+        stream_close(is);
+        return NULL;
+    }
+    return is;
+}
+
+/* Cycle to the next stream of the given type (keyboard a/v/t/c commands).
+ * When a video stream belongs to an AVProgram, candidates are restricted to
+ * that program and indices below are program-relative until remapped at
+ * the_end. Subtitles may also cycle to "off" (stream_index -1). */
+static void stream_cycle_channel(VideoState *is, int codec_type)
+{
+    AVFormatContext *ic = is->ic;
+    int start_index, stream_index;
+    int old_index;
+    AVStream *st;
+    AVProgram *p = NULL;
+    int nb_streams = is->ic->nb_streams;
+
+    if (codec_type == AVMEDIA_TYPE_VIDEO) {
+        start_index = is->last_video_stream;
+        old_index = is->video_stream;
+    } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
+        start_index = is->last_audio_stream;
+        old_index = is->audio_stream;
+    } else {
+        start_index = is->last_subtitle_stream;
+        old_index = is->subtitle_stream;
+    }
+    stream_index = start_index;
+
+    /* restrict the search to the program of the current video stream, if any */
+    if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
+        p = av_find_program_from_stream(ic, NULL, is->video_stream);
+        if (p) {
+            nb_streams = p->nb_stream_indexes;
+            for (start_index = 0; start_index < nb_streams; start_index++)
+                if (p->stream_index[start_index] == stream_index)
+                    break;
+            if (start_index == nb_streams)
+                start_index = -1;
+            stream_index = start_index;
+        }
+    }
+
+    for (;;) {
+        if (++stream_index >= nb_streams)
+        {
+            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
+            {
+                /* wrapping past the last subtitle stream disables subtitles */
+                stream_index = -1;
+                is->last_subtitle_stream = -1;
+                goto the_end;
+            }
+            if (start_index == -1)
+                return;
+            stream_index = 0;
+        }
+        if (stream_index == start_index)
+            return;
+        st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
+        if (st->codec->codec_type == codec_type) {
+            /* check that parameters are OK */
+            switch (codec_type) {
+            case AVMEDIA_TYPE_AUDIO:
+                if (st->codec->sample_rate != 0 &&
+                    st->codec->channels != 0)
+                    goto the_end;
+                break;
+            case AVMEDIA_TYPE_VIDEO:
+            case AVMEDIA_TYPE_SUBTITLE:
+                goto the_end;
+            default:
+                break;
+            }
+        }
+    }
+ the_end:
+    /* map a program-relative index back to an absolute stream index */
+    if (p && stream_index != -1)
+        stream_index = p->stream_index[stream_index];
+    av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
+           av_get_media_type_string(codec_type),
+           old_index,
+           stream_index);
+
+    stream_component_close(is, old_index);
+    stream_component_open(is, stream_index);
+}
+
+
+/* Toggle the global fullscreen flag and reopen the video surface. */
+static void toggle_full_screen(VideoState *is)
+{
+#if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
+    /* OS X needs to reallocate the SDL overlays */
+    int i;
+    for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
+        is->pictq.queue[i].reallocate = 1;
+#endif
+    is_full_screen = !is_full_screen;
+    video_open(is, 1, NULL);
+}
+
+/* Advance to the next usable show mode (video / waves / RDFT), skipping
+ * modes whose required stream is absent; clears the window on change. */
+static void toggle_audio_display(VideoState *is)
+{
+    int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
+    int next = is->show_mode;
+    do {
+        next = (next + 1) % SHOW_MODE_NB;
+    } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
+    if (is->show_mode != next) {
+        fill_rectangle(screen,
+                    is->xleft, is->ytop, is->width, is->height,
+                    bgcolor, 1);
+        is->force_refresh = 1;
+        is->show_mode = next;
+    }
+}
+
+/* Block until an SDL event arrives, refreshing the display at REFRESH_RATE
+ * in the meantime and hiding the cursor after CURSOR_HIDE_DELAY. */
+static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
+    double remaining_time = 0.0;
+    SDL_PumpEvents();
+    while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
+        if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
+            SDL_ShowCursor(0);
+            cursor_hidden = 1;
+        }
+        if (remaining_time > 0.0)
+            av_usleep((int64_t)(remaining_time * 1000000.0));
+        remaining_time = REFRESH_RATE;
+        if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
+            video_refresh(is, &remaining_time);
+        SDL_PumpEvents();
+    }
+}
+
+/* Seek incr chapters forward/backward relative to the chapter containing
+ * the current master-clock position; no-op if the file has no chapters. */
+static void seek_chapter(VideoState *is, int incr)
+{
+    int64_t pos = get_master_clock(is) * AV_TIME_BASE;
+    int i;
+
+    if (!is->ic->nb_chapters)
+        return;
+
+    /* find the current chapter */
+    for (i = 0; i < is->ic->nb_chapters; i++) {
+        AVChapter *ch = is->ic->chapters[i];
+        if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
+            i--;
+            break;
+        }
+    }
+
+    i += incr;
+    i = FFMAX(i, 0);
+    if (i >= is->ic->nb_chapters)
+        return;
+
+    av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
+    stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
+                                 AV_TIME_BASE_Q), 0, 0);
+}
+
+/* handle an event sent by the GUI */
+/* Main UI loop: dispatches SDL keyboard/mouse/resize/quit events to
+ * playback controls (pause, seek, stream cycling, fullscreen, etc.).
+ * Never returns except via do_exit(). */
+static void event_loop(VideoState *cur_stream)
+{
+    SDL_Event event;
+    double incr, pos, frac;
+
+    for (;;) {
+        double x;
+        refresh_loop_wait_event(cur_stream, &event);
+        switch (event.type) {
+        case SDL_KEYDOWN:
+            if (exit_on_keydown) {
+                do_exit(cur_stream);
+                break;
+            }
+            switch (event.key.keysym.sym) {
+            case SDLK_ESCAPE:
+            case SDLK_q:
+                do_exit(cur_stream);
+                break;
+            case SDLK_f:
+                toggle_full_screen(cur_stream);
+                cur_stream->force_refresh = 1;
+                break;
+            case SDLK_p:
+            case SDLK_SPACE:
+                toggle_pause(cur_stream);
+                break;
+            case SDLK_s: // S: Step to next frame
+                step_to_next_frame(cur_stream);
+                break;
+            case SDLK_a:
+                stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
+                break;
+            case SDLK_v:
+                stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
+                break;
+            case SDLK_c:
+                stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
+                stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
+                stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
+                break;
+            case SDLK_t:
+                stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
+                break;
+            case SDLK_w:
+#if CONFIG_AVFILTER
+                if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
+                    if (++cur_stream->vfilter_idx >= nb_vfilters)
+                        cur_stream->vfilter_idx = 0;
+                } else {
+                    cur_stream->vfilter_idx = 0;
+                    toggle_audio_display(cur_stream);
+                }
+#else
+                toggle_audio_display(cur_stream);
+#endif
+                break;
+            case SDLK_PAGEUP:
+                /* with a single chapter, PAGEUP/PAGEDOWN fall back to +-10min seeks */
+                if (cur_stream->ic->nb_chapters <= 1) {
+                    incr = 600.0;
+                    goto do_seek;
+                }
+                seek_chapter(cur_stream, 1);
+                break;
+            case SDLK_PAGEDOWN:
+                if (cur_stream->ic->nb_chapters <= 1) {
+                    incr = -600.0;
+                    goto do_seek;
+                }
+                seek_chapter(cur_stream, -1);
+                break;
+            case SDLK_LEFT:
+                incr = -10.0;
+                goto do_seek;
+            case SDLK_RIGHT:
+                incr = 10.0;
+                goto do_seek;
+            case SDLK_UP:
+                incr = 60.0;
+                goto do_seek;
+            case SDLK_DOWN:
+                incr = -60.0;
+            do_seek:
+                    /* byte-based seek when timestamps are unreliable, else time-based */
+                    if (seek_by_bytes) {
+                        pos = -1;
+                        if (pos < 0 && cur_stream->video_stream >= 0)
+                            pos = frame_queue_last_pos(&cur_stream->pictq);
+                        if (pos < 0 && cur_stream->audio_stream >= 0)
+                            pos = frame_queue_last_pos(&cur_stream->sampq);
+                        if (pos < 0)
+                            pos = avio_tell(cur_stream->ic->pb);
+                        if (cur_stream->ic->bit_rate)
+                            incr *= cur_stream->ic->bit_rate / 8.0;
+                        else
+                            incr *= 180000.0;
+                        pos += incr;
+                        stream_seek(cur_stream, pos, incr, 1);
+                    } else {
+                        pos = get_master_clock(cur_stream);
+                        if (isnan(pos))
+                            pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
+                        pos += incr;
+                        if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
+                            pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
+                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
+                    }
+                break;
+            default:
+                break;
+            }
+            break;
+        case SDL_VIDEOEXPOSE:
+            cur_stream->force_refresh = 1;
+            break;
+        case SDL_MOUSEBUTTONDOWN:
+            if (exit_on_mousedown) {
+                do_exit(cur_stream);
+                break;
+            }
+            /* fallthrough: button-down also shows the cursor and may seek below */
+        case SDL_MOUSEMOTION:
+            if (cursor_hidden) {
+                SDL_ShowCursor(1);
+                cursor_hidden = 0;
+            }
+            cursor_last_shown = av_gettime_relative();
+            if (event.type == SDL_MOUSEBUTTONDOWN) {
+                x = event.button.x;
+            } else {
+                if (event.motion.state != SDL_PRESSED)
+                    break;
+                x = event.motion.x;
+            }
+                /* click/drag position maps linearly to file position or duration */
+                if (seek_by_bytes || cur_stream->ic->duration <= 0) {
+                    uint64_t size =  avio_size(cur_stream->ic->pb);
+                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
+                } else {
+                    int64_t ts;
+                    int ns, hh, mm, ss;
+                    int tns, thh, tmm, tss;
+                    tns  = cur_stream->ic->duration / 1000000LL;
+                    thh  = tns / 3600;
+                    tmm  = (tns % 3600) / 60;
+                    tss  = (tns % 60);
+                    frac = x / cur_stream->width;
+                    ns   = frac * tns;
+                    hh   = ns / 3600;
+                    mm   = (ns % 3600) / 60;
+                    ss   = (ns % 60);
+                    av_log(NULL, AV_LOG_INFO,
+                           "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
+                            hh, mm, ss, thh, tmm, tss);
+                    ts = frac * cur_stream->ic->duration;
+                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
+                        ts += cur_stream->ic->start_time;
+                    stream_seek(cur_stream, ts, 0, 0);
+                }
+            break;
+        case SDL_VIDEORESIZE:
+                screen = SDL_SetVideoMode(FFMIN(16383, event.resize.w), event.resize.h, 0,
+                                          SDL_HWSURFACE|(is_full_screen?SDL_FULLSCREEN:SDL_RESIZABLE)|SDL_ASYNCBLIT|SDL_HWACCEL);
+                if (!screen) {
+                    av_log(NULL, AV_LOG_FATAL, "Failed to set video mode\n");
+                    do_exit(cur_stream);
+                }
+                screen_width  = cur_stream->width  = screen->w;
+                screen_height = cur_stream->height = screen->h;
+                cur_stream->force_refresh = 1;
+            break;
+        case SDL_QUIT:
+        case FF_QUIT_EVENT:
+            do_exit(cur_stream);
+            break;
+        case FF_ALLOC_EVENT:
+            alloc_picture(event.user.data1);
+            break;
+        default:
+            break;
+        }
+    }
+}
+
+/* Deprecated -s option handler: forwards to -video_size. */
+static int opt_frame_size(void *optctx, const char *opt, const char *arg)
+{
+    av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
+    return opt_default(NULL, "video_size", arg);
+}
+
+/* -x option: force the displayed width (exits on invalid value). */
+static int opt_width(void *optctx, const char *opt, const char *arg)
+{
+    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
+    return 0;
+}
+
+/* -y option: force the displayed height (exits on invalid value). */
+static int opt_height(void *optctx, const char *opt, const char *arg)
+{
+    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
+    return 0;
+}
+
+/* -f option: force the input demuxer by name. */
+static int opt_format(void *optctx, const char *opt, const char *arg)
+{
+    file_iformat = av_find_input_format(arg);
+    if (!file_iformat) {
+        av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
+        return AVERROR(EINVAL);
+    }
+    return 0;
+}
+
+/* Deprecated -pix_fmt option handler: forwards to -pixel_format. */
+static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
+{
+    av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
+    return opt_default(NULL, "pixel_format", arg);
+}
+
+/* -sync option: select the A/V sync master clock (audio/video/ext);
+ * exits on any other value. */
+static int opt_sync(void *optctx, const char *opt, const char *arg)
+{
+    if (!strcmp(arg, "audio"))
+        av_sync_type = AV_SYNC_AUDIO_MASTER;
+    else if (!strcmp(arg, "video"))
+        av_sync_type = AV_SYNC_VIDEO_MASTER;
+    else if (!strcmp(arg, "ext"))
+        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
+    else {
+        av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
+        exit(1);
+    }
+    return 0;
+}
+
+/* -ss option: set the start position (time string parsed or die). */
+static int opt_seek(void *optctx, const char *opt, const char *arg)
+{
+    start_time = parse_time_or_die(opt, arg, 1);
+    return 0;
+}
+
+/* -t option: set how long to play (time string parsed or die). */
+static int opt_duration(void *optctx, const char *opt, const char *arg)
+{
+    duration = parse_time_or_die(opt, arg, 1);
+    return 0;
+}
+
+/* -showmode option: accepts the names video/waves/rdft or a numeric mode. */
+static int opt_show_mode(void *optctx, const char *opt, const char *arg)
+{
+    show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
+                !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
+                !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
+                parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
+    return 0;
+}
+
+/* Positional-argument handler: records the single input filename, mapping
+ * "-" to the pipe: protocol; exits if an input was already given. */
+static void opt_input_file(void *optctx, const char *filename)
+{
+    if (input_filename) {
+        av_log(NULL, AV_LOG_FATAL,
+               "Argument '%s' provided as input filename, but '%s' was already specified.\n",
+                filename, input_filename);
+        exit(1);
+    }
+    if (!strcmp(filename, "-"))
+        filename = "pipe:";
+    input_filename = filename;
+}
+
+/* -codec:MEDIA option: stores a forced decoder name for the stream type
+ * given by the media specifier after ':' (a/s/v). */
+static int opt_codec(void *optctx, const char *opt, const char *arg)
+{
+   const char *spec = strchr(opt, ':');
+   if (!spec) {
+       av_log(NULL, AV_LOG_ERROR,
+              "No media specifier was specified in '%s' in option '%s'\n",
+               arg, opt);
+       return AVERROR(EINVAL);
+   }
+   spec++;
+   switch (spec[0]) {
+   case 'a' :    audio_codec_name = arg; break;
+   case 's' : subtitle_codec_name = arg; break;
+   case 'v' :    video_codec_name = arg; break;
+   default:
+       av_log(NULL, AV_LOG_ERROR,
+              "Invalid media specifier '%s' in option '%s'\n", spec, opt);
+       return AVERROR(EINVAL);
+   }
+   return 0;
+}
+
+/* sink for the no-op -i option below */
+static int dummy;
+
+/* command-line option table consumed by the cmdutils option parser */
+static const OptionDef options[] = {
+#include "cmdutils_common_opts.h"
+    { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
+    { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
+    { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
+    { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
+    { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
+    { "vn", OPT_BOOL, { &video_disable }, "disable video" },
+    { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
+    { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
+    { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
+    { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
+    { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
+    { "t", HAS_ARG, { .func_arg = opt_duration }, "play  \"duration\" seconds of audio/video", "duration" },
+    { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
+    { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
+    { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
+    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
+    { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
+    { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
+    { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
+    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
+    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
+    { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
+    { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
+    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
+    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
+    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
+    { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
+    { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
+    { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
+#if CONFIG_AVFILTER
+    { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
+    { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
+#endif
+    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
+    { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
+    { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
+    { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
+    { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
+    { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, {    &audio_codec_name }, "force audio decoder",    "decoder_name" },
+    { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
+    { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, {    &video_codec_name }, "force video decoder",    "decoder_name" },
+    { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
+    { NULL, },
+};
+
+/* Print a one-line program description and the usage synopsis to the log. */
+static void show_usage(void)
+{
+ av_log(NULL, AV_LOG_INFO, "Simple media player\n");
+ av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
+ av_log(NULL, AV_LOG_INFO, "\n");
+}
+
+/*
+ * Default -h handler (called by the shared cmdutils help machinery).
+ * Prints usage, the option tables (main + expert), per-library AVOption
+ * help, and the interactive key bindings. 'opt'/'arg' are unused here.
+ */
+void show_help_default(const char *opt, const char *arg)
+{
+ av_log_set_callback(log_callback_help);
+ show_usage();
+ show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
+ show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
+ printf("\n");
+ show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
+ show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
+#if !CONFIG_AVFILTER
+ show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
+#else
+ show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
+#endif
+ printf("\nWhile playing:\n"
+ "q, ESC quit\n"
+ "f toggle full screen\n"
+ "p, SPC pause\n"
+ "a cycle audio channel in the current program\n"
+ "v cycle video channel\n"
+ "t cycle subtitle channel in the current program\n"
+ "c cycle program\n"
+ "w cycle video filters or show modes\n"
+ "s activate frame-step mode\n"
+ "left/right seek backward/forward 10 seconds\n"
+ "down/up seek backward/forward 1 minute\n"
+ "page down/page up seek backward/forward 10 minutes\n"
+ "mouse click seek to percentage in file corresponding to fraction of width\n"
+ );
+}
+
+/*
+ * av_lockmgr_register() callback backed by SDL mutexes.
+ * Returns 0 on success, nonzero on failure, per the lockmgr contract.
+ */
+static int lockmgr(void **mtx, enum AVLockOp op)
+{
+ switch(op) {
+ case AV_LOCK_CREATE:
+ *mtx = SDL_CreateMutex();
+ if(!*mtx)
+ return 1;
+ return 0;
+ case AV_LOCK_OBTAIN:
+ return !!SDL_LockMutex(*mtx);
+ case AV_LOCK_RELEASE:
+ return !!SDL_UnlockMutex(*mtx);
+ case AV_LOCK_DESTROY:
+ SDL_DestroyMutex(*mtx);
+ return 0;
+ }
+ /* Unknown op: report failure. */
+ return 1;
+}
+
+/* Entry point: parse the command line, initialize FFmpeg + SDL, open the
+ * input stream and hand control to event_loop() (which never returns). */
+int main(int argc, char **argv)
+{
+ int flags;
+ VideoState *is;
+ char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
+
+ av_log_set_flags(AV_LOG_SKIP_REPEATED);
+ parse_loglevel(argc, argv, options);
+
+ /* register all codecs, demux and protocols */
+#if CONFIG_AVDEVICE
+ avdevice_register_all();
+#endif
+#if CONFIG_AVFILTER
+ avfilter_register_all();
+#endif
+ av_register_all();
+ avformat_network_init();
+
+ init_opts();
+
+ signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
+ signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
+
+ show_banner(argc, argv, options);
+
+ parse_options(NULL, argc, argv, options, opt_input_file);
+
+ if (!input_filename) {
+ show_usage();
+ av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
+ av_log(NULL, AV_LOG_FATAL,
+ "Use -h to get full help or, even better, run 'man %s'\n", program_name);
+ exit(1);
+ }
+
+ if (display_disable) {
+ video_disable = 1;
+ }
+ flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
+ if (audio_disable)
+ flags &= ~SDL_INIT_AUDIO;
+ if (display_disable)
+ SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
+#if !defined(_WIN32) && !defined(__APPLE__)
+ flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
+#endif
+ if (SDL_Init (flags)) {
+ av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
+ av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
+ exit(1);
+ }
+
+ if (!display_disable) {
+ /* Remember the desktop size for full-screen toggling. */
+ const SDL_VideoInfo *vi = SDL_GetVideoInfo();
+ fs_screen_width = vi->current_w;
+ fs_screen_height = vi->current_h;
+ }
+
+ /* Discard SDL event classes this player never handles. */
+ SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
+ SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
+ SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
+
+ if (av_lockmgr_register(lockmgr)) {
+ av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
+ do_exit(NULL);
+ }
+
+ /* Sentinel packet: its data pointer is its own address so queue readers
+ * can recognize a flush request without extra state. */
+ av_init_packet(&flush_pkt);
+ flush_pkt.data = (uint8_t *)&flush_pkt;
+
+ is = stream_open(input_filename, file_iformat);
+ if (!is) {
+ av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
+ do_exit(NULL);
+ }
+
+ event_loop(is);
+
+ /* never returns */
+
+ return 0;
+}
--- /dev/null
- .capabilities = CODEC_CAP_DR1,
+/*
+ * 012v decoder
+ *
+ * Copyright (C) 2012 Carl Eugen Hoyos
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "internal.h"
+#include "libavutil/intreadwrite.h"
+
+/*
+ * Init: output is planar 4:2:2 with 16-bit samples of which 10 bits are
+ * significant. The 'a12v' variant carries an alpha channel that is not
+ * decoded; request a sample so support can be added.
+ */
+static av_cold int zero12v_decode_init(AVCodecContext *avctx)
+{
+ avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
+ avctx->bits_per_raw_sample = 10;
+
+ if (avctx->codec_tag == MKTAG('a', '1', '2', 'v'))
+ avpriv_request_sample(avctx, "transparency");
+
+ return 0;
+}
+
+/*
+ * Decode one uncompressed 012v/a12v frame.
+ * Six horizontal pixels are packed into 16 bytes: four little-endian
+ * 32-bit words, each holding three 10-bit components that are stored
+ * left-aligned into 16-bit output samples (hence the & 0xFFC0 masks).
+ */
+static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame, AVPacket *avpkt)
+{
+ int line, ret;
+ const int width = avctx->width;
+ AVFrame *pic = data;
+ uint16_t *y, *u, *v;
+ const uint8_t *line_end, *src = avpkt->data;
+ int stride = avctx->width * 8 / 3;
+
+ if (width <= 1 || avctx->height <= 0) {
+ av_log(avctx, AV_LOG_ERROR, "Dimensions %dx%d not supported.\n", width, avctx->height);
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* 012v rows may be padded; if the packet divides evenly into rows that
+ * are at least as large as the nominal stride, trust the packet layout. */
+ if ( avctx->codec_tag == MKTAG('0', '1', '2', 'v')
+ && avpkt->size % avctx->height == 0
+ && avpkt->size / avctx->height * 3 >= width * 8)
+ stride = avpkt->size / avctx->height;
+
+ if (avpkt->size < avctx->height * stride) {
+ av_log(avctx, AV_LOG_ERROR, "Packet too small: %d instead of %d\n",
+ avpkt->size, avctx->height * stride);
+ return AVERROR_INVALIDDATA;
+ }
+
+ if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
+ return ret;
+
+ pic->pict_type = AV_PICTURE_TYPE_I;
+ pic->key_frame = 1;
+
+ line_end = avpkt->data + stride;
+ for (line = 0; line < avctx->height; line++) {
+ /* Spill buffers for a partial right-edge group; flushed into the
+ * picture after the loop. 0x8000 is the mid-level fill value. */
+ uint16_t y_temp[6] = {0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000};
+ uint16_t u_temp[3] = {0x8000, 0x8000, 0x8000};
+ uint16_t v_temp[3] = {0x8000, 0x8000, 0x8000};
+ int x;
+ y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
+ u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
+ v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
+
+ for (x = 0; x < width; x += 6) {
+ uint32_t t;
+
+ /* Not enough pixels/bytes for a full 6-pixel group: decode into
+ * the temp buffers instead of writing past the row. */
+ if (width - x < 6 || line_end - src < 16) {
+ y = y_temp;
+ u = u_temp;
+ v = v_temp;
+ }
+
+ if (line_end - src < 4)
+ break;
+
+ /* Word 1: U0 Y0 V0 (10 bits each, low to high). */
+ t = AV_RL32(src);
+ src += 4;
+ *u++ = t << 6 & 0xFFC0;
+ *y++ = t >> 4 & 0xFFC0;
+ *v++ = t >> 14 & 0xFFC0;
+
+ if (line_end - src < 4)
+ break;
+
+ /* Word 2: Y1 U1 Y2. */
+ t = AV_RL32(src);
+ src += 4;
+ *y++ = t << 6 & 0xFFC0;
+ *u++ = t >> 4 & 0xFFC0;
+ *y++ = t >> 14 & 0xFFC0;
+
+ if (line_end - src < 4)
+ break;
+
+ /* Word 3: V1 Y3 U2. */
+ t = AV_RL32(src);
+ src += 4;
+ *v++ = t << 6 & 0xFFC0;
+ *y++ = t >> 4 & 0xFFC0;
+ *u++ = t >> 14 & 0xFFC0;
+
+ if (line_end - src < 4)
+ break;
+
+ /* Word 4: Y4 V2 Y5. */
+ t = AV_RL32(src);
+ src += 4;
+ *y++ = t << 6 & 0xFFC0;
+ *v++ = t >> 4 & 0xFFC0;
+ *y++ = t >> 14 & 0xFFC0;
+
+ if (width - x < 6)
+ break;
+ }
+
+ /* Copy any buffered edge samples into the picture planes. */
+ if (x < width) {
+ y = x + (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
+ u = x/2 + (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
+ v = x/2 + (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
+ memcpy(y, y_temp, sizeof(*y) * (width - x));
+ memcpy(u, u_temp, sizeof(*u) * (width - x + 1) / 2);
+ memcpy(v, v_temp, sizeof(*v) * (width - x + 1) / 2);
+ }
+
+ /* Advance to the next row start even if the inner loop bailed early. */
+ line_end += stride;
+ src = line_end - stride;
+ }
+
+ *got_frame = 1;
+
+ return avpkt->size;
+}
+
+/* Registration entry for the 012v/a12v raw 10-bit 4:2:2 decoder. */
+AVCodec ff_zero12v_decoder = {
+ .name = "012v",
+ .long_name = NULL_IF_CONFIG_SMALL("Uncompressed 4:2:2 10-bit"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_012V,
+ .init = zero12v_decode_init,
+ .decode = zero12v_decode_frame,
++ .capabilities = AV_CODEC_CAP_DR1,
+};
.id = AV_CODEC_ID_8SVX_FIB,
.priv_data_size = sizeof (EightSvxContext),
.init = eightsvx_decode_init,
- .close = eightsvx_decode_close,
.decode = eightsvx_decode_frame,
- .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1,
+ .close = eightsvx_decode_close,
- .capabilities = CODEC_CAP_DR1,
++ .capabilities = AV_CODEC_CAP_DR1,
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
AV_SAMPLE_FMT_NONE },
};
.id = AV_CODEC_ID_8SVX_EXP,
.priv_data_size = sizeof (EightSvxContext),
.init = eightsvx_decode_init,
- .close = eightsvx_decode_close,
.decode = eightsvx_decode_frame,
- .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1,
+ .close = eightsvx_decode_close,
- .capabilities = CODEC_CAP_DR1,
++ .capabilities = AV_CODEC_CAP_DR1,
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
AV_SAMPLE_FMT_NONE },
};
.encode2 = a64multi_encode_frame,
.close = a64multi_close_encoder,
.pix_fmts = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
- .capabilities = CODEC_CAP_DELAY,
+ .capabilities = AV_CODEC_CAP_DELAY,
};
-
+#endif
+#if CONFIG_A64MULTI5_ENCODER
AVCodec ff_a64multi5_encoder = {
.name = "a64multi5",
.long_name = NULL_IF_CONFIG_SMALL("Multicolor charset for Commodore 64, extended with 5th color (colram)"),
.encode2 = a64multi_encode_frame,
.close = a64multi_close_encoder,
.pix_fmts = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
- .capabilities = CODEC_CAP_DELAY,
+ .capabilities = AV_CODEC_CAP_DELAY,
};
+#endif
.sample_fmts = (const enum AVSampleFormat[]) {
AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE
},
- .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_CHANNEL_CONF | AV_CODEC_CAP_DR1,
.channel_layouts = aac_channel_layout,
+ .flush = flush,
+ .priv_class = &aac_decoder_class,
+ .profiles = profiles,
};
/*
.sample_fmts = (const enum AVSampleFormat[]) {
AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE
},
- .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_CHANNEL_CONF | AV_CODEC_CAP_DR1,
.channel_layouts = aac_channel_layout,
+ .flush = flush,
+ .profiles = profiles,
};
--- /dev/null
- .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
+/*
+ * Copyright (c) 2013
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * AAC decoder fixed-point implementation
+ *
+ * Copyright (c) 2005-2006 Oded Shimon ( ods15 ods15 dyndns org )
+ * Copyright (c) 2006-2007 Maxim Gavrilov ( maxim.gavrilov gmail com )
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * AAC decoder
+ * @author Oded Shimon ( ods15 ods15 dyndns org )
+ * @author Maxim Gavrilov ( maxim.gavrilov gmail com )
+ *
+ * Fixed point implementation
+ * @author Stanislav Ocovaj ( stanislav.ocovaj imgtec com )
+ */
+
+#define FFT_FLOAT 0
+#define FFT_FIXED_32 1
+#define USE_FIXED 1
+
+#include "libavutil/fixed_dsp.h"
+#include "libavutil/opt.h"
+#include "avcodec.h"
+#include "internal.h"
+#include "get_bits.h"
+#include "fft.h"
+#include "lpc.h"
+#include "kbdwin.h"
+#include "sinewin.h"
+
+#include "aac.h"
+#include "aactab.h"
+#include "aacdectab.h"
+#include "cbrt_tablegen.h"
+#include "sbr.h"
+#include "aacsbr.h"
+#include "mpeg4audio.h"
+#include "aacadtsdec.h"
+#include "libavutil/intfloat.h"
+
+#include <math.h>
+#include <string.h>
+
+/* Reset one backward-predictor state: history and correlations to zero,
+ * variances to their nonzero initial SoftFloat value (mant 0x20000000, exp 1). */
+static av_always_inline void reset_predict_state(PredictorState *ps)
+{
+ ps->r0.mant = 0;
+ ps->r0.exp = 0;
+ ps->r1.mant = 0;
+ ps->r1.exp = 0;
+ ps->cor0.mant = 0;
+ ps->cor0.exp = 0;
+ ps->cor1.mant = 0;
+ ps->cor1.exp = 0;
+ ps->var0.mant = 0x20000000;
+ ps->var0.exp = 1;
+ ps->var1.mant = 0x20000000;
+ ps->var1.exp = 1;
+}
+
+int exp2tab[4] = { Q31(1.0000000000/2), Q31(1.1892071150/2), Q31(1.4142135624/2), Q31(1.6817928305/2) }; // 2^0, 2^0.25, 2^0.5, 2^0.75
+
+/* Decode a signed pair from a codebook index: two 4-bit fields, each
+ * offset by -4. Returns dst advanced past the two written values. */
+static inline int *DEC_SPAIR(int *dst, unsigned idx)
+{
+ dst[0] = (idx & 15) - 4;
+ dst[1] = (idx >> 4 & 15) - 4;
+
+ return dst + 2;
+}
+
+/* Decode a signed quad from a codebook index: four 2-bit fields, each
+ * offset by -1. Returns dst advanced past the four written values. */
+static inline int *DEC_SQUAD(int *dst, unsigned idx)
+{
+ dst[0] = (idx & 3) - 1;
+ dst[1] = (idx >> 2 & 3) - 1;
+ dst[2] = (idx >> 4 & 3) - 1;
+ dst[3] = (idx >> 6 & 3) - 1;
+
+ return dst + 4;
+}
+
+/* Decode an unsigned pair (two 4-bit magnitudes) and apply sign bits taken
+ * from the low bits of 'sign'. Returns dst advanced past the pair. */
+static inline int *DEC_UPAIR(int *dst, unsigned idx, unsigned sign)
+{
+ dst[0] = (idx & 15) * (1 - (sign & 0xFFFFFFFE));
+ dst[1] = (idx >> 4 & 15) * (1 - ((sign & 1) << 1));
+
+ return dst + 2;
+}
+
+/* Decode an unsigned quad (four 2-bit magnitudes). Sign bits are consumed
+ * MSB-first from 'sign', advanced only for nonzero values as flagged in
+ * the nz bits (idx >> 12). Returns dst advanced past the quad. */
+static inline int *DEC_UQUAD(int *dst, unsigned idx, unsigned sign)
+{
+ unsigned nz = idx >> 12;
+
+ dst[0] = (idx & 3) * (1 + (((int)sign >> 31) << 1));
+ sign <<= nz & 1;
+ nz >>= 1;
+ dst[1] = (idx >> 2 & 3) * (1 + (((int)sign >> 31) << 1));
+ sign <<= nz & 1;
+ nz >>= 1;
+ dst[2] = (idx >> 4 & 3) * (1 + (((int)sign >> 31) << 1));
+ sign <<= nz & 1;
+ nz >>= 1;
+ dst[3] = (idx >> 6 & 3) * (1 + (((int)sign >> 31) << 1));
+
+ return dst + 4;
+}
+
+/* Map each coefficient through cbrt_tab, preserving sign (presumably the
+ * |x|^(4/3) inverse-quantization table — confirm against cbrt_tablegen.h). */
+static void vector_pow43(int *coefs, int len)
+{
+ int i, coef;
+
+ for (i=0; i<len; i++) {
+ coef = coefs[i];
+ if (coef < 0)
+ coef = -(int)cbrt_tab[-coef];
+ else
+ coef = (int)cbrt_tab[coef];
+ coefs[i] = coef;
+ }
+}
+
+/*
+ * Scale a band of coefficients by 2^(scale/4), fixed-point.
+ * The fractional quarter-step comes from exp2tab (scale & 3); the integer
+ * part becomes a rounded right shift (s > 0) or a left shift folded into
+ * the 64-bit multiply (s <= 0). 'offset' adjusts the output Q format.
+ */
+static void subband_scale(int *dst, int *src, int scale, int offset, int len)
+{
+ int ssign = scale < 0 ? -1 : 1;
+ int s = FFABS(scale);
+ unsigned int round;
+ int i, out, c = exp2tab[s & 3];
+
+ s = offset - (s >> 2);
+
+ if (s > 0) {
+ round = 1 << (s-1);
+ for (i=0; i<len; i++) {
+ out = (int)(((int64_t)src[i] * c) >> 32);
+ dst[i] = ((int)(out+round) >> s) * ssign;
+ }
+ }
+ else {
+ s = s + 32;
+ round = 1 << (s-1);
+ for (i=0; i<len; i++) {
+ out = (int)((int64_t)((int64_t)src[i] * c + round) >> s);
+ dst[i] = out * ssign;
+ }
+ }
+}
+
+/*
+ * Scale PNS (noise) coefficients in place by 2^(scale/4) normalized by the
+ * band energy. band_energy is first shifted down to fit 15 bits (nlz counts
+ * those shifts) before dividing the exp2tab factor by it; the shift count
+ * then mirrors subband_scale()'s rounded-shift scheme.
+ */
+static void noise_scale(int *coefs, int scale, int band_energy, int len)
+{
+ int ssign = scale < 0 ? -1 : 1;
+ int s = FFABS(scale);
+ unsigned int round;
+ int i, out, c = exp2tab[s & 3];
+ int nlz = 0;
+
+ while (band_energy > 0x7fff) {
+ band_energy >>= 1;
+ nlz++;
+ }
+ c /= band_energy;
+ s = 21 + nlz - (s >> 2);
+
+ if (s > 0) {
+ round = 1 << (s-1);
+ for (i=0; i<len; i++) {
+ out = (int)(((int64_t)coefs[i] * c) >> 32);
+ coefs[i] = ((int)(out+round) >> s) * ssign;
+ }
+ }
+ else {
+ s = s + 32;
+ round = 1 << (s-1);
+ for (i=0; i<len; i++) {
+ out = (int)((int64_t)((int64_t)coefs[i] * c + round) >> s);
+ coefs[i] = out * ssign;
+ }
+ }
+}
+
+/* Reduce a SoftFloat mantissa to 10 kept bits (mask 0xFFC00000) with
+ * round-to-nearest, operating on the magnitude and restoring the sign. */
+static av_always_inline SoftFloat flt16_round(SoftFloat pf)
+{
+ SoftFloat tmp;
+ int s;
+
+ tmp.exp = pf.exp;
+ s = pf.mant >> 31;
+ tmp.mant = (pf.mant ^ s) - s;
+ tmp.mant = (tmp.mant + 0x00200000U) & 0xFFC00000U;
+ tmp.mant = (tmp.mant ^ s) - s;
+
+ return tmp;
+}
+
+/* Like flt16_round() but rounds ties to even: the increment depends on the
+ * lowest kept mantissa bit (0x00400000). */
+static av_always_inline SoftFloat flt16_even(SoftFloat pf)
+{
+ SoftFloat tmp;
+ int s;
+
+ tmp.exp = pf.exp;
+ s = pf.mant >> 31;
+ tmp.mant = (pf.mant ^ s) - s;
+ tmp.mant = (tmp.mant + 0x001FFFFFU + (tmp.mant & 0x00400000U >> 16)) & 0xFFC00000U;
+ tmp.mant = (tmp.mant ^ s) - s;
+
+ return tmp;
+}
+
+/* Like flt16_round() but truncates toward zero (no rounding increment). */
+static av_always_inline SoftFloat flt16_trunc(SoftFloat pf)
+{
+ SoftFloat pun;
+ int s;
+
+ pun.exp = pf.exp;
+ s = pf.mant >> 31;
+ pun.mant = (pf.mant ^ s) - s;
+ pun.mant = pun.mant & 0xFFC00000U;
+ pun.mant = (pun.mant ^ s) - s;
+
+ return pun;
+}
+
+/*
+ * One step of the second-order backward-adaptive predictor (AAC Main
+ * profile), fixed-point SoftFloat version. Predicts the next coefficient
+ * from the two history samples r0/r1, optionally adds the prediction to
+ * *coef, then updates correlations, variances and history with leaky
+ * adaptation (constants a = 61/64, alpha = 29/32).
+ */
+static av_always_inline void predict(PredictorState *ps, int *coef,
+ int output_enable)
+{
+ const SoftFloat a = { 1023410176, 0 }; // 61.0 / 64
+ const SoftFloat alpha = { 973078528, 0 }; // 29.0 / 32
+ SoftFloat e0, e1;
+ SoftFloat pv;
+ SoftFloat k1, k2;
+ SoftFloat r0 = ps->r0, r1 = ps->r1;
+ SoftFloat cor0 = ps->cor0, cor1 = ps->cor1;
+ SoftFloat var0 = ps->var0, var1 = ps->var1;
+ SoftFloat tmp;
+
+ /* Predictor gains k = cor/var, zeroed when the variance is too small. */
+ if (var0.exp > 1 || (var0.exp == 1 && var0.mant > 0x20000000)) {
+ k1 = av_mul_sf(cor0, flt16_even(av_div_sf(a, var0)));
+ }
+ else {
+ k1.mant = 0;
+ k1.exp = 0;
+ }
+
+ if (var1.exp > 1 || (var1.exp == 1 && var1.mant > 0x20000000)) {
+ k2 = av_mul_sf(cor1, flt16_even(av_div_sf(a, var1)));
+ }
+ else {
+ k2.mant = 0;
+ k2.exp = 0;
+ }
+
+ tmp = av_mul_sf(k1, r0);
+ pv = flt16_round(av_add_sf(tmp, av_mul_sf(k2, r1)));
+ if (output_enable) {
+ /* Convert the predicted SoftFloat to the coefficient's fixed-point
+ * format with rounding. */
+ int shift = 28 - pv.exp;
+
+ if (shift < 31)
+ *coef += (pv.mant + (1 << (shift - 1))) >> shift;
+ }
+
+ /* Prediction errors, then leaky updates of the adaptation state. */
+ e0 = av_int2sf(*coef, 2);
+ e1 = av_sub_sf(e0, tmp);
+
+ ps->cor1 = flt16_trunc(av_add_sf(av_mul_sf(alpha, cor1), av_mul_sf(r1, e1)));
+ tmp = av_add_sf(av_mul_sf(r1, r1), av_mul_sf(e1, e1));
+ tmp.exp--;
+ ps->var1 = flt16_trunc(av_add_sf(av_mul_sf(alpha, var1), tmp));
+ ps->cor0 = flt16_trunc(av_add_sf(av_mul_sf(alpha, cor0), av_mul_sf(r0, e0)));
+ tmp = av_add_sf(av_mul_sf(r0, r0), av_mul_sf(e0, e0));
+ tmp.exp--;
+ ps->var0 = flt16_trunc(av_add_sf(av_mul_sf(alpha, var0), tmp));
+
+ ps->r1 = flt16_trunc(av_mul_sf(a, av_sub_sf(r0, av_mul_sf(k1, e0))));
+ ps->r0 = flt16_trunc(av_mul_sf(a, e0));
+}
+
+
+/* Coupling-gain fractional scale factors 2^(i/8), i = 0..7, in Q30. */
+static const int cce_scale_fixed[8] = {
+ Q30(1.0), //2^(0/8)
+ Q30(1.0905077327), //2^(1/8)
+ Q30(1.1892071150), //2^(2/8)
+ Q30(1.2968395547), //2^(3/8)
+ Q30(1.4142135624), //2^(4/8)
+ Q30(1.5422108254), //2^(5/8)
+ Q30(1.6817928305), //2^(6/8)
+ Q30(1.8340080864), //2^(7/8)
+};
+
+/**
+ * Apply dependent channel coupling (applied before IMDCT).
+ *
+ * Fixed-point version: the per-band coupling gain is split into a Q30
+ * fractional factor (cce_scale_fixed, 8th-of-an-octave steps) and an
+ * integer shift; negative shifts are applied with rounding.
+ *
+ * @param index index into coupling gain array
+ */
+static void apply_dependent_coupling_fixed(AACContext *ac,
+ SingleChannelElement *target,
+ ChannelElement *cce, int index)
+{
+ IndividualChannelStream *ics = &cce->ch[0].ics;
+ const uint16_t *offsets = ics->swb_offset;
+ int *dest = target->coeffs;
+ const int *src = cce->ch[0].coeffs;
+ int g, i, group, k, idx = 0;
+ if (ac->oc[1].m4ac.object_type == AOT_AAC_LTP) {
+ av_log(ac->avctx, AV_LOG_ERROR,
+ "Dependent coupling is not supported together with LTP\n");
+ return;
+ }
+ for (g = 0; g < ics->num_window_groups; g++) {
+ for (i = 0; i < ics->max_sfb; i++, idx++) {
+ if (cce->ch[0].band_type[idx] != ZERO_BT) {
+ const int gain = cce->coup.gain[index][idx];
+ int shift, round, c, tmp;
+
+ /* Split the gain into sign, fractional factor and shift. */
+ if (gain < 0) {
+ c = -cce_scale_fixed[-gain & 7];
+ shift = (-gain-1024) >> 3;
+ }
+ else {
+ c = cce_scale_fixed[gain & 7];
+ shift = (gain-1024) >> 3;
+ }
+
+ if (shift < 0) {
+ shift = -shift;
+ round = 1 << (shift - 1);
+
+ for (group = 0; group < ics->group_len[g]; group++) {
+ for (k = offsets[i]; k < offsets[i + 1]; k++) {
+ tmp = (int)(((int64_t)src[group * 128 + k] * c + \
+ (int64_t)0x1000000000) >> 37);
+ dest[group * 128 + k] += (tmp + round) >> shift;
+ }
+ }
+ }
+ else {
+ for (group = 0; group < ics->group_len[g]; group++) {
+ for (k = offsets[i]; k < offsets[i + 1]; k++) {
+ tmp = (int)(((int64_t)src[group * 128 + k] * c + \
+ (int64_t)0x1000000000) >> 37);
+ dest[group * 128 + k] += tmp << shift;
+ }
+ }
+ }
+ }
+ }
+ dest += ics->group_len[g] * 128;
+ src += ics->group_len[g] * 128;
+ }
+}
+
+/**
+ * Apply independent channel coupling (applied after IMDCT).
+ *
+ * Fixed-point version: a single gain (index 0) scales the whole output
+ * buffer; the buffer length doubles when SBR is active.
+ *
+ * @param index index into coupling gain array
+ */
+static void apply_independent_coupling_fixed(AACContext *ac,
+ SingleChannelElement *target,
+ ChannelElement *cce, int index)
+{
+ int i, c, shift, round, tmp;
+ const int gain = cce->coup.gain[index][0];
+ const int *src = cce->ch[0].ret;
+ int *dest = target->ret;
+ const int len = 1024 << (ac->oc[1].m4ac.sbr == 1);
+
+ c = cce_scale_fixed[gain & 7];
+ shift = (gain-1024) >> 3;
+ if (shift < 0) {
+ shift = -shift;
+ round = 1 << (shift - 1);
+
+ for (i = 0; i < len; i++) {
+ tmp = (int)(((int64_t)src[i] * c + (int64_t)0x1000000000) >> 37);
+ dest[i] += (tmp + round) >> shift;
+ }
+ }
+ else {
+ for (i = 0; i < len; i++) {
+ tmp = (int)(((int64_t)src[i] * c + (int64_t)0x1000000000) >> 37);
+ dest[i] += tmp << shift;
+ }
+ }
+}
+
+#include "aacdec_template.c"
+
+/* Registration entry for the fixed-point AAC decoder (planar S32 output). */
+AVCodec ff_aac_fixed_decoder = {
+ .name = "aac_fixed",
+ .long_name = NULL_IF_CONFIG_SMALL("AAC (Advanced Audio Coding)"),
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_AAC,
+ .priv_data_size = sizeof(AACContext),
+ .init = aac_decode_init,
+ .close = aac_decode_close,
+ .decode = aac_decode_frame,
+ .sample_fmts = (const enum AVSampleFormat[]) {
+ AV_SAMPLE_FMT_S32P, AV_SAMPLE_FMT_NONE
+ },
++ .capabilities = AV_CODEC_CAP_CHANNEL_CONF | AV_CODEC_CAP_DR1,
+ .channel_layouts = aac_channel_layout,
+ .flush = flush,
+};
.init = aac_encode_init,
.encode2 = aac_encode_frame,
.close = aac_encode_end,
- .capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY |
- CODEC_CAP_EXPERIMENTAL,
+ .supported_samplerates = mpeg4audio_sample_rates,
+ .capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME | AV_CODEC_CAP_DELAY |
+ AV_CODEC_CAP_EXPERIMENTAL,
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
.priv_class = &aacenc_class,
--- /dev/null
- .capabilities = CODEC_CAP_DR1,
+/*
+ * Copyright (c) 2012
+ * MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Author: Stanislav Ocovaj (socovaj@mips.com)
+ *
+ * AC3 fixed-point decoder for MIPS platforms
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define FFT_FLOAT 0
+#define USE_FIXED 1
+#define FFT_FIXED_32 1
+#include "ac3dec.h"
+
+
+/* Fixed-point reciprocal table — presumably 1/end_freq values used by the
+ * shared ac3dec template; TODO confirm format against ac3dec.c usage. */
+static const int end_freq_inv_tab[8] =
+{
+ 50529027, 44278013, 39403370, 32292987, 27356480, 23729101, 20951060, 18755316
+};
+
+/*
+ * Apply the AC-3 dynamic range gain to a block of coefficients.
+ * dynrng packs a 5-bit mantissa (low bits, biased by 0x20) and a shift
+ * derived from the upper bits; positive shifts round, negative shifts
+ * become left shifts. The loops are hand-unrolled by 8 (len is assumed
+ * to be a multiple of 8 — TODO confirm against callers in ac3dec.c).
+ */
+static void scale_coefs (
+ int32_t *dst,
+ const int32_t *src,
+ int dynrng,
+ int len)
+{
+ int i, shift, round;
+ int16_t mul;
+ int temp, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+
+ mul = (dynrng & 0x1f) + 0x20;
+ shift = 4 - ((dynrng << 23) >> 28);
+ if (shift > 0 ) {
+ round = 1 << (shift-1);
+ for (i=0; i<len; i+=8) {
+
+ temp = src[i] * mul;
+ temp1 = src[i+1] * mul;
+ temp = temp + round;
+ temp2 = src[i+2] * mul;
+
+ temp1 = temp1 + round;
+ dst[i] = temp >> shift;
+ temp3 = src[i+3] * mul;
+ temp2 = temp2 + round;
+
+ dst[i+1] = temp1 >> shift;
+ temp4 = src[i + 4] * mul;
+ temp3 = temp3 + round;
+ dst[i+2] = temp2 >> shift;
+
+ temp5 = src[i+5] * mul;
+ temp4 = temp4 + round;
+ dst[i+3] = temp3 >> shift;
+ temp6 = src[i+6] * mul;
+
+ dst[i+4] = temp4 >> shift;
+ temp5 = temp5 + round;
+ temp7 = src[i+7] * mul;
+ temp6 = temp6 + round;
+
+ dst[i+5] = temp5 >> shift;
+ temp7 = temp7 + round;
+ dst[i+6] = temp6 >> shift;
+ dst[i+7] = temp7 >> shift;
+
+ }
+ } else {
+ shift = -shift;
+ for (i=0; i<len; i+=8) {
+
+ temp = src[i] * mul;
+ temp1 = src[i+1] * mul;
+ temp2 = src[i+2] * mul;
+
+ dst[i] = temp << shift;
+ temp3 = src[i+3] * mul;
+
+ dst[i+1] = temp1 << shift;
+ temp4 = src[i + 4] * mul;
+ dst[i+2] = temp2 << shift;
+
+ temp5 = src[i+5] * mul;
+ dst[i+3] = temp3 << shift;
+ temp6 = src[i+6] * mul;
+
+ dst[i+4] = temp4 << shift;
+ temp7 = src[i+7] * mul;
+
+ dst[i+5] = temp5 << shift;
+ dst[i+6] = temp6 << shift;
+ dst[i+7] = temp7 << shift;
+
+ }
+ }
+}
+
+/**
+ * Downmix samples from original signal to stereo or mono (this is for 16-bit samples
+ * and fixed point decoder - original (for 32-bit samples) is in ac3dsp.c).
+ *
+ * The downmix matrix coefficients are Q12 fixed point: products are
+ * accumulated and rounded back with (+2048) >> 12. Channel counts other
+ * than 1 or 2 leave the samples untouched.
+ */
+static void ac3_downmix_c_fixed16(int16_t **samples, int16_t (*matrix)[2],
+ int out_ch, int in_ch, int len)
+{
+ int i, j;
+ int v0, v1;
+ if (out_ch == 2) {
+ for (i = 0; i < len; i++) {
+ v0 = v1 = 0;
+ for (j = 0; j < in_ch; j++) {
+ v0 += samples[j][i] * matrix[j][0];
+ v1 += samples[j][i] * matrix[j][1];
+ }
+ samples[0][i] = (v0+2048)>>12;
+ samples[1][i] = (v1+2048)>>12;
+ }
+ } else if (out_ch == 1) {
+ for (i = 0; i < len; i++) {
+ v0 = 0;
+ for (j = 0; j < in_ch; j++)
+ v0 += samples[j][i] * matrix[j][0];
+ samples[0][i] = (v0+2048)>>12;
+ }
+ }
+}
+
+#include "eac3dec.c"
+#include "ac3dec.c"
+
+/* Private AVOptions of the fixed-point AC-3 decoder context. */
+static const AVOption options[] = {
+ { "drc_scale", "percentage of dynamic range compression to apply", OFFSET(drc_scale), AV_OPT_TYPE_FLOAT, {.dbl = 1.0}, 0.0, 6.0, PAR },
+ { "heavy_compr", "heavy dynamic range compression enabled", OFFSET(heavy_compression), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, PAR },
+ { NULL},
+};
+
+/* AVClass exposing the options above for the fixed-point decoder. */
+static const AVClass ac3_decoder_class = {
+ .class_name = "Fixed-Point AC-3 Decoder",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+/* Registration entry for the fixed-point AC-3 decoder (planar S16 output). */
+AVCodec ff_ac3_fixed_decoder = {
+ .name = "ac3_fixed",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_AC3,
+ .priv_data_size = sizeof (AC3DecodeContext),
+ .init = ac3_decode_init,
+ .close = ac3_decode_end,
+ .decode = ac3_decode_frame,
++ .capabilities = AV_CODEC_CAP_DR1,
+ .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
+ .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,
+ AV_SAMPLE_FMT_NONE },
+ .priv_class = &ac3_decoder_class,
+};
--- /dev/null
- .capabilities = CODEC_CAP_DR1,
+/*
+ * AC-3 Audio Decoder
+ * This code was developed as part of Google Summer of Code 2006.
+ * E-AC-3 support was added as part of Google Summer of Code 2007.
+ *
+ * Copyright (c) 2006 Kartikey Mahendra BHATT (bhattkm at gmail dot com)
+ * Copyright (c) 2007-2008 Bartlomiej Wolowiec <bartek.wolowiec@gmail.com>
+ * Copyright (c) 2007 Justin Ruggles <justin.ruggles@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * Float AC-3/E-AC-3 decoder build: pulls in the shared decoder template
+ * sources below. NOTE(review): the previous comment here ("Upmix delay
+ * samples from stereo to original channel layout.") described a helper
+ * function, not this file, and appears to be a misplaced leftover.
+ */
+#include "ac3dec.h"
+#include "eac3dec.c"
+#include "ac3dec.c"
+
+/* Private AVOptions of the float AC-3/E-AC-3 decoder context, including the
+ * preferred stereo downmix mode and Lt/Rt / Lo/Ro mix-level overrides. */
+static const AVOption options[] = {
+ { "drc_scale", "percentage of dynamic range compression to apply", OFFSET(drc_scale), AV_OPT_TYPE_FLOAT, {.dbl = 1.0}, 0.0, 6.0, PAR },
+ { "heavy_compr", "heavy dynamic range compression enabled", OFFSET(heavy_compression), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 1, PAR },
+ { "target_level", "target level in -dBFS (0 not applied)", OFFSET(target_level), AV_OPT_TYPE_INT, {.i64 = 0 }, -31, 0, PAR },
+
+{"dmix_mode", "Preferred Stereo Downmix Mode", OFFSET(preferred_stereo_downmix), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, 2, 0, "dmix_mode"},
+{"ltrt_cmixlev", "Lt/Rt Center Mix Level", OFFSET(ltrt_center_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, 0},
+{"ltrt_surmixlev", "Lt/Rt Surround Mix Level", OFFSET(ltrt_surround_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, 0},
+{"loro_cmixlev", "Lo/Ro Center Mix Level", OFFSET(loro_center_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, 0},
+{"loro_surmixlev", "Lo/Ro Surround Mix Level", OFFSET(loro_surround_mix_level), AV_OPT_TYPE_FLOAT, {.dbl = -1.0 }, -1.0, 2.0, 0},
+
+ { NULL},
+};
+
+static const AVClass ac3_decoder_class = {
+ .class_name = "AC3 decoder",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+AVCodec ff_ac3_decoder = {
+ .name = "ac3",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_AC3,
+ .priv_data_size = sizeof (AC3DecodeContext),
+ .init = ac3_decode_init,
+ .close = ac3_decode_end,
+ .decode = ac3_decode_frame,
- .capabilities = CODEC_CAP_DR1,
++ .capabilities = AV_CODEC_CAP_DR1,
+ .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
+ .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
+ AV_SAMPLE_FMT_NONE },
+ .priv_class = &ac3_decoder_class,
+};
+
+#if CONFIG_EAC3_DECODER
+static const AVClass eac3_decoder_class = {
+ .class_name = "E-AC3 decoder",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+AVCodec ff_eac3_decoder = {
+ .name = "eac3",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_EAC3,
+ .priv_data_size = sizeof (AC3DecodeContext),
+ .init = ac3_decode_init,
+ .close = ac3_decode_end,
+ .decode = ac3_decode_frame,
++ .capabilities = AV_CODEC_CAP_DR1,
+ .long_name = NULL_IF_CONFIG_SMALL("ATSC A/52B (AC-3, E-AC-3)"),
+ .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
+ AV_SAMPLE_FMT_NONE },
+ .priv_class = &eac3_decoder_class,
+};
+#endif
.priv_data_size = sizeof(ADPCMDecodeContext), \
.init = adpcm_decode_init, \
.decode = adpcm_decode_frame, \
- .capabilities = CODEC_CAP_DR1, \
+ .flush = adpcm_flush, \
+ .capabilities = AV_CODEC_CAP_DR1, \
.sample_fmts = sample_fmts_, \
}
.init = alac_decode_init,
.close = alac_decode_close,
.decode = alac_decode_frame,
- .capabilities = AV_CODEC_CAP_DR1,
+ .init_thread_copy = ONLY_IF_THREADS_ENABLED(init_thread_copy),
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
++ .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
+ .priv_class = &alac_class
};
.init = decode_init,
.close = decode_end,
.decode = decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
};
+#endif
+#if CONFIG_ASV2_DECODER
AVCodec ff_asv2_decoder = {
.name = "asv2",
.long_name = NULL_IF_CONFIG_SMALL("ASUS V2"),
.init = decode_init,
.close = decode_end,
.decode = decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
};
+#endif
.id = AV_CODEC_ID_ATRAC3,
.priv_data_size = sizeof(ATRAC3Context),
.init = atrac3_decode_init,
- .init_static_data = atrac3_init_static_data,
.close = atrac3_decode_close,
.decode = atrac3_decode_frame,
- .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
};
}
AVCodec ff_atrac3p_decoder = {
- .name = "atrac3plus",
- .long_name = NULL_IF_CONFIG_SMALL("ATRAC3+ (Adaptive TRansform Acoustic Coding 3+)"),
- .type = AVMEDIA_TYPE_AUDIO,
- .id = AV_CODEC_ID_ATRAC3P,
- .capabilities = AV_CODEC_CAP_DR1,
- .priv_data_size = sizeof(ATRAC3PContext),
- .init = atrac3p_decode_init,
- .init_static_data = ff_atrac3p_init_vlcs,
- .close = atrac3p_decode_close,
- .decode = atrac3p_decode_frame,
+ .name = "atrac3plus",
+ .long_name = NULL_IF_CONFIG_SMALL("ATRAC3+ (Adaptive TRansform Acoustic Coding 3+)"),
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_ATRAC3P,
- .capabilities = CODEC_CAP_DR1,
++ .capabilities = AV_CODEC_CAP_DR1,
+ .priv_data_size = sizeof(ATRAC3PContext),
+ .init = atrac3p_decode_init,
+ .close = atrac3p_decode_close,
+ .decode = atrac3p_decode_frame,
};
*/
#define AV_CODEC_FLAG2_IGNORE_CROP (1 << 16)
+/**
+ * Show all frames before the first keyframe
+ */
+#define AV_CODEC_FLAG2_SHOW_ALL 0x00400000
+/**
+ * Export motion vectors through frame side data
+ */
+#define AV_CODEC_FLAG2_EXPORT_MVS 0x10000000
+/**
+ * Do not skip samples and export skip information as frame side data
+ */
+#define AV_CODEC_FLAG2_SKIP_MANUAL 0x20000000
+
+ /* Unsupported options :
+ * Syntax Arithmetic coding (SAC)
+ * Reference Picture Selection
+ * Independent Segment Decoding */
+ /* /Fx */
+ /* codec capabilities */
+
+ /**
+ * Decoder can use draw_horiz_band callback.
+ */
+ #define AV_CODEC_CAP_DRAW_HORIZ_BAND (1 << 0)
+ /**
+ * Codec uses get_buffer() for allocating buffers and supports custom allocators.
+ * If not set, it might not use get_buffer() at all or use operations that
+ * assume the buffer was allocated by avcodec_default_get_buffer.
+ */
+ #define AV_CODEC_CAP_DR1 (1 << 1)
+ #define AV_CODEC_CAP_TRUNCATED (1 << 3)
+ /**
+ * Encoder or decoder requires flushing with NULL input at the end in order to
+ * give the complete and correct output.
+ *
 + * NOTE: If this flag is not set, the codec is guaranteed to never be fed
 + * with NULL data. The user can still send NULL data to the public encode
+ * or decode function, but libavcodec will not pass it along to the codec
+ * unless this flag is set.
+ *
+ * Decoders:
+ * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL,
+ * avpkt->size=0 at the end to get the delayed data until the decoder no longer
+ * returns frames.
+ *
+ * Encoders:
+ * The encoder needs to be fed with NULL data at the end of encoding until the
+ * encoder no longer returns data.
+ *
+ * NOTE: For encoders implementing the AVCodec.encode2() function, setting this
+ * flag also means that the encoder must set the pts and duration for
+ * each output packet. If this flag is not set, the pts and duration will
+ * be determined by libavcodec from the input frame.
+ */
+ #define AV_CODEC_CAP_DELAY (1 << 5)
+ /**
+ * Codec can be fed a final frame with a smaller size.
+ * This can be used to prevent truncation of the last audio samples.
+ */
+ #define AV_CODEC_CAP_SMALL_LAST_FRAME (1 << 6)
++
++#if FF_API_CAP_VDPAU
++/**
++ * Codec can export data for HW decoding (VDPAU).
++ */
++#define AV_CODEC_CAP_HWACCEL_VDPAU (1 << 7)
++#endif
++
+ /**
+ * Codec can output multiple frames per AVPacket
 + * Normally demuxers return one frame at a time, demuxers which do not do
 + * so are connected to a parser to split what they return into proper frames.
+ * This flag is reserved to the very rare category of codecs which have a
 + * bitstream that cannot be split into frames without time-consuming
 + * operations like full decoding. Demuxers carrying such bitstreams thus
+ * may return multiple frames in a packet. This has many disadvantages like
+ * prohibiting stream copy in many cases thus it should only be considered
+ * as a last resort.
+ */
+ #define AV_CODEC_CAP_SUBFRAMES (1 << 8)
+ /**
+ * Codec is experimental and is thus avoided in favor of non experimental
+ * encoders
+ */
+ #define AV_CODEC_CAP_EXPERIMENTAL (1 << 9)
+ /**
+ * Codec should fill in channel configuration and samplerate instead of container
+ */
+ #define AV_CODEC_CAP_CHANNEL_CONF (1 << 10)
+ /**
+ * Codec supports frame-level multithreading.
+ */
+ #define AV_CODEC_CAP_FRAME_THREADS (1 << 12)
+ /**
+ * Codec supports slice-based (or partition-based) multithreading.
+ */
+ #define AV_CODEC_CAP_SLICE_THREADS (1 << 13)
+ /**
+ * Codec supports changed parameters at any point.
+ */
+ #define AV_CODEC_CAP_PARAM_CHANGE (1 << 14)
+ /**
+ * Codec supports avctx->thread_count == 0 (auto).
+ */
+ #define AV_CODEC_CAP_AUTO_THREADS (1 << 15)
+ /**
+ * Audio encoder supports receiving a different number of samples in each call.
+ */
+ #define AV_CODEC_CAP_VARIABLE_FRAME_SIZE (1 << 16)
++/**
++ * Codec is intra only.
++ */
++#define AV_CODEC_CAP_INTRA_ONLY 0x40000000
++/**
++ * Codec is lossless.
++ */
++#define AV_CODEC_CAP_LOSSLESS 0x80000000
++
+
/**
* Allow decoders to produce frames with data planes that are not aligned
* to CPU requirements (e.g. due to cropping).
--- /dev/null
- .capabilities = CODEC_CAP_DR1,
+/*
+ * AVRn decoder
+ * Copyright (c) 2012 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "internal.h"
+#include "mjpeg.h"
+#include "mjpegdec.h"
+#include "libavutil/imgutils.h"
+
+typedef struct {
+ AVCodecContext *mjpeg_avctx;
+ int is_mjpeg;
+ int interlace; //FIXME use frame.interlaced_frame
+ int tff;
+} AVRnContext;
+
+static av_cold int init(AVCodecContext *avctx)
+{
+ AVRnContext *a = avctx->priv_data;
+ int ret;
+
+ // Support "Resolution 1:1" for Avid AVI Codec
+ a->is_mjpeg = avctx->extradata_size < 31 || memcmp(&avctx->extradata[28], "1:1", 3);
+
+ if(!a->is_mjpeg && avctx->lowres) {
+ av_log(avctx, AV_LOG_ERROR, "lowres is not possible with rawvideo\n");
+ return AVERROR(EINVAL);
+ }
+
+ if(a->is_mjpeg) {
+ AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_MJPEG);
+ AVDictionary *thread_opt = NULL;
+ if (!codec) {
+ av_log(avctx, AV_LOG_ERROR, "MJPEG codec not found\n");
+ return AVERROR_DECODER_NOT_FOUND;
+ }
+
+ a->mjpeg_avctx = avcodec_alloc_context3(codec);
+
+ av_dict_set(&thread_opt, "threads", "1", 0); // Is this needed ?
+ a->mjpeg_avctx->refcounted_frames = 1;
+ a->mjpeg_avctx->flags = avctx->flags;
+ a->mjpeg_avctx->idct_algo = avctx->idct_algo;
+ a->mjpeg_avctx->lowres = avctx->lowres;
+ a->mjpeg_avctx->width = avctx->width;
+ a->mjpeg_avctx->height = avctx->height;
+
+ if ((ret = ff_codec_open2_recursive(a->mjpeg_avctx, codec, &thread_opt)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "MJPEG codec failed to open\n");
+ }
+ av_dict_free(&thread_opt);
+
+ return ret;
+ }
+
+ if ((ret = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
+ return ret;
+
+ avctx->pix_fmt = AV_PIX_FMT_UYVY422;
+
+ if(avctx->extradata_size >= 9 && avctx->extradata[4]+28 < avctx->extradata_size) {
+ int ndx = avctx->extradata[4] + 4;
+ a->interlace = !memcmp(avctx->extradata + ndx, "1:1(", 4);
+ if(a->interlace) {
+ a->tff = avctx->extradata[ndx + 24] == 1;
+ }
+ }
+
+ return 0;
+}
+
+static av_cold int end(AVCodecContext *avctx)
+{
+ AVRnContext *a = avctx->priv_data;
+
+ avcodec_close(a->mjpeg_avctx);
+ av_freep(&a->mjpeg_avctx);
+
+ return 0;
+}
+
+static int decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame, AVPacket *avpkt)
+{
+ AVRnContext *a = avctx->priv_data;
+ AVFrame *p = data;
+ const uint8_t *buf = avpkt->data;
+ int buf_size = avpkt->size;
+ int y, ret, true_height;
+
+ if(a->is_mjpeg) {
+ ret = avcodec_decode_video2(a->mjpeg_avctx, data, got_frame, avpkt);
+
+ if (ret >= 0 && *got_frame && avctx->width <= p->width && avctx->height <= p->height) {
+ int shift = p->height - avctx->height;
+ int subsample_h, subsample_v;
+
+ av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &subsample_h, &subsample_v);
+
+ p->data[0] += p->linesize[0] * shift;
+ if (p->data[2]) {
+ p->data[1] += p->linesize[1] * (shift>>subsample_v);
+ p->data[2] += p->linesize[2] * (shift>>subsample_v);
+ }
+
+ p->width = avctx->width;
+ p->height = avctx->height;
+ }
+ avctx->pix_fmt = a->mjpeg_avctx->pix_fmt;
+ return ret;
+ }
+
+ true_height = buf_size / (2*avctx->width);
+
+ if(buf_size < 2*avctx->width * avctx->height) {
+ av_log(avctx, AV_LOG_ERROR, "packet too small\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ if ((ret = ff_get_buffer(avctx, p, 0)) < 0)
+ return ret;
+ p->pict_type= AV_PICTURE_TYPE_I;
+ p->key_frame= 1;
+
+ if(a->interlace) {
+ buf += (true_height - avctx->height)*avctx->width;
+ for(y = 0; y < avctx->height-1; y+=2) {
+ memcpy(p->data[0] + (y+ a->tff)*p->linesize[0], buf , 2*avctx->width);
+ memcpy(p->data[0] + (y+!a->tff)*p->linesize[0], buf + avctx->width*true_height+4, 2*avctx->width);
+ buf += 2*avctx->width;
+ }
+ } else {
+ buf += (true_height - avctx->height)*avctx->width*2;
+ for(y = 0; y < avctx->height; y++) {
+ memcpy(p->data[0] + y*p->linesize[0], buf, 2*avctx->width);
+ buf += 2*avctx->width;
+ }
+ }
+
+ *got_frame = 1;
+ return buf_size;
+}
+
+AVCodec ff_avrn_decoder = {
+ .name = "avrn",
+ .long_name = NULL_IF_CONFIG_SMALL("Avid AVI Codec"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_AVRN,
+ .priv_data_size = sizeof(AVRnContext),
+ .init = init,
+ .close = end,
+ .decode = decode_frame,
++ .capabilities = AV_CODEC_CAP_DR1,
+ .max_lowres = 3,
+};
--- /dev/null
- .capabilities = CODEC_CAP_DR1,
+/*
+ * AVID Meridien decoder
+ *
+ * Copyright (c) 2012 Carl Eugen Hoyos
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "internal.h"
+#include "libavutil/intreadwrite.h"
+
+static av_cold int avui_decode_init(AVCodecContext *avctx)
+{
+ avctx->pix_fmt = AV_PIX_FMT_YUVA422P;
+ return 0;
+}
+
+static int avui_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame, AVPacket *avpkt)
+{
+ int ret;
+ AVFrame *pic = data;
+ const uint8_t *src = avpkt->data, *extradata = avctx->extradata;
+ const uint8_t *srca;
+ uint8_t *y, *u, *v, *a;
+ int transparent, interlaced = 1, skip, opaque_length, i, j, k;
+ uint32_t extradata_size = avctx->extradata_size;
+
+ while (extradata_size >= 24) {
+ uint32_t atom_size = AV_RB32(extradata);
+ if (!memcmp(&extradata[4], "APRGAPRG0001", 12)) {
+ interlaced = extradata[19] != 1;
+ break;
+ }
+ if (atom_size && atom_size <= extradata_size) {
+ extradata += atom_size;
+ extradata_size -= atom_size;
+ } else {
+ break;
+ }
+ }
+ if (avctx->height == 486) {
+ skip = 10;
+ } else {
+ skip = 16;
+ }
+ opaque_length = 2 * avctx->width * (avctx->height + skip) + 4 * interlaced;
+ if (avpkt->size < opaque_length) {
+ av_log(avctx, AV_LOG_ERROR, "Insufficient input data.\n");
+ return AVERROR(EINVAL);
+ }
+ transparent = avctx->bits_per_coded_sample == 32 &&
+ avpkt->size >= opaque_length * 2 + 4;
+ srca = src + opaque_length + 5;
+
+ if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
+ return ret;
+
+ pic->key_frame = 1;
+ pic->pict_type = AV_PICTURE_TYPE_I;
+
+ if (!interlaced) {
+ src += avctx->width * skip;
+ srca += avctx->width * skip;
+ }
+
+ for (i = 0; i < interlaced + 1; i++) {
+ src += avctx->width * skip;
+ srca += avctx->width * skip;
+ if (interlaced && avctx->height == 486) {
+ y = pic->data[0] + (1 - i) * pic->linesize[0];
+ u = pic->data[1] + (1 - i) * pic->linesize[1];
+ v = pic->data[2] + (1 - i) * pic->linesize[2];
+ a = pic->data[3] + (1 - i) * pic->linesize[3];
+ } else {
+ y = pic->data[0] + i * pic->linesize[0];
+ u = pic->data[1] + i * pic->linesize[1];
+ v = pic->data[2] + i * pic->linesize[2];
+ a = pic->data[3] + i * pic->linesize[3];
+ }
+
+ for (j = 0; j < avctx->height >> interlaced; j++) {
+ for (k = 0; k < avctx->width >> 1; k++) {
+ u[ k ] = *src++;
+ y[2 * k ] = *src++;
+ a[2 * k ] = 0xFF - (transparent ? *srca++ : 0);
+ srca++;
+ v[ k ] = *src++;
+ y[2 * k + 1] = *src++;
+ a[2 * k + 1] = 0xFF - (transparent ? *srca++ : 0);
+ srca++;
+ }
+
+ y += (interlaced + 1) * pic->linesize[0];
+ u += (interlaced + 1) * pic->linesize[1];
+ v += (interlaced + 1) * pic->linesize[2];
+ a += (interlaced + 1) * pic->linesize[3];
+ }
+ src += 4;
+ srca += 4;
+ }
+ *got_frame = 1;
+
+ return avpkt->size;
+}
+
+AVCodec ff_avui_decoder = {
+ .name = "avui",
+ .long_name = NULL_IF_CONFIG_SMALL("Avid Meridien Uncompressed"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_AVUI,
+ .init = avui_decode_init,
+ .decode = avui_decode_frame,
++ .capabilities = AV_CODEC_CAP_DR1,
+};
--- /dev/null
- .capabilities = CODEC_CAP_EXPERIMENTAL,
+/*
+ * AVID Meridien encoder
+ *
+ * Copyright (c) 2012 Carl Eugen Hoyos
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "internal.h"
+#include "libavutil/intreadwrite.h"
+
+static av_cold int avui_encode_init(AVCodecContext *avctx)
+{
+ if (avctx->width != 720 || avctx->height != 486 && avctx->height != 576) {
+ av_log(avctx, AV_LOG_ERROR, "Only 720x486 and 720x576 are supported.\n");
+ return AVERROR(EINVAL);
+ }
+ if (!(avctx->extradata = av_mallocz(144 + FF_INPUT_BUFFER_PADDING_SIZE)))
+ return AVERROR(ENOMEM);
+ avctx->extradata_size = 144;
+ memcpy(avctx->extradata, "\0\0\0\x18""APRGAPRG0001", 16);
+ if (avctx->field_order > AV_FIELD_PROGRESSIVE) {
+ avctx->extradata[19] = 2;
+ } else {
+ avctx->extradata[19] = 1;
+ }
+ memcpy(avctx->extradata + 24, "\0\0\0\x78""ARESARES0001""\0\0\0\x98", 20);
+ AV_WB32(avctx->extradata + 44, avctx->width);
+ AV_WB32(avctx->extradata + 48, avctx->height);
+ memcpy(avctx->extradata + 52, "\0\0\0\x1\0\0\0\x20\0\0\0\x2", 12);
+
+
+ return 0;
+}
+
+static int avui_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+ const AVFrame *pic, int *got_packet)
+{
+ uint8_t *dst;
+ int i, j, skip, ret, size, interlaced;
+
+ interlaced = avctx->field_order > AV_FIELD_PROGRESSIVE;
+
+ if (avctx->height == 486) {
+ skip = 10;
+ } else {
+ skip = 16;
+ }
+ size = 2 * avctx->width * (avctx->height + skip) + 8 * interlaced;
+ if ((ret = ff_alloc_packet(pkt, size)) < 0)
+ return ret;
+ dst = pkt->data;
+ if (!interlaced) {
+ memset(dst, 0, avctx->width * skip);
+ dst += avctx->width * skip;
+ }
+
+ avctx->coded_frame->key_frame = 1;
+ avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+
+ for (i = 0; i <= interlaced; i++) {
+ uint8_t *src;
+ if (interlaced && avctx->height == 486) {
+ src = pic->data[0] + (1 - i) * pic->linesize[0];
+ } else {
+ src = pic->data[0] + i * pic->linesize[0];
+ }
+ memset(dst, 0, avctx->width * skip + 4 * i);
+ dst += avctx->width * skip + 4 * i;
+ for (j = 0; j < avctx->height; j += interlaced + 1) {
+ memcpy(dst, src, avctx->width * 2);
+ src += (interlaced + 1) * pic->linesize[0];
+ dst += avctx->width * 2;
+ }
+ }
+
+ pkt->flags |= AV_PKT_FLAG_KEY;
+ *got_packet = 1;
+ return 0;
+}
+
+AVCodec ff_avui_encoder = {
+ .name = "avui",
+ .long_name = NULL_IF_CONFIG_SMALL("Avid Meridien Uncompressed"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_AVUI,
+ .init = avui_encode_init,
+ .encode2 = avui_encode_frame,
++ .capabilities = AV_CODEC_CAP_EXPERIMENTAL,
+ .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_UYVY422, AV_PIX_FMT_NONE },
+};
.init = decode_init,
.close = decode_end,
.decode = decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .flush = flush,
+ .capabilities = AV_CODEC_CAP_DR1,
};
--- /dev/null
- .capabilities = CODEC_CAP_DR1,
+/*
+ * Binary text decoder
+ * eXtended BINary text (XBIN) decoder
+ * iCEDraw File decoder
+ * Copyright (c) 2010 Peter Ross (pross@xvid.org)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Binary text decoder
+ * eXtended BINary text (XBIN) decoder
+ * iCEDraw File decoder
+ */
+
+#include "libavutil/intreadwrite.h"
+#include "libavutil/xga_font_data.h"
+#include "avcodec.h"
+#include "cga_data.h"
+#include "bintext.h"
+#include "internal.h"
+
+typedef struct XbinContext {
+ AVFrame *frame;
+ int palette[16];
+ int flags;
+ int font_height;
+ const uint8_t *font;
+ int x, y;
+} XbinContext;
+
+static av_cold int decode_init(AVCodecContext *avctx)
+{
+ XbinContext *s = avctx->priv_data;
+ uint8_t *p;
+ int i;
+
+ avctx->pix_fmt = AV_PIX_FMT_PAL8;
+ p = avctx->extradata;
+ if (p) {
+ s->font_height = p[0];
+ s->flags = p[1];
+ p += 2;
+ if(avctx->extradata_size < 2 + (!!(s->flags & BINTEXT_PALETTE))*3*16
+ + (!!(s->flags & BINTEXT_FONT))*s->font_height*256) {
+ av_log(avctx, AV_LOG_ERROR, "not enough extradata\n");
+ return AVERROR_INVALIDDATA;
+ }
+ } else {
+ s->font_height = 8;
+ s->flags = 0;
+ }
+
+ if ((s->flags & BINTEXT_PALETTE)) {
+ for (i = 0; i < 16; i++) {
+ s->palette[i] = 0xFF000000 | (AV_RB24(p) << 2) | ((AV_RB24(p) >> 4) & 0x30303);
+ p += 3;
+ }
+ } else {
+ for (i = 0; i < 16; i++)
+ s->palette[i] = 0xFF000000 | ff_cga_palette[i];
+ }
+
+ if ((s->flags & BINTEXT_FONT)) {
+ s->font = p;
+ } else {
+ switch(s->font_height) {
+ default:
+ av_log(avctx, AV_LOG_WARNING, "font height %i not supported\n", s->font_height);
+ s->font_height = 8;
+ case 8:
+ s->font = avpriv_cga_font;
+ break;
+ case 16:
+ s->font = avpriv_vga16_font;
+ break;
+ }
+ }
+
+ s->frame = av_frame_alloc();
+ if (!s->frame)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+#define DEFAULT_BG_COLOR 0
+av_unused static void hscroll(AVCodecContext *avctx)
+{
+ XbinContext *s = avctx->priv_data;
+ if (s->y < avctx->height - s->font_height) {
+ s->y += s->font_height;
+ } else {
+ memmove(s->frame->data[0], s->frame->data[0] + s->font_height*s->frame->linesize[0],
+ (avctx->height - s->font_height)*s->frame->linesize[0]);
+ memset(s->frame->data[0] + (avctx->height - s->font_height)*s->frame->linesize[0],
+ DEFAULT_BG_COLOR, s->font_height * s->frame->linesize[0]);
+ }
+}
+
+#define FONT_WIDTH 8
+
+/**
+ * Draw character to screen
+ */
+static void draw_char(AVCodecContext *avctx, int c, int a)
+{
+ XbinContext *s = avctx->priv_data;
+ if (s->y > avctx->height - s->font_height)
+ return;
+ ff_draw_pc_font(s->frame->data[0] + s->y * s->frame->linesize[0] + s->x,
+ s->frame->linesize[0], s->font, s->font_height, c,
+ a & 0x0F, a >> 4);
+ s->x += FONT_WIDTH;
+ if (s->x > avctx->width - FONT_WIDTH) {
+ s->x = 0;
+ s->y += s->font_height;
+ }
+}
+
+static int decode_frame(AVCodecContext *avctx,
+ void *data, int *got_frame,
+ AVPacket *avpkt)
+{
+ XbinContext *s = avctx->priv_data;
+ const uint8_t *buf = avpkt->data;
+ int buf_size = avpkt->size;
+ const uint8_t *buf_end = buf+buf_size;
+ int ret;
+
+ s->x = s->y = 0;
+ if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
+ return ret;
+ s->frame->pict_type = AV_PICTURE_TYPE_I;
+ s->frame->palette_has_changed = 1;
+ memcpy(s->frame->data[1], s->palette, 16 * 4);
+
+ if (avctx->codec_id == AV_CODEC_ID_XBIN) {
+ while (buf + 2 < buf_end) {
+ int i,c,a;
+ int type = *buf >> 6;
+ int count = (*buf & 0x3F) + 1;
+ buf++;
+ switch (type) {
+ case 0: //no compression
+ for (i = 0; i < count && buf + 1 < buf_end; i++) {
+ draw_char(avctx, buf[0], buf[1]);
+ buf += 2;
+ }
+ break;
+ case 1: //character compression
+ c = *buf++;
+ for (i = 0; i < count && buf < buf_end; i++)
+ draw_char(avctx, c, *buf++);
+ break;
+ case 2: //attribute compression
+ a = *buf++;
+ for (i = 0; i < count && buf < buf_end; i++)
+ draw_char(avctx, *buf++, a);
+ break;
+ case 3: //character/attribute compression
+ c = *buf++;
+ a = *buf++;
+ for (i = 0; i < count && buf < buf_end; i++)
+ draw_char(avctx, c, a);
+ break;
+ }
+ }
+ } else if (avctx->codec_id == AV_CODEC_ID_IDF) {
+ while (buf + 2 < buf_end) {
+ if (AV_RL16(buf) == 1) {
+ int i;
+ if (buf + 6 > buf_end)
+ break;
+ for (i = 0; i < buf[2]; i++)
+ draw_char(avctx, buf[4], buf[5]);
+ buf += 6;
+ } else {
+ draw_char(avctx, buf[0], buf[1]);
+ buf += 2;
+ }
+ }
+ } else {
+ while (buf + 1 < buf_end) {
+ draw_char(avctx, buf[0], buf[1]);
+ buf += 2;
+ }
+ }
+
+ if ((ret = av_frame_ref(data, s->frame)) < 0)
+ return ret;
+ *got_frame = 1;
+ return buf_size;
+}
+
+static av_cold int decode_end(AVCodecContext *avctx)
+{
+ XbinContext *s = avctx->priv_data;
+
+ av_frame_free(&s->frame);
+
+ return 0;
+}
+
+#if CONFIG_BINTEXT_DECODER
+AVCodec ff_bintext_decoder = {
+ .name = "bintext",
+ .long_name = NULL_IF_CONFIG_SMALL("Binary text"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_BINTEXT,
+ .priv_data_size = sizeof(XbinContext),
+ .init = decode_init,
+ .close = decode_end,
+ .decode = decode_frame,
- .capabilities = CODEC_CAP_DR1,
++ .capabilities = AV_CODEC_CAP_DR1,
+};
+#endif
+#if CONFIG_XBIN_DECODER
+AVCodec ff_xbin_decoder = {
+ .name = "xbin",
+ .long_name = NULL_IF_CONFIG_SMALL("eXtended BINary text"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_XBIN,
+ .priv_data_size = sizeof(XbinContext),
+ .init = decode_init,
+ .close = decode_end,
+ .decode = decode_frame,
- .capabilities = CODEC_CAP_DR1,
++ .capabilities = AV_CODEC_CAP_DR1,
+};
+#endif
+#if CONFIG_IDF_DECODER
+AVCodec ff_idf_decoder = {
+ .name = "idf",
+ .long_name = NULL_IF_CONFIG_SMALL("iCEDraw text"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_IDF,
+ .priv_data_size = sizeof(XbinContext),
+ .init = decode_init,
+ .close = decode_end,
+ .decode = decode_frame,
++ .capabilities = AV_CODEC_CAP_DR1,
+};
+#endif
.id = AV_CODEC_ID_CLJR,
.init = decode_init,
.decode = decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
};
+
--- /dev/null
- .capabilities = CODEC_CAP_DR1,
+/*
+ * CPiA video decoder.
+ * Copyright (c) 2010 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This decoder is based on the LGPL code available at
+ * https://v4l4j.googlecode.com/svn/v4l4j/trunk/libvideo/libv4lconvert/cpia1.c
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "get_bits.h"
+#include "internal.h"
+
+
+#define FRAME_HEADER_SIZE 64
+#define MAGIC_0 0x19 /**< First header byte */
+#define MAGIC_1 0x68 /**< Second header byte */
+#define SUBSAMPLE_420 0
+#define SUBSAMPLE_422 1
+#define YUVORDER_YUYV 0
+#define YUVORDER_UYVY 1
+#define NOT_COMPRESSED 0
+#define COMPRESSED 1
+#define NO_DECIMATION 0
+#define DECIMATION_ENAB 1
+#define EOL 0xfd /**< End Of Line marker */
+#define EOI 0xff /**< End Of Image marker */
+
+
+typedef struct {
+ AVFrame *frame;
+} CpiaContext;
+
+
+static int cpia_decode_frame(AVCodecContext *avctx,
+ void *data, int *got_frame, AVPacket* avpkt)
+{
+ CpiaContext* const cpia = avctx->priv_data;
+ int i,j,ret;
+
+ uint8_t* const header = avpkt->data;
+ uint8_t* src;
+ int src_size;
+ uint16_t linelength;
+ uint8_t skip;
+
+ AVFrame *frame = cpia->frame;
+ uint8_t *y, *u, *v, *y_end, *u_end, *v_end;
+
+ // Check header
+ if ( avpkt->size < FRAME_HEADER_SIZE
+ || header[0] != MAGIC_0 || header[1] != MAGIC_1
+ || (header[17] != SUBSAMPLE_420 && header[17] != SUBSAMPLE_422)
+ || (header[18] != YUVORDER_YUYV && header[18] != YUVORDER_UYVY)
+ || (header[28] != NOT_COMPRESSED && header[28] != COMPRESSED)
+ || (header[29] != NO_DECIMATION && header[29] != DECIMATION_ENAB)
+ ) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid header!\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ // currently unsupported properties
+ if (header[17] == SUBSAMPLE_422) {
+ avpriv_report_missing_feature(avctx, "4:2:2 subsampling");
+ return AVERROR_PATCHWELCOME;
+ }
+ if (header[18] == YUVORDER_UYVY) {
+ avpriv_report_missing_feature(avctx, "YUV byte order UYVY");
+ return AVERROR_PATCHWELCOME;
+ }
+ if (header[29] == DECIMATION_ENAB) {
+ avpriv_report_missing_feature(avctx, "Decimation");
+ return AVERROR_PATCHWELCOME;
+ }
+
+ src = header + FRAME_HEADER_SIZE;
+ src_size = avpkt->size - FRAME_HEADER_SIZE;
+
+ if (header[28] == NOT_COMPRESSED) {
+ frame->pict_type = AV_PICTURE_TYPE_I;
+ frame->key_frame = 1;
+ } else {
+ frame->pict_type = AV_PICTURE_TYPE_P;
+ frame->key_frame = 0;
+ }
+
+ // Get buffer filled with previous frame
+ if ((ret = ff_reget_buffer(avctx, frame)) < 0)
+ return ret;
+
+
+ for ( i = 0;
+ i < frame->height;
+ i++, src += linelength, src_size -= linelength
+ ) {
+ // Read line length, two byte little endian
+ linelength = AV_RL16(src);
+ src += 2;
+
+ if (src_size < linelength) {
+ av_frame_set_decode_error_flags(frame, FF_DECODE_ERROR_INVALID_BITSTREAM);
+ av_log(avctx, AV_LOG_WARNING, "Frame ended unexpectedly!\n");
+ break;
+ }
+ if (src[linelength - 1] != EOL) {
+ av_frame_set_decode_error_flags(frame, FF_DECODE_ERROR_INVALID_BITSTREAM);
+ av_log(avctx, AV_LOG_WARNING, "Wrong line length %d or line not terminated properly (found 0x%02x)!\n", linelength, src[linelength - 1]);
+ break;
+ }
+
+ /* Update the data pointers. Y data is on every line.
+ * U and V data on every second line
+ */
+ y = &frame->data[0][i * frame->linesize[0]];
+ u = &frame->data[1][(i >> 1) * frame->linesize[1]];
+ v = &frame->data[2][(i >> 1) * frame->linesize[2]];
+ y_end = y + frame->linesize[0] - 1;
+ u_end = u + frame->linesize[1] - 1;
+ v_end = v + frame->linesize[2] - 1;
+
+ if ((i & 1) && header[17] == SUBSAMPLE_420) {
+ /* We are on a odd line and 420 subsample is used.
+ * On this line only Y values are specified, one per pixel.
+ */
+ for (j = 0; j < linelength - 1; j++) {
+ if (y > y_end) {
+ av_frame_set_decode_error_flags(frame, FF_DECODE_ERROR_INVALID_BITSTREAM);
+ av_log(avctx, AV_LOG_WARNING, "Decoded data exceeded linesize!\n");
+ break;
+ }
+ if ((src[j] & 1) && header[28] == COMPRESSED) {
+ /* It seems that odd lines are always uncompressed, but
+ * we do it according to specification anyways.
+ */
+ skip = src[j] >> 1;
+ y += skip;
+ } else {
+ *(y++) = src[j];
+ }
+ }
+ } else if (header[17] == SUBSAMPLE_420) {
+ /* We are on an even line and 420 subsample is used.
+ * On this line each pair of pixels is described by four bytes.
+ */
+ for (j = 0; j < linelength - 4; ) {
+ if (y + 1 > y_end || u > u_end || v > v_end) {
+ av_frame_set_decode_error_flags(frame, FF_DECODE_ERROR_INVALID_BITSTREAM);
+ av_log(avctx, AV_LOG_WARNING, "Decoded data exceeded linesize!\n");
+ break;
+ }
+ if ((src[j] & 1) && header[28] == COMPRESSED) {
+ // Skip amount of pixels and move forward one byte
+ skip = src[j] >> 1;
+ y += skip;
+ u += skip >> 1;
+ v += skip >> 1;
+ j++;
+ } else {
+ // Set image data as specified and move forward 4 bytes
+ *(y++) = src[j];
+ *(u++) = src[j+1];
+ *(y++) = src[j+2];
+ *(v++) = src[j+3];
+ j += 4;
+ }
+ }
+ }
+ }
+
+ *got_frame = 1;
+ if ((ret = av_frame_ref(data, cpia->frame)) < 0)
+ return ret;
+
+ return avpkt->size;
+}
+
+static av_cold int cpia_decode_init(AVCodecContext *avctx)
+{
+ CpiaContext *s = avctx->priv_data;
+
+ // output pixel format
+ avctx->pix_fmt = AV_PIX_FMT_YUV420P;
+
+ /* The default timebase set by the v4l2 demuxer leads to probing which is buggy.
+ * Set some reasonable time_base to skip this.
+ */
+ if (avctx->time_base.num == 1 && avctx->time_base.den == 1000000) {
+ avctx->time_base.num = 1;
+ avctx->time_base.den = 60;
+ }
+
+ s->frame = av_frame_alloc();
+ if (!s->frame)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+static av_cold int cpia_decode_end(AVCodecContext *avctx)
+{
+ CpiaContext *s = avctx->priv_data;
+
+ av_frame_free(&s->frame);
+
+ return 0;
+}
+
/* Public registration of the CPiA (Vision webcam) video decoder. */
AVCodec ff_cpia_decoder = {
    .name           = "cpia",
    .long_name      = NULL_IF_CONFIG_SMALL("CPiA video format"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_CPIA,
    .priv_data_size = sizeof(CpiaContext),
    .init           = cpia_decode_init,
    .close          = cpia_decode_end,
    .decode         = cpia_decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1, // uses the generic get_buffer path (ff_reget_buffer)
};
--- /dev/null
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
+/*
+ * - CrystalHD decoder module -
+ *
+ * Copyright(C) 2010,2011 Philip Langdale <ffmpeg.philipl@overt.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * - Principles of Operation -
+ *
+ * The CrystalHD decoder operates at the bitstream level - which is an even
+ * higher level than the decoding hardware you typically see in modern GPUs.
+ * This means it has a very simple interface, in principle. You feed demuxed
+ * packets in one end and get decoded picture (fields/frames) out the other.
+ *
+ * Of course, nothing is ever that simple. Due, at the very least, to b-frame
+ * dependencies in the supported formats, the hardware has a delay between
+ * when a packet goes in, and when a picture comes out. Furthermore, this delay
+ * is not just a function of time, but also one of the dependency on additional
+ * frames being fed into the decoder to satisfy the b-frame dependencies.
+ *
+ * As such, a pipeline will build up that is roughly equivalent to the required
+ * DPB for the file being played. If that was all it took, things would still
+ * be simple - so, of course, it isn't.
+ *
+ * The hardware has a way of indicating that a picture is ready to be copied out,
+ * but this is unreliable - and sometimes the attempt will still fail so, based
+ * on testing, the code will wait until 3 pictures are ready before starting
+ * to copy out - and this has the effect of extending the pipeline.
+ *
+ * Finally, while it is tempting to say that once the decoder starts outputting
+ * frames, the software should never fail to return a frame from a decode(),
+ * this is a hard assertion to make, because the stream may switch between
+ * differently encoded content (number of b-frames, interlacing, etc) which
+ * might require a longer pipeline than before. If that happened, you could
+ * deadlock trying to retrieve a frame that can't be decoded without feeding
+ * in additional packets.
+ *
+ * As such, the code will return in the event that a picture cannot be copied
+ * out, leading to an increase in the length of the pipeline. This in turn,
+ * means we have to be sensitive to the time it takes to decode a picture;
+ * We do not want to give up just because the hardware needed a little more
+ * time to prepare the picture! For this reason, there are delays included
+ * in the decode() path that ensure that, under normal conditions, the hardware
+ * will only fail to return a frame if it really needs additional packets to
+ * complete the decoding.
+ *
+ * Finally, to be explicit, we do not want the pipeline to grow without bound
+ * for two reasons: 1) The hardware can only buffer a finite number of packets,
+ * and 2) The client application may not be able to cope with arbitrarily long
+ * delays in the video path relative to the audio path. For example. MPlayer
+ * can only handle a 20 picture delay (although this is arbitrary, and needs
+ * to be extended to fully support the CrystalHD where the delay could be up
+ * to 32 pictures - consider PAFF H.264 content with 16 b-frames).
+ */
+
+/*****************************************************************************
+ * Includes
+ ****************************************************************************/
+
+#define _XOPEN_SOURCE 600
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <libcrystalhd/bc_dts_types.h>
+#include <libcrystalhd/bc_dts_defs.h>
+#include <libcrystalhd/libcrystalhd_if.h>
+
+#include "avcodec.h"
+#include "h264.h"
+#include "internal.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/opt.h"
+
+#if HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+/** Timeout parameter passed to DtsProcOutput() in us */
+#define OUTPUT_PROC_TIMEOUT 50
+/** Step between fake timestamps passed to hardware in units of 100ns */
+#define TIMESTAMP_UNIT 100000
+/** Initial value in us of the wait in decode() */
+#define BASE_WAIT 10000
+/** Increment in us to adjust wait in decode() */
+#define WAIT_UNIT 1000
+
+
+/*****************************************************************************
+ * Module private data
+ ****************************************************************************/
+
/* Result codes from the output/copy path; positive values ask the
 * decode() loop to take further action. */
typedef enum {
    RET_ERROR = -1,           /* unrecoverable failure */
    RET_OK = 0,               /* done (a frame may or may not have been output) */
    RET_COPY_AGAIN = 1,       /* call receive_frame() again */
    RET_SKIP_NEXT_COPY = 2,   /* skip the next output to keep pipeline length */
    RET_COPY_NEXT_FIELD = 3,  /* second field of the pair is ready immediately */
} CopyRet;
+
/* Node mapping the fake timestamp handed to the hardware back to the
 * caller's original pts and the parsed picture type. */
typedef struct OpaqueList {
    struct OpaqueList *next;
    uint64_t fake_timestamp;    /* value passed to/returned by the hardware */
    uint64_t reordered_opaque;  /* original pts to restore on output */
    uint8_t pic_type;           /* parser picture structure (PICT_* value) */
} OpaqueList;
+
/* Per-instance CrystalHD decoder state. */
typedef struct {
    AVClass *av_class;
    AVCodecContext *avctx;
    AVFrame *pic;               /* frame assembled across one or two fields */
    HANDLE dev;                 /* CrystalHD device handle */

    uint8_t *orig_extradata;    /* backup of extradata, restored in uninit() */
    uint32_t orig_extradata_size;

    AVBitStreamFilterContext *bsfc;  /* h264_mp4toannexb when input is AVC1 */
    AVCodecParserContext *parser;    /* h.264 parser for interlacing detection */

    uint8_t is_70012;           /* BCM70012 device: needs padded source stride */
    uint8_t *sps_pps_buf;
    uint32_t sps_pps_size;
    uint8_t is_nal;             /* input is length-prefixed (AVC1) h.264 */
    uint8_t output_ready;       /* pictures seen ready before copying out */
    uint8_t need_second_field;  /* first field copied, waiting for the second */
    uint8_t skip_next_output;   /* skip next copy to keep pipeline length */
    uint64_t decode_wait;       /* current wait in us applied in decode() */

    uint64_t last_picture;      /* last hw picture number; (uint64_t)-1 = none */

    OpaqueList *head;           /* pts-mapping list, built in decode order */
    OpaqueList *tail;

    /* Options */
    uint32_t sWidth;            /* crystalhd_downscale_width option value */
    uint8_t bframe_bug;         /* packed b-frame bug work-around active */
} CHDContext;
+
/* AVOption table: exposes optional hardware downscaling to a given width. */
static const AVOption options[] = {
    { "crystalhd_downscale_width",
      "Turn on downscaling to the specified width",
      offsetof(CHDContext, sWidth),
      AV_OPT_TYPE_INT, {.i64 = 0}, 0, UINT32_MAX,
      AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM, },
    { NULL, },
};
+
+
+/*****************************************************************************
+ * Helper functions
+ ****************************************************************************/
+
+static inline BC_MEDIA_SUBTYPE id2subtype(CHDContext *priv, enum AVCodecID id)
+{
+ switch (id) {
+ case AV_CODEC_ID_MPEG4:
+ return BC_MSUBTYPE_DIVX;
+ case AV_CODEC_ID_MSMPEG4V3:
+ return BC_MSUBTYPE_DIVX311;
+ case AV_CODEC_ID_MPEG2VIDEO:
+ return BC_MSUBTYPE_MPEG2VIDEO;
+ case AV_CODEC_ID_VC1:
+ return BC_MSUBTYPE_VC1;
+ case AV_CODEC_ID_WMV3:
+ return BC_MSUBTYPE_WMV3;
+ case AV_CODEC_ID_H264:
+ return priv->is_nal ? BC_MSUBTYPE_AVC1 : BC_MSUBTYPE_H264;
+ default:
+ return BC_MSUBTYPE_INVALID;
+ }
+}
+
/* Dump all picture metadata returned by the hardware at VERBOSE level. */
static inline void print_frame_info(CHDContext *priv, BC_DTS_PROC_OUT *output)
{
    av_log(priv->avctx, AV_LOG_VERBOSE, "\tYBuffSz: %u\n", output->YbuffSz);
    av_log(priv->avctx, AV_LOG_VERBOSE, "\tYBuffDoneSz: %u\n",
           output->YBuffDoneSz);
    av_log(priv->avctx, AV_LOG_VERBOSE, "\tUVBuffDoneSz: %u\n",
           output->UVBuffDoneSz);
    av_log(priv->avctx, AV_LOG_VERBOSE, "\tTimestamp: %"PRIu64"\n",
           output->PicInfo.timeStamp);
    av_log(priv->avctx, AV_LOG_VERBOSE, "\tPicture Number: %u\n",
           output->PicInfo.picture_number);
    av_log(priv->avctx, AV_LOG_VERBOSE, "\tWidth: %u\n",
           output->PicInfo.width);
    av_log(priv->avctx, AV_LOG_VERBOSE, "\tHeight: %u\n",
           output->PicInfo.height);
    av_log(priv->avctx, AV_LOG_VERBOSE, "\tChroma: 0x%03x\n",
           output->PicInfo.chroma_format);
    av_log(priv->avctx, AV_LOG_VERBOSE, "\tPulldown: %u\n",
           output->PicInfo.pulldown);
    av_log(priv->avctx, AV_LOG_VERBOSE, "\tFlags: 0x%08x\n",
           output->PicInfo.flags);
    av_log(priv->avctx, AV_LOG_VERBOSE, "\tFrame Rate/Res: %u\n",
           output->PicInfo.frame_rate);
    av_log(priv->avctx, AV_LOG_VERBOSE, "\tAspect Ratio: %u\n",
           output->PicInfo.aspect_ratio);
    av_log(priv->avctx, AV_LOG_VERBOSE, "\tColor Primaries: %u\n",
           output->PicInfo.colour_primaries);
    av_log(priv->avctx, AV_LOG_VERBOSE, "\tMetaData: %u\n",
           output->PicInfo.picture_meta_payload);
    av_log(priv->avctx, AV_LOG_VERBOSE, "\tSession Number: %u\n",
           output->PicInfo.sess_num);
    av_log(priv->avctx, AV_LOG_VERBOSE, "\tycom: %u\n",
           output->PicInfo.ycom);
    av_log(priv->avctx, AV_LOG_VERBOSE, "\tCustom Aspect: %u\n",
           output->PicInfo.custom_aspect_ratio_width_height);
    av_log(priv->avctx, AV_LOG_VERBOSE, "\tFrames to Drop: %u\n",
           output->PicInfo.n_drop);
    av_log(priv->avctx, AV_LOG_VERBOSE, "\tH264 Valid Fields: 0x%08x\n",
           output->PicInfo.other.h264.valid);
}
+
+
+/*****************************************************************************
+ * OpaqueList functions
+ ****************************************************************************/
+
+static uint64_t opaque_list_push(CHDContext *priv, uint64_t reordered_opaque,
+ uint8_t pic_type)
+{
+ OpaqueList *newNode = av_mallocz(sizeof (OpaqueList));
+ if (!newNode) {
+ av_log(priv->avctx, AV_LOG_ERROR,
+ "Unable to allocate new node in OpaqueList.\n");
+ return 0;
+ }
+ if (!priv->head) {
+ newNode->fake_timestamp = TIMESTAMP_UNIT;
+ priv->head = newNode;
+ } else {
+ newNode->fake_timestamp = priv->tail->fake_timestamp + TIMESTAMP_UNIT;
+ priv->tail->next = newNode;
+ }
+ priv->tail = newNode;
+ newNode->reordered_opaque = reordered_opaque;
+ newNode->pic_type = pic_type;
+
+ return newNode->fake_timestamp;
+}
+
+/*
+ * The OpaqueList is built in decode order, while elements will be removed
+ * in presentation order. If frames are reordered, this means we must be
+ * able to remove elements that are not the first element.
+ *
+ * Returned node must be freed by caller.
+ */
+static OpaqueList *opaque_list_pop(CHDContext *priv, uint64_t fake_timestamp)
+{
+ OpaqueList *node = priv->head;
+
+ if (!priv->head) {
+ av_log(priv->avctx, AV_LOG_ERROR,
+ "CrystalHD: Attempted to query non-existent timestamps.\n");
+ return NULL;
+ }
+
+ /*
+ * The first element is special-cased because we have to manipulate
+ * the head pointer rather than the previous element in the list.
+ */
+ if (priv->head->fake_timestamp == fake_timestamp) {
+ priv->head = node->next;
+
+ if (!priv->head->next)
+ priv->tail = priv->head;
+
+ node->next = NULL;
+ return node;
+ }
+
+ /*
+ * The list is processed at arm's length so that we have the
+ * previous element available to rewrite its next pointer.
+ */
+ while (node->next) {
+ OpaqueList *current = node->next;
+ if (current->fake_timestamp == fake_timestamp) {
+ node->next = current->next;
+
+ if (!node->next)
+ priv->tail = node;
+
+ current->next = NULL;
+ return current;
+ } else {
+ node = current;
+ }
+ }
+
+ av_log(priv->avctx, AV_LOG_VERBOSE,
+ "CrystalHD: Couldn't match fake_timestamp.\n");
+ return NULL;
+}
+
+
+/*****************************************************************************
+ * Video decoder API function definitions
+ ****************************************************************************/
+
/* Seek/flush entry point: reset all pipeline bookkeeping and drop any
 * buffered data in both the library and the hardware. */
static void flush(AVCodecContext *avctx)
{
    CHDContext *priv = avctx->priv_data;

    avctx->has_b_frames = 0;        /* pipeline-length counter, see decode() */
    priv->last_picture = -1;        /* (uint64_t)-1 sentinel checked in receive_frame() */
    priv->output_ready = 0;
    priv->need_second_field = 0;
    priv->skip_next_output = 0;
    priv->decode_wait = BASE_WAIT;

    av_frame_unref (priv->pic);

    /* Flush mode 4 flushes all software and hardware buffers. */
    DtsFlushInput(priv->dev, 4);
}
+
+
/* Close the decoder: stop/close the hardware, restore the original
 * extradata, and release parser, BSF, frame, and pts-list resources.
 * Also used as the failure path of init(). */
static av_cold int uninit(AVCodecContext *avctx)
{
    CHDContext *priv = avctx->priv_data;
    HANDLE device;

    device = priv->dev;
    DtsStopDecoder(device);
    DtsCloseDecoder(device);
    DtsDeviceClose(device);

    /*
     * Restore original extradata, so that if the decoder is
     * reinitialised, the bitstream detection and filtering
     * will work as expected.
     */
    if (priv->orig_extradata) {
        av_free(avctx->extradata);
        avctx->extradata = priv->orig_extradata;
        avctx->extradata_size = priv->orig_extradata_size;
        priv->orig_extradata = NULL;
        priv->orig_extradata_size = 0;
    }

    av_parser_close(priv->parser);
    if (priv->bsfc) {
        av_bitstream_filter_close(priv->bsfc);
    }

    av_freep(&priv->sps_pps_buf);

    av_frame_free (&priv->pic);

    /* Walk and free the fake-timestamp mapping list. */
    if (priv->head) {
        OpaqueList *node = priv->head;
        while (node) {
            OpaqueList *next = node->next;
            av_free(node);
            node = next;
        }
    }

    return 0;
}
+
+
+static av_cold int init(AVCodecContext *avctx)
+{
+ CHDContext* priv;
+ BC_STATUS ret;
+ BC_INFO_CRYSTAL version;
+ BC_INPUT_FORMAT format = {
+ .FGTEnable = FALSE,
+ .Progressive = TRUE,
+ .OptFlags = 0x80000000 | vdecFrameRate59_94 | 0x40,
+ .width = avctx->width,
+ .height = avctx->height,
+ };
+
+ BC_MEDIA_SUBTYPE subtype;
+
+ uint32_t mode = DTS_PLAYBACK_MODE |
+ DTS_LOAD_FILE_PLAY_FW |
+ DTS_SKIP_TX_CHK_CPB |
+ DTS_PLAYBACK_DROP_RPT_MODE |
+ DTS_SINGLE_THREADED_MODE |
+ DTS_DFLT_RESOLUTION(vdecRESOLUTION_1080p23_976);
+
+ av_log(avctx, AV_LOG_VERBOSE, "CrystalHD Init for %s\n",
+ avctx->codec->name);
+
+ avctx->pix_fmt = AV_PIX_FMT_YUYV422;
+
+ /* Initialize the library */
+ priv = avctx->priv_data;
+ priv->avctx = avctx;
+ priv->is_nal = avctx->extradata_size > 0 && *(avctx->extradata) == 1;
+ priv->last_picture = -1;
+ priv->decode_wait = BASE_WAIT;
+ priv->pic = av_frame_alloc();
+
+ subtype = id2subtype(priv, avctx->codec->id);
+ switch (subtype) {
+ case BC_MSUBTYPE_AVC1:
+ {
+ uint8_t *dummy_p;
+ int dummy_int;
+
+ /* Back up the extradata so it can be restored at close time. */
+ priv->orig_extradata = av_malloc(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ if (!priv->orig_extradata) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Failed to allocate copy of extradata\n");
+ return AVERROR(ENOMEM);
+ }
+ priv->orig_extradata_size = avctx->extradata_size;
+ memcpy(priv->orig_extradata, avctx->extradata, avctx->extradata_size);
+
+ priv->bsfc = av_bitstream_filter_init("h264_mp4toannexb");
+ if (!priv->bsfc) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Cannot open the h264_mp4toannexb BSF!\n");
+ return AVERROR_BSF_NOT_FOUND;
+ }
+ av_bitstream_filter_filter(priv->bsfc, avctx, NULL, &dummy_p,
+ &dummy_int, NULL, 0, 0);
+ }
+ subtype = BC_MSUBTYPE_H264;
+ // Fall-through
+ case BC_MSUBTYPE_H264:
+ format.startCodeSz = 4;
+ // Fall-through
+ case BC_MSUBTYPE_VC1:
+ case BC_MSUBTYPE_WVC1:
+ case BC_MSUBTYPE_WMV3:
+ case BC_MSUBTYPE_WMVA:
+ case BC_MSUBTYPE_MPEG2VIDEO:
+ case BC_MSUBTYPE_DIVX:
+ case BC_MSUBTYPE_DIVX311:
+ format.pMetaData = avctx->extradata;
+ format.metaDataSz = avctx->extradata_size;
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "CrystalHD: Unknown codec name\n");
+ return AVERROR(EINVAL);
+ }
+ format.mSubtype = subtype;
+
+ if (priv->sWidth) {
+ format.bEnableScaling = 1;
+ format.ScalingParams.sWidth = priv->sWidth;
+ }
+
+ /* Get a decoder instance */
+ av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: starting up\n");
+ // Initialize the Link and Decoder devices
+ ret = DtsDeviceOpen(&priv->dev, mode);
+ if (ret != BC_STS_SUCCESS) {
+ av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: DtsDeviceOpen failed\n");
+ goto fail;
+ }
+
+ ret = DtsCrystalHDVersion(priv->dev, &version);
+ if (ret != BC_STS_SUCCESS) {
+ av_log(avctx, AV_LOG_VERBOSE,
+ "CrystalHD: DtsCrystalHDVersion failed\n");
+ goto fail;
+ }
+ priv->is_70012 = version.device == 0;
+
+ if (priv->is_70012 &&
+ (subtype == BC_MSUBTYPE_DIVX || subtype == BC_MSUBTYPE_DIVX311)) {
+ av_log(avctx, AV_LOG_VERBOSE,
+ "CrystalHD: BCM70012 doesn't support MPEG4-ASP/DivX/Xvid\n");
+ goto fail;
+ }
+
+ ret = DtsSetInputFormat(priv->dev, &format);
+ if (ret != BC_STS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "CrystalHD: SetInputFormat failed\n");
+ goto fail;
+ }
+
+ ret = DtsOpenDecoder(priv->dev, BC_STREAM_TYPE_ES);
+ if (ret != BC_STS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsOpenDecoder failed\n");
+ goto fail;
+ }
+
+ ret = DtsSetColorSpace(priv->dev, OUTPUT_MODE422_YUY2);
+ if (ret != BC_STS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsSetColorSpace failed\n");
+ goto fail;
+ }
+ ret = DtsStartDecoder(priv->dev);
+ if (ret != BC_STS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartDecoder failed\n");
+ goto fail;
+ }
+ ret = DtsStartCapture(priv->dev);
+ if (ret != BC_STS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartCapture failed\n");
+ goto fail;
+ }
+
+ if (avctx->codec->id == AV_CODEC_ID_H264) {
+ priv->parser = av_parser_init(avctx->codec->id);
+ if (!priv->parser)
+ av_log(avctx, AV_LOG_WARNING,
+ "Cannot open the h.264 parser! Interlaced h.264 content "
+ "will not be detected reliably.\n");
+ priv->parser->flags = PARSER_FLAG_COMPLETE_FRAMES;
+ }
+ av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Init complete.\n");
+
+ return 0;
+
+ fail:
+ uninit(avctx);
+ return -1;
+}
+
+
/*
 * Copy one decoded picture (a frame, or one field of a pair) from the
 * hardware output buffer into priv->pic. When a complete frame is
 * available it is returned to the caller via *data / *got_frame.
 * Returns a CopyRet telling decode() whether to stop, re-poll, skip the
 * next output, or fetch the second field immediately.
 */
static inline CopyRet copy_frame(AVCodecContext *avctx,
                                 BC_DTS_PROC_OUT *output,
                                 void *data, int *got_frame)
{
    BC_STATUS ret;
    BC_DTS_STATUS decoder_status = { 0, };
    uint8_t trust_interlaced;
    uint8_t interlaced;

    CHDContext *priv = avctx->priv_data;
    int64_t pkt_pts = AV_NOPTS_VALUE;
    uint8_t pic_type = 0;

    uint8_t bottom_field = (output->PicInfo.flags & VDEC_FLAG_BOTTOMFIELD) ==
                           VDEC_FLAG_BOTTOMFIELD;
    uint8_t bottom_first = !!(output->PicInfo.flags & VDEC_FLAG_BOTTOM_FIRST);

    int width    = output->PicInfo.width;
    int height   = output->PicInfo.height;
    int bwidth;
    uint8_t *src = output->Ybuff;
    int sStride;
    uint8_t *dst;
    int dStride;

    /* Map the hardware's fake timestamp back to the caller's pts and the
     * picture type recorded when the packet was fed in. */
    if (output->PicInfo.timeStamp != 0) {
        OpaqueList *node = opaque_list_pop(priv, output->PicInfo.timeStamp);
        if (node) {
            pkt_pts = node->reordered_opaque;
            pic_type = node->pic_type;
            av_free(node);
        } else {
            /*
             * We will encounter a situation where a timestamp cannot be
             * popped if a second field is being returned. In this case,
             * each field has the same timestamp and the first one will
             * cause it to be popped. To keep subsequent calculations
             * simple, pic_type should be set to a FIELD value - doesn't
             * matter which, but I chose BOTTOM.
             */
            pic_type = PICT_BOTTOM_FIELD;
        }
        av_log(avctx, AV_LOG_VERBOSE, "output \"pts\": %"PRIu64"\n",
               output->PicInfo.timeStamp);
        av_log(avctx, AV_LOG_VERBOSE, "output picture type %d\n",
               pic_type);
    }

    ret = DtsGetDriverStatus(priv->dev, &decoder_status);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR,
               "CrystalHD: GetDriverStatus failed: %u\n", ret);
        return RET_ERROR;
    }

    /*
     * For most content, we can trust the interlaced flag returned
     * by the hardware, but sometimes we can't. These are the
     * conditions under which we can trust the flag:
     *
     * 1) It's not h.264 content
     * 2) The UNKNOWN_SRC flag is not set
     * 3) We know we're expecting a second field
     * 4) The hardware reports this picture and the next picture
     *    have the same picture number.
     *
     * Note that there can still be interlaced content that will
     * fail this check, if the hardware hasn't decoded the next
     * picture or if there is a corruption in the stream. (In either
     * case a 0 will be returned for the next picture number)
     */
    trust_interlaced = avctx->codec->id != AV_CODEC_ID_H264 ||
                       !(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) ||
                       priv->need_second_field ||
                       (decoder_status.picNumFlags & ~0x40000000) ==
                       output->PicInfo.picture_number;

    /*
     * If we got a false negative for trust_interlaced on the first field,
     * we will realise our mistake here when we see that the picture number is that
     * of the previous picture. We cannot recover the frame and should discard the
     * second field to keep the correct number of output frames.
     */
    if (output->PicInfo.picture_number == priv->last_picture && !priv->need_second_field) {
        av_log(avctx, AV_LOG_WARNING,
               "Incorrectly guessed progressive frame. Discarding second field\n");
        /* Returning without providing a picture. */
        return RET_OK;
    }

    interlaced = (output->PicInfo.flags & VDEC_FLAG_INTERLACED_SRC) &&
                 trust_interlaced;

    if (!trust_interlaced && (decoder_status.picNumFlags & ~0x40000000) == 0) {
        av_log(avctx, AV_LOG_VERBOSE,
               "Next picture number unknown. Assuming progressive frame.\n");
    }

    av_log(avctx, AV_LOG_VERBOSE, "Interlaced state: %d | trust_interlaced %d\n",
           interlaced, trust_interlaced);

    /* A finished frame's buffer is released; a half-filled field pair is
     * kept so the second field can be woven in below. */
    if (priv->pic->data[0] && !priv->need_second_field)
        av_frame_unref(priv->pic);

    priv->need_second_field = interlaced && !priv->need_second_field;

    if (!priv->pic->data[0]) {
        if (ff_get_buffer(avctx, priv->pic, AV_GET_BUFFER_FLAG_REF) < 0)
            return RET_ERROR;
    }

    bwidth = av_image_get_linesize(avctx->pix_fmt, width, 0);
    if (priv->is_70012) {
        /* The BCM70012 pads the source to fixed strides. */
        int pStride;

        if (width <= 720)
            pStride = 720;
        else if (width <= 1280)
            pStride = 1280;
        else pStride = 1920;
        sStride = av_image_get_linesize(avctx->pix_fmt, pStride, 0);
    } else {
        sStride = bwidth;
    }

    dStride = priv->pic->linesize[0];
    dst     = priv->pic->data[0];

    av_log(priv->avctx, AV_LOG_VERBOSE, "CrystalHD: Copying out frame\n");

    if (interlaced) {
        int dY = 0;
        int sY = 0;

        height /= 2;
        /* Weave this field into every other destination line, offset by
         * one line for the bottom field. */
        if (bottom_field) {
            av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: bottom field\n");
            dY = 1;
        } else {
            av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: top field\n");
            dY = 0;
        }

        for (sY = 0; sY < height; dY++, sY++) {
            memcpy(&(dst[dY * dStride]), &(src[sY * sStride]), bwidth);
            dY++;   /* dY advances by 2 per iteration: skip the other field's line */
        }
    } else {
        av_image_copy_plane(dst, dStride, src, sStride, bwidth, height);
    }

    priv->pic->interlaced_frame = interlaced;
    if (interlaced)
        priv->pic->top_field_first = !bottom_first;

    priv->pic->pkt_pts = pkt_pts;

    if (!priv->need_second_field) {
        *got_frame       = 1;
        if ((ret = av_frame_ref(data, priv->pic)) < 0) {
            return ret;
        }
    }

    /*
     * Two types of PAFF content have been observed. One form causes the
     * hardware to return a field pair and the other individual fields,
     * even though the input is always individual fields. We must skip
     * copying on the next decode() call to maintain pipeline length in
     * the first case.
     */
    if (!interlaced && (output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) &&
        (pic_type == PICT_TOP_FIELD || pic_type == PICT_BOTTOM_FIELD)) {
        av_log(priv->avctx, AV_LOG_VERBOSE, "Fieldpair from two packets.\n");
        return RET_SKIP_NEXT_COPY;
    }

    /*
     * The logic here is purely based on empirical testing with samples.
     * If we need a second field, it could come from a second input packet,
     * or it could come from the same field-pair input packet at the current
     * field. In the first case, we should return and wait for the next time
     * round to get the second field, while in the second case, we should
     * ask the decoder for it immediately.
     *
     * Testing has shown that we are dealing with the fieldpair -> two fields
     * case if the VDEC_FLAG_UNKNOWN_SRC is not set or if the input picture
     * type was PICT_FRAME (in this second case, the flag might still be set)
     */
    return priv->need_second_field &&
           (!(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) ||
            pic_type == PICT_FRAME) ?
           RET_COPY_NEXT_FIELD : RET_OK;
}
+
+
/*
 * Poll the hardware for one output picture. Handles three outcomes:
 * a format change (adopt the new dimensions/aspect and re-poll), a
 * successful picture (validate its PIB, track picture numbers, and hand
 * off to copy_frame()), or busy/error. Returns a CopyRet for decode().
 */
static inline CopyRet receive_frame(AVCodecContext *avctx,
                                    void *data, int *got_frame)
{
    BC_STATUS ret;
    BC_DTS_PROC_OUT output = {
        .PicInfo.width  = avctx->width,
        .PicInfo.height = avctx->height,
    };
    CHDContext *priv = avctx->priv_data;
    HANDLE dev       = priv->dev;

    *got_frame = 0;

    // Request decoded data from the driver
    ret = DtsProcOutputNoCopy(dev, OUTPUT_PROC_TIMEOUT, &output);
    if (ret == BC_STS_FMT_CHANGE) {
        av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Initial format change\n");
        avctx->width  = output.PicInfo.width;
        avctx->height = output.PicInfo.height;
        /* Translate the hardware's aspect-ratio enum to a sample aspect
         * ratio; unknown values leave the current SAR untouched. */
        switch ( output.PicInfo.aspect_ratio ) {
        case vdecAspectRatioSquare:
            avctx->sample_aspect_ratio = (AVRational) {  1,  1};
            break;
        case vdecAspectRatio12_11:
            avctx->sample_aspect_ratio = (AVRational) { 12, 11};
            break;
        case vdecAspectRatio10_11:
            avctx->sample_aspect_ratio = (AVRational) { 10, 11};
            break;
        case vdecAspectRatio16_11:
            avctx->sample_aspect_ratio = (AVRational) { 16, 11};
            break;
        case vdecAspectRatio40_33:
            avctx->sample_aspect_ratio = (AVRational) { 40, 33};
            break;
        case vdecAspectRatio24_11:
            avctx->sample_aspect_ratio = (AVRational) { 24, 11};
            break;
        case vdecAspectRatio20_11:
            avctx->sample_aspect_ratio = (AVRational) { 20, 11};
            break;
        case vdecAspectRatio32_11:
            avctx->sample_aspect_ratio = (AVRational) { 32, 11};
            break;
        case vdecAspectRatio80_33:
            avctx->sample_aspect_ratio = (AVRational) { 80, 33};
            break;
        case vdecAspectRatio18_11:
            avctx->sample_aspect_ratio = (AVRational) { 18, 11};
            break;
        case vdecAspectRatio15_11:
            avctx->sample_aspect_ratio = (AVRational) { 15, 11};
            break;
        case vdecAspectRatio64_33:
            avctx->sample_aspect_ratio = (AVRational) { 64, 33};
            break;
        case vdecAspectRatio160_99:
            avctx->sample_aspect_ratio = (AVRational) {160, 99};
            break;
        case vdecAspectRatio4_3:
            avctx->sample_aspect_ratio = (AVRational) {  4,  3};
            break;
        case vdecAspectRatio16_9:
            avctx->sample_aspect_ratio = (AVRational) { 16,  9};
            break;
        case vdecAspectRatio221_1:
            avctx->sample_aspect_ratio = (AVRational) {221,  1};
            break;
        }
        return RET_COPY_AGAIN;
    } else if (ret == BC_STS_SUCCESS) {
        int copy_ret = -1;
        if (output.PoutFlags & BC_POUT_FLAGS_PIB_VALID) {
            if (priv->last_picture == -1) {
                /*
                 * Init to one less, so that the incrementing code doesn't
                 * need to be special-cased.
                 */
                priv->last_picture = output.PicInfo.picture_number - 1;
            }

            if (avctx->codec->id == AV_CODEC_ID_MPEG4 &&
                output.PicInfo.timeStamp == 0 && priv->bframe_bug) {
                /* Packed b-frame work-around: drop the duplicate picture. */
                av_log(avctx, AV_LOG_VERBOSE,
                       "CrystalHD: Not returning packed frame twice.\n");
                priv->last_picture++;
                DtsReleaseOutputBuffs(dev, NULL, FALSE);
                return RET_COPY_AGAIN;
            }

            print_frame_info(priv, &output);

            if (priv->last_picture + 1 < output.PicInfo.picture_number) {
                av_log(avctx, AV_LOG_WARNING,
                       "CrystalHD: Picture Number discontinuity\n");
                /*
                 * Have we lost frames? If so, we need to shrink the
                 * pipeline length appropriately.
                 *
                 * XXX: I have no idea what the semantics of this situation
                 * are so I don't even know if we've lost frames or which
                 * ones.
                 *
                 * In any case, only warn the first time.
                 */
               priv->last_picture = output.PicInfo.picture_number - 1;
            }

            copy_ret = copy_frame(avctx, &output, data, got_frame);
            if (*got_frame > 0) {
                avctx->has_b_frames--;
                priv->last_picture++;
                av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Pipeline length: %u\n",
                       avctx->has_b_frames);
            }
        } else {
            /*
             * An invalid frame has been consumed.
             */
            av_log(avctx, AV_LOG_ERROR, "CrystalHD: ProcOutput succeeded with "
                                        "invalid PIB\n");
            avctx->has_b_frames--;
            copy_ret = RET_OK;
        }
        DtsReleaseOutputBuffs(dev, NULL, FALSE);

        return copy_ret;
    } else if (ret == BC_STS_BUSY) {
        return RET_COPY_AGAIN;
    } else {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: ProcOutput failed %d\n", ret);
        return RET_ERROR;
    }
}
+
+
+static int decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
+{
+ BC_STATUS ret;
+ BC_DTS_STATUS decoder_status = { 0, };
+ CopyRet rec_ret;
+ CHDContext *priv = avctx->priv_data;
+ HANDLE dev = priv->dev;
+ uint8_t *in_data = avpkt->data;
+ int len = avpkt->size;
+ int free_data = 0;
+ uint8_t pic_type = 0;
+
+ av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: decode_frame\n");
+
+ if (avpkt->size == 7 && !priv->bframe_bug) {
+ /*
+ * The use of a drop frame triggers the bug
+ */
+ av_log(avctx, AV_LOG_INFO,
+ "CrystalHD: Enabling work-around for packed b-frame bug\n");
+ priv->bframe_bug = 1;
+ } else if (avpkt->size == 8 && priv->bframe_bug) {
+ /*
+ * Delay frames don't trigger the bug
+ */
+ av_log(avctx, AV_LOG_INFO,
+ "CrystalHD: Disabling work-around for packed b-frame bug\n");
+ priv->bframe_bug = 0;
+ }
+
+ if (len) {
+ int32_t tx_free = (int32_t)DtsTxFreeSize(dev);
+
+ if (priv->parser) {
+ int ret = 0;
+
+ if (priv->bsfc) {
+ ret = av_bitstream_filter_filter(priv->bsfc, avctx, NULL,
+ &in_data, &len,
+ avpkt->data, len, 0);
+ }
+ free_data = ret > 0;
+
+ if (ret >= 0) {
+ uint8_t *pout;
+ int psize;
+ int index;
+ H264Context *h = priv->parser->priv_data;
+
+ index = av_parser_parse2(priv->parser, avctx, &pout, &psize,
+ in_data, len, avctx->internal->pkt->pts,
+ avctx->internal->pkt->dts, 0);
+ if (index < 0) {
+ av_log(avctx, AV_LOG_WARNING,
+ "CrystalHD: Failed to parse h.264 packet to "
+ "detect interlacing.\n");
+ } else if (index != len) {
+ av_log(avctx, AV_LOG_WARNING,
+ "CrystalHD: Failed to parse h.264 packet "
+ "completely. Interlaced frames may be "
+ "incorrectly detected.\n");
+ } else {
+ av_log(avctx, AV_LOG_VERBOSE,
+ "CrystalHD: parser picture type %d\n",
+ h->picture_structure);
+ pic_type = h->picture_structure;
+ }
+ } else {
+ av_log(avctx, AV_LOG_WARNING,
+ "CrystalHD: mp4toannexb filter failed to filter "
+ "packet. Interlaced frames may be incorrectly "
+ "detected.\n");
+ }
+ }
+
+ if (len < tx_free - 1024) {
+ /*
+ * Despite being notionally opaque, either libcrystalhd or
+ * the hardware itself will mangle pts values that are too
+ * small or too large. The docs claim it should be in units
+ * of 100ns. Given that we're nominally dealing with a black
+ * box on both sides, any transform we do has no guarantee of
+ * avoiding mangling so we need to build a mapping to values
+ * we know will not be mangled.
+ */
+ uint64_t pts = opaque_list_push(priv, avctx->internal->pkt->pts, pic_type);
+ if (!pts) {
+ if (free_data) {
+ av_freep(&in_data);
+ }
+ return AVERROR(ENOMEM);
+ }
+ av_log(priv->avctx, AV_LOG_VERBOSE,
+ "input \"pts\": %"PRIu64"\n", pts);
+ ret = DtsProcInput(dev, in_data, len, pts, 0);
+ if (free_data) {
+ av_freep(&in_data);
+ }
+ if (ret == BC_STS_BUSY) {
+ av_log(avctx, AV_LOG_WARNING,
+ "CrystalHD: ProcInput returned busy\n");
+ usleep(BASE_WAIT);
+ return AVERROR(EBUSY);
+ } else if (ret != BC_STS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR,
+ "CrystalHD: ProcInput failed: %u\n", ret);
+ return -1;
+ }
+ avctx->has_b_frames++;
+ } else {
+ av_log(avctx, AV_LOG_WARNING, "CrystalHD: Input buffer full\n");
+ len = 0; // We didn't consume any bytes.
+ }
+ } else {
+ av_log(avctx, AV_LOG_INFO, "CrystalHD: No more input data\n");
+ }
+
+ if (priv->skip_next_output) {
+ av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Skipping next output.\n");
+ priv->skip_next_output = 0;
+ avctx->has_b_frames--;
+ return len;
+ }
+
+ ret = DtsGetDriverStatus(dev, &decoder_status);
+ if (ret != BC_STS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "CrystalHD: GetDriverStatus failed\n");
+ return -1;
+ }
+
+ /*
+ * No frames ready. Don't try to extract.
+ *
+ * Empirical testing shows that ReadyListCount can be a damn lie,
+ * and ProcOut still fails when count > 0. The same testing showed
+ * that two more iterations were needed before ProcOutput would
+ * succeed.
+ */
+ if (priv->output_ready < 2) {
+ if (decoder_status.ReadyListCount != 0)
+ priv->output_ready++;
+ usleep(BASE_WAIT);
+ av_log(avctx, AV_LOG_INFO, "CrystalHD: Filling pipeline.\n");
+ return len;
+ } else if (decoder_status.ReadyListCount == 0) {
+ /*
+ * After the pipeline is established, if we encounter a lack of frames
+ * that probably means we're not giving the hardware enough time to
+ * decode them, so start increasing the wait time at the end of a
+ * decode call.
+ */
+ usleep(BASE_WAIT);
+ priv->decode_wait += WAIT_UNIT;
+ av_log(avctx, AV_LOG_INFO, "CrystalHD: No frames ready. Returning\n");
+ return len;
+ }
+
+ do {
+ rec_ret = receive_frame(avctx, data, got_frame);
+ if (rec_ret == RET_OK && *got_frame == 0) {
+ /*
+ * This case is for when the encoded fields are stored
+ * separately and we get a separate avpkt for each one. To keep
+ * the pipeline stable, we should return nothing and wait for
+ * the next time round to grab the second field.
+ * H.264 PAFF is an example of this.
+ */
+ av_log(avctx, AV_LOG_VERBOSE, "Returning after first field.\n");
+ avctx->has_b_frames--;
+ } else if (rec_ret == RET_COPY_NEXT_FIELD) {
+ /*
+ * This case is for when the encoded fields are stored in a
+ * single avpkt but the hardware returns then separately. Unless
+ * we grab the second field before returning, we'll slip another
+ * frame in the pipeline and if that happens a lot, we're sunk.
+ * So we have to get that second field now.
+ * Interlaced mpeg2 and vc1 are examples of this.
+ */
+ av_log(avctx, AV_LOG_VERBOSE, "Trying to get second field.\n");
+ while (1) {
+ usleep(priv->decode_wait);
+ ret = DtsGetDriverStatus(dev, &decoder_status);
+ if (ret == BC_STS_SUCCESS &&
+ decoder_status.ReadyListCount > 0) {
+ rec_ret = receive_frame(avctx, data, got_frame);
+ if ((rec_ret == RET_OK && *got_frame > 0) ||
+ rec_ret == RET_ERROR)
+ break;
+ }
+ }
+ av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Got second field.\n");
+ } else if (rec_ret == RET_SKIP_NEXT_COPY) {
+ /*
+ * Two input packets got turned into a field pair. Gawd.
+ */
+ av_log(avctx, AV_LOG_VERBOSE,
+ "Don't output on next decode call.\n");
+ priv->skip_next_output = 1;
+ }
+ /*
+ * If rec_ret == RET_COPY_AGAIN, that means that either we just handled
+ * a FMT_CHANGE event and need to go around again for the actual frame,
+ * we got a busy status and need to try again, or we're dealing with
+ * packed b-frames, where the hardware strangely returns the packed
+ * p-frame twice. We choose to keep the second copy as it carries the
+ * valid pts.
+ */
+ } while (rec_ret == RET_COPY_AGAIN);
+ usleep(priv->decode_wait);
+ return len;
+}
+
+
+#if CONFIG_H264_CRYSTALHD_DECODER
+static AVClass h264_class = {
+    "h264_crystalhd",
+    av_default_item_name,
+    options,
+    LIBAVUTIL_VERSION_INT,
+};
+
+/* CrystalHD-accelerated H.264 decoder; outputs YUYV422 only. */
+AVCodec ff_h264_crystalhd_decoder = {
+    .name           = "h264_crystalhd",
+    .long_name      = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (CrystalHD acceleration)"),
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = AV_CODEC_ID_H264,
+    .priv_data_size = sizeof(CHDContext),
+    .init           = init,
+    .close          = uninit,
+    .decode         = decode,
-    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
++    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
+    .flush          = flush,
+    .pix_fmts       = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
+    .priv_class     = &h264_class,
+};
+#endif
+
+#if CONFIG_MPEG2_CRYSTALHD_DECODER
+static AVClass mpeg2_class = {
+    "mpeg2_crystalhd",
+    av_default_item_name,
+    options,
+    LIBAVUTIL_VERSION_INT,
+};
+
+/* CrystalHD-accelerated MPEG-2 decoder; outputs YUYV422 only. */
+AVCodec ff_mpeg2_crystalhd_decoder = {
+    .name           = "mpeg2_crystalhd",
+    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-2 Video (CrystalHD acceleration)"),
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = AV_CODEC_ID_MPEG2VIDEO,
+    .priv_data_size = sizeof(CHDContext),
+    .init           = init,
+    .close          = uninit,
+    .decode         = decode,
-    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
++    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
+    .flush          = flush,
+    .pix_fmts       = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
+    .priv_class     = &mpeg2_class,
+};
+#endif
+
+#if CONFIG_MPEG4_CRYSTALHD_DECODER
+static AVClass mpeg4_class = {
+    "mpeg4_crystalhd",
+    av_default_item_name,
+    options,
+    LIBAVUTIL_VERSION_INT,
+};
+
+/* CrystalHD-accelerated MPEG-4 Part 2 decoder; outputs YUYV422 only. */
+AVCodec ff_mpeg4_crystalhd_decoder = {
+    .name           = "mpeg4_crystalhd",
+    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 Part 2 (CrystalHD acceleration)"),
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = AV_CODEC_ID_MPEG4,
+    .priv_data_size = sizeof(CHDContext),
+    .init           = init,
+    .close          = uninit,
+    .decode         = decode,
-    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_EXPERIMENTAL,
+/* The CODEC_CAP_* -> AV_CODEC_CAP_* conversion is a mechanical rename and
+ * must not change the flag set: keep the EXPERIMENTAL flag carried by the
+ * removed line above. */
++    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_EXPERIMENTAL,
+    .flush          = flush,
+    .pix_fmts       = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
+    .priv_class     = &mpeg4_class,
+};
+#endif
+
+#if CONFIG_MSMPEG4_CRYSTALHD_DECODER
+static AVClass msmpeg4_class = {
+    "msmpeg4_crystalhd",
+    av_default_item_name,
+    options,
+    LIBAVUTIL_VERSION_INT,
+};
+
+/* CrystalHD-accelerated MSMPEG4v3 decoder; outputs YUYV422 only. */
+AVCodec ff_msmpeg4_crystalhd_decoder = {
+    .name           = "msmpeg4_crystalhd",
+    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 Part 2 Microsoft variant version 3 (CrystalHD acceleration)"),
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = AV_CODEC_ID_MSMPEG4V3,
+    .priv_data_size = sizeof(CHDContext),
+    .init           = init,
+    .close          = uninit,
+    .decode         = decode,
-    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
+/* The CODEC_CAP_* -> AV_CODEC_CAP_* conversion is a mechanical rename and
+ * must not change the flag set: the removed line above has no EXPERIMENTAL
+ * flag, so the replacement must not introduce one. */
++    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
+    .flush          = flush,
+    .pix_fmts       = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
+    .priv_class     = &msmpeg4_class,
+};
+#endif
+
+#if CONFIG_VC1_CRYSTALHD_DECODER
+static AVClass vc1_class = {
+    "vc1_crystalhd",
+    av_default_item_name,
+    options,
+    LIBAVUTIL_VERSION_INT,
+};
+
+/* CrystalHD-accelerated VC-1 decoder; outputs YUYV422 only. */
+AVCodec ff_vc1_crystalhd_decoder = {
+    .name           = "vc1_crystalhd",
+    .long_name      = NULL_IF_CONFIG_SMALL("SMPTE VC-1 (CrystalHD acceleration)"),
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = AV_CODEC_ID_VC1,
+    .priv_data_size = sizeof(CHDContext),
+    .init           = init,
+    .close          = uninit,
+    .decode         = decode,
-    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
++    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
+    .flush          = flush,
+    .pix_fmts       = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
+    .priv_class     = &vc1_class,
+};
+#endif
+
+#if CONFIG_WMV3_CRYSTALHD_DECODER
+static AVClass wmv3_class = {
+    "wmv3_crystalhd",
+    av_default_item_name,
+    options,
+    LIBAVUTIL_VERSION_INT,
+};
+
+/* CrystalHD-accelerated WMV3 decoder; outputs YUYV422 only. */
+AVCodec ff_wmv3_crystalhd_decoder = {
+    .name           = "wmv3_crystalhd",
+    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9 (CrystalHD acceleration)"),
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = AV_CODEC_ID_WMV3,
+    .priv_data_size = sizeof(CHDContext),
+    .init           = init,
+    .close          = uninit,
+    .decode         = decode,
++    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
+    .flush          = flush,
+    .pix_fmts       = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
+    .priv_class     = &wmv3_class,
+};
+#endif
--- /dev/null
- .capabilities = CODEC_CAP_EXPERIMENTAL,
+/*
+ * DCA encoder
+ * Copyright (C) 2008-2012 Alexander E. Patrakov
+ * 2010 Benjamin Larsson
+ * 2011 Xiang Wang
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/common.h"
+#include "avcodec.h"
+#include "dca.h"
+#include "dcadata.h"
+#include "dcaenc.h"
+#include "internal.h"
+#include "mathops.h"
+#include "put_bits.h"
+
+#define MAX_CHANNELS 6
+#define DCA_MAX_FRAME_SIZE 16384
+#define DCA_HEADER_SIZE 13
+#define DCA_LFE_SAMPLES 8
+
+#define DCAENC_SUBBANDS 32
+#define SUBFRAMES 1
+#define SUBSUBFRAMES 2
+#define SUBBAND_SAMPLES (SUBFRAMES * SUBSUBFRAMES * 8)
+#define AUBANDS 25
+
+/* Per-stream encoder state: frame layout, channel config, psychoacoustic
+ * working buffers and the results of bit allocation/quantization. */
+typedef struct DCAEncContext {
+    PutBitContext pb;
+    int frame_size;
+    int frame_bits;
+    int fullband_channels;
+    int channels;
+    int lfe_channel;
+    int samplerate_index;
+    int bitrate_index;
+    int channel_config;
+    const int32_t *band_interpolation;
+    const int32_t *band_spectrum;
+    int lfe_scale_factor;
+    softfloat lfe_quant;
+    int32_t lfe_peak_cb;
+
+    int32_t history[512][MAX_CHANNELS]; /* This is a circular buffer */
+    int32_t subband[SUBBAND_SAMPLES][DCAENC_SUBBANDS][MAX_CHANNELS];
+    int32_t quantized[SUBBAND_SAMPLES][DCAENC_SUBBANDS][MAX_CHANNELS];
+    int32_t peak_cb[DCAENC_SUBBANDS][MAX_CHANNELS];
+    int32_t downsampled_lfe[DCA_LFE_SAMPLES];
+    int32_t masking_curve_cb[SUBSUBFRAMES][256];
+    int abits[DCAENC_SUBBANDS][MAX_CHANNELS];
+    int scale_factor[DCAENC_SUBBANDS][MAX_CHANNELS];
+    softfloat quant[DCAENC_SUBBANDS][MAX_CHANNELS];
+    int32_t eff_masking_curve_cb[256];
+    int32_t band_masking_cb[32];
+    int32_t worst_quantization_noise;
+    int32_t worst_noise_ever;
+    int consumed_bits;
+} DCAEncContext;
+
+/* Shared lookup tables, filled lazily on first encode_init() call. */
+static int32_t cos_table[2048];
+static int32_t band_interpolation[2][512];
+static int32_t band_spectrum[2][8];
+static int32_t auf[9][AUBANDS][256];
+static int32_t cb_to_add[256];
+static int32_t cb_to_level[2048];
+static int32_t lfe_fir_64i[512];
+
+/* Transfer function of outer and middle ear, Hz -> dB */
+static double hom(double f)
+{
+ double f1 = f / 1000;
+
+ return -3.64 * pow(f1, -0.8)
+ + 6.8 * exp(-0.6 * (f1 - 3.4) * (f1 - 3.4))
+ - 6.0 * exp(-0.15 * (f1 - 8.7) * (f1 - 8.7))
+ - 0.0006 * (f1 * f1) * (f1 * f1);
+}
+
+/* Filter response (dB) for auditory band i at frequency f.
+ * NOTE(review): fc[] and erb[] are defined elsewhere (likely dcaenc.h);
+ * presumably center frequencies and bandwidths — confirm against header. */
+static double gammafilter(int i, double f)
+{
+    double h = (f - fc[i]) / erb[i];
+
+    h = 1 + h * h;
+    h = 1 / (h * h);
+    return 20 * log10(h);
+}
+
+/*
+ * Validate the requested layout/sample-rate/bit-rate, derive the frame
+ * geometry, and (once per process) fill the shared lookup tables.
+ * Returns 0 on success or a negative AVERROR code.
+ */
+static int encode_init(AVCodecContext *avctx)
+{
+    DCAEncContext *c = avctx->priv_data;
+    uint64_t layout = avctx->channel_layout;
+    int i, min_frame_bits;
+
+    c->fullband_channels = c->channels = avctx->channels;
+    /* 3 or 6 channels means the last channel is the LFE. */
+    c->lfe_channel = (avctx->channels == 3 || avctx->channels == 6);
+    c->band_interpolation = band_interpolation[1];
+    c->band_spectrum = band_spectrum[1];
+    c->worst_quantization_noise = -2047;
+    c->worst_noise_ever = -2047;
+
+    if (!layout) {
+        av_log(avctx, AV_LOG_WARNING, "No channel layout specified. The "
+                                      "encoder will guess the layout, but it "
+                                      "might be incorrect.\n");
+        layout = av_get_default_channel_layout(avctx->channels);
+    }
+    switch (layout) {
+    case AV_CH_LAYOUT_MONO:     c->channel_config = 0; break;
+    case AV_CH_LAYOUT_STEREO:   c->channel_config = 2; break;
+    case AV_CH_LAYOUT_2_2:      c->channel_config = 8; break;
+    case AV_CH_LAYOUT_5POINT0:  c->channel_config = 9; break;
+    case AV_CH_LAYOUT_5POINT1:  c->channel_config = 9; break;
+    default:
+        av_log(avctx, AV_LOG_ERROR, "Unsupported channel layout!\n");
+        return AVERROR_PATCHWELCOME;
+    }
+
+    if (c->lfe_channel)
+        c->fullband_channels--;
+
+    for (i = 0; i < 9; i++) {
+        if (sample_rates[i] == avctx->sample_rate)
+            break;
+    }
+    if (i == 9)
+        return AVERROR(EINVAL);
+    c->samplerate_index = i;
+
+    /* NOTE(review): %i vs avctx->bit_rate — bit_rate is int64_t in newer
+     * libavcodec versions; confirm the format specifier matches the
+     * AVCodecContext in this tree. */
+    if (avctx->bit_rate < 32000 || avctx->bit_rate > 3840000) {
+        av_log(avctx, AV_LOG_ERROR, "Bit rate %i not supported.", avctx->bit_rate);
+        return AVERROR(EINVAL);
+    }
+    /* Round the requested rate up to the next table entry. */
+    for (i = 0; ff_dca_bit_rates[i] < avctx->bit_rate; i++)
+        ;
+    c->bitrate_index = i;
+    avctx->bit_rate = ff_dca_bit_rates[i];
+    c->frame_bits = FFALIGN((avctx->bit_rate * 512 + avctx->sample_rate - 1) / avctx->sample_rate, 32);
+    min_frame_bits = 132 + (493 + 28 * 32) * c->fullband_channels + c->lfe_channel * 72;
+    if (c->frame_bits < min_frame_bits || c->frame_bits > (DCA_MAX_FRAME_SIZE << 3))
+        return AVERROR(EINVAL);
+
+    c->frame_size = (c->frame_bits + 7) / 8;
+
+    avctx->frame_size = 32 * SUBBAND_SAMPLES;
+
+    /* One-time initialization of the shared tables, keyed on cos_table[0]
+     * still being zero. */
+    if (!cos_table[0]) {
+        int j, k;
+
+        for (i = 0; i < 2048; i++) {
+            cos_table[i]   = (int32_t)(0x7fffffff * cos(M_PI * i / 1024));
+            cb_to_level[i] = (int32_t)(0x7fffffff * pow(10, -0.005 * i));
+        }
+
+        /* FIXME: probably incorrect */
+        for (i = 0; i < 256; i++) {
+            lfe_fir_64i[i]       = (int32_t)(0x01ffffff * ff_dca_lfe_fir_64[i]);
+            lfe_fir_64i[511 - i] = (int32_t)(0x01ffffff * ff_dca_lfe_fir_64[i]);
+        }
+
+        for (i = 0; i < 512; i++) {
+            band_interpolation[0][i] = (int32_t)(0x1000000000ULL * ff_dca_fir_32bands_perfect[i]);
+            band_interpolation[1][i] = (int32_t)(0x1000000000ULL * ff_dca_fir_32bands_nonperfect[i]);
+        }
+
+        for (i = 0; i < 9; i++) {
+            for (j = 0; j < AUBANDS; j++) {
+                for (k = 0; k < 256; k++) {
+                    double freq = sample_rates[i] * (k + 0.5) / 512;
+
+                    auf[i][j][k] = (int32_t)(10 * (hom(freq) + gammafilter(j, freq)));
+                }
+            }
+        }
+
+        for (i = 0; i < 256; i++) {
+            double add = 1 + pow(10, -0.01 * i);
+            cb_to_add[i] = (int32_t)(100 * log10(add));
+        }
+        for (j = 0; j < 8; j++) {
+            double accum = 0;
+            for (i = 0; i < 512; i++) {
+                double reconst = ff_dca_fir_32bands_perfect[i] * ((i & 64) ? (-1) : 1);
+                accum += reconst * cos(2 * M_PI * (i + 0.5 - 256) * (j + 0.5) / 512);
+            }
+            band_spectrum[0][j] = (int32_t)(200 * log10(accum));
+        }
+        for (j = 0; j < 8; j++) {
+            double accum = 0;
+            for (i = 0; i < 512; i++) {
+                double reconst = ff_dca_fir_32bands_nonperfect[i] * ((i & 64) ? (-1) : 1);
+                accum += reconst * cos(2 * M_PI * (i + 0.5 - 256) * (j + 0.5) / 512);
+            }
+            band_spectrum[1][j] = (int32_t)(200 * log10(accum));
+        }
+    }
+    return 0;
+}
+
+/* cos() from the 2048-entry Q31 table; argument wraps modulo 2048. */
+static inline int32_t cos_t(int x)
+{
+    return cos_table[x & 2047];
+}
+
+/* sin(x) expressed as cos(x - pi/2), i.e. a 512-entry phase shift. */
+static inline int32_t sin_t(int x)
+{
+    return cos_t(x - 512);
+}
+
+/* Halve with rounding toward +inf. */
+static inline int32_t half32(int32_t a)
+{
+    return (a + 1) >> 1;
+}
+
+/* Rounded Q31 fixed-point multiply: (a * b + 2^31) >> 32. */
+static inline int32_t mul32(int32_t a, int32_t b)
+{
+    int64_t r = (int64_t)a * b + 0x80000000ULL;
+    return r >> 32;
+}
+
+/*
+ * 32-band analysis filterbank: convolve each fullband channel's history
+ * with the interpolation FIR, then project onto 32 cosine-modulated
+ * subbands, writing c->subband. Consumes 32 new input samples per
+ * subband sample.
+ */
+static void subband_transform(DCAEncContext *c, const int32_t *input)
+{
+    int ch, subs, i, k, j;
+
+    for (ch = 0; ch < c->fullband_channels; ch++) {
+        /* History is copied because it is also needed for PSY */
+        int32_t hist[512];
+        int hist_start = 0;
+
+        for (i = 0; i < 512; i++)
+            hist[i] = c->history[i][ch];
+
+        for (subs = 0; subs < SUBBAND_SAMPLES; subs++) {
+            int32_t accum[64];
+            int32_t resp;
+            int band;
+
+            /* Calculate the convolutions at once */
+            for (i = 0; i < 64; i++)
+                accum[i] = 0;
+
+            /* Two passes over the circular buffer: hist_start..511,
+             * then 0..hist_start-1, folding into 64 phases. */
+            for (k = 0, i = hist_start, j = 0;
+                    i < 512; k = (k + 1) & 63, i++, j++)
+                accum[k] += mul32(hist[i], c->band_interpolation[j]);
+            for (i = 0; i < hist_start; k = (k + 1) & 63, i++, j++)
+                accum[k] += mul32(hist[i], c->band_interpolation[j]);
+
+            for (k = 16; k < 32; k++)
+                accum[k] = accum[k] - accum[31 - k];
+            for (k = 32; k < 48; k++)
+                accum[k] = accum[k] + accum[95 - k];
+
+            for (band = 0; band < 32; band++) {
+                resp = 0;
+                for (i = 16; i < 48; i++) {
+                    int s = (2 * band + 1) * (2 * (i + 16) + 1);
+                    resp += mul32(accum[i], cos_t(s << 3)) >> 3;
+                }
+
+                /* Alternate sign on band pairs (cosine modulation parity). */
+                c->subband[subs][band][ch] = ((band + 1) & 2) ? -resp : resp;
+            }
+
+            /* Copy in 32 new samples from input */
+            for (i = 0; i < 32; i++)
+                hist[i + hist_start] = input[(subs * 32 + i) * c->channels + ch];
+            hist_start = (hist_start + 32) & 511;
+        }
+    }
+}
+
+/*
+ * 64x decimation of the last (LFE) channel through the lfe_fir_64i FIR,
+ * producing DCA_LFE_SAMPLES outputs in c->downsampled_lfe.
+ */
+static void lfe_downsample(DCAEncContext *c, const int32_t *input)
+{
+    /* FIXME: make 128x LFE downsampling possible */
+    int i, j, lfes;
+    int32_t hist[512];
+    int32_t accum;
+    int hist_start = 0;
+
+    for (i = 0; i < 512; i++)
+        hist[i] = c->history[i][c->channels - 1];
+
+    for (lfes = 0; lfes < DCA_LFE_SAMPLES; lfes++) {
+        /* Calculate the convolution */
+        accum = 0;
+
+        /* Circular buffer: tail first, then wrapped head. */
+        for (i = hist_start, j = 0; i < 512; i++, j++)
+            accum += mul32(hist[i], lfe_fir_64i[j]);
+        for (i = 0; i < hist_start; i++, j++)
+            accum += mul32(hist[i], lfe_fir_64i[j]);
+
+        c->downsampled_lfe[lfes] = accum;
+
+        /* Copy in 64 new samples from input */
+        for (i = 0; i < 64; i++)
+            hist[i + hist_start] = input[(lfes * 64 + i) * c->channels + c->channels - 1];
+
+        hist_start = (hist_start + 64) & 511;
+    }
+}
+
+/* Fixed-point complex sample. */
+typedef struct {
+    int32_t re;
+    int32_t im;
+} cplx32;
+
+/*
+ * 512-point real FFT in fixed point: Hann-window the input, run two
+ * interleaved 256-point complex transforms, then combine them into one
+ * 256-bin spectrum in out[].
+ */
+static void fft(const int32_t in[2 * 256], cplx32 out[256])
+{
+    cplx32 buf[256], rin[256], rout[256];
+    int i, j, k, l;
+
+    /* do two transforms in parallel */
+    for (i = 0; i < 256; i++) {
+        /* Apply the Hann window */
+        rin[i].re = mul32(in[2 * i], 0x3fffffff - (cos_t(8 * i + 2) >> 1));
+        rin[i].im = mul32(in[2 * i + 1], 0x3fffffff - (cos_t(8 * i + 6) >> 1));
+    }
+    /* pre-rotation */
+    for (i = 0; i < 256; i++) {
+        buf[i].re = mul32(cos_t(4 * i + 2), rin[i].re)
+                  - mul32(sin_t(4 * i + 2), rin[i].im);
+        buf[i].im = mul32(cos_t(4 * i + 2), rin[i].im)
+                  + mul32(sin_t(4 * i + 2), rin[i].re);
+    }
+
+    /* Decimation-in-frequency butterflies; outputs halved each stage to
+     * avoid overflow. */
+    for (j = 256, l = 1; j != 1; j >>= 1, l <<= 1) {
+        for (k = 0; k < 256; k += j) {
+            for (i = k; i < k + j / 2; i++) {
+                cplx32 sum, diff;
+                int t = 8 * l * i;
+
+                sum.re = buf[i].re + buf[i + j / 2].re;
+                sum.im = buf[i].im + buf[i + j / 2].im;
+
+                diff.re = buf[i].re - buf[i + j / 2].re;
+                diff.im = buf[i].im - buf[i + j / 2].im;
+
+                buf[i].re = half32(sum.re);
+                buf[i].im = half32(sum.im);
+
+                buf[i + j / 2].re = mul32(diff.re, cos_t(t))
+                                  - mul32(diff.im, sin_t(t));
+                buf[i + j / 2].im = mul32(diff.im, cos_t(t))
+                                  + mul32(diff.re, sin_t(t));
+            }
+        }
+    }
+    /* post-rotation */
+    for (i = 0; i < 256; i++) {
+        int b = ff_reverse[i];
+        rout[i].re = mul32(buf[b].re, cos_t(4 * i))
+                   - mul32(buf[b].im, sin_t(4 * i));
+        rout[i].im = mul32(buf[b].im, cos_t(4 * i))
+                   + mul32(buf[b].re, sin_t(4 * i));
+    }
+    for (i = 0; i < 256; i++) {
+        /* separate the results of the two transforms */
+        cplx32 o1, o2;
+
+        o1.re =  rout[i].re - rout[255 - i].re;
+        o1.im =  rout[i].im + rout[255 - i].im;
+
+        o2.re =  rout[i].im - rout[255 - i].im;
+        o2.im = -rout[i].re - rout[255 - i].re;
+
+        /* combine them into one long transform */
+        out[i].re = mul32( o1.re + o2.re, cos_t(2 * i + 1))
+                  + mul32( o1.im - o2.im, sin_t(2 * i + 1));
+        out[i].im = mul32( o1.im + o2.im, cos_t(2 * i + 1))
+                  + mul32(-o1.re + o2.re, sin_t(2 * i + 1));
+    }
+}
+
+/* Level -> centibel conversion via binary search over cb_to_level[];
+ * returns a value in [-2047, 0]. */
+static int32_t get_cb(int32_t in)
+{
+    int i, res;
+
+    res = 0;
+    if (in < 0)
+        in = -in;
+    for (i = 1024; i > 0; i >>= 1) {
+        if (cb_to_level[i + res] >= in)
+            res += i;
+    }
+    return -res;
+}
+
+/* Sum two powers expressed in centibels; values more than 256 cb apart
+ * are dominated by the larger one. */
+static int32_t add_cb(int32_t a, int32_t b)
+{
+    if (a < b)
+        FFSWAP(int32_t, a, b);
+
+    if (a - b >= 256)
+        return a;
+    return a + cb_to_add[a - b];
+}
+
+/*
+ * Psychoacoustic step: transform the 512-sample window, accumulate power
+ * through the auditory-band weighting (auf[]) and fold the normalized
+ * just-noticeable-distortion estimate into out_cb[] (centibels).
+ */
+static void adjust_jnd(int samplerate_index,
+                       const int32_t in[512], int32_t out_cb[256])
+{
+    int32_t power[256];
+    cplx32 out[256];
+    int32_t out_cb_unnorm[256];
+    int32_t denom;
+    const int32_t ca_cb = -1114;
+    const int32_t cs_cb = 928;
+    int i, j;
+
+    fft(in, out);
+
+    for (j = 0; j < 256; j++) {
+        power[j] = add_cb(get_cb(out[j].re), get_cb(out[j].im));
+        out_cb_unnorm[j] = -2047; /* and can only grow */
+    }
+
+    for (i = 0; i < AUBANDS; i++) {
+        denom = ca_cb; /* and can only grow */
+        for (j = 0; j < 256; j++)
+            denom = add_cb(denom, power[j] + auf[samplerate_index][i][j]);
+        for (j = 0; j < 256; j++)
+            out_cb_unnorm[j] = add_cb(out_cb_unnorm[j],
+                                      -denom + auf[samplerate_index][i][j]);
+    }
+
+    for (j = 0; j < 256; j++)
+        out_cb[j] = add_cb(out_cb[j], -out_cb_unnorm[j] - ca_cb - cs_cb);
+}
+
+/* Callback invoked for each spectral bin at a subband boundary. */
+typedef void (*walk_band_t)(DCAEncContext *c, int band1, int band2, int f,
+                            int32_t spectrum1, int32_t spectrum2, int channel,
+                            int32_t * arg);
+
+/* Visit the bins on the low-frequency edge of `band`; band 0 has no
+ * lower neighbour, so 4 bins are walked with a -2047 cb floor. */
+static void walk_band_low(DCAEncContext *c, int band, int channel,
+                          walk_band_t walk, int32_t *arg)
+{
+    int f;
+
+    if (band == 0) {
+        for (f = 0; f < 4; f++)
+            walk(c, 0, 0, f, 0, -2047, channel, arg);
+    } else {
+        for (f = 0; f < 8; f++)
+            walk(c, band, band - 1, 8 * band - 4 + f,
+                 c->band_spectrum[7 - f], c->band_spectrum[f], channel, arg);
+    }
+}
+
+/* Mirror of walk_band_low for the high-frequency edge; band 31 has no
+ * upper neighbour. */
+static void walk_band_high(DCAEncContext *c, int band, int channel,
+                           walk_band_t walk, int32_t *arg)
+{
+    int f;
+
+    if (band == 31) {
+        for (f = 0; f < 4; f++)
+            walk(c, 31, 31, 256 - 4 + f, 0, -2047, channel, arg);
+    } else {
+        for (f = 0; f < 8; f++)
+            walk(c, band, band + 1, 8 * band + 4 + f,
+                 c->band_spectrum[f], c->band_spectrum[7 - f], channel, arg);
+    }
+}
+
+/* walk_band_t callback: keep the minimum (masking curve - band spectrum)
+ * per band. band2/spectrum2/channel/arg are unused but required by the
+ * callback signature. */
+static void update_band_masking(DCAEncContext *c, int band1, int band2,
+                                int f, int32_t spectrum1, int32_t spectrum2,
+                                int channel, int32_t * arg)
+{
+    int32_t value = c->eff_masking_curve_cb[f] - spectrum1;
+
+    if (value < c->band_masking_cb[band1])
+        c->band_masking_cb[band1] = value;
+}
+
+/*
+ * Build the per-subsubframe masking curves from history + new input,
+ * reduce them to one effective curve (elementwise minimum), then derive
+ * the per-band masking thresholds in c->band_masking_cb.
+ */
+static void calc_masking(DCAEncContext *c, const int32_t *input)
+{
+    int i, k, band, ch, ssf;
+    int32_t data[512];
+
+    for (i = 0; i < 256; i++)
+        for (ssf = 0; ssf < SUBSUBFRAMES; ssf++)
+            c->masking_curve_cb[ssf][i] = -2047;
+
+    for (ssf = 0; ssf < SUBSUBFRAMES; ssf++)
+        for (ch = 0; ch < c->fullband_channels; ch++) {
+            /* Window straddles stored history and fresh input. */
+            for (i = 0, k = 128 + 256 * ssf; k < 512; i++, k++)
+                data[i] = c->history[k][ch];
+            for (k -= 512; i < 512; i++, k++)
+                data[i] = input[k * c->channels + ch];
+            adjust_jnd(c->samplerate_index, data, c->masking_curve_cb[ssf]);
+        }
+    for (i = 0; i < 256; i++) {
+        int32_t m = 2048;
+
+        for (ssf = 0; ssf < SUBSUBFRAMES; ssf++)
+            if (c->masking_curve_cb[ssf][i] < m)
+                m = c->masking_curve_cb[ssf][i];
+        c->eff_masking_curve_cb[i] = m;
+    }
+
+    for (band = 0; band < 32; band++) {
+        c->band_masking_cb[band] = 2048;
+        walk_band_low(c, band, 0, update_band_masking, NULL);
+        walk_band_high(c, band, 0, update_band_masking, NULL);
+    }
+}
+
+/* Record the per-band (and LFE) peak magnitude of the frame, converted
+ * to centibels, for use by bit allocation and scale-factor selection. */
+static void find_peaks(DCAEncContext *c)
+{
+    int band, ch;
+
+    for (band = 0; band < 32; band++)
+        for (ch = 0; ch < c->fullband_channels; ch++) {
+            int sample;
+            int32_t m = 0;
+
+            for (sample = 0; sample < SUBBAND_SAMPLES; sample++) {
+                int32_t s = abs(c->subband[sample][band][ch]);
+                if (m < s)
+                    m = s;
+            }
+            c->peak_cb[band][ch] = get_cb(m);
+        }
+
+    if (c->lfe_channel) {
+        int sample;
+        int32_t m = 0;
+
+        for (sample = 0; sample < DCA_LFE_SAMPLES; sample++)
+            if (m < abs(c->downsampled_lfe[sample]))
+                m = abs(c->downsampled_lfe[sample]);
+        c->lfe_peak_cb = get_cb(m);
+    }
+}
+
+static const int snr_fudge = 128;
+/* Bitmask of abit classes used in the last allocation pass. */
+#define USED_1ABITS 1
+#define USED_NABITS 2
+#define USED_26ABITS 4
+
+/*
+ * Allocate abits per band/channel for a given quantization-noise target
+ * (centibels), update c->consumed_bits, and return a USED_* mask telling
+ * the caller which abit classes were hit.
+ */
+static int init_quantization_noise(DCAEncContext *c, int noise)
+{
+    int ch, band, ret = 0;
+
+    c->consumed_bits = 132 + 493 * c->fullband_channels;
+    if (c->lfe_channel)
+        c->consumed_bits += 72;
+
+    /* attempt to guess the bit distribution based on the previous frame */
+    for (ch = 0; ch < c->fullband_channels; ch++) {
+        for (band = 0; band < 32; band++) {
+            int snr_cb = c->peak_cb[band][ch] - c->band_masking_cb[band] - noise;
+
+            if (snr_cb >= 1312) {
+                c->abits[band][ch] = 26;
+                ret |= USED_26ABITS;
+            } else if (snr_cb >= 222) {
+                c->abits[band][ch] = 8 + mul32(snr_cb - 222, 69000000);
+                ret |= USED_NABITS;
+            } else if (snr_cb >= 0) {
+                c->abits[band][ch] = 2 + mul32(snr_cb, 106000000);
+                ret |= USED_NABITS;
+            } else {
+                c->abits[band][ch] = 1;
+                ret |= USED_1ABITS;
+            }
+        }
+    }
+
+    for (band = 0; band < 32; band++)
+        for (ch = 0; ch < c->fullband_channels; ch++) {
+            c->consumed_bits += bit_consumption[c->abits[band][ch]];
+        }
+
+    return ret;
+}
+
+/*
+ * Choose the largest noise target whose bit cost fits in the frame:
+ * bracket it starting from last frame's value, then binary-search the
+ * bracket. Tracks the worst noise seen for the next frame's first guess.
+ */
+static void assign_bits(DCAEncContext *c)
+{
+    /* Find the bounds where the binary search should work */
+    int low, high, down;
+    int used_abits = 0;
+
+    init_quantization_noise(c, c->worst_quantization_noise);
+    low = high = c->worst_quantization_noise;
+    if (c->consumed_bits > c->frame_bits) {
+        while (c->consumed_bits > c->frame_bits) {
+            av_assert0(used_abits != USED_1ABITS);
+            low = high;
+            high += snr_fudge;
+            used_abits = init_quantization_noise(c, high);
+        }
+    } else {
+        while (c->consumed_bits <= c->frame_bits) {
+            high = low;
+            if (used_abits == USED_26ABITS)
+                goto out; /* The requested bitrate is too high, pad with zeros */
+            low -= snr_fudge;
+            used_abits = init_quantization_noise(c, low);
+        }
+    }
+
+    /* Now do a binary search between low and high to see what fits */
+    for (down = snr_fudge >> 1; down; down >>= 1) {
+        init_quantization_noise(c, high - down);
+        if (c->consumed_bits <= c->frame_bits)
+            high -= down;
+    }
+    init_quantization_noise(c, high);
+out:
+    c->worst_quantization_noise = high;
+    if (high > c->worst_noise_ever)
+        c->worst_noise_ever = high;
+}
+
+/* Replace the 512-sample history with this frame's input (all channels). */
+static void shift_history(DCAEncContext *c, const int32_t *input)
+{
+    int k, ch;
+
+    for (k = 0; k < 512; k++)
+        for (ch = 0; ch < c->channels; ch++)
+            c->history[k][ch] = input[k * c->channels + ch];
+}
+
+/* Quantize with the softfloat step (mantissa m, shift e), rounding to
+ * nearest via the half-step offset. */
+static int32_t quantize_value(int32_t value, softfloat quant)
+{
+    int32_t offset = 1 << (quant.e - 1);
+
+    value = mul32(value, quant.m) + offset;
+    value = value >> quant.e;
+    return value;
+}
+
+/*
+ * Pick the smallest scale-factor index whose quantizer still represents
+ * the band peak without clipping, by greedy bisection from index 127
+ * downwards; fills *quant with the combined step and returns the index.
+ */
+static int calc_one_scale(int32_t peak_cb, int abits, softfloat *quant)
+{
+    int32_t peak;
+    int our_nscale, try_remove;
+    softfloat our_quant;
+
+    av_assert0(peak_cb <= 0);
+    av_assert0(peak_cb >= -2047);
+
+    our_nscale = 127;
+    peak = cb_to_level[-peak_cb];
+
+    for (try_remove = 64; try_remove > 0; try_remove >>= 1) {
+        if (scalefactor_inv[our_nscale - try_remove].e + stepsize_inv[abits].e <= 17)
+            continue;
+        our_quant.m = mul32(scalefactor_inv[our_nscale - try_remove].m, stepsize_inv[abits].m);
+        our_quant.e = scalefactor_inv[our_nscale - try_remove].e + stepsize_inv[abits].e - 17;
+        if ((quant_levels[abits] - 1) / 2 < quantize_value(peak, our_quant))
+            continue;
+        our_nscale -= try_remove;
+    }
+
+    /* Clamp below 125 (indices 125..127 presumably reserved — confirm
+     * against the scale-factor table). */
+    if (our_nscale >= 125)
+        our_nscale = 124;
+
+    quant->m = mul32(scalefactor_inv[our_nscale].m, stepsize_inv[abits].m);
+    quant->e = scalefactor_inv[our_nscale].e + stepsize_inv[abits].e - 17;
+    av_assert0((quant_levels[abits] - 1) / 2 >= quantize_value(peak, *quant));
+
+    return our_nscale;
+}
+
+/* Compute scale factors and quantizer steps for every band/channel,
+ * plus the LFE channel (fixed 11 abits). */
+static void calc_scales(DCAEncContext *c)
+{
+    int band, ch;
+
+    for (band = 0; band < 32; band++)
+        for (ch = 0; ch < c->fullband_channels; ch++)
+            c->scale_factor[band][ch] = calc_one_scale(c->peak_cb[band][ch],
+                                                       c->abits[band][ch],
+                                                       &c->quant[band][ch]);
+
+    if (c->lfe_channel)
+        c->lfe_scale_factor = calc_one_scale(c->lfe_peak_cb, 11, &c->lfe_quant);
+}
+
+/* Quantize every subband sample with its band/channel quantizer step. */
+static void quantize_all(DCAEncContext *c)
+{
+    int sample, band, ch;
+
+    for (sample = 0; sample < SUBBAND_SAMPLES; sample++)
+        for (band = 0; band < 32; band++)
+            for (ch = 0; ch < c->fullband_channels; ch++)
+                c->quantized[sample][band][ch] = quantize_value(c->subband[sample][band][ch], c->quant[band][ch]);
+}
+
+/* Write the fixed DCA core frame header (sync word plus the bitstream
+ * fields commented inline below) into c->pb. */
+static void put_frame_header(DCAEncContext *c)
+{
+    /* SYNC */
+    put_bits(&c->pb, 16, 0x7ffe);
+    put_bits(&c->pb, 16, 0x8001);
+
+    /* Frame type: normal */
+    put_bits(&c->pb, 1, 1);
+
+    /* Deficit sample count: none */
+    put_bits(&c->pb, 5, 31);
+
+    /* CRC is not present */
+    put_bits(&c->pb, 1, 0);
+
+    /* Number of PCM sample blocks */
+    put_bits(&c->pb, 7, SUBBAND_SAMPLES - 1);
+
+    /* Primary frame byte size */
+    put_bits(&c->pb, 14, c->frame_size - 1);
+
+    /* Audio channel arrangement */
+    put_bits(&c->pb, 6, c->channel_config);
+
+    /* Core audio sampling frequency */
+    put_bits(&c->pb, 4, bitstream_sfreq[c->samplerate_index]);
+
+    /* Transmission bit rate */
+    put_bits(&c->pb, 5, c->bitrate_index);
+
+    /* Embedded down mix: disabled */
+    put_bits(&c->pb, 1, 0);
+
+    /* Embedded dynamic range flag: not present */
+    put_bits(&c->pb, 1, 0);
+
+    /* Embedded time stamp flag: not present */
+    put_bits(&c->pb, 1, 0);
+
+    /* Auxiliary data flag: not present */
+    put_bits(&c->pb, 1, 0);
+
+    /* HDCD source: no */
+    put_bits(&c->pb, 1, 0);
+
+    /* Extension audio ID: N/A */
+    put_bits(&c->pb, 3, 0);
+
+    /* Extended audio data: not present */
+    put_bits(&c->pb, 1, 0);
+
+    /* Audio sync word insertion flag: after each sub-frame */
+    put_bits(&c->pb, 1, 0);
+
+    /* Low frequency effects flag: not present or 64x subsampling */
+    put_bits(&c->pb, 2, c->lfe_channel ? 2 : 0);
+
+    /* Predictor history switch flag: on */
+    put_bits(&c->pb, 1, 1);
+
+    /* No CRC */
+    /* Multirate interpolator switch: non-perfect reconstruction */
+    put_bits(&c->pb, 1, 0);
+
+    /* Encoder software revision: 7 */
+    put_bits(&c->pb, 4, 7);
+
+    /* Copy history: 0 */
+    put_bits(&c->pb, 2, 0);
+
+    /* Source PCM resolution: 16 bits, not DTS ES */
+    put_bits(&c->pb, 3, 0);
+
+    /* Front sum/difference coding: no */
+    put_bits(&c->pb, 1, 0);
+
+    /* Surrounds sum/difference coding: no */
+    put_bits(&c->pb, 1, 0);
+
+    /* Dialog normalization: 0 dB */
+    put_bits(&c->pb, 4, 0);
+}
+
+/* Write the primary audio coding header: subframe/channel counts and the
+ * per-channel codebook and quantizer selections. */
+static void put_primary_audio_header(DCAEncContext *c)
+{
+    /* quant-index codebook field widths and all-ones dummy values. */
+    static const int bitlen[11] = { 0, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3 };
+    static const int thr[11]    = { 0, 1, 3, 3, 3, 3, 7, 7, 7, 7, 7 };
+
+    int ch, i;
+    /* Number of subframes */
+    put_bits(&c->pb, 4, SUBFRAMES - 1);
+
+    /* Number of primary audio channels */
+    put_bits(&c->pb, 3, c->fullband_channels - 1);
+
+    /* Subband activity count */
+    for (ch = 0; ch < c->fullband_channels; ch++)
+        put_bits(&c->pb, 5, DCAENC_SUBBANDS - 2);
+
+    /* High frequency VQ start subband */
+    for (ch = 0; ch < c->fullband_channels; ch++)
+        put_bits(&c->pb, 5, DCAENC_SUBBANDS - 1);
+
+    /* Joint intensity coding index: 0, 0 */
+    for (ch = 0; ch < c->fullband_channels; ch++)
+        put_bits(&c->pb, 3, 0);
+
+    /* Transient mode codebook: A4, A4 (arbitrary) */
+    for (ch = 0; ch < c->fullband_channels; ch++)
+        put_bits(&c->pb, 2, 0);
+
+    /* Scale factor code book: 7 bit linear, 7-bit sqrt table (for each channel) */
+    for (ch = 0; ch < c->fullband_channels; ch++)
+        put_bits(&c->pb, 3, 6);
+
+    /* Bit allocation quantizer select: linear 5-bit */
+    for (ch = 0; ch < c->fullband_channels; ch++)
+        put_bits(&c->pb, 3, 6);
+
+    /* Quantization index codebook select: dummy data
+       to avoid transmission of scale factor adjustment */
+    for (i = 1; i < 11; i++)
+        for (ch = 0; ch < c->fullband_channels; ch++)
+            put_bits(&c->pb, bitlen[i], thr[i]);
+
+    /* Scale factor adjustment index: not transmitted */
+    /* Audio header CRC check word: not transmitted */
+}
+
+/*
+ * Emit 8 quantized samples for one band/channel of subsubframe ss.
+ * Low abits (<= 7) pack four samples into one base-(quant_levels) code;
+ * higher abits write each sample as a signed field.
+ */
+static void put_subframe_samples(DCAEncContext *c, int ss, int band, int ch)
+{
+    if (c->abits[band][ch] <= 7) {
+        int sum, i, j;
+        for (i = 0; i < 8; i += 4) {
+            sum = 0;
+            for (j = 3; j >= 0; j--) {
+                sum *= quant_levels[c->abits[band][ch]];
+                sum += c->quantized[ss * 8 + i + j][band][ch];
+                /* Bias to make the stored code non-negative. */
+                sum += (quant_levels[c->abits[band][ch]] - 1) / 2;
+            }
+            put_bits(&c->pb, bit_consumption[c->abits[band][ch]] / 4, sum);
+        }
+    } else {
+        int i;
+        for (i = 0; i < 8; i++) {
+            int bits = bit_consumption[c->abits[band][ch]] / 16;
+            put_sbits(&c->pb, bits, c->quantized[ss * 8 + i][band][ch]);
+        }
+    }
+}
+
+/* Write one complete subframe: allocation side info, scale factors, LFE
+ * block and the audio subsubframes, terminated by the DSYNC marker. */
+static void put_subframe(DCAEncContext *c, int subframe)
+{
+    int i, band, ss, ch;
+
+    /* Subsubframes count */
+    put_bits(&c->pb, 2, SUBSUBFRAMES -1);
+
+    /* Partial subsubframe sample count: dummy */
+    put_bits(&c->pb, 3, 0);
+
+    /* Prediction mode: no ADPCM, in each channel and subband */
+    for (ch = 0; ch < c->fullband_channels; ch++)
+        for (band = 0; band < DCAENC_SUBBANDS; band++)
+            put_bits(&c->pb, 1, 0);
+
+    /* Prediction VQ address: not transmitted */
+    /* Bit allocation index */
+    for (ch = 0; ch < c->fullband_channels; ch++)
+        for (band = 0; band < DCAENC_SUBBANDS; band++)
+            put_bits(&c->pb, 5, c->abits[band][ch]);
+
+    if (SUBSUBFRAMES > 1) {
+        /* Transition mode: none for each channel and subband */
+        for (ch = 0; ch < c->fullband_channels; ch++)
+            for (band = 0; band < DCAENC_SUBBANDS; band++)
+                put_bits(&c->pb, 1, 0); /* codebook A4 */
+    }
+
+    /* Scale factors */
+    for (ch = 0; ch < c->fullband_channels; ch++)
+        for (band = 0; band < DCAENC_SUBBANDS; band++)
+            put_bits(&c->pb, 7, c->scale_factor[band][ch]);
+
+    /* Joint subband scale factor codebook select: not transmitted */
+    /* Scale factors for joint subband coding: not transmitted */
+    /* Stereo down-mix coefficients: not transmitted */
+    /* Dynamic range coefficient: not transmitted */
+    /* Stde information CRC check word: not transmitted */
+    /* VQ encoded high frequency subbands: not transmitted */
+
+    /* LFE data: 8 samples and scalefactor */
+    if (c->lfe_channel) {
+        for (i = 0; i < DCA_LFE_SAMPLES; i++)
+            put_bits(&c->pb, 8, quantize_value(c->downsampled_lfe[i], c->lfe_quant) & 0xff);
+        put_bits(&c->pb, 8, c->lfe_scale_factor);
+    }
+
+    /* Audio data (subsubframes) */
+    for (ss = 0; ss < SUBSUBFRAMES ; ss++)
+        for (ch = 0; ch < c->fullband_channels; ch++)
+            for (band = 0; band < DCAENC_SUBBANDS; band++)
+                put_subframe_samples(c, ss, band, ch);
+
+    /* DSYNC */
+    put_bits(&c->pb, 16, 0xffff);
+}
+
+/* Encode one frame of interleaved 32-bit samples into a DCA packet.
+ * Runs the full pipeline (subband transform, psychoacoustics, bit
+ * allocation, quantization) and then serializes the bitstream.
+ * Returns 0 on success or a negative AVERROR from packet allocation. */
+static int encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
+                        const AVFrame *frame, int *got_packet_ptr)
+{
+    DCAEncContext *c = avctx->priv_data;
+    const int32_t *samples;
+    int ret, i;
+
+    if ((ret = ff_alloc_packet2(avctx, avpkt, c->frame_size , 0)) < 0)
+        return ret;
+
+    samples = (const int32_t *)frame->data[0];
+
+    subband_transform(c, samples);
+    if (c->lfe_channel)
+        lfe_downsample(c, samples);
+
+    calc_masking(c, samples);
+    find_peaks(c);
+    assign_bits(c);
+    calc_scales(c);
+    quantize_all(c);
+    shift_history(c, samples);
+
+    init_put_bits(&c->pb, avpkt->data, avpkt->size);
+    put_frame_header(c);
+    put_primary_audio_header(c);
+    for (i = 0; i < SUBFRAMES; i++)
+        put_subframe(c, i);
+
+    flush_put_bits(&c->pb);
+
+    avpkt->pts      = frame->pts;
+    avpkt->duration = ff_samples_to_time_base(avctx, frame->nb_samples);
+    /* NOTE(review): packet was allocated with frame_size bytes but size is
+     * reported as frame_size + 1 — confirm frame_size excludes the final
+     * byte, otherwise this exposes one byte of allocator padding. */
+    avpkt->size     = c->frame_size + 1;
+    *got_packet_ptr = 1;
+    return 0;
+}
+
+/* Default options: 1411200 b/s matches CD-quality DTS bitrate. */
+static const AVCodecDefault defaults[] = {
+    { "b", "1411200" },
+    { NULL },
+};
+
+/* Encoder registration: experimental DCA core encoder, planar-less S32
+ * input, fixed set of channel layouts (no LFE-only or >5.1 layouts). */
+AVCodec ff_dca_encoder = {
+    .name                  = "dca",
+    .long_name             = NULL_IF_CONFIG_SMALL("DCA (DTS Coherent Acoustics)"),
+    .type                  = AVMEDIA_TYPE_AUDIO,
+    .id                    = AV_CODEC_ID_DTS,
+    .priv_data_size        = sizeof(DCAEncContext),
+    .init                  = encode_init,
+    .encode2               = encode_frame,
++    .capabilities          = AV_CODEC_CAP_EXPERIMENTAL,
+    .sample_fmts           = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S32,
+                                                            AV_SAMPLE_FMT_NONE },
+    .supported_samplerates = sample_rates,
+    .channel_layouts       = (const uint64_t[]) { AV_CH_LAYOUT_MONO,
+                                                  AV_CH_LAYOUT_STEREO,
+                                                  AV_CH_LAYOUT_2_2,
+                                                  AV_CH_LAYOUT_5POINT0,
+                                                  AV_CH_LAYOUT_5POINT1,
+                                                  0 },
+    .defaults              = defaults,
+};
--- /dev/null
- .capabilities = CODEC_CAP_DELAY,
+/*
+ * Copyright (C) 2007 Marco Gerards <marco@gnu.org>
+ * Copyright (C) 2009 David Conrad
+ * Copyright (C) 2011 Jordi Ortiz
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Dirac Decoder
+ * @author Marco Gerards <marco@gnu.org>, David Conrad, Jordi Ortiz <nenjordi@gmail.com>
+ */
+
+#include "avcodec.h"
+#include "get_bits.h"
+#include "bytestream.h"
+#include "internal.h"
+#include "golomb.h"
+#include "dirac_arith.h"
+#include "mpeg12data.h"
+#include "libavcodec/mpegvideo.h"
+#include "mpegvideoencdsp.h"
+#include "dirac_dwt.h"
+#include "dirac.h"
+#include "diracdsp.h"
+#include "videodsp.h"
+
+/**
+ * The spec limits the number of wavelet decompositions to 4 for both
+ * level 1 (VC-2) and 128 (long-gop default).
+ * 5 decompositions is the maximum before >16-bit buffers are needed.
+ * Schroedinger allows this for DD 9,7 and 13,7 wavelets only, limiting
+ * the others to 4 decompositions (or 3 for the fidelity filter).
+ *
+ * We use this instead of MAX_DECOMPOSITIONS to save some memory.
+ */
+#define MAX_DWT_LEVELS 5
+
+/**
+ * The spec limits this to 3 for frame coding, but in practice can be as high as 6
+ */
+#define MAX_REFERENCE_FRAMES 8
+#define MAX_DELAY 5 /* limit for main profile for frame coding (TODO: field coding) */
+#define MAX_FRAMES (MAX_REFERENCE_FRAMES + MAX_DELAY + 1)
+#define MAX_QUANT 68 /* max quant for VC-2 */
+#define MAX_BLOCKSIZE 32 /* maximum xblen/yblen we support */
+
+/**
+ * DiracBlock->ref flags, if set then the block does MC from the given ref
+ */
+#define DIRAC_REF_MASK_REF1 1
+#define DIRAC_REF_MASK_REF2 2
+#define DIRAC_REF_MASK_GLOBAL 4
+
+/**
+ * Value of Picture.reference when Picture is not a reference picture, but
+ * is held for delayed output.
+ */
+#define DELAYED_PIC_REF 4
+
+#define CALC_PADDING(size, depth) \
+ (((size + (1 << depth) - 1) >> depth) << depth)
+
+#define DIVRNDUP(a, b) (((a) + (b) - 1) / (b))
+
+/* A decoded picture plus its lazily generated half-pel planes. */
+typedef struct {
+    AVFrame *avframe;
+    int interpolated[3];       /* 1 if hpel[] is valid for that plane */
+    uint8_t *hpel[3][4];
+    uint8_t *hpel_base[3][4];  /* unaligned allocations backing hpel[]; owned and freed here */
+} DiracFrame;
+
+/* Per-block motion data: either MVs (inter) or per-plane DC (intra). */
+typedef struct {
+    union {
+        int16_t mv[2][2];  /* [ref][x/y] motion vector */
+        int16_t dc[3];     /* per-plane DC value when ref == 0 */
+    } u; /* anonymous unions aren't in C99 :( */
+    uint8_t ref;           /* DIRAC_REF_MASK_* flags */
+} DiracBlock;
+
+/* One wavelet subband within a plane's IDWT buffer. */
+typedef struct SubBand {
+    int level;
+    int orientation;          /* enum dirac_subband */
+    int stride;
+    int width;
+    int height;
+    int quant;
+    IDWTELEM *ibuf;           /* points into the owning Plane's idwt_buf */
+    struct SubBand *parent;   /* same-orientation band one level up, or NULL */
+
+    /* for low delay */
+    unsigned length;          /* coded length in bytes */
+    const uint8_t *coeff_data;
+} SubBand;
+
+/* One picture component (luma or chroma) with its IDWT buffers and
+ * motion-compensation block geometry. */
+typedef struct Plane {
+    int width;
+    int height;
+    ptrdiff_t stride;
+
+    int idwt_width;
+    int idwt_height;
+    int idwt_stride;
+    IDWTELEM *idwt_buf;       /* usable area, offset past the top padding */
+    IDWTELEM *idwt_buf_base;  /* actual allocation; freed in free_sequence_buffers() */
+    IDWTELEM *idwt_tmp;
+
+    /* block length */
+    uint8_t xblen;
+    uint8_t yblen;
+    /* block separation (block n+1 starts after this many pixels in block n) */
+    uint8_t xbsep;
+    uint8_t ybsep;
+    /* amount of overspill on each edge (half of the overlap between blocks) */
+    uint8_t xoffset;
+    uint8_t yoffset;
+
+    SubBand band[MAX_DWT_LEVELS][4];
+} Plane;
+
+/* Complete per-stream decoder state: sequence parameters, wavelet and
+ * low-delay configuration, motion compensation tables and the frame
+ * pools used for reference management and reordering. */
+typedef struct DiracContext {
+    AVCodecContext *avctx;
+    MpegvideoEncDSPContext mpvencdsp;
+    VideoDSPContext vdsp;
+    DiracDSPContext diracdsp;
+    GetBitContext gb;
+    dirac_source_params source;
+    int seen_sequence_header;
+    int frame_number;           /* number of the next frame to display       */
+    Plane plane[3];
+    int chroma_x_shift;
+    int chroma_y_shift;
+
+    int zero_res;               /* zero residue flag                         */
+    int is_arith;               /* whether coeffs use arith or golomb coding */
+    int low_delay;              /* use the low delay syntax                  */
+    int globalmc_flag;          /* use global motion compensation            */
+    int num_refs;               /* number of reference pictures              */
+
+    /* wavelet decoding */
+    unsigned wavelet_depth;     /* depth of the IDWT                         */
+    unsigned wavelet_idx;
+
+    /**
+     * schroedinger older than 1.0.8 doesn't store
+     * quant delta if only one codebook exists in a band
+     */
+    unsigned old_delta_quant;
+    unsigned codeblock_mode;
+
+    struct {
+        unsigned width;
+        unsigned height;
+    } codeblock[MAX_DWT_LEVELS+1];
+
+    struct {
+        unsigned num_x;         /* number of horizontal slices               */
+        unsigned num_y;         /* number of vertical slices                 */
+        AVRational bytes;       /* average bytes per slice                   */
+        uint8_t quant[MAX_DWT_LEVELS][4]; /* [DIRAC_STD] E.1 */
+    } lowdelay;
+
+    struct {
+        int pan_tilt[2];        /* pan/tilt vector                           */
+        int zrs[2][2];          /* zoom/rotate/shear matrix                  */
+        int perspective[2];     /* perspective vector                        */
+        unsigned zrs_exp;
+        unsigned perspective_exp;
+    } globalmc[2];
+
+    /* motion compensation */
+    uint8_t mv_precision;       /* [DIRAC_STD] REFS_WT_PRECISION             */
+    int16_t weight[2];          /* [DIRAC_STD] REF1_WT and REF2_WT           */
+    unsigned weight_log2denom;  /* [DIRAC_STD] REFS_WT_PRECISION             */
+
+    int blwidth;                /* number of blocks (horizontally)           */
+    int blheight;               /* number of blocks (vertically)             */
+    int sbwidth;                /* number of superblocks (horizontally)      */
+    int sbheight;               /* number of superblocks (vertically)        */
+
+    uint8_t *sbsplit;
+    DiracBlock *blmotion;
+
+    uint8_t *edge_emu_buffer[4];
+    uint8_t *edge_emu_buffer_base;
+
+    uint16_t *mctmp;            /* buffer holding the MC data multiplied by OBMC weights */
+    uint8_t *mcscratch;
+    int buffer_stride;
+
+    DECLARE_ALIGNED(16, uint8_t, obmc_weight)[3][MAX_BLOCKSIZE*MAX_BLOCKSIZE];
+
+    void (*put_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h);
+    void (*avg_pixels_tab[4])(uint8_t *dst, const uint8_t *src[5], int stride, int h);
+    void (*add_obmc)(uint16_t *dst, const uint8_t *src, int stride, const uint8_t *obmc_weight, int yblen);
+    dirac_weight_func weight_func;
+    dirac_biweight_func biweight_func;
+
+    DiracFrame *current_picture;
+    DiracFrame *ref_pics[2];
+
+    DiracFrame *ref_frames[MAX_REFERENCE_FRAMES+1];  /* NULL-terminated list */
+    DiracFrame *delay_frames[MAX_DELAY+1];           /* NULL-terminated list */
+    DiracFrame all_frames[MAX_FRAMES];
+} DiracContext;
+
+/**
+ * Dirac Specification ->
+ * Parse code values. 9.6.1 Table 9.1
+ */
+enum dirac_parse_code {
+    pc_seq_header         = 0x00,
+    pc_eos                = 0x10, /* end of sequence */
+    pc_aux_data           = 0x20,
+    pc_padding            = 0x30,
+};
+
+/* Subband orientations within each wavelet level (L/H = low/high pass). */
+enum dirac_subband {
+    subband_ll = 0,
+    subband_hl = 1,
+    subband_lh = 2,
+    subband_hh = 3,
+    subband_nb,
+};
+
+/* Default quantization matrices, indexed [wavelet_idx][level][orientation]. */
+static const uint8_t default_qmat[][4][4] = {
+    { { 5,  3,  3,  0}, { 0,  4,  4,  1}, { 0,  5,  5,  2}, { 0,  6,  6,  3} },
+    { { 4,  2,  2,  0}, { 0,  4,  4,  2}, { 0,  5,  5,  3}, { 0,  7,  7,  5} },
+    { { 5,  3,  3,  0}, { 0,  4,  4,  1}, { 0,  5,  5,  2}, { 0,  6,  6,  3} },
+    { { 8,  4,  4,  0}, { 0,  4,  4,  0}, { 0,  4,  4,  0}, { 0,  4,  4,  0} },
+    { { 8,  4,  4,  0}, { 0,  4,  4,  0}, { 0,  4,  4,  0}, { 0,  4,  4,  0} },
+    { { 0,  4,  4,  8}, { 0,  8,  8, 12}, { 0, 13, 13, 17}, { 0, 17, 17, 21} },
+    { { 3,  1,  1,  0}, { 0,  4,  4,  2}, { 0,  6,  6,  5}, { 0,  9,  9,  7} },
+};
+
+/* Dequantization factor per quantizer index (doubles every 8 steps). */
+static const int qscale_tab[MAX_QUANT+1] = {
+        4,     5,     6,     7,     8,    10,    11,    13,
+       16,    19,    23,    27,    32,    38,    45,    54,
+       64,    76,    91,   108,   128,   152,   181,   215,
+      256,   304,   362,   431,   512,   609,   724,   861,
+     1024,  1218,  1448,  1722,  2048,  2435,  2896,  3444,
+     4096,  4871,  5793,  6889,  8192,  9742, 11585, 13777,
+    16384, 19484, 23170, 27554, 32768, 38968, 46341, 55109,
+    65536, 77936
+};
+
+/* Reconstruction offsets added before the >>2 in coeff unpacking (intra). */
+static const int qoffset_intra_tab[MAX_QUANT+1] = {
+        1,     2,     3,     4,     4,     5,     6,     7,
+        8,    10,    12,    14,    16,    19,    23,    27,
+       32,    38,    46,    54,    64,    76,    91,   108,
+      128,   152,   181,   216,   256,   305,   362,   431,
+      512,   609,   724,   861,  1024,  1218,  1448,  1722,
+     2048,  2436,  2897,  3445,  4096,  4871,  5793,  6889,
+     8192,  9742, 11585, 13777, 16384, 19484, 23171, 27555,
+    32768, 38968
+};
+
+/* Reconstruction offsets added before the >>2 in coeff unpacking (inter). */
+static const int qoffset_inter_tab[MAX_QUANT+1] = {
+        1,     2,     2,     3,     3,     4,     4,     5,
+        6,     7,     9,    10,    12,    14,    17,    20,
+       24,    29,    34,    41,    48,    57,    68,    81,
+       96,   114,   136,   162,   192,   228,   272,   323,
+      384,   457,   543,   646,   768,   913,  1086,  1292,
+     1536,  1827,  2172,  2583,  3072,  3653,  4344,  5166,
+     6144,  7307,  8689, 10333, 12288, 14613, 17378, 20666,
+    24576, 29226
+};
+
+/* magic number division by 3 from schroedinger */
+/* magic number division by 3 from schroedinger: multiply by
+ * 21845/65536 ~= 1/3 with rounding, avoiding an integer division */
+static inline int divide3(int x)
+{
+    return ((x+1)*21845 + 10922) >> 16;
+}
+
+/* Remove (and return) the frame with the given picture number from a
+ * NULL-terminated frame list, compacting the list; NULL if not found. */
+static DiracFrame *remove_frame(DiracFrame *framelist[], int picnum)
+{
+    DiracFrame *remove_pic = NULL;
+    int i, remove_idx = -1;
+
+    for (i = 0; framelist[i]; i++)
+        if (framelist[i]->avframe->display_picture_number == picnum) {
+            remove_pic = framelist[i];
+            remove_idx = i;
+        }
+
+    if (remove_pic)
+        /* shift the tail down, including the terminating NULL */
+        for (i = remove_idx; framelist[i]; i++)
+            framelist[i] = framelist[i+1];
+
+    return remove_pic;
+}
+
+/* Append a frame to the first free slot of a frame list.
+ * Returns 0 on success, -1 if the list is already full. */
+static int add_frame(DiracFrame *framelist[], int maxframes, DiracFrame *frame)
+{
+    int i;
+    for (i = 0; i < maxframes; i++)
+        if (!framelist[i]) {
+            framelist[i] = frame;
+            return 0;
+        }
+    return -1;
+}
+
+/* Allocate per-sequence buffers (IDWT planes, superblock split map and
+ * block motion array) sized for the maximum supported configuration.
+ * Returns 0 or AVERROR(ENOMEM); partially allocated buffers are released
+ * later by free_sequence_buffers(). */
+static int alloc_sequence_buffers(DiracContext *s)
+{
+    int sbwidth  = DIVRNDUP(s->source.width,  4);
+    int sbheight = DIVRNDUP(s->source.height, 4);
+    int i, w, h, top_padding;
+
+    /* todo: think more about this / use or set Plane here */
+    for (i = 0; i < 3; i++) {
+        int max_xblen = MAX_BLOCKSIZE >> (i ? s->chroma_x_shift : 0);
+        int max_yblen = MAX_BLOCKSIZE >> (i ? s->chroma_y_shift : 0);
+        w = s->source.width  >> (i ? s->chroma_x_shift : 0);
+        h = s->source.height >> (i ? s->chroma_y_shift : 0);
+
+        /* we allocate the max we support here since num decompositions can
+         * change from frame to frame. Stride is aligned to 16 for SIMD, and
+         * 1<<MAX_DWT_LEVELS top padding to avoid if(y>0) in arith decoding
+         * MAX_BLOCKSIZE padding for MC: blocks can spill up to half of that
+         * on each side */
+        top_padding = FFMAX(1<<MAX_DWT_LEVELS, max_yblen/2);
+        w = FFALIGN(CALC_PADDING(w, MAX_DWT_LEVELS), 8); /* FIXME: Should this be 16 for SSE??? */
+        h = top_padding + CALC_PADDING(h, MAX_DWT_LEVELS) + max_yblen/2;
+
+        s->plane[i].idwt_buf_base = av_mallocz_array((w+max_xblen), h * sizeof(IDWTELEM));
+        s->plane[i].idwt_tmp      = av_malloc_array((w+16), sizeof(IDWTELEM));
+        /* skip the top padding so band pointers never index above the buffer */
+        s->plane[i].idwt_buf      = s->plane[i].idwt_buf_base + top_padding*w;
+        if (!s->plane[i].idwt_buf_base || !s->plane[i].idwt_tmp)
+            return AVERROR(ENOMEM);
+    }
+
+    /* fixme: allocate using real stride here */
+    s->sbsplit  = av_malloc_array(sbwidth, sbheight);
+    s->blmotion = av_malloc_array(sbwidth, sbheight * 16 * sizeof(*s->blmotion));
+
+    if (!s->sbsplit || !s->blmotion)
+        return AVERROR(ENOMEM);
+    return 0;
+}
+
+/* (Re)allocate the stride-dependent MC scratch buffers.  A no-op when the
+ * existing buffers are already wide enough; otherwise the old buffers are
+ * freed and larger ones allocated.  Returns 0 or AVERROR(ENOMEM). */
+static int alloc_buffers(DiracContext *s, int stride)
+{
+    int w = s->source.width;
+    int h = s->source.height;
+
+    av_assert0(stride >= w);
+    stride += 64;   /* headroom for edge emulation */
+
+    if (s->buffer_stride >= stride)
+        return 0;
+    s->buffer_stride = 0;
+
+    av_freep(&s->edge_emu_buffer_base);
+    memset(s->edge_emu_buffer, 0, sizeof(s->edge_emu_buffer));
+    av_freep(&s->mctmp);
+    av_freep(&s->mcscratch);
+
+    s->edge_emu_buffer_base = av_malloc_array(stride, MAX_BLOCKSIZE);
+
+    s->mctmp      = av_malloc_array((stride+MAX_BLOCKSIZE), (h+MAX_BLOCKSIZE) * sizeof(*s->mctmp));
+    s->mcscratch  = av_malloc_array(stride, MAX_BLOCKSIZE);
+
+    if (!s->edge_emu_buffer_base || !s->mctmp || !s->mcscratch)
+        return AVERROR(ENOMEM);
+
+    s->buffer_stride = stride;
+    return 0;
+}
+
+/* Release everything tied to the current sequence: frame data and hpel
+ * planes, the reference/delay lists, IDWT buffers and MC scratch space.
+ * Safe to call on a partially initialized context (av_freep on NULL). */
+static void free_sequence_buffers(DiracContext *s)
+{
+    int i, j, k;
+
+    for (i = 0; i < MAX_FRAMES; i++) {
+        if (s->all_frames[i].avframe->data[0]) {
+            av_frame_unref(s->all_frames[i].avframe);
+            memset(s->all_frames[i].interpolated, 0, sizeof(s->all_frames[i].interpolated));
+        }
+
+        /* index 0 is not an allocation of its own, start at 1 */
+        for (j = 0; j < 3; j++)
+            for (k = 1; k < 4; k++)
+                av_freep(&s->all_frames[i].hpel_base[j][k]);
+    }
+
+    memset(s->ref_frames, 0, sizeof(s->ref_frames));
+    memset(s->delay_frames, 0, sizeof(s->delay_frames));
+
+    for (i = 0; i < 3; i++) {
+        av_freep(&s->plane[i].idwt_buf_base);
+        av_freep(&s->plane[i].idwt_tmp);
+    }
+
+    s->buffer_stride = 0;
+    av_freep(&s->sbsplit);
+    av_freep(&s->blmotion);
+    av_freep(&s->edge_emu_buffer_base);
+
+    av_freep(&s->mctmp);
+    av_freep(&s->mcscratch);
+}
+
+/* Decoder init: set up DSP contexts and pre-allocate one AVFrame per pool
+ * slot, unwinding already-allocated frames on allocation failure. */
+static av_cold int dirac_decode_init(AVCodecContext *avctx)
+{
+    DiracContext *s = avctx->priv_data;
+    int i;
+
+    s->avctx = avctx;
+    s->frame_number = -1;
+
+    ff_diracdsp_init(&s->diracdsp);
+    ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
+    ff_videodsp_init(&s->vdsp, 8);
+
+    for (i = 0; i < MAX_FRAMES; i++) {
+        s->all_frames[i].avframe = av_frame_alloc();
+        if (!s->all_frames[i].avframe) {
+            while (i > 0)
+                av_frame_free(&s->all_frames[--i].avframe);
+            return AVERROR(ENOMEM);
+        }
+    }
+
+    return 0;
+}
+
+/* Flush callback: drop all sequence state so decoding restarts cleanly
+ * at the next sequence header (e.g. after a seek). */
+static void dirac_decode_flush(AVCodecContext *avctx)
+{
+    DiracContext *s = avctx->priv_data;
+    free_sequence_buffers(s);
+    s->seen_sequence_header = 0;
+    s->frame_number = -1;
+}
+
+/* Decoder close: flush sequence state, then free the AVFrame pool
+ * allocated in dirac_decode_init(). */
+static av_cold int dirac_decode_end(AVCodecContext *avctx)
+{
+    DiracContext *s = avctx->priv_data;
+    int i;
+
+    dirac_decode_flush(avctx);
+    for (i = 0; i < MAX_FRAMES; i++)
+        av_frame_free(&s->all_frames[i].avframe);
+
+    return 0;
+}
+
+#define SIGN_CTX(x) (CTX_SIGN_ZERO + ((x) > 0) - ((x) < 0))
+
+/* Decode one arithmetic-coded coefficient at (x,y) of subband b into *buf.
+ * The context is chosen from the parent band and the left/top neighbours;
+ * the sign context is predicted from the neighbour appropriate for the
+ * band orientation. */
+static inline void coeff_unpack_arith(DiracArith *c, int qfactor, int qoffset,
+                                      SubBand *b, IDWTELEM *buf, int x, int y)
+{
+    int coeff, sign;
+    int sign_pred = 0;
+    int pred_ctx = CTX_ZPZN_F1;
+
+    /* Check if the parent subband has a 0 in the corresponding position */
+    if (b->parent)
+        pred_ctx += !!b->parent->ibuf[b->parent->stride * (y>>1) + (x>>1)] << 1;
+
+    if (b->orientation == subband_hl)
+        sign_pred = buf[-b->stride];
+
+    /* Determine if the pixel has only zeros in its neighbourhood */
+    if (x) {
+        pred_ctx += !(buf[-1] | buf[-b->stride] | buf[-1-b->stride]);
+        if (b->orientation == subband_lh)
+            sign_pred = buf[-1];
+    } else {
+        pred_ctx += !buf[-b->stride];
+    }
+
+    coeff = dirac_get_arith_uint(c, pred_ctx, CTX_COEFF_DATA);
+    if (coeff) {
+        /* dequantize, then apply the decoded sign via branchless negate */
+        coeff = (coeff * qfactor + qoffset + 2) >> 2;
+        sign  = dirac_get_arith_bit(c, SIGN_CTX(sign_pred));
+        coeff = (coeff ^ -sign) + sign;
+    }
+    *buf = coeff;
+}
+
+/* Decode one exp-golomb coded coefficient: unsigned magnitude, dequantize,
+ * then an explicit sign bit (1 = negative).  Returns the signed value. */
+static inline int coeff_unpack_golomb(GetBitContext *gb, int qfactor, int qoffset)
+{
+    int sign, coeff;
+
+    coeff = svq3_get_ue_golomb(gb);
+    if (coeff) {
+        coeff = (coeff * qfactor + qoffset + 2) >> 2;
+        sign  = get_bits1(gb);
+        coeff = (coeff ^ -sign) + sign;  /* branchless conditional negate */
+    }
+    return coeff;
+}
+
+/**
+ * Decode the coeffs in the rectangle defined by left, right, top, bottom
+ * [DIRAC_STD] 13.4.3.2 Codeblock unpacking loop. codeblock()
+ */
+/**
+ * Decode the coeffs in the rectangle defined by left, right, top, bottom
+ * [DIRAC_STD] 13.4.3.2 Codeblock unpacking loop. codeblock()
+ * Handles the zero-block flag, optional per-codeblock quantizer delta,
+ * then unpacks each coefficient with the arith or golomb decoder.
+ */
+static inline void codeblock(DiracContext *s, SubBand *b,
+                             GetBitContext *gb, DiracArith *c,
+                             int left, int right, int top, int bottom,
+                             int blockcnt_one, int is_arith)
+{
+    int x, y, zero_block;
+    int qoffset, qfactor;
+    IDWTELEM *buf;
+
+    /* check for any coded coefficients in this codeblock */
+    if (!blockcnt_one) {
+        if (is_arith)
+            zero_block = dirac_get_arith_bit(c, CTX_ZERO_BLOCK);
+        else
+            zero_block = get_bits1(gb);
+
+        if (zero_block)
+            return;
+    }
+
+    /* per-codeblock quant delta (skipped for old schroedinger streams
+     * when there is only one codeblock in the band) */
+    if (s->codeblock_mode && !(s->old_delta_quant && blockcnt_one)) {
+        int quant = b->quant;
+        if (is_arith)
+            quant += dirac_get_arith_int(c, CTX_DELTA_Q_F, CTX_DELTA_Q_DATA);
+        else
+            quant += dirac_get_se_golomb(gb);
+        if (quant < 0) {
+            av_log(s->avctx, AV_LOG_ERROR, "Invalid quant\n");
+            return;
+        }
+        b->quant = quant;
+    }
+
+    b->quant = FFMIN(b->quant, MAX_QUANT);
+
+    qfactor = qscale_tab[b->quant];
+    /* TODO: context pointer? */
+    if (!s->num_refs)
+        qoffset = qoffset_intra_tab[b->quant];
+    else
+        qoffset = qoffset_inter_tab[b->quant];
+
+    buf = b->ibuf + top * b->stride;
+    for (y = top; y < bottom; y++) {
+        for (x = left; x < right; x++) {
+            /* [DIRAC_STD] 13.4.4 Subband coefficients. coeff_unpack() */
+            if (is_arith)
+                coeff_unpack_arith(c, qfactor, qoffset, b, buf+x, x, y);
+            else
+                buf[x] = coeff_unpack_golomb(gb, qfactor, qoffset);
+        }
+        buf += b->stride;
+    }
+}
+
+/**
+ * Dirac Specification ->
+ * 13.3 intra_dc_prediction(band)
+ */
+/**
+ * Dirac Specification ->
+ * 13.3 intra_dc_prediction(band)
+ * Undo DC prediction on the LL band in-place: first row predicts from the
+ * left, subsequent rows from the mean (divide3) of left/top/top-left.
+ */
+static inline void intra_dc_prediction(SubBand *b)
+{
+    IDWTELEM *buf = b->ibuf;
+    int x, y;
+
+    /* first row: left-neighbour prediction only */
+    for (x = 1; x < b->width; x++)
+        buf[x] += buf[x-1];
+    buf += b->stride;
+
+    for (y = 1; y < b->height; y++) {
+        /* first column: top-neighbour prediction only */
+        buf[0] += buf[-b->stride];
+
+        for (x = 1; x < b->width; x++) {
+            int pred = buf[x - 1] + buf[x - b->stride] + buf[x - b->stride-1];
+            buf[x]  += divide3(pred);
+        }
+        buf += b->stride;
+    }
+}
+
+/**
+ * Dirac Specification ->
+ * 13.4.2 Non-skipped subbands. subband_coeffs()
+ */
+/**
+ * Dirac Specification ->
+ * 13.4.2 Non-skipped subbands.  subband_coeffs()
+ * Decode one subband: iterate its codeblock grid (boundaries computed by
+ * proportional division), then undo DC prediction for intra LL bands.
+ */
+static av_always_inline void decode_subband_internal(DiracContext *s, SubBand *b, int is_arith)
+{
+    int cb_x, cb_y, left, right, top, bottom;
+    DiracArith c;
+    GetBitContext gb;
+    int cb_width  = s->codeblock[b->level + (b->orientation != subband_ll)].width;
+    int cb_height = s->codeblock[b->level + (b->orientation != subband_ll)].height;
+    int blockcnt_one = (cb_width + cb_height) == 2;  /* single 1x1 codeblock */
+
+    if (!b->length)
+        return;
+
+    init_get_bits8(&gb, b->coeff_data, b->length);
+
+    if (is_arith)
+        ff_dirac_init_arith_decoder(&c, &gb, b->length);
+
+    top = 0;
+    for (cb_y = 0; cb_y < cb_height; cb_y++) {
+        bottom = (b->height * (cb_y+1LL)) / cb_height;
+        left = 0;
+        for (cb_x = 0; cb_x < cb_width; cb_x++) {
+            right = (b->width * (cb_x+1LL)) / cb_width;
+            codeblock(s, b, &gb, &c, left, right, top, bottom, blockcnt_one, is_arith);
+            left = right;
+        }
+        top = bottom;
+    }
+
+    if (b->orientation == subband_ll && s->num_refs == 0)
+        intra_dc_prediction(b);
+}
+
+/* avctx->execute() worker: arith-decode one subband (arg is a SubBand*). */
+static int decode_subband_arith(AVCodecContext *avctx, void *b)
+{
+    DiracContext *s = avctx->priv_data;
+    decode_subband_internal(s, b, 1);
+    return 0;
+}
+
+/* avctx->execute() worker: golomb-decode one subband (arg is a SubBand**,
+ * since the job array holds pointers into the bands list). */
+static int decode_subband_golomb(AVCodecContext *avctx, void *arg)
+{
+    DiracContext *s = avctx->priv_data;
+    SubBand **b     = arg;
+    decode_subband_internal(s, *b, 0);
+    return 0;
+}
+
+/**
+ * Dirac Specification ->
+ * [DIRAC_STD] 13.4.1 core_transform_data()
+ */
+/**
+ * Dirac Specification ->
+ * [DIRAC_STD] 13.4.1 core_transform_data()
+ * Parse the length/quant headers of every subband of one component,
+ * then dispatch the coefficient decoding to worker threads.
+ */
+static void decode_component(DiracContext *s, int comp)
+{
+    AVCodecContext *avctx = s->avctx;
+    SubBand *bands[3*MAX_DWT_LEVELS+1];
+    enum dirac_subband orientation;
+    int level, num_bands = 0;
+
+    /* Unpack all subbands at all levels. */
+    for (level = 0; level < s->wavelet_depth; level++) {
+        for (orientation = !!level; orientation < 4; orientation++) {
+            SubBand *b = &s->plane[comp].band[level][orientation];
+            bands[num_bands++] = b;
+
+            align_get_bits(&s->gb);
+            /* [DIRAC_STD] 13.4.2 subband() */
+            b->length = svq3_get_ue_golomb(&s->gb);
+            if (b->length) {
+                b->quant = svq3_get_ue_golomb(&s->gb);
+                align_get_bits(&s->gb);
+                b->coeff_data = s->gb.buffer + get_bits_count(&s->gb)/8;
+                /* clamp to the remaining input so workers cannot overread */
+                b->length = FFMIN(b->length, FFMAX(get_bits_left(&s->gb)/8, 0));
+                skip_bits_long(&s->gb, b->length*8);
+            }
+        }
+        /* arithmetic coding has inter-level dependencies, so we can only execute one level at a time */
+        if (s->is_arith)
+            avctx->execute(avctx, decode_subband_arith, &s->plane[comp].band[level][!!level],
+                           NULL, 4-!!level, sizeof(SubBand));
+    }
+    /* golomb coding has no inter-level dependencies, so we can execute all subbands in parallel */
+    if (!s->is_arith)
+        avctx->execute(avctx, decode_subband_golomb, bands, NULL, num_bands, sizeof(SubBand*));
+}
+
+/* [DIRAC_STD] 13.5.5.2 Luma slice subband data. luma_slice_band(level,orient,sx,sy) --> if b2 == NULL */
+/* [DIRAC_STD] 13.5.5.3 Chroma slice subband data. chroma_slice_band(level,orient,sx,sy) --> if b2 != NULL */
+static void lowdelay_subband(DiracContext *s, GetBitContext *gb, int quant,
+ int slice_x, int slice_y, int bits_end,
+ SubBand *b1, SubBand *b2)
+{
+ int left = b1->width * slice_x / s->lowdelay.num_x;
+ int right = b1->width *(slice_x+1) / s->lowdelay.num_x;
+ int top = b1->height * slice_y / s->lowdelay.num_y;
+ int bottom = b1->height *(slice_y+1) / s->lowdelay.num_y;
+
+ int qfactor = qscale_tab[FFMIN(quant, MAX_QUANT)];
+ int qoffset = qoffset_intra_tab[FFMIN(quant, MAX_QUANT)];
+
+ IDWTELEM *buf1 = b1->ibuf + top * b1->stride;
+ IDWTELEM *buf2 = b2 ? b2->ibuf + top * b2->stride : NULL;
+ int x, y;
+ /* we have to constantly check for overread since the spec explicitly
+ requires this, with the meaning that all remaining coeffs are set to 0 */
+ if (get_bits_count(gb) >= bits_end)
+ return;
+
+ for (y = top; y < bottom; y++) {
+ for (x = left; x < right; x++) {
+ buf1[x] = coeff_unpack_golomb(gb, qfactor, qoffset);
+ if (get_bits_count(gb) >= bits_end)
+ return;
+ if (buf2) {
+ buf2[x] = coeff_unpack_golomb(gb, qfactor, qoffset);
+ if (get_bits_count(gb) >= bits_end)
+ return;
+ }
+ }
+ buf1 += b1->stride;
+ if (buf2)
+ buf2 += b2->stride;
+ }
+}
+
+/* Per-slice job descriptor handed to the execute() worker threads. */
+struct lowdelay_slice {
+    GetBitContext gb;  /* reader positioned at this slice's data */
+    int slice_x;
+    int slice_y;
+    int bytes;         /* coded size of this slice */
+};
+
+
+/**
+ * Dirac Specification ->
+ * 13.5.2 Slices. slice(sx,sy)
+ */
+/**
+ * Dirac Specification ->
+ * 13.5.2 Slices. slice(sx,sy)
+ * Decode one low-delay slice: the base quantizer, the luma section
+ * (length-prefixed), then the interleaved chroma section.
+ */
+static int decode_lowdelay_slice(AVCodecContext *avctx, void *arg)
+{
+    DiracContext *s = avctx->priv_data;
+    struct lowdelay_slice *slice = arg;
+    GetBitContext *gb = &slice->gb;
+    enum dirac_subband orientation;
+    int level, quant, chroma_bits, chroma_end;
+
+    int quant_base  = get_bits(gb, 7); /*[DIRAC_STD] qindex */
+    int length_bits = av_log2(8 * slice->bytes)+1;
+    int luma_bits   = get_bits_long(gb, length_bits);
+    int luma_end    = get_bits_count(gb) + FFMIN(luma_bits, get_bits_left(gb));
+
+    /* [DIRAC_STD] 13.5.5.2 luma_slice_band */
+    for (level = 0; level < s->wavelet_depth; level++)
+        for (orientation = !!level; orientation < 4; orientation++) {
+            quant = FFMAX(quant_base - s->lowdelay.quant[level][orientation], 0);
+            lowdelay_subband(s, gb, quant, slice->slice_x, slice->slice_y, luma_end,
+                             &s->plane[0].band[level][orientation], NULL);
+        }
+
+    /* consume any unused bits from luma: reposition the reader exactly at
+     * the end of the luma section (skip_bits_long handles a negative count,
+     * i.e. seeking backwards, if luma overread past its budget) */
+    skip_bits_long(gb, luma_end - get_bits_count(gb));
+
+    chroma_bits = 8*slice->bytes - 7 - length_bits - luma_bits;
+    chroma_end  = get_bits_count(gb) + FFMIN(chroma_bits, get_bits_left(gb));
+    /* [DIRAC_STD] 13.5.5.3 chroma_slice_band */
+    for (level = 0; level < s->wavelet_depth; level++)
+        for (orientation = !!level; orientation < 4; orientation++) {
+            quant = FFMAX(quant_base - s->lowdelay.quant[level][orientation], 0);
+            lowdelay_subband(s, gb, quant, slice->slice_x, slice->slice_y, chroma_end,
+                             &s->plane[1].band[level][orientation],
+                             &s->plane[2].band[level][orientation]);
+        }
+
+    return 0;
+}
+
+/**
+ * Dirac Specification ->
+ * 13.5.1 low_delay_transform_data()
+ */
+/**
+ * Dirac Specification ->
+ * 13.5.1 low_delay_transform_data()
+ * Partition the remaining bitstream into num_x * num_y slices (sizes
+ * derived from the rational bytes-per-slice), decode them in parallel,
+ * then undo DC prediction on the LL band of each plane.
+ */
+static int decode_lowdelay(DiracContext *s)
+{
+    AVCodecContext *avctx = s->avctx;
+    int slice_x, slice_y, bytes, bufsize;
+    const uint8_t *buf;
+    struct lowdelay_slice *slices;
+    int slice_num = 0;
+
+    slices = av_mallocz_array(s->lowdelay.num_x, s->lowdelay.num_y * sizeof(struct lowdelay_slice));
+    if (!slices)
+        return AVERROR(ENOMEM);
+
+    align_get_bits(&s->gb);
+    /*[DIRAC_STD] 13.5.2 Slices. slice(sx,sy) */
+    buf = s->gb.buffer + get_bits_count(&s->gb)/8;
+    bufsize = get_bits_left(&s->gb);
+
+    for (slice_y = 0; bufsize > 0 && slice_y < s->lowdelay.num_y; slice_y++)
+        for (slice_x = 0; bufsize > 0 && slice_x < s->lowdelay.num_x; slice_x++) {
+            /* cumulative rounding keeps total bytes exact despite the
+             * rational average slice size */
+            bytes = (slice_num+1) * s->lowdelay.bytes.num / s->lowdelay.bytes.den
+                - slice_num    * s->lowdelay.bytes.num / s->lowdelay.bytes.den;
+
+            slices[slice_num].bytes   = bytes;
+            slices[slice_num].slice_x = slice_x;
+            slices[slice_num].slice_y = slice_y;
+            init_get_bits(&slices[slice_num].gb, buf, bufsize);
+            slice_num++;
+
+            buf += bytes;
+            if (bufsize/8 >= bytes)
+                bufsize -= bytes*8;  /* bufsize is in bits */
+            else
+                bufsize = 0;
+        }
+
+    avctx->execute(avctx, decode_lowdelay_slice, slices, NULL, slice_num,
+                   sizeof(struct lowdelay_slice)); /* [DIRAC_STD] 13.5.2 Slices */
+    intra_dc_prediction(&s->plane[0].band[0][0]); /* [DIRAC_STD] 13.3 intra_dc_prediction() */
+    intra_dc_prediction(&s->plane[1].band[0][0]); /* [DIRAC_STD] 13.3 intra_dc_prediction() */
+    intra_dc_prediction(&s->plane[2].band[0][0]); /* [DIRAC_STD] 13.3 intra_dc_prediction() */
+    av_free(slices);
+    return 0;
+}
+
+/* Set up plane dimensions and wire every SubBand to its slot inside the
+ * IDWT buffer for the current wavelet depth; derive chroma block geometry
+ * from luma using the chroma shifts. */
+static void init_planes(DiracContext *s)
+{
+    int i, w, h, level, orientation;
+
+    for (i = 0; i < 3; i++) {
+        Plane *p = &s->plane[i];
+
+        p->width       = s->source.width  >> (i ? s->chroma_x_shift : 0);
+        p->height      = s->source.height >> (i ? s->chroma_y_shift : 0);
+        p->idwt_width  = w = CALC_PADDING(p->width , s->wavelet_depth);
+        p->idwt_height = h = CALC_PADDING(p->height, s->wavelet_depth);
+        p->idwt_stride = FFALIGN(p->idwt_width, 8);
+
+        for (level = s->wavelet_depth-1; level >= 0; level--) {
+            w = w>>1;
+            h = h>>1;
+            for (orientation = !!level; orientation < 4; orientation++) {
+                SubBand *b = &p->band[level][orientation];
+
+                b->ibuf   = p->idwt_buf;
+                b->level  = level;
+                b->stride = p->idwt_stride << (s->wavelet_depth - level);
+                b->width  = w;
+                b->height = h;
+                b->orientation = orientation;
+
+                /* H* bands sit half a row/column into the interleaved buffer */
+                if (orientation & 1)
+                    b->ibuf += w;
+                if (orientation > 1)
+                    b->ibuf += b->stride>>1;
+
+                if (level)
+                    b->parent = &p->band[level-1][orientation];
+            }
+        }
+
+        if (i > 0) {
+            p->xblen = s->plane[0].xblen >> s->chroma_x_shift;
+            p->yblen = s->plane[0].yblen >> s->chroma_y_shift;
+            p->xbsep = s->plane[0].xbsep >> s->chroma_x_shift;
+            p->ybsep = s->plane[0].ybsep >> s->chroma_y_shift;
+        }
+
+        p->xoffset = (p->xblen - p->xbsep)/2;
+        p->yoffset = (p->yblen - p->ybsep)/2;
+    }
+}
+
+/**
+ * Unpack the motion compensation parameters
+ * Dirac Specification ->
+ * 11.2 Picture prediction data. picture_prediction()
+ */
+/**
+ * Unpack the motion compensation parameters
+ * Dirac Specification ->
+ * 11.2 Picture prediction data. picture_prediction()
+ * Reads block geometry (explicit or preset), MV precision, optional global
+ * motion parameters per reference, the prediction mode, and the reference
+ * picture weights.  Returns 0 or a negative AVERROR on invalid data.
+ */
+static int dirac_unpack_prediction_parameters(DiracContext *s)
+{
+    static const uint8_t default_blen[] = { 4, 12, 16, 24 };
+
+    GetBitContext *gb = &s->gb;
+    unsigned idx, ref;
+
+    align_get_bits(gb);
+    /* [DIRAC_STD] 11.2.2 Block parameters. block_parameters() */
+    /* Luma and Chroma are equal. 11.2.3 */
+    idx = svq3_get_ue_golomb(gb); /* [DIRAC_STD] index */
+
+    if (idx > 4) {
+        av_log(s->avctx, AV_LOG_ERROR, "Block prediction index too high\n");
+        return AVERROR_INVALIDDATA;
+    }
+
+    if (idx == 0) {
+        /* index 0: fully explicit block geometry */
+        s->plane[0].xblen = svq3_get_ue_golomb(gb);
+        s->plane[0].yblen = svq3_get_ue_golomb(gb);
+        s->plane[0].xbsep = svq3_get_ue_golomb(gb);
+        s->plane[0].ybsep = svq3_get_ue_golomb(gb);
+    } else {
+        /*[DIRAC_STD] preset_block_params(index). Table 11.1 */
+        s->plane[0].xblen = default_blen[idx-1];
+        s->plane[0].yblen = default_blen[idx-1];
+        s->plane[0].xbsep = 4 * idx;
+        s->plane[0].ybsep = 4 * idx;
+    }
+    /*[DIRAC_STD] 11.2.4 motion_data_dimensions()
+      Calculated in function dirac_unpack_block_motion_data */
+
+    /* block sizes must divide evenly after the chroma shift */
+    if (s->plane[0].xblen % (1 << s->chroma_x_shift) != 0 ||
+        s->plane[0].yblen % (1 << s->chroma_y_shift) != 0 ||
+        !s->plane[0].xblen || !s->plane[0].yblen) {
+        av_log(s->avctx, AV_LOG_ERROR,
+               "invalid x/y block length (%d/%d) for x/y chroma shift (%d/%d)\n",
+               s->plane[0].xblen, s->plane[0].yblen, s->chroma_x_shift, s->chroma_y_shift);
+        return AVERROR_INVALIDDATA;
+    }
+    if (!s->plane[0].xbsep || !s->plane[0].ybsep || s->plane[0].xbsep < s->plane[0].xblen/2 || s->plane[0].ybsep < s->plane[0].yblen/2) {
+        av_log(s->avctx, AV_LOG_ERROR, "Block separation too small\n");
+        return AVERROR_INVALIDDATA;
+    }
+    if (s->plane[0].xbsep > s->plane[0].xblen || s->plane[0].ybsep > s->plane[0].yblen) {
+        av_log(s->avctx, AV_LOG_ERROR, "Block separation greater than size\n");
+        return AVERROR_INVALIDDATA;
+    }
+    if (FFMAX(s->plane[0].xblen, s->plane[0].yblen) > MAX_BLOCKSIZE) {
+        av_log(s->avctx, AV_LOG_ERROR, "Unsupported large block size\n");
+        return AVERROR_PATCHWELCOME;
+    }
+
+    /*[DIRAC_STD] 11.2.5 Motion vector precision. motion_vector_precision()
+      Read motion vector precision */
+    s->mv_precision = svq3_get_ue_golomb(gb);
+    if (s->mv_precision > 3) {
+        av_log(s->avctx, AV_LOG_ERROR, "MV precision finer than eighth-pel\n");
+        return AVERROR_INVALIDDATA;
+    }
+
+    /*[DIRAC_STD] 11.2.6 Global motion. global_motion()
+      Read the global motion compensation parameters */
+    s->globalmc_flag = get_bits1(gb);
+    if (s->globalmc_flag) {
+        memset(s->globalmc, 0, sizeof(s->globalmc));
+        /* [DIRAC_STD] pan_tilt(gparams) */
+        for (ref = 0; ref < s->num_refs; ref++) {
+            if (get_bits1(gb)) {
+                s->globalmc[ref].pan_tilt[0] = dirac_get_se_golomb(gb);
+                s->globalmc[ref].pan_tilt[1] = dirac_get_se_golomb(gb);
+            }
+            /* [DIRAC_STD] zoom_rotate_shear(gparams)
+               zoom/rotation/shear parameters */
+            if (get_bits1(gb)) {
+                s->globalmc[ref].zrs_exp   = svq3_get_ue_golomb(gb);
+                s->globalmc[ref].zrs[0][0] = dirac_get_se_golomb(gb);
+                s->globalmc[ref].zrs[0][1] = dirac_get_se_golomb(gb);
+                s->globalmc[ref].zrs[1][0] = dirac_get_se_golomb(gb);
+                s->globalmc[ref].zrs[1][1] = dirac_get_se_golomb(gb);
+            } else {
+                /* identity zoom/rotate/shear matrix */
+                s->globalmc[ref].zrs[0][0] = 1;
+                s->globalmc[ref].zrs[1][1] = 1;
+            }
+            /* [DIRAC_STD] perspective(gparams) */
+            if (get_bits1(gb)) {
+                s->globalmc[ref].perspective_exp = svq3_get_ue_golomb(gb);
+                s->globalmc[ref].perspective[0]  = dirac_get_se_golomb(gb);
+                s->globalmc[ref].perspective[1]  = dirac_get_se_golomb(gb);
+            }
+        }
+    }
+
+    /*[DIRAC_STD] 11.2.7 Picture prediction mode. prediction_mode()
+      Picture prediction mode, not currently used. */
+    if (svq3_get_ue_golomb(gb)) {
+        av_log(s->avctx, AV_LOG_ERROR, "Unknown picture prediction mode\n");
+        return AVERROR_INVALIDDATA;
+    }
+
+    /* [DIRAC_STD] 11.2.8 Reference picture weight. reference_picture_weights()
+       just data read, weight calculation will be done later on. */
+    s->weight_log2denom = 1;
+    s->weight[0]        = 1;
+    s->weight[1]        = 1;
+
+    if (get_bits1(gb)) {
+        s->weight_log2denom = svq3_get_ue_golomb(gb);
+        s->weight[0] = dirac_get_se_golomb(gb);
+        if (s->num_refs == 2)
+            s->weight[1] = dirac_get_se_golomb(gb);
+    }
+    return 0;
+}
+
/**
 * Dirac Specification ->
 * 11.3 Wavelet transform data. wavelet_transform()
 *
 * Parses the transform parameters: wavelet filter index, decomposition
 * depth, and then either codeblock sizes (core syntax) or slice and
 * quantisation-matrix parameters (low-delay syntax).
 *
 * @return 0 on success, a negative AVERROR code on invalid data
 */
static int dirac_unpack_idwt_params(DiracContext *s)
{
    GetBitContext *gb = &s->gb;
    int i, level;
    unsigned tmp;

/* Read one unsigned Golomb value into tmp, range-check it with cond,
   and store it into dst; returns AVERROR_INVALIDDATA on failure. */
#define CHECKEDREAD(dst, cond, errmsg) \
    tmp = svq3_get_ue_golomb(gb); \
    if (cond) { \
        av_log(s->avctx, AV_LOG_ERROR, errmsg); \
        return AVERROR_INVALIDDATA; \
    }\
    dst = tmp;

    align_get_bits(gb);

    /* inter pictures may signal a zero residual: nothing further to read */
    s->zero_res = s->num_refs ? get_bits1(gb) : 0;
    if (s->zero_res)
        return 0;

    /*[DIRAC_STD] 11.3.1 Transform parameters. transform_parameters() */
    CHECKEDREAD(s->wavelet_idx, tmp > 6, "wavelet_idx is too big\n")

    CHECKEDREAD(s->wavelet_depth, tmp > MAX_DWT_LEVELS || tmp < 1, "invalid number of DWT decompositions\n")

    if (!s->low_delay) {
        /* Codeblock parameters (core syntax only) */
        if (get_bits1(gb)) {
            /* explicit codeblock dimensions per decomposition level */
            for (i = 0; i <= s->wavelet_depth; i++) {
                CHECKEDREAD(s->codeblock[i].width , tmp < 1 || tmp > (s->avctx->width >>s->wavelet_depth-i), "codeblock width invalid\n")
                CHECKEDREAD(s->codeblock[i].height, tmp < 1 || tmp > (s->avctx->height>>s->wavelet_depth-i), "codeblock height invalid\n")
            }

            CHECKEDREAD(s->codeblock_mode, tmp > 1, "unknown codeblock mode\n")
        } else
            /* default: one codeblock per subband */
            for (i = 0; i <= s->wavelet_depth; i++)
                s->codeblock[i].width = s->codeblock[i].height = 1;
    } else {
        /* Slice parameters + quantization matrix*/
        /*[DIRAC_STD] 11.3.4 Slice coding Parameters (low delay syntax only). slice_parameters() */
        s->lowdelay.num_x = svq3_get_ue_golomb(gb);
        s->lowdelay.num_y = svq3_get_ue_golomb(gb);
        s->lowdelay.bytes.num = svq3_get_ue_golomb(gb);
        s->lowdelay.bytes.den = svq3_get_ue_golomb(gb);

        /* bytes.den is used as a divisor later; reject zero/negative */
        if (s->lowdelay.bytes.den <= 0) {
            av_log(s->avctx,AV_LOG_ERROR,"Invalid lowdelay.bytes.den\n");
            return AVERROR_INVALIDDATA;
        }

        /* [DIRAC_STD] 11.3.5 Quantisation matrices (low-delay syntax). quant_matrix() */
        if (get_bits1(gb)) {
            av_log(s->avctx,AV_LOG_DEBUG,"Low Delay: Has Custom Quantization Matrix!\n");
            /* custom quantization matrix */
            s->lowdelay.quant[0][0] = svq3_get_ue_golomb(gb);
            for (level = 0; level < s->wavelet_depth; level++) {
                s->lowdelay.quant[level][1] = svq3_get_ue_golomb(gb);
                s->lowdelay.quant[level][2] = svq3_get_ue_golomb(gb);
                s->lowdelay.quant[level][3] = svq3_get_ue_golomb(gb);
            }
        } else {
            /* default tables only cover depths up to 4 */
            if (s->wavelet_depth > 4) {
                av_log(s->avctx,AV_LOG_ERROR,"Mandatory custom low delay matrix missing for depth %d\n", s->wavelet_depth);
                return AVERROR_INVALIDDATA;
            }
            /* default quantization matrix */
            for (level = 0; level < s->wavelet_depth; level++)
                for (i = 0; i < 4; i++) {
                    s->lowdelay.quant[level][i] = default_qmat[s->wavelet_idx][level][i];
                    /* haar with no shift differs for different depths */
                    if (s->wavelet_idx == 3)
                        s->lowdelay.quant[level][i] += 4*(s->wavelet_depth-1 - level);
                }
        }
    }
    return 0;
}
+
/**
 * Predict the superblock split mode of the superblock at (x, y) from its
 * already-decoded causal neighbours (left, above, above-left).
 * With two or three neighbours available, the rounded average of their
 * split values (each 0..2) is taken via a small lookup table.
 */
static inline int pred_sbsplit(uint8_t *sbsplit, int stride, int x, int y)
{
    /* avgsplit[sum] == rounded average of three split values totalling sum */
    static const uint8_t avgsplit[7] = { 0, 0, 1, 1, 1, 2, 2 };
    int left, above, aboveleft;

    if (x == 0 && y == 0)
        return 0;                /* no neighbours decoded yet */
    if (y == 0)
        return sbsplit[-1];      /* top row: only the left neighbour */
    if (x == 0)
        return sbsplit[-stride]; /* left column: only the neighbour above */

    left      = sbsplit[-1];
    above     = sbsplit[-stride];
    aboveleft = sbsplit[-stride - 1];
    return avgsplit[left + above + aboveleft];
}
+
+static inline int pred_block_mode(DiracBlock *block, int stride, int x, int y, int refmask)
+{
+ int pred;
+
+ if (!(x|y))
+ return 0;
+ else if (!y)
+ return block[-1].ref & refmask;
+ else if (!x)
+ return block[-stride].ref & refmask;
+
+ /* return the majority */
+ pred = (block[-1].ref & refmask) + (block[-stride].ref & refmask) + (block[-stride-1].ref & refmask);
+ return (pred >> 1) & refmask;
+}
+
/**
 * Predict the per-component DC values of an intra block from its causal
 * neighbours. Only neighbours that are themselves DC-coded (ref & 3 == 0)
 * contribute; the sum is averaged with rounding (>>1 for two contributors,
 * divide3() for three).
 */
static inline void pred_block_dc(DiracBlock *block, int stride, int x, int y)
{
    int i, n = 0; /* n counts the contributing neighbours */

    memset(block->u.dc, 0, sizeof(block->u.dc));

    /* left neighbour */
    if (x && !(block[-1].ref & 3)) {
        for (i = 0; i < 3; i++)
            block->u.dc[i] += block[-1].u.dc[i];
        n++;
    }

    /* neighbour above */
    if (y && !(block[-stride].ref & 3)) {
        for (i = 0; i < 3; i++)
            block->u.dc[i] += block[-stride].u.dc[i];
        n++;
    }

    /* neighbour above-left */
    if (x && y && !(block[-1-stride].ref & 3)) {
        for (i = 0; i < 3; i++)
            block->u.dc[i] += block[-1-stride].u.dc[i];
        n++;
    }

    if (n == 2) {
        /* rounded average of two contributors */
        for (i = 0; i < 3; i++)
            block->u.dc[i] = (block->u.dc[i]+1)>>1;
    } else if (n == 3) {
        /* divide3() handles the three-contributor case */
        for (i = 0; i < 3; i++)
            block->u.dc[i] = divide3(block->u.dc[i]);
    }
}
+
/**
 * Predict the motion vector for reference 'ref' from causal neighbours
 * that use the same reference and are not global-MC blocks:
 * 0 neighbours -> zero MV, 1 -> copy, 2 -> rounded mean,
 * 3 -> component-wise median.
 */
static inline void pred_mv(DiracBlock *block, int stride, int x, int y, int ref)
{
    int16_t *pred[3];
    int refmask = ref+1;
    int mask = refmask | DIRAC_REF_MASK_GLOBAL; /* exclude gmc blocks */
    int n = 0;

    if (x && (block[-1].ref & mask) == refmask)
        pred[n++] = block[-1].u.mv[ref];

    if (y && (block[-stride].ref & mask) == refmask)
        pred[n++] = block[-stride].u.mv[ref];

    if (x && y && (block[-stride-1].ref & mask) == refmask)
        pred[n++] = block[-stride-1].u.mv[ref];

    switch (n) {
    case 0:
        block->u.mv[ref][0] = 0;
        block->u.mv[ref][1] = 0;
        break;
    case 1:
        block->u.mv[ref][0] = pred[0][0];
        block->u.mv[ref][1] = pred[0][1];
        break;
    case 2:
        /* rounded average of the two candidates */
        block->u.mv[ref][0] = (pred[0][0] + pred[1][0] + 1) >> 1;
        block->u.mv[ref][1] = (pred[0][1] + pred[1][1] + 1) >> 1;
        break;
    case 3:
        /* component-wise median of the three candidates */
        block->u.mv[ref][0] = mid_pred(pred[0][0], pred[1][0], pred[2][0]);
        block->u.mv[ref][1] = mid_pred(pred[0][1], pred[1][1], pred[2][1]);
        break;
    }
}
+
/**
 * Derive a block's motion vector from the global motion parameters for
 * reference 'ref' evaluated at pixel position (x, y): pan/tilt vector b,
 * zoom/rotate/shear matrix A (fixed-point exponent ez) and perspective
 * vector c (exponent ep). The result is rounded back to MV units by the
 * final shift.
 */
static void global_mv(DiracContext *s, DiracBlock *block, int x, int y, int ref)
{
    int ez = s->globalmc[ref].zrs_exp;
    int ep = s->globalmc[ref].perspective_exp;
    int (*A)[2] = s->globalmc[ref].zrs;
    int *b = s->globalmc[ref].pan_tilt;
    int *c = s->globalmc[ref].perspective;

    int m = (1<<ep) - (c[0]*x + c[1]*y);
    int mx = m * ((A[0][0] * x + A[0][1]*y) + (1<<ez) * b[0]);
    int my = m * ((A[1][0] * x + A[1][1]*y) + (1<<ez) * b[1]);

    block->u.mv[ref][0] = (mx + (1<<(ez+ep))) >> (ez+ep);
    block->u.mv[ref][1] = (my + (1<<(ez+ep))) >> (ez+ep);
}
+
/**
 * Decode the prediction parameters of one block: the reference mask
 * (intra / ref1 / ref2 / global) and then either DC values (intra) or
 * motion vectors, each arithmetic-coded as a residual against a spatial
 * prediction. The arith[] contexts are laid out as: 0 = mode bits,
 * 1..3 = DC per component, 4..7 = MV x/y per reference.
 */
static void decode_block_params(DiracContext *s, DiracArith arith[8], DiracBlock *block,
                                int stride, int x, int y)
{
    int i;

    /* prediction XOR coded bit gives the actual reference flag */
    block->ref = pred_block_mode(block, stride, x, y, DIRAC_REF_MASK_REF1);
    block->ref ^= dirac_get_arith_bit(arith, CTX_PMODE_REF1);

    if (s->num_refs == 2) {
        block->ref |= pred_block_mode(block, stride, x, y, DIRAC_REF_MASK_REF2);
        block->ref ^= dirac_get_arith_bit(arith, CTX_PMODE_REF2) << 1;
    }

    if (!block->ref) {
        /* intra block: decode DC residuals on top of the prediction */
        pred_block_dc(block, stride, x, y);
        for (i = 0; i < 3; i++)
            block->u.dc[i] += dirac_get_arith_int(arith+1+i, CTX_DC_F1, CTX_DC_DATA);
        return;
    }

    if (s->globalmc_flag) {
        block->ref |= pred_block_mode(block, stride, x, y, DIRAC_REF_MASK_GLOBAL);
        block->ref ^= dirac_get_arith_bit(arith, CTX_GLOBAL_BLOCK) << 2;
    }

    for (i = 0; i < s->num_refs; i++)
        if (block->ref & (i+1)) {
            if (block->ref & DIRAC_REF_MASK_GLOBAL) {
                /* MV fully determined by the global motion model */
                global_mv(s, block, x, y, i);
            } else {
                pred_mv(block, stride, x, y, i);
                block->u.mv[i][0] += dirac_get_arith_int(arith + 4 + 2 * i, CTX_MV_F1, CTX_MV_DATA);
                block->u.mv[i][1] += dirac_get_arith_int(arith + 5 + 2 * i, CTX_MV_F1, CTX_MV_DATA);
            }
        }
}
+
+/**
+ * Copies the current block to the other blocks covered by the current superblock split mode
+ */
+static void propagate_block_data(DiracBlock *block, int stride, int size)
+{
+ int x, y;
+ DiracBlock *dst = block;
+
+ for (x = 1; x < size; x++)
+ dst[x] = *block;
+
+ for (y = 1; y < size; y++) {
+ dst += stride;
+ for (x = 0; x < size; x++)
+ dst[x] = *block;
+ }
+}
+
/**
 * Dirac Specification ->
 * 12. Block motion data syntax
 *
 * Decodes the superblock split modes and then the per-block prediction
 * parameters for the whole picture.
 * @return 0 on success, AVERROR_INVALIDDATA on a bad split mode
 */
static int dirac_unpack_block_motion_data(DiracContext *s)
{
    GetBitContext *gb = &s->gb;
    uint8_t *sbsplit = s->sbsplit;
    int i, x, y, q, p;
    DiracArith arith[8];

    align_get_bits(gb);

    /* [DIRAC_STD] 11.2.4 and 12.2.1 Number of blocks and superblocks */
    s->sbwidth = DIVRNDUP(s->source.width, 4*s->plane[0].xbsep);
    s->sbheight = DIVRNDUP(s->source.height, 4*s->plane[0].ybsep);
    s->blwidth = 4 * s->sbwidth;
    s->blheight = 4 * s->sbheight;

    /* [DIRAC_STD] 12.3.1 Superblock splitting modes. superblock_split_modes()
       decode superblock split modes */
    ff_dirac_init_arith_decoder(arith, gb, svq3_get_ue_golomb(gb)); /* svq3_get_ue_golomb(gb) is the length */
    for (y = 0; y < s->sbheight; y++) {
        for (x = 0; x < s->sbwidth; x++) {
            unsigned int split = dirac_get_arith_uint(arith, CTX_SB_F1, CTX_SB_DATA);
            if (split > 2)
                return AVERROR_INVALIDDATA;
            /* coded value is a residual on the spatial prediction, mod 3 */
            sbsplit[x] = (split + pred_sbsplit(sbsplit+x, s->sbwidth, x, y)) % 3;
        }
        sbsplit += s->sbwidth;
    }

    /* setup arith decoding: one decoder for mode bits, one per DC
       component (1..3) and one per MV component (4..7) */
    ff_dirac_init_arith_decoder(arith, gb, svq3_get_ue_golomb(gb));
    for (i = 0; i < s->num_refs; i++) {
        ff_dirac_init_arith_decoder(arith + 4 + 2 * i, gb, svq3_get_ue_golomb(gb));
        ff_dirac_init_arith_decoder(arith + 5 + 2 * i, gb, svq3_get_ue_golomb(gb));
    }
    for (i = 0; i < 3; i++)
        ff_dirac_init_arith_decoder(arith+1+i, gb, svq3_get_ue_golomb(gb));

    for (y = 0; y < s->sbheight; y++)
        for (x = 0; x < s->sbwidth; x++) {
            /* split mode 0/1/2 -> 1/2/4 coded blocks per superblock side */
            int blkcnt = 1 << s->sbsplit[y * s->sbwidth + x];
            int step = 4 >> s->sbsplit[y * s->sbwidth + x];

            for (q = 0; q < blkcnt; q++)
                for (p = 0; p < blkcnt; p++) {
                    int bx = 4 * x + p*step;
                    int by = 4 * y + q*step;
                    DiracBlock *block = &s->blmotion[by*s->blwidth + bx];
                    decode_block_params(s, arith, block, s->blwidth, bx, by);
                    propagate_block_data(block, s->blwidth, step);
                }
        }

    return 0;
}
+
/* Rolloff ramp value for distance i into the overlap region of width
   2*offset (special-cased for offset == 1). */
static int obmc_rolloff(int i, int offset)
{
    if (offset == 1)
        return i ? 5 : 3;
    return 1 + (6 * i + offset - 1) / (2 * offset - 1);
}

/**
 * 1-D OBMC weight (scale 0..8) at position i in a block of length blen
 * whose ends overlap neighbouring blocks by 2*offset samples: the weight
 * ramps up over the leading overlap, is flat (8) in the middle, and ramps
 * down symmetrically over the trailing overlap.
 */
static int weight(int i, int blen, int offset)
{
    if (i < 2 * offset)
        return obmc_rolloff(i, offset);
    if (i > blen - 1 - 2 * offset)
        return obmc_rolloff(blen - 1 - i, offset);
    return 8;
}
+
/**
 * Fill one row of the OBMC weight table, scaled by the vertical weight wy.
 * 'left'/'right' flag picture-edge blocks whose outer half uses the full
 * weight (8) instead of the rolloff; the tail up to 'stride' is zeroed.
 */
static void init_obmc_weight_row(Plane *p, uint8_t *obmc_weight, int stride,
                                 int left, int right, int wy)
{
    int x;
    /* left-edge block: flat weight over the left half */
    for (x = 0; left && x < p->xblen >> 1; x++)
        obmc_weight[x] = wy*8;
    /* interior: horizontal rolloff (stops at the half point when 'right') */
    for (; x < p->xblen >> right; x++)
        obmc_weight[x] = wy*weight(x, p->xblen, p->xoffset);
    /* right-edge block: flat weight over the remainder */
    for (; x < p->xblen; x++)
        obmc_weight[x] = wy*8;
    /* clear the padding out to the table stride */
    for (; x < stride; x++)
        obmc_weight[x] = 0;
}
+
/**
 * Fill a full 2-D OBMC weight table as the product of a vertical weight
 * and the horizontal row weights. 'top'/'bottom' flag picture-edge rows
 * that use the flat weight (8) instead of the vertical rolloff.
 */
static void init_obmc_weight(Plane *p, uint8_t *obmc_weight, int stride,
                             int left, int right, int top, int bottom)
{
    int y;
    /* top-edge block: flat vertical weight over the top half */
    for (y = 0; top && y < p->yblen >> 1; y++) {
        init_obmc_weight_row(p, obmc_weight, stride, left, right, 8);
        obmc_weight += stride;
    }
    /* interior rows: vertical rolloff (stops at the half point when 'bottom') */
    for (; y < p->yblen >> bottom; y++) {
        int wy = weight(y, p->yblen, p->yoffset);
        init_obmc_weight_row(p, obmc_weight, stride, left, right, wy);
        obmc_weight += stride;
    }
    /* bottom-edge block: flat vertical weight over the remainder */
    for (; y < p->yblen; y++) {
        init_obmc_weight_row(p, obmc_weight, stride, left, right, 8);
        obmc_weight += stride;
    }
}
+
/**
 * (Re)build the three OBMC weight tables (left-edge, interior, right-edge)
 * for block row 'by'. Tables only differ for the first, second and last
 * rows, so interior rows reuse the previous tables.
 */
static void init_obmc_weights(DiracContext *s, Plane *p, int by)
{
    int top = !by;
    int bottom = by == s->blheight-1;

    /* don't bother re-initing for rows 2 to blheight-2, the weights don't change */
    if (top || bottom || by == 1) {
        init_obmc_weight(p, s->obmc_weight[0], MAX_BLOCKSIZE, 1, 0, top, bottom);
        init_obmc_weight(p, s->obmc_weight[1], MAX_BLOCKSIZE, 0, 0, top, bottom);
        init_obmc_weight(p, s->obmc_weight[2], MAX_BLOCKSIZE, 0, 1, top, bottom);
    }
}
+
/* Eighth-pel bilinear interpolation weights, indexed as [my&3][mx&3];
   each entry holds the weights of the four hpel planes (F, H, V, C),
   summing to 16. */
static const uint8_t epel_weights[4][4][4] = {
    {{ 16, 0, 0, 0 },
     { 12, 4, 0, 0 },
     { 8, 8, 0, 0 },
     { 4, 12, 0, 0 }},
    {{ 12, 0, 4, 0 },
     { 9, 3, 3, 1 },
     { 6, 6, 2, 2 },
     { 3, 9, 1, 3 }},
    {{ 8, 0, 8, 0 },
     { 6, 2, 6, 2 },
     { 4, 4, 4, 4 },
     { 2, 6, 2, 6 }},
    {{ 4, 0, 12, 0 },
     { 3, 1, 9, 3 },
     { 2, 2, 6, 6 },
     { 1, 3, 3, 9 }}
};
+
/**
 * For block x,y, determine which of the hpel planes to do bilinear
 * interpolation from and set src[] to the location in each hpel plane
 * to MC from.
 *
 * @return the index of the put_dirac_pixels_tab function to use
 *  0 for 1 plane (fpel,hpel), 1 for 2 planes (qpel), 2 for 4 planes (qpel), and 3 for epel
 */
static int mc_subpel(DiracContext *s, DiracBlock *block, const uint8_t *src[5],
                     int x, int y, int ref, int plane)
{
    Plane *p = &s->plane[plane];
    uint8_t **ref_hpel = s->ref_pics[ref]->hpel[plane];
    int motion_x = block->u.mv[ref][0];
    int motion_y = block->u.mv[ref][1];
    int mx, my, i, epel, nplanes = 0;

    /* chroma MVs are scaled down by the subsampling factors */
    if (plane) {
        motion_x >>= s->chroma_x_shift;
        motion_y >>= s->chroma_y_shift;
    }

    /* split the MV into integer part (motion_x/y) and subpel part (mx/my) */
    mx = motion_x & ~(-1U << s->mv_precision);
    my = motion_y & ~(-1U << s->mv_precision);
    motion_x >>= s->mv_precision;
    motion_y >>= s->mv_precision;
    /* normalize subpel coordinates to epel */
    /* TODO: template this function? */
    mx <<= 3 - s->mv_precision;
    my <<= 3 - s->mv_precision;

    x += motion_x;
    y += motion_y;
    epel = (mx|my)&1;

    /* hpel position */
    if (!((mx|my)&3)) {
        nplanes = 1;
        src[0] = ref_hpel[(my>>1)+(mx>>2)] + y*p->stride + x;
    } else {
        /* qpel or epel */
        nplanes = 4;
        for (i = 0; i < 4; i++)
            src[i] = ref_hpel[i] + y*p->stride + x;

        /* if we're interpolating in the right/bottom halves, adjust the planes as needed
           we increment x/y because the edge changes for half of the pixels */
        if (mx > 4) {
            src[0] += 1;
            src[2] += 1;
            x++;
        }
        if (my > 4) {
            src[0] += p->stride;
            src[1] += p->stride;
            y++;
        }

        /* hpel planes are:
           [0]: F [1]: H
           [2]: V [3]: C */
        if (!epel) {
            /* check if we really only need 2 planes since either mx or my is
               a hpel position. (epel weights of 0 handle this there) */
            if (!(mx&3)) {
                /* mx == 0: average [0] and [2]
                   mx == 4: average [1] and [3] */
                src[!mx] = src[2 + !!mx];
                nplanes = 2;
            } else if (!(my&3)) {
                src[0] = src[(my>>1) ];
                src[1] = src[(my>>1)+1];
                nplanes = 2;
            }
        } else {
            /* adjust the ordering if needed so the weights work */
            if (mx > 4) {
                FFSWAP(const uint8_t *, src[0], src[1]);
                FFSWAP(const uint8_t *, src[2], src[3]);
            }
            if (my > 4) {
                FFSWAP(const uint8_t *, src[0], src[2]);
                FFSWAP(const uint8_t *, src[1], src[3]);
            }
            /* src[4] carries the epel weight table, not a pixel pointer */
            src[4] = epel_weights[my&3][mx&3];
        }
    }

    /* fixme: v/h _edge_pos */
    /* replace out-of-picture reads with the emulated-edge buffers */
    if (x + p->xblen > p->width +EDGE_WIDTH/2 ||
        y + p->yblen > p->height+EDGE_WIDTH/2 ||
        x < 0 || y < 0) {
        for (i = 0; i < nplanes; i++) {
            s->vdsp.emulated_edge_mc(s->edge_emu_buffer[i], src[i],
                                     p->stride, p->stride,
                                     p->xblen, p->yblen, x, y,
                                     p->width+EDGE_WIDTH/2, p->height+EDGE_WIDTH/2);
            src[i] = s->edge_emu_buffer[i];
        }
    }
    return (nplanes>>1) + epel;
}
+
+static void add_dc(uint16_t *dst, int dc, int stride,
+ uint8_t *obmc_weight, int xblen, int yblen)
+{
+ int x, y;
+ dc += 128;
+
+ for (y = 0; y < yblen; y++) {
+ for (x = 0; x < xblen; x += 2) {
+ dst[x ] += dc * obmc_weight[x ];
+ dst[x+1] += dc * obmc_weight[x+1];
+ }
+ dst += stride;
+ obmc_weight += MAX_BLOCKSIZE;
+ }
+}
+
/**
 * Motion-compensate one block into the 16-bit accumulator mctmp, weighted
 * by the OBMC window: DC blocks are added directly; single-reference
 * blocks are (optionally) weighted; bi-predicted blocks are bi-weighted
 * or averaged.
 */
static void block_mc(DiracContext *s, DiracBlock *block,
                     uint16_t *mctmp, uint8_t *obmc_weight,
                     int plane, int dstx, int dsty)
{
    Plane *p = &s->plane[plane];
    const uint8_t *src[5];
    int idx;

    switch (block->ref&3) {
    case 0: /* DC */
        add_dc(mctmp, block->u.dc[plane], p->stride, obmc_weight, p->xblen, p->yblen);
        return;
    case 1:
    case 2:
        /* single reference (ref1 or ref2) */
        idx = mc_subpel(s, block, src, dstx, dsty, (block->ref&3)-1, plane);
        s->put_pixels_tab[idx](s->mcscratch, src, p->stride, p->yblen);
        if (s->weight_func)
            s->weight_func(s->mcscratch, p->stride, s->weight_log2denom,
                           s->weight[0] + s->weight[1], p->yblen);
        break;
    case 3:
        /* bi-prediction: MC from both references then combine */
        idx = mc_subpel(s, block, src, dstx, dsty, 0, plane);
        s->put_pixels_tab[idx](s->mcscratch, src, p->stride, p->yblen);
        idx = mc_subpel(s, block, src, dstx, dsty, 1, plane);
        if (s->biweight_func) {
            /* fixme: +32 is a quick hack */
            s->put_pixels_tab[idx](s->mcscratch + 32, src, p->stride, p->yblen);
            s->biweight_func(s->mcscratch, s->mcscratch+32, p->stride, s->weight_log2denom,
                             s->weight[0], s->weight[1], p->yblen);
        } else
            s->avg_pixels_tab[idx](s->mcscratch, src, p->stride, p->yblen);
        break;
    }
    /* fold the MC result into the accumulator through the OBMC window */
    s->add_obmc(mctmp, s->mcscratch, p->stride, obmc_weight, p->yblen);
}
+
/**
 * Motion-compensate one row of blocks. The first and last blocks of the
 * row use the edge OBMC weight tables (obmc_weight[0] / [2]); interior
 * blocks share obmc_weight[1].
 */
static void mc_row(DiracContext *s, DiracBlock *block, uint16_t *mctmp, int plane, int dsty)
{
    Plane *p = &s->plane[plane];
    int x, dstx = p->xbsep - p->xoffset;

    block_mc(s, block, mctmp, s->obmc_weight[0], plane, -p->xoffset, dsty);
    mctmp += p->xbsep;

    for (x = 1; x < s->blwidth-1; x++) {
        block_mc(s, block+x, mctmp, s->obmc_weight[1], plane, dstx, dsty);
        dstx += p->xbsep;
        mctmp += p->xbsep;
    }
    block_mc(s, block+x, mctmp, s->obmc_weight[2], plane, dstx, dsty);
}
+
/**
 * Select the put/avg/OBMC/weight DSP functions for the current block
 * width: idx 0 for xblen <= 8, 1 for <= 16, 2 otherwise. The weighting
 * functions are only installed when non-default weights are in use.
 * width/height parameters are currently unused.
 */
static void select_dsp_funcs(DiracContext *s, int width, int height, int xblen, int yblen)
{
    int idx = 0;
    if (xblen > 8)
        idx = 1;
    if (xblen > 16)
        idx = 2;

    memcpy(s->put_pixels_tab, s->diracdsp.put_dirac_pixels_tab[idx], sizeof(s->put_pixels_tab));
    memcpy(s->avg_pixels_tab, s->diracdsp.avg_dirac_pixels_tab[idx], sizeof(s->avg_pixels_tab));
    s->add_obmc = s->diracdsp.add_dirac_obmc[idx];
    if (s->weight_log2denom > 1 || s->weight[0] != 1 || s->weight[1] != 1) {
        s->weight_func = s->diracdsp.weight_dirac_pixels_tab[idx];
        s->biweight_func = s->diracdsp.biweight_dirac_pixels_tab[idx];
    } else {
        s->weight_func = NULL;
        s->biweight_func = NULL;
    }
}
+
/**
 * Prepare one plane of a reference frame for sub-pel motion compensation:
 * hpel[plane][0] aliases the frame data; planes 1..3 (H, V, C) are
 * allocated on demand and filled by the hpel filter, with edges drawn so
 * MC may read slightly outside the picture. Interpolation is skipped when
 * mv_precision is 0 (full-pel vectors only) or already done for this frame.
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
static int interpolate_refplane(DiracContext *s, DiracFrame *ref, int plane, int width, int height)
{
    /* chroma allocates an edge of 8 when subsampled
       which for 4:2:2 means an h edge of 16 and v edge of 8
       just use 8 for everything for the moment */
    int i, edge = EDGE_WIDTH/2;

    ref->hpel[plane][0] = ref->avframe->data[plane];
    s->mpvencdsp.draw_edges(ref->hpel[plane][0], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM); /* EDGE_TOP | EDGE_BOTTOM values just copied to make it build, this needs to be ensured */

    /* no need for hpel if we only have fpel vectors */
    if (!s->mv_precision)
        return 0;

    for (i = 1; i < 4; i++) {
        if (!ref->hpel_base[plane][i])
            ref->hpel_base[plane][i] = av_malloc((height+2*edge) * ref->avframe->linesize[plane] + 32);
        if (!ref->hpel_base[plane][i]) {
            return AVERROR(ENOMEM);
        }
        /* we need to be 16-byte aligned even for chroma */
        ref->hpel[plane][i] = ref->hpel_base[plane][i] + edge*ref->avframe->linesize[plane] + 16;
    }

    if (!ref->interpolated[plane]) {
        s->diracdsp.dirac_hpel_filter(ref->hpel[plane][1], ref->hpel[plane][2],
                                      ref->hpel[plane][3], ref->hpel[plane][0],
                                      ref->avframe->linesize[plane], width, height);
        s->mpvencdsp.draw_edges(ref->hpel[plane][1], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(ref->hpel[plane][2], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(ref->hpel[plane][3], ref->avframe->linesize[plane], width, height, edge, edge, EDGE_TOP | EDGE_BOTTOM);
    }
    ref->interpolated[plane] = 1;

    return 0;
}
+
/**
 * Dirac Specification ->
 * 13.0 Transform data syntax. transform_data()
 *
 * Decodes the wavelet coefficients for all three planes, runs the inverse
 * DWT slice by slice, and (for inter pictures) overlays the OBMC motion
 * compensation before writing the plane into the output frame.
 * @return 0 on success or a negative AVERROR code
 */
static int dirac_decode_frame_internal(DiracContext *s)
{
    DWTContext d;
    int y, i, comp, dsty;
    int ret;

    if (s->low_delay) {
        /* [DIRAC_STD] 13.5.1 low_delay_transform_data() */
        for (comp = 0; comp < 3; comp++) {
            Plane *p = &s->plane[comp];
            memset(p->idwt_buf, 0, p->idwt_stride * p->idwt_height * sizeof(IDWTELEM));
        }
        if (!s->zero_res) {
            if ((ret = decode_lowdelay(s)) < 0)
                return ret;
        }
    }

    for (comp = 0; comp < 3; comp++) {
        Plane *p = &s->plane[comp];
        uint8_t *frame = s->current_picture->avframe->data[comp];

        /* FIXME: small resolutions */
        for (i = 0; i < 4; i++)
            s->edge_emu_buffer[i] = s->edge_emu_buffer_base + i*FFALIGN(p->width, 16);

        if (!s->zero_res && !s->low_delay)
        {
            memset(p->idwt_buf, 0, p->idwt_stride * p->idwt_height * sizeof(IDWTELEM));
            decode_component(s, comp); /* [DIRAC_STD] 13.4.1 core_transform_data() */
        }
        ret = ff_spatial_idwt_init2(&d, p->idwt_buf, p->idwt_width, p->idwt_height, p->idwt_stride,
                                    s->wavelet_idx+2, s->wavelet_depth, p->idwt_tmp);
        if (ret < 0)
            return ret;

        if (!s->num_refs) { /* intra */
            /* inverse transform and write out 16-line slices */
            for (y = 0; y < p->height; y += 16) {
                ff_spatial_idwt_slice2(&d, y+16); /* decode */
                s->diracdsp.put_signed_rect_clamped(frame + y*p->stride, p->stride,
                                                    p->idwt_buf + y*p->idwt_stride, p->idwt_stride, p->width, 16);
            }
        } else { /* inter */
            int rowheight = p->ybsep*p->stride;

            select_dsp_funcs(s, p->width, p->height, p->xblen, p->yblen);

            for (i = 0; i < s->num_refs; i++) {
                int ret = interpolate_refplane(s, s->ref_pics[i], comp, p->width, p->height);
                if (ret < 0)
                    return ret;
            }

            /* clear the accumulator rows above the first block row */
            memset(s->mctmp, 0, 4*p->yoffset*p->stride);

            dsty = -p->yoffset;
            for (y = 0; y < s->blheight; y++) {
                int h = 0,
                    start = FFMAX(dsty, 0);
                uint16_t *mctmp = s->mctmp + y*rowheight;
                DiracBlock *blocks = s->blmotion + y*s->blwidth;

                init_obmc_weights(s, p, y);

                /* height of the output slice produced by this block row */
                if (y == s->blheight-1 || start+p->ybsep > p->height)
                    h = p->height - start;
                else
                    h = p->ybsep - (start - dsty);
                if (h < 0)
                    break;

                memset(mctmp+2*p->yoffset*p->stride, 0, 2*rowheight);
                mc_row(s, blocks, mctmp, comp, dsty);

                mctmp += (start - dsty)*p->stride + p->xoffset;
                ff_spatial_idwt_slice2(&d, start + h); /* decode */
                /* combine the IDWT residual with the MC prediction */
                s->diracdsp.add_rect_clamped(frame + start*p->stride, mctmp, p->stride,
                                             p->idwt_buf + start*p->idwt_stride, p->idwt_stride, p->width, h);

                dsty += p->ybsep;
            }
        }
    }


    return 0;
}
+
/**
 * Allocate a frame buffer enlarged by EDGE_WIDTH padding on each side so
 * that edge drawing and motion compensation may access pixels outside the
 * visible picture. The data[] pointers are advanced past the top padding
 * (+32 horizontally) and the visible dimensions are restored afterwards.
 */
static int get_buffer_with_edge(AVCodecContext *avctx, AVFrame *f, int flags)
{
    int ret, i;
    int chroma_x_shift, chroma_y_shift;
    avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_x_shift, &chroma_y_shift);

    f->width = avctx->width + 2 * EDGE_WIDTH;
    f->height = avctx->height + 2 * EDGE_WIDTH + 2;
    ret = ff_get_buffer(avctx, f, flags);
    if (ret < 0)
        return ret;

    for (i = 0; f->data[i]; i++) {
        /* chroma planes (1 and 2) are shifted by the vertical subsampling */
        int offset = (EDGE_WIDTH >> (i && i<3 ? chroma_y_shift : 0)) *
                     f->linesize[i] + 32;
        f->data[i] += offset;
    }
    f->width = avctx->width;
    f->height = avctx->height;

    return 0;
}
+
/**
 * Dirac Specification ->
 * 11.1.1 Picture Header. picture_header()
 *
 * Reads the picture number, resolves reference pictures, retires
 * references that are no longer needed, and parses the prediction and
 * wavelet-transform parameters.
 * @return 0 on success or a negative AVERROR code
 */
static int dirac_decode_picture_header(DiracContext *s)
{
    unsigned retire, picnum;
    int i, j, ret;
    int64_t refdist, refnum;
    GetBitContext *gb = &s->gb;

    /* [DIRAC_STD] 11.1.1 Picture Header. picture_header() PICTURE_NUM */
    picnum = s->current_picture->avframe->display_picture_number = get_bits_long(gb, 32);


    av_log(s->avctx,AV_LOG_DEBUG,"PICTURE_NUM: %d\n",picnum);

    /* if this is the first keyframe after a sequence header, start our
       reordering from here */
    if (s->frame_number < 0)
        s->frame_number = picnum;

    s->ref_pics[0] = s->ref_pics[1] = NULL;
    for (i = 0; i < s->num_refs; i++) {
        /* reference numbers are coded relative to the current picture */
        refnum = (picnum + dirac_get_se_golomb(gb)) & 0xFFFFFFFF;
        refdist = INT64_MAX;

        /* find the closest reference to the one we want */
        /* Jordi: this is needed if the referenced picture hasn't yet arrived */
        for (j = 0; j < MAX_REFERENCE_FRAMES && refdist; j++)
            if (s->ref_frames[j]
                && FFABS(s->ref_frames[j]->avframe->display_picture_number - refnum) < refdist) {
                s->ref_pics[i] = s->ref_frames[j];
                refdist = FFABS(s->ref_frames[j]->avframe->display_picture_number - refnum);
            }

        if (!s->ref_pics[i] || refdist)
            av_log(s->avctx, AV_LOG_DEBUG, "Reference not found\n");

        /* if there were no references at all, allocate one */
        if (!s->ref_pics[i])
            for (j = 0; j < MAX_FRAMES; j++)
                if (!s->all_frames[j].avframe->data[0]) {
                    s->ref_pics[i] = &s->all_frames[j];
                    get_buffer_with_edge(s->avctx, s->ref_pics[i]->avframe, AV_GET_BUFFER_FLAG_REF);
                    break;
                }

        if (!s->ref_pics[i]) {
            av_log(s->avctx, AV_LOG_ERROR, "Reference could not be allocated\n");
            return AVERROR_INVALIDDATA;
        }

    }

    /* retire the reference frames that are not used anymore */
    if (s->current_picture->avframe->reference) {
        retire = (picnum + dirac_get_se_golomb(gb)) & 0xFFFFFFFF;
        if (retire != picnum) {
            DiracFrame *retire_pic = remove_frame(s->ref_frames, retire);

            if (retire_pic)
                retire_pic->avframe->reference &= DELAYED_PIC_REF;
            else
                av_log(s->avctx, AV_LOG_DEBUG, "Frame to retire not found\n");
        }

        /* if reference array is full, remove the oldest as per the spec */
        while (add_frame(s->ref_frames, MAX_REFERENCE_FRAMES, s->current_picture)) {
            av_log(s->avctx, AV_LOG_ERROR, "Reference frame overflow\n");
            remove_frame(s->ref_frames, s->ref_frames[0]->avframe->display_picture_number)->avframe->reference &= DELAYED_PIC_REF;
        }
    }

    if (s->num_refs) {
        ret = dirac_unpack_prediction_parameters(s); /* [DIRAC_STD] 11.2 Picture Prediction Data. picture_prediction() */
        if (ret < 0)
            return ret;
        ret = dirac_unpack_block_motion_data(s); /* [DIRAC_STD] 12. Block motion data syntax */
        if (ret < 0)
            return ret;
    }
    ret = dirac_unpack_idwt_params(s); /* [DIRAC_STD] 11.3 Wavelet transform data */
    if (ret < 0)
        return ret;

    init_planes(s);
    return 0;
}
+
/**
 * Output the delayed picture with the lowest picture number and remove it
 * from the (NULL-terminated) delay list. Used to flush remaining pictures
 * at end of stream.
 * @return 0 on success or a negative error from av_frame_ref()
 */
static int get_delayed_pic(DiracContext *s, AVFrame *picture, int *got_frame)
{
    DiracFrame *out = s->delay_frames[0];
    int i, out_idx = 0;
    int ret;

    /* find frame with lowest picture number */
    for (i = 1; s->delay_frames[i]; i++)
        if (s->delay_frames[i]->avframe->display_picture_number < out->avframe->display_picture_number) {
            out = s->delay_frames[i];
            out_idx = i;
        }

    /* compact the list over the removed entry */
    for (i = out_idx; s->delay_frames[i]; i++)
        s->delay_frames[i] = s->delay_frames[i+1];

    if (out) {
        /* drop the delayed-picture reference flag before handing it out */
        out->avframe->reference ^= DELAYED_PIC_REF;
        *got_frame = 1;
        if((ret = av_frame_ref(picture, out->avframe)) < 0)
            return ret;
    }

    return 0;
}
+
+/**
+ * Dirac Specification ->
+ * 9.6 Parse Info Header Syntax. parse_info()
+ * 4 byte start code + byte parse code + 4 byte size + 4 byte previous size
+ */
+#define DATA_UNIT_HEADER_SIZE 13
+
+/* [DIRAC_STD] dirac_decode_data_unit makes reference to the while defined in 9.3
+ inside the function parse_sequence() */
+static int dirac_decode_data_unit(AVCodecContext *avctx, const uint8_t *buf, int size)
+{
+ DiracContext *s = avctx->priv_data;
+ DiracFrame *pic = NULL;
+ int ret, i, parse_code;
+ unsigned tmp;
+
+ if (size < DATA_UNIT_HEADER_SIZE)
+ return AVERROR_INVALIDDATA;
+
+ parse_code = buf[4];
+
+ init_get_bits(&s->gb, &buf[13], 8*(size - DATA_UNIT_HEADER_SIZE));
+
+ if (parse_code == pc_seq_header) {
+ if (s->seen_sequence_header)
+ return 0;
+
+ /* [DIRAC_STD] 10. Sequence header */
+ ret = avpriv_dirac_parse_sequence_header(avctx, &s->gb, &s->source);
+ if (ret < 0)
+ return ret;
+
+ avcodec_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);
+
+ ret = alloc_sequence_buffers(s);
+ if (ret < 0)
+ return ret;
+
+ s->seen_sequence_header = 1;
+ } else if (parse_code == pc_eos) { /* [DIRAC_STD] End of Sequence */
+ free_sequence_buffers(s);
+ s->seen_sequence_header = 0;
+ } else if (parse_code == pc_aux_data) {
+ if (buf[13] == 1) { /* encoder implementation/version */
+ int ver[3];
+ /* versions older than 1.0.8 don't store quant delta for
+ subbands with only one codeblock */
+ if (sscanf(buf+14, "Schroedinger %d.%d.%d", ver, ver+1, ver+2) == 3)
+ if (ver[0] == 1 && ver[1] == 0 && ver[2] <= 7)
+ s->old_delta_quant = 1;
+ }
+ } else if (parse_code & 0x8) { /* picture data unit */
+ if (!s->seen_sequence_header) {
+ av_log(avctx, AV_LOG_DEBUG, "Dropping frame without sequence header\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* find an unused frame */
+ for (i = 0; i < MAX_FRAMES; i++)
+ if (s->all_frames[i].avframe->data[0] == NULL)
+ pic = &s->all_frames[i];
+ if (!pic) {
+ av_log(avctx, AV_LOG_ERROR, "framelist full\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ av_frame_unref(pic->avframe);
+
+ /* [DIRAC_STD] Defined in 9.6.1 ... */
+ tmp = parse_code & 0x03; /* [DIRAC_STD] num_refs() */
+ if (tmp > 2) {
+ av_log(avctx, AV_LOG_ERROR, "num_refs of 3\n");
+ return AVERROR_INVALIDDATA;
+ }
+ s->num_refs = tmp;
+ s->is_arith = (parse_code & 0x48) == 0x08; /* [DIRAC_STD] using_ac() */
+ s->low_delay = (parse_code & 0x88) == 0x88; /* [DIRAC_STD] is_low_delay() */
+ pic->avframe->reference = (parse_code & 0x0C) == 0x0C; /* [DIRAC_STD] is_reference() */
+ pic->avframe->key_frame = s->num_refs == 0; /* [DIRAC_STD] is_intra() */
+ pic->avframe->pict_type = s->num_refs + 1; /* Definition of AVPictureType in avutil.h */
+
+ if ((ret = get_buffer_with_edge(avctx, pic->avframe, (parse_code & 0x0C) == 0x0C ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
+ return ret;
+ s->current_picture = pic;
+ s->plane[0].stride = pic->avframe->linesize[0];
+ s->plane[1].stride = pic->avframe->linesize[1];
+ s->plane[2].stride = pic->avframe->linesize[2];
+
+ if (alloc_buffers(s, FFMAX3(FFABS(s->plane[0].stride), FFABS(s->plane[1].stride), FFABS(s->plane[2].stride))) < 0)
+ return AVERROR(ENOMEM);
+
+ /* [DIRAC_STD] 11.1 Picture parse. picture_parse() */
+ ret = dirac_decode_picture_header(s);
+ if (ret < 0)
+ return ret;
+
+ /* [DIRAC_STD] 13.0 Transform data syntax. transform_data() */
+ ret = dirac_decode_frame_internal(s);
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
/**
 * Top-level decode entry point: scans the packet for "BBCD" parse-info
 * prefixes, decodes each data unit found, then handles picture reordering
 * (display order vs decode order). A zero-sized packet flushes one
 * delayed picture.
 * @return number of bytes consumed, or a negative error code
 */
static int dirac_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *pkt)
{
    DiracContext *s = avctx->priv_data;
    AVFrame *picture = data;
    uint8_t *buf = pkt->data;
    int buf_size = pkt->size;
    int i, buf_idx = 0;
    int ret;
    unsigned data_unit_size;

    /* release unused frames */
    for (i = 0; i < MAX_FRAMES; i++)
        if (s->all_frames[i].avframe->data[0] && !s->all_frames[i].avframe->reference) {
            av_frame_unref(s->all_frames[i].avframe);
            memset(s->all_frames[i].interpolated, 0, sizeof(s->all_frames[i].interpolated));
        }

    s->current_picture = NULL;
    *got_frame = 0;

    /* end of stream, so flush delayed pics */
    if (buf_size == 0)
        return get_delayed_pic(s, (AVFrame *)data, got_frame);

    for (;;) {
        /*[DIRAC_STD] Here starts the code from parse_info() defined in 9.6
          [DIRAC_STD] PARSE_INFO_PREFIX = "BBCD" as defined in ISO/IEC 646
          BBCD start code search */
        for (; buf_idx + DATA_UNIT_HEADER_SIZE < buf_size; buf_idx++) {
            if (buf[buf_idx ] == 'B' && buf[buf_idx+1] == 'B' &&
                buf[buf_idx+2] == 'C' && buf[buf_idx+3] == 'D')
                break;
        }
        /* BBCD found or end of data */
        if (buf_idx + DATA_UNIT_HEADER_SIZE >= buf_size)
            break;

        /* data-unit size lives at offset 5 of the parse-info header */
        data_unit_size = AV_RB32(buf+buf_idx+5);
        if (data_unit_size > buf_size - buf_idx || !data_unit_size) {
            if(data_unit_size > buf_size - buf_idx)
                av_log(s->avctx, AV_LOG_ERROR,
                       "Data unit with size %d is larger than input buffer, discarding\n",
                       data_unit_size);
            /* skip past the bogus prefix and keep searching */
            buf_idx += 4;
            continue;
        }
        /* [DIRAC_STD] dirac_decode_data_unit makes reference to the while defined in 9.3 inside the function parse_sequence() */
        ret = dirac_decode_data_unit(avctx, buf+buf_idx, data_unit_size);
        if (ret < 0)
        {
            av_log(s->avctx, AV_LOG_ERROR,"Error in dirac_decode_data_unit\n");
            return ret;
        }
        buf_idx += data_unit_size;
    }

    if (!s->current_picture)
        return buf_size;

    if (s->current_picture->avframe->display_picture_number > s->frame_number) {
        /* picture arrived early in decode order: park it in the delay list */
        DiracFrame *delayed_frame = remove_frame(s->delay_frames, s->frame_number);

        s->current_picture->avframe->reference |= DELAYED_PIC_REF;

        if (add_frame(s->delay_frames, MAX_DELAY, s->current_picture)) {
            int min_num = s->delay_frames[0]->avframe->display_picture_number;
            /* Too many delayed frames, so we display the frame with the lowest pts */
            av_log(avctx, AV_LOG_ERROR, "Delay frame overflow\n");

            for (i = 1; s->delay_frames[i]; i++)
                if (s->delay_frames[i]->avframe->display_picture_number < min_num)
                    min_num = s->delay_frames[i]->avframe->display_picture_number;

            delayed_frame = remove_frame(s->delay_frames, min_num);
            add_frame(s->delay_frames, MAX_DELAY, s->current_picture);
        }

        if (delayed_frame) {
            delayed_frame->avframe->reference ^= DELAYED_PIC_REF;
            if((ret=av_frame_ref(data, delayed_frame->avframe)) < 0)
                return ret;
            *got_frame = 1;
        }
    } else if (s->current_picture->avframe->display_picture_number == s->frame_number) {
        /* The right frame at the right time :-) */
        if((ret=av_frame_ref(data, s->current_picture->avframe)) < 0)
            return ret;
        *got_frame = 1;
    }

    if (*got_frame)
        s->frame_number = picture->display_picture_number + 1;

    return buf_idx;
}
+
+AVCodec ff_dirac_decoder = {
+ .name = "dirac",
+ .long_name = NULL_IF_CONFIG_SMALL("BBC Dirac VC-2"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_DIRAC,
+ .priv_data_size = sizeof(DiracContext),
+ .init = dirac_decode_init,
+ .close = dirac_decode_end,
+ .decode = dirac_decode_frame,
++ .capabilities = AV_CODEC_CAP_DELAY,
+ .flush = dirac_decode_flush,
+};
.init = dnxhd_decode_init,
.close = dnxhd_decode_close,
.decode = dnxhd_decode_frame,
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
- .capabilities = AV_CODEC_CAP_DR1,
++ .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
};
.priv_data_size = sizeof(DVVideoContext),
.init = dvvideo_decode_init,
.decode = dvvideo_decode_frame,
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_SLICE_THREADS,
+ .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS,
+ .max_lowres = 3,
};
.priv_data_size = sizeof(DVVideoContext),
.init = dvvideo_encode_init,
.encode2 = dvvideo_encode_frame,
- .capabilities = CODEC_CAP_SLICE_THREADS | CODEC_CAP_FRAME_THREADS | CODEC_CAP_INTRA_ONLY,
- .capabilities = AV_CODEC_CAP_SLICE_THREADS,
++ .capabilities = AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE
s->error_occurred = 0;
}
- s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU ||
+static int er_supported(ERContext *s)
+{
+ if(s->avctx->hwaccel && s->avctx->hwaccel->decode_slice ||
++ s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU ||
+ !s->cur_pic.f ||
+ s->cur_pic.field_picture
+ )
+ return 0;
+ return 1;
+}
+
/**
* Add a slice.
* @param endx x component of the last macroblock, can be -1
--- /dev/null
- .capabilities = CODEC_CAP_DR1,
+/*
+ * Enhanced Variable Rate Codec, Service Option 3 decoder
+ * Copyright (c) 2013 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Enhanced Variable Rate Codec, Service Option 3 decoder
+ * @author Paul B Mahol
+ */
+
+#include "libavutil/mathematics.h"
+#include "libavutil/opt.h"
+#include "avcodec.h"
+#include "internal.h"
+#include "get_bits.h"
+#include "evrcdata.h"
+#include "acelp_vectors.h"
+#include "lsp.h"
+
+#define MIN_LSP_SEP (0.05 / (2.0 * M_PI))
+#define MIN_DELAY 20
+#define MAX_DELAY 120
+#define NB_SUBFRAMES 3
+#define SUBFRAME_SIZE 54
+#define FILTER_ORDER 10
+#define ACB_SIZE 128
+
+typedef enum {
+ RATE_ERRS = -1,
+ SILENCE,
+ RATE_QUANT,
+ RATE_QUARTER,
+ RATE_HALF,
+ RATE_FULL,
+} evrc_packet_rate;
+
+/**
+ * EVRC-A unpacked data frame
+ */
+typedef struct EVRCAFrame {
+ uint8_t lpc_flag; ///< spectral change indicator
+ uint16_t lsp[4]; ///< index into LSP codebook
+ uint8_t pitch_delay; ///< pitch delay for entire frame
+ uint8_t delay_diff; ///< delay difference for entire frame
+ uint8_t acb_gain[3]; ///< adaptive codebook gain
+ uint16_t fcb_shape[3][4]; ///< fixed codebook shape
+ uint8_t fcb_gain[3]; ///< fixed codebook gain index
+ uint8_t energy_gain; ///< frame energy gain index
+ uint8_t tty; ///< tty baud rate bit
+} EVRCAFrame;
+
+typedef struct EVRCContext {
+ AVClass *class;
+
+ int postfilter;
+
+ GetBitContext gb;
+ evrc_packet_rate bitrate;
+ evrc_packet_rate last_valid_bitrate;
+ EVRCAFrame frame;
+
+ float lspf[FILTER_ORDER];
+ float prev_lspf[FILTER_ORDER];
+ float synthesis[FILTER_ORDER];
+ float postfilter_fir[FILTER_ORDER];
+ float postfilter_iir[FILTER_ORDER];
+ float postfilter_residual[ACB_SIZE + SUBFRAME_SIZE];
+ float pitch_delay;
+ float prev_pitch_delay;
+ float avg_acb_gain; ///< average adaptive codebook gain
+ float avg_fcb_gain; ///< average fixed codebook gain
+ float pitch[ACB_SIZE + FILTER_ORDER + SUBFRAME_SIZE];
+ float pitch_back[ACB_SIZE];
+ float interpolation_coeffs[136];
+ float energy_vector[NB_SUBFRAMES];
+ float fade_scale;
+ float last;
+
+ uint8_t prev_energy_gain;
+ uint8_t prev_error_flag;
+ uint8_t warned_buf_mismatch_bitrate;
+} EVRCContext;
+
+/**
+ * Frame unpacking for RATE_FULL, RATE_HALF and RATE_QUANT
+ *
+ * @param e the context
+ *
+ * TIA/IS-127 Table 4.21-1
+ */
+static void unpack_frame(EVRCContext *e)
+{
+ EVRCAFrame *frame = &e->frame;
+ GetBitContext *gb = &e->gb;
+
+ switch (e->bitrate) {
+ case RATE_FULL:
+ frame->lpc_flag = get_bits1(gb);
+ frame->lsp[0] = get_bits(gb, 6);
+ frame->lsp[1] = get_bits(gb, 6);
+ frame->lsp[2] = get_bits(gb, 9);
+ frame->lsp[3] = get_bits(gb, 7);
+ frame->pitch_delay = get_bits(gb, 7);
+ frame->delay_diff = get_bits(gb, 5);
+ frame->acb_gain[0] = get_bits(gb, 3);
+ frame->fcb_shape[0][0] = get_bits(gb, 8);
+ frame->fcb_shape[0][1] = get_bits(gb, 8);
+ frame->fcb_shape[0][2] = get_bits(gb, 8);
+ frame->fcb_shape[0][3] = get_bits(gb, 11);
+ frame->fcb_gain[0] = get_bits(gb, 5);
+ frame->acb_gain[1] = get_bits(gb, 3);
+ frame->fcb_shape[1][0] = get_bits(gb, 8);
+ frame->fcb_shape[1][1] = get_bits(gb, 8);
+ frame->fcb_shape[1][2] = get_bits(gb, 8);
+ frame->fcb_shape[1][3] = get_bits(gb, 11);
+ frame->fcb_gain [1] = get_bits(gb, 5);
+ frame->acb_gain [2] = get_bits(gb, 3);
+ frame->fcb_shape[2][0] = get_bits(gb, 8);
+ frame->fcb_shape[2][1] = get_bits(gb, 8);
+ frame->fcb_shape[2][2] = get_bits(gb, 8);
+ frame->fcb_shape[2][3] = get_bits(gb, 11);
+ frame->fcb_gain [2] = get_bits(gb, 5);
+ frame->tty = get_bits1(gb);
+ break;
+ case RATE_HALF:
+ frame->lsp [0] = get_bits(gb, 7);
+ frame->lsp [1] = get_bits(gb, 7);
+ frame->lsp [2] = get_bits(gb, 8);
+ frame->pitch_delay = get_bits(gb, 7);
+ frame->acb_gain [0] = get_bits(gb, 3);
+ frame->fcb_shape[0][0] = get_bits(gb, 10);
+ frame->fcb_gain [0] = get_bits(gb, 4);
+ frame->acb_gain [1] = get_bits(gb, 3);
+ frame->fcb_shape[1][0] = get_bits(gb, 10);
+ frame->fcb_gain [1] = get_bits(gb, 4);
+ frame->acb_gain [2] = get_bits(gb, 3);
+ frame->fcb_shape[2][0] = get_bits(gb, 10);
+ frame->fcb_gain [2] = get_bits(gb, 4);
+ break;
+ case RATE_QUANT:
+ frame->lsp [0] = get_bits(gb, 4);
+ frame->lsp [1] = get_bits(gb, 4);
+ frame->energy_gain = get_bits(gb, 8);
+ break;
+ }
+}
+
+static evrc_packet_rate buf_size2bitrate(const int buf_size)
+{
+ switch (buf_size) {
+ case 23: return RATE_FULL;
+ case 11: return RATE_HALF;
+ case 6: return RATE_QUARTER;
+ case 3: return RATE_QUANT;
+ case 1: return SILENCE;
+ }
+
+ return RATE_ERRS;
+}
+
+/**
+ * Determine the bitrate from the frame size and/or the first byte of the frame.
+ *
+ * @param avctx the AV codec context
+ * @param buf_size length of the buffer
+ * @param buf the bufffer
+ *
+ * @return the bitrate on success,
+ * RATE_ERRS if the bitrate cannot be satisfactorily determined
+ */
+static evrc_packet_rate determine_bitrate(AVCodecContext *avctx,
+ int *buf_size,
+ const uint8_t **buf)
+{
+ evrc_packet_rate bitrate;
+
+ if ((bitrate = buf_size2bitrate(*buf_size)) >= 0) {
+ if (bitrate > **buf) {
+ EVRCContext *e = avctx->priv_data;
+ if (!e->warned_buf_mismatch_bitrate) {
+ av_log(avctx, AV_LOG_WARNING,
+ "Claimed bitrate and buffer size mismatch.\n");
+ e->warned_buf_mismatch_bitrate = 1;
+ }
+ bitrate = **buf;
+ } else if (bitrate < **buf) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Buffer is too small for the claimed bitrate.\n");
+ return RATE_ERRS;
+ }
+ (*buf)++;
+ *buf_size -= 1;
+ } else if ((bitrate = buf_size2bitrate(*buf_size + 1)) >= 0) {
+ av_log(avctx, AV_LOG_DEBUG,
+ "Bitrate byte is missing, guessing the bitrate from packet size.\n");
+ } else
+ return RATE_ERRS;
+
+ return bitrate;
+}
+
+static void warn_insufficient_frame_quality(AVCodecContext *avctx,
+ const char *message)
+{
+ av_log(avctx, AV_LOG_WARNING, "Frame #%d, %s\n",
+ avctx->frame_number, message);
+}
+
+/**
+ * Initialize the speech codec according to the specification.
+ *
+ * TIA/IS-127 5.2
+ */
+static av_cold int evrc_decode_init(AVCodecContext *avctx)
+{
+ EVRCContext *e = avctx->priv_data;
+ int i, n, idx = 0;
+ float denom = 2.0 / (2.0 * 8.0 + 1.0);
+
+ avctx->channels = 1;
+ avctx->channel_layout = AV_CH_LAYOUT_MONO;
+ avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
+
+ for (i = 0; i < FILTER_ORDER; i++) {
+ e->prev_lspf[i] = (i + 1) * 0.048;
+ e->synthesis[i] = 0.0;
+ }
+
+ for (i = 0; i < ACB_SIZE; i++)
+ e->pitch[i] = e->pitch_back[i] = 0.0;
+
+ e->last_valid_bitrate = RATE_QUANT;
+ e->prev_pitch_delay = 40.0;
+ e->fade_scale = 1.0;
+ e->prev_error_flag = 0;
+ e->avg_acb_gain = e->avg_fcb_gain = 0.0;
+
+ for (i = 0; i < 8; i++) {
+ float tt = ((float)i - 8.0 / 2.0) / 8.0;
+
+ for (n = -8; n <= 8; n++, idx++) {
+ float arg1 = M_PI * 0.9 * (tt - n);
+ float arg2 = M_PI * (tt - n);
+
+ e->interpolation_coeffs[idx] = 0.9;
+ if (arg1)
+ e->interpolation_coeffs[idx] *= (0.54 + 0.46 * cos(arg2 * denom)) *
+ sin(arg1) / arg1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Decode the 10 vector quantized line spectral pair frequencies from the LSP
+ * transmission codes of any bitrate and check for badly received packets.
+ *
+ * @param e the context
+ *
+ * @return 0 on success, -1 if the packet is badly received
+ *
+ * TIA/IS-127 5.2.1, 5.7.1
+ */
+static int decode_lspf(EVRCContext *e)
+{
+ const float * const *codebooks = evrc_lspq_codebooks[e->bitrate];
+ int i, j, k = 0;
+
+ for (i = 0; i < evrc_lspq_nb_codebooks[e->bitrate]; i++) {
+ int row_size = evrc_lspq_codebooks_row_sizes[e->bitrate][i];
+ const float *codebook = codebooks[i];
+
+ for (j = 0; j < row_size; j++)
+ e->lspf[k++] = codebook[e->frame.lsp[i] * row_size + j];
+ }
+
+ // check for monotonic LSPs
+ for (i = 1; i < FILTER_ORDER; i++)
+ if (e->lspf[i] <= e->lspf[i - 1])
+ return -1;
+
+ // check for minimum separation of LSPs at the splits
+ for (i = 0, k = 0; i < evrc_lspq_nb_codebooks[e->bitrate] - 1; i++) {
+ k += evrc_lspq_codebooks_row_sizes[e->bitrate][i];
+ if (e->lspf[k] - e->lspf[k - 1] <= MIN_LSP_SEP)
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Interpolation of LSP parameters.
+ *
+ * TIA/IS-127 5.2.3.1, 5.7.3.2
+ */
+static void interpolate_lsp(float *ilsp, const float *lsp,
+ const float *prev, int index)
+{
+ static const float lsp_interpolation_factors[] = { 0.1667, 0.5, 0.8333 };
+ ff_weighted_vector_sumf(ilsp, prev, lsp,
+ 1.0 - lsp_interpolation_factors[index],
+ lsp_interpolation_factors[index], FILTER_ORDER);
+}
+
/*
 * Reconstruction of the delay contour.
 *
 * Writes three interpolated delays, blending from `prev` to `current`
 * with weights taken at positions index..index+2 of the factor table.
 *
 * TIA/IS-127 5.2.2.3.2
 */
static void interpolate_delay(float *dst, float current, float prev, int index)
{
    static const float d_interpolation_factors[] = { 0, 0.3313, 0.6625, 1, 1 };
    int i;

    for (i = 0; i < 3; i++) {
        const float w = d_interpolation_factors[index + i];
        dst[i] = (1.0 - w) * prev + w * current;
    }
}
+
+/*
+ * Convert the quantized, interpolated line spectral frequencies,
+ * to prediction coefficients.
+ *
+ * TIA/IS-127 5.2.3.2, 4.7.2.2
+ */
+static void decode_predictor_coeffs(const float *ilspf, float *ilpc)
+{
+ double lsp[FILTER_ORDER];
+ float a[FILTER_ORDER / 2 + 1], b[FILTER_ORDER / 2 + 1];
+ float a1[FILTER_ORDER / 2] = { 0 };
+ float a2[FILTER_ORDER / 2] = { 0 };
+ float b1[FILTER_ORDER / 2] = { 0 };
+ float b2[FILTER_ORDER / 2] = { 0 };
+ int i, k;
+
+ ff_acelp_lsf2lspd(lsp, ilspf, FILTER_ORDER);
+
+ for (k = 0; k <= FILTER_ORDER; k++) {
+ a[0] = k < 2 ? 0.25 : 0;
+ b[0] = k < 2 ? k < 1 ? 0.25 : -0.25 : 0;
+
+ for (i = 0; i < FILTER_ORDER / 2; i++) {
+ a[i + 1] = a[i] - 2 * lsp[i * 2 ] * a1[i] + a2[i];
+ b[i + 1] = b[i] - 2 * lsp[i * 2 + 1] * b1[i] + b2[i];
+ a2[i] = a1[i];
+ a1[i] = a[i];
+ b2[i] = b1[i];
+ b1[i] = b[i];
+ }
+
+ if (k)
+ ilpc[k - 1] = 2.0 * (a[FILTER_ORDER / 2] + b[FILTER_ORDER / 2]);
+ }
+}
+
+static void bl_intrp(EVRCContext *e, float *ex, float delay)
+{
+ float *f;
+ int offset, i, coef_idx;
+ int16_t t;
+
+ offset = lrintf(delay);
+
+ t = (offset - delay + 0.5) * 8.0 + 0.5;
+ if (t == 8) {
+ t = 0;
+ offset--;
+ }
+
+ f = ex - offset - 8;
+
+ coef_idx = t * (2 * 8 + 1);
+
+ ex[0] = 0.0;
+ for (i = 0; i < 2 * 8 + 1; i++)
+ ex[0] += e->interpolation_coeffs[coef_idx + i] * f[i];
+}
+
+/*
+ * Adaptive codebook excitation.
+ *
+ * TIA/IS-127 5.2.2.3.3, 4.12.5.2
+ */
+static void acb_excitation(EVRCContext *e, float *excitation, float gain,
+ const float delay[3], int length)
+{
+ float denom, locdelay, dpr, invl;
+ int i;
+
+ invl = 1.0 / ((float) length);
+ dpr = length;
+
+ /* first at-most extra samples */
+ denom = (delay[1] - delay[0]) * invl;
+ for (i = 0; i < dpr; i++) {
+ locdelay = delay[0] + i * denom;
+ bl_intrp(e, excitation + i, locdelay);
+ }
+
+ denom = (delay[2] - delay[1]) * invl;
+ /* interpolation */
+ for (i = dpr; i < dpr + 10; i++) {
+ locdelay = delay[1] + (i - dpr) * denom;
+ bl_intrp(e, excitation + i, locdelay);
+ }
+
+ for (i = 0; i < length; i++)
+ excitation[i] *= gain;
+}
+
+static void decode_8_pulses_35bits(const uint16_t *fixed_index, float *cod)
+{
+ int i, pos1, pos2, offset;
+
+ offset = (fixed_index[3] >> 9) & 3;
+
+ for (i = 0; i < 3; i++) {
+ pos1 = ((fixed_index[i] & 0x7f) / 11) * 5 + ((i + offset) % 5);
+ pos2 = ((fixed_index[i] & 0x7f) % 11) * 5 + ((i + offset) % 5);
+
+ cod[pos1] = (fixed_index[i] & 0x80) ? -1.0 : 1.0;
+
+ if (pos2 < pos1)
+ cod[pos2] = -cod[pos1];
+ else
+ cod[pos2] += cod[pos1];
+ }
+
+ pos1 = ((fixed_index[3] & 0x7f) / 11) * 5 + ((3 + offset) % 5);
+ pos2 = ((fixed_index[3] & 0x7f) % 11) * 5 + ((4 + offset) % 5);
+
+ cod[pos1] = (fixed_index[3] & 0x100) ? -1.0 : 1.0;
+ cod[pos2] = (fixed_index[3] & 0x80 ) ? -1.0 : 1.0;
+}
+
+static void decode_3_pulses_10bits(uint16_t fixed_index, float *cod)
+{
+ float sign;
+ int pos;
+
+ sign = (fixed_index & 0x200) ? -1.0 : 1.0;
+
+ pos = ((fixed_index & 0x7) * 7) + 4;
+ cod[pos] += sign;
+ pos = (((fixed_index >> 3) & 0x7) * 7) + 2;
+ cod[pos] -= sign;
+ pos = (((fixed_index >> 6) & 0x7) * 7);
+ cod[pos] += sign;
+}
+
+/*
+ * Reconstruction of ACELP fixed codebook excitation for full and half rate.
+ *
+ * TIA/IS-127 5.2.3.7
+ */
+static void fcb_excitation(EVRCContext *e, const uint16_t *codebook,
+ float *excitation, float pitch_gain,
+ int pitch_lag, int subframe_size)
+{
+ int i;
+
+ if (e->bitrate == RATE_FULL)
+ decode_8_pulses_35bits(codebook, excitation);
+ else
+ decode_3_pulses_10bits(*codebook, excitation);
+
+ pitch_gain = av_clipf(pitch_gain, 0.2, 0.9);
+
+ for (i = pitch_lag; i < subframe_size; i++)
+ excitation[i] += pitch_gain * excitation[i - pitch_lag];
+}
+
+/**
+ * Synthesis of the decoder output signal.
+ *
+ * param[in] in input signal
+ * param[in] filter_coeffs LPC coefficients
+ * param[in/out] memory synthesis filter memory
+ * param buffer_length amount of data to process
+ * param[out] samples output samples
+ *
+ * TIA/IS-127 5.2.3.15, 5.7.3.4
+ */
+static void synthesis_filter(const float *in, const float *filter_coeffs,
+ float *memory, int buffer_length, float *samples)
+{
+ int i, j;
+
+ for (i = 0; i < buffer_length; i++) {
+ samples[i] = in[i];
+ for (j = FILTER_ORDER - 1; j > 0; j--) {
+ samples[i] -= filter_coeffs[j] * memory[j];
+ memory[j] = memory[j - 1];
+ }
+ samples[i] -= filter_coeffs[0] * memory[0];
+ memory[0] = samples[i];
+ }
+}
+
+static void bandwidth_expansion(float *coeff, const float *inbuf, float gamma)
+{
+ double fac = gamma;
+ int i;
+
+ for (i = 0; i < FILTER_ORDER; i++) {
+ coeff[i] = inbuf[i] * fac;
+ fac *= gamma;
+ }
+}
+
+static void residual_filter(float *output, const float *input,
+ const float *coef, float *memory, int length)
+{
+ float sum;
+ int i, j;
+
+ for (i = 0; i < length; i++) {
+ sum = input[i];
+
+ for (j = FILTER_ORDER - 1; j > 0; j--) {
+ sum += coef[j] * memory[j];
+ memory[j] = memory[j - 1];
+ }
+ sum += coef[0] * memory[0];
+ memory[0] = input[i];
+ output[i] = sum;
+ }
+}
+
/*
 * Postfilter parameter sets, indexed by evrc_packet_rate.
 *
 * TIA/IS-127 Table 5.9.1-1.
 */
static const struct PfCoeff {
    float tilt;
    float ltgain;
    float p1;
    float p2;
} postfilter_coeffs[5] = {
    { 0.0 , 0.0 , 0.0 , 0.0  }, /* SILENCE      */
    { 0.0 , 0.0 , 0.57, 0.57 }, /* RATE_QUANT   */
    { 0.0 , 0.0 , 0.0 , 0.0  }, /* RATE_QUARTER */
    { 0.35, 0.50, 0.50, 0.75 }, /* RATE_HALF    */
    { 0.20, 0.50, 0.57, 0.75 }, /* RATE_FULL    */
};
+
+/*
+ * Adaptive postfilter.
+ *
+ * TIA/IS-127 5.9
+ */
+static void postfilter(EVRCContext *e, float *in, const float *coeff,
+ float *out, int idx, const struct PfCoeff *pfc,
+ int length)
+{
+ float wcoef1[FILTER_ORDER], wcoef2[FILTER_ORDER],
+ scratch[SUBFRAME_SIZE], temp[SUBFRAME_SIZE],
+ mem[SUBFRAME_SIZE];
+ float sum1 = 0.0, sum2 = 0.0, gamma, gain;
+ float tilt = pfc->tilt;
+ int i, n, best;
+
+ bandwidth_expansion(wcoef1, coeff, pfc->p1);
+ bandwidth_expansion(wcoef2, coeff, pfc->p2);
+
+ /* Tilt compensation filter, TIA/IS-127 5.9.1 */
+ for (i = 0; i < length - 1; i++)
+ sum2 += in[i] * in[i + 1];
+ if (sum2 < 0.0)
+ tilt = 0.0;
+
+ for (i = 0; i < length; i++) {
+ scratch[i] = in[i] - tilt * e->last;
+ e->last = in[i];
+ }
+
+ /* Short term residual filter, TIA/IS-127 5.9.2 */
+ residual_filter(&e->postfilter_residual[ACB_SIZE], scratch, wcoef1, e->postfilter_fir, length);
+
+ /* Long term postfilter */
+ best = idx;
+ for (i = FFMIN(MIN_DELAY, idx - 3); i <= FFMAX(MAX_DELAY, idx + 3); i++) {
+ for (n = ACB_SIZE, sum2 = 0; n < ACB_SIZE + length; n++)
+ sum2 += e->postfilter_residual[n] * e->postfilter_residual[n - i];
+ if (sum2 > sum1) {
+ sum1 = sum2;
+ best = i;
+ }
+ }
+
+ for (i = ACB_SIZE, sum1 = 0; i < ACB_SIZE + length; i++)
+ sum1 += e->postfilter_residual[i - best] * e->postfilter_residual[i - best];
+ for (i = ACB_SIZE, sum2 = 0; i < ACB_SIZE + length; i++)
+ sum2 += e->postfilter_residual[i] * e->postfilter_residual[i - best];
+
+ if (sum2 * sum1 == 0 || e->bitrate == RATE_QUANT) {
+ memcpy(temp, e->postfilter_residual + ACB_SIZE, length * sizeof(float));
+ } else {
+ gamma = sum2 / sum1;
+ if (gamma < 0.5)
+ memcpy(temp, e->postfilter_residual + ACB_SIZE, length * sizeof(float));
+ else {
+ gamma = FFMIN(gamma, 1.0);
+
+ for (i = 0; i < length; i++) {
+ temp[i] = e->postfilter_residual[ACB_SIZE + i] + gamma *
+ pfc->ltgain * e->postfilter_residual[ACB_SIZE + i - best];
+ }
+ }
+ }
+
+ memcpy(scratch, temp, length * sizeof(float));
+ memcpy(mem, e->postfilter_iir, FILTER_ORDER * sizeof(float));
+ synthesis_filter(scratch, wcoef2, mem, length, scratch);
+
+ /* Gain computation, TIA/IS-127 5.9.4-2 */
+ for (i = 0, sum1 = 0, sum2 = 0; i < length; i++) {
+ sum1 += in[i] * in[i];
+ sum2 += scratch[i] * scratch[i];
+ }
+ gain = sum2 ? sqrt(sum1 / sum2) : 1.0;
+
+ for (i = 0; i < length; i++)
+ temp[i] *= gain;
+
+ /* Short term postfilter */
+ synthesis_filter(temp, wcoef2, e->postfilter_iir, length, out);
+
+ memmove(e->postfilter_residual,
+ e->postfilter_residual + length, ACB_SIZE * sizeof(float));
+}
+
+static void frame_erasure(EVRCContext *e, float *samples)
+{
+ float ilspf[FILTER_ORDER], ilpc[FILTER_ORDER], idelay[NB_SUBFRAMES],
+ tmp[SUBFRAME_SIZE + 6], f;
+ int i, j;
+
+ for (i = 0; i < FILTER_ORDER; i++) {
+ if (e->bitrate != RATE_QUANT)
+ e->lspf[i] = e->prev_lspf[i] * 0.875 + 0.125 * (i + 1) * 0.048;
+ else
+ e->lspf[i] = e->prev_lspf[i];
+ }
+
+ if (e->prev_error_flag)
+ e->avg_acb_gain *= 0.75;
+ if (e->bitrate == RATE_FULL)
+ memcpy(e->pitch_back, e->pitch, ACB_SIZE * sizeof(float));
+ if (e->last_valid_bitrate == RATE_QUANT)
+ e->bitrate = RATE_QUANT;
+ else
+ e->bitrate = RATE_FULL;
+
+ if (e->bitrate == RATE_FULL || e->bitrate == RATE_HALF) {
+ e->pitch_delay = e->prev_pitch_delay;
+ } else {
+ float sum = 0;
+
+ idelay[0] = idelay[1] = idelay[2] = MIN_DELAY;
+
+ for (i = 0; i < NB_SUBFRAMES; i++)
+ sum += evrc_energy_quant[e->prev_energy_gain][i];
+ sum /= (float) NB_SUBFRAMES;
+ sum = pow(10, sum);
+ for (i = 0; i < NB_SUBFRAMES; i++)
+ e->energy_vector[i] = sum;
+ }
+
+ if (fabs(e->pitch_delay - e->prev_pitch_delay) > 15)
+ e->prev_pitch_delay = e->pitch_delay;
+
+ for (i = 0; i < NB_SUBFRAMES; i++) {
+ int subframe_size = subframe_sizes[i];
+ int pitch_lag;
+
+ interpolate_lsp(ilspf, e->lspf, e->prev_lspf, i);
+
+ if (e->bitrate != RATE_QUANT) {
+ if (e->avg_acb_gain < 0.3) {
+ idelay[0] = estimation_delay[i];
+ idelay[1] = estimation_delay[i + 1];
+ idelay[2] = estimation_delay[i + 2];
+ } else {
+ interpolate_delay(idelay, e->pitch_delay, e->prev_pitch_delay, i);
+ }
+ }
+
+ pitch_lag = lrintf((idelay[1] + idelay[0]) / 2.0);
+ decode_predictor_coeffs(ilspf, ilpc);
+
+ if (e->bitrate != RATE_QUANT) {
+ acb_excitation(e, e->pitch + ACB_SIZE,
+ e->avg_acb_gain, idelay, subframe_size);
+ for (j = 0; j < subframe_size; j++)
+ e->pitch[ACB_SIZE + j] *= e->fade_scale;
+ e->fade_scale = FFMAX(e->fade_scale - 0.05, 0.0);
+ } else {
+ for (j = 0; j < subframe_size; j++)
+ e->pitch[ACB_SIZE + j] = e->energy_vector[i];
+ }
+
+ memmove(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float));
+
+ if (e->bitrate != RATE_QUANT && e->avg_acb_gain < 0.4) {
+ f = 0.1 * e->avg_fcb_gain;
+ for (j = 0; j < subframe_size; j++)
+ e->pitch[ACB_SIZE + j] += f;
+ } else if (e->bitrate == RATE_QUANT) {
+ for (j = 0; j < subframe_size; j++)
+ e->pitch[ACB_SIZE + j] = e->energy_vector[i];
+ }
+
+ synthesis_filter(e->pitch + ACB_SIZE, ilpc,
+ e->synthesis, subframe_size, tmp);
+ postfilter(e, tmp, ilpc, samples, pitch_lag,
+ &postfilter_coeffs[e->bitrate], subframe_size);
+
+ samples += subframe_size;
+ }
+}
+
+static int evrc_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
+{
+ const uint8_t *buf = avpkt->data;
+ AVFrame *frame = data;
+ EVRCContext *e = avctx->priv_data;
+ int buf_size = avpkt->size;
+ float ilspf[FILTER_ORDER], ilpc[FILTER_ORDER], idelay[NB_SUBFRAMES];
+ float *samples;
+ int i, j, ret, error_flag = 0;
+
+ frame->nb_samples = 160;
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
+ return ret;
+ samples = (float *)frame->data[0];
+
+ if ((e->bitrate = determine_bitrate(avctx, &buf_size, &buf)) == RATE_ERRS) {
+ warn_insufficient_frame_quality(avctx, "bitrate cannot be determined.");
+ goto erasure;
+ }
+ if (e->bitrate <= SILENCE || e->bitrate == RATE_QUARTER)
+ goto erasure;
+ if (e->bitrate == RATE_QUANT && e->last_valid_bitrate == RATE_FULL
+ && !e->prev_error_flag)
+ goto erasure;
+
+ if ((ret = init_get_bits8(&e->gb, buf, buf_size)) < 0)
+ return ret;
+ memset(&e->frame, 0, sizeof(EVRCAFrame));
+
+ unpack_frame(e);
+
+ if (e->bitrate != RATE_QUANT) {
+ uint8_t *p = (uint8_t *) &e->frame;
+ for (i = 0; i < sizeof(EVRCAFrame); i++) {
+ if (p[i])
+ break;
+ }
+ if (i == sizeof(EVRCAFrame))
+ goto erasure;
+ } else if (e->frame.lsp[0] == 0xf &&
+ e->frame.lsp[1] == 0xf &&
+ e->frame.energy_gain == 0xff) {
+ goto erasure;
+ }
+
+ if (decode_lspf(e) < 0)
+ goto erasure;
+
+ if (e->bitrate == RATE_FULL || e->bitrate == RATE_HALF) {
+ /* Pitch delay parameter checking as per TIA/IS-127 5.1.5.1 */
+ if (e->frame.pitch_delay > MAX_DELAY - MIN_DELAY)
+ goto erasure;
+
+ e->pitch_delay = e->frame.pitch_delay + MIN_DELAY;
+
+ /* Delay diff parameter checking as per TIA/IS-127 5.1.5.2 */
+ if (e->frame.delay_diff) {
+ int p = e->pitch_delay - e->frame.delay_diff + 16;
+ if (p < MIN_DELAY || p > MAX_DELAY)
+ goto erasure;
+ }
+
+ /* Delay contour reconstruction as per TIA/IS-127 5.2.2.2 */
+ if (e->frame.delay_diff &&
+ e->bitrate == RATE_FULL && e->prev_error_flag) {
+ float delay;
+
+ memcpy(e->pitch, e->pitch_back, ACB_SIZE * sizeof(float));
+
+ delay = e->prev_pitch_delay;
+ e->prev_pitch_delay = delay - e->frame.delay_diff + 16.0;
+
+ if (fabs(e->pitch_delay - delay) > 15)
+ delay = e->pitch_delay;
+
+ for (i = 0; i < NB_SUBFRAMES; i++) {
+ int subframe_size = subframe_sizes[i];
+
+ interpolate_delay(idelay, delay, e->prev_pitch_delay, i);
+ acb_excitation(e, e->pitch + ACB_SIZE, e->avg_acb_gain, idelay, subframe_size);
+ memmove(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float));
+ }
+ }
+
+ /* Smoothing of the decoded delay as per TIA/IS-127 5.2.2.5 */
+ if (fabs(e->pitch_delay - e->prev_pitch_delay) > 15)
+ e->prev_pitch_delay = e->pitch_delay;
+
+ e->avg_acb_gain = e->avg_fcb_gain = 0.0;
+ } else {
+ idelay[0] = idelay[1] = idelay[2] = MIN_DELAY;
+
+ /* Decode frame energy vectors as per TIA/IS-127 5.7.2 */
+ for (i = 0; i < NB_SUBFRAMES; i++)
+ e->energy_vector[i] = pow(10, evrc_energy_quant[e->frame.energy_gain][i]);
+ e->prev_energy_gain = e->frame.energy_gain;
+ }
+
+ for (i = 0; i < NB_SUBFRAMES; i++) {
+ float tmp[SUBFRAME_SIZE + 6] = { 0 };
+ int subframe_size = subframe_sizes[i];
+ int pitch_lag;
+
+ interpolate_lsp(ilspf, e->lspf, e->prev_lspf, i);
+
+ if (e->bitrate != RATE_QUANT)
+ interpolate_delay(idelay, e->pitch_delay, e->prev_pitch_delay, i);
+
+ pitch_lag = lrintf((idelay[1] + idelay[0]) / 2.0);
+ decode_predictor_coeffs(ilspf, ilpc);
+
+ /* Bandwidth expansion as per TIA/IS-127 5.2.3.3 */
+ if (e->frame.lpc_flag && e->prev_error_flag)
+ bandwidth_expansion(ilpc, ilpc, 0.75);
+
+ if (e->bitrate != RATE_QUANT) {
+ float acb_sum, f;
+
+ f = exp((e->bitrate == RATE_HALF ? 0.5 : 0.25)
+ * (e->frame.fcb_gain[i] + 1));
+ acb_sum = pitch_gain_vq[e->frame.acb_gain[i]];
+ e->avg_acb_gain += acb_sum / NB_SUBFRAMES;
+ e->avg_fcb_gain += f / NB_SUBFRAMES;
+
+ acb_excitation(e, e->pitch + ACB_SIZE,
+ acb_sum, idelay, subframe_size);
+ fcb_excitation(e, e->frame.fcb_shape[i], tmp,
+ acb_sum, pitch_lag, subframe_size);
+
+ /* Total excitation generation as per TIA/IS-127 5.2.3.9 */
+ for (j = 0; j < subframe_size; j++)
+ e->pitch[ACB_SIZE + j] += f * tmp[j];
+ e->fade_scale = FFMIN(e->fade_scale + 0.2, 1.0);
+ } else {
+ for (j = 0; j < subframe_size; j++)
+ e->pitch[ACB_SIZE + j] = e->energy_vector[i];
+ }
+
+ memmove(e->pitch, e->pitch + subframe_size, ACB_SIZE * sizeof(float));
+
+ synthesis_filter(e->pitch + ACB_SIZE, ilpc,
+ e->synthesis, subframe_size,
+ e->postfilter ? tmp : samples);
+ if (e->postfilter)
+ postfilter(e, tmp, ilpc, samples, pitch_lag,
+ &postfilter_coeffs[e->bitrate], subframe_size);
+
+ samples += subframe_size;
+ }
+
+ if (error_flag) {
+erasure:
+ error_flag = 1;
+ av_log(avctx, AV_LOG_WARNING, "frame erasure\n");
+ frame_erasure(e, samples);
+ }
+
+ memcpy(e->prev_lspf, e->lspf, sizeof(e->prev_lspf));
+ e->prev_error_flag = error_flag;
+ e->last_valid_bitrate = e->bitrate;
+
+ if (e->bitrate != RATE_QUANT)
+ e->prev_pitch_delay = e->pitch_delay;
+
+ samples = (float *)frame->data[0];
+ for (i = 0; i < 160; i++)
+ samples[i] /= 32768;
+
+ *got_frame_ptr = 1;
+
+ return avpkt->size;
+}
+
+#define OFFSET(x) offsetof(EVRCContext, x)
+#define AD AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM
+
+static const AVOption options[] = {
+ { "postfilter", "enable postfilter", OFFSET(postfilter), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, AD },
+ { NULL }
+};
+
+static const AVClass evrcdec_class = {
+ .class_name = "evrc",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+AVCodec ff_evrc_decoder = {
+ .name = "evrc",
+ .long_name = NULL_IF_CONFIG_SMALL("EVRC (Enhanced Variable Rate Codec)"),
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_EVRC,
+ .init = evrc_decode_init,
+ .decode = evrc_decode_frame,
++ .capabilities = AV_CODEC_CAP_DR1,
+ .priv_data_size = sizeof(EVRCContext),
+ .priv_class = &evrcdec_class,
+};
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_FFV1,
.priv_data_size = sizeof(FFV1Context),
- .init = ffv1_decode_init,
- .close = ffv1_decode_close,
- .decode = ffv1_decode_frame,
+ .init = decode_init,
+ .close = ffv1_close,
+ .decode = decode_frame,
+ .init_thread_copy = ONLY_IF_THREADS_ENABLED(init_thread_copy),
+ .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
- .capabilities = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/ |
- CODEC_CAP_FRAME_THREADS | CODEC_CAP_SLICE_THREADS,
+ .capabilities = AV_CODEC_CAP_DR1 /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/ |
- AV_CODEC_CAP_SLICE_THREADS,
++ AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS,
};
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_FFV1,
.priv_data_size = sizeof(FFV1Context),
- .init = ffv1_encode_init,
- .encode2 = ffv1_encode_frame,
- .close = ffv1_encode_close,
- .capabilities = AV_CODEC_CAP_SLICE_THREADS,
+ .init = encode_init,
+ .encode2 = encode_frame,
+ .close = encode_close,
- .capabilities = CODEC_CAP_SLICE_THREADS | CODEC_CAP_DELAY,
++ .capabilities = AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_DELAY,
.pix_fmts = (const enum AVPixelFormat[]) {
- AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
- AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
- AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV420P9,
- AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
- AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
- AV_PIX_FMT_RGB32,
- AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
- AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
- AV_PIX_FMT_GRAY16, AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_0RGB32, AV_PIX_FMT_RGB32, AV_PIX_FMT_YUV420P16,
+ AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16, AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV422P9,
+ AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUVA444P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA420P16,
+ AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA420P10,
+ AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA420P9,
+ AV_PIX_FMT_GRAY16, AV_PIX_FMT_GRAY8, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
+ AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14,
AV_PIX_FMT_NONE
},
--- /dev/null
- .capabilities = CODEC_CAP_DR1,
+/*
+ * Wavesynth pseudo-codec
+ * Copyright (c) 2011 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/intreadwrite.h"
+#include "libavutil/log.h"
+#include "avcodec.h"
+#include "internal.h"
+
+
+#define SIN_BITS 14
+#define WS_MAX_CHANNELS 32
+#define INF_TS 0x7FFFFFFFFFFFFFFF
+
+#define PINK_UNIT 128
+
+/*
+ Format of the extradata and packets
+
+ THIS INFORMATION IS NOT PART OF THE PUBLIC API OR ABI.
+ IT CAN CHANGE WITHOUT NOTIFICATION.
+
+ All numbers are in little endian.
+
+ The codec extradata define a set of intervals with uniform content.
+ Overlapping intervals are added together.
+
+ extradata:
+ uint32 number of intervals
+ ... intervals
+
+ interval:
+ int64 start timestamp; time_base must be 1/sample_rate;
+ start timestamps must be in ascending order
+ int64 end timestamp
+ uint32 type
+ uint32 channels mask
+ ... additional information, depends on type
+
+ sine interval (type fourcc "SINE"):
+ int32 start frequency, in 1/(1<<16) Hz
+ int32 end frequency
+ int32 start amplitude, 1<<16 is the full amplitude
+ int32 end amplitude
+ uint32 start phase, 0 is sin(0), 0x20000000 is sin(pi/2), etc.;
+ n | (1<<31) means to match the phase of previous channel #n
+
+ pink noise interval (type fourcc "NOIS"):
+ int32 start amplitude
+ int32 end amplitude
+
+ The input packets encode the time and duration of the requested segment.
+
+ packet:
+ int64 start timestamp
+ int32 duration
+
+*/
+
+/* Interval payload types, stored as little-endian fourccs in extradata. */
+enum ws_interval_type {
+    WS_SINE = MKTAG('S','I','N','E'),   /* swept sine wave */
+    WS_NOISE = MKTAG('N','O','I','S'),  /* pink noise */
+};
+
+/* One active interval; fixed-point phase/amplitude state in 64 bits
+ * (phase: full turn == 1<<64; amplitude: Q32). */
+struct ws_interval {
+    int64_t ts_start, ts_end;       /* [start, end) in samples */
+    uint64_t phi0, dphi0, ddphi;    /* initial phase, phase step, step of step */
+    uint64_t amp0, damp;            /* initial amplitude and per-sample slope */
+    uint64_t phi, dphi, amp;        /* running state at the current sample */
+    uint32_t channels;              /* bitmask of output channels */
+    enum ws_interval_type type;
+    int next;                       /* index of next active interval, -1 = end */
+};
+
+/* Decoder private context. */
+struct wavesynth_context {
+    int64_t cur_ts;                 /* timestamp of the next sample to synthesize */
+    int64_t next_ts;                /* ts_start of the next not-yet-active interval */
+    int32_t *sin;                   /* sine table, 1 << SIN_BITS entries */
+    struct ws_interval *inter;      /* all intervals, sorted by ts_start */
+    uint32_t dither_state;          /* LCG state for output dithering */
+    uint32_t pink_state;            /* LCG state for pink noise generation */
+    int32_t pink_pool[PINK_UNIT];   /* precomputed pink noise samples */
+    unsigned pink_need, pink_pos;   /* #noise intervals; read position in pool */
+    int nb_inter;
+    int cur_inter;                  /* head of the active-interval linked list */
+    int next_inter;                 /* index of first interval not yet started */
+};
+
+#define LCG_A 1284865837
+#define LCG_C 4150755663
+#define LCG_AI 849225893 /* A*AI = 1 [mod 1<<32] */
+
+/* Advance the 32-bit linear congruential generator one step and return
+ * the new state; used for both dithering and pink noise. */
+static uint32_t lcg_next(uint32_t *s)
+{
+    *s = *s * LCG_A + LCG_C;
+    return *s;
+}
+
+/* Seek the LCG forward or backward by dt steps in O(log |dt|), by
+ * repeatedly squaring the affine step (t -> a*t + c mod 2^32).
+ * Backward seeking uses the modular inverse LCG_AI of LCG_A. */
+static void lcg_seek(uint32_t *s, int64_t dt)
+{
+    uint32_t a, c, t = *s;
+
+    if (dt >= 0) {
+        a = LCG_A;
+        c = LCG_C;
+    } else { /* coefficients for a step backward */
+        a = LCG_AI;
+        c = (uint32_t)(LCG_AI * LCG_C);
+        dt = -dt;
+    }
+    while (dt) {
+        if (dt & 1)
+            t = a * t + c;
+        c *= a + 1; /* coefficients for a double step */
+        a *= a;
+        dt >>= 1;
+    }
+    *s = t;
+}
+
+/* Emulate pink noise by summing white noise at the sampling frequency,
+ * white noise at half the sampling frequency (each value taken twice),
+ * etc., with a total of 8 octaves.
+ * This is known as the Voss-McCartney algorithm. */
+
+/* Refill the pink noise pool with PINK_UNIT samples using the
+ * Voss-McCartney octave-summing scheme described above; consumes
+ * exactly 2*PINK_UNIT LCG steps so lcg_seek stays in sync. */
+static void pink_fill(struct wavesynth_context *ws)
+{
+    int32_t vt[7] = { 0 }, v = 0;
+    int i, j;
+
+    ws->pink_pos = 0;
+    if (!ws->pink_need)
+        return;
+    for (i = 0; i < PINK_UNIT; i++) {
+        for (j = 0; j < 7; j++) {
+            /* update octave j only when bit j of i flips to 0 */
+            if ((i >> j) & 1)
+                break;
+            v -= vt[j];
+            vt[j] = (int32_t)lcg_next(&ws->pink_state) >> 3;
+            v += vt[j];
+        }
+        ws->pink_pool[i] = v + ((int32_t)lcg_next(&ws->pink_state) >> 3);
+    }
+    lcg_next(&ws->pink_state); /* so we use exactly 256 steps */
+}
+
+/**
+ * @return (1<<64) * a / b, without overflow, if a < b
+ */
+static uint64_t frac64(uint64_t a, uint64_t b)
+{
+ uint64_t r = 0;
+ int i;
+
+ if (b < (uint64_t)1 << 32) { /* b small, use two 32-bits steps */
+ a <<= 32;
+ return ((a / b) << 32) | ((a % b) << 32) / b;
+ }
+ if (b < (uint64_t)1 << 48) { /* b medium, use four 16-bits steps */
+ for (i = 0; i < 4; i++) {
+ a <<= 16;
+ r = (r << 16) | (a / b);
+ a %= b;
+ }
+ return r;
+ }
+ for (i = 63; i >= 0; i--) {
+ if (a >= (uint64_t)1 << 63 || a << 1 >= b) {
+ r |= (uint64_t)1 << i;
+ a = (a << 1) - b;
+ } else {
+ a <<= 1;
+ }
+ }
+ return r;
+}
+
+/* Closed-form phase of interval `in` at timestamp ts:
+ * phi0 + dt*dphi0 + (dt*(dt-1)/2)*ddphi, all modulo 2^64. */
+static uint64_t phi_at(struct ws_interval *in, int64_t ts)
+{
+    uint64_t dt = ts - in->ts_start;
+    uint64_t dt2 = dt & 1 ? /* dt * (dt - 1) / 2 without overflow */
+                   dt * ((dt - 1) >> 1) : (dt >> 1) * (dt - 1);
+    return in->phi0 + dt * in->dphi0 + dt2 * in->ddphi;
+}
+
+/* Random-access seek: rebuild the active-interval list for timestamp ts,
+ * recompute each interval's running phase/amplitude analytically, and
+ * fast-forward both LCG states so output is identical to linear decoding. */
+static void wavesynth_seek(struct wavesynth_context *ws, int64_t ts)
+{
+    int *last, i;
+    struct ws_interval *in;
+
+    last = &ws->cur_inter;
+    for (i = 0; i < ws->nb_inter; i++) {
+        in = &ws->inter[i];
+        if (ts < in->ts_start)
+            break;
+        if (ts >= in->ts_end)
+            continue;
+        /* interval i covers ts: append it to the active list */
+        *last = i;
+        last = &in->next;
+        in->phi = phi_at(in, ts);
+        in->dphi = in->dphi0 + (ts - in->ts_start) * in->ddphi;
+        in->amp = in->amp0 + (ts - in->ts_start) * in->damp;
+    }
+    ws->next_inter = i;
+    ws->next_ts = i < ws->nb_inter ? ws->inter[i].ts_start : INF_TS;
+    *last = -1;
+    lcg_seek(&ws->dither_state, ts - ws->cur_ts);
+    if (ws->pink_need) {
+        /* pink noise is generated in PINK_UNIT batches at 2 LCG steps per
+         * sample; seek the generator to the start of the batch holding ts */
+        int64_t pink_ts_cur = (ws->cur_ts + PINK_UNIT - 1) & ~(PINK_UNIT - 1);
+        int64_t pink_ts_next = ts & ~(PINK_UNIT - 1);
+        int pos = ts & (PINK_UNIT - 1);
+        lcg_seek(&ws->pink_state, (pink_ts_next - pink_ts_cur) << 1);
+        if (pos) {
+            pink_fill(ws);
+            ws->pink_pos = pos;
+        } else {
+            ws->pink_pos = PINK_UNIT;
+        }
+    }
+    ws->cur_ts = ts;
+}
+
+/* Parse the extradata layout documented at the top of the file into
+ * ws->inter; validates ordering and bounds of every interval.
+ * Returns 0 or a negative AVERROR code (the caller frees ws->inter). */
+static int wavesynth_parse_extradata(AVCodecContext *avc)
+{
+    struct wavesynth_context *ws = avc->priv_data;
+    struct ws_interval *in;
+    uint8_t *edata, *edata_end;
+    int32_t f1, f2, a1, a2;
+    uint32_t phi;
+    int64_t dphi1, dphi2, dt, cur_ts = -0x8000000000000000;
+    int i;
+
+    if (avc->extradata_size < 4)
+        return AVERROR(EINVAL);
+    edata = avc->extradata;
+    edata_end = edata + avc->extradata_size;
+    ws->nb_inter = AV_RL32(edata);
+    edata += 4;
+    if (ws->nb_inter < 0)
+        return AVERROR(EINVAL);
+    ws->inter = av_calloc(ws->nb_inter, sizeof(*ws->inter));
+    if (!ws->inter)
+        return AVERROR(ENOMEM);
+    for (i = 0; i < ws->nb_inter; i++) {
+        in = &ws->inter[i];
+        if (edata_end - edata < 24)
+            return AVERROR(EINVAL);
+        in->ts_start = AV_RL64(edata + 0);
+        in->ts_end = AV_RL64(edata + 8);
+        in->type = AV_RL32(edata + 16);
+        in->channels = AV_RL32(edata + 20);
+        edata += 24;
+        /* intervals must be non-empty and sorted by ascending ts_start */
+        if (in->ts_start < cur_ts || in->ts_end <= in->ts_start)
+            return AVERROR(EINVAL);
+        cur_ts = in->ts_start;
+        dt = in->ts_end - in->ts_start;
+        switch (in->type) {
+        case WS_SINE:
+            if (edata_end - edata < 20)
+                return AVERROR(EINVAL);
+            f1 = AV_RL32(edata + 0);
+            f2 = AV_RL32(edata + 4);
+            a1 = AV_RL32(edata + 8);
+            a2 = AV_RL32(edata + 12);
+            phi = AV_RL32(edata + 16);
+            edata += 20;
+            /* frequencies are Q16 Hz; convert to per-sample phase steps */
+            dphi1 = frac64(f1, (int64_t)avc->sample_rate << 16);
+            dphi2 = frac64(f2, (int64_t)avc->sample_rate << 16);
+            in->dphi0 = dphi1;
+            in->ddphi = (dphi2 - dphi1) / dt;
+            if (phi & 0x80000000) {
+                /* top bit set: copy the phase of an earlier interval */
+                phi &= ~0x80000000;
+                if (phi >= i)
+                    return AVERROR(EINVAL);
+                in->phi0 = phi_at(&ws->inter[phi], in->ts_start);
+            } else {
+                in->phi0 = (uint64_t)phi << 33;
+            }
+            break;
+        case WS_NOISE:
+            if (edata_end - edata < 8)
+                return AVERROR(EINVAL);
+            a1 = AV_RL32(edata + 0);
+            a2 = AV_RL32(edata + 4);
+            edata += 8;
+            break;
+        default:
+            return AVERROR(EINVAL);
+        }
+        in->amp0 = (int64_t)a1 << 32;
+        in->damp = (((int64_t)a2 << 32) - ((int64_t)a1 << 32)) / dt;
+    }
+    if (edata != edata_end)
+        return AVERROR(EINVAL);
+    return 0;
+}
+
+/* Decoder init: parse extradata, build the sine table, seed the LCGs
+ * and position the synthesizer at timestamp 0. */
+static av_cold int wavesynth_init(AVCodecContext *avc)
+{
+    struct wavesynth_context *ws = avc->priv_data;
+    int i, r;
+
+    if (avc->channels > WS_MAX_CHANNELS) {
+        av_log(avc, AV_LOG_ERROR,
+               "This implementation is limited to %d channels.\n",
+               WS_MAX_CHANNELS);
+        return AVERROR(EINVAL);
+    }
+    r = wavesynth_parse_extradata(avc);
+    if (r < 0) {
+        av_log(avc, AV_LOG_ERROR, "Invalid intervals definitions.\n");
+        goto fail;
+    }
+    ws->sin = av_malloc(sizeof(*ws->sin) << SIN_BITS);
+    if (!ws->sin) {
+        r = AVERROR(ENOMEM);
+        goto fail;
+    }
+    for (i = 0; i < 1 << SIN_BITS; i++)
+        ws->sin[i] = floor(32767 * sin(2 * M_PI * i / (1 << SIN_BITS)));
+    /* fixed seeds keep output bit-exact across runs */
+    ws->dither_state = MKTAG('D','I','T','H');
+    for (i = 0; i < ws->nb_inter; i++)
+        ws->pink_need += ws->inter[i].type == WS_NOISE;
+    ws->pink_state = MKTAG('P','I','N','K');
+    ws->pink_pos = PINK_UNIT;
+    wavesynth_seek(ws, 0);
+    avc->sample_fmt = AV_SAMPLE_FMT_S16;
+    return 0;
+
+fail:
+    av_freep(&ws->inter);
+    av_freep(&ws->sin);
+    return r;
+}
+
+/* Synthesize one sample at timestamp ts, accumulating each active
+ * interval into the per-channel 32-bit accumulators and unlinking
+ * intervals whose ts_end has passed; finally adds shared dither. */
+static void wavesynth_synth_sample(struct wavesynth_context *ws, int64_t ts,
+                                   int32_t *channels)
+{
+    int32_t amp, val, *cv;
+    struct ws_interval *in;
+    int i, *last, pink;
+    uint32_t c, all_ch = 0;
+
+    i = ws->cur_inter;
+    last = &ws->cur_inter;
+    if (ws->pink_pos == PINK_UNIT)
+        pink_fill(ws);
+    pink = ws->pink_pool[ws->pink_pos++] >> 16;
+    while (i >= 0) {
+        in = &ws->inter[i];
+        i = in->next;
+        if (ts >= in->ts_end) {
+            *last = i;  /* expired: splice out of the active list */
+            continue;
+        }
+        last = &in->next;
+        amp = in->amp >> 32;
+        in->amp += in->damp;
+        switch (in->type) {
+        case WS_SINE:
+            val = amp * ws->sin[in->phi >> (64 - SIN_BITS)];
+            in->phi += in->dphi;
+            in->dphi += in->ddphi;
+            break;
+        case WS_NOISE:
+            val = amp * pink;
+            break;
+        default:
+            val = 0;
+        }
+        all_ch |= in->channels;
+        for (c = in->channels, cv = channels; c; c >>= 1, cv++)
+            if (c & 1)
+                *cv += val;
+    }
+    /* one dither value per sample, applied to every touched channel */
+    val = (int32_t)lcg_next(&ws->dither_state) >> 16;
+    for (c = all_ch, cv = channels; c; c >>= 1, cv++)
+        if (c & 1)
+            *cv += val;
+}
+
+/* Append to the active list all intervals whose ts_start has been reached
+ * by ts, initializing their running state; updates next_inter/next_ts. */
+static void wavesynth_enter_intervals(struct wavesynth_context *ws, int64_t ts)
+{
+    int *last, i;
+    struct ws_interval *in;
+
+    /* find the tail of the current active list */
+    last = &ws->cur_inter;
+    for (i = ws->cur_inter; i >= 0; i = ws->inter[i].next)
+        last = &ws->inter[i].next;
+    for (i = ws->next_inter; i < ws->nb_inter; i++) {
+        in = &ws->inter[i];
+        if (ts < in->ts_start)
+            break;
+        if (ts >= in->ts_end)
+            continue;
+        *last = i;
+        last = &in->next;
+        in->phi = in->phi0;
+        in->dphi = in->dphi0;
+        in->amp = in->amp0;
+    }
+    ws->next_inter = i;
+    ws->next_ts = i < ws->nb_inter ? ws->inter[i].ts_start : INF_TS;
+    *last = -1;
+}
+
+/* Decode callback: each 12-byte packet requests [ts, ts+duration) samples
+ * (see the packet format comment above); seeks if ts is discontinuous,
+ * then synthesizes interleaved S16 PCM into the output frame. */
+static int wavesynth_decode(AVCodecContext *avc, void *rframe, int *rgot_frame,
+                            AVPacket *packet)
+{
+    struct wavesynth_context *ws = avc->priv_data;
+    AVFrame *frame = rframe;
+    int64_t ts;
+    int duration;
+    int s, c, r;
+    int16_t *pcm;
+    int32_t channels[WS_MAX_CHANNELS];
+
+    *rgot_frame = 0;
+    if (packet->size != 12)
+        return AVERROR_INVALIDDATA;
+    ts = AV_RL64(packet->data);
+    if (ts != ws->cur_ts)
+        wavesynth_seek(ws, ts);
+    duration = AV_RL32(packet->data + 8);
+    if (duration <= 0)
+        return AVERROR(EINVAL);
+    frame->nb_samples = duration;
+    r = ff_get_buffer(avc, frame, 0);
+    if (r < 0)
+        return r;
+    pcm = (int16_t *)frame->data[0];
+    for (s = 0; s < duration; s++, ts++) {
+        memset(channels, 0, avc->channels * sizeof(*channels));
+        if (ts >= ws->next_ts)
+            wavesynth_enter_intervals(ws, ts);
+        wavesynth_synth_sample(ws, ts, channels);
+        /* accumulators are Q16; truncate to 16-bit output */
+        for (c = 0; c < avc->channels; c++)
+            *(pcm++) = channels[c] >> 16;
+    }
+    ws->cur_ts += duration;
+    *rgot_frame = 1;
+    return packet->size;
+}
+
+/* Free the sine table and interval array allocated at init time. */
+static av_cold int wavesynth_close(AVCodecContext *avc)
+{
+    struct wavesynth_context *ws = avc->priv_data;
+
+    av_freep(&ws->sin);
+    av_freep(&ws->inter);
+    return 0;
+}
+
+/* Codec registration entry for the wavesynth pseudo-codec. */
+AVCodec ff_ffwavesynth_decoder = {
+    .name = "wavesynth",
+    .long_name = NULL_IF_CONFIG_SMALL("Wave synthesis pseudo-codec"),
+    .type = AVMEDIA_TYPE_AUDIO,
+    .id = AV_CODEC_ID_FFWAVESYNTH,
+    .priv_data_size = sizeof(struct wavesynth_context),
+    .init = wavesynth_init,
+    .close = wavesynth_close,
+    .decode = wavesynth_decode,
++    .capabilities = AV_CODEC_CAP_DR1,
+};
.init = fic_decode_init,
.decode = fic_decode_frame,
.close = fic_decode_close,
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_SLICE_THREADS,
+ .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS,
+ .priv_class = &fic_decoder_class,
};
.init = flac_decode_init,
.close = flac_decode_close,
.decode = flac_decode_frame,
- .capabilities = AV_CODEC_CAP_DR1,
+ .init_thread_copy = ONLY_IF_THREADS_ENABLED(init_thread_copy),
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
++ .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_S16P,
AV_SAMPLE_FMT_S32,
.init = flac_encode_init,
.encode2 = flac_encode_frame,
.close = flac_encode_close,
- .capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY | CODEC_CAP_LOSSLESS,
- .capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME | AV_CODEC_CAP_DELAY,
++ .capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_LOSSLESS,
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_S32,
AV_SAMPLE_FMT_NONE },
.init = ff_h263_decode_init,
.close = ff_h263_decode_end,
.decode = ff_h263_decode_frame,
- .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1,
+ .max_lowres = 3,
.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
AV_PIX_FMT_NONE },
};
--- /dev/null
- || !(avctx->codec->capabilities & CODEC_CAP_INTRA_ONLY))
+/*
+ * Copyright (c) 2012 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "frame_thread_encoder.h"
+
+#include "libavutil/fifo.h"
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "avcodec.h"
+#include "internal.h"
+#include "thread.h"
+
+#if HAVE_PTHREADS
+#include <pthread.h>
+#elif HAVE_W32THREADS
+#include "compat/w32pthreads.h"
+#elif HAVE_OS2THREADS
+#include "compat/os2threads.h"
+#endif
+
+#define MAX_THREADS 64
+#define BUFFER_SIZE (2*MAX_THREADS)
+
+/* One unit of work handed to a worker thread: an input frame and the
+ * slot index where the resulting packet must be stored. */
+typedef struct{
+    void *indata;        /* AVFrame* to encode */
+    void *outdata;       /* AVPacket* result, set by the worker */
+    int64_t return_code; /* encoder return value for this task */
+    unsigned index;      /* slot in finished_tasks[] */
+} Task;
+
+/* Shared state of the frame-threaded encoder: a task FIFO feeding the
+ * workers and a ring of finished tasks drained in submission order. */
+typedef struct{
+    AVCodecContext *parent_avctx;
+    pthread_mutex_t buffer_mutex;       /* serializes frame unref / codec close */
+
+    AVFifoBuffer *task_fifo;            /* pending Tasks, guarded by task_fifo_mutex */
+    pthread_mutex_t task_fifo_mutex;
+    pthread_cond_t task_fifo_cond;
+
+    Task finished_tasks[BUFFER_SIZE];   /* ring indexed by Task.index */
+    pthread_mutex_t finished_task_mutex;
+    pthread_cond_t finished_task_cond;
+
+    unsigned task_index;                /* next submission slot */
+    unsigned finished_task_index;       /* next slot to drain */
+
+    pthread_t worker[MAX_THREADS];
+    int exit;                           /* set under task_fifo_mutex to stop workers */
+} ThreadContext;
+
+/* Worker thread main loop: pop a Task from the FIFO, encode the frame with
+ * this thread's private codec context, and publish the packet into the
+ * finished_tasks slot recorded in the task. Closes and frees its own
+ * AVCodecContext on exit. */
+static void * attribute_align_arg worker(void *v){
+    AVCodecContext *avctx = v;
+    ThreadContext *c = avctx->internal->frame_thread_encoder;
+    AVPacket *pkt = NULL;
+
+    while(!c->exit){
+        int got_packet, ret;
+        AVFrame *frame;
+        Task task;
+
+        if(!pkt) pkt= av_mallocz(sizeof(*pkt));
+        if(!pkt) continue;
+        av_init_packet(pkt);
+
+        pthread_mutex_lock(&c->task_fifo_mutex);
+        while (av_fifo_size(c->task_fifo) <= 0 || c->exit) {
+            if(c->exit){
+                pthread_mutex_unlock(&c->task_fifo_mutex);
+                goto end;
+            }
+            pthread_cond_wait(&c->task_fifo_cond, &c->task_fifo_mutex);
+        }
+        av_fifo_generic_read(c->task_fifo, &task, sizeof(task), NULL);
+        pthread_mutex_unlock(&c->task_fifo_mutex);
+        frame = task.indata;
+
+        ret = avcodec_encode_video2(avctx, pkt, frame, &got_packet);
+        pthread_mutex_lock(&c->buffer_mutex);
+        av_frame_unref(frame);
+        pthread_mutex_unlock(&c->buffer_mutex);
+        av_frame_free(&frame);
+        if(got_packet) {
+            av_dup_packet(pkt); /* own the data; pkt may reference encoder buffers */
+        } else {
+            pkt->data = NULL;
+            pkt->size = 0;
+        }
+        pthread_mutex_lock(&c->finished_task_mutex);
+        c->finished_tasks[task.index].outdata = pkt; pkt = NULL;
+        c->finished_tasks[task.index].return_code = ret;
+        pthread_cond_signal(&c->finished_task_cond);
+        pthread_mutex_unlock(&c->finished_task_mutex);
+    }
+end:
+    av_free(pkt);
+    pthread_mutex_lock(&c->buffer_mutex);
+    avcodec_close(avctx);
+    pthread_mutex_unlock(&c->buffer_mutex);
+    av_freep(&avctx);
+    return NULL;
+}
+
+/* Set up frame-threaded encoding: only for intra-only codecs with
+ * FF_THREAD_FRAME requested. Spawns one worker per thread, each with its
+ * own opened clone of the codec context. Returns 0 when frame threading
+ * is not used (not an error), negative on failure. */
+int ff_frame_thread_encoder_init(AVCodecContext *avctx, AVDictionary *options){
+    int i=0;
+    ThreadContext *c;
+
+
+    if(   !(avctx->thread_type & FF_THREAD_FRAME)
++       || !(avctx->codec->capabilities & AV_CODEC_CAP_INTRA_ONLY))
+        return 0;
+
+    /* MJPEG rate control interacts badly with frame threads; force or warn */
+    if(   !avctx->thread_count
+       && avctx->codec_id == AV_CODEC_ID_MJPEG
+       && !(avctx->flags & AV_CODEC_FLAG_QSCALE)) {
+        av_log(avctx, AV_LOG_DEBUG,
+               "Forcing thread count to 1 for MJPEG encoding, use -thread_type slice "
+               "or a constant quantizer if you want to use multiple cpu cores\n");
+        avctx->thread_count = 1;
+    }
+    if(   avctx->thread_count > 1
+       && avctx->codec_id == AV_CODEC_ID_MJPEG
+       && !(avctx->flags & AV_CODEC_FLAG_QSCALE))
+        av_log(avctx, AV_LOG_WARNING,
+               "MJPEG CBR encoding works badly with frame multi-threading, consider "
+               "using -threads 1, -thread_type slice or a constant quantizer.\n");
+
+    if (avctx->codec_id == AV_CODEC_ID_HUFFYUV ||
+        avctx->codec_id == AV_CODEC_ID_FFVHUFF) {
+        int warn = 0;
+        if (avctx->flags & AV_CODEC_FLAG_PASS1)
+            warn = 1;
+        else if(avctx->context_model > 0) {
+            AVDictionaryEntry *t = av_dict_get(options, "non_deterministic",
+                                               NULL, AV_DICT_MATCH_CASE);
+            warn = !t || !t->value || !atoi(t->value) ? 1 : 0;
+        }
+        // huffyuv does not support these with multiple frame threads currently
+        if (warn) {
+            av_log(avctx, AV_LOG_WARNING,
+                   "Forcing thread count to 1 for huffyuv encoding with first pass or context 1\n");
+            avctx->thread_count = 1;
+        }
+    }
+
+    if(!avctx->thread_count) {
+        avctx->thread_count = av_cpu_count();
+        avctx->thread_count = FFMIN(avctx->thread_count, MAX_THREADS);
+    }
+
+    if(avctx->thread_count <= 1)
+        return 0;
+
+    if(avctx->thread_count > MAX_THREADS)
+        return AVERROR(EINVAL);
+
+    av_assert0(!avctx->internal->frame_thread_encoder);
+    c = avctx->internal->frame_thread_encoder = av_mallocz(sizeof(ThreadContext));
+    if(!c)
+        return AVERROR(ENOMEM);
+
+    c->parent_avctx = avctx;
+
+    c->task_fifo = av_fifo_alloc_array(BUFFER_SIZE, sizeof(Task));
+    if(!c->task_fifo)
+        goto fail;
+
+    pthread_mutex_init(&c->task_fifo_mutex, NULL);
+    pthread_mutex_init(&c->finished_task_mutex, NULL);
+    pthread_mutex_init(&c->buffer_mutex, NULL);
+    pthread_cond_init(&c->task_fifo_cond, NULL);
+    pthread_cond_init(&c->finished_task_cond, NULL);
+
+    for(i=0; i<avctx->thread_count ; i++){
+        AVDictionary *tmp = NULL;
+        void *tmpv;
+        AVCodecContext *thread_avctx = avcodec_alloc_context3(avctx->codec);
+        if(!thread_avctx)
+            goto fail;
+        /* shallow-copy the user context but keep the clone's priv_data */
+        tmpv = thread_avctx->priv_data;
+        *thread_avctx = *avctx;
+        thread_avctx->priv_data = tmpv;
+        thread_avctx->internal = NULL;
+        memcpy(thread_avctx->priv_data, avctx->priv_data, avctx->codec->priv_data_size);
+        thread_avctx->thread_count = 1;
+        thread_avctx->active_thread_type &= ~FF_THREAD_FRAME;
+
+        av_dict_copy(&tmp, options, 0);
+        av_dict_set(&tmp, "threads", "1", 0);
+        if(avcodec_open2(thread_avctx, avctx->codec, &tmp) < 0) {
+            av_dict_free(&tmp);
+            goto fail;
+        }
+        av_dict_free(&tmp);
+        av_assert0(!thread_avctx->internal->frame_thread_encoder);
+        thread_avctx->internal->frame_thread_encoder = c;
+        if(pthread_create(&c->worker[i], NULL, worker, thread_avctx)) {
+            goto fail;
+        }
+    }
+
+    avctx->active_thread_type = FF_THREAD_FRAME;
+
+    return 0;
+fail:
+    avctx->thread_count = i; /* only join the threads actually created */
+    av_log(avctx, AV_LOG_ERROR, "ff_frame_thread_encoder_init failed\n");
+    ff_frame_thread_encoder_free(avctx);
+    return -1;
+}
+
+/* Signal all workers to exit, join them, then destroy the synchronization
+ * primitives and the context. Workers free their own codec contexts. */
+void ff_frame_thread_encoder_free(AVCodecContext *avctx){
+    int i;
+    ThreadContext *c= avctx->internal->frame_thread_encoder;
+
+    pthread_mutex_lock(&c->task_fifo_mutex);
+    c->exit = 1;
+    pthread_cond_broadcast(&c->task_fifo_cond);
+    pthread_mutex_unlock(&c->task_fifo_mutex);
+
+    for (i=0; i<avctx->thread_count; i++) {
+         pthread_join(c->worker[i], NULL);
+    }
+
+    pthread_mutex_destroy(&c->task_fifo_mutex);
+    pthread_mutex_destroy(&c->finished_task_mutex);
+    pthread_mutex_destroy(&c->buffer_mutex);
+    pthread_cond_destroy(&c->task_fifo_cond);
+    pthread_cond_destroy(&c->finished_task_cond);
+    av_fifo_freep(&c->task_fifo);
+    av_freep(&avctx->internal->frame_thread_encoder);
+}
+
+/* Submit a frame to the worker pool (frame != NULL) and/or drain the next
+ * finished packet in submission order. Returns the worker's encode return
+ * code; *got_packet_ptr is set when a packet is produced. */
+int ff_thread_video_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet_ptr){
+    ThreadContext *c = avctx->internal->frame_thread_encoder;
+    Task task;
+    int ret;
+
+    av_assert1(!*got_packet_ptr);
+
+    if(frame){
+        /* reference the frame so the worker owns its own copy */
+        AVFrame *new = av_frame_alloc();
+        if(!new)
+            return AVERROR(ENOMEM);
+        ret = av_frame_ref(new, frame);
+        if(ret < 0) {
+            av_frame_free(&new);
+            return ret;
+        }
+
+        task.index = c->task_index;
+        task.indata = (void*)new;
+        pthread_mutex_lock(&c->task_fifo_mutex);
+        av_fifo_generic_write(c->task_fifo, &task, sizeof(task), NULL);
+        pthread_cond_signal(&c->task_fifo_cond);
+        pthread_mutex_unlock(&c->task_fifo_mutex);
+
+        c->task_index = (c->task_index+1) % BUFFER_SIZE;
+
+        /* don't block while the pipeline is still filling up */
+        if(!c->finished_tasks[c->finished_task_index].outdata && (c->task_index - c->finished_task_index) % BUFFER_SIZE <= avctx->thread_count)
+            return 0;
+    }
+
+    if(c->task_index == c->finished_task_index)
+        return 0;
+
+    pthread_mutex_lock(&c->finished_task_mutex);
+    while (!c->finished_tasks[c->finished_task_index].outdata) {
+        pthread_cond_wait(&c->finished_task_cond, &c->finished_task_mutex);
+    }
+    task = c->finished_tasks[c->finished_task_index];
+    *pkt = *(AVPacket*)(task.outdata);
+    if(pkt->data)
+        *got_packet_ptr = 1;
+    av_freep(&c->finished_tasks[c->finished_task_index].outdata);
+    c->finished_task_index = (c->finished_task_index+1) % BUFFER_SIZE;
+    pthread_mutex_unlock(&c->finished_task_mutex);
+
+    return task.return_code;
+}
.init = decode_init,
.close = decode_end,
.decode = decode_frame,
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
- .capabilities = AV_CODEC_CAP_DR1,
++ .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
};
.long_name = NULL_IF_CONFIG_SMALL("Forward Uncompressed"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_FRWU,
+ .priv_data_size = sizeof(FRWUContext),
.init = decode_init,
.decode = decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
+ .priv_class = &frwu_class,
};
.priv_data_size = sizeof(G723_1_Context),
.init = g723_1_decode_init,
.decode = g723_1_decode_frame,
- .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
.priv_class = &g723_1dec_class,
};
+
+#if CONFIG_G723_1_ENCODER
+#define BITSTREAM_WRITER_LE
+#include "put_bits.h"
+
+/* Encoder init: G.723.1 requires 8 kHz mono; only the 6.3 kbit/s rate is
+ * implemented. Sets the 240-sample frame size and seeds the LSP history
+ * with the DC vector. */
+static av_cold int g723_1_encode_init(AVCodecContext *avctx)
+{
+    G723_1_Context *p = avctx->priv_data;
+
+    if (avctx->sample_rate != 8000) {
+        av_log(avctx, AV_LOG_ERROR, "Only 8000Hz sample rate supported\n");
+        return -1;
+    }
+
+    if (avctx->channels != 1) {
+        av_log(avctx, AV_LOG_ERROR, "Only mono supported\n");
+        return AVERROR(EINVAL);
+    }
+
+    if (avctx->bit_rate == 6300) {
+        p->cur_rate = RATE_6300;
+    } else if (avctx->bit_rate == 5300) {
+        av_log(avctx, AV_LOG_ERROR, "Bitrate not supported yet, use 6.3k\n");
+        return AVERROR_PATCHWELCOME;
+    } else {
+        av_log(avctx, AV_LOG_ERROR,
+               "Bitrate not supported, use 6.3k\n");
+        return AVERROR(EINVAL);
+    }
+    avctx->frame_size = 240;
+    memcpy(p->prev_lsp, dc_lsp, LPC_ORDER * sizeof(int16_t));
+
+    return 0;
+}
+
+/**
+ * Remove DC component from the input signal.
+ *
+ * @param buf input signal
+ * @param fir zero memory
+ * @param iir pole memory
+ */
+/**
+ * Remove DC component from the input signal.
+ *
+ * @param buf input signal
+ * @param fir zero memory
+ * @param iir pole memory
+ */
+static void highpass_filter(int16_t *buf, int16_t *fir, int *iir)
+{
+    int i;
+    for (i = 0; i < FRAME_LEN; i++) {
+        /* first-order IIR high-pass, pole at 0x7f00/2^15 ~ 0.992 */
+        *iir = (buf[i] << 15) + ((-*fir) << 15) + MULL2(*iir, 0x7f00);
+        *fir = buf[i];
+        buf[i] = av_clipl_int32((int64_t)*iir + (1 << 15)) >> 16;
+    }
+}
+
+/**
+ * Estimate autocorrelation of the input vector.
+ *
+ * @param buf input buffer
+ * @param autocorr autocorrelation coefficients vector
+ */
+/**
+ * Estimate autocorrelation of the input vector.
+ *
+ * @param buf      input buffer
+ * @param autocorr autocorrelation coefficients vector
+ *                 (autocorr[0] is R(0), followed by LPC_ORDER lags)
+ */
+static void comp_autocorr(int16_t *buf, int16_t *autocorr)
+{
+    int i, scale, temp;
+    int16_t vector[LPC_FRAME];
+
+    scale_vector(vector, buf, LPC_FRAME);
+
+    /* Apply the Hamming window */
+    for (i = 0; i < LPC_FRAME; i++)
+        vector[i] = (vector[i] * hamming_window[i] + (1 << 14)) >> 15;
+
+    /* Compute the first autocorrelation coefficient */
+    temp = ff_dot_product(vector, vector, LPC_FRAME);
+
+    /* Apply a white noise correlation factor of (1025/1024) */
+    temp += temp >> 10;
+
+    /* Normalize */
+    scale = normalize_bits_int32(temp);
+    autocorr[0] = av_clipl_int32((int64_t)(temp << scale) +
+                                 (1 << 15)) >> 16;
+
+    /* Compute the remaining coefficients */
+    if (!autocorr[0]) {
+        memset(autocorr + 1, 0, LPC_ORDER * sizeof(int16_t));
+    } else {
+        for (i = 1; i <= LPC_ORDER; i++) {
+            temp = ff_dot_product(vector, vector + i, LPC_FRAME - i);
+            /* lag window (binomial) applied in the same normalized scale */
+            temp = MULL2((temp << scale), binomial_window[i - 1]);
+            autocorr[i] = av_clipl_int32((int64_t)temp + (1 << 15)) >> 16;
+        }
+    }
+}
+
+/**
+ * Use Levinson-Durbin recursion to compute LPC coefficients from
+ * autocorrelation values.
+ *
+ * @param lpc LPC coefficients vector
+ * @param autocorr autocorrelation coefficients vector
+ * @param error prediction error
+ */
+/**
+ * Use Levinson-Durbin recursion to compute LPC coefficients from
+ * autocorrelation values.
+ *
+ * @param lpc      LPC coefficients vector
+ * @param autocorr autocorrelation coefficients vector (lags 1..LPC_ORDER)
+ * @param error    prediction error (R(0) on entry)
+ */
+static void levinson_durbin(int16_t *lpc, int16_t *autocorr, int16_t error)
+{
+    int16_t vector[LPC_ORDER];
+    int16_t partial_corr;
+    int i, j, temp;
+
+    memset(lpc, 0, LPC_ORDER * sizeof(int16_t));
+
+    for (i = 0; i < LPC_ORDER; i++) {
+        /* Compute the partial correlation coefficient */
+        temp = 0;
+        for (j = 0; j < i; j++)
+            temp -= lpc[j] * autocorr[i - j - 1];
+        temp = ((autocorr[i] << 13) + temp) << 3;
+
+        /* unstable filter: abort and keep the coefficients found so far */
+        if (FFABS(temp) >= (error << 16))
+            break;
+
+        partial_corr = temp / (error << 1);
+
+        lpc[i] = av_clipl_int32((int64_t)(partial_corr << 14) +
+                                (1 << 15)) >> 16;
+
+        /* Update the prediction error */
+        temp = MULL2(temp, partial_corr);
+        error = av_clipl_int32((int64_t)(error << 16) - temp +
+                               (1 << 15)) >> 16;
+
+        memcpy(vector, lpc, i * sizeof(int16_t));
+        for (j = 0; j < i; j++) {
+            temp = partial_corr * vector[i - j - 1] << 1;
+            lpc[j] = av_clipl_int32((int64_t)(lpc[j] << 16) - temp +
+                                    (1 << 15)) >> 16;
+        }
+    }
+}
+
+/**
+ * Calculate LPC coefficients for the current frame.
+ *
+ * @param buf current frame
+ * @param prev_data 2 trailing subframes of the previous frame
+ * @param lpc LPC coefficients vector
+ */
+/**
+ * Calculate LPC coefficients for the current frame, one set per subframe.
+ *
+ * @param buf current frame
+ * @param lpc LPC coefficients vector (SUBFRAMES * LPC_ORDER entries)
+ */
+static void comp_lpc_coeff(int16_t *buf, int16_t *lpc)
+{
+    int16_t autocorr[(LPC_ORDER + 1) * SUBFRAMES];
+    int16_t *autocorr_ptr = autocorr;
+    int16_t *lpc_ptr = lpc;
+    int i, j;
+
+    for (i = 0, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++) {
+        comp_autocorr(buf + i, autocorr_ptr);
+        levinson_durbin(lpc_ptr, autocorr_ptr + 1, autocorr_ptr[0]);
+
+        lpc_ptr += LPC_ORDER;
+        autocorr_ptr += LPC_ORDER + 1;
+    }
+}
+
+/**
+ * Convert LPC coefficients to line spectral pairs by locating the roots
+ * of the sum (F1) and difference (F2) polynomials on the unit circle.
+ * Falls back to prev_lsp if fewer than LPC_ORDER roots are found.
+ *
+ * @param lpc      LPC coefficients (bandwidth-expanded in place via lsp)
+ * @param prev_lsp previous frame's LSP vector (fallback)
+ * @param lsp      output LSP vector
+ */
+static void lpc2lsp(int16_t *lpc, int16_t *prev_lsp, int16_t *lsp)
+{
+    int f[LPC_ORDER + 2]; ///< coefficients of the sum and difference
+                          ///< polynomials (F1, F2) ordered as
+                          ///< f1[0], f2[0], ...., f1[5], f2[5]
+
+    int max, shift, cur_val, prev_val, count, p;
+    int i, j;
+    int64_t temp;
+
+    /* Initialize f1[0] and f2[0] to 1 in Q25 */
+    for (i = 0; i < LPC_ORDER; i++)
+        lsp[i] = (lpc[i] * bandwidth_expand[i] + (1 << 14)) >> 15;
+
+    /* Apply bandwidth expansion on the LPC coefficients */
+    f[0] = f[1] = 1 << 25;
+
+    /* Compute the remaining coefficients */
+    for (i = 0; i < LPC_ORDER / 2; i++) {
+        /* f1 */
+        f[2 * i + 2] = -f[2 * i] - ((lsp[i] + lsp[LPC_ORDER - 1 - i]) << 12);
+        /* f2 */
+        f[2 * i + 3] = f[2 * i + 1] - ((lsp[i] - lsp[LPC_ORDER - 1 - i]) << 12);
+    }
+
+    /* Divide f1[5] and f2[5] by 2 for use in polynomial evaluation */
+    f[LPC_ORDER] >>= 1;
+    f[LPC_ORDER + 1] >>= 1;
+
+    /* Normalize and shorten */
+    max = FFABS(f[0]);
+    for (i = 1; i < LPC_ORDER + 2; i++)
+        max = FFMAX(max, FFABS(f[i]));
+
+    shift = normalize_bits_int32(max);
+
+    for (i = 0; i < LPC_ORDER + 2; i++)
+        f[i] = av_clipl_int32((int64_t)(f[i] << shift) + (1 << 15)) >> 16;
+
+    /**
+     * Evaluate F1 and F2 at uniform intervals of pi/256 along the
+     * unit circle and check for zero crossings.
+     */
+    p = 0;  /* p toggles between the F1 and F2 polynomials */
+    temp = 0;
+    for (i = 0; i <= LPC_ORDER / 2; i++)
+        temp += f[2 * i] * cos_tab[0];
+    prev_val = av_clipl_int32(temp << 1);
+    count = 0;
+    for ( i = 1; i < COS_TBL_SIZE / 2; i++) {
+        /* Evaluate */
+        temp = 0;
+        for (j = 0; j <= LPC_ORDER / 2; j++)
+            temp += f[LPC_ORDER - 2 * j + p] * cos_tab[i * j % COS_TBL_SIZE];
+        cur_val = av_clipl_int32(temp << 1);
+
+        /* Check for sign change, indicating a zero crossing */
+        if ((cur_val ^ prev_val) < 0) {
+            int abs_cur = FFABS(cur_val);
+            int abs_prev = FFABS(prev_val);
+            int sum = abs_cur + abs_prev;
+
+            /* linear interpolation of the root position between samples */
+            shift = normalize_bits_int32(sum);
+            sum <<= shift;
+            abs_prev = abs_prev << shift >> 8;
+            lsp[count++] = ((i - 1) << 7) + (abs_prev >> 1) / (sum >> 16);
+
+            if (count == LPC_ORDER)
+                break;
+
+            /* Switch between sum and difference polynomials */
+            p ^= 1;
+
+            /* Evaluate */
+            temp = 0;
+            for (j = 0; j <= LPC_ORDER / 2; j++){
+                temp += f[LPC_ORDER - 2 * j + p] *
+                        cos_tab[i * j % COS_TBL_SIZE];
+            }
+            cur_val = av_clipl_int32(temp<<1);
+        }
+        prev_val = cur_val;
+    }
+
+    if (count != LPC_ORDER)
+        memcpy(lsp, prev_lsp, LPC_ORDER * sizeof(int16_t));
+}
+
+/**
+ * Quantize the current LSP subvector.
+ *
+ * @param num band number
+ * @param offset offset of the current subvector in an LPC_ORDER vector
+ * @param size size of the current subvector
+ */
+/**
+ * Quantize the current LSP subvector by exhaustive codebook search,
+ * maximizing the weighted match metric; stores the winner in
+ * lsp_index[num]. Expects `weight`, `lsp` and `lsp_index` in scope.
+ *
+ * @param num    band number
+ * @param offset offset of the current subvector in an LPC_ORDER vector
+ * @param size   size of the current subvector
+ */
+#define get_index(num, offset, size) \
+{\
+    int error, max = -1;\
+    int16_t temp[4];\
+    int i, j;\
+    for (i = 0; i < LSP_CB_SIZE; i++) {\
+        for (j = 0; j < size; j++){\
+            temp[j] = (weight[j + (offset)] * lsp_band##num[i][j] +\
+                      (1 << 14)) >> 15;\
+        }\
+        error = dot_product(lsp + (offset), temp, size) << 1;\
+        error -= dot_product(lsp_band##num[i], temp, size);\
+        if (error > max) {\
+            max = error;\
+            lsp_index[num] = i;\
+        }\
+    }\
+}
+
+/**
+ * Vector quantize the LSP frequencies.
+ *
+ * @param lsp the current lsp vector
+ * @param prev_lsp the previous lsp vector
+ */
+/**
+ * Vector quantize the LSP frequencies in three sub-bands (3+3+4).
+ *
+ * @param lsp_index output codebook indices, one per band
+ * @param lsp       the current lsp vector (converted to the VQ residual in place)
+ * @param prev_lsp  the previous lsp vector
+ */
+static void lsp_quantize(uint8_t *lsp_index, int16_t *lsp, int16_t *prev_lsp)
+{
+    int16_t weight[LPC_ORDER];
+    int16_t min, max;
+    int shift, i;
+
+    /* Calculate the VQ weighting vector: inverse of neighbour spacing,
+     * so tightly packed LSPs are quantized more accurately */
+    weight[0] = (1 << 20) / (lsp[1] - lsp[0]);
+    weight[LPC_ORDER - 1] = (1 << 20) /
+                            (lsp[LPC_ORDER - 1] - lsp[LPC_ORDER - 2]);
+
+    for (i = 1; i < LPC_ORDER - 1; i++) {
+        min = FFMIN(lsp[i] - lsp[i - 1], lsp[i + 1] - lsp[i]);
+        if (min > 0x20)
+            weight[i] = (1 << 20) / min;
+        else
+            weight[i] = INT16_MAX;
+    }
+
+    /* Normalize */
+    max = 0;
+    for (i = 0; i < LPC_ORDER; i++)
+        max = FFMAX(weight[i], max);
+
+    shift = normalize_bits_int16(max);
+    for (i = 0; i < LPC_ORDER; i++) {
+        weight[i] <<= shift;
+    }
+
+    /* Compute the VQ target vector: remove DC and the 12288/2^15
+     * predicted contribution of the previous frame */
+    for (i = 0; i < LPC_ORDER; i++) {
+        lsp[i] -= dc_lsp[i] +
+                  (((prev_lsp[i] - dc_lsp[i]) * 12288 + (1 << 14)) >> 15);
+    }
+
+    get_index(0, 0, 3);
+    get_index(1, 3, 3);
+    get_index(2, 6, 4);
+}
+
+/**
+ * Apply the formant perceptual weighting filter.
+ *
+ * @param flt_coef filter coefficients
+ * @param unq_lpc unquantized lpc vector
+ */
+/**
+ * Apply the formant perceptual weighting filter, one subframe at a time,
+ * carrying the FIR/IIR filter memories across frames in the context.
+ *
+ * @param flt_coef filter coefficients (numerator and denominator per subframe)
+ * @param unq_lpc  unquantized lpc vector
+ * @param buf      output buffer, LPC_ORDER history samples followed by FRAME_LEN
+ */
+static void perceptual_filter(G723_1_Context *p, int16_t *flt_coef,
+                              int16_t *unq_lpc, int16_t *buf)
+{
+    int16_t vector[FRAME_LEN + LPC_ORDER];
+    int i, j, k, l = 0;
+
+    memcpy(buf, p->iir_mem, sizeof(int16_t) * LPC_ORDER);
+    memcpy(vector, p->fir_mem, sizeof(int16_t) * LPC_ORDER);
+    memcpy(vector + LPC_ORDER, buf + LPC_ORDER, sizeof(int16_t) * FRAME_LEN);
+
+    for (i = LPC_ORDER, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++) {
+        for (k = 0; k < LPC_ORDER; k++) {
+            /* numerator/denominator = LPC scaled by the two expansion tables */
+            flt_coef[k + 2 * l] = (unq_lpc[k + l] * percept_flt_tbl[0][k] +
+                                  (1 << 14)) >> 15;
+            flt_coef[k + 2 * l + LPC_ORDER] = (unq_lpc[k + l] *
+                                              percept_flt_tbl[1][k] +
+                                              (1 << 14)) >> 15;
+        }
+        iir_filter(flt_coef + 2 * l, flt_coef + 2 * l + LPC_ORDER, vector + i,
+                   buf + i, 0);
+        l += LPC_ORDER;
+    }
+    memcpy(p->iir_mem, buf + FRAME_LEN, sizeof(int16_t) * LPC_ORDER);
+    memcpy(p->fir_mem, vector + FRAME_LEN, sizeof(int16_t) * LPC_ORDER);
+}
+
+/**
+ * Estimate the open loop pitch period.
+ *
+ * @param buf perceptually weighted speech
+ * @param start estimation is carried out from this position
+ */
+/**
+ * Estimate the open loop pitch period by maximizing the normalized
+ * cross-correlation over lags [PITCH_MIN, PITCH_MAX-3], using split
+ * mantissa/exponent arithmetic to keep 16-bit precision.
+ *
+ * @param buf   perceptually weighted speech
+ * @param start estimation is carried out from this position
+ * @return      estimated pitch lag in samples
+ */
+static int estimate_pitch(int16_t *buf, int start)
+{
+    int max_exp = 32;
+    int max_ccr = 0x4000;
+    int max_eng = 0x7fff;
+    int index = PITCH_MIN;
+    int offset = start - PITCH_MIN + 1;
+
+    int ccr, eng, orig_eng, ccr_eng, exp;
+    int diff, temp;
+
+    int i;
+
+    orig_eng = ff_dot_product(buf + offset, buf + offset, HALF_FRAME_LEN);
+
+    for (i = PITCH_MIN; i <= PITCH_MAX - 3; i++) {
+        offset--;
+
+        /* Update energy and compute correlation */
+        orig_eng += buf[offset] * buf[offset] -
+                    buf[offset + HALF_FRAME_LEN] * buf[offset + HALF_FRAME_LEN];
+        ccr = ff_dot_product(buf + start, buf + offset, HALF_FRAME_LEN);
+        if (ccr <= 0)
+            continue;
+
+        /* Split into mantissa and exponent to maintain precision */
+        exp = normalize_bits_int32(ccr);
+        ccr = av_clipl_int32((int64_t)(ccr << exp) + (1 << 15)) >> 16;
+        exp <<= 1;
+        ccr *= ccr;
+        temp = normalize_bits_int32(ccr);
+        ccr = ccr << temp >> 16;
+        exp += temp;
+
+        temp = normalize_bits_int32(orig_eng);
+        eng = av_clipl_int32((int64_t)(orig_eng << temp) + (1 << 15)) >> 16;
+        exp -= temp;
+
+        /* keep ccr/eng as a proper fraction so exponents stay comparable */
+        if (ccr >= eng) {
+            exp--;
+            ccr >>= 1;
+        }
+        if (exp > max_exp)
+            continue;
+
+        if (exp + 1 < max_exp)
+            goto update;
+
+        /* Equalize exponents before comparison */
+        if (exp + 1 == max_exp)
+            temp = max_ccr >> 1;
+        else
+            temp = max_ccr;
+        ccr_eng = ccr * max_eng;
+        diff = ccr_eng - eng * temp;
+        /* prefer the new lag when clearly better, or when close to the
+         * previous candidate (avoids pitch-doubling) */
+        if (diff > 0 && (i - index < PITCH_MIN || diff > ccr_eng >> 2)) {
+update:
+            index = i;
+            max_exp = exp;
+            max_ccr = ccr;
+            max_eng = eng;
+        }
+    }
+    return index;
+}
+
+/**
+ * Compute harmonic noise filter parameters.
+ *
+ * @param buf perceptually weighted speech
+ * @param pitch_lag open loop pitch period
+ * @param hf harmonic filter parameters
+ */
+static void comp_harmonic_coeff(int16_t *buf, int16_t pitch_lag, HFParam *hf)
+{
+ int ccr, eng, max_ccr, max_eng;
+ int exp, max, diff;
+ int energy[15];
+ int i, j;
+
+ /* Evaluate seven candidate lags in [pitch_lag-3, pitch_lag+3]:
+ even slots hold residual energies, odd slots hold correlations */
+ for (i = 0, j = pitch_lag - 3; j <= pitch_lag + 3; i++, j++) {
+ /* Compute residual energy */
+ energy[i << 1] = ff_dot_product(buf - j, buf - j, SUBFRAME_LEN);
+ /* Compute correlation */
+ energy[(i << 1) + 1] = ff_dot_product(buf, buf - j, SUBFRAME_LEN);
+ }
+
+ /* Compute target energy */
+ energy[14] = ff_dot_product(buf, buf, SUBFRAME_LEN);
+
+ /* Normalize */
+ max = 0;
+ for (i = 0; i < 15; i++)
+ max = FFMAX(max, FFABS(energy[i]));
+
+ exp = normalize_bits_int32(max);
+ for (i = 0; i < 15; i++) {
+ energy[i] = av_clipl_int32((int64_t)(energy[i] << exp) +
+ (1 << 15)) >> 16;
+ }
+
+ hf->index = -1;
+ hf->gain = 0;
+ max_ccr = 1;
+ max_eng = 0x7fff;
+
+ /* Pick the candidate maximizing ccr^2 / eng via cross-multiplication */
+ for (i = 0; i <= 6; i++) {
+ eng = energy[i << 1];
+ ccr = energy[(i << 1) + 1];
+
+ if (ccr <= 0)
+ continue;
+
+ ccr = (ccr * ccr + (1 << 14)) >> 15;
+ diff = ccr * max_eng - eng * max_ccr;
+ if (diff > 0) {
+ max_ccr = ccr;
+ max_eng = eng;
+ hf->index = i;
+ }
+ }
+
+ /* No candidate had positive correlation: keep gain 0 (filter disabled) */
+ if (hf->index == -1) {
+ hf->index = pitch_lag;
+ return;
+ }
+
+ eng = energy[14] * max_eng;
+ eng = (eng >> 2) + (eng >> 3);
+ ccr = energy[(hf->index << 1) + 1] * energy[(hf->index << 1) + 1];
+ if (eng < ccr) {
+ eng = energy[(hf->index << 1) + 1];
+
+ /* 0x2800 is the maximum harmonic filter gain (0.3125 in Q15) */
+ if (eng >= max_eng)
+ hf->gain = 0x2800;
+ else
+ hf->gain = ((eng << 15) / max_eng * 0x2800 + (1 << 14)) >> 15;
+ }
+ hf->index += pitch_lag - 3;
+}
+
+/**
+ * Apply the harmonic noise shaping filter.
+ *
+ * @param hf filter parameters
+ */
+static void harmonic_filter(HFParam *hf, const int16_t *src, int16_t *dest)
+{
+ int i;
+
+ /* dest[i] = src[i] - gain * src[i - lag], computed in 32-bit with
+ saturation and Q16 rounding */
+ for (i = 0; i < SUBFRAME_LEN; i++) {
+ int64_t temp = hf->gain * src[i - hf->index] << 1;
+ dest[i] = av_clipl_int32((src[i] << 16) - temp + (1 << 15)) >> 16;
+ }
+}
+
+/**
+ * Subtract src from dest and add back the harmonic contribution
+ * gain * src[i - lag]; results are saturated and rounded in Q16.
+ *
+ * @param hf harmonic filter parameters
+ */
+static void harmonic_noise_sub(HFParam *hf, const int16_t *src, int16_t *dest)
+{
+ int i;
+ for (i = 0; i < SUBFRAME_LEN; i++) {
+ int64_t temp = hf->gain * src[i - hf->index] << 1;
+ dest[i] = av_clipl_int32(((dest[i] - src[i]) << 16) + temp +
+ (1 << 15)) >> 16;
+
+ }
+}
+
+/**
+ * Combined synthesis and formant perceptual weighting filer.
+ *
+ * @param qnt_lpc quantized lpc coefficients
+ * @param perf_lpc perceptual filter coefficients
+ * @param perf_fir perceptual filter fir memory
+ * @param perf_iir perceptual filter iir memory
+ * @param scale the filter output will be scaled by 2^scale
+ */
+static void synth_percept_filter(int16_t *qnt_lpc, int16_t *perf_lpc,
+ int16_t *perf_fir, int16_t *perf_iir,
+ const int16_t *src, int16_t *dest, int scale)
+{
+ int i, j;
+ int16_t buf_16[SUBFRAME_LEN + LPC_ORDER];
+ int64_t buf[SUBFRAME_LEN];
+
+ int16_t *bptr_16 = buf_16 + LPC_ORDER;
+
+ /* Seed the synthesis stage with the FIR memory and the weighting
+ stage with the IIR memory (stored just before dest) */
+ memcpy(buf_16, perf_fir, sizeof(int16_t) * LPC_ORDER);
+ memcpy(dest - LPC_ORDER, perf_iir, sizeof(int16_t) * LPC_ORDER);
+
+ /* LPC synthesis filter: all-pole filtering of src with qnt_lpc */
+ for (i = 0; i < SUBFRAME_LEN; i++) {
+ int64_t temp = 0;
+ for (j = 1; j <= LPC_ORDER; j++)
+ temp -= qnt_lpc[j - 1] * bptr_16[i - j];
+
+ buf[i] = (src[i] << 15) + (temp << 3);
+ bptr_16[i] = av_clipl_int32(buf[i] + (1 << 15)) >> 16;
+ }
+
+ /* Formant perceptual weighting: FIR part uses perf_lpc[0..LPC_ORDER-1],
+ IIR part uses perf_lpc[LPC_ORDER..2*LPC_ORDER-1] */
+ for (i = 0; i < SUBFRAME_LEN; i++) {
+ int64_t fir = 0, iir = 0;
+ for (j = 1; j <= LPC_ORDER; j++) {
+ fir -= perf_lpc[j - 1] * bptr_16[i - j];
+ iir += perf_lpc[j + LPC_ORDER - 1] * dest[i - j];
+ }
+ dest[i] = av_clipl_int32(((buf[i] + (fir << 3)) << scale) + (iir << 3) +
+ (1 << 15)) >> 16;
+ }
+ /* Save updated filter memories for the next subframe */
+ memcpy(perf_fir, buf_16 + SUBFRAME_LEN, sizeof(int16_t) * LPC_ORDER);
+ memcpy(perf_iir, dest + SUBFRAME_LEN - LPC_ORDER,
+ sizeof(int16_t) * LPC_ORDER);
+}
+
+/**
+ * Compute the adaptive codebook contribution.
+ *
+ * @param buf input signal
+ * @param index the current subframe index
+ */
+static void acb_search(G723_1_Context *p, int16_t *residual,
+ int16_t *impulse_resp, const int16_t *buf,
+ int index)
+{
+
+ int16_t flt_buf[PITCH_ORDER][SUBFRAME_LEN];
+
+ const int16_t *cb_tbl = adaptive_cb_gain85;
+
+ int ccr_buf[PITCH_ORDER * SUBFRAMES << 2];
+
+ int pitch_lag = p->pitch_lag[index >> 1];
+ int acb_lag = 1;
+ int acb_gain = 0;
+ int odd_frame = index & 1;
+ /* Even subframes search 3 candidate lags, odd subframes search 4 */
+ int iter = 3 + odd_frame;
+ int count = 0;
+ int tbl_size = 85;
+
+ int i, j, k, l, max;
+ int64_t temp;
+
+ if (!odd_frame) {
+ if (pitch_lag == PITCH_MIN)
+ pitch_lag++;
+ else
+ pitch_lag = FFMIN(pitch_lag, PITCH_MAX - 5);
+ }
+
+ for (i = 0; i < iter; i++) {
+ get_residual(residual, p->prev_excitation, pitch_lag + i - 1);
+
+ /* Convolve the residual with the impulse response (5th-order tap) */
+ for (j = 0; j < SUBFRAME_LEN; j++) {
+ temp = 0;
+ for (k = 0; k <= j; k++)
+ temp += residual[PITCH_ORDER - 1 + k] * impulse_resp[j - k];
+ flt_buf[PITCH_ORDER - 1][j] = av_clipl_int32((temp << 1) +
+ (1 << 15)) >> 16;
+ }
+
+ /* Derive the remaining filtered vectors recursively from the last one */
+ for (j = PITCH_ORDER - 2; j >= 0; j--) {
+ flt_buf[j][0] = ((residual[j] << 13) + (1 << 14)) >> 15;
+ for (k = 1; k < SUBFRAME_LEN; k++) {
+ temp = (flt_buf[j + 1][k - 1] << 15) +
+ residual[j] * impulse_resp[k];
+ flt_buf[j][k] = av_clipl_int32((temp << 1) + (1 << 15)) >> 16;
+ }
+ }
+
+ /* Compute crosscorrelation with the signal */
+ for (j = 0; j < PITCH_ORDER; j++) {
+ temp = ff_dot_product(buf, flt_buf[j], SUBFRAME_LEN);
+ ccr_buf[count++] = av_clipl_int32(temp << 1);
+ }
+
+ /* Compute energies */
+ for (j = 0; j < PITCH_ORDER; j++) {
+ ccr_buf[count++] = dot_product(flt_buf[j], flt_buf[j],
+ SUBFRAME_LEN);
+ }
+
+ /* Cross terms between filtered vectors (lower triangle) */
+ for (j = 1; j < PITCH_ORDER; j++) {
+ for (k = 0; k < j; k++) {
+ temp = ff_dot_product(flt_buf[j], flt_buf[k], SUBFRAME_LEN);
+ ccr_buf[count++] = av_clipl_int32(temp<<2);
+ }
+ }
+ }
+
+ /* Normalize and shorten */
+ max = 0;
+ for (i = 0; i < 20 * iter; i++)
+ max = FFMAX(max, FFABS(ccr_buf[i]));
+
+ temp = normalize_bits_int32(max);
+
+ for (i = 0; i < 20 * iter; i++){
+ ccr_buf[i] = av_clipl_int32((int64_t)(ccr_buf[i] << temp) +
+ (1 << 15)) >> 16;
+ }
+
+ max = 0;
+ for (i = 0; i < iter; i++) {
+ /* Select quantization table */
+ if (!odd_frame && pitch_lag + i - 1 >= SUBFRAME_LEN - 2 ||
+ odd_frame && pitch_lag >= SUBFRAME_LEN - 2) {
+ cb_tbl = adaptive_cb_gain170;
+ tbl_size = 170;
+ }
+
+ /* Exhaustively score each codebook gain vector against the 20
+ correlation terms of this candidate lag */
+ for (j = 0, k = 0; j < tbl_size; j++, k += 20) {
+ temp = 0;
+ for (l = 0; l < 20; l++)
+ temp += ccr_buf[20 * i + l] * cb_tbl[k + l];
+ temp = av_clipl_int32(temp);
+
+ if (temp > max) {
+ max = temp;
+ acb_gain = j;
+ acb_lag = i;
+ }
+ }
+ }
+
+ if (!odd_frame) {
+ pitch_lag += acb_lag - 1;
+ acb_lag = 1;
+ }
+
+ p->pitch_lag[index >> 1] = pitch_lag;
+ p->subframe[index].ad_cb_lag = acb_lag;
+ p->subframe[index].ad_cb_gain = acb_gain;
+}
+
+/**
+ * Subtract the adaptive codebook contribution from the input
+ * to obtain the residual.
+ *
+ * @param buf target vector
+ */
+static void sub_acb_contrib(const int16_t *residual, const int16_t *impulse_resp,
+ int16_t *buf)
+{
+ int i, j;
+ /* Subtract adaptive CB contribution to obtain the residual */
+ for (i = 0; i < SUBFRAME_LEN; i++) {
+ /* buf minus (residual convolved with impulse_resp), saturated */
+ int64_t temp = buf[i] << 14;
+ for (j = 0; j <= i; j++)
+ temp -= residual[j] * impulse_resp[i - j];
+
+ buf[i] = av_clipl_int32((temp << 2) + (1 << 15)) >> 16;
+ }
+}
+
+/**
+ * Quantize the residual signal using the fixed codebook (MP-MLQ).
+ *
+ * @param optim optimized fixed codebook parameters
+ * @param buf excitation vector
+ */
+static void get_fcb_param(FCBParam *optim, int16_t *impulse_resp,
+ int16_t *buf, int pulse_cnt, int pitch_lag)
+{
+ FCBParam param;
+ int16_t impulse_r[SUBFRAME_LEN];
+ int16_t temp_corr[SUBFRAME_LEN];
+ int16_t impulse_corr[SUBFRAME_LEN];
+
+ int ccr1[SUBFRAME_LEN];
+ int ccr2[SUBFRAME_LEN];
+ int amp, err, max, max_amp_index, min, scale, i, j, k, l;
+
+ int64_t temp;
+
+ /* Update impulse response */
+ memcpy(impulse_r, impulse_resp, sizeof(int16_t) * SUBFRAME_LEN);
+ param.dirac_train = 0;
+ if (pitch_lag < SUBFRAME_LEN - 2) {
+ param.dirac_train = 1;
+ gen_dirac_train(impulse_r, pitch_lag);
+ }
+
+ for (i = 0; i < SUBFRAME_LEN; i++)
+ temp_corr[i] = impulse_r[i] >> 1;
+
+ /* Compute impulse response autocorrelation */
+ temp = dot_product(temp_corr, temp_corr, SUBFRAME_LEN);
+
+ scale = normalize_bits_int32(temp);
+ impulse_corr[0] = av_clipl_int32((temp << scale) + (1 << 15)) >> 16;
+
+ for (i = 1; i < SUBFRAME_LEN; i++) {
+ temp = dot_product(temp_corr + i, temp_corr, SUBFRAME_LEN - i);
+ impulse_corr[i] = av_clipl_int32((temp << scale) + (1 << 15)) >> 16;
+ }
+
+ /* Compute crosscorrelation of impulse response with residual signal */
+ scale -= 4;
+ for (i = 0; i < SUBFRAME_LEN; i++){
+ temp = dot_product(buf + i, impulse_r, SUBFRAME_LEN - i);
+ if (scale < 0)
+ ccr1[i] = temp >> -scale;
+ else
+ ccr1[i] = av_clipl_int32(temp << scale);
+ }
+
+ /* Search loop: try every pulse grid (even/odd positions) */
+ for (i = 0; i < GRID_SIZE; i++) {
+ /* Maximize the crosscorrelation */
+ max = 0;
+ for (j = i; j < SUBFRAME_LEN; j += GRID_SIZE) {
+ temp = FFABS(ccr1[j]);
+ if (temp >= max) {
+ max = temp;
+ param.pulse_pos[0] = j;
+ }
+ }
+
+ /* Quantize the gain (max crosscorrelation/impulse_corr[0]) */
+ amp = max;
+ min = 1 << 30;
+ max_amp_index = GAIN_LEVELS - 2;
+ for (j = max_amp_index; j >= 2; j--) {
+ temp = av_clipl_int32((int64_t)fixed_cb_gain[j] *
+ impulse_corr[0] << 1);
+ temp = FFABS(temp - amp);
+ if (temp < min) {
+ min = temp;
+ max_amp_index = j;
+ }
+ }
+
+ max_amp_index--;
+ /* Select additional gain values: try the quantized gain and its
+ neighbours, keeping whichever minimizes the coding error */
+ for (j = 1; j < 5; j++) {
+ for (k = i; k < SUBFRAME_LEN; k += GRID_SIZE) {
+ temp_corr[k] = 0;
+ ccr2[k] = ccr1[k];
+ }
+ param.amp_index = max_amp_index + j - 2;
+ amp = fixed_cb_gain[param.amp_index];
+
+ param.pulse_sign[0] = (ccr2[param.pulse_pos[0]] < 0) ? -amp : amp;
+ temp_corr[param.pulse_pos[0]] = 1;
+
+ /* Place the remaining pulses one by one (greedy search) */
+ for (k = 1; k < pulse_cnt; k++) {
+ max = -1 << 30;
+ for (l = i; l < SUBFRAME_LEN; l += GRID_SIZE) {
+ if (temp_corr[l])
+ continue;
+ temp = impulse_corr[FFABS(l - param.pulse_pos[k - 1])];
+ temp = av_clipl_int32((int64_t)temp *
+ param.pulse_sign[k - 1] << 1);
+ ccr2[l] -= temp;
+ temp = FFABS(ccr2[l]);
+ if (temp > max) {
+ max = temp;
+ param.pulse_pos[k] = l;
+ }
+ }
+
+ param.pulse_sign[k] = (ccr2[param.pulse_pos[k]] < 0) ?
+ -amp : amp;
+ temp_corr[param.pulse_pos[k]] = 1;
+ }
+
+ /* Create the error vector */
+ memset(temp_corr, 0, sizeof(int16_t) * SUBFRAME_LEN);
+
+ for (k = 0; k < pulse_cnt; k++)
+ temp_corr[param.pulse_pos[k]] = param.pulse_sign[k];
+
+ /* Filter the pulse train through the impulse response */
+ for (k = SUBFRAME_LEN - 1; k >= 0; k--) {
+ temp = 0;
+ for (l = 0; l <= k; l++) {
+ int prod = av_clipl_int32((int64_t)temp_corr[l] *
+ impulse_r[k - l] << 1);
+ temp = av_clipl_int32(temp + prod);
+ }
+ temp_corr[k] = temp << 2 >> 16;
+ }
+
+ /* Compute square of error */
+ err = 0;
+ for (k = 0; k < SUBFRAME_LEN; k++) {
+ int64_t prod;
+ prod = av_clipl_int32((int64_t)buf[k] * temp_corr[k] << 1);
+ err = av_clipl_int32(err - prod);
+ prod = av_clipl_int32((int64_t)temp_corr[k] * temp_corr[k]);
+ err = av_clipl_int32(err + prod);
+ }
+
+ /* Minimize */
+ if (err < optim->min_err) {
+ optim->min_err = err;
+ optim->grid_index = i;
+ optim->amp_index = param.amp_index;
+ optim->dirac_train = param.dirac_train;
+
+ for (k = 0; k < pulse_cnt; k++) {
+ optim->pulse_sign[k] = param.pulse_sign[k];
+ optim->pulse_pos[k] = param.pulse_pos[k];
+ }
+ }
+ }
+ }
+}
+
+/**
+ * Encode the pulse position and gain of the current subframe.
+ *
+ * @param optim optimized fixed CB parameters
+ * @param buf excitation vector
+ */
+static void pack_fcb_param(G723_1_Subframe *subfrm, FCBParam *optim,
+ int16_t *buf, int pulse_cnt)
+{
+ int i, j;
+
+ j = PULSE_MAX - pulse_cnt;
+
+ subfrm->pulse_sign = 0;
+ subfrm->pulse_pos = 0;
+
+ /* Encode pulse positions as a combinatorial index and pack the
+ signs as a bitfield (MSB first) */
+ for (i = 0; i < SUBFRAME_LEN >> 1; i++) {
+ int val = buf[optim->grid_index + (i << 1)];
+ if (!val) {
+ subfrm->pulse_pos += combinatorial_table[j][i];
+ } else {
+ subfrm->pulse_sign <<= 1;
+ if (val < 0) subfrm->pulse_sign++;
+ j++;
+
+ if (j == PULSE_MAX) break;
+ }
+ }
+ subfrm->amp_index = optim->amp_index;
+ subfrm->grid_index = optim->grid_index;
+ subfrm->dirac_train = optim->dirac_train;
+}
+
+/**
+ * Compute the fixed codebook excitation.
+ *
+ * @param buf target vector
+ * @param impulse_resp impulse response of the combined filter
+ */
+static void fcb_search(G723_1_Context *p, int16_t *impulse_resp,
+ int16_t *buf, int index)
+{
+ FCBParam optim;
+ int pulse_cnt = pulses[index];
+ int i;
+
+ /* Run the MP-MLQ search once without and (for short pitch lags)
+ once with the dirac-train option, keeping the better result */
+ optim.min_err = 1 << 30;
+ get_fcb_param(&optim, impulse_resp, buf, pulse_cnt, SUBFRAME_LEN);
+
+ if (p->pitch_lag[index >> 1] < SUBFRAME_LEN - 2) {
+ get_fcb_param(&optim, impulse_resp, buf, pulse_cnt,
+ p->pitch_lag[index >> 1]);
+ }
+
+ /* Reconstruct the excitation */
+ memset(buf, 0, sizeof(int16_t) * SUBFRAME_LEN);
+ for (i = 0; i < pulse_cnt; i++)
+ buf[optim.pulse_pos[i]] = optim.pulse_sign[i];
+
+ pack_fcb_param(&p->subframe[index], &optim, buf, pulse_cnt);
+
+ if (optim.dirac_train)
+ gen_dirac_train(buf, p->pitch_lag[index >> 1]);
+}
+
+/**
+ * Pack the frame parameters into output bitstream.
+ *
+ * @param frame output buffer
+ * @param size size of the buffer
+ */
+static int pack_bitstream(G723_1_Context *p, unsigned char *frame, int size)
+{
+ PutBitContext pb;
+ int info_bits, i, temp;
+
+ init_put_bits(&pb, frame, size);
+
+ /* Only the 6300 bit/s rate is supported by this encoder */
+ if (p->cur_rate == RATE_6300) {
+ info_bits = 0;
+ put_bits(&pb, 2, info_bits);
+ }else
+ av_assert0(0);
+
+ put_bits(&pb, 8, p->lsp_index[2]);
+ put_bits(&pb, 8, p->lsp_index[1]);
+ put_bits(&pb, 8, p->lsp_index[0]);
+
+ put_bits(&pb, 7, p->pitch_lag[0] - PITCH_MIN);
+ put_bits(&pb, 2, p->subframe[1].ad_cb_lag);
+ put_bits(&pb, 7, p->pitch_lag[1] - PITCH_MIN);
+ put_bits(&pb, 2, p->subframe[3].ad_cb_lag);
+
+ /* Write 12 bit combined gain */
+ for (i = 0; i < SUBFRAMES; i++) {
+ temp = p->subframe[i].ad_cb_gain * GAIN_LEVELS +
+ p->subframe[i].amp_index;
+ if (p->cur_rate == RATE_6300)
+ temp += p->subframe[i].dirac_train << 11;
+ put_bits(&pb, 12, temp);
+ }
+
+ put_bits(&pb, 1, p->subframe[0].grid_index);
+ put_bits(&pb, 1, p->subframe[1].grid_index);
+ put_bits(&pb, 1, p->subframe[2].grid_index);
+ put_bits(&pb, 1, p->subframe[3].grid_index);
+
+ if (p->cur_rate == RATE_6300) {
+ skip_put_bits(&pb, 1); /* reserved bit */
+
+ /* Write 13 bit combined position index */
+ temp = (p->subframe[0].pulse_pos >> 16) * 810 +
+ (p->subframe[1].pulse_pos >> 14) * 90 +
+ (p->subframe[2].pulse_pos >> 16) * 9 +
+ (p->subframe[3].pulse_pos >> 14);
+ put_bits(&pb, 13, temp);
+
+ put_bits(&pb, 16, p->subframe[0].pulse_pos & 0xffff);
+ put_bits(&pb, 14, p->subframe[1].pulse_pos & 0x3fff);
+ put_bits(&pb, 16, p->subframe[2].pulse_pos & 0xffff);
+ put_bits(&pb, 14, p->subframe[3].pulse_pos & 0x3fff);
+
+ put_bits(&pb, 6, p->subframe[0].pulse_sign);
+ put_bits(&pb, 5, p->subframe[1].pulse_sign);
+ put_bits(&pb, 6, p->subframe[2].pulse_sign);
+ put_bits(&pb, 5, p->subframe[3].pulse_sign);
+ }
+
+ flush_put_bits(&pb);
+ return frame_size[info_bits];
+}
+
+static int g723_1_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
+ const AVFrame *frame, int *got_packet_ptr)
+{
+ G723_1_Context *p = avctx->priv_data;
+ int16_t unq_lpc[LPC_ORDER * SUBFRAMES];
+ int16_t qnt_lpc[LPC_ORDER * SUBFRAMES];
+ int16_t cur_lsp[LPC_ORDER];
+ int16_t weighted_lpc[LPC_ORDER * SUBFRAMES << 1];
+ int16_t vector[FRAME_LEN + PITCH_MAX];
+ int offset, ret;
+ /* Work on a private copy since the input frame is const and the
+ pipeline below modifies the samples in place */
+ int16_t *in_orig = av_memdup(frame->data[0], frame->nb_samples * sizeof(int16_t));
+ int16_t *in = in_orig;
+
+ HFParam hf[4];
+ int i, j;
+
+ if (!in)
+ return AVERROR(ENOMEM);
+
+ highpass_filter(in, &p->hpf_fir_mem, &p->hpf_iir_mem);
+
+ memcpy(vector, p->prev_data, HALF_FRAME_LEN * sizeof(int16_t));
+ memcpy(vector + HALF_FRAME_LEN, in, FRAME_LEN * sizeof(int16_t));
+
+ /* LPC analysis, LSP conversion and quantization */
+ comp_lpc_coeff(vector, unq_lpc);
+ lpc2lsp(&unq_lpc[LPC_ORDER * 3], p->prev_lsp, cur_lsp);
+ lsp_quantize(p->lsp_index, cur_lsp, p->prev_lsp);
+
+ /* Update memory */
+ memcpy(vector + LPC_ORDER, p->prev_data + SUBFRAME_LEN,
+ sizeof(int16_t) * SUBFRAME_LEN);
+ memcpy(vector + LPC_ORDER + SUBFRAME_LEN, in,
+ sizeof(int16_t) * (HALF_FRAME_LEN + SUBFRAME_LEN));
+ memcpy(p->prev_data, in + HALF_FRAME_LEN,
+ sizeof(int16_t) * HALF_FRAME_LEN);
+ memcpy(in, vector + LPC_ORDER, sizeof(int16_t) * FRAME_LEN);
+
+ perceptual_filter(p, weighted_lpc, unq_lpc, vector);
+
+ memcpy(in, vector + LPC_ORDER, sizeof(int16_t) * FRAME_LEN);
+ memcpy(vector, p->prev_weight_sig, sizeof(int16_t) * PITCH_MAX);
+ memcpy(vector + PITCH_MAX, in, sizeof(int16_t) * FRAME_LEN);
+
+ scale_vector(vector, vector, FRAME_LEN + PITCH_MAX);
+
+ /* Open-loop pitch estimation, one lag per half frame */
+ p->pitch_lag[0] = estimate_pitch(vector, PITCH_MAX);
+ p->pitch_lag[1] = estimate_pitch(vector, PITCH_MAX + HALF_FRAME_LEN);
+
+ for (i = PITCH_MAX, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
+ comp_harmonic_coeff(vector + i, p->pitch_lag[j >> 1], hf + j);
+
+ memcpy(vector, p->prev_weight_sig, sizeof(int16_t) * PITCH_MAX);
+ memcpy(vector + PITCH_MAX, in, sizeof(int16_t) * FRAME_LEN);
+ memcpy(p->prev_weight_sig, vector + FRAME_LEN, sizeof(int16_t) * PITCH_MAX);
+
+ for (i = 0, j = 0; j < SUBFRAMES; i += SUBFRAME_LEN, j++)
+ harmonic_filter(hf + j, vector + PITCH_MAX + i, in + i);
+
+ inverse_quant(cur_lsp, p->prev_lsp, p->lsp_index, 0);
+ lsp_interpolate(qnt_lpc, cur_lsp, p->prev_lsp);
+
+ memcpy(p->prev_lsp, cur_lsp, sizeof(int16_t) * LPC_ORDER);
+
+ /* Per-subframe analysis-by-synthesis loop */
+ offset = 0;
+ for (i = 0; i < SUBFRAMES; i++) {
+ int16_t impulse_resp[SUBFRAME_LEN];
+ int16_t residual[SUBFRAME_LEN + PITCH_ORDER - 1];
+ int16_t flt_in[SUBFRAME_LEN];
+ int16_t zero[LPC_ORDER], fir[LPC_ORDER], iir[LPC_ORDER];
+
+ /**
+ * Compute the combined impulse response of the synthesis filter,
+ * formant perceptual weighting filter and harmonic noise shaping filter
+ */
+ memset(zero, 0, sizeof(int16_t) * LPC_ORDER);
+ memset(vector, 0, sizeof(int16_t) * PITCH_MAX);
+ memset(flt_in, 0, sizeof(int16_t) * SUBFRAME_LEN);
+
+ flt_in[0] = 1 << 13; /* Unit impulse */
+ synth_percept_filter(qnt_lpc + offset, weighted_lpc + (offset << 1),
+ zero, zero, flt_in, vector + PITCH_MAX, 1);
+ harmonic_filter(hf + i, vector + PITCH_MAX, impulse_resp);
+
+ /* Compute the combined zero input response */
+ flt_in[0] = 0;
+ memcpy(fir, p->perf_fir_mem, sizeof(int16_t) * LPC_ORDER);
+ memcpy(iir, p->perf_iir_mem, sizeof(int16_t) * LPC_ORDER);
+
+ synth_percept_filter(qnt_lpc + offset, weighted_lpc + (offset << 1),
+ fir, iir, flt_in, vector + PITCH_MAX, 0);
+ memcpy(vector, p->harmonic_mem, sizeof(int16_t) * PITCH_MAX);
+ harmonic_noise_sub(hf + i, vector + PITCH_MAX, in);
+
+ /* Adaptive then fixed codebook search on the target signal */
+ acb_search(p, residual, impulse_resp, in, i);
+ gen_acb_excitation(residual, p->prev_excitation,p->pitch_lag[i >> 1],
+ &p->subframe[i], p->cur_rate);
+ sub_acb_contrib(residual, impulse_resp, in);
+
+ fcb_search(p, impulse_resp, in, i);
+
+ /* Reconstruct the excitation */
+ gen_acb_excitation(impulse_resp, p->prev_excitation, p->pitch_lag[i >> 1],
+ &p->subframe[i], RATE_6300);
+
+ memmove(p->prev_excitation, p->prev_excitation + SUBFRAME_LEN,
+ sizeof(int16_t) * (PITCH_MAX - SUBFRAME_LEN));
+ for (j = 0; j < SUBFRAME_LEN; j++)
+ in[j] = av_clip_int16((in[j] << 1) + impulse_resp[j]);
+ memcpy(p->prev_excitation + PITCH_MAX - SUBFRAME_LEN, in,
+ sizeof(int16_t) * SUBFRAME_LEN);
+
+ /* Update filter memories */
+ synth_percept_filter(qnt_lpc + offset, weighted_lpc + (offset << 1),
+ p->perf_fir_mem, p->perf_iir_mem,
+ in, vector + PITCH_MAX, 0);
+ memmove(p->harmonic_mem, p->harmonic_mem + SUBFRAME_LEN,
+ sizeof(int16_t) * (PITCH_MAX - SUBFRAME_LEN));
+ memcpy(p->harmonic_mem + PITCH_MAX - SUBFRAME_LEN, vector + PITCH_MAX,
+ sizeof(int16_t) * SUBFRAME_LEN);
+
+ in += SUBFRAME_LEN;
+ offset += LPC_ORDER;
+ }
+
+ av_freep(&in_orig); in = NULL;
+
+ /* 24 bytes = RATE_6300 frame size */
+ if ((ret = ff_alloc_packet2(avctx, avpkt, 24, 0)) < 0)
+ return ret;
+
+ *got_packet_ptr = 1;
+ avpkt->size = pack_bitstream(p, avpkt->data, avpkt->size);
+ return 0;
+}
+
+/* G.723.1 encoder registration (mono, 16-bit signed input) */
+AVCodec ff_g723_1_encoder = {
+ .name = "g723_1",
+ .long_name = NULL_IF_CONFIG_SMALL("G.723.1"),
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_G723_1,
+ .priv_data_size = sizeof(G723_1_Context),
+ .init = g723_1_encode_init,
+ .encode2 = g723_1_encode_frame,
+ .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_NONE},
+};
+#endif
.priv_data_size = sizeof(G726Context),
.init = g726_encode_init,
.encode2 = g726_encode_frame,
- .capabilities = CODEC_CAP_SMALL_LAST_FRAME,
+ .capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME,
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
- .priv_class = &class,
+ .priv_class = &g726_class,
.defaults = defaults,
};
#endif
.init = g726_decode_init,
.decode = g726_decode_frame,
.flush = g726_decode_flush,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
};
#endif
- .capabilities = CODEC_CAP_DR1,
+
+#if CONFIG_ADPCM_G726LE_DECODER
+AVCodec ff_adpcm_g726le_decoder = {
+ .name = "g726le",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_ADPCM_G726LE,
+ .priv_data_size = sizeof(G726Context),
+ .init = g726_decode_init,
+ .decode = g726_decode_frame,
+ .flush = g726_decode_flush,
+ .capabilities = AV_CODEC_CAP_DR1, /* fixed corrupted "++" diff marker */
+ .long_name = NULL_IF_CONFIG_SMALL("G.726 ADPCM little-endian"),
+};
+#endif
--- /dev/null
- .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
+/*
+ * G.729, G729 Annex D decoders
+ * Copyright (c) 2008 Vladimir Voroshilov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <inttypes.h>
+#include <string.h>
+
+#include "avcodec.h"
+#include "libavutil/avutil.h"
+#include "get_bits.h"
+#include "audiodsp.h"
+#include "internal.h"
+
+
+#include "g729.h"
+#include "lsp.h"
+#include "celp_math.h"
+#include "celp_filters.h"
+#include "acelp_filters.h"
+#include "acelp_pitch_delay.h"
+#include "acelp_vectors.h"
+#include "g729data.h"
+#include "g729postfilter.h"
+
+/**
+ * minimum quantized LSF value (3.2.4)
+ * 0.005 in Q13
+ */
+#define LSFQ_MIN 40
+
+/**
+ * maximum quantized LSF value (3.2.4)
+ * 3.135 in Q13
+ */
+#define LSFQ_MAX 25681
+
+/**
+ * minimum LSF distance (3.2.4)
+ * 0.0391 in Q13
+ */
+#define LSFQ_DIFF_MIN 321
+
+/// interpolation filter length
+#define INTERPOL_LEN 11
+
+/**
+ * minimum gain pitch value (3.8, Equation 47)
+ * 0.2 in (1.14)
+ */
+#define SHARP_MIN 3277
+
+/**
+ * maximum gain pitch value (3.8, Equation 47)
+ * (EE) This does not comply with the specification.
+ * Specification says about 0.8, which should be
+ * 13107 in (1.14), but reference C code uses
+ * 13017 (equals to 0.7945) instead of it.
+ */
+#define SHARP_MAX 13017
+
+/**
+ * MR_ENERGY (mean removed energy) = mean_energy + 10 * log10(2^26 * subframe_size) in (7.13)
+ */
+#define MR_ENERGY 1018156
+
+#define DECISION_NOISE 0
+#define DECISION_INTERMEDIATE 1
+#define DECISION_VOICE 2
+
+/* Supported G.729 packet formats, indexed by packet type */
+typedef enum {
+ FORMAT_G729_8K = 0,
+ FORMAT_G729D_6K4,
+ FORMAT_COUNT,
+} G729Formats;
+
+/* Per-format bit allocation of the G.729 frame fields */
+typedef struct {
+ uint8_t ac_index_bits[2]; ///< adaptive codebook index for second subframe (size in bits)
+ uint8_t parity_bit; ///< parity bit for pitch delay
+ uint8_t gc_1st_index_bits; ///< gain codebook (first stage) index (size in bits)
+ uint8_t gc_2nd_index_bits; ///< gain codebook (second stage) index (size in bits)
+ uint8_t fc_signs_bits; ///< number of pulses in fixed-codebook vector
+ uint8_t fc_indexes_bits; ///< size (in bits) of fixed-codebook index entry
+} G729FormatDescription;
+
+/* Complete decoder state carried between frames */
+typedef struct {
+ AudioDSPContext adsp;
+
+ /// past excitation signal buffer
+ int16_t exc_base[2*SUBFRAME_SIZE+PITCH_DELAY_MAX+INTERPOL_LEN];
+
+ int16_t* exc; ///< start of past excitation data in buffer
+ int pitch_delay_int_prev; ///< integer part of previous subframe's pitch delay (4.1.3)
+
+ /// (2.13) LSP quantizer outputs
+ int16_t past_quantizer_output_buf[MA_NP + 1][10];
+ int16_t* past_quantizer_outputs[MA_NP + 1];
+
+ int16_t lsfq[10]; ///< (2.13) quantized LSF coefficients from previous frame
+ int16_t lsp_buf[2][10]; ///< (0.15) LSP coefficients (previous and current frames) (3.2.5)
+ int16_t *lsp[2]; ///< pointers to lsp_buf
+
+ int16_t quant_energy[4]; ///< (5.10) past quantized energy
+
+ /// previous speech data for LP synthesis filter
+ int16_t syn_filter_data[10];
+
+
+ /// residual signal buffer (used in long-term postfilter)
+ int16_t residual[SUBFRAME_SIZE + RES_PREV_DATA_SIZE];
+
+ /// previous speech data for residual calculation filter
+ int16_t res_filter_data[SUBFRAME_SIZE+10];
+
+ /// previous speech data for short-term postfilter
+ int16_t pos_filter_data[SUBFRAME_SIZE+10];
+
+ /// (1.14) pitch gain of current and five previous subframes
+ int16_t past_gain_pitch[6];
+
+ /// (14.1) gain code from current and previous subframe
+ int16_t past_gain_code[2];
+
+ /// voice decision on previous subframe (0-noise, 1-intermediate, 2-voice), G.729D
+ int16_t voice_decision;
+
+ int16_t onset; ///< detected onset level (0-2)
+ int16_t was_periodic; ///< whether previous frame was declared as periodic or not (4.4)
+ int16_t ht_prev_data; ///< previous data for 4.2.3, equation 86
+ int gain_coeff; ///< (1.14) gain coefficient (4.2.4)
+ uint16_t rand_value; ///< random number generator value (4.4.4)
+ int ma_predictor_prev; ///< switched MA predictor of LSP quantizer from last good frame
+
+ /// (14.14) high-pass filter data (past input)
+ int hpf_f[2];
+
+ /// high-pass filter data (past output)
+ int16_t hpf_z[2];
+} G729Context;
+
+/* Bit allocation for the 8 kbit/s G.729 format */
+static const G729FormatDescription format_g729_8k = {
+ .ac_index_bits = {8,5},
+ .parity_bit = 1,
+ .gc_1st_index_bits = GC_1ST_IDX_BITS_8K,
+ .gc_2nd_index_bits = GC_2ND_IDX_BITS_8K,
+ .fc_signs_bits = 4,
+ .fc_indexes_bits = 13,
+};
+
+/* Bit allocation for the 6.4 kbit/s G.729 Annex D format */
+static const G729FormatDescription format_g729d_6k4 = {
+ .ac_index_bits = {8,4},
+ .parity_bit = 0,
+ .gc_1st_index_bits = GC_1ST_IDX_BITS_6K4,
+ .gc_2nd_index_bits = GC_2ND_IDX_BITS_6K4,
+ .fc_signs_bits = 2,
+ .fc_indexes_bits = 9,
+};
+
+/**
+ * @brief pseudo random number generator
+ */
+static inline uint16_t g729_prng(uint16_t value)
+{
+ /* linear congruential step; uint16_t return wraps the result mod 2^16 */
+ return 31821 * value + 13849;
+}
+
+/**
+ * Get parity bit of bit 2..7
+ */
+static inline int get_parity(uint8_t value)
+{
+ /* 64-bit constant is a precomputed parity lookup table indexed by bits 2..7 */
+ return (0x6996966996696996ULL >> (value >> 2)) & 1;
+}
+
+/**
+ * Decodes LSF (Line Spectral Frequencies) from L0-L3 (3.2.4).
+ * @param[out] lsfq (2.13) quantized LSF coefficients
+ * @param[in,out] past_quantizer_outputs (2.13) quantizer outputs from previous frames
+ * @param ma_predictor switched MA predictor of LSP quantizer
+ * @param vq_1st first stage vector of quantizer
+ * @param vq_2nd_low second stage lower vector of LSP quantizer
+ * @param vq_2nd_high second stage higher vector of LSP quantizer
+ */
+static void lsf_decode(int16_t* lsfq, int16_t* past_quantizer_outputs[MA_NP + 1],
+ int16_t ma_predictor,
+ int16_t vq_1st, int16_t vq_2nd_low, int16_t vq_2nd_high)
+{
+ int i,j;
+ static const uint8_t min_distance[2]={10, 5}; //(2.13)
+ int16_t* quantizer_output = past_quantizer_outputs[MA_NP];
+
+ /* Sum first- and second-stage codebook vectors (low/high halves) */
+ for (i = 0; i < 5; i++) {
+ quantizer_output[i] = cb_lsp_1st[vq_1st][i ] + cb_lsp_2nd[vq_2nd_low ][i ];
+ quantizer_output[i + 5] = cb_lsp_1st[vq_1st][i + 5] + cb_lsp_2nd[vq_2nd_high][i + 5];
+ }
+
+ /* Two passes of spreading to enforce a minimum distance between
+ adjacent coefficients */
+ for (j = 0; j < 2; j++) {
+ for (i = 1; i < 10; i++) {
+ int diff = (quantizer_output[i - 1] - quantizer_output[i] + min_distance[j]) >> 1;
+ if (diff > 0) {
+ quantizer_output[i - 1] -= diff;
+ quantizer_output[i ] += diff;
+ }
+ }
+ }
+
+ /* Switched MA prediction over the past quantizer outputs */
+ for (i = 0; i < 10; i++) {
+ int sum = quantizer_output[i] * cb_ma_predictor_sum[ma_predictor][i];
+ for (j = 0; j < MA_NP; j++)
+ sum += past_quantizer_outputs[j][i] * cb_ma_predictor[ma_predictor][j][i];
+
+ lsfq[i] = sum >> 15;
+ }
+
+ ff_acelp_reorder_lsf(lsfq, LSFQ_DIFF_MIN, LSFQ_MIN, LSFQ_MAX, 10);
+}
+
+/**
+ * Restores past LSP quantizer output using LSF from previous frame
+ * @param[in,out] lsfq (2.13) quantized LSF coefficients
+ * @param[in,out] past_quantizer_outputs (2.13) quantizer outputs from previous frames
+ * @param ma_predictor_prev MA predictor from previous frame
+ * @param lsfq_prev (2.13) quantized LSF coefficients from previous frame
+ */
+static void lsf_restore_from_previous(int16_t* lsfq,
+ int16_t* past_quantizer_outputs[MA_NP + 1],
+ int ma_predictor_prev)
+{
+ int16_t* quantizer_output = past_quantizer_outputs[MA_NP];
+ int i,k;
+
+ /* Invert the MA prediction of lsf_decode() to recover the quantizer
+ output that would have produced the previous frame's LSF */
+ for (i = 0; i < 10; i++) {
+ int tmp = lsfq[i] << 15;
+
+ for (k = 0; k < MA_NP; k++)
+ tmp -= past_quantizer_outputs[k][i] * cb_ma_predictor[ma_predictor_prev][k][i];
+
+ quantizer_output[i] = ((tmp >> 15) * cb_ma_predictor_sum_inv[ma_predictor_prev][i]) >> 12;
+ }
+}
+
+/**
+ * Constructs new excitation signal and applies phase filter to it
+ * @param[out] out constructed speech signal
+ * @param in original excitation signal
+ * @param fc_cur (2.13) original fixed-codebook vector
+ * @param gain_code (14.1) gain code
+ * @param subframe_size length of the subframe
+ */
+static void g729d_get_new_exc(
+ int16_t* out,
+ const int16_t* in,
+ const int16_t* fc_cur,
+ int dstate,
+ int gain_code,
+ int subframe_size)
+{
+ int i;
+ int16_t fc_new[SUBFRAME_SIZE];
+
+ /* Circularly convolve the fixed-codebook vector with the phase filter
+ selected by the voicing decision state */
+ ff_celp_convolve_circ(fc_new, fc_cur, phase_filter[dstate], subframe_size);
+
+ /* Replace the original FC contribution with the phase-filtered one */
+ for(i=0; i<subframe_size; i++)
+ {
+ out[i] = in[i];
+ out[i] -= (gain_code * fc_cur[i] + 0x2000) >> 14;
+ out[i] += (gain_code * fc_new[i] + 0x2000) >> 14;
+ }
+}
+
+/**
+ * Makes decision about onset in current subframe
+ * @param past_onset decision result of previous subframe
+ * @param past_gain_code gain code of current and previous subframe
+ *
+ * @return onset decision result for current subframe
+ */
+static int g729d_onset_decision(int past_onset, const int16_t* past_gain_code)
+{
+ /* Onset is declared when the gain code more than doubles; otherwise
+ the previous onset level decays towards zero */
+ if((past_gain_code[0] >> 1) > past_gain_code[1])
+ return 2;
+ else
+ return FFMAX(past_onset-1, 0);
+}
+
+/**
+ * Makes decision about voice presence in current subframe
+ * @param onset onset level
+ * @param prev_voice_decision voice decision result from previous subframe
+ * @param past_gain_pitch pitch gain of current and previous subframes
+ *
+ * @return voice decision result for current subframe
+ */
+static int16_t g729d_voice_decision(int onset, int prev_voice_decision, const int16_t* past_gain_pitch)
+{
+ int i, low_gain_pitch_cnt, voice_decision;
+
+ /* Initial classification from the current pitch gain thresholds */
+ if(past_gain_pitch[0] >= 14745) // 0.9
+ voice_decision = DECISION_VOICE;
+ else if (past_gain_pitch[0] <= 9830) // 0.6
+ voice_decision = DECISION_NOISE;
+ else
+ voice_decision = DECISION_INTERMEDIATE;
+
+ /* Count subframes with low pitch gain among the last six */
+ for(i=0, low_gain_pitch_cnt=0; i<6; i++)
+ if(past_gain_pitch[i] < 9830)
+ low_gain_pitch_cnt++;
+
+ if(low_gain_pitch_cnt > 2 && !onset)
+ voice_decision = DECISION_NOISE;
+
+ /* Smooth the decision: limit upward jumps without onset, and push
+ towards voice when an onset was detected */
+ if(!onset && voice_decision > prev_voice_decision + 1)
+ voice_decision--;
+
+ if(onset && voice_decision < DECISION_VOICE)
+ voice_decision++;
+
+ return voice_decision;
+}
+
+/* Plain C int16 dot product; installed as the AudioDSP scalarproduct hook */
+static int32_t scalarproduct_int16_c(const int16_t * v1, const int16_t * v2, int order)
+{
+ int res = 0;
+
+ while (order--)
+ res += *v1++ * *v2++;
+
+ return res;
+}
+
+/**
+ * Initialize the G.729 decoder context: validate channel layout,
+ * set output format and frame size, and seed all filter/predictor state.
+ */
+static av_cold int decoder_init(AVCodecContext * avctx)
+{
+    G729Context* ctx = avctx->priv_data;
+    int i,k;
+
+    /* G.729 is a mono-only codec. */
+    if (avctx->channels != 1) {
+        av_log(avctx, AV_LOG_ERROR, "Only mono sound is supported (requested channels: %d).\n", avctx->channels);
+        return AVERROR(EINVAL);
+    }
+    avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+
+    /* Both 8kbit/s and 6.4kbit/s modes use two subframes per frame. */
+    avctx->frame_size = SUBFRAME_SIZE << 1;
+
+    ctx->gain_coeff = 16384; // 1.0 in (1.14)
+
+    /* Fill the LSF quantizer history with the default spectral envelope. */
+    for (k = 0; k < MA_NP + 1; k++) {
+        ctx->past_quantizer_outputs[k] = ctx->past_quantizer_output_buf[k];
+        for (i = 1; i < 11; i++)
+            ctx->past_quantizer_outputs[k][i - 1] = (18717 * i) >> 3;
+    }
+
+    ctx->lsp[0] = ctx->lsp_buf[0];
+    ctx->lsp[1] = ctx->lsp_buf[1];
+    memcpy(ctx->lsp[0], lsp_init, 10 * sizeof(int16_t));
+
+    /* Excitation pointer skips the history region kept at the start of exc_base. */
+    ctx->exc = &ctx->exc_base[PITCH_DELAY_MAX+INTERPOL_LEN];
+
+    ctx->pitch_delay_int_prev = PITCH_DELAY_MIN;
+
+    /* random seed initialization */
+    ctx->rand_value = 21845;
+
+    /* quantized prediction error */
+    for(i=0; i<4; i++)
+        ctx->quant_energy[i] = -14336; // -14 in (5.10)
+
+    ff_audiodsp_init(&ctx->adsp);
+    /* Use the local C scalar product; ff_audiodsp_init may have set an
+       optimized one, but the bit-exact reference behaviour is wanted here. */
+    ctx->adsp.scalarproduct_int16 = scalarproduct_int16_c;
+
+    return 0;
+}
+
+/**
+ * Decode one G.729 (10-byte multiple) or G.729D (8-byte) packet into
+ * SUBFRAME_SIZE*2 signed 16-bit samples. An all-zero packet is treated
+ * as a frame erasure and concealed from past decoder state.
+ */
+static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
+                        AVPacket *avpkt)
+{
+    const uint8_t *buf = avpkt->data;
+    int buf_size = avpkt->size;
+    int16_t *out_frame;
+    GetBitContext gb;
+    const G729FormatDescription *format;
+    int frame_erasure = 0;    ///< frame erasure detected during decoding
+    int bad_pitch = 0;        ///< parity check failed
+    int i;
+    int16_t *tmp;
+    G729Formats packet_type;
+    G729Context *ctx = avctx->priv_data;
+    int16_t lp[2][11];           // (3.12)
+    uint8_t ma_predictor;     ///< switched MA predictor of LSP quantizer
+    uint8_t quantizer_1st;    ///< first stage vector of quantizer
+    uint8_t quantizer_2nd_lo; ///< second stage lower vector of quantizer (size in bits)
+    uint8_t quantizer_2nd_hi; ///< second stage higher vector of quantizer (size in bits)
+
+    int pitch_delay_int[2];      // pitch delay, integer part
+    int pitch_delay_3x;          // pitch delay, multiplied by 3
+    int16_t fc[SUBFRAME_SIZE];   // fixed-codebook vector
+    int16_t synth[SUBFRAME_SIZE+10]; // synthesis filter output (10 history samples + subframe)
+    int j, ret;
+    int gain_before, gain_after;
+    int is_periodic = 0;         // whether one of the subframes is declared as periodic or not
+    AVFrame *frame = data;
+
+    frame->nb_samples = SUBFRAME_SIZE<<1;
+    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
+        return ret;
+    out_frame = (int16_t*) frame->data[0];
+
+    /* Distinguish the bitrate mode by packet size. */
+    if (buf_size % 10 == 0) {
+        packet_type = FORMAT_G729_8K;
+        format = &format_g729_8k;
+        //Reset voice decision
+        ctx->onset = 0;
+        ctx->voice_decision = DECISION_VOICE;
+        av_log(avctx, AV_LOG_DEBUG, "Packet type: %s\n", "G.729 @ 8kbit/s");
+    } else if (buf_size == 8) {
+        packet_type = FORMAT_G729D_6K4;
+        format = &format_g729d_6k4;
+        av_log(avctx, AV_LOG_DEBUG, "Packet type: %s\n", "G.729D @ 6.4kbit/s");
+    } else {
+        av_log(avctx, AV_LOG_ERROR, "Packet size %d is unknown.\n", buf_size);
+        return AVERROR_INVALIDDATA;
+    }
+
+    /* An all-zero packet signals a frame erasure. */
+    for (i=0; i < buf_size; i++)
+        frame_erasure |= buf[i];
+    frame_erasure = !frame_erasure;
+
+    init_get_bits(&gb, buf, 8*buf_size);
+
+    ma_predictor     = get_bits(&gb, 1);
+    quantizer_1st    = get_bits(&gb, VQ_1ST_BITS);
+    quantizer_2nd_lo = get_bits(&gb, VQ_2ND_BITS);
+    quantizer_2nd_hi = get_bits(&gb, VQ_2ND_BITS);
+
+    if(frame_erasure)
+        /* Conceal: reconstruct LSFs from the quantizer output history. */
+        lsf_restore_from_previous(ctx->lsfq, ctx->past_quantizer_outputs,
+                                  ctx->ma_predictor_prev);
+    else {
+        lsf_decode(ctx->lsfq, ctx->past_quantizer_outputs,
+                   ma_predictor,
+                   quantizer_1st, quantizer_2nd_lo, quantizer_2nd_hi);
+        ctx->ma_predictor_prev = ma_predictor;
+    }
+
+    /* Rotate the quantizer output history (reuse the oldest buffer as newest). */
+    tmp = ctx->past_quantizer_outputs[MA_NP];
+    memmove(ctx->past_quantizer_outputs + 1, ctx->past_quantizer_outputs,
+            MA_NP * sizeof(int16_t*));
+    ctx->past_quantizer_outputs[0] = tmp;
+
+    ff_acelp_lsf2lsp(ctx->lsp[1], ctx->lsfq, 10);
+
+    ff_acelp_lp_decode(&lp[0][0], &lp[1][0], ctx->lsp[1], ctx->lsp[0], 10);
+
+    FFSWAP(int16_t*, ctx->lsp[1], ctx->lsp[0]);
+
+    /* Per-subframe decoding (two subframes per frame). */
+    for (i = 0; i < 2; i++) {
+        int gain_corr_factor;
+
+        uint8_t ac_index;      ///< adaptive codebook index
+        uint8_t pulses_signs;  ///< fixed-codebook vector pulse signs
+        int fc_indexes;        ///< fixed-codebook indexes
+        uint8_t gc_1st_index;  ///< gain codebook (first stage) index
+        uint8_t gc_2nd_index;  ///< gain codebook (second stage) index
+
+        ac_index      = get_bits(&gb, format->ac_index_bits[i]);
+        if(!i && format->parity_bit)
+            bad_pitch = get_parity(ac_index) == get_bits1(&gb);
+        fc_indexes    = get_bits(&gb, format->fc_indexes_bits);
+        pulses_signs  = get_bits(&gb, format->fc_signs_bits);
+        gc_1st_index  = get_bits(&gb, format->gc_1st_index_bits);
+        gc_2nd_index  = get_bits(&gb, format->gc_2nd_index_bits);
+
+        /* Pitch delay: reuse previous value on erasure or failed parity,
+           otherwise decode absolute (1st subframe) or relative (2nd). */
+        if (frame_erasure)
+            pitch_delay_3x = 3 * ctx->pitch_delay_int_prev;
+        else if(!i) {
+            if (bad_pitch)
+                pitch_delay_3x = 3 * ctx->pitch_delay_int_prev;
+            else
+                pitch_delay_3x = ff_acelp_decode_8bit_to_1st_delay3(ac_index);
+        } else {
+            int pitch_delay_min = av_clip(ctx->pitch_delay_int_prev - 5,
+                                          PITCH_DELAY_MIN, PITCH_DELAY_MAX - 9);
+
+            if(packet_type == FORMAT_G729D_6K4)
+                pitch_delay_3x = ff_acelp_decode_4bit_to_2nd_delay3(ac_index, pitch_delay_min);
+            else
+                pitch_delay_3x = ff_acelp_decode_5_6_bit_to_2nd_delay3(ac_index, pitch_delay_min);
+        }
+
+        /* Round pitch delay to nearest (used everywhere except ff_acelp_interpolate). */
+        pitch_delay_int[i] = (pitch_delay_3x + 1) / 3;
+        if (pitch_delay_int[i] > PITCH_DELAY_MAX) {
+            av_log(avctx, AV_LOG_WARNING, "pitch_delay_int %d is too large\n", pitch_delay_int[i]);
+            pitch_delay_int[i] = PITCH_DELAY_MAX;
+        }
+
+        if (frame_erasure) {
+            /* Conceal the fixed codebook with pseudo-random indexes/signs. */
+            ctx->rand_value = g729_prng(ctx->rand_value);
+            fc_indexes   = av_mod_uintp2(ctx->rand_value, format->fc_indexes_bits);
+
+            ctx->rand_value = g729_prng(ctx->rand_value);
+            pulses_signs = ctx->rand_value;
+        }
+
+
+        memset(fc, 0, sizeof(int16_t) * SUBFRAME_SIZE);
+        switch (packet_type) {
+        case FORMAT_G729_8K:
+            ff_acelp_fc_pulse_per_track(fc, ff_fc_4pulses_8bits_tracks_13,
+                                        ff_fc_4pulses_8bits_track_4,
+                                        fc_indexes, pulses_signs, 3, 3);
+            break;
+        case FORMAT_G729D_6K4:
+            ff_acelp_fc_pulse_per_track(fc, ff_fc_2pulses_9bits_track1_gray,
+                                        ff_fc_2pulses_9bits_track2_gray,
+                                        fc_indexes, pulses_signs, 1, 4);
+            break;
+        }
+
+        /*
+          This filter enhances harmonic components of the fixed-codebook vector to
+          improve the quality of the reconstructed speech.
+
+                     / fc_v[i],                                    i <  pitch_delay
+          fc_v[i] = <
+                     \ fc_v[i] + gain_pitch * fc_v[i-pitch_delay], i >= pitch_delay
+        */
+        ff_acelp_weighted_vector_sum(fc + pitch_delay_int[i],
+                                     fc + pitch_delay_int[i],
+                                     fc, 1 << 14,
+                                     av_clip(ctx->past_gain_pitch[0], SHARP_MIN, SHARP_MAX),
+                                     0, 14,
+                                     SUBFRAME_SIZE - pitch_delay_int[i]);
+
+        /* Shift gain histories before storing the new values. */
+        memmove(ctx->past_gain_pitch+1, ctx->past_gain_pitch, 5 * sizeof(int16_t));
+        ctx->past_gain_code[1] = ctx->past_gain_code[0];
+
+        if (frame_erasure) {
+            /* Attenuate previous gains during concealment. */
+            ctx->past_gain_pitch[0] = (29491 * ctx->past_gain_pitch[0]) >> 15; // 0.90 (0.15)
+            ctx->past_gain_code[0]  = ( 2007 * ctx->past_gain_code[0] ) >> 11; // 0.98 (0.11)
+
+            gain_corr_factor = 0;
+        } else {
+            if (packet_type == FORMAT_G729D_6K4) {
+                ctx->past_gain_pitch[0] = cb_gain_1st_6k4[gc_1st_index][0] +
+                                          cb_gain_2nd_6k4[gc_2nd_index][0];
+                gain_corr_factor = cb_gain_1st_6k4[gc_1st_index][1] +
+                                   cb_gain_2nd_6k4[gc_2nd_index][1];
+
+                /* Without the check below an overflow can occur in
+                   ff_acelp_update_past_gain. It is not an issue for G.729,
+                   because gain_corr_factor in its case is always greater
+                   than 1024, while in G.729D it can even be zero. */
+                gain_corr_factor = FFMAX(gain_corr_factor, 1024);
+#ifndef G729_BITEXACT
+                gain_corr_factor >>= 1;
+#endif
+            } else {
+                ctx->past_gain_pitch[0] = cb_gain_1st_8k[gc_1st_index][0] +
+                                          cb_gain_2nd_8k[gc_2nd_index][0];
+                gain_corr_factor = cb_gain_1st_8k[gc_1st_index][1] +
+                                   cb_gain_2nd_8k[gc_2nd_index][1];
+            }
+
+            /* Decode the fixed-codebook gain. */
+            ctx->past_gain_code[0] = ff_acelp_decode_gain_code(&ctx->adsp, gain_corr_factor,
+                                                               fc, MR_ENERGY,
+                                                               ctx->quant_energy,
+                                                               ma_prediction_coeff,
+                                                               SUBFRAME_SIZE, 4);
+#ifdef G729_BITEXACT
+            /*
+              This correction is required to get a bit-exact result with the
+              reference code, because gain_corr_factor in G.729D is
+              two times larger than in original G.729.
+
+              If a bit-exact result is not an issue, gain_corr_factor
+              can simply be divided by 2 before the call to g729_get_gain_code
+              instead of using the correction below.
+            */
+            if (packet_type == FORMAT_G729D_6K4) {
+                gain_corr_factor >>= 1;
+                ctx->past_gain_code[0] >>= 1;
+            }
+#endif
+        }
+        ff_acelp_update_past_gain(ctx->quant_energy, gain_corr_factor, 2, frame_erasure);
+
+        /* Routine requires rounding to lowest. */
+        ff_acelp_interpolate(ctx->exc + i * SUBFRAME_SIZE,
+                             ctx->exc + i * SUBFRAME_SIZE - pitch_delay_3x / 3,
+                             ff_acelp_interp_filter, 6,
+                             (pitch_delay_3x % 3) << 1,
+                             10, SUBFRAME_SIZE);
+
+        /* Total excitation = adaptive + fixed codebook contribution;
+           drop one of the contributions during concealment depending on
+           whether the previous frame was voiced. */
+        ff_acelp_weighted_vector_sum(ctx->exc + i * SUBFRAME_SIZE,
+                                     ctx->exc + i * SUBFRAME_SIZE, fc,
+                                     (!ctx->was_periodic && frame_erasure) ? 0 : ctx->past_gain_pitch[0],
+                                     ( ctx->was_periodic && frame_erasure) ? 0 : ctx->past_gain_code[0],
+                                     1 << 13, 14, SUBFRAME_SIZE);
+
+        memcpy(synth, ctx->syn_filter_data, 10 * sizeof(int16_t));
+
+        /* First pass with rounding/saturation enabled: only used to detect overflow. */
+        if (ff_celp_lp_synthesis_filter(
+            synth+10,
+            &lp[i][1],
+            ctx->exc + i * SUBFRAME_SIZE,
+            SUBFRAME_SIZE,
+            10,
+            1,
+            0,
+            0x800))
+            /* Overflow occurred, downscale excitation signal... */
+            for (j = 0; j < 2 * SUBFRAME_SIZE + PITCH_DELAY_MAX + INTERPOL_LEN; j++)
+                ctx->exc_base[j] >>= 2;
+
+        /* ... and make synthesis again. */
+        if (packet_type == FORMAT_G729D_6K4) {
+            int16_t exc_new[SUBFRAME_SIZE];
+
+            /* G.729D post-processing: phase-disperse the excitation
+               according to the onset/voicing decision. */
+            ctx->onset = g729d_onset_decision(ctx->onset, ctx->past_gain_code);
+            ctx->voice_decision = g729d_voice_decision(ctx->onset, ctx->voice_decision, ctx->past_gain_pitch);
+
+            g729d_get_new_exc(exc_new, ctx->exc + i * SUBFRAME_SIZE, fc, ctx->voice_decision, ctx->past_gain_code[0], SUBFRAME_SIZE);
+
+            ff_celp_lp_synthesis_filter(
+                    synth+10,
+                    &lp[i][1],
+                    exc_new,
+                    SUBFRAME_SIZE,
+                    10,
+                    0,
+                    0,
+                    0x800);
+        } else {
+            ff_celp_lp_synthesis_filter(
+                    synth+10,
+                    &lp[i][1],
+                    ctx->exc + i * SUBFRAME_SIZE,
+                    SUBFRAME_SIZE,
+                    10,
+                    0,
+                    0,
+                    0x800);
+        }
+        /* Save data (without postfilter) for use in next subframe. */
+        memcpy(ctx->syn_filter_data, synth+SUBFRAME_SIZE, 10 * sizeof(int16_t));
+
+        /* Calculate gain of unfiltered signal for use in AGC. */
+        gain_before = 0;
+        for (j = 0; j < SUBFRAME_SIZE; j++)
+            gain_before += FFABS(synth[j+10]);
+
+        /* Call postfilter and also update voicing decision for use in next frame. */
+        ff_g729_postfilter(
+                &ctx->adsp,
+                &ctx->ht_prev_data,
+                &is_periodic,
+                &lp[i][0],
+                pitch_delay_int[0],
+                ctx->residual,
+                ctx->res_filter_data,
+                ctx->pos_filter_data,
+                synth+10,
+                SUBFRAME_SIZE);
+
+        /* Calculate gain of filtered signal for use in AGC. */
+        gain_after = 0;
+        for(j=0; j<SUBFRAME_SIZE; j++)
+            gain_after += FFABS(synth[j+10]);
+
+        ctx->gain_coeff = ff_g729_adaptive_gain_control(
+                gain_before,
+                gain_after,
+                synth+10,
+                SUBFRAME_SIZE,
+                ctx->gain_coeff);
+
+        if (frame_erasure)
+            ctx->pitch_delay_int_prev = FFMIN(ctx->pitch_delay_int_prev + 1, PITCH_DELAY_MAX);
+        else
+            ctx->pitch_delay_int_prev = pitch_delay_int[i];
+
+        /* High-pass filter the result; hpf_z keeps the two-sample history
+           placed just before the subframe data. */
+        memcpy(synth+8, ctx->hpf_z, 2*sizeof(int16_t));
+        ff_acelp_high_pass_filter(
+                out_frame + i*SUBFRAME_SIZE,
+                ctx->hpf_f,
+                synth+10,
+                SUBFRAME_SIZE);
+        memcpy(ctx->hpf_z, synth+8+SUBFRAME_SIZE, 2*sizeof(int16_t));
+    }
+
+    ctx->was_periodic = is_periodic;
+
+    /* Save signal for use in next frame. */
+    memmove(ctx->exc_base, ctx->exc_base + 2 * SUBFRAME_SIZE, (PITCH_DELAY_MAX+INTERPOL_LEN)*sizeof(int16_t));
+
+    *got_frame_ptr = 1;
+    return packet_type == FORMAT_G729_8K ? 10 : 8;
+}
+
+/* Codec registration for the G.729/G.729D decoder. */
+AVCodec ff_g729_decoder = {
+    .name           = "g729",
+    .long_name      = NULL_IF_CONFIG_SMALL("G.729"),
+    .type           = AVMEDIA_TYPE_AUDIO,
+    .id             = AV_CODEC_ID_G729,
+    .priv_data_size = sizeof(G729Context),
+    .init           = decoder_init,
+    .decode         = decode_frame,
++    .capabilities   = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
+};
.init = gif_decode_init,
.close = gif_decode_close,
.decode = gif_decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
+ .priv_class = &decoder_class,
};
.init = gsm_init,
.decode = gsm_decode_frame,
.flush = gsm_flush,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
};
-
+#endif
+#if CONFIG_GSM_MS_DECODER
AVCodec ff_gsm_ms_decoder = {
.name = "gsm_ms",
.long_name = NULL_IF_CONFIG_SMALL("GSM Microsoft variant"),
.init = gsm_init,
.decode = gsm_decode_frame,
.flush = gsm_flush,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
};
+#endif
.init = h261_decode_init,
.close = h261_decode_end,
.decode = h261_decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
+ .max_lowres = 3,
};
if (!s->divx_packed && !avctx->hwaccel)
ff_thread_finish_setup(avctx);
- if (CONFIG_MPEG4_VDPAU_DECODER && (s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)) {
++ if (CONFIG_MPEG4_VDPAU_DECODER && (s->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)) {
+ ff_vdpau_mpeg4_decode_picture(avctx->priv_data, s->gb.buffer, s->gb.buffer_end - s->gb.buffer);
+ goto frame_end;
+ }
+
if (avctx->hwaccel) {
ret = avctx->hwaccel->start_frame(avctx, s->gb.buffer,
s->gb.buffer_end - s->gb.buffer);
.init = ff_h263_decode_init,
.close = ff_h263_decode_end,
.decode = ff_h263_decode_frame,
- .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 |
- CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
+ .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
+ AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY,
.flush = ff_mpeg_flush,
- .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 |
- CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
+ .max_lowres = 3,
+ .pix_fmts = ff_h263_hwaccel_pixfmt_list_420,
+};
+
+AVCodec ff_h263p_decoder = {
+ .name = "h263p",
+ .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996, H.263+ / H.263-1998 / H.263 version 2"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_H263P,
+ .priv_data_size = sizeof(MpegEncContext),
+ .init = ff_h263_decode_init,
+ .close = ff_h263_decode_end,
+ .decode = ff_h263_decode_frame,
++ .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
++ AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY,
+ .flush = ff_mpeg_flush,
+ .max_lowres = 3,
.pix_fmts = ff_h263_hwaccel_pixfmt_list_420,
};
decode_postinit(h, nal_index >= nals_needed);
if (h->avctx->hwaccel &&
- (ret = h->avctx->hwaccel->start_frame(h->avctx, NULL, 0)) < 0)
- return ret;
+ (ret = h->avctx->hwaccel->start_frame(h->avctx, buf, buf_size)) < 0)
+ goto end;
+ if (CONFIG_H264_VDPAU_DECODER &&
- h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
++ h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
+ ff_vdpau_h264_picture_start(h);
}
- if (sl->redundant_pic_count == 0 &&
- (avctx->skip_frame < AVDISCARD_NONREF ||
- h->nal_ref_idc) &&
- (avctx->skip_frame < AVDISCARD_BIDIR ||
- sl->slice_type_nos != AV_PICTURE_TYPE_B) &&
- (avctx->skip_frame < AVDISCARD_NONKEY ||
- sl->slice_type_nos == AV_PICTURE_TYPE_I) &&
- avctx->skip_frame < AVDISCARD_ALL) {
+ if (sl->redundant_pic_count == 0) {
if (avctx->hwaccel) {
ret = avctx->hwaccel->decode_slice(avctx,
&buf[buf_index - consumed],
consumed);
if (ret < 0)
- return ret;
+ goto end;
+ } else if (CONFIG_H264_VDPAU_DECODER &&
- h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU) {
++ h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU) {
+ ff_vdpau_add_data_chunk(h->cur_pic_ptr->f->data[0],
+ start_code,
+ sizeof(start_code));
+ ff_vdpau_add_data_chunk(h->cur_pic_ptr->f->data[0],
+ &buf[buf_index - consumed],
+ consumed);
} else
context_count++;
}
.profiles = NULL_IF_CONFIG_SMALL(profiles),
.priv_class = &h264_class,
};
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
+
+#if CONFIG_H264_VDPAU_DECODER
+static const AVClass h264_vdpau_class = {
+ .class_name = "H264 VDPAU Decoder",
+ .item_name = av_default_item_name,
+ .option = h264_options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+AVCodec ff_h264_vdpau_decoder = {
+ .name = "h264_vdpau",
+ .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (VDPAU acceleration)"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_H264,
+ .priv_data_size = sizeof(H264Context),
+ .init = ff_h264_decode_init,
+ .close = h264_decode_end,
+ .decode = h264_decode_frame,
++ .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HWACCEL_VDPAU,
+ .flush = flush_dpb,
+ .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_VDPAU_H264,
+ AV_PIX_FMT_NONE},
+ .profiles = NULL_IF_CONFIG_SMALL(profiles),
+ .priv_class = &h264_vdpau_class,
+};
+#endif
int err = 0;
h->mb_y = 0;
- if (!in_setup && !h->droppable)
- ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
- h->picture_structure == PICT_BOTTOM_FIELD);
+ if (CONFIG_H264_VDPAU_DECODER &&
- h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
++ h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
+ ff_vdpau_h264_set_reference_frames(h);
if (in_setup || !(avctx->active_thread_type & FF_THREAD_FRAME)) {
if (!h->droppable) {
"hardware accelerator failed to decode picture\n");
}
- h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
+ if (CONFIG_H264_VDPAU_DECODER &&
++ h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
+ ff_vdpau_h264_picture_complete(h);
+
#if CONFIG_ERROR_RESILIENCE
+ av_assert0(sl == h->slice_ctx);
/*
* FIXME: Error handling code does not seem to support interlaced
* when slices span multiple rows
if ((ret = alloc_picture(h, pic)) < 0)
return ret;
- !(h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU))
+ if(!h->frame_recovered && !h->avctx->hwaccel &&
++ !(h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU))
+ avpriv_color_frame(pic->f, c);
h->cur_pic_ptr = pic;
ff_h264_unref_picture(h, &h->cur_pic);
ret = ff_h264_alloc_tables(h);
if (ret < 0) {
av_log(h->avctx, AV_LOG_ERROR, "Could not allocate memory\n");
- return ret;
+ goto fail;
}
- if (h->sps.bit_depth_luma < 8 || h->sps.bit_depth_luma > 10) {
+ if (h->avctx->codec &&
- h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU &&
++ h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU &&
+ (h->sps.bit_depth_luma != 8 || h->sps.chroma_format_idc > 1)) {
+ av_log(h->avctx, AV_LOG_ERROR,
+ "VDPAU decoding does not support video colorspace.\n");
+ ret = AVERROR_INVALIDDATA;
+ goto fail;
+ }
+
+ if (h->sps.bit_depth_luma < 8 || h->sps.bit_depth_luma > 14 ||
+ h->sps.bit_depth_luma == 11 || h->sps.bit_depth_luma == 13
+ ) {
av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n",
h->sps.bit_depth_luma);
- return AVERROR_INVALIDDATA;
+ ret = AVERROR_INVALIDDATA;
+ goto fail;
}
+ h->cur_bit_depth_luma =
h->avctx->bits_per_raw_sample = h->sps.bit_depth_luma;
+ h->cur_chroma_format_idc = h->sps.chroma_format_idc;
h->pixel_shift = h->sps.bit_depth_luma > 8;
h->chroma_format_idc = h->sps.chroma_format_idc;
h->bit_depth_luma = h->sps.bit_depth_luma;
H264SliceContext *sl;
int i, j;
- if (h->avctx->hwaccel)
+ av_assert0(context_count && h->slice_ctx[context_count - 1].mb_y < h->mb_height);
+
+ h->slice_ctx[0].next_slice_idx = INT_MAX;
+
+ if (h->avctx->hwaccel ||
- h->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
++ h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
return 0;
if (context_count == 1) {
int ret;
.flush = hevc_decode_flush,
.update_thread_context = hevc_update_thread_context,
.init_thread_copy = hevc_init_thread_copy,
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY |
- CODEC_CAP_SLICE_THREADS | CODEC_CAP_FRAME_THREADS,
+ .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
- AV_CODEC_CAP_FRAME_THREADS,
++ AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
.profiles = NULL_IF_CONFIG_SMALL(profiles),
};
.init = encode_init,
.encode2 = encode_frame,
.close = encode_end,
- .capabilities = CODEC_CAP_FRAME_THREADS | CODEC_CAP_INTRA_ONLY,
++ .capabilities = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
+ .priv_class = &normal_class,
.pix_fmts = (const enum AVPixelFormat[]){
AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24,
AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
.init = encode_init,
.encode2 = encode_frame,
.close = encode_end,
- .capabilities = CODEC_CAP_FRAME_THREADS | CODEC_CAP_INTRA_ONLY,
++ .capabilities = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
+ .priv_class = &ff_class,
.pix_fmts = (const enum AVPixelFormat[]){
- AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV411P,
+ AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_GBRP,
+ AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
+ AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
+ AV_PIX_FMT_GBRAP,
+ AV_PIX_FMT_GRAY8A,
+ AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV420P16,
+ AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV422P16,
+ AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV444P16,
+ AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16,
+ AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P16,
+ AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P16,
+ AV_PIX_FMT_RGB24,
AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
},
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
.priv_data_size = sizeof(IffContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame_ilbm,
+ .decode = decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
};
-
+#endif
+#if CONFIG_IFF_BYTERUN1_DECODER
AVCodec ff_iff_byterun1_decoder = {
- .name = "iff_byterun1",
- .long_name = NULL_IF_CONFIG_SMALL("IFF ByteRun1"),
+ .name = "iff",
+ .long_name = NULL_IF_CONFIG_SMALL("IFF"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_IFF_BYTERUN1,
.priv_data_size = sizeof(IffContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame_byterun1,
+ .decode = decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
};
+#endif
.init = imc_decode_init,
.close = imc_decode_close,
.decode = imc_decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .flush = flush,
+ .capabilities = AV_CODEC_CAP_DR1,
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
};
.init = imc_decode_init,
.close = imc_decode_close,
.decode = imc_decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .flush = flush,
+ .capabilities = AV_CODEC_CAP_DR1,
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
};
--- /dev/null
- .capabilities = CODEC_CAP_EXPERIMENTAL,
+/*
+ * JPEG2000 image encoder
+ * Copyright (c) 2007 Kamil Nowosad
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * JPEG2000 image encoder
+ * @file
+ * @author Kamil Nowosad
+ */
+
+#include <float.h>
+#include "avcodec.h"
+#include "internal.h"
+#include "bytestream.h"
+#include "jpeg2000.h"
+#include "libavutil/common.h"
+#include "libavutil/opt.h"
+
+#define NMSEDEC_BITS 7
+#define NMSEDEC_FRACBITS (NMSEDEC_BITS-1)
+#define WMSEDEC_SHIFT 13 ///< must be >= 13
+#define LAMBDA_SCALE (100000000LL << (WMSEDEC_SHIFT - 13))
+
+#define CODEC_JP2 1
+#define CODEC_J2K 0
+
+/* Lookup tables for the normalized MSE decrease used in rate control.
+   NOTE(review): presumably filled in by an init routine not visible in
+   this hunk — confirm before relying on their contents. */
+static int lut_nmsedec_ref [1<<NMSEDEC_BITS],
+           lut_nmsedec_ref0[1<<NMSEDEC_BITS],
+           lut_nmsedec_sig [1<<NMSEDEC_BITS],
+           lut_nmsedec_sig0[1<<NMSEDEC_BITS];
+
+static const int dwt_norms[2][4][10] = { // [dwt_type][band][rlevel] (multiplied by 10000)
+    {{10000, 19650, 41770,  84030, 169000, 338400,  676900, 1353000, 2706000, 5409000},
+     {20220, 39890, 83550, 170400, 342700, 686300, 1373000, 2746000, 5490000},
+     {20220, 39890, 83550, 170400, 342700, 686300, 1373000, 2746000, 5490000},
+     {20800, 38650, 83070, 171800, 347100, 695900, 1393000, 2786000, 5572000}},
+
+    {{10000, 15000, 27500, 53750, 106800, 213400, 426700, 853300, 1707000, 3413000},
+     {10380, 15920, 29190, 57030, 113300, 226400, 452500, 904800, 1809000},
+     {10380, 15920, 29190, 57030, 113300, 226400, 452500, 904800, 1809000},
+     { 7186,  9218, 15860, 30430,  60190, 120100, 240000, 479700,  959300}}
+};
+
+/* One tile of the image: an array of per-component tile data. */
+typedef struct {
+    Jpeg2000Component *comp;  ///< one entry per image component
+} Jpeg2000Tile;
+
+/* Private encoder state for the JPEG 2000 encoder. */
+typedef struct {
+    AVClass *class;
+    AVCodecContext *avctx;
+    const AVFrame *picture;           ///< frame currently being encoded
+
+    int width, height;                ///< image width and height
+    uint8_t cbps[4];                  ///< bits per sample in particular components
+    int chroma_shift[2];              ///< chroma subsampling log2 shifts (x, y)
+    uint8_t planar;                   ///< nonzero for planar pixel formats
+    int ncomponents;
+    int tile_width, tile_height;      ///< tile size
+    int numXtiles, numYtiles;         ///< tile grid dimensions
+
+    uint8_t *buf_start;               ///< start of the output buffer
+    uint8_t *buf;                     ///< current write position
+    uint8_t *buf_end;                 ///< end of the output buffer
+    int bit_index;                    ///< bit position within the current output byte
+
+    int64_t lambda;                   ///< rate-distortion trade-off parameter
+
+    Jpeg2000CodingStyle codsty;
+    Jpeg2000QuantStyle  qntsty;
+
+    Jpeg2000Tile *tile;               ///< numXtiles*numYtiles tiles
+
+    int format;                       ///< CODEC_JP2 or CODEC_J2K container flavor
+} Jpeg2000EncoderContext;
+
+
+/* debug */
+#if 0
+#undef ifprintf
+#undef printf
+
+static void nspaces(FILE *fd, int n)
+{
+ while(n--) putc(' ', fd);
+}
+
+static void printcomp(Jpeg2000Component *comp)
+{
+ int i;
+ for (i = 0; i < comp->y1 - comp->y0; i++)
+ ff_jpeg2000_printv(comp->i_data + i * (comp->x1 - comp->x0), comp->x1 - comp->x0);
+}
+
+static void dump(Jpeg2000EncoderContext *s, FILE *fd)
+{
+ int tileno, compno, reslevelno, bandno, precno;
+ fprintf(fd, "XSiz = %d, YSiz = %d, tile_width = %d, tile_height = %d\n"
+ "numXtiles = %d, numYtiles = %d, ncomponents = %d\n"
+ "tiles:\n",
+ s->width, s->height, s->tile_width, s->tile_height,
+ s->numXtiles, s->numYtiles, s->ncomponents);
+ for (tileno = 0; tileno < s->numXtiles * s->numYtiles; tileno++){
+ Jpeg2000Tile *tile = s->tile + tileno;
+ nspaces(fd, 2);
+ fprintf(fd, "tile %d:\n", tileno);
+ for(compno = 0; compno < s->ncomponents; compno++){
+ Jpeg2000Component *comp = tile->comp + compno;
+ nspaces(fd, 4);
+ fprintf(fd, "component %d:\n", compno);
+ nspaces(fd, 4);
+ fprintf(fd, "x0 = %d, x1 = %d, y0 = %d, y1 = %d\n",
+ comp->x0, comp->x1, comp->y0, comp->y1);
+ for(reslevelno = 0; reslevelno < s->nreslevels; reslevelno++){
+ Jpeg2000ResLevel *reslevel = comp->reslevel + reslevelno;
+ nspaces(fd, 6);
+ fprintf(fd, "reslevel %d:\n", reslevelno);
+ nspaces(fd, 6);
+ fprintf(fd, "x0 = %d, x1 = %d, y0 = %d, y1 = %d, nbands = %d\n",
+ reslevel->x0, reslevel->x1, reslevel->y0,
+ reslevel->y1, reslevel->nbands);
+ for(bandno = 0; bandno < reslevel->nbands; bandno++){
+ Jpeg2000Band *band = reslevel->band + bandno;
+ nspaces(fd, 8);
+ fprintf(fd, "band %d:\n", bandno);
+ nspaces(fd, 8);
+ fprintf(fd, "x0 = %d, x1 = %d, y0 = %d, y1 = %d,"
+ "codeblock_width = %d, codeblock_height = %d cblknx = %d cblkny = %d\n",
+ band->x0, band->x1,
+ band->y0, band->y1,
+ band->codeblock_width, band->codeblock_height,
+ band->cblknx, band->cblkny);
+ for (precno = 0; precno < reslevel->num_precincts_x * reslevel->num_precincts_y; precno++){
+ Jpeg2000Prec *prec = band->prec + precno;
+ nspaces(fd, 10);
+ fprintf(fd, "prec %d:\n", precno);
+ nspaces(fd, 10);
+ fprintf(fd, "xi0 = %d, xi1 = %d, yi0 = %d, yi1 = %d\n",
+ prec->xi0, prec->xi1, prec->yi0, prec->yi1);
+ }
+ }
+ }
+ }
+ }
+}
+#endif
+
+/* bitstream routines */
+
+/** Write the bit 'val' to the bitstream 'n' times. */
+static void put_bits(Jpeg2000EncoderContext *s, int val, int n) // TODO: optimize
+{
+    while (n-- > 0){
+        if (s->bit_index == 8)
+        {
+            /* JPEG 2000 bit stuffing: after a 0xff byte only 7 bits of the
+               following byte may be used, so start it at bit index 1. */
+            s->bit_index = *s->buf == 0xff;
+            *(++s->buf) = 0;
+        }
+        *s->buf |= val << (7 - s->bit_index++);
+    }
+}
+
+/** Write the n least significant bits of 'num', most significant bit first. */
+static void put_num(Jpeg2000EncoderContext *s, int num, int n)
+{
+    while(--n >= 0)
+        put_bits(s, (num >> n) & 1, 1);
+}
+
+/** Flush the bitstream: advance to the next whole byte if one is partially filled. */
+static void j2k_flush(Jpeg2000EncoderContext *s)
+{
+    if (s->bit_index){
+        s->bit_index = 0;
+        s->buf++;
+    }
+}
+
+/* tag tree routines */
+
+/** Code the value stored in 'node' against 'threshold' using tag-tree coding. */
+static void tag_tree_code(Jpeg2000EncoderContext *s, Jpeg2000TgtNode *node, int threshold)
+{
+    Jpeg2000TgtNode *stack[30];
+    int sp = 1, curval = 0;
+    stack[0] = node;
+
+    /* Walk up to the nearest already-visited ancestor; its value is the
+       starting point for coding the path back down. */
+    node = node->parent;
+    while(node){
+        if (node->vis){
+            curval = node->val;
+            break;
+        }
+        node->vis++;
+        stack[sp++] = node;
+        node = node->parent;
+    }
+    /* Emit the code for each node on the path from that ancestor down to
+       the leaf: zeros up to the node's value, then a terminating one. */
+    while(--sp >= 0){
+        if (stack[sp]->val >= threshold){
+            /* Value not yet reached by this threshold: emit only zeros. */
+            put_bits(s, 0, threshold - curval);
+            break;
+        }
+        put_bits(s, 0, stack[sp]->val - curval);
+        put_bits(s, 1, 1);
+        curval = stack[sp]->val;
+    }
+}
+
+/** Propagate node->val upwards: each ancestor holds the minimum of its children. */
+static void tag_tree_update(Jpeg2000TgtNode *node)
+{
+    int lev = 0;
+    while (node->parent){
+        /* Stop as soon as an ancestor is already <= this value. */
+        if (node->parent->val <= node->val)
+            break;
+        node->parent->val = node->val;
+        node = node->parent;
+        lev++;
+    }
+}
+
+/**
+ * Write the SIZ (image and tile size) marker segment.
+ * @return 0 on success, -1 if the output buffer is too small
+ */
+static int put_siz(Jpeg2000EncoderContext *s)
+{
+    int i;
+
+    if (s->buf_end - s->buf < 40 + 3 * s->ncomponents)
+        return -1;
+
+    bytestream_put_be16(&s->buf, JPEG2000_SIZ);
+    bytestream_put_be16(&s->buf, 38 + 3 * s->ncomponents); // Lsiz
+    bytestream_put_be16(&s->buf, 0); // Rsiz
+    bytestream_put_be32(&s->buf, s->width); // width
+    bytestream_put_be32(&s->buf, s->height); // height
+    bytestream_put_be32(&s->buf, 0); // X0Siz
+    bytestream_put_be32(&s->buf, 0); // Y0Siz
+
+    bytestream_put_be32(&s->buf, s->tile_width); // XTSiz
+    bytestream_put_be32(&s->buf, s->tile_height); // YTSiz
+    bytestream_put_be32(&s->buf, 0); // XT0Siz
+    bytestream_put_be32(&s->buf, 0); // YT0Siz
+    bytestream_put_be16(&s->buf, s->ncomponents); // CSiz
+
+    for (i = 0; i < s->ncomponents; i++){ // Ssiz_i XRsiz_i, YRsiz_i
+        /* Ssiz = 7 means 8 bits per sample, unsigned (depth - 1). */
+        bytestream_put_byte(&s->buf, 7);
+        /* Component 0 is never subsampled; chroma components use the shifts. */
+        bytestream_put_byte(&s->buf, i?1<<s->chroma_shift[0]:1);
+        bytestream_put_byte(&s->buf, i?1<<s->chroma_shift[1]:1);
+    }
+    return 0;
+}
+
+/**
+ * Write the COD (coding style default) marker segment.
+ * @return 0 on success, -1 if the output buffer is too small
+ */
+static int put_cod(Jpeg2000EncoderContext *s)
+{
+    Jpeg2000CodingStyle *codsty = &s->codsty;
+
+    if (s->buf_end - s->buf < 14)
+        return -1;
+
+    bytestream_put_be16(&s->buf, JPEG2000_COD);
+    bytestream_put_be16(&s->buf, 12); // Lcod
+    bytestream_put_byte(&s->buf, 0);  // Scod
+    // SGcod
+    bytestream_put_byte(&s->buf, 0); // progression level
+    bytestream_put_be16(&s->buf, 1); // num of layers
+    /* NOTE(review): both branches emit the same value; presumably a
+       placeholder for a future MCT (color transform) flag — verify. */
+    if(s->avctx->pix_fmt == AV_PIX_FMT_YUV444P){
+        bytestream_put_byte(&s->buf, 0); // unspecified
+    }else{
+        bytestream_put_byte(&s->buf, 0); // unspecified
+    }
+    // SPcod
+    bytestream_put_byte(&s->buf, codsty->nreslevels - 1); // num of decomp. levels
+    bytestream_put_byte(&s->buf, codsty->log2_cblk_width-2); // cblk width
+    bytestream_put_byte(&s->buf, codsty->log2_cblk_height-2); // cblk height
+    bytestream_put_byte(&s->buf, 0); // cblk style
+    bytestream_put_byte(&s->buf, codsty->transform == FF_DWT53); // transformation
+    return 0;
+}
+
+/**
+ * Write the QCD (quantization default) marker segment.
+ * @param compno unused here; the same quantization applies to all components
+ * @return 0 on success, -1 if the output buffer is too small
+ */
+static int put_qcd(Jpeg2000EncoderContext *s, int compno)
+{
+    int i, size;
+    Jpeg2000CodingStyle *codsty = &s->codsty;
+    Jpeg2000QuantStyle  *qntsty = &s->qntsty;
+
+    /* One subband at the lowest level plus three per additional level:
+       1 byte each without quantization, 2 bytes each with. */
+    if (qntsty->quantsty == JPEG2000_QSTY_NONE)
+        size = 4 + 3 * (codsty->nreslevels-1);
+    else // QSTY_SE
+        size = 5 + 6 * (codsty->nreslevels-1);
+
+    if (s->buf_end - s->buf < size + 2)
+        return -1;
+
+    bytestream_put_be16(&s->buf, JPEG2000_QCD);
+    bytestream_put_be16(&s->buf, size);  // LQcd
+    bytestream_put_byte(&s->buf, (qntsty->nguardbits << 5) | qntsty->quantsty);  // Sqcd
+    if (qntsty->quantsty == JPEG2000_QSTY_NONE)
+        for (i = 0; i < codsty->nreslevels * 3 - 2; i++)
+            bytestream_put_byte(&s->buf, qntsty->expn[i] << 3);
+    else // QSTY_SE
+        for (i = 0; i < codsty->nreslevels * 3 - 2; i++)
+            bytestream_put_be16(&s->buf, (qntsty->expn[i] << 11) | qntsty->mant[i]);
+    return 0;
+}
+
+/**
+ * Write a COM (comment) marker carrying the libavcodec ident string.
+ * Skipped entirely in bitexact mode so output stays reproducible.
+ * @param compno unused
+ * @return 0 on success (or when skipped), -1 if the buffer is too small
+ */
+static int put_com(Jpeg2000EncoderContext *s, int compno)
+{
+    int size = 4 + strlen(LIBAVCODEC_IDENT);
+
+    if (s->avctx->flags & AV_CODEC_FLAG_BITEXACT)
+        return 0;
+
+    if (s->buf_end - s->buf < size + 2)
+        return -1;
+
+    bytestream_put_be16(&s->buf, JPEG2000_COM);
+    bytestream_put_be16(&s->buf, size);
+    bytestream_put_be16(&s->buf, 1); // General use (ISO/IEC 8859-15 (Latin) values)
+
+    bytestream_put_buffer(&s->buf, LIBAVCODEC_IDENT, strlen(LIBAVCODEC_IDENT));
+
+    return 0;
+}
+
+/**
+ * Write the SOT (start of tile-part) marker segment for one tile.
+ * The 4-byte Psot (tile-part length) field is left as 0; the caller patches
+ * it once the tile has been fully encoded.
+ * @return pointer to the Psot field to patch later, or NULL if the buffer is too small
+ */
+static uint8_t *put_sot(Jpeg2000EncoderContext *s, int tileno)
+{
+    uint8_t *psotptr;
+
+    if (s->buf_end - s->buf < 12)
+        return NULL;
+
+    bytestream_put_be16(&s->buf, JPEG2000_SOT);
+    bytestream_put_be16(&s->buf, 10); // Lsot
+    bytestream_put_be16(&s->buf, tileno); // Isot
+
+    psotptr = s->buf;
+    bytestream_put_be32(&s->buf, 0); // Psot (filled in later)
+
+    bytestream_put_byte(&s->buf, 0); // TPsot
+    bytestream_put_byte(&s->buf, 1); // TNsot
+    return psotptr;
+}
+
+/**
+ * compute the sizes of tiles, resolution levels, bands, etc.
+ * allocate memory for them
+ * divide the input image into tile-components
+ */
+static int init_tiles(Jpeg2000EncoderContext *s)
+{
+ int tileno, tilex, tiley, compno;
+ Jpeg2000CodingStyle *codsty = &s->codsty;
+ Jpeg2000QuantStyle *qntsty = &s->qntsty;
+
+ s->numXtiles = ff_jpeg2000_ceildiv(s->width, s->tile_width);
+ s->numYtiles = ff_jpeg2000_ceildiv(s->height, s->tile_height);
+
+ s->tile = av_malloc_array(s->numXtiles, s->numYtiles * sizeof(Jpeg2000Tile));
+ if (!s->tile)
+ return AVERROR(ENOMEM);
+ for (tileno = 0, tiley = 0; tiley < s->numYtiles; tiley++)
+ for (tilex = 0; tilex < s->numXtiles; tilex++, tileno++){
+ Jpeg2000Tile *tile = s->tile + tileno;
+
+ tile->comp = av_mallocz_array(s->ncomponents, sizeof(Jpeg2000Component));
+ if (!tile->comp)
+ return AVERROR(ENOMEM);
+ for (compno = 0; compno < s->ncomponents; compno++){
+ Jpeg2000Component *comp = tile->comp + compno;
+ int ret, i, j;
+
+ comp->coord[0][0] = comp->coord_o[0][0] = tilex * s->tile_width;
+ comp->coord[0][1] = comp->coord_o[0][1] = FFMIN((tilex+1)*s->tile_width, s->width);
+ comp->coord[1][0] = comp->coord_o[1][0] = tiley * s->tile_height;
+ comp->coord[1][1] = comp->coord_o[1][1] = FFMIN((tiley+1)*s->tile_height, s->height);
+ if (compno > 0)
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 2; j++)
+ comp->coord[i][j] = comp->coord_o[i][j] = ff_jpeg2000_ceildivpow2(comp->coord[i][j], s->chroma_shift[i]);
+
+ if ((ret = ff_jpeg2000_init_component(comp,
+ codsty,
+ qntsty,
+ s->cbps[compno],
+ compno?1<<s->chroma_shift[0]:1,
+ compno?1<<s->chroma_shift[1]:1,
+ s->avctx
+ )) < 0)
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Copy the input picture into the per-tile integer sample buffers,
+ * centering samples around zero (DC level shift of 2^7 — assumes 8-bit input).
+ * Handles both planar (YUV) and packed (RGB24/GRAY8) layouts.
+ */
+static void copy_frame(Jpeg2000EncoderContext *s)
+{
+    int tileno, compno, i, y, x;
+    uint8_t *line;
+    for (tileno = 0; tileno < s->numXtiles * s->numYtiles; tileno++){
+        Jpeg2000Tile *tile = s->tile + tileno;
+        if (s->planar){
+            for (compno = 0; compno < s->ncomponents; compno++){
+                Jpeg2000Component *comp = tile->comp + compno;
+                int *dst = comp->i_data;
+                line = s->picture->data[compno]
+                       + comp->coord[1][0] * s->picture->linesize[compno]
+                       + comp->coord[0][0];
+                for (y = comp->coord[1][0]; y < comp->coord[1][1]; y++){
+                    uint8_t *ptr = line;
+                    for (x = comp->coord[0][0]; x < comp->coord[0][1]; x++)
+                        *dst++ = *ptr++ - (1 << 7);
+                    line += s->picture->linesize[compno];
+                }
+            }
+        } else{
+            // packed: interleaved samples, ncomponents bytes per pixel
+            line = s->picture->data[0] + tile->comp[0].coord[1][0] * s->picture->linesize[0]
+                   + tile->comp[0].coord[0][0] * s->ncomponents;
+
+            i = 0;
+            for (y = tile->comp[0].coord[1][0]; y < tile->comp[0].coord[1][1]; y++){
+                uint8_t *ptr = line;
+                for (x = tile->comp[0].coord[0][0]; x < tile->comp[0].coord[0][1]; x++, i++){
+                    for (compno = 0; compno < s->ncomponents; compno++){
+                        tile->comp[compno].i_data[i] = *ptr++  - (1 << 7);
+                    }
+                }
+                line += s->picture->linesize[0];
+            }
+        }
+    }
+}
+
+/**
+ * Fill the per-subband quantization exponents/mantissas for every component.
+ * For the integer 9/7 DWT, derives values from the DWT norm tables; for 5/3,
+ * uses the reversible no-quantization exponents.
+ */
+static void init_quantization(Jpeg2000EncoderContext *s)
+{
+    int compno, reslevelno, bandno;
+    Jpeg2000QuantStyle  *qntsty = &s->qntsty;
+    Jpeg2000CodingStyle *codsty = &s->codsty;
+
+    for (compno = 0; compno < s->ncomponents; compno++){
+        int gbandno = 0; // global subband index across all resolution levels
+        for (reslevelno = 0; reslevelno < codsty->nreslevels; reslevelno++){
+            int nbands, lev = codsty->nreslevels - reslevelno - 1;
+            nbands = reslevelno ? 3 : 1;
+            for (bandno = 0; bandno < nbands; bandno++, gbandno++){
+                int expn, mant = 0;
+
+                if (codsty->transform == FF_DWT97_INT){
+                    // precedence note: "ss >> log - 11" is ss >> (log - 11),
+                    // i.e. normalize ss to an 11-bit mantissa
+                    int bandpos = bandno + (reslevelno>0),
+                        ss = 81920000 / dwt_norms[0][bandpos][lev],
+                        log = av_log2(ss);
+                    mant = (11 - log < 0 ? ss >> log - 11 : ss << 11 - log) & 0x7ff;
+                    expn = s->cbps[compno] - log + 13;
+                } else
+                    expn = ((bandno&2)>>1) + (reslevelno>0) + s->cbps[compno];
+
+                qntsty->expn[gbandno] = expn;
+                qntsty->mant[gbandno] = mant;
+            }
+        }
+    }
+}
+
+/**
+ * Precompute the normalized MSE decrease lookup tables used by the
+ * tier-1 coder for rate-distortion estimation (significance and
+ * refinement passes, each with a "first bitplane" variant).
+ * Note: shifts like "NMSEDEC_FRACBITS-1" bind as (FRACBITS-1) — shift has
+ * lower precedence than minus.
+ */
+static void init_luts(void)
+{
+    int i, a,
+        mask = ~((1<<NMSEDEC_FRACBITS)-1);
+
+    for (i = 0; i < (1 << NMSEDEC_BITS); i++){
+        lut_nmsedec_sig[i]  = FFMAX(6*i - (9<<NMSEDEC_FRACBITS-1) << 12-NMSEDEC_FRACBITS, 0);
+        lut_nmsedec_sig0[i] = FFMAX((i*i + (1<<NMSEDEC_FRACBITS-1) & mask) << 1, 0);
+
+        a = (i >> (NMSEDEC_BITS-2)&2) + 1;
+        lut_nmsedec_ref[i]  = FFMAX((-2*i + (1<<NMSEDEC_FRACBITS) + a*i - (a*a<<NMSEDEC_FRACBITS-2))
+                                    << 13-NMSEDEC_FRACBITS, 0);
+        lut_nmsedec_ref0[i] = FFMAX(((i*i + (1-4*i << NMSEDEC_FRACBITS-1) + (1<<2*NMSEDEC_FRACBITS)) & mask)
+                                    << 1, 0);
+    }
+}
+
+/* tier-1 routines */
+static int getnmsedec_sig(int x, int bpno)
+{
+ if (bpno > NMSEDEC_FRACBITS)
+ return lut_nmsedec_sig[(x >> (bpno - NMSEDEC_FRACBITS)) & ((1 << NMSEDEC_BITS) - 1)];
+ return lut_nmsedec_sig0[x & ((1 << NMSEDEC_BITS) - 1)];
+}
+
+/** Look up the MSE decrease for refining an already-significant coefficient at bitplane bpno. */
+static int getnmsedec_ref(int x, int bpno)
+{
+    if (bpno > NMSEDEC_FRACBITS)
+        return lut_nmsedec_ref[(x >> (bpno - NMSEDEC_FRACBITS)) & ((1 << NMSEDEC_BITS) - 1)];
+    return lut_nmsedec_ref0[x & ((1 << NMSEDEC_BITS) - 1)];
+}
+
+/**
+ * Tier-1 significance propagation pass: code coefficients that are not yet
+ * significant but have at least one significant neighbor. Scans in 4-row
+ * stripes, column by column. Accumulates the MSE decrease into *nmsedec.
+ */
+static void encode_sigpass(Jpeg2000T1Context *t1, int width, int height, int bandno, int *nmsedec, int bpno)
+{
+    int y0, x, y, mask = 1 << (bpno + NMSEDEC_FRACBITS);
+    for (y0 = 0; y0 < height; y0 += 4)
+        for (x = 0; x < width; x++)
+            for (y = y0; y < height && y < y0+4; y++){
+                if (!(t1->flags[(y+1) * t1->stride + x+1] & JPEG2000_T1_SIG) && (t1->flags[(y+1) * t1->stride + x+1] & JPEG2000_T1_SIG_NB)){
+                    int ctxno = ff_jpeg2000_getsigctxno(t1->flags[(y+1) * t1->stride + x+1], bandno),
+                        bit = t1->data[(y) * t1->stride + x] & mask ? 1 : 0;
+                    ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + ctxno, bit);
+                    if (bit){
+                        // newly significant: code the sign and mark it
+                        int xorbit;
+                        int ctxno = ff_jpeg2000_getsgnctxno(t1->flags[(y+1) * t1->stride + x+1], &xorbit);
+                        ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + ctxno, (t1->flags[(y+1) * t1->stride + x+1] >> 15) ^ xorbit);
+                        *nmsedec += getnmsedec_sig(t1->data[(y) * t1->stride + x], bpno + NMSEDEC_FRACBITS);
+                        ff_jpeg2000_set_significance(t1, x, y, t1->flags[(y+1) * t1->stride + x+1] >> 15);
+                    }
+                    t1->flags[(y+1) * t1->stride + x+1] |= JPEG2000_T1_VIS; // visited this bitplane
+                }
+            }
+}
+
+/**
+ * Tier-1 magnitude refinement pass: code the current bitplane bit of
+ * coefficients that were already significant and not visited this bitplane.
+ * Accumulates the MSE decrease into *nmsedec.
+ */
+static void encode_refpass(Jpeg2000T1Context *t1, int width, int height, int *nmsedec, int bpno)
+{
+    int y0, x, y, mask = 1 << (bpno + NMSEDEC_FRACBITS);
+    for (y0 = 0; y0 < height; y0 += 4)
+        for (x = 0; x < width; x++)
+            for (y = y0; y < height && y < y0+4; y++)
+                if ((t1->flags[(y+1) * t1->stride + x+1] & (JPEG2000_T1_SIG | JPEG2000_T1_VIS)) == JPEG2000_T1_SIG){
+                    int ctxno = ff_jpeg2000_getrefctxno(t1->flags[(y+1) * t1->stride + x+1]);
+                    *nmsedec += getnmsedec_ref(t1->data[(y) * t1->stride + x], bpno + NMSEDEC_FRACBITS);
+                    ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + ctxno, t1->data[(y) * t1->stride + x] & mask ? 1:0);
+                    t1->flags[(y+1) * t1->stride + x+1] |= JPEG2000_T1_REF;
+                }
+}
+
+/**
+ * Tier-1 cleanup pass: code every coefficient not handled by the two previous
+ * passes. Uses run-length (aggregation) coding when a whole 4-row column has
+ * no significant/visited neighbors; otherwise codes each sample with the
+ * zero-coding context. Clears the VIS flags for the next bitplane.
+ */
+static void encode_clnpass(Jpeg2000T1Context *t1, int width, int height, int bandno, int *nmsedec, int bpno)
+{
+    int y0, x, y, mask = 1 << (bpno + NMSEDEC_FRACBITS);
+    for (y0 = 0; y0 < height; y0 += 4)
+        for (x = 0; x < width; x++){
+            if (y0 + 3 < height && !(
+            (t1->flags[(y0+1) * t1->stride + x+1] & (JPEG2000_T1_SIG_NB | JPEG2000_T1_VIS | JPEG2000_T1_SIG)) ||
+            (t1->flags[(y0+2) * t1->stride + x+1] & (JPEG2000_T1_SIG_NB | JPEG2000_T1_VIS | JPEG2000_T1_SIG)) ||
+            (t1->flags[(y0+3) * t1->stride + x+1] & (JPEG2000_T1_SIG_NB | JPEG2000_T1_VIS | JPEG2000_T1_SIG)) ||
+            (t1->flags[(y0+4) * t1->stride + x+1] & (JPEG2000_T1_SIG_NB | JPEG2000_T1_VIS | JPEG2000_T1_SIG))))
+            {
+                // aggregation mode
+                int rlen;
+                for (rlen = 0; rlen < 4; rlen++)
+                    if (t1->data[(y0+rlen) * t1->stride + x] & mask)
+                        break;
+                ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + MQC_CX_RL, rlen != 4);
+                if (rlen == 4)
+                    continue; // whole column is zero at this bitplane
+                // position of the first 1-bit, coded with the uniform context
+                ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + MQC_CX_UNI, rlen >> 1);
+                ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + MQC_CX_UNI, rlen & 1);
+                for (y = y0 + rlen; y < y0 + 4; y++){
+                    if (!(t1->flags[(y+1) * t1->stride + x+1] & (JPEG2000_T1_SIG | JPEG2000_T1_VIS))){
+                        int ctxno = ff_jpeg2000_getsigctxno(t1->flags[(y+1) * t1->stride + x+1], bandno);
+                        if (y > y0 + rlen)
+                            ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + ctxno, t1->data[(y) * t1->stride + x] & mask ? 1:0);
+                        if (t1->data[(y) * t1->stride + x] & mask){ // newly significant
+                            int xorbit;
+                            int ctxno = ff_jpeg2000_getsgnctxno(t1->flags[(y+1) * t1->stride + x+1], &xorbit);
+                            *nmsedec += getnmsedec_sig(t1->data[(y) * t1->stride + x], bpno + NMSEDEC_FRACBITS);
+                            ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + ctxno, (t1->flags[(y+1) * t1->stride + x+1] >> 15) ^ xorbit);
+                            ff_jpeg2000_set_significance(t1, x, y, t1->flags[(y+1) * t1->stride + x+1] >> 15);
+                        }
+                    }
+                    t1->flags[(y+1) * t1->stride + x+1] &= ~JPEG2000_T1_VIS;
+                }
+            } else{
+                for (y = y0; y < y0 + 4 && y < height; y++){
+                    if (!(t1->flags[(y+1) * t1->stride + x+1] & (JPEG2000_T1_SIG | JPEG2000_T1_VIS))){
+                        int ctxno = ff_jpeg2000_getsigctxno(t1->flags[(y+1) * t1->stride + x+1], bandno);
+                        ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + ctxno, t1->data[(y) * t1->stride + x] & mask ? 1:0);
+                        if (t1->data[(y) * t1->stride + x] & mask){ // newly significant
+                            int xorbit;
+                            int ctxno = ff_jpeg2000_getsgnctxno(t1->flags[(y+1) * t1->stride + x+1], &xorbit);
+                            *nmsedec += getnmsedec_sig(t1->data[(y) * t1->stride + x], bpno + NMSEDEC_FRACBITS);
+                            ff_mqc_encode(&t1->mqc, t1->mqc.cx_states + ctxno, (t1->flags[(y+1) * t1->stride + x+1] >> 15) ^ xorbit);
+                            ff_jpeg2000_set_significance(t1, x, y, t1->flags[(y+1) * t1->stride + x+1] >> 15);
+                        }
+                    }
+                    t1->flags[(y+1) * t1->stride + x+1] &= ~JPEG2000_T1_VIS;
+                }
+            }
+        }
+}
+
+/**
+ * Entropy-code one code block: convert to sign-magnitude, find the number of
+ * non-zero bitplanes, then run the three tier-1 passes per bitplane
+ * (cleanup first on the MSB plane), recording per-pass rate/distortion for
+ * later rate control.
+ */
+static void encode_cblk(Jpeg2000EncoderContext *s, Jpeg2000T1Context *t1, Jpeg2000Cblk *cblk, Jpeg2000Tile *tile,
+                        int width, int height, int bandpos, int lev)
+{
+    int pass_t = 2, passno, x, y, max=0, nmsedec, bpno;
+    int64_t wmsedec = 0;
+
+    memset(t1->flags, 0, t1->stride * (height + 2) * sizeof(*t1->flags));
+
+    // sign-magnitude conversion; track the largest magnitude
+    for (y = 0; y < height; y++){
+        for (x = 0; x < width; x++){
+            if (t1->data[(y) * t1->stride + x] < 0){
+                t1->flags[(y+1) * t1->stride + x+1] |= JPEG2000_T1_SGN;
+                t1->data[(y) * t1->stride + x] = -t1->data[(y) * t1->stride + x];
+            }
+            max = FFMAX(max, t1->data[(y) * t1->stride + x]);
+        }
+    }
+
+    if (max == 0){
+        cblk->nonzerobits = 0;
+        bpno = 0;
+    } else{
+        cblk->nonzerobits = av_log2(max) + 1 - NMSEDEC_FRACBITS;
+        bpno = cblk->nonzerobits - 1;
+    }
+
+    ff_mqc_initenc(&t1->mqc, cblk->data);
+
+    for (passno = 0; bpno >= 0; passno++){
+        nmsedec=0;
+
+        switch(pass_t){
+            case 0: encode_sigpass(t1, width, height, bandpos, &nmsedec, bpno);
+                    break;
+            case 1: encode_refpass(t1, width, height, &nmsedec, bpno);
+                    break;
+            case 2: encode_clnpass(t1, width, height, bandpos, &nmsedec, bpno);
+                    break;
+        }
+
+        cblk->passes[passno].rate = ff_mqc_flush_to(&t1->mqc, cblk->passes[passno].flushed, &cblk->passes[passno].flushed_len);
+        wmsedec += (int64_t)nmsedec << (2*bpno); // distortion weighted by bitplane
+        cblk->passes[passno].disto = wmsedec;
+
+        if (++pass_t == 3){
+            pass_t = 0;
+            bpno--;
+        }
+    }
+    cblk->npasses    = passno;
+    cblk->ninclpasses = passno;
+
+    cblk->passes[passno-1].rate = ff_mqc_flush_to(&t1->mqc, cblk->passes[passno-1].flushed, &cblk->passes[passno-1].flushed_len);
+}
+
+/* tier-2 routines: */
+
+/** Write the number of coding passes using the variable-length code from the spec. */
+static void putnumpasses(Jpeg2000EncoderContext *s, int n)
+{
+    if (n == 1)
+        put_num(s, 0, 1);
+    else if (n == 2)
+        put_num(s, 2, 2);
+    else if (n <= 5)
+        put_num(s, 0xc | (n-3), 4);
+    else if (n <= 36)
+        put_num(s, 0x1e0 | (n-6), 9);
+    else
+        put_num(s, 0xff80 | (n-37), 16);
+}
+
+
+/**
+ * Tier-2: encode one packet (header + code-block bodies) for one precinct of
+ * one resolution level. The header carries inclusion/zero-bitplane tag trees,
+ * pass counts and code-block lengths; the bodies follow after a flush.
+ * @return 0 on success, -1 if the output buffer runs out
+ */
+static int encode_packet(Jpeg2000EncoderContext *s, Jpeg2000ResLevel *rlevel, int precno,
+                          uint8_t *expn, int numgbits)
+{
+    int bandno, empty = 1;
+
+    // init bitstream
+    *s->buf = 0;
+    s->bit_index = 0;
+
+    // header
+
+    // is the packet empty?
+    for (bandno = 0; bandno < rlevel->nbands; bandno++){
+        if (rlevel->band[bandno].coord[0][0] < rlevel->band[bandno].coord[0][1]
+        && rlevel->band[bandno].coord[1][0] < rlevel->band[bandno].coord[1][1]){
+            empty = 0;
+            break;
+        }
+    }
+
+    put_bits(s, !empty, 1);
+    if (empty){
+        j2k_flush(s);
+        return 0;
+    }
+
+    for (bandno = 0; bandno < rlevel->nbands; bandno++){
+        Jpeg2000Band *band = rlevel->band + bandno;
+        Jpeg2000Prec *prec = band->prec + precno;
+        int yi, xi, pos;
+        int cblknw = prec->nb_codeblocks_width;
+
+        if (band->coord[0][0] == band->coord[0][1]
+        || band->coord[1][0] == band->coord[1][1])
+            continue;
+
+        // update the inclusion and zero-bitplane tag trees
+        for (pos=0, yi = 0; yi < prec->nb_codeblocks_height; yi++){
+            for (xi = 0; xi < cblknw; xi++, pos++){
+                prec->cblkincl[pos].val = prec->cblk[yi * cblknw + xi].ninclpasses == 0;
+                tag_tree_update(prec->cblkincl + pos);
+                prec->zerobits[pos].val = expn[bandno] + numgbits - 1 - prec->cblk[yi * cblknw + xi].nonzerobits;
+                tag_tree_update(prec->zerobits + pos);
+            }
+        }
+
+        for (pos=0, yi = 0; yi < prec->nb_codeblocks_height; yi++){
+            for (xi = 0; xi < cblknw; xi++, pos++){
+                int pad = 0, llen, length;
+                Jpeg2000Cblk *cblk = prec->cblk + yi * cblknw + xi;
+
+                if (s->buf_end - s->buf < 20) // approximately
+                    return -1;
+
+                // inclusion information
+                tag_tree_code(s, prec->cblkincl + pos, 1);
+                if (!cblk->ninclpasses)
+                    continue;
+                // zerobits information
+                tag_tree_code(s, prec->zerobits + pos, 100);
+                // number of passes
+                putnumpasses(s, cblk->ninclpasses);
+
+                length = cblk->passes[cblk->ninclpasses-1].rate;
+                llen = av_log2(length) - av_log2(cblk->ninclpasses) - 2;
+                if (llen < 0){
+                    pad = -llen;
+                    llen = 0;
+                }
+                // length of code block
+                put_bits(s, 1, llen);
+                put_bits(s, 0, 1);
+                put_num(s, length, av_log2(length)+1+pad);
+            }
+        }
+    }
+    j2k_flush(s);
+    // packet bodies: flushed and unflushed MQ-coder bytes per code block
+    for (bandno = 0; bandno < rlevel->nbands; bandno++){
+        Jpeg2000Band *band = rlevel->band + bandno;
+        Jpeg2000Prec *prec = band->prec + precno;
+        int yi, cblknw = prec->nb_codeblocks_width;
+        for (yi =0; yi < prec->nb_codeblocks_height; yi++){
+            int xi;
+            for (xi = 0; xi < cblknw; xi++){
+                Jpeg2000Cblk *cblk = prec->cblk + yi * cblknw + xi;
+                if (cblk->ninclpasses){
+                    if (s->buf_end - s->buf < cblk->passes[cblk->ninclpasses-1].rate)
+                        return -1;
+                    bytestream_put_buffer(&s->buf, cblk->data,   cblk->passes[cblk->ninclpasses-1].rate
+                                                               - cblk->passes[cblk->ninclpasses-1].flushed_len);
+                    bytestream_put_buffer(&s->buf, cblk->passes[cblk->ninclpasses-1].flushed,
+                                                   cblk->passes[cblk->ninclpasses-1].flushed_len);
+                }
+            }
+        }
+    }
+    return 0;
+}
+
+/**
+ * Tier-2: emit all packets of one tile in LRCP (layer-resolution-component-
+ * position) progression order with a single layer.
+ * @return 0 on success or the first negative error from encode_packet()
+ */
+static int encode_packets(Jpeg2000EncoderContext *s, Jpeg2000Tile *tile, int tileno)
+{
+    int compno, reslevelno, ret;
+    Jpeg2000CodingStyle *codsty = &s->codsty;
+    Jpeg2000QuantStyle  *qntsty = &s->qntsty;
+
+    av_log(s->avctx, AV_LOG_DEBUG, "tier2\n");
+    // lay-rlevel-comp-pos progression
+    for (reslevelno = 0; reslevelno < codsty->nreslevels; reslevelno++){
+        for (compno = 0; compno < s->ncomponents; compno++){
+            int precno;
+            Jpeg2000ResLevel *reslevel = s->tile[tileno].comp[compno].reslevel + reslevelno;
+            for (precno = 0; precno < reslevel->num_precincts_x * reslevel->num_precincts_y; precno++){
+                if ((ret = encode_packet(s, reslevel, precno, qntsty->expn + (reslevelno ? 3*reslevelno-2 : 0),
+                              qntsty->nguardbits)) < 0)
+                    return ret;
+            }
+        }
+    }
+    av_log(s->avctx, AV_LOG_DEBUG, "after tier2\n");
+    return 0;
+}
+
+/**
+ * Rate control: pick how many passes of a code block to keep, accepting each
+ * pass whose weighted distortion decrease justifies its rate at the current
+ * lambda (Lagrangian rate-distortion cut).
+ * @return number of passes to include (0..npasses)
+ */
+static int getcut(Jpeg2000Cblk *cblk, int64_t lambda, int dwt_norm)
+{
+    int passno, res = 0;
+    for (passno = 0; passno < cblk->npasses; passno++){
+        int dr;
+        int64_t dd;
+
+        dr = cblk->passes[passno].rate
+           - (res ? cblk->passes[res-1].rate:0);
+        dd = cblk->passes[passno].disto
+           - (res ? cblk->passes[res-1].disto:0);
+
+        if (((dd * dwt_norm) >> WMSEDEC_SHIFT) * dwt_norm >= dr * lambda)
+            res = passno+1;
+    }
+    return res;
+}
+
+/**
+ * Apply rate control to every code block of a tile: set ninclpasses via
+ * getcut() using the subband's DWT norm scaled by its quantization step.
+ */
+static void truncpasses(Jpeg2000EncoderContext *s, Jpeg2000Tile *tile)
+{
+    int precno, compno, reslevelno, bandno, cblkno, lev;
+    Jpeg2000CodingStyle *codsty = &s->codsty;
+
+    for (compno = 0; compno < s->ncomponents; compno++){
+        Jpeg2000Component *comp = tile->comp + compno;
+
+        for (reslevelno = 0, lev = codsty->nreslevels-1; reslevelno < codsty->nreslevels; reslevelno++, lev--){
+            Jpeg2000ResLevel *reslevel = comp->reslevel + reslevelno;
+
+            for (precno = 0; precno < reslevel->num_precincts_x * reslevel->num_precincts_y; precno++){
+                for (bandno = 0; bandno < reslevel->nbands ; bandno++){
+                    int bandpos = bandno + (reslevelno > 0);
+                    Jpeg2000Band *band = reslevel->band + bandno;
+                    Jpeg2000Prec *prec = band->prec + precno;
+
+                    for (cblkno = 0; cblkno < prec->nb_codeblocks_height * prec->nb_codeblocks_width; cblkno++){
+                        Jpeg2000Cblk *cblk = prec->cblk + cblkno;
+
+                        cblk->ninclpasses = getcut(cblk, s->lambda,
+                                (int64_t)dwt_norms[codsty->transform == FF_DWT53][bandpos][lev] * (int64_t)band->i_stepsize >> 15);
+                    }
+                }
+            }
+        }
+    }
+}
+
+/**
+ * Encode one tile: forward DWT per component, tier-1 entropy coding of every
+ * code block (with quantization for the 9/7 path), then rate control and
+ * tier-2 packet emission.
+ * @return 0 on success or a negative error code
+ */
+static int encode_tile(Jpeg2000EncoderContext *s, Jpeg2000Tile *tile, int tileno)
+{
+    int compno, reslevelno, bandno, ret;
+    Jpeg2000T1Context t1;
+    Jpeg2000CodingStyle *codsty = &s->codsty;
+    for (compno = 0; compno < s->ncomponents; compno++){
+        Jpeg2000Component *comp = s->tile[tileno].comp + compno;
+
+        t1.stride = (1<<codsty->log2_cblk_width) + 2;
+
+        av_log(s->avctx, AV_LOG_DEBUG,"dwt\n");
+        if ((ret = ff_dwt_encode(&comp->dwt, comp->i_data)) < 0)
+            return ret;
+        av_log(s->avctx, AV_LOG_DEBUG,"after dwt -> tier1\n");
+
+        for (reslevelno = 0; reslevelno < codsty->nreslevels; reslevelno++){
+            Jpeg2000ResLevel *reslevel = comp->reslevel + reslevelno;
+
+            for (bandno = 0; bandno < reslevel->nbands ; bandno++){
+                Jpeg2000Band *band = reslevel->band + bandno;
+                Jpeg2000Prec *prec = band->prec; // we support only 1 precinct per band ATM in the encoder
+                int cblkx, cblky, cblkno=0, xx0, x0, xx1, y0, yy0, yy1, bandpos;
+                // yy0/yy1, xx0/xx1: code-block bounds inside the interleaved DWT buffer
+                yy0 = bandno == 0 ? 0 : comp->reslevel[reslevelno-1].coord[1][1] - comp->reslevel[reslevelno-1].coord[1][0];
+                y0 = yy0;
+                yy1 = FFMIN(ff_jpeg2000_ceildivpow2(band->coord[1][0] + 1, band->log2_cblk_height) << band->log2_cblk_height,
+                            band->coord[1][1]) - band->coord[1][0] + yy0;
+
+                if (band->coord[0][0] == band->coord[0][1] || band->coord[1][0] == band->coord[1][1])
+                    continue;
+
+                bandpos = bandno + (reslevelno > 0);
+
+                for (cblky = 0; cblky < prec->nb_codeblocks_height; cblky++){
+                    if (reslevelno == 0 || bandno == 1)
+                        xx0 = 0;
+                    else
+                        xx0 = comp->reslevel[reslevelno-1].coord[0][1] - comp->reslevel[reslevelno-1].coord[0][0];
+                    x0 = xx0;
+                    xx1 = FFMIN(ff_jpeg2000_ceildivpow2(band->coord[0][0] + 1, band->log2_cblk_width) << band->log2_cblk_width,
+                                band->coord[0][1]) - band->coord[0][0] + xx0;
+
+                    for (cblkx = 0; cblkx < prec->nb_codeblocks_width; cblkx++, cblkno++){
+                        int y, x;
+                        if (codsty->transform == FF_DWT53){
+                            // reversible path: just shift into the NMSEDEC fixed point
+                            for (y = yy0; y < yy1; y++){
+                                int *ptr = t1.data + (y-yy0)*t1.stride;
+                                for (x = xx0; x < xx1; x++){
+                                    *ptr++ = comp->i_data[(comp->coord[0][1] - comp->coord[0][0]) * y + x] << NMSEDEC_FRACBITS;
+                                }
+                            }
+                        } else{
+                            // irreversible path: quantize by the band step size
+                            for (y = yy0; y < yy1; y++){
+                                int *ptr = t1.data + (y-yy0)*t1.stride;
+                                for (x = xx0; x < xx1; x++){
+                                    *ptr = (comp->i_data[(comp->coord[0][1] - comp->coord[0][0]) * y + x]);
+                                    *ptr = (int64_t)*ptr * (int64_t)(16384 * 65536 / band->i_stepsize) >> 15 - NMSEDEC_FRACBITS;
+                                    ptr++;
+                                }
+                            }
+                        }
+                        encode_cblk(s, &t1, prec->cblk + cblkno, tile, xx1 - xx0, yy1 - yy0,
+                                    bandpos, codsty->nreslevels - reslevelno - 1);
+                        xx0 = xx1;
+                        xx1 = FFMIN(xx1 + (1 << band->log2_cblk_width), band->coord[0][1] - band->coord[0][0] + x0);
+                    }
+                    yy0 = yy1;
+                    yy1 = FFMIN(yy1 + (1 << band->log2_cblk_height), band->coord[1][1] - band->coord[1][0] + y0);
+                }
+            }
+        }
+        av_log(s->avctx, AV_LOG_DEBUG, "after tier1\n");
+    }
+
+    av_log(s->avctx, AV_LOG_DEBUG, "rate control\n");
+    truncpasses(s, tile);
+    if ((ret = encode_packets(s, tile, tileno)) < 0)
+        return ret;
+    av_log(s->avctx, AV_LOG_DEBUG, "after rate control\n");
+    return 0;
+}
+
+/** Free all per-tile component data and the tile array itself. Safe to call once after init_tiles(). */
+static void cleanup(Jpeg2000EncoderContext *s)
+{
+    int tileno, compno;
+    Jpeg2000CodingStyle *codsty = &s->codsty;
+
+    for (tileno = 0; tileno < s->numXtiles * s->numYtiles; tileno++){
+        for (compno = 0; compno < s->ncomponents; compno++){
+            Jpeg2000Component *comp = s->tile[tileno].comp + compno;
+            ff_jpeg2000_cleanup(comp, codsty);
+        }
+        av_freep(&s->tile[tileno].comp);
+    }
+    av_freep(&s->tile);
+}
+
+/** Reset per-component encoder state of every tile before encoding a new frame. */
+static void reinit(Jpeg2000EncoderContext *s)
+{
+    int tileno, compno;
+    for (tileno = 0; tileno < s->numXtiles * s->numYtiles; tileno++){
+        Jpeg2000Tile *tile = s->tile + tileno;
+        for (compno = 0; compno < s->ncomponents; compno++)
+            ff_jpeg2000_reinit(tile->comp + compno, &s->codsty);
+    }
+}
+
+/** Patch a JP2 box length field: write (end - size) big-endian into the 4 bytes at size. */
+static void update_size(uint8_t *size, const uint8_t *end)
+{
+    AV_WB32(size, end-size);
+}
+
+/**
+ * Encode one frame to a JPEG 2000 codestream, optionally wrapped in JP2 boxes
+ * (signature, ftyp, jp2h with ihdr+colr, jp2c). Emits SOC/SIZ/COD/QCD/COM
+ * headers, then one SOT/SOD tile-part per tile, then EOC.
+ * @return 0 on success, negative AVERROR / -1 on buffer exhaustion
+ */
+static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+                        const AVFrame *pict, int *got_packet)
+{
+    int tileno, ret;
+    Jpeg2000EncoderContext *s = avctx->priv_data;
+    uint8_t *chunkstart, *jp2cstart, *jp2hstart;
+
+    // worst-case packet size estimate; NOTE(review): FF_MIN_BUFFER_SIZE is the
+    // pre-AV_INPUT_BUFFER_MIN_SIZE name — confirm against the target lavc version
+    if ((ret = ff_alloc_packet2(avctx, pkt, avctx->width*avctx->height*9 + FF_MIN_BUFFER_SIZE, 0)) < 0)
+        return ret;
+
+    // init:
+    s->buf = s->buf_start = pkt->data;
+    s->buf_end = pkt->data + pkt->size;
+
+    s->picture = pict;
+
+    s->lambda = s->picture->quality * LAMBDA_SCALE;
+
+    copy_frame(s);
+    reinit(s);
+
+    if (s->format == CODEC_JP2) {
+        av_assert0(s->buf == pkt->data);
+
+        // JP2 signature box
+        bytestream_put_be32(&s->buf, 0x0000000C);
+        bytestream_put_be32(&s->buf, 0x6A502020);
+        bytestream_put_be32(&s->buf, 0x0D0A870A);
+
+        chunkstart = s->buf;
+        bytestream_put_be32(&s->buf, 0);
+        bytestream_put_buffer(&s->buf, "ftyp", 4);
+        bytestream_put_buffer(&s->buf, "jp2\040\040", 4);
+        bytestream_put_be32(&s->buf, 0);
+        bytestream_put_buffer(&s->buf, "jp2\040", 4);
+        update_size(chunkstart, s->buf);
+
+        jp2hstart = s->buf;
+        bytestream_put_be32(&s->buf, 0);
+        bytestream_put_buffer(&s->buf, "jp2h", 4);
+
+        chunkstart = s->buf;
+        bytestream_put_be32(&s->buf, 0);
+        bytestream_put_buffer(&s->buf, "ihdr", 4);
+        bytestream_put_be32(&s->buf, avctx->height);
+        bytestream_put_be32(&s->buf, avctx->width);
+        bytestream_put_be16(&s->buf, s->ncomponents);
+        bytestream_put_byte(&s->buf, s->cbps[0]);
+        bytestream_put_byte(&s->buf, 7);
+        bytestream_put_byte(&s->buf, 0);
+        bytestream_put_byte(&s->buf, 0);
+        update_size(chunkstart, s->buf);
+
+        chunkstart = s->buf;
+        bytestream_put_be32(&s->buf, 0);
+        bytestream_put_buffer(&s->buf, "colr", 4);
+        bytestream_put_byte(&s->buf, 1);
+        bytestream_put_byte(&s->buf, 0);
+        bytestream_put_byte(&s->buf, 0);
+        // enumerated colourspace: 17 = greyscale, 16 = sRGB, 18 = sYCC
+        if (s->ncomponents == 1) {
+            bytestream_put_be32(&s->buf, 17);
+        } else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) {
+            bytestream_put_be32(&s->buf, 16);
+        } else {
+            bytestream_put_be32(&s->buf, 18);
+        }
+        update_size(chunkstart, s->buf);
+        update_size(jp2hstart, s->buf);
+
+        jp2cstart = s->buf;
+        bytestream_put_be32(&s->buf, 0);
+        bytestream_put_buffer(&s->buf, "jp2c", 4);
+    }
+
+    if (s->buf_end - s->buf < 2)
+        return -1;
+    bytestream_put_be16(&s->buf, JPEG2000_SOC);
+    if ((ret = put_siz(s)) < 0)
+        return ret;
+    if ((ret = put_cod(s)) < 0)
+        return ret;
+    if ((ret = put_qcd(s, 0)) < 0)
+        return ret;
+    if ((ret = put_com(s, 0)) < 0)
+        return ret;
+
+    for (tileno = 0; tileno < s->numXtiles * s->numYtiles; tileno++){
+        uint8_t *psotptr;
+        if (!(psotptr = put_sot(s, tileno)))
+            return -1;
+        if (s->buf_end - s->buf < 2)
+            return -1;
+        bytestream_put_be16(&s->buf, JPEG2000_SOD);
+        if ((ret = encode_tile(s, s->tile + tileno, tileno)) < 0)
+            return ret;
+        bytestream_put_be32(&psotptr, s->buf - psotptr + 6); // patch Psot
+    }
+    if (s->buf_end - s->buf < 2)
+        return -1;
+    bytestream_put_be16(&s->buf, JPEG2000_EOC);
+
+    if (s->format == CODEC_JP2)
+        update_size(jp2cstart, s->buf);
+
+    av_log(s->avctx, AV_LOG_DEBUG, "end\n");
+    pkt->size = s->buf - s->buf_start;
+    pkt->flags |= AV_PKT_FLAG_KEY;
+    *got_packet = 1;
+
+    return 0;
+}
+
+/**
+ * Encoder init: set default coding/quantization styles, derive component
+ * layout from the pixel format, build lookup tables and allocate tiles.
+ * @return 0 on success or a negative AVERROR code
+ */
+static av_cold int j2kenc_init(AVCodecContext *avctx)
+{
+    int i, ret;
+    Jpeg2000EncoderContext *s = avctx->priv_data;
+    Jpeg2000CodingStyle *codsty = &s->codsty;
+    Jpeg2000QuantStyle  *qntsty = &s->qntsty;
+
+    s->avctx = avctx;
+    av_log(s->avctx, AV_LOG_DEBUG, "init\n");
+
+    // defaults:
+    // TODO: implement setting non-standard precinct size
+    memset(codsty->log2_prec_widths , 15, sizeof(codsty->log2_prec_widths ));
+    memset(codsty->log2_prec_heights, 15, sizeof(codsty->log2_prec_heights));
+    codsty->nreslevels2decode=
+    codsty->nreslevels       = 7;
+    codsty->log2_cblk_width  = 4;
+    codsty->log2_cblk_height = 4;
+    // NOTE(review): (ab)uses the deprecated prediction_method field to select
+    // the reversible 5/3 transform — confirm against the option it replaced
+    codsty->transform        = avctx->prediction_method ? FF_DWT53 : FF_DWT97_INT;
+
+    qntsty->nguardbits       = 1;
+
+    if ((s->tile_width  & (s->tile_width -1)) ||
+        (s->tile_height & (s->tile_height-1))) {
+        av_log(avctx, AV_LOG_WARNING, "Tile dimension not a power of 2\n");
+    }
+
+    if (codsty->transform == FF_DWT53)
+        qntsty->quantsty = JPEG2000_QSTY_NONE;
+    else
+        qntsty->quantsty = JPEG2000_QSTY_SE;
+
+    s->width = avctx->width;
+    s->height = avctx->height;
+
+    for (i = 0; i < 3; i++)
+        s->cbps[i] = 8; // 8 bits per sample for every supported pix_fmt
+
+    if (avctx->pix_fmt == AV_PIX_FMT_RGB24){
+        s->ncomponents = 3;
+    } else if (avctx->pix_fmt == AV_PIX_FMT_GRAY8){
+        s->ncomponents = 1;
+    } else{ // planar YUV
+        s->planar = 1;
+        s->ncomponents = 3;
+        avcodec_get_chroma_sub_sample(avctx->pix_fmt,
+                s->chroma_shift, s->chroma_shift + 1);
+    }
+
+    ff_jpeg2000_init_tier1_luts();
+    ff_mqc_init_context_tables();
+    init_luts();
+
+    init_quantization(s);
+    if ((ret=init_tiles(s)) < 0)
+        return ret;
+
+    av_log(s->avctx, AV_LOG_DEBUG, "after init\n");
+
+    return 0;
+}
+
+/** Encoder close callback: free all tile allocations. Always returns 0. */
+static int j2kenc_destroy(AVCodecContext *avctx)
+{
+    Jpeg2000EncoderContext *s = avctx->priv_data;
+
+    cleanup(s);
+    return 0;
+}
+
+// taken from the libopenjpeg wrapper so it matches
+
+// AVOption table: container format (raw J2K codestream vs JP2 file) and tile size.
+#define OFFSET(x) offsetof(Jpeg2000EncoderContext, x)
+#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
+static const AVOption options[] = {
+    { "format",        "Codec Format",      OFFSET(format),        AV_OPT_TYPE_INT,   { .i64 = CODEC_JP2   }, CODEC_J2K, CODEC_JP2,   VE, "format"      },
+    { "j2k",           NULL,                0,                     AV_OPT_TYPE_CONST, { .i64 = CODEC_J2K   }, 0,         0,           VE, "format"      },
+    { "jp2",           NULL,                0,                     AV_OPT_TYPE_CONST, { .i64 = CODEC_JP2   }, 0,         0,           VE, "format"      },
+    { "tile_width",    "Tile Width",        OFFSET(tile_width),    AV_OPT_TYPE_INT,   { .i64 = 256         }, 1,         1<<30,           VE, },
+    { "tile_height",   "Tile Height",       OFFSET(tile_height),   AV_OPT_TYPE_INT,   { .i64 = 256         }, 1,         1<<30,           VE, },
+
+    { NULL }
+};
+
+static const AVClass j2k_class = {
+    .class_name = "jpeg 2000 encoder",
+    .item_name  = av_default_item_name,
+    .option     = options,
+    .version    = LIBAVUTIL_VERSION_INT,
+};
+
+AVCodec ff_jpeg2000_encoder = {
+    .name           = "jpeg2000",
+    .long_name      = NULL_IF_CONFIG_SMALL("JPEG 2000"),
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = AV_CODEC_ID_JPEG2000,
+    .priv_data_size = sizeof(Jpeg2000EncoderContext),
+    .init           = j2kenc_init,
+    .encode2        = encode_frame,
+    .close          = j2kenc_destroy,
+    .capabilities   = AV_CODEC_CAP_EXPERIMENTAL,
+    .pix_fmts       = (const enum AVPixelFormat[]) {
+        AV_PIX_FMT_RGB24, AV_PIX_FMT_YUV444P, AV_PIX_FMT_GRAY8,
+        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
+        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
+        AV_PIX_FMT_NONE
+    },
+    .priv_class     = &j2k_class,
+};
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_JPEGLS,
.init = encode_init_ls,
- .capabilities = CODEC_CAP_FRAME_THREADS | CODEC_CAP_INTRA_ONLY,
++ .capabilities = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
.encode2 = encode_picture_ls,
.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_BGR24, AV_PIX_FMT_RGB24,
.init = encode_init,
.encode2 = encode_frame,
.close = encode_end,
- .capabilities = CODEC_CAP_FRAME_THREADS | CODEC_CAP_INTRA_ONLY,
++ .capabilities = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_BGR24, AV_PIX_FMT_NONE },
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
FF_CODEC_CAP_INIT_CLEANUP,
--- /dev/null
- .capabilities = CODEC_CAP_DR1,
+/*
+ * Xiph CELT decoder using libcelt
+ * Copyright (c) 2011 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <celt/celt.h>
+#include <celt/celt_header.h>
+#include "avcodec.h"
+#include "internal.h"
+#include "libavutil/intreadwrite.h"
+
+/** Private decoder state wrapping the libcelt mode/decoder pair. */
+struct libcelt_context {
+    CELTMode *mode;     // libcelt mode object, owned (destroyed in close)
+    CELTDecoder *dec;   // libcelt decoder instance, owned
+    int discard;        // leading samples to drop (codec delay from extradata)
+};
+
+/** Map a libcelt error code to the closest negative AVERROR value. */
+static int ff_celt_error_to_averror(int err)
+{
+    switch (err) {
+        case CELT_BAD_ARG:          return AVERROR(EINVAL);
+#ifdef CELT_BUFFER_TOO_SMALL
+        case CELT_BUFFER_TOO_SMALL: return AVERROR(ENOBUFS);
+#endif
+        case CELT_INTERNAL_ERROR:   return AVERROR(EFAULT);
+        case CELT_CORRUPTED_DATA:   return AVERROR_INVALIDDATA;
+        case CELT_UNIMPLEMENTED:    return AVERROR(ENOSYS);
+#ifdef ENOTRECOVERABLE
+        case CELT_INVALID_STATE:    return AVERROR(ENOTRECOVERABLE);
+#endif
+        case CELT_ALLOC_FAIL:       return AVERROR(ENOMEM);
+        default:                    return AVERROR(EINVAL);
+    }
+}
+
+/**
+ * Probe the bitstream version supported by the linked libcelt by building a
+ * dummy header — there is no direct API for this, hence the "hack".
+ */
+static int ff_celt_bitstream_version_hack(CELTMode *mode)
+{
+    CELTHeader header = { .version_id = 0 };
+    celt_header_init(&header, mode, 960, 2);
+    return header.version_id;
+}
+
+/**
+ * Decoder init: create the libcelt mode and decoder, read the optional
+ * extradata (4 bytes: overlap/delay to discard; next 4: bitstream version).
+ * Output is always interleaved signed 16-bit PCM.
+ * @return 0 on success or a negative AVERROR code
+ */
+static av_cold int libcelt_dec_init(AVCodecContext *c)
+{
+    struct libcelt_context *celt = c->priv_data;
+    int err;
+
+    // reject parameters that would overflow the per-frame PCM buffer
+    if (!c->channels || !c->frame_size ||
+        c->frame_size > INT_MAX / sizeof(int16_t) / c->channels)
+        return AVERROR(EINVAL);
+    celt->mode = celt_mode_create(c->sample_rate, c->frame_size, &err);
+    if (!celt->mode)
+        return ff_celt_error_to_averror(err);
+    celt->dec = celt_decoder_create_custom(celt->mode, c->channels, &err);
+    if (!celt->dec) {
+        celt_mode_destroy(celt->mode);
+        return ff_celt_error_to_averror(err);
+    }
+    if (c->extradata_size >= 4) {
+        celt->discard = AV_RL32(c->extradata);
+        if (celt->discard < 0 || celt->discard >= c->frame_size) {
+            av_log(c, AV_LOG_WARNING,
+                   "Invalid overlap (%d), ignored.\n", celt->discard);
+            celt->discard = 0;
+        }
+    }
+    if (c->extradata_size >= 8) {
+        unsigned version = AV_RL32(c->extradata + 4);
+        unsigned lib_version = ff_celt_bitstream_version_hack(celt->mode);
+        if (version != lib_version)
+            av_log(c, AV_LOG_WARNING,
+                   "CELT bitstream version 0x%x may be "
+                   "improperly decoded by libcelt for version 0x%x.\n",
+                   version, lib_version);
+    }
+    c->sample_fmt = AV_SAMPLE_FMT_S16;
+    return 0;
+}
+
+/** Decoder close: destroy the libcelt decoder and mode. Always returns 0. */
+static av_cold int libcelt_dec_close(AVCodecContext *c)
+{
+    struct libcelt_context *celt = c->priv_data;
+
+    celt_decoder_destroy(celt->dec);
+    celt_mode_destroy(celt->mode);
+    return 0;
+}
+
+/**
+ * Decode one CELT packet into an AVFrame of S16 samples. The first decoded
+ * frame drops 'discard' leading samples (codec delay), once.
+ * @return consumed bytes (pkt->size) on success, negative AVERROR on failure
+ */
+static int libcelt_dec_decode(AVCodecContext *c, void *data,
+                              int *got_frame_ptr, AVPacket *pkt)
+{
+    struct libcelt_context *celt = c->priv_data;
+    AVFrame *frame = data;
+    int err;
+    int16_t *pcm;
+
+    frame->nb_samples = c->frame_size;
+    if ((err = ff_get_buffer(c, frame, 0)) < 0)
+        return err;
+    pcm = (int16_t *)frame->data[0];
+    err = celt_decode(celt->dec, pkt->data, pkt->size, pcm, c->frame_size);
+    if (err < 0)
+        return ff_celt_error_to_averror(err);
+    if (celt->discard) {
+        frame->nb_samples -= celt->discard;
+        memmove(pcm, pcm + celt->discard * c->channels,
+                frame->nb_samples * c->channels * sizeof(int16_t));
+        celt->discard = 0; // only applies to the first frame
+    }
+    *got_frame_ptr = 1;
+    return pkt->size;
+}
+
+AVCodec ff_libcelt_decoder = {
+ .name = "libcelt",
+ .long_name = NULL_IF_CONFIG_SMALL("Xiph CELT decoder using libcelt"),
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_CELT,
+ .priv_data_size = sizeof(struct libcelt_context),
+ .init = libcelt_dec_init,
+ .close = libcelt_dec_close,
+ .decode = libcelt_dec_decode,
+ .capabilities = AV_CODEC_CAP_DR1,
+};
.close = libgsm_decode_close,
.decode = libgsm_decode_frame,
.flush = libgsm_flush,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
};
-
+#endif
+#if CONFIG_LIBGSM_MS_DECODER
AVCodec ff_libgsm_ms_decoder = {
.name = "libgsm_ms",
.long_name = NULL_IF_CONFIG_SMALL("libgsm GSM Microsoft variant"),
.close = libgsm_decode_close,
.decode = libgsm_decode_frame,
.flush = libgsm_flush,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
};
+#endif
--- /dev/null
- .capabilities = CODEC_CAP_DELAY,
+/*
+ * libkvazaar encoder
+ *
+ * Copyright (c) 2015 Tampere University of Technology
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <kvazaar.h>
+#include <string.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/dict.h"
+#include "libavutil/opt.h"
+#include "avcodec.h"
+#include "internal.h"
+
+typedef struct LibkvazaarContext {
+ const AVClass *class;
+
+ const kvz_api *api;
+ kvz_encoder *encoder;
+ kvz_config *config;
+
+ char *kvz_params;
+} LibkvazaarContext;
+
+static av_cold int libkvazaar_init(AVCodecContext *avctx)
+{
+ int retval = 0;
+ kvz_config *cfg = NULL;
+ kvz_encoder *enc = NULL;
+ const kvz_api *const api = kvz_api_get(8);
+
+ LibkvazaarContext *const ctx = avctx->priv_data;
+
+ // Kvazaar requires width and height to be multiples of eight.
+ if (avctx->width % 8 || avctx->height % 8) {
+ av_log(avctx, AV_LOG_ERROR, "Video dimensions are not a multiple of 8.\n");
+ retval = AVERROR_INVALIDDATA;
+ goto done;
+ }
+
+ cfg = api->config_alloc();
+ if (!cfg) {
+ av_log(avctx, AV_LOG_ERROR, "Could not allocate kvazaar config structure.\n");
+ retval = AVERROR(ENOMEM);
+ goto done;
+ }
+
+ if (!api->config_init(cfg)) {
+ av_log(avctx, AV_LOG_ERROR, "Could not initialize kvazaar config structure.\n");
+ retval = AVERROR_EXTERNAL;
+ goto done;
+ }
+
+ cfg->width = avctx->width;
+ cfg->height = avctx->height;
+ cfg->framerate =
+ (double)(avctx->time_base.num * avctx->ticks_per_frame) / avctx->time_base.den;
+ cfg->threads = avctx->thread_count;
+ cfg->target_bitrate = avctx->bit_rate;
+ cfg->vui.sar_width = avctx->sample_aspect_ratio.num;
+ cfg->vui.sar_height = avctx->sample_aspect_ratio.den;
+
+ if (ctx->kvz_params) {
+ AVDictionary *dict = NULL;
+ if (!av_dict_parse_string(&dict, ctx->kvz_params, "=", ",", 0)) {
+ AVDictionaryEntry *entry = NULL;
+ while ((entry = av_dict_get(dict, "", entry, AV_DICT_IGNORE_SUFFIX))) {
+ if (!api->config_parse(cfg, entry->key, entry->value)) {
+ av_log(avctx, AV_LOG_WARNING,
+ "Invalid option: %s=%s.\n",
+ entry->key, entry->value);
+ }
+ }
+ av_dict_free(&dict);
+ }
+ }
+
+ enc = api->encoder_open(cfg);
+ if (!enc) {
+ av_log(avctx, AV_LOG_ERROR, "Could not open kvazaar encoder.\n");
+ retval = AVERROR_EXTERNAL;
+ goto done;
+ }
+
+ ctx->api = api;
+ ctx->encoder = enc;
+ ctx->config = cfg;
+ enc = NULL;
+ cfg = NULL;
+
+done:
+ if (cfg) api->config_destroy(cfg);
+ if (enc) api->encoder_close(enc);
+
+ return retval;
+}
+
+static av_cold int libkvazaar_close(AVCodecContext *avctx)
+{
+ LibkvazaarContext *ctx = avctx->priv_data;
+ if (!ctx->api) return 0;
+
+ if (ctx->encoder) {
+ ctx->api->encoder_close(ctx->encoder);
+ ctx->encoder = NULL;
+ }
+
+ if (ctx->config) {
+ ctx->api->config_destroy(ctx->config);
+ ctx->config = NULL;
+ }
+
+ return 0;
+}
+
+static int libkvazaar_encode(AVCodecContext *avctx,
+ AVPacket *avpkt,
+ const AVFrame *frame,
+ int *got_packet_ptr)
+{
+ int retval = 0;
+ kvz_picture *img_in = NULL;
+ kvz_data_chunk *data_out = NULL;
+ uint32_t len_out = 0;
+ LibkvazaarContext *ctx = avctx->priv_data;
+
+ *got_packet_ptr = 0;
+
+ if (frame) {
+ int i = 0;
+
+ av_assert0(frame->width == ctx->config->width);
+ av_assert0(frame->height == ctx->config->height);
+ av_assert0(frame->format == avctx->pix_fmt);
+
+ // Allocate input picture for kvazaar.
+ img_in = ctx->api->picture_alloc(frame->width, frame->height);
+ if (!img_in) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to allocate picture.\n");
+ retval = AVERROR(ENOMEM);
+ goto done;
+ }
+
+ // Copy pixels from frame to img_in.
+ for (i = 0; i < 3; ++i) {
+ uint8_t *dst = img_in->data[i];
+ uint8_t *src = frame->data[i];
+ int width = (i == 0) ? frame->width : (frame->width / 2);
+ int height = (i == 0) ? frame->height : (frame->height / 2);
+ int y = 0;
+ for (y = 0; y < height; ++y) {
+ memcpy(dst, src, width);
+ src += frame->linesize[i];
+ dst += width;
+ }
+ }
+ }
+
+ if (!ctx->api->encoder_encode(ctx->encoder, img_in, &data_out, &len_out, NULL)) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to encode frame.\n");
+ retval = AVERROR_EXTERNAL;
+ goto done;
+ }
+
+ if (data_out) {
+ kvz_data_chunk *chunk = NULL;
+ uint64_t written = 0;
+
+ retval = ff_alloc_packet(avpkt, len_out);
+ if (retval < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to allocate output packet.\n");
+ goto done;
+ }
+
+ for (chunk = data_out; chunk != NULL; chunk = chunk->next) {
+ av_assert0(written + chunk->len <= len_out);
+ memcpy(avpkt->data + written, chunk->data, chunk->len);
+ written += chunk->len;
+ }
+ *got_packet_ptr = 1;
+
+ ctx->api->chunk_free(data_out);
+ data_out = NULL;
+ }
+
+done:
+ if (img_in) ctx->api->picture_free(img_in);
+ if (data_out) ctx->api->chunk_free(data_out);
+ return retval;
+}
+
+static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_NONE
+};
+
+static const AVOption options[] = {
+ { "kvazaar-params", "Set kvazaar parameters as a comma-separated list of name=value pairs.",
+ offsetof(LibkvazaarContext, kvz_params), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0,
+ AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM },
+ { NULL },
+};
+
+static const AVClass class = {
+ .class_name = "libkvazaar",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+static const AVCodecDefault defaults[] = {
+ { "b", "0" },
+ { NULL },
+};
+
+AVCodec ff_libkvazaar_encoder = {
+ .name = "libkvazaar",
+ .long_name = NULL_IF_CONFIG_SMALL("libkvazaar H.265 / HEVC"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_HEVC,
+ .capabilities = AV_CODEC_CAP_DELAY,
+ .pix_fmts = pix_fmts,
+
+ .priv_class = &class,
+ .priv_data_size = sizeof(LibkvazaarContext),
+ .defaults = defaults,
+
+ .init = libkvazaar_init,
+ .encode2 = libkvazaar_encode,
+ .close = libkvazaar_close,
+};
.init = amr_nb_encode_init,
.encode2 = amr_nb_encode_frame,
.close = amr_nb_encode_close,
- .capabilities = CODEC_CAP_DELAY | CODEC_CAP_SMALL_LAST_FRAME,
+ .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SMALL_LAST_FRAME,
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
- .priv_class = &class,
+ .priv_class = &amrnb_class,
};
#endif /* CONFIG_LIBOPENCORE_AMRNB_ENCODER */
return ret;
}
- codec->capabilities |= CODEC_CAP_EXPERIMENTAL;
+static av_cold void libopenjpeg_static_init(AVCodec *codec)
+{
+ const char *version = opj_version();
+ int major, minor;
+
+ if (sscanf(version, "%d.%d", &major, &minor) == 2 && 1000*major + minor <= 1003)
+ codec->capabilities |= AV_CODEC_CAP_EXPERIMENTAL;
+}
+
#define OFFSET(x) offsetof(LibOpenJPEGContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
.priv_data_size = sizeof(LibOpenJPEGContext),
.init = libopenjpeg_decode_init,
.decode = libopenjpeg_decode_frame,
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
+ .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
- .priv_class = &class,
+ .max_lowres = 31,
+ .priv_class = &openjpeg_class,
+ .init_static_data = libopenjpeg_static_init,
};
.init = libopenjpeg_encode_init,
.encode2 = libopenjpeg_encode_frame,
.close = libopenjpeg_encode_close,
- .capabilities = CODEC_CAP_FRAME_THREADS | CODEC_CAP_INTRA_ONLY,
- .capabilities = 0,
+ .capabilities = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_RGB24, AV_PIX_FMT_RGBA, AV_PIX_FMT_RGB48,
- AV_PIX_FMT_RGBA64,
- AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16, AV_PIX_FMT_YA8,
+ AV_PIX_FMT_RGBA64, AV_PIX_FMT_GBR24P,
+ AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_YA8, AV_PIX_FMT_GRAY16, AV_PIX_FMT_YA16,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P,
- AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
- AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
+ AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVA422P,
+ AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVA444P,
AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
+ AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
+ AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
+ AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
+ AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
+ AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
+ AV_PIX_FMT_XYZ12,
AV_PIX_FMT_NONE
},
- .priv_class = &class,
+ .priv_class = &openjpeg_class,
};
--- /dev/null
- .capabilities = CODEC_CAP_DELAY,
+/*
+ * Interface to libshine for mp3 encoding
+ * Copyright (c) 2012 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <shine/layer3.h>
+
+#include "libavutil/intreadwrite.h"
+#include "audio_frame_queue.h"
+#include "avcodec.h"
+#include "internal.h"
+#include "mpegaudio.h"
+#include "mpegaudiodecheader.h"
+
+#define BUFFER_SIZE (4096 * 20)
+
+typedef struct SHINEContext {
+ shine_config_t config;
+ shine_t shine;
+ uint8_t buffer[BUFFER_SIZE];
+ int buffer_index;
+ AudioFrameQueue afq;
+} SHINEContext;
+
+static av_cold int libshine_encode_init(AVCodecContext *avctx)
+{
+ SHINEContext *s = avctx->priv_data;
+
+ if (avctx->channels <= 0 || avctx->channels > 2){
+ av_log(avctx, AV_LOG_ERROR, "only mono or stereo is supported\n");
+ return AVERROR(EINVAL);
+ }
+
+ shine_set_config_mpeg_defaults(&s->config.mpeg);
+ if (avctx->bit_rate)
+ s->config.mpeg.bitr = avctx->bit_rate / 1000;
+ s->config.mpeg.mode = avctx->channels == 2 ? STEREO : MONO;
+ s->config.wave.samplerate = avctx->sample_rate;
+ s->config.wave.channels = avctx->channels == 2 ? PCM_STEREO : PCM_MONO;
+ if (shine_check_config(s->config.wave.samplerate, s->config.mpeg.bitr) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "invalid configuration\n");
+ return AVERROR(EINVAL);
+ }
+ s->shine = shine_initialise(&s->config);
+ if (!s->shine)
+ return AVERROR(ENOMEM);
+ avctx->frame_size = shine_samples_per_pass(s->shine);
+ ff_af_queue_init(avctx, &s->afq);
+ return 0;
+}
+
+static int libshine_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
+ const AVFrame *frame, int *got_packet_ptr)
+{
+ SHINEContext *s = avctx->priv_data;
+ MPADecodeHeader hdr;
+ unsigned char *data;
+ int written;
+ int ret, len;
+
+ if (frame)
+ data = shine_encode_buffer(s->shine, (int16_t **)frame->data, &written);
+ else
+ data = shine_flush(s->shine, &written);
+ if (written < 0)
+ return -1;
+ if (written > 0) {
+ if (s->buffer_index + written > BUFFER_SIZE) {
+ av_log(avctx, AV_LOG_ERROR, "internal buffer too small\n");
+ return AVERROR_BUG;
+ }
+ memcpy(s->buffer + s->buffer_index, data, written);
+ s->buffer_index += written;
+ }
+ if (frame) {
+ if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
+ return ret;
+ }
+
+ if (s->buffer_index < 4 || !s->afq.frame_count)
+ return 0;
+ if (avpriv_mpegaudio_decode_header(&hdr, AV_RB32(s->buffer))) {
+ av_log(avctx, AV_LOG_ERROR, "free format output not supported\n");
+ return -1;
+ }
+
+ len = hdr.frame_size;
+ if (len <= s->buffer_index) {
+ if ((ret = ff_alloc_packet2(avctx, avpkt, len)))
+ return ret;
+ memcpy(avpkt->data, s->buffer, len);
+ s->buffer_index -= len;
+ memmove(s->buffer, s->buffer + len, s->buffer_index);
+
+ ff_af_queue_remove(&s->afq, avctx->frame_size, &avpkt->pts,
+ &avpkt->duration);
+
+ avpkt->size = len;
+ *got_packet_ptr = 1;
+ }
+ return 0;
+}
+
+static av_cold int libshine_encode_close(AVCodecContext *avctx)
+{
+ SHINEContext *s = avctx->priv_data;
+
+ ff_af_queue_close(&s->afq);
+ shine_close(s->shine);
+ return 0;
+}
+
+static const int libshine_sample_rates[] = {
+ 44100, 48000, 32000, 0
+};
+
+AVCodec ff_libshine_encoder = {
+ .name = "libshine",
+ .long_name = NULL_IF_CONFIG_SMALL("libshine MP3 (MPEG audio layer 3)"),
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_MP3,
+ .priv_data_size = sizeof(SHINEContext),
+ .init = libshine_encode_init,
+ .encode2 = libshine_encode_frame,
+ .close = libshine_encode_close,
+ .capabilities = AV_CODEC_CAP_DELAY,
+ .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16P,
+ AV_SAMPLE_FMT_NONE },
+ .supported_samplerates = libshine_sample_rates,
+ .channel_layouts = (const uint64_t[]) { AV_CH_LAYOUT_MONO,
+ AV_CH_LAYOUT_STEREO,
+ 0 },
+};
--- /dev/null
- CODEC_CAP_DELAY,
+/*
+ * Interface to the Android Stagefright library for
+ * H/W accelerated H.264 decoding
+ *
+ * Copyright (C) 2011 Mohamed Naufal
+ * Copyright (C) 2011 Martin Storsjö
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <binder/ProcessState.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/OMXClient.h>
+#include <media/stagefright/OMXCodec.h>
+#include <utils/List.h>
+#include <new>
+#include <map>
+
+extern "C" {
+#include "avcodec.h"
+#include "libavutil/imgutils.h"
+#include "internal.h"
+}
+
+#define OMX_QCOM_COLOR_FormatYVU420SemiPlanar 0x7FA30C00
+
+using namespace android;
+
+struct Frame {
+ status_t status;
+ size_t size;
+ int64_t time;
+ int key;
+ uint8_t *buffer;
+ AVFrame *vframe;
+};
+
+struct TimeStamp {
+ int64_t pts;
+ int64_t reordered_opaque;
+};
+
+class CustomSource;
+
+struct StagefrightContext {
+ AVCodecContext *avctx;
+ AVBitStreamFilterContext *bsfc;
+ uint8_t* orig_extradata;
+ int orig_extradata_size;
+ sp<MediaSource> *source;
+ List<Frame*> *in_queue, *out_queue;
+ pthread_mutex_t in_mutex, out_mutex;
+ pthread_cond_t condition;
+ pthread_t decode_thread_id;
+
+ Frame *end_frame;
+ bool source_done;
+ volatile sig_atomic_t thread_started, thread_exited, stop_decode;
+
+ AVFrame *prev_frame;
+ std::map<int64_t, TimeStamp> *ts_map;
+ int64_t frame_index;
+
+ uint8_t *dummy_buf;
+ int dummy_bufsize;
+
+ OMXClient *client;
+ sp<MediaSource> *decoder;
+ const char *decoder_component;
+};
+
+class CustomSource : public MediaSource {
+public:
+ CustomSource(AVCodecContext *avctx, sp<MetaData> meta) {
+ s = (StagefrightContext*)avctx->priv_data;
+ source_meta = meta;
+ frame_size = (avctx->width * avctx->height * 3) / 2;
+ buf_group.add_buffer(new MediaBuffer(frame_size));
+ }
+
+ virtual sp<MetaData> getFormat() {
+ return source_meta;
+ }
+
+ virtual status_t start(MetaData *params) {
+ return OK;
+ }
+
+ virtual status_t stop() {
+ return OK;
+ }
+
+ virtual status_t read(MediaBuffer **buffer,
+ const MediaSource::ReadOptions *options) {
+ Frame *frame;
+ status_t ret;
+
+ if (s->thread_exited)
+ return ERROR_END_OF_STREAM;
+ pthread_mutex_lock(&s->in_mutex);
+
+ while (s->in_queue->empty())
+ pthread_cond_wait(&s->condition, &s->in_mutex);
+
+ frame = *s->in_queue->begin();
+ ret = frame->status;
+
+ if (ret == OK) {
+ ret = buf_group.acquire_buffer(buffer);
+ if (ret == OK) {
+ memcpy((*buffer)->data(), frame->buffer, frame->size);
+ (*buffer)->set_range(0, frame->size);
+ (*buffer)->meta_data()->clear();
+ (*buffer)->meta_data()->setInt32(kKeyIsSyncFrame,frame->key);
+ (*buffer)->meta_data()->setInt64(kKeyTime, frame->time);
+ } else {
+ av_log(s->avctx, AV_LOG_ERROR, "Failed to acquire MediaBuffer\n");
+ }
+ av_freep(&frame->buffer);
+ }
+
+ s->in_queue->erase(s->in_queue->begin());
+ pthread_mutex_unlock(&s->in_mutex);
+
+ av_freep(&frame);
+ return ret;
+ }
+
+private:
+ MediaBufferGroup buf_group;
+ sp<MetaData> source_meta;
+ StagefrightContext *s;
+ int frame_size;
+};
+
+void* decode_thread(void *arg)
+{
+ AVCodecContext *avctx = (AVCodecContext*)arg;
+ StagefrightContext *s = (StagefrightContext*)avctx->priv_data;
+ const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(avctx->pix_fmt);
+ Frame* frame;
+ MediaBuffer *buffer;
+ int32_t w, h;
+ int decode_done = 0;
+ int ret;
+ int src_linesize[3];
+ const uint8_t *src_data[3];
+ int64_t out_frame_index = 0;
+
+ do {
+ buffer = NULL;
+ frame = (Frame*)av_mallocz(sizeof(Frame));
+ if (!frame) {
+ frame = s->end_frame;
+ frame->status = AVERROR(ENOMEM);
+ decode_done = 1;
+ s->end_frame = NULL;
+ goto push_frame;
+ }
+ frame->status = (*s->decoder)->read(&buffer);
+ if (frame->status == OK) {
+ sp<MetaData> outFormat = (*s->decoder)->getFormat();
+ outFormat->findInt32(kKeyWidth , &w);
+ outFormat->findInt32(kKeyHeight, &h);
+ frame->vframe = av_frame_alloc();
+ if (!frame->vframe) {
+ frame->status = AVERROR(ENOMEM);
+ decode_done = 1;
+ buffer->release();
+ goto push_frame;
+ }
+ ret = ff_get_buffer(avctx, frame->vframe, AV_GET_BUFFER_FLAG_REF);
+ if (ret < 0) {
+ frame->status = ret;
+ decode_done = 1;
+ buffer->release();
+ goto push_frame;
+ }
+
+ // The OMX.SEC decoder doesn't signal the modified width/height
+ if (s->decoder_component && !strncmp(s->decoder_component, "OMX.SEC", 7) &&
+ (w & 15 || h & 15)) {
+ if (((w + 15)&~15) * ((h + 15)&~15) * 3/2 == buffer->range_length()) {
+ w = (w + 15)&~15;
+ h = (h + 15)&~15;
+ }
+ }
+
+ if (!avctx->width || !avctx->height || avctx->width > w || avctx->height > h) {
+ avctx->width = w;
+ avctx->height = h;
+ }
+
+ src_linesize[0] = av_image_get_linesize(avctx->pix_fmt, w, 0);
+ src_linesize[1] = av_image_get_linesize(avctx->pix_fmt, w, 1);
+ src_linesize[2] = av_image_get_linesize(avctx->pix_fmt, w, 2);
+
+ src_data[0] = (uint8_t*)buffer->data();
+ src_data[1] = src_data[0] + src_linesize[0] * h;
+ src_data[2] = src_data[1] + src_linesize[1] * -(-h>>pix_desc->log2_chroma_h);
+ av_image_copy(frame->vframe->data, frame->vframe->linesize,
+ src_data, src_linesize,
+ avctx->pix_fmt, avctx->width, avctx->height);
+
+ buffer->meta_data()->findInt64(kKeyTime, &out_frame_index);
+ if (out_frame_index && s->ts_map->count(out_frame_index) > 0) {
+ frame->vframe->pts = (*s->ts_map)[out_frame_index].pts;
+ frame->vframe->reordered_opaque = (*s->ts_map)[out_frame_index].reordered_opaque;
+ s->ts_map->erase(out_frame_index);
+ }
+ buffer->release();
+ } else if (frame->status == INFO_FORMAT_CHANGED) {
+ if (buffer)
+ buffer->release();
+ av_free(frame);
+ continue;
+ } else {
+ decode_done = 1;
+ }
+push_frame:
+ while (true) {
+ pthread_mutex_lock(&s->out_mutex);
+ if (s->out_queue->size() >= 10) {
+ pthread_mutex_unlock(&s->out_mutex);
+ usleep(10000);
+ continue;
+ }
+ break;
+ }
+ s->out_queue->push_back(frame);
+ pthread_mutex_unlock(&s->out_mutex);
+ } while (!decode_done && !s->stop_decode);
+
+ s->thread_exited = true;
+
+ return 0;
+}
+
+static av_cold int Stagefright_init(AVCodecContext *avctx)
+{
+ StagefrightContext *s = (StagefrightContext*)avctx->priv_data;
+ sp<MetaData> meta, outFormat;
+ int32_t colorFormat = 0;
+ int ret;
+
+ if (!avctx->extradata || !avctx->extradata_size || avctx->extradata[0] != 1)
+ return -1;
+
+ s->avctx = avctx;
+ s->bsfc = av_bitstream_filter_init("h264_mp4toannexb");
+ if (!s->bsfc) {
+ av_log(avctx, AV_LOG_ERROR, "Cannot open the h264_mp4toannexb BSF!\n");
+ return -1;
+ }
+
+ s->orig_extradata_size = avctx->extradata_size;
+ s->orig_extradata = (uint8_t*) av_mallocz(avctx->extradata_size +
+ FF_INPUT_BUFFER_PADDING_SIZE);
+ if (!s->orig_extradata) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ memcpy(s->orig_extradata, avctx->extradata, avctx->extradata_size);
+
+ meta = new MetaData;
+ if (!meta) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+ meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
+ meta->setInt32(kKeyWidth, avctx->width);
+ meta->setInt32(kKeyHeight, avctx->height);
+ meta->setData(kKeyAVCC, kTypeAVCC, avctx->extradata, avctx->extradata_size);
+
+ android::ProcessState::self()->startThreadPool();
+
+ s->source = new sp<MediaSource>();
+ *s->source = new CustomSource(avctx, meta);
+ s->in_queue = new List<Frame*>;
+ s->out_queue = new List<Frame*>;
+ s->ts_map = new std::map<int64_t, TimeStamp>;
+ s->client = new OMXClient;
+ s->end_frame = (Frame*)av_mallocz(sizeof(Frame));
+ if (s->source == NULL || !s->in_queue || !s->out_queue || !s->client ||
+ !s->ts_map || !s->end_frame) {
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ if (s->client->connect() != OK) {
+ av_log(avctx, AV_LOG_ERROR, "Cannot connect OMX client\n");
+ ret = -1;
+ goto fail;
+ }
+
+ s->decoder = new sp<MediaSource>();
+ *s->decoder = OMXCodec::Create(s->client->interface(), meta,
+ false, *s->source, NULL,
+ OMXCodec::kClientNeedsFramebuffer);
+ if ((*s->decoder)->start() != OK) {
+ av_log(avctx, AV_LOG_ERROR, "Cannot start decoder\n");
+ ret = -1;
+ s->client->disconnect();
+ goto fail;
+ }
+
+ outFormat = (*s->decoder)->getFormat();
+ outFormat->findInt32(kKeyColorFormat, &colorFormat);
+ if (colorFormat == OMX_QCOM_COLOR_FormatYVU420SemiPlanar ||
+ colorFormat == OMX_COLOR_FormatYUV420SemiPlanar)
+ avctx->pix_fmt = AV_PIX_FMT_NV21;
+ else if (colorFormat == OMX_COLOR_FormatYCbYCr)
+ avctx->pix_fmt = AV_PIX_FMT_YUYV422;
+ else if (colorFormat == OMX_COLOR_FormatCbYCrY)
+ avctx->pix_fmt = AV_PIX_FMT_UYVY422;
+ else
+ avctx->pix_fmt = AV_PIX_FMT_YUV420P;
+
+ outFormat->findCString(kKeyDecoderComponent, &s->decoder_component);
+ if (s->decoder_component)
+ s->decoder_component = av_strdup(s->decoder_component);
+
+ pthread_mutex_init(&s->in_mutex, NULL);
+ pthread_mutex_init(&s->out_mutex, NULL);
+ pthread_cond_init(&s->condition, NULL);
+ return 0;
+
+fail:
+ av_bitstream_filter_close(s->bsfc);
+ av_freep(&s->orig_extradata);
+ av_freep(&s->end_frame);
+ delete s->in_queue;
+ delete s->out_queue;
+ delete s->ts_map;
+ delete s->client;
+ return ret;
+}
+
+static int Stagefright_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame, AVPacket *avpkt)
+{
+ StagefrightContext *s = (StagefrightContext*)avctx->priv_data;
+ Frame *frame;
+ status_t status;
+ int orig_size = avpkt->size;
+ AVPacket pkt = *avpkt;
+ AVFrame *ret_frame;
+
+ if (!s->thread_started) {
+ if(pthread_create(&s->decode_thread_id, NULL, &decode_thread, avctx))
+ return AVERROR(ENOMEM);
+ s->thread_started = true;
+ }
+
+ if (avpkt && avpkt->data) {
+ av_bitstream_filter_filter(s->bsfc, avctx, NULL, &pkt.data, &pkt.size,
+ avpkt->data, avpkt->size, avpkt->flags & AV_PKT_FLAG_KEY);
+ avpkt = &pkt;
+ }
+
+ if (!s->source_done) {
+ if(!s->dummy_buf) {
+ s->dummy_buf = (uint8_t*)av_malloc(avpkt->size);
+ if (!s->dummy_buf)
+ return AVERROR(ENOMEM);
+ s->dummy_bufsize = avpkt->size;
+ memcpy(s->dummy_buf, avpkt->data, avpkt->size);
+ }
+
+ frame = (Frame*)av_mallocz(sizeof(Frame));
+ if (avpkt->data) {
+ frame->status = OK;
+ frame->size = avpkt->size;
+ frame->key = avpkt->flags & AV_PKT_FLAG_KEY ? 1 : 0;
+ frame->buffer = (uint8_t*)av_malloc(avpkt->size);
+ if (!frame->buffer) {
+ av_freep(&frame);
+ return AVERROR(ENOMEM);
+ }
+ uint8_t *ptr = avpkt->data;
+ // The OMX.SEC decoder fails without this.
+ if (avpkt->size == orig_size + avctx->extradata_size) {
+ ptr += avctx->extradata_size;
+ frame->size = orig_size;
+ }
+ memcpy(frame->buffer, ptr, orig_size);
+ if (avpkt == &pkt)
+ av_free(avpkt->data);
+
+ frame->time = ++s->frame_index;
+ (*s->ts_map)[s->frame_index].pts = avpkt->pts;
+ (*s->ts_map)[s->frame_index].reordered_opaque = avctx->reordered_opaque;
+ } else {
+ frame->status = ERROR_END_OF_STREAM;
+ s->source_done = true;
+ }
+
+ while (true) {
+ if (s->thread_exited) {
+ s->source_done = true;
+ break;
+ }
+ pthread_mutex_lock(&s->in_mutex);
+ if (s->in_queue->size() >= 10) {
+ pthread_mutex_unlock(&s->in_mutex);
+ usleep(10000);
+ continue;
+ }
+ s->in_queue->push_back(frame);
+ pthread_cond_signal(&s->condition);
+ pthread_mutex_unlock(&s->in_mutex);
+ break;
+ }
+ }
+ while (true) {
+ pthread_mutex_lock(&s->out_mutex);
+ if (!s->out_queue->empty()) break;
+ pthread_mutex_unlock(&s->out_mutex);
+ if (s->source_done) {
+ usleep(10000);
+ continue;
+ } else {
+ return orig_size;
+ }
+ }
+
+ frame = *s->out_queue->begin();
+ s->out_queue->erase(s->out_queue->begin());
+ pthread_mutex_unlock(&s->out_mutex);
+
+ ret_frame = frame->vframe;
+ status = frame->status;
+ av_freep(&frame);
+
+ if (status == ERROR_END_OF_STREAM)
+ return 0;
+ if (status != OK) {
+ if (status == AVERROR(ENOMEM))
+ return status;
+ av_log(avctx, AV_LOG_ERROR, "Decode failed: %x\n", status);
+ return -1;
+ }
+
+ if (s->prev_frame)
+ av_frame_free(&s->prev_frame);
+ s->prev_frame = ret_frame;
+
+ *got_frame = 1;
+ *(AVFrame*)data = *ret_frame;
+ return orig_size;
+}
+
+static av_cold int Stagefright_close(AVCodecContext *avctx)
+{
+ StagefrightContext *s = (StagefrightContext*)avctx->priv_data;
+ Frame *frame;
+
+ if (s->thread_started) {
+ if (!s->thread_exited) {
+ s->stop_decode = 1;
+
+ // Make sure decode_thread() doesn't get stuck
+ pthread_mutex_lock(&s->out_mutex);
+ while (!s->out_queue->empty()) {
+ frame = *s->out_queue->begin();
+ s->out_queue->erase(s->out_queue->begin());
+ if (frame->vframe)
+ av_frame_free(&frame->vframe);
+ av_freep(&frame);
+ }
+ pthread_mutex_unlock(&s->out_mutex);
+
+ // Feed a dummy frame prior to signalling EOF.
+ // This is required to terminate the decoder(OMX.SEC)
+ // when only one frame is read during stream info detection.
+ if (s->dummy_buf && (frame = (Frame*)av_mallocz(sizeof(Frame)))) {
+ frame->status = OK;
+ frame->size = s->dummy_bufsize;
+ frame->key = 1;
+ frame->buffer = s->dummy_buf;
+ pthread_mutex_lock(&s->in_mutex);
+ s->in_queue->push_back(frame);
+ pthread_cond_signal(&s->condition);
+ pthread_mutex_unlock(&s->in_mutex);
+ s->dummy_buf = NULL;
+ }
+
+ pthread_mutex_lock(&s->in_mutex);
+ s->end_frame->status = ERROR_END_OF_STREAM;
+ s->in_queue->push_back(s->end_frame);
+ pthread_cond_signal(&s->condition);
+ pthread_mutex_unlock(&s->in_mutex);
+ s->end_frame = NULL;
+ }
+
+ pthread_join(s->decode_thread_id, NULL);
+
+ if (s->prev_frame)
+ av_frame_free(&s->prev_frame);
+
+ s->thread_started = false;
+ }
+
+ while (!s->in_queue->empty()) {
+ frame = *s->in_queue->begin();
+ s->in_queue->erase(s->in_queue->begin());
+ if (frame->size)
+ av_freep(&frame->buffer);
+ av_freep(&frame);
+ }
+
+ while (!s->out_queue->empty()) {
+ frame = *s->out_queue->begin();
+ s->out_queue->erase(s->out_queue->begin());
+ if (frame->vframe)
+ av_frame_free(&frame->vframe);
+ av_freep(&frame);
+ }
+
+ (*s->decoder)->stop();
+ s->client->disconnect();
+
+ if (s->decoder_component)
+ av_freep(&s->decoder_component);
+ av_freep(&s->dummy_buf);
+ av_freep(&s->end_frame);
+
+ // Reset the extradata back to the original mp4 format, so that
+ // the next invocation (both when decoding and when called from
+ // av_find_stream_info) get the original mp4 format extradata.
+ av_freep(&avctx->extradata);
+ avctx->extradata = s->orig_extradata;
+ avctx->extradata_size = s->orig_extradata_size;
+
+ delete s->in_queue;
+ delete s->out_queue;
+ delete s->ts_map;
+ delete s->client;
+ delete s->decoder;
+ delete s->source;
+
+ pthread_mutex_destroy(&s->in_mutex);
+ pthread_mutex_destroy(&s->out_mutex);
+ pthread_cond_destroy(&s->condition);
+ av_bitstream_filter_close(s->bsfc);
+ return 0;
+}
+
+AVCodec ff_libstagefright_h264_decoder = {
+ "libstagefright_h264",
+ NULL_IF_CONFIG_SMALL("libstagefright H.264"),
+ AVMEDIA_TYPE_VIDEO,
+ AV_CODEC_ID_H264,
+ AV_CODEC_CAP_DELAY,
+ NULL, //supported_framerates
+ NULL, //pix_fmts
+ NULL, //supported_samplerates
+ NULL, //sample_fmts
+ NULL, //channel_layouts
+ 0, //max_lowres
+ NULL, //priv_class
+ NULL, //profiles
+ sizeof(StagefrightContext),
+ NULL, //next
+ NULL, //init_thread_copy
+ NULL, //update_thread_context
+ NULL, //defaults
+ NULL, //init_static_data
+ Stagefright_init,
+ NULL, //encode
+ NULL, //encode2
+ Stagefright_decode_frame,
+ Stagefright_close,
+};
--- /dev/null
- CODEC_CAP_AUTO_THREADS | CODEC_CAP_LOSSLESS,
+/*
+ * Copyright (c) 2012 Derek Buitenhuis
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License as published by the Free Software Foundation;
+ * version 2 of the License.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Known FOURCCs:
+ * 'ULY0' (YCbCr 4:2:0), 'ULY2' (YCbCr 4:2:2), 'ULRG' (RGB), 'ULRA' (RGBA),
+ * 'ULH0' (YCbCr 4:2:0 BT.709), 'ULH2' (YCbCr 4:2:2 BT.709)
+ */
+
+extern "C" {
+#include "libavutil/avassert.h"
+#include "avcodec.h"
+#include "internal.h"
+}
+
+#include "libutvideo.h"
+#include "put_bits.h"
+
+static av_cold int utvideo_encode_init(AVCodecContext *avctx)
+{
+ UtVideoContext *utv = (UtVideoContext *)avctx->priv_data;
+ UtVideoExtra *info;
+ uint32_t flags, in_format;
+ int ret;
+
+ switch (avctx->pix_fmt) {
+ case AV_PIX_FMT_YUV420P:
+ in_format = UTVF_YV12;
+ avctx->bits_per_coded_sample = 12;
+ if (avctx->colorspace == AVCOL_SPC_BT709)
+ avctx->codec_tag = MKTAG('U', 'L', 'H', '0');
+ else
+ avctx->codec_tag = MKTAG('U', 'L', 'Y', '0');
+ break;
+ case AV_PIX_FMT_YUYV422:
+ in_format = UTVF_YUYV;
+ avctx->bits_per_coded_sample = 16;
+ if (avctx->colorspace == AVCOL_SPC_BT709)
+ avctx->codec_tag = MKTAG('U', 'L', 'H', '2');
+ else
+ avctx->codec_tag = MKTAG('U', 'L', 'Y', '2');
+ break;
+ case AV_PIX_FMT_BGR24:
+ in_format = UTVF_NFCC_BGR_BU;
+ avctx->bits_per_coded_sample = 24;
+ avctx->codec_tag = MKTAG('U', 'L', 'R', 'G');
+ break;
+ case AV_PIX_FMT_RGB32:
+ in_format = UTVF_NFCC_BGRA_BU;
+ avctx->bits_per_coded_sample = 32;
+ avctx->codec_tag = MKTAG('U', 'L', 'R', 'A');
+ break;
+ default:
+ return AVERROR(EINVAL);
+ }
+
+ /* Check before we alloc anything */
+ if (avctx->prediction_method != 0 && avctx->prediction_method != 2) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid prediction method.\n");
+ return AVERROR(EINVAL);
+ }
+
+ flags = ((avctx->prediction_method + 1) << 8) | (avctx->thread_count - 1);
+
+ avctx->priv_data = utv;
+
+ /* Alloc extradata buffer */
+ info = (UtVideoExtra *)av_malloc(sizeof(*info));
+
+ if (!info) {
+ av_log(avctx, AV_LOG_ERROR, "Could not allocate extradata buffer.\n");
+ return AVERROR(ENOMEM);
+ }
+
+ /*
+ * We use this buffer to hold the data that Ut Video returns,
+ * since we cannot decode planes separately with it.
+ */
+ ret = avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);
+ if (ret < 0) {
+ av_free(info);
+ return ret;
+ }
+ utv->buf_size = ret;
+
+ utv->buffer = (uint8_t *)av_malloc(utv->buf_size);
+
+ if (utv->buffer == NULL) {
+ av_log(avctx, AV_LOG_ERROR, "Could not allocate output buffer.\n");
+ av_free(info);
+ return AVERROR(ENOMEM);
+ }
+
+ /*
+ * Create a Ut Video instance. Since the function wants
+ * an "interface name" string, pass it the name of the lib.
+ */
+ utv->codec = CCodec::CreateInstance(UNFCC(avctx->codec_tag), "libavcodec");
+
+ /* Initialize encoder */
+ utv->codec->EncodeBegin(in_format, avctx->width, avctx->height,
+ CBGROSSWIDTH_WINDOWS);
+
+ /* Get extradata from encoder */
+ avctx->extradata_size = utv->codec->EncodeGetExtraDataSize();
+ utv->codec->EncodeGetExtraData(info, avctx->extradata_size, in_format,
+ avctx->width, avctx->height,
+ CBGROSSWIDTH_WINDOWS);
+ avctx->extradata = (uint8_t *)info;
+
+ /* Set flags */
+ utv->codec->SetState(&flags, sizeof(flags));
+
+ return 0;
+}
+
+static int utvideo_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+ const AVFrame *pic, int *got_packet)
+{
+ UtVideoContext *utv = (UtVideoContext *)avctx->priv_data;
+ int w = avctx->width, h = avctx->height;
+ int ret, rgb_size, i;
+ bool keyframe;
+ uint8_t *y, *u, *v;
+ uint8_t *dst;
+
+ /* Alloc buffer */
+ if ((ret = ff_alloc_packet2(avctx, pkt, utv->buf_size, 0)) < 0)
+ return ret;
+
+ dst = pkt->data;
+
+    /* Move the input data, if needed, into a Ut Video friendly buffer */
+ switch (avctx->pix_fmt) {
+ case AV_PIX_FMT_YUV420P:
+ y = utv->buffer;
+ u = y + w * h;
+ v = u + w * h / 4;
+ for (i = 0; i < h; i++) {
+ memcpy(y, pic->data[0] + i * pic->linesize[0], w);
+ y += w;
+ }
+ for (i = 0; i < h / 2; i++) {
+ memcpy(u, pic->data[2] + i * pic->linesize[2], w >> 1);
+ memcpy(v, pic->data[1] + i * pic->linesize[1], w >> 1);
+ u += w >> 1;
+ v += w >> 1;
+ }
+ break;
+ case AV_PIX_FMT_YUYV422:
+ for (i = 0; i < h; i++)
+ memcpy(utv->buffer + i * (w << 1),
+ pic->data[0] + i * pic->linesize[0], w << 1);
+ break;
+ case AV_PIX_FMT_BGR24:
+ case AV_PIX_FMT_RGB32:
+ /* Ut Video takes bottom-up BGR */
+ rgb_size = avctx->pix_fmt == AV_PIX_FMT_BGR24 ? 3 : 4;
+ for (i = 0; i < h; i++)
+ memcpy(utv->buffer + (h - i - 1) * w * rgb_size,
+ pic->data[0] + i * pic->linesize[0],
+ w * rgb_size);
+ break;
+ default:
+ return AVERROR(EINVAL);
+ }
+
+ /* Encode frame */
+ pkt->size = utv->codec->EncodeFrame(dst, &keyframe, utv->buffer);
+
+ if (!pkt->size) {
+ av_log(avctx, AV_LOG_ERROR, "EncodeFrame failed!\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ /*
+ * Ut Video is intra-only and every frame is a keyframe,
+ * and the API always returns true. In case something
+     * drastic changes in the future, such as inter support,
+ * assert that this is true.
+ */
+ av_assert2(keyframe == true);
+ avctx->coded_frame->key_frame = 1;
+ avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+
+ pkt->flags |= AV_PKT_FLAG_KEY;
+ *got_packet = 1;
+ return 0;
+}
+
+static av_cold int utvideo_encode_close(AVCodecContext *avctx)
+{
+ UtVideoContext *utv = (UtVideoContext *)avctx->priv_data;
+
+ av_freep(&avctx->extradata);
+ av_freep(&utv->buffer);
+
+ utv->codec->EncodeEnd();
+ CCodec::DeleteInstance(utv->codec);
+
+ return 0;
+}
+
+AVCodec ff_libutvideo_encoder = {
+ "libutvideo",
+ NULL_IF_CONFIG_SMALL("Ut Video"),
+ AVMEDIA_TYPE_VIDEO,
+ AV_CODEC_ID_UTVIDEO,
++ AV_CODEC_CAP_AUTO_THREADS | AV_CODEC_CAP_LOSSLESS,
+ NULL, /* supported_framerates */
+ (const enum AVPixelFormat[]) {
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUYV422, AV_PIX_FMT_BGR24,
+ AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
+ },
+ NULL, /* supported_samplerates */
+ NULL, /* sample_fmts */
+ NULL, /* channel_layouts */
+ 0, /* max_lowres */
+ NULL, /* priv_class */
+ NULL, /* profiles */
+ sizeof(UtVideoContext),
+ NULL, /* next */
+ NULL, /* init_thread_copy */
+ NULL, /* update_thread_context */
+ NULL, /* defaults */
+ NULL, /* init_static_data */
+ utvideo_encode_init,
+ NULL, /* encode */
+ utvideo_encode_frame,
+ NULL, /* decode */
+ utvideo_encode_close,
+ NULL, /* flush */
+};
.init = aac_encode_init,
.encode2 = aac_encode_frame,
.close = aac_encode_close,
- .capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY,
+ .supported_samplerates = mpeg4audio_sample_rates,
+ .capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME | AV_CODEC_CAP_DELAY,
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
};
--- /dev/null
- .capabilities = CODEC_CAP_DELAY,
+/*
+ * Copyright (c) 2002 Mark Hills <mark@pogo.org.uk>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <vorbis/vorbisenc.h>
+
+#include "avcodec.h"
+#include "bytestream.h"
+#include "internal.h"
+
+typedef struct OggVorbisDecContext {
+ vorbis_info vi; /**< vorbis_info used during init */
+ vorbis_dsp_state vd; /**< DSP state used for analysis */
+ vorbis_block vb; /**< vorbis_block used for analysis */
+ vorbis_comment vc; /**< VorbisComment info */
+ ogg_packet op; /**< ogg packet */
+} OggVorbisDecContext;
+
+static int oggvorbis_decode_init(AVCodecContext *avccontext) {
+ OggVorbisDecContext *context = avccontext->priv_data ;
+ uint8_t *p= avccontext->extradata;
+ int i, hsizes[3], ret;
+ unsigned char *headers[3], *extradata = avccontext->extradata;
+
+ if(! avccontext->extradata_size || ! p) {
+ av_log(avccontext, AV_LOG_ERROR, "vorbis extradata absent\n");
+ return AVERROR(EINVAL);
+ }
+
+ vorbis_info_init(&context->vi) ;
+ vorbis_comment_init(&context->vc) ;
+
+ if(p[0] == 0 && p[1] == 30) {
+ for(i = 0; i < 3; i++){
+ hsizes[i] = bytestream_get_be16((const uint8_t **)&p);
+ headers[i] = p;
+ p += hsizes[i];
+ }
+ } else if(*p == 2) {
+ unsigned int offset = 1;
+ p++;
+ for(i=0; i<2; i++) {
+ hsizes[i] = 0;
+ while((*p == 0xFF) && (offset < avccontext->extradata_size)) {
+ hsizes[i] += 0xFF;
+ offset++;
+ p++;
+ }
+ if(offset >= avccontext->extradata_size - 1) {
+ av_log(avccontext, AV_LOG_ERROR,
+ "vorbis header sizes damaged\n");
+ ret = AVERROR_INVALIDDATA;
+ goto error;
+ }
+ hsizes[i] += *p;
+ offset++;
+ p++;
+ }
+ hsizes[2] = avccontext->extradata_size - hsizes[0]-hsizes[1]-offset;
+#if 0
+ av_log(avccontext, AV_LOG_DEBUG,
+ "vorbis header sizes: %d, %d, %d, / extradata_len is %d \n",
+ hsizes[0], hsizes[1], hsizes[2], avccontext->extradata_size);
+#endif
+ headers[0] = extradata + offset;
+ headers[1] = extradata + offset + hsizes[0];
+ headers[2] = extradata + offset + hsizes[0] + hsizes[1];
+ } else {
+ av_log(avccontext, AV_LOG_ERROR,
+ "vorbis initial header len is wrong: %d\n", *p);
+ ret = AVERROR_INVALIDDATA;
+ goto error;
+ }
+
+ for(i=0; i<3; i++){
+ context->op.b_o_s= i==0;
+ context->op.bytes = hsizes[i];
+ context->op.packet = headers[i];
+ if(vorbis_synthesis_headerin(&context->vi, &context->vc, &context->op)<0){
+ av_log(avccontext, AV_LOG_ERROR, "%d. vorbis header damaged\n", i+1);
+ ret = AVERROR_INVALIDDATA;
+ goto error;
+ }
+ }
+
+ avccontext->channels = context->vi.channels;
+ avccontext->sample_rate = context->vi.rate;
+ avccontext->sample_fmt = AV_SAMPLE_FMT_S16;
+ avccontext->time_base= (AVRational){1, avccontext->sample_rate};
+
+ vorbis_synthesis_init(&context->vd, &context->vi);
+ vorbis_block_init(&context->vd, &context->vb);
+
+ return 0 ;
+
+ error:
+ vorbis_info_clear(&context->vi);
+ vorbis_comment_clear(&context->vc) ;
+ return ret;
+}
+
+
+static inline int conv(int samples, float **pcm, char *buf, int channels) {
+ int i, j;
+ ogg_int16_t *ptr, *data = (ogg_int16_t*)buf ;
+ float *mono ;
+
+ for(i = 0 ; i < channels ; i++){
+ ptr = &data[i];
+ mono = pcm[i] ;
+
+ for(j = 0 ; j < samples ; j++) {
+ *ptr = av_clip_int16(mono[j] * 32767.f);
+ ptr += channels;
+ }
+ }
+
+ return 0 ;
+}
+
+static int oggvorbis_decode_frame(AVCodecContext *avccontext, void *data,
+ int *got_frame_ptr, AVPacket *avpkt)
+{
+ OggVorbisDecContext *context = avccontext->priv_data ;
+ AVFrame *frame = data;
+ float **pcm ;
+ ogg_packet *op= &context->op;
+ int samples, total_samples, total_bytes;
+ int ret;
+ int16_t *output;
+
+ if(!avpkt->size){
+ //FIXME flush
+ return 0;
+ }
+
+ frame->nb_samples = 8192*4;
+ if ((ret = ff_get_buffer(avccontext, frame, 0)) < 0)
+ return ret;
+ output = (int16_t *)frame->data[0];
+
+
+ op->packet = avpkt->data;
+ op->bytes = avpkt->size;
+
+// av_log(avccontext, AV_LOG_DEBUG, "%d %d %d %"PRId64" %"PRId64" %d %d\n", op->bytes, op->b_o_s, op->e_o_s, op->granulepos, op->packetno, buf_size, context->vi.rate);
+
+/* for(i=0; i<op->bytes; i++)
+ av_log(avccontext, AV_LOG_DEBUG, "%02X ", op->packet[i]);
+ av_log(avccontext, AV_LOG_DEBUG, "\n");*/
+
+ if(vorbis_synthesis(&context->vb, op) == 0)
+ vorbis_synthesis_blockin(&context->vd, &context->vb) ;
+
+ total_samples = 0 ;
+ total_bytes = 0 ;
+
+ while((samples = vorbis_synthesis_pcmout(&context->vd, &pcm)) > 0) {
+ conv(samples, pcm, (char*)output + total_bytes, context->vi.channels) ;
+ total_bytes += samples * 2 * context->vi.channels ;
+ total_samples += samples ;
+ vorbis_synthesis_read(&context->vd, samples) ;
+ }
+
+ frame->nb_samples = total_samples;
+ *got_frame_ptr = total_samples > 0;
+ return avpkt->size;
+}
+
+
+static int oggvorbis_decode_close(AVCodecContext *avccontext) {
+ OggVorbisDecContext *context = avccontext->priv_data ;
+
+ vorbis_info_clear(&context->vi) ;
+ vorbis_comment_clear(&context->vc) ;
+
+ return 0 ;
+}
+
+
+AVCodec ff_libvorbis_decoder = {
+ .name = "libvorbis",
+ .long_name = NULL_IF_CONFIG_SMALL("libvorbis"),
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_VORBIS,
+ .priv_data_size = sizeof(OggVorbisDecContext),
+ .init = oggvorbis_decode_init,
+ .decode = oggvorbis_decode_frame,
+ .close = oggvorbis_decode_close,
++ .capabilities = AV_CODEC_CAP_DELAY,
+};
.init = libvorbis_encode_init,
.encode2 = libvorbis_encode_frame,
.close = libvorbis_encode_close,
- .capabilities = CODEC_CAP_DELAY | CODEC_CAP_SMALL_LAST_FRAME,
- .capabilities = AV_CODEC_CAP_DELAY,
++ .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SMALL_LAST_FRAME,
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
- .priv_class = &class,
+ .priv_class = &vorbis_class,
.defaults = defaults,
};
*/
#include <vpx/vpx_codec.h>
-
#include "libvpx.h"
+#include "config.h"
+
+#if CONFIG_LIBVPX_VP9_ENCODER
+#include <vpx/vpx_encoder.h>
+#include <vpx/vp8cx.h>
+#endif
+
+static const enum AVPixelFormat vp9_pix_fmts_def[] = {
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_NONE
+};
+
+#if CONFIG_LIBVPX_VP9_ENCODER
+static const enum AVPixelFormat vp9_pix_fmts_highcol[] = {
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat vp9_pix_fmts_highbd[] = {
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_YUV440P,
+ AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_YUV420P10LE,
+ AV_PIX_FMT_YUV422P10LE,
+ AV_PIX_FMT_YUV440P10LE,
+ AV_PIX_FMT_YUV444P10LE,
+ AV_PIX_FMT_YUV420P12LE,
+ AV_PIX_FMT_YUV422P12LE,
+ AV_PIX_FMT_YUV440P12LE,
+ AV_PIX_FMT_YUV444P12LE,
+ AV_PIX_FMT_NONE
+};
+#endif
- codec->capabilities |= CODEC_CAP_EXPERIMENTAL;
+av_cold void ff_vp9_init_static(AVCodec *codec)
+{
+ if ( vpx_codec_version_major() < 1
+ || (vpx_codec_version_major() == 1 && vpx_codec_version_minor() < 3))
++ codec->capabilities |= AV_CODEC_CAP_EXPERIMENTAL;
+ codec->pix_fmts = vp9_pix_fmts_def;
+#if CONFIG_LIBVPX_VP9_ENCODER
+ if ( vpx_codec_version_major() > 1
+ || (vpx_codec_version_major() == 1 && vpx_codec_version_minor() >= 4)) {
+#ifdef VPX_CODEC_CAP_HIGHBITDEPTH
+ vpx_codec_caps_t codec_caps = vpx_codec_get_caps(vpx_codec_vp9_cx());
+ if (codec_caps & VPX_CODEC_CAP_HIGHBITDEPTH)
+ codec->pix_fmts = vp9_pix_fmts_highbd;
+ else
+#endif
+ codec->pix_fmts = vp9_pix_fmts_highcol;
+ }
+#endif
+}
+#if 0
enum AVPixelFormat ff_vpx_imgfmt_to_pixfmt(vpx_img_fmt_t img)
{
switch (img) {
.init = vp9_init,
.close = vp8_free,
.decode = vp8_decode,
- .capabilities = CODEC_CAP_AUTO_THREADS | CODEC_CAP_DR1,
- .capabilities = AV_CODEC_CAP_AUTO_THREADS,
++ .capabilities = AV_CODEC_CAP_AUTO_THREADS | AV_CODEC_CAP_DR1,
+ .init_static_data = ff_vp9_init_static,
+ .profiles = NULL_IF_CONFIG_SMALL(profiles),
};
#endif /* CONFIG_LIBVPX_VP9_DECODER */
.init = vp8_init,
.encode2 = vp8_encode,
.close = vp8_free,
- .capabilities = CODEC_CAP_DELAY | CODEC_CAP_AUTO_THREADS,
+ .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AUTO_THREADS,
- .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
+ .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_NONE },
.priv_class = &class_vp8,
.defaults = defaults,
};
.init = vp9_init,
.encode2 = vp8_encode,
.close = vp8_free,
- .capabilities = CODEC_CAP_DELAY | CODEC_CAP_AUTO_THREADS,
+ .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AUTO_THREADS,
- .pix_fmts = (const enum AVPixelFormat[]) {
- AV_PIX_FMT_YUV420P,
-#if VPX_IMAGE_ABI_VERSION >= 3
- AV_PIX_FMT_YUV422P,
- AV_PIX_FMT_YUV444P,
- AV_PIX_FMT_YUV440P,
-#endif
- AV_PIX_FMT_NONE,
- },
.profiles = NULL_IF_CONFIG_SMALL(profiles),
.priv_class = &class_vp9,
.defaults = defaults,
--- /dev/null
- .capabilities = CODEC_CAP_DELAY,
+/*
+ * WebP encoding support via libwebp
+ * Copyright (c) 2015 Urvang Joshi
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * WebP encoder using libwebp (WebPAnimEncoder API)
+ */
+
+#include "config.h"
+#include "libwebpenc_common.h"
+
+#include <webp/mux.h>
+
+typedef struct LibWebPAnimContext {
+ LibWebPContextCommon cc;
+ WebPAnimEncoder *enc; // the main AnimEncoder object
+ int64_t prev_frame_pts; // pts of the previously encoded frame.
+ int done; // If true, we have assembled the bitstream already
+} LibWebPAnimContext;
+
+static av_cold int libwebp_anim_encode_init(AVCodecContext *avctx)
+{
+ int ret = ff_libwebp_encode_init_common(avctx);
+ if (!ret) {
+ LibWebPAnimContext *s = avctx->priv_data;
+ WebPAnimEncoderOptions enc_options;
+ WebPAnimEncoderOptionsInit(&enc_options);
+ // TODO(urvang): Expose some options on command-line perhaps.
+ s->enc = WebPAnimEncoderNew(avctx->width, avctx->height, &enc_options);
+ if (!s->enc)
+ return AVERROR(EINVAL);
+ s->prev_frame_pts = -1;
+ s->done = 0;
+ }
+ return ret;
+}
+
+static int libwebp_anim_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+ const AVFrame *frame, int *got_packet) {
+ LibWebPAnimContext *s = avctx->priv_data;
+ int ret;
+
+ if (!frame) {
+ if (s->done) { // Second flush: return empty package to denote finish.
+ *got_packet = 0;
+ return 0;
+ } else { // First flush: assemble bitstream and return it.
+ WebPData assembled_data = { 0 };
+ ret = WebPAnimEncoderAssemble(s->enc, &assembled_data);
+ if (ret) {
+ ret = ff_alloc_packet(pkt, assembled_data.size);
+ if (ret < 0)
+ return ret;
+ memcpy(pkt->data, assembled_data.bytes, assembled_data.size);
+ s->done = 1;
+ pkt->flags |= AV_PKT_FLAG_KEY;
+ pkt->pts = pkt->dts = s->prev_frame_pts + 1;
+ *got_packet = 1;
+ return 0;
+ } else {
+ av_log(s, AV_LOG_ERROR,
+ "WebPAnimEncoderAssemble() failed with error: %d\n",
+ VP8_ENC_ERROR_OUT_OF_MEMORY);
+ return AVERROR(ENOMEM);
+ }
+ }
+ } else {
+ int timestamp_ms;
+ WebPPicture *pic = NULL;
+ AVFrame *alt_frame = NULL;
+ ret = ff_libwebp_get_frame(avctx, &s->cc, frame, &alt_frame, &pic);
+ if (ret < 0)
+ goto end;
+
+ timestamp_ms =
+ avctx->time_base.num * frame->pts * 1000 / avctx->time_base.den;
+ ret = WebPAnimEncoderAdd(s->enc, pic, timestamp_ms, &s->cc.config);
+ if (!ret) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Encoding WebP frame failed with error: %d\n",
+ pic->error_code);
+ ret = ff_libwebp_error_to_averror(pic->error_code);
+ goto end;
+ }
+
+ pkt->pts = pkt->dts = frame->pts;
+ s->prev_frame_pts = frame->pts; // Save for next frame.
+ ret = 0;
+ *got_packet = 1;
+
+end:
+ WebPPictureFree(pic);
+ av_freep(&pic);
+ av_frame_free(&alt_frame);
+ return ret;
+ }
+}
+
+static int libwebp_anim_encode_close(AVCodecContext *avctx)
+{
+ LibWebPAnimContext *s = avctx->priv_data;
+ av_frame_free(&s->cc.ref);
+ WebPAnimEncoderDelete(s->enc);
+
+ return 0;
+}
+
+static const AVClass class = {
+ .class_name = "libwebp_anim",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+AVCodec ff_libwebp_anim_encoder = {
+ .name = "libwebp_anim",
+ .long_name = NULL_IF_CONFIG_SMALL("libwebp WebP image"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_WEBP,
+ .priv_data_size = sizeof(LibWebPAnimContext),
+ .init = libwebp_anim_encode_init,
+ .encode2 = libwebp_anim_encode_frame,
+ .close = libwebp_anim_encode_close,
++ .capabilities = AV_CODEC_CAP_DELAY,
+ .pix_fmts = (const enum AVPixelFormat[]) {
+ AV_PIX_FMT_RGB32,
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVA420P,
+ AV_PIX_FMT_NONE
+ },
+ .priv_class = &class,
+ .defaults = libwebp_defaults,
+};
.init = X264_init,
.encode2 = X264_frame,
.close = X264_close,
- .capabilities = CODEC_CAP_DELAY | CODEC_CAP_AUTO_THREADS,
+ .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AUTO_THREADS,
- .priv_class = &class,
+ .priv_class = &x264_class,
.defaults = x264_defaults,
.init_static_data = X264_init_static,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
FF_CODEC_CAP_INIT_CLEANUP,
};
- .capabilities = CODEC_CAP_DELAY | CODEC_CAP_AUTO_THREADS,
+
+AVCodec ff_libx264rgb_encoder = {
+ .name = "libx264rgb",
+ .long_name = NULL_IF_CONFIG_SMALL("libx264 H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 RGB"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_H264,
+ .priv_data_size = sizeof(X264Context),
+ .init = X264_init,
+ .encode2 = X264_frame,
+ .close = X264_close,
++ .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AUTO_THREADS,
+ .priv_class = &rgbclass,
+ .defaults = x264_defaults,
+ .pix_fmts = pix_fmts_8bit_rgb,
+};
#endif
#if CONFIG_LIBX262_ENCODER
.init = X264_init,
.encode2 = X264_frame,
.close = X264_close,
-- .capabilities = CODEC_CAP_DELAY | CODEC_CAP_AUTO_THREADS,
++ .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AUTO_THREADS,
.priv_class = &X262_class,
.defaults = x264_defaults,
.pix_fmts = pix_fmts_8bit,
.init = XAVS_init,
.encode2 = XAVS_frame,
.close = XAVS_close,
- .capabilities = CODEC_CAP_DELAY | CODEC_CAP_AUTO_THREADS,
+ .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AUTO_THREADS,
.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
- .priv_class = &class,
+ .priv_class = &xavs_class,
.defaults = xavs_defaults,
};
--- /dev/null
- .capabilities = CODEC_CAP_DELAY,
+/*
+ * Teletext decoding for ffmpeg
+ * Copyright (c) 2005-2010, 2012 Wolfram Gloger
+ * Copyright (c) 2013 Marton Balint
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "libavcodec/ass.h"
+#include "libavutil/opt.h"
+#include "libavutil/bprint.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/log.h"
+
+#include <libzvbi.h>
+
+#define TEXT_MAXSZ (25 * (56 + 1) * 4 + 2)
+#define VBI_NB_COLORS 40
+#define RGBA(r,g,b,a) (((a) << 24) | ((r) << 16) | ((g) << 8) | (b))
+#define VBI_R(rgba) (((rgba) >> 0) & 0xFF)
+#define VBI_G(rgba) (((rgba) >> 8) & 0xFF)
+#define VBI_B(rgba) (((rgba) >> 16) & 0xFF)
+#define VBI_A(rgba) (((rgba) >> 24) & 0xFF)
+#define MAX_BUFFERED_PAGES 25
+#define BITMAP_CHAR_WIDTH 12
+#define BITMAP_CHAR_HEIGHT 10
+#define MAX_SLICES 64
+
+typedef struct TeletextPage
+{
+ AVSubtitleRect *sub_rect;
+ int pgno;
+ int subno;
+ int64_t pts;
+} TeletextPage;
+
+typedef struct TeletextContext
+{
+ AVClass *class;
+ char *pgno;
+ int x_offset;
+ int y_offset;
+ int format_id; /* 0 = bitmap, 1 = text/ass */
+ int chop_top;
+ int sub_duration; /* in msec */
+ int transparent_bg;
+ int chop_spaces;
+
+ int lines_processed;
+ TeletextPage *pages;
+ int nb_pages;
+ int64_t pts;
+ int handler_ret;
+
+ vbi_decoder * vbi;
+#ifdef DEBUG
+ vbi_export * ex;
+#endif
+ vbi_sliced sliced[MAX_SLICES];
+} TeletextContext;
+
+static int chop_spaces_utf8(const unsigned char* t, int len)
+{
+ t += len;
+ while (len > 0) {
+ if (*--t != ' ' || (len-1 > 0 && *(t-1) & 0x80))
+ break;
+ --len;
+ }
+ return len;
+}
+
+static void subtitle_rect_free(AVSubtitleRect **sub_rect)
+{
+ av_freep(&(*sub_rect)->pict.data[0]);
+ av_freep(&(*sub_rect)->pict.data[1]);
+ av_freep(&(*sub_rect)->ass);
+ av_freep(sub_rect);
+}
+
+static int create_ass_text(TeletextContext *ctx, const char *text, char **ass)
+{
+ int ret;
+ AVBPrint buf, buf2;
+ const int ts_start = av_rescale_q(ctx->pts, AV_TIME_BASE_Q, (AVRational){1, 100});
+ const int ts_duration = av_rescale_q(ctx->sub_duration, (AVRational){1, 1000}, (AVRational){1, 100});
+
+ /* First we escape the plain text into buf. */
+ av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);
+ ff_ass_bprint_text_event(&buf, text, strlen(text), "", 0);
+ av_bprintf(&buf, "\r\n");
+
+ if (!av_bprint_is_complete(&buf)) {
+ av_bprint_finalize(&buf, NULL);
+ return AVERROR(ENOMEM);
+ }
+
+ /* Then we create the ass dialog line in buf2 from the escaped text in buf. */
+ av_bprint_init(&buf2, 0, AV_BPRINT_SIZE_UNLIMITED);
+ ff_ass_bprint_dialog(&buf2, buf.str, ts_start, ts_duration, 0);
+ av_bprint_finalize(&buf, NULL);
+
+ if (!av_bprint_is_complete(&buf2)) {
+ av_bprint_finalize(&buf2, NULL);
+ return AVERROR(ENOMEM);
+ }
+
+ if ((ret = av_bprint_finalize(&buf2, ass)) < 0)
+ return ret;
+
+ return 0;
+}
+
+/* Draw a page as text */
+static int gen_sub_text(TeletextContext *ctx, AVSubtitleRect *sub_rect, vbi_page *page, int chop_top)
+{
+ const char *in;
+ AVBPrint buf;
+ char *vbi_text = av_malloc(TEXT_MAXSZ);
+ int sz;
+
+ if (!vbi_text)
+ return AVERROR(ENOMEM);
+
+ sz = vbi_print_page_region(page, vbi_text, TEXT_MAXSZ-1, "UTF-8",
+ /*table mode*/ TRUE, FALSE,
+ 0, chop_top,
+ page->columns, page->rows-chop_top);
+ if (sz <= 0) {
+ av_log(ctx, AV_LOG_ERROR, "vbi_print error\n");
+ av_free(vbi_text);
+ return AVERROR_EXTERNAL;
+ }
+ vbi_text[sz] = '\0';
+ in = vbi_text;
+ av_bprint_init(&buf, 0, TEXT_MAXSZ);
+
+ if (ctx->chop_spaces) {
+ for (;;) {
+ int nl, sz;
+
+ // skip leading spaces and newlines
+ in += strspn(in, " \n");
+ // compute end of row
+ for (nl = 0; in[nl]; ++nl)
+ if (in[nl] == '\n' && (nl==0 || !(in[nl-1] & 0x80)))
+ break;
+ if (!in[nl])
+ break;
+ // skip trailing spaces
+ sz = chop_spaces_utf8(in, nl);
+ av_bprint_append_data(&buf, in, sz);
+ av_bprintf(&buf, "\n");
+ in += nl;
+ }
+ } else {
+ av_bprintf(&buf, "%s\n", vbi_text);
+ }
+ av_free(vbi_text);
+
+ if (!av_bprint_is_complete(&buf)) {
+ av_bprint_finalize(&buf, NULL);
+ return AVERROR(ENOMEM);
+ }
+
+ if (buf.len) {
+ int ret;
+ sub_rect->type = SUBTITLE_ASS;
+ if ((ret = create_ass_text(ctx, buf.str, &sub_rect->ass)) < 0) {
+ av_bprint_finalize(&buf, NULL);
+ return ret;
+ }
+ av_log(ctx, AV_LOG_DEBUG, "subtext:%s:txetbus\n", sub_rect->ass);
+ } else {
+ sub_rect->type = SUBTITLE_NONE;
+ }
+ av_bprint_finalize(&buf, NULL);
+ return 0;
+}
+
+static void fix_transparency(TeletextContext *ctx, AVSubtitleRect *sub_rect, vbi_page *page,
+ int chop_top, uint8_t transparent_color, int resx, int resy)
+{
+ int iy;
+
+ // Hack for transparency, inspired by VLC code...
+ for (iy = 0; iy < resy; iy++) {
+ uint8_t *pixel = sub_rect->pict.data[0] + iy * sub_rect->pict.linesize[0];
+ vbi_char *vc = page->text + (iy / BITMAP_CHAR_HEIGHT + chop_top) * page->columns;
+ vbi_char *vcnext = vc + page->columns;
+ for (; vc < vcnext; vc++) {
+ uint8_t *pixelnext = pixel + BITMAP_CHAR_WIDTH;
+ switch (vc->opacity) {
+ case VBI_TRANSPARENT_SPACE:
+ memset(pixel, transparent_color, BITMAP_CHAR_WIDTH);
+ break;
+ case VBI_OPAQUE:
+ case VBI_SEMI_TRANSPARENT:
+ if (!ctx->transparent_bg)
+ break;
+ case VBI_TRANSPARENT_FULL:
+ for(; pixel < pixelnext; pixel++)
+ if (*pixel == vc->background)
+ *pixel = transparent_color;
+ break;
+ }
+ pixel = pixelnext;
+ }
+ }
+}
+
+/* Draw a page as bitmap */
+static int gen_sub_bitmap(TeletextContext *ctx, AVSubtitleRect *sub_rect, vbi_page *page, int chop_top)
+{
+ int resx = page->columns * BITMAP_CHAR_WIDTH;
+ int resy = (page->rows - chop_top) * BITMAP_CHAR_HEIGHT;
+ uint8_t ci, cmax = 0;
+ int ret;
+ vbi_char *vc = page->text + (chop_top * page->columns);
+ vbi_char *vcend = page->text + (page->rows * page->columns);
+
+ for (; vc < vcend; vc++) {
+ if (vc->opacity != VBI_TRANSPARENT_SPACE) {
+ cmax = VBI_NB_COLORS;
+ break;
+ }
+ }
+
+ if (cmax == 0) {
+ av_log(ctx, AV_LOG_DEBUG, "dropping empty page %3x\n", page->pgno);
+ sub_rect->type = SUBTITLE_NONE;
+ return 0;
+ }
+
+ if ((ret = avpicture_alloc(&sub_rect->pict, AV_PIX_FMT_PAL8, resx, resy)) < 0)
+ return ret;
+ // Yes, we want to allocate the palette on our own because AVSubtitle works this way
+ sub_rect->pict.data[1] = NULL;
+
+ vbi_draw_vt_page_region(page, VBI_PIXFMT_PAL8,
+ sub_rect->pict.data[0], sub_rect->pict.linesize[0],
+ 0, chop_top, page->columns, page->rows - chop_top,
+ /*reveal*/ 1, /*flash*/ 1);
+
+ fix_transparency(ctx, sub_rect, page, chop_top, cmax, resx, resy);
+ sub_rect->x = ctx->x_offset;
+ sub_rect->y = ctx->y_offset + chop_top * BITMAP_CHAR_HEIGHT;
+ sub_rect->w = resx;
+ sub_rect->h = resy;
+ sub_rect->nb_colors = (int)cmax + 1;
+ sub_rect->pict.data[1] = av_mallocz(AVPALETTE_SIZE);
+ if (!sub_rect->pict.data[1]) {
+ av_freep(&sub_rect->pict.data[0]);
+ return AVERROR(ENOMEM);
+ }
+ for (ci = 0; ci < cmax; ci++) {
+ int r, g, b, a;
+
+ r = VBI_R(page->color_map[ci]);
+ g = VBI_G(page->color_map[ci]);
+ b = VBI_B(page->color_map[ci]);
+ a = VBI_A(page->color_map[ci]);
+ ((uint32_t *)sub_rect->pict.data[1])[ci] = RGBA(r, g, b, a);
+ av_dlog(ctx, "palette %0x\n", ((uint32_t *)sub_rect->pict.data[1])[ci]);
+ }
+ ((uint32_t *)sub_rect->pict.data[1])[cmax] = RGBA(0, 0, 0, 0);
+ sub_rect->type = SUBTITLE_BITMAP;
+ return 0;
+}
+
+static void handler(vbi_event *ev, void *user_data)
+{
+ TeletextContext *ctx = user_data;
+ TeletextPage *new_pages;
+ vbi_page page;
+ int res;
+ char pgno_str[12];
+ vbi_subno subno;
+ vbi_page_type vpt;
+ int chop_top;
+ char *lang;
+
+ snprintf(pgno_str, sizeof pgno_str, "%03x", ev->ev.ttx_page.pgno);
+ av_log(ctx, AV_LOG_DEBUG, "decoded page %s.%02x\n",
+ pgno_str, ev->ev.ttx_page.subno & 0xFF);
+
+ if (strcmp(ctx->pgno, "*") && !strstr(ctx->pgno, pgno_str))
+ return;
+ if (ctx->handler_ret < 0)
+ return;
+
+ res = vbi_fetch_vt_page(ctx->vbi, &page,
+ ev->ev.ttx_page.pgno,
+ ev->ev.ttx_page.subno,
+ VBI_WST_LEVEL_3p5, 25, TRUE);
+
+ if (!res)
+ return;
+
+#ifdef DEBUG
+ fprintf(stderr, "\nSaving res=%d dy0=%d dy1=%d...\n",
+ res, page.dirty.y0, page.dirty.y1);
+ fflush(stderr);
+
+ if (!vbi_export_stdio(ctx->ex, stderr, &page))
+ fprintf(stderr, "failed: %s\n", vbi_export_errstr(ctx->ex));
+#endif
+
+ vpt = vbi_classify_page(ctx->vbi, ev->ev.ttx_page.pgno, &subno, &lang);
+ chop_top = ctx->chop_top ||
+ ((page.rows > 1) && (vpt == VBI_SUBTITLE_PAGE));
+
+ av_log(ctx, AV_LOG_DEBUG, "%d x %d page chop:%d\n",
+ page.columns, page.rows, chop_top);
+
+ if (ctx->nb_pages < MAX_BUFFERED_PAGES) {
+ if ((new_pages = av_realloc_array(ctx->pages, ctx->nb_pages + 1, sizeof(TeletextPage)))) {
+ TeletextPage *cur_page = new_pages + ctx->nb_pages;
+ ctx->pages = new_pages;
+ cur_page->sub_rect = av_mallocz(sizeof(*cur_page->sub_rect));
+ cur_page->pts = ctx->pts;
+ cur_page->pgno = ev->ev.ttx_page.pgno;
+ cur_page->subno = ev->ev.ttx_page.subno;
+ if (cur_page->sub_rect) {
+ res = (ctx->format_id == 0) ?
+ gen_sub_bitmap(ctx, cur_page->sub_rect, &page, chop_top) :
+ gen_sub_text (ctx, cur_page->sub_rect, &page, chop_top);
+ if (res < 0) {
+ av_freep(&cur_page->sub_rect);
+ ctx->handler_ret = res;
+ } else {
+ ctx->pages[ctx->nb_pages++] = *cur_page;
+ }
+ } else {
+ ctx->handler_ret = AVERROR(ENOMEM);
+ }
+ } else {
+ ctx->handler_ret = AVERROR(ENOMEM);
+ }
+ } else {
+        //TODO: If multiple packets contain more than one page, pages may get queued up, and this may happen...
+ av_log(ctx, AV_LOG_ERROR, "Buffered too many pages, dropping page %s.\n", pgno_str);
+ ctx->handler_ret = AVERROR(ENOSYS);
+ }
+
+ vbi_unref_page(&page);
+}
+
+static inline int data_identifier_is_teletext(int data_identifier) {
+ /* See EN 301 775 section 4.4.2. */
+ return (data_identifier >= 0x10 && data_identifier <= 0x1F ||
+ data_identifier >= 0x99 && data_identifier <= 0x9B);
+}
+
+/* Parse EN 301 775 data units from buf into ctx->sliced.
+ * Only teletext units (data_unit_id 0x02 non-subtitle, 0x03 subtitle) are
+ * consumed; each must carry the fixed unit length 0x2c (44 bytes).
+ * Returns the number of VBI lines filled (capped at MAX_SLICES), or a
+ * negative AVERROR on malformed input. */
+static int slice_to_vbi_lines(TeletextContext *ctx, uint8_t* buf, int size)
+{
+    int lines = 0;
+    while (size >= 2 && lines < MAX_SLICES) {
+        int data_unit_id = buf[0];
+        int data_unit_length = buf[1];
+        if (data_unit_length + 2 > size)
+            return AVERROR_INVALIDDATA;
+        if (data_unit_id == 0x02 || data_unit_id == 0x03) {
+            if (data_unit_length != 0x2c)
+                return AVERROR_INVALIDDATA;
+            else {
+                int line_offset = buf[2] & 0x1f;
+                int field_parity = buf[2] & 0x20;
+                int i;
+                ctx->sliced[lines].id = VBI_SLICED_TELETEXT_B;
+                /* line 0 means "offset unknown"; lines of the second field
+                 * (parity bit clear) are shifted by 313 */
+                ctx->sliced[lines].line = (line_offset > 0 ? (line_offset + (field_parity ? 0 : 313)) : 0);
+                /* 42 payload bytes per teletext line; vbi_rev8() flips the
+                 * bit order to what libzvbi expects */
+                for (i = 0; i < 42; i++)
+                    ctx->sliced[lines].data[i] = vbi_rev8(buf[4 + i]);
+                lines++;
+            }
+        }
+        size -= data_unit_length + 2;
+        buf += data_unit_length + 2;
+    }
+    if (size)
+        av_log(ctx, AV_LOG_WARNING, "%d bytes remained after slicing data\n", size);
+    return lines;
+}
+
+/* AVCodec.decode callback: feed one DVB teletext PES payload to libzvbi and,
+ * if a completed page is queued, return it as an AVSubtitle in *data.
+ * *data_size is set to 1 when a subtitle was produced, 0 otherwise.
+ * The vbi decoder is (re)created lazily so it also comes back after flush. */
+static int teletext_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *pkt)
+{
+    TeletextContext *ctx = avctx->priv_data;
+    AVSubtitle *sub = data;
+    int ret = 0;
+
+    if (!ctx->vbi) {
+        if (!(ctx->vbi = vbi_decoder_new()))
+            return AVERROR(ENOMEM);
+        if (!vbi_event_handler_add(ctx->vbi, VBI_EVENT_TTX_PAGE, handler, ctx)) {
+            vbi_decoder_delete(ctx->vbi);
+            ctx->vbi = NULL;
+            return AVERROR(ENOMEM);
+        }
+    }
+
+    /* remember the packet pts (in AV_TIME_BASE units) for the page handler */
+    if (avctx->pkt_timebase.den && pkt->pts != AV_NOPTS_VALUE)
+        ctx->pts = av_rescale_q(pkt->pts, avctx->pkt_timebase, AV_TIME_BASE_Q);
+
+    if (pkt->size) {
+        int lines;
+        const int full_pes_size = pkt->size + 45; /* PES header is 45 bytes */
+
+        // We allow unreasonably big packets, even if the standard only allows a max size of 1472
+        if (full_pes_size < 184 || full_pes_size > 65504 || full_pes_size % 184 != 0)
+            return AVERROR_INVALIDDATA;
+
+        ctx->handler_ret = pkt->size;
+
+        /* first payload byte is the data_identifier, the rest are data units */
+        if (data_identifier_is_teletext(*pkt->data)) {
+            if ((lines = slice_to_vbi_lines(ctx, pkt->data + 1, pkt->size - 1)) < 0)
+                return lines;
+            av_dlog(avctx, "ctx=%p buf_size=%d lines=%u pkt_pts=%7.3f\n",
+                    ctx, pkt->size, lines, (double)pkt->pts/90000.0);
+            if (lines > 0) {
+#ifdef DEBUG
+                int i;
+                av_log(avctx, AV_LOG_DEBUG, "line numbers:");
+                for(i = 0; i < lines; i++)
+                    av_log(avctx, AV_LOG_DEBUG, " %d", ctx->sliced[i].line);
+                av_log(avctx, AV_LOG_DEBUG, "\n");
+#endif
+                /* this may synchronously invoke handler() for finished pages */
+                vbi_decode(ctx->vbi, ctx->sliced, lines, 0.0);
+                ctx->lines_processed += lines;
+            }
+        }
+        /* pts was consumed by the page handler; reset for the next packet */
+        ctx->pts = AV_NOPTS_VALUE;
+        ret = ctx->handler_ret;
+    }
+
+    if (ret < 0)
+        return ret;
+
+    // is there a subtitle to pass?
+    if (ctx->nb_pages) {
+        int i;
+        sub->format = ctx->format_id;
+        sub->start_display_time = 0;
+        sub->end_display_time = ctx->sub_duration;
+        sub->num_rects = 0;
+        sub->pts = ctx->pages->pts;
+
+        if (ctx->pages->sub_rect->type != SUBTITLE_NONE) {
+            /* ownership of the rect moves to the AVSubtitle on success */
+            sub->rects = av_malloc(sizeof(*sub->rects));
+            if (sub->rects) {
+                sub->num_rects = 1;
+                sub->rects[0] = ctx->pages->sub_rect;
+            } else {
+                ret = AVERROR(ENOMEM);
+            }
+        } else {
+            av_log(avctx, AV_LOG_DEBUG, "sending empty sub\n");
+            sub->rects = NULL;
+        }
+        if (!sub->rects) // no rect was passed
+            subtitle_rect_free(&ctx->pages->sub_rect);
+
+        /* pop the first page off the queue */
+        for (i = 0; i < ctx->nb_pages - 1; i++)
+            ctx->pages[i] = ctx->pages[i + 1];
+        ctx->nb_pages--;
+
+        if (ret >= 0)
+            *data_size = 1;
+    } else
+        *data_size = 0;
+
+    return ret;
+}
+
+/* AVCodec.init callback: verify the runtime libzvbi version, set the fixed
+ * bitmap canvas size for bitmap output, and initialize per-context state.
+ * Returns 0, AVERROR_EXTERNAL on a too-old zvbi, or the result of writing
+ * the default ASS header when text output is selected. */
+static int teletext_init_decoder(AVCodecContext *avctx)
+{
+    TeletextContext *ctx = avctx->priv_data;
+    unsigned int maj, min, rev;
+
+    vbi_version(&maj, &min, &rev);
+    if (!(maj > 0 || min > 2 || min == 2 && rev >= 26)) {
+        av_log(avctx, AV_LOG_ERROR, "decoder needs zvbi version >= 0.2.26.\n");
+        return AVERROR_EXTERNAL;
+    }
+
+    /* format_id 0 = bitmap output: fixed 41x25 character canvas */
+    if (ctx->format_id == 0) {
+        avctx->width = 41 * BITMAP_CHAR_WIDTH;
+        avctx->height = 25 * BITMAP_CHAR_HEIGHT;
+    }
+
+    /* the vbi decoder itself is created lazily on the first packet */
+    ctx->vbi = NULL;
+    ctx->pts = AV_NOPTS_VALUE;
+
+#ifdef DEBUG
+    {
+        char *t;
+        ctx->ex = vbi_export_new("text", &t);
+    }
+#endif
+    av_log(avctx, AV_LOG_VERBOSE, "page filter: %s\n", ctx->pgno);
+    return (ctx->format_id == 1) ? ff_ass_subtitle_header_default(avctx) : 0;
+}
+
+/* AVCodec.close callback: free all queued pages, the page array and the
+ * libzvbi decoder, and reset per-context state. Always returns 0. */
+static int teletext_close_decoder(AVCodecContext *avctx)
+{
+    TeletextContext *ctx = avctx->priv_data;
+
+    av_dlog(avctx, "lines_total=%u\n", ctx->lines_processed);
+    while (ctx->nb_pages)
+        subtitle_rect_free(&ctx->pages[--ctx->nb_pages].sub_rect);
+    av_freep(&ctx->pages);
+
+    /* vbi_decoder_delete(NULL) is tolerated; NULL triggers lazy re-creation */
+    vbi_decoder_delete(ctx->vbi);
+    ctx->vbi = NULL;
+    ctx->pts = AV_NOPTS_VALUE;
+    return 0;
+}
+
+/* AVCodec.flush callback: drop all buffered pages and decoder state by
+ * closing completely; the vbi decoder is rebuilt lazily on the next packet. */
+static void teletext_flush(AVCodecContext *avctx)
+{
+    teletext_close_decoder(avctx);
+}
+
+#define OFFSET(x) offsetof(TeletextContext, x)
+#define SD AV_OPT_FLAG_SUBTITLE_PARAM | AV_OPT_FLAG_DECODING_PARAM
+/* User-settable decoder options (subtitle + decoding scope). */
+static const AVOption options[] = {
+    {"txt_page", "list of teletext page numbers to decode, * is all", OFFSET(pgno), AV_OPT_TYPE_STRING, {.str = "*"}, 0, 0, SD},
+    {"txt_chop_top", "discards the top teletext line", OFFSET(chop_top), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, SD},
+    {"txt_format", "format of the subtitles (bitmap or text)", OFFSET(format_id), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, SD, "txt_format"},
+    {"bitmap", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0}, 0, 0, SD, "txt_format"},
+    {"text", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1}, 0, 0, SD, "txt_format"},
+    {"txt_left", "x offset of generated bitmaps", OFFSET(x_offset), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 65535, SD},
+    {"txt_top", "y offset of generated bitmaps", OFFSET(y_offset), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 65535, SD},
+    {"txt_chop_spaces", "chops leading and trailing spaces from text", OFFSET(chop_spaces), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, SD},
+    {"txt_duration", "display duration of teletext pages in msecs", OFFSET(sub_duration), AV_OPT_TYPE_INT, {.i64 = 30000}, 0, 86400000, SD},
+    {"txt_transparent", "force transparent background of the teletext", OFFSET(transparent_bg), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, SD},
+    { NULL },
+};
+
+/* AVClass binding the options table to the decoder's private context. */
+static const AVClass teletext_class = {
+    .class_name = "libzvbi_teletextdec",
+    .item_name  = av_default_item_name,
+    .option     = options,
+    .version    = LIBAVUTIL_VERSION_INT,
+};
+
+/* Decoder registration. AV_CODEC_CAP_DELAY: queued pages may still be
+ * returned after the input runs out (see the buffering in the page handler). */
+AVCodec ff_libzvbi_teletext_decoder = {
+    .name      = "libzvbi_teletextdec",
+    .long_name = NULL_IF_CONFIG_SMALL("Libzvbi DVB teletext decoder"),
+    .type      = AVMEDIA_TYPE_SUBTITLE,
+    .id        = AV_CODEC_ID_DVB_TELETEXT,
+    .priv_data_size = sizeof(TeletextContext),
+    .init      = teletext_init_decoder,
+    .close     = teletext_close_decoder,
+    .decode    = teletext_decode_frame,
++    .capabilities = AV_CODEC_CAP_DELAY,
+    .flush     = teletext_flush,
+    .priv_class= &teletext_class,
+};
.init = ljpeg_encode_init,
.encode2 = ljpeg_encode_frame,
.close = ljpeg_encode_close,
- .capabilities = CODEC_CAP_FRAME_THREADS | CODEC_CAP_INTRA_ONLY,
- .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUVJ420P,
- AV_PIX_FMT_YUVJ422P,
- AV_PIX_FMT_YUVJ444P,
- AV_PIX_FMT_BGR24,
- AV_PIX_FMT_YUV420P,
- AV_PIX_FMT_YUV422P,
- AV_PIX_FMT_YUV444P,
- AV_PIX_FMT_NONE },
++ .capabilities = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
+ .pix_fmts = (const enum AVPixelFormat[]){
+ AV_PIX_FMT_BGR24 , AV_PIX_FMT_BGRA , AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
+ AV_PIX_FMT_YUV420P , AV_PIX_FMT_YUV444P , AV_PIX_FMT_YUV422P,
+ AV_PIX_FMT_NONE},
};
.init = ff_mjpeg_decode_init,
.close = ff_mjpeg_decode_end,
.decode = mjpegb_decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
+ .max_lowres = 3,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
.init = ff_mjpeg_decode_init,
.close = ff_mjpeg_decode_end,
.decode = ff_mjpeg_decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .flush = decode_flush,
+ .capabilities = AV_CODEC_CAP_DR1,
+ .max_lowres = 3,
.priv_class = &mjpegdec_class,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
.init = ff_mjpeg_decode_init,
.close = ff_mjpeg_decode_end,
.decode = ff_mjpeg_decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .flush = decode_flush,
+ .capabilities = AV_CODEC_CAP_DR1,
+ .max_lowres = 3,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
+#endif
.init = ff_mpv_encode_init,
.encode2 = ff_mpv_encode_picture,
.close = ff_mpv_encode_end,
- .capabilities = CODEC_CAP_SLICE_THREADS | CODEC_CAP_FRAME_THREADS | CODEC_CAP_INTRA_ONLY,
++ .capabilities = AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
+ .pix_fmts = (const enum AVPixelFormat[]){
+ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_NONE
+ },
+ .priv_class = &mjpeg_class,
+};
+#endif
+#if CONFIG_AMV_ENCODER
+static const AVClass amv_class = {
+ .class_name = "amv encoder",
+ .item_name = av_default_item_name,
+ .option = ff_mpv_generic_options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+AVCodec ff_amv_encoder = {
+ .name = "amv",
+ .long_name = NULL_IF_CONFIG_SMALL("AMV Video"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_AMV,
+ .priv_data_size = sizeof(MpegEncContext),
+ .init = ff_mpv_encode_init,
+ .encode2 = amv_encode_picture,
+ .close = ff_mpv_encode_end,
.pix_fmts = (const enum AVPixelFormat[]){
- AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_NONE
+ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_NONE
},
+ .priv_class = &amv_class,
};
+#endif
.priv_data_size = sizeof(MLPDecodeContext),
.init = mlp_decode_init,
.decode = read_access_unit,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
};
-
+#endif
#if CONFIG_TRUEHD_DECODER
AVCodec ff_truehd_decoder = {
.name = "truehd",
.init = mpeg_decode_init,
.close = mpeg_decode_end,
.decode = mpeg_decode_frame,
- .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 |
- CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY |
- CODEC_CAP_SLICE_THREADS,
+ .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
+ AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY |
+ AV_CODEC_CAP_SLICE_THREADS,
.flush = flush,
+ .max_lowres = 3,
.update_thread_context = ONLY_IF_THREADS_ENABLED(mpeg_decode_update_thread_context)
};
.init = mpeg_decode_init,
.close = mpeg_decode_end,
.decode = mpeg_decode_frame,
- .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 |
- CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY |
- CODEC_CAP_SLICE_THREADS,
+ .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
+ AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY |
+ AV_CODEC_CAP_SLICE_THREADS,
.flush = flush,
+ .max_lowres = 3,
.profiles = NULL_IF_CONFIG_SMALL(mpeg2_video_profiles),
};
- .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
+//legacy decoder
+/* Kept for compatibility: named "mpegvideo" but registered under the
+ * MPEG-2 codec id, sharing the regular mpeg12 decode entry points. */
+AVCodec ff_mpegvideo_decoder = {
+    .name           = "mpegvideo",
+    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-1 video"),
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = AV_CODEC_ID_MPEG2VIDEO,
+    .priv_data_size = sizeof(Mpeg1Context),
+    .init           = mpeg_decode_init,
+    .close          = mpeg_decode_end,
+    .decode         = mpeg_decode_frame,
++    .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
+    .flush          = flush,
+    .max_lowres     = 3,
+};
+
#if FF_API_XVMC
#if CONFIG_MPEG_XVMC_DECODER
static av_cold int mpeg_mc_decode_init(AVCodecContext *avctx)
.init = mpeg_mc_decode_init,
.close = mpeg_decode_end,
.decode = mpeg_decode_frame,
- .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 |
- CODEC_CAP_TRUNCATED | CODEC_CAP_HWACCEL | CODEC_CAP_DELAY,
+ .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
- AV_CODEC_CAP_TRUNCATED | CODEC_CAP_HWACCEL |
++ AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_HWACCEL |
+ AV_CODEC_CAP_DELAY,
.flush = flush,
};
#endif
#endif /* FF_API_XVMC */
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED |
- CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY,
+
+#if CONFIG_MPEG_VDPAU_DECODER
+AVCodec ff_mpeg_vdpau_decoder = {
+ .name = "mpegvideo_vdpau",
+ .long_name = NULL_IF_CONFIG_SMALL("MPEG-1/2 video (VDPAU acceleration)"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_MPEG2VIDEO,
+ .priv_data_size = sizeof(Mpeg1Context),
+ .init = mpeg_decode_init,
+ .close = mpeg_decode_end,
+ .decode = mpeg_decode_frame,
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED |
- CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY,
++ .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_TRUNCATED |
++ AV_CODEC_CAP_HWACCEL_VDPAU | AV_CODEC_CAP_DELAY,
+ .flush = flush,
+};
+#endif
+
+#if CONFIG_MPEG1_VDPAU_DECODER
+AVCodec ff_mpeg1_vdpau_decoder = {
+ .name = "mpeg1video_vdpau",
+ .long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video (VDPAU acceleration)"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_MPEG1VIDEO,
+ .priv_data_size = sizeof(Mpeg1Context),
+ .init = mpeg_decode_init,
+ .close = mpeg_decode_end,
+ .decode = mpeg_decode_frame,
++ .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_TRUNCATED |
++ AV_CODEC_CAP_HWACCEL_VDPAU | AV_CODEC_CAP_DELAY,
+ .flush = flush,
+};
+#endif
.init = decode_init,
.close = ff_h263_decode_end,
.decode = ff_h263_decode_frame,
- .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 |
- CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY |
- CODEC_CAP_FRAME_THREADS,
+ .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
+ AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY |
+ AV_CODEC_CAP_FRAME_THREADS,
.flush = ff_mpeg_flush,
+ .max_lowres = 3,
.pix_fmts = ff_h263_hwaccel_pixfmt_list_420,
.profiles = NULL_IF_CONFIG_SMALL(mpeg4_video_profiles),
.update_thread_context = ONLY_IF_THREADS_ENABLED(mpeg4_update_thread_context),
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY |
- CODEC_CAP_HWACCEL_VDPAU,
+ .priv_class = &mpeg4_class,
+};
+
+
+#if CONFIG_MPEG4_VDPAU_DECODER
+static const AVClass mpeg4_vdpau_class = {
+ "MPEG4 Video VDPAU Decoder",
+ av_default_item_name,
+ mpeg4_options,
+ LIBAVUTIL_VERSION_INT,
+};
+
+AVCodec ff_mpeg4_vdpau_decoder = {
+ .name = "mpeg4_vdpau",
+ .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 (VDPAU)"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_MPEG4,
+ .priv_data_size = sizeof(Mpeg4DecContext),
+ .init = decode_init,
+ .close = ff_h263_decode_end,
+ .decode = ff_h263_decode_frame,
++ .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY |
++ AV_CODEC_CAP_HWACCEL_VDPAU,
+ .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_VDPAU_MPEG4,
+ AV_PIX_FMT_NONE },
+ .priv_class = &mpeg4_vdpau_class,
};
+#endif
.id = AV_CODEC_ID_MP1,
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
+ .close = decode_close,
.decode = decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
.flush = flush,
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_FLT,
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
.decode = decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .close = decode_close,
+ .capabilities = AV_CODEC_CAP_DR1,
.flush = flush,
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_FLT,
.id = AV_CODEC_ID_MP3,
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
+ .close = decode_close,
.decode = decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
.flush = flush,
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_FLT,
.id = AV_CODEC_ID_MP3ADU,
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
+ .close = decode_close,
.decode = decode_frame_adu,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
.flush = flush,
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_FLT,
int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me,
ScratchpadContext *sc, int linesize)
{
- int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
+ int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
+
- if (avctx->hwaccel || avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)
++ if (avctx->hwaccel || avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
+ return 0;
+
+ if (linesize < 24) {
+ av_log(avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
+ return AVERROR_PATCHWELCOME;
+ }
// edge emu needs blocksize + filter length - 1
// (= 17x17 for halfpel / 21x21 for h264)
return -1;
}
- if (!avctx->hwaccel && !(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)) {
- memset(s->last_picture_ptr->f->data[0], 0,
- avctx->height * s->last_picture_ptr->f->linesize[0]);
- memset(s->last_picture_ptr->f->data[1], 0x80,
- (avctx->height >> v_chroma_shift) *
- s->last_picture_ptr->f->linesize[1]);
- memset(s->last_picture_ptr->f->data[2], 0x80,
- (avctx->height >> v_chroma_shift) *
- s->last_picture_ptr->f->linesize[2]);
++ if (!avctx->hwaccel && !(avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU)) {
+ for(i=0; i<avctx->height; i++)
+ memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
+ 0x80, avctx->width);
+ if (s->last_picture_ptr->f->data[2]) {
+ for(i=0; i<FF_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
+ memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
+ 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
+ memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
+ 0x80, FF_CEIL_RSHIFT(avctx->width, h_chroma_shift));
+ }
+ }
+
+ if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
+ for(i=0; i<avctx->height; i++)
+ memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
+ }
+ }
ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
/**
* Print debugging info for the given picture.
*/
-void ff_print_debug_info(MpegEncContext *s, Picture *p)
+void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
+ uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
+ int *low_delay,
+ int mb_width, int mb_height, int mb_stride, int quarter_sample)
{
- AVFrame *pict;
- if (s->avctx->hwaccel || !p || !p->mb_type)
+ if ((avctx->flags2 & AV_CODEC_FLAG2_EXPORT_MVS) && mbtype_table && motion_val[0]) {
+ const int shift = 1 + quarter_sample;
+ const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
+ const int mv_stride = (mb_width << mv_sample_log2) +
+ (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
+ int mb_x, mb_y, mbcount = 0;
+
+ /* size is width * height * 2 * 4 where 2 is for directions and 4 is
+ * for the maximum number of MB (4 MB in case of IS_8x8) */
+ AVMotionVector *mvs = av_malloc_array(mb_width * mb_height, 2 * 4 * sizeof(AVMotionVector));
+ if (!mvs)
+ return;
+
+ for (mb_y = 0; mb_y < mb_height; mb_y++) {
+ for (mb_x = 0; mb_x < mb_width; mb_x++) {
+ int i, direction, mb_type = mbtype_table[mb_x + mb_y * mb_stride];
+ for (direction = 0; direction < 2; direction++) {
+ if (!USES_LIST(mb_type, direction))
+ continue;
+ if (IS_8X8(mb_type)) {
+ for (i = 0; i < 4; i++) {
+ int sx = mb_x * 16 + 4 + 8 * (i & 1);
+ int sy = mb_y * 16 + 4 + 8 * (i >> 1);
+ int xy = (mb_x * 2 + (i & 1) +
+ (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
+ int mx = (motion_val[direction][xy][0] >> shift) + sx;
+ int my = (motion_val[direction][xy][1] >> shift) + sy;
+ mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
+ }
+ } else if (IS_16X8(mb_type)) {
+ for (i = 0; i < 2; i++) {
+ int sx = mb_x * 16 + 8;
+ int sy = mb_y * 16 + 4 + 8 * i;
+ int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
+ int mx = (motion_val[direction][xy][0] >> shift);
+ int my = (motion_val[direction][xy][1] >> shift);
+
+ if (IS_INTERLACED(mb_type))
+ my *= 2;
+
+ mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
+ }
+ } else if (IS_8X16(mb_type)) {
+ for (i = 0; i < 2; i++) {
+ int sx = mb_x * 16 + 4 + 8 * i;
+ int sy = mb_y * 16 + 8;
+ int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
+ int mx = motion_val[direction][xy][0] >> shift;
+ int my = motion_val[direction][xy][1] >> shift;
+
+ if (IS_INTERLACED(mb_type))
+ my *= 2;
+
+ mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
+ }
+ } else {
+ int sx = mb_x * 16 + 8;
+ int sy = mb_y * 16 + 8;
+ int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
+ int mx = (motion_val[direction][xy][0]>>shift) + sx;
+ int my = (motion_val[direction][xy][1]>>shift) + sy;
+ mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
+ }
+ }
+ }
+ }
+
+ if (mbcount) {
+ AVFrameSideData *sd;
+
+ av_log(avctx, AV_LOG_DEBUG, "Adding %d MVs info to frame %d\n", mbcount, avctx->frame_number);
+ sd = av_frame_new_side_data(pict, AV_FRAME_DATA_MOTION_VECTORS, mbcount * sizeof(AVMotionVector));
+ if (!sd) {
+ av_freep(&mvs);
+ return;
+ }
+ memcpy(sd->data, mvs, mbcount * sizeof(AVMotionVector));
+ }
+
+ av_freep(&mvs);
+ }
+
+ /* TODO: export all the following to make them accessible for users (and filters) */
+ if (avctx->hwaccel || !mbtype_table
- || (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU))
++ || (avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU))
return;
- pict = p->f;
- if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
+
+ if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
int x,y;
- av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
- switch (pict->pict_type) {
- case AV_PICTURE_TYPE_I:
- av_log(s->avctx,AV_LOG_DEBUG,"I\n");
- break;
- case AV_PICTURE_TYPE_P:
- av_log(s->avctx,AV_LOG_DEBUG,"P\n");
- break;
- case AV_PICTURE_TYPE_B:
- av_log(s->avctx,AV_LOG_DEBUG,"B\n");
- break;
- case AV_PICTURE_TYPE_S:
- av_log(s->avctx,AV_LOG_DEBUG,"S\n");
- break;
- case AV_PICTURE_TYPE_SI:
- av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
- break;
- case AV_PICTURE_TYPE_SP:
- av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
- break;
- }
- for (y = 0; y < s->mb_height; y++) {
- for (x = 0; x < s->mb_width; x++) {
- if (s->avctx->debug & FF_DEBUG_SKIP) {
- int count = s->mbskip_table[x + y * s->mb_stride];
+ av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
+ av_get_picture_type_char(pict->pict_type));
+ for (y = 0; y < mb_height; y++) {
+ for (x = 0; x < mb_width; x++) {
+ if (avctx->debug & FF_DEBUG_SKIP) {
+ int count = mbskip_table ? mbskip_table[x + y * mb_stride] : 0;
if (count > 9)
count = 9;
- av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
+ av_log(avctx, AV_LOG_DEBUG, "%1d", count);
}
- if (s->avctx->debug & FF_DEBUG_QP) {
- av_log(s->avctx, AV_LOG_DEBUG, "%2d",
- p->qscale_table[x + y * s->mb_stride]);
+ if (avctx->debug & FF_DEBUG_QP) {
+ av_log(avctx, AV_LOG_DEBUG, "%2d",
+ qscale_table[x + y * mb_stride]);
}
- if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
- int mb_type = p->mb_type[x + y * s->mb_stride];
+ if (avctx->debug & FF_DEBUG_MB_TYPE) {
+ int mb_type = mbtype_table[x + y * mb_stride];
// Type & MV direction
if (IS_PCM(mb_type))
- av_log(s->avctx, AV_LOG_DEBUG, "P");
+ av_log(avctx, AV_LOG_DEBUG, "P");
else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
- av_log(s->avctx, AV_LOG_DEBUG, "A");
+ av_log(avctx, AV_LOG_DEBUG, "A");
else if (IS_INTRA4x4(mb_type))
- av_log(s->avctx, AV_LOG_DEBUG, "i");
+ av_log(avctx, AV_LOG_DEBUG, "i");
else if (IS_INTRA16x16(mb_type))
- av_log(s->avctx, AV_LOG_DEBUG, "I");
+ av_log(avctx, AV_LOG_DEBUG, "I");
else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
- av_log(s->avctx, AV_LOG_DEBUG, "d");
+ av_log(avctx, AV_LOG_DEBUG, "d");
else if (IS_DIRECT(mb_type))
- av_log(s->avctx, AV_LOG_DEBUG, "D");
+ av_log(avctx, AV_LOG_DEBUG, "D");
else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
- av_log(s->avctx, AV_LOG_DEBUG, "g");
+ av_log(avctx, AV_LOG_DEBUG, "g");
else if (IS_GMC(mb_type))
- av_log(s->avctx, AV_LOG_DEBUG, "G");
+ av_log(avctx, AV_LOG_DEBUG, "G");
else if (IS_SKIP(mb_type))
- av_log(s->avctx, AV_LOG_DEBUG, "S");
+ av_log(avctx, AV_LOG_DEBUG, "S");
else if (!USES_LIST(mb_type, 1))
- av_log(s->avctx, AV_LOG_DEBUG, ">");
+ av_log(avctx, AV_LOG_DEBUG, ">");
else if (!USES_LIST(mb_type, 0))
- av_log(s->avctx, AV_LOG_DEBUG, "<");
+ av_log(avctx, AV_LOG_DEBUG, "<");
else {
- assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
- av_log(s->avctx, AV_LOG_DEBUG, "X");
+ av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
+ av_log(avctx, AV_LOG_DEBUG, "X");
}
// segmentation
.init = ff_msmpeg4_decode_init,
.close = ff_h263_decode_end,
.decode = ff_h263_decode_frame,
- .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1,
+ .max_lowres = 3,
.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_NONE
.init = ff_msmpeg4_decode_init,
.close = ff_h263_decode_end,
.decode = ff_h263_decode_frame,
- .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1,
+ .max_lowres = 3,
.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_NONE
.init = ff_msmpeg4_decode_init,
.close = ff_h263_decode_end,
.decode = ff_h263_decode_frame,
- .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1,
+ .max_lowres = 3,
.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_NONE
.init = ff_msmpeg4_decode_init,
.close = ff_h263_decode_end,
.decode = ff_h263_decode_frame,
- .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1,
+ .max_lowres = 3,
.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_NONE
.init = mxpeg_decode_init,
.close = mxpeg_decode_end,
.decode = mxpeg_decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
+ .max_lowres = 3,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
return 0;
}
- .capabilities = CODEC_CAP_DELAY,
+
+static const enum AVPixelFormat pix_fmts_nvenc[] = {
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_NV12,
+ AV_PIX_FMT_YUV444P,
+ AV_PIX_FMT_NONE
+};
+
+#define OFFSET(x) offsetof(NvencContext, x)
+#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
+/* Encoder options shared by the nvenc, nvenc_h264 and nvenc_hevc wrappers. */
+static const AVOption options[] = {
+    { "preset", "Set the encoding preset (one of hq, hp, bd, ll, llhq, llhp, default)", OFFSET(preset), AV_OPT_TYPE_STRING, { .str = "hq" }, 0, 0, VE },
+    { "profile", "Set the encoding profile (high, main or baseline)", OFFSET(profile), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE },
+    { "level", "Set the encoding level restriction (auto, 1.0, 1.0b, 1.1, 1.2, ..., 4.2, 5.0, 5.1)", OFFSET(level), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE },
+    { "tier", "Set the encoding tier (main or high)", OFFSET(tier), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE },
+    { "cbr", "Use cbr encoding mode", OFFSET(cbr), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
+    { "2pass", "Use 2pass cbr encoding mode", OFFSET(twopass), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VE },
+    { "gpu", "Selects which NVENC capable GPU to use. First GPU is 0, second is 1, and so on.", OFFSET(gpu), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
+    { "delay", "Delays frame output by the given amount of frames.", OFFSET(buffer_delay), AV_OPT_TYPE_INT, { .i64 = INT_MAX }, 0, INT_MAX, VE },
+    { NULL }
+};
+
+/* -1 / 0 defaults mean "let the nvenc preset decide" for these rate-control
+ * knobs instead of inheriting the generic lavc defaults. */
+static const AVCodecDefault nvenc_defaults[] = {
+    { "b", "0" },
+    { "qmin", "-1" },
+    { "qmax", "-1" },
+    { "qdiff", "-1" },
+    { "qblur", "-1" },
+    { "qcomp", "-1" },
+    { NULL },
+};
+
+#if CONFIG_NVENC_ENCODER
+static const AVClass nvenc_class = {
+ .class_name = "nvenc",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+AVCodec ff_nvenc_encoder = {
+ .name = "nvenc",
+ .long_name = NULL_IF_CONFIG_SMALL("Nvidia NVENC h264 encoder"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_H264,
+ .priv_data_size = sizeof(NvencContext),
+ .init = nvenc_encode_init,
+ .encode2 = nvenc_encode_frame,
+ .close = nvenc_encode_close,
- .capabilities = CODEC_CAP_DELAY,
++ .capabilities = AV_CODEC_CAP_DELAY,
+ .priv_class = &nvenc_class,
+ .defaults = nvenc_defaults,
+ .pix_fmts = pix_fmts_nvenc,
+};
+#endif
+
+/* Add an alias for nvenc_h264 */
+#if CONFIG_NVENC_H264_ENCODER
+static const AVClass nvenc_h264_class = {
+ .class_name = "nvenc_h264",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+AVCodec ff_nvenc_h264_encoder = {
+ .name = "nvenc_h264",
+ .long_name = NULL_IF_CONFIG_SMALL("Nvidia NVENC h264 encoder"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_H264,
+ .priv_data_size = sizeof(NvencContext),
+ .init = nvenc_encode_init,
+ .encode2 = nvenc_encode_frame,
+ .close = nvenc_encode_close,
- .capabilities = CODEC_CAP_DELAY,
++ .capabilities = AV_CODEC_CAP_DELAY,
+ .priv_class = &nvenc_h264_class,
+ .defaults = nvenc_defaults,
+ .pix_fmts = pix_fmts_nvenc,
+};
+#endif
+
+#if CONFIG_NVENC_HEVC_ENCODER
+static const AVClass nvenc_hevc_class = {
+ .class_name = "nvenc_hevc",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+AVCodec ff_nvenc_hevc_encoder = {
+ .name = "nvenc_hevc",
+ .long_name = NULL_IF_CONFIG_SMALL("Nvidia NVENC hevc encoder"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_H265,
+ .priv_data_size = sizeof(NvencContext),
+ .init = nvenc_encode_init,
+ .encode2 = nvenc_encode_frame,
+ .close = nvenc_encode_close,
++ .capabilities = AV_CODEC_CAP_DELAY,
+ .priv_class = &nvenc_hevc_class,
+ .defaults = nvenc_defaults,
+ .pix_fmts = pix_fmts_nvenc,
+};
+#endif
return 0;
}
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS /*| CODEC_CAP_DRAW_HORIZ_BAND*/,
+#if CONFIG_APNG_DECODER
+AVCodec ff_apng_decoder = {
+ .name = "apng",
+ .long_name = NULL_IF_CONFIG_SMALL("APNG (Animated Portable Network Graphics) image"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_APNG,
+ .priv_data_size = sizeof(PNGDecContext),
+ .init = png_dec_init,
+ .close = png_dec_end,
+ .decode = decode_frame_apng,
+ .init_thread_copy = ONLY_IF_THREADS_ENABLED(png_dec_init),
+ .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
++ .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
+};
+#endif
+
+#if CONFIG_PNG_DECODER
AVCodec ff_png_decoder = {
.name = "png",
.long_name = NULL_IF_CONFIG_SMALL("PNG (Portable Network Graphics) image"),
.priv_data_size = sizeof(PNGDecContext),
.init = png_dec_init,
.close = png_dec_end,
- .decode = decode_frame,
- .capabilities = AV_CODEC_CAP_DR1 /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
+ .decode = decode_frame_png,
+ .init_thread_copy = ONLY_IF_THREADS_ENABLED(png_dec_init),
+ .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS /*| CODEC_CAP_DRAW_HORIZ_BAND*/,
++ .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
};
+#endif
.id = AV_CODEC_ID_PNG,
.priv_data_size = sizeof(PNGEncContext),
.init = png_enc_init,
- .encode2 = encode_frame,
+ .close = png_enc_close,
+ .encode2 = encode_png,
- .capabilities = CODEC_CAP_FRAME_THREADS | CODEC_CAP_INTRA_ONLY,
++ .capabilities = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
+ .pix_fmts = (const enum AVPixelFormat[]) {
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_RGBA,
+ AV_PIX_FMT_RGB48BE, AV_PIX_FMT_RGBA64BE,
+ AV_PIX_FMT_PAL8,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY8A,
+ AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_YA16BE,
+ AV_PIX_FMT_MONOBLACK, AV_PIX_FMT_NONE
+ },
+ .priv_class = &pngenc_class,
+};
+
+AVCodec ff_apng_encoder = {
+ .name = "apng",
+ .long_name = NULL_IF_CONFIG_SMALL("APNG (Animated Portable Network Graphics) image"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_APNG,
+ .priv_data_size = sizeof(PNGEncContext),
+ .init = png_enc_init,
+ .close = png_enc_close,
+ .encode2 = encode_apng,
.pix_fmts = (const enum AVPixelFormat[]) {
- AV_PIX_FMT_RGB24, AV_PIX_FMT_RGB32, AV_PIX_FMT_PAL8, AV_PIX_FMT_GRAY8,
- AV_PIX_FMT_RGBA64BE, AV_PIX_FMT_RGB48BE, AV_PIX_FMT_GRAY16BE,
+ AV_PIX_FMT_RGB24, AV_PIX_FMT_RGBA,
+ AV_PIX_FMT_RGB48BE, AV_PIX_FMT_RGBA64BE,
+ AV_PIX_FMT_PAL8,
+ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY8A,
+ AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_YA16BE,
AV_PIX_FMT_MONOBLACK, AV_PIX_FMT_NONE
},
+ .priv_class = &apngenc_class,
};
--- /dev/null
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_SLICE_THREADS,
+/*
+ * Copyright (c) 2010-2011 Maxim Poliakovski
+ * Copyright (c) 2010-2011 Elvis Presley
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Known FOURCCs: 'apch' (HQ), 'apcn' (SD), 'apcs' (LT), 'acpo' (Proxy), 'ap4h' (4444)
+ */
+
+//#define DEBUG
+
+#define LONG_BITSTREAM_READER
+
+#include "avcodec.h"
+#include "get_bits.h"
+#include "idctdsp.h"
+#include "internal.h"
+#include "simple_idct.h"
+#include "proresdec.h"
+#include "proresdata.h"
+
+static void permute(uint8_t *dst, const uint8_t *src, const uint8_t permutation[64])
+{
+ int i;
+ for (i = 0; i < 64; i++)
+ dst[i] = permutation[src[i]];
+}
+
+static av_cold int decode_init(AVCodecContext *avctx)
+{
+ ProresContext *ctx = avctx->priv_data;
+ uint8_t idct_permutation[64];
+
+ avctx->bits_per_raw_sample = 10;
+
+ ff_blockdsp_init(&ctx->bdsp, avctx);
+ ff_proresdsp_init(&ctx->prodsp, avctx);
+
+ ff_init_scantable_permutation(idct_permutation,
+ ctx->prodsp.idct_permutation_type);
+
+ permute(ctx->progressive_scan, ff_prores_progressive_scan, idct_permutation);
+ permute(ctx->interlaced_scan, ff_prores_interlaced_scan, idct_permutation);
+
+ return 0;
+}
+
+static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
+ const int data_size, AVCodecContext *avctx)
+{
+ int hdr_size, width, height, flags;
+ int version;
+ const uint8_t *ptr;
+
+ hdr_size = AV_RB16(buf);
+ av_dlog(avctx, "header size %d\n", hdr_size);
+ if (hdr_size > data_size) {
+ av_log(avctx, AV_LOG_ERROR, "error, wrong header size\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ version = AV_RB16(buf + 2);
+ av_dlog(avctx, "%.4s version %d\n", buf+4, version);
+ if (version > 1) {
+ av_log(avctx, AV_LOG_ERROR, "unsupported version: %d\n", version);
+ return AVERROR_PATCHWELCOME;
+ }
+
+ width = AV_RB16(buf + 8);
+ height = AV_RB16(buf + 10);
+ if (width != avctx->width || height != avctx->height) {
+ av_log(avctx, AV_LOG_ERROR, "picture resolution change: %dx%d -> %dx%d\n",
+ avctx->width, avctx->height, width, height);
+ return AVERROR_PATCHWELCOME;
+ }
+
+ ctx->frame_type = (buf[12] >> 2) & 3;
+ ctx->alpha_info = buf[17] & 0xf;
+
+ if (ctx->alpha_info > 2) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid alpha mode %d\n", ctx->alpha_info);
+ return AVERROR_INVALIDDATA;
+ }
+ if (avctx->skip_alpha) ctx->alpha_info = 0;
+
+ av_dlog(avctx, "frame type %d\n", ctx->frame_type);
+
+ if (ctx->frame_type == 0) {
+ ctx->scan = ctx->progressive_scan; // permuted
+ } else {
+ ctx->scan = ctx->interlaced_scan; // permuted
+ ctx->frame->interlaced_frame = 1;
+ ctx->frame->top_field_first = ctx->frame_type == 1;
+ }
+
+ if (ctx->alpha_info) {
+ avctx->pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUVA444P10 : AV_PIX_FMT_YUVA422P10;
+ } else {
+ avctx->pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUV444P10 : AV_PIX_FMT_YUV422P10;
+ }
+
+ ptr = buf + 20;
+ flags = buf[19];
+ av_dlog(avctx, "flags %x\n", flags);
+
+ if (flags & 2) {
+ if(buf + data_size - ptr < 64) {
+ av_log(avctx, AV_LOG_ERROR, "Header truncated\n");
+ return AVERROR_INVALIDDATA;
+ }
+ permute(ctx->qmat_luma, ctx->prodsp.idct_permutation, ptr);
+ ptr += 64;
+ } else {
+ memset(ctx->qmat_luma, 4, 64);
+ }
+
+ if (flags & 1) {
+ if(buf + data_size - ptr < 64) {
+ av_log(avctx, AV_LOG_ERROR, "Header truncated\n");
+ return AVERROR_INVALIDDATA;
+ }
+ permute(ctx->qmat_chroma, ctx->prodsp.idct_permutation, ptr);
+ } else {
+ memset(ctx->qmat_chroma, 4, 64);
+ }
+
+ return hdr_size;
+}
+
+static int decode_picture_header(AVCodecContext *avctx, const uint8_t *buf, const int buf_size)
+{
+ ProresContext *ctx = avctx->priv_data;
+ int i, hdr_size, slice_count;
+ unsigned pic_data_size;
+ int log2_slice_mb_width, log2_slice_mb_height;
+ int slice_mb_count, mb_x, mb_y;
+ const uint8_t *data_ptr, *index_ptr;
+
+ hdr_size = buf[0] >> 3;
+ if (hdr_size < 8 || hdr_size > buf_size) {
+ av_log(avctx, AV_LOG_ERROR, "error, wrong picture header size\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ pic_data_size = AV_RB32(buf + 1);
+ if (pic_data_size > buf_size) {
+ av_log(avctx, AV_LOG_ERROR, "error, wrong picture data size\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ log2_slice_mb_width = buf[7] >> 4;
+ log2_slice_mb_height = buf[7] & 0xF;
+ if (log2_slice_mb_width > 3 || log2_slice_mb_height) {
+ av_log(avctx, AV_LOG_ERROR, "unsupported slice resolution: %dx%d\n",
+ 1 << log2_slice_mb_width, 1 << log2_slice_mb_height);
+ return AVERROR_INVALIDDATA;
+ }
+
+ ctx->mb_width = (avctx->width + 15) >> 4;
+ if (ctx->frame_type)
+ ctx->mb_height = (avctx->height + 31) >> 5;
+ else
+ ctx->mb_height = (avctx->height + 15) >> 4;
+
+ slice_count = AV_RB16(buf + 5);
+
+ if (ctx->slice_count != slice_count || !ctx->slices) {
+ av_freep(&ctx->slices);
+ ctx->slice_count = 0;
+ ctx->slices = av_mallocz_array(slice_count, sizeof(*ctx->slices));
+ if (!ctx->slices)
+ return AVERROR(ENOMEM);
+ ctx->slice_count = slice_count;
+ }
+
+ if (!slice_count)
+ return AVERROR(EINVAL);
+
+ if (hdr_size + slice_count*2 > buf_size) {
+ av_log(avctx, AV_LOG_ERROR, "error, wrong slice count\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ // parse slice information
+ index_ptr = buf + hdr_size;
+ data_ptr = index_ptr + slice_count*2;
+
+ slice_mb_count = 1 << log2_slice_mb_width;
+ mb_x = 0;
+ mb_y = 0;
+
+ for (i = 0; i < slice_count; i++) {
+ SliceContext *slice = &ctx->slices[i];
+
+ slice->data = data_ptr;
+ data_ptr += AV_RB16(index_ptr + i*2);
+
+ while (ctx->mb_width - mb_x < slice_mb_count)
+ slice_mb_count >>= 1;
+
+ slice->mb_x = mb_x;
+ slice->mb_y = mb_y;
+ slice->mb_count = slice_mb_count;
+ slice->data_size = data_ptr - slice->data;
+
+ if (slice->data_size < 6) {
+ av_log(avctx, AV_LOG_ERROR, "error, wrong slice data size\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ mb_x += slice_mb_count;
+ if (mb_x == ctx->mb_width) {
+ slice_mb_count = 1 << log2_slice_mb_width;
+ mb_x = 0;
+ mb_y++;
+ }
+ if (data_ptr > buf + buf_size) {
+ av_log(avctx, AV_LOG_ERROR, "error, slice out of bounds\n");
+ return AVERROR_INVALIDDATA;
+ }
+ }
+
+ if (mb_x || mb_y != ctx->mb_height) {
+ av_log(avctx, AV_LOG_ERROR, "error wrong mb count y %d h %d\n",
+ mb_y, ctx->mb_height);
+ return AVERROR_INVALIDDATA;
+ }
+
+ return pic_data_size;
+}
+
+#define DECODE_CODEWORD(val, codebook) \
+ do { \
+ unsigned int rice_order, exp_order, switch_bits; \
+ unsigned int q, buf, bits; \
+ \
+ UPDATE_CACHE(re, gb); \
+ buf = GET_CACHE(re, gb); \
+ \
+ /* number of bits to switch between rice and exp golomb */ \
+ switch_bits = codebook & 3; \
+ rice_order = codebook >> 5; \
+ exp_order = (codebook >> 2) & 7; \
+ \
+ q = 31 - av_log2(buf); \
+ \
+ if (q > switch_bits) { /* exp golomb */ \
+ bits = exp_order - switch_bits + (q<<1); \
+ val = SHOW_UBITS(re, gb, bits) - (1 << exp_order) + \
+ ((switch_bits + 1) << rice_order); \
+ SKIP_BITS(re, gb, bits); \
+ } else if (rice_order) { \
+ SKIP_BITS(re, gb, q+1); \
+ val = (q << rice_order) + SHOW_UBITS(re, gb, rice_order); \
+ SKIP_BITS(re, gb, rice_order); \
+ } else { \
+ val = q; \
+ SKIP_BITS(re, gb, q+1); \
+ } \
+ } while (0)
+
+#define TOSIGNED(x) (((x) >> 1) ^ (-((x) & 1)))
+
+#define FIRST_DC_CB 0xB8
+
+static const uint8_t dc_codebook[7] = { 0x04, 0x28, 0x28, 0x4D, 0x4D, 0x70, 0x70};
+
+static av_always_inline void decode_dc_coeffs(GetBitContext *gb, int16_t *out,
+ int blocks_per_slice)
+{
+ int16_t prev_dc;
+ int code, i, sign;
+
+ OPEN_READER(re, gb);
+
+ DECODE_CODEWORD(code, FIRST_DC_CB);
+ prev_dc = TOSIGNED(code);
+ out[0] = prev_dc;
+
+ out += 64; // dc coeff for the next block
+
+ code = 5;
+ sign = 0;
+ for (i = 1; i < blocks_per_slice; i++, out += 64) {
+ DECODE_CODEWORD(code, dc_codebook[FFMIN(code, 6U)]);
+ if(code) sign ^= -(code & 1);
+ else sign = 0;
+ prev_dc += (((code + 1) >> 1) ^ sign) - sign;
+ out[0] = prev_dc;
+ }
+ CLOSE_READER(re, gb);
+}
+
+// adaptive codebook switching lut according to previous run/level values
+static const uint8_t run_to_cb[16] = { 0x06, 0x06, 0x05, 0x05, 0x04, 0x29, 0x29, 0x29, 0x29, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x4C };
+static const uint8_t lev_to_cb[10] = { 0x04, 0x0A, 0x05, 0x06, 0x04, 0x28, 0x28, 0x28, 0x28, 0x4C };
+
+static av_always_inline int decode_ac_coeffs(AVCodecContext *avctx, GetBitContext *gb,
+ int16_t *out, int blocks_per_slice)
+{
+ ProresContext *ctx = avctx->priv_data;
+ int block_mask, sign;
+ unsigned pos, run, level;
+ int max_coeffs, i, bits_left;
+ int log2_block_count = av_log2(blocks_per_slice);
+
+ OPEN_READER(re, gb);
+ UPDATE_CACHE(re, gb); \
+ run = 4;
+ level = 2;
+
+ max_coeffs = 64 << log2_block_count;
+ block_mask = blocks_per_slice - 1;
+
+ for (pos = block_mask;;) {
+ bits_left = gb->size_in_bits - re_index;
+ if (!bits_left || (bits_left < 32 && !SHOW_UBITS(re, gb, bits_left)))
+ break;
+
+ DECODE_CODEWORD(run, run_to_cb[FFMIN(run, 15)]);
+ pos += run + 1;
+ if (pos >= max_coeffs) {
+ av_log(avctx, AV_LOG_ERROR, "ac tex damaged %d, %d\n", pos, max_coeffs);
+ return AVERROR_INVALIDDATA;
+ }
+
+ DECODE_CODEWORD(level, lev_to_cb[FFMIN(level, 9)]);
+ level += 1;
+
+ i = pos >> log2_block_count;
+
+ sign = SHOW_SBITS(re, gb, 1);
+ SKIP_BITS(re, gb, 1);
+ out[((pos & block_mask) << 6) + ctx->scan[i]] = ((level ^ sign) - sign);
+ }
+
+ CLOSE_READER(re, gb);
+ return 0;
+}
+
+static int decode_slice_luma(AVCodecContext *avctx, SliceContext *slice,
+ uint16_t *dst, int dst_stride,
+ const uint8_t *buf, unsigned buf_size,
+ const int16_t *qmat)
+{
+ ProresContext *ctx = avctx->priv_data;
+ LOCAL_ALIGNED_16(int16_t, blocks, [8*4*64]);
+ int16_t *block;
+ GetBitContext gb;
+ int i, blocks_per_slice = slice->mb_count<<2;
+ int ret;
+
+ for (i = 0; i < blocks_per_slice; i++)
+ ctx->bdsp.clear_block(blocks+(i<<6));
+
+ init_get_bits(&gb, buf, buf_size << 3);
+
+ decode_dc_coeffs(&gb, blocks, blocks_per_slice);
+ if ((ret = decode_ac_coeffs(avctx, &gb, blocks, blocks_per_slice)) < 0)
+ return ret;
+
+ block = blocks;
+ for (i = 0; i < slice->mb_count; i++) {
+ ctx->prodsp.idct_put(dst, dst_stride, block+(0<<6), qmat);
+ ctx->prodsp.idct_put(dst +8, dst_stride, block+(1<<6), qmat);
+ ctx->prodsp.idct_put(dst+4*dst_stride , dst_stride, block+(2<<6), qmat);
+ ctx->prodsp.idct_put(dst+4*dst_stride+8, dst_stride, block+(3<<6), qmat);
+ block += 4*64;
+ dst += 16;
+ }
+ return 0;
+}
+
+static int decode_slice_chroma(AVCodecContext *avctx, SliceContext *slice,
+ uint16_t *dst, int dst_stride,
+ const uint8_t *buf, unsigned buf_size,
+ const int16_t *qmat, int log2_blocks_per_mb)
+{
+ ProresContext *ctx = avctx->priv_data;
+ LOCAL_ALIGNED_16(int16_t, blocks, [8*4*64]);
+ int16_t *block;
+ GetBitContext gb;
+ int i, j, blocks_per_slice = slice->mb_count << log2_blocks_per_mb;
+ int ret;
+
+ for (i = 0; i < blocks_per_slice; i++)
+ ctx->bdsp.clear_block(blocks+(i<<6));
+
+ init_get_bits(&gb, buf, buf_size << 3);
+
+ decode_dc_coeffs(&gb, blocks, blocks_per_slice);
+ if ((ret = decode_ac_coeffs(avctx, &gb, blocks, blocks_per_slice)) < 0)
+ return ret;
+
+ block = blocks;
+ for (i = 0; i < slice->mb_count; i++) {
+ for (j = 0; j < log2_blocks_per_mb; j++) {
+ ctx->prodsp.idct_put(dst, dst_stride, block+(0<<6), qmat);
+ ctx->prodsp.idct_put(dst+4*dst_stride, dst_stride, block+(1<<6), qmat);
+ block += 2*64;
+ dst += 8;
+ }
+ }
+ return 0;
+}
+
+static void unpack_alpha(GetBitContext *gb, uint16_t *dst, int num_coeffs,
+ const int num_bits)
+{
+ const int mask = (1 << num_bits) - 1;
+ int i, idx, val, alpha_val;
+
+ idx = 0;
+ alpha_val = mask;
+ do {
+ do {
+ if (get_bits1(gb)) {
+ val = get_bits(gb, num_bits);
+ } else {
+ int sign;
+ val = get_bits(gb, num_bits == 16 ? 7 : 4);
+ sign = val & 1;
+ val = (val + 2) >> 1;
+ if (sign)
+ val = -val;
+ }
+ alpha_val = (alpha_val + val) & mask;
+ if (num_bits == 16) {
+ dst[idx++] = alpha_val >> 6;
+ } else {
+ dst[idx++] = (alpha_val << 2) | (alpha_val >> 6);
+ }
+ if (idx >= num_coeffs)
+ break;
+ } while (get_bits_left(gb)>0 && get_bits1(gb));
+ val = get_bits(gb, 4);
+ if (!val)
+ val = get_bits(gb, 11);
+ if (idx + val > num_coeffs)
+ val = num_coeffs - idx;
+ if (num_bits == 16) {
+ for (i = 0; i < val; i++)
+ dst[idx++] = alpha_val >> 6;
+ } else {
+ for (i = 0; i < val; i++)
+ dst[idx++] = (alpha_val << 2) | (alpha_val >> 6);
+
+ }
+ } while (idx < num_coeffs);
+}
+
+/**
+ * Decode alpha slice plane.
+ */
+static void decode_slice_alpha(ProresContext *ctx,
+ uint16_t *dst, int dst_stride,
+ const uint8_t *buf, int buf_size,
+ int blocks_per_slice)
+{
+ GetBitContext gb;
+ int i;
+ LOCAL_ALIGNED_16(int16_t, blocks, [8*4*64]);
+ int16_t *block;
+
+ for (i = 0; i < blocks_per_slice<<2; i++)
+ ctx->bdsp.clear_block(blocks+(i<<6));
+
+ init_get_bits(&gb, buf, buf_size << 3);
+
+ if (ctx->alpha_info == 2) {
+ unpack_alpha(&gb, blocks, blocks_per_slice * 4 * 64, 16);
+ } else {
+ unpack_alpha(&gb, blocks, blocks_per_slice * 4 * 64, 8);
+ }
+
+ block = blocks;
+ for (i = 0; i < 16; i++) {
+ memcpy(dst, block, 16 * blocks_per_slice * sizeof(*dst));
+ dst += dst_stride >> 1;
+ block += 16 * blocks_per_slice;
+ }
+}
+
+static int decode_slice_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
+{
+ ProresContext *ctx = avctx->priv_data;
+ SliceContext *slice = &ctx->slices[jobnr];
+ const uint8_t *buf = slice->data;
+ AVFrame *pic = ctx->frame;
+ int i, hdr_size, qscale, log2_chroma_blocks_per_mb;
+ int luma_stride, chroma_stride;
+ int y_data_size, u_data_size, v_data_size, a_data_size;
+ uint8_t *dest_y, *dest_u, *dest_v, *dest_a;
+ int16_t qmat_luma_scaled[64];
+ int16_t qmat_chroma_scaled[64];
+ int mb_x_shift;
+ int ret;
+
+ slice->ret = -1;
+ //av_log(avctx, AV_LOG_INFO, "slice %d mb width %d mb x %d y %d\n",
+ // jobnr, slice->mb_count, slice->mb_x, slice->mb_y);
+
+ // slice header
+ hdr_size = buf[0] >> 3;
+ qscale = av_clip(buf[1], 1, 224);
+ qscale = qscale > 128 ? qscale - 96 << 2: qscale;
+ y_data_size = AV_RB16(buf + 2);
+ u_data_size = AV_RB16(buf + 4);
+ v_data_size = slice->data_size - y_data_size - u_data_size - hdr_size;
+ if (hdr_size > 7) v_data_size = AV_RB16(buf + 6);
+ a_data_size = slice->data_size - y_data_size - u_data_size -
+ v_data_size - hdr_size;
+
+ if (y_data_size < 0 || u_data_size < 0 || v_data_size < 0
+ || hdr_size+y_data_size+u_data_size+v_data_size > slice->data_size){
+ av_log(avctx, AV_LOG_ERROR, "invalid plane data size\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ buf += hdr_size;
+
+ for (i = 0; i < 64; i++) {
+ qmat_luma_scaled [i] = ctx->qmat_luma [i] * qscale;
+ qmat_chroma_scaled[i] = ctx->qmat_chroma[i] * qscale;
+ }
+
+ if (ctx->frame_type == 0) {
+ luma_stride = pic->linesize[0];
+ chroma_stride = pic->linesize[1];
+ } else {
+ luma_stride = pic->linesize[0] << 1;
+ chroma_stride = pic->linesize[1] << 1;
+ }
+
+ if (avctx->pix_fmt == AV_PIX_FMT_YUV444P10 || avctx->pix_fmt == AV_PIX_FMT_YUVA444P10) {
+ mb_x_shift = 5;
+ log2_chroma_blocks_per_mb = 2;
+ } else {
+ mb_x_shift = 4;
+ log2_chroma_blocks_per_mb = 1;
+ }
+
+ dest_y = pic->data[0] + (slice->mb_y << 4) * luma_stride + (slice->mb_x << 5);
+ dest_u = pic->data[1] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift);
+ dest_v = pic->data[2] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift);
+ dest_a = pic->data[3] + (slice->mb_y << 4) * luma_stride + (slice->mb_x << 5);
+
+ if (ctx->frame_type && ctx->first_field ^ ctx->frame->top_field_first) {
+ dest_y += pic->linesize[0];
+ dest_u += pic->linesize[1];
+ dest_v += pic->linesize[2];
+ dest_a += pic->linesize[3];
+ }
+
+ ret = decode_slice_luma(avctx, slice, (uint16_t*)dest_y, luma_stride,
+ buf, y_data_size, qmat_luma_scaled);
+ if (ret < 0)
+ return ret;
+
+ if (!(avctx->flags & AV_CODEC_FLAG_GRAY)) {
+ ret = decode_slice_chroma(avctx, slice, (uint16_t*)dest_u, chroma_stride,
+ buf + y_data_size, u_data_size,
+ qmat_chroma_scaled, log2_chroma_blocks_per_mb);
+ if (ret < 0)
+ return ret;
+
+ ret = decode_slice_chroma(avctx, slice, (uint16_t*)dest_v, chroma_stride,
+ buf + y_data_size + u_data_size, v_data_size,
+ qmat_chroma_scaled, log2_chroma_blocks_per_mb);
+ if (ret < 0)
+ return ret;
+ }
+ /* decode alpha plane if available */
+ if (ctx->alpha_info && pic->data[3] && a_data_size)
+ decode_slice_alpha(ctx, (uint16_t*)dest_a, luma_stride,
+ buf + y_data_size + u_data_size + v_data_size,
+ a_data_size, slice->mb_count);
+
+ slice->ret = 0;
+ return 0;
+}
+
+static int decode_picture(AVCodecContext *avctx)
+{
+ ProresContext *ctx = avctx->priv_data;
+ int i;
+
+ avctx->execute2(avctx, decode_slice_thread, NULL, NULL, ctx->slice_count);
+
+ for (i = 0; i < ctx->slice_count; i++)
+ if (ctx->slices[i].ret < 0)
+ return ctx->slices[i].ret;
+
+ return 0;
+}
+
+static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
+ AVPacket *avpkt)
+{
+ ProresContext *ctx = avctx->priv_data;
+ AVFrame *frame = data;
+ const uint8_t *buf = avpkt->data;
+ int buf_size = avpkt->size;
+ int frame_hdr_size, pic_size, ret;
+
+ if (buf_size < 28 || AV_RL32(buf + 4) != AV_RL32("icpf")) {
+ av_log(avctx, AV_LOG_ERROR, "invalid frame header\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ ctx->frame = frame;
+ ctx->frame->pict_type = AV_PICTURE_TYPE_I;
+ ctx->frame->key_frame = 1;
+ ctx->first_field = 1;
+
+ buf += 8;
+ buf_size -= 8;
+
+ frame_hdr_size = decode_frame_header(ctx, buf, buf_size, avctx);
+ if (frame_hdr_size < 0)
+ return frame_hdr_size;
+
+ buf += frame_hdr_size;
+ buf_size -= frame_hdr_size;
+
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
+ return ret;
+
+ decode_picture:
+ pic_size = decode_picture_header(avctx, buf, buf_size);
+ if (pic_size < 0) {
+ av_log(avctx, AV_LOG_ERROR, "error decoding picture header\n");
+ return pic_size;
+ }
+
+ if ((ret = decode_picture(avctx)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "error decoding picture\n");
+ return ret;
+ }
+
+ buf += pic_size;
+ buf_size -= pic_size;
+
+ if (ctx->frame_type && buf_size > 0 && ctx->first_field) {
+ ctx->first_field = 0;
+ goto decode_picture;
+ }
+
+ *got_frame = 1;
+
+ return avpkt->size;
+}
+
+static av_cold int decode_close(AVCodecContext *avctx)
+{
+ ProresContext *ctx = avctx->priv_data;
+
+ av_freep(&ctx->slices);
+
+ return 0;
+}
+
+AVCodec ff_prores_decoder = {
+ .name = "prores",
+ .long_name = NULL_IF_CONFIG_SMALL("ProRes"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_PRORES,
+ .priv_data_size = sizeof(ProresContext),
+ .init = decode_init,
+ .close = decode_close,
+ .decode = decode_frame,
++ .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS,
+};
--- /dev/null
- .capabilities = CODEC_CAP_FRAME_THREADS | CODEC_CAP_INTRA_ONLY,
+/*
+ * Apple ProRes encoder
+ *
+ * Copyright (c) 2011 Anatoliy Wasserman
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Apple ProRes encoder (Anatoliy Wasserman version)
+ * Known FOURCCs: 'apch' (HQ), 'apcn' (SD), 'apcs' (LT), 'acpo' (Proxy)
+ */
+
+#include "avcodec.h"
+#include "dct.h"
+#include "internal.h"
+#include "put_bits.h"
+#include "bytestream.h"
+#include "fdctdsp.h"
+
+#define DEFAULT_SLICE_MB_WIDTH 8
+
+#define FF_PROFILE_PRORES_PROXY 0
+#define FF_PROFILE_PRORES_LT 1
+#define FF_PROFILE_PRORES_STANDARD 2
+#define FF_PROFILE_PRORES_HQ 3
+
+static const AVProfile profiles[] = {
+ { FF_PROFILE_PRORES_PROXY, "apco"},
+ { FF_PROFILE_PRORES_LT, "apcs"},
+ { FF_PROFILE_PRORES_STANDARD, "apcn"},
+ { FF_PROFILE_PRORES_HQ, "apch"},
+ { FF_PROFILE_UNKNOWN }
+};
+
+static const int qp_start_table[4] = { 4, 1, 1, 1 };
+static const int qp_end_table[4] = { 8, 9, 6, 6 };
+static const int bitrate_table[5] = { 1000, 2100, 3500, 5400 };
+
+static const uint8_t progressive_scan[64] = {
+ 0, 1, 8, 9, 2, 3, 10, 11,
+ 16, 17, 24, 25, 18, 19, 26, 27,
+ 4, 5, 12, 20, 13, 6, 7, 14,
+ 21, 28, 29, 22, 15, 23, 30, 31,
+ 32, 33, 40, 48, 41, 34, 35, 42,
+ 49, 56, 57, 50, 43, 36, 37, 44,
+ 51, 58, 59, 52, 45, 38, 39, 46,
+ 53, 60, 61, 54, 47, 55, 62, 63
+};
+
+static const uint8_t QMAT_LUMA[4][64] = {
+ {
+ 4, 7, 9, 11, 13, 14, 15, 63,
+ 7, 7, 11, 12, 14, 15, 63, 63,
+ 9, 11, 13, 14, 15, 63, 63, 63,
+ 11, 11, 13, 14, 63, 63, 63, 63,
+ 11, 13, 14, 63, 63, 63, 63, 63,
+ 13, 14, 63, 63, 63, 63, 63, 63,
+ 13, 63, 63, 63, 63, 63, 63, 63,
+ 63, 63, 63, 63, 63, 63, 63, 63
+ }, {
+ 4, 5, 6, 7, 9, 11, 13, 15,
+ 5, 5, 7, 8, 11, 13, 15, 17,
+ 6, 7, 9, 11, 13, 15, 15, 17,
+ 7, 7, 9, 11, 13, 15, 17, 19,
+ 7, 9, 11, 13, 14, 16, 19, 23,
+ 9, 11, 13, 14, 16, 19, 23, 29,
+ 9, 11, 13, 15, 17, 21, 28, 35,
+ 11, 13, 16, 17, 21, 28, 35, 41
+ }, {
+ 4, 4, 5, 5, 6, 7, 7, 9,
+ 4, 4, 5, 6, 7, 7, 9, 9,
+ 5, 5, 6, 7, 7, 9, 9, 10,
+ 5, 5, 6, 7, 7, 9, 9, 10,
+ 5, 6, 7, 7, 8, 9, 10, 12,
+ 6, 7, 7, 8, 9, 10, 12, 15,
+ 6, 7, 7, 9, 10, 11, 14, 17,
+ 7, 7, 9, 10, 11, 14, 17, 21
+ }, {
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 5,
+ 4, 4, 4, 4, 4, 4, 5, 5,
+ 4, 4, 4, 4, 4, 5, 5, 6,
+ 4, 4, 4, 4, 5, 5, 6, 7,
+ 4, 4, 4, 4, 5, 6, 7, 7
+ }
+};
+
+static const uint8_t QMAT_CHROMA[4][64] = {
+ {
+ 4, 7, 9, 11, 13, 14, 63, 63,
+ 7, 7, 11, 12, 14, 63, 63, 63,
+ 9, 11, 13, 14, 63, 63, 63, 63,
+ 11, 11, 13, 14, 63, 63, 63, 63,
+ 11, 13, 14, 63, 63, 63, 63, 63,
+ 13, 14, 63, 63, 63, 63, 63, 63,
+ 13, 63, 63, 63, 63, 63, 63, 63,
+ 63, 63, 63, 63, 63, 63, 63, 63
+ }, {
+ 4, 5, 6, 7, 9, 11, 13, 15,
+ 5, 5, 7, 8, 11, 13, 15, 17,
+ 6, 7, 9, 11, 13, 15, 15, 17,
+ 7, 7, 9, 11, 13, 15, 17, 19,
+ 7, 9, 11, 13, 14, 16, 19, 23,
+ 9, 11, 13, 14, 16, 19, 23, 29,
+ 9, 11, 13, 15, 17, 21, 28, 35,
+ 11, 13, 16, 17, 21, 28, 35, 41
+ }, {
+ 4, 4, 5, 5, 6, 7, 7, 9,
+ 4, 4, 5, 6, 7, 7, 9, 9,
+ 5, 5, 6, 7, 7, 9, 9, 10,
+ 5, 5, 6, 7, 7, 9, 9, 10,
+ 5, 6, 7, 7, 8, 9, 10, 12,
+ 6, 7, 7, 8, 9, 10, 12, 15,
+ 6, 7, 7, 9, 10, 11, 14, 17,
+ 7, 7, 9, 10, 11, 14, 17, 21
+ }, {
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 5,
+ 4, 4, 4, 4, 4, 4, 5, 5,
+ 4, 4, 4, 4, 4, 5, 5, 6,
+ 4, 4, 4, 4, 5, 5, 6, 7,
+ 4, 4, 4, 4, 5, 6, 7, 7
+ }
+};
+
+
+typedef struct {
+ FDCTDSPContext fdsp;
+ uint8_t* fill_y;
+ uint8_t* fill_u;
+ uint8_t* fill_v;
+
+ int qmat_luma[16][64];
+ int qmat_chroma[16][64];
+} ProresContext;
+
+static void encode_codeword(PutBitContext *pb, int val, int codebook)
+{
+ unsigned int rice_order, exp_order, switch_bits, first_exp, exp, zeros;
+
+ /* number of bits to switch between rice and exp golomb */
+ switch_bits = codebook & 3;
+ rice_order = codebook >> 5;
+ exp_order = (codebook >> 2) & 7;
+
+ first_exp = ((switch_bits + 1) << rice_order);
+
+ if (val >= first_exp) { /* exp golomb */
+ val -= first_exp;
+ val += (1 << exp_order);
+ exp = av_log2(val);
+ zeros = exp - exp_order + switch_bits + 1;
+ put_bits(pb, zeros, 0);
+ put_bits(pb, exp + 1, val);
+ } else if (rice_order) {
+ put_bits(pb, (val >> rice_order), 0);
+ put_bits(pb, 1, 1);
+ put_sbits(pb, rice_order, val);
+ } else {
+ put_bits(pb, val, 0);
+ put_bits(pb, 1, 1);
+ }
+}
+
+#define QSCALE(qmat,ind,val) ((val) / ((qmat)[ind]))
+#define TO_GOLOMB(val) (((val) << 1) ^ ((val) >> 31))
+#define DIFF_SIGN(val, sign) (((val) >> 31) ^ (sign))
+#define IS_NEGATIVE(val) ((((val) >> 31) ^ -1) + 1)
+#define TO_GOLOMB2(val,sign) ((val)==0 ? 0 : ((val) << 1) + (sign))
+
+static av_always_inline int get_level(int val)
+{
+ int sign = (val >> 31);
+ return (val ^ sign) - sign;
+}
+
+#define FIRST_DC_CB 0xB8
+
+static const uint8_t dc_codebook[7] = { 0x04, 0x28, 0x28, 0x4D, 0x4D, 0x70, 0x70};
+
+static void encode_dc_coeffs(PutBitContext *pb, int16_t *in,
+ int blocks_per_slice, int *qmat)
+{
+ int prev_dc, code;
+ int i, sign, idx;
+ int new_dc, delta, diff_sign, new_code;
+
+ prev_dc = QSCALE(qmat, 0, in[0] - 16384);
+ code = TO_GOLOMB(prev_dc);
+ encode_codeword(pb, code, FIRST_DC_CB);
+
+ code = 5; sign = 0; idx = 64;
+ for (i = 1; i < blocks_per_slice; i++, idx += 64) {
+ new_dc = QSCALE(qmat, 0, in[idx] - 16384);
+ delta = new_dc - prev_dc;
+ diff_sign = DIFF_SIGN(delta, sign);
+ new_code = TO_GOLOMB2(get_level(delta), diff_sign);
+
+ encode_codeword(pb, new_code, dc_codebook[FFMIN(code, 6)]);
+
+ code = new_code;
+ sign = delta >> 31;
+ prev_dc = new_dc;
+ }
+}
+
+static const uint8_t run_to_cb[16] = { 0x06, 0x06, 0x05, 0x05, 0x04, 0x29,
+ 0x29, 0x29, 0x29, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x4C };
+static const uint8_t lev_to_cb[10] = { 0x04, 0x0A, 0x05, 0x06, 0x04, 0x28,
+ 0x28, 0x28, 0x28, 0x4C };
+
+static void encode_ac_coeffs(AVCodecContext *avctx, PutBitContext *pb,
+ int16_t *in, int blocks_per_slice, int *qmat)
+{
+ int prev_run = 4;
+ int prev_level = 2;
+
+ int run = 0, level, code, i, j;
+ for (i = 1; i < 64; i++) {
+ int indp = progressive_scan[i];
+ for (j = 0; j < blocks_per_slice; j++) {
+ int val = QSCALE(qmat, indp, in[(j << 6) + indp]);
+ if (val) {
+ encode_codeword(pb, run, run_to_cb[FFMIN(prev_run, 15)]);
+
+ prev_run = run;
+ run = 0;
+ level = get_level(val);
+ code = level - 1;
+
+ encode_codeword(pb, code, lev_to_cb[FFMIN(prev_level, 9)]);
+
+ prev_level = level;
+
+ put_bits(pb, 1, IS_NEGATIVE(val));
+ } else {
+ ++run;
+ }
+ }
+ }
+}
+
+static void get(uint8_t *pixels, int stride, int16_t* block)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ AV_WN64(block, AV_RN64(pixels));
+ AV_WN64(block+4, AV_RN64(pixels+8));
+ pixels += stride;
+ block += 8;
+ }
+}
+
+static void fdct_get(FDCTDSPContext *fdsp, uint8_t *pixels, int stride, int16_t* block)
+{
+ get(pixels, stride, block);
+ fdsp->fdct(block);
+}
+
+static int encode_slice_plane(AVCodecContext *avctx, int mb_count,
+ uint8_t *src, int src_stride, uint8_t *buf, unsigned buf_size,
+ int *qmat, int chroma)
+{
+ ProresContext* ctx = avctx->priv_data;
+ FDCTDSPContext *fdsp = &ctx->fdsp;
+ LOCAL_ALIGNED(16, int16_t, blocks, [DEFAULT_SLICE_MB_WIDTH << 8]);
+ int16_t *block;
+ int i, blocks_per_slice;
+ PutBitContext pb;
+
+ block = blocks;
+ for (i = 0; i < mb_count; i++) {
+ fdct_get(fdsp, src, src_stride, block + (0 << 6));
+ fdct_get(fdsp, src + 8 * src_stride, src_stride, block + ((2 - chroma) << 6));
+ if (!chroma) {
+ fdct_get(fdsp, src + 16, src_stride, block + (1 << 6));
+ fdct_get(fdsp, src + 16 + 8 * src_stride, src_stride, block + (3 << 6));
+ }
+
+ block += (256 >> chroma);
+ src += (32 >> chroma);
+ }
+
+ blocks_per_slice = mb_count << (2 - chroma);
+ init_put_bits(&pb, buf, buf_size);
+
+ encode_dc_coeffs(&pb, blocks, blocks_per_slice, qmat);
+ encode_ac_coeffs(avctx, &pb, blocks, blocks_per_slice, qmat);
+
+ flush_put_bits(&pb);
+ return put_bits_ptr(&pb) - pb.buf;
+}
+
+static av_always_inline unsigned encode_slice_data(AVCodecContext *avctx,
+ uint8_t *dest_y, uint8_t *dest_u, uint8_t *dest_v, int luma_stride,
+ int chroma_stride, unsigned mb_count, uint8_t *buf, unsigned data_size,
+ unsigned* y_data_size, unsigned* u_data_size, unsigned* v_data_size,
+ int qp)
+{
+ ProresContext* ctx = avctx->priv_data;
+
+ *y_data_size = encode_slice_plane(avctx, mb_count, dest_y, luma_stride,
+ buf, data_size, ctx->qmat_luma[qp - 1], 0);
+
+ if (!(avctx->flags & AV_CODEC_FLAG_GRAY)) {
+ *u_data_size = encode_slice_plane(avctx, mb_count, dest_u,
+ chroma_stride, buf + *y_data_size, data_size - *y_data_size,
+ ctx->qmat_chroma[qp - 1], 1);
+
+ *v_data_size = encode_slice_plane(avctx, mb_count, dest_v,
+ chroma_stride, buf + *y_data_size + *u_data_size,
+ data_size - *y_data_size - *u_data_size,
+ ctx->qmat_chroma[qp - 1], 1);
+ }
+
+ return *y_data_size + *u_data_size + *v_data_size;
+}
+
+static void subimage_with_fill(uint16_t *src, unsigned x, unsigned y,
+ unsigned stride, unsigned width, unsigned height, uint16_t *dst,
+ unsigned dst_width, unsigned dst_height)
+{
+
+ int box_width = FFMIN(width - x, dst_width);
+ int box_height = FFMIN(height - y, dst_height);
+ int i, j, src_stride = stride >> 1;
+ uint16_t last_pix, *last_line;
+
+ src += y * src_stride + x;
+ for (i = 0; i < box_height; ++i) {
+ for (j = 0; j < box_width; ++j) {
+ dst[j] = src[j];
+ }
+ last_pix = dst[j - 1];
+ for (; j < dst_width; j++)
+ dst[j] = last_pix;
+ src += src_stride;
+ dst += dst_width;
+ }
+ last_line = dst - dst_width;
+ for (; i < dst_height; i++) {
+ for (j = 0; j < dst_width; ++j) {
+ dst[j] = last_line[j];
+ }
+ dst += dst_width;
+ }
+}
+
+static int encode_slice(AVCodecContext *avctx, const AVFrame *pic, int mb_x,
+ int mb_y, unsigned mb_count, uint8_t *buf, unsigned data_size,
+ int unsafe, int *qp)
+{
+ int luma_stride, chroma_stride;
+ int hdr_size = 6, slice_size;
+ uint8_t *dest_y, *dest_u, *dest_v;
+ unsigned y_data_size = 0, u_data_size = 0, v_data_size = 0;
+ ProresContext* ctx = avctx->priv_data;
+ int tgt_bits = (mb_count * bitrate_table[avctx->profile]) >> 2;
+ int low_bytes = (tgt_bits - (tgt_bits >> 3)) >> 3; // 12% bitrate fluctuation
+ int high_bytes = (tgt_bits + (tgt_bits >> 3)) >> 3;
+
+ luma_stride = pic->linesize[0];
+ chroma_stride = pic->linesize[1];
+
+ dest_y = pic->data[0] + (mb_y << 4) * luma_stride + (mb_x << 5);
+ dest_u = pic->data[1] + (mb_y << 4) * chroma_stride + (mb_x << 4);
+ dest_v = pic->data[2] + (mb_y << 4) * chroma_stride + (mb_x << 4);
+
+ if (unsafe) {
+
+ subimage_with_fill((uint16_t *) pic->data[0], mb_x << 4, mb_y << 4,
+ luma_stride, avctx->width, avctx->height,
+ (uint16_t *) ctx->fill_y, mb_count << 4, 16);
+ subimage_with_fill((uint16_t *) pic->data[1], mb_x << 3, mb_y << 4,
+ chroma_stride, avctx->width >> 1, avctx->height,
+ (uint16_t *) ctx->fill_u, mb_count << 3, 16);
+ subimage_with_fill((uint16_t *) pic->data[2], mb_x << 3, mb_y << 4,
+ chroma_stride, avctx->width >> 1, avctx->height,
+ (uint16_t *) ctx->fill_v, mb_count << 3, 16);
+
+ encode_slice_data(avctx, ctx->fill_y, ctx->fill_u, ctx->fill_v,
+ mb_count << 5, mb_count << 4, mb_count, buf + hdr_size,
+ data_size - hdr_size, &y_data_size, &u_data_size, &v_data_size,
+ *qp);
+ } else {
+ slice_size = encode_slice_data(avctx, dest_y, dest_u, dest_v,
+ luma_stride, chroma_stride, mb_count, buf + hdr_size,
+ data_size - hdr_size, &y_data_size, &u_data_size, &v_data_size,
+ *qp);
+
+ if (slice_size > high_bytes && *qp < qp_end_table[avctx->profile]) {
+ do {
+ *qp += 1;
+ slice_size = encode_slice_data(avctx, dest_y, dest_u, dest_v,
+ luma_stride, chroma_stride, mb_count, buf + hdr_size,
+ data_size - hdr_size, &y_data_size, &u_data_size,
+ &v_data_size, *qp);
+ } while (slice_size > high_bytes && *qp < qp_end_table[avctx->profile]);
+ } else if (slice_size < low_bytes && *qp
+ > qp_start_table[avctx->profile]) {
+ do {
+ *qp -= 1;
+ slice_size = encode_slice_data(avctx, dest_y, dest_u, dest_v,
+ luma_stride, chroma_stride, mb_count, buf + hdr_size,
+ data_size - hdr_size, &y_data_size, &u_data_size,
+ &v_data_size, *qp);
+ } while (slice_size < low_bytes && *qp > qp_start_table[avctx->profile]);
+ }
+ }
+
+ buf[0] = hdr_size << 3;
+ buf[1] = *qp;
+ AV_WB16(buf + 2, y_data_size);
+ AV_WB16(buf + 4, u_data_size);
+
+ return hdr_size + y_data_size + u_data_size + v_data_size;
+}
+
+static int prores_encode_picture(AVCodecContext *avctx, const AVFrame *pic,
+ uint8_t *buf, const int buf_size)
+{
+ int mb_width = (avctx->width + 15) >> 4;
+ int mb_height = (avctx->height + 15) >> 4;
+ int hdr_size, sl_size, i;
+ int mb_y, sl_data_size, qp;
+ int unsafe_bot, unsafe_right;
+ uint8_t *sl_data, *sl_data_sizes;
+ int slice_per_line = 0, rem = mb_width;
+
+ for (i = av_log2(DEFAULT_SLICE_MB_WIDTH); i >= 0; --i) {
+ slice_per_line += rem >> i;
+ rem &= (1 << i) - 1;
+ }
+
+ qp = qp_start_table[avctx->profile];
+ hdr_size = 8; sl_data_size = buf_size - hdr_size;
+ sl_data_sizes = buf + hdr_size;
+ sl_data = sl_data_sizes + (slice_per_line * mb_height * 2);
+ for (mb_y = 0; mb_y < mb_height; mb_y++) {
+ int mb_x = 0;
+ int slice_mb_count = DEFAULT_SLICE_MB_WIDTH;
+ while (mb_x < mb_width) {
+ while (mb_width - mb_x < slice_mb_count)
+ slice_mb_count >>= 1;
+
+ unsafe_bot = (avctx->height & 0xf) && (mb_y == mb_height - 1);
+ unsafe_right = (avctx->width & 0xf) && (mb_x + slice_mb_count == mb_width);
+
+ sl_size = encode_slice(avctx, pic, mb_x, mb_y, slice_mb_count,
+ sl_data, sl_data_size, unsafe_bot || unsafe_right, &qp);
+
+ bytestream_put_be16(&sl_data_sizes, sl_size);
+ sl_data += sl_size;
+ sl_data_size -= sl_size;
+ mb_x += slice_mb_count;
+ }
+ }
+
+ buf[0] = hdr_size << 3;
+ AV_WB32(buf + 1, sl_data - buf);
+ AV_WB16(buf + 5, slice_per_line * mb_height);
+ buf[7] = av_log2(DEFAULT_SLICE_MB_WIDTH) << 4;
+
+ return sl_data - buf;
+}
+
+static int prores_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+ const AVFrame *pict, int *got_packet)
+{
+ int header_size = 148;
+ uint8_t *buf;
+ int pic_size, ret;
+ int frame_size = FFALIGN(avctx->width, 16) * FFALIGN(avctx->height, 16)*16 + 500 + FF_MIN_BUFFER_SIZE; //FIXME choose tighter limit
+
+
+ if ((ret = ff_alloc_packet2(avctx, pkt, frame_size + FF_MIN_BUFFER_SIZE, 0)) < 0)
+ return ret;
+
+ buf = pkt->data;
+ pic_size = prores_encode_picture(avctx, pict, buf + header_size + 8,
+ pkt->size - header_size - 8);
+
+ bytestream_put_be32(&buf, pic_size + 8 + header_size);
+ bytestream_put_buffer(&buf, "icpf", 4);
+
+ bytestream_put_be16(&buf, header_size);
+ bytestream_put_be16(&buf, 0);
+ bytestream_put_buffer(&buf, "fmpg", 4);
+ bytestream_put_be16(&buf, avctx->width);
+ bytestream_put_be16(&buf, avctx->height);
+ *buf++ = 0x83; // {10}(422){00}{00}(frame){11}
+ *buf++ = 0;
+ *buf++ = 2;
+ *buf++ = 2;
+ *buf++ = 6;
+ *buf++ = 32;
+ *buf++ = 0;
+ *buf++ = 3;
+
+ bytestream_put_buffer(&buf, QMAT_LUMA[avctx->profile], 64);
+ bytestream_put_buffer(&buf, QMAT_CHROMA[avctx->profile], 64);
+
+ pkt->flags |= AV_PKT_FLAG_KEY;
+ pkt->size = pic_size + 8 + header_size;
+ *got_packet = 1;
+
+ return 0;
+}
+
+static void scale_mat(const uint8_t* src, int* dst, int scale)
+{
+ int i;
+ for (i = 0; i < 64; i++)
+ dst[i] = src[i] * scale;
+}
+
+static av_cold int prores_encode_init(AVCodecContext *avctx)
+{
+ int i;
+ ProresContext* ctx = avctx->priv_data;
+
+ if (avctx->pix_fmt != AV_PIX_FMT_YUV422P10) {
+ av_log(avctx, AV_LOG_ERROR, "need YUV422P10\n");
+ return AVERROR_PATCHWELCOME;
+ }
+ avctx->bits_per_raw_sample = 10;
+
+ if (avctx->width & 0x1) {
+ av_log(avctx, AV_LOG_ERROR,
+ "frame width needs to be multiple of 2\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (avctx->width > 65534 || avctx->height > 65535) {
+ av_log(avctx, AV_LOG_ERROR,
+ "The maximum dimensions are 65534x65535\n");
+ return AVERROR(EINVAL);
+ }
+
+ if ((avctx->height & 0xf) || (avctx->width & 0xf)) {
+ ctx->fill_y = av_malloc(4 * (DEFAULT_SLICE_MB_WIDTH << 8));
+ if (!ctx->fill_y)
+ return AVERROR(ENOMEM);
+ ctx->fill_u = ctx->fill_y + (DEFAULT_SLICE_MB_WIDTH << 9);
+ ctx->fill_v = ctx->fill_u + (DEFAULT_SLICE_MB_WIDTH << 8);
+ }
+
+ if (avctx->profile == FF_PROFILE_UNKNOWN) {
+ avctx->profile = FF_PROFILE_PRORES_STANDARD;
+ av_log(avctx, AV_LOG_INFO,
+ "encoding with ProRes standard (apcn) profile\n");
+
+ } else if (avctx->profile < FF_PROFILE_PRORES_PROXY
+ || avctx->profile > FF_PROFILE_PRORES_HQ) {
+ av_log(
+ avctx,
+ AV_LOG_ERROR,
+ "unknown profile %d, use [0 - apco, 1 - apcs, 2 - apcn (default), 3 - apch]\n",
+ avctx->profile);
+ return AVERROR(EINVAL);
+ }
+
+ ff_fdctdsp_init(&ctx->fdsp, avctx);
+
+ avctx->codec_tag = AV_RL32((const uint8_t*)profiles[avctx->profile].name);
+
+ for (i = 1; i <= 16; i++) {
+ scale_mat(QMAT_LUMA[avctx->profile] , ctx->qmat_luma[i - 1] , i);
+ scale_mat(QMAT_CHROMA[avctx->profile], ctx->qmat_chroma[i - 1], i);
+ }
+
+ avctx->coded_frame->key_frame = 1;
+ avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+
+ return 0;
+}
+
+static av_cold int prores_encode_close(AVCodecContext *avctx)
+{
+ ProresContext* ctx = avctx->priv_data;
+ av_freep(&ctx->fill_y);
+
+ return 0;
+}
+
+AVCodec ff_prores_aw_encoder = {
+ .name = "prores_aw",
+ .long_name = NULL_IF_CONFIG_SMALL("Apple ProRes"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_PRORES,
+ .priv_data_size = sizeof(ProresContext),
+ .init = prores_encode_init,
+ .close = prores_encode_close,
+ .encode2 = prores_encode_frame,
+ .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUV422P10, AV_PIX_FMT_NONE},
- .capabilities = CODEC_CAP_FRAME_THREADS | CODEC_CAP_INTRA_ONLY,
++ .capabilities = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
+ .profiles = profiles
+};
+
+AVCodec ff_prores_encoder = {
+ .name = "prores",
+ .long_name = NULL_IF_CONFIG_SMALL("Apple ProRes"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_PRORES,
+ .priv_data_size = sizeof(ProresContext),
+ .init = prores_encode_init,
+ .close = prores_encode_close,
+ .encode2 = prores_encode_frame,
+ .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUV422P10, AV_PIX_FMT_NONE},
++ .capabilities = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
+ .profiles = profiles
+};
.id = AV_CODEC_ID_QDM2,
.priv_data_size = sizeof(QDM2Context),
.init = qdm2_decode_init,
- .init_static_data = qdm2_init_static_data,
.close = qdm2_decode_close,
.decode = qdm2_decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
};
.init = decode_init,
.close = decode_end,
.decode = decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .flush = decode_flush,
+ .capabilities = AV_CODEC_CAP_DR1,
};
.decode = qsv_decode_frame,
.flush = qsv_decode_flush,
.close = qsv_decode_close,
-- .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1,
++ .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1,
.priv_class = &hevc_class,
};
#endif
.decode = qsv_decode_frame,
.flush = qsv_decode_flush,
.close = qsv_decode_close,
-- .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1,
++ .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1,
.priv_class = &class,
};
--- /dev/null
- .capabilities = CODEC_CAP_DELAY,
+/*
+ * Intel MediaSDK QSV based VC-1 video decoder
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+#include <string.h>
+
+#include "libavutil/common.h"
+#include "libavutil/fifo.h"
+#include "libavutil/opt.h"
+
+#include "avcodec.h"
+#include "qsvdec.h"
+
+typedef struct QSVVC1Context {
+ AVClass *class;
+ QSVContext qsv;
+} QSVVC1Context;
+
+
+static av_cold int qsv_decode_close(AVCodecContext *avctx)
+{
+ QSVVC1Context *s = avctx->priv_data;
+
+ ff_qsv_decode_close(&s->qsv);
+
+ return 0;
+}
+
+static int qsv_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame, AVPacket *avpkt)
+{
+ QSVVC1Context *s = avctx->priv_data;
+ AVFrame *frame = data;
+
+ return ff_qsv_decode(avctx, &s->qsv, frame, got_frame, avpkt);
+}
+
+AVHWAccel ff_vc1_qsv_hwaccel = {
+ .name = "vc1_qsv",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_VC1,
+ .pix_fmt = AV_PIX_FMT_QSV,
+};
+
+#define OFFSET(x) offsetof(QSVVC1Context, x)
+#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
+static const AVOption options[] = {
+ { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 0, INT_MAX, VD },
+ { NULL },
+};
+
+static const AVClass class = {
+ .class_name = "vc1_qsv",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+AVCodec ff_vc1_qsv_decoder = {
+ .name = "vc1_qsv",
+ .long_name = NULL_IF_CONFIG_SMALL("VC-1 video (Intel Quick Sync Video acceleration)"),
+ .priv_data_size = sizeof(QSVVC1Context),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_VC1,
+ .init = NULL,
+ .decode = qsv_decode_frame,
+ .flush = NULL,
+ .close = qsv_decode_close,
++ .capabilities = AV_CODEC_CAP_DELAY,
+ .priv_class = &class,
+};
.init = qsv_enc_init,
.encode2 = qsv_enc_frame,
.close = qsv_enc_close,
-- .capabilities = CODEC_CAP_DELAY,
++ .capabilities = AV_CODEC_CAP_DELAY,
.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12,
AV_PIX_FMT_QSV,
AV_PIX_FMT_NONE },
.init = qsv_enc_init,
.encode2 = qsv_enc_frame,
.close = qsv_enc_close,
-- .capabilities = CODEC_CAP_DELAY,
++ .capabilities = AV_CODEC_CAP_DELAY,
.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12,
AV_PIX_FMT_QSV,
AV_PIX_FMT_NONE },
.id = AV_CODEC_ID_R10K,
.init = decode_init,
.decode = decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
};
#endif
- .capabilities = CODEC_CAP_DR1,
+#if CONFIG_AVRP_DECODER
+AVCodec ff_avrp_decoder = {
+ .name = "avrp",
+ .long_name = NULL_IF_CONFIG_SMALL("Avid 1:1 10-bit RGB Packer"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_AVRP,
+ .init = decode_init,
+ .decode = decode_frame,
++ .capabilities = AV_CODEC_CAP_DR1,
+};
+#endif
.init = ra144_encode_init,
.encode2 = ra144_encode_frame,
.close = ra144_encode_close,
- .capabilities = CODEC_CAP_DELAY | CODEC_CAP_SMALL_LAST_FRAME,
+ .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SMALL_LAST_FRAME,
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
+ .supported_samplerates = (const int[]){ 8000, 0 },
+ .channel_layouts = (const uint64_t[]) { AV_CH_LAYOUT_MONO, 0 },
};
.priv_data_size = sizeof(RA288Context),
.init = ra288_decode_init,
.decode = ra288_decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .close = ra288_decode_close,
+ .capabilities = AV_CODEC_CAP_DR1,
};
.init = raw_init_decoder,
.close = raw_close_decoder,
.decode = raw_decode,
- .capabilities = CODEC_CAP_PARAM_CHANGE,
+ .priv_class = &rawdec_class,
++ .capabilities = AV_CODEC_CAP_PARAM_CHANGE,
};
.init = rv10_decode_init,
.close = rv10_decode_end,
.decode = rv10_decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
+ .max_lowres = 3,
.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_NONE
.init = rv10_decode_init,
.close = rv10_decode_end,
.decode = rv10_decode_frame,
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
+ .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
.flush = ff_mpeg_flush,
+ .max_lowres = 3,
.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_YUV420P,
AV_PIX_FMT_NONE
.long_name = NULL_IF_CONFIG_SMALL("SMPTE 302M"),
.type = AVMEDIA_TYPE_AUDIO,
.id = AV_CODEC_ID_S302M,
+ .priv_data_size = sizeof(S302Context),
.decode = s302m_decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
+ .priv_class = &s302m_class,
};
--- /dev/null
- .capabilities = CODEC_CAP_VARIABLE_FRAME_SIZE | CODEC_CAP_EXPERIMENTAL,
+/*
+ * SMPTE 302M encoder
+ * Copyright (c) 2010 Google, Inc.
+ * Copyright (c) 2013 Darryl Wallace <wallacdj@gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "internal.h"
+#include "mathops.h"
+#include "put_bits.h"
+
+#define AES3_HEADER_LEN 4
+
+typedef struct S302MEncContext {
+ uint8_t framing_index; /* Set for even channels on multiple of 192 samples */
+} S302MEncContext;
+
+static av_cold int s302m_encode_init(AVCodecContext *avctx)
+{
+ S302MEncContext *s = avctx->priv_data;
+
+ if (avctx->channels & 1 || avctx->channels > 8) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Encoding %d channel(s) is not allowed. Only 2, 4, 6 and 8 channels are supported.\n",
+ avctx->channels);
+ return AVERROR(EINVAL);
+ }
+
+ switch (avctx->sample_fmt) {
+ case AV_SAMPLE_FMT_S16:
+ avctx->bits_per_raw_sample = 16;
+ break;
+ case AV_SAMPLE_FMT_S32:
+ if (avctx->bits_per_raw_sample > 20) {
+ if (avctx->bits_per_raw_sample > 24)
+ av_log(avctx, AV_LOG_WARNING, "encoding as 24 bits-per-sample\n");
+ avctx->bits_per_raw_sample = 24;
+ } else if (!avctx->bits_per_raw_sample) {
+ avctx->bits_per_raw_sample = 24;
+ } else if (avctx->bits_per_raw_sample <= 20) {
+ avctx->bits_per_raw_sample = 20;
+ }
+ }
+
+ avctx->frame_size = 0;
+ avctx->bit_rate = 48000 * avctx->channels *
+ (avctx->bits_per_raw_sample + 4);
+ s->framing_index = 0;
+
+ return 0;
+}
+
+static int s302m_encode2_frame(AVCodecContext *avctx, AVPacket *avpkt,
+ const AVFrame *frame, int *got_packet_ptr)
+{
+ S302MEncContext *s = avctx->priv_data;
+ const int buf_size = AES3_HEADER_LEN +
+ (frame->nb_samples *
+ avctx->channels *
+ (avctx->bits_per_raw_sample + 4)) / 8;
+ int ret, c, channels;
+ uint8_t *o;
+ PutBitContext pb;
+
+ if ((ret = ff_alloc_packet2(avctx, avpkt, buf_size, 0)) < 0)
+ return ret;
+
+ o = avpkt->data;
+ init_put_bits(&pb, o, buf_size);
+ put_bits(&pb, 16, buf_size - AES3_HEADER_LEN);
+ put_bits(&pb, 2, (avctx->channels - 2) >> 1); // number of channels
+ put_bits(&pb, 8, 0); // channel ID
+ put_bits(&pb, 2, (avctx->bits_per_raw_sample - 16) / 4); // bits per samples (0 = 16bit, 1 = 20bit, 2 = 24bit)
+ put_bits(&pb, 4, 0); // alignments
+ flush_put_bits(&pb);
+ o += AES3_HEADER_LEN;
+
+ if (avctx->bits_per_raw_sample == 24) {
+ const uint32_t *samples = (uint32_t *)frame->data[0];
+
+ for (c = 0; c < frame->nb_samples; c++) {
+ uint8_t vucf = s->framing_index == 0 ? 0x10: 0;
+
+ for (channels = 0; channels < avctx->channels; channels += 2) {
+ o[0] = ff_reverse[(samples[0] & 0x0000FF00) >> 8];
+ o[1] = ff_reverse[(samples[0] & 0x00FF0000) >> 16];
+ o[2] = ff_reverse[(samples[0] & 0xFF000000) >> 24];
+ o[3] = ff_reverse[(samples[1] & 0x00000F00) >> 4] | vucf;
+ o[4] = ff_reverse[(samples[1] & 0x000FF000) >> 12];
+ o[5] = ff_reverse[(samples[1] & 0x0FF00000) >> 20];
+ o[6] = ff_reverse[(samples[1] & 0xF0000000) >> 28];
+ o += 7;
+ samples += 2;
+ }
+
+ s->framing_index++;
+ if (s->framing_index >= 192)
+ s->framing_index = 0;
+ }
+ } else if (avctx->bits_per_raw_sample == 20) {
+ const uint32_t *samples = (uint32_t *)frame->data[0];
+
+ for (c = 0; c < frame->nb_samples; c++) {
+ uint8_t vucf = s->framing_index == 0 ? 0x80: 0;
+
+ for (channels = 0; channels < avctx->channels; channels += 2) {
+ o[0] = ff_reverse[ (samples[0] & 0x000FF000) >> 12];
+ o[1] = ff_reverse[ (samples[0] & 0x0FF00000) >> 20];
+ o[2] = ff_reverse[((samples[0] & 0xF0000000) >> 28) | vucf];
+ o[3] = ff_reverse[ (samples[1] & 0x000FF000) >> 12];
+ o[4] = ff_reverse[ (samples[1] & 0x0FF00000) >> 20];
+ o[5] = ff_reverse[ (samples[1] & 0xF0000000) >> 28];
+ o += 6;
+ samples += 2;
+ }
+
+ s->framing_index++;
+ if (s->framing_index >= 192)
+ s->framing_index = 0;
+ }
+ } else if (avctx->bits_per_raw_sample == 16) {
+ const uint16_t *samples = (uint16_t *)frame->data[0];
+
+ for (c = 0; c < frame->nb_samples; c++) {
+ uint8_t vucf = s->framing_index == 0 ? 0x10 : 0;
+
+ for (channels = 0; channels < avctx->channels; channels += 2) {
+ o[0] = ff_reverse[ samples[0] & 0xFF];
+ o[1] = ff_reverse[(samples[0] & 0xFF00) >> 8];
+ o[2] = ff_reverse[(samples[1] & 0x0F) << 4] | vucf;
+ o[3] = ff_reverse[(samples[1] & 0x0FF0) >> 4];
+ o[4] = ff_reverse[(samples[1] & 0xF000) >> 12];
+ o += 5;
+ samples += 2;
+
+ }
+
+ s->framing_index++;
+ if (s->framing_index >= 192)
+ s->framing_index = 0;
+ }
+ }
+
+ *got_packet_ptr = 1;
+
+ return 0;
+}
+
+AVCodec ff_s302m_encoder = {
+ .name = "s302m",
+ .long_name = NULL_IF_CONFIG_SMALL("SMPTE 302M"),
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_S302M,
+ .priv_data_size = sizeof(S302MEncContext),
+ .init = s302m_encode_init,
+ .encode2 = s302m_encode2_frame,
+ .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S32,
+ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_NONE },
++ .capabilities = AV_CODEC_CAP_VARIABLE_FRAME_SIZE | AV_CODEC_CAP_EXPERIMENTAL,
+ .supported_samplerates = (const int[]) { 48000, 0 },
+};
.init = shorten_decode_init,
.close = shorten_decode_close,
.decode = shorten_decode_frame,
- .capabilities = CODEC_CAP_DELAY | CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1,
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,
+ AV_SAMPLE_FMT_U8P,
AV_SAMPLE_FMT_NONE },
};
--- /dev/null
- .capabilities = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/,
+/*
+ * Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/intmath.h"
+#include "libavutil/log.h"
+#include "libavutil/opt.h"
+#include "avcodec.h"
+#include "snow_dwt.h"
+#include "internal.h"
+#include "snow.h"
+
+#include "rangecoder.h"
+#include "mathops.h"
+
+#include "mpegvideo.h"
+#include "h263.h"
+
+static av_always_inline void predict_slice_buffered(SnowContext *s, slice_buffer * sb, IDWTELEM * old_buffer, int plane_index, int add, int mb_y){
+ Plane *p= &s->plane[plane_index];
+ const int mb_w= s->b_width << s->block_max_depth;
+ const int mb_h= s->b_height << s->block_max_depth;
+ int x, y, mb_x;
+ int block_size = MB_SIZE >> s->block_max_depth;
+ int block_w = plane_index ? block_size>>s->chroma_h_shift : block_size;
+ int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
+ const uint8_t *obmc = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
+ int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
+ int ref_stride= s->current_picture->linesize[plane_index];
+ uint8_t *dst8= s->current_picture->data[plane_index];
+ int w= p->width;
+ int h= p->height;
+
+ if(s->keyframe || (s->avctx->debug&512)){
+ if(mb_y==mb_h)
+ return;
+
+ if(add){
+ for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
+// DWTELEM * line = slice_buffer_get_line(sb, y);
+ IDWTELEM * line = sb->line[y];
+ for(x=0; x<w; x++){
+// int v= buf[x + y*w] + (128<<FRAC_BITS) + (1<<(FRAC_BITS-1));
+ int v= line[x] + (128<<FRAC_BITS) + (1<<(FRAC_BITS-1));
+ v >>= FRAC_BITS;
+ if(v&(~255)) v= ~(v>>31);
+ dst8[x + y*ref_stride]= v;
+ }
+ }
+ }else{
+ for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
+// DWTELEM * line = slice_buffer_get_line(sb, y);
+ IDWTELEM * line = sb->line[y];
+ for(x=0; x<w; x++){
+ line[x] -= 128 << FRAC_BITS;
+// buf[x + y*w]-= 128<<FRAC_BITS;
+ }
+ }
+ }
+
+ return;
+ }
+
+ for(mb_x=0; mb_x<=mb_w; mb_x++){
+ add_yblock(s, 1, sb, old_buffer, dst8, obmc,
+ block_w*mb_x - block_w/2,
+ block_h*mb_y - block_h/2,
+ block_w, block_h,
+ w, h,
+ w, ref_stride, obmc_stride,
+ mb_x - 1, mb_y - 1,
+ add, 0, plane_index);
+ }
+
+ if(s->avmv && mb_y < mb_h && plane_index == 0)
+ for(mb_x=0; mb_x<mb_w; mb_x++){
+ AVMotionVector *avmv = s->avmv + s->avmv_index;
+ const int b_width = s->b_width << s->block_max_depth;
+ const int b_stride= b_width;
+ BlockNode *bn= &s->block[mb_x + mb_y*b_stride];
+
+ if (bn->type)
+ continue;
+
+ s->avmv_index++;
+
+ avmv->w = block_w;
+ avmv->h = block_h;
+ avmv->dst_x = block_w*mb_x - block_w/2;
+ avmv->dst_y = block_h*mb_y - block_h/2;
+ avmv->src_x = avmv->dst_x + (bn->mx * s->mv_scale)/8;
+ avmv->src_y = avmv->dst_y + (bn->my * s->mv_scale)/8;
+ avmv->source= -1 - bn->ref;
+ avmv->flags = 0;
+ }
+}
+
+static inline void decode_subband_slice_buffered(SnowContext *s, SubBand *b, slice_buffer * sb, int start_y, int h, int save_state[1]){
+ const int w= b->width;
+ int y;
+ const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
+ int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
+ int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
+ int new_index = 0;
+
+ if(b->ibuf == s->spatial_idwt_buffer || s->qlog == LOSSLESS_QLOG){
+ qadd= 0;
+ qmul= 1<<QEXPSHIFT;
+ }
+
+ /* If we are on the second or later slice, restore our index. */
+ if (start_y != 0)
+ new_index = save_state[0];
+
+
+ for(y=start_y; y<h; y++){
+ int x = 0;
+ int v;
+ IDWTELEM * line = slice_buffer_get_line(sb, y * b->stride_line + b->buf_y_offset) + b->buf_x_offset;
+ memset(line, 0, b->width*sizeof(IDWTELEM));
+ v = b->x_coeff[new_index].coeff;
+ x = b->x_coeff[new_index++].x;
+ while(x < w){
+ register int t= ( (v>>1)*qmul + qadd)>>QEXPSHIFT;
+ register int u= -(v&1);
+ line[x] = (t^u) - u;
+
+ v = b->x_coeff[new_index].coeff;
+ x = b->x_coeff[new_index++].x;
+ }
+ }
+
+ /* Save our variables for the next slice. */
+ save_state[0] = new_index;
+
+ return;
+}
+
+static int decode_q_branch(SnowContext *s, int level, int x, int y){
+ const int w= s->b_width << s->block_max_depth;
+ const int rem_depth= s->block_max_depth - level;
+ const int index= (x + y*w) << rem_depth;
+ int trx= (x+1)<<rem_depth;
+ const BlockNode *left = x ? &s->block[index-1] : &null_block;
+ const BlockNode *top = y ? &s->block[index-w] : &null_block;
+ const BlockNode *tl = y && x ? &s->block[index-w-1] : left;
+ const BlockNode *tr = y && trx<w && ((x&1)==0 || level==0) ? &s->block[index-w+(1<<rem_depth)] : tl; //FIXME use lt
+ int s_context= 2*left->level + 2*top->level + tl->level + tr->level;
+ int res;
+
+ if(s->keyframe){
+ set_blocks(s, level, x, y, null_block.color[0], null_block.color[1], null_block.color[2], null_block.mx, null_block.my, null_block.ref, BLOCK_INTRA);
+ return 0;
+ }
+
+ if(level==s->block_max_depth || get_rac(&s->c, &s->block_state[4 + s_context])){
+ int type, mx, my;
+ int l = left->color[0];
+ int cb= left->color[1];
+ int cr= left->color[2];
+ unsigned ref = 0;
+ int ref_context= av_log2(2*left->ref) + av_log2(2*top->ref);
+ int mx_context= av_log2(2*FFABS(left->mx - top->mx)) + 0*av_log2(2*FFABS(tr->mx - top->mx));
+ int my_context= av_log2(2*FFABS(left->my - top->my)) + 0*av_log2(2*FFABS(tr->my - top->my));
+
+ type= get_rac(&s->c, &s->block_state[1 + left->type + top->type]) ? BLOCK_INTRA : 0;
+
+ if(type){
+ pred_mv(s, &mx, &my, 0, left, top, tr);
+ l += get_symbol(&s->c, &s->block_state[32], 1);
+ if (s->nb_planes > 2) {
+ cb+= get_symbol(&s->c, &s->block_state[64], 1);
+ cr+= get_symbol(&s->c, &s->block_state[96], 1);
+ }
+ }else{
+ if(s->ref_frames > 1)
+ ref= get_symbol(&s->c, &s->block_state[128 + 1024 + 32*ref_context], 0);
+ if (ref >= s->ref_frames) {
+ av_log(s->avctx, AV_LOG_ERROR, "Invalid ref\n");
+ return AVERROR_INVALIDDATA;
+ }
+ pred_mv(s, &mx, &my, ref, left, top, tr);
+ mx+= get_symbol(&s->c, &s->block_state[128 + 32*(mx_context + 16*!!ref)], 1);
+ my+= get_symbol(&s->c, &s->block_state[128 + 32*(my_context + 16*!!ref)], 1);
+ }
+ set_blocks(s, level, x, y, l, cb, cr, mx, my, ref, type);
+ }else{
+ if ((res = decode_q_branch(s, level+1, 2*x+0, 2*y+0)) < 0 ||
+ (res = decode_q_branch(s, level+1, 2*x+1, 2*y+0)) < 0 ||
+ (res = decode_q_branch(s, level+1, 2*x+0, 2*y+1)) < 0 ||
+ (res = decode_q_branch(s, level+1, 2*x+1, 2*y+1)) < 0)
+ return res;
+ }
+ return 0;
+}
+
+static void dequantize_slice_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, IDWTELEM *src, int stride, int start_y, int end_y){
+ const int w= b->width;
+ const int qlog= av_clip(s->qlog + b->qlog, 0, QROOT*16);
+ const int qmul= ff_qexp[qlog&(QROOT-1)]<<(qlog>>QSHIFT);
+ const int qadd= (s->qbias*qmul)>>QBIAS_SHIFT;
+ int x,y;
+
+ if(s->qlog == LOSSLESS_QLOG) return;
+
+ for(y=start_y; y<end_y; y++){
+// DWTELEM * line = slice_buffer_get_line_from_address(sb, src + (y * stride));
+ IDWTELEM * line = slice_buffer_get_line(sb, (y * b->stride_line) + b->buf_y_offset) + b->buf_x_offset;
+ for(x=0; x<w; x++){
+ int i= line[x];
+ if(i<0){
+ line[x]= -((-i*qmul + qadd)>>(QEXPSHIFT)); //FIXME try different bias
+ }else if(i>0){
+ line[x]= (( i*qmul + qadd)>>(QEXPSHIFT));
+ }
+ }
+ }
+}
+
+static void correlate_slice_buffered(SnowContext *s, slice_buffer * sb, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median, int start_y, int end_y){
+ const int w= b->width;
+ int x,y;
+
+ IDWTELEM * line=0; // silence silly "could be used without having been initialized" warning
+ IDWTELEM * prev;
+
+ if (start_y != 0)
+ line = slice_buffer_get_line(sb, ((start_y - 1) * b->stride_line) + b->buf_y_offset) + b->buf_x_offset;
+
+ for(y=start_y; y<end_y; y++){
+ prev = line;
+// line = slice_buffer_get_line_from_address(sb, src + (y * stride));
+ line = slice_buffer_get_line(sb, (y * b->stride_line) + b->buf_y_offset) + b->buf_x_offset;
+ for(x=0; x<w; x++){
+ if(x){
+ if(use_median){
+ if(y && x+1<w) line[x] += mid_pred(line[x - 1], prev[x], prev[x + 1]);
+ else line[x] += line[x - 1];
+ }else{
+ if(y) line[x] += mid_pred(line[x - 1], prev[x], line[x - 1] + prev[x] - prev[x - 1]);
+ else line[x] += line[x - 1];
+ }
+ }else{
+ if(y) line[x] += prev[x];
+ }
+ }
+ }
+}
+
+static void decode_qlogs(SnowContext *s){
+ int plane_index, level, orientation;
+
+ for(plane_index=0; plane_index < s->nb_planes; plane_index++){
+ for(level=0; level<s->spatial_decomposition_count; level++){
+ for(orientation=level ? 1:0; orientation<4; orientation++){
+ int q;
+ if (plane_index==2) q= s->plane[1].band[level][orientation].qlog;
+ else if(orientation==2) q= s->plane[plane_index].band[level][1].qlog;
+ else q= get_symbol(&s->c, s->header_state, 1);
+ s->plane[plane_index].band[level][orientation].qlog= q;
+ }
+ }
+ }
+}
+
+#define GET_S(dst, check) \
+ tmp= get_symbol(&s->c, s->header_state, 0);\
+ if(!(check)){\
+ av_log(s->avctx, AV_LOG_ERROR, "Error " #dst " is %d\n", tmp);\
+ return AVERROR_INVALIDDATA;\
+ }\
+ dst= tmp;
+
+static int decode_header(SnowContext *s){
+ int plane_index, tmp;
+ uint8_t kstate[32];
+
+ memset(kstate, MID_STATE, sizeof(kstate));
+
+ s->keyframe= get_rac(&s->c, kstate);
+ if(s->keyframe || s->always_reset){
+ ff_snow_reset_contexts(s);
+ s->spatial_decomposition_type=
+ s->qlog=
+ s->qbias=
+ s->mv_scale=
+ s->block_max_depth= 0;
+ }
+ if(s->keyframe){
+ GET_S(s->version, tmp <= 0U)
+ s->always_reset= get_rac(&s->c, s->header_state);
+ s->temporal_decomposition_type= get_symbol(&s->c, s->header_state, 0);
+ s->temporal_decomposition_count= get_symbol(&s->c, s->header_state, 0);
+ GET_S(s->spatial_decomposition_count, 0 < tmp && tmp <= MAX_DECOMPOSITIONS)
+ s->colorspace_type= get_symbol(&s->c, s->header_state, 0);
+ if (s->colorspace_type == 1) {
+ s->avctx->pix_fmt= AV_PIX_FMT_GRAY8;
+ s->nb_planes = 1;
+ } else if(s->colorspace_type == 0) {
+ s->chroma_h_shift= get_symbol(&s->c, s->header_state, 0);
+ s->chroma_v_shift= get_symbol(&s->c, s->header_state, 0);
+
+ if(s->chroma_h_shift == 1 && s->chroma_v_shift==1){
+ s->avctx->pix_fmt= AV_PIX_FMT_YUV420P;
+ }else if(s->chroma_h_shift == 0 && s->chroma_v_shift==0){
+ s->avctx->pix_fmt= AV_PIX_FMT_YUV444P;
+ }else if(s->chroma_h_shift == 2 && s->chroma_v_shift==2){
+ s->avctx->pix_fmt= AV_PIX_FMT_YUV410P;
+ } else {
+ av_log(s, AV_LOG_ERROR, "unsupported color subsample mode %d %d\n", s->chroma_h_shift, s->chroma_v_shift);
+ s->chroma_h_shift = s->chroma_v_shift = 1;
+ s->avctx->pix_fmt= AV_PIX_FMT_YUV420P;
+ return AVERROR_INVALIDDATA;
+ }
+ s->nb_planes = 3;
+ } else {
+ av_log(s, AV_LOG_ERROR, "unsupported color space\n");
+ s->chroma_h_shift = s->chroma_v_shift = 1;
+ s->avctx->pix_fmt= AV_PIX_FMT_YUV420P;
+ return AVERROR_INVALIDDATA;
+ }
+
+
+ s->spatial_scalability= get_rac(&s->c, s->header_state);
+// s->rate_scalability= get_rac(&s->c, s->header_state);
+ GET_S(s->max_ref_frames, tmp < (unsigned)MAX_REF_FRAMES)
+ s->max_ref_frames++;
+
+ decode_qlogs(s);
+ }
+
+ if(!s->keyframe){
+ if(get_rac(&s->c, s->header_state)){
+ for(plane_index=0; plane_index<FFMIN(s->nb_planes, 2); plane_index++){
+ int htaps, i, sum=0;
+ Plane *p= &s->plane[plane_index];
+ p->diag_mc= get_rac(&s->c, s->header_state);
+ htaps= get_symbol(&s->c, s->header_state, 0)*2 + 2;
+ if((unsigned)htaps > HTAPS_MAX || htaps==0)
+ return AVERROR_INVALIDDATA;
+ p->htaps= htaps;
+ for(i= htaps/2; i; i--){
+ p->hcoeff[i]= get_symbol(&s->c, s->header_state, 0) * (1-2*(i&1));
+ sum += p->hcoeff[i];
+ }
+ p->hcoeff[0]= 32-sum;
+ }
+ s->plane[2].diag_mc= s->plane[1].diag_mc;
+ s->plane[2].htaps = s->plane[1].htaps;
+ memcpy(s->plane[2].hcoeff, s->plane[1].hcoeff, sizeof(s->plane[1].hcoeff));
+ }
+ if(get_rac(&s->c, s->header_state)){
+ GET_S(s->spatial_decomposition_count, 0 < tmp && tmp <= MAX_DECOMPOSITIONS)
+ decode_qlogs(s);
+ }
+ }
+
+ s->spatial_decomposition_type+= get_symbol(&s->c, s->header_state, 1);
+ if(s->spatial_decomposition_type > 1U){
+ av_log(s->avctx, AV_LOG_ERROR, "spatial_decomposition_type %d not supported\n", s->spatial_decomposition_type);
+ return AVERROR_INVALIDDATA;
+ }
+ if(FFMIN(s->avctx-> width>>s->chroma_h_shift,
+ s->avctx->height>>s->chroma_v_shift) >> (s->spatial_decomposition_count-1) <= 1){
+ av_log(s->avctx, AV_LOG_ERROR, "spatial_decomposition_count %d too large for size\n", s->spatial_decomposition_count);
+ return AVERROR_INVALIDDATA;
+ }
+
+
+ s->qlog += get_symbol(&s->c, s->header_state, 1);
+ s->mv_scale += get_symbol(&s->c, s->header_state, 1);
+ s->qbias += get_symbol(&s->c, s->header_state, 1);
+ s->block_max_depth+= get_symbol(&s->c, s->header_state, 1);
+ if(s->block_max_depth > 1 || s->block_max_depth < 0){
+ av_log(s->avctx, AV_LOG_ERROR, "block_max_depth= %d is too large\n", s->block_max_depth);
+ s->block_max_depth= 0;
+ return AVERROR_INVALIDDATA;
+ }
+
+ return 0;
+}
+
+static av_cold int decode_init(AVCodecContext *avctx)
+{
+ int ret;
+
+ if ((ret = ff_snow_common_init(avctx)) < 0) {
+ return ret;
+ }
+
+ return 0;
+}
+
+static int decode_blocks(SnowContext *s){
+ int x, y;
+ int w= s->b_width;
+ int h= s->b_height;
+ int res;
+
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ if ((res = decode_q_branch(s, 0, x, y)) < 0)
+ return res;
+ }
+ }
+ return 0;
+}
+
+static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
+ AVPacket *avpkt)
+{
+ const uint8_t *buf = avpkt->data;
+ int buf_size = avpkt->size;
+ SnowContext *s = avctx->priv_data;
+ RangeCoder * const c= &s->c;
+ int bytes_read;
+ AVFrame *picture = data;
+ int level, orientation, plane_index;
+ int res;
+
+ ff_init_range_decoder(c, buf, buf_size);
+ ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);
+
+ s->current_picture->pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
+ if ((res = decode_header(s)) < 0)
+ return res;
+ if ((res=ff_snow_common_init_after_header(avctx)) < 0)
+ return res;
+
+ // realloc slice buffer for the case that spatial_decomposition_count changed
+ ff_slice_buffer_destroy(&s->sb);
+ if ((res = ff_slice_buffer_init(&s->sb, s->plane[0].height,
+ (MB_SIZE >> s->block_max_depth) +
+ s->spatial_decomposition_count * 11 + 1,
+ s->plane[0].width,
+ s->spatial_idwt_buffer)) < 0)
+ return res;
+
+ for(plane_index=0; plane_index < s->nb_planes; plane_index++){
+ Plane *p= &s->plane[plane_index];
+ p->fast_mc= p->diag_mc && p->htaps==6 && p->hcoeff[0]==40
+ && p->hcoeff[1]==-10
+ && p->hcoeff[2]==2;
+ }
+
+ ff_snow_alloc_blocks(s);
+
+ if((res = ff_snow_frame_start(s)) < 0)
+ return res;
+
+ s->current_picture->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
+
+ //keyframe flag duplication mess FIXME
+ if(avctx->debug&FF_DEBUG_PICT_INFO)
+ av_log(avctx, AV_LOG_ERROR,
+ "keyframe:%d qlog:%d qbias: %d mvscale: %d "
+ "decomposition_type:%d decomposition_count:%d\n",
+ s->keyframe, s->qlog, s->qbias, s->mv_scale,
+ s->spatial_decomposition_type,
+ s->spatial_decomposition_count
+ );
+
+ av_assert0(!s->avmv);
+ if (s->avctx->flags2 & AV_CODEC_FLAG2_EXPORT_MVS) {
+ s->avmv = av_malloc_array(s->b_width * s->b_height, sizeof(AVMotionVector) << (s->block_max_depth*2));
+ }
+ s->avmv_index = 0;
+
+ if ((res = decode_blocks(s)) < 0)
+ return res;
+
+ for(plane_index=0; plane_index < s->nb_planes; plane_index++){
+ Plane *p= &s->plane[plane_index];
+ int w= p->width;
+ int h= p->height;
+ int x, y;
+ int decode_state[MAX_DECOMPOSITIONS][4][1]; /* Stored state info for unpack_coeffs. 1 variable per instance. */
+
+ if(s->avctx->debug&2048){
+ memset(s->spatial_dwt_buffer, 0, sizeof(DWTELEM)*w*h);
+ predict_plane(s, s->spatial_idwt_buffer, plane_index, 1);
+
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x++){
+ int v= s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x];
+ s->mconly_picture->data[plane_index][y*s->mconly_picture->linesize[plane_index] + x]= v;
+ }
+ }
+ }
+
+ {
+ for(level=0; level<s->spatial_decomposition_count; level++){
+ for(orientation=level ? 1 : 0; orientation<4; orientation++){
+ SubBand *b= &p->band[level][orientation];
+ unpack_coeffs(s, b, b->parent, orientation);
+ }
+ }
+ }
+
+ {
+ const int mb_h= s->b_height << s->block_max_depth;
+ const int block_size = MB_SIZE >> s->block_max_depth;
+ const int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
+ int mb_y;
+ DWTCompose cs[MAX_DECOMPOSITIONS];
+ int yd=0, yq=0;
+ int y;
+ int end_y;
+
+ ff_spatial_idwt_buffered_init(cs, &s->sb, w, h, 1, s->spatial_decomposition_type, s->spatial_decomposition_count);
+ for(mb_y=0; mb_y<=mb_h; mb_y++){
+
+ int slice_starty = block_h*mb_y;
+ int slice_h = block_h*(mb_y+1);
+
+ if (!(s->keyframe || s->avctx->debug&512)){
+ slice_starty = FFMAX(0, slice_starty - (block_h >> 1));
+ slice_h -= (block_h >> 1);
+ }
+
+ for(level=0; level<s->spatial_decomposition_count; level++){
+ for(orientation=level ? 1 : 0; orientation<4; orientation++){
+ SubBand *b= &p->band[level][orientation];
+ int start_y;
+ int end_y;
+ int our_mb_start = mb_y;
+ int our_mb_end = (mb_y + 1);
+ const int extra= 3;
+ start_y = (mb_y ? ((block_h * our_mb_start) >> (s->spatial_decomposition_count - level)) + s->spatial_decomposition_count - level + extra: 0);
+ end_y = (((block_h * our_mb_end) >> (s->spatial_decomposition_count - level)) + s->spatial_decomposition_count - level + extra);
+ if (!(s->keyframe || s->avctx->debug&512)){
+ start_y = FFMAX(0, start_y - (block_h >> (1+s->spatial_decomposition_count - level)));
+ end_y = FFMAX(0, end_y - (block_h >> (1+s->spatial_decomposition_count - level)));
+ }
+ start_y = FFMIN(b->height, start_y);
+ end_y = FFMIN(b->height, end_y);
+
+ if (start_y != end_y){
+ if (orientation == 0){
+ SubBand * correlate_band = &p->band[0][0];
+ int correlate_end_y = FFMIN(b->height, end_y + 1);
+ int correlate_start_y = FFMIN(b->height, (start_y ? start_y + 1 : 0));
+ decode_subband_slice_buffered(s, correlate_band, &s->sb, correlate_start_y, correlate_end_y, decode_state[0][0]);
+ correlate_slice_buffered(s, &s->sb, correlate_band, correlate_band->ibuf, correlate_band->stride, 1, 0, correlate_start_y, correlate_end_y);
+ dequantize_slice_buffered(s, &s->sb, correlate_band, correlate_band->ibuf, correlate_band->stride, start_y, end_y);
+ }
+ else
+ decode_subband_slice_buffered(s, b, &s->sb, start_y, end_y, decode_state[level][orientation]);
+ }
+ }
+ }
+
+ for(; yd<slice_h; yd+=4){
+ ff_spatial_idwt_buffered_slice(&s->dwt, cs, &s->sb, s->temp_idwt_buffer, w, h, 1, s->spatial_decomposition_type, s->spatial_decomposition_count, yd);
+ }
+
+ if(s->qlog == LOSSLESS_QLOG){
+ for(; yq<slice_h && yq<h; yq++){
+ IDWTELEM * line = slice_buffer_get_line(&s->sb, yq);
+ for(x=0; x<w; x++){
+ line[x] <<= FRAC_BITS;
+ }
+ }
+ }
+
+ predict_slice_buffered(s, &s->sb, s->spatial_idwt_buffer, plane_index, 1, mb_y);
+
+ y = FFMIN(p->height, slice_starty);
+ end_y = FFMIN(p->height, slice_h);
+ while(y < end_y)
+ ff_slice_buffer_release(&s->sb, y++);
+ }
+
+ ff_slice_buffer_flush(&s->sb);
+ }
+
+ }
+
+ emms_c();
+
+ ff_snow_release_buffer(avctx);
+
+ if(!(s->avctx->debug&2048))
+ res = av_frame_ref(picture, s->current_picture);
+ else
+ res = av_frame_ref(picture, s->mconly_picture);
+ if (res >= 0 && s->avmv_index) {
+ AVFrameSideData *sd;
+
+ sd = av_frame_new_side_data(picture, AV_FRAME_DATA_MOTION_VECTORS, s->avmv_index * sizeof(AVMotionVector));
+ if (!sd)
+ return AVERROR(ENOMEM);
+ memcpy(sd->data, s->avmv, s->avmv_index * sizeof(AVMotionVector));
+ }
+
+ av_freep(&s->avmv);
+
+ if (res < 0)
+ return res;
+
+ *got_frame = 1;
+
+ bytes_read= c->bytestream - c->bytestream_start;
+ if(bytes_read ==0) av_log(s->avctx, AV_LOG_ERROR, "error at end of frame\n"); //FIXME
+
+ return bytes_read;
+}
+
+static av_cold int decode_end(AVCodecContext *avctx)
+{
+ SnowContext *s = avctx->priv_data;
+
+ ff_slice_buffer_destroy(&s->sb);
+
+ ff_snow_common_end(s);
+
+ return 0;
+}
+
+AVCodec ff_snow_decoder = {
+ .name = "snow",
+ .long_name = NULL_IF_CONFIG_SMALL("Snow"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_SNOW,
+ .priv_data_size = sizeof(SnowContext),
+ .init = decode_init,
+ .close = decode_end,
+ .decode = decode_frame,
+    .capabilities   = AV_CODEC_CAP_DR1 /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
+ .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
+ FF_CODEC_CAP_INIT_CLEANUP,
+};
--- /dev/null
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_EXPERIMENTAL,
+/*
+ * Simple free lossless/lossy audio codec
+ * Copyright (c) 2004 Alex Beregszaszi
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "avcodec.h"
+#include "get_bits.h"
+#include "golomb.h"
+#include "internal.h"
+#include "rangecoder.h"
+
+
+/**
+ * @file
+ * Simple free lossless/lossy audio codec
+ * Based on Paul Francis Harrison's Bonk (http://www.logarithmic.net/pfh/bonk)
+ * Written and designed by Alex Beregszaszi
+ *
+ * TODO:
+ * - CABAC put/get_symbol
+ * - independent quantizer for channels
+ * - >2 channels support
+ * - more decorrelation types
+ * - more tap_quant tests
+ * - selectable intlist writers/readers (bonk-style, golomb, cabac)
+ */
+
+#define MAX_CHANNELS 2
+
+#define MID_SIDE 0
+#define LEFT_SIDE 1
+#define RIGHT_SIDE 2
+
+typedef struct SonicContext {
+ int version;
+ int minor_version;
+ int lossless, decorrelation;
+
+ int num_taps, downsampling;
+ double quantization;
+
+ int channels, samplerate, block_align, frame_size;
+
+ int *tap_quant;
+ int *int_samples;
+ int *coded_samples[MAX_CHANNELS];
+
+ // for encoding
+ int *tail;
+ int tail_size;
+ int *window;
+ int window_size;
+
+ // for decoding
+ int *predictor_k;
+ int *predictor_state[MAX_CHANNELS];
+} SonicContext;
+
+#define LATTICE_SHIFT 10
+#define SAMPLE_SHIFT 4
+#define LATTICE_FACTOR (1 << LATTICE_SHIFT)
+#define SAMPLE_FACTOR (1 << SAMPLE_SHIFT)
+
+#define BASE_QUANT 0.6
+#define RATE_VARIATION 3.0
+
+static inline int shift(int a,int b)
+{
+ return (a+(1<<(b-1))) >> b;
+}
+
+static inline int shift_down(int a,int b)
+{
+ return (a>>b)+(a<0);
+}
+
+static av_always_inline av_flatten void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed, uint64_t rc_stat[256][2], uint64_t rc_stat2[32][2]){
+ int i;
+
+#define put_rac(C,S,B) \
+do{\
+ if(rc_stat){\
+ rc_stat[*(S)][B]++;\
+ rc_stat2[(S)-state][B]++;\
+ }\
+ put_rac(C,S,B);\
+}while(0)
+
+ if(v){
+ const int a= FFABS(v);
+ const int e= av_log2(a);
+ put_rac(c, state+0, 0);
+ if(e<=9){
+ for(i=0; i<e; i++){
+ put_rac(c, state+1+i, 1); //1..10
+ }
+ put_rac(c, state+1+i, 0);
+
+ for(i=e-1; i>=0; i--){
+ put_rac(c, state+22+i, (a>>i)&1); //22..31
+ }
+
+ if(is_signed)
+ put_rac(c, state+11 + e, v < 0); //11..21
+ }else{
+ for(i=0; i<e; i++){
+ put_rac(c, state+1+FFMIN(i,9), 1); //1..10
+ }
+ put_rac(c, state+1+9, 0);
+
+ for(i=e-1; i>=0; i--){
+ put_rac(c, state+22+FFMIN(i,9), (a>>i)&1); //22..31
+ }
+
+ if(is_signed)
+ put_rac(c, state+11 + 10, v < 0); //11..21
+ }
+ }else{
+ put_rac(c, state+0, 1);
+ }
+#undef put_rac
+}
+
+static inline av_flatten int get_symbol(RangeCoder *c, uint8_t *state, int is_signed){
+ if(get_rac(c, state+0))
+ return 0;
+ else{
+ int i, e, a;
+ e= 0;
+ while(get_rac(c, state+1 + FFMIN(e,9))){ //1..10
+ e++;
+ }
+
+ a= 1;
+ for(i=e-1; i>=0; i--){
+ a += a + get_rac(c, state+22 + FFMIN(i,9)); //22..31
+ }
+
+ e= -(is_signed && get_rac(c, state+11 + FFMIN(e, 10))); //11..21
+ return (a^e)-e;
+ }
+}
+
+#if 1
+static inline int intlist_write(RangeCoder *c, uint8_t *state, int *buf, int entries, int base_2_part)
+{
+ int i;
+
+ for (i = 0; i < entries; i++)
+ put_symbol(c, state, buf[i], 1, NULL, NULL);
+
+ return 1;
+}
+
+static inline int intlist_read(RangeCoder *c, uint8_t *state, int *buf, int entries, int base_2_part)
+{
+ int i;
+
+ for (i = 0; i < entries; i++)
+ buf[i] = get_symbol(c, state, 1);
+
+ return 1;
+}
+#elif 1
+static inline int intlist_write(PutBitContext *pb, int *buf, int entries, int base_2_part)
+{
+ int i;
+
+ for (i = 0; i < entries; i++)
+ set_se_golomb(pb, buf[i]);
+
+ return 1;
+}
+
+static inline int intlist_read(GetBitContext *gb, int *buf, int entries, int base_2_part)
+{
+ int i;
+
+ for (i = 0; i < entries; i++)
+ buf[i] = get_se_golomb(gb);
+
+ return 1;
+}
+
+#else
+
+#define ADAPT_LEVEL 8
+
+static int bits_to_store(uint64_t x)
+{
+ int res = 0;
+
+ while(x)
+ {
+ res++;
+ x >>= 1;
+ }
+ return res;
+}
+
+static void write_uint_max(PutBitContext *pb, unsigned int value, unsigned int max)
+{
+ int i, bits;
+
+ if (!max)
+ return;
+
+ bits = bits_to_store(max);
+
+ for (i = 0; i < bits-1; i++)
+ put_bits(pb, 1, value & (1 << i));
+
+ if ( (value | (1 << (bits-1))) <= max)
+ put_bits(pb, 1, value & (1 << (bits-1)));
+}
+
+static unsigned int read_uint_max(GetBitContext *gb, int max)
+{
+ int i, bits, value = 0;
+
+ if (!max)
+ return 0;
+
+ bits = bits_to_store(max);
+
+ for (i = 0; i < bits-1; i++)
+ if (get_bits1(gb))
+ value += 1 << i;
+
+ if ( (value | (1<<(bits-1))) <= max)
+ if (get_bits1(gb))
+ value += 1 << (bits-1);
+
+ return value;
+}
+
+static int intlist_write(PutBitContext *pb, int *buf, int entries, int base_2_part)
+{
+ int i, j, x = 0, low_bits = 0, max = 0;
+ int step = 256, pos = 0, dominant = 0, any = 0;
+ int *copy, *bits;
+
+ copy = av_calloc(entries, sizeof(*copy));
+ if (!copy)
+ return AVERROR(ENOMEM);
+
+ if (base_2_part)
+ {
+ int energy = 0;
+
+ for (i = 0; i < entries; i++)
+ energy += abs(buf[i]);
+
+ low_bits = bits_to_store(energy / (entries * 2));
+ if (low_bits > 15)
+ low_bits = 15;
+
+ put_bits(pb, 4, low_bits);
+ }
+
+ for (i = 0; i < entries; i++)
+ {
+ put_bits(pb, low_bits, abs(buf[i]));
+ copy[i] = abs(buf[i]) >> low_bits;
+ if (copy[i] > max)
+ max = abs(copy[i]);
+ }
+
+ bits = av_calloc(entries*max, sizeof(*bits));
+ if (!bits)
+ {
+ av_free(copy);
+ return AVERROR(ENOMEM);
+ }
+
+ for (i = 0; i <= max; i++)
+ {
+ for (j = 0; j < entries; j++)
+ if (copy[j] >= i)
+ bits[x++] = copy[j] > i;
+ }
+
+ // store bitstream
+ while (pos < x)
+ {
+ int steplet = step >> 8;
+
+ if (pos + steplet > x)
+ steplet = x - pos;
+
+ for (i = 0; i < steplet; i++)
+ if (bits[i+pos] != dominant)
+ any = 1;
+
+ put_bits(pb, 1, any);
+
+ if (!any)
+ {
+ pos += steplet;
+ step += step / ADAPT_LEVEL;
+ }
+ else
+ {
+ int interloper = 0;
+
+ while (((pos + interloper) < x) && (bits[pos + interloper] == dominant))
+ interloper++;
+
+ // note change
+ write_uint_max(pb, interloper, (step >> 8) - 1);
+
+ pos += interloper + 1;
+ step -= step / ADAPT_LEVEL;
+ }
+
+ if (step < 256)
+ {
+ step = 65536 / step;
+ dominant = !dominant;
+ }
+ }
+
+ // store signs
+ for (i = 0; i < entries; i++)
+ if (buf[i])
+ put_bits(pb, 1, buf[i] < 0);
+
+ av_free(bits);
+ av_free(copy);
+
+ return 0;
+}
+
+static int intlist_read(GetBitContext *gb, int *buf, int entries, int base_2_part)
+{
+ int i, low_bits = 0, x = 0;
+ int n_zeros = 0, step = 256, dominant = 0;
+ int pos = 0, level = 0;
+ int *bits = av_calloc(entries, sizeof(*bits));
+
+ if (!bits)
+ return AVERROR(ENOMEM);
+
+ if (base_2_part)
+ {
+ low_bits = get_bits(gb, 4);
+
+ if (low_bits)
+ for (i = 0; i < entries; i++)
+ buf[i] = get_bits(gb, low_bits);
+ }
+
+// av_log(NULL, AV_LOG_INFO, "entries: %d, low bits: %d\n", entries, low_bits);
+
+ while (n_zeros < entries)
+ {
+ int steplet = step >> 8;
+
+ if (!get_bits1(gb))
+ {
+ for (i = 0; i < steplet; i++)
+ bits[x++] = dominant;
+
+ if (!dominant)
+ n_zeros += steplet;
+
+ step += step / ADAPT_LEVEL;
+ }
+ else
+ {
+ int actual_run = read_uint_max(gb, steplet-1);
+
+// av_log(NULL, AV_LOG_INFO, "actual run: %d\n", actual_run);
+
+ for (i = 0; i < actual_run; i++)
+ bits[x++] = dominant;
+
+ bits[x++] = !dominant;
+
+ if (!dominant)
+ n_zeros += actual_run;
+ else
+ n_zeros++;
+
+ step -= step / ADAPT_LEVEL;
+ }
+
+ if (step < 256)
+ {
+ step = 65536 / step;
+ dominant = !dominant;
+ }
+ }
+
+ // reconstruct unsigned values
+ n_zeros = 0;
+ for (i = 0; n_zeros < entries; i++)
+ {
+ while(1)
+ {
+ if (pos >= entries)
+ {
+ pos = 0;
+ level += 1 << low_bits;
+ }
+
+ if (buf[pos] >= level)
+ break;
+
+ pos++;
+ }
+
+ if (bits[i])
+ buf[pos] += 1 << low_bits;
+ else
+ n_zeros++;
+
+ pos++;
+ }
+ av_free(bits);
+
+ // read signs
+ for (i = 0; i < entries; i++)
+ if (buf[i] && get_bits1(gb))
+ buf[i] = -buf[i];
+
+// av_log(NULL, AV_LOG_INFO, "zeros: %d pos: %d\n", n_zeros, pos);
+
+ return 0;
+}
+#endif
+
+static void predictor_init_state(int *k, int *state, int order)
+{
+ int i;
+
+ for (i = order-2; i >= 0; i--)
+ {
+ int j, p, x = state[i];
+
+ for (j = 0, p = i+1; p < order; j++,p++)
+ {
+ int tmp = x + shift_down(k[j] * state[p], LATTICE_SHIFT);
+ state[p] += shift_down(k[j]*x, LATTICE_SHIFT);
+ x = tmp;
+ }
+ }
+}
+
+static int predictor_calc_error(int *k, int *state, int order, int error)
+{
+ int i, x = error - shift_down(k[order-1] * state[order-1], LATTICE_SHIFT);
+
+#if 1
+ int *k_ptr = &(k[order-2]),
+ *state_ptr = &(state[order-2]);
+ for (i = order-2; i >= 0; i--, k_ptr--, state_ptr--)
+ {
+ int k_value = *k_ptr, state_value = *state_ptr;
+ x -= shift_down(k_value * state_value, LATTICE_SHIFT);
+ state_ptr[1] = state_value + shift_down(k_value * x, LATTICE_SHIFT);
+ }
+#else
+ for (i = order-2; i >= 0; i--)
+ {
+ x -= shift_down(k[i] * state[i], LATTICE_SHIFT);
+ state[i+1] = state[i] + shift_down(k[i] * x, LATTICE_SHIFT);
+ }
+#endif
+
+ // don't drift too far, to avoid overflows
+ if (x > (SAMPLE_FACTOR<<16)) x = (SAMPLE_FACTOR<<16);
+ if (x < -(SAMPLE_FACTOR<<16)) x = -(SAMPLE_FACTOR<<16);
+
+ state[0] = x;
+
+ return x;
+}
+
+#if CONFIG_SONIC_ENCODER || CONFIG_SONIC_LS_ENCODER
+// Heavily modified Levinson-Durbin algorithm which
+// copes better with quantization, and calculates the
+// actual whitened result as it goes.
+
+static int modified_levinson_durbin(int *window, int window_entries,
+ int *out, int out_entries, int channels, int *tap_quant)
+{
+ int i;
+ int *state = av_calloc(window_entries, sizeof(*state));
+
+ if (!state)
+ return AVERROR(ENOMEM);
+
+ memcpy(state, window, 4* window_entries);
+
+ for (i = 0; i < out_entries; i++)
+ {
+ int step = (i+1)*channels, k, j;
+ double xx = 0.0, xy = 0.0;
+#if 1
+ int *x_ptr = &(window[step]);
+ int *state_ptr = &(state[0]);
+ j = window_entries - step;
+ for (;j>0;j--,x_ptr++,state_ptr++)
+ {
+ double x_value = *x_ptr;
+ double state_value = *state_ptr;
+ xx += state_value*state_value;
+ xy += x_value*state_value;
+ }
+#else
+ for (j = 0; j <= (window_entries - step); j++);
+ {
+ double stepval = window[step+j];
+ double stateval = window[j];
+// xx += (double)window[j]*(double)window[j];
+// xy += (double)window[step+j]*(double)window[j];
+ xx += stateval*stateval;
+ xy += stepval*stateval;
+ }
+#endif
+ if (xx == 0.0)
+ k = 0;
+ else
+ k = (int)(floor(-xy/xx * (double)LATTICE_FACTOR / (double)(tap_quant[i]) + 0.5));
+
+ if (k > (LATTICE_FACTOR/tap_quant[i]))
+ k = LATTICE_FACTOR/tap_quant[i];
+ if (-k > (LATTICE_FACTOR/tap_quant[i]))
+ k = -(LATTICE_FACTOR/tap_quant[i]);
+
+ out[i] = k;
+ k *= tap_quant[i];
+
+#if 1
+ x_ptr = &(window[step]);
+ state_ptr = &(state[0]);
+ j = window_entries - step;
+ for (;j>0;j--,x_ptr++,state_ptr++)
+ {
+ int x_value = *x_ptr;
+ int state_value = *state_ptr;
+ *x_ptr = x_value + shift_down(k*state_value,LATTICE_SHIFT);
+ *state_ptr = state_value + shift_down(k*x_value, LATTICE_SHIFT);
+ }
+#else
+ for (j=0; j <= (window_entries - step); j++)
+ {
+ int stepval = window[step+j];
+ int stateval=state[j];
+ window[step+j] += shift_down(k * stateval, LATTICE_SHIFT);
+ state[j] += shift_down(k * stepval, LATTICE_SHIFT);
+ }
+#endif
+ }
+
+ av_free(state);
+ return 0;
+}
+
+static inline int code_samplerate(int samplerate)
+{
+ switch (samplerate)
+ {
+ case 44100: return 0;
+ case 22050: return 1;
+ case 11025: return 2;
+ case 96000: return 3;
+ case 48000: return 4;
+ case 32000: return 5;
+ case 24000: return 6;
+ case 16000: return 7;
+ case 8000: return 8;
+ }
+ return AVERROR(EINVAL);
+}
+
+static av_cold int sonic_encode_init(AVCodecContext *avctx)
+{
+ SonicContext *s = avctx->priv_data;
+ PutBitContext pb;
+ int i;
+
+ s->version = 2;
+
+ if (avctx->channels > MAX_CHANNELS)
+ {
+ av_log(avctx, AV_LOG_ERROR, "Only mono and stereo streams are supported by now\n");
+ return AVERROR(EINVAL); /* only stereo or mono for now */
+ }
+
+ if (avctx->channels == 2)
+ s->decorrelation = MID_SIDE;
+ else
+ s->decorrelation = 3;
+
+ if (avctx->codec->id == AV_CODEC_ID_SONIC_LS)
+ {
+ s->lossless = 1;
+ s->num_taps = 32;
+ s->downsampling = 1;
+ s->quantization = 0.0;
+ }
+ else
+ {
+ s->num_taps = 128;
+ s->downsampling = 2;
+ s->quantization = 1.0;
+ }
+
+ // max tap 2048
+ if (s->num_taps < 32 || s->num_taps > 1024 || s->num_taps % 32) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid number of taps\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ // generate taps
+ s->tap_quant = av_calloc(s->num_taps, sizeof(*s->tap_quant));
+ if (!s->tap_quant)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < s->num_taps; i++)
+ s->tap_quant[i] = ff_sqrt(i+1);
+
+ s->channels = avctx->channels;
+ s->samplerate = avctx->sample_rate;
+
+ s->block_align = 2048LL*s->samplerate/(44100*s->downsampling);
+ s->frame_size = s->channels*s->block_align*s->downsampling;
+
+ s->tail_size = s->num_taps*s->channels;
+ s->tail = av_calloc(s->tail_size, sizeof(*s->tail));
+ if (!s->tail)
+ return AVERROR(ENOMEM);
+
+ s->predictor_k = av_calloc(s->num_taps, sizeof(*s->predictor_k) );
+ if (!s->predictor_k)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < s->channels; i++)
+ {
+ s->coded_samples[i] = av_calloc(s->block_align, sizeof(**s->coded_samples));
+ if (!s->coded_samples[i])
+ return AVERROR(ENOMEM);
+ }
+
+ s->int_samples = av_calloc(s->frame_size, sizeof(*s->int_samples));
+
+ s->window_size = ((2*s->tail_size)+s->frame_size);
+ s->window = av_calloc(s->window_size, sizeof(*s->window));
+ if (!s->window || !s->int_samples)
+ return AVERROR(ENOMEM);
+
+ avctx->extradata = av_mallocz(16);
+ if (!avctx->extradata)
+ return AVERROR(ENOMEM);
+ init_put_bits(&pb, avctx->extradata, 16*8);
+
+ put_bits(&pb, 2, s->version); // version
+ if (s->version >= 1)
+ {
+ if (s->version >= 2) {
+ put_bits(&pb, 8, s->version);
+ put_bits(&pb, 8, s->minor_version);
+ }
+ put_bits(&pb, 2, s->channels);
+ put_bits(&pb, 4, code_samplerate(s->samplerate));
+ }
+ put_bits(&pb, 1, s->lossless);
+ if (!s->lossless)
+ put_bits(&pb, 3, SAMPLE_SHIFT); // XXX FIXME: sample precision
+ put_bits(&pb, 2, s->decorrelation);
+ put_bits(&pb, 2, s->downsampling);
+ put_bits(&pb, 5, (s->num_taps >> 5)-1); // 32..1024
+ put_bits(&pb, 1, 0); // XXX FIXME: no custom tap quant table
+
+ flush_put_bits(&pb);
+ avctx->extradata_size = put_bits_count(&pb)/8;
+
+ av_log(avctx, AV_LOG_INFO, "Sonic: ver: %d.%d ls: %d dr: %d taps: %d block: %d frame: %d downsamp: %d\n",
+ s->version, s->minor_version, s->lossless, s->decorrelation, s->num_taps, s->block_align, s->frame_size, s->downsampling);
+
+ avctx->frame_size = s->block_align*s->downsampling;
+
+ return 0;
+}
+
+static av_cold int sonic_encode_close(AVCodecContext *avctx)
+{
+ SonicContext *s = avctx->priv_data;
+ int i;
+
+ for (i = 0; i < s->channels; i++)
+ av_freep(&s->coded_samples[i]);
+
+ av_freep(&s->predictor_k);
+ av_freep(&s->tail);
+ av_freep(&s->tap_quant);
+ av_freep(&s->window);
+ av_freep(&s->int_samples);
+
+ return 0;
+}
+
+static int sonic_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
+ const AVFrame *frame, int *got_packet_ptr)
+{
+ SonicContext *s = avctx->priv_data;
+ RangeCoder c;
+ int i, j, ch, quant = 0, x = 0;
+ int ret;
+ const short *samples = (const int16_t*)frame->data[0];
+ uint8_t state[32];
+
+ if ((ret = ff_alloc_packet2(avctx, avpkt, s->frame_size * 5 + 1000, 0)) < 0)
+ return ret;
+
+ ff_init_range_encoder(&c, avpkt->data, avpkt->size);
+ ff_build_rac_states(&c, 0.05*(1LL<<32), 256-8);
+ memset(state, 128, sizeof(state));
+
+ // short -> internal
+ for (i = 0; i < s->frame_size; i++)
+ s->int_samples[i] = samples[i];
+
+ if (!s->lossless)
+ for (i = 0; i < s->frame_size; i++)
+ s->int_samples[i] = s->int_samples[i] << SAMPLE_SHIFT;
+
+ switch(s->decorrelation)
+ {
+ case MID_SIDE:
+ for (i = 0; i < s->frame_size; i += s->channels)
+ {
+ s->int_samples[i] += s->int_samples[i+1];
+ s->int_samples[i+1] -= shift(s->int_samples[i], 1);
+ }
+ break;
+ case LEFT_SIDE:
+ for (i = 0; i < s->frame_size; i += s->channels)
+ s->int_samples[i+1] -= s->int_samples[i];
+ break;
+ case RIGHT_SIDE:
+ for (i = 0; i < s->frame_size; i += s->channels)
+ s->int_samples[i] -= s->int_samples[i+1];
+ break;
+ }
+
+ memset(s->window, 0, 4* s->window_size);
+
+ for (i = 0; i < s->tail_size; i++)
+ s->window[x++] = s->tail[i];
+
+ for (i = 0; i < s->frame_size; i++)
+ s->window[x++] = s->int_samples[i];
+
+ for (i = 0; i < s->tail_size; i++)
+ s->window[x++] = 0;
+
+ for (i = 0; i < s->tail_size; i++)
+ s->tail[i] = s->int_samples[s->frame_size - s->tail_size + i];
+
+ // generate taps
+ ret = modified_levinson_durbin(s->window, s->window_size,
+ s->predictor_k, s->num_taps, s->channels, s->tap_quant);
+ if (ret < 0)
+ return ret;
+
+ if ((ret = intlist_write(&c, state, s->predictor_k, s->num_taps, 0)) < 0)
+ return ret;
+
+ for (ch = 0; ch < s->channels; ch++)
+ {
+ x = s->tail_size+ch;
+ for (i = 0; i < s->block_align; i++)
+ {
+ int sum = 0;
+ for (j = 0; j < s->downsampling; j++, x += s->channels)
+ sum += s->window[x];
+ s->coded_samples[ch][i] = sum;
+ }
+ }
+
+ // simple rate control code
+ if (!s->lossless)
+ {
+ double energy1 = 0.0, energy2 = 0.0;
+ for (ch = 0; ch < s->channels; ch++)
+ {
+ for (i = 0; i < s->block_align; i++)
+ {
+ double sample = s->coded_samples[ch][i];
+ energy2 += sample*sample;
+ energy1 += fabs(sample);
+ }
+ }
+
+ energy2 = sqrt(energy2/(s->channels*s->block_align));
+ energy1 = M_SQRT2*energy1/(s->channels*s->block_align);
+
+ // increase bitrate when samples are like a gaussian distribution
+ // reduce bitrate when samples are like a two-tailed exponential distribution
+
+ if (energy2 > energy1)
+ energy2 += (energy2-energy1)*RATE_VARIATION;
+
+ quant = (int)(BASE_QUANT*s->quantization*energy2/SAMPLE_FACTOR);
+// av_log(avctx, AV_LOG_DEBUG, "quant: %d energy: %f / %f\n", quant, energy1, energy2);
+
+ quant = av_clip(quant, 1, 65534);
+
+ put_symbol(&c, state, quant, 0, NULL, NULL);
+
+ quant *= SAMPLE_FACTOR;
+ }
+
+ // write out coded samples
+ for (ch = 0; ch < s->channels; ch++)
+ {
+ if (!s->lossless)
+ for (i = 0; i < s->block_align; i++)
+ s->coded_samples[ch][i] = ROUNDED_DIV(s->coded_samples[ch][i], quant);
+
+ if ((ret = intlist_write(&c, state, s->coded_samples[ch], s->block_align, 1)) < 0)
+ return ret;
+ }
+
+// av_log(avctx, AV_LOG_DEBUG, "used bytes: %d\n", (put_bits_count(&pb)+7)/8);
+
+ avpkt->size = ff_rac_terminate(&c);
+ *got_packet_ptr = 1;
+ return 0;
+
+}
+#endif /* CONFIG_SONIC_ENCODER || CONFIG_SONIC_LS_ENCODER */
+
+#if CONFIG_SONIC_DECODER
+static const int samplerate_table[] =
+ { 44100, 22050, 11025, 96000, 48000, 32000, 24000, 16000, 8000 };
+
+static av_cold int sonic_decode_init(AVCodecContext *avctx)
+{
+ SonicContext *s = avctx->priv_data;
+ GetBitContext gb;
+ int i;
+
+ s->channels = avctx->channels;
+ s->samplerate = avctx->sample_rate;
+
+ if (!avctx->extradata)
+ {
+ av_log(avctx, AV_LOG_ERROR, "No mandatory headers present\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ init_get_bits8(&gb, avctx->extradata, avctx->extradata_size);
+
+ s->version = get_bits(&gb, 2);
+ if (s->version >= 2) {
+ s->version = get_bits(&gb, 8);
+ s->minor_version = get_bits(&gb, 8);
+ }
+ if (s->version != 2)
+ {
+ av_log(avctx, AV_LOG_ERROR, "Unsupported Sonic version, please report\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ if (s->version >= 1)
+ {
+ int sample_rate_index;
+ s->channels = get_bits(&gb, 2);
+ sample_rate_index = get_bits(&gb, 4);
+ if (sample_rate_index >= FF_ARRAY_ELEMS(samplerate_table)) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid sample_rate_index %d\n", sample_rate_index);
+ return AVERROR_INVALIDDATA;
+ }
+ s->samplerate = samplerate_table[sample_rate_index];
+ av_log(avctx, AV_LOG_INFO, "Sonicv2 chans: %d samprate: %d\n",
+ s->channels, s->samplerate);
+ }
+
+ if (s->channels > MAX_CHANNELS || s->channels < 1)
+ {
+ av_log(avctx, AV_LOG_ERROR, "Only mono and stereo streams are supported by now\n");
+ return AVERROR_INVALIDDATA;
+ }
+ avctx->channels = s->channels;
+
+ s->lossless = get_bits1(&gb);
+ if (!s->lossless)
+ skip_bits(&gb, 3); // XXX FIXME
+ s->decorrelation = get_bits(&gb, 2);
+ if (s->decorrelation != 3 && s->channels != 2) {
+ av_log(avctx, AV_LOG_ERROR, "invalid decorrelation %d\n", s->decorrelation);
+ return AVERROR_INVALIDDATA;
+ }
+
+ s->downsampling = get_bits(&gb, 2);
+ if (!s->downsampling) {
+ av_log(avctx, AV_LOG_ERROR, "invalid downsampling value\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ s->num_taps = (get_bits(&gb, 5)+1)<<5;
+ if (get_bits1(&gb)) // XXX FIXME
+ av_log(avctx, AV_LOG_INFO, "Custom quant table\n");
+
+ s->block_align = 2048LL*s->samplerate/(44100*s->downsampling);
+ s->frame_size = s->channels*s->block_align*s->downsampling;
+// avctx->frame_size = s->block_align;
+
+ av_log(avctx, AV_LOG_INFO, "Sonic: ver: %d.%d ls: %d dr: %d taps: %d block: %d frame: %d downsamp: %d\n",
+ s->version, s->minor_version, s->lossless, s->decorrelation, s->num_taps, s->block_align, s->frame_size, s->downsampling);
+
+ // generate taps
+ s->tap_quant = av_calloc(s->num_taps, sizeof(*s->tap_quant));
+ if (!s->tap_quant)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < s->num_taps; i++)
+ s->tap_quant[i] = ff_sqrt(i+1);
+
+ s->predictor_k = av_calloc(s->num_taps, sizeof(*s->predictor_k));
+
+ for (i = 0; i < s->channels; i++)
+ {
+ s->predictor_state[i] = av_calloc(s->num_taps, sizeof(**s->predictor_state));
+ if (!s->predictor_state[i])
+ return AVERROR(ENOMEM);
+ }
+
+ for (i = 0; i < s->channels; i++)
+ {
+ s->coded_samples[i] = av_calloc(s->block_align, sizeof(**s->coded_samples));
+ if (!s->coded_samples[i])
+ return AVERROR(ENOMEM);
+ }
+ s->int_samples = av_calloc(s->frame_size, sizeof(*s->int_samples));
+ if (!s->int_samples)
+ return AVERROR(ENOMEM);
+
+ avctx->sample_fmt = AV_SAMPLE_FMT_S16;
+ return 0;
+}
+
+static av_cold int sonic_decode_close(AVCodecContext *avctx)
+{
+ SonicContext *s = avctx->priv_data;
+ int i;
+
+ av_freep(&s->int_samples);
+ av_freep(&s->tap_quant);
+ av_freep(&s->predictor_k);
+
+ for (i = 0; i < s->channels; i++)
+ {
+ av_freep(&s->predictor_state[i]);
+ av_freep(&s->coded_samples[i]);
+ }
+
+ return 0;
+}
+
+static int sonic_decode_frame(AVCodecContext *avctx,
+ void *data, int *got_frame_ptr,
+ AVPacket *avpkt)
+{
+ const uint8_t *buf = avpkt->data;
+ int buf_size = avpkt->size;
+ SonicContext *s = avctx->priv_data;
+ RangeCoder c;
+ uint8_t state[32];
+ int i, quant, ch, j, ret;
+ int16_t *samples;
+ AVFrame *frame = data;
+
+ if (buf_size == 0) return 0;
+
+ frame->nb_samples = s->frame_size / avctx->channels;
+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
+ return ret;
+ samples = (int16_t *)frame->data[0];
+
+// av_log(NULL, AV_LOG_INFO, "buf_size: %d\n", buf_size);
+
+ memset(state, 128, sizeof(state));
+ ff_init_range_decoder(&c, buf, buf_size);
+ ff_build_rac_states(&c, 0.05*(1LL<<32), 256-8);
+
+ intlist_read(&c, state, s->predictor_k, s->num_taps, 0);
+
+ // dequantize
+ for (i = 0; i < s->num_taps; i++)
+ s->predictor_k[i] *= s->tap_quant[i];
+
+ if (s->lossless)
+ quant = 1;
+ else
+ quant = get_symbol(&c, state, 0) * SAMPLE_FACTOR;
+
+// av_log(NULL, AV_LOG_INFO, "quant: %d\n", quant);
+
+ for (ch = 0; ch < s->channels; ch++)
+ {
+ int x = ch;
+
+ predictor_init_state(s->predictor_k, s->predictor_state[ch], s->num_taps);
+
+ intlist_read(&c, state, s->coded_samples[ch], s->block_align, 1);
+
+ for (i = 0; i < s->block_align; i++)
+ {
+ for (j = 0; j < s->downsampling - 1; j++)
+ {
+ s->int_samples[x] = predictor_calc_error(s->predictor_k, s->predictor_state[ch], s->num_taps, 0);
+ x += s->channels;
+ }
+
+ s->int_samples[x] = predictor_calc_error(s->predictor_k, s->predictor_state[ch], s->num_taps, s->coded_samples[ch][i] * quant);
+ x += s->channels;
+ }
+
+ for (i = 0; i < s->num_taps; i++)
+ s->predictor_state[ch][i] = s->int_samples[s->frame_size - s->channels + ch - i*s->channels];
+ }
+
+ switch(s->decorrelation)
+ {
+ case MID_SIDE:
+ for (i = 0; i < s->frame_size; i += s->channels)
+ {
+ s->int_samples[i+1] += shift(s->int_samples[i], 1);
+ s->int_samples[i] -= s->int_samples[i+1];
+ }
+ break;
+ case LEFT_SIDE:
+ for (i = 0; i < s->frame_size; i += s->channels)
+ s->int_samples[i+1] += s->int_samples[i];
+ break;
+ case RIGHT_SIDE:
+ for (i = 0; i < s->frame_size; i += s->channels)
+ s->int_samples[i] += s->int_samples[i+1];
+ break;
+ }
+
+ if (!s->lossless)
+ for (i = 0; i < s->frame_size; i++)
+ s->int_samples[i] = shift(s->int_samples[i], SAMPLE_SHIFT);
+
+ // internal -> short
+ for (i = 0; i < s->frame_size; i++)
+ samples[i] = av_clip_int16(s->int_samples[i]);
+
+ *got_frame_ptr = 1;
+
+ return buf_size;
+}
+
+AVCodec ff_sonic_decoder = {
+ .name = "sonic",
+ .long_name = NULL_IF_CONFIG_SMALL("Sonic"),
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_SONIC,
+ .priv_data_size = sizeof(SonicContext),
+ .init = sonic_decode_init,
+ .close = sonic_decode_close,
+ .decode = sonic_decode_frame,
- .capabilities = CODEC_CAP_EXPERIMENTAL,
+    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_EXPERIMENTAL,
+};
+#endif /* CONFIG_SONIC_DECODER */
+
+#if CONFIG_SONIC_ENCODER
+AVCodec ff_sonic_encoder = {
+ .name = "sonic",
+ .long_name = NULL_IF_CONFIG_SMALL("Sonic"),
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_SONIC,
+ .priv_data_size = sizeof(SonicContext),
+ .init = sonic_encode_init,
+ .encode2 = sonic_encode_frame,
+ .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE },
- .capabilities = CODEC_CAP_EXPERIMENTAL,
++ .capabilities = AV_CODEC_CAP_EXPERIMENTAL,
+ .close = sonic_encode_close,
+};
+#endif
+
+#if CONFIG_SONIC_LS_ENCODER
+AVCodec ff_sonic_ls_encoder = {
+ .name = "sonicls",
+ .long_name = NULL_IF_CONFIG_SMALL("Sonic lossless"),
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_SONIC_LS,
+ .priv_data_size = sizeof(SonicContext),
+ .init = sonic_encode_init,
+ .encode2 = sonic_encode_frame,
+ .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE },
++ .capabilities = AV_CODEC_CAP_EXPERIMENTAL,
+ .close = sonic_encode_close,
+};
+#endif
.init = ff_mjpeg_decode_init,
.close = ff_mjpeg_decode_end,
.decode = sp5x_decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
+ .max_lowres = 3,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
-
+#endif
+#if CONFIG_AMV_DECODER
AVCodec ff_amv_decoder = {
.name = "amv",
.long_name = NULL_IF_CONFIG_SMALL("AMV Video"),
.init = ff_mjpeg_decode_init,
.close = ff_mjpeg_decode_end,
.decode = sp5x_decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .max_lowres = 3,
+ .capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
+#endif
.id = AV_CODEC_ID_TAK,
.priv_data_size = sizeof(TAKDecContext),
.init = tak_decode_init,
- .init_static_data = tak_init_static_data,
.close = tak_decode_close,
.decode = tak_decode_frame,
- .capabilities = AV_CODEC_CAP_DR1,
+ .init_thread_copy = ONLY_IF_THREADS_ENABLED(init_thread_copy),
+ .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
++ .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
AV_SAMPLE_FMT_S16P,
AV_SAMPLE_FMT_S32P,
--- /dev/null
- .capabilities = CODEC_CAP_DR1,
+/*
+ * Pinnacle TARGA CineWave YUV16 decoder
+ * Copyright (c) 2012 Carl Eugen Hoyos
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "internal.h"
+
+static av_cold int y216_decode_init(AVCodecContext *avctx)
+{
+ avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
+ avctx->bits_per_raw_sample = 14;
+
+ return 0;
+}
+
+static int y216_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame, AVPacket *avpkt)
+{
+ AVFrame *pic = data;
+ const uint16_t *src = (uint16_t *)avpkt->data;
+ uint16_t *y, *u, *v, aligned_width = FFALIGN(avctx->width, 4);
+ int i, j, ret;
+
+ if (avpkt->size < 4 * avctx->height * aligned_width) {
+ av_log(avctx, AV_LOG_ERROR, "Insufficient input data.\n");
+ return AVERROR(EINVAL);
+ }
+
+ if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
+ return ret;
+
+ pic->key_frame = 1;
+ pic->pict_type = AV_PICTURE_TYPE_I;
+
+ y = (uint16_t *)pic->data[0];
+ u = (uint16_t *)pic->data[1];
+ v = (uint16_t *)pic->data[2];
+
+ for (i = 0; i < avctx->height; i++) {
+ for (j = 0; j < avctx->width >> 1; j++) {
+ u[ j ] = src[4 * j ] << 2 | src[4 * j ] >> 14;
+ y[2 * j ] = src[4 * j + 1] << 2 | src[4 * j + 1] >> 14;
+ v[ j ] = src[4 * j + 2] << 2 | src[4 * j + 2] >> 14;
+ y[2 * j + 1] = src[4 * j + 3] << 2 | src[4 * j + 3] >> 14;
+ }
+
+ y += pic->linesize[0] >> 1;
+ u += pic->linesize[1] >> 1;
+ v += pic->linesize[2] >> 1;
+ src += aligned_width << 1;
+ }
+
+ *got_frame = 1;
+
+ return avpkt->size;
+}
+
+AVCodec ff_targa_y216_decoder = {
+ .name = "targa_y216",
+ .long_name = NULL_IF_CONFIG_SMALL("Pinnacle TARGA CineWave YUV16"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_TARGA_Y216,
+ .init = y216_decode_init,
+ .decode = y216_decode_frame,
++ .capabilities = AV_CODEC_CAP_DR1,
+};
.init = tiff_init,
.close = tiff_end,
.decode = decode_frame,
- .capabilities = AV_CODEC_CAP_DR1,
+ .init_thread_copy = ONLY_IF_THREADS_ENABLED(tiff_init),
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
++ .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
};
.id = AV_CODEC_ID_TIFF,
.priv_data_size = sizeof(TiffEncoderContext),
.init = encode_init,
- .capabilities = CODEC_CAP_FRAME_THREADS | CODEC_CAP_INTRA_ONLY,
+ .close = encode_close,
++ .capabilities = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
.encode2 = encode_frame,
.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_RGB24, AV_PIX_FMT_RGB48LE, AV_PIX_FMT_PAL8,
.init = tta_decode_init,
.close = tta_decode_close,
.decode = tta_decode_frame,
- .capabilities = AV_CODEC_CAP_DR1,
+ .init_thread_copy = ONLY_IF_THREADS_ENABLED(init_thread_copy),
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
++ .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
+ .priv_class = &tta_decoder_class,
};
--- /dev/null
- .capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_LOSSLESS,
+/*
+ * TTA (The Lossless True Audio) encoder
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define BITSTREAM_WRITER_LE
+#include "ttadata.h"
+#include "avcodec.h"
+#include "put_bits.h"
+#include "internal.h"
+#include "libavutil/crc.h"
+
+typedef struct TTAEncContext {
+ const AVCRC *crc_table;
+ int bps;
+ TTAChannel *ch_ctx;
+} TTAEncContext;
+
+static av_cold int tta_encode_init(AVCodecContext *avctx)
+{
+ TTAEncContext *s = avctx->priv_data;
+
+ s->crc_table = av_crc_get_table(AV_CRC_32_IEEE_LE);
+
+ switch (avctx->sample_fmt) {
+ case AV_SAMPLE_FMT_U8:
+ avctx->bits_per_raw_sample = 8;
+ break;
+ case AV_SAMPLE_FMT_S16:
+ avctx->bits_per_raw_sample = 16;
+ break;
+ case AV_SAMPLE_FMT_S32:
+ if (avctx->bits_per_raw_sample > 24)
+ av_log(avctx, AV_LOG_WARNING, "encoding as 24 bits-per-sample\n");
+ avctx->bits_per_raw_sample = 24;
+ }
+
+ s->bps = avctx->bits_per_raw_sample >> 3;
+ avctx->frame_size = 256 * avctx->sample_rate / 245;
+
+ s->ch_ctx = av_malloc_array(avctx->channels, sizeof(*s->ch_ctx));
+ if (!s->ch_ctx)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
+
+static inline void ttafilter_process(TTAFilter *c, int32_t *in)
+{
+ register int32_t *dl = c->dl, *qm = c->qm, *dx = c->dx, sum = c->round;
+
+ if (c->error < 0) {
+ qm[0] -= dx[0]; qm[1] -= dx[1]; qm[2] -= dx[2]; qm[3] -= dx[3];
+ qm[4] -= dx[4]; qm[5] -= dx[5]; qm[6] -= dx[6]; qm[7] -= dx[7];
+ } else if (c->error > 0) {
+ qm[0] += dx[0]; qm[1] += dx[1]; qm[2] += dx[2]; qm[3] += dx[3];
+ qm[4] += dx[4]; qm[5] += dx[5]; qm[6] += dx[6]; qm[7] += dx[7];
+ }
+
+ sum += dl[0] * qm[0] + dl[1] * qm[1] + dl[2] * qm[2] + dl[3] * qm[3] +
+ dl[4] * qm[4] + dl[5] * qm[5] + dl[6] * qm[6] + dl[7] * qm[7];
+
+ dx[0] = dx[1]; dx[1] = dx[2]; dx[2] = dx[3]; dx[3] = dx[4];
+ dl[0] = dl[1]; dl[1] = dl[2]; dl[2] = dl[3]; dl[3] = dl[4];
+
+ dx[4] = ((dl[4] >> 30) | 1);
+ dx[5] = ((dl[5] >> 30) | 2) & ~1;
+ dx[6] = ((dl[6] >> 30) | 2) & ~1;
+ dx[7] = ((dl[7] >> 30) | 4) & ~3;
+
+ dl[4] = -dl[5]; dl[5] = -dl[6];
+ dl[6] = *in - dl[7]; dl[7] = *in;
+ dl[5] += dl[6]; dl[4] += dl[5];
+
+ *in -= (sum >> c->shift);
+ c->error = *in;
+}
+
+static int32_t get_sample(const AVFrame *frame, int sample,
+ enum AVSampleFormat format)
+{
+ int32_t ret;
+
+ if (format == AV_SAMPLE_FMT_U8) {
+ ret = frame->data[0][sample] - 0x80;
+ } else if (format == AV_SAMPLE_FMT_S16) {
+ const int16_t *ptr = (const int16_t *)frame->data[0];
+ ret = ptr[sample];
+ } else {
+ const int32_t *ptr = (const int32_t *)frame->data[0];
+ ret = ptr[sample] >> 8;
+ }
+
+ return ret;
+}
+
+static int tta_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
+ const AVFrame *frame, int *got_packet_ptr)
+{
+ TTAEncContext *s = avctx->priv_data;
+ PutBitContext pb;
+ int ret, i, out_bytes, cur_chan = 0, res = 0, samples = 0;
+
+ if ((ret = ff_alloc_packet2(avctx, avpkt, frame->nb_samples * 2 * avctx->channels * s->bps, 0)) < 0)
+ return ret;
+ init_put_bits(&pb, avpkt->data, avpkt->size);
+
+ // init per channel states
+ for (i = 0; i < avctx->channels; i++) {
+ s->ch_ctx[i].predictor = 0;
+ ff_tta_filter_init(&s->ch_ctx[i].filter, ff_tta_filter_configs[s->bps - 1]);
+ ff_tta_rice_init(&s->ch_ctx[i].rice, 10, 10);
+ }
+
+ for (i = 0; i < frame->nb_samples * avctx->channels; i++) {
+ TTAChannel *c = &s->ch_ctx[cur_chan];
+ TTAFilter *filter = &c->filter;
+ TTARice *rice = &c->rice;
+ uint32_t k, unary, outval;
+ int32_t value, temp;
+
+ value = get_sample(frame, samples++, avctx->sample_fmt);
+
+ if (avctx->channels > 1) {
+ if (cur_chan < avctx->channels - 1)
+ value = res = get_sample(frame, samples, avctx->sample_fmt) - value;
+ else
+ value -= res / 2;
+ }
+
+ temp = value;
+#define PRED(x, k) (int32_t)((((uint64_t)(x) << (k)) - (x)) >> (k))
+ switch (s->bps) {
+ case 1: value -= PRED(c->predictor, 4); break;
+ case 2:
+ case 3: value -= PRED(c->predictor, 5); break;
+ }
+ c->predictor = temp;
+
+ ttafilter_process(filter, &value);
+ outval = (value > 0) ? (value << 1) - 1: -value << 1;
+
+ k = rice->k0;
+
+ rice->sum0 += outval - (rice->sum0 >> 4);
+ if (rice->k0 > 0 && rice->sum0 < ff_tta_shift_16[rice->k0])
+ rice->k0--;
+ else if (rice->sum0 > ff_tta_shift_16[rice->k0 + 1])
+ rice->k0++;
+
+ if (outval >= ff_tta_shift_1[k]) {
+ outval -= ff_tta_shift_1[k];
+ k = rice->k1;
+
+ rice->sum1 += outval - (rice->sum1 >> 4);
+ if (rice->k1 > 0 && rice->sum1 < ff_tta_shift_16[rice->k1])
+ rice->k1--;
+ else if (rice->sum1 > ff_tta_shift_16[rice->k1 + 1])
+ rice->k1++;
+
+ unary = 1 + (outval >> k);
+ do {
+ if (unary > 31) {
+ put_bits(&pb, 31, 0x7FFFFFFF);
+ unary -= 31;
+ } else {
+ put_bits(&pb, unary, (1 << unary) - 1);
+ unary = 0;
+ }
+ } while (unary);
+ }
+
+ put_bits(&pb, 1, 0);
+
+ if (k)
+ put_bits(&pb, k, outval & (ff_tta_shift_1[k] - 1));
+
+ if (cur_chan < avctx->channels - 1)
+ cur_chan++;
+ else
+ cur_chan = 0;
+ }
+
+ flush_put_bits(&pb);
+ out_bytes = put_bits_count(&pb) >> 3;
+ put_bits32(&pb, av_crc(s->crc_table, UINT32_MAX, avpkt->data, out_bytes) ^ UINT32_MAX);
+ flush_put_bits(&pb);
+
+ avpkt->pts = frame->pts;
+ avpkt->size = out_bytes + 4;
+ avpkt->duration = ff_samples_to_time_base(avctx, frame->nb_samples);
+ *got_packet_ptr = 1;
+ return 0;
+}
+
+static av_cold int tta_encode_close(AVCodecContext *avctx)
+{
+ TTAEncContext *s = avctx->priv_data;
+ av_freep(&s->ch_ctx);
+ return 0;
+}
+
+AVCodec ff_tta_encoder = {
+ .name = "tta",
+ .long_name = NULL_IF_CONFIG_SMALL("TTA (True Audio)"),
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_TTA,
+ .priv_data_size = sizeof(TTAEncContext),
+ .init = tta_encode_init,
+ .close = tta_encode_close,
+ .encode2 = tta_encode_frame,
++ .capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME | AV_CODEC_CAP_LOSSLESS,
+ .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_U8,
+ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_S32,
+ AV_SAMPLE_FMT_NONE },
+};
if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
break;
- if (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
++ if (avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU)
+ break;
if (!setup_hwaccel(avctx, ret, desc->name))
break;
goto free_and_end;
}
avctx->frame_number = 0;
+ avctx->codec_descriptor = avcodec_descriptor_get(avctx->codec_id);
- if (avctx->codec->capabilities & CODEC_CAP_EXPERIMENTAL &&
+ if ((avctx->codec->capabilities & AV_CODEC_CAP_EXPERIMENTAL) &&
avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
- if (!(codec2->capabilities & CODEC_CAP_EXPERIMENTAL))
+ const char *codec_string = av_codec_is_encoder(codec) ? "encoder" : "decoder";
+ AVCodec *codec2;
+ av_log(avctx, AV_LOG_ERROR,
+ "The %s '%s' is experimental but experimental codecs are not enabled, "
+ "add '-strict %d' if you want to use it.\n",
+ codec_string, codec->name, FF_COMPLIANCE_EXPERIMENTAL);
+ codec2 = av_codec_is_encoder(codec) ? avcodec_find_encoder(codec->id) : avcodec_find_decoder(codec->id);
++ if (!(codec2->capabilities & AV_CODEC_CAP_EXPERIMENTAL))
+ av_log(avctx, AV_LOG_ERROR, "Alternatively use the non experimental %s '%s'.\n",
+ codec_string, codec2->name);
ret = AVERROR_EXPERIMENTAL;
goto free_and_end;
}
goto free_and_end;
}
}
- if (!HAVE_THREADS && !(codec->capabilities & CODEC_CAP_AUTO_THREADS))
+ if (!HAVE_THREADS && !(codec->capabilities & AV_CODEC_CAP_AUTO_THREADS))
avctx->thread_count = 1;
+ if (avctx->codec->max_lowres < avctx->lowres || avctx->lowres < 0) {
+ av_log(avctx, AV_LOG_ERROR, "The maximum value for lowres supported by the decoder is %d\n",
+ avctx->codec->max_lowres);
+ ret = AVERROR(EINVAL);
+ goto free_and_end;
+ }
+
+#if FF_API_VISMV
+ if (avctx->debug_mv)
+ av_log(avctx, AV_LOG_WARNING, "The 'vismv' option is deprecated, "
+ "see the codecview filter instead.\n");
+#endif
+
if (av_codec_is_encoder(avctx->codec)) {
int i;
#if FF_API_CODED_FRAME
/* check for valid frame size */
if (frame) {
- if (avctx->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
+ if (avctx->codec->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME) {
- if (frame->nb_samples > avctx->frame_size)
- return AVERROR(EINVAL);
+ if (frame->nb_samples > avctx->frame_size) {
+ av_log(avctx, AV_LOG_ERROR, "more samples than frame size (avcodec_encode_audio2)\n");
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
- } else if (!(avctx->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) {
+ } else if (!(avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) {
if (frame->nb_samples < avctx->frame_size &&
!avctx->internal->last_audio_frame) {
ret = pad_last_frame(avctx, &padded_frame, frame);
*got_packet_ptr = 0;
- if (!(avctx->codec->capabilities & CODEC_CAP_DELAY) && !frame) {
+ if(CONFIG_FRAME_THREAD_ENCODER &&
+ avctx->internal->frame_thread_encoder && (avctx->active_thread_type&FF_THREAD_FRAME))
+ return ff_thread_video_encode_frame(avctx, avpkt, frame, got_packet_ptr);
+
+ if ((avctx->flags&AV_CODEC_FLAG_PASS1) && avctx->stats_out)
+ avctx->stats_out[0] = '\0';
+
+ if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) && !frame) {
av_free_packet(avpkt);
av_init_packet(avpkt);
avpkt->size = 0;
if (!ret) {
if (!*got_packet_ptr)
avpkt->size = 0;
- else if (!(avctx->codec->capabilities & CODEC_CAP_DELAY))
+ else if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
avpkt->pts = avpkt->dts = frame->pts;
- if (!user_packet && avpkt->size) {
- ret = av_buffer_realloc(&avpkt->buf, avpkt->size);
+ if (needs_realloc && avpkt->data) {
+ ret = av_buffer_realloc(&avpkt->buf, avpkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
if (ret >= 0)
avpkt->data = avpkt->buf->data;
}
av_frame_unref(picture);
- if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size || (avctx->active_thread_type & FF_THREAD_FRAME)) {
+ if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size ||
+ (avctx->active_thread_type & FF_THREAD_FRAME)) {
+ int did_split = av_packet_split_side_data(&tmp);
+ ret = apply_param_change(avctx, &tmp);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n");
+ if (avctx->err_recognition & AV_EF_EXPLODE)
+ goto fail;
+ }
+
+ avctx->internal->pkt = &tmp;
if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
ret = ff_thread_decode_frame(avctx, picture, got_picture_ptr,
- avpkt);
+ &tmp);
else {
ret = avctx->codec->decode(avctx, picture, got_picture_ptr,
- avpkt);
+ &tmp);
picture->pkt_dts = avpkt->dts;
+
+ if(!avctx->has_b_frames){
+ av_frame_set_pkt_pos(picture, avpkt->pos);
+ }
+ //FIXME these should be under if(!avctx->has_b_frames)
/* get_buffer is supposed to set frame parameters */
- if (!(avctx->codec->capabilities & CODEC_CAP_DR1)) {
+ if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
- picture->sample_aspect_ratio = avctx->sample_aspect_ratio;
- picture->width = avctx->width;
- picture->height = avctx->height;
- picture->format = avctx->pix_fmt;
+ if (!picture->sample_aspect_ratio.num) picture->sample_aspect_ratio = avctx->sample_aspect_ratio;
+ if (!picture->width) picture->width = avctx->width;
+ if (!picture->height) picture->height = avctx->height;
+ if (picture->format == AV_PIX_FMT_NONE) picture->format = avctx->pix_fmt;
}
}
av_frame_unref(frame);
- if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size || (avctx->active_thread_type & FF_THREAD_FRAME)) {
- if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) {
- ret = avctx->codec->decode(avctx, frame, got_frame_ptr, avpkt);
++ if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size || (avctx->active_thread_type & FF_THREAD_FRAME)) {
+ uint8_t *side;
+ int side_size;
+ uint32_t discard_padding = 0;
+ uint8_t skip_reason = 0;
+ uint8_t discard_reason = 0;
+ // copy to ensure we do not change avpkt
+ AVPacket tmp = *avpkt;
+ int did_split = av_packet_split_side_data(&tmp);
+ ret = apply_param_change(avctx, &tmp);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n");
+ if (avctx->err_recognition & AV_EF_EXPLODE)
+ goto fail;
+ }
+
+ avctx->internal->pkt = &tmp;
+ if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
+ ret = ff_thread_decode_frame(avctx, frame, got_frame_ptr, &tmp);
+ else {
+ ret = avctx->codec->decode(avctx, frame, got_frame_ptr, &tmp);
+ av_assert0(ret <= tmp.size);
+ frame->pkt_dts = avpkt->dts;
+ }
if (ret >= 0 && *got_frame_ptr) {
avctx->frame_number++;
- frame->pkt_dts = avpkt->dts;
+ av_frame_set_best_effort_timestamp(frame,
+ guess_correct_pts(avctx,
+ frame->pkt_pts,
+ frame->pkt_dts));
if (frame->format == AV_SAMPLE_FMT_NONE)
frame->format = avctx->sample_fmt;
+ if (!frame->channel_layout)
+ frame->channel_layout = avctx->channel_layout;
+ if (!av_frame_get_channels(frame))
+ av_frame_set_channels(frame, avctx->channels);
+ if (!frame->sample_rate)
+ frame->sample_rate = avctx->sample_rate;
+ }
+
+ side= av_packet_get_side_data(avctx->internal->pkt, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
+ if(side && side_size>=10) {
+ avctx->internal->skip_samples = AV_RL32(side);
+ discard_padding = AV_RL32(side + 4);
+ av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
+ avctx->internal->skip_samples, (int)discard_padding);
+ skip_reason = AV_RL8(side + 8);
+ discard_reason = AV_RL8(side + 9);
+ }
+ if (avctx->internal->skip_samples && *got_frame_ptr &&
+ !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
+ if(frame->nb_samples <= avctx->internal->skip_samples){
+ *got_frame_ptr = 0;
+ avctx->internal->skip_samples -= frame->nb_samples;
+ av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
+ avctx->internal->skip_samples);
+ } else {
+ av_samples_copy(frame->extended_data, frame->extended_data, 0, avctx->internal->skip_samples,
+ frame->nb_samples - avctx->internal->skip_samples, avctx->channels, frame->format);
+ if(avctx->pkt_timebase.num && avctx->sample_rate) {
+ int64_t diff_ts = av_rescale_q(avctx->internal->skip_samples,
+ (AVRational){1, avctx->sample_rate},
+ avctx->pkt_timebase);
+ if(frame->pkt_pts!=AV_NOPTS_VALUE)
+ frame->pkt_pts += diff_ts;
+ if(frame->pkt_dts!=AV_NOPTS_VALUE)
+ frame->pkt_dts += diff_ts;
+ if (av_frame_get_pkt_duration(frame) >= diff_ts)
+ av_frame_set_pkt_duration(frame, av_frame_get_pkt_duration(frame) - diff_ts);
+ } else {
+ av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
+ }
+ av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
+ avctx->internal->skip_samples, frame->nb_samples);
+ frame->nb_samples -= avctx->internal->skip_samples;
+ avctx->internal->skip_samples = 0;
+ }
+ }
+
+ if (discard_padding > 0 && discard_padding <= frame->nb_samples && *got_frame_ptr &&
+ !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
+ if (discard_padding == frame->nb_samples) {
+ *got_frame_ptr = 0;
+ } else {
+ if(avctx->pkt_timebase.num && avctx->sample_rate) {
+ int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
+ (AVRational){1, avctx->sample_rate},
+ avctx->pkt_timebase);
+ if (av_frame_get_pkt_duration(frame) >= diff_ts)
+ av_frame_set_pkt_duration(frame, av_frame_get_pkt_duration(frame) - diff_ts);
+ } else {
+ av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");
+ }
+ av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n",
+ (int)discard_padding, frame->nb_samples);
+ frame->nb_samples -= discard_padding;
+ }
+ }
+
+ if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && *got_frame_ptr) {
+ AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
+ if (fside) {
+ AV_WL32(fside->data, avctx->internal->skip_samples);
+ AV_WL32(fside->data + 4, discard_padding);
+ AV_WL8(fside->data + 8, skip_reason);
+ AV_WL8(fside->data + 9, discard_reason);
+ avctx->internal->skip_samples = 0;
+ }
+ }
+fail:
+ avctx->internal->pkt = NULL;
+ if (did_split) {
+ av_packet_free_side_data(&tmp);
+ if(ret == tmp.size)
+ ret = avpkt->size;
+ }
+ if (ret >= 0 && *got_frame_ptr) {
if (!avctx->refcounted_frames) {
int err = unrefcount_frame(avci, frame);
if (err < 0)
int *got_sub_ptr,
AVPacket *avpkt)
{
- int ret;
+ int i, ret = 0;
+
+ if (!avpkt->data && avpkt->size) {
+ av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
+ return AVERROR(EINVAL);
+ }
+ if (!avctx->codec)
+ return AVERROR(EINVAL);
+ if (avctx->codec->type != AVMEDIA_TYPE_SUBTITLE) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid media type for subtitles\n");
+ return AVERROR(EINVAL);
+ }
- avctx->internal->pkt = avpkt;
*got_sub_ptr = 0;
- ret = avctx->codec->decode(avctx, sub, got_sub_ptr, avpkt);
- if (*got_sub_ptr)
- avctx->frame_number++;
+ get_subtitle_defaults(sub);
+
- if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size) {
++ if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) {
+ AVPacket pkt_recoded;
+ AVPacket tmp = *avpkt;
+ int did_split = av_packet_split_side_data(&tmp);
+ //apply_param_change(avctx, &tmp);
+
+ if (did_split) {
+ /* FFMIN() prevents overflow in case the packet wasn't allocated with
+ * proper padding.
+ * If the side data is smaller than the buffer padding size, the
+ * remaining bytes should have already been filled with zeros by the
+ * original packet allocation anyway. */
+ memset(tmp.data + tmp.size, 0,
+ FFMIN(avpkt->size - tmp.size, FF_INPUT_BUFFER_PADDING_SIZE));
+ }
+
+ pkt_recoded = tmp;
+ ret = recode_subtitle(avctx, &pkt_recoded, &tmp);
+ if (ret < 0) {
+ *got_sub_ptr = 0;
+ } else {
+ avctx->internal->pkt = &pkt_recoded;
+
+ if (avctx->pkt_timebase.den && avpkt->pts != AV_NOPTS_VALUE)
+ sub->pts = av_rescale_q(avpkt->pts,
+ avctx->pkt_timebase, AV_TIME_BASE_Q);
+ ret = avctx->codec->decode(avctx, sub, got_sub_ptr, &pkt_recoded);
+ av_assert1((ret >= 0) >= !!*got_sub_ptr &&
+ !!*got_sub_ptr >= !!sub->num_rects);
+
+ if (sub->num_rects && !sub->end_display_time && avpkt->duration &&
+ avctx->pkt_timebase.num) {
+ AVRational ms = { 1, 1000 };
+ sub->end_display_time = av_rescale_q(avpkt->duration,
+ avctx->pkt_timebase, ms);
+ }
+
+ for (i = 0; i < sub->num_rects; i++) {
+ if (sub->rects[i]->ass && !utf8_check(sub->rects[i]->ass)) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Invalid UTF-8 in decoded subtitles text; "
+ "maybe missing -sub_charenc option\n");
+ avsubtitle_free(sub);
+ return AVERROR_INVALIDDATA;
+ }
+ }
+
+ if (tmp.data != pkt_recoded.data) { // did we recode?
+ /* prevent from destroying side data from original packet */
+ pkt_recoded.side_data = NULL;
+ pkt_recoded.side_data_elems = 0;
+
+ av_free_packet(&pkt_recoded);
+ }
+ if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB)
+ sub->format = 0;
+ else if (avctx->codec_descriptor->props & AV_CODEC_PROP_TEXT_SUB)
+ sub->format = 1;
+ avctx->internal->pkt = NULL;
+ }
+
+ if (did_split) {
+ av_packet_free_side_data(&tmp);
+ if(ret == tmp.size)
+ ret = avpkt->size;
+ }
+
+ if (*got_sub_ptr)
+ avctx->frame_number++;
+ }
+
return ret;
}
.init = utvideo_encode_init,
.encode2 = utvideo_encode_frame,
.close = utvideo_encode_close,
- .capabilities = CODEC_CAP_FRAME_THREADS | CODEC_CAP_INTRA_ONLY,
++ .capabilities = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_RGB24, AV_PIX_FMT_RGBA, AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE
.long_name = NULL_IF_CONFIG_SMALL("Uncompressed 4:2:2 10-bit"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_V210,
+ .priv_data_size = sizeof(V210DecContext),
.init = decode_init,
.decode = decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
+ .priv_class = &v210dec_class,
};
--- /dev/null
- .capabilities = CODEC_CAP_DR1,
+/*
+ * v308 decoder
+ * Copyright (c) 2011 Carl Eugen Hoyos
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "internal.h"
+
+static av_cold int v308_decode_init(AVCodecContext *avctx)
+{
+ avctx->pix_fmt = AV_PIX_FMT_YUV444P;
+
+ if (avctx->width & 1)
+ av_log(avctx, AV_LOG_WARNING, "v308 requires width to be even.\n");
+
+ return 0;
+}
+
+static int v308_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame, AVPacket *avpkt)
+{
+ AVFrame *pic = data;
+ const uint8_t *src = avpkt->data;
+ uint8_t *y, *u, *v;
+ int i, j, ret;
+
+ if (avpkt->size < 3 * avctx->height * avctx->width) {
+ av_log(avctx, AV_LOG_ERROR, "Insufficient input data.\n");
+ return AVERROR(EINVAL);
+ }
+
+ if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
+ return ret;
+
+ pic->key_frame = 1;
+ pic->pict_type = AV_PICTURE_TYPE_I;
+
+ y = pic->data[0];
+ u = pic->data[1];
+ v = pic->data[2];
+
+ for (i = 0; i < avctx->height; i++) {
+ for (j = 0; j < avctx->width; j++) {
+ v[j] = *src++;
+ y[j] = *src++;
+ u[j] = *src++;
+ }
+
+ y += pic->linesize[0];
+ u += pic->linesize[1];
+ v += pic->linesize[2];
+ }
+
+ *got_frame = 1;
+
+ return avpkt->size;
+}
+
+AVCodec ff_v308_decoder = {
+ .name = "v308",
+ .long_name = NULL_IF_CONFIG_SMALL("Uncompressed packed 4:4:4"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_V308,
+ .init = v308_decode_init,
+ .decode = v308_decode_frame,
++ .capabilities = AV_CODEC_CAP_DR1,
+};
--- /dev/null
- .capabilities = CODEC_CAP_DR1,
+/*
+ * v408 decoder
+ * Copyright (c) 2012 Carl Eugen Hoyos
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "internal.h"
+
+static av_cold int v408_decode_init(AVCodecContext *avctx)
+{
+ avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
+
+ return 0;
+}
+
+static int v408_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame, AVPacket *avpkt)
+{
+ AVFrame *pic = data;
+ const uint8_t *src = avpkt->data;
+ uint8_t *y, *u, *v, *a;
+ int i, j, ret;
+
+ if (avpkt->size < 4 * avctx->height * avctx->width) {
+ av_log(avctx, AV_LOG_ERROR, "Insufficient input data.\n");
+ return AVERROR(EINVAL);
+ }
+
+ if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
+ return ret;
+
+ pic->key_frame = 1;
+ pic->pict_type = AV_PICTURE_TYPE_I;
+
+ y = pic->data[0];
+ u = pic->data[1];
+ v = pic->data[2];
+ a = pic->data[3];
+
+ for (i = 0; i < avctx->height; i++) {
+ for (j = 0; j < avctx->width; j++) {
+ if (avctx->codec_id==AV_CODEC_ID_AYUV) {
+ v[j] = *src++;
+ u[j] = *src++;
+ y[j] = *src++;
+ a[j] = *src++;
+ } else {
+ u[j] = *src++;
+ y[j] = *src++;
+ v[j] = *src++;
+ a[j] = *src++;
+ }
+ }
+
+ y += pic->linesize[0];
+ u += pic->linesize[1];
+ v += pic->linesize[2];
+ a += pic->linesize[3];
+ }
+
+ *got_frame = 1;
+
+ return avpkt->size;
+}
+
+#if CONFIG_AYUV_DECODER
+AVCodec ff_ayuv_decoder = {
+ .name = "ayuv",
+ .long_name = NULL_IF_CONFIG_SMALL("Uncompressed packed MS 4:4:4:4"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_AYUV,
+ .init = v408_decode_init,
+ .decode = v408_decode_frame,
- .capabilities = CODEC_CAP_DR1,
++ .capabilities = AV_CODEC_CAP_DR1,
+};
+#endif
+#if CONFIG_V408_DECODER
+AVCodec ff_v408_decoder = {
+ .name = "v408",
+ .long_name = NULL_IF_CONFIG_SMALL("Uncompressed packed QT 4:4:4:4"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_V408,
+ .init = v408_decode_init,
+ .decode = v408_decode_frame,
++ .capabilities = AV_CODEC_CAP_DR1,
+};
+#endif
*got_frame = 1;
}
- return 0;
+ return buf_size;
+ }
+
- if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
++ if (s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU) {
+ if (v->profile < PROFILE_ADVANCED)
+ avctx->pix_fmt = AV_PIX_FMT_VDPAU_WMV3;
+ else
+ avctx->pix_fmt = AV_PIX_FMT_VDPAU_VC1;
}
//for advanced profile we may need to parse and unescape data
if (size <= 0) continue;
switch (AV_RB32(start)) {
case VC1_CODE_FRAME:
- if (avctx->hwaccel)
+ if (avctx->hwaccel ||
- s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
++ s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU)
buf_start = start;
buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
break;
case VC1_CODE_FIELD: {
int buf_size3;
- tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
+ if (avctx->hwaccel ||
- s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
++ s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU)
+ buf_start_second_field = start;
+ tmp = av_realloc_array(slices, sizeof(*slices), (n_slices+1));
if (!tmp)
goto err;
slices = tmp;
av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
goto err;
} else { // found field marker, unescape second field
- tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
+ if (avctx->hwaccel ||
- s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
++ s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU)
+ buf_start_second_field = divider;
+ tmp = av_realloc_array(slices, sizeof(*slices), (n_slices+1));
if (!tmp)
goto err;
slices = tmp;
s->me.qpel_put = s->qdsp.put_qpel_pixels_tab;
s->me.qpel_avg = s->qdsp.avg_qpel_pixels_tab;
- if (avctx->hwaccel) {
- if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
- goto err;
- if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
- goto err;
- if (avctx->hwaccel->end_frame(avctx) < 0)
- goto err;
+ if ((CONFIG_VC1_VDPAU_DECODER)
- &&s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
++ &&s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU) {
+ if (v->field_mode && buf_start_second_field) {
+ ff_vdpau_vc1_decode_picture(s, buf_start, buf_start_second_field - buf_start);
+ ff_vdpau_vc1_decode_picture(s, buf_start_second_field, (buf + buf_size) - buf_start_second_field);
+ } else {
+ ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
+ }
+ } else if (avctx->hwaccel) {
+ if (v->field_mode && buf_start_second_field) {
+ // decode first field
+ s->picture_structure = PICT_BOTTOM_FIELD - v->tff;
+ if (avctx->hwaccel->start_frame(avctx, buf_start, buf_start_second_field - buf_start) < 0)
+ goto err;
+ if (avctx->hwaccel->decode_slice(avctx, buf_start, buf_start_second_field - buf_start) < 0)
+ goto err;
+ if (avctx->hwaccel->end_frame(avctx) < 0)
+ goto err;
+
+ // decode second field
+ s->gb = slices[n_slices1 + 1].gb;
+ s->picture_structure = PICT_TOP_FIELD + v->tff;
+ v->second_field = 1;
+ v->pic_header_flag = 0;
+ if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "parsing header for second field failed");
+ goto err;
+ }
+ v->s.current_picture_ptr->f->pict_type = v->s.pict_type;
+
+ if (avctx->hwaccel->start_frame(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
+ goto err;
+ if (avctx->hwaccel->decode_slice(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
+ goto err;
+ if (avctx->hwaccel->end_frame(avctx) < 0)
+ goto err;
+ } else {
+ s->picture_structure = PICT_FRAME;
+ if (avctx->hwaccel->start_frame(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
+ goto err;
+ if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
+ goto err;
+ if (avctx->hwaccel->end_frame(avctx) < 0)
+ goto err;
+ }
} else {
int header_ret = 0;
};
#endif
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
+#if CONFIG_WMV3_VDPAU_DECODER
+AVCodec ff_wmv3_vdpau_decoder = {
+ .name = "wmv3_vdpau",
+ .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_WMV3,
+ .priv_data_size = sizeof(VC1Context),
+ .init = vc1_decode_init,
+ .close = ff_vc1_decode_end,
+ .decode = vc1_decode_frame,
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
++ .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HWACCEL_VDPAU,
+ .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_WMV3, AV_PIX_FMT_NONE },
+ .profiles = NULL_IF_CONFIG_SMALL(profiles)
+};
+#endif
+
+#if CONFIG_VC1_VDPAU_DECODER
+AVCodec ff_vc1_vdpau_decoder = {
+ .name = "vc1_vdpau",
+ .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_VC1,
+ .priv_data_size = sizeof(VC1Context),
+ .init = vc1_decode_init,
+ .close = ff_vc1_decode_end,
+ .decode = vc1_decode_frame,
++ .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HWACCEL_VDPAU,
+ .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_VC1, AV_PIX_FMT_NONE },
+ .profiles = NULL_IF_CONFIG_SMALL(profiles)
+};
+#endif
+
#if CONFIG_WMV3IMAGE_DECODER
AVCodec ff_wmv3image_decoder = {
.name = "wmv3image",
--- /dev/null
- .capabilities = CODEC_CAP_DELAY,
+/*
+ * Copyright (c) 2012, Xidorn Quan
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * H.264 decoder via VDA
+ * @author Xidorn Quan <quanxunzhen@gmail.com>
+ */
+
+#include <string.h>
+#include <CoreFoundation/CoreFoundation.h>
+
+#include "vda.h"
+#include "h264.h"
+#include "avcodec.h"
+
+#ifndef kCFCoreFoundationVersionNumber10_7
+#define kCFCoreFoundationVersionNumber10_7 635.00
+#endif
+
+extern AVCodec ff_h264_decoder, ff_h264_vda_decoder;
+
+static const enum AVPixelFormat vda_pixfmts_prior_10_7[] = {
+ AV_PIX_FMT_UYVY422,
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_NONE
+};
+
+static const enum AVPixelFormat vda_pixfmts[] = {
+ AV_PIX_FMT_UYVY422,
+ AV_PIX_FMT_YUYV422,
+ AV_PIX_FMT_NV12,
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_NONE
+};
+
+typedef struct {
+ H264Context h264ctx;
+ int h264_initialized;
+ struct vda_context vda_ctx;
+ enum AVPixelFormat pix_fmt;
+
+ /* for backing-up fields set by user.
+ * we have to gain full control of such fields here */
+ void *hwaccel_context;
+ enum AVPixelFormat (*get_format)(struct AVCodecContext *s, const enum AVPixelFormat * fmt);
+ int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags);
+#if FF_API_GET_BUFFER
+ int (*get_buffer)(struct AVCodecContext *c, AVFrame *pic);
+#endif
+} VDADecoderContext;
+
+static enum AVPixelFormat get_format(struct AVCodecContext *avctx,
+ const enum AVPixelFormat *fmt)
+{
+ return AV_PIX_FMT_VDA_VLD;
+}
+
+typedef struct {
+ CVPixelBufferRef cv_buffer;
+} VDABufferContext;
+
+static void release_buffer(void *opaque, uint8_t *data)
+{
+ VDABufferContext *context = opaque;
+ CVPixelBufferUnlockBaseAddress(context->cv_buffer, 0);
+ CVPixelBufferRelease(context->cv_buffer);
+ av_free(context);
+}
+
+static int get_buffer2(AVCodecContext *avctx, AVFrame *pic, int flag)
+{
+ VDABufferContext *context = av_mallocz(sizeof(VDABufferContext));
+ AVBufferRef *buffer = av_buffer_create(NULL, 0, release_buffer, context, 0);
+ if (!context || !buffer) {
+ av_free(context);
+ return AVERROR(ENOMEM);
+ }
+
+ pic->buf[0] = buffer;
+ pic->data[0] = (void *)1;
+ return 0;
+}
+
+static inline void set_context(AVCodecContext *avctx)
+{
+ VDADecoderContext *ctx = avctx->priv_data;
+ ctx->hwaccel_context = avctx->hwaccel_context;
+ avctx->hwaccel_context = &ctx->vda_ctx;
+ ctx->get_format = avctx->get_format;
+ avctx->get_format = get_format;
+ ctx->get_buffer2 = avctx->get_buffer2;
+ avctx->get_buffer2 = get_buffer2;
+#if FF_API_GET_BUFFER
+ ctx->get_buffer = avctx->get_buffer;
+ avctx->get_buffer = NULL;
+#endif
+}
+
+static inline void restore_context(AVCodecContext *avctx)
+{
+ VDADecoderContext *ctx = avctx->priv_data;
+ avctx->hwaccel_context = ctx->hwaccel_context;
+ avctx->get_format = ctx->get_format;
+ avctx->get_buffer2 = ctx->get_buffer2;
+#if FF_API_GET_BUFFER
+ avctx->get_buffer = ctx->get_buffer;
+#endif
+}
+
+static int vdadec_decode(AVCodecContext *avctx,
+ void *data, int *got_frame, AVPacket *avpkt)
+{
+ VDADecoderContext *ctx = avctx->priv_data;
+ AVFrame *pic = data;
+ int ret;
+
+ set_context(avctx);
+ ret = ff_h264_decoder.decode(avctx, data, got_frame, avpkt);
+ restore_context(avctx);
+ if (*got_frame) {
+ AVBufferRef *buffer = pic->buf[0];
+ VDABufferContext *context = av_buffer_get_opaque(buffer);
+ CVPixelBufferRef cv_buffer = (CVPixelBufferRef)pic->data[3];
+
+ CVPixelBufferRetain(cv_buffer);
+ CVPixelBufferLockBaseAddress(cv_buffer, 0);
+ context->cv_buffer = cv_buffer;
+ pic->format = ctx->pix_fmt;
+ if (CVPixelBufferIsPlanar(cv_buffer)) {
+ int i, count = CVPixelBufferGetPlaneCount(cv_buffer);
+ av_assert0(count < 4);
+ for (i = 0; i < count; i++) {
+ pic->data[i] = CVPixelBufferGetBaseAddressOfPlane(cv_buffer, i);
+ pic->linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(cv_buffer, i);
+ }
+ } else {
+ pic->data[0] = CVPixelBufferGetBaseAddress(cv_buffer);
+ pic->linesize[0] = CVPixelBufferGetBytesPerRow(cv_buffer);
+ }
+ }
+ avctx->pix_fmt = ctx->pix_fmt;
+
+ return ret;
+}
+
+static av_cold int vdadec_close(AVCodecContext *avctx)
+{
+ VDADecoderContext *ctx = avctx->priv_data;
+ /* release buffers and decoder */
+ ff_vda_destroy_decoder(&ctx->vda_ctx);
+ /* close H.264 decoder */
+ if (ctx->h264_initialized) {
+ set_context(avctx);
+ ff_h264_decoder.close(avctx);
+ restore_context(avctx);
+ }
+ return 0;
+}
+
+static av_cold int vdadec_init(AVCodecContext *avctx)
+{
+ VDADecoderContext *ctx = avctx->priv_data;
+ struct vda_context *vda_ctx = &ctx->vda_ctx;
+ OSStatus status;
+ int ret, i;
+
+ ctx->h264_initialized = 0;
+
+ /* init pix_fmts of codec */
+ if (!ff_h264_vda_decoder.pix_fmts) {
+ if (kCFCoreFoundationVersionNumber < kCFCoreFoundationVersionNumber10_7)
+ ff_h264_vda_decoder.pix_fmts = vda_pixfmts_prior_10_7;
+ else
+ ff_h264_vda_decoder.pix_fmts = vda_pixfmts;
+ }
+
+ /* init vda */
+ memset(vda_ctx, 0, sizeof(struct vda_context));
+ vda_ctx->width = avctx->width;
+ vda_ctx->height = avctx->height;
+ vda_ctx->format = 'avc1';
+ vda_ctx->use_sync_decoding = 1;
+ vda_ctx->use_ref_buffer = 1;
+ ctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
+ switch (ctx->pix_fmt) {
+ case AV_PIX_FMT_UYVY422:
+ vda_ctx->cv_pix_fmt_type = '2vuy';
+ break;
+ case AV_PIX_FMT_YUYV422:
+ vda_ctx->cv_pix_fmt_type = 'yuvs';
+ break;
+ case AV_PIX_FMT_NV12:
+ vda_ctx->cv_pix_fmt_type = '420v';
+ break;
+ case AV_PIX_FMT_YUV420P:
+ vda_ctx->cv_pix_fmt_type = 'y420';
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format: %d\n", avctx->pix_fmt);
+ goto failed;
+ }
+ status = ff_vda_create_decoder(vda_ctx,
+ avctx->extradata, avctx->extradata_size);
+ if (status != kVDADecoderNoErr) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Failed to init VDA decoder: %d.\n", status);
+ goto failed;
+ }
+
+ /* init H.264 decoder */
+ set_context(avctx);
+ ret = ff_h264_decoder.init(avctx);
+ restore_context(avctx);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to open H.264 decoder.\n");
+ goto failed;
+ }
+ ctx->h264_initialized = 1;
+
+ for (i = 0; i < MAX_SPS_COUNT; i++) {
+ SPS *sps = ctx->h264ctx.sps_buffers[i];
+ if (sps && (sps->bit_depth_luma != 8 ||
+ sps->chroma_format_idc == 2 ||
+ sps->chroma_format_idc == 3)) {
+ av_log(avctx, AV_LOG_ERROR, "Format is not supported.\n");
+ goto failed;
+ }
+ }
+
+ return 0;
+
+failed:
+ vdadec_close(avctx);
+ return -1;
+}
+
+static void vdadec_flush(AVCodecContext *avctx)
+{
+ set_context(avctx);
+ ff_h264_decoder.flush(avctx);
+ restore_context(avctx);
+}
+
+AVCodec ff_h264_vda_decoder = {
+ .name = "h264_vda",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_H264,
+ .priv_data_size = sizeof(VDADecoderContext),
+ .init = vdadec_init,
+ .close = vdadec_close,
+ .decode = vdadec_decode,
++ .capabilities = AV_CODEC_CAP_DELAY,
+ .flush = vdadec_flush,
+ .long_name = NULL_IF_CONFIG_SMALL("H.264 (VDA acceleration)"),
+};
.id = AV_CODEC_ID_ADPCM_VIMA,
.init = decode_init,
.decode = decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1,
};
- .capabilities = CODEC_CAP_DR1,
+
+#if FF_API_VIMA_DECODER
+AVCodec ff_vima_decoder = {
+ .name = "vima",
+ .long_name = NULL_IF_CONFIG_SMALL("LucasArts VIMA audio"),
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = AV_CODEC_ID_ADPCM_VIMA,
+ .init = decode_init,
+ .decode = decode_frame,
++ .capabilities = AV_CODEC_CAP_DR1,
+};
+#endif
.init = vp6_decode_init,
.close = vp6_decode_free,
.decode = ff_vp56_decode_frame,
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_SLICE_THREADS,
- .capabilities = AV_CODEC_CAP_DR1,
++ .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS,
};
return 0;
}
+static const AVProfile profiles[] = {
+ { FF_PROFILE_VP9_0, "Profile 0" },
+ { FF_PROFILE_VP9_1, "Profile 1" },
+ { FF_PROFILE_VP9_2, "Profile 2" },
+ { FF_PROFILE_VP9_3, "Profile 3" },
+ { FF_PROFILE_UNKNOWN },
+};
+
AVCodec ff_vp9_decoder = {
- .name = "vp9",
- .long_name = NULL_IF_CONFIG_SMALL("Google VP9"),
- .type = AVMEDIA_TYPE_VIDEO,
- .id = AV_CODEC_ID_VP9,
- .priv_data_size = sizeof(VP9Context),
- .init = vp9_decode_init,
- .decode = vp9_decode_packet,
- .flush = vp9_decode_flush,
- .close = vp9_decode_free,
- .capabilities = AV_CODEC_CAP_DR1,
+ .name = "vp9",
+ .long_name = NULL_IF_CONFIG_SMALL("Google VP9"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_VP9,
+ .priv_data_size = sizeof(VP9Context),
+ .init = vp9_decode_init,
+ .close = vp9_decode_free,
+ .decode = vp9_decode_frame,
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
++ .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
+ .flush = vp9_decode_flush,
+ .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp9_decode_init_thread_copy),
+ .update_thread_context = ONLY_IF_THREADS_ENABLED(vp9_decode_update_thread_context),
+ .profiles = NULL_IF_CONFIG_SMALL(profiles),
};
.close = wavpack_decode_end,
.decode = wavpack_decode_frame,
.flush = wavpack_decode_flush,
- .capabilities = AV_CODEC_CAP_DR1,
+ .init_thread_copy = ONLY_IF_THREADS_ENABLED(init_thread_copy),
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
++ .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
};
--- /dev/null
- .capabilities = CODEC_CAP_SMALL_LAST_FRAME,
+/*
+ * WavPack lossless audio encoder
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define BITSTREAM_WRITER_LE
+
+#include "libavutil/intreadwrite.h"
+#include "libavutil/opt.h"
+#include "avcodec.h"
+#include "internal.h"
+#include "put_bits.h"
+#include "bytestream.h"
+#include "wavpackenc.h"
+#include "wavpack.h"
+
+#define UPDATE_WEIGHT(weight, delta, source, result) \
+ if ((source) && (result)) { \
+ int32_t s = (int32_t) ((source) ^ (result)) >> 31; \
+ weight = ((delta) ^ s) + ((weight) - s); \
+ }
+
+#define APPLY_WEIGHT_F(weight, sample) ((((((sample) & 0xffff) * (weight)) >> 9) + \
+ ((((sample) & ~0xffff) >> 9) * (weight)) + 1) >> 1)
+
+#define APPLY_WEIGHT_I(weight, sample) (((weight) * (sample) + 512) >> 10)
+
+#define APPLY_WEIGHT(weight, sample) ((sample) != (short) (sample) ? \
+ APPLY_WEIGHT_F(weight, sample) : APPLY_WEIGHT_I (weight, sample))
+
+#define CLEAR(destin) memset(&destin, 0, sizeof(destin));
+
+#define SHIFT_LSB 13
+#define SHIFT_MASK (0x1FU << SHIFT_LSB)
+
+#define MAG_LSB 18
+#define MAG_MASK (0x1FU << MAG_LSB)
+
+#define SRATE_LSB 23
+#define SRATE_MASK (0xFU << SRATE_LSB)
+
+#define EXTRA_TRY_DELTAS 1
+#define EXTRA_ADJUST_DELTAS 2
+#define EXTRA_SORT_FIRST 4
+#define EXTRA_BRANCHES 8
+#define EXTRA_SORT_LAST 16
+
+typedef struct WavPackExtraInfo {
+ struct Decorr dps[MAX_TERMS];
+ int nterms, log_limit, gt16bit;
+ uint32_t best_bits;
+} WavPackExtraInfo;
+
+typedef struct WavPackWords {
+ int pend_data, holding_one, zeros_acc;
+ int holding_zero, pend_count;
+ WvChannel c[2];
+} WavPackWords;
+
+typedef struct WavPackEncodeContext {
+ AVClass *class;
+ AVCodecContext *avctx;
+ PutBitContext pb;
+ int block_samples;
+ int buffer_size;
+ int sample_index;
+ int stereo, stereo_in;
+ int ch_offset;
+
+ int32_t *samples[2];
+ int samples_size[2];
+
+ int32_t *sampleptrs[MAX_TERMS+2][2];
+ int sampleptrs_size[MAX_TERMS+2][2];
+
+ int32_t *temp_buffer[2][2];
+ int temp_buffer_size[2][2];
+
+ int32_t *best_buffer[2];
+ int best_buffer_size[2];
+
+ int32_t *js_left, *js_right;
+ int js_left_size, js_right_size;
+
+ int32_t *orig_l, *orig_r;
+ int orig_l_size, orig_r_size;
+
+ unsigned extra_flags;
+ int optimize_mono;
+ int decorr_filter;
+ int joint;
+ int num_branches;
+
+ uint32_t flags;
+ uint32_t crc_x;
+ WavPackWords w;
+
+ uint8_t int32_sent_bits, int32_zeros, int32_ones, int32_dups;
+ uint8_t float_flags, float_shift, float_max_exp, max_exp;
+ int32_t shifted_ones, shifted_zeros, shifted_both;
+ int32_t false_zeros, neg_zeros, ordata;
+
+ int num_terms, shift, joint_stereo, false_stereo;
+ int num_decorrs, num_passes, best_decorr, mask_decorr;
+ struct Decorr decorr_passes[MAX_TERMS];
+ const WavPackDecorrSpec *decorr_specs;
+ float delta_decay;
+} WavPackEncodeContext;
+
+static av_cold int wavpack_encode_init(AVCodecContext *avctx)
+{
+ WavPackEncodeContext *s = avctx->priv_data;
+
+ s->avctx = avctx;
+
+ if (!avctx->frame_size) {
+ int block_samples;
+ if (!(avctx->sample_rate & 1))
+ block_samples = avctx->sample_rate / 2;
+ else
+ block_samples = avctx->sample_rate;
+
+ while (block_samples * avctx->channels > WV_MAX_SAMPLES)
+ block_samples /= 2;
+
+ while (block_samples * avctx->channels < 40000)
+ block_samples *= 2;
+ avctx->frame_size = block_samples;
+ } else if (avctx->frame_size && (avctx->frame_size < 128 ||
+ avctx->frame_size > WV_MAX_SAMPLES)) {
+ av_log(avctx, AV_LOG_ERROR, "invalid block size: %d\n", avctx->frame_size);
+ return AVERROR(EINVAL);
+ }
+
+ if (avctx->compression_level != FF_COMPRESSION_DEFAULT) {
+ if (avctx->compression_level >= 3) {
+ s->decorr_filter = 3;
+ s->num_passes = 9;
+ if (avctx->compression_level >= 8) {
+ s->num_branches = 4;
+ s->extra_flags = EXTRA_TRY_DELTAS|EXTRA_ADJUST_DELTAS|EXTRA_SORT_FIRST|EXTRA_SORT_LAST|EXTRA_BRANCHES;
+ } else if (avctx->compression_level >= 7) {
+ s->num_branches = 3;
+ s->extra_flags = EXTRA_TRY_DELTAS|EXTRA_ADJUST_DELTAS|EXTRA_SORT_FIRST|EXTRA_BRANCHES;
+ } else if (avctx->compression_level >= 6) {
+ s->num_branches = 2;
+ s->extra_flags = EXTRA_TRY_DELTAS|EXTRA_ADJUST_DELTAS|EXTRA_SORT_FIRST|EXTRA_BRANCHES;
+ } else if (avctx->compression_level >= 5) {
+ s->num_branches = 1;
+ s->extra_flags = EXTRA_TRY_DELTAS|EXTRA_ADJUST_DELTAS|EXTRA_SORT_FIRST|EXTRA_BRANCHES;
+ } else if (avctx->compression_level >= 4) {
+ s->num_branches = 1;
+ s->extra_flags = EXTRA_TRY_DELTAS|EXTRA_ADJUST_DELTAS|EXTRA_BRANCHES;
+ }
+ } else if (avctx->compression_level == 2) {
+ s->decorr_filter = 2;
+ s->num_passes = 4;
+ } else if (avctx->compression_level == 1) {
+ s->decorr_filter = 1;
+ s->num_passes = 2;
+ } else if (avctx->compression_level < 1) {
+ s->decorr_filter = 0;
+ s->num_passes = 0;
+ }
+ }
+
+ s->num_decorrs = decorr_filter_sizes[s->decorr_filter];
+ s->decorr_specs = decorr_filters[s->decorr_filter];
+
+ s->delta_decay = 2.0;
+
+ return 0;
+}
+
+static void shift_mono(int32_t *samples, int nb_samples, int shift)
+{
+ int i;
+ for (i = 0; i < nb_samples; i++)
+ samples[i] >>= shift;
+}
+
+static void shift_stereo(int32_t *left, int32_t *right,
+ int nb_samples, int shift)
+{
+ int i;
+ for (i = 0; i < nb_samples; i++) {
+ left [i] >>= shift;
+ right[i] >>= shift;
+ }
+}
+
+#define FLOAT_SHIFT_ONES 1
+#define FLOAT_SHIFT_SAME 2
+#define FLOAT_SHIFT_SENT 4
+#define FLOAT_ZEROS_SENT 8
+#define FLOAT_NEG_ZEROS 0x10
+#define FLOAT_EXCEPTIONS 0x20
+
+#define get_mantissa(f) ((f) & 0x7fffff)
+#define get_exponent(f) (((f) >> 23) & 0xff)
+#define get_sign(f) (((f) >> 31) & 0x1)
+
+static void process_float(WavPackEncodeContext *s, int32_t *sample)
+{
+ int32_t shift_count, value, f = *sample;
+
+ if (get_exponent(f) == 255) {
+ s->float_flags |= FLOAT_EXCEPTIONS;
+ value = 0x1000000;
+ shift_count = 0;
+ } else if (get_exponent(f)) {
+ shift_count = s->max_exp - get_exponent(f);
+ value = 0x800000 + get_mantissa(f);
+ } else {
+ shift_count = s->max_exp ? s->max_exp - 1 : 0;
+ value = get_mantissa(f);
+ }
+
+ if (shift_count < 25)
+ value >>= shift_count;
+ else
+ value = 0;
+
+ if (!value) {
+ if (get_exponent(f) || get_mantissa(f))
+ s->false_zeros++;
+ else if (get_sign(f))
+ s->neg_zeros++;
+ } else if (shift_count) {
+ int32_t mask = (1 << shift_count) - 1;
+
+ if (!(get_mantissa(f) & mask))
+ s->shifted_zeros++;
+ else if ((get_mantissa(f) & mask) == mask)
+ s->shifted_ones++;
+ else
+ s->shifted_both++;
+ }
+
+ s->ordata |= value;
+ *sample = get_sign(f) ? -value : value;
+}
+
+static int scan_float(WavPackEncodeContext *s,
+ int32_t *samples_l, int32_t *samples_r,
+ int nb_samples)
+{
+ uint32_t crc = 0xffffffffu;
+ int i;
+
+ s->shifted_ones = s->shifted_zeros = s->shifted_both = s->ordata = 0;
+ s->float_shift = s->float_flags = 0;
+ s->false_zeros = s->neg_zeros = 0;
+ s->max_exp = 0;
+
+ if (s->flags & WV_MONO_DATA) {
+ for (i = 0; i < nb_samples; i++) {
+ int32_t f = samples_l[i];
+ crc = crc * 27 + get_mantissa(f) * 9 + get_exponent(f) * 3 + get_sign(f);
+
+ if (get_exponent(f) > s->max_exp && get_exponent(f) < 255)
+ s->max_exp = get_exponent(f);
+ }
+ } else {
+ for (i = 0; i < nb_samples; i++) {
+ int32_t f;
+
+ f = samples_l[i];
+ crc = crc * 27 + get_mantissa(f) * 9 + get_exponent(f) * 3 + get_sign(f);
+ if (get_exponent(f) > s->max_exp && get_exponent(f) < 255)
+ s->max_exp = get_exponent(f);
+
+ f = samples_r[i];
+ crc = crc * 27 + get_mantissa(f) * 9 + get_exponent(f) * 3 + get_sign(f);
+
+ if (get_exponent(f) > s->max_exp && get_exponent(f) < 255)
+ s->max_exp = get_exponent(f);
+ }
+ }
+
+ s->crc_x = crc;
+
+ if (s->flags & WV_MONO_DATA) {
+ for (i = 0; i < nb_samples; i++)
+ process_float(s, &samples_l[i]);
+ } else {
+ for (i = 0; i < nb_samples; i++) {
+ process_float(s, &samples_l[i]);
+ process_float(s, &samples_r[i]);
+ }
+ }
+
+ s->float_max_exp = s->max_exp;
+
+ if (s->shifted_both)
+ s->float_flags |= FLOAT_SHIFT_SENT;
+ else if (s->shifted_ones && !s->shifted_zeros)
+ s->float_flags |= FLOAT_SHIFT_ONES;
+ else if (s->shifted_ones && s->shifted_zeros)
+ s->float_flags |= FLOAT_SHIFT_SAME;
+ else if (s->ordata && !(s->ordata & 1)) {
+ do {
+ s->float_shift++;
+ s->ordata >>= 1;
+ } while (!(s->ordata & 1));
+
+ if (s->flags & WV_MONO_DATA)
+ shift_mono(samples_l, nb_samples, s->float_shift);
+ else
+ shift_stereo(samples_l, samples_r, nb_samples, s->float_shift);
+ }
+
+ s->flags &= ~MAG_MASK;
+
+ while (s->ordata) {
+ s->flags += 1 << MAG_LSB;
+ s->ordata >>= 1;
+ }
+
+ if (s->false_zeros || s->neg_zeros)
+ s->float_flags |= FLOAT_ZEROS_SENT;
+
+ if (s->neg_zeros)
+ s->float_flags |= FLOAT_NEG_ZEROS;
+
+ return s->float_flags & (FLOAT_EXCEPTIONS | FLOAT_ZEROS_SENT |
+ FLOAT_SHIFT_SENT | FLOAT_SHIFT_SAME);
+}
+
+static void scan_int23(WavPackEncodeContext *s,
+ int32_t *samples_l, int32_t *samples_r,
+ int nb_samples)
+{
+ uint32_t magdata = 0, ordata = 0, xordata = 0, anddata = ~0;
+ int i, total_shift = 0;
+
+ s->int32_sent_bits = s->int32_zeros = s->int32_ones = s->int32_dups = 0;
+
+ if (s->flags & WV_MONO_DATA) {
+ for (i = 0; i < nb_samples; i++) {
+ int32_t M = samples_l[i];
+
+ magdata |= (M < 0) ? ~M : M;
+ xordata |= M ^ -(M & 1);
+ anddata &= M;
+ ordata |= M;
+
+ if ((ordata & 1) && !(anddata & 1) && (xordata & 2))
+ return;
+ }
+ } else {
+ for (i = 0; i < nb_samples; i++) {
+ int32_t L = samples_l[i];
+ int32_t R = samples_r[i];
+
+ magdata |= (L < 0) ? ~L : L;
+ magdata |= (R < 0) ? ~R : R;
+ xordata |= L ^ -(L & 1);
+ xordata |= R ^ -(R & 1);
+ anddata &= L & R;
+ ordata |= L | R;
+
+ if ((ordata & 1) && !(anddata & 1) && (xordata & 2))
+ return;
+ }
+ }
+
+ s->flags &= ~MAG_MASK;
+
+ while (magdata) {
+ s->flags += 1 << MAG_LSB;
+ magdata >>= 1;
+ }
+
+ if (!(s->flags & MAG_MASK))
+ return;
+
+ if (!(ordata & 1)) {
+ do {
+ s->flags -= 1 << MAG_LSB;
+ s->int32_zeros++;
+ total_shift++;
+ ordata >>= 1;
+ } while (!(ordata & 1));
+ } else if (anddata & 1) {
+ do {
+ s->flags -= 1 << MAG_LSB;
+ s->int32_ones++;
+ total_shift++;
+ anddata >>= 1;
+ } while (anddata & 1);
+ } else if (!(xordata & 2)) {
+ do {
+ s->flags -= 1 << MAG_LSB;
+ s->int32_dups++;
+ total_shift++;
+ xordata >>= 1;
+ } while (!(xordata & 2));
+ }
+
+ if (total_shift) {
+ s->flags |= WV_INT32_DATA;
+
+ if (s->flags & WV_MONO_DATA)
+ shift_mono(samples_l, nb_samples, total_shift);
+ else
+ shift_stereo(samples_l, samples_r, nb_samples, total_shift);
+ }
+}
+
+static int scan_int32(WavPackEncodeContext *s,
+ int32_t *samples_l, int32_t *samples_r,
+ int nb_samples)
+{
+ uint32_t magdata = 0, ordata = 0, xordata = 0, anddata = ~0;
+ uint32_t crc = 0xffffffffu;
+ int i, total_shift = 0;
+
+ s->int32_sent_bits = s->int32_zeros = s->int32_ones = s->int32_dups = 0;
+
+ if (s->flags & WV_MONO_DATA) {
+ for (i = 0; i < nb_samples; i++) {
+ int32_t M = samples_l[i];
+
+ crc = crc * 9 + (M & 0xffff) * 3 + ((M >> 16) & 0xffff);
+ magdata |= (M < 0) ? ~M : M;
+ xordata |= M ^ -(M & 1);
+ anddata &= M;
+ ordata |= M;
+ }
+ } else {
+ for (i = 0; i < nb_samples; i++) {
+ int32_t L = samples_l[i];
+ int32_t R = samples_r[i];
+
+ crc = crc * 9 + (L & 0xffff) * 3 + ((L >> 16) & 0xffff);
+ crc = crc * 9 + (R & 0xffff) * 3 + ((R >> 16) & 0xffff);
+ magdata |= (L < 0) ? ~L : L;
+ magdata |= (R < 0) ? ~R : R;
+ xordata |= L ^ -(L & 1);
+ xordata |= R ^ -(R & 1);
+ anddata &= L & R;
+ ordata |= L | R;
+ }
+ }
+
+ s->crc_x = crc;
+ s->flags &= ~MAG_MASK;
+
+ while (magdata) {
+ s->flags += 1 << MAG_LSB;
+ magdata >>= 1;
+ }
+
+ if (!((s->flags & MAG_MASK) >> MAG_LSB)) {
+ s->flags &= ~WV_INT32_DATA;
+ return 0;
+ }
+
+ if (!(ordata & 1))
+ do {
+ s->flags -= 1 << MAG_LSB;
+ s->int32_zeros++;
+ total_shift++;
+ ordata >>= 1;
+ } while (!(ordata & 1));
+ else if (anddata & 1)
+ do {
+ s->flags -= 1 << MAG_LSB;
+ s->int32_ones++;
+ total_shift++;
+ anddata >>= 1;
+ } while (anddata & 1);
+ else if (!(xordata & 2))
+ do {
+ s->flags -= 1 << MAG_LSB;
+ s->int32_dups++;
+ total_shift++;
+ xordata >>= 1;
+ } while (!(xordata & 2));
+
+ if (((s->flags & MAG_MASK) >> MAG_LSB) > 23) {
+ s->int32_sent_bits = (uint8_t)(((s->flags & MAG_MASK) >> MAG_LSB) - 23);
+ total_shift += s->int32_sent_bits;
+ s->flags &= ~MAG_MASK;
+ s->flags += 23 << MAG_LSB;
+ }
+
+ if (total_shift) {
+ s->flags |= WV_INT32_DATA;
+
+ if (s->flags & WV_MONO_DATA)
+ shift_mono(samples_l, nb_samples, total_shift);
+ else
+ shift_stereo(samples_l, samples_r, nb_samples, total_shift);
+ }
+
+ return s->int32_sent_bits;
+}
+
+static int8_t store_weight(int weight)
+{
+ weight = av_clip(weight, -1024, 1024);
+ if (weight > 0)
+ weight -= (weight + 64) >> 7;
+
+ return (weight + 4) >> 3;
+}
+
+static int restore_weight(int8_t weight)
+{
+ int result;
+
+ if ((result = (int) weight << 3) > 0)
+ result += (result + 64) >> 7;
+
+ return result;
+}
+
+static int log2s(int32_t value)
+{
+ return (value < 0) ? -wp_log2(-value) : wp_log2(value);
+}
+
+static void decorr_mono(int32_t *in_samples, int32_t *out_samples,
+ int nb_samples, struct Decorr *dpp, int dir)
+{
+ int m = 0, i;
+
+ dpp->sumA = 0;
+
+ if (dir < 0) {
+ out_samples += (nb_samples - 1);
+ in_samples += (nb_samples - 1);
+ }
+
+ dpp->weightA = restore_weight(store_weight(dpp->weightA));
+
+ for (i = 0; i < MAX_TERM; i++)
+ dpp->samplesA[i] = wp_exp2(log2s(dpp->samplesA[i]));
+
+ if (dpp->value > MAX_TERM) {
+ while (nb_samples--) {
+ int32_t left, sam_A;
+
+ sam_A = ((3 - (dpp->value & 1)) * dpp->samplesA[0] - dpp->samplesA[1]) >> !(dpp->value & 1);
+
+ dpp->samplesA[1] = dpp->samplesA[0];
+ dpp->samplesA[0] = left = in_samples[0];
+
+ left -= APPLY_WEIGHT(dpp->weightA, sam_A);
+ UPDATE_WEIGHT(dpp->weightA, dpp->delta, sam_A, left);
+ dpp->sumA += dpp->weightA;
+ out_samples[0] = left;
+ in_samples += dir;
+ out_samples += dir;
+ }
+ } else if (dpp->value > 0) {
+ while (nb_samples--) {
+ int k = (m + dpp->value) & (MAX_TERM - 1);
+ int32_t left, sam_A;
+
+ sam_A = dpp->samplesA[m];
+ dpp->samplesA[k] = left = in_samples[0];
+ m = (m + 1) & (MAX_TERM - 1);
+
+ left -= APPLY_WEIGHT(dpp->weightA, sam_A);
+ UPDATE_WEIGHT(dpp->weightA, dpp->delta, sam_A, left);
+ dpp->sumA += dpp->weightA;
+ out_samples[0] = left;
+ in_samples += dir;
+ out_samples += dir;
+ }
+ }
+
+ if (m && dpp->value > 0 && dpp->value <= MAX_TERM) {
+ int32_t temp_A[MAX_TERM];
+
+ memcpy(temp_A, dpp->samplesA, sizeof(dpp->samplesA));
+
+ for (i = 0; i < MAX_TERM; i++) {
+ dpp->samplesA[i] = temp_A[m];
+ m = (m + 1) & (MAX_TERM - 1);
+ }
+ }
+}
+
+static void reverse_mono_decorr(struct Decorr *dpp)
+{
+ if (dpp->value > MAX_TERM) {
+ int32_t sam_A;
+
+ if (dpp->value & 1)
+ sam_A = 2 * dpp->samplesA[0] - dpp->samplesA[1];
+ else
+ sam_A = (3 * dpp->samplesA[0] - dpp->samplesA[1]) >> 1;
+
+ dpp->samplesA[1] = dpp->samplesA[0];
+ dpp->samplesA[0] = sam_A;
+
+ if (dpp->value & 1)
+ sam_A = 2 * dpp->samplesA[0] - dpp->samplesA[1];
+ else
+ sam_A = (3 * dpp->samplesA[0] - dpp->samplesA[1]) >> 1;
+
+ dpp->samplesA[1] = sam_A;
+ } else if (dpp->value > 1) {
+ int i, j, k;
+
+ for (i = 0, j = dpp->value - 1, k = 0; k < dpp->value / 2; i++, j--, k++) {
+ i &= (MAX_TERM - 1);
+ j &= (MAX_TERM - 1);
+ dpp->samplesA[i] ^= dpp->samplesA[j];
+ dpp->samplesA[j] ^= dpp->samplesA[i];
+ dpp->samplesA[i] ^= dpp->samplesA[j];
+ }
+ }
+}
+
+static uint32_t log2sample(uint32_t v, int limit, uint32_t *result)
+{
+ uint32_t dbits;
+
+ if ((v += v >> 9) < (1 << 8)) {
+ dbits = nbits_table[v];
+ *result += (dbits << 8) + wp_log2_table[(v << (9 - dbits)) & 0xff];
+ } else {
+ if (v < (1 << 16))
+ dbits = nbits_table[v >> 8] + 8;
+ else if (v < (1 << 24))
+ dbits = nbits_table[v >> 16] + 16;
+ else
+ dbits = nbits_table[v >> 24] + 24;
+
+ *result += dbits = (dbits << 8) + wp_log2_table[(v >> (dbits - 9)) & 0xff];
+
+ if (limit && dbits >= limit)
+ return 1;
+ }
+
+ return 0;
+}
+
+static uint32_t log2mono(int32_t *samples, int nb_samples, int limit)
+{
+ uint32_t result = 0;
+ while (nb_samples--) {
+ if (log2sample(abs(*samples++), limit, &result))
+ return UINT32_MAX;
+ }
+ return result;
+}
+
+static uint32_t log2stereo(int32_t *samples_l, int32_t *samples_r,
+ int nb_samples, int limit)
+{
+ uint32_t result = 0;
+ while (nb_samples--) {
+ if (log2sample(abs(*samples_l++), limit, &result) ||
+ log2sample(abs(*samples_r++), limit, &result))
+ return UINT32_MAX;
+ }
+ return result;
+}
+
+static void decorr_mono_buffer(int32_t *samples, int32_t *outsamples,
+ int nb_samples, struct Decorr *dpp,
+ int tindex)
+{
+ struct Decorr dp, *dppi = dpp + tindex;
+ int delta = dppi->delta, pre_delta, term = dppi->value;
+
+ if (delta == 7)
+ pre_delta = 7;
+ else if (delta < 2)
+ pre_delta = 3;
+ else
+ pre_delta = delta + 1;
+
+ CLEAR(dp);
+ dp.value = term;
+ dp.delta = pre_delta;
+ decorr_mono(samples, outsamples, FFMIN(2048, nb_samples), &dp, -1);
+ dp.delta = delta;
+
+ if (tindex == 0)
+ reverse_mono_decorr(&dp);
+ else
+ CLEAR(dp.samplesA);
+
+ memcpy(dppi->samplesA, dp.samplesA, sizeof(dp.samplesA));
+ dppi->weightA = dp.weightA;
+
+ if (delta == 0) {
+ dp.delta = 1;
+ decorr_mono(samples, outsamples, nb_samples, &dp, 1);
+ dp.delta = 0;
+ memcpy(dp.samplesA, dppi->samplesA, sizeof(dp.samplesA));
+ dppi->weightA = dp.weightA = dp.sumA / nb_samples;
+ }
+
+ decorr_mono(samples, outsamples, nb_samples, &dp, 1);
+}
+
+static void recurse_mono(WavPackEncodeContext *s, WavPackExtraInfo *info,
+ int depth, int delta, uint32_t input_bits)
+{
+ int term, branches = s->num_branches - depth;
+ int32_t *samples, *outsamples;
+ uint32_t term_bits[22], bits;
+
+ if (branches < 1 || depth + 1 == info->nterms)
+ branches = 1;
+
+ CLEAR(term_bits);
+ samples = s->sampleptrs[depth][0];
+ outsamples = s->sampleptrs[depth + 1][0];
+
+ for (term = 1; term <= 18; term++) {
+ if (term == 17 && branches == 1 && depth + 1 < info->nterms)
+ continue;
+
+ if (term > 8 && term < 17)
+ continue;
+
+ if (!s->extra_flags && (term > 4 && term < 17))
+ continue;
+
+ info->dps[depth].value = term;
+ info->dps[depth].delta = delta;
+ decorr_mono_buffer(samples, outsamples, s->block_samples, info->dps, depth);
+ bits = log2mono(outsamples, s->block_samples, info->log_limit);
+
+ if (bits < info->best_bits) {
+ info->best_bits = bits;
+ CLEAR(s->decorr_passes);
+ memcpy(s->decorr_passes, info->dps, sizeof(info->dps[0]) * (depth + 1));
+ memcpy(s->sampleptrs[info->nterms + 1][0],
+ s->sampleptrs[depth + 1][0], s->block_samples * 4);
+ }
+
+ term_bits[term + 3] = bits;
+ }
+
+ while (depth + 1 < info->nterms && branches--) {
+ uint32_t local_best_bits = input_bits;
+ int best_term = 0, i;
+
+ for (i = 0; i < 22; i++)
+ if (term_bits[i] && term_bits[i] < local_best_bits) {
+ local_best_bits = term_bits[i];
+ best_term = i - 3;
+ }
+
+ if (!best_term)
+ break;
+
+ term_bits[best_term + 3] = 0;
+
+ info->dps[depth].value = best_term;
+ info->dps[depth].delta = delta;
+ decorr_mono_buffer(samples, outsamples, s->block_samples, info->dps, depth);
+
+ recurse_mono(s, info, depth + 1, delta, local_best_bits);
+ }
+}
+
+static void sort_mono(WavPackEncodeContext *s, WavPackExtraInfo *info)
+{
+ int reversed = 1;
+ uint32_t bits;
+
+ while (reversed) {
+ int ri, i;
+
+ memcpy(info->dps, s->decorr_passes, sizeof(s->decorr_passes));
+ reversed = 0;
+
+ for (ri = 0; ri < info->nterms && s->decorr_passes[ri].value; ri++) {
+
+ if (ri + 1 >= info->nterms || !s->decorr_passes[ri+1].value)
+ break;
+
+ if (s->decorr_passes[ri].value == s->decorr_passes[ri+1].value) {
+ decorr_mono_buffer(s->sampleptrs[ri][0], s->sampleptrs[ri+1][0],
+ s->block_samples, info->dps, ri);
+ continue;
+ }
+
+ info->dps[ri ] = s->decorr_passes[ri+1];
+ info->dps[ri+1] = s->decorr_passes[ri ];
+
+ for (i = ri; i < info->nterms && s->decorr_passes[i].value; i++)
+ decorr_mono_buffer(s->sampleptrs[i][0], s->sampleptrs[i+1][0],
+ s->block_samples, info->dps, i);
+
+ bits = log2mono(s->sampleptrs[i][0], s->block_samples, info->log_limit);
+ if (bits < info->best_bits) {
+ reversed = 1;
+ info->best_bits = bits;
+ CLEAR(s->decorr_passes);
+ memcpy(s->decorr_passes, info->dps, sizeof(info->dps[0]) * i);
+ memcpy(s->sampleptrs[info->nterms + 1][0], s->sampleptrs[i][0],
+ s->block_samples * 4);
+ } else {
+ info->dps[ri ] = s->decorr_passes[ri];
+ info->dps[ri+1] = s->decorr_passes[ri+1];
+ decorr_mono_buffer(s->sampleptrs[ri][0], s->sampleptrs[ri+1][0],
+ s->block_samples, info->dps, ri);
+ }
+ }
+ }
+}
+
+static void delta_mono(WavPackEncodeContext *s, WavPackExtraInfo *info)
+{
+ int lower = 0, delta, d;
+ uint32_t bits;
+
+ if (!s->decorr_passes[0].value)
+ return;
+ delta = s->decorr_passes[0].delta;
+
+ for (d = delta - 1; d >= 0; d--) {
+ int i;
+
+ for (i = 0; i < info->nterms && s->decorr_passes[i].value; i++) {
+ info->dps[i].value = s->decorr_passes[i].value;
+ info->dps[i].delta = d;
+ decorr_mono_buffer(s->sampleptrs[i][0], s->sampleptrs[i+1][0],
+ s->block_samples, info->dps, i);
+ }
+
+ bits = log2mono(s->sampleptrs[i][0], s->block_samples, info->log_limit);
+ if (bits >= info->best_bits)
+ break;
+
+ lower = 1;
+ info->best_bits = bits;
+ CLEAR(s->decorr_passes);
+ memcpy(s->decorr_passes, info->dps, sizeof(info->dps[0]) * i);
+ memcpy(s->sampleptrs[info->nterms + 1][0], s->sampleptrs[i][0],
+ s->block_samples * 4);
+ }
+
+ for (d = delta + 1; !lower && d <= 7; d++) {
+ int i;
+
+ for (i = 0; i < info->nterms && s->decorr_passes[i].value; i++) {
+ info->dps[i].value = s->decorr_passes[i].value;
+ info->dps[i].delta = d;
+ decorr_mono_buffer(s->sampleptrs[i][0], s->sampleptrs[i+1][0],
+ s->block_samples, info->dps, i);
+ }
+
+ bits = log2mono(s->sampleptrs[i][0], s->block_samples, info->log_limit);
+ if (bits >= info->best_bits)
+ break;
+
+ info->best_bits = bits;
+ CLEAR(s->decorr_passes);
+ memcpy(s->decorr_passes, info->dps, sizeof(info->dps[0]) * i);
+ memcpy(s->sampleptrs[info->nterms + 1][0], s->sampleptrs[i][0],
+ s->block_samples * 4);
+ }
+}
+
+static int allocate_buffers2(WavPackEncodeContext *s, int nterms)
+{
+ int i;
+
+ for (i = 0; i < nterms + 2; i++) {
+ av_fast_padded_malloc(&s->sampleptrs[i][0], &s->sampleptrs_size[i][0],
+ s->block_samples * 4);
+ if (!s->sampleptrs[i][0])
+ return AVERROR(ENOMEM);
+ if (!(s->flags & WV_MONO_DATA)) {
+ av_fast_padded_malloc(&s->sampleptrs[i][1], &s->sampleptrs_size[i][1],
+ s->block_samples * 4);
+ if (!s->sampleptrs[i][1])
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ return 0;
+}
+
+static int allocate_buffers(WavPackEncodeContext *s)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ av_fast_padded_malloc(&s->best_buffer[0], &s->best_buffer_size[0],
+ s->block_samples * 4);
+ if (!s->best_buffer[0])
+ return AVERROR(ENOMEM);
+
+ av_fast_padded_malloc(&s->temp_buffer[i][0], &s->temp_buffer_size[i][0],
+ s->block_samples * 4);
+ if (!s->temp_buffer[i][0])
+ return AVERROR(ENOMEM);
+ if (!(s->flags & WV_MONO_DATA)) {
+ av_fast_padded_malloc(&s->best_buffer[1], &s->best_buffer_size[1],
+ s->block_samples * 4);
+ if (!s->best_buffer[1])
+ return AVERROR(ENOMEM);
+
+ av_fast_padded_malloc(&s->temp_buffer[i][1], &s->temp_buffer_size[i][1],
+ s->block_samples * 4);
+ if (!s->temp_buffer[i][1])
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ return 0;
+}
+
+static void analyze_mono(WavPackEncodeContext *s, int32_t *samples, int do_samples)
+{
+ WavPackExtraInfo info;
+ int i;
+
+ info.log_limit = (((s->flags & MAG_MASK) >> MAG_LSB) + 4) * 256;
+ info.log_limit = FFMIN(6912, info.log_limit);
+
+ info.nterms = s->num_terms;
+
+ if (allocate_buffers2(s, s->num_terms))
+ return;
+
+ memcpy(info.dps, s->decorr_passes, sizeof(info.dps));
+ memcpy(s->sampleptrs[0][0], samples, s->block_samples * 4);
+
+ for (i = 0; i < info.nterms && info.dps[i].value; i++)
+ decorr_mono(s->sampleptrs[i][0], s->sampleptrs[i + 1][0],
+ s->block_samples, info.dps + i, 1);
+
+ info.best_bits = log2mono(s->sampleptrs[info.nterms][0], s->block_samples, 0) * 1;
+ memcpy(s->sampleptrs[info.nterms + 1][0], s->sampleptrs[i][0], s->block_samples * 4);
+
+ if (s->extra_flags & EXTRA_BRANCHES)
+ recurse_mono(s, &info, 0, (int) floor(s->delta_decay + 0.5),
+ log2mono(s->sampleptrs[0][0], s->block_samples, 0));
+
+ if (s->extra_flags & EXTRA_SORT_FIRST)
+ sort_mono(s, &info);
+
+ if (s->extra_flags & EXTRA_TRY_DELTAS) {
+ delta_mono(s, &info);
+
+ if ((s->extra_flags & EXTRA_ADJUST_DELTAS) && s->decorr_passes[0].value)
+ s->delta_decay = (float)((s->delta_decay * 2.0 + s->decorr_passes[0].delta) / 3.0);
+ else
+ s->delta_decay = 2.0;
+ }
+
+ if (s->extra_flags & EXTRA_SORT_LAST)
+ sort_mono(s, &info);
+
+ if (do_samples)
+ memcpy(samples, s->sampleptrs[info.nterms + 1][0], s->block_samples * 4);
+
+ for (i = 0; i < info.nterms; i++)
+ if (!s->decorr_passes[i].value)
+ break;
+
+ s->num_terms = i;
+}
+
+static void scan_word(WavPackEncodeContext *s, WvChannel *c,
+ int32_t *samples, int nb_samples, int dir)
+{
+ if (dir < 0)
+ samples += nb_samples - 1;
+
+ while (nb_samples--) {
+ uint32_t low, value = labs(samples[0]);
+
+ if (value < GET_MED(0)) {
+ DEC_MED(0);
+ } else {
+ low = GET_MED(0);
+ INC_MED(0);
+
+ if (value - low < GET_MED(1)) {
+ DEC_MED(1);
+ } else {
+ low += GET_MED(1);
+ INC_MED(1);
+
+ if (value - low < GET_MED(2)) {
+ DEC_MED(2);
+ } else {
+ INC_MED(2);
+ }
+ }
+ }
+ samples += dir;
+ }
+}
+
+static int wv_mono(WavPackEncodeContext *s, int32_t *samples,
+ int no_history, int do_samples)
+{
+ struct Decorr temp_decorr_pass, save_decorr_passes[MAX_TERMS] = {{0}};
+ int nb_samples = s->block_samples;
+ int buf_size = sizeof(int32_t) * nb_samples;
+ uint32_t best_size = UINT32_MAX, size;
+ int log_limit, pi, i, ret;
+
+ for (i = 0; i < nb_samples; i++)
+ if (samples[i])
+ break;
+
+ if (i == nb_samples) {
+ CLEAR(s->decorr_passes);
+ CLEAR(s->w);
+ s->num_terms = 0;
+ return 0;
+ }
+
+ log_limit = (((s->flags & MAG_MASK) >> MAG_LSB) + 4) * 256;
+ log_limit = FFMIN(6912, log_limit);
+
+ if ((ret = allocate_buffers(s)) < 0)
+ return ret;
+
+ if (no_history || s->num_passes >= 7)
+ s->best_decorr = s->mask_decorr = 0;
+
+ for (pi = 0; pi < s->num_passes;) {
+ const WavPackDecorrSpec *wpds;
+ int nterms, c, j;
+
+ if (!pi) {
+ c = s->best_decorr;
+ } else {
+ if (s->mask_decorr == 0)
+ c = 0;
+ else
+ c = (s->best_decorr & (s->mask_decorr - 1)) | s->mask_decorr;
+
+ if (c == s->best_decorr) {
+ s->mask_decorr = s->mask_decorr ? ((s->mask_decorr << 1) & (s->num_decorrs - 1)) : 1;
+ continue;
+ }
+ }
+
+ wpds = &s->decorr_specs[c];
+ nterms = decorr_filter_nterms[s->decorr_filter];
+
+ while (1) {
+ memcpy(s->temp_buffer[0][0], samples, buf_size);
+ CLEAR(save_decorr_passes);
+
+ for (j = 0; j < nterms; j++) {
+ CLEAR(temp_decorr_pass);
+ temp_decorr_pass.delta = wpds->delta;
+ temp_decorr_pass.value = wpds->terms[j];
+
+ if (temp_decorr_pass.value < 0)
+ temp_decorr_pass.value = 1;
+
+ decorr_mono(s->temp_buffer[j&1][0], s->temp_buffer[~j&1][0],
+ FFMIN(nb_samples, 2048), &temp_decorr_pass, -1);
+
+ if (j) {
+ CLEAR(temp_decorr_pass.samplesA);
+ } else {
+ reverse_mono_decorr(&temp_decorr_pass);
+ }
+
+ memcpy(save_decorr_passes + j, &temp_decorr_pass, sizeof(struct Decorr));
+ decorr_mono(s->temp_buffer[j&1][0], s->temp_buffer[~j&1][0],
+ nb_samples, &temp_decorr_pass, 1);
+ }
+
+ size = log2mono(s->temp_buffer[j&1][0], nb_samples, log_limit);
+ if (size != UINT32_MAX || !nterms)
+ break;
+ nterms >>= 1;
+ }
+
+ if (size < best_size) {
+ memcpy(s->best_buffer[0], s->temp_buffer[j&1][0], buf_size);
+ memcpy(s->decorr_passes, save_decorr_passes, sizeof(struct Decorr) * MAX_TERMS);
+ s->num_terms = nterms;
+ s->best_decorr = c;
+ best_size = size;
+ }
+
+ if (pi++)
+ s->mask_decorr = s->mask_decorr ? ((s->mask_decorr << 1) & (s->num_decorrs - 1)) : 1;
+ }
+
+ if (s->extra_flags)
+ analyze_mono(s, samples, do_samples);
+ else if (do_samples)
+ memcpy(samples, s->best_buffer[0], buf_size);
+
+ if (no_history || s->extra_flags) {
+ CLEAR(s->w);
+ scan_word(s, &s->w.c[0], s->best_buffer[0], nb_samples, -1);
+ }
+ return 0;
+}
+
+static void decorr_stereo(int32_t *in_left, int32_t *in_right,
+ int32_t *out_left, int32_t *out_right,
+ int nb_samples, struct Decorr *dpp, int dir)
+{
+ int m = 0, i;
+
+ dpp->sumA = dpp->sumB = 0;
+
+ if (dir < 0) {
+ out_left += nb_samples - 1;
+ out_right += nb_samples - 1;
+ in_left += nb_samples - 1;
+ in_right += nb_samples - 1;
+ }
+
+ dpp->weightA = restore_weight(store_weight(dpp->weightA));
+ dpp->weightB = restore_weight(store_weight(dpp->weightB));
+
+ for (i = 0; i < MAX_TERM; i++) {
+ dpp->samplesA[i] = wp_exp2(log2s(dpp->samplesA[i]));
+ dpp->samplesB[i] = wp_exp2(log2s(dpp->samplesB[i]));
+ }
+
+ switch (dpp->value) {
+ case 2:
+ while (nb_samples--) {
+ int32_t sam, tmp;
+
+ sam = dpp->samplesA[0];
+ dpp->samplesA[0] = dpp->samplesA[1];
+ out_left[0] = tmp = (dpp->samplesA[1] = in_left[0]) - APPLY_WEIGHT(dpp->weightA, sam);
+ UPDATE_WEIGHT(dpp->weightA, dpp->delta, sam, tmp);
+ dpp->sumA += dpp->weightA;
+
+ sam = dpp->samplesB[0];
+ dpp->samplesB[0] = dpp->samplesB[1];
+ out_right[0] = tmp = (dpp->samplesB[1] = in_right[0]) - APPLY_WEIGHT(dpp->weightB, sam);
+ UPDATE_WEIGHT(dpp->weightB, dpp->delta, sam, tmp);
+ dpp->sumB += dpp->weightB;
+
+ in_left += dir;
+ out_left += dir;
+ in_right += dir;
+ out_right += dir;
+ }
+ break;
+ case 17:
+ while (nb_samples--) {
+ int32_t sam, tmp;
+
+ sam = 2 * dpp->samplesA[0] - dpp->samplesA[1];
+ dpp->samplesA[1] = dpp->samplesA[0];
+ out_left[0] = tmp = (dpp->samplesA[0] = in_left[0]) - APPLY_WEIGHT(dpp->weightA, sam);
+ UPDATE_WEIGHT(dpp->weightA, dpp->delta, sam, tmp);
+ dpp->sumA += dpp->weightA;
+
+ sam = 2 * dpp->samplesB[0] - dpp->samplesB[1];
+ dpp->samplesB[1] = dpp->samplesB[0];
+ out_right[0] = tmp = (dpp->samplesB[0] = in_right[0]) - APPLY_WEIGHT (dpp->weightB, sam);
+ UPDATE_WEIGHT(dpp->weightB, dpp->delta, sam, tmp);
+ dpp->sumB += dpp->weightB;
+
+ in_left += dir;
+ out_left += dir;
+ in_right += dir;
+ out_right += dir;
+ }
+ break;
+ case 18:
+ while (nb_samples--) {
+ int32_t sam, tmp;
+
+ sam = dpp->samplesA[0] + ((dpp->samplesA[0] - dpp->samplesA[1]) >> 1);
+ dpp->samplesA[1] = dpp->samplesA[0];
+ out_left[0] = tmp = (dpp->samplesA[0] = in_left[0]) - APPLY_WEIGHT(dpp->weightA, sam);
+ UPDATE_WEIGHT(dpp->weightA, dpp->delta, sam, tmp);
+ dpp->sumA += dpp->weightA;
+
+ sam = dpp->samplesB[0] + ((dpp->samplesB[0] - dpp->samplesB[1]) >> 1);
+ dpp->samplesB[1] = dpp->samplesB[0];
+ out_right[0] = tmp = (dpp->samplesB[0] = in_right[0]) - APPLY_WEIGHT(dpp->weightB, sam);
+ UPDATE_WEIGHT(dpp->weightB, dpp->delta, sam, tmp);
+ dpp->sumB += dpp->weightB;
+
+ in_left += dir;
+ out_left += dir;
+ in_right += dir;
+ out_right += dir;
+ }
+ break;
+ default: {
+ int k = dpp->value & (MAX_TERM - 1);
+
+ while (nb_samples--) {
+ int32_t sam, tmp;
+
+ sam = dpp->samplesA[m];
+ out_left[0] = tmp = (dpp->samplesA[k] = in_left[0]) - APPLY_WEIGHT(dpp->weightA, sam);
+ UPDATE_WEIGHT(dpp->weightA, dpp->delta, sam, tmp);
+ dpp->sumA += dpp->weightA;
+
+ sam = dpp->samplesB[m];
+ out_right[0] = tmp = (dpp->samplesB[k] = in_right[0]) - APPLY_WEIGHT(dpp->weightB, sam);
+ UPDATE_WEIGHT(dpp->weightB, dpp->delta, sam, tmp);
+ dpp->sumB += dpp->weightB;
+
+ in_left += dir;
+ out_left += dir;
+ in_right += dir;
+ out_right += dir;
+ m = (m + 1) & (MAX_TERM - 1);
+ k = (k + 1) & (MAX_TERM - 1);
+ }
+
+ if (m) {
+ int32_t temp_A[MAX_TERM], temp_B[MAX_TERM];
+ int k;
+
+ memcpy(temp_A, dpp->samplesA, sizeof(dpp->samplesA));
+ memcpy(temp_B, dpp->samplesB, sizeof(dpp->samplesB));
+
+ for (k = 0; k < MAX_TERM; k++) {
+ dpp->samplesA[k] = temp_A[m];
+ dpp->samplesB[k] = temp_B[m];
+ m = (m + 1) & (MAX_TERM - 1);
+ }
+ }
+ break;
+ }
+ case -1:
+ while (nb_samples--) {
+ int32_t sam_A, sam_B, tmp;
+
+ sam_A = dpp->samplesA[0];
+ out_left[0] = tmp = (sam_B = in_left[0]) - APPLY_WEIGHT(dpp->weightA, sam_A);
+ UPDATE_WEIGHT_CLIP(dpp->weightA, dpp->delta, sam_A, tmp);
+ dpp->sumA += dpp->weightA;
+
+ out_right[0] = tmp = (dpp->samplesA[0] = in_right[0]) - APPLY_WEIGHT(dpp->weightB, sam_B);
+ UPDATE_WEIGHT_CLIP(dpp->weightB, dpp->delta, sam_B, tmp);
+ dpp->sumB += dpp->weightB;
+
+ in_left += dir;
+ out_left += dir;
+ in_right += dir;
+ out_right += dir;
+ }
+ break;
+ case -2:
+ while (nb_samples--) {
+ int32_t sam_A, sam_B, tmp;
+
+ sam_B = dpp->samplesB[0];
+ out_right[0] = tmp = (sam_A = in_right[0]) - APPLY_WEIGHT(dpp->weightB, sam_B);
+ UPDATE_WEIGHT_CLIP(dpp->weightB, dpp->delta, sam_B, tmp);
+ dpp->sumB += dpp->weightB;
+
+ out_left[0] = tmp = (dpp->samplesB[0] = in_left[0]) - APPLY_WEIGHT(dpp->weightA, sam_A);
+ UPDATE_WEIGHT_CLIP(dpp->weightA, dpp->delta, sam_A, tmp);
+ dpp->sumA += dpp->weightA;
+
+ in_left += dir;
+ out_left += dir;
+ in_right += dir;
+ out_right += dir;
+ }
+ break;
+ case -3:
+ while (nb_samples--) {
+ int32_t sam_A, sam_B, tmp;
+
+ sam_A = dpp->samplesA[0];
+ sam_B = dpp->samplesB[0];
+
+ dpp->samplesA[0] = tmp = in_right[0];
+ out_right[0] = tmp -= APPLY_WEIGHT(dpp->weightB, sam_B);
+ UPDATE_WEIGHT_CLIP(dpp->weightB, dpp->delta, sam_B, tmp);
+ dpp->sumB += dpp->weightB;
+
+ dpp->samplesB[0] = tmp = in_left[0];
+ out_left[0] = tmp -= APPLY_WEIGHT(dpp->weightA, sam_A);
+ UPDATE_WEIGHT_CLIP(dpp->weightA, dpp->delta, sam_A, tmp);
+ dpp->sumA += dpp->weightA;
+
+ in_left += dir;
+ out_left += dir;
+ in_right += dir;
+ out_right += dir;
+ }
+ break;
+ }
+}
+
+static void reverse_decorr(struct Decorr *dpp)
+{
+ if (dpp->value > MAX_TERM) {
+ int32_t sam_A, sam_B;
+
+ if (dpp->value & 1) {
+ sam_A = 2 * dpp->samplesA[0] - dpp->samplesA[1];
+ sam_B = 2 * dpp->samplesB[0] - dpp->samplesB[1];
+ } else {
+ sam_A = (3 * dpp->samplesA[0] - dpp->samplesA[1]) >> 1;
+ sam_B = (3 * dpp->samplesB[0] - dpp->samplesB[1]) >> 1;
+ }
+
+ dpp->samplesA[1] = dpp->samplesA[0];
+ dpp->samplesB[1] = dpp->samplesB[0];
+ dpp->samplesA[0] = sam_A;
+ dpp->samplesB[0] = sam_B;
+
+ if (dpp->value & 1) {
+ sam_A = 2 * dpp->samplesA[0] - dpp->samplesA[1];
+ sam_B = 2 * dpp->samplesB[0] - dpp->samplesB[1];
+ } else {
+ sam_A = (3 * dpp->samplesA[0] - dpp->samplesA[1]) >> 1;
+ sam_B = (3 * dpp->samplesB[0] - dpp->samplesB[1]) >> 1;
+ }
+
+ dpp->samplesA[1] = sam_A;
+ dpp->samplesB[1] = sam_B;
+ } else if (dpp->value > 1) {
+ int i, j, k;
+
+ for (i = 0, j = dpp->value - 1, k = 0; k < dpp->value / 2; i++, j--, k++) {
+ i &= (MAX_TERM - 1);
+ j &= (MAX_TERM - 1);
+ dpp->samplesA[i] ^= dpp->samplesA[j];
+ dpp->samplesA[j] ^= dpp->samplesA[i];
+ dpp->samplesA[i] ^= dpp->samplesA[j];
+ dpp->samplesB[i] ^= dpp->samplesB[j];
+ dpp->samplesB[j] ^= dpp->samplesB[i];
+ dpp->samplesB[i] ^= dpp->samplesB[j];
+ }
+ }
+}
+
+static void decorr_stereo_quick(int32_t *in_left, int32_t *in_right,
+ int32_t *out_left, int32_t *out_right,
+ int nb_samples, struct Decorr *dpp)
+{
+ int m = 0, i;
+
+ dpp->weightA = restore_weight(store_weight(dpp->weightA));
+ dpp->weightB = restore_weight(store_weight(dpp->weightB));
+
+ for (i = 0; i < MAX_TERM; i++) {
+ dpp->samplesA[i] = wp_exp2(log2s(dpp->samplesA[i]));
+ dpp->samplesB[i] = wp_exp2(log2s(dpp->samplesB[i]));
+ }
+
+ switch (dpp->value) {
+ case 2:
+ for (i = 0; i < nb_samples; i++) {
+ int32_t sam, tmp;
+
+ sam = dpp->samplesA[0];
+ dpp->samplesA[0] = dpp->samplesA[1];
+ out_left[i] = tmp = (dpp->samplesA[1] = in_left[i]) - APPLY_WEIGHT_I(dpp->weightA, sam);
+ UPDATE_WEIGHT(dpp->weightA, dpp->delta, sam, tmp);
+
+ sam = dpp->samplesB[0];
+ dpp->samplesB[0] = dpp->samplesB[1];
+ out_right[i] = tmp = (dpp->samplesB[1] = in_right[i]) - APPLY_WEIGHT_I(dpp->weightB, sam);
+ UPDATE_WEIGHT(dpp->weightB, dpp->delta, sam, tmp);
+ }
+ break;
+ case 17:
+ for (i = 0; i < nb_samples; i++) {
+ int32_t sam, tmp;
+
+ sam = 2 * dpp->samplesA[0] - dpp->samplesA[1];
+ dpp->samplesA[1] = dpp->samplesA[0];
+ out_left[i] = tmp = (dpp->samplesA[0] = in_left[i]) - APPLY_WEIGHT_I(dpp->weightA, sam);
+ UPDATE_WEIGHT(dpp->weightA, dpp->delta, sam, tmp);
+
+ sam = 2 * dpp->samplesB[0] - dpp->samplesB[1];
+ dpp->samplesB[1] = dpp->samplesB[0];
+ out_right[i] = tmp = (dpp->samplesB[0] = in_right[i]) - APPLY_WEIGHT_I(dpp->weightB, sam);
+ UPDATE_WEIGHT(dpp->weightB, dpp->delta, sam, tmp);
+ }
+ break;
+ case 18:
+ for (i = 0; i < nb_samples; i++) {
+ int32_t sam, tmp;
+
+ sam = dpp->samplesA[0] + ((dpp->samplesA[0] - dpp->samplesA[1]) >> 1);
+ dpp->samplesA[1] = dpp->samplesA[0];
+ out_left[i] = tmp = (dpp->samplesA[0] = in_left[i]) - APPLY_WEIGHT_I(dpp->weightA, sam);
+ UPDATE_WEIGHT(dpp->weightA, dpp->delta, sam, tmp);
+
+ sam = dpp->samplesB[0] + ((dpp->samplesB[0] - dpp->samplesB[1]) >> 1);
+ dpp->samplesB[1] = dpp->samplesB[0];
+ out_right[i] = tmp = (dpp->samplesB[0] = in_right[i]) - APPLY_WEIGHT_I(dpp->weightB, sam);
+ UPDATE_WEIGHT(dpp->weightB, dpp->delta, sam, tmp);
+ }
+ break;
+ default: {
+ int k = dpp->value & (MAX_TERM - 1);
+
+ for (i = 0; i < nb_samples; i++) {
+ int32_t sam, tmp;
+
+ sam = dpp->samplesA[m];
+ out_left[i] = tmp = (dpp->samplesA[k] = in_left[i]) - APPLY_WEIGHT_I(dpp->weightA, sam);
+ UPDATE_WEIGHT(dpp->weightA, dpp->delta, sam, tmp);
+
+ sam = dpp->samplesB[m];
+ out_right[i] = tmp = (dpp->samplesB[k] = in_right[i]) - APPLY_WEIGHT_I(dpp->weightB, sam);
+ UPDATE_WEIGHT(dpp->weightB, dpp->delta, sam, tmp);
+
+ m = (m + 1) & (MAX_TERM - 1);
+ k = (k + 1) & (MAX_TERM - 1);
+ }
+
+ if (m) {
+ int32_t temp_A[MAX_TERM], temp_B[MAX_TERM];
+ int k;
+
+ memcpy(temp_A, dpp->samplesA, sizeof(dpp->samplesA));
+ memcpy(temp_B, dpp->samplesB, sizeof(dpp->samplesB));
+
+ for (k = 0; k < MAX_TERM; k++) {
+ dpp->samplesA[k] = temp_A[m];
+ dpp->samplesB[k] = temp_B[m];
+ m = (m + 1) & (MAX_TERM - 1);
+ }
+ }
+ break;
+ }
+ case -1:
+ for (i = 0; i < nb_samples; i++) {
+ int32_t sam_A, sam_B, tmp;
+
+ sam_A = dpp->samplesA[0];
+ out_left[i] = tmp = (sam_B = in_left[i]) - APPLY_WEIGHT_I(dpp->weightA, sam_A);
+ UPDATE_WEIGHT_CLIP(dpp->weightA, dpp->delta, sam_A, tmp);
+
+ out_right[i] = tmp = (dpp->samplesA[0] = in_right[i]) - APPLY_WEIGHT_I(dpp->weightB, sam_B);
+ UPDATE_WEIGHT_CLIP(dpp->weightB, dpp->delta, sam_B, tmp);
+ }
+ break;
+ case -2:
+ for (i = 0; i < nb_samples; i++) {
+ int32_t sam_A, sam_B, tmp;
+
+ sam_B = dpp->samplesB[0];
+ out_right[i] = tmp = (sam_A = in_right[i]) - APPLY_WEIGHT_I(dpp->weightB, sam_B);
+ UPDATE_WEIGHT_CLIP(dpp->weightB, dpp->delta, sam_B, tmp);
+
+ out_left[i] = tmp = (dpp->samplesB[0] = in_left[i]) - APPLY_WEIGHT_I(dpp->weightA, sam_A);
+ UPDATE_WEIGHT_CLIP(dpp->weightA, dpp->delta, sam_A, tmp);
+ }
+ break;
+ case -3:
+ for (i = 0; i < nb_samples; i++) {
+ int32_t sam_A, sam_B, tmp;
+
+ sam_A = dpp->samplesA[0];
+ sam_B = dpp->samplesB[0];
+
+ dpp->samplesA[0] = tmp = in_right[i];
+ out_right[i] = tmp -= APPLY_WEIGHT_I(dpp->weightB, sam_B);
+ UPDATE_WEIGHT_CLIP(dpp->weightB, dpp->delta, sam_B, tmp);
+
+ dpp->samplesB[0] = tmp = in_left[i];
+ out_left[i] = tmp -= APPLY_WEIGHT_I(dpp->weightA, sam_A);
+ UPDATE_WEIGHT_CLIP(dpp->weightA, dpp->delta, sam_A, tmp);
+ }
+ break;
+ }
+}
+
+static void decorr_stereo_buffer(WavPackExtraInfo *info,
+ int32_t *in_left, int32_t *in_right,
+ int32_t *out_left, int32_t *out_right,
+ int nb_samples, int tindex)
+{
+ struct Decorr dp = {0}, *dppi = info->dps + tindex;
+ int delta = dppi->delta, pre_delta;
+ int term = dppi->value;
+
+ if (delta == 7)
+ pre_delta = 7;
+ else if (delta < 2)
+ pre_delta = 3;
+ else
+ pre_delta = delta + 1;
+
+ dp.value = term;
+ dp.delta = pre_delta;
+ decorr_stereo(in_left, in_right, out_left, out_right,
+ FFMIN(2048, nb_samples), &dp, -1);
+ dp.delta = delta;
+
+ if (tindex == 0) {
+ reverse_decorr(&dp);
+ } else {
+ CLEAR(dp.samplesA);
+ CLEAR(dp.samplesB);
+ }
+
+ memcpy(dppi->samplesA, dp.samplesA, sizeof(dp.samplesA));
+ memcpy(dppi->samplesB, dp.samplesB, sizeof(dp.samplesB));
+ dppi->weightA = dp.weightA;
+ dppi->weightB = dp.weightB;
+
+ if (delta == 0) {
+ dp.delta = 1;
+ decorr_stereo(in_left, in_right, out_left, out_right, nb_samples, &dp, 1);
+ dp.delta = 0;
+ memcpy(dp.samplesA, dppi->samplesA, sizeof(dp.samplesA));
+ memcpy(dp.samplesB, dppi->samplesB, sizeof(dp.samplesB));
+ dppi->weightA = dp.weightA = dp.sumA / nb_samples;
+ dppi->weightB = dp.weightB = dp.sumB / nb_samples;
+ }
+
+ if (info->gt16bit)
+ decorr_stereo(in_left, in_right, out_left, out_right,
+ nb_samples, &dp, 1);
+ else
+ decorr_stereo_quick(in_left, in_right, out_left, out_right,
+ nb_samples, &dp);
+}
+
+static void sort_stereo(WavPackEncodeContext *s, WavPackExtraInfo *info)
+{
+ int reversed = 1;
+ uint32_t bits;
+
+ while (reversed) {
+ int ri, i;
+
+ memcpy(info->dps, s->decorr_passes, sizeof(s->decorr_passes));
+ reversed = 0;
+
+ for (ri = 0; ri < info->nterms && s->decorr_passes[ri].value; ri++) {
+
+ if (ri + 1 >= info->nterms || !s->decorr_passes[ri+1].value)
+ break;
+
+ if (s->decorr_passes[ri].value == s->decorr_passes[ri+1].value) {
+ decorr_stereo_buffer(info,
+ s->sampleptrs[ri ][0], s->sampleptrs[ri ][1],
+ s->sampleptrs[ri+1][0], s->sampleptrs[ri+1][1],
+ s->block_samples, ri);
+ continue;
+ }
+
+ info->dps[ri ] = s->decorr_passes[ri+1];
+ info->dps[ri+1] = s->decorr_passes[ri ];
+
+ for (i = ri; i < info->nterms && s->decorr_passes[i].value; i++)
+ decorr_stereo_buffer(info,
+ s->sampleptrs[i ][0], s->sampleptrs[i ][1],
+ s->sampleptrs[i+1][0], s->sampleptrs[i+1][1],
+ s->block_samples, i);
+
+ bits = log2stereo(s->sampleptrs[i][0], s->sampleptrs[i][1],
+ s->block_samples, info->log_limit);
+
+ if (bits < info->best_bits) {
+ reversed = 1;
+ info->best_bits = bits;
+ CLEAR(s->decorr_passes);
+ memcpy(s->decorr_passes, info->dps, sizeof(info->dps[0]) * i);
+ memcpy(s->sampleptrs[info->nterms + 1][0],
+ s->sampleptrs[i][0], s->block_samples * 4);
+ memcpy(s->sampleptrs[info->nterms + 1][1],
+ s->sampleptrs[i][1], s->block_samples * 4);
+ } else {
+ info->dps[ri ] = s->decorr_passes[ri ];
+ info->dps[ri+1] = s->decorr_passes[ri+1];
+ decorr_stereo_buffer(info,
+ s->sampleptrs[ri ][0], s->sampleptrs[ri ][1],
+ s->sampleptrs[ri+1][0], s->sampleptrs[ri+1][1],
+ s->block_samples, ri);
+ }
+ }
+ }
+}
+
+static void delta_stereo(WavPackEncodeContext *s, WavPackExtraInfo *info)
+{
+ int lower = 0, delta, d, i;
+ uint32_t bits;
+
+ if (!s->decorr_passes[0].value)
+ return;
+ delta = s->decorr_passes[0].delta;
+
+ for (d = delta - 1; d >= 0; d--) {
+ for (i = 0; i < info->nterms && s->decorr_passes[i].value; i++) {
+ info->dps[i].value = s->decorr_passes[i].value;
+ info->dps[i].delta = d;
+ decorr_stereo_buffer(info,
+ s->sampleptrs[i ][0], s->sampleptrs[i ][1],
+ s->sampleptrs[i+1][0], s->sampleptrs[i+1][1],
+ s->block_samples, i);
+ }
+
+ bits = log2stereo(s->sampleptrs[i][0], s->sampleptrs[i][1],
+ s->block_samples, info->log_limit);
+ if (bits >= info->best_bits)
+ break;
+ lower = 1;
+ info->best_bits = bits;
+ CLEAR(s->decorr_passes);
+ memcpy(s->decorr_passes, info->dps, sizeof(info->dps[0]) * i);
+ memcpy(s->sampleptrs[info->nterms + 1][0], s->sampleptrs[i][0],
+ s->block_samples * 4);
+ memcpy(s->sampleptrs[info->nterms + 1][1], s->sampleptrs[i][1],
+ s->block_samples * 4);
+ }
+
+ for (d = delta + 1; !lower && d <= 7; d++) {
+ for (i = 0; i < info->nterms && s->decorr_passes[i].value; i++) {
+ info->dps[i].value = s->decorr_passes[i].value;
+ info->dps[i].delta = d;
+ decorr_stereo_buffer(info,
+ s->sampleptrs[i ][0], s->sampleptrs[i ][1],
+ s->sampleptrs[i+1][0], s->sampleptrs[i+1][1],
+ s->block_samples, i);
+ }
+
+ bits = log2stereo(s->sampleptrs[i][0], s->sampleptrs[i][1],
+ s->block_samples, info->log_limit);
+
+ if (bits < info->best_bits) {
+ info->best_bits = bits;
+ CLEAR(s->decorr_passes);
+ memcpy(s->decorr_passes, info->dps, sizeof(info->dps[0]) * i);
+ memcpy(s->sampleptrs[info->nterms + 1][0],
+ s->sampleptrs[i][0], s->block_samples * 4);
+ memcpy(s->sampleptrs[info->nterms + 1][1],
+ s->sampleptrs[i][1], s->block_samples * 4);
+ }
+ else
+ break;
+ }
+}
+
+static void recurse_stereo(WavPackEncodeContext *s, WavPackExtraInfo *info,
+ int depth, int delta, uint32_t input_bits)
+{
+ int term, branches = s->num_branches - depth;
+ int32_t *in_left, *in_right, *out_left, *out_right;
+ uint32_t term_bits[22], bits;
+
+ if (branches < 1 || depth + 1 == info->nterms)
+ branches = 1;
+
+ CLEAR(term_bits);
+ in_left = s->sampleptrs[depth ][0];
+ in_right = s->sampleptrs[depth ][1];
+ out_left = s->sampleptrs[depth + 1][0];
+ out_right = s->sampleptrs[depth + 1][1];
+
+ for (term = -3; term <= 18; term++) {
+ if (!term || (term > 8 && term < 17))
+ continue;
+
+ if (term == 17 && branches == 1 && depth + 1 < info->nterms)
+ continue;
+
+ if (term == -1 || term == -2)
+ if (!(s->flags & WV_CROSS_DECORR))
+ continue;
+
+ if (!s->extra_flags && (term > 4 && term < 17))
+ continue;
+
+ info->dps[depth].value = term;
+ info->dps[depth].delta = delta;
+ decorr_stereo_buffer(info, in_left, in_right, out_left, out_right,
+ s->block_samples, depth);
+ bits = log2stereo(out_left, out_right, s->block_samples, info->log_limit);
+
+ if (bits < info->best_bits) {
+ info->best_bits = bits;
+ CLEAR(s->decorr_passes);
+ memcpy(s->decorr_passes, info->dps, sizeof(info->dps[0]) * (depth + 1));
+ memcpy(s->sampleptrs[info->nterms + 1][0], s->sampleptrs[depth + 1][0],
+ s->block_samples * 4);
+ memcpy(s->sampleptrs[info->nterms + 1][1], s->sampleptrs[depth + 1][1],
+ s->block_samples * 4);
+ }
+
+ term_bits[term + 3] = bits;
+ }
+
+ while (depth + 1 < info->nterms && branches--) {
+ uint32_t local_best_bits = input_bits;
+ int best_term = 0, i;
+
+ for (i = 0; i < 22; i++)
+ if (term_bits[i] && term_bits[i] < local_best_bits) {
+ local_best_bits = term_bits[i];
+ best_term = i - 3;
+ }
+
+ if (!best_term)
+ break;
+
+ term_bits[best_term + 3] = 0;
+
+ info->dps[depth].value = best_term;
+ info->dps[depth].delta = delta;
+ decorr_stereo_buffer(info, in_left, in_right, out_left, out_right,
+ s->block_samples, depth);
+
+ recurse_stereo(s, info, depth + 1, delta, local_best_bits);
+ }
+}
+
+static void analyze_stereo(WavPackEncodeContext *s,
+ int32_t *in_left, int32_t *in_right,
+ int do_samples)
+{
+ WavPackExtraInfo info;
+ int i;
+
+ info.gt16bit = ((s->flags & MAG_MASK) >> MAG_LSB) >= 16;
+
+ info.log_limit = (((s->flags & MAG_MASK) >> MAG_LSB) + 4) * 256;
+ info.log_limit = FFMIN(6912, info.log_limit);
+
+ info.nterms = s->num_terms;
+
+ if (allocate_buffers2(s, s->num_terms))
+ return;
+
+ memcpy(info.dps, s->decorr_passes, sizeof(info.dps));
+ memcpy(s->sampleptrs[0][0], in_left, s->block_samples * 4);
+ memcpy(s->sampleptrs[0][1], in_right, s->block_samples * 4);
+
+ for (i = 0; i < info.nterms && info.dps[i].value; i++)
+ if (info.gt16bit)
+ decorr_stereo(s->sampleptrs[i ][0], s->sampleptrs[i ][1],
+ s->sampleptrs[i + 1][0], s->sampleptrs[i + 1][1],
+ s->block_samples, info.dps + i, 1);
+ else
+ decorr_stereo_quick(s->sampleptrs[i ][0], s->sampleptrs[i ][1],
+ s->sampleptrs[i + 1][0], s->sampleptrs[i + 1][1],
+ s->block_samples, info.dps + i);
+
+ info.best_bits = log2stereo(s->sampleptrs[info.nterms][0], s->sampleptrs[info.nterms][1],
+ s->block_samples, 0);
+
+ memcpy(s->sampleptrs[info.nterms + 1][0], s->sampleptrs[i][0], s->block_samples * 4);
+ memcpy(s->sampleptrs[info.nterms + 1][1], s->sampleptrs[i][1], s->block_samples * 4);
+
+ if (s->extra_flags & EXTRA_BRANCHES)
+ recurse_stereo(s, &info, 0, (int) floor(s->delta_decay + 0.5),
+ log2stereo(s->sampleptrs[0][0], s->sampleptrs[0][1],
+ s->block_samples, 0));
+
+ if (s->extra_flags & EXTRA_SORT_FIRST)
+ sort_stereo(s, &info);
+
+ if (s->extra_flags & EXTRA_TRY_DELTAS) {
+ delta_stereo(s, &info);
+
+ if ((s->extra_flags & EXTRA_ADJUST_DELTAS) && s->decorr_passes[0].value)
+ s->delta_decay = (float)((s->delta_decay * 2.0 + s->decorr_passes[0].delta) / 3.0);
+ else
+ s->delta_decay = 2.0;
+ }
+
+ if (s->extra_flags & EXTRA_SORT_LAST)
+ sort_stereo(s, &info);
+
+ if (do_samples) {
+ memcpy(in_left, s->sampleptrs[info.nterms + 1][0], s->block_samples * 4);
+ memcpy(in_right, s->sampleptrs[info.nterms + 1][1], s->block_samples * 4);
+ }
+
+ for (i = 0; i < info.nterms; i++)
+ if (!s->decorr_passes[i].value)
+ break;
+
+ s->num_terms = i;
+}
+
+/*
+ * Choose and apply the best stereo decorrelation spec for the current block.
+ * Up to num_passes candidate specs are tried (optionally forcing joint or
+ * true stereo via s->joint); the one with the smallest estimated coded size
+ * wins and its residuals are kept in s->best_buffer[].  When do_samples is
+ * set, samples_l/samples_r are overwritten with the processed samples.
+ * Returns 0 on success or a negative AVERROR code.
+ */
+static int wv_stereo(WavPackEncodeContext *s,
+                     int32_t *samples_l, int32_t *samples_r,
+                     int no_history, int do_samples)
+{
+    struct Decorr temp_decorr_pass, save_decorr_passes[MAX_TERMS] = {{0}};
+    int nb_samples = s->block_samples, ret;
+    int buf_size = sizeof(int32_t) * nb_samples;
+    int log_limit, force_js = 0, force_ts = 0, got_js = 0, pi, i;
+    uint32_t best_size = UINT32_MAX, size;
+
+    /* All-silent block: reset state, no decorrelation needed at all. */
+    for (i = 0; i < nb_samples; i++)
+        if (samples_l[i] || samples_r[i])
+            break;
+
+    if (i == nb_samples) {
+        s->flags &= ~((uint32_t) WV_JOINT_STEREO);
+        CLEAR(s->decorr_passes);
+        CLEAR(s->w);
+        s->num_terms = 0;
+        return 0;
+    }
+
+    log_limit = (((s->flags & MAG_MASK) >> MAG_LSB) + 4) * 256;
+    log_limit = FFMIN(6912, log_limit);
+
+    /* s->joint > 0 forces mid/side, < 0 forces left/right, 0 = auto. */
+    if (s->joint) {
+        force_js = s->joint > 0;
+        force_ts = s->joint < 0;
+    }
+
+    if ((ret = allocate_buffers(s)) < 0)
+        return ret;
+
+    if (no_history || s->num_passes >= 7)
+        s->best_decorr = s->mask_decorr = 0;
+
+    for (pi = 0; pi < s->num_passes;) {
+        const WavPackDecorrSpec *wpds;
+        int nterms, c, j;
+
+        /* Pass 0 re-evaluates the previous winner; later passes walk the
+         * spec table by flipping successive bits of the winner's index. */
+        if (!pi)
+            c = s->best_decorr;
+        else {
+            if (s->mask_decorr == 0)
+                c = 0;
+            else
+                c = (s->best_decorr & (s->mask_decorr - 1)) | s->mask_decorr;
+
+            if (c == s->best_decorr) {
+                s->mask_decorr = s->mask_decorr ? ((s->mask_decorr << 1) & (s->num_decorrs - 1)) : 1;
+                continue;
+            }
+        }
+
+        wpds = &s->decorr_specs[c];
+        nterms = decorr_filter_nterms[s->decorr_filter];
+
+        while (1) {
+            if (force_js || (wpds->joint_stereo && !force_ts)) {
+                /* Lazily build the mid/side version of the input once. */
+                if (!got_js) {
+                    av_fast_padded_malloc(&s->js_left, &s->js_left_size, buf_size);
+                    av_fast_padded_malloc(&s->js_right, &s->js_right_size, buf_size);
+                    memcpy(s->js_left,  samples_l, buf_size);
+                    memcpy(s->js_right, samples_r, buf_size);
+
+                    for (i = 0; i < nb_samples; i++)
+                        s->js_right[i] += ((s->js_left[i] -= s->js_right[i]) >> 1);
+                    got_js = 1;
+                }
+
+                memcpy(s->temp_buffer[0][0], s->js_left,  buf_size);
+                memcpy(s->temp_buffer[0][1], s->js_right, buf_size);
+            } else {
+                memcpy(s->temp_buffer[0][0], samples_l, buf_size);
+                memcpy(s->temp_buffer[0][1], samples_r, buf_size);
+            }
+
+            CLEAR(save_decorr_passes);
+
+            for (j = 0; j < nterms; j++) {
+                CLEAR(temp_decorr_pass);
+                temp_decorr_pass.delta = wpds->delta;
+                temp_decorr_pass.value = wpds->terms[j];
+
+                /* Cross-channel terms need WV_CROSS_DECORR; fall back to -3. */
+                if (temp_decorr_pass.value < 0 && !(s->flags & WV_CROSS_DECORR))
+                    temp_decorr_pass.value = -3;
+
+                /* Warm up the weights on (at most) the first 2048 samples. */
+                decorr_stereo(s->temp_buffer[ j&1][0], s->temp_buffer[ j&1][1],
+                              s->temp_buffer[~j&1][0], s->temp_buffer[~j&1][1],
+                              FFMIN(2048, nb_samples), &temp_decorr_pass, -1);
+
+                if (j) {
+                    CLEAR(temp_decorr_pass.samplesA);
+                    CLEAR(temp_decorr_pass.samplesB);
+                } else {
+                    reverse_decorr(&temp_decorr_pass);
+                }
+
+                memcpy(save_decorr_passes + j, &temp_decorr_pass, sizeof(struct Decorr));
+
+                if (((s->flags & MAG_MASK) >> MAG_LSB) >= 16)
+                    decorr_stereo(s->temp_buffer[ j&1][0], s->temp_buffer[ j&1][1],
+                                  s->temp_buffer[~j&1][0], s->temp_buffer[~j&1][1],
+                                  nb_samples, &temp_decorr_pass, 1);
+                else
+                    decorr_stereo_quick(s->temp_buffer[ j&1][0], s->temp_buffer[ j&1][1],
+                                        s->temp_buffer[~j&1][0], s->temp_buffer[~j&1][1],
+                                        nb_samples, &temp_decorr_pass);
+            }
+
+            /* UINT32_MAX signals magnitude overflow: retry with fewer terms. */
+            size = log2stereo(s->temp_buffer[j&1][0], s->temp_buffer[j&1][1],
+                              nb_samples, log_limit);
+            if (size != UINT32_MAX || !nterms)
+                break;
+            nterms >>= 1;
+        }
+
+        /* Keep whichever spec produced the smallest estimated size so far. */
+        if (size < best_size) {
+            memcpy(s->best_buffer[0], s->temp_buffer[j&1][0], buf_size);
+            memcpy(s->best_buffer[1], s->temp_buffer[j&1][1], buf_size);
+            memcpy(s->decorr_passes, save_decorr_passes, sizeof(struct Decorr) * MAX_TERMS);
+            s->num_terms = nterms;
+            s->best_decorr = c;
+            best_size = size;
+        }
+
+        if (pi++)
+            s->mask_decorr = s->mask_decorr ? ((s->mask_decorr << 1) & (s->num_decorrs - 1)) : 1;
+    }
+
+    if (force_js || (s->decorr_specs[s->best_decorr].joint_stereo && !force_ts))
+        s->flags |= WV_JOINT_STEREO;
+    else
+        s->flags &= ~((uint32_t) WV_JOINT_STEREO);
+
+    if (s->extra_flags) {
+        if (s->flags & WV_JOINT_STEREO) {
+            analyze_stereo(s, s->js_left, s->js_right, do_samples);
+
+            if (do_samples) {
+                memcpy(samples_l, s->js_left,  buf_size);
+                memcpy(samples_r, s->js_right, buf_size);
+            }
+        } else
+            analyze_stereo(s, samples_l, samples_r, do_samples);
+    } else if (do_samples) {
+        memcpy(samples_l, s->best_buffer[0], buf_size);
+        memcpy(samples_r, s->best_buffer[1], buf_size);
+    }
+
+    /* Re-seed the entropy medians whenever the stereo mode changed (or we
+     * had no usable history to begin with). */
+    if (s->extra_flags || no_history ||
+        s->joint_stereo != s->decorr_specs[s->best_decorr].joint_stereo) {
+        s->joint_stereo = s->decorr_specs[s->best_decorr].joint_stereo;
+        CLEAR(s->w);
+        scan_word(s, &s->w.c[0], s->best_buffer[0], nb_samples, -1);
+        scan_word(s, &s->w.c[1], s->best_buffer[1], nb_samples, -1);
+    }
+    return 0;
+}
+
+/* Number of bits needed to represent the unsigned value av, computed from
+ * an 8-bit lookup table (nbits_table) applied to the highest non-zero byte. */
+#define count_bits(av) ( \
+ (av) < (1 << 8) ? nbits_table[av] : \
+ ( \
+ (av) < (1 << 16) ? nbits_table[(av) >> 8] + 8 : \
+ ((av) < (1 << 24) ? nbits_table[(av) >> 16] + 16 : nbits_table[(av) >> 24] + 24) \
+ ) \
+)
+
+/*
+ * Flush all pending entropy-coder state to the bitstream: the accumulated
+ * zero-run count, the pending run of unary ones, the held zero bit, and any
+ * buffered payload bits.  Run lengths are written Elias-style: a unary
+ * prefix of (bit count) ones, a 0 terminator, then the value's bits from
+ * LSB upward with the implicit top bit dropped.
+ */
+static void encode_flush(WavPackEncodeContext *s)
+{
+    WavPackWords *w = &s->w;
+    PutBitContext *pb = &s->pb;
+
+    if (w->zeros_acc) {
+        int cbits = count_bits(w->zeros_acc);
+
+        /* Unary prefix: cbits ones (put_bits handles at most 31 at a time). */
+        do {
+            if (cbits > 31) {
+                put_bits(pb, 31, 0x7FFFFFFF);
+                cbits -= 31;
+            } else {
+                put_bits(pb, cbits, (1 << cbits) - 1);
+                cbits = 0;
+            }
+        } while (cbits);
+
+        put_bits(pb, 1, 0);
+
+        /* Value bits, LSB first, dropping the implicit leading 1. */
+        while (w->zeros_acc > 1) {
+            put_bits(pb, 1, w->zeros_acc & 1);
+            w->zeros_acc >>= 1;
+        }
+
+        w->zeros_acc = 0;
+    }
+
+    if (w->holding_one) {
+        if (w->holding_one >= 16) {
+            /* Long run of ones: escape with 16 ones, then code the rest. */
+            int cbits;
+
+            put_bits(pb, 16, (1 << 16) - 1);
+            put_bits(pb, 1, 0);
+            w->holding_one -= 16;
+            cbits = count_bits(w->holding_one);
+
+            do {
+                if (cbits > 31) {
+                    put_bits(pb, 31, 0x7FFFFFFF);
+                    cbits -= 31;
+                } else {
+                    put_bits(pb, cbits, (1 << cbits) - 1);
+                    cbits = 0;
+                }
+            } while (cbits);
+
+            put_bits(pb, 1, 0);
+
+            while (w->holding_one > 1) {
+                put_bits(pb, 1, w->holding_one & 1);
+                w->holding_one >>= 1;
+            }
+
+            w->holding_zero = 0;
+        } else {
+            put_bits(pb, w->holding_one, (1 << w->holding_one) - 1);
+        }
+
+        w->holding_one = 0;
+    }
+
+    if (w->holding_zero) {
+        put_bits(pb, 1, 0);
+        w->holding_zero = 0;
+    }
+
+    if (w->pend_count) {
+        put_bits(pb, w->pend_count, w->pend_data);
+        w->pend_data = w->pend_count = 0;
+    }
+}
+
+/*
+ * Entropy-code one residual sample for channel c using WavPack's adaptive
+ * median scheme: three running medians split the magnitude range into
+ * buckets selected by a unary count, followed by a near-binary code for the
+ * position inside the bucket and a trailing sign bit.  Runs of zeros across
+ * both channels are accumulated in w->zeros_acc and emitted lazily.
+ */
+static void wavpack_encode_sample(WavPackEncodeContext *s, WvChannel *c, int32_t sample)
+{
+    WavPackWords *w = &s->w;
+    uint32_t ones_count, low, high;
+    int sign = sample < 0;
+
+    /* Both channels in "tiny median" state: zero-run coding is active. */
+    if (s->w.c[0].median[0] < 2 && !s->w.holding_zero && s->w.c[1].median[0] < 2) {
+        if (w->zeros_acc) {
+            if (sample)
+                encode_flush(s);
+            else {
+                w->zeros_acc++;
+                return;
+            }
+        } else if (sample) {
+            put_bits(&s->pb, 1, 0);
+        } else {
+            /* First zero of a potential run: reset medians, start counting. */
+            CLEAR(s->w.c[0].median);
+            CLEAR(s->w.c[1].median);
+            w->zeros_acc = 1;
+            return;
+        }
+    }
+
+    /* Fold negatives onto the non-negative range; sign sent separately. */
+    if (sign)
+        sample = ~sample;
+
+    /* Pick the bucket via the three adaptive medians; DEC/INC_MED nudge
+     * them toward the running distribution. */
+    if (sample < (int32_t) GET_MED(0)) {
+        ones_count = low = 0;
+        high = GET_MED(0) - 1;
+        DEC_MED(0);
+    } else {
+        low = GET_MED(0);
+        INC_MED(0);
+
+        if (sample - low < GET_MED(1)) {
+            ones_count = 1;
+            high = low + GET_MED(1) - 1;
+            DEC_MED(1);
+        } else {
+            low += GET_MED(1);
+            INC_MED(1);
+
+            if (sample - low < GET_MED(2)) {
+                ones_count = 2;
+                high = low + GET_MED(2) - 1;
+                DEC_MED(2);
+            } else {
+                ones_count = 2 + (sample - low) / GET_MED(2);
+                low += (ones_count - 2) * GET_MED(2);
+                high = low + GET_MED(2) - 1;
+                INC_MED(2);
+            }
+        }
+    }
+
+    /* Merge the unary bucket count with the previous sample's held bit. */
+    if (w->holding_zero) {
+        if (ones_count)
+            w->holding_one++;
+
+        encode_flush(s);
+
+        if (ones_count) {
+            w->holding_zero = 1;
+            ones_count--;
+        } else
+            w->holding_zero = 0;
+    } else
+        w->holding_zero = 1;
+
+    w->holding_one = ones_count * 2;
+
+    /* Code the offset inside [low, high] with bitcount or bitcount-1 bits
+     * ("extras" values use the shorter form). */
+    if (high != low) {
+        uint32_t maxcode = high - low, code = sample - low;
+        int bitcount = count_bits(maxcode);
+        uint32_t extras = (1 << bitcount) - maxcode - 1;
+
+        if (code < extras) {
+            w->pend_data |= code << w->pend_count;
+            w->pend_count += bitcount - 1;
+        } else {
+            w->pend_data |= ((code + extras) >> 1) << w->pend_count;
+            w->pend_count += bitcount - 1;
+            w->pend_data |= ((code + extras) & 1) << w->pend_count++;
+        }
+    }
+
+    w->pend_data |= ((int32_t) sign << w->pend_count++);
+
+    if (!w->holding_zero)
+        encode_flush(s);
+}
+
+/*
+ * Write the extra low-order bits of >24-bit integer samples that were
+ * shifted out before decorrelation.  Each sample contributes sent_bits
+ * bits (after dropping the redundant zero/one/duplicate LSBs counted in
+ * pre_shift).  No-op when sent_bits is zero.
+ */
+static void pack_int32(WavPackEncodeContext *s,
+                       int32_t *samples_l, int32_t *samples_r,
+                       int nb_samples)
+{
+    const int sent_bits = s->int32_sent_bits;
+    PutBitContext *pb = &s->pb;
+    int i, pre_shift;
+
+    pre_shift = s->int32_zeros + s->int32_ones + s->int32_dups;
+
+    if (!sent_bits)
+        return;
+
+    if (s->flags & WV_MONO_DATA) {
+        for (i = 0; i < nb_samples; i++) {
+            put_sbits(pb, sent_bits, samples_l[i] >> pre_shift);
+        }
+    } else {
+        for (i = 0; i < nb_samples; i++) {
+            put_sbits(pb, sent_bits, samples_l[i] >> pre_shift);
+            put_sbits(pb, sent_bits, samples_r[i] >> pre_shift);
+        }
+    }
+}
+
+/*
+ * Write the reconstruction data for one IEEE-754 float sample that could
+ * not be represented exactly by the lossy integer path: NaN/Inf payloads,
+ * discarded mantissa bits, exponents of exact zeros, and sign bits of
+ * negative zeros, depending on s->float_flags.
+ */
+static void pack_float_sample(WavPackEncodeContext *s, int32_t *sample)
+{
+    const int max_exp = s->float_max_exp;
+    PutBitContext *pb = &s->pb;
+    int32_t value, shift_count;
+
+    if (get_exponent(*sample) == 255) {
+        /* Inf/NaN: flag bit plus the NaN mantissa payload if present. */
+        if (get_mantissa(*sample)) {
+            put_bits(pb, 1, 1);
+            put_bits(pb, 23, get_mantissa(*sample));
+        } else {
+            put_bits(pb, 1, 0);
+        }
+
+        value = 0x1000000;
+        shift_count = 0;
+    } else if (get_exponent(*sample)) {
+        /* Normal number: restore the implicit leading mantissa bit. */
+        shift_count = max_exp - get_exponent(*sample);
+        value = 0x800000 + get_mantissa(*sample);
+    } else {
+        /* Denormal: no implicit bit. */
+        shift_count = max_exp ? max_exp - 1 : 0;
+        value = get_mantissa(*sample);
+    }
+
+    if (shift_count < 25)
+        value >>= shift_count;
+    else
+        value = 0;
+
+    if (!value) {
+        /* Sample rounded to integer zero: optionally transmit the exact
+         * nonzero float (or the sign of a negative zero). */
+        if (s->float_flags & FLOAT_ZEROS_SENT) {
+            if (get_exponent(*sample) || get_mantissa(*sample)) {
+                put_bits(pb, 1, 1);
+                put_bits(pb, 23, get_mantissa(*sample));
+
+                if (max_exp >= 25)
+                    put_bits(pb, 8, get_exponent(*sample));
+
+                put_bits(pb, 1, get_sign(*sample));
+            } else {
+                put_bits(pb, 1, 0);
+
+                if (s->float_flags & FLOAT_NEG_ZEROS)
+                    put_bits(pb, 1, get_sign(*sample));
+            }
+        }
+    } else if (shift_count) {
+        /* Transmit the mantissa bits lost to the integer shift. */
+        if (s->float_flags & FLOAT_SHIFT_SENT) {
+            int32_t data = get_mantissa(*sample) & ((1 << shift_count) - 1);
+            put_bits(pb, shift_count, data);
+        } else if (s->float_flags & FLOAT_SHIFT_SAME) {
+            put_bits(pb, 1, get_mantissa(*sample) & 1);
+        }
+    }
+}
+
+/*
+ * Write the float reconstruction data for every sample in the block,
+ * interleaving left/right in stereo mode.
+ */
+static void pack_float(WavPackEncodeContext *s,
+                       int32_t *samples_l, int32_t *samples_r,
+                       int nb_samples)
+{
+    const int mono = s->flags & WV_MONO_DATA;
+    int n;
+
+    for (n = 0; n < nb_samples; n++) {
+        pack_float_sample(s, &samples_l[n]);
+        if (!mono)
+            pack_float_sample(s, &samples_r[n]);
+    }
+}
+
+/*
+ * Apply one decorrelation pass to a stereo block, full-precision variant.
+ * Term 17/18 predict from the two previous samples of the same channel,
+ * positive terms 1..8 predict from the sample `value` positions back, and
+ * negative terms (-1/-2/-3) predict across channels with clipped weight
+ * updates.  Residuals replace the input in place; weights and sample
+ * history in dpp are updated for the next block.
+ */
+static void decorr_stereo_pass2(struct Decorr *dpp,
+                                int32_t *samples_l, int32_t *samples_r,
+                                int nb_samples)
+{
+    int i, m, k;
+
+    switch (dpp->value) {
+    case 17:
+        /* Linear extrapolation: 2*prev - prev2. */
+        for (i = 0; i < nb_samples; i++) {
+            int32_t sam, tmp;
+
+            sam = 2 * dpp->samplesA[0] - dpp->samplesA[1];
+            dpp->samplesA[1] = dpp->samplesA[0];
+            samples_l[i] = tmp = (dpp->samplesA[0] = samples_l[i]) - APPLY_WEIGHT(dpp->weightA, sam);
+            UPDATE_WEIGHT(dpp->weightA, dpp->delta, sam, tmp);
+
+            sam = 2 * dpp->samplesB[0] - dpp->samplesB[1];
+            dpp->samplesB[1] = dpp->samplesB[0];
+            samples_r[i] = tmp = (dpp->samplesB[0] = samples_r[i]) - APPLY_WEIGHT(dpp->weightB, sam);
+            UPDATE_WEIGHT(dpp->weightB, dpp->delta, sam, tmp);
+        }
+        break;
+    case 18:
+        /* Half-slope extrapolation: prev + (prev - prev2)/2. */
+        for (i = 0; i < nb_samples; i++) {
+            int32_t sam, tmp;
+
+            sam = dpp->samplesA[0] + ((dpp->samplesA[0] - dpp->samplesA[1]) >> 1);
+            dpp->samplesA[1] = dpp->samplesA[0];
+            samples_l[i] = tmp = (dpp->samplesA[0] = samples_l[i]) - APPLY_WEIGHT(dpp->weightA, sam);
+            UPDATE_WEIGHT(dpp->weightA, dpp->delta, sam, tmp);
+
+            sam = dpp->samplesB[0] + ((dpp->samplesB[0] - dpp->samplesB[1]) >> 1);
+            dpp->samplesB[1] = dpp->samplesB[0];
+            samples_r[i] = tmp = (dpp->samplesB[0] = samples_r[i]) - APPLY_WEIGHT(dpp->weightB, sam);
+            UPDATE_WEIGHT(dpp->weightB, dpp->delta, sam, tmp);
+        }
+        break;
+    default:
+        /* Positive terms 1..8: predict from `value` samples back, using the
+         * history arrays as a MAX_TERM-entry ring buffer (m = read, k = write). */
+        for (m = 0, k = dpp->value & (MAX_TERM - 1), i = 0; i < nb_samples; i++) {
+            int32_t sam, tmp;
+
+            sam = dpp->samplesA[m];
+            samples_l[i] = tmp = (dpp->samplesA[k] = samples_l[i]) - APPLY_WEIGHT(dpp->weightA, sam);
+            UPDATE_WEIGHT(dpp->weightA, dpp->delta, sam, tmp);
+
+            sam = dpp->samplesB[m];
+            samples_r[i] = tmp = (dpp->samplesB[k] = samples_r[i]) - APPLY_WEIGHT(dpp->weightB, sam);
+            UPDATE_WEIGHT(dpp->weightB, dpp->delta, sam, tmp);
+
+            m = (m + 1) & (MAX_TERM - 1);
+            k = (k + 1) & (MAX_TERM - 1);
+        }
+        /* Rotate the ring buffers so index 0 is the oldest sample again. */
+        if (m) {
+            int32_t temp_A[MAX_TERM], temp_B[MAX_TERM];
+
+            memcpy(temp_A, dpp->samplesA, sizeof (dpp->samplesA));
+            memcpy(temp_B, dpp->samplesB, sizeof (dpp->samplesB));
+
+            for (k = 0; k < MAX_TERM; k++) {
+                dpp->samplesA[k] = temp_A[m];
+                dpp->samplesB[k] = temp_B[m];
+                m = (m + 1) & (MAX_TERM - 1);
+            }
+        }
+        break;
+    case -1:
+        /* Cross-channel: left predicted from previous right, right from
+         * current left; weight updates are clipped to +-1024. */
+        for (i = 0; i < nb_samples; i++) {
+            int32_t sam_A, sam_B, tmp;
+
+            sam_A = dpp->samplesA[0];
+            samples_l[i] = tmp = (sam_B = samples_l[i]) - APPLY_WEIGHT(dpp->weightA, sam_A);
+            UPDATE_WEIGHT_CLIP(dpp->weightA, dpp->delta, sam_A, tmp);
+
+            samples_r[i] = tmp = (dpp->samplesA[0] = samples_r[i]) - APPLY_WEIGHT(dpp->weightB, sam_B);
+            UPDATE_WEIGHT_CLIP(dpp->weightB, dpp->delta, sam_B, tmp);
+        }
+        break;
+    case -2:
+        /* Mirror of -1: right predicted from previous left. */
+        for (i = 0; i < nb_samples; i++) {
+            int32_t sam_A, sam_B, tmp;
+
+            sam_B = dpp->samplesB[0];
+            samples_r[i] = tmp = (sam_A = samples_r[i]) - APPLY_WEIGHT(dpp->weightB, sam_B);
+            UPDATE_WEIGHT_CLIP(dpp->weightB, dpp->delta, sam_B, tmp);
+
+            samples_l[i] = tmp = (dpp->samplesB[0] = samples_l[i]) - APPLY_WEIGHT(dpp->weightA, sam_A);
+            UPDATE_WEIGHT_CLIP(dpp->weightA, dpp->delta, sam_A, tmp);
+        }
+        break;
+    case -3:
+        /* Both channels predicted from the other channel's previous sample. */
+        for (i = 0; i < nb_samples; i++) {
+            int32_t sam_A, sam_B, tmp;
+
+            sam_A = dpp->samplesA[0];
+            sam_B = dpp->samplesB[0];
+
+            dpp->samplesA[0] = tmp = samples_r[i];
+            samples_r[i] = tmp -= APPLY_WEIGHT(dpp->weightB, sam_B);
+            UPDATE_WEIGHT_CLIP(dpp->weightB, dpp->delta, sam_B, tmp);
+
+            dpp->samplesB[0] = tmp = samples_l[i];
+            samples_l[i] = tmp -= APPLY_WEIGHT(dpp->weightA, sam_A);
+            UPDATE_WEIGHT_CLIP(dpp->weightA, dpp->delta, sam_A, tmp);
+        }
+        break;
+    }
+}
+
+/* Fast weight update for delta == 2: step the weight by +-2 toward reducing
+ * the residual (sign test folded into a shift/mask). */
+#define update_weight_d2(weight, delta, source, result) \
+    if (source && result) \
+        weight -= (((source ^ result) >> 29) & 4) - 2;
+
+/* Same as above for cross-channel terms: step by +-2 and clamp to +-1024. */
+#define update_weight_clip_d2(weight, delta, source, result) \
+    if (source && result) { \
+        const int32_t s = (source ^ result) >> 31; \
+        if ((weight = (weight ^ s) + (2 - s)) > 1024) weight = 1024; \
+        weight = (weight ^ s) - s; \
+    }
+
+/*
+ * Decorrelation pass specialized for delta == 2 and <=16-bit magnitudes:
+ * identical structure to decorr_stereo_pass2() but uses the integer-only
+ * APPLY_WEIGHT_I and the +-2-step update_weight_d2 macros.
+ */
+static void decorr_stereo_pass_id2(struct Decorr *dpp,
+                                   int32_t *samples_l, int32_t *samples_r,
+                                   int nb_samples)
+{
+    int i, m, k;
+
+    switch (dpp->value) {
+    case 17:
+        /* Linear extrapolation: 2*prev - prev2. */
+        for (i = 0; i < nb_samples; i++) {
+            int32_t sam, tmp;
+
+            sam = 2 * dpp->samplesA[0] - dpp->samplesA[1];
+            dpp->samplesA[1] = dpp->samplesA[0];
+            samples_l[i] = tmp = (dpp->samplesA[0] = samples_l[i]) - APPLY_WEIGHT_I(dpp->weightA, sam);
+            update_weight_d2(dpp->weightA, dpp->delta, sam, tmp);
+
+            sam = 2 * dpp->samplesB[0] - dpp->samplesB[1];
+            dpp->samplesB[1] = dpp->samplesB[0];
+            samples_r[i] = tmp = (dpp->samplesB[0] = samples_r[i]) - APPLY_WEIGHT_I(dpp->weightB, sam);
+            update_weight_d2(dpp->weightB, dpp->delta, sam, tmp);
+        }
+        break;
+    case 18:
+        /* Half-slope extrapolation: prev + (prev - prev2)/2. */
+        for (i = 0; i < nb_samples; i++) {
+            int32_t sam, tmp;
+
+            sam = dpp->samplesA[0] + ((dpp->samplesA[0] - dpp->samplesA[1]) >> 1);
+            dpp->samplesA[1] = dpp->samplesA[0];
+            samples_l[i] = tmp = (dpp->samplesA[0] = samples_l[i]) - APPLY_WEIGHT_I(dpp->weightA, sam);
+            update_weight_d2(dpp->weightA, dpp->delta, sam, tmp);
+
+            sam = dpp->samplesB[0] + ((dpp->samplesB[0] - dpp->samplesB[1]) >> 1);
+            dpp->samplesB[1] = dpp->samplesB[0];
+            samples_r[i] = tmp = (dpp->samplesB[0] = samples_r[i]) - APPLY_WEIGHT_I(dpp->weightB, sam);
+            update_weight_d2(dpp->weightB, dpp->delta, sam, tmp);
+        }
+        break;
+    default:
+        /* Positive terms 1..8: ring-buffer history, m = read, k = write. */
+        for (m = 0, k = dpp->value & (MAX_TERM - 1), i = 0; i < nb_samples; i++) {
+            int32_t sam, tmp;
+
+            sam = dpp->samplesA[m];
+            samples_l[i] = tmp = (dpp->samplesA[k] = samples_l[i]) - APPLY_WEIGHT_I(dpp->weightA, sam);
+            update_weight_d2(dpp->weightA, dpp->delta, sam, tmp);
+
+            sam = dpp->samplesB[m];
+            samples_r[i] = tmp = (dpp->samplesB[k] = samples_r[i]) - APPLY_WEIGHT_I(dpp->weightB, sam);
+            update_weight_d2(dpp->weightB, dpp->delta, sam, tmp);
+
+            m = (m + 1) & (MAX_TERM - 1);
+            k = (k + 1) & (MAX_TERM - 1);
+        }
+
+        /* Rotate the ring buffers so index 0 is the oldest sample again. */
+        if (m) {
+            int32_t temp_A[MAX_TERM], temp_B[MAX_TERM];
+
+            memcpy(temp_A, dpp->samplesA, sizeof(dpp->samplesA));
+            memcpy(temp_B, dpp->samplesB, sizeof(dpp->samplesB));
+
+            for (k = 0; k < MAX_TERM; k++) {
+                dpp->samplesA[k] = temp_A[m];
+                dpp->samplesB[k] = temp_B[m];
+                m = (m + 1) & (MAX_TERM - 1);
+            }
+        }
+        break;
+    case -1:
+        /* Cross-channel prediction with clipped weight updates. */
+        for (i = 0; i < nb_samples; i++) {
+            int32_t sam_A, sam_B, tmp;
+
+            sam_A = dpp->samplesA[0];
+            samples_l[i] = tmp = (sam_B = samples_l[i]) - APPLY_WEIGHT_I(dpp->weightA, sam_A);
+            update_weight_clip_d2(dpp->weightA, dpp->delta, sam_A, tmp);
+
+            samples_r[i] = tmp = (dpp->samplesA[0] = samples_r[i]) - APPLY_WEIGHT_I(dpp->weightB, sam_B);
+            update_weight_clip_d2(dpp->weightB, dpp->delta, sam_B, tmp);
+        }
+        break;
+    case -2:
+        for (i = 0; i < nb_samples; i++) {
+            int32_t sam_A, sam_B, tmp;
+
+            sam_B = dpp->samplesB[0];
+            samples_r[i] = tmp = (sam_A = samples_r[i]) - APPLY_WEIGHT_I(dpp->weightB, sam_B);
+            update_weight_clip_d2(dpp->weightB, dpp->delta, sam_B, tmp);
+
+            samples_l[i] = tmp = (dpp->samplesB[0] = samples_l[i]) - APPLY_WEIGHT_I(dpp->weightA, sam_A);
+            update_weight_clip_d2(dpp->weightA, dpp->delta, sam_A, tmp);
+        }
+        break;
+    case -3:
+        for (i = 0; i < nb_samples; i++) {
+            int32_t sam_A, sam_B, tmp;
+
+            sam_A = dpp->samplesA[0];
+            sam_B = dpp->samplesB[0];
+
+            dpp->samplesA[0] = tmp = samples_r[i];
+            samples_r[i] = tmp -= APPLY_WEIGHT_I(dpp->weightB, sam_B);
+            update_weight_clip_d2(dpp->weightB, dpp->delta, sam_B, tmp);
+
+            dpp->samplesB[0] = tmp = samples_l[i];
+            samples_l[i] = tmp -= APPLY_WEIGHT_I(dpp->weightA, sam_A);
+            update_weight_clip_d2(dpp->weightA, dpp->delta, sam_A, tmp);
+        }
+        break;
+    }
+}
+
+/*
+ * Write a metadata sub-block header: the id byte (with WP_IDF_ODD set when
+ * the payload length is odd) followed by the payload size in 16-bit words,
+ * rounded up.
+ */
+static void put_metadata_block(PutByteContext *pb, int flags, int size)
+{
+    int id = flags;
+
+    if (size & 1)
+        id |= WP_IDF_ODD;
+
+    bytestream2_put_byte(pb, id);
+    bytestream2_put_byte(pb, (size + 1) >> 1);
+}
+
+/*
+ * Encode one WavPack block (one or two channels) into `out`: runs the
+ * decorrelation search/passes, then writes the 32-byte block header, the
+ * metadata sub-blocks (decorr terms/weights/samples, entropy medians,
+ * float/int32 info) and the entropy-coded residual data, plus an optional
+ * EXTRABITS sub-block for lossless float / >24-bit data.
+ * Returns the number of bytes written or a negative error code.
+ */
+static int wavpack_encode_block(WavPackEncodeContext *s,
+                                int32_t *samples_l, int32_t *samples_r,
+                                uint8_t *out, int out_size)
+{
+    int block_size, start, end, data_size, tcount, temp, m = 0;
+    int i, j, ret = 0, got_extra = 0, nb_samples = s->block_samples;
+    uint32_t crc = 0xffffffffu;
+    struct Decorr *dpp;
+    PutByteContext pb;
+
+    if (s->flags & WV_MONO_DATA) {
+        CLEAR(s->w);
+    }
+
+    /* Detect identical L/R content and switch to false-stereo coding. */
+    if (!(s->flags & WV_MONO) && s->optimize_mono) {
+        int32_t lor = 0, diff = 0;
+
+        for (i = 0; i < nb_samples; i++) {
+            lor  |= samples_l[i] | samples_r[i];
+            diff |= samples_l[i] - samples_r[i];
+
+            if (lor && diff)
+                break;
+        }
+
+        if (i == nb_samples && lor && !diff) {
+            s->flags &= ~(WV_JOINT_STEREO | WV_CROSS_DECORR);
+            s->flags |= WV_FALSE_STEREO;
+
+            if (!s->false_stereo) {
+                s->false_stereo = 1;
+                s->num_terms = 0;
+                CLEAR(s->w);
+            }
+        } else if (s->false_stereo) {
+            s->false_stereo = 0;
+            s->num_terms = 0;
+            CLEAR(s->w);
+        }
+    }
+
+    /* Drop common trailing zero bits and adjust the magnitude field. */
+    if (s->flags & SHIFT_MASK) {
+        int shift = (s->flags & SHIFT_MASK) >> SHIFT_LSB;
+        int mag = (s->flags & MAG_MASK) >> MAG_LSB;
+
+        if (s->flags & WV_MONO_DATA)
+            shift_mono(samples_l, nb_samples, shift);
+        else
+            shift_stereo(samples_l, samples_r, nb_samples, shift);
+
+        if ((mag -= shift) < 0)
+            s->flags &= ~MAG_MASK;
+        else
+            s->flags -= (1 << MAG_LSB) * shift;
+    }
+
+    /* Float or >24-bit data: keep the original samples so the lost bits
+     * can be written later in an EXTRABITS sub-block. */
+    if ((s->flags & WV_FLOAT_DATA) || (s->flags & MAG_MASK) >> MAG_LSB >= 24) {
+        av_fast_padded_malloc(&s->orig_l, &s->orig_l_size, sizeof(int32_t) * nb_samples);
+        memcpy(s->orig_l, samples_l, sizeof(int32_t) * nb_samples);
+        if (!(s->flags & WV_MONO_DATA)) {
+            av_fast_padded_malloc(&s->orig_r, &s->orig_r_size, sizeof(int32_t) * nb_samples);
+            memcpy(s->orig_r, samples_r, sizeof(int32_t) * nb_samples);
+        }
+
+        if (s->flags & WV_FLOAT_DATA)
+            got_extra = scan_float(s, samples_l, samples_r, nb_samples);
+        else
+            got_extra = scan_int32(s, samples_l, samples_r, nb_samples);
+        s->num_terms = 0;
+    } else {
+        scan_int23(s, samples_l, samples_r, nb_samples);
+        if (s->shift != s->int32_zeros + s->int32_ones + s->int32_dups) {
+            s->shift = s->int32_zeros + s->int32_ones + s->int32_dups;
+            s->num_terms = 0;
+        }
+    }
+
+    /* No search configured and no history: do one throwaway search pass to
+     * pick an initial decorrelation spec. */
+    if (!s->num_passes && !s->num_terms) {
+        s->num_passes = 1;
+
+        if (s->flags & WV_MONO_DATA)
+            ret = wv_mono(s, samples_l, 1, 0);
+        else
+            ret = wv_stereo(s, samples_l, samples_r, 1, 0);
+
+        s->num_passes = 0;
+    }
+    /* CRC over the (shifted) input samples, then run the search for real. */
+    if (s->flags & WV_MONO_DATA) {
+        for (i = 0; i < nb_samples; i++)
+            crc += (crc << 1) + samples_l[i];
+
+        if (s->num_passes)
+            ret = wv_mono(s, samples_l, !s->num_terms, 1);
+    } else {
+        for (i = 0; i < nb_samples; i++)
+            crc += (crc << 3) + (samples_l[i] << 1) + samples_l[i] + samples_r[i];
+
+        if (s->num_passes)
+            ret = wv_stereo(s, samples_l, samples_r, !s->num_terms, 1);
+    }
+    if (ret < 0)
+        return ret;
+
+    if (!s->ch_offset)
+        s->flags |= WV_INITIAL_BLOCK;
+
+    s->ch_offset += 1 + !(s->flags & WV_MONO);
+
+    if (s->ch_offset == s->avctx->channels)
+        s->flags |= WV_FINAL_BLOCK;
+
+    /* 32-byte block header; total size at offset 4 is patched at the end. */
+    bytestream2_init_writer(&pb, out, out_size);
+    bytestream2_put_le32(&pb, MKTAG('w', 'v', 'p', 'k'));
+    bytestream2_put_le32(&pb, 0);
+    bytestream2_put_le16(&pb, 0x410);
+    bytestream2_put_le16(&pb, 0);
+    bytestream2_put_le32(&pb, 0);
+    bytestream2_put_le32(&pb, s->sample_index);
+    bytestream2_put_le32(&pb, nb_samples);
+    bytestream2_put_le32(&pb, s->flags);
+    bytestream2_put_le32(&pb, crc);
+
+    if (s->flags & WV_INITIAL_BLOCK &&
+        s->avctx->channel_layout != AV_CH_LAYOUT_MONO &&
+        s->avctx->channel_layout != AV_CH_LAYOUT_STEREO) {
+        put_metadata_block(&pb, WP_ID_CHANINFO, 5);
+        bytestream2_put_byte(&pb, s->avctx->channels);
+        bytestream2_put_le32(&pb, s->avctx->channel_layout);
+        bytestream2_put_byte(&pb, 0);
+    }
+
+    /* Non-standard sample rate (all-ones rate index) is stored explicitly. */
+    if ((s->flags & SRATE_MASK) == SRATE_MASK) {
+        put_metadata_block(&pb, WP_ID_SAMPLE_RATE, 3);
+        bytestream2_put_le24(&pb, s->avctx->sample_rate);
+        bytestream2_put_byte(&pb, 0);
+    }
+
+    put_metadata_block(&pb, WP_ID_DECTERMS, s->num_terms);
+    for (i = 0; i < s->num_terms; i++) {
+        struct Decorr *dpp = &s->decorr_passes[i];
+        bytestream2_put_byte(&pb, ((dpp->value + 5) & 0x1f) | ((dpp->delta << 5) & 0xe0));
+    }
+    if (s->num_terms & 1)
+        bytestream2_put_byte(&pb, 0);
+
+/* Store a weight in its quantized byte form and reload the quantized value
+ * so encoder and decoder state stay identical. */
+#define WRITE_DECWEIGHT(type) do {            \
+        temp = store_weight(type);    \
+        bytestream2_put_byte(&pb, temp);      \
+        type = restore_weight(temp);  \
+    } while (0)
+
+    /* Weight sub-block: trailing all-zero weights are omitted (tcount). */
+    bytestream2_put_byte(&pb, WP_ID_DECWEIGHTS);
+    bytestream2_put_byte(&pb, 0);
+    start = bytestream2_tell_p(&pb);
+    for (i = s->num_terms - 1; i >= 0; --i) {
+        struct Decorr *dpp = &s->decorr_passes[i];
+
+        if (store_weight(dpp->weightA) ||
+            (!(s->flags & WV_MONO_DATA) && store_weight(dpp->weightB)))
+            break;
+    }
+    tcount = i + 1;
+    for (i = 0; i < s->num_terms; i++) {
+        struct Decorr *dpp = &s->decorr_passes[i];
+        if (i < tcount) {
+            WRITE_DECWEIGHT(dpp->weightA);
+            if (!(s->flags & WV_MONO_DATA))
+                WRITE_DECWEIGHT(dpp->weightB);
+        } else {
+            dpp->weightA = dpp->weightB = 0;
+        }
+    }
+    end = bytestream2_tell_p(&pb);
+    /* Patch the header now that the actual weight count is known. */
+    out[start - 2] = WP_ID_DECWEIGHTS | (((end - start) & 1) ? WP_IDF_ODD: 0);
+    out[start - 1] = (end - start + 1) >> 1;
+    if ((end - start) & 1)
+        bytestream2_put_byte(&pb, 0);
+
+/* Store a history sample in log form and reload the quantized value. */
+#define WRITE_DECSAMPLE(type) do {        \
+        temp = log2s(type);               \
+        type = wp_exp2(temp);             \
+        bytestream2_put_le16(&pb, temp);  \
+    } while (0)
+
+    /* History samples: only the first term's history is transmitted. */
+    bytestream2_put_byte(&pb, WP_ID_DECSAMPLES);
+    bytestream2_put_byte(&pb, 0);
+    start = bytestream2_tell_p(&pb);
+    for (i = 0; i < s->num_terms; i++) {
+        struct Decorr *dpp = &s->decorr_passes[i];
+        if (i == 0) {
+            if (dpp->value > MAX_TERM) {
+                WRITE_DECSAMPLE(dpp->samplesA[0]);
+                WRITE_DECSAMPLE(dpp->samplesA[1]);
+                if (!(s->flags & WV_MONO_DATA)) {
+                    WRITE_DECSAMPLE(dpp->samplesB[0]);
+                    WRITE_DECSAMPLE(dpp->samplesB[1]);
+                }
+            } else if (dpp->value < 0) {
+                WRITE_DECSAMPLE(dpp->samplesA[0]);
+                WRITE_DECSAMPLE(dpp->samplesB[0]);
+            } else {
+                for (j = 0; j < dpp->value; j++) {
+                    WRITE_DECSAMPLE(dpp->samplesA[j]);
+                    if (!(s->flags & WV_MONO_DATA))
+                        WRITE_DECSAMPLE(dpp->samplesB[j]);
+                }
+            }
+        } else {
+            CLEAR(dpp->samplesA);
+            CLEAR(dpp->samplesB);
+        }
+    }
+    end = bytestream2_tell_p(&pb);
+    out[start - 1] = (end - start) >> 1;
+
+/* Store one channel's three entropy medians in log form. */
+#define WRITE_CHAN_ENTROPY(chan) do {               \
+        for (i = 0; i < 3; i++) {                   \
+            temp = wp_log2(s->w.c[chan].median[i]); \
+            bytestream2_put_le16(&pb, temp);        \
+            s->w.c[chan].median[i] = wp_exp2(temp); \
+        }                                           \
+    } while (0)
+
+    put_metadata_block(&pb, WP_ID_ENTROPY, 6 * (1 + (!(s->flags & WV_MONO_DATA))));
+    WRITE_CHAN_ENTROPY(0);
+    if (!(s->flags & WV_MONO_DATA))
+        WRITE_CHAN_ENTROPY(1);
+
+    if (s->flags & WV_FLOAT_DATA) {
+        put_metadata_block(&pb, WP_ID_FLOATINFO, 4);
+        bytestream2_put_byte(&pb, s->float_flags);
+        bytestream2_put_byte(&pb, s->float_shift);
+        bytestream2_put_byte(&pb, s->float_max_exp);
+        bytestream2_put_byte(&pb, 127);
+    }
+
+    if (s->flags & WV_INT32_DATA) {
+        put_metadata_block(&pb, WP_ID_INT32INFO, 4);
+        bytestream2_put_byte(&pb, s->int32_sent_bits);
+        bytestream2_put_byte(&pb, s->int32_zeros);
+        bytestream2_put_byte(&pb, s->int32_ones);
+        bytestream2_put_byte(&pb, s->int32_dups);
+    }
+
+    /* When no search ran (num_passes == 0), the residuals have not been
+     * computed yet: apply the decorrelation passes in place here. */
+    if (s->flags & WV_MONO_DATA && !s->num_passes) {
+        for (i = 0; i < nb_samples; i++) {
+            int32_t code = samples_l[i];
+
+            for (tcount = s->num_terms, dpp = s->decorr_passes; tcount--; dpp++) {
+                int32_t sam;
+
+                if (dpp->value > MAX_TERM) {
+                    if (dpp->value & 1)
+                        sam = 2 * dpp->samplesA[0] - dpp->samplesA[1];
+                    else
+                        sam = (3 * dpp->samplesA[0] - dpp->samplesA[1]) >> 1;
+
+                    dpp->samplesA[1] = dpp->samplesA[0];
+                    dpp->samplesA[0] = code;
+                } else {
+                    sam = dpp->samplesA[m];
+                    dpp->samplesA[(m + dpp->value) & (MAX_TERM - 1)] = code;
+                }
+
+                code -= APPLY_WEIGHT(dpp->weightA, sam);
+                UPDATE_WEIGHT(dpp->weightA, dpp->delta, sam, code);
+            }
+
+            m = (m + 1) & (MAX_TERM - 1);
+            samples_l[i] = code;
+        }
+        /* Rotate ring-buffer histories back to a zero base index. */
+        if (m) {
+            for (tcount = s->num_terms, dpp = s->decorr_passes; tcount--; dpp++)
+                if (dpp->value > 0 && dpp->value <= MAX_TERM) {
+                    int32_t temp_A[MAX_TERM], temp_B[MAX_TERM];
+                    int k;
+
+                    memcpy(temp_A, dpp->samplesA, sizeof(dpp->samplesA));
+                    memcpy(temp_B, dpp->samplesB, sizeof(dpp->samplesB));
+
+                    for (k = 0; k < MAX_TERM; k++) {
+                        dpp->samplesA[k] = temp_A[m];
+                        dpp->samplesB[k] = temp_B[m];
+                        m = (m + 1) & (MAX_TERM - 1);
+                    }
+                }
+        }
+    } else if (!s->num_passes) {
+        if (s->flags & WV_JOINT_STEREO) {
+            for (i = 0; i < nb_samples; i++)
+                samples_r[i] += ((samples_l[i] -= samples_r[i]) >> 1);
+        }
+
+        for (i = 0; i < s->num_terms; i++) {
+            struct Decorr *dpp = &s->decorr_passes[i];
+            if (((s->flags & MAG_MASK) >> MAG_LSB) >= 16 || dpp->delta != 2)
+                decorr_stereo_pass2(dpp, samples_l, samples_r, nb_samples);
+            else
+                decorr_stereo_pass_id2(dpp, samples_l, samples_r, nb_samples);
+        }
+    }
+
+    /* Entropy-coded residual data; the 24-bit size is written afterwards
+     * into the 3 header bytes skipped by starting the PutBitContext at +3. */
+    bytestream2_put_byte(&pb, WP_ID_DATA | WP_IDF_LONG);
+    init_put_bits(&s->pb, pb.buffer + 3, bytestream2_get_bytes_left_p(&pb));
+    if (s->flags & WV_MONO_DATA) {
+        for (i = 0; i < nb_samples; i++)
+            wavpack_encode_sample(s, &s->w.c[0], s->samples[0][i]);
+    } else {
+        for (i = 0; i < nb_samples; i++) {
+            wavpack_encode_sample(s, &s->w.c[0], s->samples[0][i]);
+            wavpack_encode_sample(s, &s->w.c[1], s->samples[1][i]);
+        }
+    }
+    encode_flush(s);
+    flush_put_bits(&s->pb);
+    data_size = put_bits_count(&s->pb) >> 3;
+    bytestream2_put_le24(&pb, (data_size + 1) >> 1);
+    bytestream2_skip_p(&pb, data_size);
+    if (data_size & 1)
+        bytestream2_put_byte(&pb, 0);
+
+    /* EXTRABITS sub-block: lossless correction data for float/>24-bit input
+     * (header is 7 bytes: id, 24-bit size, 32-bit crc_x). */
+    if (got_extra) {
+        bytestream2_put_byte(&pb, WP_ID_EXTRABITS | WP_IDF_LONG);
+        init_put_bits(&s->pb, pb.buffer + 7, bytestream2_get_bytes_left_p(&pb));
+        if (s->flags & WV_FLOAT_DATA)
+            pack_float(s, s->orig_l, s->orig_r, nb_samples);
+        else
+            pack_int32(s, s->orig_l, s->orig_r, nb_samples);
+        flush_put_bits(&s->pb);
+        data_size = put_bits_count(&s->pb) >> 3;
+        bytestream2_put_le24(&pb, (data_size + 5) >> 1);
+        bytestream2_put_le32(&pb, s->crc_x);
+        bytestream2_skip_p(&pb, data_size);
+        if (data_size & 1)
+            bytestream2_put_byte(&pb, 0);
+    }
+
+    /* Patch the total block size (excluding the 8-byte tag+size prefix). */
+    block_size = bytestream2_tell_p(&pb);
+    AV_WL32(out + 4, block_size - 8);
+
+    av_assert0(!bytestream2_get_eof(&pb));
+
+    return block_size;
+}
+
+/*
+ * Copy one channel of input audio into the encoder's int32 working buffer,
+ * normalizing the sample format: U8 is re-centred around zero, S32 with
+ * <=24 significant bits is shifted down, and float / full 32-bit data is
+ * copied verbatim (the bits are reinterpreted later).
+ */
+static void fill_buffer(WavPackEncodeContext *s,
+                        const int8_t *src, int32_t *dst,
+                        int nb_samples)
+{
+    int i;
+
+#define COPY_SAMPLES(type, offset, shift) do {            \
+        const type *sptr = (const type *)src;             \
+        for (i = 0; i < nb_samples; i++)                  \
+            dst[i] = (sptr[i] - offset) >> shift;         \
+    } while (0)
+
+    switch (s->avctx->sample_fmt) {
+    case AV_SAMPLE_FMT_U8P:
+        COPY_SAMPLES(int8_t, 0x80, 0);
+        break;
+    case AV_SAMPLE_FMT_S16P:
+        COPY_SAMPLES(int16_t, 0, 0);
+        break;
+    case AV_SAMPLE_FMT_S32P:
+        if (s->avctx->bits_per_raw_sample <= 24) {
+            COPY_SAMPLES(int32_t, 0, 8);
+            break;
+        }
+        /* fallthrough: full 32-bit samples are passed through as-is */
+    case AV_SAMPLE_FMT_FLTP:
+        memcpy(dst, src, nb_samples * 4);
+    }
+}
+
+/*
+ * Reset s->flags to the WavPack sample-rate index for the stream's rate.
+ * Index 15 (no table match) means "non-standard"; the exact rate is then
+ * written in a WP_ID_SAMPLE_RATE metadata sub-block.
+ */
+static void set_samplerate(WavPackEncodeContext *s)
+{
+    int idx = 0;
+
+    while (idx < 15 && wv_rates[idx] != s->avctx->sample_rate)
+        idx++;
+
+    s->flags = idx << SRATE_LSB;
+}
+
+/*
+ * AVCodec.encode2 callback: encode one frame as a sequence of WavPack
+ * blocks, two channels (one stereo pair) per block, mono final block for
+ * odd channel counts.  Returns 0 on success or a negative error code.
+ */
+static int wavpack_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
+                                const AVFrame *frame, int *got_packet_ptr)
+{
+    WavPackEncodeContext *s = avctx->priv_data;
+    int buf_size, ret;
+    uint8_t *buf;
+
+    s->block_samples = frame->nb_samples;
+    av_fast_padded_malloc(&s->samples[0], &s->samples_size[0],
+                          sizeof(int32_t) * s->block_samples);
+    if (!s->samples[0])
+        return AVERROR(ENOMEM);
+    if (avctx->channels > 1) {
+        av_fast_padded_malloc(&s->samples[1], &s->samples_size[1],
+                              sizeof(int32_t) * s->block_samples);
+        if (!s->samples[1])
+            return AVERROR(ENOMEM);
+    }
+
+    /* Worst-case output: 8 bytes per sample per channel plus header room. */
+    buf_size = s->block_samples * avctx->channels * 8
+             + 200 /* for headers */;
+    if ((ret = ff_alloc_packet2(avctx, avpkt, buf_size, 0)) < 0)
+        return ret;
+    buf = avpkt->data;
+
+    /* One WavPack block per channel pair (or per channel for the last odd
+     * channel); wavpack_encode_block() advances s->ch_offset. */
+    for (s->ch_offset = 0; s->ch_offset < avctx->channels;) {
+        set_samplerate(s);
+
+        /* Bits 0-1 of the flags encode bytes-per-sample - 1. */
+        switch (s->avctx->sample_fmt) {
+        case AV_SAMPLE_FMT_S16P: s->flags |= 1; break;
+        case AV_SAMPLE_FMT_S32P: s->flags |= 3 - (s->avctx->bits_per_raw_sample <= 24); break;
+        case AV_SAMPLE_FMT_FLTP: s->flags |= 3 | WV_FLOAT_DATA;
+        }
+
+        fill_buffer(s, frame->extended_data[s->ch_offset], s->samples[0], s->block_samples);
+        if (avctx->channels - s->ch_offset == 1) {
+            s->flags |= WV_MONO;
+        } else {
+            s->flags |= WV_CROSS_DECORR;
+            fill_buffer(s, frame->extended_data[s->ch_offset + 1], s->samples[1], s->block_samples);
+        }
+
+        /* Magnitude field: highest possible bit index for this format. */
+        s->flags += (1 << MAG_LSB) * ((s->flags & 3) * 8 + 7);
+
+        if ((ret = wavpack_encode_block(s, s->samples[0], s->samples[1],
+                                        buf, buf_size)) < 0)
+            return ret;
+
+        buf      += ret;
+        buf_size -= ret;
+    }
+    s->sample_index += frame->nb_samples;
+
+    avpkt->pts      = frame->pts;
+    avpkt->size     = buf - avpkt->data;
+    avpkt->duration = ff_samples_to_time_base(avctx, frame->nb_samples);
+    *got_packet_ptr = 1;
+    return 0;
+}
+
+/*
+ * AVCodec.close callback: release every scratch buffer owned by the
+ * encoder context and zero the recorded buffer sizes.
+ */
+static av_cold int wavpack_encode_close(AVCodecContext *avctx)
+{
+    WavPackEncodeContext *s = avctx->priv_data;
+    int term, ch;
+
+    for (term = 0; term < MAX_TERMS + 2; term++) {
+        av_freep(&s->sampleptrs[term][0]);
+        av_freep(&s->sampleptrs[term][1]);
+        s->sampleptrs_size[term][0] = s->sampleptrs_size[term][1] = 0;
+    }
+
+    for (ch = 0; ch < 2; ch++) {
+        av_freep(&s->samples[ch]);
+        av_freep(&s->best_buffer[ch]);
+        av_freep(&s->temp_buffer[ch][0]);
+        av_freep(&s->temp_buffer[ch][1]);
+        s->samples_size[ch]        = 0;
+        s->best_buffer_size[ch]    = 0;
+        s->temp_buffer_size[ch][0] = s->temp_buffer_size[ch][1] = 0;
+    }
+
+    av_freep(&s->js_left);
+    av_freep(&s->js_right);
+    av_freep(&s->orig_l);
+    av_freep(&s->orig_r);
+    s->js_left_size = s->js_right_size = 0;
+    s->orig_l_size  = s->orig_r_size  = 0;
+
+    return 0;
+}
+
+/* Private encoder options: joint-stereo mode selection and false-stereo
+ * (identical L/R) optimization. */
+#define OFFSET(x) offsetof(WavPackEncodeContext, x)
+#define FLAGS AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM
+static const AVOption options[] = {
+    { "joint_stereo",  "", OFFSET(joint), AV_OPT_TYPE_INT, {.i64=0},-1, 1, FLAGS, "joint" },
+    { "on",   "mid/side",   0, AV_OPT_TYPE_CONST, {.i64= 1}, 0, 0, FLAGS, "joint"},
+    { "off",  "left/right", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, FLAGS, "joint"},
+    { "auto", NULL,         0, AV_OPT_TYPE_CONST, {.i64= 0}, 0, 0, FLAGS, "joint"},
+    { "optimize_mono",     "", OFFSET(optimize_mono), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "opt_mono" },
+    { "on",  NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "opt_mono"},
+    { "off", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "opt_mono"},
+    { NULL },
+};
+
+/* AVClass wiring the private options into the AVOptions system. */
+static const AVClass wavpack_encoder_class = {
+    .class_name = "WavPack encoder",
+    .item_name  = av_default_item_name,
+    .option     = options,
+    .version    = LIBAVUTIL_VERSION_INT,
+};
+
+/* Public codec registration for the native WavPack encoder. */
+AVCodec ff_wavpack_encoder = {
+    .name           = "wavpack",
+    .long_name      = NULL_IF_CONFIG_SMALL("WavPack"),
+    .type           = AVMEDIA_TYPE_AUDIO,
+    .id             = AV_CODEC_ID_WAVPACK,
+    .priv_data_size = sizeof(WavPackEncodeContext),
+    .priv_class     = &wavpack_encoder_class,
+    .init           = wavpack_encode_init,
+    .encode2        = wavpack_encode_frame,
+    .close          = wavpack_encode_close,
++    .capabilities   = AV_CODEC_CAP_SMALL_LAST_FRAME,
+    .sample_fmts    = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_U8P,
+                                                     AV_SAMPLE_FMT_S16P,
+                                                     AV_SAMPLE_FMT_S32P,
+                                                     AV_SAMPLE_FMT_FLTP,
+                                                     AV_SAMPLE_FMT_NONE },
+};
--- /dev/null
- .capabilities = CODEC_CAP_DR1,
+/*
+ * y41p decoder
+ *
+ * Copyright (c) 2012 Paul B Mahol
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "internal.h"
+
+static av_cold int y41p_decode_init(AVCodecContext *avctx)
+{
+ avctx->pix_fmt = AV_PIX_FMT_YUV411P;
+ avctx->bits_per_raw_sample = 12;
+
+ if (avctx->width & 7) {
+ av_log(avctx, AV_LOG_WARNING, "y41p requires width to be divisible by 8.\n");
+ }
+
+ return 0;
+}
+
+static int y41p_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame, AVPacket *avpkt)
+{
+ AVFrame *pic = data;
+ uint8_t *src = avpkt->data;
+ uint8_t *y, *u, *v;
+ int i, j, ret;
+
+ if (avpkt->size < 3LL * avctx->height * avctx->width / 2) {
+ av_log(avctx, AV_LOG_ERROR, "Insufficient input data.\n");
+ return AVERROR(EINVAL);
+ }
+
+ if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
+ return ret;
+
+ pic->key_frame = 1;
+ pic->pict_type = AV_PICTURE_TYPE_I;
+
+ for (i = avctx->height - 1; i >= 0 ; i--) {
+ y = &pic->data[0][i * pic->linesize[0]];
+ u = &pic->data[1][i * pic->linesize[1]];
+ v = &pic->data[2][i * pic->linesize[2]];
+ for (j = 0; j < avctx->width; j += 8) {
+ *(u++) = *src++;
+ *(y++) = *src++;
+ *(v++) = *src++;
+ *(y++) = *src++;
+
+ *(u++) = *src++;
+ *(y++) = *src++;
+ *(v++) = *src++;
+ *(y++) = *src++;
+
+ *(y++) = *src++;
+ *(y++) = *src++;
+ *(y++) = *src++;
+ *(y++) = *src++;
+ }
+ }
+
+ *got_frame = 1;
+
+ return avpkt->size;
+}
+
+AVCodec ff_y41p_decoder = {
+ .name = "y41p",
+ .long_name = NULL_IF_CONFIG_SMALL("Uncompressed YUV 4:1:1 12-bit"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_Y41P,
+ .init = y41p_decode_init,
+ .decode = y41p_decode_frame,
+    .capabilities = AV_CODEC_CAP_DR1,
+};
--- /dev/null
- .capabilities = CODEC_CAP_DR1,
+/*
+ * libquicktime yuv4 decoder
+ *
+ * Copyright (c) 2011 Carl Eugen Hoyos
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "internal.h"
+
+static av_cold int yuv4_decode_init(AVCodecContext *avctx)
+{
+ avctx->pix_fmt = AV_PIX_FMT_YUV420P;
+
+ return 0;
+}
+
+static int yuv4_decode_frame(AVCodecContext *avctx, void *data,
+ int *got_frame, AVPacket *avpkt)
+{
+ AVFrame *pic = data;
+ const uint8_t *src = avpkt->data;
+ uint8_t *y, *u, *v;
+ int i, j, ret;
+
+ if (avpkt->size < 6 * (avctx->width + 1 >> 1) * (avctx->height + 1 >> 1)) {
+ av_log(avctx, AV_LOG_ERROR, "Insufficient input data.\n");
+ return AVERROR(EINVAL);
+ }
+
+ if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
+ return ret;
+
+ pic->key_frame = 1;
+ pic->pict_type = AV_PICTURE_TYPE_I;
+
+ y = pic->data[0];
+ u = pic->data[1];
+ v = pic->data[2];
+
+ for (i = 0; i < (avctx->height + 1) >> 1; i++) {
+ for (j = 0; j < (avctx->width + 1) >> 1; j++) {
+ u[j] = *src++ ^ 0x80;
+ v[j] = *src++ ^ 0x80;
+ y[ 2 * j ] = *src++;
+ y[ 2 * j + 1] = *src++;
+ y[pic->linesize[0] + 2 * j ] = *src++;
+ y[pic->linesize[0] + 2 * j + 1] = *src++;
+ }
+
+ y += 2 * pic->linesize[0];
+ u += pic->linesize[1];
+ v += pic->linesize[2];
+ }
+
+ *got_frame = 1;
+
+ return avpkt->size;
+}
+
+AVCodec ff_yuv4_decoder = {
+ .name = "yuv4",
+ .long_name = NULL_IF_CONFIG_SMALL("Uncompressed packed 4:2:0"),
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_YUV4,
+ .init = yuv4_decode_init,
+ .decode = yuv4_decode_frame,
+    .capabilities = AV_CODEC_CAP_DR1,
+};
while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
ret >= 0 &&
- (!has_codec_parameters(st) || !has_decode_delay_been_guessed(st) ||
+ (!has_codec_parameters(st, NULL) || !has_decode_delay_been_guessed(st) ||
(!st->codec_info_nb_frames &&
- st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
+ (st->codec->codec->capabilities & AV_CODEC_CAP_CHANNEL_CONF)))) {
got_picture = 0;
switch (st->codec->codec_type) {
case AVMEDIA_TYPE_VIDEO: