Entries are sorted chronologically from oldest to youngest within each release;
releases are sorted from youngest to oldest.
-version <next>:
-
-- XWD encoder and decoder
-- Support for fragmentation in the mov/mp4 muxer
-- ISMV (Smooth Streaming) muxer
+version next:
+- v408 Quicktime and Microsoft AYUV Uncompressed 4:4:4:4 encoder and decoder
+- setfield filter
- CDXL demuxer and decoder
- Apple ProRes encoder
+- ffprobe -count_packets and -count_frames options
- Sun Rasterfile Encoder
-- remove libpostproc
- ID3v2 attached pictures reading and writing
- WMA Lossless decoder
-- XBM encoder
+- bluray protocol
+- blackdetect filter
+- libutvideo encoder wrapper (--enable-libutvideo)
+- swapuv filter
+- bbox filter
+- XBM encoder and decoder
- RealAudio Lossless decoder
- ZeroCodec decoder
-- drop support for avconv without libavfilter
-
-
-version 0.8:
-
+- tile video filter
+- Metal Gear Solid: The Twin Snakes demuxer
+- OpenEXR image decoder
+- removelogo filter
++- drop support for ffmpeg without libavfilter
+
+
+version 0.10:
+- Fixes: CVE-2011-3929, CVE-2011-3934, CVE-2011-3935, CVE-2011-3936,
+ CVE-2011-3937, CVE-2011-3940, CVE-2011-3941, CVE-2011-3944,
+ CVE-2011-3945, CVE-2011-3946, CVE-2011-3947, CVE-2011-3949,
+ CVE-2011-3950, CVE-2011-3951, CVE-2011-3952
+- v410 Quicktime Uncompressed 4:4:4 10-bit encoder and decoder
+- SBaGen (SBG) binaural beats script demuxer
+- OpenMG Audio muxer
+- Timecode extraction in DV and MOV
+- thumbnail video filter
+- XML output in ffprobe
+- asplit audio filter
+- tinterlace video filter
+- astreamsync audio filter
+- amerge audio filter
+- ISMV (Smooth Streaming) muxer
- GSM audio parser
- SMJPEG muxer
-
-
-version 0.8_beta2:
-
+- XWD encoder and decoder
- Automatic thread count based on detection of the number of (available) CPU cores
-- Deprecate libpostproc. If desired, the switch --enable-postproc will
- enable it but it may be removed in a later Libav release.
+- y41p Brooktree Uncompressed 4:1:1 12-bit encoder and decoder
+- ffprobe -show_error option
+- Avid 1:1 10-bit RGB Packer codec
+- v308 Quicktime Uncompressed 4:4:4 encoder and decoder
+- yuv4 libquicktime packed 4:2:0 encoder and decoder
+- ffprobe -show_frames option
+- silencedetect audio filter
+- ffprobe -show_program_version, -show_library_versions, -show_versions options
- rv34: frame-level multi-threading
- optimized iMDCT transform on x86 using SSE for mpegaudiodec
+- Improved PGS subtitle decoder
+- dumpgraph option to lavfi device
+- r210 and r10k encoders
+- ffwavesynth decoder
+- aviocat tool
+- ffeval tool
-version 0.8_beta1:
+version 0.9:
+- openal input device added
+- boxblur filter added
- BWF muxer
- Flash Screen Video 2 decoder
-- ffplay/ffprobe/ffserver renamed to avplay/avprobe/avserver
-- ffmpeg deprecated, added avconv, which is almost the same for now, except
+- lavfi input device added
+- added avconv, which is almost the same for now, except
for a few incompatible changes in the options, which will hopefully make them
easier to use. The changes are:
* The options placement is now strictly enforced! While in theory the
# libraries
avdevice_deps="avcodec avformat"
avformat_deps="avcodec"
+postproc_deps="gpl"
# programs
-avconv_deps="avcodec avfilter avformat swscale"
-avplay_deps="avcodec avformat swscale sdl"
-avplay_select="rdft"
-avprobe_deps="avcodec avformat"
-avserver_deps="avformat ffm_muxer fork rtp_protocol rtsp_demuxer !shared"
-avserver_extralibs='$ldl'
+ffplay_deps="avcodec avformat swscale swresample sdl"
+ffplay_select="buffersink_filter rdft"
+ffprobe_deps="avcodec avformat"
+ffserver_deps="avformat ffm_muxer fork rtp_protocol rtsp_demuxer"
+ffserver_extralibs='$ldl'
- ffmpeg_deps="avcodec avformat swscale swresample"
++ffmpeg_deps="avcodec avfilter avformat swscale swresample"
+ffmpeg_select="buffersink_filter"
doc_deps="texi2html"
#include "libavutil/avstring.h"
#include "libavutil/libm.h"
#include "libavutil/imgutils.h"
+#include "libavutil/timestamp.h"
#include "libavformat/os_support.h"
+#include "libswresample/swresample.h"
+
+#include "libavformat/ffm.h" // not public API
- #if CONFIG_AVFILTER
+# include "libavfilter/avcodec.h"
# include "libavfilter/avfilter.h"
# include "libavfilter/avfiltergraph.h"
+# include "libavfilter/buffersink.h"
# include "libavfilter/buffersrc.h"
# include "libavfilter/vsrc_buffer.h"
- #endif
#if HAVE_SYS_RESOURCE_H
#include <sys/types.h>
AVFifoBuffer *fifo; /* for compression: one audio fifo per codec */
FILE *logfile;
- #if CONFIG_AVFILTER
+ SwrContext *swr;
+
AVFilterContext *output_video_filter;
AVFilterContext *input_video_filter;
AVFilterBufferRef *picref;
av_freep(&output_files);
uninit_opts();
- av_free(audio_buf);
+ av_freep(&audio_buf);
allocated_audio_buf_size = 0;
- av_free(async_buf);
+ av_freep(&async_buf);
allocated_async_buf_size = 0;
- #if CONFIG_AVFILTER
avfilter_uninit();
- #endif
avformat_network_deinit();
if (received_sigterm) {
}
}
- static void do_video_resample(OutputStream *ost,
- InputStream *ist,
- AVFrame *in_picture,
- AVFrame **out_picture)
- {
- #if CONFIG_AVFILTER
- *out_picture = in_picture;
- #else
- AVCodecContext *dec = ist->st->codec;
- AVCodecContext *enc = ost->st->codec;
- int resample_changed = ost->resample_width != in_picture->width ||
- ost->resample_height != in_picture->height ||
- ost->resample_pix_fmt != in_picture->format;
-
- *out_picture = in_picture;
- if (resample_changed) {
- av_log(NULL, AV_LOG_INFO,
- "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s / frm size:%dx%d fmt:%s\n",
- ist->file_index, ist->st->index,
- ost->resample_width, ost->resample_height, av_get_pix_fmt_name(ost->resample_pix_fmt),
- dec->width , dec->height , av_get_pix_fmt_name(dec->pix_fmt),
- in_picture->width, in_picture->height, av_get_pix_fmt_name(in_picture->format));
- ost->resample_width = in_picture->width;
- ost->resample_height = in_picture->height;
- ost->resample_pix_fmt = in_picture->format;
- }
-
- ost->video_resample = in_picture->width != enc->width ||
- in_picture->height != enc->height ||
- in_picture->format != enc->pix_fmt;
-
- if (ost->video_resample) {
- *out_picture = &ost->resample_frame;
- if (!ost->img_resample_ctx || resample_changed) {
- /* initialize the destination picture */
- if (!ost->resample_frame.data[0]) {
- avcodec_get_frame_defaults(&ost->resample_frame);
- if (avpicture_alloc((AVPicture *)&ost->resample_frame, enc->pix_fmt,
- enc->width, enc->height)) {
- av_log(NULL, AV_LOG_FATAL, "Cannot allocate temp picture, check pix fmt\n");
- exit_program(1);
- }
- }
- /* initialize a new scaler context */
- sws_freeContext(ost->img_resample_ctx);
- ost->img_resample_ctx = sws_getContext(in_picture->width, in_picture->height, in_picture->format,
- enc->width, enc->height, enc->pix_fmt,
- ost->sws_flags, NULL, NULL, NULL);
- if (ost->img_resample_ctx == NULL) {
- av_log(NULL, AV_LOG_FATAL, "Cannot get resampling context\n");
- exit_program(1);
- }
- }
- sws_scale(ost->img_resample_ctx, in_picture->data, in_picture->linesize,
- 0, ost->resample_height, (*out_picture)->data, (*out_picture)->linesize);
- }
- #endif
- }
-
-static void do_video_out(AVFormatContext *s,
- OutputStream *ost,
- AVFrame *in_picture,
- int *frame_size, float quality)
+static double psnr(double d)
+{
+ return -10.0 * log(d) / log(10.0);
+}
+
+static void do_video_stats(AVFormatContext *os, OutputStream *ost,
+ int frame_size)
+{
+ AVCodecContext *enc;
+ int frame_number;
+ double ti1, bitrate, avg_bitrate;
+
+ /* this is executed just the first time do_video_stats is called */
+ if (!vstats_file) {
+ vstats_file = fopen(vstats_filename, "w");
+ if (!vstats_file) {
+ perror("fopen");
+ exit_program(1);
+ }
+ }
+
+ enc = ost->st->codec;
+ if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
+ frame_number = ost->frame_number;
+ fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
+ if (enc->flags&CODEC_FLAG_PSNR)
+ fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
+
+ fprintf(vstats_file,"f_size= %6d ", frame_size);
+ /* compute pts value */
+ ti1 = ost->sync_opts * av_q2d(enc->time_base);
+ if (ti1 < 0.01)
+ ti1 = 0.01;
+
+ bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
+ avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
+ fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
+ (double)video_size / 1024, ti1, bitrate, avg_bitrate);
+ fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
+ }
+}
+
+
+static void do_video_out(AVFormatContext *s, OutputStream *ost,
+ InputStream *ist, AVFrame *in_picture)
{
int nb_frames, i, ret, format_video_sync;
- AVFrame *final_picture;
AVCodecContext *enc;
double sync_ipts, delta;
+ double duration = 0;
+ int frame_size = 0;
+ float quality = same_quant ? in_picture->quality
+ : ost->st->codec->global_quality;
enc = ost->st->codec;
av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
}
- do_video_resample(ost, ist, in_picture, &final_picture);
- if (!ost->frame_number)
- ost->first_pts = ost->sync_opts;
--
/* duplicates frame if needed */
for (i = 0; i < nb_frames; i++) {
AVPacket pkt;
/* no picture yet */
return ret;
}
- decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
- decoded_frame->pkt_dts);
+
+ best_effort_timestamp= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "best_effort_timestamp");
+ if(*best_effort_timestamp != AV_NOPTS_VALUE)
+ ist->next_pts = ist->pts = decoded_frame->pts = *best_effort_timestamp;
+
pkt->size = 0;
+
pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
- #if CONFIG_AVFILTER
+ frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
+ for(i=0;i<nb_output_streams;i++) {
+ OutputStream *ost = ost = &output_streams[i];
+ if(check_output_constraints(ist, ost) && ost->encoding_needed){
+ int changed = ist->st->codec->width != ost->input_video_filter->outputs[0]->w
+ || ist->st->codec->height != ost->input_video_filter->outputs[0]->h
+ || ist->st->codec->pix_fmt != ost->input_video_filter->outputs[0]->format;
+ if (!frame_sample_aspect->num)
+ *frame_sample_aspect = ist->st->sample_aspect_ratio;
+ decoded_frame->pts = ist->pts;
+ if (ist->dr1 && decoded_frame->type==FF_BUFFER_TYPE_USER && !changed) {
+ FrameBuffer *buf = decoded_frame->opaque;
+ AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
+ decoded_frame->data, decoded_frame->linesize,
+ AV_PERM_READ | AV_PERM_PRESERVE,
+ ist->st->codec->width, ist->st->codec->height,
+ ist->st->codec->pix_fmt);
+
+ avfilter_copy_frame_props(fb, decoded_frame);
+ fb->buf->priv = buf;
+ fb->buf->free = filter_release_buffer;
+
+ buf->refcount++;
+ av_buffersrc_buffer(ost->input_video_filter, fb);
+ } else
+ if((av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame, AV_VSRC_BUF_FLAG_OVERWRITE)) < 0){
+ av_log(NULL, AV_LOG_FATAL, "Failed to inject frame into filter network\n");
+ exit_program(1);
+ }
+ }
+ }
- #endif
+
rate_emu_sleep(ist);
for (i = 0; i < nb_output_streams; i++) {
if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
continue;
- #if CONFIG_AVFILTER
- resample_changed = ost->resample_width != decoded_frame->width ||
- ost->resample_height != decoded_frame->height ||
- ost->resample_pix_fmt != decoded_frame->format;
- if (resample_changed) {
- av_log(NULL, AV_LOG_INFO,
- "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
- ist->file_index, ist->st->index,
- ost->resample_width, ost->resample_height, av_get_pix_fmt_name(ost->resample_pix_fmt),
- decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
-
- avfilter_graph_free(&ost->graph);
- if (configure_video_filters(ist, ost)) {
- av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
- exit_program(1);
- }
-
- ost->resample_width = decoded_frame->width;
- ost->resample_height = decoded_frame->height;
- ost->resample_pix_fmt = decoded_frame->format;
- }
+ while (av_buffersink_poll_frame(ost->output_video_filter)) {
+ AVRational ist_pts_tb = ost->output_video_filter->inputs[0]->time_base;
+ AVFrame *filtered_frame;
- if (ist->st->sample_aspect_ratio.num)
- decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
- if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) {
- FrameBuffer *buf = decoded_frame->opaque;
- AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
- decoded_frame->data, decoded_frame->linesize,
- AV_PERM_READ | AV_PERM_PRESERVE,
- ist->st->codec->width, ist->st->codec->height,
- ist->st->codec->pix_fmt);
-
- avfilter_copy_frame_props(fb, decoded_frame);
- fb->buf->priv = buf;
- fb->buf->free = filter_release_buffer;
-
- buf->refcount++;
- av_buffersrc_buffer(ost->input_video_filter, fb);
- } else
- av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame,
- decoded_frame->pts, decoded_frame->sample_aspect_ratio);
-
- if (!ist->filtered_frame && !(ist->filtered_frame = avcodec_alloc_frame())) {
- ret = AVERROR(ENOMEM);
- goto fail;
- } else
- avcodec_get_frame_defaults(ist->filtered_frame);
- filtered_frame = ist->filtered_frame;
-
- frame_available = avfilter_poll_frame(ost->output_video_filter->inputs[0]);
- while (frame_available) {
- AVRational ist_pts_tb;
- if ((ret = get_filtered_video_frame(ost->output_video_filter,
- filtered_frame, &ost->picref,
- &ist_pts_tb)) < 0)
+ if (av_buffersink_get_buffer_ref(ost->output_video_filter, &ost->picref, 0) < 0){
+ av_log(NULL, AV_LOG_WARNING, "AV Filter told us it has a frame available but failed to output one\n");
+ goto cont;
+ }
+ if (!ist->filtered_frame && !(ist->filtered_frame = avcodec_alloc_frame())) {
+ ret = AVERROR(ENOMEM);
goto fail;
+ }
+ filtered_frame = ist->filtered_frame;
+ *filtered_frame= *decoded_frame; //for me_threshold
+ avfilter_fill_frame_from_video_buffer_ref(filtered_frame, ost->picref);
filtered_frame->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
if (!ost->frame_aspect_ratio)
- ost->st->codec->sample_aspect_ratio = ost->picref->video->pixel_aspect;
-
- do_video_out(output_files[ost->file_index].ctx, ost, filtered_frame, &frame_size,
- same_quant ? quality : ost->st->codec->global_quality);
- if (vstats_filename && frame_size)
- do_video_stats(output_files[ost->file_index].ctx, ost, frame_size);
- frame_available = ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]);
+ ost->st->codec->sample_aspect_ratio = ost->picref->video->sample_aspect_ratio;
+ do_video_out(output_files[ost->file_index].ctx, ost, ist, filtered_frame);
+ cont:
avfilter_unref_buffer(ost->picref);
}
- #else
- do_video_out(output_files[ost->file_index].ctx, ost, ist, decoded_frame);
- #endif
}
fail:
ost->resample_width = icodec->width;
ost->resample_pix_fmt = icodec->pix_fmt;
- /*
- * We want CFR output if and only if one of those is true:
- * 1) user specified output framerate with -r
- * 2) user specified -vsync cfr
- * 3) output format is CFR and the user didn't force vsync to
- * something else than CFR
- *
- * in such a case, set ost->frame_rate
- */
- if (!ost->frame_rate.num &&
- (video_sync_method == VSYNC_CFR ||
- (video_sync_method == VSYNC_AUTO &&
- !(oc->oformat->flags & (AVFMT_NOTIMESTAMPS | AVFMT_VARIABLE_FPS))))) {
- ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational){25, 1};
- if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
- int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
- ost->frame_rate = ost->enc->supported_framerates[idx];
- }
+ if (!ost->frame_rate.num)
+ ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational) { 25, 1 };
+ if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
+ int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
+ ost->frame_rate = ost->enc->supported_framerates[idx];
}
- if (ost->frame_rate.num) {
- codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num};
- video_sync_method = VSYNC_CFR;
- } else
- codec->time_base = ist->st->time_base;
+ codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num};
+ if ( av_q2d(codec->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
+ && (video_sync_method == VSYNC_CFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
+ av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
+ "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
+ }
+ for (j = 0; j < ost->forced_kf_count; j++)
+ ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
+ AV_TIME_BASE_Q,
+ codec->time_base);
- #if CONFIG_AVFILTER
if (configure_video_filters(ist, ost)) {
av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
- exit(1);
+ exit_program(1);
}
- #endif
break;
case AVMEDIA_TYPE_SUBTITLE:
codec->time_base = (AVRational){1, 1000};
ipts_min = INT64_MAX;
opts_min = 1e100;
- #if CONFIG_AVFILTER
+ /* if 'q' pressed, exits */
+ if (!using_stdin) {
+ static int64_t last_time;
+ if (received_nb_signals)
+ break;
+ /* read_key() returns 0 on EOF */
+ if(cur_time - last_time >= 100000 && !run_as_daemon){
+ key = read_key();
+ last_time = cur_time;
+ }else
+ key = -1;
+ if (key == 'q')
+ break;
+ if (key == '+') av_log_set_level(av_log_get_level()+10);
+ if (key == '-') av_log_set_level(av_log_get_level()-10);
+ if (key == 's') qp_hist ^= 1;
+ if (key == 'h'){
+ if (do_hex_dump){
+ do_hex_dump = do_pkt_dump = 0;
+ } else if(do_pkt_dump){
+ do_hex_dump = 1;
+ } else
+ do_pkt_dump = 1;
+ av_log_set_level(AV_LOG_DEBUG);
+ }
- #endif
+ if (key == 'c' || key == 'C'){
+ char buf[4096], target[64], command[256], arg[256] = {0};
+ double time;
+ int k, n = 0;
+ fprintf(stderr, "\nEnter command: <target> <time> <command>[ <argument>]\n");
+ i = 0;
+ while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
+ if (k > 0)
+ buf[i++] = k;
+ buf[i] = 0;
+ if (k > 0 &&
+ (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
+ av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
+ target, time, command, arg);
+ for (i = 0; i < nb_output_streams; i++) {
+ ost = &output_streams[i];
+ if (ost->graph) {
+ if (time < 0) {
+ ret = avfilter_graph_send_command(ost->graph, target, command, arg, buf, sizeof(buf),
+ key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
+ fprintf(stderr, "Command reply for stream %d: ret:%d res:%s\n", i, ret, buf);
+ } else {
+ ret = avfilter_graph_queue_command(ost->graph, target, command, arg, 0, time);
+ }
+ }
+ }
+ } else {
+ av_log(NULL, AV_LOG_ERROR,
+ "Parse error, at least 3 arguments were expected, "
+ "only %d given in string '%s'\n", n, buf);
+ }
+ }
+ if (key == 'd' || key == 'D'){
+ int debug=0;
+ if(key == 'D') {
+ debug = input_streams[0].st->codec->debug<<1;
+ if(!debug) debug = 1;
+ while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
+ debug += debug;
+ }else
+ if(scanf("%d", &debug)!=1)
+ fprintf(stderr,"error parsing debug value\n");
+ for(i=0;i<nb_input_streams;i++) {
+ input_streams[i].st->codec->debug = debug;
+ }
+ for(i=0;i<nb_output_streams;i++) {
+ ost = &output_streams[i];
+ ost->st->codec->debug = debug;
+ }
+ if(debug) av_log_set_level(AV_LOG_DEBUG);
+ fprintf(stderr,"debug=%d\n", debug);
+ }
+ if (key == '?'){
+ fprintf(stderr, "key function\n"
+ "? show this help\n"
+ "+ increase verbosity\n"
+ "- decrease verbosity\n"
+ "c Send command to filtergraph\n"
+ "D cycle through available debug modes\n"
+ "h dump packets/hex press to cycle through the 3 states\n"
+ "q quit\n"
+ "s Show QP histogram\n"
+ );
+ }
+ }
/* select the stream that we must read now by looking at the
smallest output pts */
av_fifo_free(ost->fifo); /* works even if fifo is not
initialized but set to zero */
av_freep(&ost->st->codec->subtitle_header);
- av_free(ost->resample_frame.data[0]);
av_free(ost->forced_kf_pts);
- if (ost->video_resample)
- sws_freeContext(ost->img_resample_ctx);
- if (ost->resample)
- audio_resample_close(ost->resample);
- if (ost->reformat_ctx)
- av_audio_convert_free(ost->reformat_ctx);
+ swr_free(&ost->swr);
av_dict_free(&ost->opts);
}
}
{ "frames", OPT_INT64 | HAS_ARG | OPT_SPEC, {.off = OFFSET(max_frames)}, "set the number of frames to record", "number" },
{ "tag", OPT_STRING | HAS_ARG | OPT_SPEC, {.off = OFFSET(codec_tags)}, "force codec tag/fourcc", "fourcc/tag" },
{ "q", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
- { "qscale", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
+ { "qscale", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_qscale}, "use fixed quality scale (VBR)", "q" },
+ { "profile", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_profile}, "set profile", "profile" },
- #if CONFIG_AVFILTER
{ "filter", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(filters)}, "set stream filterchain", "filter_list" },
- #endif
{ "stats", OPT_BOOL, {&print_stats}, "print progress report during encoding", },
{ "attach", HAS_ARG | OPT_FUNC2, {(void*)opt_attach}, "add an attachment to the output file", "filename" },
{ "dump_attachment", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(dump_attachment)}, "extract an attachment into a file", "filename" },
{ "same_quant", OPT_BOOL | OPT_VIDEO, {(void*)&same_quant},
"use same quantizer as source (implies VBR)" },
{ "pass", HAS_ARG | OPT_VIDEO, {(void*)opt_pass}, "select the pass number (1 or 2)", "n" },
- { "passlogfile", HAS_ARG | OPT_STRING | OPT_VIDEO, {(void*)&pass_logfilename_prefix}, "select two pass log file name prefix", "prefix" },
+ { "passlogfile", HAS_ARG | OPT_VIDEO, {(void*)&opt_passlogfile}, "select two pass log file name prefix", "prefix" },
{ "deinterlace", OPT_EXPERT | OPT_VIDEO, {(void*)opt_deinterlace},
"this option is deprecated, use the yadif filter instead" },
+ { "psnr", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_psnr}, "calculate PSNR of compressed frames" },
{ "vstats", OPT_EXPERT | OPT_VIDEO, {(void*)&opt_vstats}, "dump video coding statistics to file" },
{ "vstats_file", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_vstats_file}, "dump video coding statistics to file", "file" },
- #if CONFIG_AVFILTER
{ "vf", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_filters}, "video filters", "filter list" },
- #endif
{ "intra_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(intra_matrices)}, "specify intra matrix coeffs", "matrix" },
{ "inter_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(inter_matrices)}, "specify inter matrix coeffs", "matrix" },
{ "top", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_INT| OPT_SPEC, {.off = OFFSET(top_field_first)}, "top=1/bottom=0/auto=-1 field first", "" },
To do a more complex LATM demuxing a separate LATM demuxer should be used.
*/
AVCodec ff_aac_latm_decoder = {
- .name = "aac_latm",
- .type = AVMEDIA_TYPE_AUDIO,
- .id = CODEC_ID_AAC_LATM,
- .priv_data_size = sizeof(struct LATMContext),
- .init = latm_decode_init,
- .close = aac_decode_close,
- .decode = latm_decode_frame,
- .long_name = NULL_IF_CONFIG_SMALL("AAC LATM (Advanced Audio Codec LATM syntax)"),
- .sample_fmts = (const enum AVSampleFormat[]) {
+ .name = "aac_latm",
+ .type = AVMEDIA_TYPE_AUDIO,
+ .id = CODEC_ID_AAC_LATM,
+ .priv_data_size = sizeof(struct LATMContext),
+ .init = latm_decode_init,
+ .close = aac_decode_close,
+ .decode = latm_decode_frame,
+ .long_name = NULL_IF_CONFIG_SMALL("AAC LATM (Advanced Audio Codec LATM syntax)"),
+ .sample_fmts = (const enum AVSampleFormat[]) {
AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
},
- .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
+ .capabilities = CODEC_CAP_CHANNEL_CONF | CODEC_CAP_DR1,
.channel_layouts = aac_channel_layout,
+ .flush = flush,
};
.init = aac_encode_init,
.encode2 = aac_encode_frame,
.close = aac_encode_end,
- .capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY | CODEC_CAP_EXPERIMENTAL,
- .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_FLT,AV_SAMPLE_FMT_NONE},
- .long_name = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"),
- .priv_class = &aacenc_class,
+ .supported_samplerates = avpriv_mpeg4audio_sample_rates,
+ .capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY |
+ CODEC_CAP_EXPERIMENTAL,
+ .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLT,
+ AV_SAMPLE_FMT_NONE },
+ .long_name = NULL_IF_CONFIG_SMALL("Advanced Audio Coding"),
+ .priv_class = &aacenc_class,
};
.priv_data_size = sizeof(BMPContext),
.init = bmp_encode_init,
.encode2 = bmp_encode_frame,
- .pix_fmts = (const enum PixelFormat[]){
+ .pix_fmts = (const enum PixelFormat[]){
- PIX_FMT_BGR24,
- PIX_FMT_RGB555, PIX_FMT_RGB444, PIX_FMT_RGB565,
+ PIX_FMT_BGRA, PIX_FMT_BGR24,
+ PIX_FMT_RGB565, PIX_FMT_RGB555, PIX_FMT_RGB444,
PIX_FMT_RGB8, PIX_FMT_BGR8, PIX_FMT_RGB4_BYTE, PIX_FMT_BGR4_BYTE, PIX_FMT_GRAY8, PIX_FMT_PAL8,
PIX_FMT_MONOBLACK,
- PIX_FMT_NONE},
- .long_name = NULL_IF_CONFIG_SMALL("BMP image"),
+ PIX_FMT_NONE
+ },
+ .long_name = NULL_IF_CONFIG_SMALL("BMP image"),
};
.init = dnxhd_decode_init,
.close = dnxhd_decode_close,
.decode = dnxhd_decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
- .long_name = NULL_IF_CONFIG_SMALL("VC3/DNxHD"),
+ .long_name = NULL_IF_CONFIG_SMALL("VC3/DNxHD"),
};
.init = dnxhd_encode_init,
.encode2 = dnxhd_encode_picture,
.close = dnxhd_encode_end,
- .capabilities = CODEC_CAP_SLICE_THREADS,
- .pix_fmts = (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_YUV422P10, PIX_FMT_NONE},
- .long_name = NULL_IF_CONFIG_SMALL("VC3/DNxHD"),
- .priv_class = &class,
+ .capabilities = CODEC_CAP_SLICE_THREADS,
+ .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV422P,
+ PIX_FMT_YUV422P10,
+ PIX_FMT_NONE },
+ .long_name = NULL_IF_CONFIG_SMALL("VC3/DNxHD"),
+ .priv_class = &class,
+ .defaults = dnxhd_defaults,
};
.name = "dvdsub",
.type = AVMEDIA_TYPE_SUBTITLE,
.id = CODEC_ID_DVD_SUBTITLE,
+ .priv_data_size = sizeof(DVDSubContext),
+ .init = dvdsub_init,
.decode = dvdsub_decode,
- .long_name = NULL_IF_CONFIG_SMALL("DVD subtitles"),
+ .long_name = NULL_IF_CONFIG_SMALL("DVD subtitles"),
};
.init = encode_init,
.encode2 = encode_frame,
.close = common_end,
- .capabilities = CODEC_CAP_SLICE_THREADS,
+ .capabilities = CODEC_CAP_SLICE_THREADS,
- .pix_fmts = (const enum PixelFormat[]){
- PIX_FMT_YUV420P, PIX_FMT_YUV444P, PIX_FMT_YUV422P, PIX_FMT_YUV411P,
- PIX_FMT_YUV410P, PIX_FMT_RGB32, PIX_FMT_YUV420P16, PIX_FMT_YUV422P16,
- PIX_FMT_YUV444P16, PIX_FMT_NONE
- },
+ .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUVA420P, PIX_FMT_YUV444P, PIX_FMT_YUVA444P, PIX_FMT_YUV440P, PIX_FMT_YUV422P, PIX_FMT_YUV411P, PIX_FMT_YUV410P, PIX_FMT_0RGB32, PIX_FMT_RGB32, PIX_FMT_YUV420P16, PIX_FMT_YUV422P16, PIX_FMT_YUV444P16, PIX_FMT_YUV444P9, PIX_FMT_YUV422P9, PIX_FMT_YUV420P9, PIX_FMT_YUV420P10, PIX_FMT_YUV422P10, PIX_FMT_YUV444P10, PIX_FMT_GRAY16, PIX_FMT_GRAY8, PIX_FMT_NONE},
- .long_name= NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
+ .long_name = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
};
#endif
.init = flac_encode_init,
.encode2 = flac_encode_frame,
.close = flac_encode_close,
- .capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY | CODEC_CAP_LOSSLESS,
- .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
- .long_name = NULL_IF_CONFIG_SMALL("FLAC (Free Lossless Audio Codec)"),
- .priv_class = &flac_encoder_class,
- .capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY,
++ .capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY | CODEC_CAP_LOSSLESS,
+ .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_NONE },
+ .long_name = NULL_IF_CONFIG_SMALL("FLAC (Free Lossless Audio Codec)"),
+ .priv_class = &flac_encoder_class,
};
.init = decode_init,
.close = decode_end,
.decode = decode_frame,
- .capabilities = CODEC_CAP_DR1,
+ .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
- .long_name = NULL_IF_CONFIG_SMALL("Fraps"),
+ .long_name = NULL_IF_CONFIG_SMALL("Fraps"),
};
.init = encode_init,
.encode2 = encode_frame,
.close = encode_end,
- .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_NONE},
- .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
+ .pix_fmts = (const enum PixelFormat[]){
- PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE
++ PIX_FMT_YUV422P, PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_NONE
+ },
+ .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif
.init = encode_init,
.encode2 = encode_frame,
.close = encode_end,
- .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_NONE},
- .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
+ .pix_fmts = (const enum PixelFormat[]){
- PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE
++ PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_NONE
+ },
+ .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif
--- /dev/null
- .name = "libopenjpeg",
- .type = AVMEDIA_TYPE_VIDEO,
- .id = CODEC_ID_JPEG2000,
- .priv_data_size = sizeof(LibOpenJPEGContext),
- .init = libopenjpeg_decode_init,
- .close = libopenjpeg_decode_close,
- .decode = libopenjpeg_decode_frame,
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
- .max_lowres = 5,
+/*
+ * JPEG 2000 decoding support via OpenJPEG
+ * Copyright (c) 2009 Jaikrishnan Menon <realityman@gmx.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+* @file
+* JPEG 2000 decoder using libopenjpeg
+*/
+
+#include "libavutil/imgutils.h"
+#include "libavutil/pixfmt.h"
+#include "avcodec.h"
+#include "libavutil/intreadwrite.h"
+#include "thread.h"
+#define OPJ_STATIC
+#include <openjpeg.h>
+
+#define JP2_SIG_TYPE 0x6A502020
+#define JP2_SIG_VALUE 0x0D0A870A
+
+typedef struct {
+ opj_dparameters_t dec_params;
+ AVFrame image;
+} LibOpenJPEGContext;
+
+static enum PixelFormat check_image_attributes(AVCodecContext *avctx, opj_image_t *image)
+{
+ opj_image_comp_t c0 = image->comps[0];
+ opj_image_comp_t c1 = image->comps[1];
+ opj_image_comp_t c2 = image->comps[2];
+ int compRatio = 0;
+ compRatio |= c0.dx << 15 | c0.dy << 12;
+ compRatio |= c1.dx << 9 | c1.dy << 6;
+ compRatio |= c2.dx << 3 | c2.dy;
+
+ if (image->numcomps == 4) {
+ if (c0.prec == 8) {
+ if (compRatio == 0112222 &&
+ image->comps[3].dx == 1 && image->comps[3].dy == 1) {
+ return PIX_FMT_YUVA420P;
+ } else {
+ return PIX_FMT_RGBA;
+ }
+ } else {
+ return PIX_FMT_RGBA64;
+ }
+ }
+
+ switch (compRatio) {
+ case 0111111: goto libopenjpeg_yuv444_rgb;
+ case 0111212: return PIX_FMT_YUV440P;
+ case 0112121: goto libopenjpeg_yuv422;
+ case 0112222: goto libopenjpeg_yuv420;
+ default: goto libopenjpeg_rgb;
+ }
+
+libopenjpeg_yuv420:
+ switch (c0.prec) {
+ case 8: return PIX_FMT_YUV420P;
+ case 9: return PIX_FMT_YUV420P9;
+ case 10: return PIX_FMT_YUV420P10;
+ case 16: return PIX_FMT_YUV420P16;
+ }
+
+libopenjpeg_yuv422:
+ switch (c0.prec) {
+ case 8: return PIX_FMT_YUV422P;
+ case 9: return PIX_FMT_YUV422P9;
+ case 10: return PIX_FMT_YUV422P10;
+ case 16: return PIX_FMT_YUV422P16;
+ }
+
+libopenjpeg_yuv444_rgb:
+ switch (c0.prec) {
+ case 8: return PIX_FMT_RGB24;
+ case 9: return PIX_FMT_YUV444P9;
+ case 10: return PIX_FMT_YUV444P10;
+ case 16: return PIX_FMT_YUV444P16;
+ }
+
+libopenjpeg_rgb:
+ switch (c0.prec) {
+ case 8: return PIX_FMT_RGB24;
+ default: return PIX_FMT_RGB48;
+ }
+
+ return PIX_FMT_RGB24;
+}
+
+static inline int libopenjpeg_ispacked(enum PixelFormat pix_fmt) {
+ int i, component_plane;
+
+ if (pix_fmt == PIX_FMT_GRAY16)
+ return 0;
+
+ component_plane = av_pix_fmt_descriptors[pix_fmt].comp[0].plane;
+ for(i = 1; i < av_pix_fmt_descriptors[pix_fmt].nb_components; i++) {
+ if (component_plane != av_pix_fmt_descriptors[pix_fmt].comp[i].plane)
+ return 0;
+ }
+ return 1;
+}
+
+static inline void libopenjpeg_copy_to_packed8(AVFrame *picture, opj_image_t *image) {
+ uint8_t *img_ptr;
+ int index, x, y, c;
+ for(y = 0; y < picture->height; y++) {
+ index = y*picture->width;
+ img_ptr = picture->data[0] + y*picture->linesize[0];
+ for(x = 0; x < picture->width; x++, index++) {
+ for(c = 0; c < image->numcomps; c++) {
+ *img_ptr++ = image->comps[c].data[index];
+ }
+ }
+ }
+}
+
+static inline void libopenjpeg_copy_to_packed16(AVFrame *picture, opj_image_t *image) {
+ uint16_t *img_ptr;
+ int index, x, y, c;
+ int adjust[4];
+ for (x = 0; x < image->numcomps; x++) {
+ adjust[x] = FFMAX(FFMIN(16 - image->comps[x].prec, 8), 0);
+ }
+ for (y = 0; y < picture->height; y++) {
+ index = y*picture->width;
+ img_ptr = (uint16_t*) (picture->data[0] + y*picture->linesize[0]);
+ for (x = 0; x < picture->width; x++, index++) {
+ for (c = 0; c < image->numcomps; c++) {
+ *img_ptr++ = image->comps[c].data[index] << adjust[c];
+ }
+ }
+ }
+}
+
+static inline void libopenjpeg_copyto8(AVFrame *picture, opj_image_t *image) {
+ int *comp_data;
+ uint8_t *img_ptr;
+ int index, x, y;
+
+ for(index = 0; index < image->numcomps; index++) {
+ comp_data = image->comps[index].data;
+ for(y = 0; y < image->comps[index].h; y++) {
+ img_ptr = picture->data[index] + y * picture->linesize[index];
+ for(x = 0; x < image->comps[index].w; x++) {
+ *img_ptr = (uint8_t) *comp_data;
+ img_ptr++;
+ comp_data++;
+ }
+ }
+ }
+}
+
+static inline void libopenjpeg_copyto16(AVFrame *picture, opj_image_t *image) {
+ int *comp_data;
+ uint16_t *img_ptr;
+ int index, x, y;
+ for(index = 0; index < image->numcomps; index++) {
+ comp_data = image->comps[index].data;
+ for(y = 0; y < image->comps[index].h; y++) {
+ img_ptr = (uint16_t*) (picture->data[index] + y * picture->linesize[index]);
+ for(x = 0; x < image->comps[index].w; x++) {
+ *img_ptr = *comp_data;
+ img_ptr++;
+ comp_data++;
+ }
+ }
+ }
+}
+
+static av_cold int libopenjpeg_decode_init(AVCodecContext *avctx)
+{
+ LibOpenJPEGContext *ctx = avctx->priv_data;
+
+ opj_set_default_decoder_parameters(&ctx->dec_params);
+ avcodec_get_frame_defaults(&ctx->image);
+ avctx->coded_frame = &ctx->image;
+ return 0;
+}
+
+static av_cold int libopenjpeg_decode_init_thread_copy(AVCodecContext *avctx)
+{
+ LibOpenJPEGContext *ctx = avctx->priv_data;
+
+ avctx->coded_frame = &ctx->image;
+ return 0;
+}
+
+static int libopenjpeg_decode_frame(AVCodecContext *avctx,
+ void *data, int *data_size,
+ AVPacket *avpkt)
+{
+ uint8_t *buf = avpkt->data;
+ int buf_size = avpkt->size;
+ LibOpenJPEGContext *ctx = avctx->priv_data;
+ AVFrame *picture = &ctx->image, *output = data;
+ opj_dinfo_t *dec;
+ opj_cio_t *stream;
+ opj_image_t *image;
+ int width, height, ret = -1;
+ int pixel_size = 0;
+ int ispacked = 0;
+
+ *data_size = 0;
+
+ // Check if input is a raw jpeg2k codestream or in jp2 wrapping
+ if((AV_RB32(buf) == 12) &&
+ (AV_RB32(buf + 4) == JP2_SIG_TYPE) &&
+ (AV_RB32(buf + 8) == JP2_SIG_VALUE)) {
+ dec = opj_create_decompress(CODEC_JP2);
+ } else {
+ // If the AVPacket contains a jp2c box, then skip to
+ // the starting byte of the codestream.
+ if (AV_RB32(buf + 4) == AV_RB32("jp2c"))
+ buf += 8;
+ dec = opj_create_decompress(CODEC_J2K);
+ }
+
+ if(!dec) {
+ av_log(avctx, AV_LOG_ERROR, "Error initializing decoder.\n");
+ return -1;
+ }
+ opj_set_event_mgr((opj_common_ptr)dec, NULL, NULL);
+
+ ctx->dec_params.cp_limit_decoding = LIMIT_TO_MAIN_HEADER;
+ // Tie decoder with decoding parameters
+ opj_setup_decoder(dec, &ctx->dec_params);
+ stream = opj_cio_open((opj_common_ptr)dec, buf, buf_size);
+ if(!stream) {
+ av_log(avctx, AV_LOG_ERROR, "Codestream could not be opened for reading.\n");
+ opj_destroy_decompress(dec);
+ return -1;
+ }
+
+ // Decode the header only
+ image = opj_decode_with_info(dec, stream, NULL);
+ opj_cio_close(stream);
+ if(!image) {
+ av_log(avctx, AV_LOG_ERROR, "Error decoding codestream.\n");
+ opj_destroy_decompress(dec);
+ return -1;
+ }
+ width = image->x1 - image->x0;
+ height = image->y1 - image->y0;
+ if(av_image_check_size(width, height, 0, avctx) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "%dx%d dimension invalid.\n", width, height);
+ goto done;
+ }
+ avcodec_set_dimensions(avctx, width, height);
+
+ switch (image->numcomps) {
+ case 1: avctx->pix_fmt = (image->comps[0].prec == 8) ? PIX_FMT_GRAY8 : PIX_FMT_GRAY16;
+ break;
+ case 2: avctx->pix_fmt = PIX_FMT_GRAY8A;
+ break;
+ case 3:
+ case 4: avctx->pix_fmt = check_image_attributes(avctx, image);
+ break;
+ default: av_log(avctx, AV_LOG_ERROR, "%d components unsupported.\n", image->numcomps);
+ goto done;
+ }
+
+ if(picture->data[0])
+ ff_thread_release_buffer(avctx, picture);
+
+ if(ff_thread_get_buffer(avctx, picture) < 0){
+ av_log(avctx, AV_LOG_ERROR, "ff_thread_get_buffer() failed\n");
+ goto done;
+ }
+
+ ctx->dec_params.cp_limit_decoding = NO_LIMITATION;
+ ctx->dec_params.cp_reduce = avctx->lowres;
+ // Tie decoder with decoding parameters
+ opj_setup_decoder(dec, &ctx->dec_params);
+ stream = opj_cio_open((opj_common_ptr)dec, buf, buf_size);
+ if(!stream) {
+ av_log(avctx, AV_LOG_ERROR, "Codestream could not be opened for reading.\n");
+ goto done;
+ }
+
+ opj_image_destroy(image);
+ // Decode the codestream
+ image = opj_decode_with_info(dec, stream, NULL);
+ opj_cio_close(stream);
+ if(!image) {
+ av_log(avctx, AV_LOG_ERROR, "Error decoding codestream.\n");
+ goto done;
+ }
+
+ pixel_size = av_pix_fmt_descriptors[avctx->pix_fmt].comp[0].step_minus1 + 1;
+ ispacked = libopenjpeg_ispacked(avctx->pix_fmt);
+
+ switch (pixel_size) {
+ case 1:
+ if (ispacked) {
+ libopenjpeg_copy_to_packed8(picture, image);
+ } else {
+ libopenjpeg_copyto8(picture, image);
+ }
+ break;
+ case 2:
+ if (ispacked) {
+ libopenjpeg_copy_to_packed8(picture, image);
+ } else {
+ libopenjpeg_copyto16(picture, image);
+ }
+ break;
+ case 3:
+ case 4:
+ if (ispacked) {
+ libopenjpeg_copy_to_packed8(picture, image);
+ }
+ break;
+ case 6:
+ case 8:
+ if (ispacked) {
+ libopenjpeg_copy_to_packed16(picture, image);
+ }
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "unsupported pixel size %d\n", pixel_size);
+ goto done;
+ }
+
+ *output = ctx->image;
+ *data_size = sizeof(AVPicture);
+ ret = buf_size;
+
+done:
+ opj_image_destroy(image);
+ opj_destroy_decompress(dec);
+ return ret;
+}
+
+static av_cold int libopenjpeg_decode_close(AVCodecContext *avctx)
+{
+ LibOpenJPEGContext *ctx = avctx->priv_data;
+
+ if(ctx->image.data[0])
+ ff_thread_release_buffer(avctx, &ctx->image);
+ return 0 ;
+}
+
+
+AVCodec ff_libopenjpeg_decoder = {
- .init_thread_copy = ONLY_IF_THREADS_ENABLED(libopenjpeg_decode_init_thread_copy)
++ .name = "libopenjpeg",
++ .type = AVMEDIA_TYPE_VIDEO,
++ .id = CODEC_ID_JPEG2000,
++ .priv_data_size = sizeof(LibOpenJPEGContext),
++ .init = libopenjpeg_decode_init,
++ .close = libopenjpeg_decode_close,
++ .decode = libopenjpeg_decode_frame,
++ .capabilities = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
++ .max_lowres = 5,
+ .long_name = NULL_IF_CONFIG_SMALL("OpenJPEG JPEG 2000"),
++ .init_thread_copy = ONLY_IF_THREADS_ENABLED(libopenjpeg_decode_init_thread_copy),
+};
.init = aac_encode_init,
.encode2 = aac_encode_frame,
.close = aac_encode_close,
+ .supported_samplerates = avpriv_mpeg4audio_sample_rates,
.capabilities = CODEC_CAP_SMALL_LAST_FRAME | CODEC_CAP_DELAY,
- .sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_S16,AV_SAMPLE_FMT_NONE},
- .long_name = NULL_IF_CONFIG_SMALL("Android VisualOn AAC"),
+ .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_NONE },
+ .long_name = NULL_IF_CONFIG_SMALL("Android VisualOn AAC"),
};
};
AVCodec ff_libx264_encoder = {
- .name = "libx264",
- .type = AVMEDIA_TYPE_VIDEO,
- .id = CODEC_ID_H264,
- .priv_data_size = sizeof(X264Context),
- .init = X264_init,
- .encode2 = X264_frame,
- .close = X264_close,
- .capabilities = CODEC_CAP_DELAY | CODEC_CAP_AUTO_THREADS,
- .long_name = NULL_IF_CONFIG_SMALL("libx264 H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
- .priv_class = &class,
- .defaults = x264_defaults,
+ .name = "libx264",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = CODEC_ID_H264,
+ .priv_data_size = sizeof(X264Context),
+ .init = X264_init,
+ .encode2 = X264_frame,
+ .close = X264_close,
+ .capabilities = CODEC_CAP_DELAY | CODEC_CAP_AUTO_THREADS,
+ .long_name = NULL_IF_CONFIG_SMALL("libx264 H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
+ .priv_class = &class,
+ .defaults = x264_defaults,
.init_static_data = X264_init_static,
};
+
+AVCodec ff_libx264rgb_encoder = {
+ .name = "libx264rgb",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = CODEC_ID_H264,
+ .priv_data_size = sizeof(X264Context),
+ .init = X264_init,
+ .encode2 = X264_frame,
+ .close = X264_close,
+ .capabilities = CODEC_CAP_DELAY,
+ .long_name = NULL_IF_CONFIG_SMALL("libx264 H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 RGB"),
+ .priv_class = &rgbclass,
+ .defaults = x264_defaults,
+ .pix_fmts = pix_fmts_8bit_rgb,
+};
/*
- * Copyright (C) 2005 Ole André Vadla Ravnås <oleavr@gmail.com>
+ * Copyright (C) 2005 Ole André Vadla Ravnås <oleavr@gmail.com>
* Copyright (C) 2008 Ramiro Polla
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
.init = ff_MPV_encode_init,
.encode2 = ff_MPV_encode_picture,
.close = ff_MPV_encode_end,
- .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_NONE},
- .long_name= NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
+ .pix_fmts = (const enum PixelFormat[]){
+ PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_NONE
+ },
+ .long_name = NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
};
+
+AVCodec ff_amv_encoder = {
+ .name = "amv",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = CODEC_ID_AMV,
+ .priv_data_size = sizeof(MpegEncContext),
+ .init = ff_MPV_encode_init,
+ .encode2 = amv_encode_picture,
+ .close = ff_MPV_encode_end,
+ .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_NONE},
+ .long_name = NULL_IF_CONFIG_SMALL("AMV Video"),
+};
mpeg12_class(2)
AVCodec ff_mpeg1video_encoder = {
- .name = "mpeg1video",
- .type = AVMEDIA_TYPE_VIDEO,
- .id = CODEC_ID_MPEG1VIDEO,
- .priv_data_size = sizeof(MpegEncContext),
- .init = encode_init,
- .encode2 = ff_MPV_encode_picture,
- .close = ff_MPV_encode_end,
- .supported_framerates= avpriv_frame_rate_tab+1,
- .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
- .capabilities= CODEC_CAP_DELAY,
- .long_name= NULL_IF_CONFIG_SMALL("MPEG-1 video"),
- .priv_class = &mpeg1_class,
+ .name = "mpeg1video",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = CODEC_ID_MPEG1VIDEO,
+ .priv_data_size = sizeof(MpegEncContext),
+ .init = encode_init,
+ .encode2 = ff_MPV_encode_picture,
+ .close = ff_MPV_encode_end,
+ .supported_framerates = avpriv_frame_rate_tab+1,
+ .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_YUV420P,
+ PIX_FMT_NONE },
- .capabilities = CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
++ .capabilities = CODEC_CAP_DELAY,
+ .long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video"),
+ .priv_class = &mpeg1_class,
};
AVCodec ff_mpeg2video_encoder = {
{ FF_PROFILE_MPEG4_ADVANCED_SIMPLE, "Advanced Simple Profile" },
};
+static const AVOption mpeg4_options[] = {
+ {"quarter_sample", "1/4 subpel MC", offsetof(MpegEncContext, quarter_sample), FF_OPT_TYPE_INT, {.dbl = 0}, 0, 1, 0},
+ {"divx_packed", "divx style packed b frames", offsetof(MpegEncContext, divx_packed), FF_OPT_TYPE_INT, {.dbl = 0}, 0, 1, 0},
+ {NULL}
+};
+
+static const AVClass mpeg4_class = {
+ "MPEG4 Video Decoder",
+ av_default_item_name,
+ mpeg4_options,
+ LIBAVUTIL_VERSION_INT,
+};
+
+static const AVClass mpeg4_vdpau_class = {
+ "MPEG4 Video VDPAU Decoder",
+ av_default_item_name,
+ mpeg4_options,
+ LIBAVUTIL_VERSION_INT,
+};
+
AVCodec ff_mpeg4_decoder = {
- .name = "mpeg4",
- .type = AVMEDIA_TYPE_VIDEO,
- .id = CODEC_ID_MPEG4,
- .priv_data_size = sizeof(MpegEncContext),
- .init = decode_init,
- .close = ff_h263_decode_end,
- .decode = ff_h263_decode_frame,
- .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_FRAME_THREADS,
- .flush= ff_mpeg_flush,
- .max_lowres= 3,
- .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
- .pix_fmts= ff_hwaccel_pixfmt_list_420,
- .profiles = NULL_IF_CONFIG_SMALL(mpeg4_video_profiles),
- .update_thread_context= ONLY_IF_THREADS_ENABLED(ff_mpeg_update_thread_context),
+ .name = "mpeg4",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = CODEC_ID_MPEG4,
+ .priv_data_size = sizeof(MpegEncContext),
+ .init = decode_init,
+ .close = ff_h263_decode_end,
+ .decode = ff_h263_decode_frame,
+ .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 |
+ CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY |
+ CODEC_CAP_FRAME_THREADS,
+ .flush = ff_mpeg_flush,
+ .max_lowres = 3,
+ .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
+ .pix_fmts = ff_hwaccel_pixfmt_list_420,
+ .profiles = NULL_IF_CONFIG_SMALL(mpeg4_video_profiles),
+ .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_mpeg_update_thread_context),
+ .priv_class = &mpeg4_class,
};
.init = decode_init,
.close = ff_h263_decode_end,
.decode = ff_h263_decode_frame,
- .capabilities = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
- .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 (VDPAU)"),
- .pix_fmts= (const enum PixelFormat[]){PIX_FMT_VDPAU_MPEG4, PIX_FMT_NONE},
- .priv_class = &mpeg4_vdpau_class,
+ .capabilities = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY |
+ CODEC_CAP_HWACCEL_VDPAU,
+ .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 (VDPAU)"),
+ .pix_fmts = (const enum PixelFormat[]){ PIX_FMT_VDPAU_MPEG4,
+ PIX_FMT_NONE },
++ .priv_class = &mpeg4_vdpau_class,
};
#endif
.priv_data_size = sizeof(PNMContext),
.init = ff_pnm_init,
.encode2 = pam_encode_frame,
- .pix_fmts = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGBA, PIX_FMT_RGB48BE, PIX_FMT_RGBA64BE, PIX_FMT_GRAY8, PIX_FMT_GRAY8A, PIX_FMT_GRAY16BE, PIX_FMT_MONOBLACK, PIX_FMT_NONE},
- .long_name = NULL_IF_CONFIG_SMALL("PAM (Portable AnyMap) image"),
+ .pix_fmts = (const enum PixelFormat[]){
- PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_GRAY8, PIX_FMT_MONOWHITE,
- PIX_FMT_NONE
++ PIX_FMT_RGB24, PIX_FMT_RGBA, PIX_FMT_RGB48BE, PIX_FMT_RGBA64BE, PIX_FMT_GRAY8, PIX_FMT_GRAY8A, PIX_FMT_GRAY16BE, PIX_FMT_MONOBLACK, PIX_FMT_NONE
+ },
+ .long_name = NULL_IF_CONFIG_SMALL("PAM (Portable AnyMap) image"),
};
.init = init_decoder,
.close = close_decoder,
.decode = decode,
- .long_name = NULL_IF_CONFIG_SMALL("HDMV Presentation Graphic Stream subtitles"),
+ .long_name = NULL_IF_CONFIG_SMALL("HDMV Presentation Graphic Stream subtitles"),
+ .priv_class = &pgsdec_class,
};
.priv_data_size = sizeof(PNGEncContext),
.init = png_enc_init,
.encode2 = encode_frame,
- .pix_fmts= (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGBA,
- PIX_FMT_RGB48BE, PIX_FMT_RGBA64BE,
- PIX_FMT_PAL8,
- PIX_FMT_GRAY8, PIX_FMT_GRAY8A,
- PIX_FMT_GRAY16BE,
- PIX_FMT_MONOBLACK, PIX_FMT_NONE},
- .long_name= NULL_IF_CONFIG_SMALL("PNG (Portable Network Graphics) image"),
+ .pix_fmts = (const enum PixelFormat[]){
- PIX_FMT_RGB24, PIX_FMT_RGB32, PIX_FMT_PAL8, PIX_FMT_GRAY8,
++ PIX_FMT_RGB24, PIX_FMT_RGBA,
++ PIX_FMT_RGB48BE, PIX_FMT_RGBA64BE,
++ PIX_FMT_PAL8,
++ PIX_FMT_GRAY8, PIX_FMT_GRAY8A,
++ PIX_FMT_GRAY16BE,
+ PIX_FMT_MONOBLACK, PIX_FMT_NONE
+ },
+ .long_name = NULL_IF_CONFIG_SMALL("PNG (Portable Network Graphics) image"),
};
.init = qtrle_encode_init,
.encode2 = qtrle_encode_frame,
.close = qtrle_encode_end,
- .pix_fmts = (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGB555BE, PIX_FMT_ARGB, PIX_FMT_GRAY8, PIX_FMT_NONE},
- .long_name = NULL_IF_CONFIG_SMALL("QuickTime Animation (RLE) video"),
+ .pix_fmts = (const enum PixelFormat[]){
- PIX_FMT_RGB24, PIX_FMT_RGB555BE, PIX_FMT_ARGB, PIX_FMT_NONE
++ PIX_FMT_RGB24, PIX_FMT_RGB555BE, PIX_FMT_ARGB, PIX_FMT_GRAY8, PIX_FMT_NONE
+ },
+ .long_name = NULL_IF_CONFIG_SMALL("QuickTime Animation (RLE) video"),
};
.close = decode_close,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1,
- .long_name = NULL_IF_CONFIG_SMALL("AJA Kona 10-bit RGB Codec"),
+ .long_name = NULL_IF_CONFIG_SMALL("AJA Kona 10-bit RGB Codec"),
};
#endif
+#if CONFIG_AVRP_DECODER
+AVCodec ff_avrp_decoder = {
+ .name = "avrp",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = CODEC_ID_AVRP,
+ .init = decode_init,
+ .close = decode_close,
+ .decode = decode_frame,
+ .capabilities = CODEC_CAP_DR1,
+ .long_name = NULL_IF_CONFIG_SMALL("Avid 1:1 10-bit RGB Packer"),
+};
+#endif
.init = raw_init_decoder,
.close = raw_close_decoder,
.decode = raw_decode,
- .long_name = NULL_IF_CONFIG_SMALL("raw video"),
- .priv_class= &class,
+ .long_name = NULL_IF_CONFIG_SMALL("raw video"),
++ .priv_class = &class,
};
.priv_data_size = sizeof(SgiContext),
.init = encode_init,
.encode2 = encode_frame,
- .pix_fmts= (const enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGBA,
- PIX_FMT_RGB48LE, PIX_FMT_RGB48BE,
- PIX_FMT_RGBA64LE, PIX_FMT_RGBA64BE,
- PIX_FMT_GRAY16LE, PIX_FMT_GRAY16BE,
- PIX_FMT_GRAY8, PIX_FMT_NONE},
- .long_name= NULL_IF_CONFIG_SMALL("SGI image"),
+ .pix_fmts = (const enum PixelFormat[]){
- PIX_FMT_RGB24, PIX_FMT_RGBA, PIX_FMT_GRAY8, PIX_FMT_NONE
++ PIX_FMT_RGB24, PIX_FMT_RGBA,
++ PIX_FMT_RGB48LE, PIX_FMT_RGB48BE,
++ PIX_FMT_RGBA64LE, PIX_FMT_RGBA64BE,
++ PIX_FMT_GRAY16LE, PIX_FMT_GRAY16BE,
++ PIX_FMT_GRAY8, PIX_FMT_NONE
+ },
+ .long_name = NULL_IF_CONFIG_SMALL("SGI image"),
};
.id = CODEC_ID_TIFF,
.priv_data_size = sizeof(TiffEncoderContext),
.encode2 = encode_frame,
- .pix_fmts =
- (const enum PixelFormat[]) {PIX_FMT_RGB24, PIX_FMT_PAL8, PIX_FMT_GRAY8,
- PIX_FMT_MONOBLACK, PIX_FMT_MONOWHITE,
- PIX_FMT_YUV420P, PIX_FMT_YUV422P,
- PIX_FMT_YUV444P, PIX_FMT_YUV410P,
- PIX_FMT_YUV411P, PIX_FMT_RGB48LE,
- PIX_FMT_RGBA, PIX_FMT_RGBA64LE, PIX_FMT_NONE},
- .long_name = NULL_IF_CONFIG_SMALL("TIFF image"),
+ .pix_fmts = (const enum PixelFormat[]) {
+ PIX_FMT_RGB24, PIX_FMT_PAL8, PIX_FMT_GRAY8,
+ PIX_FMT_MONOBLACK, PIX_FMT_MONOWHITE,
+ PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P,
- PIX_FMT_YUV410P, PIX_FMT_YUV411P,
++ PIX_FMT_YUV410P, PIX_FMT_YUV411P, PIX_FMT_RGB48LE,
++ PIX_FMT_RGBA, PIX_FMT_RGBA64LE,
+ PIX_FMT_NONE
+ },
+ .long_name = NULL_IF_CONFIG_SMALL("TIFF image"),
.priv_class = &tiffenc_class,
};
.close = decode_close,
.decode = decode_frame,
.capabilities = CODEC_CAP_DR1,
- .long_name = NULL_IF_CONFIG_SMALL("Uncompressed 4:2:2 10-bit"),
+ .long_name = NULL_IF_CONFIG_SMALL("Uncompressed 4:2:2 10-bit"),
+ .priv_class = &v210dec_class,
};
.close = wmv2_decode_end,
.decode = ff_h263_decode_frame,
.capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
- .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 8"),
- .pix_fmts= ff_pixfmt_list_420,
- .max_lowres = 3,
+ .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 8"),
+ .pix_fmts = ff_pixfmt_list_420,
};