From: Helge Norberg Date: Tue, 21 Oct 2014 12:22:10 +0000 (+0200) Subject: Updated to ffmpeg version 2.2.3 X-Git-Tag: 2.1.0_Beta1~401 X-Git-Url: https://git.sesse.net/?a=commitdiff_plain;h=5fd24a828ae2a6ed177b392231dcffbcbc312d06;p=casparcg Updated to ffmpeg version 2.2.3 --- diff --git a/dependencies64/ffmpeg/README.txt b/dependencies64/ffmpeg/README.txt new file mode 100644 index 000000000..2e1282951 --- /dev/null +++ b/dependencies64/ffmpeg/README.txt @@ -0,0 +1,107 @@ +This is a FFmpeg Win64 shared build by Kyle Schwarz. + +Zeranoe's FFmpeg Builds Home Page: + +This build was compiled on: Jun 19 2014, at: 20:36:23 + +FFmpeg version: 2.2.3 + libavutil 52. 66.100 / 52. 66.100 + libavcodec 55. 52.102 / 55. 52.102 + libavformat 55. 33.100 / 55. 33.100 + libavdevice 55. 10.100 / 55. 10.100 + libavfilter 4. 2.100 / 4. 2.100 + libswscale 2. 5.102 / 2. 5.102 + libswresample 0. 18.100 / 0. 18.100 + libpostproc 52. 3.100 / 52. 3.100 + +This FFmpeg build was configured with: + --disable-static + --enable-shared + --enable-gpl + --enable-version3 + --disable-w32threads + --enable-avisynth + --enable-bzlib + --enable-fontconfig + --enable-frei0r + --enable-gnutls + --enable-iconv + --enable-libass + --enable-libbluray + --enable-libcaca + --enable-libfreetype + --enable-libgme + --enable-libgsm + --enable-libilbc + --enable-libmodplug + --enable-libmp3lame + --enable-libopencore-amrnb + --enable-libopencore-amrwb + --enable-libopenjpeg + --enable-libopus + --enable-librtmp + --enable-libschroedinger + --enable-libsoxr + --enable-libspeex + --enable-libtheora + --enable-libtwolame + --enable-libvidstab + --enable-libvo-aacenc + --enable-libvo-amrwbenc + --enable-libvorbis + --enable-libvpx + --enable-libwavpack + --enable-libwebp + --enable-libx264 + --enable-libx265 + --enable-libxavs + --enable-libxvid + --enable-decklink + --enable-zlib + +This build was compiled with the following external libraries: + bzip2 1.0.6 + Fontconfig 2.11.1 + Frei0r 20130909-git-10d8360 + 
GnuTLS 3.2.15 + libiconv 1.14 + libass 0.10.2 + libbluray 0.6.0 + libcaca 0.99.beta18 + FreeType 2.5.3 + Game Music Emu 0.6.0 + GSM 1.0.13-4 + iLBC 20120913-git-b5f9b10 + Modplug-XMMS 0.8.8.4 + LAME 3.99.5 + OpenCORE AMR 0.1.3 + OpenJPEG 1.5.1 + Opus 1.1 + RTMPDump 20140302-git-79459a2 + Schroedinger 1.0.11 + libsoxr 0.1.1 + Speex 1.2rc1 + Theora 1.1.1 + TwoLAME 0.3.13 + vid.stab 0.98 + VisualOn AAC 0.1.3 + VisualOn AMR-WB 0.1.2 + Vorbis 1.3.4 + vpx 1.3.0 + WavPack 4.70.0 + WebP 0.4.0 + x264 20140422-git-ac76440 + x265 1.1 + XAVS svn-r55 + Xvid 1.3.3 + zlib 1.2.8 + +The source code for this FFmpeg build can be found at: + +This build was compiled on Debian jessie/sid (64-bit): + +GCC 4.8.3 was used to compile this FFmpeg build: + +This build was compiled using the MinGW-w64 toolchain: + +Licenses for each library can be found in the 'licenses' folder. diff --git a/dependencies64/ffmpeg/bin/avcodec-55.dll b/dependencies64/ffmpeg/bin/avcodec-55.dll index 256dd898b..4525685b1 100644 Binary files a/dependencies64/ffmpeg/bin/avcodec-55.dll and b/dependencies64/ffmpeg/bin/avcodec-55.dll differ diff --git a/dependencies64/ffmpeg/bin/avdevice-55.dll b/dependencies64/ffmpeg/bin/avdevice-55.dll index 6067d113a..a67faa687 100644 Binary files a/dependencies64/ffmpeg/bin/avdevice-55.dll and b/dependencies64/ffmpeg/bin/avdevice-55.dll differ diff --git a/dependencies64/ffmpeg/bin/avfilter-3.dll b/dependencies64/ffmpeg/bin/avfilter-3.dll deleted file mode 100644 index 0f5d8a4ac..000000000 Binary files a/dependencies64/ffmpeg/bin/avfilter-3.dll and /dev/null differ diff --git a/dependencies64/ffmpeg/bin/avfilter-4.dll b/dependencies64/ffmpeg/bin/avfilter-4.dll new file mode 100644 index 000000000..384f126e0 Binary files /dev/null and b/dependencies64/ffmpeg/bin/avfilter-4.dll differ diff --git a/dependencies64/ffmpeg/bin/avformat-55.dll b/dependencies64/ffmpeg/bin/avformat-55.dll index bb8bac6cb..8585917fd 100644 Binary files a/dependencies64/ffmpeg/bin/avformat-55.dll and 
b/dependencies64/ffmpeg/bin/avformat-55.dll differ diff --git a/dependencies64/ffmpeg/bin/avutil-52.dll b/dependencies64/ffmpeg/bin/avutil-52.dll index 961e1437f..5bd6f5ca9 100644 Binary files a/dependencies64/ffmpeg/bin/avutil-52.dll and b/dependencies64/ffmpeg/bin/avutil-52.dll differ diff --git a/dependencies64/ffmpeg/bin/ffmpeg.exe b/dependencies64/ffmpeg/bin/ffmpeg.exe new file mode 100644 index 000000000..19cac2a24 Binary files /dev/null and b/dependencies64/ffmpeg/bin/ffmpeg.exe differ diff --git a/dependencies64/ffmpeg/bin/ffplay.exe b/dependencies64/ffmpeg/bin/ffplay.exe new file mode 100644 index 000000000..1663a26f3 Binary files /dev/null and b/dependencies64/ffmpeg/bin/ffplay.exe differ diff --git a/dependencies64/ffmpeg/bin/ffprobe.exe b/dependencies64/ffmpeg/bin/ffprobe.exe new file mode 100644 index 000000000..97a418e7e Binary files /dev/null and b/dependencies64/ffmpeg/bin/ffprobe.exe differ diff --git a/dependencies64/ffmpeg/bin/postproc-52.dll b/dependencies64/ffmpeg/bin/postproc-52.dll index e3885ce26..c6b889fca 100644 Binary files a/dependencies64/ffmpeg/bin/postproc-52.dll and b/dependencies64/ffmpeg/bin/postproc-52.dll differ diff --git a/dependencies64/ffmpeg/bin/swresample-0.dll b/dependencies64/ffmpeg/bin/swresample-0.dll index efc021080..9aaa85039 100644 Binary files a/dependencies64/ffmpeg/bin/swresample-0.dll and b/dependencies64/ffmpeg/bin/swresample-0.dll differ diff --git a/dependencies64/ffmpeg/bin/swscale-2.dll b/dependencies64/ffmpeg/bin/swscale-2.dll index 62823440a..d70864f54 100644 Binary files a/dependencies64/ffmpeg/bin/swscale-2.dll and b/dependencies64/ffmpeg/bin/swscale-2.dll differ diff --git a/dependencies64/ffmpeg/doc/developer.html b/dependencies64/ffmpeg/doc/developer.html new file mode 100644 index 000000000..494cdb951 --- /dev/null +++ b/dependencies64/ffmpeg/doc/developer.html @@ -0,0 +1,877 @@ + + + + + +FFmpeg documentation : Developer + + + + + + + + + + +
+
+ + +

Developer Documentation

+ + +

Table of Contents

+ + + +

1. Developers Guide

+ + +

1.1 Notes for external developers

+ +

This document is mostly useful for internal FFmpeg developers. +External developers who need to use the API in their application should +refer to the API doxygen documentation in the public headers, and +check the examples in ‘doc/examples’ and in the source code to +see how the public API is employed. +

+

You can use the FFmpeg libraries in your commercial program, but you +are encouraged to publish any patch you make. In this case the +best way to proceed is to send your patches to the ffmpeg-devel +mailing list following the guidelines illustrated in the remainder of +this document. +

+

For more detailed legal information about the use of FFmpeg in +external programs read the ‘LICENSE’ file in the source tree and +consult http://ffmpeg.org/legal.html. +

+ +

1.2 Contributing

+ +

There are 3 ways by which code gets into ffmpeg. +

    +
  • Submitting Patches to the main developer mailing list + see Submitting patches for details. +
  • Directly committing changes to the main tree. +
  • Committing changes to a git clone, for example on github.com or + gitorious.org. And asking us to merge these changes. +
+ +

Whichever way, changes should be reviewed by the maintainer of the code +before they are committed. And they should follow the Coding Rules. +The developer making the commit and the author are responsible for their changes +and should try to fix issues their commit causes. +

+

+

+

1.3 Coding Rules

+ + +

1.3.1 Code formatting conventions

+ +

There are the following guidelines regarding the indentation in files: +

+
    +
  • +Indent size is 4. + +
  • +The TAB character is forbidden outside of Makefiles as is any +form of trailing whitespace. Commits containing either will be +rejected by the git repository. + +
  • +You should try to limit your code lines to 80 characters; however, do so if +and only if this improves readability. +
+

The presentation is one inspired by ’indent -i4 -kr -nut’. +

+

The main priority in FFmpeg is simplicity and small code size in order to +minimize the bug count. +

+ +

1.3.2 Comments

+

Use the JavaDoc/Doxygen format (see examples below) so that code documentation +can be generated automatically. All nontrivial functions should have a comment +above them explaining what the function does, even if it is just one sentence. +All structures and their member variables should be documented, too. +

+

Avoid Qt-style and similar Doxygen syntax with ! in it, i.e. replace +//! with /// and similar. Also @ syntax should be employed +for markup commands, i.e. use @param and not \param. +

+
 
/**
+ * @file
+ * MPEG codec.
+ * @author ...
+ */
+
+/**
+ * Summary sentence.
+ * more text ...
+ * ...
+ */
+typedef struct Foobar {
+    int var1; /**< var1 description */
+    int var2; ///< var2 description
+    /** var3 description */
+    int var3;
+} Foobar;
+
+/**
+ * Summary sentence.
+ * more text ...
+ * ...
+ * @param my_parameter description of my_parameter
+ * @return return value description
+ */
+int myfunc(int my_parameter)
+...
+
+ + +

1.3.3 C language features

+ +

FFmpeg is programmed in the ISO C90 language with a few additional +features from ISO C99, namely: +

+
    +
  • +the ‘inline’ keyword; + +
  • +‘//’ comments; + +
  • +designated struct initializers (‘struct s x = { .i = 17 };’) + +
  • +compound literals (‘x = (struct s) { 17, 23 };’) +
+ +

These features are supported by all compilers we care about, so we will not +accept patches to remove their use unless they absolutely do not impair +clarity and performance. +

+

All code must compile with recent versions of GCC and a number of other +currently supported compilers. To ensure compatibility, please do not use +additional C99 features or GCC extensions. Especially watch out for: +

+
    +
  • +mixing statements and declarations; + +
  • +‘long long’ (use ‘int64_t’ instead); + +
  • +‘__attribute__’ not protected by ‘#ifdef __GNUC__’ or similar; + +
  • +GCC statement expressions (‘(x = ({ int y = 4; y; })’). +
+ + +

1.3.4 Naming conventions

+

All names should be composed with underscores (_), not CamelCase. For example, +‘avfilter_get_video_buffer’ is an acceptable function name and +‘AVFilterGetVideo’ is not. The exceptions to this are type names, like +for example structs and enums; they should always be in CamelCase. +

+

There are the following conventions for naming variables and functions: +

+
    +
  • +For local variables no prefix is required. + +
  • +For file-scope variables and functions declared as static, no prefix +is required. + +
  • +For variables and functions visible outside of file scope, but only used +internally by a library, an ff_ prefix should be used, +e.g. ‘ff_w64_demuxer’. + +
  • +For variables and functions visible outside of file scope, used internally +across multiple libraries, use avpriv_ as prefix, for example, +‘avpriv_aac_parse_header’. + +
  • +Each library has its own prefix for public symbols, in addition to the +commonly used av_ (avformat_ for libavformat, +avcodec_ for libavcodec, swr_ for libswresample, etc). +Check the existing code and choose names accordingly. +Note that some symbols without these prefixes are also exported for +retro-compatibility reasons. These exceptions are declared in the +lib<name>/lib<name>.v files. +
+ +

Furthermore, name space reserved for the system should not be invaded. +Identifiers ending in _t are reserved by +POSIX. +Also avoid names starting with __ or _ followed by an uppercase +letter as they are reserved by the C standard. Names starting with _ +are reserved at the file level and may not be used for externally visible +symbols. If in doubt, just avoid names starting with _ altogether. +

+ +

1.3.5 Miscellaneous conventions

+ +
    +
  • +fprintf and printf are forbidden in libavformat and libavcodec, +please use av_log() instead. + +
  • +Casts should be used only when necessary. Unneeded parentheses +should also be avoided if they don’t make the code easier to understand. +
+ + +

1.3.6 Editor configuration

+

In order to configure Vim to follow FFmpeg formatting conventions, paste +the following snippet into your ‘.vimrc’: +

 
" indentation rules for FFmpeg: 4 spaces, no tabs
+set expandtab
+set shiftwidth=4
+set softtabstop=4
+set cindent
+set cinoptions=(0
+" Allow tabs in Makefiles.
+autocmd FileType make,automake set noexpandtab shiftwidth=8 softtabstop=8
+" Trailing whitespace and tabs are forbidden, so highlight them.
+highlight ForbiddenWhitespace ctermbg=red guibg=red
+match ForbiddenWhitespace /\s\+$\|\t/
+" Do not highlight spaces at the end of line while typing on that line.
+autocmd InsertEnter * match ForbiddenWhitespace /\t\|\s\+\%#\@<!$/
+
+ +

For Emacs, add these roughly equivalent lines to your ‘.emacs.d/init.el’: +

 
(c-add-style "ffmpeg"
+             '("k&r"
+               (c-basic-offset . 4)
+               (indent-tabs-mode . nil)
+               (show-trailing-whitespace . t)
+               (c-offsets-alist
+                (statement-cont . (c-lineup-assignments +)))
+               )
+             )
+(setq c-default-style "ffmpeg")
+
+ + +

1.4 Development Policy

+ +
    +
  1. +Contributions should be licensed under the +LGPL 2.1, +including an "or any later version" clause, or, if you prefer +a gift-style license, the +ISC or +MIT license. +GPL 2 including +an "or any later version" clause is also acceptable, but LGPL is +preferred. +If you add a new file, give it a proper license header. Do not copy and +paste it from a random place, use an existing file as template. + +
  2. +You must not commit code which breaks FFmpeg! (Meaning unfinished but +enabled code which breaks compilation or compiles but does not work or +breaks the regression tests) +You can commit unfinished stuff (for testing etc), but it must be disabled +(#ifdef etc) by default so it does not interfere with other developers’ +work. + +
  3. +The commit message should have a short first line in the form of +a ‘topic: short description’ as a header, separated by a newline +from the body consisting of an explanation of why the change is necessary. +If the commit fixes a known bug on the bug tracker, the commit message +should include its bug ID. Referring to the issue on the bug tracker does +not exempt you from writing an excerpt of the bug in the commit message. + +
  4. +You do not have to over-test things. If it works for you, and you think it +should work for others, then commit. If your code has problems +(portability, triggers compiler bugs, unusual environment etc) they will be +reported and eventually fixed. + +
  5. +Do not commit unrelated changes together, split them into self-contained +pieces. Also do not forget that if part B depends on part A, but A does not +depend on B, then A can and should be committed first and separate from B. +Keeping changes well split into self-contained parts makes reviewing and +understanding them on the commit log mailing list easier. This also helps +in case of debugging later on. +Also if you have doubts about splitting or not splitting, do not hesitate to +ask/discuss it on the developer mailing list. + +
  6. +Do not change behavior of the programs (renaming options etc) or public +API or ABI without first discussing it on the ffmpeg-devel mailing list. +Do not remove functionality from the code. Just improve! + +

    Note: Redundant code can be removed. +

    +
  7. +Do not commit changes to the build system (Makefiles, configure script) +which change behavior, defaults etc, without asking first. The same +applies to compiler warning fixes, trivial looking fixes and to code +maintained by other developers. We usually have a reason for doing things +the way we do. Send your changes as patches to the ffmpeg-devel mailing +list, and if the code maintainers say OK, you may commit. This does not +apply to files you wrote and/or maintain. + +
  8. +We refuse source indentation and other cosmetic changes if they are mixed +with functional changes, such commits will be rejected and removed. Every +developer has his own indentation style, you should not change it. Of course +if you (re)write something, you can use your own style, even though we would +prefer if the indentation throughout FFmpeg was consistent (Many projects +force a given indentation style - we do not.). If you really need to make +indentation changes (try to avoid this), separate them strictly from real +changes. + +

    NOTE: If you had to put if(){ .. } over a large (> 5 lines) chunk of code, +then either do NOT change the indentation of the inner part within (do not +move it to the right)! or do so in a separate commit +

    +
  9. +Always fill out the commit log message. Describe in a few lines what you +changed and why. You can refer to mailing list postings if you fix a +particular bug. Comments such as "fixed!" or "Changed it." are unacceptable. +Recommended format: +area changed: Short 1 line description + +

    details describing what and why and giving references. +

    +
  10. +Make sure the author of the commit is set correctly. (see git commit –author) +If you apply a patch, send an +answer to ffmpeg-devel (or wherever you got the patch from) saying that +you applied the patch. + +
  11. +When applying patches that have been discussed (at length) on the mailing +list, reference the thread in the log message. + +
  12. +Do NOT commit to code actively maintained by others without permission. +Send a patch to ffmpeg-devel instead. If no one answers within a reasonable +timeframe (12h for build failures and security fixes, 3 days small changes, +1 week for big patches) then commit your patch if you think it is OK. +Also note, the maintainer can simply ask for more time to review! + +
  13. +Subscribe to the ffmpeg-cvslog mailing list. The diffs of all commits +are sent there and reviewed by all the other developers. Bugs and possible +improvements or general questions regarding commits are discussed there. We +expect you to react if problems with your code are uncovered. + +
  14. +Update the documentation if you change behavior or add features. If you are +unsure how best to do this, send a patch to ffmpeg-devel, the documentation +maintainer(s) will review and commit your stuff. + +
  15. +Try to keep important discussions and requests (also) on the public +developer mailing list, so that all developers can benefit from them. + +
  16. +Never write to unallocated memory, never write over the end of arrays, +always check values read from some untrusted source before using them +as array index or other risky things. + +
  17. +Remember to check if you need to bump versions for the specific libav* +parts (libavutil, libavcodec, libavformat) you are changing. You need +to change the version integer. +Incrementing the first component means no backward compatibility to +previous versions (e.g. removal of a function from the public API). +Incrementing the second component means backward compatible change +(e.g. addition of a function to the public API or extension of an +existing data structure). +Incrementing the third component means a noteworthy binary compatible +change (e.g. encoder bug fix that matters for the decoder). The third +component always starts at 100 to distinguish FFmpeg from Libav. + +
  18. +Compiler warnings indicate potential bugs or code with bad style. If a type of +warning always points to correct and clean code, that warning should +be disabled, not the code changed. +Thus the remaining warnings can either be bugs or correct code. +If it is a bug, the bug has to be fixed. If it is not, the code should +be changed to not generate a warning unless that causes a slowdown +or obfuscates the code. + +
  19. +Make sure that no parts of the codebase that you maintain are missing from the +‘MAINTAINERS’ file. If something that you want to maintain is missing add it with +your name after it. +If at some point you no longer want to maintain some code, then please help +finding a new maintainer and also don’t forget updating the ‘MAINTAINERS’ file. +
+ +

We think our rules are not too hard. If you have comments, contact us. +

+

+

+

1.5 Submitting patches

+ +

First, read the Coding Rules above if you did not yet, in particular +the rules regarding patch submission. +

+

When you submit your patch, please use git format-patch or +git send-email. We cannot read other diffs :-) +

+

Also please do not submit a patch which contains several unrelated changes. +Split it into separate, self-contained pieces. This does not mean splitting +file by file. Instead, make the patch as small as possible while still +keeping it as a logical unit that contains an individual change, even +if it spans multiple files. This makes reviewing your patches much easier +for us and greatly increases your chances of getting your patch applied. +

+

Use the patcheck tool of FFmpeg to check your patch. +The tool is located in the tools directory. +

+

Run the Regression tests before submitting a patch in order to verify +it does not cause unexpected problems. +

+

It also helps quite a bit if you tell us what the patch does (for example +’replaces lrint by lrintf’), and why (for example ’*BSD isn’t C99 compliant +and has no lrint()’) +

+

Also please if you send several patches, send each patch as a separate mail, +do not attach several unrelated patches to the same mail. +

+

Patches should be posted to the +ffmpeg-devel +mailing list. Use git send-email when possible since it will properly +send patches without requiring extra care. If you cannot, then send patches +as base64-encoded attachments, so your patch is not trashed during +transmission. +

+

Your patch will be reviewed on the mailing list. You will likely be asked +to make some changes and are expected to send in an improved version that +incorporates the requests from the review. This process may go through +several iterations. Once your patch is deemed good enough, some developer +will pick it up and commit it to the official FFmpeg tree. +

+

Give us a few days to react. But if some time passes without reaction, +send a reminder by email. Your patch should eventually be dealt with. +

+ + +

1.6 New codecs or formats checklist

+ +
    +
  1. +Did you use av_cold for codec initialization and close functions? + +
  2. +Did you add a long_name under NULL_IF_CONFIG_SMALL to the AVCodec or +AVInputFormat/AVOutputFormat struct? + +
  3. +Did you bump the minor version number (and reset the micro version +number) in ‘libavcodec/version.h’ or ‘libavformat/version.h’? + +
  4. +Did you register it in ‘allcodecs.c’ or ‘allformats.c’? + +
  5. +Did you add the AVCodecID to ‘avcodec.h’? +When adding new codec IDs, also add an entry to the codec descriptor +list in ‘libavcodec/codec_desc.c’. + +
  6. +If it has a FourCC, did you add it to ‘libavformat/riff.c’, +even if it is only a decoder? + +
  7. +Did you add a rule to compile the appropriate files in the Makefile? +Remember to do this even if you’re just adding a format to a file that is +already being compiled by some other rule, like a raw demuxer. + +
  8. +Did you add an entry to the table of supported formats or codecs in +‘doc/general.texi’? + +
  9. +Did you add an entry in the Changelog? + +
  10. +If it depends on a parser or a library, did you add that dependency in +configure? + +
  11. +Did you git add the appropriate files before committing? + +
  12. +Did you make sure it compiles standalone, i.e. with +configure --disable-everything --enable-decoder=foo +(or --enable-demuxer or whatever your component is)? +
+ + + +

1.7 patch submission checklist

+ +
    +
  1. +Does make fate pass with the patch applied? + +
  2. +Was the patch generated with git format-patch or send-email? + +
  3. +Did you sign off your patch? (git commit -s) +See http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=blob_plain;f=Documentation/SubmittingPatches for the meaning +of sign off. + +
  4. +Did you provide a clear git commit log message? + +
  5. +Is the patch against latest FFmpeg git master branch? + +
  6. +Are you subscribed to ffmpeg-devel? +(the list is subscribers only due to spam) + +
  7. +Have you checked that the changes are minimal, so that the same cannot be +achieved with a smaller patch and/or simpler final code? + +
  8. +If the change is to speed critical code, did you benchmark it? + +
  9. +If you did any benchmarks, did you provide them in the mail? + +
  10. +Have you checked that the patch does not introduce buffer overflows or +other security issues? + +
  11. +Did you test your decoder or demuxer against damaged data? If no, see +tools/trasher, the noise bitstream filter, and +zzuf. Your decoder or demuxer +should not crash, end in a (near) infinite loop, or allocate ridiculous +amounts of memory when fed damaged data. + +
  12. +Does the patch not mix functional and cosmetic changes? + +
  13. +Did you add tabs or trailing whitespace to the code? Both are forbidden. + +
  14. +Is the patch attached to the email you send? + +
  15. +Is the mime type of the patch correct? It should be text/x-diff or +text/x-patch or at least text/plain and not application/octet-stream. + +
  16. +If the patch fixes a bug, did you provide a verbose analysis of the bug? + +
  17. +If the patch fixes a bug, did you provide enough information, including +a sample, so the bug can be reproduced and the fix can be verified? +Note please do not attach samples >100k to mails but rather provide a +URL, you can upload to ftp://upload.ffmpeg.org + +
  18. +Did you provide a verbose summary about what the patch does change? + +
  19. +Did you provide a verbose explanation why it changes things like it does? + +
  20. +Did you provide a verbose summary of the user visible advantages and +disadvantages if the patch is applied? + +
  21. +Did you provide an example so we can verify the new feature added by the +patch easily? + +
  22. +If you added a new file, did you insert a license header? It should be +taken from FFmpeg, not randomly copied and pasted from somewhere else. + +
  23. +You should maintain alphabetical order in alphabetically ordered lists as +long as doing so does not break API/ABI compatibility. + +
  24. +Lines with similar content should be aligned vertically when doing so +improves readability. + +
  25. +Consider to add a regression test for your code. + +
  26. +If you added YASM code please check that things still work with –disable-yasm + +
  27. +Make sure you check the return values of function and return appropriate +error codes. Especially memory allocation functions like av_malloc() +are notoriously left unchecked, which is a serious problem. + +
  28. +Test your code with valgrind and or Address Sanitizer to ensure it’s free +of leaks, out of array accesses, etc. +
+ + +

1.8 Patch review process

+ +

All patches posted to ffmpeg-devel will be reviewed, unless they contain a +clear note that the patch is not for the git master branch. +Reviews and comments will be posted as replies to the patch on the +mailing list. The patch submitter then has to take care of every comment, +that can be by resubmitting a changed patch or by discussion. Resubmitted +patches will themselves be reviewed like any other patch. If at some point +a patch passes review with no comments then it is approved, that can for +simple and small patches happen immediately while large patches will generally +have to be changed and reviewed many times before they are approved. +After a patch is approved it will be committed to the repository. +

+

We will review all submitted patches, but sometimes we are quite busy so +especially for large patches this can take several weeks. +

+

If you feel that the review process is too slow and you are willing to try to +take over maintainership of the area of code you change then just clone +git master and maintain the area of code there. We will merge each area from +where its best maintained. +

+

When resubmitting patches, please do not make any significant changes +not related to the comments received during review. Such patches will +be rejected. Instead, submit significant changes or new features as +separate patches. +

+

+

+

1.9 Regression tests

+ +

Before submitting a patch (or committing to the repository), you should at least +test that you did not break anything. +

+

Running ’make fate’ accomplishes this, please see fate.html for details. +

+

[Of course, some patches may change the results of the regression tests. In +this case, the reference results of the regression tests shall be modified +accordingly]. +

+ +

1.9.1 Adding files to the fate-suite dataset

+ +

When there is no muxer or encoder available to generate test media for a +specific test then the media has to be included in the fate-suite. +First please make sure that the sample file is as small as possible to test the +respective decoder or demuxer sufficiently. Large files increase network +bandwidth and disk space requirements. +Once you have a working fate test and fate sample, provide in the commit +message or introductory message for the patch series that you post to +the ffmpeg-devel mailing list, a direct link to download the sample media. +

+ + +

1.9.2 Visualizing Test Coverage

+ +

The FFmpeg build system allows visualizing the test coverage in an easy +manner with the coverage tools gcov/lcov. This involves +the following steps: +

+
    +
  1. + Configure to compile with instrumentation enabled: + configure --toolchain=gcov. + +
  2. + Run your test case, either manually or via FATE. This can be either + the full FATE regression suite, or any arbitrary invocation of any + front-end tool provided by FFmpeg, in any combination. + +
  3. + Run make lcov to generate coverage data in HTML format. + +
  4. + View lcov/index.html in your preferred HTML viewer. +
+ +

You can use the command make lcov-reset to reset the coverage +measurements. You will need to rerun make lcov after running a +new test. +

+ +

1.9.3 Using Valgrind

+ +

The configure script provides a shortcut for using valgrind to spot bugs +related to memory handling. Just add the option +--toolchain=valgrind-memcheck or --toolchain=valgrind-massif +to your configure line, and reasonable defaults will be set for running +FATE under the supervision of either the memcheck or the +massif tool of the valgrind suite. +

+

In case you need finer control over how valgrind is invoked, use the +--target-exec='valgrind <your_custom_valgrind_options>' option in +your configure line instead. +

+

+

+

1.10 Release process

+ +

FFmpeg maintains a set of release branches, which are the +recommended deliverable for system integrators and distributors (such as +Linux distributions, etc.). At regular times, a release +manager prepares, tests and publishes tarballs on the +http://ffmpeg.org website. +

+

There are two kinds of releases: +

+
    +
  1. +Major releases always include the latest and greatest +features and functionality. + +
  2. +Point releases are cut from release branches, +which are named release/X, with X being the release +version number. +
+ +

Note that we promise to our users that shared libraries from any FFmpeg +release never break programs that have been compiled against +previous versions of the same release series in any case! +

+

However, from time to time, we do make API changes that require adaptations +in applications. Such changes are only allowed in (new) major releases and +require further steps such as bumping library version numbers and/or +adjustments to the symbol versioning file. Please discuss such changes +on the ffmpeg-devel mailing list in time to allow forward planning. +

+

+

+

1.10.1 Criteria for Point Releases

+ +

Changes that match the following criteria are valid candidates for +inclusion into a point release: +

+
    +
  1. +Fixes a security issue, preferably identified by a CVE +number issued by http://cve.mitre.org/. + +
  2. +Fixes a documented bug in https://trac.ffmpeg.org. + +
  3. +Improves the included documentation. + +
  4. +Retains both source code and binary compatibility with previous +point releases of the same release branch. +
+ +

The order for checking the rules is (1 OR 2 OR 3) AND 4. +

+ + +

1.10.2 Release Checklist

+ +

The release process involves the following steps: +

+
    +
  1. +Ensure that the ‘RELEASE’ file contains the version number for +the upcoming release. + +
  2. +Add the release at https://trac.ffmpeg.org/admin/ticket/versions. + +
  3. +Announce the intent to do a release to the mailing list. + +
  4. +Make sure all relevant security fixes have been backported. See +https://ffmpeg.org/security.html. + +
  5. +Ensure that the FATE regression suite still passes in the release +branch on at least i386 and amd64 +(cf. Regression tests). + +
  6. +Prepare the release tarballs in bz2 and gz formats, and +supplementing files that contain gpg signatures + +
  7. +Publish the tarballs at http://ffmpeg.org/releases. Create and +push an annotated tag in the form nX, with X +containing the version number. + +
  8. +Propose and send a patch to the ffmpeg-devel mailing list +with a news entry for the website. + +
  9. +Publish the news entry. + +
  10. +Send announcement to the mailing list. +
+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/examples/Makefile b/dependencies64/ffmpeg/doc/examples/Makefile new file mode 100644 index 000000000..1553bab82 --- /dev/null +++ b/dependencies64/ffmpeg/doc/examples/Makefile @@ -0,0 +1,41 @@ +# use pkg-config for getting CFLAGS and LDLIBS +FFMPEG_LIBS= libavdevice \ + libavformat \ + libavfilter \ + libavcodec \ + libswresample \ + libswscale \ + libavutil \ + +CFLAGS += -Wall -g +CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS) +LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS) + +EXAMPLES= avio_reading \ + avcodec \ + demuxing_decoding \ + filtering_video \ + filtering_audio \ + metadata \ + muxing \ + remuxing \ + resampling_audio \ + scaling_video \ + transcode_aac \ + +OBJS=$(addsuffix .o,$(EXAMPLES)) + +# the following examples make explicit use of the math library +avcodec: LDLIBS += -lm +muxing: LDLIBS += -lm +resampling_audio: LDLIBS += -lm + +.phony: all clean-test clean + +all: $(OBJS) $(EXAMPLES) + +clean-test: + $(RM) test*.pgm test.h264 test.mp2 test.sw test.mpg + +clean: clean-test + $(RM) $(EXAMPLES) $(OBJS) diff --git a/dependencies64/ffmpeg/doc/examples/README b/dependencies64/ffmpeg/doc/examples/README new file mode 100644 index 000000000..c1ce619d3 --- /dev/null +++ b/dependencies64/ffmpeg/doc/examples/README @@ -0,0 +1,23 @@ +FFmpeg examples README +---------------------- + +Both following use cases rely on pkg-config and make, thus make sure +that you have them installed and working on your system. + + +Method 1: build the installed examples in a generic read/write user directory + +Copy to a read/write user directory and just use "make", it will link +to the libraries on your system, assuming the PKG_CONFIG_PATH is +correctly configured. + +Method 2: build the examples in-tree + +Assuming you are in the source FFmpeg checkout directory, you need to build +FFmpeg (no need to make install in any prefix). Then just run "make examples". 
+This will build the examples using the FFmpeg build system. You can clean those +examples using "make examplesclean" + +If you want to try the dedicated Makefile examples (to emulate the first +method), go into doc/examples and run a command such as +PKG_CONFIG_PATH=pc-uninstalled make. diff --git a/dependencies64/ffmpeg/doc/examples/avcodec.c b/dependencies64/ffmpeg/doc/examples/avcodec.c new file mode 100644 index 000000000..d56e6a480 --- /dev/null +++ b/dependencies64/ffmpeg/doc/examples/avcodec.c @@ -0,0 +1,658 @@ +/* + * Copyright (c) 2001 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/** + * @file + * libavcodec API use example. + * + * @example avcodec.c + * Note that libavcodec only handles codecs (mpeg, mpeg4, etc...), + * not file formats (avi, vob, mp4, mov, mkv, mxf, flv, mpegts, mpegps, etc...). 
See library 'libavformat' for the + * format handling + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#define INBUF_SIZE 4096 +#define AUDIO_INBUF_SIZE 20480 +#define AUDIO_REFILL_THRESH 4096 + +/* check that a given sample format is supported by the encoder */ +static int check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt) +{ + const enum AVSampleFormat *p = codec->sample_fmts; + + while (*p != AV_SAMPLE_FMT_NONE) { + if (*p == sample_fmt) + return 1; + p++; + } + return 0; +} + +/* just pick the highest supported samplerate */ +static int select_sample_rate(AVCodec *codec) +{ + const int *p; + int best_samplerate = 0; + + if (!codec->supported_samplerates) + return 44100; + + p = codec->supported_samplerates; + while (*p) { + best_samplerate = FFMAX(*p, best_samplerate); + p++; + } + return best_samplerate; +} + +/* select layout with the highest channel count */ +static int select_channel_layout(AVCodec *codec) +{ + const uint64_t *p; + uint64_t best_ch_layout = 0; + int best_nb_channels = 0; + + if (!codec->channel_layouts) + return AV_CH_LAYOUT_STEREO; + + p = codec->channel_layouts; + while (*p) { + int nb_channels = av_get_channel_layout_nb_channels(*p); + + if (nb_channels > best_nb_channels) { + best_ch_layout = *p; + best_nb_channels = nb_channels; + } + p++; + } + return best_ch_layout; +} + +/* + * Audio encoding example + */ +static void audio_encode_example(const char *filename) +{ + AVCodec *codec; + AVCodecContext *c= NULL; + AVFrame *frame; + AVPacket pkt; + int i, j, k, ret, got_output; + int buffer_size; + FILE *f; + uint16_t *samples; + float t, tincr; + + printf("Encode audio file %s\n", filename); + + /* find the MP2 encoder */ + codec = avcodec_find_encoder(AV_CODEC_ID_MP2); + if (!codec) { + fprintf(stderr, "Codec not found\n"); + exit(1); + } + + c = avcodec_alloc_context3(codec); + if (!c) { + fprintf(stderr, "Could not allocate audio codec context\n"); + exit(1); + } + + /* put 
sample parameters */ + c->bit_rate = 64000; + + /* check that the encoder supports s16 pcm input */ + c->sample_fmt = AV_SAMPLE_FMT_S16; + if (!check_sample_fmt(codec, c->sample_fmt)) { + fprintf(stderr, "Encoder does not support sample format %s", + av_get_sample_fmt_name(c->sample_fmt)); + exit(1); + } + + /* select other audio parameters supported by the encoder */ + c->sample_rate = select_sample_rate(codec); + c->channel_layout = select_channel_layout(codec); + c->channels = av_get_channel_layout_nb_channels(c->channel_layout); + + /* open it */ + if (avcodec_open2(c, codec, NULL) < 0) { + fprintf(stderr, "Could not open codec\n"); + exit(1); + } + + f = fopen(filename, "wb"); + if (!f) { + fprintf(stderr, "Could not open %s\n", filename); + exit(1); + } + + /* frame containing input raw audio */ + frame = av_frame_alloc(); + if (!frame) { + fprintf(stderr, "Could not allocate audio frame\n"); + exit(1); + } + + frame->nb_samples = c->frame_size; + frame->format = c->sample_fmt; + frame->channel_layout = c->channel_layout; + + /* the codec gives us the frame size, in samples, + * we calculate the size of the samples buffer in bytes */ + buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size, + c->sample_fmt, 0); + if (buffer_size < 0) { + fprintf(stderr, "Could not get sample buffer size\n"); + exit(1); + } + samples = av_malloc(buffer_size); + if (!samples) { + fprintf(stderr, "Could not allocate %d bytes for samples buffer\n", + buffer_size); + exit(1); + } + /* setup the data pointers in the AVFrame */ + ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt, + (const uint8_t*)samples, buffer_size, 0); + if (ret < 0) { + fprintf(stderr, "Could not setup audio frame\n"); + exit(1); + } + + /* encode a single tone sound */ + t = 0; + tincr = 2 * M_PI * 440.0 / c->sample_rate; + for (i = 0; i < 200; i++) { + av_init_packet(&pkt); + pkt.data = NULL; // packet data will be allocated by the encoder + pkt.size = 0; + + for (j = 0; j < 
c->frame_size; j++) { + samples[2*j] = (int)(sin(t) * 10000); + + for (k = 1; k < c->channels; k++) + samples[2*j + k] = samples[2*j]; + t += tincr; + } + /* encode the samples */ + ret = avcodec_encode_audio2(c, &pkt, frame, &got_output); + if (ret < 0) { + fprintf(stderr, "Error encoding audio frame\n"); + exit(1); + } + if (got_output) { + fwrite(pkt.data, 1, pkt.size, f); + av_free_packet(&pkt); + } + } + + /* get the delayed frames */ + for (got_output = 1; got_output; i++) { + ret = avcodec_encode_audio2(c, &pkt, NULL, &got_output); + if (ret < 0) { + fprintf(stderr, "Error encoding frame\n"); + exit(1); + } + + if (got_output) { + fwrite(pkt.data, 1, pkt.size, f); + av_free_packet(&pkt); + } + } + fclose(f); + + av_freep(&samples); + av_frame_free(&frame); + avcodec_close(c); + av_free(c); +} + +/* + * Audio decoding. + */ +static void audio_decode_example(const char *outfilename, const char *filename) +{ + AVCodec *codec; + AVCodecContext *c= NULL; + int len; + FILE *f, *outfile; + uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE]; + AVPacket avpkt; + AVFrame *decoded_frame = NULL; + + av_init_packet(&avpkt); + + printf("Decode audio file %s to %s\n", filename, outfilename); + + /* find the mpeg audio decoder */ + codec = avcodec_find_decoder(AV_CODEC_ID_MP2); + if (!codec) { + fprintf(stderr, "Codec not found\n"); + exit(1); + } + + c = avcodec_alloc_context3(codec); + if (!c) { + fprintf(stderr, "Could not allocate audio codec context\n"); + exit(1); + } + + /* open it */ + if (avcodec_open2(c, codec, NULL) < 0) { + fprintf(stderr, "Could not open codec\n"); + exit(1); + } + + f = fopen(filename, "rb"); + if (!f) { + fprintf(stderr, "Could not open %s\n", filename); + exit(1); + } + outfile = fopen(outfilename, "wb"); + if (!outfile) { + av_free(c); + exit(1); + } + + /* decode until eof */ + avpkt.data = inbuf; + avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f); + + while (avpkt.size > 0) { + int got_frame = 0; + + if (!decoded_frame) { + 
if (!(decoded_frame = av_frame_alloc())) { + fprintf(stderr, "Could not allocate audio frame\n"); + exit(1); + } + } + + len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt); + if (len < 0) { + fprintf(stderr, "Error while decoding\n"); + exit(1); + } + if (got_frame) { + /* if a frame has been decoded, output it */ + int data_size = av_samples_get_buffer_size(NULL, c->channels, + decoded_frame->nb_samples, + c->sample_fmt, 1); + if (data_size < 0) { + /* This should not occur, checking just for paranoia */ + fprintf(stderr, "Failed to calculate data size\n"); + exit(1); + } + fwrite(decoded_frame->data[0], 1, data_size, outfile); + } + avpkt.size -= len; + avpkt.data += len; + avpkt.dts = + avpkt.pts = AV_NOPTS_VALUE; + if (avpkt.size < AUDIO_REFILL_THRESH) { + /* Refill the input buffer, to avoid trying to decode + * incomplete frames. Instead of this, one could also use + * a parser, or use a proper container format through + * libavformat. */ + memmove(inbuf, avpkt.data, avpkt.size); + avpkt.data = inbuf; + len = fread(avpkt.data + avpkt.size, 1, + AUDIO_INBUF_SIZE - avpkt.size, f); + if (len > 0) + avpkt.size += len; + } + } + + fclose(outfile); + fclose(f); + + avcodec_close(c); + av_free(c); + av_frame_free(&decoded_frame); +} + +/* + * Video encoding example + */ +static void video_encode_example(const char *filename, int codec_id) +{ + AVCodec *codec; + AVCodecContext *c= NULL; + int i, ret, x, y, got_output; + FILE *f; + AVFrame *frame; + AVPacket pkt; + uint8_t endcode[] = { 0, 0, 1, 0xb7 }; + + printf("Encode video file %s\n", filename); + + /* find the mpeg1 video encoder */ + codec = avcodec_find_encoder(codec_id); + if (!codec) { + fprintf(stderr, "Codec not found\n"); + exit(1); + } + + c = avcodec_alloc_context3(codec); + if (!c) { + fprintf(stderr, "Could not allocate video codec context\n"); + exit(1); + } + + /* put sample parameters */ + c->bit_rate = 400000; + /* resolution must be a multiple of two */ + c->width = 352; + 
c->height = 288; + /* frames per second */ + c->time_base = (AVRational){1,25}; + c->gop_size = 10; /* emit one intra frame every ten frames */ + c->max_b_frames = 1; + c->pix_fmt = AV_PIX_FMT_YUV420P; + + if (codec_id == AV_CODEC_ID_H264) + av_opt_set(c->priv_data, "preset", "slow", 0); + + /* open it */ + if (avcodec_open2(c, codec, NULL) < 0) { + fprintf(stderr, "Could not open codec\n"); + exit(1); + } + + f = fopen(filename, "wb"); + if (!f) { + fprintf(stderr, "Could not open %s\n", filename); + exit(1); + } + + frame = av_frame_alloc(); + if (!frame) { + fprintf(stderr, "Could not allocate video frame\n"); + exit(1); + } + frame->format = c->pix_fmt; + frame->width = c->width; + frame->height = c->height; + + /* the image can be allocated by any means and av_image_alloc() is + * just the most convenient way if av_malloc() is to be used */ + ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height, + c->pix_fmt, 32); + if (ret < 0) { + fprintf(stderr, "Could not allocate raw picture buffer\n"); + exit(1); + } + + /* encode 1 second of video */ + for (i = 0; i < 25; i++) { + av_init_packet(&pkt); + pkt.data = NULL; // packet data will be allocated by the encoder + pkt.size = 0; + + fflush(stdout); + /* prepare a dummy image */ + /* Y */ + for (y = 0; y < c->height; y++) { + for (x = 0; x < c->width; x++) { + frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3; + } + } + + /* Cb and Cr */ + for (y = 0; y < c->height/2; y++) { + for (x = 0; x < c->width/2; x++) { + frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2; + frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5; + } + } + + frame->pts = i; + + /* encode the image */ + ret = avcodec_encode_video2(c, &pkt, frame, &got_output); + if (ret < 0) { + fprintf(stderr, "Error encoding frame\n"); + exit(1); + } + + if (got_output) { + printf("Write frame %3d (size=%5d)\n", i, pkt.size); + fwrite(pkt.data, 1, pkt.size, f); + av_free_packet(&pkt); + } + } + + /* get the delayed 
frames */ + for (got_output = 1; got_output; i++) { + fflush(stdout); + + ret = avcodec_encode_video2(c, &pkt, NULL, &got_output); + if (ret < 0) { + fprintf(stderr, "Error encoding frame\n"); + exit(1); + } + + if (got_output) { + printf("Write frame %3d (size=%5d)\n", i, pkt.size); + fwrite(pkt.data, 1, pkt.size, f); + av_free_packet(&pkt); + } + } + + /* add sequence end code to have a real mpeg file */ + fwrite(endcode, 1, sizeof(endcode), f); + fclose(f); + + avcodec_close(c); + av_free(c); + av_freep(&frame->data[0]); + av_frame_free(&frame); + printf("\n"); +} + +/* + * Video decoding example + */ + +static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize, + char *filename) +{ + FILE *f; + int i; + + f = fopen(filename,"w"); + fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255); + for (i = 0; i < ysize; i++) + fwrite(buf + i * wrap, 1, xsize, f); + fclose(f); +} + +static int decode_write_frame(const char *outfilename, AVCodecContext *avctx, + AVFrame *frame, int *frame_count, AVPacket *pkt, int last) +{ + int len, got_frame; + char buf[1024]; + + len = avcodec_decode_video2(avctx, frame, &got_frame, pkt); + if (len < 0) { + fprintf(stderr, "Error while decoding frame %d\n", *frame_count); + return len; + } + if (got_frame) { + printf("Saving %sframe %3d\n", last ? 
"last " : "", *frame_count); + fflush(stdout); + + /* the picture is allocated by the decoder, no need to free it */ + snprintf(buf, sizeof(buf), outfilename, *frame_count); + pgm_save(frame->data[0], frame->linesize[0], + avctx->width, avctx->height, buf); + (*frame_count)++; + } + if (pkt->data) { + pkt->size -= len; + pkt->data += len; + } + return 0; +} + +static void video_decode_example(const char *outfilename, const char *filename) +{ + AVCodec *codec; + AVCodecContext *c= NULL; + int frame_count; + FILE *f; + AVFrame *frame; + uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE]; + AVPacket avpkt; + + av_init_packet(&avpkt); + + /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */ + memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE); + + printf("Decode video file %s to %s\n", filename, outfilename); + + /* find the mpeg1 video decoder */ + codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO); + if (!codec) { + fprintf(stderr, "Codec not found\n"); + exit(1); + } + + c = avcodec_alloc_context3(codec); + if (!c) { + fprintf(stderr, "Could not allocate video codec context\n"); + exit(1); + } + + if(codec->capabilities&CODEC_CAP_TRUNCATED) + c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */ + + /* For some codecs, such as msmpeg4 and mpeg4, width and height + MUST be initialized there because this information is not + available in the bitstream. 
*/ + + /* open it */ + if (avcodec_open2(c, codec, NULL) < 0) { + fprintf(stderr, "Could not open codec\n"); + exit(1); + } + + f = fopen(filename, "rb"); + if (!f) { + fprintf(stderr, "Could not open %s\n", filename); + exit(1); + } + + frame = av_frame_alloc(); + if (!frame) { + fprintf(stderr, "Could not allocate video frame\n"); + exit(1); + } + + frame_count = 0; + for (;;) { + avpkt.size = fread(inbuf, 1, INBUF_SIZE, f); + if (avpkt.size == 0) + break; + + /* NOTE1: some codecs are stream based (mpegvideo, mpegaudio) + and this is the only method to use them because you cannot + know the compressed data size before analysing it. + + BUT some other codecs (msmpeg4, mpeg4) are inherently frame + based, so you must call them with all the data for one + frame exactly. You must also initialize 'width' and + 'height' before initializing them. */ + + /* NOTE2: some codecs allow the raw parameters (frame size, + sample rate) to be changed at any frame. We handle this, so + you should also take care of it */ + + /* here, we use a stream based decoder (mpeg1video), so we + feed decoder and see if it could decode a frame */ + avpkt.data = inbuf; + while (avpkt.size > 0) + if (decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0) + exit(1); + } + + /* some codecs, such as MPEG, transmit the I and P frame with a + latency of one frame. 
You must do the following to have a + chance to get the last frame of the video */ + avpkt.data = NULL; + avpkt.size = 0; + decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1); + + fclose(f); + + avcodec_close(c); + av_free(c); + av_frame_free(&frame); + printf("\n"); +} + +int main(int argc, char **argv) +{ + const char *output_type; + + /* register all the codecs */ + avcodec_register_all(); + + if (argc < 2) { + printf("usage: %s output_type\n" + "API example program to decode/encode a media stream with libavcodec.\n" + "This program generates a synthetic stream and encodes it to a file\n" + "named test.h264, test.mp2 or test.mpg depending on output_type.\n" + "The encoded stream is then decoded and written to a raw data output.\n" + "output_type must be choosen between 'h264', 'mp2', 'mpg'.\n", + argv[0]); + return 1; + } + output_type = argv[1]; + + if (!strcmp(output_type, "h264")) { + video_encode_example("test.h264", AV_CODEC_ID_H264); + } else if (!strcmp(output_type, "mp2")) { + audio_encode_example("test.mp2"); + audio_decode_example("test.sw", "test.mp2"); + } else if (!strcmp(output_type, "mpg")) { + video_encode_example("test.mpg", AV_CODEC_ID_MPEG1VIDEO); + video_decode_example("test%02d.pgm", "test.mpg"); + } else { + fprintf(stderr, "Invalid output type '%s', choose between 'h264', 'mp2', or 'mpg'\n", + output_type); + return 1; + } + + return 0; +} diff --git a/dependencies64/ffmpeg/doc/examples/avio_reading.c b/dependencies64/ffmpeg/doc/examples/avio_reading.c new file mode 100644 index 000000000..02474e907 --- /dev/null +++ b/dependencies64/ffmpeg/doc/examples/avio_reading.c @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2014 Stefano Sabatini + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, 
distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/** + * @file + * libavformat AVIOContext API example. + * + * Make libavformat demuxer access media content through a custom + * AVIOContext read callback. + * @example avio_reading.c + */ + +#include +#include +#include +#include + +struct buffer_data { + uint8_t *ptr; + size_t size; ///< size left in the buffer +}; + +static int read_packet(void *opaque, uint8_t *buf, int buf_size) +{ + struct buffer_data *bd = (struct buffer_data *)opaque; + buf_size = FFMIN(buf_size, bd->size); + + printf("ptr:%p size:%zu\n", bd->ptr, bd->size); + + /* copy internal buffer data to buf */ + memcpy(buf, bd->ptr, buf_size); + bd->ptr += buf_size; + bd->size -= buf_size; + + return buf_size; +} + +int main(int argc, char *argv[]) +{ + AVFormatContext *fmt_ctx = NULL; + AVIOContext *avio_ctx = NULL; + uint8_t *buffer = NULL, *avio_ctx_buffer = NULL; + size_t buffer_size, avio_ctx_buffer_size = 4096; + char *input_filename = NULL; + int ret = 0; + struct buffer_data bd = { 0 }; + + if (argc != 2) { + fprintf(stderr, "usage: %s input_file\n" + "API example program to show how to read from a custom buffer " + "accessed through AVIOContext.\n", argv[0]); + return 1; + } + input_filename = 
argv[1]; + + /* register codecs and formats and other lavf/lavc components*/ + av_register_all(); + + /* slurp file content into buffer */ + ret = av_file_map(input_filename, &buffer, &buffer_size, 0, NULL); + if (ret < 0) + goto end; + + /* fill opaque structure used by the AVIOContext read callback */ + bd.ptr = buffer; + bd.size = buffer_size; + + if (!(fmt_ctx = avformat_alloc_context())) { + ret = AVERROR(ENOMEM); + goto end; + } + + avio_ctx_buffer = av_malloc(avio_ctx_buffer_size); + if (!avio_ctx_buffer) { + ret = AVERROR(ENOMEM); + goto end; + } + avio_ctx = avio_alloc_context(avio_ctx_buffer, avio_ctx_buffer_size, + 0, &bd, &read_packet, NULL, NULL); + if (!avio_ctx) { + ret = AVERROR(ENOMEM); + goto end; + } + fmt_ctx->pb = avio_ctx; + + ret = avformat_open_input(&fmt_ctx, NULL, NULL, NULL); + if (ret < 0) { + fprintf(stderr, "Could not open input\n"); + goto end; + } + + ret = avformat_find_stream_info(fmt_ctx, NULL); + if (ret < 0) { + fprintf(stderr, "Could not find stream information\n"); + goto end; + } + + av_dump_format(fmt_ctx, 0, input_filename, 0); + +end: + avformat_close_input(&fmt_ctx); + /* note: the internal buffer could have changed, and be != avio_ctx_buffer */ + if (avio_ctx) { + av_freep(&avio_ctx->buffer); + av_freep(&avio_ctx); + } + av_file_unmap(buffer, buffer_size); + + if (ret < 0) { + fprintf(stderr, "Error occurred: %s\n", av_err2str(ret)); + return 1; + } + + return 0; +} diff --git a/dependencies64/ffmpeg/doc/examples/demuxing_decoding.c b/dependencies64/ffmpeg/doc/examples/demuxing_decoding.c new file mode 100644 index 000000000..077fc87b5 --- /dev/null +++ b/dependencies64/ffmpeg/doc/examples/demuxing_decoding.c @@ -0,0 +1,386 @@ +/* + * Copyright (c) 2012 Stefano Sabatini + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to 
use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/** + * @file + * Demuxing and decoding example. + * + * Show how to use the libavformat and libavcodec API to demux and + * decode audio and video data. + * @example demuxing_decoding.c + */ + +#include +#include +#include +#include + +static AVFormatContext *fmt_ctx = NULL; +static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx; +static AVStream *video_stream = NULL, *audio_stream = NULL; +static const char *src_filename = NULL; +static const char *video_dst_filename = NULL; +static const char *audio_dst_filename = NULL; +static FILE *video_dst_file = NULL; +static FILE *audio_dst_file = NULL; + +static uint8_t *video_dst_data[4] = {NULL}; +static int video_dst_linesize[4]; +static int video_dst_bufsize; + +static int video_stream_idx = -1, audio_stream_idx = -1; +static AVFrame *frame = NULL; +static AVPacket pkt; +static int video_frame_count = 0; +static int audio_frame_count = 0; + +/* The different ways of decoding and managing data memory. You are not + * supposed to support all the modes in your application but pick the one most + * appropriate to your needs. 
Look for the use of api_mode in this example to + * see what are the differences of API usage between them */ +enum { + API_MODE_OLD = 0, /* old method, deprecated */ + API_MODE_NEW_API_REF_COUNT = 1, /* new method, using the frame reference counting */ + API_MODE_NEW_API_NO_REF_COUNT = 2, /* new method, without reference counting */ +}; + +static int api_mode = API_MODE_OLD; + +static int decode_packet(int *got_frame, int cached) +{ + int ret = 0; + int decoded = pkt.size; + + *got_frame = 0; + + if (pkt.stream_index == video_stream_idx) { + /* decode video frame */ + ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt); + if (ret < 0) { + fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret)); + return ret; + } + + if (*got_frame) { + printf("video_frame%s n:%d coded_n:%d pts:%s\n", + cached ? "(cached)" : "", + video_frame_count++, frame->coded_picture_number, + av_ts2timestr(frame->pts, &video_dec_ctx->time_base)); + + /* copy decoded frame to destination buffer: + * this is required since rawvideo expects non aligned data */ + av_image_copy(video_dst_data, video_dst_linesize, + (const uint8_t **)(frame->data), frame->linesize, + video_dec_ctx->pix_fmt, video_dec_ctx->width, video_dec_ctx->height); + + /* write to rawvideo file */ + fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file); + } + } else if (pkt.stream_index == audio_stream_idx) { + /* decode audio frame */ + ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt); + if (ret < 0) { + fprintf(stderr, "Error decoding audio frame (%s)\n", av_err2str(ret)); + return ret; + } + /* Some audio decoders decode only part of the packet, and have to be + * called again with the remainder of the packet data. + * Sample: fate-suite/lossless-audio/luckynight-partial.shn + * Also, some decoders might over-read the packet. 
*/ + decoded = FFMIN(ret, pkt.size); + + if (*got_frame) { + size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format); + printf("audio_frame%s n:%d nb_samples:%d pts:%s\n", + cached ? "(cached)" : "", + audio_frame_count++, frame->nb_samples, + av_ts2timestr(frame->pts, &audio_dec_ctx->time_base)); + + /* Write the raw audio data samples of the first plane. This works + * fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However, + * most audio decoders output planar audio, which uses a separate + * plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P). + * In other words, this code will write only the first audio channel + * in these cases. + * You should use libswresample or libavfilter to convert the frame + * to packed data. */ + fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file); + } + } + + /* If we use the new API with reference counting, we own the data and need + * to de-reference it when we don't use it anymore */ + if (*got_frame && api_mode == API_MODE_NEW_API_REF_COUNT) + av_frame_unref(frame); + + return decoded; +} + +static int open_codec_context(int *stream_idx, + AVFormatContext *fmt_ctx, enum AVMediaType type) +{ + int ret; + AVStream *st; + AVCodecContext *dec_ctx = NULL; + AVCodec *dec = NULL; + AVDictionary *opts = NULL; + + ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0); + if (ret < 0) { + fprintf(stderr, "Could not find %s stream in input file '%s'\n", + av_get_media_type_string(type), src_filename); + return ret; + } else { + *stream_idx = ret; + st = fmt_ctx->streams[*stream_idx]; + + /* find decoder for the stream */ + dec_ctx = st->codec; + dec = avcodec_find_decoder(dec_ctx->codec_id); + if (!dec) { + fprintf(stderr, "Failed to find %s codec\n", + av_get_media_type_string(type)); + return AVERROR(EINVAL); + } + + /* Init the decoders, with or without reference counting */ + if (api_mode == API_MODE_NEW_API_REF_COUNT) + av_dict_set(&opts, "refcounted_frames", "1", 0); 
+ if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) { + fprintf(stderr, "Failed to open %s codec\n", + av_get_media_type_string(type)); + return ret; + } + } + + return 0; +} + +static int get_format_from_sample_fmt(const char **fmt, + enum AVSampleFormat sample_fmt) +{ + int i; + struct sample_fmt_entry { + enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le; + } sample_fmt_entries[] = { + { AV_SAMPLE_FMT_U8, "u8", "u8" }, + { AV_SAMPLE_FMT_S16, "s16be", "s16le" }, + { AV_SAMPLE_FMT_S32, "s32be", "s32le" }, + { AV_SAMPLE_FMT_FLT, "f32be", "f32le" }, + { AV_SAMPLE_FMT_DBL, "f64be", "f64le" }, + }; + *fmt = NULL; + + for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) { + struct sample_fmt_entry *entry = &sample_fmt_entries[i]; + if (sample_fmt == entry->sample_fmt) { + *fmt = AV_NE(entry->fmt_be, entry->fmt_le); + return 0; + } + } + + fprintf(stderr, + "sample format %s is not supported as output format\n", + av_get_sample_fmt_name(sample_fmt)); + return -1; +} + +int main (int argc, char **argv) +{ + int ret = 0, got_frame; + + if (argc != 4 && argc != 5) { + fprintf(stderr, "usage: %s [-refcount=] " + "input_file video_output_file audio_output_file\n" + "API example program to show how to read frames from an input file.\n" + "This program reads frames from a file, decodes them, and writes decoded\n" + "video frames to a rawvideo file named video_output_file, and decoded\n" + "audio frames to a rawaudio file named audio_output_file.\n\n" + "If the -refcount option is specified, the program use the\n" + "reference counting frame system which allows keeping a copy of\n" + "the data for longer than one decode call. 
If unset, it's using\n" + "the classic old method.\n" + "\n", argv[0]); + exit(1); + } + if (argc == 5) { + const char *mode = argv[1] + strlen("-refcount="); + if (!strcmp(mode, "old")) api_mode = API_MODE_OLD; + else if (!strcmp(mode, "new_norefcount")) api_mode = API_MODE_NEW_API_NO_REF_COUNT; + else if (!strcmp(mode, "new_refcount")) api_mode = API_MODE_NEW_API_REF_COUNT; + else { + fprintf(stderr, "unknow mode '%s'\n", mode); + exit(1); + } + argv++; + } + src_filename = argv[1]; + video_dst_filename = argv[2]; + audio_dst_filename = argv[3]; + + /* register all formats and codecs */ + av_register_all(); + + /* open input file, and allocate format context */ + if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) { + fprintf(stderr, "Could not open source file %s\n", src_filename); + exit(1); + } + + /* retrieve stream information */ + if (avformat_find_stream_info(fmt_ctx, NULL) < 0) { + fprintf(stderr, "Could not find stream information\n"); + exit(1); + } + + if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) { + video_stream = fmt_ctx->streams[video_stream_idx]; + video_dec_ctx = video_stream->codec; + + video_dst_file = fopen(video_dst_filename, "wb"); + if (!video_dst_file) { + fprintf(stderr, "Could not open destination file %s\n", video_dst_filename); + ret = 1; + goto end; + } + + /* allocate image where the decoded image will be put */ + ret = av_image_alloc(video_dst_data, video_dst_linesize, + video_dec_ctx->width, video_dec_ctx->height, + video_dec_ctx->pix_fmt, 1); + if (ret < 0) { + fprintf(stderr, "Could not allocate raw video buffer\n"); + goto end; + } + video_dst_bufsize = ret; + } + + if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) { + audio_stream = fmt_ctx->streams[audio_stream_idx]; + audio_dec_ctx = audio_stream->codec; + audio_dst_file = fopen(audio_dst_filename, "wb"); + if (!audio_dst_file) { + fprintf(stderr, "Could not open destination file %s\n", 
video_dst_filename); + ret = 1; + goto end; + } + } + + /* dump input information to stderr */ + av_dump_format(fmt_ctx, 0, src_filename, 0); + + if (!audio_stream && !video_stream) { + fprintf(stderr, "Could not find audio or video stream in the input, aborting\n"); + ret = 1; + goto end; + } + + /* When using the new API, you need to use the libavutil/frame.h API, while + * the classic frame management is available in libavcodec */ + if (api_mode == API_MODE_OLD) + frame = avcodec_alloc_frame(); + else + frame = av_frame_alloc(); + if (!frame) { + fprintf(stderr, "Could not allocate frame\n"); + ret = AVERROR(ENOMEM); + goto end; + } + + /* initialize packet, set data to NULL, let the demuxer fill it */ + av_init_packet(&pkt); + pkt.data = NULL; + pkt.size = 0; + + if (video_stream) + printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename); + if (audio_stream) + printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename); + + /* read frames from the file */ + while (av_read_frame(fmt_ctx, &pkt) >= 0) { + AVPacket orig_pkt = pkt; + do { + ret = decode_packet(&got_frame, 0); + if (ret < 0) + break; + pkt.data += ret; + pkt.size -= ret; + } while (pkt.size > 0); + av_free_packet(&orig_pkt); + } + + /* flush cached frames */ + pkt.data = NULL; + pkt.size = 0; + do { + decode_packet(&got_frame, 1); + } while (got_frame); + + printf("Demuxing succeeded.\n"); + + if (video_stream) { + printf("Play the output video file with the command:\n" + "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n", + av_get_pix_fmt_name(video_dec_ctx->pix_fmt), video_dec_ctx->width, video_dec_ctx->height, + video_dst_filename); + } + + if (audio_stream) { + enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt; + int n_channels = audio_dec_ctx->channels; + const char *fmt; + + if (av_sample_fmt_is_planar(sfmt)) { + const char *packed = av_get_sample_fmt_name(sfmt); + printf("Warning: the sample format the decoder produced is planar " 
+ "(%s). This example will output the first channel only.\n", + packed ? packed : "?"); + sfmt = av_get_packed_sample_fmt(sfmt); + n_channels = 1; + } + + if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0) + goto end; + + printf("Play the output audio file with the command:\n" + "ffplay -f %s -ac %d -ar %d %s\n", + fmt, n_channels, audio_dec_ctx->sample_rate, + audio_dst_filename); + } + +end: + avcodec_close(video_dec_ctx); + avcodec_close(audio_dec_ctx); + avformat_close_input(&fmt_ctx); + if (video_dst_file) + fclose(video_dst_file); + if (audio_dst_file) + fclose(audio_dst_file); + if (api_mode == API_MODE_OLD) + avcodec_free_frame(&frame); + else + av_frame_free(&frame); + av_free(video_dst_data[0]); + + return ret < 0; +} diff --git a/dependencies64/ffmpeg/doc/examples/filter_audio.c b/dependencies64/ffmpeg/doc/examples/filter_audio.c new file mode 100644 index 000000000..8451f9cba --- /dev/null +++ b/dependencies64/ffmpeg/doc/examples/filter_audio.c @@ -0,0 +1,364 @@ +/* + * copyright (c) 2013 Andrew Kelley + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * libavfilter API usage example. 
+ * + * @example filter_audio.c + * This example will generate a sine wave audio, + * pass it through a simple filter chain, and then compute the MD5 checksum of + * the output data. + * + * The filter chain it uses is: + * (input) -> abuffer -> volume -> aformat -> abuffersink -> (output) + * + * abuffer: This provides the endpoint where you can feed the decoded samples. + * volume: In this example we hardcode it to 0.90. + * aformat: This converts the samples to the samplefreq, channel layout, + * and sample format required by the audio device. + * abuffersink: This provides the endpoint where you can read the samples after + * they have passed through the filter chain. + */ + +#include +#include +#include +#include + +#include "libavutil/channel_layout.h" +#include "libavutil/md5.h" +#include "libavutil/opt.h" +#include "libavutil/samplefmt.h" + +#include "libavfilter/avfilter.h" +#include "libavfilter/buffersink.h" +#include "libavfilter/buffersrc.h" + +#define INPUT_SAMPLERATE 48000 +#define INPUT_FORMAT AV_SAMPLE_FMT_FLTP +#define INPUT_CHANNEL_LAYOUT AV_CH_LAYOUT_5POINT0 + +#define VOLUME_VAL 0.90 + +static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src, + AVFilterContext **sink) +{ + AVFilterGraph *filter_graph; + AVFilterContext *abuffer_ctx; + AVFilter *abuffer; + AVFilterContext *volume_ctx; + AVFilter *volume; + AVFilterContext *aformat_ctx; + AVFilter *aformat; + AVFilterContext *abuffersink_ctx; + AVFilter *abuffersink; + + AVDictionary *options_dict = NULL; + uint8_t options_str[1024]; + uint8_t ch_layout[64]; + + int err; + + /* Create a new filtergraph, which will contain all the filters. */ + filter_graph = avfilter_graph_alloc(); + if (!filter_graph) { + fprintf(stderr, "Unable to create filter graph.\n"); + return AVERROR(ENOMEM); + } + + /* Create the abuffer filter; + * it will be used for feeding the data into the graph. 
*/ + abuffer = avfilter_get_by_name("abuffer"); + if (!abuffer) { + fprintf(stderr, "Could not find the abuffer filter.\n"); + return AVERROR_FILTER_NOT_FOUND; + } + + abuffer_ctx = avfilter_graph_alloc_filter(filter_graph, abuffer, "src"); + if (!abuffer_ctx) { + fprintf(stderr, "Could not allocate the abuffer instance.\n"); + return AVERROR(ENOMEM); + } + + /* Set the filter options through the AVOptions API. */ + av_get_channel_layout_string(ch_layout, sizeof(ch_layout), 0, INPUT_CHANNEL_LAYOUT); + av_opt_set (abuffer_ctx, "channel_layout", ch_layout, AV_OPT_SEARCH_CHILDREN); + av_opt_set (abuffer_ctx, "sample_fmt", av_get_sample_fmt_name(INPUT_FORMAT), AV_OPT_SEARCH_CHILDREN); + av_opt_set_q (abuffer_ctx, "time_base", (AVRational){ 1, INPUT_SAMPLERATE }, AV_OPT_SEARCH_CHILDREN); + av_opt_set_int(abuffer_ctx, "sample_rate", INPUT_SAMPLERATE, AV_OPT_SEARCH_CHILDREN); + + /* Now initialize the filter; we pass NULL options, since we have already + * set all the options above. */ + err = avfilter_init_str(abuffer_ctx, NULL); + if (err < 0) { + fprintf(stderr, "Could not initialize the abuffer filter.\n"); + return err; + } + + /* Create volume filter. */ + volume = avfilter_get_by_name("volume"); + if (!volume) { + fprintf(stderr, "Could not find the volume filter.\n"); + return AVERROR_FILTER_NOT_FOUND; + } + + volume_ctx = avfilter_graph_alloc_filter(filter_graph, volume, "volume"); + if (!volume_ctx) { + fprintf(stderr, "Could not allocate the volume instance.\n"); + return AVERROR(ENOMEM); + } + + /* A different way of passing the options is as key/value pairs in a + * dictionary. */ + av_dict_set(&options_dict, "volume", AV_STRINGIFY(VOLUME_VAL), 0); + err = avfilter_init_dict(volume_ctx, &options_dict); + av_dict_free(&options_dict); + if (err < 0) { + fprintf(stderr, "Could not initialize the volume filter.\n"); + return err; + } + + /* Create the aformat filter; + * it ensures that the output is of the format we want. 
*/ + aformat = avfilter_get_by_name("aformat"); + if (!aformat) { + fprintf(stderr, "Could not find the aformat filter.\n"); + return AVERROR_FILTER_NOT_FOUND; + } + + aformat_ctx = avfilter_graph_alloc_filter(filter_graph, aformat, "aformat"); + if (!aformat_ctx) { + fprintf(stderr, "Could not allocate the aformat instance.\n"); + return AVERROR(ENOMEM); + } + + /* A third way of passing the options is in a string of the form + * key1=value1:key2=value2.... */ + snprintf(options_str, sizeof(options_str), + "sample_fmts=%s:sample_rates=%d:channel_layouts=0x%"PRIx64, + av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 44100, + (uint64_t)AV_CH_LAYOUT_STEREO); + err = avfilter_init_str(aformat_ctx, options_str); + if (err < 0) { + av_log(NULL, AV_LOG_ERROR, "Could not initialize the aformat filter.\n"); + return err; + } + + /* Finally create the abuffersink filter; + * it will be used to get the filtered data out of the graph. */ + abuffersink = avfilter_get_by_name("abuffersink"); + if (!abuffersink) { + fprintf(stderr, "Could not find the abuffersink filter.\n"); + return AVERROR_FILTER_NOT_FOUND; + } + + abuffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "sink"); + if (!abuffersink_ctx) { + fprintf(stderr, "Could not allocate the abuffersink instance.\n"); + return AVERROR(ENOMEM); + } + + /* This filter takes no options. */ + err = avfilter_init_str(abuffersink_ctx, NULL); + if (err < 0) { + fprintf(stderr, "Could not initialize the abuffersink instance.\n"); + return err; + } + + /* Connect the filters; + * in this simple case the filters just form a linear chain. */ + err = avfilter_link(abuffer_ctx, 0, volume_ctx, 0); + if (err >= 0) + err = avfilter_link(volume_ctx, 0, aformat_ctx, 0); + if (err >= 0) + err = avfilter_link(aformat_ctx, 0, abuffersink_ctx, 0); + if (err < 0) { + fprintf(stderr, "Error connecting filters\n"); + return err; + } + + /* Configure the graph. 
*/ + err = avfilter_graph_config(filter_graph, NULL); + if (err < 0) { + av_log(NULL, AV_LOG_ERROR, "Error configuring the filter graph\n"); + return err; + } + + *graph = filter_graph; + *src = abuffer_ctx; + *sink = abuffersink_ctx; + + return 0; +} + +/* Do something useful with the filtered data: this simple + * example just prints the MD5 checksum of each plane to stdout. */ +static int process_output(struct AVMD5 *md5, AVFrame *frame) +{ + int planar = av_sample_fmt_is_planar(frame->format); + int channels = av_get_channel_layout_nb_channels(frame->channel_layout); + int planes = planar ? channels : 1; + int bps = av_get_bytes_per_sample(frame->format); + int plane_size = bps * frame->nb_samples * (planar ? 1 : channels); + int i, j; + + for (i = 0; i < planes; i++) { + uint8_t checksum[16]; + + av_md5_init(md5); + av_md5_sum(checksum, frame->extended_data[i], plane_size); + + fprintf(stdout, "plane %d: 0x", i); + for (j = 0; j < sizeof(checksum); j++) + fprintf(stdout, "%02X", checksum[j]); + fprintf(stdout, "\n"); + } + fprintf(stdout, "\n"); + + return 0; +} + +/* Construct a frame of audio data to be filtered; + * this simple example just synthesizes a sine wave. */ +static int get_input(AVFrame *frame, int frame_num) +{ + int err, i, j; + +#define FRAME_SIZE 1024 + + /* Set up the frame properties and allocate the buffer for the data. */ + frame->sample_rate = INPUT_SAMPLERATE; + frame->format = INPUT_FORMAT; + frame->channel_layout = INPUT_CHANNEL_LAYOUT; + frame->nb_samples = FRAME_SIZE; + frame->pts = frame_num * FRAME_SIZE; + + err = av_frame_get_buffer(frame, 0); + if (err < 0) + return err; + + /* Fill the data for each channel. 
*/ + for (i = 0; i < 5; i++) { + float *data = (float*)frame->extended_data[i]; + + for (j = 0; j < frame->nb_samples; j++) + data[j] = sin(2 * M_PI * (frame_num + j) * (i + 1) / FRAME_SIZE); + } + + return 0; +} + +int main(int argc, char *argv[]) +{ + struct AVMD5 *md5; + AVFilterGraph *graph; + AVFilterContext *src, *sink; + AVFrame *frame; + uint8_t errstr[1024]; + float duration; + int err, nb_frames, i; + + if (argc < 2) { + fprintf(stderr, "Usage: %s \n", argv[0]); + return 1; + } + + duration = atof(argv[1]); + nb_frames = duration * INPUT_SAMPLERATE / FRAME_SIZE; + if (nb_frames <= 0) { + fprintf(stderr, "Invalid duration: %s\n", argv[1]); + return 1; + } + + avfilter_register_all(); + + /* Allocate the frame we will be using to store the data. */ + frame = av_frame_alloc(); + if (!frame) { + fprintf(stderr, "Error allocating the frame\n"); + return 1; + } + + md5 = av_md5_alloc(); + if (!md5) { + fprintf(stderr, "Error allocating the MD5 context\n"); + return 1; + } + + /* Set up the filtergraph. */ + err = init_filter_graph(&graph, &src, &sink); + if (err < 0) { + fprintf(stderr, "Unable to init filter graph:"); + goto fail; + } + + /* the main filtering loop */ + for (i = 0; i < nb_frames; i++) { + /* get an input frame to be filtered */ + err = get_input(frame, i); + if (err < 0) { + fprintf(stderr, "Error generating input frame:"); + goto fail; + } + + /* Send the frame to the input of the filtergraph. */ + err = av_buffersrc_add_frame(src, frame); + if (err < 0) { + av_frame_unref(frame); + fprintf(stderr, "Error submitting the frame to the filtergraph:"); + goto fail; + } + + /* Get all the filtered output that is available. 
*/ + while ((err = av_buffersink_get_frame(sink, frame)) >= 0) { + /* now do something with our filtered frame */ + err = process_output(md5, frame); + if (err < 0) { + fprintf(stderr, "Error processing the filtered frame:"); + goto fail; + } + av_frame_unref(frame); + } + + if (err == AVERROR(EAGAIN)) { + /* Need to feed more frames in. */ + continue; + } else if (err == AVERROR_EOF) { + /* Nothing more to do, finish. */ + break; + } else if (err < 0) { + /* An error occurred. */ + fprintf(stderr, "Error filtering the data:"); + goto fail; + } + } + + avfilter_graph_free(&graph); + av_frame_free(&frame); + av_freep(&md5); + + return 0; + +fail: + av_strerror(err, errstr, sizeof(errstr)); + fprintf(stderr, "%s\n", errstr); + return 1; +} diff --git a/dependencies64/ffmpeg/doc/examples/filtering_audio.c b/dependencies64/ffmpeg/doc/examples/filtering_audio.c new file mode 100644 index 000000000..46595fb3b --- /dev/null +++ b/dependencies64/ffmpeg/doc/examples/filtering_audio.c @@ -0,0 +1,280 @@ +/* + * Copyright (c) 2010 Nicolas George + * Copyright (c) 2011 Stefano Sabatini + * Copyright (c) 2012 Clément Bœsch + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/** + * @file + * API example for audio decoding and filtering + * @example filtering_audio.c + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +static const char *filter_descr = "aresample=8000,aformat=sample_fmts=s16:channel_layouts=mono"; +static const char *player = "ffplay -f s16le -ar 8000 -ac 1 -"; + +static AVFormatContext *fmt_ctx; +static AVCodecContext *dec_ctx; +AVFilterContext *buffersink_ctx; +AVFilterContext *buffersrc_ctx; +AVFilterGraph *filter_graph; +static int audio_stream_index = -1; + +static int open_input_file(const char *filename) +{ + int ret; + AVCodec *dec; + + if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n"); + return ret; + } + + if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n"); + return ret; + } + + /* select the audio stream */ + ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot find a audio stream in the input file\n"); + return ret; + } + audio_stream_index = ret; + dec_ctx = fmt_ctx->streams[audio_stream_index]->codec; + av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0); + + /* init the audio decoder */ + if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n"); + return ret; + } + + return 0; +} + +static int init_filters(const char *filters_descr) +{ + char args[512]; + int ret = 0; + AVFilter *abuffersrc = avfilter_get_by_name("abuffer"); + AVFilter *abuffersink = avfilter_get_by_name("abuffersink"); + AVFilterInOut *outputs = 
avfilter_inout_alloc(); + AVFilterInOut *inputs = avfilter_inout_alloc(); + static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 }; + static const int64_t out_channel_layouts[] = { AV_CH_LAYOUT_MONO, -1 }; + static const int out_sample_rates[] = { 8000, -1 }; + const AVFilterLink *outlink; + AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base; + + filter_graph = avfilter_graph_alloc(); + if (!outputs || !inputs || !filter_graph) { + ret = AVERROR(ENOMEM); + goto end; + } + + /* buffer audio source: the decoded frames from the decoder will be inserted here. */ + if (!dec_ctx->channel_layout) + dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels); + snprintf(args, sizeof(args), + "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64, + time_base.num, time_base.den, dec_ctx->sample_rate, + av_get_sample_fmt_name(dec_ctx->sample_fmt), dec_ctx->channel_layout); + ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in", + args, NULL, filter_graph); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n"); + goto end; + } + + /* buffer audio sink: to terminate the filter chain. 
*/ + ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out", + NULL, NULL, filter_graph); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n"); + goto end; + } + + ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", out_sample_fmts, -1, + AV_OPT_SEARCH_CHILDREN); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n"); + goto end; + } + + ret = av_opt_set_int_list(buffersink_ctx, "channel_layouts", out_channel_layouts, -1, + AV_OPT_SEARCH_CHILDREN); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n"); + goto end; + } + + ret = av_opt_set_int_list(buffersink_ctx, "sample_rates", out_sample_rates, -1, + AV_OPT_SEARCH_CHILDREN); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n"); + goto end; + } + + /* Endpoints for the filter graph. */ + outputs->name = av_strdup("in"); + outputs->filter_ctx = buffersrc_ctx; + outputs->pad_idx = 0; + outputs->next = NULL; + + inputs->name = av_strdup("out"); + inputs->filter_ctx = buffersink_ctx; + inputs->pad_idx = 0; + inputs->next = NULL; + + if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr, + &inputs, &outputs, NULL)) < 0) + goto end; + + if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0) + goto end; + + /* Print summary of the sink buffer + * Note: args buffer is reused to store channel layout string */ + outlink = buffersink_ctx->inputs[0]; + av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout); + av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n", + (int)outlink->sample_rate, + (char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"), + args); + +end: + avfilter_inout_free(&inputs); + avfilter_inout_free(&outputs); + + return ret; +} + +static void print_frame(const AVFrame *frame) +{ + const int n = frame->nb_samples * av_get_channel_layout_nb_channels(av_frame_get_channel_layout(frame)); + const uint16_t *p = 
(uint16_t*)frame->data[0]; + const uint16_t *p_end = p + n; + + while (p < p_end) { + fputc(*p & 0xff, stdout); + fputc(*p>>8 & 0xff, stdout); + p++; + } + fflush(stdout); +} + +int main(int argc, char **argv) +{ + int ret; + AVPacket packet0, packet; + AVFrame *frame = av_frame_alloc(); + AVFrame *filt_frame = av_frame_alloc(); + int got_frame; + + if (!frame || !filt_frame) { + perror("Could not allocate frame"); + exit(1); + } + if (argc != 2) { + fprintf(stderr, "Usage: %s file | %s\n", argv[0], player); + exit(1); + } + + av_register_all(); + avfilter_register_all(); + + if ((ret = open_input_file(argv[1])) < 0) + goto end; + if ((ret = init_filters(filter_descr)) < 0) + goto end; + + /* read all packets */ + packet0.data = NULL; + packet.data = NULL; + while (1) { + if (!packet0.data) { + if ((ret = av_read_frame(fmt_ctx, &packet)) < 0) + break; + packet0 = packet; + } + + if (packet.stream_index == audio_stream_index) { + got_frame = 0; + ret = avcodec_decode_audio4(dec_ctx, frame, &got_frame, &packet); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Error decoding audio\n"); + continue; + } + packet.size -= ret; + packet.data += ret; + + if (got_frame) { + /* push the audio data from decoded frame into the filtergraph */ + if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, 0) < 0) { + av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n"); + break; + } + + /* pull filtered audio from the filtergraph */ + while (1) { + ret = av_buffersink_get_frame(buffersink_ctx, filt_frame); + if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) + break; + if (ret < 0) + goto end; + print_frame(filt_frame); + av_frame_unref(filt_frame); + } + } + + if (packet.size <= 0) + av_free_packet(&packet0); + } else { + /* discard non-wanted packets */ + av_free_packet(&packet0); + } + } +end: + avfilter_graph_free(&filter_graph); + avcodec_close(dec_ctx); + avformat_close_input(&fmt_ctx); + av_frame_free(&frame); + av_frame_free(&filt_frame); + + if (ret < 0 && 
ret != AVERROR_EOF) { + fprintf(stderr, "Error occurred: %s\n", av_err2str(ret)); + exit(1); + } + + exit(0); +} diff --git a/dependencies64/ffmpeg/doc/examples/filtering_video.c b/dependencies64/ffmpeg/doc/examples/filtering_video.c new file mode 100644 index 000000000..8d595735b --- /dev/null +++ b/dependencies64/ffmpeg/doc/examples/filtering_video.c @@ -0,0 +1,261 @@ +/* + * Copyright (c) 2010 Nicolas George + * Copyright (c) 2011 Stefano Sabatini + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +/** + * @file + * API example for decoding and filtering + * @example filtering_video.c + */ + +#define _XOPEN_SOURCE 600 /* for usleep */ +#include + +#include +#include +#include +#include +#include +#include +#include + +const char *filter_descr = "scale=78:24"; + +static AVFormatContext *fmt_ctx; +static AVCodecContext *dec_ctx; +AVFilterContext *buffersink_ctx; +AVFilterContext *buffersrc_ctx; +AVFilterGraph *filter_graph; +static int video_stream_index = -1; +static int64_t last_pts = AV_NOPTS_VALUE; + +static int open_input_file(const char *filename) +{ + int ret; + AVCodec *dec; + + if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n"); + return ret; + } + + if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n"); + return ret; + } + + /* select the video stream */ + ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n"); + return ret; + } + video_stream_index = ret; + dec_ctx = fmt_ctx->streams[video_stream_index]->codec; + av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0); + + /* init the video decoder */ + if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n"); + return ret; + } + + return 0; +} + +static int init_filters(const char *filters_descr) +{ + char args[512]; + int ret = 0; + AVFilter *buffersrc = avfilter_get_by_name("buffer"); + AVFilter *buffersink = avfilter_get_by_name("buffersink"); + AVFilterInOut *outputs = avfilter_inout_alloc(); + AVFilterInOut *inputs = avfilter_inout_alloc(); + enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE }; + + filter_graph = avfilter_graph_alloc(); + if (!outputs || !inputs || !filter_graph) { + ret = AVERROR(ENOMEM); + goto end; + } + + /* buffer video source: the decoded 
frames from the decoder will be inserted here. */ + snprintf(args, sizeof(args), + "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", + dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt, + dec_ctx->time_base.num, dec_ctx->time_base.den, + dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den); + + ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", + args, NULL, filter_graph); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n"); + goto end; + } + + /* buffer video sink: to terminate the filter chain. */ + ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", + NULL, NULL, filter_graph); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n"); + goto end; + } + + ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts, + AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n"); + goto end; + } + + /* Endpoints for the filter graph. */ + outputs->name = av_strdup("in"); + outputs->filter_ctx = buffersrc_ctx; + outputs->pad_idx = 0; + outputs->next = NULL; + + inputs->name = av_strdup("out"); + inputs->filter_ctx = buffersink_ctx; + inputs->pad_idx = 0; + inputs->next = NULL; + + if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr, + &inputs, &outputs, NULL)) < 0) + goto end; + + if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0) + goto end; + +end: + avfilter_inout_free(&inputs); + avfilter_inout_free(&outputs); + + return ret; +} + +static void display_frame(const AVFrame *frame, AVRational time_base) +{ + int x, y; + uint8_t *p0, *p; + int64_t delay; + + if (frame->pts != AV_NOPTS_VALUE) { + if (last_pts != AV_NOPTS_VALUE) { + /* sleep roughly the right amount of time; + * usleep is in microseconds, just like AV_TIME_BASE. 
*/ + delay = av_rescale_q(frame->pts - last_pts, + time_base, AV_TIME_BASE_Q); + if (delay > 0 && delay < 1000000) + usleep(delay); + } + last_pts = frame->pts; + } + + /* Trivial ASCII grayscale display. */ + p0 = frame->data[0]; + puts("\033c"); + for (y = 0; y < frame->height; y++) { + p = p0; + for (x = 0; x < frame->width; x++) + putchar(" .-+#"[*(p++) / 52]); + putchar('\n'); + p0 += frame->linesize[0]; + } + fflush(stdout); +} + +int main(int argc, char **argv) +{ + int ret; + AVPacket packet; + AVFrame *frame = av_frame_alloc(); + AVFrame *filt_frame = av_frame_alloc(); + int got_frame; + + if (!frame || !filt_frame) { + perror("Could not allocate frame"); + exit(1); + } + if (argc != 2) { + fprintf(stderr, "Usage: %s file\n", argv[0]); + exit(1); + } + + av_register_all(); + avfilter_register_all(); + + if ((ret = open_input_file(argv[1])) < 0) + goto end; + if ((ret = init_filters(filter_descr)) < 0) + goto end; + + /* read all packets */ + while (1) { + if ((ret = av_read_frame(fmt_ctx, &packet)) < 0) + break; + + if (packet.stream_index == video_stream_index) { + got_frame = 0; + ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &packet); + if (ret < 0) { + av_log(NULL, AV_LOG_ERROR, "Error decoding video\n"); + break; + } + + if (got_frame) { + frame->pts = av_frame_get_best_effort_timestamp(frame); + + /* push the decoded frame into the filtergraph */ + if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) { + av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n"); + break; + } + + /* pull filtered frames from the filtergraph */ + while (1) { + ret = av_buffersink_get_frame(buffersink_ctx, filt_frame); + if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) + break; + if (ret < 0) + goto end; + display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base); + av_frame_unref(filt_frame); + } + av_frame_unref(frame); + } + } + av_free_packet(&packet); + } +end: + avfilter_graph_free(&filter_graph); + 
avcodec_close(dec_ctx); + avformat_close_input(&fmt_ctx); + av_frame_free(&frame); + av_frame_free(&filt_frame); + + if (ret < 0 && ret != AVERROR_EOF) { + fprintf(stderr, "Error occurred: %s\n", av_err2str(ret)); + exit(1); + } + + exit(0); +} diff --git a/dependencies64/ffmpeg/doc/examples/metadata.c b/dependencies64/ffmpeg/doc/examples/metadata.c new file mode 100644 index 000000000..f73c26736 --- /dev/null +++ b/dependencies64/ffmpeg/doc/examples/metadata.c @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2011 Reinhard Tartler + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/** + * @file + * Shows how the metadata API can be used in application programs. 
+ * @example metadata.c + */ + +#include + +#include +#include + +int main (int argc, char **argv) +{ + AVFormatContext *fmt_ctx = NULL; + AVDictionaryEntry *tag = NULL; + int ret; + + if (argc != 2) { + printf("usage: %s \n" + "example program to demonstrate the use of the libavformat metadata API.\n" + "\n", argv[0]); + return 1; + } + + av_register_all(); + if ((ret = avformat_open_input(&fmt_ctx, argv[1], NULL, NULL))) + return ret; + + while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) + printf("%s=%s\n", tag->key, tag->value); + + avformat_close_input(&fmt_ctx); + return 0; +} diff --git a/dependencies64/ffmpeg/doc/examples/muxing.c b/dependencies64/ffmpeg/doc/examples/muxing.c new file mode 100644 index 000000000..ad8e02714 --- /dev/null +++ b/dependencies64/ffmpeg/doc/examples/muxing.c @@ -0,0 +1,606 @@ +/* + * Copyright (c) 2003 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +/** + * @file + * libavformat API example. + * + * Output a media file in any supported libavformat format. The default + * codecs are used. + * @example muxing.c + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +static int audio_is_eof, video_is_eof; + +#define STREAM_DURATION 10.0 +#define STREAM_FRAME_RATE 25 /* 25 images/s */ +#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */ + +static int sws_flags = SWS_BICUBIC; + +static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt) +{ + AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base; + + printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n", + av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base), + av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base), + av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base), + pkt->stream_index); +} + +static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt) +{ + /* rescale output packet timestamp values from codec to stream timebase */ + pkt->pts = av_rescale_q_rnd(pkt->pts, *time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX); + pkt->dts = av_rescale_q_rnd(pkt->dts, *time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX); + pkt->duration = av_rescale_q(pkt->duration, *time_base, st->time_base); + pkt->stream_index = st->index; + + /* Write the compressed frame to the media file. */ + log_packet(fmt_ctx, pkt); + return av_interleaved_write_frame(fmt_ctx, pkt); +} + +/* Add an output stream. 
*/ +static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec, + enum AVCodecID codec_id) +{ + AVCodecContext *c; + AVStream *st; + + /* find the encoder */ + *codec = avcodec_find_encoder(codec_id); + if (!(*codec)) { + fprintf(stderr, "Could not find encoder for '%s'\n", + avcodec_get_name(codec_id)); + exit(1); + } + + st = avformat_new_stream(oc, *codec); + if (!st) { + fprintf(stderr, "Could not allocate stream\n"); + exit(1); + } + st->id = oc->nb_streams-1; + c = st->codec; + + switch ((*codec)->type) { + case AVMEDIA_TYPE_AUDIO: + c->sample_fmt = (*codec)->sample_fmts ? + (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP; + c->bit_rate = 64000; + c->sample_rate = 44100; + c->channels = 2; + break; + + case AVMEDIA_TYPE_VIDEO: + c->codec_id = codec_id; + + c->bit_rate = 400000; + /* Resolution must be a multiple of two. */ + c->width = 352; + c->height = 288; + /* timebase: This is the fundamental unit of time (in seconds) in terms + * of which frame timestamps are represented. For fixed-fps content, + * timebase should be 1/framerate and timestamp increments should be + * identical to 1. */ + c->time_base.den = STREAM_FRAME_RATE; + c->time_base.num = 1; + c->gop_size = 12; /* emit one intra frame every twelve frames at most */ + c->pix_fmt = STREAM_PIX_FMT; + if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) { + /* just for testing, we also add B frames */ + c->max_b_frames = 2; + } + if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) { + /* Needed to avoid using macroblocks in which some coeffs overflow. + * This does not happen with normal video, it just happens here as + * the motion of the chroma plane does not match the luma plane. */ + c->mb_decision = 2; + } + break; + + default: + break; + } + + /* Some formats want stream headers to be separate. 
*/ + if (oc->oformat->flags & AVFMT_GLOBALHEADER) + c->flags |= CODEC_FLAG_GLOBAL_HEADER; + + return st; +} + +/**************************************************************/ +/* audio output */ + +static float t, tincr, tincr2; + +AVFrame *audio_frame; +static uint8_t **src_samples_data; +static int src_samples_linesize; +static int src_nb_samples; + +static int max_dst_nb_samples; +uint8_t **dst_samples_data; +int dst_samples_linesize; +int dst_samples_size; +int samples_count; + +struct SwrContext *swr_ctx = NULL; + +static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st) +{ + AVCodecContext *c; + int ret; + + c = st->codec; + + /* allocate and init a re-usable frame */ + audio_frame = av_frame_alloc(); + if (!audio_frame) { + fprintf(stderr, "Could not allocate audio frame\n"); + exit(1); + } + + /* open it */ + ret = avcodec_open2(c, codec, NULL); + if (ret < 0) { + fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret)); + exit(1); + } + + /* init signal generator */ + t = 0; + tincr = 2 * M_PI * 110.0 / c->sample_rate; + /* increment frequency by 110 Hz per second */ + tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate; + + src_nb_samples = c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE ? 
+ 10000 : c->frame_size; + + ret = av_samples_alloc_array_and_samples(&src_samples_data, &src_samples_linesize, c->channels, + src_nb_samples, AV_SAMPLE_FMT_S16, 0); + if (ret < 0) { + fprintf(stderr, "Could not allocate source samples\n"); + exit(1); + } + + /* compute the number of converted samples: buffering is avoided + * ensuring that the output buffer will contain at least all the + * converted input samples */ + max_dst_nb_samples = src_nb_samples; + + /* create resampler context */ + if (c->sample_fmt != AV_SAMPLE_FMT_S16) { + swr_ctx = swr_alloc(); + if (!swr_ctx) { + fprintf(stderr, "Could not allocate resampler context\n"); + exit(1); + } + + /* set options */ + av_opt_set_int (swr_ctx, "in_channel_count", c->channels, 0); + av_opt_set_int (swr_ctx, "in_sample_rate", c->sample_rate, 0); + av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0); + av_opt_set_int (swr_ctx, "out_channel_count", c->channels, 0); + av_opt_set_int (swr_ctx, "out_sample_rate", c->sample_rate, 0); + av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", c->sample_fmt, 0); + + /* initialize the resampling context */ + if ((ret = swr_init(swr_ctx)) < 0) { + fprintf(stderr, "Failed to initialize the resampling context\n"); + exit(1); + } + + ret = av_samples_alloc_array_and_samples(&dst_samples_data, &dst_samples_linesize, c->channels, + max_dst_nb_samples, c->sample_fmt, 0); + if (ret < 0) { + fprintf(stderr, "Could not allocate destination samples\n"); + exit(1); + } + } else { + dst_samples_data = src_samples_data; + } + dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, max_dst_nb_samples, + c->sample_fmt, 0); +} + +/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and + * 'nb_channels' channels. 
*/ +static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels) +{ + int j, i, v; + int16_t *q; + + q = samples; + for (j = 0; j < frame_size; j++) { + v = (int)(sin(t) * 10000); + for (i = 0; i < nb_channels; i++) + *q++ = v; + t += tincr; + tincr += tincr2; + } +} + +static void write_audio_frame(AVFormatContext *oc, AVStream *st, int flush) +{ + AVCodecContext *c; + AVPacket pkt = { 0 }; // data and size must be 0; + int got_packet, ret, dst_nb_samples; + + av_init_packet(&pkt); + c = st->codec; + + if (!flush) { + get_audio_frame((int16_t *)src_samples_data[0], src_nb_samples, c->channels); + + /* convert samples from native format to destination codec format, using the resampler */ + if (swr_ctx) { + /* compute destination number of samples */ + dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + src_nb_samples, + c->sample_rate, c->sample_rate, AV_ROUND_UP); + if (dst_nb_samples > max_dst_nb_samples) { + av_free(dst_samples_data[0]); + ret = av_samples_alloc(dst_samples_data, &dst_samples_linesize, c->channels, + dst_nb_samples, c->sample_fmt, 0); + if (ret < 0) + exit(1); + max_dst_nb_samples = dst_nb_samples; + dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, dst_nb_samples, + c->sample_fmt, 0); + } + + /* convert to destination format */ + ret = swr_convert(swr_ctx, + dst_samples_data, dst_nb_samples, + (const uint8_t **)src_samples_data, src_nb_samples); + if (ret < 0) { + fprintf(stderr, "Error while converting\n"); + exit(1); + } + } else { + dst_nb_samples = src_nb_samples; + } + + audio_frame->nb_samples = dst_nb_samples; + audio_frame->pts = av_rescale_q(samples_count, (AVRational){1, c->sample_rate}, c->time_base); + avcodec_fill_audio_frame(audio_frame, c->channels, c->sample_fmt, + dst_samples_data[0], dst_samples_size, 0); + samples_count += dst_nb_samples; + } + + ret = avcodec_encode_audio2(c, &pkt, flush ? 
NULL : audio_frame, &got_packet); + if (ret < 0) { + fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret)); + exit(1); + } + + if (!got_packet) { + if (flush) + audio_is_eof = 1; + return; + } + + ret = write_frame(oc, &c->time_base, st, &pkt); + if (ret < 0) { + fprintf(stderr, "Error while writing audio frame: %s\n", + av_err2str(ret)); + exit(1); + } +} + +static void close_audio(AVFormatContext *oc, AVStream *st) +{ + avcodec_close(st->codec); + if (dst_samples_data != src_samples_data) { + av_free(dst_samples_data[0]); + av_free(dst_samples_data); + } + av_free(src_samples_data[0]); + av_free(src_samples_data); + av_frame_free(&audio_frame); +} + +/**************************************************************/ +/* video output */ + +static AVFrame *frame; +static AVPicture src_picture, dst_picture; +static int frame_count; + +static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st) +{ + int ret; + AVCodecContext *c = st->codec; + + /* open the codec */ + ret = avcodec_open2(c, codec, NULL); + if (ret < 0) { + fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret)); + exit(1); + } + + /* allocate and init a re-usable frame */ + frame = av_frame_alloc(); + if (!frame) { + fprintf(stderr, "Could not allocate video frame\n"); + exit(1); + } + frame->format = c->pix_fmt; + frame->width = c->width; + frame->height = c->height; + + /* Allocate the encoded raw picture. */ + ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height); + if (ret < 0) { + fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret)); + exit(1); + } + + /* If the output format is not YUV420P, then a temporary YUV420P + * picture is needed too. It is then converted to the required + * output format. 
*/ + if (c->pix_fmt != AV_PIX_FMT_YUV420P) { + ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height); + if (ret < 0) { + fprintf(stderr, "Could not allocate temporary picture: %s\n", + av_err2str(ret)); + exit(1); + } + } + + /* copy data and linesize picture pointers to frame */ + *((AVPicture *)frame) = dst_picture; +} + +/* Prepare a dummy image. */ +static void fill_yuv_image(AVPicture *pict, int frame_index, + int width, int height) +{ + int x, y, i; + + i = frame_index; + + /* Y */ + for (y = 0; y < height; y++) + for (x = 0; x < width; x++) + pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3; + + /* Cb and Cr */ + for (y = 0; y < height / 2; y++) { + for (x = 0; x < width / 2; x++) { + pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2; + pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5; + } + } +} + +static void write_video_frame(AVFormatContext *oc, AVStream *st, int flush) +{ + int ret; + static struct SwsContext *sws_ctx; + AVCodecContext *c = st->codec; + + if (!flush) { + if (c->pix_fmt != AV_PIX_FMT_YUV420P) { + /* as we only generate a YUV420P picture, we must convert it + * to the codec pixel format if needed */ + if (!sws_ctx) { + sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P, + c->width, c->height, c->pix_fmt, + sws_flags, NULL, NULL, NULL); + if (!sws_ctx) { + fprintf(stderr, + "Could not initialize the conversion context\n"); + exit(1); + } + } + fill_yuv_image(&src_picture, frame_count, c->width, c->height); + sws_scale(sws_ctx, + (const uint8_t * const *)src_picture.data, src_picture.linesize, + 0, c->height, dst_picture.data, dst_picture.linesize); + } else { + fill_yuv_image(&dst_picture, frame_count, c->width, c->height); + } + } + + if (oc->oformat->flags & AVFMT_RAWPICTURE && !flush) { + /* Raw video case - directly store the picture in the packet */ + AVPacket pkt; + av_init_packet(&pkt); + + pkt.flags |= AV_PKT_FLAG_KEY; + pkt.stream_index = st->index; + pkt.data = 
dst_picture.data[0]; + pkt.size = sizeof(AVPicture); + + ret = av_interleaved_write_frame(oc, &pkt); + } else { + AVPacket pkt = { 0 }; + int got_packet; + av_init_packet(&pkt); + + /* encode the image */ + frame->pts = frame_count; + ret = avcodec_encode_video2(c, &pkt, flush ? NULL : frame, &got_packet); + if (ret < 0) { + fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret)); + exit(1); + } + /* If size is zero, it means the image was buffered. */ + + if (got_packet) { + ret = write_frame(oc, &c->time_base, st, &pkt); + } else { + if (flush) + video_is_eof = 1; + ret = 0; + } + } + + if (ret < 0) { + fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret)); + exit(1); + } + frame_count++; +} + +static void close_video(AVFormatContext *oc, AVStream *st) +{ + avcodec_close(st->codec); + av_free(src_picture.data[0]); + av_free(dst_picture.data[0]); + av_frame_free(&frame); +} + +/**************************************************************/ +/* media file output */ + +int main(int argc, char **argv) +{ + const char *filename; + AVOutputFormat *fmt; + AVFormatContext *oc; + AVStream *audio_st, *video_st; + AVCodec *audio_codec, *video_codec; + double audio_time, video_time; + int flush, ret; + + /* Initialize libavcodec, and register all codecs and formats. 
*/ + av_register_all(); + + if (argc != 2) { + printf("usage: %s output_file\n" + "API example program to output a media file with libavformat.\n" + "This program generates a synthetic audio and video stream, encodes and\n" + "muxes them into a file named output_file.\n" + "The output format is automatically guessed according to the file extension.\n" + "Raw images can also be output by using '%%d' in the filename.\n" + "\n", argv[0]); + return 1; + } + + filename = argv[1]; + + /* allocate the output media context */ + avformat_alloc_output_context2(&oc, NULL, NULL, filename); + if (!oc) { + printf("Could not deduce output format from file extension: using MPEG.\n"); + avformat_alloc_output_context2(&oc, NULL, "mpeg", filename); + } + if (!oc) + return 1; + + fmt = oc->oformat; + + /* Add the audio and video streams using the default format codecs + * and initialize the codecs. */ + video_st = NULL; + audio_st = NULL; + + if (fmt->video_codec != AV_CODEC_ID_NONE) + video_st = add_stream(oc, &video_codec, fmt->video_codec); + if (fmt->audio_codec != AV_CODEC_ID_NONE) + audio_st = add_stream(oc, &audio_codec, fmt->audio_codec); + + /* Now that all the parameters are set, we can open the audio and + * video codecs and allocate the necessary encode buffers. */ + if (video_st) + open_video(oc, video_codec, video_st); + if (audio_st) + open_audio(oc, audio_codec, audio_st); + + av_dump_format(oc, 0, filename, 1); + + /* open the output file, if needed */ + if (!(fmt->flags & AVFMT_NOFILE)) { + ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE); + if (ret < 0) { + fprintf(stderr, "Could not open '%s': %s\n", filename, + av_err2str(ret)); + return 1; + } + } + + /* Write the stream header, if any. 
*/ + ret = avformat_write_header(oc, NULL); + if (ret < 0) { + fprintf(stderr, "Error occurred when opening output file: %s\n", + av_err2str(ret)); + return 1; + } + + flush = 0; + while ((video_st && !video_is_eof) || (audio_st && !audio_is_eof)) { + /* Compute current audio and video time. */ + audio_time = (audio_st && !audio_is_eof) ? audio_st->pts.val * av_q2d(audio_st->time_base) : INFINITY; + video_time = (video_st && !video_is_eof) ? video_st->pts.val * av_q2d(video_st->time_base) : INFINITY; + + if (!flush && + (!audio_st || audio_time >= STREAM_DURATION) && + (!video_st || video_time >= STREAM_DURATION)) { + flush = 1; + } + + /* write interleaved audio and video frames */ + if (audio_st && !audio_is_eof && audio_time <= video_time) { + write_audio_frame(oc, audio_st, flush); + } else if (video_st && !video_is_eof && video_time < audio_time) { + write_video_frame(oc, video_st, flush); + } + } + + /* Write the trailer, if any. The trailer must be written before you + * close the CodecContexts open when you wrote the header; otherwise + * av_write_trailer() may try to use memory that was freed on + * av_codec_close(). */ + av_write_trailer(oc); + + /* Close each codec. */ + if (video_st) + close_video(oc, video_st); + if (audio_st) + close_audio(oc, audio_st); + + if (!(fmt->flags & AVFMT_NOFILE)) + /* Close the output file. 
*/ + avio_close(oc->pb); + + /* free the stream */ + avformat_free_context(oc); + + return 0; +} diff --git a/dependencies64/ffmpeg/doc/examples/remuxing.c b/dependencies64/ffmpeg/doc/examples/remuxing.c new file mode 100644 index 000000000..cdb2f6b32 --- /dev/null +++ b/dependencies64/ffmpeg/doc/examples/remuxing.c @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2013 Stefano Sabatini + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/** + * @file + * libavformat/libavcodec demuxing and muxing API example. + * + * Remux streams from one container format to another. 
+ * @example remuxing.c + */ + +#include +#include + +static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag) +{ + AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base; + + printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n", + tag, + av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base), + av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base), + av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base), + pkt->stream_index); +} + +int main(int argc, char **argv) +{ + AVOutputFormat *ofmt = NULL; + AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL; + AVPacket pkt; + const char *in_filename, *out_filename; + int ret, i; + + if (argc < 3) { + printf("usage: %s input output\n" + "API example program to remux a media file with libavformat and libavcodec.\n" + "The output format is guessed according to the file extension.\n" + "\n", argv[0]); + return 1; + } + + in_filename = argv[1]; + out_filename = argv[2]; + + av_register_all(); + + if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) { + fprintf(stderr, "Could not open input file '%s'", in_filename); + goto end; + } + + if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) { + fprintf(stderr, "Failed to retrieve input stream information"); + goto end; + } + + av_dump_format(ifmt_ctx, 0, in_filename, 0); + + avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename); + if (!ofmt_ctx) { + fprintf(stderr, "Could not create output context\n"); + ret = AVERROR_UNKNOWN; + goto end; + } + + ofmt = ofmt_ctx->oformat; + + for (i = 0; i < ifmt_ctx->nb_streams; i++) { + AVStream *in_stream = ifmt_ctx->streams[i]; + AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec); + if (!out_stream) { + fprintf(stderr, "Failed allocating output stream\n"); + ret = AVERROR_UNKNOWN; + goto end; + } + + ret = avcodec_copy_context(out_stream->codec, in_stream->codec); + if (ret < 
0) { + fprintf(stderr, "Failed to copy context from input to output stream codec context\n"); + goto end; + } + if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) + out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; + } + av_dump_format(ofmt_ctx, 0, out_filename, 1); + + if (!(ofmt->flags & AVFMT_NOFILE)) { + ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE); + if (ret < 0) { + fprintf(stderr, "Could not open output file '%s'", out_filename); + goto end; + } + } + + ret = avformat_write_header(ofmt_ctx, NULL); + if (ret < 0) { + fprintf(stderr, "Error occurred when opening output file\n"); + goto end; + } + + while (1) { + AVStream *in_stream, *out_stream; + + ret = av_read_frame(ifmt_ctx, &pkt); + if (ret < 0) + break; + + in_stream = ifmt_ctx->streams[pkt.stream_index]; + out_stream = ofmt_ctx->streams[pkt.stream_index]; + + log_packet(ifmt_ctx, &pkt, "in"); + + /* copy packet */ + pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX); + pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX); + pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base); + pkt.pos = -1; + log_packet(ofmt_ctx, &pkt, "out"); + + ret = av_interleaved_write_frame(ofmt_ctx, &pkt); + if (ret < 0) { + fprintf(stderr, "Error muxing packet\n"); + break; + } + av_free_packet(&pkt); + } + + av_write_trailer(ofmt_ctx); +end: + + avformat_close_input(&ifmt_ctx); + + /* close output */ + if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE)) + avio_close(ofmt_ctx->pb); + avformat_free_context(ofmt_ctx); + + if (ret < 0 && ret != AVERROR_EOF) { + fprintf(stderr, "Error occurred: %s\n", av_err2str(ret)); + return 1; + } + + return 0; +} diff --git a/dependencies64/ffmpeg/doc/examples/resampling_audio.c b/dependencies64/ffmpeg/doc/examples/resampling_audio.c new file mode 100644 index 000000000..f743cbe55 --- /dev/null +++ 
b/dependencies64/ffmpeg/doc/examples/resampling_audio.c @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2012 Stefano Sabatini + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/** + * @example resampling_audio.c + * libswresample API use example. 
+ */ + +#include +#include +#include +#include + +static int get_format_from_sample_fmt(const char **fmt, + enum AVSampleFormat sample_fmt) +{ + int i; + struct sample_fmt_entry { + enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le; + } sample_fmt_entries[] = { + { AV_SAMPLE_FMT_U8, "u8", "u8" }, + { AV_SAMPLE_FMT_S16, "s16be", "s16le" }, + { AV_SAMPLE_FMT_S32, "s32be", "s32le" }, + { AV_SAMPLE_FMT_FLT, "f32be", "f32le" }, + { AV_SAMPLE_FMT_DBL, "f64be", "f64le" }, + }; + *fmt = NULL; + + for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) { + struct sample_fmt_entry *entry = &sample_fmt_entries[i]; + if (sample_fmt == entry->sample_fmt) { + *fmt = AV_NE(entry->fmt_be, entry->fmt_le); + return 0; + } + } + + fprintf(stderr, + "Sample format %s not supported as output format\n", + av_get_sample_fmt_name(sample_fmt)); + return AVERROR(EINVAL); +} + +/** + * Fill dst buffer with nb_samples, generated starting from t. + */ +static void fill_samples(double *dst, int nb_samples, int nb_channels, int sample_rate, double *t) +{ + int i, j; + double tincr = 1.0 / sample_rate, *dstp = dst; + const double c = 2 * M_PI * 440.0; + + /* generate sin tone with 440Hz frequency and duplicated channels */ + for (i = 0; i < nb_samples; i++) { + *dstp = sin(c * *t); + for (j = 1; j < nb_channels; j++) + dstp[j] = dstp[0]; + dstp += nb_channels; + *t += tincr; + } +} + +int main(int argc, char **argv) +{ + int64_t src_ch_layout = AV_CH_LAYOUT_STEREO, dst_ch_layout = AV_CH_LAYOUT_SURROUND; + int src_rate = 48000, dst_rate = 44100; + uint8_t **src_data = NULL, **dst_data = NULL; + int src_nb_channels = 0, dst_nb_channels = 0; + int src_linesize, dst_linesize; + int src_nb_samples = 1024, dst_nb_samples, max_dst_nb_samples; + enum AVSampleFormat src_sample_fmt = AV_SAMPLE_FMT_DBL, dst_sample_fmt = AV_SAMPLE_FMT_S16; + const char *dst_filename = NULL; + FILE *dst_file; + int dst_bufsize; + const char *fmt; + struct SwrContext *swr_ctx; + double t; + int ret; + + if (argc != 
2) { + fprintf(stderr, "Usage: %s output_file\n" + "API example program to show how to resample an audio stream with libswresample.\n" + "This program generates a series of audio frames, resamples them to a specified " + "output format and rate and saves them to an output file named output_file.\n", + argv[0]); + exit(1); + } + dst_filename = argv[1]; + + dst_file = fopen(dst_filename, "wb"); + if (!dst_file) { + fprintf(stderr, "Could not open destination file %s\n", dst_filename); + exit(1); + } + + /* create resampler context */ + swr_ctx = swr_alloc(); + if (!swr_ctx) { + fprintf(stderr, "Could not allocate resampler context\n"); + ret = AVERROR(ENOMEM); + goto end; + } + + /* set options */ + av_opt_set_int(swr_ctx, "in_channel_layout", src_ch_layout, 0); + av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0); + av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0); + + av_opt_set_int(swr_ctx, "out_channel_layout", dst_ch_layout, 0); + av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0); + av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0); + + /* initialize the resampling context */ + if ((ret = swr_init(swr_ctx)) < 0) { + fprintf(stderr, "Failed to initialize the resampling context\n"); + goto end; + } + + /* allocate source and destination samples buffers */ + + src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout); + ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize, src_nb_channels, + src_nb_samples, src_sample_fmt, 0); + if (ret < 0) { + fprintf(stderr, "Could not allocate source samples\n"); + goto end; + } + + /* compute the number of converted samples: buffering is avoided + * ensuring that the output buffer will contain at least all the + * converted input samples */ + max_dst_nb_samples = dst_nb_samples = + av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP); + + /* buffer is going to be directly written to a rawaudio file, no alignment */ + dst_nb_channels = 
av_get_channel_layout_nb_channels(dst_ch_layout); + ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels, + dst_nb_samples, dst_sample_fmt, 0); + if (ret < 0) { + fprintf(stderr, "Could not allocate destination samples\n"); + goto end; + } + + t = 0; + do { + /* generate synthetic audio */ + fill_samples((double *)src_data[0], src_nb_samples, src_nb_channels, src_rate, &t); + + /* compute destination number of samples */ + dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, src_rate) + + src_nb_samples, dst_rate, src_rate, AV_ROUND_UP); + if (dst_nb_samples > max_dst_nb_samples) { + av_free(dst_data[0]); + ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels, + dst_nb_samples, dst_sample_fmt, 1); + if (ret < 0) + break; + max_dst_nb_samples = dst_nb_samples; + } + + /* convert to destination format */ + ret = swr_convert(swr_ctx, dst_data, dst_nb_samples, (const uint8_t **)src_data, src_nb_samples); + if (ret < 0) { + fprintf(stderr, "Error while converting\n"); + goto end; + } + dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels, + ret, dst_sample_fmt, 1); + if (dst_bufsize < 0) { + fprintf(stderr, "Could not get sample buffer size\n"); + goto end; + } + printf("t:%f in:%d out:%d\n", t, src_nb_samples, ret); + fwrite(dst_data[0], 1, dst_bufsize, dst_file); + } while (t < 10); + + if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0) + goto end; + fprintf(stderr, "Resampling succeeded. 
Play the output file with the command:\n" + "ffplay -f %s -channel_layout %"PRId64" -channels %d -ar %d %s\n", + fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename); + +end: + if (dst_file) + fclose(dst_file); + + if (src_data) + av_freep(&src_data[0]); + av_freep(&src_data); + + if (dst_data) + av_freep(&dst_data[0]); + av_freep(&dst_data); + + swr_free(&swr_ctx); + return ret < 0; +} diff --git a/dependencies64/ffmpeg/doc/examples/scaling_video.c b/dependencies64/ffmpeg/doc/examples/scaling_video.c new file mode 100644 index 000000000..fcb98b748 --- /dev/null +++ b/dependencies64/ffmpeg/doc/examples/scaling_video.c @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2012 Stefano Sabatini + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +/** + * @file + * libswscale API use example. 
+ * @example scaling_video.c + */ + +#include +#include +#include + +static void fill_yuv_image(uint8_t *data[4], int linesize[4], + int width, int height, int frame_index) +{ + int x, y; + + /* Y */ + for (y = 0; y < height; y++) + for (x = 0; x < width; x++) + data[0][y * linesize[0] + x] = x + y + frame_index * 3; + + /* Cb and Cr */ + for (y = 0; y < height / 2; y++) { + for (x = 0; x < width / 2; x++) { + data[1][y * linesize[1] + x] = 128 + y + frame_index * 2; + data[2][y * linesize[2] + x] = 64 + x + frame_index * 5; + } + } +} + +int main(int argc, char **argv) +{ + uint8_t *src_data[4], *dst_data[4]; + int src_linesize[4], dst_linesize[4]; + int src_w = 320, src_h = 240, dst_w, dst_h; + enum AVPixelFormat src_pix_fmt = AV_PIX_FMT_YUV420P, dst_pix_fmt = AV_PIX_FMT_RGB24; + const char *dst_size = NULL; + const char *dst_filename = NULL; + FILE *dst_file; + int dst_bufsize; + struct SwsContext *sws_ctx; + int i, ret; + + if (argc != 3) { + fprintf(stderr, "Usage: %s output_file output_size\n" + "API example program to show how to scale an image with libswscale.\n" + "This program generates a series of pictures, rescales them to the given " + "output_size and saves them to an output file named output_file\n." 
+ "\n", argv[0]); + exit(1); + } + dst_filename = argv[1]; + dst_size = argv[2]; + + if (av_parse_video_size(&dst_w, &dst_h, dst_size) < 0) { + fprintf(stderr, + "Invalid size '%s', must be in the form WxH or a valid size abbreviation\n", + dst_size); + exit(1); + } + + dst_file = fopen(dst_filename, "wb"); + if (!dst_file) { + fprintf(stderr, "Could not open destination file %s\n", dst_filename); + exit(1); + } + + /* create scaling context */ + sws_ctx = sws_getContext(src_w, src_h, src_pix_fmt, + dst_w, dst_h, dst_pix_fmt, + SWS_BILINEAR, NULL, NULL, NULL); + if (!sws_ctx) { + fprintf(stderr, + "Impossible to create scale context for the conversion " + "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n", + av_get_pix_fmt_name(src_pix_fmt), src_w, src_h, + av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h); + ret = AVERROR(EINVAL); + goto end; + } + + /* allocate source and destination image buffers */ + if ((ret = av_image_alloc(src_data, src_linesize, + src_w, src_h, src_pix_fmt, 16)) < 0) { + fprintf(stderr, "Could not allocate source image\n"); + goto end; + } + + /* buffer is going to be written to rawvideo file, no alignment */ + if ((ret = av_image_alloc(dst_data, dst_linesize, + dst_w, dst_h, dst_pix_fmt, 1)) < 0) { + fprintf(stderr, "Could not allocate destination image\n"); + goto end; + } + dst_bufsize = ret; + + for (i = 0; i < 100; i++) { + /* generate synthetic video */ + fill_yuv_image(src_data, src_linesize, src_w, src_h, i); + + /* convert to destination format */ + sws_scale(sws_ctx, (const uint8_t * const*)src_data, + src_linesize, 0, src_h, dst_data, dst_linesize); + + /* write scaled image to file */ + fwrite(dst_data[0], 1, dst_bufsize, dst_file); + } + + fprintf(stderr, "Scaling succeeded. 
Play the output file with the command:\n" + "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n", + av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h, dst_filename); + +end: + if (dst_file) + fclose(dst_file); + av_freep(&src_data[0]); + av_freep(&dst_data[0]); + sws_freeContext(sws_ctx); + return ret < 0; +} diff --git a/dependencies64/ffmpeg/doc/examples/transcode_aac.c b/dependencies64/ffmpeg/doc/examples/transcode_aac.c new file mode 100644 index 000000000..bf0128f68 --- /dev/null +++ b/dependencies64/ffmpeg/doc/examples/transcode_aac.c @@ -0,0 +1,755 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * simple audio converter + * + * @example transcode_aac.c + * Convert an input audio file to AAC in an MP4 container using FFmpeg. 
+ * @author Andreas Unterweger (dustsigns@gmail.com) + */ + +#include + +#include "libavformat/avformat.h" +#include "libavformat/avio.h" + +#include "libavcodec/avcodec.h" + +#include "libavutil/audio_fifo.h" +#include "libavutil/avassert.h" +#include "libavutil/avstring.h" +#include "libavutil/frame.h" +#include "libavutil/opt.h" + +#include "libswresample/swresample.h" + +/** The output bit rate in kbit/s */ +#define OUTPUT_BIT_RATE 48000 +/** The number of output channels */ +#define OUTPUT_CHANNELS 2 +/** The audio sample output format */ +#define OUTPUT_SAMPLE_FORMAT AV_SAMPLE_FMT_S16 + +/** + * Convert an error code into a text message. + * @param error Error code to be converted + * @return Corresponding error text (not thread-safe) + */ +static char *const get_error_text(const int error) +{ + static char error_buffer[255]; + av_strerror(error, error_buffer, sizeof(error_buffer)); + return error_buffer; +} + +/** Open an input file and the required decoder. */ +static int open_input_file(const char *filename, + AVFormatContext **input_format_context, + AVCodecContext **input_codec_context) +{ + AVCodec *input_codec; + int error; + + /** Open the input file to read from it. */ + if ((error = avformat_open_input(input_format_context, filename, NULL, + NULL)) < 0) { + fprintf(stderr, "Could not open input file '%s' (error '%s')\n", + filename, get_error_text(error)); + *input_format_context = NULL; + return error; + } + + /** Get information on the input file (number of streams etc.). */ + if ((error = avformat_find_stream_info(*input_format_context, NULL)) < 0) { + fprintf(stderr, "Could not open find stream info (error '%s')\n", + get_error_text(error)); + avformat_close_input(input_format_context); + return error; + } + + /** Make sure that there is only one stream in the input file. 
*/ + if ((*input_format_context)->nb_streams != 1) { + fprintf(stderr, "Expected one audio input stream, but found %d\n", + (*input_format_context)->nb_streams); + avformat_close_input(input_format_context); + return AVERROR_EXIT; + } + + /** Find a decoder for the audio stream. */ + if (!(input_codec = avcodec_find_decoder((*input_format_context)->streams[0]->codec->codec_id))) { + fprintf(stderr, "Could not find input codec\n"); + avformat_close_input(input_format_context); + return AVERROR_EXIT; + } + + /** Open the decoder for the audio stream to use it later. */ + if ((error = avcodec_open2((*input_format_context)->streams[0]->codec, + input_codec, NULL)) < 0) { + fprintf(stderr, "Could not open input codec (error '%s')\n", + get_error_text(error)); + avformat_close_input(input_format_context); + return error; + } + + /** Save the decoder context for easier access later. */ + *input_codec_context = (*input_format_context)->streams[0]->codec; + + return 0; +} + +/** + * Open an output file and the required encoder. + * Also set some basic encoder parameters. + * Some of these parameters are based on the input file's parameters. + */ +static int open_output_file(const char *filename, + AVCodecContext *input_codec_context, + AVFormatContext **output_format_context, + AVCodecContext **output_codec_context) +{ + AVIOContext *output_io_context = NULL; + AVStream *stream = NULL; + AVCodec *output_codec = NULL; + int error; + + /** Open the output file to write to it. */ + if ((error = avio_open(&output_io_context, filename, + AVIO_FLAG_WRITE)) < 0) { + fprintf(stderr, "Could not open output file '%s' (error '%s')\n", + filename, get_error_text(error)); + return error; + } + + /** Create a new format context for the output container format. 
*/ + if (!(*output_format_context = avformat_alloc_context())) { + fprintf(stderr, "Could not allocate output format context\n"); + return AVERROR(ENOMEM); + } + + /** Associate the output file (pointer) with the container format context. */ + (*output_format_context)->pb = output_io_context; + + /** Guess the desired container format based on the file extension. */ + if (!((*output_format_context)->oformat = av_guess_format(NULL, filename, + NULL))) { + fprintf(stderr, "Could not find output file format\n"); + goto cleanup; + } + + av_strlcpy((*output_format_context)->filename, filename, + sizeof((*output_format_context)->filename)); + + /** Find the encoder to be used by its name. */ + if (!(output_codec = avcodec_find_encoder(AV_CODEC_ID_AAC))) { + fprintf(stderr, "Could not find an AAC encoder.\n"); + goto cleanup; + } + + /** Create a new audio stream in the output file container. */ + if (!(stream = avformat_new_stream(*output_format_context, output_codec))) { + fprintf(stderr, "Could not create new stream\n"); + error = AVERROR(ENOMEM); + goto cleanup; + } + + /** Save the encoder context for easiert access later. */ + *output_codec_context = stream->codec; + + /** + * Set the basic encoder parameters. + * The input file's sample rate is used to avoid a sample rate conversion. + */ + (*output_codec_context)->channels = OUTPUT_CHANNELS; + (*output_codec_context)->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS); + (*output_codec_context)->sample_rate = input_codec_context->sample_rate; + (*output_codec_context)->sample_fmt = AV_SAMPLE_FMT_S16; + (*output_codec_context)->bit_rate = OUTPUT_BIT_RATE; + + /** + * Some container formats (like MP4) require global headers to be present + * Mark the encoder so that it behaves accordingly. + */ + if ((*output_format_context)->oformat->flags & AVFMT_GLOBALHEADER) + (*output_codec_context)->flags |= CODEC_FLAG_GLOBAL_HEADER; + + /** Open the encoder for the audio stream to use it later. 
*/ + if ((error = avcodec_open2(*output_codec_context, output_codec, NULL)) < 0) { + fprintf(stderr, "Could not open output codec (error '%s')\n", + get_error_text(error)); + goto cleanup; + } + + return 0; + +cleanup: + avio_close((*output_format_context)->pb); + avformat_free_context(*output_format_context); + *output_format_context = NULL; + return error < 0 ? error : AVERROR_EXIT; +} + +/** Initialize one data packet for reading or writing. */ +static void init_packet(AVPacket *packet) +{ + av_init_packet(packet); + /** Set the packet data and size so that it is recognized as being empty. */ + packet->data = NULL; + packet->size = 0; +} + +/** Initialize one audio frame for reading from the input file */ +static int init_input_frame(AVFrame **frame) +{ + if (!(*frame = av_frame_alloc())) { + fprintf(stderr, "Could not allocate input frame\n"); + return AVERROR(ENOMEM); + } + return 0; +} + +/** + * Initialize the audio resampler based on the input and output codec settings. + * If the input and output sample formats differ, a conversion is required + * libswresample takes care of this, but requires initialization. + */ +static int init_resampler(AVCodecContext *input_codec_context, + AVCodecContext *output_codec_context, + SwrContext **resample_context) +{ + int error; + + /** + * Create a resampler context for the conversion. + * Set the conversion parameters. + * Default channel layouts based on the number of channels + * are assumed for simplicity (they are sometimes not detected + * properly by the demuxer and/or decoder). 
+ */ + *resample_context = swr_alloc_set_opts(NULL, + av_get_default_channel_layout(output_codec_context->channels), + output_codec_context->sample_fmt, + output_codec_context->sample_rate, + av_get_default_channel_layout(input_codec_context->channels), + input_codec_context->sample_fmt, + input_codec_context->sample_rate, + 0, NULL); + if (!*resample_context) { + fprintf(stderr, "Could not allocate resample context\n"); + return AVERROR(ENOMEM); + } + /** + * Perform a sanity check so that the number of converted samples is + * not greater than the number of samples to be converted. + * If the sample rates differ, this case has to be handled differently + */ + av_assert0(output_codec_context->sample_rate == input_codec_context->sample_rate); + + /** Open the resampler with the specified parameters. */ + if ((error = swr_init(*resample_context)) < 0) { + fprintf(stderr, "Could not open resample context\n"); + swr_free(resample_context); + return error; + } + return 0; +} + +/** Initialize a FIFO buffer for the audio samples to be encoded. */ +static int init_fifo(AVAudioFifo **fifo) +{ + /** Create the FIFO buffer based on the specified output sample format. */ + if (!(*fifo = av_audio_fifo_alloc(OUTPUT_SAMPLE_FORMAT, OUTPUT_CHANNELS, 1))) { + fprintf(stderr, "Could not allocate FIFO\n"); + return AVERROR(ENOMEM); + } + return 0; +} + +/** Write the header of the output file container. */ +static int write_output_file_header(AVFormatContext *output_format_context) +{ + int error; + if ((error = avformat_write_header(output_format_context, NULL)) < 0) { + fprintf(stderr, "Could not write output file header (error '%s')\n", + get_error_text(error)); + return error; + } + return 0; +} + +/** Decode one audio frame from the input file. */ +static int decode_audio_frame(AVFrame *frame, + AVFormatContext *input_format_context, + AVCodecContext *input_codec_context, + int *data_present, int *finished) +{ + /** Packet used for temporary storage. 
*/ + AVPacket input_packet; + int error; + init_packet(&input_packet); + + /** Read one audio frame from the input file into a temporary packet. */ + if ((error = av_read_frame(input_format_context, &input_packet)) < 0) { + /** If we are the the end of the file, flush the decoder below. */ + if (error == AVERROR_EOF) + *finished = 1; + else { + fprintf(stderr, "Could not read frame (error '%s')\n", + get_error_text(error)); + return error; + } + } + + /** + * Decode the audio frame stored in the temporary packet. + * The input audio stream decoder is used to do this. + * If we are at the end of the file, pass an empty packet to the decoder + * to flush it. + */ + if ((error = avcodec_decode_audio4(input_codec_context, frame, + data_present, &input_packet)) < 0) { + fprintf(stderr, "Could not decode frame (error '%s')\n", + get_error_text(error)); + av_free_packet(&input_packet); + return error; + } + + /** + * If the decoder has not been flushed completely, we are not finished, + * so that this function has to be called again. + */ + if (*finished && *data_present) + *finished = 0; + av_free_packet(&input_packet); + return 0; +} + +/** + * Initialize a temporary storage for the specified number of audio samples. + * The conversion requires temporary storage due to the different format. + * The number of audio samples to be allocated is specified in frame_size. + */ +static int init_converted_samples(uint8_t ***converted_input_samples, + AVCodecContext *output_codec_context, + int frame_size) +{ + int error; + + /** + * Allocate as many pointers as there are audio channels. + * Each pointer will later point to the audio samples of the corresponding + * channels (although it may be NULL for interleaved formats). 
+ */ + if (!(*converted_input_samples = calloc(output_codec_context->channels, + sizeof(**converted_input_samples)))) { + fprintf(stderr, "Could not allocate converted input sample pointers\n"); + return AVERROR(ENOMEM); + } + + /** + * Allocate memory for the samples of all channels in one consecutive + * block for convenience. + */ + if ((error = av_samples_alloc(*converted_input_samples, NULL, + output_codec_context->channels, + frame_size, + output_codec_context->sample_fmt, 0)) < 0) { + fprintf(stderr, + "Could not allocate converted input samples (error '%s')\n", + get_error_text(error)); + av_freep(&(*converted_input_samples)[0]); + free(*converted_input_samples); + return error; + } + return 0; +} + +/** + * Convert the input audio samples into the output sample format. + * The conversion happens on a per-frame basis, the size of which is specified + * by frame_size. + */ +static int convert_samples(const uint8_t **input_data, + uint8_t **converted_data, const int frame_size, + SwrContext *resample_context) +{ + int error; + + /** Convert the samples using the resampler. */ + if ((error = swr_convert(resample_context, + converted_data, frame_size, + input_data , frame_size)) < 0) { + fprintf(stderr, "Could not convert input samples (error '%s')\n", + get_error_text(error)); + return error; + } + + return 0; +} + +/** Add converted input audio samples to the FIFO buffer for later processing. */ +static int add_samples_to_fifo(AVAudioFifo *fifo, + uint8_t **converted_input_samples, + const int frame_size) +{ + int error; + + /** + * Make the FIFO as large as it needs to be to hold both, + * the old and the new samples. + */ + if ((error = av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + frame_size)) < 0) { + fprintf(stderr, "Could not reallocate FIFO\n"); + return error; + } + + /** Store the new samples in the FIFO buffer. 
*/ + if (av_audio_fifo_write(fifo, (void **)converted_input_samples, + frame_size) < frame_size) { + fprintf(stderr, "Could not write data to FIFO\n"); + return AVERROR_EXIT; + } + return 0; +} + +/** + * Read one audio frame from the input file, decodes, converts and stores + * it in the FIFO buffer. + */ +static int read_decode_convert_and_store(AVAudioFifo *fifo, + AVFormatContext *input_format_context, + AVCodecContext *input_codec_context, + AVCodecContext *output_codec_context, + SwrContext *resampler_context, + int *finished) +{ + /** Temporary storage of the input samples of the frame read from the file. */ + AVFrame *input_frame = NULL; + /** Temporary storage for the converted input samples. */ + uint8_t **converted_input_samples = NULL; + int data_present; + int ret = AVERROR_EXIT; + + /** Initialize temporary storage for one input frame. */ + if (init_input_frame(&input_frame)) + goto cleanup; + /** Decode one frame worth of audio samples. */ + if (decode_audio_frame(input_frame, input_format_context, + input_codec_context, &data_present, finished)) + goto cleanup; + /** + * If we are at the end of the file and there are no more samples + * in the decoder which are delayed, we are actually finished. + * This must not be treated as an error. + */ + if (*finished && !data_present) { + ret = 0; + goto cleanup; + } + /** If there is decoded data, convert and store it */ + if (data_present) { + /** Initialize the temporary storage for the converted input samples. */ + if (init_converted_samples(&converted_input_samples, output_codec_context, + input_frame->nb_samples)) + goto cleanup; + + /** + * Convert the input samples to the desired output sample format. + * This requires a temporary storage provided by converted_input_samples. 
+ */ + if (convert_samples((const uint8_t**)input_frame->extended_data, converted_input_samples, + input_frame->nb_samples, resampler_context)) + goto cleanup; + + /** Add the converted input samples to the FIFO buffer for later processing. */ + if (add_samples_to_fifo(fifo, converted_input_samples, + input_frame->nb_samples)) + goto cleanup; + ret = 0; + } + ret = 0; + +cleanup: + if (converted_input_samples) { + av_freep(&converted_input_samples[0]); + free(converted_input_samples); + } + av_frame_free(&input_frame); + + return ret; +} + +/** + * Initialize one input frame for writing to the output file. + * The frame will be exactly frame_size samples large. + */ +static int init_output_frame(AVFrame **frame, + AVCodecContext *output_codec_context, + int frame_size) +{ + int error; + + /** Create a new frame to store the audio samples. */ + if (!(*frame = av_frame_alloc())) { + fprintf(stderr, "Could not allocate output frame\n"); + return AVERROR_EXIT; + } + + /** + * Set the frame's parameters, especially its size and format. + * av_frame_get_buffer needs this to allocate memory for the + * audio samples of the frame. + * Default channel layouts based on the number of channels + * are assumed for simplicity. + */ + (*frame)->nb_samples = frame_size; + (*frame)->channel_layout = output_codec_context->channel_layout; + (*frame)->format = output_codec_context->sample_fmt; + (*frame)->sample_rate = output_codec_context->sample_rate; + + /** + * Allocate the samples of the created frame. This call will make + * sure that the audio frame can hold as many samples as specified. + */ + if ((error = av_frame_get_buffer(*frame, 0)) < 0) { + fprintf(stderr, "Could allocate output frame samples (error '%s')\n", + get_error_text(error)); + av_frame_free(frame); + return error; + } + + return 0; +} + +/** Encode one frame worth of audio to the output file. 
*/ +static int encode_audio_frame(AVFrame *frame, + AVFormatContext *output_format_context, + AVCodecContext *output_codec_context, + int *data_present) +{ + /** Packet used for temporary storage. */ + AVPacket output_packet; + int error; + init_packet(&output_packet); + + /** + * Encode the audio frame and store it in the temporary packet. + * The output audio stream encoder is used to do this. + */ + if ((error = avcodec_encode_audio2(output_codec_context, &output_packet, + frame, data_present)) < 0) { + fprintf(stderr, "Could not encode frame (error '%s')\n", + get_error_text(error)); + av_free_packet(&output_packet); + return error; + } + + /** Write one audio frame from the temporary packet to the output file. */ + if (*data_present) { + if ((error = av_write_frame(output_format_context, &output_packet)) < 0) { + fprintf(stderr, "Could not write frame (error '%s')\n", + get_error_text(error)); + av_free_packet(&output_packet); + return error; + } + + av_free_packet(&output_packet); + } + + return 0; +} + +/** + * Load one audio frame from the FIFO buffer, encode and write it to the + * output file. + */ +static int load_encode_and_write(AVAudioFifo *fifo, + AVFormatContext *output_format_context, + AVCodecContext *output_codec_context) +{ + /** Temporary storage of the output samples of the frame written to the file. */ + AVFrame *output_frame; + /** + * Use the maximum number of possible samples per frame. + * If there is less than the maximum possible frame size in the FIFO + * buffer use this number. Otherwise, use the maximum possible frame size + */ + const int frame_size = FFMIN(av_audio_fifo_size(fifo), + output_codec_context->frame_size); + int data_written; + + /** Initialize temporary storage for one output frame. */ + if (init_output_frame(&output_frame, output_codec_context, frame_size)) + return AVERROR_EXIT; + + /** + * Read as many samples from the FIFO buffer as required to fill the frame. + * The samples are stored in the frame temporarily. 
+ */ + if (av_audio_fifo_read(fifo, (void **)output_frame->data, frame_size) < frame_size) { + fprintf(stderr, "Could not read data from FIFO\n"); + av_frame_free(&output_frame); + return AVERROR_EXIT; + } + + /** Encode one frame worth of audio samples. */ + if (encode_audio_frame(output_frame, output_format_context, + output_codec_context, &data_written)) { + av_frame_free(&output_frame); + return AVERROR_EXIT; + } + av_frame_free(&output_frame); + return 0; +} + +/** Write the trailer of the output file container. */ +static int write_output_file_trailer(AVFormatContext *output_format_context) +{ + int error; + if ((error = av_write_trailer(output_format_context)) < 0) { + fprintf(stderr, "Could not write output file trailer (error '%s')\n", + get_error_text(error)); + return error; + } + return 0; +} + +/** Convert an audio file to an AAC file in an MP4 container. */ +int main(int argc, char **argv) +{ + AVFormatContext *input_format_context = NULL, *output_format_context = NULL; + AVCodecContext *input_codec_context = NULL, *output_codec_context = NULL; + SwrContext *resample_context = NULL; + AVAudioFifo *fifo = NULL; + int ret = AVERROR_EXIT; + + if (argc < 3) { + fprintf(stderr, "Usage: %s \n", argv[0]); + exit(1); + } + + /** Register all codecs and formats so that they can be used. */ + av_register_all(); + /** Open the input file for reading. */ + if (open_input_file(argv[1], &input_format_context, + &input_codec_context)) + goto cleanup; + /** Open the output file for writing. */ + if (open_output_file(argv[2], input_codec_context, + &output_format_context, &output_codec_context)) + goto cleanup; + /** Initialize the resampler to be able to convert audio sample formats. */ + if (init_resampler(input_codec_context, output_codec_context, + &resample_context)) + goto cleanup; + /** Initialize the FIFO buffer to store audio samples to be encoded. */ + if (init_fifo(&fifo)) + goto cleanup; + /** Write the header of the output file container. 
*/ + if (write_output_file_header(output_format_context)) + goto cleanup; + + /** + * Loop as long as we have input samples to read or output samples + * to write; abort as soon as we have neither. + */ + while (1) { + /** Use the encoder's desired frame size for processing. */ + const int output_frame_size = output_codec_context->frame_size; + int finished = 0; + + /** + * Make sure that there is one frame worth of samples in the FIFO + * buffer so that the encoder can do its work. + * Since the decoder's and the encoder's frame size may differ, we + * need to FIFO buffer to store as many frames worth of input samples + * that they make up at least one frame worth of output samples. + */ + while (av_audio_fifo_size(fifo) < output_frame_size) { + /** + * Decode one frame worth of audio samples, convert it to the + * output sample format and put it into the FIFO buffer. + */ + if (read_decode_convert_and_store(fifo, input_format_context, + input_codec_context, + output_codec_context, + resample_context, &finished)) + goto cleanup; + + /** + * If we are at the end of the input file, we continue + * encoding the remaining audio samples to the output file. + */ + if (finished) + break; + } + + /** + * If we have enough samples for the encoder, we encode them. + * At the end of the file, we pass the remaining samples to + * the encoder. + */ + while (av_audio_fifo_size(fifo) >= output_frame_size || + (finished && av_audio_fifo_size(fifo) > 0)) + /** + * Take one frame worth of audio samples from the FIFO buffer, + * encode it and write it to the output file. + */ + if (load_encode_and_write(fifo, output_format_context, + output_codec_context)) + goto cleanup; + + /** + * If we are at the end of the input file and have encoded + * all remaining samples, we can exit this loop and finish. + */ + if (finished) { + int data_written; + /** Flush the encoder as it may have delayed frames. 
*/ + do { + if (encode_audio_frame(NULL, output_format_context, + output_codec_context, &data_written)) + goto cleanup; + } while (data_written); + break; + } + } + + /** Write the trailer of the output file container. */ + if (write_output_file_trailer(output_format_context)) + goto cleanup; + ret = 0; + +cleanup: + if (fifo) + av_audio_fifo_free(fifo); + swr_free(&resample_context); + if (output_codec_context) + avcodec_close(output_codec_context); + if (output_format_context) { + avio_close(output_format_context->pb); + avformat_free_context(output_format_context); + } + if (input_codec_context) + avcodec_close(input_codec_context); + if (input_format_context) + avformat_close_input(&input_format_context); + + return ret; +} diff --git a/dependencies64/ffmpeg/doc/faq.html b/dependencies64/ffmpeg/doc/faq.html new file mode 100644 index 000000000..a546811c4 --- /dev/null +++ b/dependencies64/ffmpeg/doc/faq.html @@ -0,0 +1,659 @@ + + + + + +FFmpeg documentation : FFmpeg FAQ: + + + + + + + + + + +
+
+ + +

FFmpeg FAQ

+ + +

Table of Contents

+
+ + +
+ + +

1. General Questions

+ + +

1.1 Why doesn’t FFmpeg support feature [xyz]?

+ +

Because no one has taken on that task yet. FFmpeg development is +driven by the tasks that are important to the individual developers. +If there is a feature that is important to you, the best way to get +it implemented is to undertake the task yourself or sponsor a developer. +

+ +

1.2 FFmpeg does not support codec XXX. Can you include a Windows DLL loader to support it?

+ +

No. Windows DLLs are not portable, bloated and often slow. +Moreover FFmpeg strives to support all codecs natively. +A DLL loader is not conducive to that goal. +

+ +

1.3 I cannot read this file although this format seems to be supported by ffmpeg.

+ +

Even if ffmpeg can read the container format, it may not support all its +codecs. Please consult the supported codec list in the ffmpeg +documentation. +

+ +

1.4 Which codecs are supported by Windows?

+ +

Windows does not support standard formats like MPEG very well, unless you +install some additional codecs. +

+

The following list of video codecs should work on most Windows systems: +

+
msmpeg4v2
+

.avi/.asf +

+
msmpeg4
+

.asf only +

+
wmv1
+

.asf only +

+
wmv2
+

.asf only +

+
mpeg4
+

Only if you have some MPEG-4 codec like ffdshow or Xvid installed. +

+
mpeg1video
+

.mpg only +

+
+

Note, ASF files often have .wmv or .wma extensions in Windows. It should also +be mentioned that Microsoft claims a patent on the ASF format, and may sue +or threaten users who create ASF files with non-Microsoft software. It is +strongly advised to avoid ASF where possible. +

+

The following list of audio codecs should work on most Windows systems: +

+
adpcm_ima_wav
+
adpcm_ms
+
pcm_s16le
+

always +

+
libmp3lame
+

If some MP3 codec like LAME is installed. +

+
+ + + +

2. Compilation

+ + +

2.1 error: can't find a register in class 'GENERAL_REGS' while reloading 'asm'

+ +

This is a bug in gcc. Do not report it to us. Instead, please report it to +the gcc developers. Note that we will not add workarounds for gcc bugs. +

+

Also note that (some of) the gcc developers believe this is not a bug or +not a bug they should fix: +http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11203. +Then again, some of them do not know the difference between an undecidable +problem and an NP-hard problem... +

+ +

2.2 I have installed this library with my distro’s package manager. Why does configure not see it?

+ +

Distributions usually split libraries in several packages. The main package +contains the files necessary to run programs using the library. The +development package contains the files necessary to build programs using the +library. Sometimes, docs and/or data are in a separate package too. +

+

To build FFmpeg, you need to install the development package. It is usually +called ‘libfoo-dev’ or ‘libfoo-devel’. You can remove it after the +build is finished, but be sure to keep the main package. +

+ +

3. Usage

+ + +

3.1 ffmpeg does not work; what is wrong?

+ +

Try a make distclean in the ffmpeg source directory before the build. +If this does not help see +(http://ffmpeg.org/bugreports.html). +

+ +

3.2 How do I encode single pictures into movies?

+ +

First, rename your pictures to follow a numerical sequence. +For example, img1.jpg, img2.jpg, img3.jpg,... +Then you may run: +

+
 
ffmpeg -f image2 -i img%d.jpg /tmp/a.mpg
+
+ +

Notice that ‘%d’ is replaced by the image number. +

+

img%03d.jpg’ means the sequence ‘img001.jpg’, ‘img002.jpg’, etc. +

+

Use the ‘-start_number’ option to declare a starting number for +the sequence. This is useful if your sequence does not start with +‘img001.jpg’ but is still in a numerical order. The following +example will start with ‘img100.jpg’: +

+
 
ffmpeg -f image2 -start_number 100 -i img%d.jpg /tmp/a.mpg
+
+ +

If you have large number of pictures to rename, you can use the +following command to ease the burden. The command, using the bourne +shell syntax, symbolically links all files in the current directory +that match *jpg to the ‘/tmp’ directory in the sequence of +‘img001.jpg’, ‘img002.jpg’ and so on. +

+
 
x=1; for i in *jpg; do counter=$(printf %03d $x); ln -s "$i" /tmp/img"$counter".jpg; x=$(($x+1)); done
+
+ +

If you want to sequence them by oldest modified first, substitute +$(ls -r -t *jpg) in place of *jpg. +

+

Then run: +

+
 
ffmpeg -f image2 -i /tmp/img%03d.jpg /tmp/a.mpg
+
+ +

The same logic is used for any image format that ffmpeg reads. +

+

You can also use cat to pipe images to ffmpeg: +

+
 
cat *.jpg | ffmpeg -f image2pipe -c:v mjpeg -i - output.mpg
+
+ + +

3.3 How do I encode movie to single pictures?

+ +

Use: +

+
 
ffmpeg -i movie.mpg movie%d.jpg
+
+ +

The ‘movie.mpg’ used as input will be converted to +‘movie1.jpg’, ‘movie2.jpg’, etc... +

+

Instead of relying on file format self-recognition, you may also use +

+
-c:v ppm
+
-c:v png
+
-c:v mjpeg
+
+

to force the encoding. +

+

Applying that to the previous example: +

 
ffmpeg -i movie.mpg -f image2 -c:v mjpeg menu%d.jpg
+
+ +

Beware that there is no "jpeg" codec. Use "mjpeg" instead. +

+ +

3.4 Why do I see a slight quality degradation with multithreaded MPEG* encoding?

+ +

For multithreaded MPEG* encoding, the encoded slices must be independent, +otherwise thread n would practically have to wait for n-1 to finish, so it’s +quite logical that there is a small reduction of quality. This is not a bug. +

+ +

3.5 How can I read from the standard input or write to the standard output?

+ +

Use ‘-’ as file name. +

+ +

3.6 -f jpeg doesn’t work.

+ +

Try ’-f image2 test%d.jpg’. +

+ +

3.7 Why can I not change the frame rate?

+ +

Some codecs, like MPEG-1/2, only allow a small number of fixed frame rates. +Choose a different codec with the -c:v command line option. +

+ +

3.8 How do I encode Xvid or DivX video with ffmpeg?

+ +

Both Xvid and DivX (version 4+) are implementations of the ISO MPEG-4 +standard (note that there are many other coding formats that use this +same standard). Thus, use ’-c:v mpeg4’ to encode in these formats. The +default fourcc stored in an MPEG-4-coded file will be ’FMP4’. If you want +a different fourcc, use the ’-vtag’ option. E.g., ’-vtag xvid’ will +force the fourcc ’xvid’ to be stored as the video fourcc rather than the +default. +

+ +

3.9 Which are good parameters for encoding high quality MPEG-4?

+ +

’-mbd rd -flags +mv4+aic -trellis 2 -cmp 2 -subcmp 2 -g 300 -pass 1/2’, +things to try: ’-bf 2’, ’-flags qprd’, ’-flags mv0’, ’-flags skiprd’. +

+ +

3.10 Which are good parameters for encoding high quality MPEG-1/MPEG-2?

+ +

’-mbd rd -trellis 2 -cmp 2 -subcmp 2 -g 100 -pass 1/2’ +but beware the ’-g 100’ might cause problems with some decoders. +Things to try: ’-bf 2’, ’-flags qprd’, ’-flags mv0’, ’-flags skiprd. +

+ +

3.11 Interlaced video looks very bad when encoded with ffmpeg, what is wrong?

+ +

You should use ’-flags +ilme+ildct’ and maybe ’-flags +alt’ for interlaced +material, and try ’-top 0/1’ if the result looks really messed-up. +

+ +

3.12 How can I read DirectShow files?

+ +

If you have built FFmpeg with ./configure --enable-avisynth +(only possible on MinGW/Cygwin platforms), +then you may use any file that DirectShow can read as input. +

+

Just create an "input.avs" text file with this single line ... +

 
DirectShowSource("C:\path to your file\yourfile.asf")
+
+

... and then feed that text file to ffmpeg: +

 
ffmpeg -i input.avs
+
+ +

For ANY other help on AviSynth, please visit the +AviSynth homepage. +

+ +

3.13 How can I join video files?

+ +

To "join" video files is quite ambiguous. The following list explains the +different kinds of "joining" and points out how those are addressed in +FFmpeg. To join video files may mean: +

+
    +
  • +To put them one after the other: this is called to concatenate them +(in short: concat) and is addressed +in this very faq. + +
  • +To put them together in the same file, to let the user choose between the +different versions (example: different audio languages): this is called to +multiplex them together (in short: mux), and is done by simply +invoking ffmpeg with several ‘-i’ options. + +
  • +For audio, to put all channels together in a single stream (example: two +mono streams into one stereo stream): this is sometimes called to +merge them, and can be done using the +amerge filter. + +
  • +For audio, to play one on top of the other: this is called to mix +them, and can be done by first merging them into a single stream and then +using the pan filter to mix +the channels at will. + +
  • +For video, to display both together, side by side or one on top of a part of +the other; it can be done using the +overlay video filter. + +
+ +

+

+

3.14 How can I concatenate video files?

+ +

There are several solutions, depending on the exact circumstances. +

+ +

3.14.1 Concatenating using the concat filter

+ +

FFmpeg has a concat filter designed specifically for that, with examples in the +documentation. This operation is recommended if you need to re-encode. +

+ +

3.14.2 Concatenating using the concat demuxer

+ +

FFmpeg has a concat demuxer which you can use when you want to avoid a re-encode and +your format doesn’t support file level concatenation. +

+ +

3.14.3 Concatenating using the concat protocol (file level)

+ +

FFmpeg has a concat protocol designed specifically for that, with examples in the +documentation. +

+

A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow to concatenate +video by merely concatenating the files containing them. +

+

Hence you may concatenate your multimedia files by first transcoding them to +these privileged formats, then using the humble cat command (or the +equally humble copy under Windows), and finally transcoding back to your +format of choice. +

+
 
ffmpeg -i input1.avi -qscale:v 1 intermediate1.mpg
+ffmpeg -i input2.avi -qscale:v 1 intermediate2.mpg
+cat intermediate1.mpg intermediate2.mpg > intermediate_all.mpg
+ffmpeg -i intermediate_all.mpg -qscale:v 2 output.avi
+
+ +

Additionally, you can use the concat protocol instead of cat or +copy which will avoid creation of a potentially huge intermediate file. +

+
 
ffmpeg -i input1.avi -qscale:v 1 intermediate1.mpg
+ffmpeg -i input2.avi -qscale:v 1 intermediate2.mpg
+ffmpeg -i concat:"intermediate1.mpg|intermediate2.mpg" -c copy intermediate_all.mpg
+ffmpeg -i intermediate_all.mpg -qscale:v 2 output.avi
+
+ +

Note that you may need to escape the character "|" which is special for many +shells. +

+

Another option is usage of named pipes, should your platform support it: +

+
 
mkfifo intermediate1.mpg
+mkfifo intermediate2.mpg
+ffmpeg -i input1.avi -qscale:v 1 -y intermediate1.mpg < /dev/null &
+ffmpeg -i input2.avi -qscale:v 1 -y intermediate2.mpg < /dev/null &
+cat intermediate1.mpg intermediate2.mpg |\
+ffmpeg -f mpeg -i - -c:v mpeg4 -acodec libmp3lame output.avi
+
+ + +

3.14.4 Concatenating using raw audio and video

+ +

Similarly, the yuv4mpegpipe format, and the raw video, raw audio codecs also +allow concatenation, and the transcoding step is almost lossless. +When using multiple yuv4mpegpipe(s), the first line needs to be discarded +from all but the first stream. This can be accomplished by piping through +tail as seen below. Note that when piping through tail you +must use command grouping, { ;}, to background properly. +

+

For example, let’s say we want to concatenate two FLV files into an +output.flv file: +

+
 
mkfifo temp1.a
+mkfifo temp1.v
+mkfifo temp2.a
+mkfifo temp2.v
+mkfifo all.a
+mkfifo all.v
+ffmpeg -i input1.flv -vn -f u16le -acodec pcm_s16le -ac 2 -ar 44100 - > temp1.a < /dev/null &
+ffmpeg -i input2.flv -vn -f u16le -acodec pcm_s16le -ac 2 -ar 44100 - > temp2.a < /dev/null &
+ffmpeg -i input1.flv -an -f yuv4mpegpipe - > temp1.v < /dev/null &
+{ ffmpeg -i input2.flv -an -f yuv4mpegpipe - < /dev/null | tail -n +2 > temp2.v ; } &
+cat temp1.a temp2.a > all.a &
+cat temp1.v temp2.v > all.v &
+ffmpeg -f u16le -acodec pcm_s16le -ac 2 -ar 44100 -i all.a \
+       -f yuv4mpegpipe -i all.v \
+       -y output.flv
+rm temp[12].[av] all.[av]
+
+ + +

3.15 Using ‘-f lavfi’, audio becomes mono for no apparent reason.

+ +

Use ‘-dumpgraph -’ to find out exactly where the channel layout is +lost. +

+

Most likely, it is through auto-inserted aresample. Try to understand +why the converting filter was needed at that place. +

+

Just before the output is a likely place, as ‘-f lavfi’ currently +only support packed S16. +

+

Then insert the correct aformat explicitly in the filtergraph, +specifying the exact format. +

+
 
aformat=sample_fmts=s16:channel_layouts=stereo
+
+ + +

3.16 Why does FFmpeg not see the subtitles in my VOB file?

+ +

VOB and a few other formats do not have a global header that describes +everything present in the file. Instead, applications are supposed to scan +the file to see what it contains. Since VOB files are frequently large, only +the beginning is scanned. If the subtitles happen only later in the file, +they will not be initally detected. +

+

Some applications, including the ffmpeg command-line tool, can only +work with streams that were detected during the initial scan; streams that +are detected later are ignored. +

+

The size of the initial scan is controlled by two options: probesize +(default ~5 Mo) and analyzeduration (default 5,000,000 µs = 5 s). For +the subtitle stream to be detected, both values must be large enough. +

+ +

3.17 Why was the ffmpeg-sameq’ option removed? What to use instead?

+ +

The ‘-sameq’ option meant "same quantizer", and made sense only in a +very limited set of cases. Unfortunately, a lot of people mistook it for +"same quality" and used it in places where it did not make sense: it had +roughly the expected visible effect, but achieved it in a very inefficient +way. +

+

Each encoder has its own set of options to set the quality-vs-size balance, +use the options for the encoder you are using to set the quality level to a +point acceptable for your tastes. The most common options to do that are +‘-qscale’ and ‘-qmax’, but you should peruse the documentation +of the encoder you chose. +

+ +

4. Development

+ + +

4.1 Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat?

+ +

Yes. Check the ‘doc/examples’ directory in the source +repository, also available online at: +https://github.com/FFmpeg/FFmpeg/tree/master/doc/examples. +

+

Examples are also installed by default, usually in +$PREFIX/share/ffmpeg/examples. +

+

Also you may read the Developers Guide of the FFmpeg documentation. Alternatively, +examine the source code for one of the many open source projects that +already incorporate FFmpeg at (projects.html). +

+ +

4.2 Can you support my C compiler XXX?

+ +

It depends. If your compiler is C99-compliant, then patches to support +it are likely to be welcome if they do not pollute the source code +with #ifdefs related to the compiler. +

+ +

4.3 Is Microsoft Visual C++ supported?

+ +

Yes. Please see the Microsoft Visual C++ +section in the FFmpeg documentation. +

+ +

4.4 Can you add automake, libtool or autoconf support?

+ +

No. These tools are too bloated and they complicate the build. +

+ +

4.5 Why not rewrite FFmpeg in object-oriented C++?

+ +

FFmpeg is already organized in a highly modular manner and does not need to +be rewritten in a formal object language. Further, many of the developers +favor straight C; it works for them. For more arguments on this matter, +read "Programming Religion". +

+ +

4.6 Why are the ffmpeg programs devoid of debugging symbols?

+ +

The build process creates ffmpeg_g, ffplay_g, etc. which +contain full debug information. Those binaries are stripped to create +ffmpeg, ffplay, etc. If you need the debug information, use +the *_g versions. +

+ +

4.7 I do not like the LGPL, can I contribute code under the GPL instead?

+ +

Yes, as long as the code is optional and can easily and cleanly be placed +under #if CONFIG_GPL without breaking anything. So, for example, a new codec +or filter would be OK under GPL while a bug fix to LGPL code would not. +

+ +

4.8 I’m using FFmpeg from within my C application but the linker complains about missing symbols from the libraries themselves.

+ +

FFmpeg builds static libraries by default. In static libraries, dependencies +are not handled. That has two consequences. First, you must specify the +libraries in dependency order: -lavdevice must come before +-lavformat, -lavutil must come after everything else, etc. +Second, external libraries that are used in FFmpeg have to be specified too. +

+

An easy way to get the full list of required libraries in dependency order +is to use pkg-config. +

+
 
c99 -o program program.c $(pkg-config --cflags --libs libavformat libavcodec)
+
+ +

See ‘doc/example/Makefile’ and ‘doc/example/pc-uninstalled’ for +more details. +

+ +

4.9 I’m using FFmpeg from within my C++ application but the linker complains about missing symbols which seem to be available.

+ +

FFmpeg is a pure C project, so to use the libraries within your C++ application +you need to explicitly state that you are using a C library. You can do this by +encompassing your FFmpeg includes using extern "C". +

+

See http://www.parashift.com/c++-faq-lite/mixing-c-and-cpp.html#faq-32.3 +

+ +

4.10 I’m using libavutil from within my C++ application but the compiler complains about ’UINT64_C’ was not declared in this scope

+ +

FFmpeg is a pure C project using C99 math features, in order to enable C++ +to use them you have to append -D__STDC_CONSTANT_MACROS to your CXXFLAGS +

+ +

4.11 I have a file in memory / a API different from *open/*read/ libc how do I use it with libavformat?

+ +

You have to create a custom AVIOContext using avio_alloc_context, +see ‘libavformat/aviobuf.c’ in FFmpeg and ‘libmpdemux/demux_lavf.c’ in MPlayer or MPlayer2 sources. +

+ +

4.12 Where is the documentation about ffv1, msmpeg4, asv1, 4xm?

+ +

see http://www.ffmpeg.org/~michael/ +

+ +

4.13 How do I feed H.263-RTP (and other codecs in RTP) to libavcodec?

+ +

Even if peculiar since it is network oriented, RTP is a container like any +other. You have to demux RTP before feeding the payload to libavcodec. +In this specific case please look at RFC 4629 to see how it should be done. +

+ +

4.14 AVStream.r_frame_rate is wrong, it is much larger than the frame rate.

+ +

r_frame_rate is NOT the average frame rate, it is the smallest frame rate +that can accurately represent all timestamps. So no, it is not +wrong if it is larger than the average! +For example, if you have mixed 25 and 30 fps content, then r_frame_rate +will be 150 (it is the least common multiple). +If you are looking for the average frame rate, see AVStream.avg_frame_rate. +

+ +

4.15 Why is make fate not running all tests?

+ +

Make sure you have the fate-suite samples and the SAMPLES Make variable +or FATE_SAMPLES environment variable or the --samples +configure option is set to the right path. +

+ +

4.16 Why is make fate not finding the samples?

+ +

Do you happen to have a ~ character in the samples path to indicate a +home directory? The value is used in ways where the shell cannot expand it, +causing FATE to not find files. Just replace ~ by the full path. +

+
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/fate.html b/dependencies64/ffmpeg/doc/fate.html new file mode 100644 index 000000000..1eb50bcb8 --- /dev/null +++ b/dependencies64/ffmpeg/doc/fate.html @@ -0,0 +1,284 @@ + + + + + +FFmpeg documentation : FFmpeg Automated Testing Environment: + + + + + + + + + + +
+
+ + + +

FFmpeg Automated Testing Environment

+ + +

Table of Contents

+ + + +

1. Introduction

+ +

FATE is an extended regression suite on the client-side and a means +for results aggregation and presentation on the server-side. +

+

The first part of this document explains how you can use FATE from +your FFmpeg source directory to test your ffmpeg binary. The second +part describes how you can run FATE to submit the results to FFmpeg’s +FATE server. +

+

In any way you can have a look at the publicly viewable FATE results +by visiting this website: +

+

http://fate.ffmpeg.org/ +

+

This is especially recommended for all people contributing source +code to FFmpeg, as it can be seen if some test on some platform broke +with their recent contribution. This usually happens on the platforms +the developers could not test on. +

+

The second part of this document describes how you can run FATE to +submit your results to FFmpeg’s FATE server. If you want to submit your +results be sure to check that your combination of CPU, OS and compiler +is not already listed on the above mentioned website. +

+

In the third part you can find a comprehensive listing of FATE makefile +targets and variables. +

+ + +

2. Using FATE from your FFmpeg source directory

+ +

If you want to run FATE on your machine you need to have the samples +in place. You can get the samples via the build target fate-rsync. +Use this command from the top-level source directory: +

+
 
make fate-rsync SAMPLES=fate-suite/
+make fate       SAMPLES=fate-suite/
+
+ +

The above commands set the samples location by passing a makefile +variable via command line. It is also possible to set the samples +location at source configuration time by invoking configure with +‘–samples=<path to the samples directory>’. Afterwards you can +invoke the makefile targets without setting the SAMPLES makefile +variable. This is illustrated by the following commands: +

+
 
./configure --samples=fate-suite/
+make fate-rsync
+make fate
+
+ +

Yet another way to tell FATE about the location of the sample +directory is by making sure the environment variable FATE_SAMPLES +contains the path to your samples directory. This can be achieved +by e.g. putting that variable in your shell profile or by setting +it in your interactive session. +

+
 
FATE_SAMPLES=fate-suite/ make fate
+
+ +
+

Do not put a ’~’ character in the samples path to indicate a home +directory. Because of shell nuances, this will cause FATE to fail. +

+

To use a custom wrapper to run the test, pass ‘--target-exec’ to +configure or set the TARGET_EXEC Make variable. +

+ + +

3. Submitting the results to the FFmpeg result aggregation server

+ +

To submit your results to the server you should run fate through the +shell script ‘tests/fate.sh’ from the FFmpeg sources. This script needs +to be invoked with a configuration file as its first argument. +

+
 
tests/fate.sh /path/to/fate_config
+
+ +

A configuration file template with comments describing the individual +configuration variables can be found at ‘doc/fate_config.sh.template’. +

+

The mentioned configuration template is also available here: +

slot=                                    # some unique identifier
+repo=git://source.ffmpeg.org/ffmpeg.git  # the source repository
+samples=                                 # path to samples directory
+workdir=                                 # directory in which to do all the work
+#fate_recv="ssh -T fate@fate.ffmpeg.org" # command to submit report
+comment=                                 # optional description
+build_only=     # set to "yes" for a compile-only instance that skips tests
+
+# the following are optional and map to configure options
+arch=
+cpu=
+cross_prefix=
+as=
+cc=
+ld=
+target_os=
+sysroot=
+target_exec=
+target_path=
+target_samples=
+extra_cflags=
+extra_ldflags=
+extra_libs=
+extra_conf=     # extra configure options not covered above
+
+#make=          # name of GNU make if not 'make'
+makeopts=       # extra options passed to 'make'
+#tar=           # command to create a tar archive from its arguments on stdout,
+                # defaults to 'tar c'
+

+

Create a configuration that suits your needs, based on the configuration +template. The ‘slot’ configuration variable can be any string that is not +yet used, but it is suggested that you name it adhering to the following +pattern <arch>-<os>-<compiler>-<compiler version>. The configuration file +itself will be sourced in a shell script, therefore all shell features may +be used. This enables you to setup the environment as you need it for your +build. +

+

For your first test runs the ‘fate_recv’ variable should be empty or +commented out. This will run everything as normal except that it will omit +the submission of the results to the server. The following files should be +present in $workdir as specified in the configuration file: +

+
    +
  • configure.log +
  • compile.log +
  • test.log +
  • report +
  • version +
+ +

When you have everything working properly you can create an SSH key pair +and send the public key to the FATE server administrator who can be contacted +at the email address fate-admin@ffmpeg.org. +

+

Configure your SSH client to use public key authentication with that key +when connecting to the FATE server. Also do not forget to check the identity +of the server and to accept its host key. This can usually be achieved by +running your SSH client manually and killing it after you accepted the key. +The FATE server’s fingerprint is: +

+
+
RSA
+

d3:f1:83:97:a4:75:2b:a6:fb:d6:e8:aa:81:93:97:51 +

+
ECDSA
+

76:9f:68:32:04:1e:d5:d4:ec:47:3f:dc:fc:18:17:86 +

+
+ +

If you have problems connecting to the FATE server, it may help to try out +the ssh command with one or more ‘-v’ options. You should +get detailed output concerning your SSH configuration and the authentication +process. +

+

The only thing left is to automate the execution of the fate.sh script and +the synchronisation of the samples directory. +

+ + +

4. FATE makefile targets and variables

+ + +

4.1 Makefile targets

+ +
+
fate-rsync
+

Download/synchronize sample files to the configured samples directory. +

+
+
fate-list
+

Will list all fate/regression test targets. +

+
+
fate
+

Run the FATE test suite (requires the fate-suite dataset). +

+
+ + +

4.2 Makefile variables

+ +
+
V
+

Verbosity level, can be set to 0, 1 or 2. +

    +
  • 0: show just the test arguments +
  • 1: show just the command used in the test +
  • 2: show everything +
+ +
+
SAMPLES
+

Specify or override the path to the FATE samples at make time, it has a +meaning only while running the regression tests. +

+
+
THREADS
+

Specify how many threads to use while running regression tests, it is +quite useful to detect thread-related regressions. +

+
+
THREAD_TYPE
+

Specify which threading strategy test, either slice or frame, +by default slice+frame +

+
+
CPUFLAGS
+

Specify CPU flags. +

+
+
TARGET_EXEC
+

Specify or override the wrapper used to run the tests. +The TARGET_EXEC option provides a way to run FATE wrapped in +valgrind, qemu-user or wine or on remote targets +through ssh. +

+
+
GEN
+

Set to 1 to generate the missing or mismatched references. +

+
+ + +

4.3 Examples

+ +
 
make V=1 SAMPLES=/var/fate/samples THREADS=2 CPUFLAGS=mmx fate
+
+
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/ffmpeg-all.html b/dependencies64/ffmpeg/doc/ffmpeg-all.html new file mode 100644 index 000000000..83f8e25b7 --- /dev/null +++ b/dependencies64/ffmpeg/doc/ffmpeg-all.html @@ -0,0 +1,24449 @@ + + + + + +FFmpeg documentation : ffmpeg + + + + + + + + + + +
+
+ + +

ffmpeg Documentation

+ + +

Table of Contents

+
+ + +
+ + +

1. Synopsis

+ +

ffmpeg [global_options] {[input_file_options] -i ‘input_file’} ... {[output_file_options] ‘output_file’} ... +

+ +

2. Description

+ +

ffmpeg is a very fast video and audio converter that can also grab from +a live audio/video source. It can also convert between arbitrary sample +rates and resize video on the fly with a high quality polyphase filter. +

+

ffmpeg reads from an arbitrary number of input "files" (which can be regular +files, pipes, network streams, grabbing devices, etc.), specified by the +-i option, and writes to an arbitrary number of output "files", which are +specified by a plain output filename. Anything found on the command line which +cannot be interpreted as an option is considered to be an output filename. +

+

Each input or output file can, in principle, contain any number of streams of +different types (video/audio/subtitle/attachment/data). The allowed number and/or +types of streams may be limited by the container format. Selecting which +streams from which inputs will go into which output is either done automatically +or with the -map option (see the Stream selection chapter). +

+

To refer to input files in options, you must use their indices (0-based). E.g. +the first input file is 0, the second is 1, etc. Similarly, streams +within a file are referred to by their indices. E.g. 2:3 refers to the +fourth stream in the third input file. Also see the Stream specifiers chapter. +

+

As a general rule, options are applied to the next specified +file. Therefore, order is important, and you can have the same +option on the command line multiple times. Each occurrence is +then applied to the next input or output file. +Exceptions from this rule are the global options (e.g. verbosity level), +which should be specified first. +

+

Do not mix input and output files – first specify all input files, then all +output files. Also do not mix options which belong to different files. All +options apply ONLY to the next input or output file and are reset between files. +

+
    +
  • +To set the video bitrate of the output file to 64 kbit/s: +
     
    ffmpeg -i input.avi -b:v 64k -bufsize 64k output.avi
    +
    + +
  • +To force the frame rate of the output file to 24 fps: +
     
    ffmpeg -i input.avi -r 24 output.avi
    +
    + +
  • +To force the frame rate of the input file (valid for raw formats only) +to 1 fps and the frame rate of the output file to 24 fps: +
     
    ffmpeg -r 1 -i input.m2v -r 24 output.avi
    +
    +
+ +

The format option may be needed for raw input files. +

+ + +

3. Detailed description

+ +

The transcoding process in ffmpeg for each output can be described by +the following diagram: +

+
 
 _______              ______________
+|       |            |              |
+| input |  demuxer   | encoded data |   decoder
+| file  | ---------> | packets      | -----+
+|_______|            |______________|      |
+                                           v
+                                       _________
+                                      |         |
+                                      | decoded |
+                                      | frames  |
+ ________             ______________  |_________|
+|        |           |              |      |
+| output | <-------- | encoded data | <----+
+| file   |   muxer   | packets      |   encoder
+|________|           |______________|
+
+
+
+ +

ffmpeg calls the libavformat library (containing demuxers) to read +input files and get packets containing encoded data from them. When there are +multiple input files, ffmpeg tries to keep them synchronized by +tracking lowest timestamp on any active input stream. +

+

Encoded packets are then passed to the decoder (unless streamcopy is selected +for the stream, see further for a description). The decoder produces +uncompressed frames (raw video/PCM audio/...) which can be processed further by +filtering (see next section). After filtering, the frames are passed to the +encoder, which encodes them and outputs encoded packets. Finally those are +passed to the muxer, which writes the encoded packets to the output file. +

+ +

3.1 Filtering

+

Before encoding, ffmpeg can process raw audio and video frames using +filters from the libavfilter library. Several chained filters form a filter +graph. ffmpeg distinguishes between two types of filtergraphs: +simple and complex. +

+ +

3.1.1 Simple filtergraphs

+

Simple filtergraphs are those that have exactly one input and output, both of +the same type. In the above diagram they can be represented by simply inserting +an additional step between decoding and encoding: +

+
 
 _________               __________              ______________
+|         |  simple     |          |            |              |
+| decoded |  fltrgrph   | filtered |  encoder   | encoded data |
+| frames  | ----------> | frames   | ---------> | packets      |
+|_________|             |__________|            |______________|
+
+
+ +

Simple filtergraphs are configured with the per-stream ‘-filter’ option +(with ‘-vf’ and ‘-af’ aliases for video and audio respectively). +A simple filtergraph for video can look for example like this: +

+
 
 _______        _____________        _______        ________
+|       |      |             |      |       |      |        |
+| input | ---> | deinterlace | ---> | scale | ---> | output |
+|_______|      |_____________|      |_______|      |________|
+
+
+ +

Note that some filters change frame properties but not frame contents. E.g. the +fps filter in the example above changes number of frames, but does not +touch the frame contents. Another example is the setpts filter, which +only sets timestamps and otherwise passes the frames unchanged. +

+ +

3.1.2 Complex filtergraphs

+

Complex filtergraphs are those which cannot be described as simply a linear +processing chain applied to one stream. This is the case, for example, when the graph has +more than one input and/or output, or when output stream type is different from +input. They can be represented with the following diagram: +

+
 
 _________
+|         |
+| input 0 |\                    __________
+|_________| \                  |          |
+             \   _________    /| output 0 |
+              \ |         |  / |__________|
+ _________     \| complex | /
+|         |     |         |/
+| input 1 |---->| filter  |\
+|_________|     |         | \   __________
+               /| graph   |  \ |          |
+              / |         |   \| output 1 |
+ _________   /  |_________|    |__________|
+|         | /
+| input 2 |/
+|_________|
+
+
+ +

Complex filtergraphs are configured with the ‘-filter_complex’ option. +Note that this option is global, since a complex filtergraph, by its nature, +cannot be unambiguously associated with a single stream or file. +

+

The ‘-lavfi’ option is equivalent to ‘-filter_complex’. +

+

A trivial example of a complex filtergraph is the overlay filter, which +has two video inputs and one video output, containing one video overlaid on top +of the other. Its audio counterpart is the amix filter. +

+ +

3.2 Stream copy

+

Stream copy is a mode selected by supplying the copy parameter to the +‘-codec’ option. It makes ffmpeg omit the decoding and encoding +step for the specified stream, so it does only demuxing and muxing. It is useful +for changing the container format or modifying container-level metadata. The +diagram above will, in this case, simplify to this: +

+
 
 _______              ______________            ________
+|       |            |              |          |        |
+| input |  demuxer   | encoded data |  muxer   | output |
+| file  | ---------> | packets      | -------> | file   |
+|_______|            |______________|          |________|
+
+
+ +

Since there is no decoding or encoding, it is very fast and there is no quality +loss. However, it might not work in some cases because of many factors. Applying +filters is obviously also impossible, since filters work on uncompressed data. +

+ + +

4. Stream selection

+ +

By default, ffmpeg includes only one stream of each type (video, audio, subtitle) +present in the input files and adds them to each output file. It picks the +"best" of each based upon the following criteria: for video, it is the stream +with the highest resolution, for audio, it is the stream with the most channels, for +subtitles, it is the first subtitle stream. In the case where several streams of +the same type rate equally, the stream with the lowest index is chosen. +

+

You can disable some of those defaults by using the -vn/-an/-sn options. For +full manual control, use the -map option, which disables the defaults just +described. +

+ + +

5. Options

+ +

All the numerical options, if not specified otherwise, accept a string +representing a number as input, which may be followed by one of the SI +unit prefixes, for example: ’K’, ’M’, or ’G’. +

+

If ’i’ is appended to the SI unit prefix, the complete prefix will be +interpreted as a unit prefix for binary multiplies, which are based on +powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit +prefix multiplies the value by 8. This allows using, for example: +’KB’, ’MiB’, ’G’ and ’B’ as number suffixes. +

+

Options which do not take arguments are boolean options, and set the +corresponding value to true. They can be set to false by prefixing +the option name with "no". For example using "-nofoo" +will set the boolean option with name "foo" to false. +

+

+

+

5.1 Stream specifiers

+

Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers +are used to precisely specify which stream(s) a given option belongs to. +

+

A stream specifier is a string generally appended to the option name and +separated from it by a colon. E.g. -codec:a:1 ac3 contains the +a:1 stream specifier, which matches the second audio stream. Therefore, it +would select the ac3 codec for the second audio stream. +

+

A stream specifier can match several streams, so that the option is applied to all +of them. E.g. the stream specifier in -b:a 128k matches all audio +streams. +

+

An empty stream specifier matches all streams. For example, -codec copy +or -codec: copy would copy all the streams without reencoding. +

+

Possible forms of stream specifiers are: +

+
stream_index
+

Matches the stream with this index. E.g. -threads:1 4 would set the +thread count for the second stream to 4. +

+
stream_type[:stream_index]
+

stream_type is one of following: ’v’ for video, ’a’ for audio, ’s’ for subtitle, +’d’ for data, and ’t’ for attachments. If stream_index is given, then it matches +stream number stream_index of this type. Otherwise, it matches all +streams of this type. +

+
p:program_id[:stream_index]
+

If stream_index is given, then it matches the stream with number stream_index +in the program with the id program_id. Otherwise, it matches all streams in the +program. +

+
#stream_id
+

Matches the stream by a format-specific ID. +

+
+ + +

5.2 Generic options

+ +

These options are shared amongst the ff* tools. +

+
+
-L
+

Show license. +

+
+
-h, -?, -help, --help [arg]
+

Show help. An optional parameter may be specified to print help about a specific +item. If no argument is specified, only basic (non advanced) tool +options are shown. +

+

Possible values of arg are: +

+
long
+

Print advanced tool options in addition to the basic tool options. +

+
+
full
+

Print complete list of options, including shared and private options +for encoders, decoders, demuxers, muxers, filters, etc. +

+
+
decoder=decoder_name
+

Print detailed information about the decoder named decoder_name. Use the +‘-decoders’ option to get a list of all decoders. +

+
+
encoder=encoder_name
+

Print detailed information about the encoder named encoder_name. Use the +‘-encoders’ option to get a list of all encoders. +

+
+
demuxer=demuxer_name
+

Print detailed information about the demuxer named demuxer_name. Use the +‘-formats’ option to get a list of all demuxers and muxers. +

+
+
muxer=muxer_name
+

Print detailed information about the muxer named muxer_name. Use the +‘-formats’ option to get a list of all muxers and demuxers. +

+
+
filter=filter_name
+

Print detailed information about the filter named filter_name. Use the '-filters' option to get a list of all filters.

+
+ +
+
-version
+

Show version. +

+
+
-formats
+

Show available formats. +

+
+
-codecs
+

Show all codecs known to libavcodec. +

+

Note that the term ’codec’ is used throughout this documentation as a shortcut +for what is more correctly called a media bitstream format. +

+
+
-decoders
+

Show available decoders. +

+
+
-encoders
+

Show all available encoders. +

+
+
-bsfs
+

Show available bitstream filters. +

+
+
-protocols
+

Show available protocols. +

+
+
-filters
+

Show available libavfilter filters. +

+
+
-pix_fmts
+

Show available pixel formats. +

+
+
-sample_fmts
+

Show available sample formats. +

+
+
-layouts
+

Show channel names and standard channel layouts. +

+
+
-colors
+

Show recognized color names. +

+
+
-loglevel [repeat+]loglevel | -v [repeat+]loglevel
+

Set the logging level used by the library. +Adding "repeat+" indicates that repeated log output should not be compressed +to the first line and the "Last message repeated n times" line will be +omitted. "repeat" can also be used alone. +If "repeat" is used alone, and with no prior loglevel set, the default +loglevel will be used. If multiple loglevel parameters are given, using +’repeat’ will not change the loglevel. +loglevel is a number or a string containing one of the following values: +

+
quiet
+

Show nothing at all; be silent. +

+
panic
+

Only show fatal errors which could lead the process to crash, such as an assert failure. This is not currently used for anything.

+
fatal
+

Only show fatal errors. These are errors after which the process absolutely cannot continue.

+
error
+

Show all errors, including ones which can be recovered from. +

+
warning
+

Show all warnings and errors. Any message related to possibly +incorrect or unexpected events will be shown. +

+
info
+

Show informative messages during processing. This is in addition to +warnings and errors. This is the default value. +

+
verbose
+

Same as info, except more verbose. +

+
debug
+

Show everything, including debugging information. +

+
+ +

By default the program logs to stderr. If coloring is supported by the terminal, colors are used to mark errors and warnings. Log coloring can be disabled by setting the environment variable AV_LOG_FORCE_NOCOLOR or NO_COLOR, or can be forced by setting the environment variable AV_LOG_FORCE_COLOR. The use of the environment variable NO_COLOR is deprecated and will be dropped in a following FFmpeg version.

+
+
-report
+

Dump full command line and console output to a file named +program-YYYYMMDD-HHMMSS.log in the current +directory. +This file can be useful for bug reports. +It also implies -loglevel verbose. +

+

Setting the environment variable FFREPORT to any value has the +same effect. If the value is a ’:’-separated key=value sequence, these +options will affect the report; options values must be escaped if they +contain special characters or the options delimiter ’:’ (see the +“Quoting and escaping” section in the ffmpeg-utils manual). The +following option is recognized: +

+
file
+

set the file name to use for the report; %p is expanded to the name +of the program, %t is expanded to a timestamp, %% is expanded +to a plain % +

+
+ +

Errors in parsing the environment variable are not fatal, and will not +appear in the report. +

+
+
-hide_banner
+

Suppress printing banner. +

+

All FFmpeg tools will normally show a copyright notice, build options +and library versions. This option can be used to suppress printing +this information. +

+
+
-cpuflags flags (global)
+

Allows setting and clearing cpu flags. This option is intended +for testing. Do not use it unless you know what you’re doing. +

 
ffmpeg -cpuflags -sse+mmx ...
+ffmpeg -cpuflags mmx ...
+ffmpeg -cpuflags 0 ...
+
+

Possible flags for this option are: +

+
x86
+
+
mmx
+
mmxext
+
sse
+
sse2
+
sse2slow
+
sse3
+
sse3slow
+
ssse3
+
atom
+
sse4.1
+
sse4.2
+
avx
+
xop
+
fma4
+
3dnow
+
3dnowext
+
cmov
+
+
+
ARM
+
+
armv5te
+
armv6
+
armv6t2
+
vfp
+
vfpv3
+
neon
+
+
+
PowerPC
+
+
altivec
+
+
+
Specific Processors
+
+
pentium2
+
pentium3
+
pentium4
+
k6
+
k62
+
athlon
+
athlonxp
+
k8
+
+
+
+ +
+
-opencl_bench
+

Benchmark all available OpenCL devices and show the results. This option +is only available when FFmpeg has been compiled with --enable-opencl. +

+
+
-opencl_options options (global)
+

Set OpenCL environment options. This option is only available when +FFmpeg has been compiled with --enable-opencl. +

+

options must be a list of key=value option pairs +separated by ’:’. See the “OpenCL Options” section in the +ffmpeg-utils manual for the list of supported options. +

+
+ + +

5.3 AVOptions

+ +

These options are provided directly by the libavformat, libavdevice and +libavcodec libraries. To see the list of available AVOptions, use the +‘-help’ option. They are separated into two categories: +

+
generic
+

These options can be set for any container, codec or device. Generic options +are listed under AVFormatContext options for containers/devices and under +AVCodecContext options for codecs. +

+
private
+

These options are specific to the given container, device or codec. Private +options are listed under their corresponding containers/devices/codecs. +

+
+ +

For example to write an ID3v2.3 header instead of a default ID3v2.4 to +an MP3 file, use the ‘id3v2_version’ private option of the MP3 +muxer: +

 
ffmpeg -i input.flac -id3v2_version 3 out.mp3
+
+ +

All codec AVOptions are per-stream, and thus a stream specifier +should be attached to them. +

+

Note: the ‘-nooption’ syntax cannot be used for boolean +AVOptions, use ‘-option 0’/‘-option 1’. +

+

Note: the old undocumented way of specifying per-stream AVOptions by +prepending v/a/s to the options name is now obsolete and will be +removed soon. +

+ +

5.4 Main options

+ +
+
-f fmt (input/output)
+

Force input or output file format. The format is normally auto detected for input +files and guessed from the file extension for output files, so this option is not +needed in most cases. +

+
+
-i filename (input)
+

input file name +

+
+
-y (global)
+

Overwrite output files without asking. +

+
+
-n (global)
+

Do not overwrite output files, and exit immediately if a specified +output file already exists. +

+
+
-c[:stream_specifier] codec (input/output,per-stream)
+
-codec[:stream_specifier] codec (input/output,per-stream)
+

Select an encoder (when used before an output file) or a decoder (when used +before an input file) for one or more streams. codec is the name of a +decoder/encoder or a special value copy (output only) to indicate that +the stream is not to be re-encoded. +

+

For example +

 
ffmpeg -i INPUT -map 0 -c:v libx264 -c:a copy OUTPUT
+
+

encodes all video streams with libx264 and copies all audio streams. +

+

For each stream, the last matching c option is applied, so +

 
ffmpeg -i INPUT -map 0 -c copy -c:v:1 libx264 -c:a:137 libvorbis OUTPUT
+
+

will copy all the streams except the second video, which will be encoded with +libx264, and the 138th audio, which will be encoded with libvorbis. +

+
+
-t duration (output)
+

Stop writing the output after its duration reaches duration. +duration may be a number in seconds, or in hh:mm:ss[.xxx] form. +

+

-to and -t are mutually exclusive and -t has priority. +

+
+
-to position (output)
+

Stop writing the output at position. +position may be a number in seconds, or in hh:mm:ss[.xxx] form. +

+

-to and -t are mutually exclusive and -t has priority. +

+
+
-fs limit_size (output)
+

Set the file size limit, expressed in bytes. +

+
+
-ss position (input/output)
+

When used as an input option (before -i), seeks in this input file to position. Note that in most formats it is not possible to seek exactly, so ffmpeg will seek to the closest seek point before position. When transcoding and '-accurate_seek' is enabled (the default), this extra segment between the seek point and position will be decoded and discarded. When doing stream copy or when '-noaccurate_seek' is used, it will be preserved.

+

When used as an output option (before an output filename), decodes but discards +input until the timestamps reach position. +

+

position may be either in seconds or in hh:mm:ss[.xxx] form. +

+
+
-itsoffset offset (input)
+

Set the input time offset. +

+

offset must be a time duration specification, +see (ffmpeg-utils)time duration syntax. +

+

The offset is added to the timestamps of the input files. Specifying +a positive offset means that the corresponding streams are delayed by +the time duration specified in offset. +

+
+
-timestamp date (output)
+

Set the recording timestamp in the container. +

+

date must be a time duration specification, +see (ffmpeg-utils)date syntax. +

+
+
-metadata[:metadata_specifier] key=value (output,per-metadata)
+

Set a metadata key/value pair. +

+

An optional metadata_specifier may be given to set metadata +on streams or chapters. See -map_metadata documentation for +details. +

+

This option overrides metadata set with -map_metadata. It is +also possible to delete metadata by using an empty value. +

+

For example, for setting the title in the output file: +

 
ffmpeg -i in.avi -metadata title="my title" out.flv
+
+ +

To set the language of the first audio stream: +

 
ffmpeg -i INPUT -metadata:s:a:1 language=eng OUTPUT
+
+ +
+
-target type (output)
+

Specify target file type (vcd, svcd, dvd, dv, +dv50). type may be prefixed with pal-, ntsc- or +film- to use the corresponding standard. All the format options +(bitrate, codecs, buffer sizes) are then set automatically. You can just type: +

+
 
ffmpeg -i myfile.avi -target vcd /tmp/vcd.mpg
+
+ +

Nevertheless you can specify additional options as long as you know +they do not conflict with the standard, as in: +

+
 
ffmpeg -i myfile.avi -target vcd -bf 2 /tmp/vcd.mpg
+
+ +
+
-dframes number (output)
+

Set the number of data frames to record. This is an alias for -frames:d. +

+
+
-frames[:stream_specifier] framecount (output,per-stream)
+

Stop writing to the stream after framecount frames. +

+
+
-q[:stream_specifier] q (output,per-stream)
+
-qscale[:stream_specifier] q (output,per-stream)
+

Use fixed quality scale (VBR). The meaning of q/qscale is codec-dependent. If qscale is used without a stream_specifier then it applies only to the video stream. This is to maintain compatibility with previous behavior, and because specifying the same codec-specific value for two different codecs (audio and video) is generally not what is intended when no stream_specifier is used.

+

+

+
-filter[:stream_specifier] filtergraph (output,per-stream)
+

Create the filtergraph specified by filtergraph and use it to +filter the stream. +

+

filtergraph is a description of the filtergraph to apply to +the stream, and must have a single input and a single output of the +same type of the stream. In the filtergraph, the input is associated +to the label in, and the output to the label out. See +the ffmpeg-filters manual for more information about the filtergraph +syntax. +

+

See the -filter_complex option if you +want to create filtergraphs with multiple inputs and/or outputs. +

+
+
-filter_script[:stream_specifier] filename (output,per-stream)
+

This option is similar to ‘-filter’, the only difference is that its +argument is the name of the file from which a filtergraph description is to be +read. +

+
+
-pre[:stream_specifier] preset_name (output,per-stream)
+

Specify the preset for matching stream(s). +

+
+
-stats (global)
+

Print encoding progress/statistics. It is on by default, to explicitly +disable it you need to specify -nostats. +

+
+
-progress url (global)
+

Send program-friendly progress information to url. +

+

Progress information is written approximately every second and at the end of +the encoding process. It is made of "key=value" lines. key +consists of only alphanumeric characters. The last key of a sequence of +progress information is always "progress". +

+
+
-stdin
+

Enable interaction on standard input. On by default unless standard input is +used as an input. To explicitly disable interaction you need to specify +-nostdin. +

+

Disabling interaction on standard input is useful, for example, if +ffmpeg is in the background process group. Roughly the same result can +be achieved with ffmpeg ... < /dev/null but it requires a +shell. +

+
+
-debug_ts (global)
+

Print timestamp information. It is off by default. This option is +mostly useful for testing and debugging purposes, and the output +format may change from one version to another, so it should not be +employed by portable scripts. +

+

See also the option -fdebug ts. +

+
+
-attach filename (output)
+

Add an attachment to the output file. This is supported by a few formats +like Matroska for e.g. fonts used in rendering subtitles. Attachments +are implemented as a specific type of stream, so this option will add +a new stream to the file. It is then possible to use per-stream options +on this stream in the usual way. Attachment streams created with this +option will be created after all the other streams (i.e. those created +with -map or automatic mappings). +

+

Note that for Matroska you also have to set the mimetype metadata tag: +

 
ffmpeg -i INPUT -attach DejaVuSans.ttf -metadata:s:2 mimetype=application/x-truetype-font out.mkv
+
+

(assuming that the attachment stream will be third in the output file). +

+
+
-dump_attachment[:stream_specifier] filename (input,per-stream)
+

Extract the matching attachment stream into a file named filename. If +filename is empty, then the value of the filename metadata tag +will be used. +

+

E.g. to extract the first attachment to a file named ’out.ttf’: +

 
ffmpeg -dump_attachment:t:0 out.ttf -i INPUT
+
+

To extract all attachments to files determined by the filename tag: +

 
ffmpeg -dump_attachment:t "" -i INPUT
+
+ +

Technical note – attachments are implemented as codec extradata, so this +option can actually be used to extract extradata from any stream, not just +attachments. +

+
+
+ + +

5.5 Video Options

+ +
+
-vframes number (output)
+

Set the number of video frames to record. This is an alias for -frames:v. +

+
-r[:stream_specifier] fps (input/output,per-stream)
+

Set frame rate (Hz value, fraction or abbreviation). +

+

As an input option, ignore any timestamps stored in the file and instead +generate timestamps assuming constant frame rate fps. +

+

As an output option, duplicate or drop input frames to achieve constant output +frame rate fps. +

+
+
-s[:stream_specifier] size (input/output,per-stream)
+

Set frame size. +

+

As an input option, this is a shortcut for the ‘video_size’ private +option, recognized by some demuxers for which the frame size is either not +stored in the file or is configurable – e.g. raw video or video grabbers. +

+

As an output option, this inserts the scale video filter to the +end of the corresponding filtergraph. Please use the scale filter +directly to insert it at the beginning or some other place. +

+

The format is ‘wxh’ (default - same as source). +

+
+
-aspect[:stream_specifier] aspect (output,per-stream)
+

Set the video display aspect ratio specified by aspect. +

+

aspect can be a floating point number string, or a string of the +form num:den, where num and den are the +numerator and denominator of the aspect ratio. For example "4:3", +"16:9", "1.3333", and "1.7777" are valid argument values. +

+

If used together with ‘-vcodec copy’, it will affect the aspect ratio +stored at container level, but not the aspect ratio stored in encoded +frames, if it exists. +

+
+
-vn (output)
+

Disable video recording. +

+
+
-vcodec codec (output)
+

Set the video codec. This is an alias for -codec:v. +

+
+
-pass[:stream_specifier] n (output,per-stream)
+

Select the pass number (1 or 2). It is used to do two-pass +video encoding. The statistics of the video are recorded in the first +pass into a log file (see also the option -passlogfile), +and in the second pass that log file is used to generate the video +at the exact requested bitrate. +On pass 1, you may just deactivate audio and set output to null, +examples for Windows and Unix: +

 
ffmpeg -i foo.mov -c:v libxvid -pass 1 -an -f rawvideo -y NUL
+ffmpeg -i foo.mov -c:v libxvid -pass 1 -an -f rawvideo -y /dev/null
+
+ +
+
-passlogfile[:stream_specifier] prefix (output,per-stream)
+

Set two-pass log file name prefix to prefix; the default file name prefix is "ffmpeg2pass". The complete file name will be 'PREFIX-N.log', where N is a number specific to the output stream.

+
+
-vf filtergraph (output)
+

Create the filtergraph specified by filtergraph and use it to +filter the stream. +

+

This is an alias for -filter:v, see the -filter option. +

+
+ + +

5.6 Advanced Video Options

+ +
+
-pix_fmt[:stream_specifier] format (input/output,per-stream)
+

Set pixel format. Use -pix_fmts to show all the supported +pixel formats. +If the selected pixel format can not be selected, ffmpeg will print a +warning and select the best pixel format supported by the encoder. +If pix_fmt is prefixed by a +, ffmpeg will exit with an error +if the requested pixel format can not be selected, and automatic conversions +inside filtergraphs are disabled. +If pix_fmt is a single +, ffmpeg selects the same pixel format +as the input (or graph output) and automatic conversions are disabled. +

+
+
-sws_flags flags (input/output)
+

Set SwScaler flags. +

+
-vdt n
+

Discard threshold. +

+
+
-rc_override[:stream_specifier] override (output,per-stream)
+

Rate control override for specific intervals, formatted as "int,int,int" +list separated with slashes. Two first values are the beginning and +end frame numbers, last one is quantizer to use if positive, or quality +factor if negative. +

+
+
-ilme
+

Force interlacing support in encoder (MPEG-2 and MPEG-4 only). +Use this option if your input file is interlaced and you want +to keep the interlaced format for minimum losses. +The alternative is to deinterlace the input stream with +‘-deinterlace’, but deinterlacing introduces losses. +

+
-psnr
+

Calculate PSNR of compressed frames. +

+
-vstats
+

Dump video coding statistics to ‘vstats_HHMMSS.log’. +

+
-vstats_file file
+

Dump video coding statistics to file. +

+
-top[:stream_specifier] n (output,per-stream)
+

top=1/bottom=0/auto=-1 field first +

+
-dc precision
+

Intra_dc_precision. +

+
-vtag fourcc/tag (output)
+

Force video tag/fourcc. This is an alias for -tag:v. +

+
-qphist (global)
+

Show QP histogram +

+
-vbsf bitstream_filter
+

Deprecated see -bsf +

+
+
-force_key_frames[:stream_specifier] time[,time...] (output,per-stream)
+
-force_key_frames[:stream_specifier] expr:expr (output,per-stream)
+

Force key frames at the specified timestamps, more precisely at the first +frames after each specified time. +

+

If the argument is prefixed with expr:, the string expr +is interpreted like an expression and is evaluated for each frame. A +key frame is forced in case the evaluation is non-zero. +

+

If one of the times is "chapters[delta]", it is expanded into +the time of the beginning of all chapters in the file, shifted by +delta, expressed as a time in seconds. +This option can be useful to ensure that a seek point is present at a +chapter mark or any other designated place in the output file. +

+

For example, to insert a key frame at 5 minutes, plus key frames 0.1 second +before the beginning of every chapter: +

 
-force_key_frames 0:05:00,chapters-0.1
+
+ +

The expression in expr can contain the following constants: +

+
n
+

the number of current processed frame, starting from 0 +

+
n_forced
+

the number of forced frames +

+
prev_forced_n
+

the number of the previous forced frame, it is NAN when no +keyframe was forced yet +

+
prev_forced_t
+

the time of the previous forced frame, it is NAN when no +keyframe was forced yet +

+
t
+

the time of the current processed frame +

+
+ +

For example to force a key frame every 5 seconds, you can specify: +

 
-force_key_frames expr:gte(t,n_forced*5)
+
+ +

To force a key frame 5 seconds after the time of the last forced one, +starting from second 13: +

 
-force_key_frames expr:if(isnan(prev_forced_t),gte(t,13),gte(t,prev_forced_t+5))
+
+ +

Note that forcing too many keyframes is very harmful for the lookahead +algorithms of certain encoders: using fixed-GOP options or similar +would be more efficient. +

+
+
-copyinkf[:stream_specifier] (output,per-stream)
+

When doing stream copy, copy also non-key frames found at the +beginning. +

+
+
-hwaccel[:stream_specifier] hwaccel (input,per-stream)
+

Use hardware acceleration to decode the matching stream(s). The allowed values +of hwaccel are: +

+
none
+

Do not use any hardware acceleration (the default). +

+
+
auto
+

Automatically select the hardware acceleration method. +

+
+
vdpau
+

Use VDPAU (Video Decode and Presentation API for Unix) hardware acceleration. +

+
+ +

This option has no effect if the selected hwaccel is not available or not +supported by the chosen decoder. +

+

Note that most acceleration methods are intended for playback and will not be +faster than software decoding on modern CPUs. Additionally, ffmpeg +will usually need to copy the decoded frames from the GPU memory into the system +memory, resulting in further performance loss. This option is thus mainly +useful for testing. +

+
+
-hwaccel_device[:stream_specifier] hwaccel_device (input,per-stream)
+

Select a device to use for hardware acceleration. +

+

This option only makes sense when the ‘-hwaccel’ option is also +specified. Its exact meaning depends on the specific hardware acceleration +method chosen. +

+
+
vdpau
+

For VDPAU, this option specifies the X11 display/screen to use. If this option +is not specified, the value of the DISPLAY environment variable is used +

+
+
+
+ + +

5.7 Audio Options

+ +
+
-aframes number (output)
+

Set the number of audio frames to record. This is an alias for -frames:a. +

+
-ar[:stream_specifier] freq (input/output,per-stream)
+

Set the audio sampling frequency. For output streams it is set by +default to the frequency of the corresponding input stream. For input +streams this option only makes sense for audio grabbing devices and raw +demuxers and is mapped to the corresponding demuxer options. +

+
-aq q (output)
+

Set the audio quality (codec-specific, VBR). This is an alias for -q:a. +

+
-ac[:stream_specifier] channels (input/output,per-stream)
+

Set the number of audio channels. For output streams it is set by +default to the number of input audio channels. For input streams +this option only makes sense for audio grabbing devices and raw demuxers +and is mapped to the corresponding demuxer options. +

+
-an (output)
+

Disable audio recording. +

+
-acodec codec (input/output)
+

Set the audio codec. This is an alias for -codec:a. +

+
-sample_fmt[:stream_specifier] sample_fmt (output,per-stream)
+

Set the audio sample format. Use -sample_fmts to get a list +of supported sample formats. +

+
+
-af filtergraph (output)
+

Create the filtergraph specified by filtergraph and use it to +filter the stream. +

+

This is an alias for -filter:a, see the -filter option. +

+
+ + +

5.8 Advanced Audio options:

+ +
+
-atag fourcc/tag (output)
+

Force audio tag/fourcc. This is an alias for -tag:a. +

+
-absf bitstream_filter
+

Deprecated, see -bsf +

+
-guess_layout_max channels (input,per-stream)
+

If some input channel layout is not known, try to guess only if it +corresponds to at most the specified number of channels. For example, 2 +tells to ffmpeg to recognize 1 channel as mono and 2 channels as +stereo but not 6 channels as 5.1. The default is to always try to guess. Use +0 to disable all guessing. +

+
+ + +

5.9 Subtitle options:

+ +
+
-scodec codec (input/output)
+

Set the subtitle codec. This is an alias for -codec:s. +

+
-sn (output)
+

Disable subtitle recording. +

+
-sbsf bitstream_filter
+

Deprecated, see -bsf +

+
+ + +

5.10 Advanced Subtitle options:

+ +
+
-fix_sub_duration
+

Fix subtitles durations. For each subtitle, wait for the next packet in the +same stream and adjust the duration of the first to avoid overlap. This is +necessary with some subtitles codecs, especially DVB subtitles, because the +duration in the original packet is only a rough estimate and the end is +actually marked by an empty subtitle frame. Failing to use this option when +necessary can result in exaggerated durations or muxing failures due to +non-monotonic timestamps. +

+

Note that this option will delay the output of all data until the next +subtitle packet is decoded: it may increase memory consumption and latency a +lot. +

+
+
-canvas_size size
+

Set the size of the canvas used to render subtitles. +

+
+
+ + +

5.11 Advanced options

+ +
+
-map [-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]] | [linklabel] (output)
+
+

Designate one or more input streams as a source for the output file. Each input +stream is identified by the input file index input_file_id and +the input stream index input_stream_id within the input +file. Both indices start at 0. If specified, +sync_file_id:stream_specifier sets which input stream +is used as a presentation sync reference. +

+

The first -map option on the command line specifies the +source for output stream 0, the second -map option specifies +the source for output stream 1, etc. +

+

A - character before the stream identifier creates a "negative" mapping. +It disables matching streams from already created mappings. +

+

An alternative [linklabel] form will map outputs from complex filter +graphs (see the ‘-filter_complex’ option) to the output file. +linklabel must correspond to a defined output link label in the graph. +

+

For example, to map ALL streams from the first input file to output +

 
ffmpeg -i INPUT -map 0 output
+
+ +

For example, if you have two audio streams in the first input file, +these streams are identified by "0:0" and "0:1". You can use +-map to select which streams to place in an output file. For +example: +

 
ffmpeg -i INPUT -map 0:1 out.wav
+
+

will map the input stream in ‘INPUT’ identified by "0:1" to +the (single) output stream in ‘out.wav’. +

+

For example, to select the stream with index 2 from input file +‘a.mov’ (specified by the identifier "0:2"), and stream with +index 6 from input ‘b.mov’ (specified by the identifier "1:6"), +and copy them to the output file ‘out.mov’: +

 
ffmpeg -i a.mov -i b.mov -c copy -map 0:2 -map 1:6 out.mov
+
+ +

To select all video and the third audio stream from an input file: +

 
ffmpeg -i INPUT -map 0:v -map 0:a:2 OUTPUT
+
+ +

To map all the streams except the second audio, use negative mappings +

 
ffmpeg -i INPUT -map 0 -map -0:a:1 OUTPUT
+
+ +

Note that using this option disables the default mappings for this output file. +

+
+
-map_channel [input_file_id.stream_specifier.channel_id|-1][:output_file_id.stream_specifier]
+

Map an audio channel from a given input to an output. If +output_file_id.stream_specifier is not set, the audio channel will +be mapped on all the audio streams. +

+

Using "-1" instead of +input_file_id.stream_specifier.channel_id will map a muted +channel. +

+

For example, assuming INPUT is a stereo audio file, you can switch the +two audio channels with the following command: +

 
ffmpeg -i INPUT -map_channel 0.0.1 -map_channel 0.0.0 OUTPUT
+
+ +

If you want to mute the first channel and keep the second: +

 
ffmpeg -i INPUT -map_channel -1 -map_channel 0.0.1 OUTPUT
+
+ +

The order of the "-map_channel" options specifies the order of the channels in the output stream. The output channel layout is guessed from the number of channels mapped (mono if one "-map_channel", stereo if two, etc.). Using "-ac" in combination with "-map_channel" causes the channel gain levels to be updated if input and output channel layouts don't match (for instance two "-map_channel" options and "-ac 6").

+

You can also extract each channel of an input to specific outputs; the following +command extracts two channels of the INPUT audio stream (file 0, stream 0) +to the respective OUTPUT_CH0 and OUTPUT_CH1 outputs: +

 
ffmpeg -i INPUT -map_channel 0.0.0 OUTPUT_CH0 -map_channel 0.0.1 OUTPUT_CH1
+
+ +

The following example splits the channels of a stereo input into two separate +streams, which are put into the same output file: +

 
ffmpeg -i stereo.wav -map 0:0 -map 0:0 -map_channel 0.0.0:0.0 -map_channel 0.0.1:0.1 -y out.ogg
+
+ +

Note that currently each output stream can only contain channels from a single +input stream; you can’t for example use "-map_channel" to pick multiple input +audio channels contained in different streams (from the same or different files) +and merge them into a single output stream. It is therefore not currently +possible, for example, to turn two separate mono streams into a single stereo +stream. However splitting a stereo stream into two single channel mono streams +is possible. +

+

If you need this feature, a possible workaround is to use the amerge +filter. For example, if you need to merge a media (here ‘input.mkv’) with 2 +mono audio streams into one single stereo channel audio stream (and keep the +video stream), you can use the following command: +

 
ffmpeg -i input.mkv -filter_complex "[0:1] [0:2] amerge" -c:a pcm_s16le -c:v copy output.mkv
+
+ +
+
-map_metadata[:metadata_spec_out] infile[:metadata_spec_in] (output,per-metadata)
+

Set metadata information of the next output file from infile. Note that +those are file indices (zero-based), not filenames. +Optional metadata_spec_in/out parameters specify, which metadata to copy. +A metadata specifier can have the following forms: +

+
g
+

global metadata, i.e. metadata that applies to the whole file +

+
+
s[:stream_spec]
+

per-stream metadata. stream_spec is a stream specifier as described +in the Stream specifiers chapter. In an input metadata specifier, the first +matching stream is copied from. In an output metadata specifier, all matching +streams are copied to. +

+
+
c:chapter_index
+

per-chapter metadata. chapter_index is the zero-based chapter index. +

+
+
p:program_index
+

per-program metadata. program_index is the zero-based program index. +

+
+

If metadata specifier is omitted, it defaults to global. +

+

By default, global metadata is copied from the first input file, +per-stream and per-chapter metadata is copied along with streams/chapters. These +default mappings are disabled by creating any mapping of the relevant type. A negative +file index can be used to create a dummy mapping that just disables automatic copying. +

+

For example to copy metadata from the first stream of the input file to global metadata +of the output file: +

 
ffmpeg -i in.ogg -map_metadata 0:s:0 out.mp3
+
+ +

To do the reverse, i.e. copy global metadata to all audio streams: +

 
ffmpeg -i in.mkv -map_metadata:s:a 0:g out.mkv
+
+

Note that simple 0 would work as well in this example, since global +metadata is assumed by default. +

+
+
-map_chapters input_file_index (output)
+

Copy chapters from input file with index input_file_index to the next +output file. If no chapter mapping is specified, then chapters are copied from +the first input file with at least one chapter. Use a negative file index to +disable any chapter copying. +

+
+
-benchmark (global)
+

Show benchmarking information at the end of an encode. +Shows CPU time used and maximum memory consumption. +Maximum memory consumption is not supported on all systems, +it will usually display as 0 if not supported. +

+
-benchmark_all (global)
+

Show benchmarking information during the encode. +Shows CPU time used in various steps (audio/video encode/decode). +

+
-timelimit duration (global)
+

Exit after ffmpeg has been running for duration seconds. +

+
-dump (global)
+

Dump each input packet to stderr. +

+
-hex (global)
+

When dumping packets, also dump the payload. +

+
-re (input)
+

Read input at native frame rate. Mainly used to simulate a grab device, +or live input stream (e.g. when reading from a file). Should not be used +with actual grab devices or live input streams (where it can cause packet +loss). +By default ffmpeg attempts to read the input(s) as fast as possible. +This option will slow down the reading of the input(s) to the native frame rate +of the input(s). It is useful for real-time output (e.g. live streaming). +

+
-loop_input
+

Loop over the input stream. Currently it works only for image +streams. This option is used for automatic FFserver testing. +This option is deprecated, use -loop 1. +

+
-loop_output number_of_times
+

Repeatedly loop output for formats that support looping such as animated GIF +(0 will loop the output infinitely). +This option is deprecated, use -loop. +

+
-vsync parameter
+

Video sync method. +For compatibility reasons old values can be specified as numbers. +Newly added values will have to be specified as strings always. +

+
+
0, passthrough
+

Each frame is passed with its timestamp from the demuxer to the muxer. +

+
1, cfr
+

Frames will be duplicated and dropped to achieve exactly the requested +constant frame rate. +

+
2, vfr
+

Frames are passed through with their timestamp or dropped so as to +prevent 2 frames from having the same timestamp. +

+
drop
+

As passthrough but destroys all timestamps, making the muxer generate +fresh timestamps based on frame-rate. +

+
-1, auto
+

Chooses between 1 and 2 depending on muxer capabilities. This is the +default method. +

+
+ +

Note that the timestamps may be further modified by the muxer, after this. +For example, in the case that the format option ‘avoid_negative_ts’ +is enabled. +

+

With -map you can select from which stream the timestamps should be +taken. You can leave either video or audio unchanged and sync the +remaining stream(s) to the unchanged one. +

+
+
-async samples_per_second
+

Audio sync method. "Stretches/squeezes" the audio stream to match the timestamps, +the parameter is the maximum samples per second by which the audio is changed. +-async 1 is a special case where only the start of the audio stream is corrected +without any later correction. +

+

Note that the timestamps may be further modified by the muxer, after this. +For example, in the case that the format option ‘avoid_negative_ts’ +is enabled. +

+

This option has been deprecated. Use the aresample audio filter instead. +

+
+
-copyts
+

Do not process input timestamps, but keep their values without trying +to sanitize them. In particular, do not remove the initial start time +offset value. +

+

Note that, depending on the ‘vsync’ option or on specific muxer +processing (e.g. in case the format option ‘avoid_negative_ts’ +is enabled) the output timestamps may mismatch with the input +timestamps even when this option is selected. +

+
+
-copytb mode
+

Specify how to set the encoder timebase when stream copying. mode is an +integer numeric value, and can assume one of the following values: +

+
+
1
+

Use the demuxer timebase. +

+

The time base is copied to the output encoder from the corresponding input +demuxer. This is sometimes required to avoid non monotonically increasing +timestamps when copying video streams with variable frame rate. +

+
+
0
+

Use the decoder timebase. +

+

The time base is copied to the output encoder from the corresponding input +decoder. +

+
+
-1
+

Try to make the choice automatically, in order to generate a sane output. +

+
+ +

Default value is -1. +

+
+
-shortest (output)
+

Finish encoding when the shortest input stream ends. +

+
-dts_delta_threshold
+

Timestamp discontinuity delta threshold. +

+
-muxdelay seconds (input)
+

Set the maximum demux-decode delay. +

+
-muxpreload seconds (input)
+

Set the initial demux-decode delay. +

+
-streamid output-stream-index:new-value (output)
+

Assign a new stream-id value to an output stream. This option should be +specified prior to the output filename to which it applies. +For the situation where multiple output files exist, a streamid +may be reassigned to a different value. +

+

For example, to set the stream 0 PID to 33 and the stream 1 PID to 36 for +an output mpegts file: +

 
ffmpeg -i infile -streamid 0:33 -streamid 1:36 out.ts
+
+ +
+
-bsf[:stream_specifier] bitstream_filters (output,per-stream)
+

Set bitstream filters for matching streams. bitstream_filters is +a comma-separated list of bitstream filters. Use the -bsfs option +to get the list of bitstream filters. +

 
ffmpeg -i h264.mp4 -c:v copy -bsf:v h264_mp4toannexb -an out.h264
+
+
 
ffmpeg -i file.mov -an -vn -bsf:s mov2textsub -c:s copy -f rawvideo sub.txt
+
+ +
+
-tag[:stream_specifier] codec_tag (input/output,per-stream)
+

Force a tag/fourcc for matching streams. +

+
+
-timecode hh:mm:ssSEPff
+

Specify Timecode for writing. SEP is ’:’ for non drop timecode and ’;’ +(or ’.’) for drop. +

 
ffmpeg -i input.mpg -timecode 01:02:03.04 -r 30000/1001 -s ntsc output.mpg
+
+ +

+

+
-filter_complex filtergraph (global)
+

Define a complex filtergraph, i.e. one with arbitrary number of inputs and/or +outputs. For simple graphs – those with one input and one output of the same +type – see the ‘-filter’ options. filtergraph is a description of +the filtergraph, as described in the “Filtergraph syntax” section of the +ffmpeg-filters manual. +

+

Input link labels must refer to input streams using the +[file_index:stream_specifier] syntax (i.e. the same as ‘-map’ +uses). If stream_specifier matches multiple streams, the first one will be +used. An unlabeled input will be connected to the first unused input stream of +the matching type. +

+

Output link labels are referred to with ‘-map’. Unlabeled outputs are +added to the first output file. +

+

Note that with this option it is possible to use only lavfi sources without +normal input files. +

+

For example, to overlay an image over video +

 
ffmpeg -i video.mkv -i image.png -filter_complex '[0:v][1:v]overlay[out]' -map
+'[out]' out.mkv
+
+

Here [0:v] refers to the first video stream in the first input file, +which is linked to the first (main) input of the overlay filter. Similarly the +first video stream in the second input is linked to the second (overlay) input +of overlay. +

+

Assuming there is only one video stream in each input file, we can omit input +labels, so the above is equivalent to +

 
ffmpeg -i video.mkv -i image.png -filter_complex 'overlay[out]' -map
+'[out]' out.mkv
+
+ +

Furthermore we can omit the output label and the single output from the filter +graph will be added to the output file automatically, so we can simply write +

 
ffmpeg -i video.mkv -i image.png -filter_complex 'overlay' out.mkv
+
+ +

To generate 5 seconds of pure red video using lavfi color source: +

 
ffmpeg -filter_complex 'color=c=red' -t 5 out.mkv
+
+ +
+
-lavfi filtergraph (global)
+

Define a complex filtergraph, i.e. one with arbitrary number of inputs and/or +outputs. Equivalent to ‘-filter_complex’. +

+
+
-filter_complex_script filename (global)
+

This option is similar to ‘-filter_complex’, the only difference is that +its argument is the name of the file from which a complex filtergraph +description is to be read. +

+
+
-accurate_seek (input)
+

This option enables or disables accurate seeking in input files with the +‘-ss’ option. It is enabled by default, so seeking is accurate when +transcoding. Use ‘-noaccurate_seek’ to disable it, which may be useful +e.g. when copying some streams and transcoding the others. +

+
+
-override_ffserver (global)
+

Overrides the input specifications from ffserver. Using this +option you can map any input stream to ffserver and control +many aspects of the encoding from ffmpeg. Without this +option ffmpeg will transmit to ffserver what is +requested by ffserver. +

+

The option is intended for cases where features are needed that cannot be +specified to ffserver but can be to ffmpeg. +

+
+
+ +

As a special exception, you can use a bitmap subtitle stream as input: it +will be converted into a video with the same size as the largest video in +the file, or 720x576 if no video is present. Note that this is an +experimental and temporary solution. It will be removed once libavfilter has +proper support for subtitles. +

+

For example, to hardcode subtitles on top of a DVB-T recording stored in +MPEG-TS format, delaying the subtitles by 1 second: +

 
ffmpeg -i input.ts -filter_complex \
+  '[#0x2ef] setpts=PTS+1/TB [sub] ; [#0x2d0] [sub] overlay' \
+  -sn -map '#0x2dc' output.mkv
+
+

(0x2d0, 0x2dc and 0x2ef are the MPEG-TS PIDs of respectively the video, +audio and subtitles streams; 0:0, 0:3 and 0:7 would have worked too) +

+ +

5.12 Preset files

+

A preset file contains a sequence of option=value pairs, +one for each line, specifying a sequence of options which would be +awkward to specify on the command line. Lines starting with the hash +(’#’) character are ignored and are used to provide comments. Check +the ‘presets’ directory in the FFmpeg source tree for examples. +

+

Preset files are specified with the vpre, apre, +spre, and fpre options. The fpre option takes the +filename of the preset instead of a preset name as input and can be +used for any kind of codec. For the vpre, apre, and +spre options, the options specified in a preset file are +applied to the currently selected codec of the same type as the preset +option. +

+

The argument passed to the vpre, apre, and spre +preset options identifies the preset file to use according to the +following rules: +

+

First ffmpeg searches for a file named arg.ffpreset in the +directories ‘$FFMPEG_DATADIR’ (if set), and ‘$HOME/.ffmpeg’, and in +the datadir defined at configuration time (usually ‘PREFIX/share/ffmpeg’) +or in a ‘ffpresets’ folder along the executable on win32, +in that order. For example, if the argument is libvpx-1080p, it will +search for the file ‘libvpx-1080p.ffpreset’. +

+

If no such file is found, then ffmpeg will search for a file named +codec_name-arg.ffpreset in the above-mentioned +directories, where codec_name is the name of the codec to which +the preset file options will be applied. For example, if you select +the video codec with -vcodec libvpx and use -vpre 1080p, +then it will search for the file ‘libvpx-1080p.ffpreset’. +

+ +

6. Tips

+ +
    +
  • +For streaming at very low bitrates, use a low frame rate +and a small GOP size. This is especially true for RealVideo where +the Linux player does not seem to be very fast, so it can miss +frames. An example is: + +
     
    ffmpeg -g 3 -r 3 -t 10 -b:v 50k -s qcif -f rv10 /tmp/b.rm
    +
    + +
  • +The parameter ’q’ which is displayed while encoding is the current +quantizer. The value 1 indicates that a very good quality could +be achieved. The value 31 indicates the worst quality. If q=31 appears +too often, it means that the encoder cannot compress enough to meet +your bitrate. You must either increase the bitrate, decrease the +frame rate or decrease the frame size. + +
  • +If your computer is not fast enough, you can speed up the +compression at the expense of the compression ratio. You can use +’-me zero’ to speed up motion estimation, and ’-g 0’ to disable +motion estimation completely (you have only I-frames, which means it +is about as good as JPEG compression). + +
  • +To have very low audio bitrates, reduce the sampling frequency +(down to 22050 Hz for MPEG audio, 22050 or 11025 for AC-3). + +
  • +To have a constant quality (but a variable bitrate), use the option +’-qscale n’ when ’n’ is between 1 (excellent quality) and 31 (worst +quality). + +
+ + +

7. Examples

+ + +

7.1 Preset files

+ +

A preset file contains a sequence of option=value pairs, one for +each line, specifying a sequence of options which can be specified also on +the command line. Lines starting with the hash (’#’) character are ignored and +are used to provide comments. Empty lines are also ignored. Check the +‘presets’ directory in the FFmpeg source tree for examples. +

+

Preset files are specified with the pre option, this option takes a +preset name as input. FFmpeg searches for a file named preset_name.avpreset in +the directories ‘$AVCONV_DATADIR’ (if set), and ‘$HOME/.ffmpeg’, and in +the data directory defined at configuration time (usually ‘$PREFIX/share/ffmpeg’) +in that order. For example, if the argument is libx264-max, it will +search for the file ‘libx264-max.avpreset’. +

+ +

7.2 Video and Audio grabbing

+ +

If you specify the input format and device then ffmpeg can grab video +and audio directly. +

+
 
ffmpeg -f oss -i /dev/dsp -f video4linux2 -i /dev/video0 /tmp/out.mpg
+
+ +

Or with an ALSA audio source (mono input, card id 1) instead of OSS: +

 
ffmpeg -f alsa -ac 1 -i hw:1 -f video4linux2 -i /dev/video0 /tmp/out.mpg
+
+ +

Note that you must activate the right video source and channel before +launching ffmpeg with any TV viewer such as +xawtv by Gerd Knorr. You also +have to set the audio recording levels correctly with a +standard mixer. +

+ +

7.3 X11 grabbing

+ +

Grab the X11 display with ffmpeg via +

+
 
ffmpeg -f x11grab -video_size cif -framerate 25 -i :0.0 /tmp/out.mpg
+
+ +

0.0 is display.screen number of your X11 server, same as +the DISPLAY environment variable. +

+
 
ffmpeg -f x11grab -video_size cif -framerate 25 -i :0.0+10,20 /tmp/out.mpg
+
+ +

0.0 is display.screen number of your X11 server, same as the DISPLAY environment +variable. 10 is the x-offset and 20 the y-offset for the grabbing. +

+ +

7.4 Video and Audio file format conversion

+ +

Any supported file format and protocol can serve as input to ffmpeg: +

+

Examples: +

    +
  • +You can use YUV files as input: + +
     
    ffmpeg -i /tmp/test%d.Y /tmp/out.mpg
    +
    + +

    It will use the files: +

     
    /tmp/test0.Y, /tmp/test0.U, /tmp/test0.V,
    +/tmp/test1.Y, /tmp/test1.U, /tmp/test1.V, etc...
    +
    + +

    The Y files use twice the resolution of the U and V files. They are +raw files, without header. They can be generated by all decent video +decoders. You must specify the size of the image with the ‘-s’ option +if ffmpeg cannot guess it. +

    +
  • +You can input from a raw YUV420P file: + +
     
    ffmpeg -i /tmp/test.yuv /tmp/out.avi
    +
    + +

    test.yuv is a file containing raw YUV planar data. Each frame is composed +of the Y plane followed by the U and V planes at half vertical and +horizontal resolution. +

    +
  • +You can output to a raw YUV420P file: + +
     
    ffmpeg -i mydivx.avi hugefile.yuv
    +
    + +
  • +You can set several input files and output files: + +
     
    ffmpeg -i /tmp/a.wav -s 640x480 -i /tmp/a.yuv /tmp/a.mpg
    +
    + +

    Converts the audio file a.wav and the raw YUV video file a.yuv +to MPEG file a.mpg. +

    +
  • +You can also do audio and video conversions at the same time: + +
     
    ffmpeg -i /tmp/a.wav -ar 22050 /tmp/a.mp2
    +
    + +

    Converts a.wav to MPEG audio at 22050 Hz sample rate. +

    +
  • +You can encode to several formats at the same time and define a +mapping from input stream to output streams: + +
     
    ffmpeg -i /tmp/a.wav -map 0:a -b:a 64k /tmp/a.mp2 -map 0:a -b:a 128k /tmp/b.mp2
    +
    + +

    Converts a.wav to a.mp2 at 64 kbits and to b.mp2 at 128 kbits. ’-map +file:index’ specifies which input stream is used for each output +stream, in the order of the definition of output streams. +

    +
  • +You can transcode decrypted VOBs: + +
     
    ffmpeg -i snatch_1.vob -f avi -c:v mpeg4 -b:v 800k -g 300 -bf 2 -c:a libmp3lame -b:a 128k snatch.avi
    +
    + +

    This is a typical DVD ripping example; the input is a VOB file, the +output an AVI file with MPEG-4 video and MP3 audio. Note that in this +command we use B-frames so the MPEG-4 stream is DivX5 compatible, and +GOP size is 300 which means one intra frame every 10 seconds for 29.97fps +input video. Furthermore, the audio stream is MP3-encoded so you need +to enable LAME support by passing --enable-libmp3lame to configure. +The mapping is particularly useful for DVD transcoding +to get the desired audio language. +

    +

    NOTE: To see the supported input formats, use ffmpeg -formats. +

    +
  • +You can extract images from a video, or create a video from many images: + +

    For extracting images from a video: +

     
    ffmpeg -i foo.avi -r 1 -s WxH -f image2 foo-%03d.jpeg
    +
    + +

    This will extract one video frame per second from the video and will +output them in files named ‘foo-001.jpeg’, ‘foo-002.jpeg’, +etc. Images will be rescaled to fit the new WxH values. +

    +

    If you want to extract just a limited number of frames, you can use the +above command in combination with the -vframes or -t option, or in +combination with -ss to start extracting from a certain point in time. +

    +

    For creating a video from many images: +

     
    ffmpeg -f image2 -i foo-%03d.jpeg -r 12 -s WxH foo.avi
    +
    + +

    The syntax foo-%03d.jpeg specifies to use a decimal number +composed of three digits padded with zeroes to express the sequence +number. It is the same syntax supported by the C printf function, but +only formats accepting a normal integer are suitable. +

    +

    When importing an image sequence, -i also supports expanding +shell-like wildcard patterns (globbing) internally, by selecting the +image2-specific -pattern_type glob option. +

    +

    For example, for creating a video from filenames matching the glob pattern +foo-*.jpeg: +

     
    ffmpeg -f image2 -pattern_type glob -i 'foo-*.jpeg' -r 12 -s WxH foo.avi
    +
    + +
  • +You can put many streams of the same type in the output: + +
     
    ffmpeg -i test1.avi -i test2.avi -map 0:3 -map 0:2 -map 0:1 -map 0:0 -c copy test12.nut
    +
    + +

    The resulting output file ‘test12.nut’ will contain the first four streams from +the input file in reverse order. +

    +
  • +To force CBR video output: +
     
    ffmpeg -i myfile.avi -b 4000k -minrate 4000k -maxrate 4000k -bufsize 1835k out.m2v
    +
    + +
  • +The four options lmin, lmax, mblmin and mblmax use ’lambda’ units, +but you may use the QP2LAMBDA constant to easily convert from ’q’ units: +
     
    ffmpeg -i src.ext -lmax 21*QP2LAMBDA dst.ext
    +
    + +
+ + +

8. Syntax

+ +

This section documents the syntax and formats employed by the FFmpeg +libraries and tools. +

+

+

+

8.1 Quoting and escaping

+ +

FFmpeg adopts the following quoting and escaping mechanism, unless +explicitly specified. The following rules are applied: +

+
    +
  • +' and \ are special characters (respectively used for +quoting and escaping). In addition to them, there might be other +special characters depending on the specific syntax where the escaping +and quoting are employed. + +
  • +A special character is escaped by prefixing it with a ’\’. + +
  • +All characters enclosed between '' are included literally in the +parsed string. The quote character ' itself cannot be quoted, +so you may need to close the quote and escape it. + 
  • +Leading and trailing whitespaces, unless escaped or quoted, are +removed from the parsed string. +
+ +

Note that you may need to add a second level of escaping when using +the command line or a script, which depends on the syntax of the +adopted shell language. +

+

The function av_get_token defined in +‘libavutil/avstring.h’ can be used to parse a token quoted or +escaped according to the rules defined above. +

+

The tool ‘tools/ffescape’ in the FFmpeg source tree can be used +to automatically quote or escape a string in a script. +

+ +

8.1.1 Examples

+ +
    +
  • +Escape the string Crime d'Amour containing the ' special +character: +
     
    Crime d\'Amour
    +
    + +
  • +The string above contains a quote, so the ' needs to be escaped +when quoting it: +
     
    'Crime d'\''Amour'
    +
    + +
  • +Include leading or trailing whitespaces using quoting: +
     
    '  this string starts and ends with whitespaces  '
    +
    + +
  • +Escaping and quoting can be mixed together: +
     
    ' The string '\'string\'' is a string '
    +
    + +
  • +To include a literal \ you can use either escaping or quoting: +
     
    'c:\foo' can be written as c:\\foo
    +
    +
+ +

+

+

8.2 Date

+ +

The accepted syntax is: +

 
[(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH:MM:SS[.m...]]])|(HHMMSS[.m...]]]))[Z]
+now
+
+ +

If the value is "now" it takes the current time. +

+

Time is local time unless Z is appended, in which case it is +interpreted as UTC. +If the year-month-day part is not specified it takes the current +year-month-day. +

+

+

+

8.3 Time duration

+ +

There are two accepted syntaxes for expressing time duration. +

+
 
[-][HH:]MM:SS[.m...]
+
+ +

HH expresses the number of hours, MM the number of minutes +for a maximum of 2 digits, and SS the number of seconds for a +maximum of 2 digits. The m at the end expresses decimal value for +SS. +

+

or +

+
 
[-]S+[.m...]
+
+ +

S expresses the number of seconds, with the optional decimal part +m. +

+

In both expressions, the optional ‘-’ indicates negative duration. +

+ +

8.3.1 Examples

+ +

The following examples are all valid time duration: +

+
+
55
+

55 seconds +

+
+
12:03:45
+

12 hours, 03 minutes and 45 seconds +

+
+
23.189
+

23.189 seconds +

+
+ +

+

+

8.4 Video size

+

Specify the size of the sourced video, it may be a string of the form +widthxheight, or the name of a size abbreviation. +

+

The following abbreviations are recognized: +

+
ntsc
+

720x480 +

+
pal
+

720x576 +

+
qntsc
+

352x240 +

+
qpal
+

352x288 +

+
sntsc
+

640x480 +

+
spal
+

768x576 +

+
film
+

352x240 +

+
ntsc-film
+

352x240 +

+
sqcif
+

128x96 +

+
qcif
+

176x144 +

+
cif
+

352x288 +

+
4cif
+

704x576 +

+
16cif
+

1408x1152 +

+
qqvga
+

160x120 +

+
qvga
+

320x240 +

+
vga
+

640x480 +

+
svga
+

800x600 +

+
xga
+

1024x768 +

+
uxga
+

1600x1200 +

+
qxga
+

2048x1536 +

+
sxga
+

1280x1024 +

+
qsxga
+

2560x2048 +

+
hsxga
+

5120x4096 +

+
wvga
+

852x480 +

+
wxga
+

1366x768 +

+
wsxga
+

1600x1024 +

+
wuxga
+

1920x1200 +

+
woxga
+

2560x1600 +

+
wqsxga
+

3200x2048 +

+
wquxga
+

3840x2400 +

+
whsxga
+

6400x4096 +

+
whuxga
+

7680x4800 +

+
cga
+

320x200 +

+
ega
+

640x350 +

+
hd480
+

852x480 +

+
hd720
+

1280x720 +

+
hd1080
+

1920x1080 +

+
2k
+

2048x1080 +

+
2kflat
+

1998x1080 +

+
2kscope
+

2048x858 +

+
4k
+

4096x2160 +

+
4kflat
+

3996x2160 +

+
4kscope
+

4096x1716 +

+
nhd
+

640x360 +

+
hqvga
+

240x160 +

+
wqvga
+

400x240 +

+
fwqvga
+

432x240 +

+
hvga
+

480x320 +

+
qhd
+

960x540 +

+
+ +

+

+

8.5 Video rate

+ +

Specify the frame rate of a video, expressed as the number of frames +generated per second. It has to be a string in the format +frame_rate_num/frame_rate_den, an integer number, a float +number or a valid video frame rate abbreviation. +

+

The following abbreviations are recognized: +

+
ntsc
+

30000/1001 +

+
pal
+

25/1 +

+
qntsc
+

30000/1001 +

+
qpal
+

25/1 +

+
sntsc
+

30000/1001 +

+
spal
+

25/1 +

+
film
+

24/1 +

+
ntsc-film
+

24000/1001 +

+
+ +

+

+

8.6 Ratio

+ +

A ratio can be expressed as an expression, or in the form +numerator:denominator. +

+

Note that a ratio with infinite (1/0) or negative value is +considered valid, so you should check on the returned value if you +want to exclude those values. +

+

The undefined value can be expressed using the "0:0" string. +

+

+

+

8.7 Color

+ +

It can be the name of a color as defined below (case insensitive match) or a +[0x|#]RRGGBB[AA] sequence, possibly followed by @ and a string +representing the alpha component. +

+

The alpha component may be a string composed by "0x" followed by an +hexadecimal number or a decimal number between 0.0 and 1.0, which +represents the opacity value (‘0x00’ or ‘0.0’ means completely +transparent, ‘0xff’ or ‘1.0’ completely opaque). If the alpha +component is not specified then ‘0xff’ is assumed. +

+

The string ‘random’ will result in a random color. +

+

The following names of colors are recognized: +

+
AliceBlue
+

0xF0F8FF +

+
AntiqueWhite
+

0xFAEBD7 +

+
Aqua
+

0x00FFFF +

+
Aquamarine
+

0x7FFFD4 +

+
Azure
+

0xF0FFFF +

+
Beige
+

0xF5F5DC +

+
Bisque
+

0xFFE4C4 +

+
Black
+

0x000000 +

+
BlanchedAlmond
+

0xFFEBCD +

+
Blue
+

0x0000FF +

+
BlueViolet
+

0x8A2BE2 +

+
Brown
+

0xA52A2A +

+
BurlyWood
+

0xDEB887 +

+
CadetBlue
+

0x5F9EA0 +

+
Chartreuse
+

0x7FFF00 +

+
Chocolate
+

0xD2691E +

+
Coral
+

0xFF7F50 +

+
CornflowerBlue
+

0x6495ED +

+
Cornsilk
+

0xFFF8DC +

+
Crimson
+

0xDC143C +

+
Cyan
+

0x00FFFF +

+
DarkBlue
+

0x00008B +

+
DarkCyan
+

0x008B8B +

+
DarkGoldenRod
+

0xB8860B +

+
DarkGray
+

0xA9A9A9 +

+
DarkGreen
+

0x006400 +

+
DarkKhaki
+

0xBDB76B +

+
DarkMagenta
+

0x8B008B +

+
DarkOliveGreen
+

0x556B2F +

+
Darkorange
+

0xFF8C00 +

+
DarkOrchid
+

0x9932CC +

+
DarkRed
+

0x8B0000 +

+
DarkSalmon
+

0xE9967A +

+
DarkSeaGreen
+

0x8FBC8F +

+
DarkSlateBlue
+

0x483D8B +

+
DarkSlateGray
+

0x2F4F4F +

+
DarkTurquoise
+

0x00CED1 +

+
DarkViolet
+

0x9400D3 +

+
DeepPink
+

0xFF1493 +

+
DeepSkyBlue
+

0x00BFFF +

+
DimGray
+

0x696969 +

+
DodgerBlue
+

0x1E90FF +

+
FireBrick
+

0xB22222 +

+
FloralWhite
+

0xFFFAF0 +

+
ForestGreen
+

0x228B22 +

+
Fuchsia
+

0xFF00FF +

+
Gainsboro
+

0xDCDCDC +

+
GhostWhite
+

0xF8F8FF +

+
Gold
+

0xFFD700 +

+
GoldenRod
+

0xDAA520 +

+
Gray
+

0x808080 +

+
Green
+

0x008000 +

+
GreenYellow
+

0xADFF2F +

+
HoneyDew
+

0xF0FFF0 +

+
HotPink
+

0xFF69B4 +

+
IndianRed
+

0xCD5C5C +

+
Indigo
+

0x4B0082 +

+
Ivory
+

0xFFFFF0 +

+
Khaki
+

0xF0E68C +

+
Lavender
+

0xE6E6FA +

+
LavenderBlush
+

0xFFF0F5 +

+
LawnGreen
+

0x7CFC00 +

+
LemonChiffon
+

0xFFFACD +

+
LightBlue
+

0xADD8E6 +

+
LightCoral
+

0xF08080 +

+
LightCyan
+

0xE0FFFF +

+
LightGoldenRodYellow
+

0xFAFAD2 +

+
LightGreen
+

0x90EE90 +

+
LightGrey
+

0xD3D3D3 +

+
LightPink
+

0xFFB6C1 +

+
LightSalmon
+

0xFFA07A +

+
LightSeaGreen
+

0x20B2AA +

+
LightSkyBlue
+

0x87CEFA +

+
LightSlateGray
+

0x778899 +

+
LightSteelBlue
+

0xB0C4DE +

+
LightYellow
+

0xFFFFE0 +

+
Lime
+

0x00FF00 +

+
LimeGreen
+

0x32CD32 +

+
Linen
+

0xFAF0E6 +

+
Magenta
+

0xFF00FF +

+
Maroon
+

0x800000 +

+
MediumAquaMarine
+

0x66CDAA +

+
MediumBlue
+

0x0000CD +

+
MediumOrchid
+

0xBA55D3 +

+
MediumPurple
+

0x9370D8 +

+
MediumSeaGreen
+

0x3CB371 +

+
MediumSlateBlue
+

0x7B68EE +

+
MediumSpringGreen
+

0x00FA9A +

+
MediumTurquoise
+

0x48D1CC +

+
MediumVioletRed
+

0xC71585 +

+
MidnightBlue
+

0x191970 +

+
MintCream
+

0xF5FFFA +

+
MistyRose
+

0xFFE4E1 +

+
Moccasin
+

0xFFE4B5 +

+
NavajoWhite
+

0xFFDEAD +

+
Navy
+

0x000080 +

+
OldLace
+

0xFDF5E6 +

+
Olive
+

0x808000 +

+
OliveDrab
+

0x6B8E23 +

+
Orange
+

0xFFA500 +

+
OrangeRed
+

0xFF4500 +

+
Orchid
+

0xDA70D6 +

+
PaleGoldenRod
+

0xEEE8AA +

+
PaleGreen
+

0x98FB98 +

+
PaleTurquoise
+

0xAFEEEE +

+
PaleVioletRed
+

0xD87093 +

+
PapayaWhip
+

0xFFEFD5 +

+
PeachPuff
+

0xFFDAB9 +

+
Peru
+

0xCD853F +

+
Pink
+

0xFFC0CB +

+
Plum
+

0xDDA0DD +

+
PowderBlue
+

0xB0E0E6 +

+
Purple
+

0x800080 +

+
Red
+

0xFF0000 +

+
RosyBrown
+

0xBC8F8F +

+
RoyalBlue
+

0x4169E1 +

+
SaddleBrown
+

0x8B4513 +

+
Salmon
+

0xFA8072 +

+
SandyBrown
+

0xF4A460 +

+
SeaGreen
+

0x2E8B57 +

+
SeaShell
+

0xFFF5EE +

+
Sienna
+

0xA0522D +

+
Silver
+

0xC0C0C0 +

+
SkyBlue
+

0x87CEEB +

+
SlateBlue
+

0x6A5ACD +

+
SlateGray
+

0x708090 +

+
Snow
+

0xFFFAFA +

+
SpringGreen
+

0x00FF7F +

+
SteelBlue
+

0x4682B4 +

+
Tan
+

0xD2B48C +

+
Teal
+

0x008080 +

+
Thistle
+

0xD8BFD8 +

+
Tomato
+

0xFF6347 +

+
Turquoise
+

0x40E0D0 +

+
Violet
+

0xEE82EE +

+
Wheat
+

0xF5DEB3 +

+
White
+

0xFFFFFF +

+
WhiteSmoke
+

0xF5F5F5 +

+
Yellow
+

0xFFFF00 +

+
YellowGreen
+

0x9ACD32 +

+
+ +

+

+

8.8 Channel Layout

+ +

A channel layout specifies the spatial disposition of the channels in +a multi-channel audio stream. To specify a channel layout, FFmpeg +makes use of a special syntax. +

+

Individual channels are identified by an id, as given by the table +below: +

+
FL
+

front left +

+
FR
+

front right +

+
FC
+

front center +

+
LFE
+

low frequency +

+
BL
+

back left +

+
BR
+

back right +

+
FLC
+

front left-of-center +

+
FRC
+

front right-of-center +

+
BC
+

back center +

+
SL
+

side left +

+
SR
+

side right +

+
TC
+

top center +

+
TFL
+

top front left +

+
TFC
+

top front center +

+
TFR
+

top front right +

+
TBL
+

top back left +

+
TBC
+

top back center +

+
TBR
+

top back right +

+
DL
+

downmix left +

+
DR
+

downmix right +

+
WL
+

wide left +

+
WR
+

wide right +

+
SDL
+

surround direct left +

+
SDR
+

surround direct right +

+
LFE2
+

low frequency 2 +

+
+ +

Standard channel layout compositions can be specified by using the +following identifiers: +

+
mono
+

FC +

+
stereo
+

FL+FR +

+
2.1
+

FL+FR+LFE +

+
3.0
+

FL+FR+FC +

+
3.0(back)
+

FL+FR+BC +

+
4.0
+

FL+FR+FC+BC +

+
quad
+

FL+FR+BL+BR +

+
quad(side)
+

FL+FR+SL+SR +

+
3.1
+

FL+FR+FC+LFE +

+
5.0
+

FL+FR+FC+BL+BR +

+
5.0(side)
+

FL+FR+FC+SL+SR +

+
4.1
+

FL+FR+FC+LFE+BC +

+
5.1
+

FL+FR+FC+LFE+BL+BR +

+
5.1(side)
+

FL+FR+FC+LFE+SL+SR +

+
6.0
+

FL+FR+FC+BC+SL+SR +

+
6.0(front)
+

FL+FR+FLC+FRC+SL+SR +

+
hexagonal
+

FL+FR+FC+BL+BR+BC +

+
6.1
+

FL+FR+FC+LFE+BC+SL+SR +

+
6.1(back)
+

FL+FR+FC+LFE+BL+BR+BC +

+
6.1(front)
+

FL+FR+LFE+FLC+FRC+SL+SR +

+
7.0
+

FL+FR+FC+BL+BR+SL+SR +

+
7.0(front)
+

FL+FR+FC+FLC+FRC+SL+SR +

+
7.1
+

FL+FR+FC+LFE+BL+BR+SL+SR +

+
7.1(wide)
+

FL+FR+FC+LFE+BL+BR+FLC+FRC +

+
7.1(wide-side)
+

FL+FR+FC+LFE+FLC+FRC+SL+SR +

+
octagonal
+

FL+FR+FC+BL+BR+BC+SL+SR +

+
downmix
+

DL+DR +

+
+ +

A custom channel layout can be specified as a sequence of terms, separated by +’+’ or ’|’. Each term can be: +

    +
  • +the name of a standard channel layout (e.g. ‘mono’, +‘stereo’, ‘4.0’, ‘quad’, ‘5.0’, etc.) + +
  • +the name of a single channel (e.g. ‘FL’, ‘FR’, ‘FC’, ‘LFE’, etc.) + +
  • +a number of channels, in decimal, optionally followed by ’c’, yielding +the default channel layout for that number of channels (see the +function av_get_default_channel_layout) + +
  • +a channel layout mask, in hexadecimal starting with "0x" (see the +AV_CH_* macros in ‘libavutil/channel_layout.h’. +
+ +

Starting from libavutil version 53 the trailing character "c" to +specify a number of channels will be required, while a channel layout +mask could also be specified as a decimal number (if and only if not +followed by "c"). +

+

See also the function av_get_channel_layout defined in +‘libavutil/channel_layout.h’. +

+ +

9. Expression Evaluation

+ +

When evaluating an arithmetic expression, FFmpeg uses an internal +formula evaluator, implemented through the ‘libavutil/eval.h’ +interface. +

+

An expression may contain unary, binary operators, constants, and +functions. +

+

Two expressions expr1 and expr2 can be combined to form +another expression "expr1;expr2". +expr1 and expr2 are evaluated in turn, and the new +expression evaluates to the value of expr2. +

+

The following binary operators are available: +, -, +*, /, ^. +

+

The following unary operators are available: +, -. +

+

The following functions are available: +

+
abs(x)
+

Compute absolute value of x. +

+
+
acos(x)
+

Compute arccosine of x. +

+
+
asin(x)
+

Compute arcsine of x. +

+
+
atan(x)
+

Compute arctangent of x. +

+
+
between(x, min, max)
+

Return 1 if x is greater than or equal to min and lesser than or +equal to max, 0 otherwise. +

+
+
bitand(x, y)
+
bitor(x, y)
+

Compute bitwise and/or operation on x and y. +

+

The results of the evaluation of x and y are converted to +integers before executing the bitwise operation. +

+

Note that both the conversion to integer and the conversion back to +floating point can lose precision. Beware of unexpected results for +large numbers (usually 2^53 and larger). +

+
+
ceil(expr)
+

Round the value of expression expr upwards to the nearest +integer. For example, "ceil(1.5)" is "2.0". +

+
+
cos(x)
+

Compute cosine of x. +

+
+
cosh(x)
+

Compute hyperbolic cosine of x. +

+
+
eq(x, y)
+

Return 1 if x and y are equivalent, 0 otherwise. +

+
+
exp(x)
+

Compute exponential of x (with base e, the Euler’s number). +

+
+
floor(expr)
+

Round the value of expression expr downwards to the nearest +integer. For example, "floor(-1.5)" is "-2.0". +

+
+
gauss(x)
+

Compute Gauss function of x, corresponding to +exp(-x*x/2) / sqrt(2*PI). +

+
+
gcd(x, y)
+

Return the greatest common divisor of x and y. If both x and +y are 0 or either or both are less than zero then behavior is undefined. +

+
+
gt(x, y)
+

Return 1 if x is greater than y, 0 otherwise. +

+
+
gte(x, y)
+

Return 1 if x is greater than or equal to y, 0 otherwise. +

+
+
hypot(x, y)
+

This function is similar to the C function with the same name; it returns +"sqrt(x*x + y*y)", the length of the hypotenuse of a +right triangle with sides of length x and y, or the distance of the +point (x, y) from the origin. +

+
+
if(x, y)
+

Evaluate x, and if the result is non-zero return the result of +the evaluation of y, return 0 otherwise. +

+
+
if(x, y, z)
+

Evaluate x, and if the result is non-zero return the evaluation +result of y, otherwise the evaluation result of z. +

+
+
ifnot(x, y)
+

Evaluate x, and if the result is zero return the result of the +evaluation of y, return 0 otherwise. +

+
+
ifnot(x, y, z)
+

Evaluate x, and if the result is zero return the evaluation +result of y, otherwise the evaluation result of z. +

+
+
isinf(x)
+

Return 1.0 if x is +/-INFINITY, 0.0 otherwise. +

+
+
isnan(x)
+

Return 1.0 if x is NAN, 0.0 otherwise. +

+
+
ld(var)
+

Load the value of the internal variable with number +var, which was previously stored with st(var, expr). +The function returns the loaded value. +

+
+
log(x)
+

Compute natural logarithm of x. +

+
+
lt(x, y)
+

Return 1 if x is lesser than y, 0 otherwise. +

+
+
lte(x, y)
+

Return 1 if x is lesser than or equal to y, 0 otherwise. +

+
+
max(x, y)
+

Return the maximum between x and y. +

+
+
min(x, y)
+

Return the minimum between x and y. +

+
+
mod(x, y)
+

Compute the remainder of division of x by y. +

+
+
not(expr)
+

Return 1.0 if expr is zero, 0.0 otherwise. +

+
+
pow(x, y)
+

Compute the power of x elevated y, it is equivalent to +"(x)^(y)". +

+
+
print(t)
+
print(t, l)
+

Print the value of expression t with loglevel l. If +l is not specified then a default log level is used. +Returns the value of the expression printed. +

+

Prints t with loglevel l +

+
+
random(x)
+

Return a pseudo random value between 0.0 and 1.0. x is the index of the +internal variable which will be used to save the seed/state. +

+
+
root(expr, max)
+

Find an input value for which the function represented by expr +with argument ld(0) is 0 in the interval 0..max. +

+

The expression in expr must denote a continuous function or the +result is undefined. +

+

ld(0) is used to represent the function input value, which means +that the given expression will be evaluated multiple times with +various input values that the expression can access through +ld(0). When the expression evaluates to 0 then the +corresponding input value will be returned. +

+
+
sin(x)
+

Compute sine of x. +

+
+
sinh(x)
+

Compute hyperbolic sine of x. +

+
+
sqrt(expr)
+

Compute the square root of expr. This is equivalent to +"(expr)^.5". +

+
+
squish(x)
+

Compute expression 1/(1 + exp(4*x)). +

+
+
st(var, expr)
+

Store the value of the expression expr in an internal +variable. var specifies the number of the variable where to +store the value, and it is a value ranging from 0 to 9. The function +returns the value stored in the internal variable. +Note: variables are currently not shared between expressions. +

+
+
tan(x)
+

Compute tangent of x. +

+
+
tanh(x)
+

Compute hyperbolic tangent of x. +

+
+
taylor(expr, x)
+
taylor(expr, x, id)
+

Evaluate a Taylor series at x, given an expression representing +the ld(id)-th derivative of a function at 0. +

+

When the series does not converge the result is undefined. +

+

ld(id) is used to represent the derivative order in expr, +which means that the given expression will be evaluated multiple times +with various input values that the expression can access through +ld(id). If id is not specified then 0 is assumed. +

+

Note, when you have the derivatives at y instead of 0, +taylor(expr, x-y) can be used. +

+
+
time(0)
+

Return the current (wallclock) time in seconds. +

+
+
trunc(expr)
+

Round the value of expression expr towards zero to the nearest +integer. For example, "trunc(-1.5)" is "-1.0". +

+
+
while(cond, expr)
+

Evaluate expression expr while the expression cond is +non-zero, and returns the value of the last expr evaluation, or +NAN if cond was always false. +

+
+ +

The following constants are available: +

+
PI
+

area of the unit disc, approximately 3.14 +

+
E
+

exp(1) (Euler’s number), approximately 2.718 +

+
PHI
+

golden ratio (1+sqrt(5))/2, approximately 1.618 +

+
+ +

Assuming that an expression is considered "true" if it has a non-zero +value, note that: +

+

* works like AND +

+

+ works like OR +

+

For example the construct: +

 
if (A AND B) then C
+
+

is equivalent to: +

 
if(A*B, C)
+
+ +

In your C code, you can extend the list of unary and binary functions, +and define recognized constants, so that they are available for your +expressions. +

+

The evaluator also recognizes the International System unit prefixes. +If ’i’ is appended after the prefix, binary prefixes are used, which +are based on powers of 1024 instead of powers of 1000. +The ’B’ postfix multiplies the value by 8, and can be appended after a +unit prefix or used alone. This allows using for example ’KB’, ’MiB’, +’G’ and ’B’ as number postfix. +

+

The list of available International System prefixes follows, with +indication of the corresponding powers of 10 and of 2. +

+
y
+

10^-24 / 2^-80 +

+
z
+

10^-21 / 2^-70 +

+
a
+

10^-18 / 2^-60 +

+
f
+

10^-15 / 2^-50 +

+
p
+

10^-12 / 2^-40 +

+
n
+

10^-9 / 2^-30 +

+
u
+

10^-6 / 2^-20 +

+
m
+

10^-3 / 2^-10 +

+
c
+

10^-2 +

+
d
+

10^-1 +

+
h
+

10^2 +

+
k
+

10^3 / 2^10 +

+
K
+

10^3 / 2^10 +

+
M
+

10^6 / 2^20 +

+
G
+

10^9 / 2^30 +

+
T
+

10^12 / 2^40 +

+
P
+

10^15 / 2^50 +

+
E
+

10^18 / 2^60 +

+
Z
+

10^21 / 2^70 +

+
Y
+

10^24 / 2^80 +

+
+ + + +

10. OpenCL Options

+ +

When FFmpeg is configured with --enable-opencl, it is possible +to set the options for the global OpenCL context. +

+

The list of supported options follows: +

+
+
build_options
+

Set build options used to compile the registered kernels. +

+

See reference "OpenCL Specification Version: 1.2 chapter 5.6.4". +

+
+
platform_idx
+

Select the index of the platform to run OpenCL code. +

+

The specified index must be one of the indexes in the device list +which can be obtained with ffmpeg -opencl_bench or av_opencl_get_device_list(). +

+
+
device_idx
+

Select the index of the device used to run OpenCL code. +

+

The specified index must be one of the indexes in the device list which +can be obtained with ffmpeg -opencl_bench or av_opencl_get_device_list(). +

+
+
+ +

+

+

11. Codec Options

+ +

libavcodec provides some generic global options, which can be set on +all the encoders and decoders. In addition each codec may support +so-called private options, which are specific for a given codec. +

+

Sometimes, a global option may only affect a specific kind of codec, +and may be nonsensical or ignored by another, so you need to be aware +of the meaning of the specified options. Also some options are +meant only for decoding or encoding. +

+

Options may be set by specifying -option value in the +FFmpeg tools, or by setting the value explicitly in the +AVCodecContext options or using the ‘libavutil/opt.h’ API +for programmatic use. +

+

The list of supported options follow: +

+
+
b integer (encoding,audio,video)
+

Set bitrate in bits/s. Default value is 200K. +

+
+
ab integer (encoding,audio)
+

Set audio bitrate (in bits/s). Default value is 128K. +

+
+
bt integer (encoding,video)
+

Set video bitrate tolerance (in bits/s). In 1-pass mode, bitrate +tolerance specifies how far ratecontrol is willing to deviate from the +target average bitrate value. This is not related to min/max +bitrate. Lowering tolerance too much has an adverse effect on quality. +

+
+
flags flags (decoding/encoding,audio,video,subtitles)
+

Set generic flags. +

+

Possible values: +

+
mv4
+

Use four motion vector by macroblock (mpeg4). +

+
qpel
+

Use 1/4 pel motion compensation. +

+
loop
+

Use loop filter. +

+
qscale
+

Use fixed qscale. +

+
gmc
+

Use gmc. +

+
mv0
+

Always try a mb with mv=<0,0>. +

+
input_preserved
+
pass1
+

Use internal 2pass ratecontrol in first pass mode. +

+
pass2
+

Use internal 2pass ratecontrol in second pass mode. +

+
gray
+

Only decode/encode grayscale. +

+
emu_edge
+

Do not draw edges. +

+
psnr
+

Set error[?] variables during encoding. +

+
truncated
+
naq
+

Normalize adaptive quantization. +

+
ildct
+

Use interlaced DCT. +

+
low_delay
+

Force low delay. +

+
global_header
+

Place global headers in extradata instead of every keyframe. +

+
bitexact
+

Use only bitexact stuff (except (I)DCT). +

+
aic
+

Apply H263 advanced intra coding / mpeg4 ac prediction. +

+
cbp
+

Deprecated, use mpegvideo private options instead. +

+
qprd
+

Deprecated, use mpegvideo private options instead. +

+
ilme
+

Apply interlaced motion estimation. +

+
cgop
+

Use closed gop. +

+
+ +
+
me_method integer (encoding,video)
+

Set motion estimation method. +

+

Possible values: +

+
zero
+

zero motion estimation (fastest) +

+
full
+

full motion estimation (slowest) +

+
epzs
+

EPZS motion estimation (default) +

+
esa
+

esa motion estimation (alias for full) +

+
tesa
+

tesa motion estimation +

+
dia
+

dia motion estimation (alias for epzs) +

+
log
+

log motion estimation +

+
phods
+

phods motion estimation +

+
x1
+

X1 motion estimation +

+
hex
+

hex motion estimation +

+
umh
+

umh motion estimation +

+
iter
+

iter motion estimation +

+
+ +
+
extradata_size integer
+

Set extradata size. +

+
+
time_base rational number
+

Set codec time base. +

+

It is the fundamental unit of time (in seconds) in terms of which +frame timestamps are represented. For fixed-fps content, timebase +should be 1 / frame_rate and timestamp increments should be +identically 1. +

+
+
g integer (encoding,video)
+

Set the group of picture size. Default value is 12. +

+
+
ar integer (decoding/encoding,audio)
+

Set audio sampling rate (in Hz). +

+
+
ac integer (decoding/encoding,audio)
+

Set number of audio channels. +

+
+
cutoff integer (encoding,audio)
+

Set cutoff bandwidth. +

+
+
frame_size integer (encoding,audio)
+

Set audio frame size. +

+

Each submitted frame except the last must contain exactly frame_size +samples per channel. May be 0 when the codec has +CODEC_CAP_VARIABLE_FRAME_SIZE set, in that case the frame size is not +restricted. It is set by some decoders to indicate constant frame +size. +

+
+
frame_number integer
+

Set the frame number. +

+
+
delay integer
+
qcomp float (encoding,video)
+

Set video quantizer scale compression (VBR). It is used as a constant +in the ratecontrol equation. Recommended range for default rc_eq: +0.0-1.0. +

+
+
qblur float (encoding,video)
+

Set video quantizer scale blur (VBR). +

+
+
qmin integer (encoding,video)
+

Set min video quantizer scale (VBR). Must be included between -1 and +69, default value is 2. +

+
+
qmax integer (encoding,video)
+

Set max video quantizer scale (VBR). Must be included between -1 and +1024, default value is 31. +

+
+
qdiff integer (encoding,video)
+

Set max difference between the quantizer scale (VBR). +

+
+
bf integer (encoding,video)
+

Set max number of B frames between non-B-frames. +

+

Must be an integer between -1 and 16. 0 means that B-frames are +disabled. If a value of -1 is used, it will choose an automatic value +depending on the encoder. +

+

Default value is 0. +

+
+
b_qfactor float (encoding,video)
+

Set qp factor between P and B frames. +

+
+
rc_strategy integer (encoding,video)
+

Set ratecontrol method. +

+
+
b_strategy integer (encoding,video)
+

Set strategy to choose between I/P/B-frames. +

+
+
ps integer (encoding,video)
+

Set RTP payload size in bytes. +

+
+
mv_bits integer
+
header_bits integer
+
i_tex_bits integer
+
p_tex_bits integer
+
i_count integer
+
p_count integer
+
skip_count integer
+
misc_bits integer
+
frame_bits integer
+
codec_tag integer
+
bug flags (decoding,video)
+

Work around encoder bugs that are not automatically detected. +

+

Possible values: +

+
autodetect
+
old_msmpeg4
+

some old lavc generated msmpeg4v3 files (no autodetection) +

+
xvid_ilace
+

Xvid interlacing bug (autodetected if fourcc==XVIX) +

+
ump4
+

(autodetected if fourcc==UMP4) +

+
no_padding
+

padding bug (autodetected) +

+
amv
+
ac_vlc
+

illegal vlc bug (autodetected per fourcc) +

+
qpel_chroma
+
std_qpel
+

old standard qpel (autodetected per fourcc/version) +

+
qpel_chroma2
+
direct_blocksize
+

direct-qpel-blocksize bug (autodetected per fourcc/version) +

+
edge
+

edge padding bug (autodetected per fourcc/version) +

+
hpel_chroma
+
dc_clip
+
ms
+

Workaround various bugs in microsoft broken decoders. +

+
trunc
+

truncated frames +

+
+ +
+
lelim integer (encoding,video)
+

Set single coefficient elimination threshold for luminance (negative +values also consider DC coefficient). +

+
+
celim integer (encoding,video)
+

Set single coefficient elimination threshold for chrominance (negative +values also consider dc coefficient) +

+
+
strict integer (decoding/encoding,audio,video)
+

Specify how strictly to follow the standards. +

+

Possible values: +

+
very
+

strictly conform to an older, stricter version of the spec or reference software +

+
strict
+

strictly conform to all the things in the spec no matter what consequences +

+
normal
+
unofficial
+

allow unofficial extensions +

+
experimental
+

allow non standardized experimental things, experimental +(unfinished/work in progress/not well tested) decoders and encoders. +Note: experimental decoders can pose a security risk, do not use this for +decoding untrusted input. +

+
+ +
+
b_qoffset float (encoding,video)
+

Set QP offset between P and B frames. +

+
+
err_detect flags (decoding,audio,video)
+

Set error detection flags. +

+

Possible values: +

+
crccheck
+

verify embedded CRCs +

+
bitstream
+

detect bitstream specification deviations +

+
buffer
+

detect improper bitstream length +

+
explode
+

abort decoding on minor error detection +

+
careful
+

consider things that violate the spec and have not been seen in the wild as errors +

+
compliant
+

consider all spec non compliancies as errors +

+
aggressive
+

consider things that a sane encoder should not do as an error +

+
+ +
+
has_b_frames integer
+
block_align integer
+
mpeg_quant integer (encoding,video)
+

Use MPEG quantizers instead of H.263. +

+
+
qsquish float (encoding,video)
+

How to keep quantizer between qmin and qmax (0 = clip, 1 = use +differentiable function). +

+
+
rc_qmod_amp float (encoding,video)
+

Set experimental quantizer modulation. +

+
+
rc_qmod_freq integer (encoding,video)
+

Set experimental quantizer modulation. +

+
+
rc_override_count integer
+
rc_eq string (encoding,video)
+

Set rate control equation. When computing the expression, besides the +standard functions defined in the section ’Expression Evaluation’, the +following functions are available: bits2qp(bits), qp2bits(qp). Also +the following constants are available: iTex pTex tex mv fCode iCount +mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex +avgTex. +

+
+
maxrate integer (encoding,audio,video)
+

Set max bitrate tolerance (in bits/s). Requires bufsize to be set. +

+
+
minrate integer (encoding,audio,video)
+

Set min bitrate tolerance (in bits/s). Most useful in setting up a CBR +encode. It is of little use otherwise. +

+
+
bufsize integer (encoding,audio,video)
+

Set ratecontrol buffer size (in bits). +

+
+
rc_buf_aggressivity float (encoding,video)
+

Currently useless. +

+
+
i_qfactor float (encoding,video)
+

Set QP factor between P and I frames. +

+
+
i_qoffset float (encoding,video)
+

Set QP offset between P and I frames. +

+
+
rc_init_cplx float (encoding,video)
+

Set initial complexity for 1-pass encoding. +

+
+
dct integer (encoding,video)
+

Set DCT algorithm. +

+

Possible values: +

+
auto
+

autoselect a good one (default) +

+
fastint
+

fast integer +

+
int
+

accurate integer +

+
mmx
+
altivec
+
faan
+

floating point AAN DCT +

+
+ +
+
lumi_mask float (encoding,video)
+

Compress bright areas stronger than medium ones. +

+
+
tcplx_mask float (encoding,video)
+

Set temporal complexity masking. +

+
+
scplx_mask float (encoding,video)
+

Set spatial complexity masking. +

+
+
p_mask float (encoding,video)
+

Set inter masking. +

+
+
dark_mask float (encoding,video)
+

Compress dark areas stronger than medium ones. +

+
+
idct integer (decoding/encoding,video)
+

Select IDCT implementation. +

+

Possible values: +

+
auto
+
int
+
simple
+
simplemmx
+
arm
+
altivec
+
sh4
+
simplearm
+
simplearmv5te
+
simplearmv6
+
simpleneon
+
simplealpha
+
ipp
+
xvidmmx
+
faani
+

floating point AAN IDCT +

+
+ +
+
slice_count integer
+
ec flags (decoding,video)
+

Set error concealment strategy. +

+

Possible values: +

+
guess_mvs
+

iterative motion vector (MV) search (slow) +

+
deblock
+

use strong deblock filter for damaged MBs +

+
+ +
+
bits_per_coded_sample integer
+
pred integer (encoding,video)
+

Set prediction method. +

+

Possible values: +

+
left
+
plane
+
median
+
+ +
+
aspect rational number (encoding,video)
+

Set sample aspect ratio. +

+
+
debug flags (decoding/encoding,audio,video,subtitles)
+

Print specific debug info. +

+

Possible values: +

+
pict
+

picture info +

+
rc
+

rate control +

+
bitstream
+
mb_type
+

macroblock (MB) type +

+
qp
+

per-block quantization parameter (QP) +

+
mv
+

motion vector +

+
dct_coeff
+
skip
+
startcode
+
pts
+
er
+

error recognition +

+
mmco
+

memory management control operations (H.264) +

+
bugs
+
vis_qp
+

visualize quantization parameter (QP), lower QP are tinted greener +

+
vis_mb_type
+

visualize block types +

+
buffers
+

picture buffer allocations +

+
thread_ops
+

threading operations +

+
+ +
+
vismv integer (decoding,video)
+

Visualize motion vectors (MVs). +

+

Possible values: +

+
pf
+

forward predicted MVs of P-frames +

+
bf
+

forward predicted MVs of B-frames +

+
bb
+

backward predicted MVs of B-frames +

+
+ +
+
cmp integer (encoding,video)
+

Set full pel me compare function. +

+

Possible values: +

+
sad
+

sum of absolute differences, fast (default) +

+
sse
+

sum of squared errors +

+
satd
+

sum of absolute Hadamard transformed differences +

+
dct
+

sum of absolute DCT transformed differences +

+
psnr
+

sum of squared quantization errors (avoid, low quality) +

+
bit
+

number of bits needed for the block +

+
rd
+

rate distortion optimal, slow +

+
zero
+

0 +

+
vsad
+

sum of absolute vertical differences +

+
vsse
+

sum of squared vertical differences +

+
nsse
+

noise preserving sum of squared differences +

+
w53
+

5/3 wavelet, only used in snow +

+
w97
+

9/7 wavelet, only used in snow +

+
dctmax
+
chroma
+
+ +
+
subcmp integer (encoding,video)
+

Set sub pel me compare function. +

+

Possible values: +

+
sad
+

sum of absolute differences, fast (default) +

+
sse
+

sum of squared errors +

+
satd
+

sum of absolute Hadamard transformed differences +

+
dct
+

sum of absolute DCT transformed differences +

+
psnr
+

sum of squared quantization errors (avoid, low quality) +

+
bit
+

number of bits needed for the block +

+
rd
+

rate distortion optimal, slow +

+
zero
+

0 +

+
vsad
+

sum of absolute vertical differences +

+
vsse
+

sum of squared vertical differences +

+
nsse
+

noise preserving sum of squared differences +

+
w53
+

5/3 wavelet, only used in snow +

+
w97
+

9/7 wavelet, only used in snow +

+
dctmax
+
chroma
+
+ +
+
mbcmp integer (encoding,video)
+

Set macroblock compare function. +

+

Possible values: +

+
sad
+

sum of absolute differences, fast (default) +

+
sse
+

sum of squared errors +

+
satd
+

sum of absolute Hadamard transformed differences +

+
dct
+

sum of absolute DCT transformed differences +

+
psnr
+

sum of squared quantization errors (avoid, low quality) +

+
bit
+

number of bits needed for the block +

+
rd
+

rate distortion optimal, slow +

+
zero
+

0 +

+
vsad
+

sum of absolute vertical differences +

+
vsse
+

sum of squared vertical differences +

+
nsse
+

noise preserving sum of squared differences +

+
w53
+

5/3 wavelet, only used in snow +

+
w97
+

9/7 wavelet, only used in snow +

+
dctmax
+
chroma
+
+ +
+
ildctcmp integer (encoding,video)
+

Set interlaced dct compare function. +

+

Possible values: +

+
sad
+

sum of absolute differences, fast (default) +

+
sse
+

sum of squared errors +

+
satd
+

sum of absolute Hadamard transformed differences +

+
dct
+

sum of absolute DCT transformed differences +

+
psnr
+

sum of squared quantization errors (avoid, low quality) +

+
bit
+

number of bits needed for the block +

+
rd
+

rate distortion optimal, slow +

+
zero
+

0 +

+
vsad
+

sum of absolute vertical differences +

+
vsse
+

sum of squared vertical differences +

+
nsse
+

noise preserving sum of squared differences +

+
w53
+

5/3 wavelet, only used in snow +

+
w97
+

9/7 wavelet, only used in snow +

+
dctmax
+
chroma
+
+ +
+
dia_size integer (encoding,video)
+

Set diamond type & size for motion estimation. +

+
+
last_pred integer (encoding,video)
+

Set amount of motion predictors from the previous frame. +

+
+
preme integer (encoding,video)
+

Set pre motion estimation. +

+
+
precmp integer (encoding,video)
+

Set pre motion estimation compare function. +

+

Possible values: +

+
sad
+

sum of absolute differences, fast (default) +

+
sse
+

sum of squared errors +

+
satd
+

sum of absolute Hadamard transformed differences +

+
dct
+

sum of absolute DCT transformed differences +

+
psnr
+

sum of squared quantization errors (avoid, low quality) +

+
bit
+

number of bits needed for the block +

+
rd
+

rate distortion optimal, slow +

+
zero
+

0 +

+
vsad
+

sum of absolute vertical differences +

+
vsse
+

sum of squared vertical differences +

+
nsse
+

noise preserving sum of squared differences +

+
w53
+

5/3 wavelet, only used in snow +

+
w97
+

9/7 wavelet, only used in snow +

+
dctmax
+
chroma
+
+ +
+
pre_dia_size integer (encoding,video)
+

Set diamond type & size for motion estimation pre-pass. +

+
+
subq integer (encoding,video)
+

Set sub pel motion estimation quality. +

+
+
dtg_active_format integer
+
me_range integer (encoding,video)
+

Set limit motion vectors range (1023 for DivX player). +

+
+
ibias integer (encoding,video)
+

Set intra quant bias. +

+
+
pbias integer (encoding,video)
+

Set inter quant bias. +

+
+
color_table_id integer
+
global_quality integer (encoding,audio,video)
+
coder integer (encoding,video)
+
+

Possible values: +

+
vlc
+

variable length coder / huffman coder +

+
ac
+

arithmetic coder +

+
raw
+

raw (no encoding) +

+
rle
+

run-length coder +

+
deflate
+

deflate-based coder +

+
+ +
+
context integer (encoding,video)
+

Set context model. +

+
+
slice_flags integer
+
xvmc_acceleration integer
+
mbd integer (encoding,video)
+

Set macroblock decision algorithm (high quality mode). +

+

Possible values: +

+
simple
+

use mbcmp (default) +

+
bits
+

use fewest bits +

+
rd
+

use best rate distortion +

+
+ +
+
stream_codec_tag integer
+
sc_threshold integer (encoding,video)
+

Set scene change threshold. +

+
+
lmin integer (encoding,video)
+

Set min lagrange factor (VBR). +

+
+
lmax integer (encoding,video)
+

Set max lagrange factor (VBR). +

+
+
nr integer (encoding,video)
+

Set noise reduction. +

+
+
rc_init_occupancy integer (encoding,video)
+

Set number of bits which should be loaded into the rc buffer before +decoding starts. +

+
+
flags2 flags (decoding/encoding,audio,video)
+
+

Possible values: +

+
fast
+

Allow non spec compliant speedup tricks. +

+
sgop
+

Deprecated, use mpegvideo private options instead. +

+
noout
+

Skip bitstream encoding. +

+
ignorecrop
+

Ignore cropping information from sps. +

+
local_header
+

Place global headers at every keyframe instead of in extradata. +

+
chunks
+

Frame data might be split into multiple chunks. +

+
showall
+

Show all frames before the first keyframe. +

+
skiprd
+

Deprecated, use mpegvideo private options instead. +

+
+ +
+
error integer (encoding,video)
+
qns integer (encoding,video)
+

Deprecated, use mpegvideo private options instead. +

+
+
threads integer (decoding/encoding,video)
+
+

Possible values: +

+
auto
+

detect a good number of threads +

+
+ +
+
me_threshold integer (encoding,video)
+

Set motion estimation threshold. +

+
+
mb_threshold integer (encoding,video)
+

Set macroblock threshold. +

+
+
dc integer (encoding,video)
+

Set intra_dc_precision. +

+
+
nssew integer (encoding,video)
+

Set nsse weight. +

+
+
skip_top integer (decoding,video)
+

Set number of macroblock rows at the top which are skipped. +

+
+
skip_bottom integer (decoding,video)
+

Set number of macroblock rows at the bottom which are skipped. +

+
+
profile integer (encoding,audio,video)
+
+

Possible values: +

+
unknown
+
aac_main
+
aac_low
+
aac_ssr
+
aac_ltp
+
aac_he
+
aac_he_v2
+
aac_ld
+
aac_eld
+
mpeg2_aac_low
+
mpeg2_aac_he
+
dts
+
dts_es
+
dts_96_24
+
dts_hd_hra
+
dts_hd_ma
+
+ +
+
level integer (encoding,audio,video)
+
+

Possible values: +

+
unknown
+
+ +
+
lowres integer (decoding,audio,video)
+

Decode at 1= 1/2, 2=1/4, 3=1/8 resolutions. +

+
+
skip_threshold integer (encoding,video)
+

Set frame skip threshold. +

+
+
skip_factor integer (encoding,video)
+

Set frame skip factor. +

+
+
skip_exp integer (encoding,video)
+

Set frame skip exponent. +Negative values behave identically to the corresponding positive ones, except +that the score is normalized. +Positive values exist primarily for compatibility reasons and are not so useful. +

+
+
skipcmp integer (encoding,video)
+

Set frame skip compare function. +

+

Possible values: +

+
sad
+

sum of absolute differences, fast (default) +

+
sse
+

sum of squared errors +

+
satd
+

sum of absolute Hadamard transformed differences +

+
dct
+

sum of absolute DCT transformed differences +

+
psnr
+

sum of squared quantization errors (avoid, low quality) +

+
bit
+

number of bits needed for the block +

+
rd
+

rate distortion optimal, slow +

+
zero
+

0 +

+
vsad
+

sum of absolute vertical differences +

+
vsse
+

sum of squared vertical differences +

+
nsse
+

noise preserving sum of squared differences +

+
w53
+

5/3 wavelet, only used in snow +

+
w97
+

9/7 wavelet, only used in snow +

+
dctmax
+
chroma
+
+ +
+
border_mask float (encoding,video)
+

Increase the quantizer for macroblocks close to borders. +

+
+
mblmin integer (encoding,video)
+

Set min macroblock lagrange factor (VBR). +

+
+
mblmax integer (encoding,video)
+

Set max macroblock lagrange factor (VBR). +

+
+
mepc integer (encoding,video)
+

Set motion estimation bitrate penalty compensation (1.0 = 256). +

+
+
skip_loop_filter integer (decoding,video)
+
skip_idct integer (decoding,video)
+
skip_frame integer (decoding,video)
+
+

Make decoder discard processing depending on the frame type selected +by the option value. +

+

skip_loop_filter’ skips frame loop filtering, ‘skip_idct’ +skips frame IDCT/dequantization, ‘skip_frame’ skips decoding. +

+

Possible values: +

+
none
+

Discard no frame. +

+
+
default
+

Discard useless frames like 0-sized frames. +

+
+
noref
+

Discard all non-reference frames. +

+
+
bidir
+

Discard all bidirectional frames. +

+
+
nokey
+

Discard all frames excepts keyframes. +

+
+
all
+

Discard all frames. +

+
+ +

Default value is ‘default’. +

+
+
bidir_refine integer (encoding,video)
+

Refine the two motion vectors used in bidirectional macroblocks. +

+
+
brd_scale integer (encoding,video)
+

Downscale frames for dynamic B-frame decision. +

+
+
keyint_min integer (encoding,video)
+

Set minimum interval between IDR-frames. +

+
+
refs integer (encoding,video)
+

Set reference frames to consider for motion compensation. +

+
+
chromaoffset integer (encoding,video)
+

Set chroma qp offset from luma. +

+
+
trellis integer (encoding,audio,video)
+

Set rate-distortion optimal quantization. +

+
+
sc_factor integer (encoding,video)
+

Set value multiplied by qscale for each frame and added to +scene_change_score. +

+
+
mv0_threshold integer (encoding,video)
+
b_sensitivity integer (encoding,video)
+

Adjust sensitivity of b_frame_strategy 1. +

+
+
compression_level integer (encoding,audio,video)
+
min_prediction_order integer (encoding,audio)
+
max_prediction_order integer (encoding,audio)
+
timecode_frame_start integer (encoding,video)
+

Set GOP timecode frame start number, in non drop frame format. +

+
+
request_channels integer (decoding,audio)
+

Set desired number of audio channels. +

+
+
bits_per_raw_sample integer
+
channel_layout integer (decoding/encoding,audio)
+
+

Possible values: +

+
request_channel_layout integer (decoding,audio)
+
+

Possible values: +

+
rc_max_vbv_use float (encoding,video)
+
rc_min_vbv_use float (encoding,video)
+
ticks_per_frame integer (decoding/encoding,audio,video)
+
color_primaries integer (decoding/encoding,video)
+
color_trc integer (decoding/encoding,video)
+
colorspace integer (decoding/encoding,video)
+
color_range integer (decoding/encoding,video)
+
chroma_sample_location integer (decoding/encoding,video)
+
log_level_offset integer
+

Set the log level offset. +

+
+
slices integer (encoding,video)
+

Number of slices, used in parallelized encoding. +

+
+
thread_type flags (decoding/encoding,video)
+

Select multithreading type. +

+

Possible values: +

+
slice
+
frame
+
+
+
audio_service_type integer (encoding,audio)
+

Set audio service type. +

+

Possible values: +

+
ma
+

Main Audio Service +

+
ef
+

Effects +

+
vi
+

Visually Impaired +

+
hi
+

Hearing Impaired +

+
di
+

Dialogue +

+
co
+

Commentary +

+
em
+

Emergency +

+
vo
+

Voice Over +

+
ka
+

Karaoke +

+
+ +
+
request_sample_fmt sample_fmt (decoding,audio)
+

Set sample format audio decoders should prefer. Default value is +none. +

+
+
pkt_timebase rational number
+
sub_charenc encoding (decoding,subtitles)
+

Set the input subtitles character encoding. +

+
+
field_order field_order (video)
+

Set/override the field order of the video. +Possible values: +

+
progressive
+

Progressive video +

+
tt
+

Interlaced video, top field coded and displayed first +

+
bb
+

Interlaced video, bottom field coded and displayed first +

+
tb
+

Interlaced video, top coded first, bottom displayed first +

+
bt
+

Interlaced video, bottom coded first, top displayed first +

+
+ +
+
skip_alpha integer (decoding,video)
+

Set to 1 to disable processing alpha (transparency). This works like the +‘gray’ flag in the ‘flags’ option which skips chroma information +instead of alpha. Default is 0. +

+
+ + + +

12. Decoders

+ +

Decoders are configured elements in FFmpeg which allow the decoding of +multimedia streams. +

+

When you configure your FFmpeg build, all the supported native decoders +are enabled by default. Decoders requiring an external library must be enabled +manually via the corresponding --enable-lib option. You can list all +available decoders using the configure option --list-decoders. +

+

You can disable all the decoders with the configure option +--disable-decoders and selectively enable / disable single decoders +with the options --enable-decoder=DECODER / +--disable-decoder=DECODER. +

+

The option -decoders of the ff* tools will display the list of +enabled decoders. +

+ + +

13. Video Decoders

+ +

A description of some of the currently available video decoders +follows. +

+ +

13.1 rawvideo

+ +

Raw video decoder. +

+

This decoder decodes rawvideo streams. +

+ +

13.1.1 Options

+ +
+
top top_field_first
+

Specify the assumed field type of the input video. +

+
-1
+

the video is assumed to be progressive (default) +

+
0
+

bottom-field-first is assumed +

+
1
+

top-field-first is assumed +

+
+ +
+
+ + + +

14. Audio Decoders

+ +

A description of some of the currently available audio decoders +follows. +

+ +

14.1 ac3

+ +

AC-3 audio decoder. +

+

This decoder implements part of ATSC A/52:2010 and ETSI TS 102 366, as well as +the undocumented RealAudio 3 (a.k.a. dnet). +

+ +

14.1.1 AC-3 Decoder Options

+ +
+
-drc_scale value
+

Dynamic Range Scale Factor. The factor to apply to dynamic range values +from the AC-3 stream. This factor is applied exponentially. +There are 3 notable scale factor ranges: +

+
drc_scale == 0
+

DRC disabled. Produces full range audio. +

+
0 < drc_scale <= 1
+

DRC enabled. Applies a fraction of the stream DRC value. +Audio reproduction is between full range and full compression. +

+
drc_scale > 1
+

DRC enabled. Applies drc_scale asymmetrically. +Loud sounds are fully compressed. Soft sounds are enhanced. +

+
+ +
+
+ + +

14.2 ffwavesynth

+ +

Internal wave synthesizer. +

+

This decoder generates wave patterns according to predefined sequences. Its +use is purely internal and the format of the data it accepts is not publicly +documented. +

+ +

14.3 libcelt

+ +

libcelt decoder wrapper. +

+

libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec. +Requires the presence of the libcelt headers and library during configuration. +You need to explicitly configure the build with --enable-libcelt. +

+ +

14.4 libgsm

+ +

libgsm decoder wrapper. +

+

libgsm allows libavcodec to decode the GSM full rate audio codec. Requires +the presence of the libgsm headers and library during configuration. You need +to explicitly configure the build with --enable-libgsm. +

+

This decoder supports both the ordinary GSM and the Microsoft variant. +

+ +

14.5 libilbc

+ +

libilbc decoder wrapper. +

+

libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC) +audio codec. Requires the presence of the libilbc headers and library during +configuration. You need to explicitly configure the build with +--enable-libilbc. +

+ +

14.5.1 Options

+ +

The following option is supported by the libilbc wrapper. +

+
+
enhance
+
+

Enable the enhancement of the decoded audio when set to 1. The default +value is 0 (disabled). +

+
+
+ + +

14.6 libopencore-amrnb

+ +

libopencore-amrnb decoder wrapper. +

+

libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate +Narrowband audio codec. Using it requires the presence of the +libopencore-amrnb headers and library during configuration. You need to +explicitly configure the build with --enable-libopencore-amrnb. +

+

An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB +without this library. +

+ +

14.7 libopencore-amrwb

+ +

libopencore-amrwb decoder wrapper. +

+

libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate +Wideband audio codec. Using it requires the presence of the +libopencore-amrwb headers and library during configuration. You need to +explicitly configure the build with --enable-libopencore-amrwb. +

+

An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB +without this library. +

+ +

14.8 libopus

+ +

libopus decoder wrapper. +

+

libopus allows libavcodec to decode the Opus Interactive Audio Codec. +Requires the presence of the libopus headers and library during +configuration. You need to explicitly configure the build with +--enable-libopus. +

+ + +

15. Subtitles Decoders

+ + +

15.1 dvdsub

+ +

This codec decodes the bitmap subtitles used in DVDs; the same subtitles can +also be found in VobSub file pairs and in some Matroska files. +

+ +

15.1.1 Options

+ +
+
palette
+

Specify the global palette used by the bitmaps. When stored in VobSub, the +palette is normally specified in the index file; in Matroska, the palette is +stored in the codec extra-data in the same format as in VobSub. In DVDs, the +palette is stored in the IFO file, and therefore not available when reading +from dumped VOB files. +

+

The format for this option is a string containing 16 24-bit hexadecimal +numbers (without 0x prefix) separated by commas, for example 0d00ee, +ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1, +7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b. +

+
+ + +

15.2 libzvbi-teletext

+ +

Libzvbi allows libavcodec to decode DVB teletext pages and DVB teletext +subtitles. Requires the presence of the libzvbi headers and library during +configuration. You need to explicitly configure the build with +--enable-libzvbi. +

+ +

15.2.1 Options

+ +
+
txt_page
+

List of teletext page numbers to decode. You may use the special * string to +match all pages. Pages that do not match the specified list are dropped. +Default value is *. +

+
txt_chop_top
+

Discards the top teletext line. Default value is 1. +

+
txt_format
+

Specifies the format of the decoded subtitles. The teletext decoder is capable +of decoding the teletext pages to bitmaps or to simple text, you should use +"bitmap" for teletext pages, because certain graphics and colors cannot be +expressed in simple text. You might use "text" for teletext based subtitles if +your application can handle simple text based subtitles. Default value is +bitmap. +

+
txt_left
+

X offset of generated bitmaps, default is 0. +

+
txt_top
+

Y offset of generated bitmaps, default is 0. +

+
txt_chop_spaces
+

Chops leading and trailing spaces and removes empty lines from the generated +text. This option is useful for teletext based subtitles where empty spaces may +be present at the start or at the end of the lines or empty lines may be +present between the subtitle lines because of double-sized teletext characters. +Default value is 1. +

+
txt_duration
+

Sets the display duration of the decoded teletext pages or subtitles in +milliseconds. Default value is 30000 which is 30 seconds. +

+
txt_transparent
+

Force transparent background of the generated teletext bitmaps. Default value +is 0 which means an opaque (black) background. +

+
+ + +

16. Encoders

+ +

Encoders are configured elements in FFmpeg which allow the encoding of +multimedia streams. +

+

When you configure your FFmpeg build, all the supported native encoders +are enabled by default. Encoders requiring an external library must be enabled +manually via the corresponding --enable-lib option. You can list all +available encoders using the configure option --list-encoders. +

+

You can disable all the encoders with the configure option +--disable-encoders and selectively enable / disable single encoders +with the options --enable-encoder=ENCODER / +--disable-encoder=ENCODER. +

+

The option -encoders of the ff* tools will display the list of +enabled encoders. +

+ + +

17. Audio Encoders

+ +

A description of some of the currently available audio encoders +follows. +

+

+

+

17.1 aac

+ +

Advanced Audio Coding (AAC) encoder. +

+

This encoder is an experimental FFmpeg-native AAC encoder. Currently only the +low complexity (AAC-LC) profile is supported. To use this encoder, you must set +‘strict’ option to ‘experimental’ or lower. +

+

As this encoder is experimental, unexpected behavior may exist from time to +time. For a more stable AAC encoder, see libvo-aacenc. However, be warned +that it has a worse quality reported by some users. +

+

See also libfdk_aac and libfaac. +

+ +

17.1.1 Options

+ +
+
b
+

Set bit rate in bits/s. Setting this automatically activates constant bit rate +(CBR) mode. +

+
+
q
+

Set quality for variable bit rate (VBR) mode. This option is valid only using +the ffmpeg command-line tool. For library interface users, use +‘global_quality’. +

+
+
stereo_mode
+

Set stereo encoding mode. Possible values: +

+
+
auto
+

Automatically selected by the encoder. +

+
+
ms_off
+

Disable middle/side encoding. This is the default. +

+
+
ms_force
+

Force middle/side encoding. +

+
+ +
+
aac_coder
+

Set AAC encoder coding method. Possible values: +

+
+
faac
+

FAAC-inspired method. +

+

This method is a simplified reimplementation of the method used in FAAC, which +sets thresholds proportional to the band energies, and then decreases all the +thresholds with quantizer steps to find the appropriate quantization with +distortion below threshold band by band. +

+

The quality of this method is comparable to the two loop searching method +described below, but somewhat a little better and slower. +

+
+
anmr
+

Average noise to mask ratio (ANMR) trellis-based solution. +

+

This has a theoretic best quality out of all the coding methods, but at the +cost of the slowest speed. +

+
+
twoloop
+

Two loop searching (TLS) method. +

+

This method first sets quantizers depending on band thresholds and then tries +to find an optimal combination by adding or subtracting a specific value from +all quantizers and adjusting some individual quantizer a little. +

+

This method produces similar quality with the FAAC method and is the default. +

+
+
fast
+

Constant quantizer method. +

+

This method sets a constant quantizer for all bands. This is the fastest of all +the methods, yet produces the worst quality. +

+
+
+ +
+
+ + +

17.2 ac3 and ac3_fixed

+ +

AC-3 audio encoders. +

+

These encoders implement part of ATSC A/52:2010 and ETSI TS 102 366, as well as +the undocumented RealAudio 3 (a.k.a. dnet). +

+

The ac3 encoder uses floating-point math, while the ac3_fixed +encoder only uses fixed-point integer math. This does not mean that one is +always faster, just that one or the other may be better suited to a +particular system. The floating-point encoder will generally produce better +quality audio for a given bitrate. The ac3_fixed encoder is not the +default codec for any of the output formats, so it must be specified explicitly +using the option -acodec ac3_fixed in order to use it. +

+ +

17.2.1 AC-3 Metadata

+ +

The AC-3 metadata options are used to set parameters that describe the audio, +but in most cases do not affect the audio encoding itself. Some of the options +do directly affect or influence the decoding and playback of the resulting +bitstream, while others are just for informational purposes. A few of the +options will add bits to the output stream that could otherwise be used for +audio data, and will thus affect the quality of the output. Those will be +indicated accordingly with a note in the option list below. +

+

These parameters are described in detail in several publicly-available +documents. +

+ + +

17.2.1.1 Metadata Control Options

+ +
+
-per_frame_metadata boolean
+

Allow Per-Frame Metadata. Specifies if the encoder should check for changing +metadata for each frame. +

+
0
+

The metadata values set at initialization will be used for every frame in the +stream. (default) +

+
1
+

Metadata values can be changed before encoding each frame. +

+
+ +
+
+ + +

17.2.1.2 Downmix Levels

+ +
+
-center_mixlev level
+

Center Mix Level. The amount of gain the decoder should apply to the center +channel when downmixing to stereo. This field will only be written to the +bitstream if a center channel is present. The value is specified as a scale +factor. There are 3 valid values: +

+
0.707
+

Apply -3dB gain +

+
0.595
+

Apply -4.5dB gain (default) +

+
0.500
+

Apply -6dB gain +

+
+ +
+
-surround_mixlev level
+

Surround Mix Level. The amount of gain the decoder should apply to the surround +channel(s) when downmixing to stereo. This field will only be written to the +bitstream if one or more surround channels are present. The value is specified +as a scale factor. There are 3 valid values: +

+
0.707
+

Apply -3dB gain +

+
0.500
+

Apply -6dB gain (default) +

+
0.000
+

Silence Surround Channel(s) +

+
+ +
+
+ + +

17.2.1.3 Audio Production Information

+

Audio Production Information is optional information describing the mixing +environment. Either none or both of the fields are written to the bitstream. +

+
+
-mixing_level number
+

Mixing Level. Specifies peak sound pressure level (SPL) in the production +environment when the mix was mastered. Valid values are 80 to 111, or -1 for +unknown or not indicated. The default value is -1, but that value cannot be +used if the Audio Production Information is written to the bitstream. Therefore, +if the room_type option is not the default value, the mixing_level +option must not be -1. +

+
+
-room_type type
+

Room Type. Describes the equalization used during the final mixing session at +the studio or on the dubbing stage. A large room is a dubbing stage with the +industry standard X-curve equalization; a small room has flat equalization. +This field will not be written to the bitstream if both the mixing_level +option and the room_type option have the default values. +

+
0
+
notindicated
+

Not Indicated (default) +

+
1
+
large
+

Large Room +

+
2
+
small
+

Small Room +

+
+ +
+
+ + +

17.2.1.4 Other Metadata Options

+ +
+
-copyright boolean
+

Copyright Indicator. Specifies whether a copyright exists for this audio. +

+
0
+
off
+

No Copyright Exists (default) +

+
1
+
on
+

Copyright Exists +

+
+ +
+
-dialnorm value
+

Dialogue Normalization. Indicates how far the average dialogue level of the +program is below digital 100% full scale (0 dBFS). This parameter determines a +level shift during audio reproduction that sets the average volume of the +dialogue to a preset level. The goal is to match volume level between program +sources. A value of -31dB will result in no volume level change, relative to +the source volume, during audio reproduction. Valid values are whole numbers in +the range -31 to -1, with -31 being the default. +

+
+
-dsur_mode mode
+

Dolby Surround Mode. Specifies whether the stereo signal uses Dolby Surround +(Pro Logic). This field will only be written to the bitstream if the audio +stream is stereo. Using this option does NOT mean the encoder will actually +apply Dolby Surround processing. +

+
0
+
notindicated
+

Not Indicated (default) +

+
1
+
off
+

Not Dolby Surround Encoded +

+
2
+
on
+

Dolby Surround Encoded +

+
+ +
+
-original boolean
+

Original Bit Stream Indicator. Specifies whether this audio is from the +original source and not a copy. +

+
0
+
off
+

Not Original Source +

+
1
+
on
+

Original Source (default) +

+
+ +
+
+ + +

17.2.2 Extended Bitstream Information

+

The extended bitstream options are part of the Alternate Bit Stream Syntax as +specified in Annex D of the A/52:2010 standard. It is grouped into 2 parts. +If any one parameter in a group is specified, all values in that group will be +written to the bitstream. Default values are used for those that are written +but have not been specified. If the mixing levels are written, the decoder +will use these values instead of the ones specified in the center_mixlev +and surround_mixlev options if it supports the Alternate Bit Stream +Syntax. +

+ +

17.2.2.1 Extended Bitstream Information - Part 1

+ +
+
-dmix_mode mode
+

Preferred Stereo Downmix Mode. Allows the user to select either Lt/Rt +(Dolby Surround) or Lo/Ro (normal stereo) as the preferred stereo downmix mode. +

+
0
+
notindicated
+

Not Indicated (default) +

+
1
+
ltrt
+

Lt/Rt Downmix Preferred +

+
2
+
loro
+

Lo/Ro Downmix Preferred +

+
+ +
+
-ltrt_cmixlev level
+

Lt/Rt Center Mix Level. The amount of gain the decoder should apply to the +center channel when downmixing to stereo in Lt/Rt mode. +

+
1.414
+

Apply +3dB gain +

+
1.189
+

Apply +1.5dB gain +

+
1.000
+

Apply 0dB gain +

+
0.841
+

Apply -1.5dB gain +

+
0.707
+

Apply -3.0dB gain +

+
0.595
+

Apply -4.5dB gain (default) +

+
0.500
+

Apply -6.0dB gain +

+
0.000
+

Silence Center Channel +

+
+ +
+
-ltrt_surmixlev level
+

Lt/Rt Surround Mix Level. The amount of gain the decoder should apply to the +surround channel(s) when downmixing to stereo in Lt/Rt mode. +

+
0.841
+

Apply -1.5dB gain +

+
0.707
+

Apply -3.0dB gain +

+
0.595
+

Apply -4.5dB gain +

+
0.500
+

Apply -6.0dB gain (default) +

+
0.000
+

Silence Surround Channel(s) +

+
+ +
+
-loro_cmixlev level
+

Lo/Ro Center Mix Level. The amount of gain the decoder should apply to the +center channel when downmixing to stereo in Lo/Ro mode. +

+
1.414
+

Apply +3dB gain +

+
1.189
+

Apply +1.5dB gain +

+
1.000
+

Apply 0dB gain +

+
0.841
+

Apply -1.5dB gain +

+
0.707
+

Apply -3.0dB gain +

+
0.595
+

Apply -4.5dB gain (default) +

+
0.500
+

Apply -6.0dB gain +

+
0.000
+

Silence Center Channel +

+
+ +
+
-loro_surmixlev level
+

Lo/Ro Surround Mix Level. The amount of gain the decoder should apply to the +surround channel(s) when downmixing to stereo in Lo/Ro mode. +

+
0.841
+

Apply -1.5dB gain +

+
0.707
+

Apply -3.0dB gain +

+
0.595
+

Apply -4.5dB gain +

+
0.500
+

Apply -6.0dB gain (default) +

+
0.000
+

Silence Surround Channel(s) +

+
+ +
+
+ + +

17.2.2.2 Extended Bitstream Information - Part 2

+ +
+
-dsurex_mode mode
+

Dolby Surround EX Mode. Indicates whether the stream uses Dolby Surround EX +(7.1 matrixed to 5.1). Using this option does NOT mean the encoder will actually +apply Dolby Surround EX processing. +

+
0
+
notindicated
+

Not Indicated (default) +

+
1
+
on
+

Dolby Surround EX On +

+
2
+
off
+

Dolby Surround EX Off +

+
+ +
+
-dheadphone_mode mode
+

Dolby Headphone Mode. Indicates whether the stream uses Dolby Headphone +encoding (multi-channel matrixed to 2.0 for use with headphones). Using this +option does NOT mean the encoder will actually apply Dolby Headphone +processing. +

+
0
+
notindicated
+

Not Indicated (default) +

+
1
+
on
+

Dolby Headphone On +

+
2
+
off
+

Dolby Headphone Off +

+
+ +
+
-ad_conv_type type
+

A/D Converter Type. Indicates whether the audio has passed through HDCD A/D +conversion. +

+
0
+
standard
+

Standard A/D Converter (default) +

+
1
+
hdcd
+

HDCD A/D Converter +

+
+ +
+
+ + +

17.2.3 Other AC-3 Encoding Options

+ +
+
-stereo_rematrixing boolean
+

Stereo Rematrixing. Enables/Disables use of rematrixing for stereo input. This +is an optional AC-3 feature that increases quality by selectively encoding +the left/right channels as mid/side. This option is enabled by default, and it +is highly recommended that it be left as enabled except for testing purposes. +

+
+
+ + +

17.2.4 Floating-Point-Only AC-3 Encoding Options

+ +

These options are only valid for the floating-point encoder and do not exist +for the fixed-point encoder due to the corresponding features not being +implemented in fixed-point. +

+
+
-channel_coupling boolean
+

Enables/Disables use of channel coupling, which is an optional AC-3 feature +that increases quality by combining high frequency information from multiple +channels into a single channel. The per-channel high frequency information is +sent with less accuracy in both the frequency and time domains. This allows +more bits to be used for lower frequencies while preserving enough information +to reconstruct the high frequencies. This option is enabled by default for the +floating-point encoder and should generally be left as enabled except for +testing purposes or to increase encoding speed. +

+
-1
+
auto
+

Selected by Encoder (default) +

+
0
+
off
+

Disable Channel Coupling +

+
1
+
on
+

Enable Channel Coupling +

+
+ +
+
-cpl_start_band number
+

Coupling Start Band. Sets the channel coupling start band, from 1 to 15. If a +value higher than the bandwidth is used, it will be reduced to 1 less than the +coupling end band. If auto is used, the start band will be determined by +the encoder based on the bit rate, sample rate, and channel layout. This option +has no effect if channel coupling is disabled. +

+
-1
+
auto
+

Selected by Encoder (default) +

+
+ +
+
+ +

+

+

17.3 libfaac

+ +

libfaac AAC (Advanced Audio Coding) encoder wrapper. +

+

Requires the presence of the libfaac headers and library during +configuration. You need to explicitly configure the build with +--enable-libfaac --enable-nonfree. +

+

This encoder is considered to be of higher quality with respect to the +native experimental FFmpeg AAC encoder. +

+

For more information see the libfaac project at +http://www.audiocoding.com/faac.html/. +

+ +

17.3.1 Options

+ +

The following shared FFmpeg codec options are recognized. +

+

The following options are supported by the libfaac wrapper. The +faac-equivalent of the options are listed in parentheses. +

+
+
b (-b)
+

Set bit rate in bits/s for ABR (Average Bit Rate) mode. If the bit rate +is not explicitly specified, it is automatically set to a suitable +value depending on the selected profile. faac bitrate is +expressed in kilobits/s. +

+

Note that libfaac does not support CBR (Constant Bit Rate) but only +ABR (Average Bit Rate). +

+

If VBR mode is enabled this option is ignored. +

+
+
ar (-R)
+

Set audio sampling rate (in Hz). +

+
+
ac (-c)
+

Set the number of audio channels. +

+
+
cutoff (-C)
+

Set cutoff frequency. If not specified (or explicitly set to 0) it +will use a value automatically computed by the library. Default value +is 0. +

+
+
profile
+

Set audio profile. +

+

The following profiles are recognized: +

+
aac_main
+

Main AAC (Main) +

+
+
aac_low
+

Low Complexity AAC (LC) +

+
+
aac_ssr
+

Scalable Sample Rate (SSR) +

+
+
aac_ltp
+

Long Term Prediction (LTP) +

+
+ +

If not specified it is set to ‘aac_low’. +

+
+
flags +qscale
+

Set constant quality VBR (Variable Bit Rate) mode. +

+
+
global_quality
+

Set quality in VBR mode as an integer number of lambda units. +

+

Only relevant when VBR mode is enabled with flags +qscale. The +value is converted to QP units by dividing it by FF_QP2LAMBDA, +and used to set the quality value used by libfaac. A reasonable range +for the option value in QP units is [10-500], the higher the value the +higher the quality. +

+
+
q (-q)
+

Enable VBR mode when set to a non-negative value, and set constant +quality value as a double floating point value in QP units. +

+

The value sets the quality value used by libfaac. A reasonable range +for the option value is [10-500], the higher the value the higher the +quality. +

+

This option is valid only using the ffmpeg command-line +tool. For library interface users, use ‘global_quality’. +

+
+ + +

17.3.2 Examples

+ +
    +
  • +Use ffmpeg to convert an audio file to ABR 128 kbps AAC in an M4A (MP4) +container: +
     
    ffmpeg -i input.wav -codec:a libfaac -b:a 128k output.m4a
    +
    + +
  • +Use ffmpeg to convert an audio file to VBR AAC, using the +LTP AAC profile: +
     
    ffmpeg -i input.wav -c:a libfaac -profile:a aac_ltp -q:a 100 output.m4a
    +
    +
+ +

+

+

17.4 libfdk_aac

+ +

libfdk-aac AAC (Advanced Audio Coding) encoder wrapper. +

+

The libfdk-aac library is based on the Fraunhofer FDK AAC code from +the Android project. +

+

Requires the presence of the libfdk-aac headers and library during +configuration. You need to explicitly configure the build with +--enable-libfdk-aac. The library is also incompatible with GPL, +so if you allow the use of GPL, you should configure with +--enable-gpl --enable-nonfree --enable-libfdk-aac. +

+

This encoder is considered to be of higher quality with respect to +both the native experimental FFmpeg AAC encoder and +libfaac. +

+

VBR encoding, enabled through the ‘vbr’ or ‘flags ++qscale’ options, is experimental and only works with some +combinations of parameters. +

+

Support for encoding 7.1 audio is only available with libfdk-aac 0.1.3 or +higher. +

+

For more information see the fdk-aac project at +http://sourceforge.net/p/opencore-amr/fdk-aac/. +

+ +

17.4.1 Options

+ +

The following options are mapped on the shared FFmpeg codec options. +

+
+
b
+

Set bit rate in bits/s. If the bitrate is not explicitly specified, it +is automatically set to a suitable value depending on the selected +profile. +

+

In case VBR mode is enabled the option is ignored. +

+
+
ar
+

Set audio sampling rate (in Hz). +

+
+
channels
+

Set the number of audio channels. +

+
+
flags +qscale
+

Enable fixed quality, VBR (Variable Bit Rate) mode. +Note that VBR is implicitly enabled when the ‘vbr’ value is +positive. +

+
+
cutoff
+

Set cutoff frequency. If not specified (or explicitly set to 0) it +will use a value automatically computed by the library. Default value +is 0. +

+
+
profile
+

Set audio profile. +

+

The following profiles are recognized: +

+
aac_low
+

Low Complexity AAC (LC) +

+
+
aac_he
+

High Efficiency AAC (HE-AAC) +

+
+
aac_he_v2
+

High Efficiency AAC version 2 (HE-AACv2) +

+
+
aac_ld
+

Low Delay AAC (LD) +

+
+
aac_eld
+

Enhanced Low Delay AAC (ELD) +

+
+ +

If not specified it is set to ‘aac_low’. +

+
+ +

The following are private options of the libfdk_aac encoder. +

+
+
afterburner
+

Enable afterburner feature if set to 1, disabled if set to 0. This +improves the quality but also the required processing power. +

+

Default value is 1. +

+
+
eld_sbr
+

Enable SBR (Spectral Band Replication) for ELD if set to 1, disabled +if set to 0. +

+

Default value is 0. +

+
+
signaling
+

Set SBR/PS signaling style. +

+

It can assume one of the following values: +

+
default
+

choose signaling implicitly (explicit hierarchical by default, +implicit if global header is disabled) +

+
+
implicit
+

implicit backwards compatible signaling +

+
+
explicit_sbr
+

explicit SBR, implicit PS signaling +

+
+
explicit_hierarchical
+

explicit hierarchical signaling +

+
+ +

Default value is ‘default’. +

+
+
latm
+

Output LATM/LOAS encapsulated data if set to 1, disabled if set to 0. +

+

Default value is 0. +

+
+
header_period
+

Set StreamMuxConfig and PCE repetition period (in frames) for sending +in-band configuration buffers within LATM/LOAS transport layer. +

+

Must be a 16-bits non-negative integer. +

+

Default value is 0. +

+
+
vbr
+

Set VBR mode, from 1 to 5. 1 is lowest quality (though still pretty +good) and 5 is highest quality. A value of 0 will disable VBR, and CBR +(Constant Bit Rate) is enabled. +

+

Currently only the ‘aac_low’ profile supports VBR encoding. +

+

VBR modes 1-5 correspond to roughly the following average bit rates: +

+
+
1
+

32 kbps/channel +

+
2
+

40 kbps/channel +

+
3
+

48-56 kbps/channel +

+
4
+

64 kbps/channel +

+
5
+

about 80-96 kbps/channel +

+
+ +

Default value is 0. +

+
+ + +

17.4.2 Examples

+ +
    +
  • +Use ffmpeg to convert an audio file to VBR AAC in an M4A (MP4) +container: +
     
    ffmpeg -i input.wav -codec:a libfdk_aac -vbr 3 output.m4a
    +
    + +
  • +Use ffmpeg to convert an audio file to CBR 64k kbps AAC, using the +High-Efficiency AAC profile: +
     
    ffmpeg -i input.wav -c:a libfdk_aac -profile:a aac_he -b:a 64k output.m4a
    +
    +
+ +

+

+

17.5 libmp3lame

+ +

LAME (Lame Ain’t an MP3 Encoder) MP3 encoder wrapper. +

+

Requires the presence of the libmp3lame headers and library during +configuration. You need to explicitly configure the build with +--enable-libmp3lame. +

+

See libshine for a fixed-point MP3 encoder, although with a +lower quality. +

+ +

17.5.1 Options

+ +

The following options are supported by the libmp3lame wrapper. The +lame-equivalent of the options are listed in parentheses. +

+
+
b (-b)
+

Set bitrate expressed in bits/s for CBR or ABR. LAME bitrate is +expressed in kilobits/s. +

+
+
q (-V)
+

Set constant quality setting for VBR. This option is valid only +using the ffmpeg command-line tool. For library interface +users, use ‘global_quality’. +

+
+
compression_level (-q)
+

Set algorithm quality. Valid arguments are integers in the 0-9 range, +with 0 meaning highest quality but slowest, and 9 meaning fastest +while producing the worst quality. +

+
+
reservoir
+

Enable use of bit reservoir when set to 1. Default value is 1. LAME +has this enabled by default, but it can be overridden by using the +‘--nores’ option. +

+
+
joint_stereo (-m j)
+

Enable the encoder to use (on a frame by frame basis) either L/R +stereo or mid/side stereo. Default value is 1. +

+
+
abr (--abr)
+

Enable the encoder to use ABR when set to 1. The lame +‘--abr’ option sets the target bitrate, while this option only +tells FFmpeg to use ABR; it still relies on ‘b’ to set the bitrate. +

+
+
+ + +

17.6 libopencore-amrnb

+ +

OpenCORE Adaptive Multi-Rate Narrowband encoder. +

+

Requires the presence of the libopencore-amrnb headers and library during +configuration. You need to explicitly configure the build with +--enable-libopencore-amrnb --enable-version3. +

+

This is a mono-only encoder. Officially it only supports 8000Hz sample rate, +but you can override it by setting ‘strict’ to ‘unofficial’ or +lower. +

+ +

17.6.1 Options

+ +
+
b
+

Set bitrate in bits per second. Only the following bitrates are supported, +otherwise libavcodec will round to the nearest valid bitrate. +

+
+
4750
+
5150
+
5900
+
6700
+
7400
+
7950
+
10200
+
12200
+
+ +
+
dtx
+

Allow discontinuous transmission (generate comfort noise) when set to 1. The +default value is 0 (disabled). +

+
+
+ +

+

+

17.7 libshine

+ +

Shine Fixed-Point MP3 encoder wrapper. +

+

Shine is a fixed-point MP3 encoder. It has a far better performance on +platforms without an FPU, e.g. armel CPUs, and some phones and tablets. +However, as it is more targeted on performance than quality, it is not on par +with LAME and other production-grade encoders quality-wise. Also, according to +the project’s homepage, this encoder may not be free of bugs as the code was +written a long time ago and the project was dead for at least 5 years. +

+

This encoder only supports stereo and mono input. This is also CBR-only. +

+

The original project (last updated in early 2007) is at +http://sourceforge.net/projects/libshine-fxp/. We only support the +updated fork by the Savonet/Liquidsoap project at https://github.com/savonet/shine. +

+

Requires the presence of the libshine headers and library during +configuration. You need to explicitly configure the build with +--enable-libshine. +

+

See also libmp3lame. +

+ +

17.7.1 Options

+ +

The following options are supported by the libshine wrapper. The +shineenc-equivalent of the options are listed in parentheses. +

+
+
b (-b)
+

Set bitrate expressed in bits/s for CBR. shineenc-b’ option +is expressed in kilobits/s. +

+
+
+ + +

17.8 libtwolame

+ +

TwoLAME MP2 encoder wrapper. +

+

Requires the presence of the libtwolame headers and library during +configuration. You need to explicitly configure the build with +--enable-libtwolame. +

+ +

17.8.1 Options

+ +

The following options are supported by the libtwolame wrapper. The +twolame-equivalent options follow the FFmpeg ones and are in +parentheses. +

+
+
b (-b)
+

Set bitrate expressed in bits/s for CBR. twolameb’ +option is expressed in kilobits/s. Default value is 128k. +

+
+
q (-V)
+

Set quality for experimental VBR support. Maximum value range is +from -50 to 50, useful range is from -10 to 10. The higher the +value, the better the quality. This option is valid only using the +ffmpeg command-line tool. For library interface users, +use ‘global_quality’. +

+
+
mode (--mode)
+

Set the mode of the resulting audio. Possible values: +

+
+
auto
+

Choose mode automatically based on the input. This is the default. +

+
stereo
+

Stereo +

+
joint_stereo
+

Joint stereo +

+
dual_channel
+

Dual channel +

+
mono
+

Mono +

+
+ +
+
psymodel (--psyc-mode)
+

Set psychoacoustic model to use in encoding. The argument must be +an integer between -1 and 4, inclusive. The higher the value, the +better the quality. The default value is 3. +

+
+
energy_levels (--energy)
+

Enable energy levels extensions when set to 1. The default value is +0 (disabled). +

+
+
error_protection (--protect)
+

Enable CRC error protection when set to 1. The default value is 0 +(disabled). +

+
+
copyright (--copyright)
+

Set MPEG audio copyright flag when set to 1. The default value is 0 +(disabled). +

+
+
original (--original)
+

Set MPEG audio original flag when set to 1. The default value is 0 +(disabled). +

+
+
+ +

+

+

17.9 libvo-aacenc

+ +

VisualOn AAC encoder. +

+

Requires the presence of the libvo-aacenc headers and library during +configuration. You need to explicitly configure the build with +--enable-libvo-aacenc --enable-version3. +

+

This encoder is considered to be worse than the +native experimental FFmpeg AAC encoder, according to +multiple sources. +

+ +

17.9.1 Options

+ +

The VisualOn AAC encoder only supports encoding AAC-LC and up to 2 +channels. It is also CBR-only. +

+
+
b
+

Set bit rate in bits/s. +

+
+
+ + +

17.10 libvo-amrwbenc

+ +

VisualOn Adaptive Multi-Rate Wideband encoder. +

+

Requires the presence of the libvo-amrwbenc headers and library during +configuration. You need to explicitly configure the build with +--enable-libvo-amrwbenc --enable-version3. +

+

This is a mono-only encoder. Officially it only supports 16000Hz sample +rate, but you can override it by setting ‘strict’ to +‘unofficial’ or lower. +

+ +

17.10.1 Options

+ +
+
b
+

Set bitrate in bits/s. Only the following bitrates are supported, otherwise +libavcodec will round to the nearest valid bitrate. +

+
+
6600
+
8850
+
12650
+
14250
+
15850
+
18250
+
19850
+
23050
+
23850
+
+ +
+
dtx
+

Allow discontinuous transmission (generate comfort noise) when set to 1. The +default value is 0 (disabled). +

+
+
+ + +

17.11 libopus

+ +

libopus Opus Interactive Audio Codec encoder wrapper. +

+

Requires the presence of the libopus headers and library during +configuration. You need to explicitly configure the build with +--enable-libopus. +

+ +

17.11.1 Option Mapping

+ +

Most libopus options are modeled after the opusenc utility from +opus-tools. The following is an option mapping chart describing options +supported by the libopus wrapper, and their opusenc-equivalent +in parentheses. +

+
+
b (bitrate)
+

Set the bit rate in bits/s. FFmpeg’s ‘b’ option is +expressed in bits/s, while opusenc’s ‘bitrate’ in +kilobits/s. +

+
+
vbr (vbr, hard-cbr, and cvbr)
+

Set VBR mode. The FFmpeg ‘vbr’ option has the following +valid arguments, with their opusenc equivalent options +in parentheses: +

+
+
off (hard-cbr)
+

Use constant bit rate encoding. +

+
+
on (vbr)
+

Use variable bit rate encoding (the default). +

+
+
constrained (cvbr)
+

Use constrained variable bit rate encoding. +

+
+ +
+
compression_level (comp)
+

Set encoding algorithm complexity. Valid options are integers in +the 0-10 range. 0 gives the fastest encodes but lower quality, while 10 +gives the highest quality but slowest encoding. The default is 10. +

+
+
frame_duration (framesize)
+

Set maximum frame size, or duration of a frame in milliseconds. The +argument must be exactly one of the following: 2.5, 5, 10, 20, 40, 60. Smaller +frame sizes achieve lower latency but less quality at a given bitrate. +Sizes greater than 20ms are only interesting at fairly low bitrates. +The default is 20ms. +

+
+
packet_loss (expect-loss)
+

Set expected packet loss percentage. The default is 0. +

+
+
application (N.A.)
+

Set intended application type. Valid options are listed below: +

+
+
voip
+

Favor improved speech intelligibility. +

+
audio
+

Favor faithfulness to the input (the default). +

+
lowdelay
+

Restrict to only the lowest delay modes. +

+
+ +
+
cutoff (N.A.)
+

Set cutoff bandwidth in Hz. The argument must be exactly one of the +following: 4000, 6000, 8000, 12000, or 20000, corresponding to +narrowband, mediumband, wideband, super wideband, and fullband +respectively. The default is 0 (cutoff disabled). +

+
+
+ + +

17.12 libvorbis

+ +

libvorbis encoder wrapper. +

+

Requires the presence of the libvorbisenc headers and library during +configuration. You need to explicitly configure the build with +--enable-libvorbis. +

+ +

17.12.1 Options

+ +

The following options are supported by the libvorbis wrapper. The +oggenc-equivalent of the options are listed in parentheses. +

+

To get a more accurate and extensive documentation of the libvorbis +options, consult the libvorbisenc’s and oggenc’s documentations. +See http://xiph.org/vorbis/, +http://wiki.xiph.org/Vorbis-tools, and oggenc(1). +

+
+
b (-b)
+

Set bitrate expressed in bits/s for ABR. oggenc-b’ is +expressed in kilobits/s. +

+
+
q (-q)
+

Set constant quality setting for VBR. The value should be a float +number in the range of -1.0 to 10.0. The higher the value, the better +the quality. The default value is ‘3.0’. +

+

This option is valid only using the ffmpeg command-line tool. +For library interface users, use ‘global_quality’. +

+
+
cutoff (--advanced-encode-option lowpass_frequency=N)
+

Set cutoff bandwidth in Hz, a value of 0 disables cutoff. oggenc’s +related option is expressed in kHz. The default value is ‘0’ (cutoff +disabled). +

+
+
minrate (-m)
+

Set minimum bitrate expressed in bits/s. oggenc-m’ is +expressed in kilobits/s. +

+
+
maxrate (-M)
+

Set maximum bitrate expressed in bits/s. oggenc-M’ is +expressed in kilobits/s. This only has effect on ABR mode. +

+
+
iblock (--advanced-encode-option impulse_noisetune=N)
+

Set noise floor bias for impulse blocks. The value is a float number from +-15.0 to 0.0. A negative bias instructs the encoder to pay special attention +to the crispness of transients in the encoded audio. The tradeoff for better +transient response is a higher bitrate. +

+
+
+ +

+

+

17.13 libwavpack

+ +

A wrapper providing WavPack encoding through libwavpack. +

+

Only lossless mode using 32-bit integer samples is supported currently. +

+

Requires the presence of the libwavpack headers and library during +configuration. You need to explicitly configure the build with +--enable-libwavpack. +

+

Note that a libavcodec-native encoder for the WavPack codec exists so users can +encode audio with this codec without using this encoder. See wavpackenc. +

+ +

17.13.1 Options

+ +

wavpack command line utility’s corresponding options are listed in +parentheses, if any. +

+
+
frame_size (--blocksize)
+

Default is 32768. +

+
+
compression_level
+

Set speed vs. compression tradeoff. Acceptable arguments are listed below: +

+
+
0 (-f)
+

Fast mode. +

+
+
1
+

Normal (default) settings. +

+
+
2 (-h)
+

High quality. +

+
+
3 (-hh)
+

Very high quality. +

+
+
4-8 (-hh -xEXTRAPROC)
+

Same as ‘3’, but with extra processing enabled. +

+

4’ is the same as ‘-x2’ and ‘8’ is the same as ‘-x6’. +

+
+
+
+
+ +

+

+

17.14 wavpack

+ +

WavPack lossless audio encoder. +

+

This is a libavcodec-native WavPack encoder. There is also an encoder based on +libwavpack, but there is virtually no reason to use that encoder. +

+

See also libwavpack. +

+ +

17.14.1 Options

+ +

The equivalent options for wavpack command line utility are listed in +parentheses. +

+ +

17.14.1.1 Shared options

+ +

The following shared options are effective for this encoder. Only special notes +about this particular encoder will be documented here. For the general meaning +of the options, see the Codec Options chapter. +

+
+
frame_size (--blocksize)
+

For this encoder, the range for this option is between 128 and 131072. Default +is automatically decided based on sample rate and number of channels. +

+

For the complete formula of calculating default, see +‘libavcodec/wavpackenc.c’. +

+
+
compression_level (-f, -h, -hh, and -x)
+

This option’s syntax is consistent with libwavpack’s. +

+
+ + +

17.14.1.2 Private options

+ +
+
joint_stereo (-j)
+

Set whether to enable joint stereo. Valid values are: +

+
+
on (1)
+

Force mid/side audio encoding. +

+
off (0)
+

Force left/right audio encoding. +

+
auto
+

Let the encoder decide automatically. +

+
+ +
+
optimize_mono
+

Set whether to enable optimization for mono. This option is only effective for +non-mono streams. Available values: +

+
+
on
+

enabled +

+
off
+

disabled +

+
+ +
+
+ + + +

18. Video Encoders

+ +

A description of some of the currently available video encoders +follows. +

+ +

18.1 libtheora

+ +

libtheora Theora encoder wrapper. +

+

Requires the presence of the libtheora headers and library during +configuration. You need to explicitly configure the build with +--enable-libtheora. +

+

For more information about the libtheora project see +http://www.theora.org/. +

+ +

18.1.1 Options

+ +

The following global options are mapped to internal libtheora options +which affect the quality and the bitrate of the encoded stream. +

+
+
b
+

Set the video bitrate in bit/s for CBR (Constant Bit Rate) mode. In +case VBR (Variable Bit Rate) mode is enabled this option is ignored. +

+
+
flags
+

Used to enable constant quality mode (VBR) encoding through the +‘qscale’ flag, and to enable the pass1 and pass2 +modes. +

+
+
g
+

Set the GOP size. +

+
+
global_quality
+

Set the global quality as an integer in lambda units. +

+

Only relevant when VBR mode is enabled with flags +qscale. The +value is converted to QP units by dividing it by FF_QP2LAMBDA, +clipped in the [0 - 10] range, and then multiplied by 6.3 to get a +value in the native libtheora range [0-63]. A higher value corresponds +to a higher quality. +

+
+
q
+

Enable VBR mode when set to a non-negative value, and set constant +quality value as a double floating point value in QP units. +

+

The value is clipped in the [0-10] range, and then multiplied by 6.3 +to get a value in the native libtheora range [0-63]. +

+

This option is valid only using the ffmpeg command-line +tool. For library interface users, use ‘global_quality’. +

+
+ + +

18.1.2 Examples

+ +
    +
  • +Set maximum constant quality (VBR) encoding with ffmpeg: +
     
    ffmpeg -i INPUT -codec:v libtheora -q:v 10 OUTPUT.ogg
    +
    + +
  • +Use ffmpeg to convert a CBR 1000 kbps Theora video stream: +
     
    ffmpeg -i INPUT -codec:v libtheora -b:v 1000k OUTPUT.ogg
    +
    +
+ + +

18.2 libvpx

+ +

VP8 format supported through libvpx. +

+

Requires the presence of the libvpx headers and library during configuration. +You need to explicitly configure the build with --enable-libvpx. +

+ +

18.2.1 Options

+ +

Mapping from FFmpeg to libvpx options with conversion notes in parentheses. +

+
+
threads
+

g_threads +

+
+
profile
+

g_profile +

+
+
vb
+

rc_target_bitrate +

+
+
g
+

kf_max_dist +

+
+
keyint_min
+

kf_min_dist +

+
+
qmin
+

rc_min_quantizer +

+
+
qmax
+

rc_max_quantizer +

+
+
bufsize, vb
+

rc_buf_sz +(bufsize * 1000 / vb) +

+

rc_buf_optimal_sz +(bufsize * 1000 / vb * 5 / 6) +

+
+
rc_init_occupancy, vb
+

rc_buf_initial_sz +(rc_init_occupancy * 1000 / vb) +

+
+
rc_buffer_aggressivity
+

rc_undershoot_pct +

+
+
skip_threshold
+

rc_dropframe_thresh +

+
+
qcomp
+

rc_2pass_vbr_bias_pct +

+
+
maxrate, vb
+

rc_2pass_vbr_maxsection_pct +(maxrate * 100 / vb) +

+
+
minrate, vb
+

rc_2pass_vbr_minsection_pct +(minrate * 100 / vb) +

+
+
minrate, maxrate, vb
+

VPX_CBR +(minrate == maxrate == vb) +

+
+
crf
+

VPX_CQ, VP8E_SET_CQ_LEVEL +

+
+
quality
+
+
best
+

VPX_DL_BEST_QUALITY +

+
good
+

VPX_DL_GOOD_QUALITY +

+
realtime
+

VPX_DL_REALTIME +

+
+ +
+
speed
+

VP8E_SET_CPUUSED +

+
+
nr
+

VP8E_SET_NOISE_SENSITIVITY +

+
+
mb_threshold
+

VP8E_SET_STATIC_THRESHOLD +

+
+
slices
+

VP8E_SET_TOKEN_PARTITIONS +

+
+
max-intra-rate
+

VP8E_SET_MAX_INTRA_BITRATE_PCT +

+
+
force_key_frames
+

VPX_EFLAG_FORCE_KF +

+
+
Alternate reference frame related
+
+
vp8flags altref
+

VP8E_SET_ENABLEAUTOALTREF +

+
arnr_max_frames
+

VP8E_SET_ARNR_MAXFRAMES +

+
arnr_type
+

VP8E_SET_ARNR_TYPE +

+
arnr_strength
+

VP8E_SET_ARNR_STRENGTH +

+
rc_lookahead
+

g_lag_in_frames +

+
+ +
+
vp8flags error_resilient
+

g_error_resilient +

+
+
+ +

For more information about libvpx see: +http://www.webmproject.org/ +

+ + +

18.3 libwebp

+ +

libwebp WebP Image encoder wrapper +

+

libwebp is Google’s official encoder for WebP images. It can encode in either +lossy or lossless mode. Lossy images are essentially a wrapper around a VP8 +frame. Lossless images are a separate codec developed by Google. +

+ +

18.3.1 Pixel Format

+ +

Currently, libwebp only supports YUV420 for lossy and RGB for lossless due +to limitations of the format and libwebp. Alpha is supported for either mode. +Because of API limitations, if RGB is passed in when encoding lossy or YUV is +passed in for encoding lossless, the pixel format will automatically be +converted using functions from libwebp. This is not ideal and is done only for +convenience. +

+ +

18.3.2 Options

+ +
+
-lossless boolean
+

Enables/Disables use of lossless mode. Default is 0. +

+
+
-compression_level integer
+

For lossy, this is a quality/speed tradeoff. Higher values give better quality +for a given size at the cost of increased encoding time. For lossless, this is +a size/speed tradeoff. Higher values give smaller size at the cost of increased +encoding time. More specifically, it controls the number of extra algorithms +and compression tools used, and varies the combination of these tools. This +maps to the method option in libwebp. The valid range is 0 to 6. +Default is 4. +

+
+
-qscale float
+

For lossy encoding, this controls image quality, 0 to 100. For lossless +encoding, this controls the effort and time spent at compressing more. The +default value is 75. Note that for usage via libavcodec, this option is called +global_quality and must be multiplied by FF_QP2LAMBDA. +

+
+
-preset type
+

Configuration preset. This does some automatic settings based on the general +type of the image. +

+
none
+

Do not use a preset. +

+
default
+

Use the encoder default. +

+
picture
+

Digital picture, like portrait, inner shot +

+
photo
+

Outdoor photograph, with natural lighting +

+
drawing
+

Hand or line drawing, with high-contrast details +

+
icon
+

Small-sized colorful images +

+
text
+

Text-like +

+
+ +
+
+ + +

18.4 libx264, libx264rgb

+ +

x264 H.264/MPEG-4 AVC encoder wrapper. +

+

This encoder requires the presence of the libx264 headers and library +during configuration. You need to explicitly configure the build with +--enable-libx264. +

+

libx264 supports an impressive number of features, including 8x8 and +4x4 adaptive spatial transform, adaptive B-frame placement, CAVLC/CABAC +entropy coding, interlacing (MBAFF), lossless mode, psy optimizations +for detail retention (adaptive quantization, psy-RD, psy-trellis). +

+

Many libx264 encoder options are mapped to FFmpeg global codec +options, while unique encoder options are provided through private +options. Additionally the ‘x264opts’ and ‘x264-params’ +private options allows one to pass a list of key=value tuples as accepted +by the libx264 x264_param_parse function. +

+

The x264 project website is at +http://www.videolan.org/developers/x264.html. +

+

The libx264rgb encoder is the same as libx264, except it accepts packed RGB +pixel formats as input instead of YUV. +

+ +

18.4.1 Supported Pixel Formats

+ +

x264 supports 8- to 10-bit color spaces. The exact bit depth is controlled at +x264’s configure time. FFmpeg only supports one bit depth in one particular +build. In other words, it is not possible to build one FFmpeg with multiple +versions of x264 with different bit depths. +

+ +

18.4.2 Options

+ +

The following options are supported by the libx264 wrapper. The +x264-equivalent options or values are listed in parentheses +for easy migration. +

+

To reduce the duplication of documentation, only the private options +and some others requiring special attention are documented here. For +the documentation of the undocumented generic options, see +the Codec Options chapter. +

+

To get a more accurate and extensive documentation of the libx264 +options, invoke the command x264 --full-help or consult +the libx264 documentation. +

+
+
b (bitrate)
+

Set bitrate in bits/s. Note that FFmpeg’s ‘b’ option is +expressed in bits/s, while x264’s ‘bitrate’ is in +kilobits/s. +

+
+
bf (bframes)
+
g (keyint)
+
qmax (qpmax)
+
qmin (qpmin)
+
qdiff (qpstep)
+
qblur (qblur)
+
qcomp (qcomp)
+
refs (ref)
+
sc_threshold (scenecut)
+
trellis (trellis)
+
nr (nr)
+
me_range (merange)
+
me_method (me)
+

Set motion estimation method. Possible values in the decreasing order +of speed: +

+
+
dia (dia)
+
epzs (dia)
+

Diamond search with radius 1 (fastest). ‘epzs’ is an alias for +‘dia’. +

+
hex (hex)
+

Hexagonal search with radius 2. +

+
umh (umh)
+

Uneven multi-hexagon search. +

+
esa (esa)
+

Exhaustive search. +

+
tesa (tesa)
+

Hadamard exhaustive search (slowest). +

+
+ +
+
subq (subme)
+
b_strategy (b-adapt)
+
keyint_min (min-keyint)
+
coder
+

Set entropy encoder. Possible values: +

+
+
ac
+

Enable CABAC. +

+
+
vlc
+

Enable CAVLC and disable CABAC. It generates the same effect as +x264’s ‘--no-cabac’ option. +

+
+ +
+
cmp
+

Set full pixel motion estimation comparison algorithm. Possible values: +

+
+
chroma
+

Enable chroma in motion estimation. +

+
+
sad
+

Ignore chroma in motion estimation. It generates the same effect as +x264’s ‘--no-chroma-me’ option. +

+
+ +
+
threads (threads)
+
thread_type
+

Set multithreading technique. Possible values: +

+
+
slice
+

Slice-based multithreading. It generates the same effect as +x264’s ‘--sliced-threads’ option. +

+
frame
+

Frame-based multithreading. +

+
+ +
+
flags
+

Set encoding flags. It can be used to disable closed GOP and enable +open GOP by setting it to -cgop. The result is similar to +the behavior of x264’s ‘--open-gop’ option. +

+
+
rc_init_occupancy (vbv-init)
+
preset (preset)
+

Set the encoding preset. +

+
+
tune (tune)
+

Set tuning of the encoding params. +

+
+
profile (profile)
+

Set profile restrictions. +

+
+
fastfirstpass
+

Enable fast settings when encoding first pass, when set to 1. When set +to 0, it has the same effect of x264’s +‘--slow-firstpass’ option. +

+
+
crf (crf)
+

Set the quality for constant quality mode. +

+
+
crf_max (crf-max)
+

In CRF mode, prevents VBV from lowering quality beyond this point. +

+
+
qp (qp)
+

Set constant quantization rate control method parameter. +

+
+
aq-mode (aq-mode)
+

Set AQ method. Possible values: +

+
+
none (0)
+

Disabled. +

+
+
variance (1)
+

Variance AQ (complexity mask). +

+
+
autovariance (2)
+

Auto-variance AQ (experimental). +

+
+ +
+
aq-strength (aq-strength)
+

Set AQ strength, reduce blocking and blurring in flat and textured areas. +

+
+
psy
+

Use psychovisual optimizations when set to 1. When set to 0, it has the +same effect as x264’s ‘--no-psy’ option. +

+
+
psy-rd (psy-rd)
+

Set strength of psychovisual optimization, in +psy-rd:psy-trellis format. +

+
+
rc-lookahead (rc-lookahead)
+

Set number of frames to look ahead for frametype and ratecontrol. +

+
+
weightb
+

Enable weighted prediction for B-frames when set to 1. When set to 0, +it has the same effect as x264’s ‘--no-weightb’ option. +

+
+
weightp (weightp)
+

Set weighted prediction method for P-frames. Possible values: +

+
+
none (0)
+

Disabled +

+
simple (1)
+

Enable only weighted refs +

+
smart (2)
+

Enable both weighted refs and duplicates +

+
+ +
+
ssim (ssim)
+

Enable calculation and printing SSIM stats after the encoding. +

+
+
intra-refresh (intra-refresh)
+

Enable the use of Periodic Intra Refresh instead of IDR frames when set +to 1. +

+
+
bluray-compat (bluray-compat)
+

Configure the encoder to be compatible with the bluray standard. +It is a shorthand for setting "bluray-compat=1 force-cfr=1". +

+
+
b-bias (b-bias)
+

Set the influence on how often B-frames are used. +

+
+
b-pyramid (b-pyramid)
+

Set method for keeping of some B-frames as references. Possible values: +

+
+
none (none)
+

Disabled. +

+
strict (strict)
+

Strictly hierarchical pyramid. +

+
normal (normal)
+

Non-strict (not Blu-ray compatible). +

+
+ +
+
mixed-refs
+

Enable the use of one reference per partition, as opposed to one +reference per macroblock when set to 1. When set to 0, it has the +same effect as x264’s ‘--no-mixed-refs’ option. +

+
+
8x8dct
+

Enable adaptive spatial transform (high profile 8x8 transform) +when set to 1. When set to 0, it has the same effect as +x264’s ‘--no-8x8dct’ option. +

+
+
fast-pskip
+

Enable early SKIP detection on P-frames when set to 1. When set +to 0, it has the same effect as x264’s +‘--no-fast-pskip’ option. +

+
+
aud (aud)
+

Enable use of access unit delimiters when set to 1. +

+
+
mbtree
+

Enable use macroblock tree ratecontrol when set to 1. When set +to 0, it has the same effect as x264’s +‘--no-mbtree’ option. +

+
+
deblock (deblock)
+

Set loop filter parameters, in alpha:beta form. +

+
+
cplxblur (cplxblur)
+

Set fluctuations reduction in QP (before curve compression). +

+
+
partitions (partitions)
+

Set partitions to consider as a comma-separated list of. Possible +values in the list: +

+
+
p8x8
+

8x8 P-frame partition. +

+
p4x4
+

4x4 P-frame partition. +

+
b8x8
+

8x8 B-frame partition. +

+
i8x8
+

8x8 I-frame partition. +

+
i4x4
+

4x4 I-frame partition. +(Enabling ‘p4x4’ requires ‘p8x8’ to be enabled. Enabling +‘i8x8’ requires adaptive spatial transform (‘8x8dct’ +option) to be enabled.) +

+
none (none)
+

Do not consider any partitions. +

+
all (all)
+

Consider every partition. +

+
+ +
+
direct-pred (direct)
+

Set direct MV prediction mode. Possible values: +

+
+
none (none)
+

Disable MV prediction. +

+
spatial (spatial)
+

Enable spatial predicting. +

+
temporal (temporal)
+

Enable temporal predicting. +

+
auto (auto)
+

Automatically decided. +

+
+ +
+
slice-max-size (slice-max-size)
+

Set the limit of the size of each slice in bytes. If not specified +but RTP payload size (‘ps’) is specified, that is used. +

+
+
stats (stats)
+

Set the file name for multi-pass stats. +

+
+
nal-hrd (nal-hrd)
+

Set signal HRD information (requires ‘vbv-bufsize’ to be set). +Possible values: +

+
+
none (none)
+

Disable HRD information signaling. +

+
vbr (vbr)
+

Variable bit rate. +

+
cbr (cbr)
+

Constant bit rate (not allowed in MP4 container). +

+
+ +
+
x264opts (N.A.)
+

Set any x264 option, see x264 --fullhelp for a list. +

+

Argument is a list of key=value couples separated by +":". In filter and psy-rd options that use ":" as a separator +themselves, use "," instead. They accept it as well since long ago but this +is kept undocumented for some reason. +

+

For example to specify libx264 encoding options with ffmpeg: +

 
ffmpeg -i foo.mpg -vcodec libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
+
+ +
+
x264-params (N.A.)
+

Override the x264 configuration using a :-separated list of key=value +parameters. +

+

This option is functionally the same as the ‘x264opts’, but is +duplicated for compatibility with the Libav fork. +

+

For example to specify libx264 encoding options with ffmpeg: +

 
ffmpeg -i INPUT -c:v libx264 -x264-params level=30:bframes=0:weightp=0:\
+cabac=0:ref=1:vbv-maxrate=768:vbv-bufsize=2000:analyse=all:me=umh:\
+no-fast-pskip=1:subq=6:8x8dct=0:trellis=0 OUTPUT
+
+
+
+ +

Encoding ffpresets for common usages are provided so they can be used with the +general presets system (e.g. passing the ‘pre’ option). +

+ +

18.5 libxvid

+ +

Xvid MPEG-4 Part 2 encoder wrapper. +

+

This encoder requires the presence of the libxvidcore headers and library +during configuration. You need to explicitly configure the build with +--enable-libxvid --enable-gpl. +

+

The native mpeg4 encoder supports the MPEG-4 Part 2 format, so +users can encode to this format without this library. +

+ +

18.5.1 Options

+ +

The following options are supported by the libxvid wrapper. Some of +the following options are listed but are not documented, and +correspond to shared codec options. See the Codec Options chapter for their documentation. The other shared options +which are not listed have no effect for the libxvid encoder. +

+
+
b
+
g
+
qmin
+
qmax
+
mpeg_quant
+
threads
+
bf
+
b_qfactor
+
b_qoffset
+
flags
+

Set specific encoding flags. Possible values: +

+
+
mv4
+

Use four motion vector by macroblock. +

+
+
aic
+

Enable high quality AC prediction. +

+
+
gray
+

Only encode grayscale. +

+
+
gmc
+

Enable the use of global motion compensation (GMC). +

+
+
qpel
+

Enable quarter-pixel motion compensation. +

+
+
cgop
+

Enable closed GOP. +

+
+
global_header
+

Place global headers in extradata instead of every keyframe. +

+
+
+ +
+
trellis
+
me_method
+

Set motion estimation method. Possible values in decreasing order of +speed and increasing order of quality: +

+
+
zero
+

Use no motion estimation (default). +

+
+
phods
+
x1
+
log
+

Enable advanced diamond zonal search for 16x16 blocks and half-pixel +refinement for 16x16 blocks. ‘x1’ and ‘log’ are aliases for +‘phods’. +

+
+
epzs
+

Enable all of the things described above, plus advanced diamond zonal +search for 8x8 blocks, half-pixel refinement for 8x8 blocks, and motion +estimation on chroma planes. +

+
+
full
+

Enable all of the things described above, plus extended 16x16 and 8x8 +blocks search. +

+
+ +
+
mbd
+

Set macroblock decision algorithm. Possible values in the increasing +order of quality: +

+
+
simple
+

Use macroblock comparing function algorithm (default). +

+
+
bits
+

Enable rate distortion-based half pixel and quarter pixel refinement for +16x16 blocks. +

+
+
rd
+

Enable all of the things described above, plus rate distortion-based +half pixel and quarter pixel refinement for 8x8 blocks, and rate +distortion-based search using square pattern. +

+
+ +
+
lumi_aq
+

Enable lumi masking adaptive quantization when set to 1. Default is 0 +(disabled). +

+
+
variance_aq
+

Enable variance adaptive quantization when set to 1. Default is 0 +(disabled). +

+

When combined with ‘lumi_aq’, the resulting quality will not +be better than either of the two specified individually. In other +words, the resulting quality will be the worse one of the two +effects. +

+
+
ssim
+

Set structural similarity (SSIM) displaying method. Possible values: +

+
+
off
+

Disable displaying of SSIM information. +

+
+
avg
+

Output average SSIM at the end of encoding to stdout. The format of +showing the average SSIM is: +

+
 
Average SSIM: %f
+
+ +

For users who are not familiar with C, %f means a float number, or +a decimal (e.g. 0.939232). +

+
+
frame
+

Output both per-frame SSIM data during encoding and average SSIM at +the end of encoding to stdout. The format of per-frame information +is: +

+
 
       SSIM: avg: %1.3f min: %1.3f max: %1.3f
+
+ +

For users who are not familiar with C, %1.3f means a float number +rounded to 3 digits after the dot (e.g. 0.932). +

+
+
+ +
+
ssim_acc
+

Set SSIM accuracy. Valid options are integers within the range of +0-4, while 0 gives the most accurate result and 4 computes the +fastest. +

+
+
+ + +

18.6 png

+ +

PNG image encoder. +

+ +

18.6.1 Private options

+ +
+
dpi integer
+

Set physical density of pixels, in dots per inch, unset by default +

+
dpm integer
+

Set physical density of pixels, in dots per meter, unset by default +

+
+ + +

18.7 ProRes

+ +

Apple ProRes encoder. +

+

FFmpeg contains 2 ProRes encoders, the prores-aw and prores-ks encoder. +The used encoder can be chosen with the -vcodec option. +

+ +

18.7.1 Private Options for prores-ks

+ +
+
profile integer
+

Select the ProRes profile to encode +

+
proxy
+
lt
+
standard
+
hq
+
4444
+
+ +
+
quant_mat integer
+

Select quantization matrix. +

+
auto
+
default
+
proxy
+
lt
+
standard
+
hq
+
+

If set to auto, the matrix matching the profile will be picked. +If not set, the matrix providing the highest quality, default, will be +picked. +

+
+
bits_per_mb integer
+

How many bits to allot for coding one macroblock. Different profiles use +between 200 and 2400 bits per macroblock, the maximum is 8000. +

+
+
mbs_per_slice integer
+

Number of macroblocks in each slice (1-8); the default value (8) +should be good in almost all situations. +

+
+
vendor string
+

Override the 4-byte vendor ID. +A custom vendor ID like apl0 would claim the stream was produced by +the Apple encoder. +

+
+
alpha_bits integer
+

Specify number of bits for alpha component. +Possible values are 0, 8 and 16. +Use 0 to disable alpha plane coding. +

+
+
+ + +

18.7.2 Speed considerations

+ +

In the default mode of operation the encoder has to honor frame constraints +(i.e. not produce frames with size bigger than requested) while still making +output picture as good as possible. +A frame containing a lot of small details is harder to compress and the encoder +would spend more time searching for appropriate quantizers for each slice. +

+

Setting a higher ‘bits_per_mb’ limit will improve the speed. +

+

For the fastest encoding speed set the ‘qscale’ parameter (4 is the +recommended value) and do not set a size constraint. +

+ +

19. Bitstream Filters

+ +

When you configure your FFmpeg build, all the supported bitstream +filters are enabled by default. You can list all available ones using +the configure option --list-bsfs. +

+

You can disable all the bitstream filters using the configure option +--disable-bsfs, and selectively enable any bitstream filter using +the option --enable-bsf=BSF, or you can disable a particular +bitstream filter using the option --disable-bsf=BSF. +

+

The option -bsfs of the ff* tools will display the list of +all the supported bitstream filters included in your build. +

+

Below is a description of the currently available bitstream filters. +

+ +

19.1 aac_adtstoasc

+ +

Convert MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration +bitstream filter. +

+

This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4 +ADTS header and removes the ADTS header. +

+

This is required for example when copying an AAC stream from a raw +ADTS AAC container to a FLV or a MOV/MP4 file. +

+ +

19.2 chomp

+ +

Remove zero padding at the end of a packet. +

+ +

19.3 dump_extra

+ +

Add extradata to the beginning of the filtered packets. +

+

The additional argument specifies which packets should be filtered. +It accepts the values: +

+
a
+

add extradata to all key packets, but only if local_header is +set in the ‘flags2’ codec context field +

+
+
k
+

add extradata to all key packets +

+
+
e
+

add extradata to all packets +

+
+ +

If not specified it is assumed ‘k’. +

+

For example the following ffmpeg command forces a global +header (thus disabling individual packet headers) in the H.264 packets +generated by the libx264 encoder, but corrects them by adding +the header stored in extradata to the key packets: +

 
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
+
+ + +

19.4 h264_mp4toannexb

+ +

Convert an H.264 bitstream from length prefixed mode to start code +prefixed mode (as defined in the Annex B of the ITU-T H.264 +specification). +

+

This is required by some streaming formats, typically the MPEG-2 +transport stream format ("mpegts"). +

+

For example to remux an MP4 file containing an H.264 stream to mpegts +format with ffmpeg, you can use the command: +

+
 
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
+
+ + +

19.5 imx_dump_header

+ + +

19.6 mjpeg2jpeg

+ +

Convert MJPEG/AVI1 packets to full JPEG/JFIF packets. +

+

MJPEG is a video codec wherein each video frame is essentially a +JPEG image. The individual frames can be extracted without loss, +e.g. by +

+
 
ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg
+
+ +

Unfortunately, these chunks are incomplete JPEG images, because +they lack the DHT segment required for decoding. Quoting from +http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml: +

+

Avery Lee, writing in the rec.video.desktop newsgroup in 2001, +commented that "MJPEG, or at least the MJPEG in AVIs having the +MJPG fourcc, is restricted JPEG with a fixed – and *omitted* – +Huffman table. The JPEG must be YCbCr colorspace, it must be 4:2:2, +and it must use basic Huffman encoding, not arithmetic or +progressive. . . . You can indeed extract the MJPEG frames and +decode them with a regular JPEG decoder, but you have to prepend +the DHT segment to them, or else the decoder won’t have any idea +how to decompress the data. The exact table necessary is given in +the OpenDML spec." +

+

This bitstream filter patches the header of frames extracted from an MJPEG +stream (carrying the AVI1 header ID and lacking a DHT segment) to +produce fully qualified JPEG images. +

+
 
ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
+exiftran -i -9 frame*.jpg
+ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
+
+ + +

19.7 mjpega_dump_header

+ + +

19.8 movsub

+ + +

19.9 mp3_header_decompress

+ + +

19.10 noise

+ + +

19.11 remove_extra

+ + +

20. Format Options

+ +

The libavformat library provides some generic global options, which +can be set on all the muxers and demuxers. In addition each muxer or +demuxer may support so-called private options, which are specific for +that component. +

+

Options may be set by specifying -option value in the +FFmpeg tools, or by setting the value explicitly in the +AVFormatContext options or using the ‘libavutil/opt.h’ API +for programmatic use. +

+

The list of supported options follows: +

+
+
avioflags flags (input/output)
+

Possible values: +

+
direct
+

Reduce buffering. +

+
+ +
+
probesize integer (input)
+

Set probing size in bytes, i.e. the size of the data to analyze to get +stream information. A higher value will allow to detect more +information in case it is dispersed into the stream, but will increase +latency. Must be an integer not less than 32. It is 5000000 by default. +

+
+
packetsize integer (output)
+

Set packet size. +

+
+
fflags flags (input/output)
+

Set format flags. +

+

Possible values: +

+
ignidx
+

Ignore index. +

+
genpts
+

Generate PTS. +

+
nofillin
+

Do not fill in missing values that can be exactly calculated. +

+
noparse
+

Disable AVParsers, this needs +nofillin too. +

+
igndts
+

Ignore DTS. +

+
discardcorrupt
+

Discard corrupted frames. +

+
sortdts
+

Try to interleave output packets by DTS. +

+
keepside
+

Do not merge side data. +

+
latm
+

Enable RTP MP4A-LATM payload. +

+
nobuffer
+

Reduce the latency introduced by optional buffering +

+
+ +
+
seek2any integer (input)
+

Allow seeking to non-keyframes on demuxer level when supported if set to 1. +Default is 0. +

+
+
analyzeduration integer (input)
+

Specify how many microseconds are analyzed to probe the input. A +higher value will allow to detect more accurate information, but will +increase latency. It defaults to 5,000,000 microseconds = 5 seconds. +

+
+
cryptokey hexadecimal string (input)
+

Set decryption key. +

+
+
indexmem integer (input)
+

Set max memory used for timestamp index (per stream). +

+
+
rtbufsize integer (input)
+

Set max memory used for buffering real-time frames. +

+
+
fdebug flags (input/output)
+

Print specific debug info. +

+

Possible values: +

+
ts
+
+ +
+
max_delay integer (input/output)
+

Set maximum muxing or demuxing delay in microseconds. +

+
+
fpsprobesize integer (input)
+

Set number of frames used to probe fps. +

+
+
audio_preload integer (output)
+

Set microseconds by which audio packets should be interleaved earlier. +

+
+
chunk_duration integer (output)
+

Set microseconds for each chunk. +

+
+
chunk_size integer (output)
+

Set size in bytes for each chunk. +

+
+
err_detect, f_err_detect flags (input)
+

Set error detection flags. f_err_detect is deprecated and +should be used only via the ffmpeg tool. +

+

Possible values: +

+
crccheck
+

Verify embedded CRCs. +

+
bitstream
+

Detect bitstream specification deviations. +

+
buffer
+

Detect improper bitstream length. +

+
explode
+

Abort decoding on minor error detection. +

+
careful
+

Consider things that violate the spec and have not been seen in the +wild as errors. +

+
compliant
+

Consider all spec non-compliances as errors. +

+
aggressive
+

Consider things that a sane encoder should not do as an error. +

+
+ +
+
use_wallclock_as_timestamps integer (input)
+

Use wallclock as timestamps. +

+
+
avoid_negative_ts integer (output)
+
+

Possible values: +

+
make_non_negative
+

Shift timestamps to make them non-negative. +Also note that this affects only leading negative timestamps, and not +non-monotonic negative timestamps. +

+
make_zero
+

Shift timestamps so that the first timestamp is 0. +

+
auto (default)
+

Enables shifting when required by the target format. +

+
disabled
+

Disables shifting of timestamp. +

+
+ +

When shifting is enabled, all output timestamps are shifted by the +same amount. Audio, video, and subtitles desynching and relative +timestamp differences are preserved compared to how they would have +been without shifting. +

+
+
skip_initial_bytes integer (input)
+

Set number of bytes to skip before reading header and frames. +Default is 0. +

+
+
correct_ts_overflow integer (input)
+

Correct single timestamp overflows if set to 1. Default is 1. +

+
+
flush_packets integer (output)
+

Flush the underlying I/O stream after each packet. Default 1 enables it, and +has the effect of reducing the latency; 0 disables it and may slightly +increase performance in some cases. +

+
+
output_ts_offset offset (output)
+

Set the output time offset. +

+

offset must be a time duration specification, +see (ffmpeg-utils)time duration syntax. +

+

The offset is added by the muxer to the output timestamps. +

+

Specifying a positive offset means that the corresponding streams are +delayed by the time duration specified in offset. Default value +is 0 (meaning that no offset is applied). +

+
+ + +

+

+

20.1 Format stream specifiers

+ +

Format stream specifiers allow selection of one or more streams that +match specific properties. +

+

Possible forms of stream specifiers are: +

+
stream_index
+

Matches the stream with this index. +

+
+
stream_type[:stream_index]
+

stream_type is one of following: ’v’ for video, ’a’ for audio, +’s’ for subtitle, ’d’ for data, and ’t’ for attachments. If +stream_index is given, then it matches the stream number +stream_index of this type. Otherwise, it matches all streams of +this type. +

+
+
p:program_id[:stream_index]
+

If stream_index is given, then it matches the stream with number +stream_index in the program with the id +program_id. Otherwise, it matches all streams in the program. +

+
+
#stream_id
+

Matches the stream by a format-specific ID. +

+
+ +

The exact semantics of stream specifiers is defined by the +avformat_match_stream_specifier() function declared in the +‘libavformat/avformat.h’ header. +

+ +

21. Demuxers

+ +

Demuxers are configured elements in FFmpeg that can read the +multimedia streams from a particular type of file. +

+

When you configure your FFmpeg build, all the supported demuxers +are enabled by default. You can list all available ones using the +configure option --list-demuxers. +

+

You can disable all the demuxers using the configure option +--disable-demuxers, and selectively enable a single demuxer with +the option --enable-demuxer=DEMUXER, or disable it +with the option --disable-demuxer=DEMUXER. +

+

The option -formats of the ff* tools will display the list of +enabled demuxers. +

+

The description of some of the currently available demuxers follows. +

+ +

21.1 applehttp

+ +

Apple HTTP Live Streaming demuxer. +

+

This demuxer presents all AVStreams from all variant streams. +The id field is set to the bitrate variant index number. By setting +the discard flags on AVStreams (by pressing ’a’ or ’v’ in ffplay), +the caller can decide which variant streams to actually receive. +The total bitrate of the variant that the stream belongs to is +available in a metadata key named "variant_bitrate". +

+ +

21.2 asf

+ +

Advanced Systems Format demuxer. +

+

This demuxer is used to demux ASF files and MMS network streams. +

+
+
-no_resync_search bool
+

Do not try to resynchronize by looking for a certain optional start code. +

+
+ +

+

+

21.3 concat

+ +

Virtual concatenation script demuxer. +

+

This demuxer reads a list of files and other directives from a text file and +demuxes them one after the other, as if all their packets had been muxed +together. +

+

The timestamps in the files are adjusted so that the first file starts at 0 +and each next file starts where the previous one finishes. Note that it is +done globally and may cause gaps if all streams do not have exactly the same +length. +

+

All files must have the same streams (same codecs, same time base, etc.). +

+

The duration of each file is used to adjust the timestamps of the next file: +if the duration is incorrect (because it was computed using the bit-rate or +because the file is truncated, for example), it can cause artifacts. The +duration directive can be used to override the duration stored in +each file. +

+ +

21.3.1 Syntax

+ +

The script is a text file in extended-ASCII, with one directive per line. +Empty lines, leading spaces and lines starting with ’#’ are ignored. The +following directive is recognized: +

+
+
file path
+

Path to a file to read; special characters and spaces must be escaped with +backslash or single quotes. +

+

All subsequent directives apply to that file. +

+
+
ffconcat version 1.0
+

Identify the script type and version. It also sets the ‘safe’ option +to 1 if it was set to its default -1. +

+

To make FFmpeg recognize the format automatically, this directive must +appear exactly as is (no extra space or byte-order-mark) on the very first +line of the script. +

+
+
duration dur
+

Duration of the file. This information can be specified from the file; +specifying it here may be more efficient or help if the information from the +file is not available or accurate. +

+

If the duration is set for all files, then it is possible to seek in the +whole concatenated video. +

+
+
+ + +

21.3.2 Options

+ +

This demuxer accepts the following option: +

+
+
safe
+

If set to 1, reject unsafe file paths. A file path is considered safe if it +does not contain a protocol specification and is relative and all components +only contain characters from the portable character set (letters, digits, +period, underscore and hyphen) and have no period at the beginning of a +component. +

+

If set to 0, any file name is accepted. +

+

The default is -1, it is equivalent to 1 if the format was automatically +probed and 0 otherwise. +

+
+
+ + +

21.4 flv

+ +

Adobe Flash Video Format demuxer. +

+

This demuxer is used to demux FLV files and RTMP network streams. +

+
+
-flv_metadata bool
+

Allocate the streams according to the onMetaData array content. +

+
+ + +

21.5 libgme

+ +

The Game Music Emu library is a collection of video game music file emulators. +

+

See http://code.google.com/p/game-music-emu/ for more information. +

+

Some files have multiple tracks. The demuxer will pick the first track by +default. The ‘track_index’ option can be used to select a different +track. Track indexes start at 0. The demuxer exports the number of tracks as +tracks meta data entry. +

+

For very large files, the ‘max_size’ option may have to be adjusted. +

+ +

21.6 libquvi

+ +

Play media from Internet services using the quvi project. +

+

The demuxer accepts a ‘format’ option to request a specific quality. It +is by default set to best. +

+

See http://quvi.sourceforge.net/ for more information. +

+

FFmpeg needs to be built with --enable-libquvi for this demuxer to be +enabled. +

+ +

21.7 image2

+ +

Image file demuxer. +

+

This demuxer reads from a list of image files specified by a pattern. +The syntax and meaning of the pattern is specified by the +option pattern_type. +

+

The pattern may contain a suffix which is used to automatically +determine the format of the images contained in the files. +

+

The size, the pixel format, and the format of each image must be the +same for all the files in the sequence. +

+

This demuxer accepts the following options: +

+
framerate
+

Set the frame rate for the video stream. It defaults to 25. +

+
loop
+

If set to 1, loop over the input. Default value is 0. +

+
pattern_type
+

Select the pattern type used to interpret the provided filename. +

+

pattern_type accepts one of the following values. +

+
sequence
+

Select a sequence pattern type, used to specify a sequence of files +indexed by sequential numbers. +

+

A sequence pattern may contain the string "%d" or "%0Nd", which +specifies the position of the characters representing a sequential +number in each filename matched by the pattern. If the form +"%0Nd" is used, the string representing the number in each +filename is 0-padded and N is the total number of 0-padded +digits representing the number. The literal character ’%’ can be +specified in the pattern with the string "%%". +

+

If the sequence pattern contains "%d" or "%0Nd", the first filename of +the file list specified by the pattern must contain a number +inclusively contained between start_number and +start_number+start_number_range-1, and all the following +numbers must be sequential. +

+

For example the pattern "img-%03d.bmp" will match a sequence of +filenames of the form ‘img-001.bmp’, ‘img-002.bmp’, ..., +‘img-010.bmp’, etc.; the pattern "i%%m%%g-%d.jpg" will match a +sequence of filenames of the form ‘i%m%g-1.jpg’, +‘i%m%g-2.jpg’, ..., ‘i%m%g-10.jpg’, etc. +

+

Note that the pattern must not necessarily contain "%d" or +"%0Nd", for example to convert a single image file +‘img.jpeg’ you can employ the command: +

 
ffmpeg -i img.jpeg img.png
+
+ +
+
glob
+

Select a glob wildcard pattern type. +

+

The pattern is interpreted like a glob() pattern. This is only +selectable if libavformat was compiled with globbing support. +

+
+
glob_sequence (deprecated, will be removed)
+

Select a mixed glob wildcard/sequence pattern. +

+

If your version of libavformat was compiled with globbing support, and +the provided pattern contains at least one glob meta character among +%*?[]{} that is preceded by an unescaped "%", the pattern is +interpreted like a glob() pattern, otherwise it is interpreted +like a sequence pattern. +

+

All glob special characters %*?[]{} must be prefixed +with "%". To escape a literal "%" you shall use "%%". +

+

For example the pattern foo-%*.jpeg will match all the +filenames prefixed by "foo-" and terminating with ".jpeg", and +foo-%?%?%?.jpeg will match all the filenames prefixed with +"foo-", followed by a sequence of three characters, and terminating +with ".jpeg". +

+

This pattern type is deprecated in favor of glob and +sequence. +

+
+ +

Default value is glob_sequence. +

+
pixel_format
+

Set the pixel format of the images to read. If not specified the pixel +format is guessed from the first image file in the sequence. +

+
start_number
+

Set the index of the file matched by the image file pattern to start +to read from. Default value is 0. +

+
start_number_range
+

Set the index interval range to check when looking for the first image +file in the sequence, starting from start_number. Default value +is 5. +

+
ts_from_file
+

If set to 1, will set frame timestamp to modification time of image file. Note +that monotonicity of timestamps is not provided: images go in the same order as +without this option. Default value is 0. +

+
video_size
+

Set the video size of the images to read. If not specified the video +size is guessed from the first image file in the sequence. +

+
+ + +

21.7.1 Examples

+ +
    +
  • +Use ffmpeg for creating a video from the images in the file +sequence ‘img-001.jpeg’, ‘img-002.jpeg’, ..., assuming an +input frame rate of 10 frames per second: +
     
    ffmpeg -framerate 10 -i 'img-%03d.jpeg' out.mkv
    +
    + +
  • +As above, but start by reading from a file with index 100 in the sequence: +
     
    ffmpeg -framerate 10 -start_number 100 -i 'img-%03d.jpeg' out.mkv
    +
    + +
  • +Read images matching the "*.png" glob pattern , that is all the files +terminating with the ".png" suffix: +
     
    ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
    +
    +
+ + +

21.8 mpegts

+ +

MPEG-2 transport stream demuxer. +

+
+
fix_teletext_pts
+

Overrides teletext packet PTS and DTS values with the timestamps calculated +from the PCR of the first program which the teletext stream is part of and is +not discarded. Default value is 1, set this option to 0 if you want your +teletext packet PTS and DTS values untouched. +

+
+ + +

21.9 rawvideo

+ +

Raw video demuxer. +

+

This demuxer allows one to read raw video data. Since there is no header +specifying the assumed video parameters, the user must specify them +in order to be able to decode the data correctly. +

+

This demuxer accepts the following options: +

+
framerate
+

Set input video frame rate. Default value is 25. +

+
+
pixel_format
+

Set the input video pixel format. Default value is yuv420p. +

+
+
video_size
+

Set the input video size. This value must be specified explicitly. +

+
+ +

For example to read a rawvideo file ‘input.raw’ with +ffplay, assuming a pixel format of rgb24, a video +size of 320x240, and a frame rate of 10 images per second, use +the command: +

 
ffplay -f rawvideo -pixel_format rgb24 -video_size 320x240 -framerate 10 input.raw
+
+ + +

21.10 sbg

+ +

SBaGen script demuxer. +

+

This demuxer reads the script language used by SBaGen +http://uazu.net/sbagen/ to generate binaural beats sessions. A SBG +script looks like that: +

 
-SE
+a: 300-2.5/3 440+4.5/0
+b: 300-2.5/0 440+4.5/3
+off: -
+NOW      == a
++0:07:00 == b
++0:14:00 == a
++0:21:00 == b
++0:30:00    off
+
+ +

A SBG script can mix absolute and relative timestamps. If the script uses +either only absolute timestamps (including the script start time) or only +relative ones, then its layout is fixed, and the conversion is +straightforward. On the other hand, if the script mixes both kind of +timestamps, then the NOW reference for relative timestamps will be +taken from the current time of day at the time the script is read, and the +script layout will be frozen according to that reference. That means that if +the script is directly played, the actual times will match the absolute +timestamps up to the sound controller’s clock accuracy, but if the user +somehow pauses the playback or seeks, all times will be shifted accordingly. +

+ +

21.11 tedcaptions

+ +

JSON captions used for TED Talks. +

+

TED does not provide links to the captions, but they can be guessed from the +page. The file ‘tools/bookmarklets.html’ from the FFmpeg source tree +contains a bookmarklet to expose them. +

+

This demuxer accepts the following option: +

+
start_time
+

Set the start time of the TED talk, in milliseconds. The default is 15000 +(15s). It is used to sync the captions with the downloadable videos, because +they include a 15s intro. +

+
+ +

Example: convert the captions to a format most players understand: +

 
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
+
+ + +

22. Muxers

+ +

Muxers are configured elements in FFmpeg which allow writing +multimedia streams to a particular type of file. +

+

When you configure your FFmpeg build, all the supported muxers +are enabled by default. You can list all available muxers using the +configure option --list-muxers. +

+

You can disable all the muxers with the configure option +--disable-muxers and selectively enable / disable single muxers +with the options --enable-muxer=MUXER / +--disable-muxer=MUXER. +

+

The option -formats of the ff* tools will display the list of +enabled muxers. +

+

A description of some of the currently available muxers follows. +

+

+

+

22.1 aiff

+ +

Audio Interchange File Format muxer. +

+ +

22.1.1 Options

+ +

It accepts the following options: +

+
+
write_id3v2
+

Enable ID3v2 tags writing when set to 1. Default is 0 (disabled). +

+
+
id3v2_version
+

Select ID3v2 version to write. Currently only version 3 and 4 (aka. +ID3v2.3 and ID3v2.4) are supported. The default is version 4. +

+
+
+ +

+

+

22.2 crc

+ +

CRC (Cyclic Redundancy Check) testing format. +

+

This muxer computes and prints the Adler-32 CRC of all the input audio +and video frames. By default audio frames are converted to signed +16-bit raw audio and video frames to raw video before computing the +CRC. +

+

The output of the muxer consists of a single line of the form: +CRC=0xCRC, where CRC is a hexadecimal number 0-padded to +8 digits containing the CRC for all the decoded input frames. +

+

See also the framecrc muxer. +

+ +

22.2.1 Examples

+ +

For example to compute the CRC of the input, and store it in the file +‘out.crc’: +

 
ffmpeg -i INPUT -f crc out.crc
+
+ +

You can print the CRC to stdout with the command: +

 
ffmpeg -i INPUT -f crc -
+
+ +

You can select the output format of each frame with ffmpeg by +specifying the audio and video codec and format. For example to +compute the CRC of the input audio converted to PCM unsigned 8-bit +and the input video converted to MPEG-2 video, use the command: +

 
ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f crc -
+
+ +

+

+

22.3 framecrc

+ +

Per-packet CRC (Cyclic Redundancy Check) testing format. +

+

This muxer computes and prints the Adler-32 CRC for each audio +and video packet. By default audio frames are converted to signed +16-bit raw audio and video frames to raw video before computing the +CRC. +

+

The output of the muxer consists of a line for each audio and video +packet of the form: +

 
stream_index, packet_dts, packet_pts, packet_duration, packet_size, 0xCRC
+
+ +

CRC is a hexadecimal number 0-padded to 8 digits containing the +CRC of the packet. +

+ +

22.3.1 Examples

+ +

For example to compute the CRC of the audio and video frames in +‘INPUT’, converted to raw audio and video packets, and store it +in the file ‘out.crc’: +

 
ffmpeg -i INPUT -f framecrc out.crc
+
+ +

To print the information to stdout, use the command: +

 
ffmpeg -i INPUT -f framecrc -
+
+ +

With ffmpeg, you can select the output format to which the +audio and video frames are encoded before computing the CRC for each +packet by specifying the audio and video codec. For example, to +compute the CRC of each decoded input audio frame converted to PCM +unsigned 8-bit and of each decoded input video frame converted to +MPEG-2 video, use the command: +

 
ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f framecrc -
+
+ +

See also the crc muxer. +

+

+

+

22.4 framemd5

+ +

Per-packet MD5 testing format. +

+

This muxer computes and prints the MD5 hash for each audio +and video packet. By default audio frames are converted to signed +16-bit raw audio and video frames to raw video before computing the +hash. +

+

The output of the muxer consists of a line for each audio and video +packet of the form: +

 
stream_index, packet_dts, packet_pts, packet_duration, packet_size, MD5
+
+ +

MD5 is a hexadecimal number representing the computed MD5 hash +for the packet. +

+ +

22.4.1 Examples

+ +

For example to compute the MD5 of the audio and video frames in +‘INPUT’, converted to raw audio and video packets, and store it +in the file ‘out.md5’: +

 
ffmpeg -i INPUT -f framemd5 out.md5
+
+ +

To print the information to stdout, use the command: +

 
ffmpeg -i INPUT -f framemd5 -
+
+ +

See also the md5 muxer. +

+

+

+

22.5 gif

+ +

Animated GIF muxer. +

+

It accepts the following options: +

+
+
loop
+

Set the number of times to loop the output. Use -1 for no loop, 0 +for looping indefinitely (default). +

+
+
final_delay
+

Force the delay (expressed in centiseconds) after the last frame. Each frame +ends with a delay until the next frame. The default is -1, which is a +special value to tell the muxer to re-use the previous delay. In case of a +loop, you might want to customize this value to mark a pause for instance. +

+
+ +

For example, to encode a gif looping 10 times, with a 5 seconds delay between +the loops: +

 
ffmpeg -i INPUT -loop 10 -final_delay 500 out.gif
+
+ +

Note 1: if you wish to extract the frames in separate GIF files, you need to +force the image2 muxer: +

 
ffmpeg -i INPUT -c:v gif -f image2 "out%d.gif"
+
+ +

Note 2: the GIF format has a very small time base: the delay between two frames +can not be smaller than one centisecond. +

+

+

+

22.6 hls

+ +

Apple HTTP Live Streaming muxer that segments MPEG-TS according to +the HTTP Live Streaming (HLS) specification. +

+

It creates a playlist file and numbered segment files. The output +filename specifies the playlist filename; the segment filenames +receive the same basename as the playlist, a sequential number and +a .ts extension. +

+

For example, to convert an input file with ffmpeg: +

 
ffmpeg -i in.nut out.m3u8
+
+ +

See also the segment muxer, which provides a more generic and +flexible implementation of a segmenter, and can be used to perform HLS +segmentation. +

+ +

22.6.1 Options

+ +

This muxer supports the following options: +

+
+
hls_time seconds
+

Set the segment length in seconds. Default value is 2. +

+
+
hls_list_size size
+

Set the maximum number of playlist entries. If set to 0 the list file +will contain all the segments. Default value is 5. +

+
+
hls_wrap wrap
+

Set the number after which the segment filename number (the number +specified in each segment file) wraps. If set to 0 the number will be +never wrapped. Default value is 0. +

+

This option is useful to avoid to fill the disk with many segment +files, and limits the maximum number of segment files written to disk +to wrap. +

+
+
start_number number
+

Start the playlist sequence number from number. Default value is +0. +

+

Note that the playlist sequence number must be unique for each segment +and it is not to be confused with the segment filename sequence number +which can be cyclic, for example if the ‘wrap’ option is +specified. +

+
+ +

+

+

22.7 ico

+ +

ICO file muxer. +

+

Microsoft’s icon file format (ICO) has some strict limitations that should be noted: +

+
    +
  • +Size cannot exceed 256 pixels in any dimension + +
  • +Only BMP and PNG images can be stored + +
  • +If a BMP image is used, it must be one of the following pixel formats: +
     
    BMP Bit Depth      FFmpeg Pixel Format
    +1bit               pal8
    +4bit               pal8
    +8bit               pal8
    +16bit              rgb555le
    +24bit              bgr24
    +32bit              bgra
    +
    + +
  • +If a BMP image is used, it must use the BITMAPINFOHEADER DIB header + +
  • +If a PNG image is used, it must use the rgba pixel format +
+ +

+

+

22.8 image2

+ +

Image file muxer. +

+

The image file muxer writes video frames to image files. +

+

The output filenames are specified by a pattern, which can be used to +produce sequentially numbered series of files. +The pattern may contain the string "%d" or "%0Nd", this string +specifies the position of the characters representing a numbering in +the filenames. If the form "%0Nd" is used, the string +representing the number in each filename is 0-padded to N +digits. The literal character ’%’ can be specified in the pattern with +the string "%%". +

+

If the pattern contains "%d" or "%0Nd", the first filename of +the file list specified will contain the number 1, all the following +numbers will be sequential. +

+

The pattern may contain a suffix which is used to automatically +determine the format of the image files to write. +

+

For example the pattern "img-%03d.bmp" will specify a sequence of +filenames of the form ‘img-001.bmp’, ‘img-002.bmp’, ..., +‘img-010.bmp’, etc. +The pattern "img%%-%d.jpg" will specify a sequence of filenames of the +form ‘img%-1.jpg’, ‘img%-2.jpg’, ..., ‘img%-10.jpg’, +etc. +

+ +

22.8.1 Examples

+ +

The following example shows how to use ffmpeg for creating a +sequence of files ‘img-001.jpeg’, ‘img-002.jpeg’, ..., +taking one image every second from the input video: +

 
ffmpeg -i in.avi -vsync 1 -r 1 -f image2 'img-%03d.jpeg'
+
+ +

Note that with ffmpeg, if the format is not specified with the +-f option and the output filename specifies an image file +format, the image2 muxer is automatically selected, so the previous +command can be written as: +

 
ffmpeg -i in.avi -vsync 1 -r 1 'img-%03d.jpeg'
+
+ +

Note also that the pattern must not necessarily contain "%d" or +"%0Nd", for example to create a single image file +‘img.jpeg’ from the input video you can employ the command: +

 
ffmpeg -i in.avi -f image2 -frames:v 1 img.jpeg
+
+ +

The ‘strftime’ option allows you to expand the filename with +date and time information. Check the documentation of +the strftime() function for the syntax. +

+

For example to generate image files from the strftime() +"%Y-%m-%d_%H-%M-%S" pattern, the following ffmpeg command +can be used: +

 
ffmpeg -f v4l2 -r 1 -i /dev/video0 -f image2 -strftime 1 "%Y-%m-%d_%H-%M-%S.jpg"
+
+ + +

22.8.2 Options

+ +
+
start_number
+

Start the sequence from the specified number. Default value is 1. Must +be a non-negative number. +

+
+
update
+

If set to 1, the filename will always be interpreted as just a +filename, not a pattern, and the corresponding file will be continuously +overwritten with new images. Default value is 0. +

+
+
strftime
+

If set to 1, expand the filename with date and time information from +strftime(). Default value is 0. +

+
+ +

The image muxer supports the .Y.U.V image file format. This format is +special in that each image frame consists of three files, for +each of the YUV420P components. To read or write this image file format, +specify the name of the ’.Y’ file. The muxer will automatically open the +’.U’ and ’.V’ files as required. +

+ +

22.9 matroska

+ +

Matroska container muxer. +

+

This muxer implements the matroska and webm container specs. +

+ +

22.9.1 Metadata

+ +

The recognized metadata settings in this muxer are: +

+
+
title
+

Set title name provided to a single track. +

+
+
language
+

Specify the language of the track in the Matroska languages form. +

+

The language can be either the 3 letters bibliographic ISO-639-2 (ISO +639-2/B) form (like "fre" for French), or a language code mixed with a +country code for specialities in languages (like "fre-ca" for Canadian +French). +

+
+
stereo_mode
+

Set stereo 3D video layout of two views in a single video track. +

+

The following values are recognized: +

+
mono
+

video is not stereo +

+
left_right
+

Both views are arranged side by side, Left-eye view is on the left +

+
bottom_top
+

Both views are arranged in top-bottom orientation, Left-eye view is at bottom +

+
top_bottom
+

Both views are arranged in top-bottom orientation, Left-eye view is on top +

+
checkerboard_rl
+

Each view is arranged in a checkerboard interleaved pattern, Left-eye view being first +

+
checkerboard_lr
+

Each view is arranged in a checkerboard interleaved pattern, Right-eye view being first +

+
row_interleaved_rl
+

Each view is constituted by a row based interleaving, Right-eye view is first row +

+
row_interleaved_lr
+

Each view is constituted by a row based interleaving, Left-eye view is first row +

+
col_interleaved_rl
+

Both views are arranged in a column based interleaving manner, Right-eye view is first column +

+
col_interleaved_lr
+

Both views are arranged in a column based interleaving manner, Left-eye view is first column +

+
anaglyph_cyan_red
+

All frames are in anaglyph format viewable through red-cyan filters +

+
right_left
+

Both views are arranged side by side, Right-eye view is on the left +

+
anaglyph_green_magenta
+

All frames are in anaglyph format viewable through green-magenta filters +

+
block_lr
+

Both eyes laced in one Block, Left-eye view is first +

+
block_rl
+

Both eyes laced in one Block, Right-eye view is first +

+
+
+
+ +

For example a 3D WebM clip can be created using the following command line: +

 
ffmpeg -i sample_left_right_clip.mpg -an -c:v libvpx -metadata stereo_mode=left_right -y stereo_clip.webm
+
+ + +

22.9.2 Options

+ +

This muxer supports the following options: +

+
+
reserve_index_space
+

By default, this muxer writes the index for seeking (called cues in Matroska +terms) at the end of the file, because it cannot know in advance how much space +to leave for the index at the beginning of the file. However for some use cases +– e.g. streaming where seeking is possible but slow – it is useful to put the +index at the beginning of the file. +

+

If this option is set to a non-zero value, the muxer will reserve a given amount +of space in the file header and then try to write the cues there when the muxing +finishes. If the available space does not suffice, muxing will fail. A safe size +for most use cases should be about 50kB per hour of video. +

+

Note that cues are only written if the output is seekable and this option will +have no effect if it is not. +

+
+ +

+

+

22.10 md5

+ +

MD5 testing format. +

+

This muxer computes and prints the MD5 hash of all the input audio +and video frames. By default audio frames are converted to signed +16-bit raw audio and video frames to raw video before computing the +hash. +

+

The output of the muxer consists of a single line of the form: +MD5=MD5, where MD5 is a hexadecimal number representing +the computed MD5 hash. +

+

For example to compute the MD5 hash of the input converted to raw +audio and video, and store it in the file ‘out.md5’: +

 
ffmpeg -i INPUT -f md5 out.md5
+
+ +

You can print the MD5 to stdout with the command: +

 
ffmpeg -i INPUT -f md5 -
+
+ +

See also the framemd5 muxer. +

+ +

22.11 mov, mp4, ismv

+ +

MOV/MP4/ISMV (Smooth Streaming) muxer. +

+

The mov/mp4/ismv muxer supports fragmentation. Normally, a MOV/MP4 +file has all the metadata about all packets stored in one location +(written at the end of the file, it can be moved to the start for +better playback by adding faststart to the movflags, or +using the qt-faststart tool). A fragmented +file consists of a number of fragments, where packets and metadata +about these packets are stored together. Writing a fragmented +file has the advantage that the file is decodable even if the +writing is interrupted (while a normal MOV/MP4 is undecodable if +it is not properly finished), and it requires less memory when writing +very long files (since writing normal MOV/MP4 files stores info about +every single packet in memory until the file is closed). The downside +is that it is less compatible with other applications. +

+ +

22.11.1 Options

+ +

Fragmentation is enabled by setting one of the AVOptions that define +how to cut the file into fragments: +

+
+
-moov_size bytes
+

Reserves space for the moov atom at the beginning of the file instead of placing the +moov atom at the end. If the space reserved is insufficient, muxing will fail. +

+
-movflags frag_keyframe
+

Start a new fragment at each video keyframe. +

+
-frag_duration duration
+

Create fragments that are duration microseconds long. +

+
-frag_size size
+

Create fragments that contain up to size bytes of payload data. +

+
-movflags frag_custom
+

Allow the caller to manually choose when to cut fragments, by +calling av_write_frame(ctx, NULL) to write a fragment with +the packets written so far. (This is only useful with other +applications integrating libavformat, not from ffmpeg.) +

+
-min_frag_duration duration
+

Don’t create fragments that are shorter than duration microseconds long. +

+
+ +

If more than one condition is specified, fragments are cut when +one of the specified conditions is fulfilled. The exception to this is +-min_frag_duration, which has to be fulfilled for any of the other +conditions to apply. +

+

Additionally, the way the output file is written can be adjusted +through a few other options: +

+
+
-movflags empty_moov
+

Write an initial moov atom directly at the start of the file, without +describing any samples in it. Generally, an mdat/moov pair is written +at the start of the file, as a normal MOV/MP4 file, containing only +a short portion of the file. With this option set, there is no initial +mdat atom, and the moov atom only describes the tracks but has +a zero duration. +

+

Files written with this option set do not work in QuickTime. +This option is implicitly set when writing ismv (Smooth Streaming) files. +

+
-movflags separate_moof
+

Write a separate moof (movie fragment) atom for each track. Normally, +packets for all tracks are written in a moof atom (which is slightly +more efficient), but with this option set, the muxer writes one moof/mdat +pair for each track, making it easier to separate tracks. +

+

This option is implicitly set when writing ismv (Smooth Streaming) files. +

+
-movflags faststart
+

Run a second pass moving the index (moov atom) to the beginning of the file. +This operation can take a while, and will not work in various situations such +as fragmented output, thus it is not enabled by default. +

+
-movflags rtphint
+

Add RTP hinting tracks to the output file. +

+
+ + +

22.11.2 Example

+ +

Smooth Streaming content can be pushed in real time to a publishing +point on IIS with this muxer. Example: +

 
ffmpeg -re <normal input/transcoding options> -movflags isml+frag_keyframe -f ismv http://server/publishingpoint.isml/Streams(Encoder1)
+
+ + +

22.12 mp3

+ +

The MP3 muxer writes a raw MP3 stream with an ID3v2 header at the beginning and +optionally an ID3v1 tag at the end. ID3v2.3 and ID3v2.4 are supported, the +id3v2_version option controls which one is used. Setting +id3v2_version to 0 will disable the ID3v2 header completely. The legacy +ID3v1 tag is not written by default, but may be enabled with the +write_id3v1 option. +

+

The muxer may also write a Xing frame at the beginning, which contains the +number of frames in the file. It is useful for computing duration of VBR files. +The Xing frame is written if the output stream is seekable and if the +write_xing option is set to 1 (the default). +

+

The muxer supports writing ID3v2 attached pictures (APIC frames). The pictures +are supplied to the muxer in form of a video stream with a single packet. There +can be any number of those streams, each will correspond to a single APIC frame. +The stream metadata tags title and comment map to APIC +description and picture type respectively. See +http://id3.org/id3v2.4.0-frames for allowed picture types. +

+

Note that the APIC frames must be written at the beginning, so the muxer will +buffer the audio frames until it gets all the pictures. It is therefore advised +to provide the pictures as soon as possible to avoid excessive buffering. +

+

Examples: +

+

Write an mp3 with an ID3v2.3 header and an ID3v1 footer: +

 
ffmpeg -i INPUT -id3v2_version 3 -write_id3v1 1 out.mp3
+
+ +

To attach a picture to an mp3 file select both the audio and the picture stream +with map: +

 
ffmpeg -i input.mp3 -i cover.png -c copy -map 0 -map 1
+-metadata:s:v title="Album cover" -metadata:s:v comment="Cover (Front)" out.mp3
+
+ +

Write a "clean" MP3 without any extra features: +

 
ffmpeg -i input.wav -write_xing 0 -id3v2_version 0 out.mp3
+
+ + +

22.13 mpegts

+ +

MPEG transport stream muxer. +

+

This muxer implements ISO 13818-1 and part of ETSI EN 300 468. +

+

The recognized metadata settings in mpegts muxer are service_provider +and service_name. If they are not set the default for +service_provider is "FFmpeg" and the default for +service_name is "Service01". +

+ +

22.13.1 Options

+ +

The muxer options are: +

+
+
-mpegts_original_network_id number
+

Set the original_network_id (default 0x0001). This is unique identifier +of a network in DVB. Its main use is in the unique identification of a +service through the path Original_Network_ID, Transport_Stream_ID. +

+
-mpegts_transport_stream_id number
+

Set the transport_stream_id (default 0x0001). This identifies a +transponder in DVB. +

+
-mpegts_service_id number
+

Set the service_id (default 0x0001) also known as program in DVB. +

+
-mpegts_pmt_start_pid number
+

Set the first PID for PMT (default 0x1000, max 0x1f00). +

+
-mpegts_start_pid number
+

Set the first PID for data packets (default 0x0100, max 0x0f00). +

+
-mpegts_m2ts_mode number
+

Enable m2ts mode if set to 1. Default value is -1 which disables m2ts mode. +

+
-muxrate number
+

Set muxrate. +

+
-pes_payload_size number
+

Set minimum PES packet payload in bytes. +

+
-mpegts_flags flags
+

Set flags (see below). +

+
-mpegts_copyts number
+

Preserve original timestamps, if value is set to 1. Default value is -1, which +results in shifting timestamps so that they start from 0. +

+
-tables_version number
+

Set PAT, PMT and SDT version (default 0, valid values are from 0 to 31, inclusively). +This option allows updating stream structure so that standard consumer may +detect the change. To do so, reopen output AVFormatContext (in case of API +usage) or restart ffmpeg instance, cyclically changing tables_version value: +

 
ffmpeg -i source1.ts -codec copy -f mpegts -tables_version 0 udp://1.1.1.1:1111
+ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111
+...
+ffmpeg -i source3.ts -codec copy -f mpegts -tables_version 31 udp://1.1.1.1:1111
+ffmpeg -i source1.ts -codec copy -f mpegts -tables_version 0 udp://1.1.1.1:1111
+ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111
+...
+
+
+
+ +

Option mpegts_flags may take a set of such flags: +

+
+
resend_headers
+

Reemit PAT/PMT before writing the next packet. +

+
latm
+

Use LATM packetization for AAC. +

+
+ + +

22.13.2 Example

+ +
 
ffmpeg -i file.mpg -c copy \
+     -mpegts_original_network_id 0x1122 \
+     -mpegts_transport_stream_id 0x3344 \
+     -mpegts_service_id 0x5566 \
+     -mpegts_pmt_start_pid 0x1500 \
+     -mpegts_start_pid 0x150 \
+     -metadata service_provider="Some provider" \
+     -metadata service_name="Some Channel" \
+     -y out.ts
+
+ + +

22.14 null

+ +

Null muxer. +

+

This muxer does not generate any output file, it is mainly useful for +testing or benchmarking purposes. +

+

For example to benchmark decoding with ffmpeg you can use the +command: +

 
ffmpeg -benchmark -i INPUT -f null out.null
+
+ +

Note that the above command does not read or write the ‘out.null’ +file, but specifying the output file is required by the ffmpeg +syntax. +

+

Alternatively you can write the command as: +

 
ffmpeg -benchmark -i INPUT -f null -
+
+ + +

22.15 ogg

+ +

Ogg container muxer. +

+
+
-page_duration duration
+

Preferred page duration, in microseconds. The muxer will attempt to create +pages that are approximately duration microseconds long. This allows the +user to compromise between seek granularity and container overhead. The default +is 1 second. A value of 0 will fill all segments, making pages as large as +possible. A value of 1 will effectively use 1 packet-per-page in most +situations, giving a small seek granularity at the cost of additional container +overhead. +

+
+ +

+

+

22.16 segment, stream_segment, ssegment

+ +

Basic stream segmenter. +

+

This muxer outputs streams to a number of separate files of nearly +fixed duration. Output filename pattern can be set in a fashion similar to +image2. +

+

stream_segment is a variant of the muxer used to write to +streaming output formats, i.e. which do not require global headers, +and is recommended for outputting e.g. to MPEG transport stream segments. +ssegment is a shorter alias for stream_segment. +

+

Every segment starts with a keyframe of the selected reference stream, +which is set through the ‘reference_stream’ option. +

+

Note that if you want accurate splitting for a video file, you need to +make the input key frames correspond to the exact splitting times +expected by the segmenter, or the segment muxer will start the new +segment with the key frame found next after the specified start +time. +

+

The segment muxer works best with a single constant frame rate video. +

+

Optionally it can generate a list of the created segments, by setting +the option segment_list. The list type is specified by the +segment_list_type option. The entry filenames in the segment +list are set by default to the basename of the corresponding segment +files. +

+

See also the hls muxer, which provides a more specific +implementation for HLS segmentation. +

+ +

22.16.1 Options

+ +

The segment muxer supports the following options: +

+
+
reference_stream specifier
+

Set the reference stream, as specified by the string specifier. +If specifier is set to auto, the reference is chosen +automatically. Otherwise it must be a stream specifier (see the “Stream +specifiers” chapter in the ffmpeg manual) which specifies the +reference stream. The default value is auto. +

+
+
segment_format format
+

Override the inner container format, by default it is guessed by the filename +extension. +

+
+
segment_list name
+

Generate also a listfile named name. If not specified no +listfile is generated. +

+
+
segment_list_flags flags
+

Set flags affecting the segment list generation. +

+

It currently supports the following flags: +

+
cache
+

Allow caching (only affects M3U8 list files). +

+
+
live
+

Allow live-friendly file generation. +

+
+ +
+
segment_list_size size
+

Update the list file so that it contains at most the last size +segments. If 0 the list file will contain all the segments. Default +value is 0. +

+
+
segment_list_entry_prefix prefix
+

Set prefix to prepend to the name of each entry filename. By +default no prefix is applied. +

+
+
segment_list_type type
+

Specify the format for the segment list file. +

+

The following values are recognized: +

+
flat
+

Generate a flat list for the created segments, one segment per line. +

+
+
csv, ext
+

Generate a list for the created segments, one segment per line, +each line matching the format (comma-separated values): +

 
segment_filename,segment_start_time,segment_end_time
+
+ +

segment_filename is the name of the output file generated by the +muxer according to the provided pattern. CSV escaping (according to +RFC4180) is applied if required. +

+

segment_start_time and segment_end_time specify +the segment start and end time expressed in seconds. +

+

A list file with the suffix ".csv" or ".ext" will +auto-select this format. +

+

‘ext’ is deprecated in favor of ‘csv’.

+
+
ffconcat
+

Generate an ffconcat file for the created segments. The resulting file +can be read using the FFmpeg concat demuxer. +

+

A list file with the suffix ".ffcat" or ".ffconcat" will +auto-select this format. +

+
+
m3u8
+

Generate an extended M3U8 file, version 3, compliant with +http://tools.ietf.org/id/draft-pantos-http-live-streaming. +

+

A list file with the suffix ".m3u8" will auto-select this format. +

+
+ +

If not specified the type is guessed from the list file name suffix. +

+
+
segment_time time
+

Set segment duration to time, the value must be a duration +specification. Default value is "2". See also the +‘segment_times’ option. +

+

Note that splitting may not be accurate, unless you force the +reference stream key-frames at the given time. See the introductory +notice and the examples below. +

+
+
segment_time_delta delta
+

Specify the accuracy time when selecting the start time for a +segment, expressed as a duration specification. Default value is "0". +

+

When delta is specified a key-frame will start a new segment if its +PTS satisfies the relation: +

 
PTS >= start_time - time_delta
+
+ +

This option is useful when splitting video content, which is always +split at GOP boundaries, in case a key frame is found just before the +specified split time. +

+

In particular, this option may be used in combination with the ‘ffmpeg’ option force_key_frames. The key frame times specified by force_key_frames may not be set accurately because of rounding issues, with the consequence that a key frame time may end up set just before the specified time. For constant frame rate videos a value of 1/(2*frame_rate) should address the worst case mismatch between the specified time and the time set by force_key_frames.

+
+
segment_times times
+

Specify a list of split points. times contains a list of comma +separated duration specifications, in increasing order. See also +the ‘segment_time’ option. +

+
+
segment_frames frames
+

Specify a list of split video frame numbers. frames contains a +list of comma separated integer numbers, in increasing order. +

+

This option specifies to start a new segment whenever a reference +stream key frame is found and the sequential number (starting from 0) +of the frame is greater or equal to the next value in the list. +

+
+
segment_wrap limit
+

Wrap around segment index once it reaches limit. +

+
+
segment_start_number number
+

Set the sequence number of the first segment. Defaults to 0. +

+
+
reset_timestamps 1|0
+

Reset timestamps at the begin of each segment, so that each segment +will start with near-zero timestamps. It is meant to ease the playback +of the generated segments. May not work with some combinations of +muxers/codecs. It is set to 0 by default. +

+
+
initial_offset offset
+

Specify timestamp offset to apply to the output packet timestamps. The +argument must be a time duration specification, and defaults to 0. +

+
+ + +

22.16.2 Examples

+ +
    +
  • +To remux the content of file ‘in.mkv’ to a list of segments +‘out-000.nut’, ‘out-001.nut’, etc., and write the list of +generated segments to ‘out.list’: +
     
    ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.list out%03d.nut
    +
    + +
  • +As the example above, but segment the input file according to the split +points specified by the segment_times option: +
     
    ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.csv -segment_times 1,2,3,5,8,13,21 out%03d.nut
    +
    + +
  • As in the example above, but use the ffmpeg ‘force_key_frames’ option to force key frames in the input at the specified location, together with the segment option ‘segment_time_delta’ to account for possible rounding performed when setting key frame times.
     
    ffmpeg -i in.mkv -force_key_frames 1,2,3,5,8,13,21 -codec:v mpeg4 -codec:a pcm_s16le -map 0 \
    +-f segment -segment_list out.csv -segment_times 1,2,3,5,8,13,21 -segment_time_delta 0.05 out%03d.nut
    +
    +

    In order to force key frames on the input file, transcoding is +required. +

    +
  • +Segment the input file by splitting the input file according to the +frame numbers sequence specified with the ‘segment_frames’ option: +
     
    ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.csv -segment_frames 100,200,300,500,800 out%03d.nut
    +
    + +
  • +To convert the ‘in.mkv’ to TS segments using the libx264 +and libfaac encoders: +
     
    ffmpeg -i in.mkv -map 0 -codec:v libx264 -codec:a libfaac -f ssegment -segment_list out.list out%03d.ts
    +
    + +
  • +Segment the input file, and create an M3U8 live playlist (can be used +as live HLS source): +
     
    ffmpeg -re -i in.mkv -codec copy -map 0 -f segment -segment_list playlist.m3u8 \
    +-segment_list_flags +live -segment_time 10 out%03d.mkv
    +
    +
+ + +

22.17 tee

+ +

The tee muxer can be used to write the same data to several files or any +other kind of muxer. It can be used, for example, to both stream a video to +the network and save it to disk at the same time. +

+

It is different from specifying several outputs to the ffmpeg +command-line tool because the audio and video data will be encoded only once +with the tee muxer; encoding can be a very expensive process. It is not +useful when using the libavformat API directly because it is then possible +to feed the same packets to several muxers directly. +

+

The slave outputs are specified in the file name given to the muxer, +separated by ’|’. If any of the slave name contains the ’|’ separator, +leading or trailing spaces or any special character, it must be +escaped (see (ffmpeg-utils)quoting_and_escaping). +

+

Muxer options can be specified for each slave by prepending them as a list of +key=value pairs separated by ’:’, between square brackets. If +the options values contain a special character or the ’:’ separator, they +must be escaped; note that this is a second level escaping. +

+

The following special options are also recognized: +

+
f
+

Specify the format name. Useful if it cannot be guessed from the +output name suffix. +

+
+
bsfs[/spec]
+

Specify a list of bitstream filters to apply to the specified +output. +

+

It is possible to specify to which streams a given bitstream filter applies, by appending a stream specifier to the option separated by /. spec must be a stream specifier (see Format stream specifiers). If the stream specifier is not specified, the bitstream filters will be applied to all streams in the output.

+

Several bitstream filters can be specified, separated by ",". +

+
+
select
+

Select the streams that should be mapped to the slave output, +specified by a stream specifier. If not specified, this defaults to +all the input streams. +

+
+ + +

22.17.1 Examples

+ +
    +
  • +Encode something and both archive it in a WebM file and stream it +as MPEG-TS over UDP (the streams need to be explicitly mapped): +
     
    ffmpeg -i ... -c:v libx264 -c:a mp2 -f tee -map 0:v -map 0:a
    +  "archive-20121107.mkv|[f=mpegts]udp://10.0.1.255:1234/"
    +
    + +
  • +Use ffmpeg to encode the input, and send the output +to three different destinations. The dump_extra bitstream +filter is used to add extradata information to all the output video +keyframes packets, as requested by the MPEG-TS format. The select +option is applied to ‘out.aac’ in order to make it contain only +audio packets. +
     
    ffmpeg -i ... -map 0 -flags +global_header -c:v libx264 -c:a aac -strict experimental
    +       -f tee "[bsfs/v=dump_extra]out.ts|[movflags=+faststart]out.mp4|[select=a]out.aac"
    +
    + +
  • As above, but select only stream a:1 for the audio output. Note that a second-level escaping must be performed, as ":" is a special character used to separate options.
     
    ffmpeg -i ... -map 0 -flags +global_header -c:v libx264 -c:a aac -strict experimental
    +       -f tee "[bsfs/v=dump_extra]out.ts|[movflags=+faststart]out.mp4|[select=\'a:1\']out.aac"
    +
    +
+ +

Note: some codecs may need different options depending on the output format; +the auto-detection of this can not work with the tee muxer. The main example +is the ‘global_header’ flag. +

+ +

23. Metadata

+ +

FFmpeg is able to dump metadata from media files into a simple UTF-8-encoded +INI-like text file and then load it back using the metadata muxer/demuxer. +

+

The file format is as follows: +

    +
  1. +A file consists of a header and a number of metadata tags divided into sections, +each on its own line. + +
  2. +The header is a ’;FFMETADATA’ string, followed by a version number (now 1). + +
  3. +Metadata tags are of the form ’key=value’ + +
  4. +Immediately after header follows global metadata + +
  5. +After global metadata there may be sections with per-stream/per-chapter +metadata. + +
  6. +A section starts with the section name in uppercase (i.e. STREAM or CHAPTER) in +brackets (’[’, ’]’) and ends with next section or end of file. + +
  7. +At the beginning of a chapter section there may be an optional timebase to be +used for start/end values. It must be in form ’TIMEBASE=num/den’, where num and +den are integers. If the timebase is missing then start/end times are assumed to +be in milliseconds. +Next a chapter section must contain chapter start and end times in form +’START=num’, ’END=num’, where num is a positive integer. + +
  8. +Empty lines and lines starting with ’;’ or ’#’ are ignored. + +
  9. +Metadata keys or values containing special characters (’=’, ’;’, ’#’, ’\’ and a +newline) must be escaped with a backslash ’\’. + +
  10. +Note that whitespace in metadata (e.g. foo = bar) is considered to be a part of +the tag (in the example above key is ’foo ’, value is ’ bar’). +
+ +

An ffmetadata file might look like this:

 
;FFMETADATA1
+title=bike\\shed
+;this is a comment
+artist=FFmpeg troll team
+
+[CHAPTER]
+TIMEBASE=1/1000
+START=0
+#chapter ends at 0:01:00
+END=60000
+title=chapter \#1
+[STREAM]
+title=multi\
+line
+
+ +

By using the ffmetadata muxer and demuxer it is possible to extract +metadata from an input file to an ffmetadata file, and then transcode +the file into an output file with the edited ffmetadata file. +

+

Extracting an ffmetadata file with ‘ffmpeg’ goes as follows: +

 
ffmpeg -i INPUT -f ffmetadata FFMETADATAFILE
+
+ +

Reinserting edited metadata information from the FFMETADATAFILE file can +be done as: +

 
ffmpeg -i INPUT -i FFMETADATAFILE -map_metadata 1 -codec copy OUTPUT
+
+ + +

24. Protocols

+ +

Protocols are configured elements in FFmpeg that enable access to +resources that require specific protocols. +

+

When you configure your FFmpeg build, all the supported protocols are +enabled by default. You can list all available ones using the +configure option "–list-protocols". +

+

You can disable all the protocols using the configure option +"–disable-protocols", and selectively enable a protocol using the +option "–enable-protocol=PROTOCOL", or you can disable a +particular protocol using the option +"–disable-protocol=PROTOCOL". +

+

The option "-protocols" of the ff* tools will display the list of +supported protocols. +

+

A description of the currently available protocols follows. +

+ +

24.1 bluray

+ +

Read BluRay playlist. +

+

The accepted options are: +

+
angle
+

BluRay angle +

+
+
chapter
+

Start chapter (1...N) +

+
+
playlist
+

Playlist to read (BDMV/PLAYLIST/?????.mpls) +

+
+
+ +

Examples: +

+

Read longest playlist from BluRay mounted to /mnt/bluray: +

 
bluray:/mnt/bluray
+
+ +

Read angle 2 of playlist 4 from BluRay mounted to /mnt/bluray, start from chapter 2: +

 
-playlist 4 -angle 2 -chapter 2 bluray:/mnt/bluray
+
+ + +

24.2 cache

+ +

Caching wrapper for input stream. +

+

Cache the input stream to temporary file. It brings seeking capability to live streams. +

+
 
cache:URL
+
+ + +

24.3 concat

+ +

Physical concatenation protocol. +

+

Allow to read and seek from many resources in sequence as if they were a unique resource.

+

A URL accepted by this protocol has the syntax: +

 
concat:URL1|URL2|...|URLN
+
+ +

where URL1, URL2, ..., URLN are the urls of the +resource to be concatenated, each one possibly specifying a distinct +protocol. +

+

For example to read a sequence of files ‘split1.mpeg’, +‘split2.mpeg’, ‘split3.mpeg’ with ffplay use the +command: +

 
ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
+
+ +

Note that you may need to escape the character "|" which is special for +many shells. +

+ +

24.4 crypto

+ +

AES-encrypted stream reading protocol. +

+

The accepted options are: +

+
key
+

Set the AES decryption key binary block from given hexadecimal representation. +

+
+
iv
+

Set the AES decryption initialization vector binary block from given hexadecimal representation. +

+
+ +

Accepted URL formats: +

 
crypto:URL
+crypto+URL
+
+ + +

24.5 data

+ +

Data in-line in the URI. See http://en.wikipedia.org/wiki/Data_URI_scheme. +

+

For example, to convert a GIF file given inline with ffmpeg: +

 
ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP///////////////ywAAAAACAAIAAADF0gEDLojDgdGiJdJqUX02iB4E8Q9jUMkADs=" smiley.png
+
+ + +

24.6 file

+ +

File access protocol. +

+

Allow to read from or write to a file. +

+

A file URL can have the form: +

 
file:filename
+
+ +

where filename is the path of the file to read. +

+

A URL that does not have a protocol prefix will be assumed to be a file URL. Depending on the build, a URL that looks like a Windows path with the drive letter at the beginning will also be assumed to be a file URL (usually not the case in builds for unix-like systems).

+

For example to read from a file ‘input.mpeg’ with ffmpeg +use the command: +

 
ffmpeg -i file:input.mpeg output.mpeg
+
+ +

This protocol accepts the following options: +

+
+
truncate
+

Truncate existing files on write, if set to 1. A value of 0 prevents +truncating. Default value is 1. +

+
+
blocksize
+

Set I/O operation maximum block size, in bytes. Default value is +INT_MAX, which results in not limiting the requested block size. +Setting this value reasonably low improves user termination request reaction +time, which is valuable for files on slow medium. +

+
+ + +

24.7 ftp

+ +

FTP (File Transfer Protocol). +

+

Allow to read from or write to remote resources using FTP protocol. +

+

The following syntax is required.

 
ftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
+
+ +

This protocol accepts the following options. +

+
+
timeout
+

Set timeout of socket I/O operations used by the underlying low level +operation. By default it is set to -1, which means that the timeout is +not specified. +

+
+
ftp-anonymous-password
+

Password used when login as anonymous user. Typically an e-mail address +should be used. +

+
+
ftp-write-seekable
+

Control seekability of connection during encoding. If set to 1 the +resource is supposed to be seekable, if set to 0 it is assumed not +to be seekable. Default value is 0. +

+
+ +

NOTE: Protocol can be used as output, but it is recommended to not do +it, unless special care is taken (tests, customized server configuration +etc.). Different FTP servers behave in different way during seek +operation. ff* tools may produce incomplete content due to server limitations. +

+ +

24.8 gopher

+ +

Gopher protocol. +

+ +

24.9 hls

+ +

Read Apple HTTP Live Streaming compliant segmented stream as +a uniform one. The M3U8 playlists describing the segments can be +remote HTTP resources or local files, accessed using the standard +file protocol. +The nested protocol is declared by specifying +"+proto" after the hls URI scheme name, where proto +is either "file" or "http". +

+
 
hls+http://host/path/to/remote/resource.m3u8
+hls+file://path/to/local/resource.m3u8
+
+ +

Using this protocol is discouraged - the hls demuxer should work +just as well (if not, please report the issues) and is more complete. +To use the hls demuxer instead, simply use the direct URLs to the +m3u8 files. +

+ +

24.10 http

+ +

HTTP (Hyper Text Transfer Protocol). +

+

This protocol accepts the following options: +

+
+
seekable
+

Control seekability of connection. If set to 1 the resource is +supposed to be seekable, if set to 0 it is assumed not to be seekable, +if set to -1 it will try to autodetect if it is seekable. Default +value is -1. +

+
+
chunked_post
+

If set to 1 use chunked Transfer-Encoding for posts, default is 1. +

+
+
content_type
+

Set a specific content type for the POST messages. +

+
+
headers
+

Set custom HTTP headers, can override built in default headers. The +value must be a string encoding the headers. +

+
+
multiple_requests
+

Use persistent connections if set to 1, default is 0. +

+
+
post_data
+

Set custom HTTP post data. +

+
+
user-agent
+
user_agent
+

Override the User-Agent header. If not specified the protocol will use a +string describing the libavformat build. ("Lavf/<version>") +

+
+
timeout
+

Set timeout of socket I/O operations used by the underlying low level +operation. By default it is set to -1, which means that the timeout is +not specified. +

+
+
mime_type
+

Export the MIME type. +

+
+
icy
+

If set to 1 request ICY (SHOUTcast) metadata from the server. If the server +supports this, the metadata has to be retrieved by the application by reading +the ‘icy_metadata_headers’ and ‘icy_metadata_packet’ options. +The default is 0. +

+
+
icy_metadata_headers
+

If the server supports ICY metadata, this contains the ICY-specific HTTP reply +headers, separated by newline characters. +

+
+
icy_metadata_packet
+

If the server supports ICY metadata, and ‘icy’ was set to 1, this +contains the last non-empty metadata packet sent by the server. It should be +polled in regular intervals by applications interested in mid-stream metadata +updates. +

+
+
cookies
+

Set the cookies to be sent in future requests. The format of each cookie is the +same as the value of a Set-Cookie HTTP response field. Multiple cookies can be +delimited by a newline character. +

+
+
offset
+

Set initial byte offset. +

+
+
end_offset
+

Try to limit the request to bytes preceding this offset. +

+
+ + +

24.10.1 HTTP Cookies

+ +

Some HTTP requests will be denied unless cookie values are passed in with the +request. The ‘cookies’ option allows these cookies to be specified. At +the very least, each cookie must specify a value along with a path and domain. +HTTP requests that match both the domain and path will automatically include the +cookie value in the HTTP Cookie header field. Multiple cookies can be delimited +by a newline. +

+

The required syntax to play a stream specifying a cookie is: +

 
ffplay -cookies "nlqptid=nltid=tsn; path=/; domain=somedomain.com;" http://somedomain.com/somestream.m3u8
+
+ + +

24.11 mmst

+ +

MMS (Microsoft Media Server) protocol over TCP. +

+ +

24.12 mmsh

+ +

MMS (Microsoft Media Server) protocol over HTTP. +

+

The required syntax is: +

 
mmsh://server[:port][/app][/playpath]
+
+ + +

24.13 md5

+ +

MD5 output protocol. +

+

Computes the MD5 hash of the data to be written, and on close writes +this to the designated output or stdout if none is specified. It can +be used to test muxers without writing an actual file. +

+

Some examples follow. +

 
# Write the MD5 hash of the encoded AVI file to the file output.avi.md5.
+ffmpeg -i input.flv -f avi -y md5:output.avi.md5
+
+# Write the MD5 hash of the encoded AVI file to stdout.
+ffmpeg -i input.flv -f avi -y md5:
+
+ +

Note that some formats (typically MOV) require the output protocol to +be seekable, so they will fail with the MD5 output protocol. +

+ +

24.14 pipe

+ +

UNIX pipe access protocol. +

+

Allows reading from and writing to UNIX pipes. +

+

The accepted syntax is: +

 
pipe:[number]
+
+ +

number is the number corresponding to the file descriptor of the +pipe (e.g. 0 for stdin, 1 for stdout, 2 for stderr). If number +is not specified, by default the stdout file descriptor will be used +for writing, stdin for reading. +

+

For example to read from stdin with ffmpeg: +

 
cat test.wav | ffmpeg -i pipe:0
+# ...this is the same as...
+cat test.wav | ffmpeg -i pipe:
+
+ +

For writing to stdout with ffmpeg: +

 
ffmpeg -i test.wav -f avi pipe:1 | cat > test.avi
+# ...this is the same as...
+ffmpeg -i test.wav -f avi pipe: | cat > test.avi
+
+ +

This protocol accepts the following options: +

+
+
blocksize
+

Set I/O operation maximum block size, in bytes. Default value is +INT_MAX, which results in not limiting the requested block size. +Setting this value reasonably low improves user termination request reaction +time, which is valuable if data transmission is slow. +

+
+ +

Note that some formats (typically MOV), require the output protocol to +be seekable, so they will fail with the pipe output protocol. +

+ +

24.15 rtmp

+ +

Real-Time Messaging Protocol. +

+

The Real-Time Messaging Protocol (RTMP) is used for streaming multimedia +content across a TCP/IP network. +

+

The required syntax is: +

 
rtmp://[username:password@]server[:port][/app][/instance][/playpath]
+
+ +

The accepted parameters are: +

+
username
+

An optional username (mostly for publishing). +

+
+
password
+

An optional password (mostly for publishing). +

+
+
server
+

The address of the RTMP server. +

+
+
port
+

The number of the TCP port to use (by default is 1935). +

+
+
app
+

It is the name of the application to access. It usually corresponds to +the path where the application is installed on the RTMP server +(e.g. ‘/ondemand/’, ‘/flash/live/’, etc.). You can override +the value parsed from the URI through the rtmp_app option, too. +

+
+
playpath
+

It is the path or name of the resource to play with reference to the +application specified in app, may be prefixed by "mp4:". You +can override the value parsed from the URI through the rtmp_playpath +option, too. +

+
+
listen
+

Act as a server, listening for an incoming connection. +

+
+
timeout
+

Maximum time to wait for the incoming connection. Implies listen. +

+
+ +

Additionally, the following parameters can be set via command line options +(or in code via AVOptions): +

+
rtmp_app
+

Name of application to connect on the RTMP server. This option +overrides the parameter specified in the URI. +

+
+
rtmp_buffer
+

Set the client buffer time in milliseconds. The default is 3000. +

+
+
rtmp_conn
+

Extra arbitrary AMF connection parameters, parsed from a string, +e.g. like B:1 S:authMe O:1 NN:code:1.23 NS:flag:ok O:0. +Each value is prefixed by a single character denoting the type, +B for Boolean, N for number, S for string, O for object, or Z for null, +followed by a colon. For Booleans the data must be either 0 or 1 for +FALSE or TRUE, respectively. Likewise for Objects the data must be 0 or +1 to end or begin an object, respectively. Data items in subobjects may +be named, by prefixing the type with ’N’ and specifying the name before +the value (i.e. NB:myFlag:1). This option may be used multiple +times to construct arbitrary AMF sequences. +

+
+
rtmp_flashver
+

Version of the Flash plugin used to run the SWF player. The default +is LNX 9,0,124,2. (When publishing, the default is FMLE/3.0 (compatible; +<libavformat version>).) +

+
+
rtmp_flush_interval
+

Number of packets flushed in the same request (RTMPT only). The default +is 10. +

+
+
rtmp_live
+

Specify that the media is a live stream. No resuming or seeking in +live streams is possible. The default value is any, which means the +subscriber first tries to play the live stream specified in the +playpath. If a live stream of that name is not found, it plays the +recorded stream. The other possible values are live and +recorded. +

+
+
rtmp_pageurl
+

URL of the web page in which the media was embedded. By default no +value will be sent. +

+
+
rtmp_playpath
+

Stream identifier to play or to publish. This option overrides the +parameter specified in the URI. +

+
+
rtmp_subscribe
+

Name of live stream to subscribe to. By default no value will be sent. +It is only sent if the option is specified or if rtmp_live +is set to live. +

+
+
rtmp_swfhash
+

SHA256 hash of the decompressed SWF file (32 bytes). +

+
+
rtmp_swfsize
+

Size of the decompressed SWF file, required for SWFVerification. +

+
+
rtmp_swfurl
+

URL of the SWF player for the media. By default no value will be sent. +

+
+
rtmp_swfverify
+

URL to player swf file, compute hash/size automatically. +

+
+
rtmp_tcurl
+

URL of the target stream. Defaults to proto://host[:port]/app. +

+
+
+ +

For example to read with ffplay a multimedia resource named +"sample" from the application "vod" from an RTMP server "myserver": +

 
ffplay rtmp://myserver/vod/sample
+
+ +

To publish to a password protected server, passing the playpath and +app names separately: +

 
ffmpeg -re -i <input> -f flv -rtmp_playpath some/long/path -rtmp_app long/app/name rtmp://username:password@myserver/
+
+ + +

24.16 rtmpe

+ +

Encrypted Real-Time Messaging Protocol. +

+

The Encrypted Real-Time Messaging Protocol (RTMPE) is used for +streaming multimedia content within standard cryptographic primitives, +consisting of Diffie-Hellman key exchange and HMACSHA256, generating +a pair of RC4 keys. +

+ +

24.17 rtmps

+ +

Real-Time Messaging Protocol over a secure SSL connection. +

+

The Real-Time Messaging Protocol (RTMPS) is used for streaming +multimedia content across an encrypted connection. +

+ +

24.18 rtmpt

+ +

Real-Time Messaging Protocol tunneled through HTTP. +

+

The Real-Time Messaging Protocol tunneled through HTTP (RTMPT) is used +for streaming multimedia content within HTTP requests to traverse +firewalls. +

+ +

24.19 rtmpte

+ +

Encrypted Real-Time Messaging Protocol tunneled through HTTP. +

+

The Encrypted Real-Time Messaging Protocol tunneled through HTTP (RTMPTE) +is used for streaming multimedia content within HTTP requests to traverse +firewalls. +

+ +

24.20 rtmpts

+ +

Real-Time Messaging Protocol tunneled through HTTPS. +

+

The Real-Time Messaging Protocol tunneled through HTTPS (RTMPTS) is used +for streaming multimedia content within HTTPS requests to traverse +firewalls. +

+ +

24.21 libssh

+ +

Secure File Transfer Protocol via libssh +

+

Allows reading from or writing to remote resources using the SFTP protocol. +

+

The following syntax is required. +

+
 
sftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
+
+ +

This protocol accepts the following options. +

+
+
timeout
+

Set timeout of socket I/O operations used by the underlying low level +operation. By default it is set to -1, which means that the timeout +is not specified. +

+
+
truncate
+

Truncate existing files on write, if set to 1. A value of 0 prevents +truncating. Default value is 1. +

+
+
private_key
+

Specify the path of the file containing private key to use during authorization. +By default libssh searches for keys in the ‘~/.ssh/’ directory. +

+
+
+ +

Example: Play a file stored on remote server. +

+
 
ffplay sftp://user:password@server_address:22/home/user/resource.mpeg
+
+ + +

24.22 librtmp rtmp, rtmpe, rtmps, rtmpt, rtmpte

+ +

Real-Time Messaging Protocol and its variants supported through +librtmp. +

+

Requires the presence of the librtmp headers and library during +configuration. You need to explicitly configure the build with +"–enable-librtmp". If enabled this will replace the native RTMP +protocol. +

+

This protocol provides most client functions and a few server +functions needed to support RTMP, RTMP tunneled in HTTP (RTMPT), +encrypted RTMP (RTMPE), RTMP over SSL/TLS (RTMPS) and tunneled +variants of these encrypted types (RTMPTE, RTMPTS). +

+

The required syntax is: +

 
rtmp_proto://server[:port][/app][/playpath] options
+
+ +

where rtmp_proto is one of the strings "rtmp", "rtmpt", "rtmpe", +"rtmps", "rtmpte", "rtmpts" corresponding to each RTMP variant, and +server, port, app and playpath have the same +meaning as specified for the RTMP native protocol. +options contains a list of space-separated options of the form +key=val. +

+

See the librtmp manual page (man 3 librtmp) for more information. +

+

For example, to stream a file in real-time to an RTMP server using +ffmpeg: +

 
ffmpeg -re -i myfile -f flv rtmp://myserver/live/mystream
+
+ +

To play the same stream using ffplay: +

 
ffplay "rtmp://myserver/live/mystream live=1"
+
+ + +

24.23 rtp

+ +

Real-time Transport Protocol. +

+

The required syntax for an RTP URL is: +rtp://hostname[:port][?option=val...] +

+

port specifies the RTP port to use. +

+

The following URL options are supported: +

+
+
ttl=n
+

Set the TTL (Time-To-Live) value (for multicast only). +

+
+
rtcpport=n
+

Set the remote RTCP port to n. +

+
+
localrtpport=n
+

Set the local RTP port to n. +

+
+
localrtcpport=n
+

Set the local RTCP port to n. +

+
+
pkt_size=n
+

Set max packet size (in bytes) to n. +

+
+
connect=0|1
+

Do a connect() on the UDP socket (if set to 1) or not (if set +to 0). +

+
+
sources=ip[,ip]
+

List allowed source IP addresses. +

+
+
block=ip[,ip]
+

List disallowed (blocked) source IP addresses. +

+
+
write_to_source=0|1
+

Send packets to the source address of the latest received packet (if +set to 1) or to a default remote address (if set to 0). +

+
+
localport=n
+

Set the local RTP port to n. +

+

This is a deprecated option. Instead, ‘localrtpport’ should be +used. +

+
+
+ +

Important notes: +

+
    +
  1. +If ‘rtcpport’ is not set the RTCP port will be set to the RTP +port value plus 1. + +
  2. +If ‘localrtpport’ (the local RTP port) is not set any available +port will be used for the local RTP and RTCP ports. + +
  3. +If ‘localrtcpport’ (the local RTCP port) is not set it will be +set to the local RTP port value plus 1. +
+ + +

24.24 rtsp

+ +

Real-Time Streaming Protocol. +

+

RTSP is not technically a protocol handler in libavformat, it is a demuxer +and muxer. The demuxer supports both normal RTSP (with data transferred +over RTP; this is used by e.g. Apple and Microsoft) and Real-RTSP (with +data transferred over RDT). +

+

The muxer can be used to send a stream using RTSP ANNOUNCE to a server +supporting it (currently Darwin Streaming Server and Mischa Spiegelmock’s +RTSP server). +

+

The required syntax for a RTSP url is: +

 
rtsp://hostname[:port]/path
+
+ +

Options can be set on the ffmpeg/ffplay command +line, or set in code via AVOptions or in +avformat_open_input. +

+

The following options are supported. +

+
+
initial_pause
+

Do not start playing the stream immediately if set to 1. Default value +is 0. +

+
+
rtsp_transport
+

Set RTSP transport protocols. +

+

It accepts the following values: +

+
udp
+

Use UDP as lower transport protocol. +

+
+
tcp
+

Use TCP (interleaving within the RTSP control channel) as lower +transport protocol. +

+
+
udp_multicast
+

Use UDP multicast as lower transport protocol. +

+
+
http
+

Use HTTP tunneling as lower transport protocol, which is useful for +passing proxies. +

+
+ +

Multiple lower transport protocols may be specified, in that case they are +tried one at a time (if the setup of one fails, the next one is tried). +For the muxer, only the ‘tcp’ and ‘udp’ options are supported. +

+
+
rtsp_flags
+

Set RTSP flags. +

+

The following values are accepted: +

+
filter_src
+

Accept packets only from negotiated peer address and port. +

+
listen
+

Act as a server, listening for an incoming connection. +

+
+ +

Default value is ‘none’. +

+
+
allowed_media_types
+

Set media types to accept from the server. +

+

The following flags are accepted: +

+
video
+
audio
+
data
+
+ +

By default it accepts all media types. +

+
+
min_port
+

Set minimum local UDP port. Default value is 5000. +

+
+
max_port
+

Set maximum local UDP port. Default value is 65000. +

+
+
timeout
+

Set maximum timeout (in seconds) to wait for incoming connections. +

+

A value of -1 mean infinite (default). This option implies the +‘rtsp_flags’ set to ‘listen’. +

+
+
reorder_queue_size
+

Set number of packets to buffer for handling of reordered packets. +

+
+
stimeout
+

Set socket TCP I/O timeout in micro seconds. +

+
+
user-agent
+

Override User-Agent header. If not specified, it defaults to the +libavformat identifier string. +

+
+ +

When receiving data over UDP, the demuxer tries to reorder received packets +(since they may arrive out of order, or packets may get lost totally). This +can be disabled by setting the maximum demuxing delay to zero (via +the max_delay field of AVFormatContext). +

+

When watching multi-bitrate Real-RTSP streams with ffplay, the +streams to display can be chosen with -vst n and +-ast n for video and audio respectively, and can be switched +on the fly by pressing v and a. +

+ +

24.24.1 Examples

+ +

The following examples all make use of the ffplay and +ffmpeg tools. +

+
    +
  • +Watch a stream over UDP, with a max reordering delay of 0.5 seconds: +
     
    ffplay -max_delay 500000 -rtsp_transport udp rtsp://server/video.mp4
    +
    + +
  • +Watch a stream tunneled over HTTP: +
     
    ffplay -rtsp_transport http rtsp://server/video.mp4
    +
    + +
  • +Send a stream in realtime to a RTSP server, for others to watch: +
     
    ffmpeg -re -i input -f rtsp -muxdelay 0.1 rtsp://server/live.sdp
    +
    + +
  • +Receive a stream in realtime: +
     
    ffmpeg -rtsp_flags listen -i rtsp://ownaddress/live.sdp output
    +
    +
+ + +

24.25 sap

+ +

Session Announcement Protocol (RFC 2974). This is not technically a +protocol handler in libavformat, it is a muxer and demuxer. +It is used for signalling of RTP streams, by announcing the SDP for the +streams regularly on a separate port. +

+ +

24.25.1 Muxer

+ +

The syntax for a SAP url given to the muxer is: +

 
sap://destination[:port][?options]
+
+ +

The RTP packets are sent to destination on port port, +or to port 5004 if no port is specified. +options is a &-separated list. The following options +are supported: +

+
+
announce_addr=address
+

Specify the destination IP address for sending the announcements to. +If omitted, the announcements are sent to the commonly used SAP +announcement multicast address 224.2.127.254 (sap.mcast.net), or +ff0e::2:7ffe if destination is an IPv6 address. +

+
+
announce_port=port
+

Specify the port to send the announcements on, defaults to +9875 if not specified. +

+
+
ttl=ttl
+

Specify the time to live value for the announcements and RTP packets, +defaults to 255. +

+
+
same_port=0|1
+

If set to 1, send all RTP streams on the same port pair. If zero (the +default), all streams are sent on unique ports, with each stream on a +port 2 numbers higher than the previous. +VLC/Live555 requires this to be set to 1, to be able to receive the stream. +The RTP stack in libavformat for receiving requires all streams to be sent +on unique ports. +

+
+ +

Example command lines follow. +

+

To broadcast a stream on the local subnet, for watching in VLC: +

+
 
ffmpeg -re -i input -f sap sap://224.0.0.255?same_port=1
+
+ +

Similarly, for watching in ffplay: +

+
 
ffmpeg -re -i input -f sap sap://224.0.0.255
+
+ +

And for watching in ffplay, over IPv6: +

+
 
ffmpeg -re -i input -f sap sap://[ff0e::1:2:3:4]
+
+ + +

24.25.2 Demuxer

+ +

The syntax for a SAP url given to the demuxer is: +

 
sap://[address][:port]
+
+ +

address is the multicast address to listen for announcements on, +if omitted, the default 224.2.127.254 (sap.mcast.net) is used. port +is the port that is listened on, 9875 if omitted. +

+

The demuxer listens for announcements on the given address and port. +Once an announcement is received, it tries to receive that particular stream. +

+

Example command lines follow. +

+

To play back the first stream announced on the normal SAP multicast address: +

+
 
ffplay sap://
+
+ +

To play back the first stream announced on the default IPv6 SAP multicast address: +

+
 
ffplay sap://[ff0e::2:7ffe]
+
+ + +

24.26 sctp

+ +

Stream Control Transmission Protocol. +

+

The accepted URL syntax is: +

 
sctp://host:port[?options]
+
+ +

The protocol accepts the following options: +

+
listen
+

If set to any value, listen for an incoming connection. Outgoing connection is done by default. +

+
+
max_streams
+

Set the maximum number of streams. By default no limit is set. +

+
+ + +

24.27 srtp

+ +

Secure Real-time Transport Protocol. +

+

The accepted options are: +

+
srtp_in_suite
+
srtp_out_suite
+

Select input and output encoding suites. +

+

Supported values: +

+
AES_CM_128_HMAC_SHA1_80
+
SRTP_AES128_CM_HMAC_SHA1_80
+
AES_CM_128_HMAC_SHA1_32
+
SRTP_AES128_CM_HMAC_SHA1_32
+
+ +
+
srtp_in_params
+
srtp_out_params
+

Set input and output encoding parameters, which are expressed by a +base64-encoded representation of a binary block. The first 16 bytes of +this binary block are used as master key, the following 14 bytes are +used as master salt. +

+
+ + +

24.28 tcp

+ +

Transmission Control Protocol. +

+

The required syntax for a TCP url is: +

 
tcp://hostname:port[?options]
+
+ +

options contains a list of &-separated options of the form +key=val. +

+

The list of supported options follows. +

+
+
listen=1|0
+

Listen for an incoming connection. Default value is 0. +

+
+
timeout=microseconds
+

Set raise error timeout, expressed in microseconds. +

+

This option is only relevant in read mode: if no data arrived in more +than this time interval, raise error. +

+
+
listen_timeout=microseconds
+

Set listen timeout, expressed in microseconds. +

+
+ +

The following example shows how to setup a listening TCP connection +with ffmpeg, which is then accessed with ffplay: +

 
ffmpeg -i input -f format tcp://hostname:port?listen
+ffplay tcp://hostname:port
+
+ + +

24.29 tls

+ +

Transport Layer Security (TLS) / Secure Sockets Layer (SSL) +

+

The required syntax for a TLS/SSL url is: +

 
tls://hostname:port[?options]
+
+ +

The following parameters can be set via command line options +(or in code via AVOptions): +

+
+
ca_file, cafile=filename
+

A file containing certificate authority (CA) root certificates to treat +as trusted. If the linked TLS library contains a default this might not +need to be specified for verification to work, but not all libraries and +setups have defaults built in. +The file must be in OpenSSL PEM format. +

+
+
tls_verify=1|0
+

If enabled, try to verify the peer that we are communicating with. +Note, if using OpenSSL, this currently only makes sure that the +peer certificate is signed by one of the root certificates in the CA +database, but it does not validate that the certificate actually +matches the host name we are trying to connect to. (With GnuTLS, +the host name is validated as well.) +

+

This is disabled by default since it requires a CA database to be +provided by the caller in many cases. +

+
+
cert_file, cert=filename
+

A file containing a certificate to use in the handshake with the peer. +(When operating as server, in listen mode, this is more often required +by the peer, while client certificates only are mandated in certain +setups.) +

+
+
key_file, key=filename
+

A file containing the private key for the certificate. +

+
+
listen=1|0
+

If enabled, listen for connections on the provided port, and assume +the server role in the handshake instead of the client role. +

+
+
+ +

Example command lines: +

+

To create a TLS/SSL server that serves an input stream. +

+
 
ffmpeg -i input -f format tls://hostname:port?listen&cert=server.crt&key=server.key
+
+ +

To play back a stream from the TLS/SSL server using ffplay: +

+
 
ffplay tls://hostname:port
+
+ + +

24.30 udp

+ +

User Datagram Protocol. +

+

The required syntax for an UDP URL is: +

 
udp://hostname:port[?options]
+
+ +

options contains a list of &-separated options of the form key=val. +

+

In case threading is enabled on the system, a circular buffer is used +to store the incoming data, which allows one to reduce loss of data due to +UDP socket buffer overruns. The fifo_size and +overrun_nonfatal options are related to this buffer. +

+

The list of supported options follows. +

+
+
buffer_size=size
+

Set the UDP socket buffer size in bytes. This is used both for the +receiving and the sending buffer size. +

+
+
localport=port
+

Override the local UDP port to bind with. +

+
+
localaddr=addr
+

Choose the local IP address. This is useful e.g. if sending multicast +and the host has multiple interfaces, where the user can choose +which interface to send on by specifying the IP address of that interface. +

+
+
pkt_size=size
+

Set the size in bytes of UDP packets. +

+
+
reuse=1|0
+

Explicitly allow or disallow reusing UDP sockets. +

+
+
ttl=ttl
+

Set the time to live value (for multicast only). +

+
+
connect=1|0
+

Initialize the UDP socket with connect(). In this case, the +destination address can’t be changed with ff_udp_set_remote_url later. +If the destination address isn’t known at the start, this option can +be specified in ff_udp_set_remote_url, too. +This allows finding out the source address for the packets with getsockname, +and makes writes return with AVERROR(ECONNREFUSED) if "destination +unreachable" is received. +For receiving, this gives the benefit of only receiving packets from +the specified peer address/port. +

+
+
sources=address[,address]
+

Only receive packets sent to the multicast group from one of the +specified sender IP addresses. +

+
+
block=address[,address]
+

Ignore packets sent to the multicast group from the specified +sender IP addresses. +

+
+
fifo_size=units
+

Set the UDP receiving circular buffer size, expressed as a number of +packets with size of 188 bytes. If not specified defaults to 7*4096. +

+
+
overrun_nonfatal=1|0
+

Survive in case of UDP receiving circular buffer overrun. Default +value is 0. +

+
+
timeout=microseconds
+

Set raise error timeout, expressed in microseconds. +

+

This option is only relevant in read mode: if no data arrived in more +than this time interval, raise error. +

+
+ + +

24.30.1 Examples

+ +
    +
  • +Use ffmpeg to stream over UDP to a remote endpoint: +
     
    ffmpeg -i input -f format udp://hostname:port
    +
    + +
  • +Use ffmpeg to stream in mpegts format over UDP using 188 +sized UDP packets, using a large input buffer: +
     
    ffmpeg -i input -f mpegts udp://hostname:port?pkt_size=188&buffer_size=65535
    +
    + +
  • +Use ffmpeg to receive over UDP from a remote endpoint: +
     
    ffmpeg -i udp://[multicast-address]:port ...
    +
    +
+ + +

24.31 unix

+ +

Unix local socket +

+

The required syntax for a Unix socket URL is: +

+
 
unix://filepath
+
+ +

The following parameters can be set via command line options +(or in code via AVOptions): +

+
+
timeout
+

Timeout in ms. +

+
listen
+

Create the Unix socket in listening mode. +

+
+ + +

25. Device Options

+ +

The libavdevice library provides the same interface as +libavformat. Namely, an input device is considered like a demuxer, and +an output device like a muxer, and the interface and generic device +options are the same provided by libavformat (see the ffmpeg-formats +manual). +

+

In addition each input or output device may support so-called private +options, which are specific for that component. +

+

Options may be set by specifying -option value in the +FFmpeg tools, or by setting the value explicitly in the device +AVFormatContext options or using the ‘libavutil/opt.h’ API +for programmatic use. +

+ + +

26. Input Devices

+ +

Input devices are configured elements in FFmpeg which allow you to access +the data coming from a multimedia device attached to your system. +

+

When you configure your FFmpeg build, all the supported input devices +are enabled by default. You can list all available ones using the +configure option "–list-indevs". +

+

You can disable all the input devices using the configure option +"–disable-indevs", and selectively enable an input device using the +option "–enable-indev=INDEV", or you can disable a particular +input device using the option "–disable-indev=INDEV". +

+

The option "-formats" of the ff* tools will display the list of +supported input devices (amongst the demuxers). +

+

A description of the currently available input devices follows. +

+ +

26.1 alsa

+ +

ALSA (Advanced Linux Sound Architecture) input device. +

+

To enable this input device during configuration you need libasound +installed on your system. +

+

This device allows capturing from an ALSA device. The name of the +device to capture has to be an ALSA card identifier. +

+

An ALSA identifier has the syntax: +

 
hw:CARD[,DEV[,SUBDEV]]
+
+ +

where the DEV and SUBDEV components are optional. +

+

The three arguments (in order: CARD,DEV,SUBDEV) +specify card number or identifier, device number and subdevice number +(-1 means any). +

+

To see the list of cards currently recognized by your system check the +files ‘/proc/asound/cards’ and ‘/proc/asound/devices’. +

+

For example to capture with ffmpeg from an ALSA device with +card id 0, you may run the command: +

 
ffmpeg -f alsa -i hw:0 alsaout.wav
+
+ +

For more information see: +http://www.alsa-project.org/alsa-doc/alsa-lib/pcm.html +

+ +

26.2 bktr

+ +

BSD video input device. +

+ +

26.3 dshow

+ +

Windows DirectShow input device. +

+

DirectShow support is enabled when FFmpeg is built with the mingw-w64 project. +Currently only audio and video devices are supported. +

+

Multiple devices may be opened as separate inputs, but they may also be +opened on the same input, which should improve synchronism between them. +

+

The input name should be in the format: +

+
 
TYPE=NAME[:TYPE=NAME]
+
+ +

where TYPE can be either audio or video, +and NAME is the device’s name. +

+ +

26.3.1 Options

+ +

If no options are specified, the device’s defaults are used. +If the device does not support the requested options, it will +fail to open. +

+
+
video_size
+

Set the video size in the captured video. +

+
+
framerate
+

Set the frame rate in the captured video. +

+
+
sample_rate
+

Set the sample rate (in Hz) of the captured audio. +

+
+
sample_size
+

Set the sample size (in bits) of the captured audio. +

+
+
channels
+

Set the number of channels in the captured audio. +

+
+
list_devices
+

If set to ‘true’, print a list of devices and exit. +

+
+
list_options
+

If set to ‘true’, print a list of selected device’s options +and exit. +

+
+
video_device_number
+

Set video device number for devices with same name (starts at 0, +defaults to 0). +

+
+
audio_device_number
+

Set audio device number for devices with same name (starts at 0, +defaults to 0). +

+
+
pixel_format
+

Select pixel format to be used by DirectShow. This may only be set when +the video codec is not set or set to rawvideo. +

+
+
audio_buffer_size
+

Set audio device buffer size in milliseconds (which can directly +impact latency, depending on the device). +Defaults to using the audio device’s +default buffer size (typically some multiple of 500ms). +Setting this value too low can degrade performance. +See also +http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx +

+
+
+ + +

26.3.2 Examples

+ +
    +
  • +Print the list of DirectShow supported devices and exit: +
     
    $ ffmpeg -list_devices true -f dshow -i dummy
    +
    + +
  • +Open video device Camera: +
     
    $ ffmpeg -f dshow -i video="Camera"
    +
    + +
  • +Open second video device with name Camera: +
     
    $ ffmpeg -f dshow -video_device_number 1 -i video="Camera"
    +
    + +
  • +Open video device Camera and audio device Microphone: +
     
    $ ffmpeg -f dshow -i video="Camera":audio="Microphone"
    +
    + +
  • +Print the list of supported options in selected device and exit: +
     
    $ ffmpeg -list_options true -f dshow -i video="Camera"
    +
    + +
+ + +

26.4 dv1394

+ +

Linux DV 1394 input device. +

+ +

26.5 fbdev

+ +

Linux framebuffer input device. +

+

The Linux framebuffer is a graphic hardware-independent abstraction +layer to show graphics on a computer monitor, typically on the +console. It is accessed through a file device node, usually +‘/dev/fb0’. +

+

For more detailed information read the file +Documentation/fb/framebuffer.txt included in the Linux source tree. +

+

To record from the framebuffer device ‘/dev/fb0’ with +ffmpeg: +

 
ffmpeg -f fbdev -r 10 -i /dev/fb0 out.avi
+
+ +

You can take a single screenshot image with the command: +

 
ffmpeg -f fbdev -frames:v 1 -r 1 -i /dev/fb0 screenshot.jpeg
+
+ +

See also http://linux-fbdev.sourceforge.net/, and fbset(1). +

+ +

26.6 iec61883

+ +

FireWire DV/HDV input device using libiec61883. +

+

To enable this input device, you need libiec61883, libraw1394 and +libavc1394 installed on your system. Use the configure option +--enable-libiec61883 to compile with the device enabled. +

+

The iec61883 capture device supports capturing from a video device +connected via IEEE1394 (FireWire), using libiec61883 and the new Linux +FireWire stack (juju). This is the default DV/HDV input method in Linux +Kernel 2.6.37 and later, since the old FireWire stack was removed. +

+

Specify the FireWire port to be used as input file, or "auto" +to choose the first port connected. +

+ +

26.6.1 Options

+ +
+
dvtype
+

Override autodetection of DV/HDV. This should only be used if auto +detection does not work, or if usage of a different device type +should be prohibited. Treating a DV device as HDV (or vice versa) will +not work and result in undefined behavior. +The values ‘auto’, ‘dv’ and ‘hdv’ are supported. +

+
+
dvbuffer
+

Set maximum size of buffer for incoming data, in frames. For DV, this is an exact value. For HDV, it is not frame exact, since HDV does not have a fixed frame size.

+
+
dvguid
+

Select the capture device by specifying its GUID. Capturing will only be performed from the specified device and fails if no device with the given GUID is found. This is useful to select the input if multiple devices are connected at the same time. Look at /sys/bus/firewire/devices to find out the GUIDs.

+
+
+ + +

26.6.2 Examples

+ +
    +
  • +Grab and show the input of a FireWire DV/HDV device. +
     
    ffplay -f iec61883 -i auto
    +
    + +
  • +Grab and record the input of a FireWire DV/HDV device, +using a packet buffer of 100000 packets if the source is HDV. +
     
    ffmpeg -f iec61883 -i auto -hdvbuffer 100000 out.mpg
    +
    + +
+ + +

26.7 jack

+ +

JACK input device. +

+

To enable this input device during configuration you need libjack +installed on your system. +

+

A JACK input device creates one or more JACK writable clients, one for +each audio channel, with name client_name:input_N, where +client_name is the name provided by the application, and N +is a number which identifies the channel. +Each writable client will send the acquired data to the FFmpeg input +device. +

+

Once you have created one or more JACK readable clients, you need to +connect them to one or more JACK writable clients. +

+

To connect or disconnect JACK clients you can use the jack_connect +and jack_disconnect programs, or do it through a graphical interface, +for example with qjackctl. +

+

To list the JACK clients and their properties you can invoke the command +jack_lsp. +

+

The following example shows how to capture a JACK readable client with ffmpeg.

 
# Create a JACK writable client with name "ffmpeg".
+$ ffmpeg -f jack -i ffmpeg -y out.wav
+
+# Start the sample jack_metro readable client.
+$ jack_metro -b 120 -d 0.2 -f 4000
+
+# List the current JACK clients.
+$ jack_lsp -c
+system:capture_1
+system:capture_2
+system:playback_1
+system:playback_2
+ffmpeg:input_1
+metro:120_bpm
+
+# Connect metro to the ffmpeg writable client.
+$ jack_connect metro:120_bpm ffmpeg:input_1
+
+ +

For more information read: +http://jackaudio.org/ +

+ +

26.8 lavfi

+ +

Libavfilter input virtual device. +

+

This input device reads data from the open output pads of a libavfilter +filtergraph. +

+

For each filtergraph open output, the input device will create a +corresponding stream which is mapped to the generated output. Currently +only video data is supported. The filtergraph is specified through the +option ‘graph’. +

+ +

26.8.1 Options

+ +
+
graph
+

Specify the filtergraph to use as input. Each video open output must be +labelled by a unique string of the form "outN", where N is a +number starting from 0 corresponding to the mapped input stream +generated by the device. +The first unlabelled output is automatically assigned to the "out0" +label, but all the others need to be specified explicitly. +

+

If not specified defaults to the filename specified for the input +device. +

+
+
graph_file
+

Set the filename of the filtergraph to be read and sent to the other +filters. Syntax of the filtergraph is the same as the one specified by +the option graph. +

+
+
+ + +

26.8.2 Examples

+ +
    +
  • +Create a color video stream and play it back with ffplay: +
     
    ffplay -f lavfi -graph "color=c=pink [out0]" dummy
    +
    + +
  • +As the previous example, but use filename for specifying the graph +description, and omit the "out0" label: +
     
    ffplay -f lavfi color=c=pink
    +
    + +
  • +Create three different video test filtered sources and play them: +
     
    ffplay -f lavfi -graph "testsrc [out0]; testsrc,hflip [out1]; testsrc,negate [out2]" test3
    +
    + +
  • +Read an audio stream from a file using the amovie source and play it +back with ffplay: +
     
    ffplay -f lavfi "amovie=test.wav"
    +
    + +
  • +Read an audio stream and a video stream and play it back with +ffplay: +
     
    ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
    +
    + +
+ + +

26.9 libdc1394

+ +

IIDC1394 input device, based on libdc1394 and libraw1394. +

+ +

26.10 openal

+ +

The OpenAL input device provides audio capture on all systems with a +working OpenAL 1.1 implementation. +

+

To enable this input device during configuration, you need OpenAL +headers and libraries installed on your system, and need to configure +FFmpeg with --enable-openal. +

+

OpenAL headers and libraries should be provided as part of your OpenAL +implementation, or as an additional download (an SDK). Depending on your +installation you may need to specify additional flags via the +--extra-cflags and --extra-ldflags for allowing the build +system to locate the OpenAL headers and libraries. +

+

An incomplete list of OpenAL implementations follows: +

+
+
Creative
+

The official Windows implementation, providing hardware acceleration +with supported devices and software fallback. +See http://openal.org/. +

+
OpenAL Soft
+

Portable, open source (LGPL) software implementation. Includes +backends for the most common sound APIs on the Windows, Linux, +Solaris, and BSD operating systems. +See http://kcat.strangesoft.net/openal.html. +

+
Apple
+

OpenAL is part of Core Audio, the official Mac OS X Audio interface. +See http://developer.apple.com/technologies/mac/audio-and-video.html +

+
+ +

This device allows one to capture from an audio input device handled +through OpenAL. +

+

You need to specify the name of the device to capture in the provided +filename. If the empty string is provided, the device will +automatically select the default device. You can get the list of the +supported devices by using the option list_devices. +

+ +

26.10.1 Options

+ +
+
channels
+

Set the number of channels in the captured audio. Only the values +‘1’ (monaural) and ‘2’ (stereo) are currently supported. +Defaults to ‘2’. +

+
+
sample_size
+

Set the sample size (in bits) of the captured audio. Only the values +‘8’ and ‘16’ are currently supported. Defaults to +‘16’. +

+
+
sample_rate
+

Set the sample rate (in Hz) of the captured audio. +Defaults to ‘44.1k’. +

+
+
list_devices
+

If set to ‘true’, print a list of devices and exit. +Defaults to ‘false’. +

+
+
+ + +

26.10.2 Examples

+ +

Print the list of OpenAL supported devices and exit: +

 
$ ffmpeg -list_devices true -f openal -i dummy out.ogg
+
+ +

Capture from the OpenAL device ‘DR-BT101 via PulseAudio’: +

 
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out.ogg
+
+ +

Capture from the default device (note the empty string '' as filename):

 
$ ffmpeg -f openal -i '' out.ogg
+
+ +

Capture from two devices simultaneously, writing to two different files, +within the same ffmpeg command: +

 
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out1.ogg -f openal -i 'ALSA Default' out2.ogg
+
+

Note: not all OpenAL implementations support multiple simultaneous capture - +try the latest OpenAL Soft if the above does not work. +

+ +

26.11 oss

+ +

Open Sound System input device. +

+

The filename to provide to the input device is the device node +representing the OSS input device, and is usually set to +‘/dev/dsp’. +

+

For example to grab from ‘/dev/dsp’ using ffmpeg use the +command: +

 
ffmpeg -f oss -i /dev/dsp /tmp/oss.wav
+
+ +

For more information about OSS see: +http://manuals.opensound.com/usersguide/dsp.html +

+ +

26.12 pulse

+ +

PulseAudio input device. +

+

To enable this output device you need to configure FFmpeg with --enable-libpulse. +

+

The filename to provide to the input device is a source device or the +string "default" +

+

To list the PulseAudio source devices and their properties you can invoke +the command pactl list sources. +

+

More information about PulseAudio can be found on http://www.pulseaudio.org. +

+ +

26.12.1 Options

+
+
server
+

Connect to a specific PulseAudio server, specified by an IP address. +Default server is used when not provided. +

+
+
name
+

Specify the application name PulseAudio will use when showing active clients, +by default it is the LIBAVFORMAT_IDENT string. +

+
+
stream_name
+

Specify the stream name PulseAudio will use when showing active streams, +by default it is "record". +

+
+
sample_rate
+

Specify the samplerate in Hz, by default 48kHz is used. +

+
+
channels
+

Specify the channels in use, by default 2 (stereo) is set. +

+
+
frame_size
+

Specify the number of bytes per frame, by default it is set to 1024. +

+
+
fragment_size
+

Specify the minimal buffering fragment in PulseAudio, it will affect the +audio latency. By default it is unset. +

+
+ + +

26.12.2 Examples

+

Record a stream from default device: +

 
ffmpeg -f pulse -i default /tmp/pulse.wav
+
+ + +

26.13 sndio

+ +

sndio input device. +

+

To enable this input device during configuration you need libsndio +installed on your system. +

+

The filename to provide to the input device is the device node +representing the sndio input device, and is usually set to +‘/dev/audio0’. +

+

For example to grab from ‘/dev/audio0’ using ffmpeg use the +command: +

 
ffmpeg -f sndio -i /dev/audio0 /tmp/oss.wav
+
+ + +

26.14 video4linux2, v4l2

+ +

Video4Linux2 input video device. +

+

"v4l2" can be used as alias for "video4linux2". +

+

If FFmpeg is built with v4l-utils support (by using the +--enable-libv4l2 configure option), it is possible to use it with the +-use_libv4l2 input device option. +

+

The name of the device to grab is a file device node, usually Linux +systems tend to automatically create such nodes when the device +(e.g. an USB webcam) is plugged into the system, and has a name of the +kind ‘/dev/videoN’, where N is a number associated to +the device. +

+

Video4Linux2 devices usually support a limited set of +widthxheight sizes and frame rates. You can check which are +supported using -list_formats all for Video4Linux2 devices. +Some devices, like TV cards, support one or more standards. It is possible +to list all the supported standards using -list_standards all. +

+

The time base for the timestamps is 1 microsecond. Depending on the kernel +version and configuration, the timestamps may be derived from the real time +clock (origin at the Unix Epoch) or the monotonic clock (origin usually at +boot time, unaffected by NTP or manual changes to the clock). The +‘-timestamps abs’ or ‘-ts abs’ option can be used to force +conversion into the real time clock. +

+

Some usage examples of the video4linux2 device with ffmpeg +and ffplay: +

    +
  • +Grab and show the input of a video4linux2 device: +
     
    ffplay -f video4linux2 -framerate 30 -video_size hd720 /dev/video0
    +
    + +
  • +Grab and record the input of a video4linux2 device, leave the +frame rate and size as previously set: +
     
    ffmpeg -f video4linux2 -input_format mjpeg -i /dev/video0 out.mpeg
    +
    +
+ +

For more information about Video4Linux, check http://linuxtv.org/. +

+ +

26.14.1 Options

+ +
+
standard
+

Set the standard. Must be the name of a supported standard. To get a +list of the supported standards, use the ‘list_standards’ +option. +

+
+
channel
+

Set the input channel number. Default to -1, which means using the +previously selected channel. +

+
+
video_size
+

Set the video frame size. The argument must be a string in the form +WIDTHxHEIGHT or a valid size abbreviation. +

+
+
pixel_format
+

Select the pixel format (only valid for raw video input). +

+
+
input_format
+

Set the preferred pixel format (for raw video) or a codec name. +This option allows one to select the input format, when several are +available. +

+
+
framerate
+

Set the preferred video frame rate. +

+
+
list_formats
+

List available formats (supported pixel formats, codecs, and frame +sizes) and exit. +

+

Available values are: +

+
all
+

Show all available (compressed and non-compressed) formats. +

+
+
raw
+

Show only raw video (non-compressed) formats. +

+
+
compressed
+

Show only compressed formats. +

+
+ +
+
list_standards
+

List supported standards and exit. +

+

Available values are: +

+
all
+

Show all supported standards. +

+
+ +
+
timestamps, ts
+

Set type of timestamps for grabbed frames. +

+

Available values are: +

+
default
+

Use timestamps from the kernel. +

+
+
abs
+

Use absolute timestamps (wall clock). +

+
+
mono2abs
+

Force conversion from monotonic to absolute timestamps. +

+
+ +

Default value is default. +

+
+ + +

26.15 vfwcap

+ +

VfW (Video for Windows) capture input device. +

+

The filename passed as input is the capture driver number, ranging from +0 to 9. You may use "list" as filename to print a list of drivers. Any +other filename will be interpreted as device number 0. +

+ +

26.16 x11grab

+ +

X11 video input device. +

+

This device allows one to capture a region of an X11 display. +

+

The filename passed as input has the syntax: +

 
[hostname]:display_number.screen_number[+x_offset,y_offset]
+
+ +

hostname:display_number.screen_number specifies the +X11 display name of the screen to grab from. hostname can be +omitted, and defaults to "localhost". The environment variable +DISPLAY contains the default display name. +

+

x_offset and y_offset specify the offsets of the grabbed +area with respect to the top-left border of the X11 screen. They +default to 0. +

+

Check the X11 documentation (e.g. man X) for more detailed information. +

+

Use the dpyinfo program for getting basic information about the +properties of your X11 display (e.g. grep for "name" or "dimensions"). +

+

For example to grab from ‘:0.0’ using ffmpeg: +

 
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0 out.mpg
+
+ +

Grab at position 10,20: +

 
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
+
+ + +

26.16.1 Options

+ +
+
draw_mouse
+

Specify whether to draw the mouse pointer. A value of 0 specify +not to draw the pointer. Default value is 1. +

+
+
follow_mouse
+

Make the grabbed area follow the mouse. The argument can be +centered or a number of pixels PIXELS. +

+

When it is specified with "centered", the grabbing region follows the mouse +pointer and keeps the pointer at the center of region; otherwise, the region +follows only when the mouse pointer reaches within PIXELS (greater than +zero) to the edge of region. +

+

For example: +

 
ffmpeg -f x11grab -follow_mouse centered -framerate 25 -video_size cif -i :0.0 out.mpg
+
+ +

To follow only when the mouse pointer reaches within 100 pixels to edge: +

 
ffmpeg -f x11grab -follow_mouse 100 -framerate 25 -video_size cif -i :0.0 out.mpg
+
+ +
+
framerate
+

Set the grabbing frame rate. Default value is ntsc, +corresponding to a frame rate of 30000/1001. +

+
+
show_region
+

Show grabbed region on screen. +

+

If show_region is specified with 1, then the grabbing +region will be indicated on screen. With this option, it is easy to +know what is being grabbed if only a portion of the screen is grabbed. +

+

For example: +

 
ffmpeg -f x11grab -show_region 1 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
+
+ +

With follow_mouse: +

 
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_size cif -i :0.0 out.mpg
+
+ +
+
video_size
+

Set the video frame size. Default value is vga. +

+
+ + +

27. Output Devices

+ +

Output devices are configured elements in FFmpeg that can write +multimedia data to an output device attached to your system. +

+

When you configure your FFmpeg build, all the supported output devices +are enabled by default. You can list all available ones using the +configure option "–list-outdevs". +

+

You can disable all the output devices using the configure option +"–disable-outdevs", and selectively enable an output device using the +option "–enable-outdev=OUTDEV", or you can disable a particular +input device using the option "–disable-outdev=OUTDEV". +

+

The option "-formats" of the ff* tools will display the list of +enabled output devices (amongst the muxers). +

+

A description of the currently available output devices follows. +

+ +

27.1 alsa

+ +

ALSA (Advanced Linux Sound Architecture) output device. +

+ +

27.1.1 Examples

+ +
    +
  • +Play a file on default ALSA device: +
     
    ffmpeg -i INPUT -f alsa default
    +
    + +
  • +Play a file on soundcard 1, audio device 7: +
     
    ffmpeg -i INPUT -f alsa hw:1,7
    +
    +
+ + +

27.2 caca

+ +

CACA output device. +

+

This output device allows one to show a video stream in CACA window. +Only one CACA window is allowed per application, so you can +have only one instance of this output device in an application. +

+

To enable this output device you need to configure FFmpeg with +--enable-libcaca. +libcaca is a graphics library that outputs text instead of pixels. +

+

For more information about libcaca, check: +http://caca.zoy.org/wiki/libcaca +

+ +

27.2.1 Options

+ +
+
window_title
+

Set the CACA window title, if not specified default to the filename +specified for the output device. +

+
+
window_size
+

Set the CACA window size, can be a string of the form +widthxheight or a video size abbreviation. +If not specified it defaults to the size of the input video. +

+
+
driver
+

Set display driver. +

+
+
algorithm
+

Set dithering algorithm. Dithering is necessary +because the picture being rendered has usually far more colours than +the available palette. +The accepted values are listed with -list_dither algorithms. +

+
+
antialias
+

Set antialias method. Antialiasing smoothens the rendered +image and avoids the commonly seen staircase effect. +The accepted values are listed with -list_dither antialiases. +

+
+
charset
+

Set which characters are going to be used when rendering text. +The accepted values are listed with -list_dither charsets. +

+
+
color
+

Set color to be used when rendering text. +The accepted values are listed with -list_dither colors. +

+
+
list_drivers
+

If set to ‘true’, print a list of available drivers and exit. +

+
+
list_dither
+

List available dither options related to the argument. +The argument must be one of algorithms, antialiases, +charsets, colors. +

+
+ + +

27.2.2 Examples

+ +
    +
  • The following command shows the ffmpeg output in a CACA window, forcing its size to 80x25:
     
    ffmpeg -i INPUT -vcodec rawvideo -pix_fmt rgb24 -window_size 80x25 -f caca -
    +
    + +
  • +Show the list of available drivers and exit: +
     
    ffmpeg -i INPUT -pix_fmt rgb24 -f caca -list_drivers true -
    +
    + +
  • +Show the list of available dither colors and exit: +
     
    ffmpeg -i INPUT -pix_fmt rgb24 -f caca -list_dither colors -
    +
    +
+ + +

27.3 decklink

+ +

The decklink output device provides playback capabilities for Blackmagic +DeckLink devices. +

+

To enable this output device, you need the Blackmagic DeckLink SDK and you +need to configure with the appropriate --extra-cflags +and --extra-ldflags. +On Windows, you need to run the IDL files through widl. +

+

DeckLink is very picky about the formats it supports. Pixel format is always +uyvy422, framerate and video size must be determined for your device with +-list_formats 1. Audio sample rate is always 48 kHz. +

+ +

27.3.1 Options

+ +
+
list_devices
+

If set to ‘true’, print a list of devices and exit. +Defaults to ‘false’. +

+
+
list_formats
+

If set to ‘true’, print a list of supported formats and exit. +Defaults to ‘false’. +

+
+
preroll
+

Amount of time to preroll video in seconds. +Defaults to ‘0.5’. +

+
+
+ + +

27.3.2 Examples

+ +
    +
  • +List output devices: +
     
    ffmpeg -i test.avi -f decklink -list_devices 1 dummy
    +
    + +
  • +List supported formats: +
     
    ffmpeg -i test.avi -f decklink -list_formats 1 'DeckLink Mini Monitor'
    +
    + +
  • +Play video clip: +
     
    ffmpeg -i test.avi -f decklink -pix_fmt uyvy422 'DeckLink Mini Monitor'
    +
    + +
  • +Play video clip with non-standard framerate or video size: +
     
    ffmpeg -i test.avi -f decklink -pix_fmt uyvy422 -s 720x486 -r 24000/1001 'DeckLink Mini Monitor'
    +
    + +
+ + +

27.4 fbdev

+ +

Linux framebuffer output device. +

+

The Linux framebuffer is a graphic hardware-independent abstraction +layer to show graphics on a computer monitor, typically on the +console. It is accessed through a file device node, usually +‘/dev/fb0’. +

+

For more detailed information read the file +‘Documentation/fb/framebuffer.txt’ included in the Linux source tree. +

+ +

27.4.1 Options

+
+
xoffset
+
yoffset
+

Set x/y coordinate of top left corner. Default is 0. +

+
+ + +

27.4.2 Examples

+

Play a file on framebuffer device ‘/dev/fb0’. +Required pixel format depends on current framebuffer settings. +

 
ffmpeg -re -i INPUT -vcodec rawvideo -pix_fmt bgra -f fbdev /dev/fb0
+
+ +

See also http://linux-fbdev.sourceforge.net/, and fbset(1). +

+ +

27.5 opengl

+

OpenGL output device. +

+

To enable this output device you need to configure FFmpeg with --enable-opengl. +

+

This output device allows one to render to OpenGL context. +Context may be provided by application or default SDL window is created. +

+

When device renders to external context, application must implement handlers for following messages: +AV_CTL_MESSAGE_CREATE_WINDOW_BUFFER - create OpenGL context on current thread. +AV_CTL_MESSAGE_PREPARE_WINDOW_BUFFER - make OpenGL context current. +AV_CTL_MESSAGE_DISPLAY_WINDOW_BUFFER - swap buffers. +AV_CTL_MESSAGE_DESTROY_WINDOW_BUFFER - destroy OpenGL context. +Application is also required to inform a device about current resolution by sending AV_DEVICE_WINDOW_RESIZED message. +

+ +

27.5.1 Options

+
+
background
+

Set background color. Black is a default. +

+
no_window
+

Disables default SDL window when set to non-zero value. +Application must provide OpenGL context and both window_size_cb and window_swap_buffers_cb callbacks when set. +

+
window_title
+

Set the SDL window title, if not specified default to the filename specified for the output device. +Ignored when ‘no_window’ is set. +

+
+
+ + +

27.5.2 Examples

+

Play a file on SDL window using OpenGL rendering: +

 
ffmpeg  -i INPUT -f opengl "window title"
+
+ + +

27.6 oss

+ +

OSS (Open Sound System) output device. +

+ +

27.7 pulse

+ +

PulseAudio output device. +

+

To enable this output device you need to configure FFmpeg with --enable-libpulse. +

+

More information about PulseAudio can be found on http://www.pulseaudio.org +

+ +

27.7.1 Options

+
+
server
+

Connect to a specific PulseAudio server, specified by an IP address. +Default server is used when not provided. +

+
+
name
+

Specify the application name PulseAudio will use when showing active clients, +by default it is the LIBAVFORMAT_IDENT string. +

+
+
stream_name
+

Specify the stream name PulseAudio will use when showing active streams, +by default it is set to the specified output name. +

+
+
device
+

Specify the device to use. Default device is used when not provided. +List of output devices can be obtained with command pactl list sinks. +

+
+
buffer_size
+
buffer_duration
+

Control the size and duration of the PulseAudio buffer. A small buffer +gives more control, but requires more frequent updates. +

+

buffer_size’ specifies size in bytes while +‘buffer_duration’ specifies duration in milliseconds. +

+

When both options are provided then the highest value is used +(duration is recalculated to bytes using stream parameters). If they +are set to 0 (which is default), the device will use the default +PulseAudio duration value. By default PulseAudio set buffer duration +to around 2 seconds. +

+
+ + +

27.7.2 Examples

+

Play a file on default device on default server: +

 
ffmpeg  -i INPUT -f pulse "stream name"
+
+ + +

27.8 sdl

+ +

SDL (Simple DirectMedia Layer) output device. +

+

This output device allows one to show a video stream in an SDL +window. Only one SDL window is allowed per application, so you can +have only one instance of this output device in an application. +

+

To enable this output device you need libsdl installed on your system +when configuring your build. +

+

For more information about SDL, check: +http://www.libsdl.org/ +

+ +

27.8.1 Options

+ +
+
window_title
+

Set the SDL window title, if not specified default to the filename +specified for the output device. +

+
+
icon_title
+

Set the name of the iconified SDL window, if not specified it is set +to the same value of window_title. +

+
+
window_size
+

Set the SDL window size, can be a string of the form +widthxheight or a video size abbreviation. +If not specified it defaults to the size of the input video, +downscaled according to the aspect ratio. +

+
+
window_fullscreen
+

Set fullscreen mode when non-zero value is provided. +Default value is zero. +

+
+ + +

27.8.2 Interactive commands

+ +

The window created by the device can be controlled through the +following interactive commands. +

+
+
<q, ESC>
+

Quit the device immediately. +

+
+ + +

27.8.3 Examples

+ +

The following command shows the ffmpeg output in an SDL window, forcing its size to the qcif format:

 
ffmpeg -i INPUT -vcodec rawvideo -pix_fmt yuv420p -window_size qcif -f sdl "SDL output"
+
+ + +

27.9 sndio

+ +

sndio audio output device. +

+ +

27.10 xv

+ +

XV (XVideo) output device. +

+

This output device allows one to show a video stream in a X Window System +window. +

+ +

27.10.1 Options

+ +
+
display_name
+

Specify the hardware display name, which determines the display and +communications domain to be used. +

+

The display name or DISPLAY environment variable can be a string in +the format hostname[:number[.screen_number]]. +

+

hostname specifies the name of the host machine on which the +display is physically attached. number specifies the number of +the display server on that host machine. screen_number specifies +the screen to be used on that server. +

+

If unspecified, it defaults to the value of the DISPLAY environment +variable. +

+

For example, dual-headed:0.1 would specify screen 1 of display +0 on the machine named “dual-headed”. +

+

Check the X11 specification for more detailed information about the +display name format. +

+
+
window_size
+

Set the created window size, can be a string of the form +widthxheight or a video size abbreviation. If not +specified it defaults to the size of the input video. +

+
+
window_x
+
window_y
+

Set the X and Y window offsets for the created window. They are both +set to 0 by default. The values may be ignored by the window manager. +

+
+
window_title
+

Set the window title, if not specified default to the filename +specified for the output device. +

+
+ +

For more information about XVideo see http://www.x.org/. +

+ +

27.10.2 Examples

+ +
    +
  • +Decode, display and encode video input with ffmpeg at the +same time: +
     
    ffmpeg -i INPUT OUTPUT -f xv display
    +
    + +
  • +Decode and display the input video to multiple X11 windows: +
     
    ffmpeg -i INPUT -f xv normal -vf negate -f xv negated
    +
    +
+ + +

28. Resampler Options

+ +

The audio resampler supports the following named options. +

+

Options may be set by specifying -option value in the +FFmpeg tools, option=value for the aresample filter, +by setting the value explicitly in the +SwrContext options or using the ‘libavutil/opt.h’ API for +programmatic use. +

+
+
ich, in_channel_count
+

Set the number of input channels. Default value is 0. Setting this +value is not mandatory if the corresponding channel layout +‘in_channel_layout’ is set. +

+
+
och, out_channel_count
+

Set the number of output channels. Default value is 0. Setting this +value is not mandatory if the corresponding channel layout +‘out_channel_layout’ is set. +

+
+
uch, used_channel_count
+

Set the number of used input channels. Default value is 0. This option is +only used for special remapping. +

+
+
isr, in_sample_rate
+

Set the input sample rate. Default value is 0. +

+
+
osr, out_sample_rate
+

Set the output sample rate. Default value is 0. +

+
+
isf, in_sample_fmt
+

Specify the input sample format. It is set by default to none. +

+
+
osf, out_sample_fmt
+

Specify the output sample format. It is set by default to none. +

+
+
tsf, internal_sample_fmt
+

Set the internal sample format. Default value is none. +This will automatically be chosen when it is not explicitly set. +

+
+
icl, in_channel_layout
+
ocl, out_channel_layout
+

Set the input/output channel layout. +

+

See (ffmpeg-utils)channel layout syntax +for the required syntax. +

+
+
clev, center_mix_level
+

Set the center mix level. It is a value expressed in deciBel, and must be +in the interval [-32,32]. +

+
+
slev, surround_mix_level
+

Set the surround mix level. It is a value expressed in deciBel, and must +be in the interval [-32,32]. +

+
+
lfe_mix_level
+

Set LFE mix into non LFE level. It is used when there is a LFE input but no +LFE output. It is a value expressed in deciBel, and must +be in the interval [-32,32]. +

+
+
rmvol, rematrix_volume
+

Set rematrix volume. Default value is 1.0. +

+
+
rematrix_maxval
+

Set maximum output value for rematrixing. This can be used to prevent clipping vs. preventing volume reduction. A value of 1.0 prevents clipping.

+
+
flags, swr_flags
+

Set flags used by the converter. Default value is 0. +

+

It supports the following individual flags: +

+
res
+

force resampling, this flag forces resampling to be used even when the +input and output sample rates match. +

+
+ +
+
dither_scale
+

Set the dither scale. Default value is 1. +

+
+
dither_method
+

Set dither method. Default value is 0. +

+

Supported values: +

+
rectangular
+

select rectangular dither +

+
triangular
+

select triangular dither +

+
triangular_hp
+

select triangular dither with high pass +

+
lipshitz
+

select lipshitz noise shaping dither +

+
shibata
+

select shibata noise shaping dither +

+
low_shibata
+

select low shibata noise shaping dither +

+
high_shibata
+

select high shibata noise shaping dither +

+
f_weighted
+

select f-weighted noise shaping dither +

+
modified_e_weighted
+

select modified-e-weighted noise shaping dither +

+
improved_e_weighted
+

select improved-e-weighted noise shaping dither +

+
+
+ +
+
resampler
+

Set resampling engine. Default value is swr. +

+

Supported values: +

+
swr
+

select the native SW Resampler; filter options precision and cheby are not +applicable in this case. +

+
soxr
+

select the SoX Resampler (where available); compensation, and filter options +filter_size, phase_shift, filter_type & kaiser_beta, are not applicable in this +case. +

+
+ +
+
filter_size
+

For swr only, set resampling filter size, default value is 32. +

+
+
phase_shift
+

For swr only, set resampling phase shift, default value is 10, and must be in +the interval [0,30]. +

+
+
linear_interp
+

Use Linear Interpolation if set to 1, default value is 0. +

+
+
cutoff
+

Set cutoff frequency (swr: 6dB point; soxr: 0dB point) ratio; must be a float +value between 0 and 1. Default value is 0.97 with swr, and 0.91 with soxr +(which, with a sample-rate of 44100, preserves the entire audio band to 20kHz). +

+
+
precision
+

For soxr only, the precision in bits to which the resampled signal will be +calculated. The default value of 20 (which, with suitable dithering, is +appropriate for a destination bit-depth of 16) gives SoX’s ’High Quality’; a +value of 28 gives SoX’s ’Very High Quality’. +

+
+
cheby
+

For soxr only, selects passband rolloff none (Chebyshev) & higher-precision +approximation for ’irrational’ ratios. Default value is 0. +

+
+
async
+

For swr only, simple 1 parameter audio sync to timestamps using stretching, +squeezing, filling and trimming. Setting this to 1 will enable filling and +trimming, larger values represent the maximum amount in samples that the data +may be stretched or squeezed for each second. +Default value is 0, thus no compensation is applied to make the samples match +the audio timestamps. +

+
+
first_pts
+

For swr only, assume the first pts should be this value. The time unit is 1 / sample rate. +This allows for padding/trimming at the start of stream. By default, no +assumption is made about the first frame’s expected pts, so no padding or +trimming is done. For example, this could be set to 0 to pad the beginning with +silence if an audio stream starts after the video stream or to trim any samples +with a negative pts due to encoder delay. +

+
+
min_comp
+

For swr only, set the minimum difference between timestamps and audio data (in +seconds) to trigger stretching/squeezing/filling or trimming of the +data to make it match the timestamps. The default is that +stretching/squeezing/filling and trimming is disabled +(‘min_comp’ = FLT_MAX). +

+
+
min_hard_comp
+

For swr only, set the minimum difference between timestamps and audio data (in +seconds) to trigger adding/dropping samples to make it match the +timestamps. This option effectively is a threshold to select between +hard (trim/fill) and soft (squeeze/stretch) compensation. Note that +all compensation is by default disabled through ‘min_comp’. +The default is 0.1. +

+
+
comp_duration
+

For swr only, set duration (in seconds) over which data is stretched/squeezed +to make it match the timestamps. Must be a non-negative double float value, +default value is 1.0. +

+
+
max_soft_comp
+

For swr only, set maximum factor by which data is stretched/squeezed to make it +match the timestamps. Must be a non-negative double float value, default value +is 0. +

+
+
matrix_encoding
+

Select matrixed stereo encoding. +

+

It accepts the following values: +

+
none
+

select none +

+
dolby
+

select Dolby +

+
dplii
+

select Dolby Pro Logic II +

+
+ +

Default value is none. +

+
+
filter_type
+

For swr only, select resampling filter type. This only affects resampling +operations. +

+

It accepts the following values: +

+
cubic
+

select cubic +

+
blackman_nuttall
+

select Blackman Nuttall Windowed Sinc +

+
kaiser
+

select Kaiser Windowed Sinc +

+
+ +
+
kaiser_beta
+

For swr only, set Kaiser Window Beta value. Must be an integer in the +interval [2,16], default value is 9. +

+
+
output_sample_bits
+

For swr only, set number of used output sample bits for dithering. Must be an integer in the +interval [0,64], default value is 0, which means it’s not used. +

+
+
+ +

+

+

29. Scaler Options

+ +

The video scaler supports the following named options. +

+

Options may be set by specifying -option value in the +FFmpeg tools. For programmatic use, they can be set explicitly in the +SwsContext options or through the ‘libavutil/opt.h’ API. +

+
+
+

+

+
sws_flags
+

Set the scaler flags. This is also used to set the scaling +algorithm. Only a single algorithm should be selected. +

+

It accepts the following values: +

+
fast_bilinear
+

Select fast bilinear scaling algorithm. +

+
+
bilinear
+

Select bilinear scaling algorithm. +

+
+
bicubic
+

Select bicubic scaling algorithm. +

+
+
experimental
+

Select experimental scaling algorithm. +

+
+
neighbor
+

Select nearest neighbor rescaling algorithm. +

+
+
area
+

Select averaging area rescaling algorithm. +

+
+
bicublin
+

Select bicubic scaling algorithm for the luma component, bilinear for +chroma components. +

+
+
gauss
+

Select Gaussian rescaling algorithm. +

+
+
sinc
+

Select sinc rescaling algorithm. +

+
+
lanczos
+

Select lanczos rescaling algorithm. +

+
+
spline
+

Select natural bicubic spline rescaling algorithm. +

+
+
print_info
+

Enable printing/debug logging. +

+
+
accurate_rnd
+

Enable accurate rounding. +

+
+
full_chroma_int
+

Enable full chroma interpolation. +

+
+
full_chroma_inp
+

Select full chroma input. +

+
+
bitexact
+

Enable bitexact output. +

+
+ +
+
srcw
+

Set source width. +

+
+
srch
+

Set source height. +

+
+
dstw
+

Set destination width. +

+
+
dsth
+

Set destination height. +

+
+
src_format
+

Set source pixel format (must be expressed as an integer). +

+
+
dst_format
+

Set destination pixel format (must be expressed as an integer). +

+
+
src_range
+

Select source range. +

+
+
dst_range
+

Select destination range. +

+
+
param0, param1
+

Set scaling algorithm parameters. The specified values are specific of +some scaling algorithms and ignored by others. The specified values +are floating point number values. +

+
+
sws_dither
+

Set the dithering algorithm. Accepts one of the following +values. Default value is ‘auto’. +

+
+
auto
+

automatic choice +

+
+
none
+

no dithering +

+
+
bayer
+

bayer dither +

+
+
ed
+

error diffusion dither +

+
+ +
+
+ + +

30. Filtering Introduction

+ +

Filtering in FFmpeg is enabled through the libavfilter library. +

+

In libavfilter, a filter can have multiple inputs and multiple +outputs. +To illustrate the sorts of things that are possible, we consider the +following filtergraph. +

+
 
                [main]
+input --> split ---------------------> overlay --> output
+            |                             ^
+            |[tmp]                  [flip]|
+            +-----> crop --> vflip -------+
+
+ +

This filtergraph splits the input stream in two streams, sends one +stream through the crop filter and the vflip filter before merging it +back with the other stream by overlaying it on top. You can use the +following command to achieve this: +

+
 
ffmpeg -i INPUT -vf "split [main][tmp]; [tmp] crop=iw:ih/2:0:0, vflip [flip]; [main][flip] overlay=0:H/2" OUTPUT
+
+ +

The result will be that in output the top half of the video is mirrored +onto the bottom half. +

+

Filters in the same linear chain are separated by commas, and distinct +linear chains of filters are separated by semicolons. In our example, +crop,vflip are in one linear chain, split and +overlay are separately in another. The points where the linear +chains join are labelled by names enclosed in square brackets. In the +example, the split filter generates two outputs that are associated to +the labels [main] and [tmp]. +

+

The stream sent to the second output of split, labelled as +[tmp], is processed through the crop filter, which crops +away the lower half part of the video, and then vertically flipped. The +overlay filter takes in input the first unchanged output of the +split filter (which was labelled as [main]), and overlay on its +lower half the output generated by the crop,vflip filterchain. +

+

Some filters take in input a list of parameters: they are specified +after the filter name and an equal sign, and are separated from each other +by a colon. +

+

There exist so-called source filters that do not have an +audio/video input, and sink filters that will not have audio/video +output. +

+ + +

31. graph2dot

+ +

The ‘graph2dot’ program included in the FFmpeg ‘tools’ +directory can be used to parse a filtergraph description and issue a +corresponding textual representation in the dot language. +

+

Invoke the command: +

 
graph2dot -h
+
+ +

to see how to use ‘graph2dot’. +

+

You can then pass the dot description to the ‘dot’ program (from +the graphviz suite of programs) and obtain a graphical representation +of the filtergraph. +

+

For example the sequence of commands: +

 
echo GRAPH_DESCRIPTION | \
+tools/graph2dot -o graph.tmp && \
+dot -Tpng graph.tmp -o graph.png && \
+display graph.png
+
+ +

can be used to create and display an image representing the graph +described by the GRAPH_DESCRIPTION string. Note that this string must be +a complete self-contained graph, with its inputs and outputs explicitly defined. +For example if your command line is of the form: +

 
ffmpeg -i infile -vf scale=640:360 outfile
+
+

your GRAPH_DESCRIPTION string will need to be of the form: +

 
nullsrc,scale=640:360,nullsink
+
+

you may also need to set the nullsrc parameters and add a format +filter in order to simulate a specific input file. +

+ + +

32. Filtergraph description

+ +

A filtergraph is a directed graph of connected filters. It can contain +cycles, and there can be multiple links between a pair of +filters. Each link has one input pad on one side connecting it to one +filter from which it takes its input, and one output pad on the other +side connecting it to the one filter accepting its output. +

+

Each filter in a filtergraph is an instance of a filter class +registered in the application, which defines the features and the +number of input and output pads of the filter. +

+

A filter with no input pads is called a "source", a filter with no +output pads is called a "sink". +

+

+

+

32.1 Filtergraph syntax

+ +

A filtergraph can be represented using a textual representation, which is +recognized by the ‘-filter’/‘-vf’ and ‘-filter_complex’ +options in ffmpeg and ‘-vf’ in ffplay, and by the +avfilter_graph_parse()/avfilter_graph_parse2() function defined in +‘libavfilter/avfilter.h’. +

+

A filterchain consists of a sequence of connected filters, each one +connected to the previous one in the sequence. A filterchain is +represented by a list of ","-separated filter descriptions. +

+

A filtergraph consists of a sequence of filterchains. A sequence of +filterchains is represented by a list of ";"-separated filterchain +descriptions. +

+

A filter is represented by a string of the form: +[in_link_1]...[in_link_N]filter_name=arguments[out_link_1]...[out_link_M] +

+

filter_name is the name of the filter class of which the +described filter is an instance of, and has to be the name of one of +the filter classes registered in the program. +The name of the filter class is optionally followed by a string +"=arguments". +

+

arguments is a string which contains the parameters used to +initialize the filter instance. It may have one of the following forms: +

    +
  • +A ’:’-separated list of key=value pairs. + +
  • +A ’:’-separated list of value. In this case, the keys are assumed to be +the option names in the order they are declared. E.g. the fade filter +declares three options in this order – ‘type’, ‘start_frame’ and +‘nb_frames’. Then the parameter list in:0:30 means that the value +in is assigned to the option ‘type’, 0 to +‘start_frame’ and 30 to ‘nb_frames’. + +
  • +A ’:’-separated list of mixed direct value and long key=value +pairs. The direct value must precede the key=value pairs, and +follow the same constraints order of the previous point. The following +key=value pairs can be set in any preferred order. + +
+ +

If the option value itself is a list of items (e.g. the format filter +takes a list of pixel formats), the items in the list are usually separated by +’|’. +

+

The list of arguments can be quoted using the character "’" as initial +and ending mark, and the character ’\’ for escaping the characters +within the quoted text; otherwise the argument string is considered +terminated when the next special character (belonging to the set +"[]=;,") is encountered. +

+

The name and arguments of the filter are optionally preceded and +followed by a list of link labels. +A link label allows one to name a link and associate it to a filter output +or input pad. The preceding labels in_link_1 +... in_link_N, are associated to the filter input pads, +the following labels out_link_1 ... out_link_M, are +associated to the output pads. +

+

When two link labels with the same name are found in the +filtergraph, a link between the corresponding input and output pad is +created. +

+

If an output pad is not labelled, it is linked by default to the first +unlabelled input pad of the next filter in the filterchain. +For example in the filterchain: +

 
nullsrc, split[L1], [L2]overlay, nullsink
+
+

the split filter instance has two output pads, and the overlay filter +instance two input pads. The first output pad of split is labelled +"L1", the first input pad of overlay is labelled "L2", and the second +output pad of split is linked to the second input pad of overlay, +which are both unlabelled. +

+

In a complete filterchain all the unlabelled filter input and output +pads must be connected. A filtergraph is considered valid if all the +filter input and output pads of all the filterchains are connected. +

+

Libavfilter will automatically insert scale filters where format +conversion is required. It is possible to specify swscale flags +for those automatically inserted scalers by prepending +sws_flags=flags; +to the filtergraph description. +

+

Follows a BNF description for the filtergraph syntax: +

 
NAME             ::= sequence of alphanumeric characters and '_'
+LINKLABEL        ::= "[" NAME "]"
+LINKLABELS       ::= LINKLABEL [LINKLABELS]
+FILTER_ARGUMENTS ::= sequence of chars (eventually quoted)
+FILTER           ::= [LINKLABELS] NAME ["=" FILTER_ARGUMENTS] [LINKLABELS]
+FILTERCHAIN      ::= FILTER [,FILTERCHAIN]
+FILTERGRAPH      ::= [sws_flags=flags;] FILTERCHAIN [;FILTERGRAPH]
+
+ + +

32.2 Notes on filtergraph escaping

+ +

Filtergraph description composition entails several levels of +escaping. See (ffmpeg-utils)quoting_and_escaping for more +information about the employed escaping procedure. +

+

A first level escaping affects the content of each filter option +value, which may contain the special character : used to +separate values, or one of the escaping characters \'. +

+

A second level escaping affects the whole filter description, which +may contain the escaping characters \' or the special +characters [],; used by the filtergraph description. +

+

Finally, when you specify a filtergraph on a shell commandline, you +need to perform a third level escaping for the shell special +characters contained within it. +

+

For example, consider the following string to be embedded in +the drawtext filter description ‘text’ value: +

 
this is a 'string': may contain one, or more, special characters
+
+ +

This string contains the ' special escaping character, and the +: special character, so it needs to be escaped in this way: +

 
text=this is a \'string\'\: may contain one, or more, special characters
+
+ +

A second level of escaping is required when embedding the filter +description in a filtergraph description, in order to escape all the +filtergraph special characters. Thus the example above becomes: +

 
drawtext=text=this is a \\\'string\\\'\\: may contain one\, or more\, special characters
+
+

(note that in addition to the \' escaping special characters, +also , needs to be escaped). +

+

Finally an additional level of escaping is needed when writing the +filtergraph description in a shell command, which depends on the +escaping rules of the adopted shell. For example, assuming that +\ is special and needs to be escaped with another \, the +previous string will finally result in: +

 
-vf "drawtext=text=this is a \\\\\\'string\\\\\\'\\\\: may contain one\\, or more\\, special characters"
+
+ + +

33. Timeline editing

+ +

Some filters support a generic ‘enable’ option. For the filters +supporting timeline editing, this option can be set to an expression which is +evaluated before sending a frame to the filter. If the evaluation is non-zero, +the filter will be enabled, otherwise the frame will be sent unchanged to the +next filter in the filtergraph. +

+

The expression accepts the following values: +

+
t
+

timestamp expressed in seconds, NAN if the input timestamp is unknown +

+
+
n
+

sequential number of the input frame, starting from 0 +

+
+
pos
+

the position in the file of the input frame, NAN if unknown +

+
+ +

Additionally, these filters support an ‘enable’ command that can be used +to re-define the expression. +

+

Like any other filtering option, the ‘enable’ option follows the same +rules. +

+

For example, to enable a blur filter (smartblur) from 10 seconds to 3 +minutes, and a curves filter starting at 3 seconds: +

 
smartblur = enable='between(t,10,3*60)',
+curves    = enable='gte(t,3)' : preset=cross_process
+
+ + + +

34. Audio Filters

+ +

When you configure your FFmpeg build, you can disable any of the +existing filters using --disable-filters. +The configure output will show the audio filters included in your +build. +

+

Below is a description of the currently available audio filters. +

+ +

34.1 aconvert

+ +

Convert the input audio format to the specified formats. +

+

This filter is deprecated. Use aformat instead. +

+

The filter accepts a string of the form: +"sample_format:channel_layout". +

+

sample_format specifies the sample format, and can be a string or the +corresponding numeric value defined in ‘libavutil/samplefmt.h’. Use ’p’ +suffix for a planar sample format. +

+

channel_layout specifies the channel layout, and can be a string +or the corresponding number value defined in ‘libavutil/channel_layout.h’. +

+

The special parameter "auto", signifies that the filter will +automatically select the output format depending on the output filter. +

+ +

34.1.1 Examples

+ +
    +
  • +Convert input to float, planar, stereo: +
     
    aconvert=fltp:stereo
    +
    + +
  • +Convert input to unsigned 8-bit, automatically select out channel layout: +
     
    aconvert=u8:auto
    +
    +
+ + +

34.2 adelay

+ +

Delay one or more audio channels. +

+

Samples in delayed channel are filled with silence. +

+

The filter accepts the following option: +

+
+
delays
+

Set list of delays in milliseconds for each channel separated by ’|’. +At least one delay greater than 0 should be provided. +Unused delays will be silently ignored. If number of given delays is +smaller than number of channels all remaining channels will not be delayed. +

+
+ + +

34.2.1 Examples

+ +
    +
  • +Delay first channel by 1.5 seconds, the third channel by 0.5 seconds and leave +the second channel (and any other channels that may be present) unchanged. +
     
    adelay=1500|0|500
    +
    +
+ + +

34.3 aecho

+ +

Apply echoing to the input audio. +

+

Echoes are reflected sound and can occur naturally amongst mountains +(and sometimes large buildings) when talking or shouting; digital echo +effects emulate this behaviour and are often used to help fill out the +sound of a single instrument or vocal. The time difference between the +original signal and the reflection is the delay, and the +loudness of the reflected signal is the decay. +Multiple echoes can have different delays and decays. +

+

A description of the accepted parameters follows. +

+
+
in_gain
+

Set input gain of reflected signal. Default is 0.6. +

+
+
out_gain
+

Set output gain of reflected signal. Default is 0.3. +

+
+
delays
+

Set list of time intervals in milliseconds between original signal and reflections +separated by ’|’. Allowed range for each delay is (0 - 90000.0]. +Default is 1000. +

+
+
decays
+

Set list of loudnesses of reflected signals separated by ’|’. +Allowed range for each decay is (0 - 1.0]. +Default is 0.5. +

+
+ + +

34.3.1 Examples

+ +
    +
  • +Make it sound as if there are twice as many instruments as are actually playing: +
     
    aecho=0.8:0.88:60:0.4
    +
    + +
If delay is very short, then it sounds like a (metallic) robot playing music: +
     
    aecho=0.8:0.88:6:0.4
    +
    + +
  • +A longer delay will sound like an open air concert in the mountains: +
     
    aecho=0.8:0.9:1000:0.3
    +
    + +
  • +Same as above but with one more mountain: +
     
    aecho=0.8:0.9:1000|1800:0.3|0.25
    +
    +
+ + +

34.4 aeval

+ +

Modify an audio signal according to the specified expressions. +

+

This filter accepts one or more expressions (one for each channel), +which are evaluated and used to modify a corresponding audio signal. +

+

This filter accepts the following options: +

+
+
exprs
+

Set the ’|’-separated expressions list for each separate channel. If +the number of input channels is greater than the number of +expressions, the last specified expression is used for the remaining +output channels. +

+
+
channel_layout, c
+

Set output channel layout. If not specified, the channel layout is +specified by the number of expressions. If set to ‘same’, it will +use by default the same input channel layout. +

+
+ +

Each expression in exprs can contain the following constants and functions: +

+
+
ch
+

channel number of the current expression +

+
+
n
+

number of the evaluated sample, starting from 0 +

+
+
s
+

sample rate +

+
+
t
+

time of the evaluated sample expressed in seconds +

+
+
nb_in_channels
+
nb_out_channels
+

input and output number of channels +

+
+
val(CH)
+

the value of input channel with number CH +

+
+ +

Note: this filter is slow. For faster processing you should use a +dedicated filter. +

+ +

34.4.1 Examples

+ +
    +
  • +Half volume: +
     
    aeval=val(ch)/2:c=same
    +
    + +
  • +Invert phase of the second channel: +
     
aeval=val(0)|-val(1)
    +
    +
+ + +

34.5 afade

+ +

Apply fade-in/out effect to input audio. +

+

A description of the accepted parameters follows. +

+
+
type, t
+

Specify the effect type, can be either in for fade-in, or +out for a fade-out effect. Default is in. +

+
+
start_sample, ss
+

Specify the number of the start sample for starting to apply the fade +effect. Default is 0. +

+
+
nb_samples, ns
+

Specify the number of samples for which the fade effect has to last. At +the end of the fade-in effect the output audio will have the same +volume as the input audio, at the end of the fade-out transition +the output audio will be silence. Default is 44100. +

+
+
start_time, st
+

Specify time for starting to apply the fade effect. Default is 0. +The accepted syntax is: +

 
[-]HH[:MM[:SS[.m...]]]
+[-]S+[.m...]
+
+

See also the function av_parse_time(). +If set this option is used instead of start_sample one. +

+
+
duration, d
+

Specify the duration for which the fade effect has to last. Default is 0. +The accepted syntax is: +

 
[-]HH[:MM[:SS[.m...]]]
+[-]S+[.m...]
+
+

See also the function av_parse_time(). +At the end of the fade-in effect the output audio will have the same +volume as the input audio, at the end of the fade-out transition +the output audio will be silence. +If set this option is used instead of nb_samples one. +

+
+
curve
+

Set curve for fade transition. +

+

It accepts the following values: +

+
tri
+

select triangular, linear slope (default) +

+
qsin
+

select quarter of sine wave +

+
hsin
+

select half of sine wave +

+
esin
+

select exponential sine wave +

+
log
+

select logarithmic +

+
par
+

select inverted parabola +

+
qua
+

select quadratic +

+
cub
+

select cubic +

+
squ
+

select square root +

+
cbr
+

select cubic root +

+
+
+
+ + +

34.5.1 Examples

+ +
    +
  • +Fade in first 15 seconds of audio: +
     
    afade=t=in:ss=0:d=15
    +
    + +
  • +Fade out last 25 seconds of a 900 seconds audio: +
     
    afade=t=out:st=875:d=25
    +
    +
+ +

+

+

34.6 aformat

+ +

Set output format constraints for the input audio. The framework will +negotiate the most appropriate format to minimize conversions. +

+

The filter accepts the following named parameters: +

+
sample_fmts
+

A ’|’-separated list of requested sample formats. +

+
+
sample_rates
+

A ’|’-separated list of requested sample rates. +

+
+
channel_layouts
+

A ’|’-separated list of requested channel layouts. +

+

See (ffmpeg-utils)channel layout syntax +for the required syntax. +

+
+ +

If a parameter is omitted, all values are allowed. +

+

For example to force the output to either unsigned 8-bit or signed 16-bit stereo: +

 
aformat=sample_fmts=u8|s16:channel_layouts=stereo
+
+ + +

34.7 allpass

+ +

Apply a two-pole all-pass filter with central frequency (in Hz) +frequency, and filter-width width. +An all-pass filter changes the audio’s frequency to phase relationship +without changing its frequency to amplitude relationship. +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set frequency in Hz. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Specify the band-width of a filter in width_type units. +

+
+ + +

34.8 amerge

+ +

Merge two or more audio streams into a single multi-channel stream. +

+

The filter accepts the following options: +

+
+
inputs
+

Set the number of inputs. Default is 2. +

+
+
+ +

If the channel layouts of the inputs are disjoint, and therefore compatible, +the channel layout of the output will be set accordingly and the channels +will be reordered as necessary. If the channel layouts of the inputs are not +disjoint, the output will have all the channels of the first input then all +the channels of the second input, in that order, and the channel layout of +the output will be the default value corresponding to the total number of +channels. +

+

For example, if the first input is in 2.1 (FL+FR+LF) and the second input +is FC+BL+BR, then the output will be in 5.1, with the channels in the +following order: a1, a2, b1, a3, b2, b3 (a1 is the first channel of the +first input, b1 is the first channel of the second input). +

+

On the other hand, if both input are in stereo, the output channels will be +in the default order: a1, a2, b1, b2, and the channel layout will be +arbitrarily set to 4.0, which may or may not be the expected value. +

+

All inputs must have the same sample rate, and format. +

+

If inputs do not have the same duration, the output will stop with the +shortest. +

+ +

34.8.1 Examples

+ +
    +
  • +Merge two mono files into a stereo stream: +
     
    amovie=left.wav [l] ; amovie=right.mp3 [r] ; [l] [r] amerge
    +
    + +
  • +Multiple merges assuming 1 video stream and 6 audio streams in ‘input.mkv’: +
     
    ffmpeg -i input.mkv -filter_complex "[0:1][0:2][0:3][0:4][0:5][0:6] amerge=inputs=6" -c:a pcm_s16le output.mkv
    +
    +
+ + +

34.9 amix

+ +

Mixes multiple audio inputs into a single output. +

+

For example +

 
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex amix=inputs=3:duration=first:dropout_transition=3 OUTPUT
+
+

will mix 3 input audio streams to a single output with the same duration as the +first input and a dropout transition time of 3 seconds. +

+

The filter accepts the following named parameters: +

+
inputs
+

Number of inputs. If unspecified, it defaults to 2. +

+
+
duration
+

How to determine the end-of-stream. +

+
longest
+

Duration of longest input. (default) +

+
+
shortest
+

Duration of shortest input. +

+
+
first
+

Duration of first input. +

+
+
+ +
+
dropout_transition
+

Transition time, in seconds, for volume renormalization when an input +stream ends. The default value is 2 seconds. +

+
+
+ + +

34.10 anull

+ +

Pass the audio source unchanged to the output. +

+ +

34.11 apad

+ +

Pad the end of an audio stream with silence. This can be used together with +-shortest to extend audio streams to the same length as the video stream. +

+ +

34.12 aphaser

+

Add a phasing effect to the input audio. +

+

A phaser filter creates series of peaks and troughs in the frequency spectrum. +The position of the peaks and troughs are modulated so that they vary over time, creating a sweeping effect. +

+

A description of the accepted parameters follows. +

+
+
in_gain
+

Set input gain. Default is 0.4. +

+
+
out_gain
+

Set output gain. Default is 0.74 +

+
+
delay
+

Set delay in milliseconds. Default is 3.0. +

+
+
decay
+

Set decay. Default is 0.4. +

+
+
speed
+

Set modulation speed in Hz. Default is 0.5. +

+
+
type
+

Set modulation type. Default is triangular. +

+

It accepts the following values: +

+
triangular, t
+
sinusoidal, s
+
+
+
+ +

+

+

34.13 aresample

+ +

Resample the input audio to the specified parameters, using the +libswresample library. If none are specified then the filter will +automatically convert between its input and output. +

+

This filter is also able to stretch/squeeze the audio data to make it match +the timestamps or to inject silence / cut out audio to make it match the +timestamps, do a combination of both or do neither. +

+

The filter accepts the syntax +[sample_rate:]resampler_options, where sample_rate +expresses a sample rate and resampler_options is a list of +key=value pairs, separated by ":". See the +ffmpeg-resampler manual for the complete list of supported options. +

+ +

34.13.1 Examples

+ +
    +
  • +Resample the input audio to 44100Hz: +
     
    aresample=44100
    +
    + +
  • +Stretch/squeeze samples to the given timestamps, with a maximum of 1000 +samples per second compensation: +
     
    aresample=async=1000
    +
    +
+ + +

34.14 asetnsamples

+ +

Set the number of samples per each output audio frame. +

+

The last output packet may contain a different number of samples, as +the filter will flush all the remaining samples when the input audio +signals its end. +

+

The filter accepts the following options: +

+
+
nb_out_samples, n
+

Set the number of samples per each output audio frame. The number is +intended as the number of samples per each channel. +Default value is 1024. +

+
+
pad, p
+

If set to 1, the filter will pad the last audio frame with zeroes, so +that the last frame will contain the same number of samples as the +previous ones. Default value is 1. +

+
+ +

For example, to set the number of per-frame samples to 1234 and +disable padding for the last frame, use: +

 
asetnsamples=n=1234:p=0
+
+ + +

34.15 asetrate

+ +

Set the sample rate without altering the PCM data. +This will result in a change of speed and pitch. +

+

The filter accepts the following options: +

+
+
sample_rate, r
+

Set the output sample rate. Default is 44100 Hz. +

+
+ + +

34.16 ashowinfo

+ +

Show a line containing various information for each input audio frame. +The input audio is not modified. +

+

The shown line contains a sequence of key/value pairs of the form +key:value. +

+

A description of each shown parameter follows: +

+
+
n
+

sequential number of the input frame, starting from 0 +

+
+
pts
+

Presentation timestamp of the input frame, in time base units; the time base +depends on the filter input pad, and is usually 1/sample_rate. +

+
+
pts_time
+

presentation timestamp of the input frame in seconds +

+
+
pos
+

position of the frame in the input stream, -1 if this information is +unavailable and/or meaningless (for example in case of synthetic audio) +

+
+
fmt
+

sample format +

+
+
chlayout
+

channel layout +

+
+
rate
+

sample rate for the audio frame +

+
+
nb_samples
+

number of samples (per channel) in the frame +

+
+
checksum
+

Adler-32 checksum (printed in hexadecimal) of the audio data. For planar audio +the data is treated as if all the planes were concatenated. +

+
+
plane_checksums
+

A list of Adler-32 checksums for each data plane. +

+
+ + +

34.17 astats

+ +

Display time domain statistical information about the audio channels. +Statistics are calculated and displayed for each audio channel and, +where applicable, an overall figure is also given. +

+

The filter accepts the following option: +

+
length
+

Short window length in seconds, used for peak and trough RMS measurement. +Default is 0.05 (50 milliseconds). Allowed range is [0.01 - 10]. +

+
+ +

A description of each shown parameter follows: +

+
+
DC offset
+

Mean amplitude displacement from zero. +

+
+
Min level
+

Minimal sample level. +

+
+
Max level
+

Maximal sample level. +

+
+
Peak level dB
+
RMS level dB
+

Standard peak and RMS level measured in dBFS. +

+
+
RMS peak dB
+
RMS trough dB
+

Peak and trough values for RMS level measured over a short window. +

+
+
Crest factor
+

Standard ratio of peak to RMS level (note: not in dB). +

+
+
Flat factor
+

Flatness (i.e. consecutive samples with the same value) of the signal at its peak levels +(i.e. either Min level or Max level). +

+
+
Peak count
+

Number of occasions (not the number of samples) that the signal attained either +Min level or Max level. +

+
+ + +

34.18 astreamsync

+ +

Forward two audio streams and control the order the buffers are forwarded. +

+

The filter accepts the following options: +

+
+
expr, e
+

Set the expression deciding which stream should be +forwarded next: if the result is negative, the first stream is forwarded; if +the result is positive or zero, the second stream is forwarded. It can use +the following variables: +

+
+
b1 b2
+

number of buffers forwarded so far on each stream +

+
s1 s2
+

number of samples forwarded so far on each stream +

+
t1 t2
+

current timestamp of each stream +

+
+ +

The default value is t1-t2, which means to always forward the stream +that has a smaller timestamp. +

+
+ + +

34.18.1 Examples

+ +

Stress-test amerge by randomly sending buffers on the wrong +input, while avoiding too much of a desynchronization: +

 
amovie=file.ogg [a] ; amovie=file.mp3 [b] ;
+[a] [b] astreamsync=(2*random(1))-1+tanh(5*(t1-t2)) [a2] [b2] ;
+[a2] [b2] amerge
+
+ + +

34.19 asyncts

+ +

Synchronize audio data with timestamps by squeezing/stretching it and/or +dropping samples/adding silence when needed. +

+

This filter is not built by default, please use aresample to do squeezing/stretching. +

+

The filter accepts the following named parameters: +

+
compensate
+

Enable stretching/squeezing the data to make it match the timestamps. Disabled +by default. When disabled, time gaps are covered with silence. +

+
+
min_delta
+

Minimum difference between timestamps and audio data (in seconds) to trigger +adding/dropping samples. Default value is 0.1. If you get non-perfect sync with +this filter, try setting this parameter to 0. +

+
+
max_comp
+

Maximum compensation in samples per second. Relevant only with compensate=1. +Default value 500. +

+
+
first_pts
+

Assume the first pts should be this value. The time base is 1 / sample rate. +This allows for padding/trimming at the start of stream. By default, no +assumption is made about the first frame’s expected pts, so no padding or +trimming is done. For example, this could be set to 0 to pad the beginning with +silence if an audio stream starts after the video stream or to trim any samples +with a negative pts due to encoder delay. +

+
+
+ + +

34.20 atempo

+ +

Adjust audio tempo. +

+

The filter accepts exactly one parameter, the audio tempo. If not +specified then the filter will assume nominal 1.0 tempo. Tempo must +be in the [0.5, 2.0] range. +

+ +

34.20.1 Examples

+ +
    +
  • +Slow down audio to 80% tempo: +
     
    atempo=0.8
    +
    + +
  • +To speed up audio to 125% tempo: +
     
    atempo=1.25
    +
    +
+ + +

34.21 atrim

+ +

Trim the input so that the output contains one continuous subpart of the input. +

+

This filter accepts the following options: +

+
start
+

Specify time of the start of the kept section, i.e. the audio sample +with the timestamp start will be the first sample in the output. +

+
+
end
+

Specify time of the first audio sample that will be dropped, i.e. the +audio sample immediately preceding the one with the timestamp end will be +the last sample in the output. +

+
+
start_pts
+

Same as start, except this option sets the start timestamp in samples +instead of seconds. +

+
+
end_pts
+

Same as end, except this option sets the end timestamp in samples instead +of seconds. +

+
+
duration
+

Specify maximum duration of the output. +

+
+
start_sample
+

Number of the first sample that should be passed to output. +

+
+
end_sample
+

Number of the first sample that should be dropped. +

+
+ +

‘start’, ‘end’, ‘duration’ are expressed as time +duration specifications, check the "Time duration" section in the +ffmpeg-utils manual. +

+

Note that the first two sets of the start/end options and the ‘duration’ +option look at the frame timestamp, while the _sample options simply count the +samples that pass through the filter. So start/end_pts and start/end_sample will +give different results when the timestamps are wrong, inexact or do not start at +zero. Also note that this filter does not modify the timestamps. If you wish +that the output timestamps start at zero, insert the asetpts filter after the +atrim filter. +

+

If multiple start or end options are set, this filter tries to be greedy and +keep all samples that match at least one of the specified constraints. To keep +only the part that matches all the constraints at once, chain multiple atrim +filters. +

+

The defaults are such that all the input is kept. So it is possible to set e.g. +just the end values to keep everything before the specified time. +

+

Examples: +

    +
  • +drop everything except the second minute of input +
     
    ffmpeg -i INPUT -af atrim=60:120
    +
    + +
  • +keep only the first 1000 samples +
     
    ffmpeg -i INPUT -af atrim=end_sample=1000
    +
    + +
+ + +

34.22 bandpass

+ +

Apply a two-pole Butterworth band-pass filter with central +frequency frequency, and (3dB-point) band-width width. +The csg option selects a constant skirt gain (peak gain = Q) +instead of the default: constant 0dB peak gain. +The filter roll off at 6dB per octave (20dB per decade). +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set the filter’s central frequency. Default is 3000. +

+
+
csg
+

Constant skirt gain if set to 1. Defaults to 0. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Specify the band-width of a filter in width_type units. +

+
+ + +

34.23 bandreject

+ +

Apply a two-pole Butterworth band-reject filter with central +frequency frequency, and (3dB-point) band-width width. +The filter roll off at 6dB per octave (20dB per decade). +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set the filter’s central frequency. Default is 3000. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Specify the band-width of a filter in width_type units. +

+
+ + +

34.24 bass

+ +

Boost or cut the bass (lower) frequencies of the audio using a two-pole +shelving filter with a response similar to that of a standard +hi-fi’s tone-controls. This is also known as shelving equalisation (EQ). +

+

The filter accepts the following options: +

+
+
gain, g
+

Give the gain at 0 Hz. Its useful range is about -20 +(for a large cut) to +20 (for a large boost). +Beware of clipping when using a positive gain. +

+
+
frequency, f
+

Set the filter’s central frequency and so can be used +to extend or reduce the frequency range to be boosted or cut. +The default value is 100 Hz. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Determine how steep is the filter’s shelf transition. +

+
+ + +

34.25 biquad

+ +

Apply a biquad IIR filter with the given coefficients. +Where b0, b1, b2 and a0, a1, a2 +are the numerator and denominator coefficients respectively. +

+ +

34.26 channelmap

+ +

Remap input channels to new locations. +

+

This filter accepts the following named parameters: +

+
channel_layout
+

Channel layout of the output stream. +

+
+
map
+

Map channels from input to output. The argument is a ’|’-separated list of +mappings, each in the in_channel-out_channel or +in_channel form. in_channel can be either the name of the input +channel (e.g. FL for front left) or its index in the input channel layout. +out_channel is the name of the output channel or its index in the output +channel layout. If out_channel is not given then it is implicitly an +index, starting with zero and increasing by one for each mapping. +

+
+ +

If no mapping is present, the filter will implicitly map input channels to +output channels preserving index. +

+

For example, assuming a 5.1+downmix input MOV file +

 
ffmpeg -i in.mov -filter 'channelmap=map=DL-FL|DR-FR' out.wav
+
+

will create an output WAV file tagged as stereo from the downmix channels of +the input. +

+

To fix a 5.1 WAV improperly encoded in AAC’s native channel order +

 
ffmpeg -i in.wav -filter 'channelmap=1|2|0|5|3|4:channel_layout=5.1' out.wav
+
+ + +

34.27 channelsplit

+ +

Split each channel in input audio stream into a separate output stream. +

+

This filter accepts the following named parameters: +

+
channel_layout
+

Channel layout of the input stream. Default is "stereo". +

+
+ +

For example, assuming a stereo input MP3 file +

 
ffmpeg -i in.mp3 -filter_complex channelsplit out.mkv
+
+

will create an output Matroska file with two audio streams, one containing only +the left channel and the other the right channel. +

+

To split a 5.1 WAV file into per-channel files +

 
ffmpeg -i in.wav -filter_complex
+'channelsplit=channel_layout=5.1[FL][FR][FC][LFE][SL][SR]'
+-map '[FL]' front_left.wav -map '[FR]' front_right.wav -map '[FC]'
+front_center.wav -map '[LFE]' lfe.wav -map '[SL]' side_left.wav -map '[SR]'
+side_right.wav
+
+ + +

34.28 compand

+

Compress or expand audio dynamic range. +

+

A description of the accepted options follows. +

+
+
attacks
+
decays
+

Set list of times in seconds for each channel over which the instantaneous level +of the input signal is averaged to determine its volume. attacks refers to +increase of volume and decays refers to decrease of volume. For most +situations, the attack time (response to the audio getting louder) should be +shorter than the decay time because the human ear is more sensitive to sudden +loud audio than sudden soft audio. A typical value for attack is 0.3 seconds and +a typical value for decay is 0.8 seconds. +

+
+
points
+

Set list of points for the transfer function, specified in dB relative to the +maximum possible signal amplitude. Each key points list must be defined using +the following syntax: x0/y0|x1/y1|x2/y2|.... or +x0/y0 x1/y1 x2/y2 .... +

+

The input values must be in strictly increasing order but the transfer function +does not have to be monotonically rising. The point 0/0 is assumed but +may be overridden (by 0/out-dBn). Typical values for the transfer +function are -70/-70|-60/-20. +

+
+
soft-knee
+

Set the curve radius in dB for all joints. Defaults to 0.01. +

+
+
gain
+

Set additional gain in dB to be applied at all points on the transfer function. +This allows easy adjustment of the overall gain. Defaults to 0. +

+
+
volume
+

Set initial volume in dB to be assumed for each channel when filtering starts. +This permits the user to supply a nominal level initially, so that, for +example, a very large gain is not applied to initial signal levels before the +companding has begun to operate. A typical value for audio which is initially +quiet is -90 dB. Defaults to 0. +

+
+
delay
+

Set delay in seconds. The input audio is analyzed immediately, but audio is +delayed before being fed to the volume adjuster. Specifying a delay +approximately equal to the attack/decay times allows the filter to effectively +operate in predictive rather than reactive mode. Defaults to 0. +

+
+
+ + +

34.28.1 Examples

+ +
    +
  • +Make music with both quiet and loud passages suitable for listening in a noisy +environment: +
     
    compand=.3|.3:1|1:-90/-60|-60/-40|-40/-30|-20/-20:6:0:-90:0.2
    +
    + +
  • +Noise gate for when the noise is at a lower level than the signal: +
     
    compand=.1|.1:.2|.2:-900/-900|-50.1/-900|-50/-50:.01:0:-90:.1
    +
    + +
  • +Here is another noise gate, this time for when the noise is at a higher level +than the signal (making it, in some ways, similar to squelch): +
     
    compand=.1|.1:.1|.1:-45.1/-45.1|-45/-900|0/-900:.01:45:-90:.1
    +
    +
+ + +

34.29 earwax

+ +

Make audio easier to listen to on headphones. +

+

This filter adds ‘cues’ to 44.1kHz stereo (i.e. audio CD format) audio +so that when listened to on headphones the stereo image is moved from +inside your head (standard for headphones) to outside and in front of +the listener (standard for speakers). +

+

Ported from SoX. +

+ +

34.30 equalizer

+ +

Apply a two-pole peaking equalisation (EQ) filter. With this +filter, the signal-level at and around a selected frequency can +be increased or decreased, whilst (unlike bandpass and bandreject +filters) that at all other frequencies is unchanged. +

+

In order to produce complex equalisation curves, this filter can +be given several times, each with a different central frequency. +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set the filter’s central frequency in Hz. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Specify the band-width of a filter in width_type units. +

+
+
gain, g
+

Set the required gain or attenuation in dB. +Beware of clipping when using a positive gain. +

+
+ + +

34.30.1 Examples

+
    +
  • +Attenuate 10 dB at 1000 Hz, with a bandwidth of 200 Hz: +
     
    equalizer=f=1000:width_type=h:width=200:g=-10
    +
    + +
  • +Apply 2 dB gain at 1000 Hz with Q 1 and attenuate 5 dB at 100 Hz with Q 2: +
     
    equalizer=f=1000:width_type=q:width=1:g=2,equalizer=f=100:width_type=q:width=2:g=-5
    +
    +
+ + +

34.31 highpass

+ +

Apply a high-pass filter with 3dB point frequency. +The filter can be either single-pole, or double-pole (the default). +The filter roll off at 6dB per pole per octave (20dB per pole per decade). +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set frequency in Hz. Default is 3000. +

+
+
poles, p
+

Set number of poles. Default is 2. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Specify the band-width of a filter in width_type units. +Applies only to double-pole filter. +The default is 0.707q and gives a Butterworth response. +

+
+ + +

34.32 join

+ +

Join multiple input streams into one multi-channel stream. +

+

The filter accepts the following named parameters: +

+
inputs
+

Number of input streams. Defaults to 2. +

+
+
channel_layout
+

Desired output channel layout. Defaults to stereo. +

+
+
map
+

Map channels from inputs to output. The argument is a ’|’-separated list of +mappings, each in the input_idx.in_channel-out_channel +form. input_idx is the 0-based index of the input stream. in_channel +can be either the name of the input channel (e.g. FL for front left) or its +index in the specified input stream. out_channel is the name of the output +channel. +

+
+ +

The filter will attempt to guess the mappings when those are not specified +explicitly. It does so by first trying to find an unused matching input channel +and if that fails it picks the first unused input channel. +

+

E.g. to join 3 inputs (with properly set channel layouts) +

 
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex join=inputs=3 OUTPUT
+
+ +

To build a 5.1 output from 6 single-channel streams: +

 
ffmpeg -i fl -i fr -i fc -i sl -i sr -i lfe -filter_complex
+'join=inputs=6:channel_layout=5.1:map=0.0-FL|1.0-FR|2.0-FC|3.0-SL|4.0-SR|5.0-LFE'
+out
+
+ + +

34.33 ladspa

+ +

Load a LADSPA (Linux Audio Developer’s Simple Plugin API) plugin. +

+

To enable compilation of this filter you need to configure FFmpeg with +--enable-ladspa. +

+
+
file, f
+

Specifies the name of LADSPA plugin library to load. If the environment +variable LADSPA_PATH is defined, the LADSPA plugin is searched in +each one of the directories specified by the colon separated list in +LADSPA_PATH, otherwise in the standard LADSPA paths, which are in +this order: ‘HOME/.ladspa/lib/’, ‘/usr/local/lib/ladspa/’, +‘/usr/lib/ladspa/’. +

+
+
plugin, p
+

Specifies the plugin within the library. Some libraries contain only +one plugin, but others contain many of them. If this is not set filter +will list all available plugins within the specified library. +

+
+
controls, c
+

Set the ’|’ separated list of controls which are zero or more floating point +values that determine the behavior of the loaded plugin (for example delay, +threshold or gain). +Controls need to be defined using the following syntax: +c0=value0|c1=value1|c2=value2|..., where +valuei is the value set on the i-th control. +If ‘controls’ is set to help, all available controls and +their valid ranges are printed. +

+
+
sample_rate, s
+

Specify the sample rate, default to 44100. Only used if plugin have +zero inputs. +

+
+
nb_samples, n
+

Set the number of samples per channel per each output frame, default +is 1024. Only used if plugin have zero inputs. +

+
+
duration, d
+

Set the minimum duration of the sourced audio. See the function +av_parse_time() for the accepted format, also check the "Time duration" +section in the ffmpeg-utils manual. +Note that the resulting duration may be greater than the specified duration, +as the generated audio is always cut at the end of a complete frame. +If not specified, or the expressed duration is negative, the audio is +supposed to be generated forever. +Only used if plugin have zero inputs. +

+
+
+ + +

34.33.1 Examples

+ +
    +
  • +List all available plugins within amp (LADSPA example plugin) library: +
     
    ladspa=file=amp
    +
    + +
  • +List all available controls and their valid ranges for vcf_notch +plugin from VCF library: +
     
    ladspa=f=vcf:p=vcf_notch:c=help
    +
    + +
  • +Simulate low quality audio equipment using Computer Music Toolkit (CMT) +plugin library: +
     
    ladspa=file=cmt:plugin=lofi:controls=c0=22|c1=12|c2=12
    +
    + +
  • +Add reverberation to the audio using TAP-plugins +(Tom’s Audio Processing plugins): +
     
    ladspa=file=tap_reverb:tap_reverb
    +
    + +
  • +Generate white noise, with 0.2 amplitude: +
     
    ladspa=file=cmt:noise_source_white:c=c0=.2
    +
    + +
  • +Generate 20 bpm clicks using plugin C* Click - Metronome from the +C* Audio Plugin Suite (CAPS) library: +
     
    ladspa=file=caps:Click:c=c1=20
    +
    + +
  • +Apply C* Eq10X2 - Stereo 10-band equaliser effect: +
     
    ladspa=caps:Eq10X2:c=c0=-48|c9=-24|c3=12|c4=2
    +
    +
+ + +

34.33.2 Commands

+ +

This filter supports the following commands: +

+
cN
+

Modify the N-th control value. +

+

If the specified value is not valid, it is ignored and prior one is kept. +

+
+ + +

34.34 lowpass

+ +

Apply a low-pass filter with 3dB point frequency. +The filter can be either single-pole or double-pole (the default). +The filter roll off at 6dB per pole per octave (20dB per pole per decade). +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set frequency in Hz. Default is 500. +

+
+
poles, p
+

Set number of poles. Default is 2. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Specify the band-width of a filter in width_type units. +Applies only to double-pole filter. +The default is 0.707q and gives a Butterworth response. +

+
+ + +

34.35 pan

+ +

Mix channels with specific gain levels. The filter accepts the output +channel layout followed by a set of channels definitions. +

+

This filter is also designed to remap efficiently the channels of an audio +stream. +

+

The filter accepts parameters of the form: +"l:outdef:outdef:..." +

+
+
l
+

output channel layout or number of channels +

+
+
outdef
+

output channel specification, of the form: +"out_name=[gain*]in_name[+[gain*]in_name...]" +

+
+
out_name
+

output channel to define, either a channel name (FL, FR, etc.) or a channel +number (c0, c1, etc.) +

+
+
gain
+

multiplicative coefficient for the channel, 1 leaving the volume unchanged +

+
+
in_name
+

input channel to use, see out_name for details; it is not possible to mix +named and numbered input channels +

+
+ +

If the ‘=’ in a channel specification is replaced by ‘<’, then the gains for +that specification will be renormalized so that the total is 1, thus +avoiding clipping noise. +

+ +

34.35.1 Mixing examples

+ +

For example, if you want to down-mix from stereo to mono, but with a bigger +factor for the left channel: +

 
pan=1:c0=0.9*c0+0.1*c1
+
+ +

A customized down-mix to stereo that works automatically for 3-, 4-, 5- and +7-channels surround: +

 
pan=stereo: FL < FL + 0.5*FC + 0.6*BL + 0.6*SL : FR < FR + 0.5*FC + 0.6*BR + 0.6*SR
+
+ +

Note that ffmpeg integrates a default down-mix (and up-mix) system +that should be preferred (see "-ac" option) unless you have very specific +needs. +

+ +

34.35.2 Remapping examples

+ +

The channel remapping will be effective if, and only if: +

+
    +
  • gain coefficients are zeroes or ones, +
  • only one input per channel output, +
+ +

If all these conditions are satisfied, the filter will notify the user ("Pure +channel mapping detected"), and use an optimized and lossless method to do the +remapping. +

+

For example, if you have a 5.1 source and want a stereo audio stream by +dropping the extra channels: +

 
pan="stereo: c0=FL : c1=FR"
+
+ +

Given the same source, you can also switch front left and front right channels +and keep the input channel layout: +

 
pan="5.1: c0=c1 : c1=c0 : c2=c2 : c3=c3 : c4=c4 : c5=c5"
+
+ +

If the input is a stereo audio stream, you can mute the front left channel (and +still keep the stereo channel layout) with: +

 
pan="stereo:c1=c1"
+
+ +

Still with a stereo audio stream input, you can copy the right channel in both +front left and right: +

 
pan="stereo: c0=FR : c1=FR"
+
+ + +

34.36 replaygain

+ +

ReplayGain scanner filter. This filter takes an audio stream as an input and +outputs it unchanged. +At end of filtering it displays track_gain and track_peak. +

+ +

34.37 resample

+ +

Convert the audio sample format, sample rate and channel layout. This filter is +not meant to be used directly. +

+ +

34.38 silencedetect

+ +

Detect silence in an audio stream. +

+

This filter logs a message when it detects that the input audio volume is less +or equal to a noise tolerance value for a duration greater or equal to the +minimum detected noise duration. +

+

The printed times and duration are expressed in seconds. +

+

The filter accepts the following options: +

+
+
duration, d
+

Set silence duration until notification (default is 2 seconds). +

+
+
noise, n
+

Set noise tolerance. Can be specified in dB (in case "dB" is appended to the +specified value) or amplitude ratio. Default is -60dB, or 0.001. +

+
+ + +

34.38.1 Examples

+ +
    +
  • +Detect 5 seconds of silence with -50dB noise tolerance: +
     
    silencedetect=n=-50dB:d=5
    +
    + +
  • +Complete example with ffmpeg to detect silence with 0.0001 noise +tolerance in ‘silence.mp3’: +
     
    ffmpeg -i silence.mp3 -af silencedetect=noise=0.0001 -f null -
    +
    +
+ + +

34.39 treble

+ +

Boost or cut treble (upper) frequencies of the audio using a two-pole +shelving filter with a response similar to that of a standard +hi-fi’s tone-controls. This is also known as shelving equalisation (EQ). +

+

The filter accepts the following options: +

+
+
gain, g
+

Give the gain at whichever is the lower of ~22 kHz and the +Nyquist frequency. Its useful range is about -20 (for a large cut) +to +20 (for a large boost). Beware of clipping when using a positive gain. +

+
+
frequency, f
+

Set the filter’s central frequency and so can be used +to extend or reduce the frequency range to be boosted or cut. +The default value is 3000 Hz. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Determine how steep is the filter’s shelf transition. +

+
+ + +

34.40 volume

+ +

Adjust the input audio volume. +

+

The filter accepts the following options: +

+
+
volume
+

Set audio volume expression. +

+

Output values are clipped to the maximum value. +

+

The output audio volume is given by the relation: +

 
output_volume = volume * input_volume
+
+ +

Default value for volume is "1.0". +

+
+
precision
+

Set the mathematical precision. +

+

This determines which input sample formats will be allowed, which affects the +precision of the volume scaling. +

+
+
fixed
+

8-bit fixed-point; limits input sample format to U8, S16, and S32. +

+
float
+

32-bit floating-point; limits input sample format to FLT. (default) +

+
double
+

64-bit floating-point; limits input sample format to DBL. +

+
+ +
+
eval
+

Set when the volume expression is evaluated. +

+

It accepts the following values: +

+
once
+

only evaluate expression once during the filter initialization, or +when the ‘volume’ command is sent +

+
+
frame
+

evaluate expression for each incoming frame +

+
+ +

Default value is ‘once’. +

+
+ +

The volume expression can contain the following parameters. +

+
+
n
+

frame number (starting at zero) +

+
nb_channels
+

number of channels +

+
nb_consumed_samples
+

number of samples consumed by the filter +

+
nb_samples
+

number of samples in the current frame +

+
pos
+

original frame position in the file +

+
pts
+

frame PTS +

+
sample_rate
+

sample rate +

+
startpts
+

PTS at start of stream +

+
startt
+

time at start of stream +

+
t
+

frame time +

+
tb
+

timestamp timebase +

+
volume
+

last set volume value +

+
+ +

Note that when ‘eval’ is set to ‘once’ only the +sample_rate and tb variables are available, all other +variables will evaluate to NAN. +

+ +

34.40.1 Commands

+ +

This filter supports the following commands: +

+
volume
+

Modify the volume expression. +The command accepts the same syntax of the corresponding option. +

+

If the specified expression is not valid, it is kept at its current +value. +

+
+ + +

34.40.2 Examples

+ +
    +
  • +Halve the input audio volume: +
     
    volume=volume=0.5
    +volume=volume=1/2
    +volume=volume=-6.0206dB
    +
    + +

    In all the above examples the named key for ‘volume’ can be +omitted, for example like in: +

     
    volume=0.5
    +
    + +
  • +Increase input audio power by 6 decibels using fixed-point precision: +
     
    volume=volume=6dB:precision=fixed
    +
    + +
  • +Fade volume after time 10 with an annihilation period of 5 seconds: +
     
    volume='if(lt(t,10),1,max(1-(t-10)/5,0))':eval=frame
    +
    +
+ + +

34.41 volumedetect

+ +

Detect the volume of the input audio. +

+

The filter has no parameters. The input is not modified. Statistics about +the volume will be printed in the log when the input stream end is reached. +

+

In particular it will show the mean volume (root mean square), maximum +volume (on a per-sample basis), and the beginning of a histogram of the +registered volume values (from the maximum value to a cumulated 1/1000 of +the samples). +

+

All volumes are in decibels relative to the maximum PCM value. +

+ +

34.41.1 Examples

+ +

Here is an excerpt of the output: +

 
[Parsed_volumedetect_0  0xa23120] mean_volume: -27 dB
+[Parsed_volumedetect_0  0xa23120] max_volume: -4 dB
+[Parsed_volumedetect_0  0xa23120] histogram_4db: 6
+[Parsed_volumedetect_0  0xa23120] histogram_5db: 62
+[Parsed_volumedetect_0  0xa23120] histogram_6db: 286
+[Parsed_volumedetect_0  0xa23120] histogram_7db: 1042
+[Parsed_volumedetect_0  0xa23120] histogram_8db: 2551
+[Parsed_volumedetect_0  0xa23120] histogram_9db: 4609
+[Parsed_volumedetect_0  0xa23120] histogram_10db: 8409
+
+ +

It means that: +

    +
  • +The mean square energy is approximately -27 dB, or 10^-2.7. +
  • +The largest sample is at -4 dB, or more precisely between -4 dB and -5 dB. +
  • +There are 6 samples at -4 dB, 62 at -5 dB, 286 at -6 dB, etc. +
+ +

In other words, raising the volume by +4 dB does not cause any clipping, +raising it by +5 dB causes clipping for 6 samples, etc. +

+ + +

35. Audio Sources

+ +

Below is a description of the currently available audio sources. +

+ +

35.1 abuffer

+ +

Buffer audio frames, and make them available to the filter chain. +

+

This source is mainly intended for a programmatic use, in particular +through the interface defined in ‘libavfilter/asrc_abuffer.h’. +

+

It accepts the following named parameters: +

+
+
time_base
+

Timebase which will be used for timestamps of submitted frames. It must be +either a floating-point number or in numerator/denominator form. +

+
+
sample_rate
+

The sample rate of the incoming audio buffers. +

+
+
sample_fmt
+

The sample format of the incoming audio buffers. +Either a sample format name or its corresponding integer representation from +the enum AVSampleFormat in ‘libavutil/samplefmt.h’ +

+
+
channel_layout
+

The channel layout of the incoming audio buffers. +Either a channel layout name from channel_layout_map in +‘libavutil/channel_layout.c’ or its corresponding integer representation +from the AV_CH_LAYOUT_* macros in ‘libavutil/channel_layout.h’ +

+
+
channels
+

The number of channels of the incoming audio buffers. +If both channels and channel_layout are specified, then they +must be consistent. +

+
+
+ + +

35.1.1 Examples

+ +
 
abuffer=sample_rate=44100:sample_fmt=s16p:channel_layout=stereo
+
+ +

will instruct the source to accept planar 16bit signed stereo at 44100Hz. +Since the sample format with name "s16p" corresponds to the number +6 and the "stereo" channel layout corresponds to the value 0x3, this is +equivalent to: +

 
abuffer=sample_rate=44100:sample_fmt=6:channel_layout=0x3
+
+ + +

35.2 aevalsrc

+ +

Generate an audio signal specified by an expression. +

+

This source accepts in input one or more expressions (one for each +channel), which are evaluated and used to generate a corresponding +audio signal. +

+

This source accepts the following options: +

+
+
exprs
+

Set the ’|’-separated expressions list for each separate channel. In case the +‘channel_layout’ option is not specified, the selected channel layout +depends on the number of provided expressions. Otherwise the last +specified expression is applied to the remaining output channels. +

+
+
channel_layout, c
+

Set the channel layout. The number of channels in the specified layout +must be equal to the number of specified expressions. +

+
+
duration, d
+

Set the minimum duration of the sourced audio. See the function +av_parse_time() for the accepted format. +Note that the resulting duration may be greater than the specified +duration, as the generated audio is always cut at the end of a +complete frame. +

+

If not specified, or the expressed duration is negative, the audio is +supposed to be generated forever. +

+
+
nb_samples, n
+

Set the number of samples per channel per each output frame, +default to 1024. +

+
+
sample_rate, s
+

Specify the sample rate, default to 44100. +

+
+ +

Each expression in exprs can contain the following constants: +

+
+
n
+

number of the evaluated sample, starting from 0 +

+
+
t
+

time of the evaluated sample expressed in seconds, starting from 0 +

+
+
s
+

sample rate +

+
+
+ + +

35.2.1 Examples

+ +
    +
  • +Generate silence: +
     
    aevalsrc=0
    +
    + +
  • +Generate a sin signal with frequency of 440 Hz, set sample rate to +8000 Hz: +
     
    aevalsrc="sin(440*2*PI*t):s=8000"
    +
    + +
  • +Generate a two channels signal, specify the channel layout (Front +Center + Back Center) explicitly: +
     
    aevalsrc="sin(420*2*PI*t)|cos(430*2*PI*t):c=FC|BC"
    +
    + +
  • +Generate white noise: +
     
    aevalsrc="-2+random(0)"
    +
    + +
  • +Generate an amplitude modulated signal: +
     
    aevalsrc="sin(10*2*PI*t)*sin(880*2*PI*t)"
    +
    + +
  • +Generate 2.5 Hz binaural beats on a 360 Hz carrier: +
     
    aevalsrc="0.1*sin(2*PI*(360-2.5/2)*t) | 0.1*sin(2*PI*(360+2.5/2)*t)"
    +
    + +
+ + +

35.3 anullsrc

+ +

Null audio source, return unprocessed audio frames. It is mainly useful +as a template and to be employed in analysis / debugging tools, or as +the source for filters which ignore the input data (for example the sox +synth filter). +

+

This source accepts the following options: +

+
+
channel_layout, cl
+
+

Specify the channel layout, and can be either an integer or a string +representing a channel layout. The default value of channel_layout +is "stereo". +

+

Check the channel_layout_map definition in +‘libavutil/channel_layout.c’ for the mapping between strings and +channel layout values. +

+
+
sample_rate, r
+

Specify the sample rate, and defaults to 44100. +

+
+
nb_samples, n
+

Set the number of samples per requested frames. +

+
+
+ + +

35.3.1 Examples

+ +
    +
  • +Set the sample rate to 48000 Hz and the channel layout to AV_CH_LAYOUT_MONO. +
     
    anullsrc=r=48000:cl=4
    +
    + +
  • +Do the same operation with a more obvious syntax: +
     
    anullsrc=r=48000:cl=mono
    +
    +
+ +

All the parameters need to be explicitly defined. +

+ +

35.4 flite

+ +

Synthesize a voice utterance using the libflite library. +

+

To enable compilation of this filter you need to configure FFmpeg with +--enable-libflite. +

+

Note that the flite library is not thread-safe. +

+

The filter accepts the following options: +

+
+
list_voices
+

If set to 1, list the names of the available voices and exit +immediately. Default value is 0. +

+
+
nb_samples, n
+

Set the maximum number of samples per frame. Default value is 512. +

+
+
textfile
+

Set the filename containing the text to speak. +

+
+
text
+

Set the text to speak. +

+
+
voice, v
+

Set the voice to use for the speech synthesis. Default value is +kal. See also the list_voices option. +

+
+ + +

35.4.1 Examples

+ +
    +
  • +Read from file ‘speech.txt’, and synthesize the text using the +standard flite voice: +
     
    flite=textfile=speech.txt
    +
    + +
  • +Read the specified text selecting the slt voice: +
     
    flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
    +
    + +
  • +Input text to ffmpeg: +
     
    ffmpeg -f lavfi -i flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
    +
    + +
  • +Make ‘ffplay’ speak the specified text, using flite and +the lavfi device: +
     
    ffplay -f lavfi flite=text='No more be grieved for which that thou hast done.'
    +
    +
+ +

For more information about libflite, check: +http://www.speech.cs.cmu.edu/flite/ +

+ +

35.5 sine

+ +

Generate an audio signal made of a sine wave with amplitude 1/8. +

+

The audio signal is bit-exact. +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set the carrier frequency. Default is 440 Hz. +

+
+
beep_factor, b
+

Enable a periodic beep every second with frequency beep_factor times +the carrier frequency. Default is 0, meaning the beep is disabled. +

+
+
sample_rate, r
+

Specify the sample rate, default is 44100. +

+
+
duration, d
+

Specify the duration of the generated audio stream. +

+
+
samples_per_frame
+

Set the number of samples per output frame, default is 1024. +

+
+ + +

35.5.1 Examples

+ +
    +
  • +Generate a simple 440 Hz sine wave: +
     
    sine
    +
    + +
  • +Generate a 220 Hz sine wave with a 880 Hz beep each second, for 5 seconds: +
     
    sine=220:4:d=5
    +sine=f=220:b=4:d=5
    +sine=frequency=220:beep_factor=4:duration=5
    +
    + +
+ + + +

36. Audio Sinks

+ +

Below is a description of the currently available audio sinks. +

+ +

36.1 abuffersink

+ +

Buffer audio frames, and make them available to the end of filter chain. +

+

This sink is mainly intended for programmatic use, in particular +through the interface defined in ‘libavfilter/buffersink.h’ +or the options system. +

+

It accepts a pointer to an AVABufferSinkContext structure, which +defines the incoming buffers’ formats, to be passed as the opaque +parameter to avfilter_init_filter for initialization. +

+ +

36.2 anullsink

+ +

Null audio sink, do absolutely nothing with the input audio. It is +mainly useful as a template and to be employed in analysis / debugging +tools. +

+ + +

37. Video Filters

+ +

When you configure your FFmpeg build, you can disable any of the +existing filters using --disable-filters. +The configure output will show the video filters included in your +build. +

+

Below is a description of the currently available video filters. +

+ +

37.1 alphaextract

+ +

Extract the alpha component from the input as a grayscale video. This +is especially useful with the alphamerge filter. +

+ +

37.2 alphamerge

+ +

Add or replace the alpha component of the primary input with the +grayscale value of a second input. This is intended for use with +alphaextract to allow the transmission or storage of frame +sequences that have alpha in a format that doesn’t support an alpha +channel. +

+

For example, to reconstruct full frames from a normal YUV-encoded video +and a separate video created with alphaextract, you might use: +

 
movie=in_alpha.mkv [alpha]; [in][alpha] alphamerge [out]
+
+ +

Since this filter is designed for reconstruction, it operates on frame +sequences without considering timestamps, and terminates when either +input reaches end of stream. This will cause problems if your encoding +pipeline drops frames. If you’re trying to apply an image as an +overlay to a video stream, consider the overlay filter instead. +

+ +

37.3 ass

+ +

Same as the subtitles filter, except that it doesn’t require libavcodec +and libavformat to work. On the other hand, it is limited to ASS (Advanced +Substation Alpha) subtitles files. +

+ +

37.4 bbox

+ +

Compute the bounding box for the non-black pixels in the input frame +luminance plane. +

+

This filter computes the bounding box containing all the pixels with a +luminance value greater than the minimum allowed value. +The parameters describing the bounding box are printed on the filter +log. +

+

The filter accepts the following option: +

+
+
min_val
+

Set the minimal luminance value. Default is 16. +

+
+ + +

37.5 blackdetect

+ +

Detect video intervals that are (almost) completely black. Can be +useful to detect chapter transitions, commercials, or invalid +recordings. Output lines contains the time for the start, end and +duration of the detected black interval expressed in seconds. +

+

In order to display the output lines, you need to set the loglevel at +least to the AV_LOG_INFO value. +

+

The filter accepts the following options: +

+
+
black_min_duration, d
+

Set the minimum detected black duration expressed in seconds. It must +be a non-negative floating point number. +

+

Default value is 2.0. +

+
+
picture_black_ratio_th, pic_th
+

Set the threshold for considering a picture "black". +Express the minimum value for the ratio: +

 
nb_black_pixels / nb_pixels
+
+ +

for which a picture is considered black. +Default value is 0.98. +

+
+
pixel_black_th, pix_th
+

Set the threshold for considering a pixel "black". +

+

The threshold expresses the maximum pixel luminance value for which a +pixel is considered "black". The provided value is scaled according to +the following equation: +

 
absolute_threshold = luminance_minimum_value + pixel_black_th * luminance_range_size
+
+ +

luminance_range_size and luminance_minimum_value depend on +the input video format, the range is [0-255] for YUV full-range +formats and [16-235] for YUV non full-range formats. +

+

Default value is 0.10. +

+
+ +

The following example sets the maximum pixel threshold to the minimum +value, and detects only black intervals of 2 or more seconds: +

 
blackdetect=d=2:pix_th=0.00
+
+ + +

37.6 blackframe

+ +

Detect frames that are (almost) completely black. Can be useful to +detect chapter transitions or commercials. Output lines consist of +the frame number of the detected frame, the percentage of blackness, +the position in the file if known or -1 and the timestamp in seconds. +

+

In order to display the output lines, you need to set the loglevel at +least to the AV_LOG_INFO value. +

+

The filter accepts the following options: +

+
+
amount
+

Set the percentage of the pixels that have to be below the threshold, defaults +to 98. +

+
+
threshold, thresh
+

Set the threshold below which a pixel value is considered black, defaults to +32. +

+
+
+ + +

37.7 blend

+ +

Blend two video frames into each other. +

+

It takes two input streams and outputs one stream, the first input is the +"top" layer and second input is "bottom" layer. +Output terminates when shortest input terminates. +

+

A description of the accepted options follows. +

+
+
c0_mode
+
c1_mode
+
c2_mode
+
c3_mode
+
all_mode
+

Set blend mode for specific pixel component or all pixel components in case +of all_mode. Default value is normal. +

+

Available values for component modes are: +

+
addition
+
and
+
average
+
burn
+
darken
+
difference
+
divide
+
dodge
+
exclusion
+
hardlight
+
lighten
+
multiply
+
negation
+
normal
+
or
+
overlay
+
phoenix
+
pinlight
+
reflect
+
screen
+
softlight
+
subtract
+
vividlight
+
xor
+
+ +
+
c0_opacity
+
c1_opacity
+
c2_opacity
+
c3_opacity
+
all_opacity
+

Set blend opacity for specific pixel component or all pixel components in case +of all_opacity. Only used in combination with pixel component blend modes. +

+
+
c0_expr
+
c1_expr
+
c2_expr
+
c3_expr
+
all_expr
+

Set blend expression for specific pixel component or all pixel components in case +of all_expr. Note that related mode options will be ignored if those are set. +

+

The expressions can use the following variables: +

+
+
N
+

The sequential number of the filtered frame, starting from 0. +

+
+
X
+
Y
+

the coordinates of the current sample +

+
+
W
+
H
+

the width and height of currently filtered plane +

+
+
SW
+
SH
+

Width and height scale depending on the currently filtered plane. It is the +ratio between the corresponding luma plane number of pixels and the current +plane ones. E.g. for YUV4:2:0 the values are 1,1 for the luma plane, and +0.5,0.5 for chroma planes. +

+
+
T
+

Time of the current frame, expressed in seconds. +

+
+
TOP, A
+

Value of pixel component at current location for first video frame (top layer). +

+
+
BOTTOM, B
+

Value of pixel component at current location for second video frame (bottom layer). +

+
+ +
+
shortest
+

Force termination when the shortest input terminates. Default is 0. +

+
repeatlast
+

Continue applying the last bottom frame after the end of the stream. A value of +0 disables the filter after the last frame of the bottom layer is reached. +Default is 1. +

+
+ + +

37.7.1 Examples

+ +
    +
  • +Apply transition from bottom layer to top layer in first 10 seconds: +
     
    blend=all_expr='A*(if(gte(T,10),1,T/10))+B*(1-(if(gte(T,10),1,T/10)))'
    +
    + +
  • +Apply 1x1 checkerboard effect: +
     
    blend=all_expr='if(eq(mod(X,2),mod(Y,2)),A,B)'
    +
    + +
  • +Apply uncover left effect: +
     
    blend=all_expr='if(gte(N*SW+X,W),A,B)'
    +
    + +
  • +Apply uncover down effect: +
     
    blend=all_expr='if(gte(Y-N*SH,0),A,B)'
    +
    + +
  • +Apply uncover up-left effect: +
     
    blend=all_expr='if(gte(T*SH*40+Y,H)*gte((T*40*SW+X)*W/H,W),A,B)'
    +
    +
+ + +

37.8 boxblur

+ +

Apply boxblur algorithm to the input video. +

+

The filter accepts the following options: +

+
+
luma_radius, lr
+
luma_power, lp
+
chroma_radius, cr
+
chroma_power, cp
+
alpha_radius, ar
+
alpha_power, ap
+
+ +

A description of the accepted options follows. +

+
+
luma_radius, lr
+
chroma_radius, cr
+
alpha_radius, ar
+

Set an expression for the box radius in pixels used for blurring the +corresponding input plane. +

+

The radius value must be a non-negative number, and must not be +greater than the value of the expression min(w,h)/2 for the +luma and alpha planes, and of min(cw,ch)/2 for the chroma +planes. +

+

Default value for ‘luma_radius’ is "2". If not specified, +‘chroma_radius’ and ‘alpha_radius’ default to the +corresponding value set for ‘luma_radius’. +

+

The expressions can contain the following constants: +

+
w
+
h
+

the input width and height in pixels +

+
+
cw
+
ch
+

the input chroma image width and height in pixels +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+ +
+
luma_power, lp
+
chroma_power, cp
+
alpha_power, ap
+

Specify how many times the boxblur filter is applied to the +corresponding plane. +

+

Default value for ‘luma_power’ is 2. If not specified, +‘chroma_power’ and ‘alpha_power’ default to the +corresponding value set for ‘luma_power’. +

+

A value of 0 will disable the effect. +

+
+ + +

37.8.1 Examples

+ +
    +
  • +Apply a boxblur filter with luma, chroma, and alpha radius +set to 2: +
     
    boxblur=luma_radius=2:luma_power=1
    +boxblur=2:1
    +
    + +
  • +Set luma radius to 2, alpha and chroma radius to 0: +
     
    boxblur=2:1:cr=0:ar=0
    +
    + +
  • +Set luma and chroma radius to a fraction of the video dimension: +
     
    boxblur=luma_radius=min(h\,w)/10:luma_power=1:chroma_radius=min(cw\,ch)/10:chroma_power=1
    +
    +
+ + +

37.9 colorbalance

+

Modify intensity of primary colors (red, green and blue) of input frames. +

+

The filter allows an input frame to be adjusted in the shadows, midtones or highlights +regions for the red-cyan, green-magenta or blue-yellow balance. +

+

A positive adjustment value shifts the balance towards the primary color, a negative +value towards the complementary color. +

+

The filter accepts the following options: +

+
+
rs
+
gs
+
bs
+

Adjust red, green and blue shadows (darkest pixels). +

+
+
rm
+
gm
+
bm
+

Adjust red, green and blue midtones (medium pixels). +

+
+
rh
+
gh
+
bh
+

Adjust red, green and blue highlights (brightest pixels). +

+

Allowed ranges for options are [-1.0, 1.0]. Defaults are 0. +

+
+ + +

37.9.1 Examples

+ +
    +
  • +Add red color cast to shadows: +
     
    colorbalance=rs=.3
    +
    +
+ + +

37.10 colorchannelmixer

+ +

Adjust video input frames by re-mixing color channels. +

+

This filter modifies a color channel by adding the values associated to +the other channels of the same pixels. For example if the value to +modify is red, the output value will be: +

 
red=red*rr + blue*rb + green*rg + alpha*ra
+
+ +

The filter accepts the following options: +

+
+
rr
+
rg
+
rb
+
ra
+

Adjust contribution of input red, green, blue and alpha channels for output red channel. +Default is 1 for rr, and 0 for rg, rb and ra. +

+
+
gr
+
gg
+
gb
+
ga
+

Adjust contribution of input red, green, blue and alpha channels for output green channel. +Default is 1 for gg, and 0 for gr, gb and ga. +

+
+
br
+
bg
+
bb
+
ba
+

Adjust contribution of input red, green, blue and alpha channels for output blue channel. +Default is 1 for bb, and 0 for br, bg and ba. +

+
+
ar
+
ag
+
ab
+
aa
+

Adjust contribution of input red, green, blue and alpha channels for output alpha channel. +Default is 1 for aa, and 0 for ar, ag and ab. +

+

Allowed ranges for options are [-2.0, 2.0]. +

+
+ + +

37.10.1 Examples

+ +
    +
  • +Convert source to grayscale: +
     
    colorchannelmixer=.3:.4:.3:0:.3:.4:.3:0:.3:.4:.3
    +
    +
  • +Simulate sepia tones: +
     
    colorchannelmixer=.393:.769:.189:0:.349:.686:.168:0:.272:.534:.131
    +
    +
+ + +

37.11 colormatrix

+ +

Convert color matrix. +

+

The filter accepts the following options: +

+
+
src
+
dst
+

Specify the source and destination color matrix. Both values must be +specified. +

+

The accepted values are: +

+
bt709
+

BT.709 +

+
+
bt601
+

BT.601 +

+
+
smpte240m
+

SMPTE-240M +

+
+
fcc
+

FCC +

+
+
+
+ +

For example to convert from BT.601 to SMPTE-240M, use the command: +

 
colormatrix=bt601:smpte240m
+
+ + +

37.12 copy

+ +

Copy the input source unchanged to the output. Mainly useful for +testing purposes. +

+ +

37.13 crop

+ +

Crop the input video to given dimensions. +

+

The filter accepts the following options: +

+
+
w, out_w
+

Width of the output video. It defaults to iw. +This expression is evaluated only once during the filter +configuration. +

+
+
h, out_h
+

Height of the output video. It defaults to ih. +This expression is evaluated only once during the filter +configuration. +

+
+
x
+

Horizontal position, in the input video, of the left edge of the output video. +It defaults to (in_w-out_w)/2. +This expression is evaluated per-frame. +

+
+
y
+

Vertical position, in the input video, of the top edge of the output video. +It defaults to (in_h-out_h)/2. +This expression is evaluated per-frame. +

+
+
keep_aspect
+

If set to 1 will force the output display aspect ratio +to be the same as that of the input, by changing the output sample aspect +ratio. It defaults to 0. +

+
+ +

The out_w, out_h, x, y parameters are +expressions containing the following constants: +

+
+
x
+
y
+

the computed values for x and y. They are evaluated for +each new frame. +

+
+
in_w
+
in_h
+

the input width and height +

+
+
iw
+
ih
+

same as in_w and in_h +

+
+
out_w
+
out_h
+

the output (cropped) width and height +

+
+
ow
+
oh
+

same as out_w and out_h +

+
+
a
+

same as iw / ih +

+
+
sar
+

input sample aspect ratio +

+
+
dar
+

input display aspect ratio, it is the same as (iw / ih) * sar +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+
n
+

the number of input frame, starting from 0 +

+
+
pos
+

the position in the file of the input frame, NAN if unknown +

+
+
t
+

timestamp expressed in seconds, NAN if the input timestamp is unknown +

+
+
+ +

The expression for out_w may depend on the value of out_h, +and the expression for out_h may depend on out_w, but they +cannot depend on x and y, as x and y are +evaluated after out_w and out_h. +

+

The x and y parameters specify the expressions for the +position of the top-left corner of the output (non-cropped) area. They +are evaluated for each frame. If the evaluated value is not valid, it +is approximated to the nearest valid value. +

+

The expression for x may depend on y, and the expression +for y may depend on x. +

+ +

37.13.1 Examples

+ +
    +
  • +Crop area with size 100x100 at position (12,34). +
     
    crop=100:100:12:34
    +
    + +

    Using named options, the example above becomes: +

     
    crop=w=100:h=100:x=12:y=34
    +
    + +
  • +Crop the central input area with size 100x100: +
     
    crop=100:100
    +
    + +
  • +Crop the central input area with size 2/3 of the input video: +
     
    crop=2/3*in_w:2/3*in_h
    +
    + +
  • +Crop the input video central square: +
     
    crop=out_w=in_h
    +crop=in_h
    +
    + +
  • +Delimit the rectangle with the top-left corner placed at position +100:100 and the right-bottom corner corresponding to the right-bottom +corner of the input image: +
     
    crop=in_w-100:in_h-100:100:100
    +
    + +
  • +Crop 10 pixels from the left and right borders, and 20 pixels from +the top and bottom borders +
     
    crop=in_w-2*10:in_h-2*20
    +
    + +
  • +Keep only the bottom right quarter of the input image: +
     
    crop=in_w/2:in_h/2:in_w/2:in_h/2
    +
    + +
  • +Crop height for getting Greek harmony: +
     
    crop=in_w:1/PHI*in_w
    +
    + +
  • +Apply a trembling effect: +
     
    crop=in_w/2:in_h/2:(in_w-out_w)/2+((in_w-out_w)/2)*sin(n/10):(in_h-out_h)/2 +((in_h-out_h)/2)*sin(n/7)
    +
    + +
  • +Apply erratic camera effect depending on timestamp: +
     
    crop=in_w/2:in_h/2:(in_w-out_w)/2+((in_w-out_w)/2)*sin(t*10):(in_h-out_h)/2 +((in_h-out_h)/2)*sin(t*13)"
    +
    + +
  • +Set x depending on the value of y: +
     
    crop=in_w/2:in_h/2:y:10+10*sin(n/10)
    +
    +
+ + +

37.14 cropdetect

+ +

Auto-detect crop size. +

+

Calculate the necessary cropping parameters and print the recommended +parameters through the logging system. The detected dimensions +correspond to the non-black area of the input video. +

+

The filter accepts the following options: +

+
+
limit
+

Set higher black value threshold, which can be optionally specified +from nothing (0) to everything (255). An intensity value greater +than the set value is considered non-black. Default value is 24. +

+
+
round
+

Set the value for which the width/height should be divisible by. The +offset is automatically adjusted to center the video. Use 2 to get +only even dimensions (needed for 4:2:2 video). 16 is best when +encoding to most video codecs. Default value is 16. +

+
+
reset_count, reset
+

Set the counter that determines after how many frames cropdetect will +reset the previously detected largest video area and start over to +detect the current optimal crop area. Default value is 0. +

+

This can be useful when channel logos distort the video area. 0 +indicates never reset and return the largest area encountered during +playback. +

+
+ +

+

+

37.15 curves

+ +

Apply color adjustments using curves. +

+

This filter is similar to the Adobe Photoshop and GIMP curves tools. Each +component (red, green and blue) has its values defined by N key points +tied from each other using a smooth curve. The x-axis represents the pixel +values from the input frame, and the y-axis the new pixel values to be set for +the output frame. +

+

By default, a component curve is defined by the two points (0;0) and +(1;1). This creates a straight line where each original pixel value is +"adjusted" to its own value, which means no change to the image. +

+

The filter allows you to redefine these two points and add some more. A new +curve (using a natural cubic spline interpolation) will be defined to pass +smoothly through all these new coordinates. The newly defined points need to be +strictly increasing over the x-axis, and their x and y values must +be in the [0;1] interval. If the computed curves happen to go outside +the vector spaces, the values will be clipped accordingly. +

+

If there is no key point defined in x=0, the filter will automatically +insert a (0;0) point. In the same way, if there is no key point defined +in x=1, the filter will automatically insert a (1;1) point. +

+

The filter accepts the following options: +

+
+
preset
+

Select one of the available color presets. This option can be used in addition +to the ‘r’, ‘g’, ‘b’ parameters; in this case, the latter +options take priority over the preset values. +Available presets are: +

+
none
+
color_negative
+
cross_process
+
darker
+
increase_contrast
+
lighter
+
linear_contrast
+
medium_contrast
+
negative
+
strong_contrast
+
vintage
+
+

Default is none. +

+
master, m
+

Set the master key points. These points will define a second pass mapping. It +is sometimes called a "luminance" or "value" mapping. It can be used with +‘r’, ‘g’, ‘b’ or ‘all’ since it acts like a +post-processing LUT. +

+
red, r
+

Set the key points for the red component. +

+
green, g
+

Set the key points for the green component. +

+
blue, b
+

Set the key points for the blue component. +

+
all
+

Set the key points for all components (not including master). +Can be used in addition to the other key points component +options. In this case, the unset component(s) will fallback on this +‘all’ setting. +

+
psfile
+

Specify a Photoshop curves file (.asv) to import the settings from. +

+
+ +

To avoid some filtergraph syntax conflicts, each key points list need to be +defined using the following syntax: x0/y0 x1/y1 x2/y2 .... +

+ +

37.15.1 Examples

+ +
    +
  • +Increase slightly the middle level of blue: +
     
    curves=blue='0.5/0.58'
    +
    + +
  • +Vintage effect: +
     
    curves=r='0/0.11 .42/.51 1/0.95':g='0.50/0.48':b='0/0.22 .49/.44 1/0.8'
    +
    +

    Here we obtain the following coordinates for each component: +

    +
    red
    +

    (0;0.11) (0.42;0.51) (1;0.95) +

    +
    green
    +

    (0;0) (0.50;0.48) (1;1) +

    +
    blue
    +

    (0;0.22) (0.49;0.44) (1;0.80) +

    +
    + +
  • +The previous example can also be achieved with the associated built-in preset: +
     
    curves=preset=vintage
    +
    + +
  • +Or simply: +
     
    curves=vintage
    +
    + +
  • +Use a Photoshop preset and redefine the points of the green component: +
     
    curves=psfile='MyCurvesPresets/purple.asv':green='0.45/0.53'
    +
    +
+ + +

37.16 dctdnoiz

+ +

Denoise frames using 2D DCT (frequency domain filtering). +

+

This filter is not designed for real time and can be extremely slow. +

+

The filter accepts the following options: +

+
+
sigma, s
+

Set the noise sigma constant. +

+

This sigma defines a hard threshold of 3 * sigma; every DCT +coefficient (absolute value) below this threshold will be dropped. +

+

If you need a more advanced filtering, see ‘expr’. +

+

Default is 0. +

+
+
overlap
+

Set the number of overlapping pixels for each block. Each block is of size +16x16. Since the filter can be slow, you may want to reduce this value, +at the cost of a less effective filter and the risk of various artefacts. +

+

If the overlapping value doesn’t allow processing of the whole input width or +height, a warning will be displayed and the corresponding borders won’t be denoised. +

+

Default value is 15. +

+
+
expr, e
+

Set the coefficient factor expression. +

+

For each coefficient of a DCT block, this expression will be evaluated as a +multiplier value for the coefficient. +

+

If this option is set, the ‘sigma’ option will be ignored. +

+

The absolute value of the coefficient can be accessed through the c +variable. +

+
+ + +

37.16.1 Examples

+ +

Apply a denoise with a ‘sigma’ of 4.5: +

 
dctdnoiz=4.5
+
+ +

The same operation can be achieved using the expression system: +

 
dctdnoiz=e='gte(c, 4.5*3)'
+
+ +

+

+

37.17 decimate

+ +

Drop duplicated frames at regular intervals. +

+

The filter accepts the following options: +

+
+
cycle
+

Set the number of frames from which one will be dropped. Setting this to +N means one frame in every batch of N frames will be dropped. +Default is 5. +

+
+
dupthresh
+

Set the threshold for duplicate detection. If the difference metric for a frame +is less than or equal to this value, then it is declared as duplicate. Default +is 1.1 +

+
+
scthresh
+

Set scene change threshold. Default is 15. +

+
+
blockx
+
blocky
+

Set the size of the x and y-axis blocks used during metric calculations. +Larger blocks give better noise suppression, but also give worse detection of +small movements. Must be a power of two. Default is 32. +

+
+
ppsrc
+

Mark main input as a pre-processed input and activate clean source input +stream. This allows the input to be pre-processed with various filters to help +the metrics calculation while keeping the frame selection lossless. When set to +1, the first stream is for the pre-processed input, and the second +stream is the clean source from where the kept frames are chosen. Default is +0. +

+
+
chroma
+

Set whether or not chroma is considered in the metric calculations. Default is +1. +

+
+ + +

37.18 dejudder

+ +

Remove judder produced by partially interlaced telecined content. +

+

Judder can be introduced, for instance, by pullup filter. If the original +source was partially telecined content then the output of pullup,dejudder +will have a variable frame rate. May change the recorded frame rate of the +container. Aside from that change, this filter will not affect constant frame +rate video. +

+

The option available in this filter is: +

+
cycle
+

Specify the length of the window over which the judder repeats. +

+

Accepts any integer greater than 1. Useful values are: +

+
4
+

If the original was telecined from 24 to 30 fps (Film to NTSC). +

+
+
5
+

If the original was telecined from 25 to 30 fps (PAL to NTSC). +

+
+
20
+

If a mixture of the two. +

+
+ +

The default is ‘4’. +

+
+ + +

37.19 delogo

+ +

Suppress a TV station logo by a simple interpolation of the surrounding +pixels. Just set a rectangle covering the logo and watch it disappear +(and sometimes something even uglier appear - your mileage may vary). +

+

This filter accepts the following options: +

+
x
+
y
+

Specify the top left corner coordinates of the logo. They must be +specified. +

+
+
w
+
h
+

Specify the width and height of the logo to clear. They must be +specified. +

+
+
band, t
+

Specify the thickness of the fuzzy edge of the rectangle (added to +w and h). The default value is 4. +

+
+
show
+

When set to 1, a green rectangle is drawn on the screen to simplify +finding the right x, y, w, and h parameters. +The default value is 0. +

+

The rectangle is drawn on the outermost pixels which will be (partly) +replaced with interpolated values. The values of the next pixels +immediately outside this rectangle in each direction will be used to +compute the interpolated pixel values inside the rectangle. +

+
+
+ + +

37.19.1 Examples

+ +
    +
  • +Set a rectangle covering the area with top left corner coordinates 0,0 +and size 100x77, setting a band of size 10: +
     
    delogo=x=0:y=0:w=100:h=77:band=10
    +
    + +
+ + +

37.20 deshake

+ +

Attempt to fix small changes in horizontal and/or vertical shift. This +filter helps remove camera shake from hand-holding a camera, bumping a +tripod, moving on a vehicle, etc. +

+

The filter accepts the following options: +

+
+
x
+
y
+
w
+
h
+

Specify a rectangular area where to limit the search for motion +vectors. +If desired the search for motion vectors can be limited to a +rectangular area of the frame defined by its top left corner, width +and height. These parameters have the same meaning as the drawbox +filter which can be used to visualise the position of the bounding +box. +

+

This is useful when simultaneous movement of subjects within the frame +might be confused for camera motion by the motion vector search. +

+

If any or all of x, y, w and h are set to -1 +then the full frame is used. This allows later options to be set +without specifying the bounding box for the motion vector search. +

+

Default - search the whole frame. +

+
+
rx
+
ry
+

Specify the maximum extent of movement in x and y directions in the +range 0-64 pixels. Default 16. +

+
+
edge
+

Specify how to generate pixels to fill blanks at the edge of the +frame. Available values are: +

+
blank, 0
+

Fill zeroes at blank locations +

+
original, 1
+

Original image at blank locations +

+
clamp, 2
+

Extruded edge value at blank locations +

+
mirror, 3
+

Mirrored edge at blank locations +

+
+

Default value is ‘mirror’. +

+
+
blocksize
+

Specify the blocksize to use for motion search. Range 4-128 pixels, +default 8. +

+
+
contrast
+

Specify the contrast threshold for blocks. Only blocks with more than +the specified contrast (difference between darkest and lightest +pixels) will be considered. Range 1-255, default 125. +

+
+
search
+

Specify the search strategy. Available values are: +

+
exhaustive, 0
+

Set exhaustive search +

+
less, 1
+

Set less exhaustive search. +

+
+

Default value is ‘exhaustive’. +

+
+
filename
+

If set then a detailed log of the motion search is written to the +specified file. +

+
+
opencl
+

If set to 1, specify using OpenCL capabilities, only available if +FFmpeg was configured with --enable-opencl. Default value is 0. +

+
+
+ + +

37.21 drawbox

+ +

Draw a colored box on the input image. +

+

This filter accepts the following options: +

+
+
x
+
y
+

The expressions which specify the top left corner coordinates of the box. Default to 0. +

+
+
width, w
+
height, h
+

The expressions which specify the width and height of the box, if 0 they are interpreted as +the input width and height. Default to 0. +

+
+
color, c
+

Specify the color of the box to write. For the general syntax of this option, +check the "Color" section in the ffmpeg-utils manual. If the special +value invert is used, the box edge color is the same as the +video with inverted luma. +

+
+
thickness, t
+

The expression which sets the thickness of the box edge. Default value is 3. +

+

See below for the list of accepted constants. +

+
+ +

The parameters for x, y, w and h and t are expressions containing the +following constants: +

+
+
dar
+

The input display aspect ratio, it is the same as (w / h) * sar. +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+
in_h, ih
+
in_w, iw
+

The input width and height. +

+
+
sar
+

The input sample aspect ratio. +

+
+
x
+
y
+

The x and y offset coordinates where the box is drawn. +

+
+
w
+
h
+

The width and height of the drawn box. +

+
+
t
+

The thickness of the drawn box. +

+

These constants allow the x, y, w, h and t expressions to refer to +each other, so you may for example specify y=x/dar or h=w/dar. +

+
+
+ + +

37.21.1 Examples

+ +
    +
  • +Draw a black box around the edge of the input image: +
     
    drawbox
    +
    + +
  • +Draw a box with color red and an opacity of 50%: +
     
    drawbox=10:20:200:60:red@0.5
    +
    + +

    The previous example can be specified as: +

     
    drawbox=x=10:y=20:w=200:h=60:color=red@0.5
    +
    + +
  • +Fill the box with pink color: +
     
    drawbox=x=10:y=10:w=100:h=100:color=pink@0.5:t=max
    +
    + +
  • +Draw a 2-pixel red 2.40:1 mask: +
     
    drawbox=x=-t:y=0.5*(ih-iw/2.4)-t:w=iw+t*2:h=iw/2.4+t*2:t=2:c=red
    +
    +
+ + +

37.22 drawgrid

+ +

Draw a grid on the input image. +

+

This filter accepts the following options: +

+
+
x
+
y
+

The expressions which specify the coordinates of some point of grid intersection (meant to configure offset). Both default to 0. +

+
+
width, w
+
height, h
+

The expressions which specify the width and height of the grid cell, if 0 they are interpreted as the +input width and height, respectively, minus thickness, so image gets +framed. Default to 0. +

+
+
color, c
+

Specify the color of the grid. For the general syntax of this option, +check the "Color" section in the ffmpeg-utils manual. If the special +value invert is used, the grid color is the same as the +video with inverted luma. +

+
+
thickness, t
+

The expression which sets the thickness of the grid line. Default value is 1. +

+

See below for the list of accepted constants. +

+
+ +

The parameters for x, y, w and h and t are expressions containing the +following constants: +

+
+
dar
+

The input display aspect ratio, it is the same as (w / h) * sar. +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+
in_h, ih
+
in_w, iw
+

The input grid cell width and height. +

+
+
sar
+

The input sample aspect ratio. +

+
+
x
+
y
+

The x and y coordinates of some point of grid intersection (meant to configure offset). +

+
+
w
+
h
+

The width and height of the drawn cell. +

+
+
t
+

The thickness of the drawn cell. +

+

These constants allow the x, y, w, h and t expressions to refer to +each other, so you may for example specify y=x/dar or h=w/dar. +

+
+
+ + +

37.22.1 Examples

+ +
    +
  • +Draw a grid with cell 100x100 pixels, thickness 2 pixels, with color red and an opacity of 50%: +
     
    drawgrid=width=100:height=100:thickness=2:color=red@0.5
    +
    + +
  • +Draw a white 3x3 grid with an opacity of 50%: +
     
    drawgrid=w=iw/3:h=ih/3:t=2:c=white@0.5
    +
    +
+ +

+

+

37.23 drawtext

+ +

Draw text string or text from specified file on top of video using the +libfreetype library. +

+

To enable compilation of this filter you need to configure FFmpeg with +--enable-libfreetype. +

+ +

37.23.1 Syntax

+ +

The description of the accepted parameters follows. +

+
+
box
+

Used to draw a box around text using background color. +Value should be either 1 (enable) or 0 (disable). +The default value of box is 0. +

+
+
boxcolor
+

The color to be used for drawing box around text. For the syntax of this +option, check the "Color" section in the ffmpeg-utils manual. +

+

The default value of boxcolor is "white". +

+
+
borderw
+

Set the width of the border to be drawn around the text using bordercolor. +The default value of borderw is 0. +

+
+
bordercolor
+

Set the color to be used for drawing border around text. For the syntax of this +option, check the "Color" section in the ffmpeg-utils manual. +

+

The default value of bordercolor is "black". +

+
+
expansion
+

Select how the text is expanded. Can be either none, +strftime (deprecated) or +normal (default). See the Text expansion section +below for details. +

+
+
fix_bounds
+

If true, check and fix text coords to avoid clipping. +

+
+
fontcolor
+

The color to be used for drawing fonts. For the syntax of this option, check +the "Color" section in the ffmpeg-utils manual. +

+

The default value of fontcolor is "black". +

+
+
fontfile
+

The font file to be used for drawing text. Path must be included. +This parameter is mandatory. +

+
+
fontsize
+

The font size to be used for drawing text. +The default value of fontsize is 16. +

+
+
ft_load_flags
+

Flags to be used for loading the fonts. +

+

The flags map the corresponding flags supported by libfreetype, and are +a combination of the following values: +

+
default
+
no_scale
+
no_hinting
+
render
+
no_bitmap
+
vertical_layout
+
force_autohint
+
crop_bitmap
+
pedantic
+
ignore_global_advance_width
+
no_recurse
+
ignore_transform
+
monochrome
+
linear_design
+
no_autohint
+
+ +

Default value is "default". +

+

For more information consult the documentation for the FT_LOAD_* +libfreetype flags. +

+
+
shadowcolor
+

The color to be used for drawing a shadow behind the drawn text. For the +syntax of this option, check the "Color" section in the ffmpeg-utils manual. +

+

The default value of shadowcolor is "black". +

+
+
shadowx
+
shadowy
+

The x and y offsets for the text shadow position with respect to the +position of the text. They can be either positive or negative +values. Default value for both is "0". +

+
+
start_number
+

The starting frame number for the n/frame_num variable. The default value +is "0". +

+
+
tabsize
+

The size in number of spaces to use for rendering the tab. +Default value is 4. +

+
+
timecode
+

Set the initial timecode representation in "hh:mm:ss[:;.]ff" +format. It can be used with or without text parameter. timecode_rate +option must be specified. +

+
+
timecode_rate, rate, r
+

Set the timecode frame rate (timecode only). +

+
+
text
+

The text string to be drawn. The text must be a sequence of UTF-8 +encoded characters. +This parameter is mandatory if no file is specified with the parameter +textfile. +

+
+
textfile
+

A text file containing text to be drawn. The text must be a sequence +of UTF-8 encoded characters. +

+

This parameter is mandatory if no text string is specified with the +parameter text. +

+

If both text and textfile are specified, an error is thrown. +

+
+
reload
+

If set to 1, the textfile will be reloaded before each frame. +Be sure to update it atomically, or it may be read partially, or even fail. +

+
+
x
+
y
+

The expressions which specify the offsets where text will be drawn +within the video frame. They are relative to the top/left border of the +output image. +

+

The default value of x and y is "0". +

+

See below for the list of accepted constants and functions. +

+
+ +

The parameters for x and y are expressions containing the +following constants and functions: +

+
+
dar
+

input display aspect ratio, it is the same as (w / h) * sar +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+
line_h, lh
+

the height of each text line +

+
+
main_h, h, H
+

the input height +

+
+
main_w, w, W
+

the input width +

+
+
max_glyph_a, ascent
+

the maximum distance from the baseline to the highest/upper grid +coordinate used to place a glyph outline point, for all the rendered +glyphs. +It is a positive value, due to the grid’s orientation with the Y axis +upwards. +

+
+
max_glyph_d, descent
+

the maximum distance from the baseline to the lowest grid coordinate +used to place a glyph outline point, for all the rendered glyphs. +This is a negative value, due to the grid’s orientation, with the Y axis +upwards. +

+
+
max_glyph_h
+

maximum glyph height, that is the maximum height for all the glyphs +contained in the rendered text, it is equivalent to ascent - +descent. +

+
+
max_glyph_w
+

maximum glyph width, that is the maximum width for all the glyphs +contained in the rendered text +

+
+
n
+

the number of the input frame, starting from 0

+
+
rand(min, max)
+

return a random number included between min and max +

+
+
sar
+

input sample aspect ratio +

+
+
t
+

timestamp expressed in seconds, NAN if the input timestamp is unknown +

+
+
text_h, th
+

the height of the rendered text +

+
+
text_w, tw
+

the width of the rendered text +

+
+
x
+
y
+

the x and y offset coordinates where the text is drawn. +

+

These parameters allow the x and y expressions to refer +each other, so you can for example specify y=x/dar. +

+
+ +

If libavfilter was built with --enable-fontconfig, then +‘fontfile’ can be a fontconfig pattern or omitted. +

+

+

+

37.23.2 Text expansion

+ +

If ‘expansion’ is set to strftime, +the filter recognizes strftime() sequences in the provided text and +expands them accordingly. Check the documentation of strftime(). This +feature is deprecated. +

+

If ‘expansion’ is set to none, the text is printed verbatim. +

+

If ‘expansion’ is set to normal (which is the default), +the following expansion mechanism is used. +

+

The backslash character ’\’, followed by any character, always expands to +the second character. +

+

Sequence of the form %{...} are expanded. The text between the +braces is a function name, possibly followed by arguments separated by ’:’. +If the arguments contain special characters or delimiters (’:’ or ’}’), +they should be escaped. +

+

Note that they probably must also be escaped as the value for the +‘text’ option in the filter argument string and as the filter +argument in the filtergraph description, and possibly also for the shell, +that makes up to four levels of escaping; using a text file avoids these +problems. +

+

The following functions are available: +

+
+
expr, e
+

The expression evaluation result. +

+

It must take one argument specifying the expression to be evaluated, +which accepts the same constants and functions as the x and +y values. Note that not all constants should be used, for +example the text size is not known when evaluating the expression, so +the constants text_w and text_h will have an undefined +value. +

+
+
gmtime
+

The time at which the filter is running, expressed in UTC. +It can accept an argument: a strftime() format string. +

+
+
localtime
+

The time at which the filter is running, expressed in the local time zone. +It can accept an argument: a strftime() format string. +

+
+
metadata
+

Frame metadata. It must take one argument specifying metadata key. +

+
+
n, frame_num
+

The frame number, starting from 0. +

+
+
pict_type
+

A 1 character description of the current picture type. +

+
+
pts
+

The timestamp of the current frame, in seconds, with microsecond accuracy. +

+
+
+ + +

37.23.3 Examples

+ +
    +
  • +Draw "Test Text" with font FreeSerif, using the default values for the +optional parameters. + +
     
    drawtext="fontfile=/usr/share/fonts/truetype/freefont/FreeSerif.ttf: text='Test Text'"
    +
    + +
  • +Draw ’Test Text’ with font FreeSerif of size 24 at position x=100 +and y=50 (counting from the top-left corner of the screen), text is +yellow with a red box around it. Both the text and the box have an +opacity of 20%. + +
     
    drawtext="fontfile=/usr/share/fonts/truetype/freefont/FreeSerif.ttf: text='Test Text':\
    +          x=100: y=50: fontsize=24: fontcolor=yellow@0.2: box=1: boxcolor=red@0.2"
    +
    + +

    Note that the double quotes are not necessary if spaces are not used +within the parameter list. +

    +
  • +Show the text at the center of the video frame: +
     
    drawtext="fontsize=30:fontfile=FreeSerif.ttf:text='hello world':x=(w-text_w)/2:y=(h-text_h-line_h)/2"
    +
    + +
  • +Show a text line sliding from right to left in the last row of the video +frame. The file ‘LONG_LINE’ is assumed to contain a single line +with no newlines. +
     
    drawtext="fontsize=15:fontfile=FreeSerif.ttf:text=LONG_LINE:y=h-line_h:x=-50*t"
    +
    + +
  • +Show the content of file ‘CREDITS’ off the bottom of the frame and scroll up. +
     
    drawtext="fontsize=20:fontfile=FreeSerif.ttf:textfile=CREDITS:y=h-20*t"
    +
    + +
  • +Draw a single green letter "g", at the center of the input video. +The glyph baseline is placed at half screen height. +
     
    drawtext="fontsize=60:fontfile=FreeSerif.ttf:fontcolor=green:text=g:x=(w-max_glyph_w)/2:y=h/2-ascent"
    +
    + +
  • +Show text for 1 second every 3 seconds: +
     
    drawtext="fontfile=FreeSerif.ttf:fontcolor=white:x=100:y=x/dar:enable=lt(mod(t\,3)\,1):text='blink'"
    +
    + +
  • +Use fontconfig to set the font. Note that the colons need to be escaped. +
     
    drawtext='fontfile=Linux Libertine O-40\:style=Semibold:text=FFmpeg'
    +
    + +
  • +Print the date of a real-time encoding (see strftime(3)): +
     
    drawtext='fontfile=FreeSans.ttf:text=%{localtime:%a %b %d %Y}'
    +
    + +
+ +

For more information about libfreetype, check: +http://www.freetype.org/. +

+

For more information about fontconfig, check: +http://freedesktop.org/software/fontconfig/fontconfig-user.html. +

+ +

37.24 edgedetect

+ +

Detect and draw edges. The filter uses the Canny Edge Detection algorithm. +

+

The filter accepts the following options: +

+
+
low
+
high
+

Set low and high threshold values used by the Canny thresholding +algorithm. +

+

The high threshold selects the "strong" edge pixels, which are then +connected through 8-connectivity with the "weak" edge pixels selected +by the low threshold. +

+

low and high threshold values must be chosen in the range
[0,1], and low should be less than or equal to high.

+

Default value for low is 20/255, and default value for high +is 50/255. +

+
+ +

Example: +

 
edgedetect=low=0.1:high=0.4
+
+ + +

37.25 extractplanes

+ +

Extract color channel components from input video stream into +separate grayscale video streams. +

+

The filter accepts the following option: +

+
+
planes
+

Set plane(s) to extract. +

+

Available values for planes are: +

+
y
+
u
+
v
+
a
+
r
+
g
+
b
+
+ +

Choosing planes not available in the input will result in an error. +That means you cannot select r, g, b planes +with y, u, v planes at same time. +

+
+ + +

37.25.1 Examples

+ +
    +
  • +Extract luma, u and v color channel component from input video frame +into 3 grayscale outputs: +
     
    ffmpeg -i video.avi -filter_complex 'extractplanes=y+u+v[y][u][v]' -map '[y]' y.avi -map '[u]' u.avi -map '[v]' v.avi
    +
    +
+ + +

37.26 elbg

+ +

Apply a posterize effect using the ELBG (Enhanced LBG) algorithm. +

+

For each input image, the filter will compute the optimal mapping from +the input to the output given the codebook length, that is the number +of distinct output colors. +

+

This filter accepts the following options. +

+
+
codebook_length, l
+

Set codebook length. The value must be a positive integer, and +represents the number of distinct output colors. Default value is 256. +

+
+
nb_steps, n
+

Set the maximum number of iterations to apply for computing the optimal +mapping. The higher the value the better the result and the higher the +computation time. Default value is 1. +

+
+
seed, s
+

Set a random seed, must be an integer included between 0 and +UINT32_MAX. If not specified, or if explicitly set to -1, the filter +will try to use a good random seed on a best effort basis. +

+
+ + +

37.27 fade

+ +

Apply fade-in/out effect to input video. +

+

This filter accepts the following options: +

+
+
type, t
+

The effect type – can be either "in" for fade-in, or "out" for a fade-out +effect. +Default is in. +

+
+
start_frame, s
+

Specify the number of the start frame for starting to apply the fade +effect. Default is 0. +

+
+
nb_frames, n
+

The number of frames for which the fade effect has to last. At the end of the +fade-in effect the output video will have the same intensity as the input video, +at the end of the fade-out transition the output video will be filled with the +selected ‘color’. +Default is 25. +

+
+
alpha
+

If set to 1, fade only alpha channel, if one exists on the input. +Default value is 0. +

+
+
start_time, st
+

Specify the timestamp (in seconds) of the frame to start to apply the fade +effect. If both start_frame and start_time are specified, the fade will start at +whichever comes last. Default is 0. +

+
+
duration, d
+

The number of seconds for which the fade effect has to last. At the end of the +fade-in effect the output video will have the same intensity as the input video, +at the end of the fade-out transition the output video will be filled with the +selected ‘color’. +If both duration and nb_frames are specified, duration is used. Default is 0. +

+
+
color, c
+

Specify the color of the fade. Default is "black". +

+
+ + +

37.27.1 Examples

+ +
    +
  • +Fade in first 30 frames of video: +
     
    fade=in:0:30
    +
    + +

    The command above is equivalent to: +

     
    fade=t=in:s=0:n=30
    +
    + +
  • +Fade out last 45 frames of a 200-frame video: +
     
    fade=out:155:45
    +fade=type=out:start_frame=155:nb_frames=45
    +
    + +
  • +Fade in first 25 frames and fade out last 25 frames of a 1000-frame video: +
     
    fade=in:0:25, fade=out:975:25
    +
    + +
  • +Make first 5 frames yellow, then fade in from frame 5-24: +
     
    fade=in:5:20:color=yellow
    +
    + +
  • +Fade in alpha over first 25 frames of video: +
     
    fade=in:0:25:alpha=1
    +
    + +
  • +Make first 5.5 seconds black, then fade in for 0.5 seconds: +
     
    fade=t=in:st=5.5:d=0.5
    +
    + +
+ + +

37.28 field

+ +

Extract a single field from an interlaced image using stride +arithmetic to avoid wasting CPU time. The output frames are marked as +non-interlaced. +

+

The filter accepts the following options: +

+
+
type
+

Specify whether to extract the top (if the value is 0 or +top) or the bottom field (if the value is 1 or +bottom). +

+
+ + +

37.29 fieldmatch

+ +

Field matching filter for inverse telecine. It is meant to reconstruct the +progressive frames from a telecined stream. The filter does not drop duplicated +frames, so to achieve a complete inverse telecine fieldmatch needs to be +followed by a decimation filter such as decimate in the filtergraph. +

+

The separation of the field matching and the decimation is notably motivated by +the possibility of inserting a de-interlacing filter fallback between the two. +If the source has mixed telecined and real interlaced content, +fieldmatch will not be able to match fields for the interlaced parts. +But these remaining combed frames will be marked as interlaced, and thus can be +de-interlaced by a later filter such as yadif before decimation. +

+

In addition to the various configuration options, fieldmatch can take an +optional second stream, activated through the ‘ppsrc’ option. If +enabled, the frames reconstruction will be based on the fields and frames from +this second stream. This allows the first input to be pre-processed in order to +help the various algorithms of the filter, while keeping the output lossless +(assuming the fields are matched properly). Typically, a field-aware denoiser, +or brightness/contrast adjustments can help. +

+

Note that this filter uses the same algorithms as TIVTC/TFM (AviSynth project)
and VIVTC/VFM (VapourSynth project). The latter is a lightweight clone of TFM,
on which fieldmatch is based. While the semantics and usage are very
close, some behaviour and option names can differ.

+

The filter accepts the following options: +

+
+
order
+

Specify the assumed field order of the input stream. Available values are: +

+
+
auto
+

Auto detect parity (use FFmpeg’s internal parity value). +

+
bff
+

Assume bottom field first. +

+
tff
+

Assume top field first. +

+
+ +

Note that it is sometimes recommended not to trust the parity announced by the +stream. +

+

Default value is auto. +

+
+
mode
+

Set the matching mode or strategy to use. ‘pc’ mode is the safest in the +sense that it won’t risk creating jerkiness due to duplicate frames when +possible, but if there are bad edits or blended fields it will end up +outputting combed frames when a good match might actually exist. On the other +hand, ‘pcn_ub’ mode is the most risky in terms of creating jerkiness, +but will almost always find a good frame if there is one. The other values are +all somewhere in between ‘pc’ and ‘pcn_ub’ in terms of risking +jerkiness and creating duplicate frames versus finding good matches in sections +with bad edits, orphaned fields, blended fields, etc. +

+

More details about p/c/n/u/b are available in p/c/n/u/b meaning section. +

+

Available values are: +

+
+
pc
+

2-way matching (p/c) +

+
pc_n
+

2-way matching, and trying 3rd match if still combed (p/c + n) +

+
pc_u
+

2-way matching, and trying 3rd match (same order) if still combed (p/c + u) +

+
pc_n_ub
+

2-way matching, trying 3rd match if still combed, and trying 4th/5th matches if +still combed (p/c + n + u/b) +

+
pcn
+

3-way matching (p/c/n) +

+
pcn_ub
+

3-way matching, and trying 4th/5th matches if all 3 of the original matches are +detected as combed (p/c/n + u/b) +

+
+ +

The parenthesis at the end indicate the matches that would be used for that +mode assuming ‘order’=tff (and ‘field’ on auto or +top). +

+

In terms of speed ‘pc’ mode is by far the fastest and ‘pcn_ub’ is +the slowest. +

+

Default value is pc_n. +

+
+
ppsrc
+

Mark the main input stream as a pre-processed input, and enable the secondary +input stream as the clean source to pick the fields from. See the filter +introduction for more details. It is similar to the ‘clip2’ feature from +VFM/TFM. +

+

Default value is 0 (disabled). +

+
+
field
+

Set the field to match from. It is recommended to set this to the same value as +‘order’ unless you experience matching failures with that setting. In +certain circumstances changing the field that is used to match from can have a +large impact on matching performance. Available values are: +

+
+
auto
+

Automatic (same value as ‘order’). +

+
bottom
+

Match from the bottom field. +

+
top
+

Match from the top field. +

+
+ +

Default value is auto. +

+
+
mchroma
+

Set whether or not chroma is included during the match comparisons. In most +cases it is recommended to leave this enabled. You should set this to 0 +only if your clip has bad chroma problems such as heavy rainbowing or other +artifacts. Setting this to 0 could also be used to speed things up at +the cost of some accuracy. +

+

Default value is 1. +

+
+
y0
+
y1
+

These define an exclusion band which excludes the lines between ‘y0’ and +‘y1’ from being included in the field matching decision. An exclusion +band can be used to ignore subtitles, a logo, or other things that may +interfere with the matching. ‘y0’ sets the starting scan line and +‘y1’ sets the ending line; all lines in between ‘y0’ and +‘y1’ (including ‘y0’ and ‘y1’) will be ignored. Setting +‘y0’ and ‘y1’ to the same value will disable the feature. +‘y0’ and ‘y1’ defaults to 0. +

+
+
scthresh
+

Set the scene change detection threshold as a percentage of maximum change on +the luma plane. Good values are in the [8.0, 14.0] range. Scene change +detection is only relevant in case ‘combmatch’=sc. The range for +‘scthresh’ is [0.0, 100.0]. +

+

Default value is 12.0. +

+
+
combmatch
+

When ‘combmatch’ is not none, fieldmatch will take into
account the combed scores of matches when deciding what match to use as the
final match. Available values are:

+
+
none
+

No final matching based on combed scores. +

+
sc
+

Combed scores are only used when a scene change is detected. +

+
full
+

Use combed scores all the time. +

+
+ +

Default is sc. +

+
+
combdbg
+

Force fieldmatch to calculate the combed metrics for certain matches and +print them. This setting is known as ‘micout’ in TFM/VFM vocabulary. +Available values are: +

+
+
none
+

No forced calculation. +

+
pcn
+

Force p/c/n calculations. +

+
pcnub
+

Force p/c/n/u/b calculations. +

+
+ +

Default value is none. +

+
+
cthresh
+

This is the area combing threshold used for combed frame detection. This +essentially controls how "strong" or "visible" combing must be to be detected. +Larger values mean combing must be more visible and smaller values mean combing +can be less visible or strong and still be detected. Valid settings are from +-1 (every pixel will be detected as combed) to 255 (no pixel will +be detected as combed). This is basically a pixel difference value. A good +range is [8, 12]. +

+

Default value is 9. +

+
+
chroma
+

Sets whether or not chroma is considered in the combed frame decision. Only +disable this if your source has chroma problems (rainbowing, etc.) that are +causing problems for the combed frame detection with chroma enabled. Actually, +using ‘chroma’=0 is usually more reliable, except for the case +where there is chroma only combing in the source. +

+

Default value is 0. +

+
+
blockx
+
blocky
+

Respectively set the x-axis and y-axis size of the window used during combed +frame detection. This has to do with the size of the area in which +‘combpel’ pixels are required to be detected as combed for a frame to be +declared combed. See the ‘combpel’ parameter description for more info. +Possible values are any number that is a power of 2 starting at 4 and going up +to 512. +

+

Default value is 16. +

+
+
combpel
+

The number of combed pixels inside any of the ‘blocky’ by +‘blockx’ size blocks on the frame for the frame to be detected as +combed. While ‘cthresh’ controls how "visible" the combing must be, this +setting controls "how much" combing there must be in any localized area (a +window defined by the ‘blockx’ and ‘blocky’ settings) on the +frame. Minimum value is 0 and maximum is blocky x blockx (at +which point no frames will ever be detected as combed). This setting is known +as ‘MI’ in TFM/VFM vocabulary. +

+

Default value is 80. +

+
+ +

+

+

37.29.1 p/c/n/u/b meaning

+ + +

37.29.1.1 p/c/n

+ +

We assume the following telecined stream: +

+
 
Top fields:     1 2 2 3 4
+Bottom fields:  1 2 3 4 4
+
+ +

The numbers correspond to the progressive frame the fields relate to. Here, the +first two frames are progressive, the 3rd and 4th are combed, and so on. +

+

When fieldmatch is configured to run a matching from bottom
(‘field’=bottom) this is how this input stream gets transformed:

+
 
Input stream:
+                T     1 2 2 3 4
+                B     1 2 3 4 4   <-- matching reference
+
+Matches:              c c n n c
+
+Output stream:
+                T     1 2 3 4 4
+                B     1 2 3 4 4
+
+ +

As a result of the field matching, we can see that some frames get duplicated. +To perform a complete inverse telecine, you need to rely on a decimation filter +after this operation. See for instance the decimate filter. +

+

The same operation now matching from top fields (‘field’=top) +looks like this: +

+
 
Input stream:
+                T     1 2 2 3 4   <-- matching reference
+                B     1 2 3 4 4
+
+Matches:              c c p p c
+
+Output stream:
+                T     1 2 2 3 4
+                B     1 2 2 3 4
+
+ +

In these examples, we can see what p, c and n mean; +basically, they refer to the frame and field of the opposite parity: +

+
    +
  • p matches the field of the opposite parity in the previous frame +
  • c matches the field of the opposite parity in the current frame +
  • n matches the field of the opposite parity in the next frame +
+ + +

37.29.1.2 u/b

+ +

The u and b matching are a bit special in the sense that they match
from the opposite parity flag. In the following examples, we assume that we are
currently matching the 2nd frame (Top:2, Bottom:2). According to the match, an
’x’ is placed above and below each matched field.

+

With bottom matching (‘field’=bottom): +

 
Match:           c         p           n          b          u
+
+                 x       x               x        x          x
+  Top          1 2 2     1 2 2       1 2 2      1 2 2      1 2 2
+  Bottom       1 2 3     1 2 3       1 2 3      1 2 3      1 2 3
+                 x         x           x        x              x
+
+Output frames:
+                 2          1          2          2          2
+                 2          2          2          1          3
+
+ +

With top matching (‘field’=top): +

 
Match:           c         p           n          b          u
+
+                 x         x           x        x              x
+  Top          1 2 2     1 2 2       1 2 2      1 2 2      1 2 2
+  Bottom       1 2 3     1 2 3       1 2 3      1 2 3      1 2 3
+                 x       x               x        x          x
+
+Output frames:
+                 2          2          2          1          2
+                 2          1          3          2          2
+
+ + +

37.29.2 Examples

+ +

Simple IVTC of a top field first telecined stream: +

 
fieldmatch=order=tff:combmatch=none, decimate
+
+ +

Advanced IVTC, with fallback on yadif for still combed frames: +

 
fieldmatch=order=tff:combmatch=full, yadif=deint=interlaced, decimate
+
+ + +

37.30 fieldorder

+ +

Transform the field order of the input video. +

+

This filter accepts the following options: +

+
+
order
+

Output field order. Valid values are tff for top field first or bff +for bottom field first. +

+
+ +

Default value is ‘tff’. +

+

Transformation is achieved by shifting the picture content up or down +by one line, and filling the remaining line with appropriate picture content. +This method is consistent with most broadcast field order converters. +

+

If the input video is not flagged as being interlaced, or it is already +flagged as being of the required output field order then this filter does +not alter the incoming video. +

+

This filter is very useful when converting to or from PAL DV material, +which is bottom field first. +

+

For example: +

 
ffmpeg -i in.vob -vf "fieldorder=bff" out.dv
+
+ + +

37.31 fifo

+ +

Buffer input images and send them when they are requested. +

+

This filter is mainly useful when auto-inserted by the libavfilter +framework. +

+

The filter does not take parameters. +

+

+

+

37.32 format

+ +

Convert the input video to one of the specified pixel formats. +Libavfilter will try to pick one that is supported for the input to +the next filter. +

+

This filter accepts the following parameters: +

+
pix_fmts
+

A ’|’-separated list of pixel format names, for example +"pix_fmts=yuv420p|monow|rgb24". +

+
+
+ + +

37.32.1 Examples

+ +
    +
  • +Convert the input video to the format yuv420p +
     
    format=pix_fmts=yuv420p
    +
    + +

    Convert the input video to any of the formats in the list +

     
    format=pix_fmts=yuv420p|yuv444p|yuv410p
    +
    +
+ +

+

+

37.33 fps

+ +

Convert the video to specified constant frame rate by duplicating or dropping +frames as necessary. +

+

This filter accepts the following named parameters: +

+
fps
+

Desired output frame rate. The default is 25. +

+
+
round
+

Rounding method. +

+

Possible values are: +

+
zero
+

zero round towards 0 +

+
inf
+

round away from 0 +

+
down
+

round towards -infinity +

+
up
+

round towards +infinity +

+
near
+

round to nearest +

+
+

The default is near. +

+
+
start_time
+

Assume the first PTS should be the given value, in seconds. This allows for +padding/trimming at the start of stream. By default, no assumption is made +about the first frame’s expected PTS, so no padding or trimming is done. +For example, this could be set to 0 to pad the beginning with duplicates of +the first frame if a video stream starts after the audio stream or to trim any +frames with a negative PTS. +

+
+
+ +

Alternatively, the options can be specified as a flat string: +fps[:round]. +

+

See also the setpts filter. +

+ +

37.33.1 Examples

+ +
    +
  • +A typical usage in order to set the fps to 25: +
     
    fps=fps=25
    +
    + +
  • +Sets the fps to 24, using abbreviation and rounding method to round to nearest: +
     
    fps=fps=film:round=near
    +
    +
+ + +

37.34 framepack

+ +

Pack two different video streams into a stereoscopic video, setting proper +metadata on supported codecs. The two views should have the same size and +framerate and processing will stop when the shorter video ends. Please note +that you may conveniently adjust view properties with the scale and +fps filters. +

+

This filter accepts the following named parameters: +

+
format
+

Desired packing format. Supported values are: +

+
+
sbs
+

Views are next to each other (default). +

+
+
tab
+

Views are on top of each other. +

+
+
lines
+

Views are packed by line. +

+
+
columns
+


Views are packed by column. +

+
+
frameseq
+

Views are temporally interleaved. +

+
+
+ +
+
+ +

Some examples follow: +

+
 
# Convert left and right views into a frame sequential video.
+ffmpeg -i LEFT -i RIGHT -filter_complex framepack=frameseq OUTPUT
+
+# Convert views into a side-by-side video with the same output resolution as the input.
+ffmpeg -i LEFT -i RIGHT -filter_complex [0:v]scale=w=iw/2[left],[1:v]scale=w=iw/2[right],[left][right]framepack=sbs OUTPUT
+
+ + +

37.35 framestep

+ +

Select one frame every N-th frame. +

+

This filter accepts the following option: +

+
step
+

Select frame after every step frames. +Allowed values are positive integers higher than 0. Default value is 1. +

+
+ +

+

+

37.36 frei0r

+ +

Apply a frei0r effect to the input video. +

+

To enable compilation of this filter you need to install the frei0r +header and configure FFmpeg with --enable-frei0r. +

+

This filter accepts the following options: +

+
+
filter_name
+


The name of the frei0r effect to load. If the environment variable +FREI0R_PATH is defined, the frei0r effect is searched in each one of the +directories specified by the colon-separated list in FREI0R_PATH, +otherwise in the standard frei0r paths, which are in this order: +‘HOME/.frei0r-1/lib/’, ‘/usr/local/lib/frei0r-1/’, +‘/usr/lib/frei0r-1/’. +

+
+
filter_params
+

A ’|’-separated list of parameters to pass to the frei0r effect. +

+
+
+ +

A frei0r effect parameter can be a boolean (whose values are specified +with "y" and "n"), a double, a color (specified by the syntax +R/G/B, (R, G, and B being float +numbers from 0.0 to 1.0) or by a color description specified in the "Color" +section in the ffmpeg-utils manual), a position (specified by the syntax X/Y, +X and Y being float numbers) and a string. +

+

The number and kind of parameters depend on the loaded effect. If an +effect parameter is not specified the default value is set. +

+ +

37.36.1 Examples

+ +
    +
  • +Apply the distort0r effect, set the first two double parameters: +
     
    frei0r=filter_name=distort0r:filter_params=0.5|0.01
    +
    + +
  • +Apply the colordistance effect, take a color as first parameter: +
     
    frei0r=colordistance:0.2/0.3/0.4
    +frei0r=colordistance:violet
    +frei0r=colordistance:0x112233
    +
    + +
  • +Apply the perspective effect, specify the top left and top right image +positions: +
     
    frei0r=perspective:0.2/0.2|0.8/0.2
    +
    +
+ +

For more information see: +http://frei0r.dyne.org +

+ +

37.37 geq

+ +

The filter accepts the following options: +

+
+
lum_expr, lum
+

Set the luminance expression. +

+
cb_expr, cb
+

Set the chrominance blue expression. +

+
cr_expr, cr
+

Set the chrominance red expression. +

+
alpha_expr, a
+

Set the alpha expression. +

+
red_expr, r
+

Set the red expression. +

+
green_expr, g
+

Set the green expression. +

+
blue_expr, b
+

Set the blue expression. +

+
+ +

The colorspace is selected according to the specified options. If one +of the ‘lum_expr’, ‘cb_expr’, or ‘cr_expr’ +options is specified, the filter will automatically select a YCbCr +colorspace. If one of the ‘red_expr’, ‘green_expr’, or +‘blue_expr’ options is specified, it will select an RGB +colorspace. +

+

If one of the chrominance expression is not defined, it falls back on the other +one. If no alpha expression is specified it will evaluate to opaque value. +If none of chrominance expressions are specified, they will evaluate +to the luminance expression. +

+

The expressions can use the following variables and functions: +

+
+
N
+

The sequential number of the filtered frame, starting from 0. +

+
+
X
+
Y
+

The coordinates of the current sample. +

+
+
W
+
H
+

The width and height of the image. +

+
+
SW
+
SH
+

Width and height scale depending on the currently filtered plane. It is the +ratio between the corresponding luma plane number of pixels and the current +plane ones. E.g. for YUV4:2:0 the values are 1,1 for the luma plane, and +0.5,0.5 for chroma planes. +

+
+
T
+

Time of the current frame, expressed in seconds. +

+
+
p(x, y)
+

Return the value of the pixel at location (x,y) of the current +plane. +

+
+
lum(x, y)
+

Return the value of the pixel at location (x,y) of the luminance +plane. +

+
+
cb(x, y)
+

Return the value of the pixel at location (x,y) of the +blue-difference chroma plane. Return 0 if there is no such plane. +

+
+
cr(x, y)
+

Return the value of the pixel at location (x,y) of the +red-difference chroma plane. Return 0 if there is no such plane. +

+
+
r(x, y)
+
g(x, y)
+
b(x, y)
+

Return the value of the pixel at location (x,y) of the +red/green/blue component. Return 0 if there is no such component. +

+
+
alpha(x, y)
+

Return the value of the pixel at location (x,y) of the alpha +plane. Return 0 if there is no such plane. +

+
+ +

For functions, if x and y are outside the area, the value will be +automatically clipped to the closer edge. +

+ +

37.37.1 Examples

+ +
    +
  • +Flip the image horizontally: +
     
    geq=p(W-X\,Y)
    +
    + +
  • +Generate a bidimensional sine wave, with angle PI/3 and a +wavelength of 100 pixels: +
     
    geq=128 + 100*sin(2*(PI/100)*(cos(PI/3)*(X-50*T) + sin(PI/3)*Y)):128:128
    +
    + +
  • +Generate a fancy enigmatic moving light: +
     
    nullsrc=s=256x256,geq=random(1)/hypot(X-cos(N*0.07)*W/2-W/2\,Y-sin(N*0.09)*H/2-H/2)^2*1000000*sin(N*0.02):128:128
    +
    + +
  • +Generate a quick emboss effect: +
     
    format=gray,geq=lum_expr='(p(X,Y)+(256-p(X-4,Y-4)))/2'
    +
    + +
  • +Modify RGB components depending on pixel position: +
     
    geq=r='X/W*r(X,Y)':g='(1-X/W)*g(X,Y)':b='(H-Y)/H*b(X,Y)'
    +
    +
+ + +

37.38 gradfun

+ +

Fix the banding artifacts that are sometimes introduced into nearly flat +regions by truncation to 8bit color depth. +Interpolate the gradients that should go where the bands are, and +dither them. +

+

This filter is designed for playback only. Do not use it prior to +lossy compression, because compression tends to lose the dither and +bring back the bands. +

+

This filter accepts the following options: +

+
+
strength
+

The maximum amount by which the filter will change any one pixel. Also the +threshold for detecting nearly flat regions. Acceptable values range from .51 to +64, default value is 1.2, out-of-range values will be clipped to the valid +range. +

+
+
radius
+

The neighborhood to fit the gradient to. A larger radius makes for smoother +gradients, but also prevents the filter from modifying the pixels near detailed +regions. Acceptable values are 8-32, default value is 16, out-of-range values +will be clipped to the valid range. +

+
+
+ +

Alternatively, the options can be specified as a flat string: +strength[:radius] +

+ +

37.38.1 Examples

+ +
    +
  • +Apply the filter with a 3.5 strength and radius of 8: +
     
    gradfun=3.5:8
    +
    + +
  • +Specify radius, omitting the strength (which will fall-back to the default +value): +
     
    gradfun=radius=8
    +
    + +
+ +

+

+

37.39 haldclut

+ +

Apply a Hald CLUT to a video stream. +

+

First input is the video stream to process, and second one is the Hald CLUT. +The Hald CLUT input can be a simple picture or a complete video stream. +

+

The filter accepts the following options: +

+
+
shortest
+

Force termination when the shortest input terminates. Default is 0. +

+
repeatlast
+


Continue applying the last CLUT after the end of the stream. A value of +0 disables the filter after the last frame of the CLUT is reached. +Default is 1. +

+
+ +

haldclut also has the same interpolation options as lut3d (both +filters share the same internals). +

+

More information about the Hald CLUT can be found on Eskil Steenberg’s website +(Hald CLUT author) at http://www.quelsolaar.com/technology/clut.html. +

+ +

37.39.1 Workflow examples

+ + +

37.39.1.1 Hald CLUT video stream

+ +

Generate an identity Hald CLUT stream altered with various effects: +

 
ffmpeg -f lavfi -i haldclutsrc=8 -vf "hue=H=2*PI*t:s=sin(2*PI*t)+1, curves=cross_process" -t 10 -c:v ffv1 clut.nut
+
+ +

Note: make sure you use a lossless codec. +

+

Then use it with haldclut to apply it on some random stream: +

 
ffmpeg -f lavfi -i mandelbrot -i clut.nut -filter_complex '[0][1] haldclut' -t 20 mandelclut.mkv
+
+ +

The Hald CLUT will be applied to the 10 first seconds (duration of +‘clut.nut’), then the latest picture of that CLUT stream will be applied +to the remaining frames of the mandelbrot stream. +

+ +

37.39.1.2 Hald CLUT with preview

+ +

A Hald CLUT is supposed to be a squared image of Level*Level*Level by +Level*Level*Level pixels. For a given Hald CLUT, FFmpeg will select the +biggest possible square starting at the top left of the picture. The remaining +padding pixels (bottom or right) will be ignored. This area can be used to add +a preview of the Hald CLUT. +

+

Typically, the following generated Hald CLUT will be supported by the +haldclut filter: +

+
 
ffmpeg -f lavfi -i haldclutsrc=8 -vf "
+   pad=iw+320 [padded_clut];
+   smptebars=s=320x256, split [a][b];
+   [padded_clut][a] overlay=W-320:h, curves=color_negative [main];
+   [main][b] overlay=W-320" -frames:v 1 clut.png
+
+ +

It contains the original and a preview of the effect of the CLUT: SMPTE color +bars are displayed on the right-top, and below the same color bars processed by +the color changes. +

+

Then, the effect of this Hald CLUT can be visualized with: +

 
ffplay input.mkv -vf "movie=clut.png, [in] haldclut"
+
+ + +

37.40 hflip

+ +

Flip the input video horizontally. +

+

For example to horizontally flip the input video with ffmpeg: +

 
ffmpeg -i in.avi -vf "hflip" out.avi
+
+ + +

37.41 histeq

+

This filter applies a global color histogram equalization on a +per-frame basis. +

+

It can be used to correct video that has a compressed range of pixel +intensities. The filter redistributes the pixel intensities to +equalize their distribution across the intensity range. It may be +viewed as an "automatically adjusting contrast filter". This filter is +useful only for correcting degraded or poorly captured source +video. +

+

The filter accepts the following options: +

+
+
strength
+

Determine the amount of equalization to be applied. As the strength +is reduced, the distribution of pixel intensities more-and-more +approaches that of the input frame. The value must be a float number +in the range [0,1] and defaults to 0.200. +

+
+
intensity
+


Set the maximum intensity that can be generated and scale the output +values appropriately. The strength should be set as desired and then +the intensity can be limited if needed to avoid washing-out. The value +must be a float number in the range [0,1] and defaults to 0.210. +

+
+
antibanding
+

Set the antibanding level. If enabled the filter will randomly vary +the luminance of output pixels by a small amount to avoid banding of +the histogram. Possible values are none, weak or +strong. It defaults to none. +

+
+ + +

37.42 histogram

+ +

Compute and draw a color distribution histogram for the input video. +

+

The computed histogram is a representation of the color component +distribution in an image. +

+

The filter accepts the following options: +

+
+
mode
+

Set histogram mode. +

+

It accepts the following values: +

+
levels
+

Standard histogram that displays the color components distribution in an +image. Displays color graph for each color component. Shows distribution of +the Y, U, V, A or R, G, B components, depending on input format, in the +current frame. Below each graph a color component scale meter is shown. +

+
+
color
+

Displays chroma values (U/V color placement) in a two dimensional +graph (which is called a vectorscope). The brighter a pixel in the +vectorscope, the more pixels of the input frame correspond to that pixel +(i.e., more pixels have this chroma value). The V component is displayed on +the horizontal (X) axis, with the leftmost side being V = 0 and the rightmost +side being V = 255. The U component is displayed on the vertical (Y) axis, +with the top representing U = 0 and the bottom representing U = 255. +

+

The position of a white pixel in the graph corresponds to the chroma value of +a pixel of the input clip. The graph can therefore be used to read the hue +(color flavor) and the saturation (the dominance of the hue in the color). As +the hue of a color changes, it moves around the square. At the center of the +square the saturation is zero, which means that the corresponding pixel has no +color. If the amount of a specific color is increased (while leaving the other +colors unchanged) the saturation increases, and the indicator moves towards +the edge of the square. +

+
+
color2
+

Chroma values in vectorscope, similar as color but actual chroma values +are displayed. +

+
+
waveform
+

Per row/column color component graph. In row mode, the graph on the left side +represents color component value 0 and the right side represents value = 255. +In column mode, the top side represents color component value = 0 and bottom +side represents value = 255. +

+
+

Default value is levels. +

+
+
level_height
+

Set height of level in levels. Default value is 200. +Allowed range is [50, 2048]. +

+
+
scale_height
+

Set height of color scale in levels. Default value is 12. +Allowed range is [0, 40]. +

+
+
step
+

Set step for waveform mode. Smaller values are useful to find out how +many values of the same luminance are distributed across input rows/columns. +Default value is 10. Allowed range is [1, 255]. +

+
+
waveform_mode
+

Set mode for waveform. Can be either row, or column. +Default is row. +

+
+
waveform_mirror
+

Set mirroring mode for waveform. 0 means unmirrored, 1 +means mirrored. In mirrored mode, higher values will be represented on the left +side for row mode and at the top for column mode. Default is +0 (unmirrored). +

+
+
display_mode
+

Set display mode for waveform and levels. +It accepts the following values: +

+
parade
+

Display separate graph for the color components side by side in +row waveform mode or one below the other in column waveform mode +for waveform histogram mode. For levels histogram mode, +per color component graphs are placed below each other. +

+


Using this display mode in waveform histogram mode makes it easy to +spot color casts in the highlights and shadows of an image, by comparing the +contours of the top and the bottom graphs of each waveform. Since whites, +grays, and blacks are characterized by exactly equal amounts of red, green, +and blue, neutral areas of the picture should display three waveforms of +roughly equal width/height. If not, the correction is easy to perform by +making level adjustments to the three waveforms. +

+
+
overlay
+

Presents information identical to that in the parade, except +that the graphs representing color components are superimposed directly +over one another. +

+

This display mode in waveform histogram mode makes it easier to spot +relative differences or similarities in overlapping areas of the color +components that are supposed to be identical, such as neutral whites, grays, +or blacks. +

+
+

Default is parade. +

+
+
levels_mode
+

Set mode for levels. Can be either linear, or logarithmic. +Default is linear. +

+
+ + +

37.42.1 Examples

+ +
    +
  • +Calculate and draw histogram: +
     
    ffplay -i input -vf histogram
    +
    + +
+ +

+

+

37.43 hqdn3d

+ +

High precision/quality 3d denoise filter. This filter aims to reduce +image noise producing smooth images and making still images really +still. It should enhance compressibility. +

+

It accepts the following optional parameters: +

+
+
luma_spatial
+

a non-negative float number which specifies spatial luma strength, +defaults to 4.0 +

+
+
chroma_spatial
+

a non-negative float number which specifies spatial chroma strength, +defaults to 3.0*luma_spatial/4.0 +

+
+
luma_tmp
+

a float number which specifies luma temporal strength, defaults to +6.0*luma_spatial/4.0 +

+
+
chroma_tmp
+

a float number which specifies chroma temporal strength, defaults to +luma_tmp*chroma_spatial/luma_spatial +

+
+ + +

37.44 hue

+ +

Modify the hue and/or the saturation of the input. +

+

This filter accepts the following options: +

+
+
h
+

Specify the hue angle as a number of degrees. It accepts an expression, +and defaults to "0". +

+
+
s
+

Specify the saturation in the [-10,10] range. It accepts an expression and +defaults to "1". +

+
+
H
+

Specify the hue angle as a number of radians. It accepts an +expression, and defaults to "0". +

+
+
b
+

Specify the brightness in the [-10,10] range. It accepts an expression and +defaults to "0". +

+
+ +

h’ and ‘H’ are mutually exclusive, and can’t be +specified at the same time. +

+

The ‘b’, ‘h’, ‘H’ and ‘s’ option values are +expressions containing the following constants: +

+
+
n
+

frame count of the input frame starting from 0 +

+
+
pts
+

presentation timestamp of the input frame expressed in time base units +

+
+
r
+

frame rate of the input video, NAN if the input frame rate is unknown +

+
+
t
+

timestamp expressed in seconds, NAN if the input timestamp is unknown +

+
+
tb
+

time base of the input video +

+
+ + +

37.44.1 Examples

+ +
    +
  • +Set the hue to 90 degrees and the saturation to 1.0: +
     
    hue=h=90:s=1
    +
    + +
  • +Same command but expressing the hue in radians: +
     
    hue=H=PI/2:s=1
    +
    + +
  • +Rotate hue and make the saturation swing between 0 +and 2 over a period of 1 second: +
     
    hue="H=2*PI*t: s=sin(2*PI*t)+1"
    +
    + +
  • +Apply a 3 seconds saturation fade-in effect starting at 0: +
     
    hue="s=min(t/3\,1)"
    +
    + +

    The general fade-in expression can be written as: +

     
    hue="s=max(0\, min(1\, (t-START)/DURATION))"
    +
    + +
  • +Apply a 3 seconds saturation fade-out effect starting at 5 seconds: +
     
    hue="s=max(0\, min(1\, (8-t)/3))"
    +
    + +

    The general fade-out expression can be written as: +

     
    hue="s=max(0\, min(1\, (START+DURATION-t)/DURATION))"
    +
    + +
+ + +

37.44.2 Commands

+ +

This filter supports the following commands: +

+
b
+
s
+
h
+
H
+

Modify the hue and/or the saturation and/or brightness of the input video. +The command accepts the same syntax of the corresponding option. +

+

If the specified expression is not valid, it is kept at its current +value. +

+
+ + +

37.45 idet

+ +

Detect video interlacing type. +

+

This filter tries to detect if the input is interlaced or progressive, +top or bottom field first. +

+

The filter accepts the following options: +

+
+
intl_thres
+

Set interlacing threshold. +

+
prog_thres
+

Set progressive threshold. +

+
+ + +

37.46 il

+ +

Deinterleave or interleave fields. +

+

This filter allows one to process interlaced images fields without +deinterlacing them. Deinterleaving splits the input frame into 2 +fields (so called half pictures). Odd lines are moved to the top +half of the output image, even lines to the bottom half. +You can process (filter) them independently and then re-interleave them. +

+

The filter accepts the following options: +

+
+
luma_mode, l
+
chroma_mode, c
+
alpha_mode, a
+

Available values for luma_mode, chroma_mode and +alpha_mode are: +

+
+
none
+

Do nothing. +

+
+
deinterleave, d
+

Deinterleave fields, placing one above the other. +

+
+
interleave, i
+

Interleave fields. Reverse the effect of deinterleaving. +

+
+

Default value is none. +

+
+
luma_swap, ls
+
chroma_swap, cs
+
alpha_swap, as
+

Swap luma/chroma/alpha fields. Exchange even & odd lines. Default value is 0. +

+
+ + +

37.47 interlace

+ +

Simple interlacing filter from progressive contents. This interleaves upper (or +lower) lines from odd frames with lower (or upper) lines from even frames, +halving the frame rate and preserving image height. A vertical lowpass filter +is always applied in order to avoid twitter effects and reduce moiré patterns. +

+
 
   Original        Original             New Frame
+   Frame 'j'      Frame 'j+1'             (tff)
+  ==========      ===========       ==================
+    Line 0  -------------------->    Frame 'j' Line 0
+    Line 1          Line 1  ---->   Frame 'j+1' Line 1
+    Line 2 --------------------->    Frame 'j' Line 2
+    Line 3          Line 3  ---->   Frame 'j+1' Line 3
+     ...             ...                   ...
+New Frame + 1 will be generated by Frame 'j+2' and Frame 'j+3' and so on
+
+ +

It accepts the following optional parameters: +

+
+
scan
+

determines whether the interlaced frame is taken from the even (tff - default) +or odd (bff) lines of the progressive frame. +

+
+ + +

37.48 kerndeint

+ +


Deinterlace input video by applying Donald Graft’s adaptive kernel +deinterlacing. Work on interlaced parts of a video to produce +progressive frames. +

+

The description of the accepted parameters follows. +

+
+
thresh
+


Set the threshold which affects the filter’s tolerance when +determining if a pixel line must be processed. It must be an integer +in the range [0,255] and defaults to 10. A value of 0 will result in +applying the process on every pixel. +

+
+
map
+

Paint pixels exceeding the threshold value to white if set to 1. +Default is 0. +

+
+
order
+

Set the fields order. Swap fields if set to 1, leave fields alone if +0. Default is 0. +

+
+
sharp
+

Enable additional sharpening if set to 1. Default is 0. +

+
+
twoway
+

Enable twoway sharpening if set to 1. Default is 0. +

+
+ + +

37.48.1 Examples

+ +
    +
  • +Apply default values: +
     
    kerndeint=thresh=10:map=0:order=0:sharp=0:twoway=0
    +
    + +
  • +Enable additional sharpening: +
     
    kerndeint=sharp=1
    +
    + +
  • +Paint processed pixels in white: +
     
    kerndeint=map=1
    +
    +
+ +

+

+

37.49 lut3d

+ +

Apply a 3D LUT to an input video. +

+

The filter accepts the following options: +

+
+
file
+

Set the 3D LUT file name. +

+

Currently supported formats: +

+
3dl
+

AfterEffects +

+
cube
+

Iridas +

+
dat
+

DaVinci +

+
m3d
+

Pandora +

+
+
+
interp
+

Select interpolation mode. +

+

Available values are: +

+
+
nearest
+

Use values from the nearest defined point. +

+
trilinear
+

Interpolate values using the 8 points defining a cube. +

+
tetrahedral
+

Interpolate values using a tetrahedron. +

+
+
+
+ + +

37.50 lut, lutrgb, lutyuv

+ +

Compute a look-up table for binding each pixel component input value +to an output value, and apply it to input video. +

+

lutyuv applies a lookup table to a YUV input video, lutrgb +to an RGB input video. +

+

These filters accept the following options: +

+
c0
+

set first pixel component expression +

+
c1
+

set second pixel component expression +

+
c2
+

set third pixel component expression +

+
c3
+

set fourth pixel component expression, corresponds to the alpha component +

+
+
r
+

set red component expression +

+
g
+

set green component expression +

+
b
+

set blue component expression +

+
a
+

alpha component expression +

+
+
y
+

set Y/luminance component expression +

+
u
+

set U/Cb component expression +

+
v
+

set V/Cr component expression +

+
+ +

Each of them specifies the expression to use for computing the lookup table for +the corresponding pixel component values. +

+

The exact component associated to each of the c* options depends on the +format in input. +

+

The lut filter requires either YUV or RGB pixel formats in input, +lutrgb requires RGB pixel formats in input, and lutyuv requires YUV. +

+

The expressions can contain the following constants and functions: +

+
+
w
+
h
+

the input width and height +

+
+
val
+

input value for the pixel component +

+
+
clipval
+

the input value clipped in the minval-maxval range +

+
+
maxval
+

maximum value for the pixel component +

+
+
minval
+

minimum value for the pixel component +

+
+
negval
+

the negated value for the pixel component value clipped in the +minval-maxval range , it corresponds to the expression +"maxval-clipval+minval" +

+
+
clip(val)
+

the computed value in val clipped in the +minval-maxval range +

+
+
gammaval(gamma)
+

the computed gamma correction value of the pixel component value +clipped in the minval-maxval range, corresponds to the +expression +"pow((clipval-minval)/(maxval-minval)\,gamma)*(maxval-minval)+minval" +

+
+
+ +

All expressions default to "val". +

+ +

37.50.1 Examples

+ +
    +
  • +Negate input video: +
     
    lutrgb="r=maxval+minval-val:g=maxval+minval-val:b=maxval+minval-val"
    +lutyuv="y=maxval+minval-val:u=maxval+minval-val:v=maxval+minval-val"
    +
    + +

    The above is the same as: +

     
    lutrgb="r=negval:g=negval:b=negval"
    +lutyuv="y=negval:u=negval:v=negval"
    +
    + +
  • +Negate luminance: +
     
    lutyuv=y=negval
    +
    + +
  • +Remove chroma components, turns the video into a graytone image: +
     
    lutyuv="u=128:v=128"
    +
    + +
  • +Apply a luma burning effect: +
     
    lutyuv="y=2*val"
    +
    + +
  • +Remove green and blue components: +
     
    lutrgb="g=0:b=0"
    +
    + +
  • +Set a constant alpha channel value on input: +
     
    format=rgba,lutrgb=a="maxval-minval/2"
    +
    + +
  • +Correct luminance gamma by a 0.5 factor: +
     
    lutyuv=y=gammaval(0.5)
    +
    + +
  • +Discard least significant bits of luma: +
     
    lutyuv=y='bitand(val, 128+64+32)'
    +
    +
+ + +

37.51 mergeplanes

+ +

Merge color channel components from several video streams. +

+


The filter accepts up to 4 input streams, and merges selected input +planes into the output video. +

+

This filter accepts the following options: +

+
mapping
+

Set input to output plane mapping. Default is 0. +

+


The mapping is specified as a bitmap. It should be specified as a +hexadecimal number in the form 0xAa[Bb[Cc[Dd]]]. ’Aa’ describes the +mapping for the first plane of the output stream. ’A’ sets the number of +the input stream to use (from 0 to 3), and ’a’ the plane number of the +corresponding input to use (from 0 to 3). The rest of the mapping is +similar, ’Bb’ describes the mapping for the output stream second +plane, ’Cc’ describes the mapping for the output stream third plane and +’Dd’ describes the mapping for the output stream fourth plane. +

+
+
format
+

Set output pixel format. Default is yuva444p. +

+
+ + +

37.51.1 Examples

+ +
    +
  • +Merge three gray video streams of same width and height into single video stream: +
     
    [a0][a1][a2]mergeplanes=0x001020:yuv444p
    +
    + +
  • +Merge 1st yuv444p stream and 2nd gray video stream into yuva444p video stream: +
     
    [a0][a1]mergeplanes=0x00010210:yuva444p
    +
    + +
  • +Swap Y and A plane in yuva444p stream: +
     
    format=yuva444p,mergeplanes=0x03010200:yuva444p
    +
    + +
  • +Swap U and V plane in yuv420p stream: +
     
    format=yuv420p,mergeplanes=0x000201:yuv420p
    +
    + +
  • +Cast a rgb24 clip to yuv444p: +
     
    format=rgb24,mergeplanes=0x000102:yuv444p
    +
    +
+ + +

37.52 mcdeint

+ +

Apply motion-compensation deinterlacing. +

+

It needs one field per frame as input and must thus be used together +with yadif=1/3 or equivalent. +

+

This filter accepts the following options: +

+
mode
+

Set the deinterlacing mode. +

+

It accepts one of the following values: +

+
fast
+
medium
+
slow
+

use iterative motion estimation +

+
extra_slow
+

like ‘slow’, but use multiple reference frames. +

+
+

Default value is ‘fast’. +

+
+
parity
+

Set the picture field parity assumed for the input video. It must be +one of the following values: +

+
+
0, tff
+

assume top field first +

+
1, bff
+

assume bottom field first +

+
+ +

Default value is ‘bff’. +

+
+
qp
+

Set per-block quantization parameter (QP) used by the internal +encoder. +

+

Higher values should result in a smoother motion vector field but less +optimal individual vectors. Default value is 1. +

+
+ + +

37.53 mp

+ +

Apply an MPlayer filter to the input video. +

+

This filter provides a wrapper around some of the filters of +MPlayer/MEncoder. +

+

This wrapper is considered experimental. Some of the wrapped filters +may not work properly and we may drop support for them, as they will +be implemented natively into FFmpeg. Thus you should avoid +depending on them when writing portable scripts. +

+

The filter accepts the parameters: +filter_name[:=]filter_params +

+

filter_name is the name of a supported MPlayer filter, +filter_params is a string containing the parameters accepted by +the named filter. +

+

The list of the currently supported filters follows: +

+
eq2
+
eq
+
fspp
+
ilpack
+
pp7
+
softpulldown
+
uspp
+
+ +

The parameter syntax and behavior for the listed filters are the same +of the corresponding MPlayer filters. For detailed instructions check +the "VIDEO FILTERS" section in the MPlayer manual. +

+ +

37.53.1 Examples

+ +
    +
  • +Adjust gamma, brightness, contrast: +
     
    mp=eq2=1.0:2:0.5
    +
    +
+ +

See also mplayer(1), http://www.mplayerhq.hu/. +

+ +

37.54 mpdecimate

+ +

Drop frames that do not differ greatly from the previous frame in +order to reduce frame rate. +

+

The main use of this filter is for very-low-bitrate encoding +(e.g. streaming over dialup modem), but it could in theory be used for +fixing movies that were inverse-telecined incorrectly. +

+

A description of the accepted options follows. +

+
+
max
+

Set the maximum number of consecutive frames which can be dropped (if positive), or the minimum interval between dropped frames (if negative). If the value is 0, the frame is dropped regardless of the number of previous sequentially dropped frames.

+

Default value is 0. +

+
+
hi
+
lo
+
frac
+

Set the dropping threshold values. +

+

Values for ‘hi’ and ‘lo’ are for 8x8 pixel blocks and +represent actual pixel value differences, so a threshold of 64 +corresponds to 1 unit of difference for each pixel, or the same spread +out differently over the block. +

+

A frame is a candidate for dropping if no 8x8 blocks differ by more +than a threshold of ‘hi’, and if no more than ‘frac’ blocks (1 +meaning the whole image) differ by more than a threshold of ‘lo’. +

+

Default value for ‘hi’ is 64*12, default value for ‘lo’ is +64*5, and default value for ‘frac’ is 0.33. +

+
+ + + +

37.55 negate

+ +

Negate input video. +

+

This filter accepts an integer in input, if non-zero it negates the +alpha component (if available). The default value in input is 0. +

+ +

37.56 noformat

+ +

Force libavfilter not to use any of the specified pixel formats for the +input to the next filter. +

+

This filter accepts the following parameters: +

+
pix_fmts
+

A ’|’-separated list of pixel format names, for example +"pix_fmts=yuv420p|monow|rgb24". +

+
+
+ + +

37.56.1 Examples

+ +
    +
  • +Force libavfilter to use a format different from yuv420p for the +input to the vflip filter: +
     
    noformat=pix_fmts=yuv420p,vflip
    +
    + +
  • +Convert the input video to any of the formats not contained in the list: +
     
    noformat=yuv420p|yuv444p|yuv410p
    +
    +
+ + +

37.57 noise

+ +

Add noise on video input frame. +

+

The filter accepts the following options: +

+
+
all_seed
+
c0_seed
+
c1_seed
+
c2_seed
+
c3_seed
+

Set noise seed for specific pixel component or all pixel components in case +of all_seed. Default value is 123457. +

+
+
all_strength, alls
+
c0_strength, c0s
+
c1_strength, c1s
+
c2_strength, c2s
+
c3_strength, c3s
+

Set noise strength for specific pixel component or all pixel components in case +all_strength. Default value is 0. Allowed range is [0, 100]. +

+
+
all_flags, allf
+
c0_flags, c0f
+
c1_flags, c1f
+
c2_flags, c2f
+
c3_flags, c3f
+

Set pixel component flags or set flags for all components if all_flags. +Available values for component flags are: +

+
a
+

averaged temporal noise (smoother) +

+
p
+

mix random noise with a (semi)regular pattern +

+
t
+

temporal noise (noise pattern changes between frames) +

+
u
+

uniform noise (gaussian otherwise) +

+
+
+
+ + +

37.57.1 Examples

+ +

Add temporal and uniform noise to input video: +

 
noise=alls=20:allf=t+u
+
+ + +

37.58 null

+ +

Pass the video source unchanged to the output. +

+ +

37.59 ocv

+ +

Apply video transform using libopencv. +

+

To enable this filter install libopencv library and headers and +configure FFmpeg with --enable-libopencv. +

+

This filter accepts the following parameters: +

+
+
filter_name
+

The name of the libopencv filter to apply. +

+
+
filter_params
+

The parameters to pass to the libopencv filter. If not specified the default +values are assumed. +

+
+
+ +

Refer to the official libopencv documentation for more precise +information: +http://opencv.willowgarage.com/documentation/c/image_filtering.html +

+

Follows the list of supported libopencv filters. +

+

+

+

37.59.1 dilate

+ +

Dilate an image by using a specific structuring element. +This filter corresponds to the libopencv function cvDilate. +

+

It accepts the parameters: struct_el|nb_iterations. +

+

struct_el represents a structuring element, and has the syntax: +colsxrows+anchor_xxanchor_y/shape +

+

cols and rows represent the number of columns and rows of +the structuring element, anchor_x and anchor_y the anchor +point, and shape the shape for the structuring element, and +can be one of the values "rect", "cross", "ellipse", "custom". +

+

If the value for shape is "custom", it must be followed by a string of the form "=filename". The file with name filename is assumed to represent a binary image, with each printable character corresponding to a bright pixel. When a custom shape is used, cols and rows are ignored; the number of columns and rows of the read file are assumed instead.

+

The default value for struct_el is "3x3+0x0/rect". +

+

nb_iterations specifies the number of times the transform is +applied to the image, and defaults to 1. +

+

Some examples follow:

 
# use the default values
+ocv=dilate
+
+# dilate using a structuring element with a 5x5 cross, iterate two times
+ocv=filter_name=dilate:filter_params=5x5+2x2/cross|2
+
+# read the shape from the file diamond.shape, iterate two times
+# the file diamond.shape may contain a pattern of characters like this:
+#   *
+#  ***
+# *****
+#  ***
+#   *
+# the specified cols and rows are ignored (but not the anchor point coordinates)
+ocv=dilate:0x0+2x2/custom=diamond.shape|2
+
+ + +

37.59.2 erode

+ +

Erode an image by using a specific structuring element. +This filter corresponds to the libopencv function cvErode. +

+

The filter accepts the parameters: struct_el:nb_iterations, +with the same syntax and semantics as the dilate filter. +

+ +

37.59.3 smooth

+ +

Smooth the input video. +

+

The filter takes the following parameters: +type|param1|param2|param3|param4. +

+

type is the type of smooth filter to apply, and can be one of +the following values: "blur", "blur_no_scale", "median", "gaussian", +"bilateral". The default value is "gaussian". +

+

param1, param2, param3, and param4 are +parameters whose meanings depend on smooth type. param1 and +param2 accept integer positive values or 0, param3 and +param4 accept float values. +

+

The default value for param1 is 3, the default value for the +other parameters is 0. +

+

These parameters correspond to the parameters assigned to the +libopencv function cvSmooth. +

+

+

+

37.60 overlay

+ +

Overlay one video on top of another. +

+

It takes two inputs and one output, the first input is the "main" +video on which the second input is overlayed. +

+

This filter accepts the following parameters: +

+

A description of the accepted options follows. +

+
+
x
+
y
+

Set the expression for the x and y coordinates of the overlayed video +on the main video. Default value is "0" for both expressions. In case +the expression is invalid, it is set to a huge value (meaning that the +overlay will not be displayed within the output visible area). +

+
+
eof_action
+

The action to take when EOF is encountered on the secondary input, accepts one +of the following values: +

+
+
repeat
+

repeat the last frame (the default) +

+
endall
+

end both streams +

+
pass
+

pass through the main input +

+
+ +
+
eval
+

Set when the expressions for ‘x’, and ‘y’ are evaluated. +

+

It accepts the following values: +

+
init
+

only evaluate expressions once during the filter initialization or +when a command is processed +

+
+
frame
+

evaluate expressions for each incoming frame +

+
+ +

Default value is ‘frame’. +

+
+
shortest
+

If set to 1, force the output to terminate when the shortest input +terminates. Default value is 0. +

+
+
format
+

Set the format for the output video. +

+

It accepts the following values: +

+
yuv420
+

force YUV420 output +

+
+
yuv422
+

force YUV422 output +

+
+
yuv444
+

force YUV444 output +

+
+
rgb
+

force RGB output +

+
+ +

Default value is ‘yuv420’. +

+
+
rgb (deprecated)
+

If set to 1, force the filter to accept inputs in the RGB +color space. Default value is 0. This option is deprecated, use +‘format’ instead. +

+
+
repeatlast
+

If set to 1, force the filter to draw the last overlay frame over the +main input until the end of the stream. A value of 0 disables this +behavior. Default value is 1. +

+
+ +

The ‘x’, and ‘y’ expressions can contain the following +parameters. +

+
+
main_w, W
+
main_h, H
+

main input width and height +

+
+
overlay_w, w
+
overlay_h, h
+

overlay input width and height +

+
+
x
+
y
+

the computed values for x and y. They are evaluated for +each new frame. +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values of the output +format. For example for the pixel format "yuv422p" hsub is 2 and +vsub is 1. +

+
+
n
+

the number of input frame, starting from 0 +

+
+
pos
+

the position in the file of the input frame, NAN if unknown +

+
+
t
+

timestamp expressed in seconds, NAN if the input timestamp is unknown +

+
+
+ +

Note that the n, pos, t variables are available only +when evaluation is done per frame, and will evaluate to NAN +when ‘eval’ is set to ‘init’. +

+

Be aware that frames are taken from each input video in timestamp +order, hence, if their initial timestamps differ, it is a good idea +to pass the two inputs through a setpts=PTS-STARTPTS filter to +have them begin in the same zero timestamp, as it does the example for +the movie filter. +

+

You can chain together more overlays but you should test the +efficiency of such approach. +

+ +

37.60.1 Commands

+ +

This filter supports the following commands: +

+
x
+
y
+

Modify the x and y of the overlay input. +The command accepts the same syntax of the corresponding option. +

+

If the specified expression is not valid, it is kept at its current +value. +

+
+ + +

37.60.2 Examples

+ +
    +
  • +Draw the overlay at 10 pixels from the bottom right corner of the main +video: +
     
    overlay=main_w-overlay_w-10:main_h-overlay_h-10
    +
    + +

    Using named options the example above becomes: +

     
    overlay=x=main_w-overlay_w-10:y=main_h-overlay_h-10
    +
    + +
  • +Insert a transparent PNG logo in the bottom left corner of the input, +using the ffmpeg tool with the -filter_complex option: +
     
    ffmpeg -i input -i logo -filter_complex 'overlay=10:main_h-overlay_h-10' output
    +
    + +
  • +Insert 2 different transparent PNG logos (second logo on bottom +right corner) using the ffmpeg tool: +
     
    ffmpeg -i input -i logo1 -i logo2 -filter_complex 'overlay=x=10:y=H-h-10,overlay=x=W-w-10:y=H-h-10' output
    +
    + +
  • +Add a transparent color layer on top of the main video, WxH +must specify the size of the main input to the overlay filter: +
     
    color=color=red@.3:size=WxH [over]; [in][over] overlay [out]
    +
    + +
  • +Play an original video and a filtered version (here with the deshake +filter) side by side using the ffplay tool: +
     
    ffplay input.avi -vf 'split[a][b]; [a]pad=iw*2:ih[src]; [b]deshake[filt]; [src][filt]overlay=w'
    +
    + +

    The above command is the same as: +

     
    ffplay input.avi -vf 'split[b], pad=iw*2[src], [b]deshake, [src]overlay=w'
    +
    + +
  • +Make a sliding overlay appearing from the left to the right top part of the +screen starting since time 2: +
     
    overlay=x='if(gte(t,2), -w+(t-2)*20, NAN)':y=0
    +
    + +
  • +Compose output by putting two input videos side to side: +
     
    ffmpeg -i left.avi -i right.avi -filter_complex "
    +nullsrc=size=200x100 [background];
    +[0:v] setpts=PTS-STARTPTS, scale=100x100 [left];
    +[1:v] setpts=PTS-STARTPTS, scale=100x100 [right];
    +[background][left]       overlay=shortest=1       [background+left];
    +[background+left][right] overlay=shortest=1:x=100 [left+right]
    +"
    +
    + +
  • +mask 10-20 seconds of a video by applying the delogo filter to a section +
     
    ffmpeg -i test.avi -codec:v:0 wmv2 -ar 11025 -b:v 9000k
    +-vf '[in]split[split_main][split_delogo];[split_delogo]trim=start=360:end=371,delogo=0:0:640:480[delogoed];[split_main][delogoed]overlay=eof_action=pass[out]'
    +masked.avi
    +
    + +
  • +Chain several overlays in cascade: +
     
    nullsrc=s=200x200 [bg];
    +testsrc=s=100x100, split=4 [in0][in1][in2][in3];
    +[in0] lutrgb=r=0, [bg]   overlay=0:0     [mid0];
    +[in1] lutrgb=g=0, [mid0] overlay=100:0   [mid1];
    +[in2] lutrgb=b=0, [mid1] overlay=0:100   [mid2];
    +[in3] null,       [mid2] overlay=100:100 [out0]
    +
    + +
+ + +

37.61 owdenoise

+ +

Apply Overcomplete Wavelet denoiser. +

+

The filter accepts the following options: +

+
+
depth
+

Set depth. +

+

Larger depth values will denoise lower frequency components more, but +slow down filtering. +

+

Must be an int in the range 8-16, default is 8. +

+
+
luma_strength, ls
+

Set luma strength. +

+

Must be a double value in the range 0-1000, default is 1.0. +

+
+
chroma_strength, cs
+

Set chroma strength. +

+

Must be a double value in the range 0-1000, default is 1.0. +

+
+ + +

37.62 pad

+ +

Add paddings to the input image, and place the original input at the +given coordinates x, y. +

+

This filter accepts the following parameters: +

+
+
width, w
+
height, h
+

Specify an expression for the size of the output image with the +paddings added. If the value for width or height is 0, the +corresponding input size is used for the output. +

+

The width expression can reference the value set by the +height expression, and vice versa. +

+

The default value of width and height is 0. +

+
+
x
+
y
+

Specify an expression for the offsets where to place the input image +in the padded area with respect to the top/left border of the output +image. +

+

The x expression can reference the value set by the y +expression, and vice versa. +

+

The default value of x and y is 0. +

+
+
color
+

Specify the color of the padded area. For the syntax of this option, +check the "Color" section in the ffmpeg-utils manual. +

+

The default value of color is "black". +

+
+ +

The value for the width, height, x, and y +options are expressions containing the following constants: +

+
+
in_w
+
in_h
+

the input video width and height +

+
+
iw
+
ih
+

same as in_w and in_h +

+
+
out_w
+
out_h
+

the output width and height, that is the size of the padded area as +specified by the width and height expressions +

+
+
ow
+
oh
+

same as out_w and out_h +

+
+
x
+
y
+

x and y offsets as specified by the x and y +expressions, or NAN if not yet specified +

+
+
a
+

same as iw / ih +

+
+
sar
+

input sample aspect ratio +

+
+
dar
+

input display aspect ratio, it is the same as (iw / ih) * sar +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+ + +

37.62.1 Examples

+ +
    +
  • +Add paddings with color "violet" to the input video. Output video +size is 640x480, the top-left corner of the input video is placed at +column 0, row 40: +
     
    pad=640:480:0:40:violet
    +
    + +

    The example above is equivalent to the following command: +

     
    pad=width=640:height=480:x=0:y=40:color=violet
    +
    + +
  • +Pad the input to get an output with dimensions increased by 3/2, +and put the input video at the center of the padded area: +
     
    pad="3/2*iw:3/2*ih:(ow-iw)/2:(oh-ih)/2"
    +
    + +
  • +Pad the input to get a squared output with size equal to the maximum +value between the input width and height, and put the input video at +the center of the padded area: +
     
    pad="max(iw\,ih):ow:(ow-iw)/2:(oh-ih)/2"
    +
    + +
  • +Pad the input to get a final w/h ratio of 16:9: +
     
    pad="ih*16/9:ih:(ow-iw)/2:(oh-ih)/2"
    +
    + +
  • +In case of anamorphic video, in order to set the output display aspect +correctly, it is necessary to use sar in the expression, +according to the relation: +
     
    (ih * X / ih) * sar = output_dar
    +X = output_dar / sar
    +
    + +

    Thus the previous example needs to be modified to: +

     
    pad="ih*16/9/sar:ih:(ow-iw)/2:(oh-ih)/2"
    +
    + +
  • +Double output size and put the input video in the bottom-right +corner of the output padded area: +
     
    pad="2*iw:2*ih:ow-iw:oh-ih"
    +
    +
+ + +

37.63 perspective

+ +

Correct perspective of video not recorded perpendicular to the screen. +

+

A description of the accepted parameters follows. +

+
+
x0
+
y0
+
x1
+
y1
+
x2
+
y2
+
x3
+
y3
+

Set coordinates expression for top left, top right, bottom left and bottom right corners. +Default values are 0:0:W:0:0:H:W:H with which perspective will remain unchanged. +

+

The expressions can use the following variables: +

+
+
W
+
H
+

the width and height of video frame. +

+
+ +
+
interpolation
+

Set interpolation for perspective correction. +

+

It accepts the following values: +

+
linear
+
cubic
+
+ +

Default value is ‘linear’. +

+
+ + +

37.64 phase

+ +

Delay interlaced video by one field time so that the field order changes. +

+

The intended use is to fix PAL movies that have been captured with the +opposite field order to the film-to-video transfer. +

+

A description of the accepted parameters follows. +

+
+
mode
+

Set phase mode. +

+

It accepts the following values: +

+
t
+

Capture field order top-first, transfer bottom-first. +Filter will delay the bottom field. +

+
+
b
+

Capture field order bottom-first, transfer top-first. +Filter will delay the top field. +

+
+
p
+

Capture and transfer with the same field order. This mode only exists +for the documentation of the other options to refer to, but if you +actually select it, the filter will faithfully do nothing. +

+
+
a
+

Capture field order determined automatically by field flags, transfer +opposite. +Filter selects among ‘t’ and ‘b’ modes on a frame by frame +basis using field flags. If no field information is available, +then this works just like ‘u’. +

+
+
u
+

Capture unknown or varying, transfer opposite. +Filter selects among ‘t’ and ‘b’ on a frame by frame basis by +analyzing the images and selecting the alternative that produces best +match between the fields. +

+
+
T
+

Capture top-first, transfer unknown or varying. +Filter selects among ‘t’ and ‘p’ using image analysis. +

+
+
B
+

Capture bottom-first, transfer unknown or varying. +Filter selects among ‘b’ and ‘p’ using image analysis. +

+
+
A
+

Capture determined by field flags, transfer unknown or varying. +Filter selects among ‘t’, ‘b’ and ‘p’ using field flags and +image analysis. If no field information is available, then this works just +like ‘U’. This is the default mode. +

+
+
U
+

Both capture and transfer unknown or varying. +Filter selects among ‘t’, ‘b’ and ‘p’ using image analysis only. +

+
+
+
+ + +

37.65 pixdesctest

+ +

Pixel format descriptor test filter, mainly useful for internal +testing. The output video should be equal to the input video. +

+

For example: +

 
format=monow, pixdesctest
+
+ +

can be used to test the monowhite pixel format descriptor definition. +

+ +

37.66 pp

+ +

Enable the specified chain of postprocessing subfilters using libpostproc. This +library should be automatically selected with a GPL build (--enable-gpl). +Subfilters must be separated by ’/’ and can be disabled by prepending a ’-’. +Each subfilter and some options have a short and a long name that can be used +interchangeably, i.e. dr/dering are the same. +

+

The filters accept the following options: +

+
+
subfilters
+

Set postprocessing subfilters string. +

+
+ +

All subfilters share common options to determine their scope: +

+
+
a/autoq
+

Honor the quality commands for this subfilter. +

+
+
c/chrom
+

Do chrominance filtering, too (default). +

+
+
y/nochrom
+

Do luminance filtering only (no chrominance). +

+
+
n/noluma
+

Do chrominance filtering only (no luminance). +

+
+ +

These options can be appended after the subfilter name, separated by a ’|’. +

+

Available subfilters are: +

+
+
hb/hdeblock[|difference[|flatness]]
+

Horizontal deblocking filter +

+
difference
+

Difference factor where higher values mean more deblocking (default: 32). +

+
flatness
+

Flatness threshold where lower values mean more deblocking (default: 39). +

+
+ +
+
vb/vdeblock[|difference[|flatness]]
+

Vertical deblocking filter +

+
difference
+

Difference factor where higher values mean more deblocking (default: 32). +

+
flatness
+

Flatness threshold where lower values mean more deblocking (default: 39). +

+
+ +
+
ha/hadeblock[|difference[|flatness]]
+

Accurate horizontal deblocking filter +

+
difference
+

Difference factor where higher values mean more deblocking (default: 32). +

+
flatness
+

Flatness threshold where lower values mean more deblocking (default: 39). +

+
+ +
+
va/vadeblock[|difference[|flatness]]
+

Accurate vertical deblocking filter +

+
difference
+

Difference factor where higher values mean more deblocking (default: 32). +

+
flatness
+

Flatness threshold where lower values mean more deblocking (default: 39). +

+
+
+
+ +

The horizontal and vertical deblocking filters share the difference and +flatness values so you cannot set different horizontal and vertical +thresholds. +

+
+
h1/x1hdeblock
+

Experimental horizontal deblocking filter +

+
+
v1/x1vdeblock
+

Experimental vertical deblocking filter +

+
+
dr/dering
+

Deringing filter +

+
+
tn/tmpnoise[|threshold1[|threshold2[|threshold3]]], temporal noise reducer
+
+
threshold1
+

larger -> stronger filtering +

+
threshold2
+

larger -> stronger filtering +

+
threshold3
+

larger -> stronger filtering +

+
+ +
+
al/autolevels[:f/fullyrange], automatic brightness / contrast correction
+
+
f/fullyrange
+

Stretch luminance to 0-255. +

+
+ +
+
lb/linblenddeint
+

Linear blend deinterlacing filter that deinterlaces the given block by +filtering all lines with a (1 2 1) filter. +

+
+
li/linipoldeint
+

Linear interpolating deinterlacing filter that deinterlaces the given block by +linearly interpolating every second line. +

+
+
ci/cubicipoldeint
+

Cubic interpolating deinterlacing filter deinterlaces the given block by +cubically interpolating every second line. +

+
+
md/mediandeint
+

Median deinterlacing filter that deinterlaces the given block by applying a +median filter to every second line. +

+
+
fd/ffmpegdeint
+

FFmpeg deinterlacing filter that deinterlaces the given block by filtering every +second line with a (-1 4 2 4 -1) filter. +

+
+
l5/lowpass5
+

Vertically applied FIR lowpass deinterlacing filter that deinterlaces the given +block by filtering all lines with a (-1 2 6 2 -1) filter. +

+
+
fq/forceQuant[|quantizer]
+

Overrides the quantizer table from the input with the constant quantizer you +specify. +

+
quantizer
+

Quantizer to use +

+
+ +
+
de/default
+

Default pp filter combination (hb|a,vb|a,dr|a) +

+
+
fa/fast
+

Fast pp filter combination (h1|a,v1|a,dr|a) +

+
+
ac
+

High quality pp filter combination (ha|a|128|7,va|a,dr|a) +

+
+ + +

37.66.1 Examples

+ +
    +
  • +Apply horizontal and vertical deblocking, deringing and automatic +brightness/contrast: +
     
    pp=hb/vb/dr/al
    +
    + +
  • +Apply default filters without brightness/contrast correction: +
     
    pp=de/-al
    +
    + +
  • +Apply default filters and temporal denoiser: +
     
    pp=default/tmpnoise|1|2|3
    +
    + +
  • +Apply deblocking on luminance only, and switch vertical deblocking on or off +automatically depending on available CPU time: +
     
    pp=hb|y/vb|a
    +
    +
+ + +

37.67 psnr

+ +

Obtain the average, maximum and minimum PSNR (Peak Signal to Noise +Ratio) between two input videos. +

+

This filter takes two input videos; the first input is considered the "main" source and is passed unchanged to the output. The second input is used as a "reference" video for computing the PSNR.

+

Both video inputs must have the same resolution and pixel format for +this filter to work correctly. Also it assumes that both inputs +have the same number of frames, which are compared one by one. +

+

The obtained average PSNR is printed through the logging system. +

+

The filter stores the accumulated MSE (mean squared error) of each +frame, and at the end of the processing it is averaged across all frames +equally, and the following formula is applied to obtain the PSNR: +

+
 
PSNR = 10*log10(MAX^2/MSE)
+
+ +

Where MAX is the average of the maximum values of each component of the +image. +

+

The description of the accepted parameters follows. +

+
+
stats_file, f
+

If specified the filter will use the named file to save the PSNR of +each individual frame. +

+
+ +

The file printed if stats_file is selected contains a sequence of key/value pairs of the form key:value for each compared couple of frames.

+

A description of each shown parameter follows: +

+
+
n
+

sequential number of the input frame, starting from 1 +

+
+
mse_avg
+

Mean Square Error pixel-by-pixel average difference of the compared +frames, averaged over all the image components. +

+
+
mse_y, mse_u, mse_v, mse_r, mse_g, mse_g, mse_a
+

Mean Square Error pixel-by-pixel average difference of the compared +frames for the component specified by the suffix. +

+
+
psnr_y, psnr_u, psnr_v, psnr_r, psnr_g, psnr_b, psnr_a
+

Peak Signal to Noise ratio of the compared frames for the component +specified by the suffix. +

+
+ +

For example: +

 
movie=ref_movie.mpg, setpts=PTS-STARTPTS [main];
+[main][ref] psnr="stats_file=stats.log" [out]
+
+ +

In this example the input file being processed is compared with the reference file ‘ref_movie.mpg’. The PSNR of each individual frame is stored in ‘stats.log’.

+

+

+

37.68 pullup

+ +

Pulldown reversal (inverse telecine) filter, capable of handling mixed +hard-telecine, 24000/1001 fps progressive, and 30000/1001 fps progressive +content. +

+

The pullup filter is designed to take advantage of future context in making +its decisions. This filter is stateless in the sense that it does not lock +onto a pattern to follow, but it instead looks forward to the following +fields in order to identify matches and rebuild progressive frames. +

+

To produce content with an even framerate, insert the fps filter after +pullup, use fps=24000/1001 if the input frame rate is 29.97fps, +fps=24 for 30fps and the (rare) telecined 25fps input. +

+

The filter accepts the following options: +

+
+
jl
+
jr
+
jt
+
jb
+

These options set the amount of "junk" to ignore at the left, right, top, and +bottom of the image, respectively. Left and right are in units of 8 pixels, +while top and bottom are in units of 2 lines. +The default is 8 pixels on each side. +

+
+
sb
+

Set the strict breaks. Setting this option to 1 will reduce the chances of +filter generating an occasional mismatched frame, but it may also cause an +excessive number of frames to be dropped during high motion sequences. +Conversely, setting it to -1 will make filter match fields more easily. +This may help processing of video where there is slight blurring between +the fields, but may also cause there to be interlaced frames in the output. +Default value is 0. +

+
+
mp
+

Set the metric plane to use. It accepts the following values: +

+
l
+

Use luma plane. +

+
+
u
+

Use chroma blue plane. +

+
+
v
+

Use chroma red plane. +

+
+ +

This option may be set to use chroma plane instead of the default luma plane +for doing filter’s computations. This may improve accuracy on very clean +source material, but more likely will decrease accuracy, especially if there +is chroma noise (rainbow effect) or any grayscale video. +The main purpose of setting ‘mp’ to a chroma plane is to reduce CPU +load and make pullup usable in realtime on slow machines. +

+
+ +

For best results (without duplicated frames in the output file) it is +necessary to change the output frame rate. For example, to inverse +telecine NTSC input: +

 
ffmpeg -i input -vf pullup -r 24000/1001 ...
+
+ + +

37.69 removelogo

+ +

Suppress a TV station logo, using an image file to determine which +pixels comprise the logo. It works by filling in the pixels that +comprise the logo with neighboring pixels. +

+

The filter accepts the following options: +

+
+
filename, f
+

Set the filter bitmap file, which can be any image format supported by +libavformat. The width and height of the image file must match those of the +video stream being processed. +

+
+ +

Pixels in the provided bitmap image with a value of zero are not +considered part of the logo, non-zero pixels are considered part of +the logo. If you use white (255) for the logo and black (0) for the +rest, you will be safe. For making the filter bitmap, it is +recommended to take a screen capture of a black frame with the logo +visible, and then using a threshold filter followed by the erode +filter once or twice. +

+

If needed, little splotches can be fixed manually. Remember that if +logo pixels are not covered, the filter quality will be much +reduced. Marking too many pixels as part of the logo does not hurt as +much, but it will increase the amount of blurring needed to cover over +the image and will destroy more information than necessary, and extra +pixels will slow things down on a large logo. +

+ +

37.70 rotate

+ +

Rotate video by an arbitrary angle expressed in radians. +

+

The filter accepts the following options: +

+

A description of the optional parameters follows. +

+
angle, a
+

Set an expression for the angle by which to rotate the input video +clockwise, expressed as a number of radians. A negative value will +result in a counter-clockwise rotation. By default it is set to "0". +

+

This expression is evaluated for each frame. +

+
+
out_w, ow
+

Set the output width expression, default value is "iw". +This expression is evaluated just once during configuration. +

+
+
out_h, oh
+

Set the output height expression, default value is "ih". +This expression is evaluated just once during configuration. +

+
+
bilinear
+

Enable bilinear interpolation if set to 1, a value of 0 disables +it. Default value is 1. +

+
+
fillcolor, c
+

Set the color used to fill the output area not covered by the rotated image. For the general syntax of this option, check the "Color" section in the ffmpeg-utils manual. If the special value "none" is selected then no background is printed (useful for example if the background is never shown).

+

Default value is "black". +

+
+ +

The expressions for the angle and the output size can contain the +following constants and functions: +

+
+
n
+

sequential number of the input frame, starting from 0. It is always NAN +before the first frame is filtered. +

+
+
t
+

time in seconds of the input frame, it is set to 0 when the filter is +configured. It is always NAN before the first frame is filtered. +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+
in_w, iw
+
in_h, ih
+

the input video width and height +

+
+
out_w, ow
+
out_h, oh
+

the output width and height, that is the size of the padded area as +specified by the width and height expressions +

+
+
rotw(a)
+
roth(a)
+

the minimal width/height required for completely containing the input +video rotated by a radians. +

+

These are only available when computing the ‘out_w’ and +‘out_h’ expressions. +

+
+ + +

37.70.1 Examples

+ +
    +
  • +Rotate the input by PI/6 radians clockwise: +
     
    rotate=PI/6
    +
    + +
  • +Rotate the input by PI/6 radians counter-clockwise: +
     
    rotate=-PI/6
    +
    + +
  • +Rotate the input by 45 degrees clockwise: +
     
    rotate=45*PI/180
    +
    + +
  • +Apply a constant rotation with period T, starting from an angle of PI/3: +
     
    rotate=PI/3+2*PI*t/T
    +
    + +
  • +Make the input video rotation oscillating with a period of T +seconds and an amplitude of A radians: +
     
    rotate=A*sin(2*PI/T*t)
    +
    + +
  • +Rotate the video, output size is chosen so that the whole rotating +input video is always completely contained in the output: +
     
    rotate='2*PI*t:ow=hypot(iw,ih):oh=ow'
    +
    + +
  • +Rotate the video, reduce the output size so that no background is ever +shown: +
     
    rotate=2*PI*t:ow='min(iw,ih)/sqrt(2)':oh=ow:c=none
    +
    +
+ + +

37.70.2 Commands

+ +

The filter supports the following commands: +

+
+
a, angle
+

Set the angle expression. +The command accepts the same syntax of the corresponding option. +

+

If the specified expression is not valid, it is kept at its current +value. +

+
+ + +

37.71 sab

+ +

Apply Shape Adaptive Blur. +

+

The filter accepts the following options: +

+
+
luma_radius, lr
+

Set luma blur filter strength, must be a value in range 0.1-4.0, default +value is 1.0. A greater value will result in a more blurred image, and +in slower processing. +

+
+
luma_pre_filter_radius, lpfr
+

Set luma pre-filter radius, must be a value in the 0.1-2.0 range, default +value is 1.0. +

+
+
luma_strength, ls
+

Set luma maximum difference between pixels to still be considered, must +be a value in the 0.1-100.0 range, default value is 1.0. +

+
+
chroma_radius, cr
+

Set chroma blur filter strength, must be a value in range 0.1-4.0. A +greater value will result in a more blurred image, and in slower +processing. +

+
+
chroma_pre_filter_radius, cpfr
+

Set chroma pre-filter radius, must be a value in the 0.1-2.0 range. +

+
+
chroma_strength, cs
+

Set chroma maximum difference between pixels to still be considered, +must be a value in the 0.1-100.0 range. +

+
+ +

Each chroma option value, if not explicitly specified, is set to the +corresponding luma option value. +

+

+

+

37.72 scale

+ +

Scale (resize) the input video, using the libswscale library. +

+

The scale filter forces the output display aspect ratio to be the same +of the input, by changing the output sample aspect ratio. +

+

If the input image format is different from the format requested by +the next filter, the scale filter will convert the input to the +requested format. +

+ +

37.72.1 Options

+

The filter accepts the following options, or any of the options +supported by the libswscale scaler. +

+

See (ffmpeg-scaler)scaler_options for +the complete list of scaler options. +

+
+
width, w
+
height, h
+

Set the output video dimension expression. Default value is the input +dimension. +

+

If the value is 0, the input width is used for the output. +

+

If one of the values is -1, the scale filter will use a value that +maintains the aspect ratio of the input image, calculated from the +other specified dimension. If both of them are -1, the input size is +used +

+

If one of the values is -n with n > 1, the scale filter will also use a value +that maintains the aspect ratio of the input image, calculated from the other +specified dimension. After that it will, however, make sure that the calculated +dimension is divisible by n and adjust the value if necessary. +

+

See below for the list of accepted constants for use in the dimension +expression. +

+
+
interl
+

Set the interlacing mode. It accepts the following values: +

+
+
1
+

Force interlaced aware scaling. +

+
+
0
+

Do not apply interlaced scaling. +

+
+
-1
+

Select interlaced aware scaling depending on whether the source frames +are flagged as interlaced or not. +

+
+ +

Default value is ‘0’. +

+
+
flags
+

Set libswscale scaling flags. See +(ffmpeg-scaler)sws_flags for the +complete list of values. If not explicitly specified the filter applies +the default flags. +

+
+
size, s
+

Set the video size. For the syntax of this option, check the "Video size" +section in the ffmpeg-utils manual. +

+
+
in_color_matrix
+
out_color_matrix
+

Set in/output YCbCr color space type. +

+

This allows the autodetected value to be overridden as well as allows forcing +a specific value used for the output and encoder. +

+

If not specified, the color space type depends on the pixel format. +

+

Possible values: +

+
+
auto
+

Choose automatically. +

+
+
bt709
+

Format conforming to International Telecommunication Union (ITU) +Recommendation BT.709. +

+
+
fcc
+

Set color space conforming to the United States Federal Communications +Commission (FCC) Code of Federal Regulations (CFR) Title 47 (2003) 73.682 (a). +

+
+
bt601
+

Set color space conforming to: +

+
    +
  • +ITU Radiocommunication Sector (ITU-R) Recommendation BT.601 + +
  • +ITU-R Rec. BT.470-6 (1998) Systems B, B1, and G + +
  • +Society of Motion Picture and Television Engineers (SMPTE) ST 170:2004 + +
+ +
+
smpte240m
+

Set color space conforming to SMPTE ST 240:1999. +

+
+ +
+
in_range
+
out_range
+

Set in/output YCbCr sample range. +

+

This allows the autodetected value to be overridden as well as allows forcing +a specific value used for the output and encoder. If not specified, the +range depends on the pixel format. Possible values: +

+
+
auto
+

Choose automatically. +

+
+
jpeg/full/pc
+

Set full range (0-255 in case of 8-bit luma). +

+
+
mpeg/tv
+

Set "MPEG" range (16-235 in case of 8-bit luma). +

+
+ +
+
force_original_aspect_ratio
+

Enable decreasing or increasing output video width or height if necessary to +keep the original aspect ratio. Possible values: +

+
+
disable
+

Scale the video as specified and disable this feature. +

+
+
decrease
+

The output video dimensions will automatically be decreased if needed. +

+
+
increase
+

The output video dimensions will automatically be increased if needed. +

+
+
+ +

One useful instance of this option is that when you know a specific device’s +maximum allowed resolution, you can use this to limit the output video to +that, while retaining the aspect ratio. For example, device A allows +1280x720 playback, and your video is 1920x800. Using this option (set it to +decrease) and specifying 1280x720 to the command line makes the output +1280x533. +

+

Please note that this is a different thing than specifying -1 for ‘w’ +or ‘h’, you still need to specify the output resolution for this option +to work. +

+
+
+ +

The values of the ‘w’ and ‘h’ options are expressions +containing the following constants: +

+
+
in_w
+
in_h
+

the input width and height +

+
+
iw
+
ih
+

same as in_w and in_h +

+
+
out_w
+
out_h
+

the output (scaled) width and height +

+
+
ow
+
oh
+

same as out_w and out_h +

+
+
a
+

same as iw / ih +

+
+
sar
+

input sample aspect ratio +

+
+
dar
+

input display aspect ratio. Calculated from (iw / ih) * sar. +

+
+
hsub
+
vsub
+

horizontal and vertical input chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+
ohsub
+
ovsub
+

horizontal and vertical output chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+ + +

37.72.2 Examples

+ +
    +
  • +Scale the input video to a size of 200x100: +
     
    scale=w=200:h=100
    +
    + +

    This is equivalent to: +

     
    scale=200:100
    +
    + +

    or: +

     
    scale=200x100
    +
    + +
  • +Specify a size abbreviation for the output size: +
     
    scale=qcif
    +
    + +

    which can also be written as: +

     
    scale=size=qcif
    +
    + +
  • +Scale the input to 2x: +
     
    scale=w=2*iw:h=2*ih
    +
    + +
  • +The above is the same as: +
     
    scale=2*in_w:2*in_h
    +
    + +
  • +Scale the input to 2x with forced interlaced scaling: +
     
    scale=2*iw:2*ih:interl=1
    +
    + +
  • +Scale the input to half size: +
     
    scale=w=iw/2:h=ih/2
    +
    + +
  • +Increase the width, and set the height to the same size: +
     
    scale=3/2*iw:ow
    +
    + +
  • +Seek for Greek harmony: +
     
    scale=iw:1/PHI*iw
    +scale=ih*PHI:ih
    +
    + +
  • +Increase the height, and set the width to 3/2 of the height: +
     
    scale=w=3/2*oh:h=3/5*ih
    +
    + +
  • +Increase the size, but make the size a multiple of the chroma +subsample values: +
     
    scale="trunc(3/2*iw/hsub)*hsub:trunc(3/2*ih/vsub)*vsub"
    +
    + +
  • +Increase the width to a maximum of 500 pixels, keep the same input +aspect ratio: +
     
    scale=w='min(500\, iw*3/2):h=-1'
    +
    +
+ + +

37.73 separatefields

+ +

The separatefields takes a frame-based video input and splits +each frame into its component fields, producing a new half height clip +with twice the frame rate and twice the frame count. +

+

This filter uses field-dominance information in the frame to decide which +of each pair of fields to place first in the output. +If it gets it wrong use the setfield filter before the separatefields filter. +

+ +

37.74 setdar, setsar

+ +

The setdar filter sets the Display Aspect Ratio for the filter +output video. +

+

This is done by changing the specified Sample (aka Pixel) Aspect +Ratio, according to the following equation: +

 
DAR = HORIZONTAL_RESOLUTION / VERTICAL_RESOLUTION * SAR
+
+ +

Keep in mind that the setdar filter does not modify the pixel +dimensions of the video frame. Also the display aspect ratio set by +this filter may be changed by later filters in the filterchain, +e.g. in case of scaling or if another "setdar" or a "setsar" filter is +applied. +

+

The setsar filter sets the Sample (aka Pixel) Aspect Ratio for +the filter output video. +

+

Note that as a consequence of the application of this filter, the +output display aspect ratio will change according to the equation +above. +

+

Keep in mind that the sample aspect ratio set by the setsar +filter may be changed by later filters in the filterchain, e.g. if +another "setsar" or a "setdar" filter is applied. +

+

The filters accept the following options: +

+
+
r, ratio, dar (setdar only), sar (setsar only)
+

Set the aspect ratio used by the filter. +

+

The parameter can be a floating point number string, an expression, or +a string of the form num:den, where num and +den are the numerator and denominator of the aspect ratio. If +the parameter is not specified, it is assumed the value "0". +In case the form "num:den" is used, the : character +should be escaped. +

+
+
max
+

Set the maximum integer value to use for expressing numerator and +denominator when reducing the expressed aspect ratio to a rational. +Default value is 100. +

+
+
+ +

The parameter sar is an expression containing +the following constants: +

+
+
E, PI, PHI
+

the corresponding mathematical approximated values for e +(euler number), pi (greek PI), phi (golden ratio) +

+
+
w, h
+

the input width and height +

+
+
a
+

same as w / h +

+
+
sar
+

input sample aspect ratio +

+
+
dar
+

input display aspect ratio, it is the same as (w / h) * sar +

+
+
hsub, vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+ + +

37.74.1 Examples

+ +
    +
  • +To change the display aspect ratio to 16:9, specify one of the following: +
     
    setdar=dar=1.77777
    +setdar=dar=16/9
    +setdar=dar=1.77777
    +
    + +
  • +To change the sample aspect ratio to 10:11, specify: +
     
    setsar=sar=10/11
    +
    + +
  • +To set a display aspect ratio of 16:9, and specify a maximum integer value of +1000 in the aspect ratio reduction, use the command: +
     
    setdar=ratio=16/9:max=1000
    +
    + +
+ +

+

+

37.75 setfield

+ +

Force field for the output video frame. +

+

The setfield filter marks the interlace type field for the +output frames. It does not change the input frame, but only sets the +corresponding property, which affects how the frame is treated by +following filters (e.g. fieldorder or yadif). +

+

The filter accepts the following options: +

+
+
mode
+

Available values are: +

+
+
auto
+

Keep the same field property. +

+
+
bff
+

Mark the frame as bottom-field-first. +

+
+
tff
+

Mark the frame as top-field-first. +

+
+
prog
+

Mark the frame as progressive. +

+
+
+
+ + +

37.76 showinfo

+ +

Show a line containing various information for each input video frame. +The input video is not modified. +

+

The shown line contains a sequence of key/value pairs of the form +key:value. +

+

A description of each shown parameter follows: +

+
+
n
+

sequential number of the input frame, starting from 0 +

+
+
pts
+

Presentation TimeStamp of the input frame, expressed as a number of +time base units. The time base unit depends on the filter input pad. +

+
+
pts_time
+

Presentation TimeStamp of the input frame, expressed as a number of +seconds +

+
+
pos
+

position of the frame in the input stream, -1 if this information is +unavailable and/or meaningless (for example in case of synthetic video) +

+
+
fmt
+

pixel format name +

+
+
sar
+

sample aspect ratio of the input frame, expressed in the form +num/den +

+
+
s
+

size of the input frame. For the syntax of this option, check the "Video size" +section in the ffmpeg-utils manual. +

+
+
i
+

interlaced mode ("P" for "progressive", "T" for top field first, "B" +for bottom field first) +

+
+
iskey
+

1 if the frame is a key frame, 0 otherwise +

+
+
type
+

picture type of the input frame ("I" for an I-frame, "P" for a +P-frame, "B" for a B-frame, "?" for unknown type). +Check also the documentation of the AVPictureType enum and of +the av_get_picture_type_char function defined in +‘libavutil/avutil.h’. +

+
+
checksum
+

Adler-32 checksum (printed in hexadecimal) of all the planes of the input frame +

+
+
plane_checksum
+

Adler-32 checksum (printed in hexadecimal) of each plane of the input frame, +expressed in the form "[c0 c1 c2 c3]" +

+
+ +

+

+

37.77 smartblur

+ +

Blur the input video without impacting the outlines. +

+

The filter accepts the following options: +

+
+
luma_radius, lr
+

Set the luma radius. The option value must be a float number in +the range [0.1,5.0] that specifies the variance of the gaussian filter +used to blur the image (slower if larger). Default value is 1.0. +

+
+
luma_strength, ls
+

Set the luma strength. The option value must be a float number +in the range [-1.0,1.0] that configures the blurring. A value included +in [0.0,1.0] will blur the image whereas a value included in +[-1.0,0.0] will sharpen the image. Default value is 1.0. +

+
+
luma_threshold, lt
+

Set the luma threshold used as a coefficient to determine +whether a pixel should be blurred or not. The option value must be an +integer in the range [-30,30]. A value of 0 will filter all the image, +a value included in [0,30] will filter flat areas and a value included +in [-30,0] will filter edges. Default value is 0. +

+
+
chroma_radius, cr
+

Set the chroma radius. The option value must be a float number in +the range [0.1,5.0] that specifies the variance of the gaussian filter +used to blur the image (slower if larger). Default value is 1.0. +

+
+
chroma_strength, cs
+

Set the chroma strength. The option value must be a float number +in the range [-1.0,1.0] that configures the blurring. A value included +in [0.0,1.0] will blur the image whereas a value included in +[-1.0,0.0] will sharpen the image. Default value is 1.0. +

+
+
chroma_threshold, ct
+

Set the chroma threshold used as a coefficient to determine +whether a pixel should be blurred or not. The option value must be an +integer in the range [-30,30]. A value of 0 will filter all the image, +a value included in [0,30] will filter flat areas and a value included +in [-30,0] will filter edges. Default value is 0. +

+
+ +

If a chroma option is not explicitly set, the corresponding luma value +is set. +

+ +

37.78 stereo3d

+ +

Convert between different stereoscopic image formats. +

+

The filters accept the following options: +

+
+
in
+

Set stereoscopic image format of input. +

+

Available values for input image formats are: +

+
sbsl
+

side by side parallel (left eye left, right eye right) +

+
+
sbsr
+

side by side crosseye (right eye left, left eye right) +

+
+
sbs2l
+

side by side parallel with half width resolution +(left eye left, right eye right) +

+
+
sbs2r
+

side by side crosseye with half width resolution +(right eye left, left eye right) +

+
+
abl
+

above-below (left eye above, right eye below) +

+
+
abr
+

above-below (right eye above, left eye below) +

+
+
ab2l
+

above-below with half height resolution +(left eye above, right eye below) +

+
+
ab2r
+

above-below with half height resolution +(right eye above, left eye below) +

+
+
al
+

alternating frames (left eye first, right eye second) +

+
+
ar
+

alternating frames (right eye first, left eye second) +

+

Default value is ‘sbsl’. +

+
+ +
+
out
+

Set stereoscopic image format of output. +

+

Available values for output image formats are all the input formats as well as: +

+
arbg
+

anaglyph red/blue gray +(red filter on left eye, blue filter on right eye) +

+
+
argg
+

anaglyph red/green gray +(red filter on left eye, green filter on right eye) +

+
+
arcg
+

anaglyph red/cyan gray +(red filter on left eye, cyan filter on right eye) +

+
+
arch
+

anaglyph red/cyan half colored +(red filter on left eye, cyan filter on right eye) +

+
+
arcc
+

anaglyph red/cyan color +(red filter on left eye, cyan filter on right eye) +

+
+
arcd
+

anaglyph red/cyan color optimized with the least squares projection of dubois +(red filter on left eye, cyan filter on right eye) +

+
+
agmg
+

anaglyph green/magenta gray +(green filter on left eye, magenta filter on right eye) +

+
+
agmh
+

anaglyph green/magenta half colored +(green filter on left eye, magenta filter on right eye) +

+
+
agmc
+

anaglyph green/magenta colored +(green filter on left eye, magenta filter on right eye) +

+
+
agmd
+

anaglyph green/magenta color optimized with the least squares projection of dubois +(green filter on left eye, magenta filter on right eye) +

+
+
aybg
+

anaglyph yellow/blue gray +(yellow filter on left eye, blue filter on right eye) +

+
+
aybh
+

anaglyph yellow/blue half colored +(yellow filter on left eye, blue filter on right eye) +

+
+
aybc
+

anaglyph yellow/blue colored +(yellow filter on left eye, blue filter on right eye) +

+
+
aybd
+

anaglyph yellow/blue color optimized with the least squares projection of dubois +(yellow filter on left eye, blue filter on right eye) +

+
+
irl
+

interleaved rows (left eye has top row, right eye starts on next row) +

+
+
irr
+

interleaved rows (right eye has top row, left eye starts on next row) +

+
+
ml
+

mono output (left eye only) +

+
+
mr
+

mono output (right eye only) +

+
+ +

Default value is ‘arcd’. +

+
+ + +

37.78.1 Examples

+ +
    +
  • +Convert input video from side by side parallel to anaglyph yellow/blue dubois: +
     
    stereo3d=sbsl:aybd
    +
    + +
  • +Convert input video from above-below (left eye above, right eye below) to side by side crosseye. +
     
    stereo3d=abl:sbsr
    +
    +
+ + +

37.79 spp

+ +

Apply a simple postprocessing filter that compresses and decompresses the image +at several (or - in the case of ‘quality’ level 6 - all) shifts +and average the results. +

+

The filter accepts the following options: +

+
+
quality
+

Set quality. This option defines the number of levels for averaging. It accepts +an integer in the range 0-6. If set to 0, the filter will have no +effect. A value of 6 means the highest quality. For each increment of +that value the speed drops by a factor of approximately 2. Default value is +3. +

+
+
qp
+

Force a constant quantization parameter. If not set, the filter will use the QP +from the video stream (if available). +

+
+
mode
+

Set thresholding mode. Available modes are: +

+
+
hard
+

Set hard thresholding (default). +

+
soft
+

Set soft thresholding (better de-ringing effect, but likely blurrier). +

+
+ +
+
use_bframe_qp
+

Enable the use of the QP from the B-Frames if set to 1. Using this +option may cause flicker since the B-Frames have often larger QP. Default is +0 (not enabled). +

+
+ +

+

+

37.80 subtitles

+ +

Draw subtitles on top of input video using the libass library. +

+

To enable compilation of this filter you need to configure FFmpeg with +--enable-libass. This filter also requires a build with libavcodec and +libavformat to convert the passed subtitles file to ASS (Advanced Substation +Alpha) subtitles format. +

+

The filter accepts the following options: +

+
+
filename, f
+

Set the filename of the subtitle file to read. It must be specified. +

+
+
original_size
+

Specify the size of the original video, the video for which the ASS file +was composed. For the syntax of this option, check the "Video size" section in +the ffmpeg-utils manual. Due to a misdesign in ASS aspect ratio arithmetic, +this is necessary to correctly scale the fonts if the aspect ratio has been +changed. +

+
+
charenc
+

Set subtitles input character encoding. subtitles filter only. Only +useful if not UTF-8. +

+
+ +

If the first key is not specified, it is assumed that the first value +specifies the ‘filename’. +

+

For example, to render the file ‘sub.srt’ on top of the input +video, use the command: +

 
subtitles=sub.srt
+
+ +

which is equivalent to: +

 
subtitles=filename=sub.srt
+
+ + +

37.81 super2xsai

+ +

Scale the input by 2x and smooth using the Super2xSaI (Scale and +Interpolate) pixel art scaling algorithm. +

+

Useful for enlarging pixel art images without reducing sharpness. +

+ +

37.82 swapuv

+

Swap U & V plane. +

+ +

37.83 telecine

+ +

Apply telecine process to the video. +

+

This filter accepts the following options: +

+
+
first_field
+
+
top, t
+

top field first +

+
bottom, b
+

bottom field first +The default value is top. +

+
+ +
+
pattern
+

A string of numbers representing the pulldown pattern you wish to apply. +The default value is 23. +

+
+ +
 
Some typical patterns:
+
+NTSC output (30i):
+27.5p: 32222
+24p: 23 (classic)
+24p: 2332 (preferred)
+20p: 33
+18p: 334
+16p: 3444
+
+PAL output (25i):
+27.5p: 12222
+24p: 222222222223 ("Euro pulldown")
+16.67p: 33
+16p: 33333334
+
+ + +

37.84 thumbnail

+

Select the most representative frame in a given sequence of consecutive frames. +

+

The filter accepts the following options: +

+
+
n
+

Set the frames batch size to analyze; in a set of n frames, the filter +will pick one of them, and then handle the next batch of n frames until +the end. Default is 100. +

+
+ +

Since the filter keeps track of the whole frames sequence, a bigger n +value will result in a higher memory usage, so a high value is not recommended. +

+ +

37.84.1 Examples

+ +
    +
  • +Extract one picture each 50 frames: +
     
    thumbnail=50
    +
    + +
  • +Complete example of a thumbnail creation with ffmpeg: +
     
    ffmpeg -i in.avi -vf thumbnail,scale=300:200 -frames:v 1 out.png
    +
    +
+ + +

37.85 tile

+ +

Tile several successive frames together. +

+

The filter accepts the following options: +

+
+
layout
+

Set the grid size (i.e. the number of lines and columns). For the syntax of +this option, check the "Video size" section in the ffmpeg-utils manual. +

+
+
nb_frames
+

Set the maximum number of frames to render in the given area. It must be less +than or equal to wxh. The default value is 0, meaning all +the area will be used. +

+
+
margin
+

Set the outer border margin in pixels. +

+
+
padding
+

Set the inner border thickness (i.e. the number of pixels between frames). For +more advanced padding options (such as having different values for the edges), +refer to the pad video filter. +

+
+
color
+

Specify the color of the unused area. For the syntax of this option, check the +"Color" section in the ffmpeg-utils manual. The default value of color +is "black". +

+
+ + +

37.85.1 Examples

+ +
    +
  • +Produce 8x8 PNG tiles of all keyframes (‘-skip_frame nokey’) in a movie: +
     
    ffmpeg -skip_frame nokey -i file.avi -vf 'scale=128:72,tile=8x8' -an -vsync 0 keyframes%03d.png
    +
    +

    The ‘-vsync 0’ is necessary to prevent ffmpeg from +duplicating each output frame to accommodate the originally detected frame +rate. +

    +
  • +Display 5 pictures in an area of 3x2 frames, +with 7 pixels between them, and 2 pixels of initial margin, using +mixed flat and named options: +
     
    tile=3x2:nb_frames=5:padding=7:margin=2
    +
    +
+ + +

37.86 tinterlace

+ +

Perform various types of temporal field interlacing. +

+

Frames are counted starting from 1, so the first input frame is +considered odd. +

+

The filter accepts the following options: +

+
+
mode
+

Specify the mode of the interlacing. This option can also be specified +as a value alone. See below for a list of values for this option. +

+

Available values are: +

+
+
merge, 0
+

Move odd frames into the upper field, even into the lower field, +generating a double height frame at half frame rate. +

+
+
drop_odd, 1
+

Only output even frames, odd frames are dropped, generating a frame with +unchanged height at half frame rate. +

+
+
drop_even, 2
+

Only output odd frames, even frames are dropped, generating a frame with +unchanged height at half frame rate. +

+
+
pad, 3
+

Expand each frame to full height, but pad alternate lines with black, +generating a frame with double height at the same input frame rate. +

+
+
interleave_top, 4
+

Interleave the upper field from odd frames with the lower field from +even frames, generating a frame with unchanged height at half frame rate. +

+
+
interleave_bottom, 5
+

Interleave the lower field from odd frames with the upper field from +even frames, generating a frame with unchanged height at half frame rate. +

+
+
interlacex2, 6
+

Double frame rate with unchanged height. Frames are inserted each +containing the second temporal field from the previous input frame and +the first temporal field from the next input frame. This mode relies on +the top_field_first flag. Useful for interlaced video displays with no +field synchronisation. +

+
+ +

Numeric values are deprecated but are accepted for backward +compatibility reasons. +

+

Default mode is merge. +

+
+
flags
+

Specify flags influencing the filter process. +

+

Available value for flags is: +

+
+
low_pass_filter, vlfp
+

Enable vertical low-pass filtering in the filter. +Vertical low-pass filtering is required when creating an interlaced +destination from a progressive source which contains high-frequency +vertical detail. Filtering will reduce interlace ’twitter’ and Moire +patterning. +

+

Vertical low-pass filtering can only be enabled for ‘mode’ +interleave_top and interleave_bottom. +

+
+
+
+
+ + +

37.87 transpose

+ +

Transpose rows with columns in the input video and optionally flip it. +

+

This filter accepts the following options: +

+
+
dir
+

Specify the transposition direction. +

+

Can assume the following values: +

+
0, 4, cclock_flip
+

Rotate by 90 degrees counterclockwise and vertically flip (default), that is: +

 
L.R     L.l
+. . ->  . .
+l.r     R.r
+
+ +
+
1, 5, clock
+

Rotate by 90 degrees clockwise, that is: +

 
L.R     l.L
+. . ->  . .
+l.r     r.R
+
+ +
+
2, 6, cclock
+

Rotate by 90 degrees counterclockwise, that is: +

 
L.R     R.r
+. . ->  . .
+l.r     L.l
+
+ +
+
3, 7, clock_flip
+

Rotate by 90 degrees clockwise and vertically flip, that is: +

 
L.R     r.R
+. . ->  . .
+l.r     l.L
+
+
+
+ +

For values between 4-7, the transposition is only done if the input +video geometry is portrait and not landscape. These values are +deprecated, the passthrough option should be used instead. +

+

Numerical values are deprecated, and should be dropped in favor of +symbolic constants. +

+
+
passthrough
+

Do not apply the transposition if the input geometry matches the one +specified by the specified value. It accepts the following values: +

+
none
+

Always apply transposition. +

+
portrait
+

Preserve portrait geometry (when height >= width). +

+
landscape
+

Preserve landscape geometry (when width >= height). +

+
+ +

Default value is none. +

+
+ +

For example to rotate by 90 degrees clockwise and preserve portrait +layout: +

 
transpose=dir=1:passthrough=portrait
+
+ +

The command above can also be specified as: +

 
transpose=1:portrait
+
+ + +

37.88 trim

+

Trim the input so that the output contains one continuous subpart of the input. +

+

This filter accepts the following options: +

+
start
+

Specify time of the start of the kept section, i.e. the frame with the +timestamp start will be the first frame in the output. +

+
+
end
+

Specify time of the first frame that will be dropped, i.e. the frame +immediately preceding the one with the timestamp end will be the last +frame in the output. +

+
+
start_pts
+

Same as start, except this option sets the start timestamp in timebase +units instead of seconds. +

+
+
end_pts
+

Same as end, except this option sets the end timestamp in timebase units +instead of seconds. +

+
+
duration
+

Specify maximum duration of the output. +

+
+
start_frame
+

Number of the first frame that should be passed to output. +

+
+
end_frame
+

Number of the first frame that should be dropped. +

+
+ +

‘start’, ‘end’, ‘duration’ are expressed as time +duration specifications, check the "Time duration" section in the +ffmpeg-utils manual. +

+

Note that the first two sets of the start/end options and the ‘duration’ +option look at the frame timestamp, while the _frame variants simply count the +frames that pass through the filter. Also note that this filter does not modify +the timestamps. If you wish that the output timestamps start at zero, insert a +setpts filter after the trim filter. +

+

If multiple start or end options are set, this filter tries to be greedy and +keep all the frames that match at least one of the specified constraints. To keep +only the part that matches all the constraints at once, chain multiple trim +filters. +

+

The defaults are such that all the input is kept. So it is possible to set e.g. +just the end values to keep everything before the specified time. +

+

Examples: +

    +
  • +drop everything except the second minute of input +
     
    ffmpeg -i INPUT -vf trim=60:120
    +
    + +
  • +keep only the first second +
     
    ffmpeg -i INPUT -vf trim=duration=1
    +
    + +
+ + + +

37.89 unsharp

+ +

Sharpen or blur the input video. +

+

It accepts the following parameters: +

+
+
luma_msize_x, lx
+

Set the luma matrix horizontal size. It must be an odd integer between +3 and 63, default value is 5. +

+
+
luma_msize_y, ly
+

Set the luma matrix vertical size. It must be an odd integer between 3 +and 63, default value is 5. +

+
+
luma_amount, la
+

Set the luma effect strength. It can be a float number, reasonable +values lay between -1.5 and 1.5. +

+

Negative values will blur the input video, while positive values will +sharpen it, a value of zero will disable the effect. +

+

Default value is 1.0. +

+
+
chroma_msize_x, cx
+

Set the chroma matrix horizontal size. It must be an odd integer +between 3 and 63, default value is 5. +

+
+
chroma_msize_y, cy
+

Set the chroma matrix vertical size. It must be an odd integer +between 3 and 63, default value is 5. +

+
+
chroma_amount, ca
+

Set the chroma effect strength. It can be a float number, reasonable +values lay between -1.5 and 1.5. +

+

Negative values will blur the input video, while positive values will +sharpen it, a value of zero will disable the effect. +

+

Default value is 0.0. +

+
+
opencl
+

If set to 1, specify using OpenCL capabilities, only available if +FFmpeg was configured with --enable-opencl. Default value is 0. +

+
+
+ +

All parameters are optional and default to the equivalent of the +string ’5:5:1.0:5:5:0.0’. +

+ +

37.89.1 Examples

+ +
    +
  • +Apply strong luma sharpen effect: +
     
    unsharp=luma_msize_x=7:luma_msize_y=7:luma_amount=2.5
    +
    + +
  • +Apply strong blur of both luma and chroma parameters: +
     
    unsharp=7:7:-2:7:7:-2
    +
    +
+ +

+

+

37.90 vidstabdetect

+ +

Analyze video stabilization/deshaking. Perform pass 1 of 2, see +vidstabtransform for pass 2. +

+

This filter generates a file with relative translation and rotation +transform information about subsequent frames, which is then used by +the vidstabtransform filter. +

+

To enable compilation of this filter you need to configure FFmpeg with +--enable-libvidstab. +

+

This filter accepts the following options: +

+
+
result
+

Set the path to the file used to write the transforms information. +Default value is ‘transforms.trf’. +

+
+
shakiness
+

Set how shaky the video is and how quick the camera is. It accepts an +integer in the range 1-10, a value of 1 means little shakiness, a +value of 10 means strong shakiness. Default value is 5. +

+
+
accuracy
+

Set the accuracy of the detection process. It must be a value in the +range 1-15. A value of 1 means low accuracy, a value of 15 means high +accuracy. Default value is 15. +

+
+
stepsize
+

Set stepsize of the search process. The region around minimum is +scanned with 1 pixel resolution. Default value is 6. +

+
+
mincontrast
+

Set minimum contrast. Below this value a local measurement field is +discarded. Must be a floating point value in the range 0-1. Default +value is 0.3. +

+
+
tripod
+

Set reference frame number for tripod mode. +

+

If enabled, the motion of the frames is compared to a reference frame +in the filtered stream, identified by the specified number. The idea +is to compensate all movements in a more-or-less static scene and keep +the camera view absolutely still. +

+

If set to 0, it is disabled. The frames are counted starting from 1. +

+
+
show
+

Show fields and transforms in the resulting frames. It accepts an +integer in the range 0-2. Default value is 0, which disables any +visualization. +

+
+ + +

37.90.1 Examples

+ +
    +
  • +Use default values: +
     
    vidstabdetect
    +
    + +
  • +Analyze strongly shaky movie and put the results in file +‘mytransforms.trf’: +
     
    vidstabdetect=shakiness=10:accuracy=15:result="mytransforms.trf"
    +
    + +
  • +Visualize the result of internal transformations in the resulting +video: +
     
    vidstabdetect=show=1
    +
    + +
  • +Analyze a video with medium shakiness using ffmpeg: +
     
    ffmpeg -i input -vf vidstabdetect=shakiness=5:show=1 dummy.avi
    +
    +
+ +

+

+

37.91 vidstabtransform

+ +

Video stabilization/deshaking: pass 2 of 2, +see vidstabdetect for pass 1. +

+

Read a file with transform information for each frame and +apply/compensate them. Together with the vidstabdetect +filter this can be used to deshake videos. See also +http://public.hronopik.de/vid.stab. It is important to also use +the unsharp filter, see below. +

+

To enable compilation of this filter you need to configure FFmpeg with +--enable-libvidstab. +

+ +

37.91.1 Options

+ +
+
input
+

Set the path to the file used to read the transforms. Default value is ‘transforms.trf’.

+
+
smoothing
+

Set the number of frames (value*2 + 1) used for lowpass filtering the +camera movements. Default value is 10. +

+

For example a number of 10 means that 21 frames are used (10 in the past and 10 in the future) to smoothen the motion in the video. A larger value leads to a smoother video, but limits the acceleration of the camera (pan/tilt movements). 0 is a special case where a static camera is simulated.

+
+
optalgo
+

Set the camera path optimization algorithm. +

+

Accepted values are: +

+
gauss
+

gaussian kernel low-pass filter on camera motion (default) +

+
avg
+

averaging on transformations +

+
+ +
+
maxshift
+

Set maximal number of pixels to translate frames. Default value is -1, +meaning no limit. +

+
+
maxangle
+

Set maximal angle in radians (degree*PI/180) to rotate frames. Default +value is -1, meaning no limit. +

+
+
crop
+

Specify how to deal with borders that may be visible due to movement +compensation. +

+

Available values are: +

+
keep
+

keep image information from previous frame (default) +

+
black
+

fill the border black +

+
+ +
+
invert
+

Invert transforms if set to 1. Default value is 0. +

+
+
relative
+

Consider transforms as relative to the previous frame if set to 1, absolute if set to 0. Default value is 0.

+
+
zoom
+

Set percentage to zoom. A positive value will result in a zoom-in +effect, a negative value in a zoom-out effect. Default value is 0 (no +zoom). +

+
+
optzoom
+

Set optimal zooming to avoid borders. +

+

Accepted values are: +

+
0
+

disabled +

+
1
+

optimal static zoom value is determined (only very strong movements +will lead to visible borders) (default) +

+
2
+

optimal adaptive zoom value is determined (no borders will be +visible), see ‘zoomspeed’ +

+
+ +

Note that the value given at zoom is added to the one calculated here. +

+
+
zoomspeed
+

Set percent to zoom maximally each frame (enabled when +‘optzoom’ is set to 2). Range is from 0 to 5, default value is +0.25. +

+
+
interpol
+

Specify type of interpolation. +

+

Available values are: +

+
no
+

no interpolation +

+
linear
+

linear only horizontal +

+
bilinear
+

linear in both directions (default) +

+
bicubic
+

cubic in both directions (slow) +

+
+ +
+
tripod
+

Enable virtual tripod mode if set to 1, which is equivalent to +relative=0:smoothing=0. Default value is 0. +

+

Use also tripod option of vidstabdetect. +

+
+
debug
+

Increase log verbosity if set to 1. Also the detected global motions +are written to the temporary file ‘global_motions.trf’. Default +value is 0. +

+
+ + +

37.91.2 Examples

+ +
    +
  • +Use ffmpeg for a typical stabilization with default values: +
     
    ffmpeg -i inp.mpeg -vf vidstabtransform,unsharp=5:5:0.8:3:3:0.4 inp_stabilized.mpeg
    +
    + +

    Note the use of the unsharp filter which is always recommended. +

    +
  • +Zoom in a bit more and load transform data from a given file: +
     
    vidstabtransform=zoom=5:input="mytransforms.trf"
    +
    + +
  • +Smoothen the video even more: +
     
    vidstabtransform=smoothing=30
    +
    +
+ + +

37.92 vflip

+ +

Flip the input video vertically. +

+

For example, to vertically flip a video with ffmpeg: +

 
ffmpeg -i in.avi -vf "vflip" out.avi
+
+ + +

37.93 vignette

+ +

Make or reverse a natural vignetting effect. +

+

The filter accepts the following options: +

+
+
angle, a
+

Set lens angle expression as a number of radians. +

+

The value is clipped in the [0,PI/2] range. +

+

Default value: "PI/5" +

+
+
x0
+
y0
+

Set center coordinates expressions. Respectively "w/2" and "h/2" +by default. +

+
+
mode
+

Set forward/backward mode. +

+

Available modes are: +

+
forward
+

The larger the distance from the central point, the darker the image becomes. +

+
+
backward
+

The larger the distance from the central point, the brighter the image becomes. +This can be used to reverse a vignette effect, though there is no automatic +detection to extract the lens ‘angle’ and other settings (yet). It can +also be used to create a burning effect. +

+
+ +

Default value is ‘forward’. +

+
+
eval
+

Set evaluation mode for the expressions (‘angle’, ‘x0’, ‘y0’). +

+

It accepts the following values: +

+
init
+

Evaluate expressions only once during the filter initialization. +

+
+
frame
+

Evaluate expressions for each incoming frame. This is way slower than the +‘init’ mode since it requires all the scalers to be re-computed, but it +allows advanced dynamic expressions. +

+
+ +

Default value is ‘init’. +

+
+
dither
+

Set dithering to reduce the circular banding effects. Default is 1 +(enabled). +

+
+
aspect
+

Set vignette aspect. This setting allows one to adjust the shape of the vignette. +Setting this value to the SAR of the input will make a rectangular vignetting +following the dimensions of the video. +

+

Default is 1/1. +

+
+ + +

37.93.1 Expressions

+ +

The ‘angle’, ‘x0’ and ‘y0’ expressions can contain the following parameters.

+
+
w
+
h
+

input width and height +

+
+
n
+

the number of input frame, starting from 0 +

+
+
pts
+

the PTS (Presentation TimeStamp) time of the filtered video frame, expressed in +TB units, NAN if undefined +

+
+
r
+

frame rate of the input video, NAN if the input frame rate is unknown +

+
+
t
+

the PTS (Presentation TimeStamp) of the filtered video frame, +expressed in seconds, NAN if undefined +

+
+
tb
+

time base of the input video +

+
+ + + +

37.93.2 Examples

+ +
    +
  • +Apply simple strong vignetting effect: +
     
    vignette=PI/4
    +
    + +
  • +Make a flickering vignetting: +
     
    vignette='PI/4+random(1)*PI/50':eval=frame
    +
    + +
+ + +

37.94 w3fdif

+ +

Deinterlace the input video ("w3fdif" stands for "Weston 3 Field +Deinterlacing Filter"). +

+

Based on the process described by Martin Weston for BBC R&D, and +implemented based on the de-interlace algorithm written by Jim +Easterbrook for BBC R&D, the Weston 3 field deinterlacing filter +uses filter coefficients calculated by BBC R&D. +

+

There are two sets of filter coefficients, so called "simple" and "complex". Which set of filter coefficients is used can be set by passing an optional parameter:

+
+
filter
+

Set the interlacing filter coefficients. Accepts one of the following values: +

+
+
simple
+

Simple filter coefficient set. +

+
complex
+

More-complex filter coefficient set. +

+
+

Default value is ‘complex’. +

+
+
deint
+

Specify which frames to deinterlace. Accept one of the following values: +

+
+
all
+

Deinterlace all frames, +

+
interlaced
+

Only deinterlace frames marked as interlaced. +

+
+ +

Default value is ‘all’. +

+
+ +

+

+

37.95 yadif

+ +

Deinterlace the input video ("yadif" means "yet another deinterlacing +filter"). +

+

This filter accepts the following options: +

+ +
+
mode
+

The interlacing mode to adopt, accepts one of the following values: +

+
+
0, send_frame
+

output 1 frame for each frame +

+
1, send_field
+

output 1 frame for each field +

+
2, send_frame_nospatial
+

like send_frame but skip spatial interlacing check +

+
3, send_field_nospatial
+

like send_field but skip spatial interlacing check +

+
+ +

Default value is send_frame. +

+
+
parity
+

The picture field parity assumed for the input interlaced video, accepts one of +the following values: +

+
+
0, tff
+

assume top field first +

+
1, bff
+

assume bottom field first +

+
-1, auto
+

enable automatic detection +

+
+ +

Default value is auto. +If interlacing is unknown or decoder does not export this information, +top field first will be assumed. +

+
+
deint
+

Specify which frames to deinterlace. Accept one of the following +values: +

+
+
0, all
+

deinterlace all frames +

+
1, interlaced
+

only deinterlace frames marked as interlaced +

+
+ +

Default value is all. +

+
+ + + +

38. Video Sources

+ +

Below is a description of the currently available video sources. +

+ +

38.1 buffer

+ +

Buffer video frames, and make them available to the filter chain. +

+

This source is mainly intended for a programmatic use, in particular +through the interface defined in ‘libavfilter/vsrc_buffer.h’. +

+

This source accepts the following options: +

+
+
video_size
+

Specify the size (width and height) of the buffered video frames. For the +syntax of this option, check the "Video size" section in the ffmpeg-utils +manual. +

+
+
width
+

Input video width. +

+
+
height
+

Input video height. +

+
+
pix_fmt
+

A string representing the pixel format of the buffered video frames. +It may be a number corresponding to a pixel format, or a pixel format +name. +

+
+
time_base
+

Specify the timebase assumed by the timestamps of the buffered frames. +

+
+
frame_rate
+

Specify the frame rate expected for the video stream. +

+
+
pixel_aspect, sar
+

Specify the sample aspect ratio assumed by the video frames. +

+
+
sws_param
+

Specify the optional parameters to be used for the scale filter which +is automatically inserted when an input change is detected in the +input size or format. +

+
+ +

For example: +

 
buffer=width=320:height=240:pix_fmt=yuv410p:time_base=1/24:sar=1
+
+ +

will instruct the source to accept video frames with size 320x240 and +with format "yuv410p", assuming 1/24 as the timestamps timebase and +square pixels (1:1 sample aspect ratio). +Since the pixel format with name "yuv410p" corresponds to the number 6 +(check the enum AVPixelFormat definition in ‘libavutil/pixfmt.h’), +this example corresponds to: +

 
buffer=size=320x240:pixfmt=6:time_base=1/24:pixel_aspect=1/1
+
+ +

Alternatively, the options can be specified as a flat string, but this +syntax is deprecated: +

+

width:height:pix_fmt:time_base.num:time_base.den:pixel_aspect.num:pixel_aspect.den[:sws_param] +

+ +

38.2 cellauto

+ +

Create a pattern generated by an elementary cellular automaton. +

+

The initial state of the cellular automaton can be defined through the +‘filename’, and ‘pattern’ options. If such options are +not specified an initial state is created randomly. +

+

At each new frame a new row in the video is filled with the result of +the cellular automaton next generation. The behavior when the whole +frame is filled is defined by the ‘scroll’ option. +

+

This source accepts the following options: +

+
+
filename, f
+

Read the initial cellular automaton state, i.e. the starting row, from +the specified file. +In the file, each non-whitespace character is considered an alive +cell, a newline will terminate the row, and further characters in the +file will be ignored. +

+
+
pattern, p
+

Read the initial cellular automaton state, i.e. the starting row, from +the specified string. +

+

Each non-whitespace character in the string is considered an alive +cell, a newline will terminate the row, and further characters in the +string will be ignored. +

+
+
rate, r
+

Set the video rate, that is the number of frames generated per second. +Default is 25. +

+
+
random_fill_ratio, ratio
+

Set the random fill ratio for the initial cellular automaton row. It +is a floating point number value ranging from 0 to 1, defaults to +1/PHI. +

+

This option is ignored when a file or a pattern is specified. +

+
+
random_seed, seed
+

Set the seed for filling randomly the initial row, must be an integer +included between 0 and UINT32_MAX. If not specified, or if explicitly +set to -1, the filter will try to use a good random seed on a best +effort basis. +

+
+
rule
+

Set the cellular automaton rule, it is a number ranging from 0 to 255. +Default value is 110. +

+
+
size, s
+

Set the size of the output video. For the syntax of this option, check +the "Video size" section in the ffmpeg-utils manual. +

+

If ‘filename’ or ‘pattern’ is specified, the size is set +by default to the width of the specified initial state row, and the +height is set to width * PHI. +

+

If ‘size’ is set, it must contain the width of the specified +pattern string, and the specified pattern will be centered in the +larger row. +

+

If a filename or a pattern string is not specified, the size value +defaults to "320x518" (used for a randomly generated initial state). +

+
+
scroll
+

If set to 1, scroll the output upward when all the rows in the output +have been already filled. If set to 0, the new generated row will be +written over the top row just after the bottom row is filled. +Defaults to 1. +

+
+
start_full, full
+

If set to 1, completely fill the output with generated rows before +outputting the first frame. +This is the default behavior, for disabling set the value to 0. +

+
+
stitch
+

If set to 1, stitch the left and right row edges together. +This is the default behavior, for disabling set the value to 0. +

+
+ + +

38.2.1 Examples

+ +
    +
  • +Read the initial state from ‘pattern’, and specify an output of +size 200x400. +
     
    cellauto=f=pattern:s=200x400
    +
    + +
  • +Generate a random initial row with a width of 200 cells, with a fill +ratio of 2/3: +
     
    cellauto=ratio=2/3:s=200x200
    +
    + +
  • +Create a pattern generated by rule 18 starting by a single alive cell +centered on an initial row with width 100: +
     
    cellauto=p=@:s=100x400:full=0:rule=18
    +
    + +
  • +Specify a more elaborated initial pattern: +
     
    cellauto=p='@@ @ @@':s=100x400:full=0:rule=18
    +
    + +
+ + +

38.3 mandelbrot

+ +

Generate a Mandelbrot set fractal, and progressively zoom towards the +point specified with start_x and start_y. +

+

This source accepts the following options: +

+
+
end_pts
+

Set the terminal pts value. Default value is 400. +

+
+
end_scale
+

Set the terminal scale value. +Must be a floating point value. Default value is 0.3. +

+
+
inner
+

Set the inner coloring mode, that is the algorithm used to draw the +Mandelbrot fractal internal region. +

+

It shall assume one of the following values: +

+
black
+

Set black mode. +

+
convergence
+

Show time until convergence. +

+
mincol
+

Set color based on point closest to the origin of the iterations. +

+
period
+

Set period mode. +

+
+ +

Default value is mincol. +

+
+
bailout
+

Set the bailout value. Default value is 10.0. +

+
+
maxiter
+

Set the maximum of iterations performed by the rendering +algorithm. Default value is 7189. +

+
+
outer
+

Set outer coloring mode. +It shall assume one of following values: +

+
iteration_count
+

Set iteration count mode.

+
normalized_iteration_count
+

set normalized iteration count mode. +

+
+

Default value is normalized_iteration_count. +

+
+
rate, r
+

Set frame rate, expressed as number of frames per second. Default +value is "25". +

+
+
size, s
+

Set frame size. For the syntax of this option, check the "Video +size" section in the ffmpeg-utils manual. Default value is "640x480". +

+
+
start_scale
+

Set the initial scale value. Default value is 3.0. +

+
+
start_x
+

Set the initial x position. Must be a floating point value between +-100 and 100. Default value is -0.743643887037158704752191506114774. +

+
+
start_y
+

Set the initial y position. Must be a floating point value between +-100 and 100. Default value is -0.131825904205311970493132056385139. +

+
+ + +

38.4 mptestsrc

+ +

Generate various test patterns, as generated by the MPlayer test filter. +

+

The size of the generated video is fixed, and is 256x256. +This source is useful in particular for testing encoding features. +

+

This source accepts the following options: +

+
+
rate, r
+

Specify the frame rate of the sourced video, as the number of frames +generated per second. It has to be a string in the format +frame_rate_num/frame_rate_den, an integer number, a float +number or a valid video frame rate abbreviation. The default value is +"25". +

+
+
duration, d
+

Set the video duration of the sourced video. The accepted syntax is: +

 
[-]HH:MM:SS[.m...]
+[-]S+[.m...]
+
+

See also the function av_parse_time(). +

+

If not specified, or the expressed duration is negative, the video is +supposed to be generated forever. +

+
+
test, t
+
+

Set the number or the name of the test to perform. Supported tests are: +

+
dc_luma
+
dc_chroma
+
freq_luma
+
freq_chroma
+
amp_luma
+
amp_chroma
+
cbp
+
mv
+
ring1
+
ring2
+
all
+
+ +

Default value is "all", which will cycle through the list of all tests. +

+
+ +

For example the following: +

 
testsrc=t=dc_luma
+
+ +

will generate a "dc_luma" test pattern. +

+ +

38.5 frei0r_src

+ +

Provide a frei0r source. +

+

To enable compilation of this filter you need to install the frei0r +header and configure FFmpeg with --enable-frei0r. +

+

This source accepts the following options: +

+
+
size
+

The size of the video to generate. For the syntax of this option, check the +"Video size" section in the ffmpeg-utils manual. +

+
+
framerate
+

Framerate of the generated video, may be a string of the form +num/den or a frame rate abbreviation. +

+
+
filter_name
+

The name to the frei0r source to load. For more information regarding frei0r and +how to set the parameters read the section frei0r in the description of +the video filters. +

+
+
filter_params
+

A ’|’-separated list of parameters to pass to the frei0r source. +

+
+
+ +

For example, to generate a frei0r partik0l source with size 200x200 +and frame rate 10 which is overlayed on the overlay filter main input: +

 
frei0r_src=size=200x200:framerate=10:filter_name=partik0l:filter_params=1234 [overlay]; [in][overlay] overlay
+
+ + +

38.6 life

+ +

Generate a life pattern. +

+

This source is based on a generalization of John Conway’s life game. +

+

The sourced input represents a life grid, each pixel represents a cell +which can be in one of two possible states, alive or dead. Every cell +interacts with its eight neighbours, which are the cells that are +horizontally, vertically, or diagonally adjacent. +

+

At each interaction the grid evolves according to the adopted rule, +which specifies the number of neighbor alive cells which will make a +cell stay alive or born. The ‘rule’ option allows one to specify +the rule to adopt. +

+

This source accepts the following options: +

+
+
filename, f
+

Set the file from which to read the initial grid state. In the file, +each non-whitespace character is considered an alive cell, and newline +is used to delimit the end of each row. +

+

If this option is not specified, the initial grid is generated +randomly. +

+
+
rate, r
+

Set the video rate, that is the number of frames generated per second. +Default is 25. +

+
+
random_fill_ratio, ratio
+

Set the random fill ratio for the initial random grid. It is a +floating point number value ranging from 0 to 1, defaults to 1/PHI. +It is ignored when a file is specified. +

+
+
random_seed, seed
+

Set the seed for filling the initial random grid, must be an integer +included between 0 and UINT32_MAX. If not specified, or if explicitly +set to -1, the filter will try to use a good random seed on a best +effort basis. +

+
+
rule
+

Set the life rule. +

+

A rule can be specified with a code of the kind "SNS/BNB", +where NS and NB are sequences of numbers in the range 0-8, +NS specifies the number of alive neighbor cells which make a +live cell stay alive, and NB the number of alive neighbor cells +which make a dead cell to become alive (i.e. to "born"). +"s" and "b" can be used in place of "S" and "B", respectively. +

+

Alternatively a rule can be specified by an 18-bits integer. The 9 high order bits are used to encode the next cell state if it is alive for each number of neighbor alive cells, the low order bits specify the rule for "borning" new cells. Higher order bits encode for a higher number of neighbor cells. For example the number 6153 = (12<<9)+9 specifies a stay alive rule of 12 and a born rule of 9, which corresponds to "S23/B03".

+

Default value is "S23/B3", which is the original Conway’s game of life +rule, and will keep a cell alive if it has 2 or 3 neighbor alive +cells, and will born a new cell if there are three alive cells around +a dead cell. +

+
+
size, s
+

Set the size of the output video. For the syntax of this option, check the +"Video size" section in the ffmpeg-utils manual. +

+

If ‘filename’ is specified, the size is set by default to the +same size of the input file. If ‘size’ is set, it must contain +the size specified in the input file, and the initial grid defined in +that file is centered in the larger resulting area. +

+

If a filename is not specified, the size value defaults to "320x240" +(used for a randomly generated initial grid). +

+
+
stitch
+

If set to 1, stitch the left and right grid edges together, and the +top and bottom edges also. Defaults to 1. +

+
+
mold
+

Set cell mold speed. If set, a dead cell will go from ‘death_color’ to +‘mold_color’ with a step of ‘mold’. ‘mold’ can have a +value from 0 to 255. +

+
+
life_color
+

Set the color of living (or new born) cells. +

+
+
death_color
+

Set the color of dead cells. If ‘mold’ is set, this is the first color +used to represent a dead cell. +

+
+
mold_color
+

Set mold color, for definitely dead and moldy cells. +

+

For the syntax of these 3 color options, check the "Color" section in the +ffmpeg-utils manual. +

+
+ + +

38.6.1 Examples

+ +
    +
  • +Read a grid from ‘pattern’, and center it on a grid of size +300x300 pixels: +
     
    life=f=pattern:s=300x300
    +
    + +
  • +Generate a random grid of size 200x200, with a fill ratio of 2/3: +
     
    life=ratio=2/3:s=200x200
    +
    + +
  • +Specify a custom rule for evolving a randomly generated grid: +
     
    life=rule=S14/B34
    +
    + +
  • +Full example with slow death effect (mold) using ffplay: +
     
    ffplay -f lavfi life=s=300x200:mold=10:r=60:ratio=0.1:death_color=#C83232:life_color=#00ff00,scale=1200:800:flags=16
    +
    +
+ +

+ + + + + + +

+

38.7 color, haldclutsrc, nullsrc, rgbtestsrc, smptebars, smptehdbars, testsrc

+ +

The color source provides a uniformly colored input.

+

The haldclutsrc source provides an identity Hald CLUT. See also +haldclut filter. +

+

The nullsrc source returns unprocessed video frames. It is +mainly useful to be employed in analysis / debugging tools, or as the +source for filters which ignore the input data. +

+

The rgbtestsrc source generates an RGB test pattern useful for +detecting RGB vs BGR issues. You should see a red, green and blue +stripe from top to bottom. +

+

The smptebars source generates a color bars pattern, based on +the SMPTE Engineering Guideline EG 1-1990. +

+

The smptehdbars source generates a color bars pattern, based on +the SMPTE RP 219-2002. +

+

The testsrc source generates a test video pattern, showing a +color pattern, a scrolling gradient and a timestamp. This is mainly +intended for testing purposes. +

+

The sources accept the following options: +

+
+
color, c
+

Specify the color of the source, only available in the color +source. For the syntax of this option, check the "Color" section in the +ffmpeg-utils manual. +

+
+
level
+

Specify the level of the Hald CLUT, only available in the haldclutsrc +source. A level of N generates a picture of N*N*N by N*N*N +pixels to be used as identity matrix for 3D lookup tables. Each component is +coded on a 1/(N*N) scale. +

+
+
size, s
+

Specify the size of the sourced video. For the syntax of this option, check the +"Video size" section in the ffmpeg-utils manual. The default value is +"320x240". +

+

This option is not available with the haldclutsrc filter. +

+
+
rate, r
+

Specify the frame rate of the sourced video, as the number of frames +generated per second. It has to be a string in the format +frame_rate_num/frame_rate_den, an integer number, a float +number or a valid video frame rate abbreviation. The default value is +"25". +

+
+
sar
+

Set the sample aspect ratio of the sourced video. +

+
+
duration, d
+

Set the video duration of the sourced video. The accepted syntax is: +

 
[-]HH[:MM[:SS[.m...]]]
+[-]S+[.m...]
+
+

See also the function av_parse_time(). +

+

If not specified, or the expressed duration is negative, the video is +supposed to be generated forever. +

+
+
decimals, n
+

Set the number of decimals to show in the timestamp, only available in the +testsrc source. +

+

The displayed timestamp value will correspond to the original +timestamp value multiplied by the power of 10 of the specified +value. Default value is 0. +

+
+ +

For example the following: +

 
testsrc=duration=5.3:size=qcif:rate=10
+
+ +

will generate a video with a duration of 5.3 seconds, with size +176x144 and a frame rate of 10 frames per second. +

+

The following graph description will generate a red source +with an opacity of 0.2, with size "qcif" and a frame rate of 10 +frames per second. +

 
color=c=red@0.2:s=qcif:r=10
+
+ +

If the input content is to be ignored, nullsrc can be used. The +following command generates noise in the luminance plane by employing +the geq filter: +

 
nullsrc=s=256x256, geq=random(1)*255:128:128
+
+ + +

38.7.1 Commands

+ +

The color source supports the following commands: +

+
+
c, color
+

Set the color of the created image. Accepts the same syntax of the +corresponding ‘color’ option. +

+
+ + + +

39. Video Sinks

+ +

Below is a description of the currently available video sinks. +

+ +

39.1 buffersink

+ +

Buffer video frames, and make them available to the end of the filter +graph. +

+

This sink is mainly intended for a programmatic use, in particular +through the interface defined in ‘libavfilter/buffersink.h’ +or the options system. +

+

It accepts a pointer to an AVBufferSinkContext structure, which +defines the incoming buffers’ formats, to be passed as the opaque +parameter to avfilter_init_filter for initialization. +

+ +

39.2 nullsink

+ +

Null video sink, do absolutely nothing with the input video. It is +mainly useful as a template and to be employed in analysis / debugging +tools. +

+ + +

40. Multimedia Filters

+ +

Below is a description of the currently available multimedia filters. +

+ +

40.1 avectorscope

+ +

Convert input audio to a video output, representing the audio vector +scope. +

+

The filter is used to measure the difference between channels of stereo +audio stream. A monoaural signal, consisting of identical left and right +signal, results in straight vertical line. Any stereo separation is visible +as a deviation from this line, creating a Lissajous figure. +If the straight (or deviation from it) but horizontal line appears this +indicates that the left and right channels are out of phase. +

+

The filter accepts the following options: +

+
+
mode, m
+

Set the vectorscope mode. +

+

Available values are: +

+
lissajous
+

Lissajous rotated by 45 degrees. +

+
+
lissajous_xy
+

Same as above but not rotated. +

+
+ +

Default value is ‘lissajous’. +

+
+
size, s
+

Set the video size for the output. For the syntax of this option, check the "Video size" +section in the ffmpeg-utils manual. Default value is 400x400. +

+
+
rate, r
+

Set the output frame rate. Default value is 25. +

+
+
rc
+
gc
+
bc
+

Specify the red, green and blue contrast. Default values are 40, 160 and 80. +Allowed range is [0, 255]. +

+
+
rf
+
gf
+
bf
+

Specify the red, green and blue fade. Default values are 15, 10 and 5. +Allowed range is [0, 255]. +

+
+
zoom
+

Set the zoom factor. Default value is 1. Allowed range is [1, 10]. +

+
+ + +

40.1.1 Examples

+ +
    +
  • +Complete example using ffplay: +
     
    ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
    +             [a] avectorscope=zoom=1.3:rc=2:gc=200:bc=10:rf=1:gf=8:bf=7 [out0]'
    +
    +
+ + +

40.2 concat

+ +

Concatenate audio and video streams, joining them together one after the +other. +

+

The filter works on segments of synchronized video and audio streams. All +segments must have the same number of streams of each type, and that will +also be the number of streams at output. +

+

The filter accepts the following options: +

+
+
n
+

Set the number of segments. Default is 2. +

+
+
v
+

Set the number of output video streams, that is also the number of video +streams in each segment. Default is 1. +

+
+
a
+

Set the number of output audio streams, that is also the number of video +streams in each segment. Default is 0. +

+
+
unsafe
+

Activate unsafe mode: do not fail if segments have a different format. +

+
+
+ +

The filter has v+a outputs: first v video outputs, then +a audio outputs. +

+

There are nx(v+a) inputs: first the inputs for the first +segment, in the same order as the outputs, then the inputs for the second +segment, etc. +

+

Related streams do not always have exactly the same duration, for various +reasons including codec frame size or sloppy authoring. For that reason, +related synchronized streams (e.g. a video and its audio track) should be +concatenated at once. The concat filter will use the duration of the longest +stream in each segment (except the last one), and if necessary pad shorter +audio streams with silence. +

+

For this filter to work correctly, all segments must start at timestamp 0. +

+

All corresponding streams must have the same parameters in all segments; the +filtering system will automatically select a common pixel format for video +streams, and a common sample format, sample rate and channel layout for +audio streams, but other settings, such as resolution, must be converted +explicitly by the user. +

+

Different frame rates are acceptable but will result in variable frame rate +at output; be sure to configure the output file to handle it. +

+ +

40.2.1 Examples

+ +
    +
  • +Concatenate an opening, an episode and an ending, all in bilingual version +(video in stream 0, audio in streams 1 and 2): +
     
    ffmpeg -i opening.mkv -i episode.mkv -i ending.mkv -filter_complex \
    +  '[0:0] [0:1] [0:2] [1:0] [1:1] [1:2] [2:0] [2:1] [2:2]
    +   concat=n=3:v=1:a=2 [v] [a1] [a2]' \
    +  -map '[v]' -map '[a1]' -map '[a2]' output.mkv
    +
    + +
  • +Concatenate two parts, handling audio and video separately, using the +(a)movie sources, and adjusting the resolution: +
     
    movie=part1.mp4, scale=512:288 [v1] ; amovie=part1.mp4 [a1] ;
    +movie=part2.mp4, scale=512:288 [v2] ; amovie=part2.mp4 [a2] ;
    +[v1] [v2] concat [outv] ; [a1] [a2] concat=v=0:a=1 [outa]
    +
    +

    Note that a desync will happen at the stitch if the audio and video streams +do not have exactly the same duration in the first file. +

    +
+ + +

40.3 ebur128

+ +

EBU R128 scanner filter. This filter takes an audio stream as input and outputs +it unchanged. By default, it logs a message at a frequency of 10Hz with the +Momentary loudness (identified by M), Short-term loudness (S), +Integrated loudness (I) and Loudness Range (LRA). +

+

The filter also has a video output (see the video option) with a real +time graph to observe the loudness evolution. The graphic contains the logged +message mentioned above, so it is not printed anymore when this option is set, +unless the verbose logging is set. The main graphing area contains the +short-term loudness (3 seconds of analysis), and the gauge on the right is for +the momentary loudness (400 milliseconds). +

+

More information about the Loudness Recommendation EBU R128 on +http://tech.ebu.ch/loudness. +

+

The filter accepts the following options: +

+
+
video
+

Activate the video output. The audio stream is passed unchanged whether this +option is set or no. The video stream will be the first output stream if +activated. Default is 0. +

+
+
size
+

Set the video size. This option is for video only. For the syntax of this +option, check the "Video size" section in the ffmpeg-utils manual. Default +and minimum resolution is 640x480. +

+
+
meter
+

Set the EBU scale meter. Default is 9. Common values are 9 and +18, respectively for EBU scale meter +9 and EBU scale meter +18. Any +other integer value between this range is allowed. +

+
+
metadata
+

Set metadata injection. If set to 1, the audio input will be segmented +into 100ms output frames, each of them containing various loudness information +in metadata. All the metadata keys are prefixed with lavfi.r128.. +

+

Default is 0. +

+
+
framelog
+

Force the frame logging level. +

+

Available values are: +

+
info
+

information logging level +

+
verbose
+

verbose logging level +

+
+ +

By default, the logging level is set to info. If the ‘video’ or +the ‘metadata’ options are set, it switches to verbose. +

+
+
peak
+

Set peak mode(s). +

+

Available modes can be cumulated (the option is a flag type). Possible +values are: +

+
none
+

Disable any peak mode (default). +

+
sample
+

Enable sample-peak mode. +

+

Simple peak mode looking for the higher sample value. It logs a message +for sample-peak (identified by SPK). +

+
true
+

Enable true-peak mode. +

+

If enabled, the peak lookup is done on an over-sampled version of the input +stream for better peak accuracy. It logs a message for true-peak. +(identified by TPK) and true-peak per frame (identified by FTPK). +This mode requires a build with libswresample. +

+
+ +
+
+ + +

40.3.1 Examples

+ +
    +
  • +Real-time graph using ffplay, with a EBU scale meter +18: +
     
    ffplay -f lavfi -i "amovie=input.mp3,ebur128=video=1:meter=18 [out0][out1]"
    +
    + +
  • +Run an analysis with ffmpeg: +
     
    ffmpeg -nostats -i input.mp3 -filter_complex ebur128 -f null -
    +
    +
+ + +

40.4 interleave, ainterleave

+ +

Temporally interleave frames from several inputs. +

+

interleave works with video inputs, ainterleave with audio. +

+

These filters read frames from several inputs and send the oldest +queued frame to the output. +

+

Input streams must have a well defined, monotonically increasing frame +timestamp values. +

+

In order to submit one frame to output, these filters need to enqueue +at least one frame for each input, so they cannot work in case one +input is not yet terminated and will not receive incoming frames. +

+

For example consider the case when one input is a select filter +which always drop input frames. The interleave filter will keep +reading from that input, but it will never be able to send new frames +to output until the input will send an end-of-stream signal. +

+

Also, depending on inputs synchronization, the filters will drop +frames in case one input receives more frames than the other ones, and +the queue is already filled. +

+

These filters accept the following options: +

+
+
nb_inputs, n
+

Set the number of different inputs, it is 2 by default. +

+
+ + +

40.4.1 Examples

+ +
    +
  • +Interleave frames belonging to different streams using ffmpeg: +
     
    ffmpeg -i bambi.avi -i pr0n.mkv -filter_complex "[0:v][1:v] interleave" out.avi
    +
    + +
  • +Add flickering blur effect: +
     
    select='if(gt(random(0), 0.2), 1, 2)':n=2 [tmp], boxblur=2:2, [tmp] interleave
    +
    +
+ + +

40.5 perms, aperms

+ +

Set read/write permissions for the output frames. +

+

These filters are mainly aimed at developers to test direct path in the +following filter in the filtergraph. +

+

The filters accept the following options: +

+
+
mode
+

Select the permissions mode. +

+

It accepts the following values: +

+
none
+

Do nothing. This is the default. +

+
ro
+

Set all the output frames read-only. +

+
rw
+

Set all the output frames directly writable. +

+
toggle
+

Make the frame read-only if writable, and writable if read-only. +

+
random
+

Set each output frame read-only or writable randomly. +

+
+ +
+
seed
+

Set the seed for the random mode, must be an integer included between +0 and UINT32_MAX. If not specified, or if explicitly set to +-1, the filter will try to use a good random seed on a best effort +basis. +

+
+ +

Note: in case of auto-inserted filter between the permission filter and the +following one, the permission might not be received as expected in that +following filter. Inserting a format or aformat filter before the +perms/aperms filter can avoid this problem. +

+ +

40.6 select, aselect

+ +

Select frames to pass in output. +

+

This filter accepts the following options: +

+
+
expr, e
+

Set expression, which is evaluated for each input frame. +

+

If the expression is evaluated to zero, the frame is discarded. +

+

If the evaluation result is negative or NaN, the frame is sent to the +first output; otherwise it is sent to the output with index +ceil(val)-1, assuming that the input index starts from 0. +

+

For example a value of 1.2 corresponds to the output with index +ceil(1.2)-1 = 2-1 = 1, that is the second output. +

+
+
outputs, n
+

Set the number of outputs. The output to which to send the selected +frame is based on the result of the evaluation. Default value is 1. +

+
+ +

The expression can contain the following constants: +

+
+
n
+

the sequential number of the filtered frame, starting from 0 +

+
+
selected_n
+

the sequential number of the selected frame, starting from 0 +

+
+
prev_selected_n
+

the sequential number of the last selected frame, NAN if undefined +

+
+
TB
+

timebase of the input timestamps +

+
+
pts
+

the PTS (Presentation TimeStamp) of the filtered video frame, +expressed in TB units, NAN if undefined +

+
+
t
+

the PTS (Presentation TimeStamp) of the filtered video frame, +expressed in seconds, NAN if undefined +

+
+
prev_pts
+

the PTS of the previously filtered video frame, NAN if undefined +

+
+
prev_selected_pts
+

the PTS of the last previously filtered video frame, NAN if undefined +

+
+
prev_selected_t
+

the PTS of the last previously selected video frame, NAN if undefined +

+
+
start_pts
+

the PTS of the first video frame in the video, NAN if undefined +

+
+
start_t
+

the time of the first video frame in the video, NAN if undefined +

+
+
pict_type (video only)
+

the type of the filtered frame, can assume one of the following +values: +

+
I
+
P
+
B
+
S
+
SI
+
SP
+
BI
+
+ +
+
interlace_type (video only)
+

the frame interlace type, can assume one of the following values: +

+
PROGRESSIVE
+

the frame is progressive (not interlaced) +

+
TOPFIRST
+

the frame is top-field-first +

+
BOTTOMFIRST
+

the frame is bottom-field-first +

+
+ +
+
consumed_sample_n (audio only)
+

the number of selected samples before the current frame +

+
+
samples_n (audio only)
+

the number of samples in the current frame +

+
+
sample_rate (audio only)
+

the input sample rate +

+
+
key
+

1 if the filtered frame is a key-frame, 0 otherwise +

+
+
pos
+

the position in the file of the filtered frame, -1 if the information +is not available (e.g. for synthetic video) +

+
+
scene (video only)
+

value between 0 and 1 to indicate a new scene; a low value reflects a low +probability for the current frame to introduce a new scene, while a higher +value means the current frame is more likely to be one (see the example below) +

+
+
+ +

The default value of the select expression is "1". +

+ +

40.6.1 Examples

+ +
    +
  • +Select all frames in input: +
     
    select
    +
    + +

    The example above is the same as: +

     
    select=1
    +
    + +
  • +Skip all frames: +
     
    select=0
    +
    + +
  • +Select only I-frames: +
     
    select='eq(pict_type\,I)'
    +
    + +
  • +Select one frame every 100: +
     
    select='not(mod(n\,100))'
    +
    + +
  • +Select only frames contained in the 10-20 time interval: +
     
    select=between(t\,10\,20)
    +
    + +
  • +Select only I frames contained in the 10-20 time interval: +
     
    select=between(t\,10\,20)*eq(pict_type\,I)
    +
    + +
  • +Select frames with a minimum distance of 10 seconds: +
     
    select='isnan(prev_selected_t)+gte(t-prev_selected_t\,10)'
    +
    + +
  • +Use aselect to select only audio frames with samples number > 100: +
     
    aselect='gt(samples_n\,100)'
    +
    + +
  • +Create a mosaic of the first scenes: +
     
    ffmpeg -i video.avi -vf select='gt(scene\,0.4)',scale=160:120,tile -frames:v 1 preview.png
    +
    + +

    Comparing scene against a value between 0.3 and 0.5 is generally a sane +choice. +

    +
  • +Send even and odd frames to separate outputs, and compose them: +
     
    select=n=2:e='mod(n, 2)+1' [odd][even]; [odd] pad=h=2*ih [tmp]; [tmp][even] overlay=y=h
    +
    +
+ + +

40.7 sendcmd, asendcmd

+ +

Send commands to filters in the filtergraph. +

+

These filters read commands to be sent to other filters in the +filtergraph. +

+

sendcmd must be inserted between two video filters, +asendcmd must be inserted between two audio filters, but apart +from that they act the same way. +

+

The specification of commands can be provided in the filter arguments +with the commands option, or in a file specified by the +filename option. +

+

These filters accept the following options: +

+
commands, c
+

Set the commands to be read and sent to the other filters. +

+
filename, f
+

Set the filename of the commands to be read and sent to the other +filters. +

+
+ + +

40.7.1 Commands syntax

+ +

A commands description consists of a sequence of interval +specifications, comprising a list of commands to be executed when a +particular event related to that interval occurs. The occurring event +is typically the current frame time entering or leaving a given time +interval. +

+

An interval is specified by the following syntax: +

 
START[-END] COMMANDS;
+
+ +

The time interval is specified by the START and END times. +END is optional and defaults to the maximum time. +

+

The current frame time is considered within the specified interval if +it is included in the interval [START, END), that is when +the time is greater or equal to START and is lesser than +END. +

+

COMMANDS consists of a sequence of one or more command +specifications, separated by ",", relating to that interval. The +syntax of a command specification is given by: +

 
[FLAGS] TARGET COMMAND ARG
+
+ +

FLAGS is optional and specifies the type of events relating to +the time interval which enable sending the specified command, and must +be a non-null sequence of identifier flags separated by "+" or "|" and +enclosed between "[" and "]". +

+

The following flags are recognized: +

+
enter
+

The command is sent when the current frame timestamp enters the +specified interval. In other words, the command is sent when the +previous frame timestamp was not in the given interval, and the +current is. +

+
+
leave
+

The command is sent when the current frame timestamp leaves the +specified interval. In other words, the command is sent when the +previous frame timestamp was in the given interval, and the +current is not. +

+
+ +

If FLAGS is not specified, a default value of [enter] is +assumed. +

+

TARGET specifies the target of the command, usually the name of +the filter class or a specific filter instance name. +

+

COMMAND specifies the name of the command for the target filter. +

+

ARG is optional and specifies the optional list of argument for +the given COMMAND. +

+

Between one interval specification and another, whitespaces, or +sequences of characters starting with # until the end of line, +are ignored and can be used to annotate comments. +

+

A simplified BNF description of the commands specification syntax +follows: +

 
COMMAND_FLAG  ::= "enter" | "leave"
+COMMAND_FLAGS ::= COMMAND_FLAG [(+|"|")COMMAND_FLAG]
+COMMAND       ::= ["[" COMMAND_FLAGS "]"] TARGET COMMAND [ARG]
+COMMANDS      ::= COMMAND [,COMMANDS]
+INTERVAL      ::= START[-END] COMMANDS
+INTERVALS     ::= INTERVAL[;INTERVALS]
+
+ + +

40.7.2 Examples

+ +
    +
  • +Specify audio tempo change at second 4: +
     
    asendcmd=c='4.0 atempo tempo 1.5',atempo
    +
    + +
  • +Specify a list of drawtext and hue commands in a file. +
     
    # show text in the interval 5-10
    +5.0-10.0 [enter] drawtext reinit 'fontfile=FreeSerif.ttf:text=hello world',
    +         [leave] drawtext reinit 'fontfile=FreeSerif.ttf:text=';
    +
    +# desaturate the image in the interval 15-20
    +15.0-20.0 [enter] hue s 0,
    +          [enter] drawtext reinit 'fontfile=FreeSerif.ttf:text=nocolor',
    +          [leave] hue s 1,
    +          [leave] drawtext reinit 'fontfile=FreeSerif.ttf:text=color';
    +
    +# apply an exponential saturation fade-out effect, starting from time 25
    +25 [enter] hue s exp(25-t)
    +
    + +

    A filtergraph allowing to read and process the above command list +stored in a file ‘test.cmd’, can be specified with: +

     
    sendcmd=f=test.cmd,drawtext=fontfile=FreeSerif.ttf:text='',hue
    +
    +
+ +

+

+

40.8 setpts, asetpts

+ +

Change the PTS (presentation timestamp) of the input frames. +

+

setpts works on video frames, asetpts on audio frames. +

+

This filter accepts the following options: +

+
+
expr
+

The expression which is evaluated for each frame to construct its timestamp. +

+
+
+ +

The expression is evaluated through the eval API and can contain the following +constants: +

+
+
FRAME_RATE
+

frame rate, only defined for constant frame-rate video +

+
+
PTS
+

the presentation timestamp in input +

+
+
N
+

the count of the input frame for video or the number of consumed samples, +not including the current frame for audio, starting from 0. +

+
+
NB_CONSUMED_SAMPLES
+

the number of consumed samples, not including the current frame (only +audio) +

+
+
NB_SAMPLES, S
+

the number of samples in the current frame (only audio) +

+
+
SAMPLE_RATE, SR
+

audio sample rate +

+
+
STARTPTS
+

the PTS of the first frame +

+
+
STARTT
+

the time in seconds of the first frame +

+
+
INTERLACED
+

tell if the current frame is interlaced +

+
+
T
+

the time in seconds of the current frame +

+
+
POS
+

original position in the file of the frame, or undefined if undefined +for the current frame +

+
+
PREV_INPTS
+

previous input PTS +

+
+
PREV_INT
+

previous input time in seconds +

+
+
PREV_OUTPTS
+

previous output PTS +

+
+
PREV_OUTT
+

previous output time in seconds +

+
+
RTCTIME
+

wallclock (RTC) time in microseconds. This is deprecated, use time(0) +instead. +

+
+
RTCSTART
+

wallclock (RTC) time at the start of the movie in microseconds +

+
+
TB
+

timebase of the input timestamps +

+
+
+ + +

40.8.1 Examples

+ +
    +
  • +Start counting PTS from zero +
     
    setpts=PTS-STARTPTS
    +
    + +
  • +Apply fast motion effect: +
     
    setpts=0.5*PTS
    +
    + +
  • +Apply slow motion effect: +
     
    setpts=2.0*PTS
    +
    + +
  • +Set fixed rate of 25 frames per second: +
     
    setpts=N/(25*TB)
    +
    + +
  • +Set fixed rate 25 fps with some jitter: +
     
    setpts='1/(25*TB) * (N + 0.05 * sin(N*2*PI/25))'
    +
    + +
  • +Apply an offset of 10 seconds to the input PTS: +
     
    setpts=PTS+10/TB
    +
    + +
  • +Generate timestamps from a "live source" and rebase onto the current timebase: +
     
    setpts='(RTCTIME - RTCSTART) / (TB * 1000000)'
    +
    + +
  • +Generate timestamps by counting samples: +
     
    asetpts=N/SR/TB
    +
    + +
+ + +

40.9 settb, asettb

+ +

Set the timebase to use for the output frames timestamps. +It is mainly useful for testing timebase configuration. +

+

This filter accepts the following options: +

+
+
expr, tb
+

The expression which is evaluated into the output timebase. +

+
+
+ +

The value for ‘tb’ is an arithmetic expression representing a +rational. The expression can contain the constants "AVTB" (the default +timebase), "intb" (the input timebase) and "sr" (the sample rate, +audio only). Default value is "intb". +

+ +

40.9.1 Examples

+ +
    +
  • +Set the timebase to 1/25: +
     
    settb=expr=1/25
    +
    + +
  • +Set the timebase to 1/10: +
     
    settb=expr=0.1
    +
    + +
  • +Set the timebase to 1001/1000: +
     
    settb=1+0.001
    +
    + +
  • +Set the timebase to 2*intb: +
     
    settb=2*intb
    +
    + +
  • +Set the default timebase value: +
     
    settb=AVTB
    +
    +
+ + +

40.10 showspectrum

+ +

Convert input audio to a video output, representing the audio frequency +spectrum. +

+

The filter accepts the following options: +

+
+
size, s
+

Specify the video size for the output. For the syntax of this option, check +the "Video size" section in the ffmpeg-utils manual. Default value is +640x512. +

+
+
slide
+

Specify if the spectrum should slide along the window. Default value is +0. +

+
+
mode
+

Specify display mode. +

+

It accepts the following values: +

+
combined
+

all channels are displayed in the same row +

+
separate
+

all channels are displayed in separate rows +

+
+ +

Default value is ‘combined’. +

+
+
color
+

Specify display color mode. +

+

It accepts the following values: +

+
channel
+

each channel is displayed in a separate color +

+
intensity
+

each channel is is displayed using the same color scheme +

+
+ +

Default value is ‘channel’. +

+
+
scale
+

Specify scale used for calculating intensity color values. +

+

It accepts the following values: +

+
lin
+

linear +

+
sqrt
+

square root, default +

+
cbrt
+

cubic root +

+
log
+

logarithmic +

+
+ +

Default value is ‘sqrt’. +

+
+
saturation
+

Set saturation modifier for displayed colors. Negative values provide +alternative color scheme. 0 is no saturation at all. +Saturation must be in [-10.0, 10.0] range. +Default value is 1. +

+
+
win_func
+

Set window function. +

+

It accepts the following values: +

+
none
+

No samples pre-processing (do not expect this to be faster) +

+
hann
+

Hann window +

+
hamming
+

Hamming window +

+
blackman
+

Blackman window +

+
+ +

Default value is hann. +

+
+ +

The usage is very similar to the showwaves filter; see the examples in that +section. +

+ +

40.10.1 Examples

+ +
    +
  • +Large window with logarithmic color scaling: +
     
    showspectrum=s=1280x480:scale=log
    +
    + +
  • +Complete example for a colored and sliding spectrum per channel using ffplay: +
     
    ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
    +             [a] showspectrum=mode=separate:color=intensity:slide=1:scale=cbrt [out0]'
    +
    +
+ + +

40.11 showwaves

+ +

Convert input audio to a video output, representing the samples waves. +

+

The filter accepts the following options: +

+
+
size, s
+

Specify the video size for the output. For the syntax of this option, check +the "Video size" section in the ffmpeg-utils manual. Default value +is "600x240". +

+
+
mode
+

Set display mode. +

+

Available values are: +

+
point
+

Draw a point for each sample. +

+
+
line
+

Draw a vertical line for each sample. +

+
+ +

Default value is point. +

+
+
n
+

Set the number of samples which are printed on the same column. A +larger value will decrease the frame rate. Must be a positive +integer. This option can be set only if the value for rate +is not explicitly specified. +

+
+
rate, r
+

Set the (approximate) output frame rate. This is done by setting the +option n. Default value is "25". +

+
+
+ + +

40.11.1 Examples

+ +
    +
  • +Output the input file audio and the corresponding video representation +at the same time: +
     
    amovie=a.mp3,asplit[out0],showwaves[out1]
    +
    + +
  • +Create a synthetic signal and show it with showwaves, forcing a +frame rate of 30 frames per second: +
     
    aevalsrc=sin(1*2*PI*t)*sin(880*2*PI*t):cos(2*PI*200*t),asplit[out0],showwaves=r=30[out1]
    +
    +
+ + +

40.12 split, asplit

+ +

Split input into several identical outputs. +

+

asplit works with audio input, split with video. +

+

The filter accepts a single parameter which specifies the number of outputs. If +unspecified, it defaults to 2. +

+ +

40.12.1 Examples

+ +
    +
  • +Create two separate outputs from the same input: +
     
    [in] split [out0][out1]
    +
    + +
  • +To create 3 or more outputs, you need to specify the number of +outputs, like in: +
     
    [in] asplit=3 [out0][out1][out2]
    +
    + +
  • +Create two separate outputs from the same input, one cropped and +one padded: +
     
    [in] split [splitout1][splitout2];
    +[splitout1] crop=100:100:0:0    [cropout];
    +[splitout2] pad=200:200:100:100 [padout];
    +
    + +
  • +Create 5 copies of the input audio with ffmpeg: +
     
    ffmpeg -i INPUT -filter_complex asplit=5 OUTPUT
    +
    +
+ + +

40.13 zmq, azmq

+ +

Receive commands sent through a libzmq client, and forward them to +filters in the filtergraph. +

+

zmq and azmq work as a pass-through filters. zmq +must be inserted between two video filters, azmq between two +audio filters. +

+

To enable these filters you need to install the libzmq library and +headers and configure FFmpeg with --enable-libzmq. +

+

For more information about libzmq see: +http://www.zeromq.org/ +

+

The zmq and azmq filters work as a libzmq server, which +receives messages sent through a network interface defined by the +‘bind_address’ option. +

+

The received message must be in the form: +

 
TARGET COMMAND [ARG]
+
+ +

TARGET specifies the target of the command, usually the name of +the filter class or a specific filter instance name. +

+

COMMAND specifies the name of the command for the target filter. +

+

ARG is optional and specifies the optional argument list for the +given COMMAND. +

+

Upon reception, the message is processed and the corresponding command +is injected into the filtergraph. Depending on the result, the filter +will send a reply to the client, adopting the format: +

 
ERROR_CODE ERROR_REASON
+MESSAGE
+
+ +

MESSAGE is optional. +

+ +

40.13.1 Examples

+ +

Look at ‘tools/zmqsend’ for an example of a zmq client which can +be used to send commands processed by these filters. +

+

Consider the following filtergraph generated by ffplay +

 
ffplay -dumpgraph 1 -f lavfi "
+color=s=100x100:c=red  [l];
+color=s=100x100:c=blue [r];
+nullsrc=s=200x100, zmq [bg];
+[bg][l]   overlay      [bg+l];
+[bg+l][r] overlay=x=100 "
+
+ +

To change the color of the left side of the video, the following +command can be used: +

 
echo Parsed_color_0 c yellow | tools/zmqsend
+
+ +

To change the right side: +

 
echo Parsed_color_1 c pink | tools/zmqsend
+
+ + + +

41. Multimedia Sources

+ +

Below is a description of the currently available multimedia sources. +

+ +

41.1 amovie

+ +

This is the same as movie source, except it selects an audio +stream by default. +

+

+

+

41.2 movie

+ +

Read audio and/or video stream(s) from a movie container. +

+

This filter accepts the following options: +

+
+
filename
+

The name of the resource to read (not necessarily a file but also a device or a +stream accessed through some protocol). +

+
+
format_name, f
+

Specifies the format assumed for the movie to read, and can be either +the name of a container or an input device. If not specified the +format is guessed from movie_name or by probing. +

+
+
seek_point, sp
+

Specifies the seek point in seconds, the frames will be output +starting from this seek point, the parameter is evaluated with +av_strtod so the numerical value may be suffixed by an IS +postfix. Default value is "0". +

+
+
streams, s
+

Specifies the streams to read. Several streams can be specified, +separated by "+". The source will then have as many outputs, in the +same order. The syntax is explained in the “Stream specifiers” +section in the ffmpeg manual. Two special names, "dv" and "da" specify +respectively the default (best suited) video and audio stream. Default +is "dv", or "da" if the filter is called as "amovie". +

+
+
stream_index, si
+

Specifies the index of the video stream to read. If the value is -1, +the best suited video stream will be automatically selected. Default +value is "-1". Deprecated. If the filter is called "amovie", it will select +audio instead of video. +

+
+
loop
+

Specifies how many times to read the stream in sequence. +If the value is less than 1, the stream will be read again and again. +Default value is "1". +

+

Note that when the movie is looped the source timestamps are not +changed, so it will generate non monotonically increasing timestamps. +

+
+ +

This filter allows one to overlay a second video on top of main input of +a filtergraph as shown in this graph: +

 
input -----------> deltapts0 --> overlay --> output
+                                    ^
+                                    |
+movie --> scale--> deltapts1 -------+
+
+ + +

41.2.1 Examples

+ +
    +
  • +Skip 3.2 seconds from the start of the avi file in.avi, and overlay it +on top of the input labelled as "in": +
     
    movie=in.avi:seek_point=3.2, scale=180:-1, setpts=PTS-STARTPTS [over];
    +[in] setpts=PTS-STARTPTS [main];
    +[main][over] overlay=16:16 [out]
    +
    + +
  • +Read from a video4linux2 device, and overlay it on top of the input +labelled as "in": +
     
    movie=/dev/video0:f=video4linux2, scale=180:-1, setpts=PTS-STARTPTS [over];
    +[in] setpts=PTS-STARTPTS [main];
    +[main][over] overlay=16:16 [out]
    +
    + +
  • +Read the first video stream and the audio stream with id 0x81 from +dvd.vob; the video is connected to the pad named "video" and the audio is +connected to the pad named "audio": +
     
    movie=dvd.vob:s=v:0+#0x81 [video] [audio]
    +
    +
+ + + +

42. See Also

+ +

ffmpeg +ffplay, ffprobe, ffserver, +ffmpeg-utils, +ffmpeg-scaler, +ffmpeg-resampler, +ffmpeg-codecs, +ffmpeg-bitstream-filters, +ffmpeg-formats, +ffmpeg-devices, +ffmpeg-protocols, +ffmpeg-filters +

+ + +

43. Authors

+ +

The FFmpeg developers. +

+

For details about the authorship, see the Git history of the project +(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command +git log in the FFmpeg source directory, or browsing the +online repository at http://source.ffmpeg.org. +

+

Maintainers for the specific components are listed in the file +‘MAINTAINERS’ in the source code tree. +

+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/ffmpeg-bitstream-filters.html b/dependencies64/ffmpeg/doc/ffmpeg-bitstream-filters.html new file mode 100644 index 000000000..115a058a9 --- /dev/null +++ b/dependencies64/ffmpeg/doc/ffmpeg-bitstream-filters.html @@ -0,0 +1,225 @@ + + + + + +FFmpeg documentation : FFmpeg Bitstream Filters + + + + + + + + + + +
+
+ + +

FFmpeg Bitstream Filters Documentation

+ + +

Table of Contents

+ + + +

1. Description

+ +

This document describes the bitstream filters provided by the +libavcodec library. +

+

A bitstream filter operates on the encoded stream data, and performs +bitstream level modifications without performing decoding. +

+ + +

2. Bitstream Filters

+ +

When you configure your FFmpeg build, all the supported bitstream +filters are enabled by default. You can list all available ones using +the configure option --list-bsfs. +

+

You can disable all the bitstream filters using the configure option +--disable-bsfs, and selectively enable any bitstream filter using +the option --enable-bsf=BSF, or you can disable a particular +bitstream filter using the option --disable-bsf=BSF. +

+

The option -bsfs of the ff* tools will display the list of +all the supported bitstream filters included in your build. +

+

Below is a description of the currently available bitstream filters. +

+ +

2.1 aac_adtstoasc

+ +

Convert MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration +bitstream filter. +

+

This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4 +ADTS header and removes the ADTS header. +

+

This is required for example when copying an AAC stream from a raw +ADTS AAC container to a FLV or a MOV/MP4 file. +

+ +

2.2 chomp

+ +

Remove zero padding at the end of a packet. +

+ +

2.3 dump_extra

+ +

Add extradata to the beginning of the filtered packets. +

+

The additional argument specifies which packets should be filtered. +It accepts the values: +

+
a
+

add extradata to all key packets, but only if local_header is +set in the ‘flags2’ codec context field +

+
+
k
+

add extradata to all key packets +

+
+
e
+

add extradata to all packets +

+
+ +

If not specified it is assumed ‘k’. +

+

For example the following ffmpeg command forces a global +header (thus disabling individual packet headers) in the H.264 packets +generated by the libx264 encoder, but corrects them by adding +the header stored in extradata to the key packets: +

 
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
+
+ + +

2.4 h264_mp4toannexb

+ +

Convert an H.264 bitstream from length prefixed mode to start code +prefixed mode (as defined in the Annex B of the ITU-T H.264 +specification). +

+

This is required by some streaming formats, typically the MPEG-2 +transport stream format ("mpegts"). +

+

For example to remux an MP4 file containing an H.264 stream to mpegts +format with ffmpeg, you can use the command: +

+
 
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
+
+ + +

2.5 imx_dump_header

+ + +

2.6 mjpeg2jpeg

+ +

Convert MJPEG/AVI1 packets to full JPEG/JFIF packets. +

+

MJPEG is a video codec wherein each video frame is essentially a +JPEG image. The individual frames can be extracted without loss, +e.g. by +

+
 
ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg
+
+ +

Unfortunately, these chunks are incomplete JPEG images, because +they lack the DHT segment required for decoding. Quoting from +http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml: +

+

Avery Lee, writing in the rec.video.desktop newsgroup in 2001, +commented that "MJPEG, or at least the MJPEG in AVIs having the +MJPG fourcc, is restricted JPEG with a fixed – and *omitted* – +Huffman table. The JPEG must be YCbCr colorspace, it must be 4:2:2, +and it must use basic Huffman encoding, not arithmetic or +progressive. . . . You can indeed extract the MJPEG frames and +decode them with a regular JPEG decoder, but you have to prepend +the DHT segment to them, or else the decoder won’t have any idea +how to decompress the data. The exact table necessary is given in +the OpenDML spec." +

+

This bitstream filter patches the header of frames extracted from an MJPEG +stream (carrying the AVI1 header ID and lacking a DHT segment) to +produce fully qualified JPEG images. +

+
 
ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
+exiftran -i -9 frame*.jpg
+ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
+
+ + +

2.7 mjpega_dump_header

+ + +

2.8 movsub

+ + +

2.9 mp3_header_decompress

+ + +

2.10 noise

+ + +

2.11 remove_extra

+ + + +

3. See Also

+ +

ffmpeg, ffplay, ffprobe, ffserver, +libavcodec +

+ + +

4. Authors

+ +

The FFmpeg developers. +

+

For details about the authorship, see the Git history of the project +(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command +git log in the FFmpeg source directory, or browsing the +online repository at http://source.ffmpeg.org. +

+

Maintainers for the specific components are listed in the file +‘MAINTAINERS’ in the source code tree. +

+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/ffmpeg-codecs.html b/dependencies64/ffmpeg/doc/ffmpeg-codecs.html new file mode 100644 index 000000000..a22768530 --- /dev/null +++ b/dependencies64/ffmpeg/doc/ffmpeg-codecs.html @@ -0,0 +1,4261 @@ + + + + + +FFmpeg documentation : FFmpeg Codecs + + + + + + + + + + +
+
+ + +

FFmpeg Codecs Documentation

+ + +

Table of Contents

+
+ + +
+ + +

1. Description

+ +

This document describes the codecs (decoders and encoders) provided by +the libavcodec library. +

+ +

+

+

2. Codec Options

+ +

libavcodec provides some generic global options, which can be set on +all the encoders and decoders. In addition each codec may support +so-called private options, which are specific for a given codec. +

+


Sometimes, a global option may only affect a specific kind of codec, +and may be nonsensical or ignored by another, so you need to be aware +of the meaning of the specified options. Also some options are +meant only for decoding or encoding. +

+

Options may be set by specifying -option value in the +FFmpeg tools, or by setting the value explicitly in the +AVCodecContext options or using the ‘libavutil/opt.h’ API +for programmatic use. +

+

The list of supported options follow: +

+
+
b integer (encoding,audio,video)
+

Set bitrate in bits/s. Default value is 200K. +

+
+
ab integer (encoding,audio)
+

Set audio bitrate (in bits/s). Default value is 128K. +

+
+
bt integer (encoding,video)
+

Set video bitrate tolerance (in bits/s). In 1-pass mode, bitrate +tolerance specifies how far ratecontrol is willing to deviate from the +target average bitrate value. This is not related to min/max +bitrate. Lowering tolerance too much has an adverse effect on quality. +

+
+
flags flags (decoding/encoding,audio,video,subtitles)
+

Set generic flags. +

+

Possible values: +

+
mv4
+

Use four motion vector by macroblock (mpeg4). +

+
qpel
+

Use 1/4 pel motion compensation. +

+
loop
+

Use loop filter. +

+
qscale
+

Use fixed qscale. +

+
gmc
+

Use gmc. +

+
mv0
+

Always try a mb with mv=<0,0>. +

+
input_preserved
+
pass1
+

Use internal 2pass ratecontrol in first pass mode. +

+
pass2
+

Use internal 2pass ratecontrol in second pass mode. +

+
gray
+

Only decode/encode grayscale. +

+
emu_edge
+

Do not draw edges. +

+
psnr
+

Set error[?] variables during encoding. +

+
truncated
+
naq
+

Normalize adaptive quantization. +

+
ildct
+

Use interlaced DCT. +

+
low_delay
+

Force low delay. +

+
global_header
+

Place global headers in extradata instead of every keyframe. +

+
bitexact
+

Use only bitexact stuff (except (I)DCT). +

+
aic
+

Apply H263 advanced intra coding / mpeg4 ac prediction. +

+
cbp
+

Deprecated, use mpegvideo private options instead. +

+
qprd
+

Deprecated, use mpegvideo private options instead. +

+
ilme
+

Apply interlaced motion estimation. +

+
cgop
+

Use closed gop. +

+
+ +
+
me_method integer (encoding,video)
+

Set motion estimation method. +

+

Possible values: +

+
zero
+

zero motion estimation (fastest) +

+
full
+

full motion estimation (slowest) +

+
epzs
+

EPZS motion estimation (default) +

+
esa
+

esa motion estimation (alias for full) +

+
tesa
+

tesa motion estimation +

+
dia
+

dia motion estimation (alias for epzs) +

+
log
+

log motion estimation +

+
phods
+

phods motion estimation +

+
x1
+

X1 motion estimation +

+
hex
+

hex motion estimation +

+
umh
+

umh motion estimation +

+
iter
+

iter motion estimation +

+
+ +
+
extradata_size integer
+

Set extradata size. +

+
+
time_base rational number
+

Set codec time base. +

+

It is the fundamental unit of time (in seconds) in terms of which +frame timestamps are represented. For fixed-fps content, timebase +should be 1 / frame_rate and timestamp increments should be +identically 1. +

+
+
g integer (encoding,video)
+

Set the group of picture size. Default value is 12. +

+
+
ar integer (decoding/encoding,audio)
+

Set audio sampling rate (in Hz). +

+
+
ac integer (decoding/encoding,audio)
+

Set number of audio channels. +

+
+
cutoff integer (encoding,audio)
+

Set cutoff bandwidth. +

+
+
frame_size integer (encoding,audio)
+

Set audio frame size. +

+

Each submitted frame except the last must contain exactly frame_size +samples per channel. May be 0 when the codec has +CODEC_CAP_VARIABLE_FRAME_SIZE set, in that case the frame size is not +restricted. It is set by some decoders to indicate constant frame +size. +

+
+
frame_number integer
+

Set the frame number. +

+
+
delay integer
+
qcomp float (encoding,video)
+

Set video quantizer scale compression (VBR). It is used as a constant +in the ratecontrol equation. Recommended range for default rc_eq: +0.0-1.0. +

+
+
qblur float (encoding,video)
+

Set video quantizer scale blur (VBR). +

+
+
qmin integer (encoding,video)
+

Set min video quantizer scale (VBR). Must be included between -1 and +69, default value is 2. +

+
+
qmax integer (encoding,video)
+

Set max video quantizer scale (VBR). Must be included between -1 and +1024, default value is 31. +

+
+
qdiff integer (encoding,video)
+

Set max difference between the quantizer scale (VBR). +

+
+
bf integer (encoding,video)
+

Set max number of B frames between non-B-frames. +

+

Must be an integer between -1 and 16. 0 means that B-frames are +disabled. If a value of -1 is used, it will choose an automatic value +depending on the encoder. +

+

Default value is 0. +

+
+
b_qfactor float (encoding,video)
+

Set qp factor between P and B frames. +

+
+
rc_strategy integer (encoding,video)
+

Set ratecontrol method. +

+
+
b_strategy integer (encoding,video)
+

Set strategy to choose between I/P/B-frames. +

+
+
ps integer (encoding,video)
+

Set RTP payload size in bytes. +

+
+
mv_bits integer
+
header_bits integer
+
i_tex_bits integer
+
p_tex_bits integer
+
i_count integer
+
p_count integer
+
skip_count integer
+
misc_bits integer
+
frame_bits integer
+
codec_tag integer
+
bug flags (decoding,video)
+

Workaround not auto detected encoder bugs. +

+

Possible values: +

+
autodetect
+
old_msmpeg4
+

some old lavc generated msmpeg4v3 files (no autodetection) +

+
xvid_ilace
+

Xvid interlacing bug (autodetected if fourcc==XVIX) +

+
ump4
+

(autodetected if fourcc==UMP4) +

+
no_padding
+

padding bug (autodetected) +

+
amv
+
ac_vlc
+

illegal vlc bug (autodetected per fourcc) +

+
qpel_chroma
+
std_qpel
+

old standard qpel (autodetected per fourcc/version) +

+
qpel_chroma2
+
direct_blocksize
+

direct-qpel-blocksize bug (autodetected per fourcc/version) +

+
edge
+

edge padding bug (autodetected per fourcc/version) +

+
hpel_chroma
+
dc_clip
+
ms
+

Workaround various bugs in microsoft broken decoders. +

+
trunc
+

trancated frames +

+
+ +
+
lelim integer (encoding,video)
+

Set single coefficient elimination threshold for luminance (negative +values also consider DC coefficient). +

+
+
celim integer (encoding,video)
+

Set single coefficient elimination threshold for chrominance (negative +values also consider dc coefficient) +

+
+
strict integer (decoding/encoding,audio,video)
+

Specify how strictly to follow the standards. +

+

Possible values: +

+
very
+

strictly conform to a older more strict version of the spec or reference software +

+
strict
+

strictly conform to all the things in the spec no matter what consequences +

+
normal
+
unofficial
+

allow unofficial extensions +

+
experimental
+

allow non standardized experimental things, experimental +(unfinished/work in progress/not well tested) decoders and encoders. +Note: experimental decoders can pose a security risk, do not use this for +decoding untrusted input. +

+
+ +
+
b_qoffset float (encoding,video)
+

Set QP offset between P and B frames. +

+
+
err_detect flags (decoding,audio,video)
+

Set error detection flags. +

+

Possible values: +

+
crccheck
+

verify embedded CRCs +

+
bitstream
+

detect bitstream specification deviations +

+
buffer
+

detect improper bitstream length +

+
explode
+

abort decoding on minor error detection +

+
careful
+

consider things that violate the spec and have not been seen in the wild as errors +

+
compliant
+

consider all spec non compliancies as errors +

+
aggressive
+

consider things that a sane encoder should not do as an error +

+
+ +
+
has_b_frames integer
+
block_align integer
+
mpeg_quant integer (encoding,video)
+

Use MPEG quantizers instead of H.263. +

+
+
qsquish float (encoding,video)
+

How to keep quantizer between qmin and qmax (0 = clip, 1 = use +differentiable function). +

+
+
rc_qmod_amp float (encoding,video)
+

Set experimental quantizer modulation. +

+
+
rc_qmod_freq integer (encoding,video)
+

Set experimental quantizer modulation. +

+
+
rc_override_count integer
+
rc_eq string (encoding,video)
+

Set rate control equation. When computing the expression, besides the +standard functions defined in the section ’Expression Evaluation’, the +following functions are available: bits2qp(bits), qp2bits(qp). Also +the following constants are available: iTex pTex tex mv fCode iCount +mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex +avgTex. +

+
+
maxrate integer (encoding,audio,video)
+

Set max bitrate tolerance (in bits/s). Requires bufsize to be set. +

+
+
minrate integer (encoding,audio,video)
+

Set min bitrate tolerance (in bits/s). Most useful in setting up a CBR +encode. It is of little use elsewise. +

+
+
bufsize integer (encoding,audio,video)
+

Set ratecontrol buffer size (in bits). +

+
+
rc_buf_aggressivity float (encoding,video)
+

Currently useless. +

+
+
i_qfactor float (encoding,video)
+

Set QP factor between P and I frames. +

+
+
i_qoffset float (encoding,video)
+

Set QP offset between P and I frames. +

+
+
rc_init_cplx float (encoding,video)
+

Set initial complexity for 1-pass encoding. +

+
+
dct integer (encoding,video)
+

Set DCT algorithm. +

+

Possible values: +

+
auto
+

autoselect a good one (default) +

+
fastint
+

fast integer +

+
int
+

accurate integer +

+
mmx
+
altivec
+
faan
+

floating point AAN DCT +

+
+ +
+
lumi_mask float (encoding,video)
+

Compress bright areas stronger than medium ones. +

+
+
tcplx_mask float (encoding,video)
+

Set temporal complexity masking. +

+
+
scplx_mask float (encoding,video)
+

Set spatial complexity masking. +

+
+
p_mask float (encoding,video)
+

Set inter masking. +

+
+
dark_mask float (encoding,video)
+

Compress dark areas stronger than medium ones. +

+
+
idct integer (decoding/encoding,video)
+

Select IDCT implementation. +

+

Possible values: +

+
auto
+
int
+
simple
+
simplemmx
+
arm
+
altivec
+
sh4
+
simplearm
+
simplearmv5te
+
simplearmv6
+
simpleneon
+
simplealpha
+
ipp
+
xvidmmx
+
faani
+

floating point AAN IDCT +

+
+ +
+
slice_count integer
+
ec flags (decoding,video)
+

Set error concealment strategy. +

+

Possible values: +

+
guess_mvs
+

iterative motion vector (MV) search (slow) +

+
deblock
+

use strong deblock filter for damaged MBs +

+
+ +
+
bits_per_coded_sample integer
+
pred integer (encoding,video)
+

Set prediction method. +

+

Possible values: +

+
left
+
plane
+
median
+
+ +
+
aspect rational number (encoding,video)
+

Set sample aspect ratio. +

+
+
debug flags (decoding/encoding,audio,video,subtitles)
+

Print specific debug info. +

+

Possible values: +

+
pict
+

picture info +

+
rc
+

rate control +

+
bitstream
+
mb_type
+

macroblock (MB) type +

+
qp
+

per-block quantization parameter (QP) +

+
mv
+

motion vector +

+
dct_coeff
+
skip
+
startcode
+
pts
+
er
+

error recognition +

+
mmco
+

memory management control operations (H.264) +

+
bugs
+
vis_qp
+

visualize quantization parameter (QP), lower QP are tinted greener +

+
vis_mb_type
+

visualize block types +

+
buffers
+

picture buffer allocations +

+
thread_ops
+

threading operations +

+
+ +
+
vismv integer (decoding,video)
+

Visualize motion vectors (MVs). +

+

Possible values: +

+
pf
+

forward predicted MVs of P-frames +

+
bf
+

forward predicted MVs of B-frames +

+
bb
+

backward predicted MVs of B-frames +

+
+ +
+
cmp integer (encoding,video)
+

Set full pel me compare function. +

+

Possible values: +

+
sad
+

sum of absolute differences, fast (default) +

+
sse
+

sum of squared errors +

+
satd
+

sum of absolute Hadamard transformed differences +

+
dct
+

sum of absolute DCT transformed differences +

+
psnr
+

sum of squared quantization errors (avoid, low quality) +

+
bit
+

number of bits needed for the block +

+
rd
+

rate distortion optimal, slow +

+
zero
+

0 +

+
vsad
+

sum of absolute vertical differences +

+
vsse
+

sum of squared vertical differences +

+
nsse
+

noise preserving sum of squared differences +

+
w53
+

5/3 wavelet, only used in snow +

+
w97
+

9/7 wavelet, only used in snow +

+
dctmax
+
chroma
+
+ +
+
subcmp integer (encoding,video)
+

Set sub pel me compare function. +

+

Possible values: +

+
sad
+

sum of absolute differences, fast (default) +

+
sse
+

sum of squared errors +

+
satd
+

sum of absolute Hadamard transformed differences +

+
dct
+

sum of absolute DCT transformed differences +

+
psnr
+

sum of squared quantization errors (avoid, low quality) +

+
bit
+

number of bits needed for the block +

+
rd
+

rate distortion optimal, slow +

+
zero
+

0 +

+
vsad
+

sum of absolute vertical differences +

+
vsse
+

sum of squared vertical differences +

+
nsse
+

noise preserving sum of squared differences +

+
w53
+

5/3 wavelet, only used in snow +

+
w97
+

9/7 wavelet, only used in snow +

+
dctmax
+
chroma
+
+ +
+
mbcmp integer (encoding,video)
+

Set macroblock compare function. +

+

Possible values: +

+
sad
+

sum of absolute differences, fast (default) +

+
sse
+

sum of squared errors +

+
satd
+

sum of absolute Hadamard transformed differences +

+
dct
+

sum of absolute DCT transformed differences +

+
psnr
+

sum of squared quantization errors (avoid, low quality) +

+
bit
+

number of bits needed for the block +

+
rd
+

rate distortion optimal, slow +

+
zero
+

0 +

+
vsad
+

sum of absolute vertical differences +

+
vsse
+

sum of squared vertical differences +

+
nsse
+

noise preserving sum of squared differences +

+
w53
+

5/3 wavelet, only used in snow +

+
w97
+

9/7 wavelet, only used in snow +

+
dctmax
+
chroma
+
+ +
+
ildctcmp integer (encoding,video)
+

Set interlaced dct compare function. +

+

Possible values: +

+
sad
+

sum of absolute differences, fast (default) +

+
sse
+

sum of squared errors +

+
satd
+

sum of absolute Hadamard transformed differences +

+
dct
+

sum of absolute DCT transformed differences +

+
psnr
+

sum of squared quantization errors (avoid, low quality) +

+
bit
+

number of bits needed for the block +

+
rd
+

rate distortion optimal, slow +

+
zero
+

0 +

+
vsad
+

sum of absolute vertical differences +

+
vsse
+

sum of squared vertical differences +

+
nsse
+

noise preserving sum of squared differences +

+
w53
+

5/3 wavelet, only used in snow +

+
w97
+

9/7 wavelet, only used in snow +

+
dctmax
+
chroma
+
+ +
+
dia_size integer (encoding,video)
+

Set diamond type & size for motion estimation. +

+
+
last_pred integer (encoding,video)
+

Set amount of motion predictors from the previous frame. +

+
+
preme integer (encoding,video)
+

Set pre motion estimation. +

+
+
precmp integer (encoding,video)
+

Set pre motion estimation compare function. +

+

Possible values: +

+
sad
+

sum of absolute differences, fast (default) +

+
sse
+

sum of squared errors +

+
satd
+

sum of absolute Hadamard transformed differences +

+
dct
+

sum of absolute DCT transformed differences +

+
psnr
+

sum of squared quantization errors (avoid, low quality) +

+
bit
+

number of bits needed for the block +

+
rd
+

rate distortion optimal, slow +

+
zero
+

0 +

+
vsad
+

sum of absolute vertical differences +

+
vsse
+

sum of squared vertical differences +

+
nsse
+

noise preserving sum of squared differences +

+
w53
+

5/3 wavelet, only used in snow +

+
w97
+

9/7 wavelet, only used in snow +

+
dctmax
+
chroma
+
+ +
+
pre_dia_size integer (encoding,video)
+

Set diamond type & size for motion estimation pre-pass. +

+
+
subq integer (encoding,video)
+

Set sub pel motion estimation quality. +

+
+
dtg_active_format integer
+
me_range integer (encoding,video)
+

Set limit motion vectors range (1023 for DivX player). +

+
+
ibias integer (encoding,video)
+

Set intra quant bias. +

+
+
pbias integer (encoding,video)
+

Set inter quant bias. +

+
+
color_table_id integer
+
global_quality integer (encoding,audio,video)
+
coder integer (encoding,video)
+
+

Possible values: +

+
vlc
+

variable length coder / huffman coder +

+
ac
+

arithmetic coder +

+
raw
+

raw (no encoding) +

+
rle
+

run-length coder +

+
deflate
+

deflate-based coder +

+
+ +
+
context integer (encoding,video)
+

Set context model. +

+
+
slice_flags integer
+
xvmc_acceleration integer
+
mbd integer (encoding,video)
+

Set macroblock decision algorithm (high quality mode). +

+

Possible values: +

+
simple
+

use mbcmp (default) +

+
bits
+

use fewest bits +

+
rd
+

use best rate distortion +

+
+ +
+
stream_codec_tag integer
+
sc_threshold integer (encoding,video)
+

Set scene change threshold. +

+
+
lmin integer (encoding,video)
+

Set min lagrange factor (VBR). +

+
+
lmax integer (encoding,video)
+

Set max lagrange factor (VBR). +

+
+
nr integer (encoding,video)
+

Set noise reduction. +

+
+
rc_init_occupancy integer (encoding,video)
+

Set number of bits which should be loaded into the rc buffer before +decoding starts. +

+
+
flags2 flags (decoding/encoding,audio,video)
+
+

Possible values: +

+
fast
+

Allow non spec compliant speedup tricks. +

+
sgop
+

Deprecated, use mpegvideo private options instead. +

+
noout
+

Skip bitstream encoding. +

+
ignorecrop
+

Ignore cropping information from sps. +

+
local_header
+

Place global headers at every keyframe instead of in extradata. +

+
chunks
+

Frame data might be split into multiple chunks. +

+
showall
+

Show all frames before the first keyframe. +

+
skiprd
+

Deprecated, use mpegvideo private options instead. +

+
+ +
+
error integer (encoding,video)
+
qns integer (encoding,video)
+

Deprecated, use mpegvideo private options instead. +

+
+
threads integer (decoding/encoding,video)
+
+

Possible values: +

+
auto
+

detect a good number of threads +

+
+ +
+
me_threshold integer (encoding,video)
+

Set motion estimation threshold. +

+
+
mb_threshold integer (encoding,video)
+

Set macroblock threshold. +

+
+
dc integer (encoding,video)
+

Set intra_dc_precision. +

+
+
nssew integer (encoding,video)
+

Set nsse weight. +

+
+
skip_top integer (decoding,video)
+

Set number of macroblock rows at the top which are skipped. +

+
+
skip_bottom integer (decoding,video)
+

Set number of macroblock rows at the bottom which are skipped. +

+
+
profile integer (encoding,audio,video)
+
+

Possible values: +

+
unknown
+
aac_main
+
aac_low
+
aac_ssr
+
aac_ltp
+
aac_he
+
aac_he_v2
+
aac_ld
+
aac_eld
+
mpeg2_aac_low
+
mpeg2_aac_he
+
dts
+
dts_es
+
dts_96_24
+
dts_hd_hra
+
dts_hd_ma
+
+ +
+
level integer (encoding,audio,video)
+
+

Possible values: +

+
unknown
+
+ +
+
lowres integer (decoding,audio,video)
+

Decode at 1= 1/2, 2=1/4, 3=1/8 resolutions. +

+
+
skip_threshold integer (encoding,video)
+

Set frame skip threshold. +

+
+
skip_factor integer (encoding,video)
+

Set frame skip factor. +

+
+
skip_exp integer (encoding,video)
+


Set frame skip exponent. +Negative values behave identically to the corresponding positive ones, except +that the score is normalized. +Positive values exist primarily for compatibility reasons and are not so useful. +

+
+
skipcmp integer (encoding,video)
+

Set frame skip compare function. +

+

Possible values: +

+
sad
+

sum of absolute differences, fast (default) +

+
sse
+

sum of squared errors +

+
satd
+

sum of absolute Hadamard transformed differences +

+
dct
+

sum of absolute DCT transformed differences +

+
psnr
+

sum of squared quantization errors (avoid, low quality) +

+
bit
+

number of bits needed for the block +

+
rd
+

rate distortion optimal, slow +

+
zero
+

0 +

+
vsad
+

sum of absolute vertical differences +

+
vsse
+

sum of squared vertical differences +

+
nsse
+

noise preserving sum of squared differences +

+
w53
+

5/3 wavelet, only used in snow +

+
w97
+

9/7 wavelet, only used in snow +

+
dctmax
+
chroma
+
+ +
+
border_mask float (encoding,video)
+

Increase the quantizer for macroblocks close to borders. +

+
+
mblmin integer (encoding,video)
+

Set min macroblock lagrange factor (VBR). +

+
+
mblmax integer (encoding,video)
+

Set max macroblock lagrange factor (VBR). +

+
+
mepc integer (encoding,video)
+

Set motion estimation bitrate penalty compensation (1.0 = 256). +

+
+
skip_loop_filter integer (decoding,video)
+
skip_idct integer (decoding,video)
+
skip_frame integer (decoding,video)
+
+

Make decoder discard processing depending on the frame type selected +by the option value. +

+

skip_loop_filter’ skips frame loop filtering, ‘skip_idct’ +skips frame IDCT/dequantization, ‘skip_frame’ skips decoding. +

+

Possible values: +

+
none
+

Discard no frame. +

+
+
default
+

Discard useless frames like 0-sized frames. +

+
+
noref
+

Discard all non-reference frames. +

+
+
bidir
+

Discard all bidirectional frames. +

+
+
nokey
+


Discard all frames except keyframes. +

+
+
all
+

Discard all frames. +

+
+ +

Default value is ‘default’. +

+
+
bidir_refine integer (encoding,video)
+

Refine the two motion vectors used in bidirectional macroblocks. +

+
+
brd_scale integer (encoding,video)
+

Downscale frames for dynamic B-frame decision. +

+
+
keyint_min integer (encoding,video)
+

Set minimum interval between IDR-frames. +

+
+
refs integer (encoding,video)
+

Set reference frames to consider for motion compensation. +

+
+
chromaoffset integer (encoding,video)
+

Set chroma qp offset from luma. +

+
+
trellis integer (encoding,audio,video)
+

Set rate-distortion optimal quantization. +

+
+
sc_factor integer (encoding,video)
+

Set value multiplied by qscale for each frame and added to +scene_change_score. +

+
+
mv0_threshold integer (encoding,video)
+
b_sensitivity integer (encoding,video)
+

Adjust sensitivity of b_frame_strategy 1. +

+
+
compression_level integer (encoding,audio,video)
+
min_prediction_order integer (encoding,audio)
+
max_prediction_order integer (encoding,audio)
+
timecode_frame_start integer (encoding,video)
+

Set GOP timecode frame start number, in non drop frame format. +

+
+
request_channels integer (decoding,audio)
+

Set desired number of audio channels. +

+
+
bits_per_raw_sample integer
+
channel_layout integer (decoding/encoding,audio)
+
+

Possible values: +

+
request_channel_layout integer (decoding,audio)
+
+

Possible values: +

+
rc_max_vbv_use float (encoding,video)
+
rc_min_vbv_use float (encoding,video)
+
ticks_per_frame integer (decoding/encoding,audio,video)
+
color_primaries integer (decoding/encoding,video)
+
color_trc integer (decoding/encoding,video)
+
colorspace integer (decoding/encoding,video)
+
color_range integer (decoding/encoding,video)
+
chroma_sample_location integer (decoding/encoding,video)
+
log_level_offset integer
+

Set the log level offset. +

+
+
slices integer (encoding,video)
+

Number of slices, used in parallelized encoding. +

+
+
thread_type flags (decoding/encoding,video)
+

Select multithreading type. +

+

Possible values: +

+
slice
+
frame
+
+
+
audio_service_type integer (encoding,audio)
+

Set audio service type. +

+

Possible values: +

+
ma
+

Main Audio Service +

+
ef
+

Effects +

+
vi
+

Visually Impaired +

+
hi
+

Hearing Impaired +

+
di
+

Dialogue +

+
co
+

Commentary +

+
em
+

Emergency +

+
vo
+

Voice Over +

+
ka
+

Karaoke +

+
+ +
+
request_sample_fmt sample_fmt (decoding,audio)
+

Set sample format audio decoders should prefer. Default value is +none. +

+
+
pkt_timebase rational number
+
sub_charenc encoding (decoding,subtitles)
+

Set the input subtitles character encoding. +

+
+
field_order field_order (video)
+

Set/override the field order of the video. +Possible values: +

+
progressive
+

Progressive video +

+
tt
+

Interlaced video, top field coded and displayed first +

+
bb
+

Interlaced video, bottom field coded and displayed first +

+
tb
+

Interlaced video, top coded first, bottom displayed first +

+
bt
+

Interlaced video, bottom coded first, top displayed first +

+
+ +
+
skip_alpha integer (decoding,video)
+

Set to 1 to disable processing alpha (transparency). This works like the +‘gray’ flag in the ‘flags’ option which skips chroma information +instead of alpha. Default is 0. +

+
+ + + +

3. Decoders

+ +

Decoders are configured elements in FFmpeg which allow the decoding of +multimedia streams. +

+

When you configure your FFmpeg build, all the supported native decoders +are enabled by default. Decoders requiring an external library must be enabled +manually via the corresponding --enable-lib option. You can list all +available decoders using the configure option --list-decoders. +

+

You can disable all the decoders with the configure option +--disable-decoders and selectively enable / disable single decoders +with the options --enable-decoder=DECODER / +--disable-decoder=DECODER. +

+

The option -decoders of the ff* tools will display the list of +enabled decoders. +

+ + +

4. Video Decoders

+ +

A description of some of the currently available video decoders +follows. +

+ +

4.1 rawvideo

+ +

Raw video decoder. +

+

This decoder decodes rawvideo streams. +

+ +

4.1.1 Options

+ +
+
top top_field_first
+

Specify the assumed field type of the input video. +

+
-1
+

the video is assumed to be progressive (default) +

+
0
+

bottom-field-first is assumed +

+
1
+

top-field-first is assumed +

+
+ +
+
+ + + +

5. Audio Decoders

+ +

A description of some of the currently available audio decoders +follows. +

+ +

5.1 ac3

+ +

AC-3 audio decoder. +

+

This decoder implements part of ATSC A/52:2010 and ETSI TS 102 366, as well as +the undocumented RealAudio 3 (a.k.a. dnet). +

+ +

5.1.1 AC-3 Decoder Options

+ +
+
-drc_scale value
+

Dynamic Range Scale Factor. The factor to apply to dynamic range values +from the AC-3 stream. This factor is applied exponentially. +There are 3 notable scale factor ranges: +

+
drc_scale == 0
+

DRC disabled. Produces full range audio. +

+
0 < drc_scale <= 1
+

DRC enabled. Applies a fraction of the stream DRC value. +Audio reproduction is between full range and full compression. +

+
drc_scale > 1
+

DRC enabled. Applies drc_scale asymmetrically. +Loud sounds are fully compressed. Soft sounds are enhanced. +

+
+ +
+
+ + +

5.2 ffwavesynth

+ +

Internal wave synthesizer. +

+

This decoder generates wave patterns according to predefined sequences. Its +use is purely internal and the format of the data it accepts is not publicly +documented. +

+ +

5.3 libcelt

+ +

libcelt decoder wrapper. +

+

libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec. +Requires the presence of the libcelt headers and library during configuration. +You need to explicitly configure the build with --enable-libcelt. +

+ +

5.4 libgsm

+ +

libgsm decoder wrapper. +

+

libgsm allows libavcodec to decode the GSM full rate audio codec. Requires +the presence of the libgsm headers and library during configuration. You need +to explicitly configure the build with --enable-libgsm. +

+

This decoder supports both the ordinary GSM and the Microsoft variant. +

+ +

5.5 libilbc

+ +

libilbc decoder wrapper. +

+

libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC) +audio codec. Requires the presence of the libilbc headers and library during +configuration. You need to explicitly configure the build with +--enable-libilbc. +

+ +

5.5.1 Options

+ +

The following option is supported by the libilbc wrapper. +

+
+
enhance
+
+

Enable the enhancement of the decoded audio when set to 1. The default +value is 0 (disabled). +

+
+
+ + +

5.6 libopencore-amrnb

+ +

libopencore-amrnb decoder wrapper. +

+

libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate +Narrowband audio codec. Using it requires the presence of the +libopencore-amrnb headers and library during configuration. You need to +explicitly configure the build with --enable-libopencore-amrnb. +

+

An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB +without this library. +

+ +

5.7 libopencore-amrwb

+ +

libopencore-amrwb decoder wrapper. +

+

libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate +Wideband audio codec. Using it requires the presence of the +libopencore-amrwb headers and library during configuration. You need to +explicitly configure the build with --enable-libopencore-amrwb. +

+

An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB +without this library. +

+ +

5.8 libopus

+ +

libopus decoder wrapper. +

+

libopus allows libavcodec to decode the Opus Interactive Audio Codec. +Requires the presence of the libopus headers and library during +configuration. You need to explicitly configure the build with +--enable-libopus. +

+ + +

6. Subtitles Decoders

+ + +

6.1 dvdsub

+ +

This codec decodes the bitmap subtitles used in DVDs; the same subtitles can +also be found in VobSub file pairs and in some Matroska files. +

+ +

6.1.1 Options

+ +
+
palette
+

Specify the global palette used by the bitmaps. When stored in VobSub, the +palette is normally specified in the index file; in Matroska, the palette is +stored in the codec extra-data in the same format as in VobSub. In DVDs, the +palette is stored in the IFO file, and therefore not available when reading +from dumped VOB files. +

+

The format for this option is a string containing 16 24-bit hexadecimal +numbers (without 0x prefix) separated by commas, for example 0d00ee, +ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1, +7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b. +

+
+ + +

6.2 libzvbi-teletext

+ +

Libzvbi allows libavcodec to decode DVB teletext pages and DVB teletext +subtitles. Requires the presence of the libzvbi headers and library during +configuration. You need to explicitly configure the build with +--enable-libzvbi. +

+ +

6.2.1 Options

+ +
+
txt_page
+

List of teletext page numbers to decode. You may use the special * string to +match all pages. Pages that do not match the specified list are dropped. +Default value is *. +

+
txt_chop_top
+

Discards the top teletext line. Default value is 1. +

+
txt_format
+

Specifies the format of the decoded subtitles. The teletext decoder is capable +of decoding the teletext pages to bitmaps or to simple text, you should use +"bitmap" for teletext pages, because certain graphics and colors cannot be +expressed in simple text. You might use "text" for teletext based subtitles if +your application can handle simple text based subtitles. Default value is +bitmap. +

+
txt_left
+

X offset of generated bitmaps, default is 0. +

+
txt_top
+

Y offset of generated bitmaps, default is 0. +

+
txt_chop_spaces
+

Chops leading and trailing spaces and removes empty lines from the generated +text. This option is useful for teletext based subtitles where empty spaces may +be present at the start or at the end of the lines or empty lines may be +present between the subtitle lines because of double-sized teletext characters. +Default value is 1. +

+
txt_duration
+

Sets the display duration of the decoded teletext pages or subtitles in +milliseconds. Default value is 30000 which is 30 seconds. +

+
txt_transparent
+

Force transparent background of the generated teletext bitmaps. Default value +is 0 which means an opaque (black) background. +

+
+ + +

7. Encoders

+ +

Encoders are configured elements in FFmpeg which allow the encoding of +multimedia streams. +

+

When you configure your FFmpeg build, all the supported native encoders +are enabled by default. Encoders requiring an external library must be enabled +manually via the corresponding --enable-lib option. You can list all +available encoders using the configure option --list-encoders. +

+

You can disable all the encoders with the configure option +--disable-encoders and selectively enable / disable single encoders +with the options --enable-encoder=ENCODER / +--disable-encoder=ENCODER. +

+

The option -encoders of the ff* tools will display the list of +enabled encoders. +

+ + +

8. Audio Encoders

+ +

A description of some of the currently available audio encoders +follows. +

+

+

+

8.1 aac

+ +

Advanced Audio Coding (AAC) encoder. +

+

This encoder is an experimental FFmpeg-native AAC encoder. Currently only the +low complexity (AAC-LC) profile is supported. To use this encoder, you must set +‘strict’ option to ‘experimental’ or lower. +

+

As this encoder is experimental, unexpected behavior may exist from time to +time. For a more stable AAC encoder, see libvo-aacenc. However, be warned +that it has a worse quality reported by some users. +

+

See also libfdk_aac and libfaac. +

+ +

8.1.1 Options

+ +
+
b
+

Set bit rate in bits/s. Setting this automatically activates constant bit rate +(CBR) mode. +

+
+
q
+

Set quality for variable bit rate (VBR) mode. This option is valid only using +the ffmpeg command-line tool. For library interface users, use +‘global_quality’. +

+
+
stereo_mode
+

Set stereo encoding mode. Possible values: +

+
+
auto
+

Automatically selected by the encoder. +

+
+
ms_off
+

Disable middle/side encoding. This is the default. +

+
+
ms_force
+

Force middle/side encoding. +

+
+ +
+
aac_coder
+

Set AAC encoder coding method. Possible values: +

+
+
faac
+

FAAC-inspired method. +

+

This method is a simplified reimplementation of the method used in FAAC, which +sets thresholds proportional to the band energies, and then decreases all the +thresholds with quantizer steps to find the appropriate quantization with +distortion below threshold band by band. +

+

The quality of this method is comparable to the two loop searching method +described below, but generally a little better and slower. +

+
+
anmr
+

Average noise to mask ratio (ANMR) trellis-based solution. +

+

This has a theoretic best quality out of all the coding methods, but at the +cost of the slowest speed. +

+
+
twoloop
+

Two loop searching (TLS) method. +

+

This method first sets quantizers depending on band thresholds and then tries +to find an optimal combination by adding or subtracting a specific value from +all quantizers and adjusting some individual quantizer a little. +

+

This method produces similar quality with the FAAC method and is the default. +

+
+
fast
+

Constant quantizer method. +

+

This method sets a constant quantizer for all bands. This is the fastest of all +the methods, yet produces the worst quality. +

+
+
+ +
+
+ + +

8.2 ac3 and ac3_fixed

+ +

AC-3 audio encoders. +

+

These encoders implement part of ATSC A/52:2010 and ETSI TS 102 366, as well as +the undocumented RealAudio 3 (a.k.a. dnet). +

+

The ac3 encoder uses floating-point math, while the ac3_fixed +encoder only uses fixed-point integer math. This does not mean that one is +always faster, just that one or the other may be better suited to a +particular system. The floating-point encoder will generally produce better +quality audio for a given bitrate. The ac3_fixed encoder is not the +default codec for any of the output formats, so it must be specified explicitly +using the option -acodec ac3_fixed in order to use it. +

+ +

8.2.1 AC-3 Metadata

+ +

The AC-3 metadata options are used to set parameters that describe the audio, +but in most cases do not affect the audio encoding itself. Some of the options +do directly affect or influence the decoding and playback of the resulting +bitstream, while others are just for informational purposes. A few of the +options will add bits to the output stream that could otherwise be used for +audio data, and will thus affect the quality of the output. Those will be +indicated accordingly with a note in the option list below. +

+

These parameters are described in detail in several publicly-available +documents. +

+ + +

8.2.1.1 Metadata Control Options

+ +
+
-per_frame_metadata boolean
+

Allow Per-Frame Metadata. Specifies if the encoder should check for changing +metadata for each frame. +

+
0
+

The metadata values set at initialization will be used for every frame in the +stream. (default) +

+
1
+

Metadata values can be changed before encoding each frame. +

+
+ +
+
+ + +

8.2.1.2 Downmix Levels

+ +
+
-center_mixlev level
+

Center Mix Level. The amount of gain the decoder should apply to the center +channel when downmixing to stereo. This field will only be written to the +bitstream if a center channel is present. The value is specified as a scale +factor. There are 3 valid values: +

+
0.707
+

Apply -3dB gain +

+
0.595
+

Apply -4.5dB gain (default) +

+
0.500
+

Apply -6dB gain +

+
+ +
+
-surround_mixlev level
+

Surround Mix Level. The amount of gain the decoder should apply to the surround +channel(s) when downmixing to stereo. This field will only be written to the +bitstream if one or more surround channels are present. The value is specified +as a scale factor. There are 3 valid values: +

+
0.707
+

Apply -3dB gain +

+
0.500
+

Apply -6dB gain (default) +

+
0.000
+

Silence Surround Channel(s) +

+
+ +
+
+ + +

8.2.1.3 Audio Production Information

+

Audio Production Information is optional information describing the mixing +environment. Either none or both of the fields are written to the bitstream. +

+
+
-mixing_level number
+

Mixing Level. Specifies peak sound pressure level (SPL) in the production +environment when the mix was mastered. Valid values are 80 to 111, or -1 for +unknown or not indicated. The default value is -1, but that value cannot be +used if the Audio Production Information is written to the bitstream. Therefore, +if the room_type option is not the default value, the mixing_level +option must not be -1. +

+
+
-room_type type
+

Room Type. Describes the equalization used during the final mixing session at +the studio or on the dubbing stage. A large room is a dubbing stage with the +industry standard X-curve equalization; a small room has flat equalization. +This field will not be written to the bitstream if both the mixing_level +option and the room_type option have the default values. +

+
0
+
notindicated
+

Not Indicated (default) +

+
1
+
large
+

Large Room +

+
2
+
small
+

Small Room +

+
+ +
+
+ + +

8.2.1.4 Other Metadata Options

+ +
+
-copyright boolean
+

Copyright Indicator. Specifies whether a copyright exists for this audio. +

+
0
+
off
+

No Copyright Exists (default) +

+
1
+
on
+

Copyright Exists +

+
+ +
+
-dialnorm value
+

Dialogue Normalization. Indicates how far the average dialogue level of the +program is below digital 100% full scale (0 dBFS). This parameter determines a +level shift during audio reproduction that sets the average volume of the +dialogue to a preset level. The goal is to match volume level between program +sources. A value of -31dB will result in no volume level change, relative to +the source volume, during audio reproduction. Valid values are whole numbers in +the range -31 to -1, with -31 being the default. +

+
+
-dsur_mode mode
+

Dolby Surround Mode. Specifies whether the stereo signal uses Dolby Surround +(Pro Logic). This field will only be written to the bitstream if the audio +stream is stereo. Using this option does NOT mean the encoder will actually +apply Dolby Surround processing. +

+
0
+
notindicated
+

Not Indicated (default) +

+
1
+
off
+

Not Dolby Surround Encoded +

+
2
+
on
+

Dolby Surround Encoded +

+
+ +
+
-original boolean
+

Original Bit Stream Indicator. Specifies whether this audio is from the +original source and not a copy. +

+
0
+
off
+

Not Original Source +

+
1
+
on
+

Original Source (default) +

+
+ +
+
+ + +

8.2.2 Extended Bitstream Information

+

The extended bitstream options are part of the Alternate Bit Stream Syntax as +specified in Annex D of the A/52:2010 standard. It is grouped into 2 parts. +If any one parameter in a group is specified, all values in that group will be +written to the bitstream. Default values are used for those that are written +but have not been specified. If the mixing levels are written, the decoder +will use these values instead of the ones specified in the center_mixlev +and surround_mixlev options if it supports the Alternate Bit Stream +Syntax. +

+ +

8.2.2.1 Extended Bitstream Information - Part 1

+ +
+
-dmix_mode mode
+

Preferred Stereo Downmix Mode. Allows the user to select either Lt/Rt +(Dolby Surround) or Lo/Ro (normal stereo) as the preferred stereo downmix mode. +

+
0
+
notindicated
+

Not Indicated (default) +

+
1
+
ltrt
+

Lt/Rt Downmix Preferred +

+
2
+
loro
+

Lo/Ro Downmix Preferred +

+
+ +
+
-ltrt_cmixlev level
+

Lt/Rt Center Mix Level. The amount of gain the decoder should apply to the +center channel when downmixing to stereo in Lt/Rt mode. +

+
1.414
+

Apply +3dB gain +

+
1.189
+

Apply +1.5dB gain +

+
1.000
+

Apply 0dB gain +

+
0.841
+

Apply -1.5dB gain +

+
0.707
+

Apply -3.0dB gain +

+
0.595
+

Apply -4.5dB gain (default) +

+
0.500
+

Apply -6.0dB gain +

+
0.000
+

Silence Center Channel +

+
+ +
+
-ltrt_surmixlev level
+

Lt/Rt Surround Mix Level. The amount of gain the decoder should apply to the +surround channel(s) when downmixing to stereo in Lt/Rt mode. +

+
0.841
+

Apply -1.5dB gain +

+
0.707
+

Apply -3.0dB gain +

+
0.595
+

Apply -4.5dB gain +

+
0.500
+

Apply -6.0dB gain (default) +

+
0.000
+

Silence Surround Channel(s) +

+
+ +
+
-loro_cmixlev level
+

Lo/Ro Center Mix Level. The amount of gain the decoder should apply to the +center channel when downmixing to stereo in Lo/Ro mode. +

+
1.414
+

Apply +3dB gain +

+
1.189
+

Apply +1.5dB gain +

+
1.000
+

Apply 0dB gain +

+
0.841
+

Apply -1.5dB gain +

+
0.707
+

Apply -3.0dB gain +

+
0.595
+

Apply -4.5dB gain (default) +

+
0.500
+

Apply -6.0dB gain +

+
0.000
+

Silence Center Channel +

+
+ +
+
-loro_surmixlev level
+

Lo/Ro Surround Mix Level. The amount of gain the decoder should apply to the +surround channel(s) when downmixing to stereo in Lo/Ro mode. +

+
0.841
+

Apply -1.5dB gain +

+
0.707
+

Apply -3.0dB gain +

+
0.595
+

Apply -4.5dB gain +

+
0.500
+

Apply -6.0dB gain (default) +

+
0.000
+

Silence Surround Channel(s) +

+
+ +
+
+ + +

8.2.2.2 Extended Bitstream Information - Part 2

+ +
+
-dsurex_mode mode
+

Dolby Surround EX Mode. Indicates whether the stream uses Dolby Surround EX +(7.1 matrixed to 5.1). Using this option does NOT mean the encoder will actually +apply Dolby Surround EX processing. +

+
0
+
notindicated
+

Not Indicated (default) +

+
1
+
on
+

Dolby Surround EX Off +

+
2
+
off
+

Dolby Surround EX On +

+
+ +
+
-dheadphone_mode mode
+

Dolby Headphone Mode. Indicates whether the stream uses Dolby Headphone +encoding (multi-channel matrixed to 2.0 for use with headphones). Using this +option does NOT mean the encoder will actually apply Dolby Headphone +processing. +

+
0
+
notindicated
+

Not Indicated (default) +

+
1
+
on
+

Dolby Headphone Off +

+
2
+
off
+

Dolby Headphone On +

+
+ +
+
-ad_conv_type type
+

A/D Converter Type. Indicates whether the audio has passed through HDCD A/D +conversion. +

+
0
+
standard
+

Standard A/D Converter (default) +

+
1
+
hdcd
+

HDCD A/D Converter +

+
+ +
+
+ + +

8.2.3 Other AC-3 Encoding Options

+ +
+
-stereo_rematrixing boolean
+

Stereo Rematrixing. Enables/Disables use of rematrixing for stereo input. This +is an optional AC-3 feature that increases quality by selectively encoding +the left/right channels as mid/side. This option is enabled by default, and it +is highly recommended that it be left as enabled except for testing purposes. +

+
+
+ + +

8.2.4 Floating-Point-Only AC-3 Encoding Options

+ +

These options are only valid for the floating-point encoder and do not exist +for the fixed-point encoder due to the corresponding features not being +implemented in fixed-point. +

+
+
-channel_coupling boolean
+

Enables/Disables use of channel coupling, which is an optional AC-3 feature +that increases quality by combining high frequency information from multiple +channels into a single channel. The per-channel high frequency information is +sent with less accuracy in both the frequency and time domains. This allows +more bits to be used for lower frequencies while preserving enough information +to reconstruct the high frequencies. This option is enabled by default for the +floating-point encoder and should generally be left as enabled except for +testing purposes or to increase encoding speed. +

+
-1
+
auto
+

Selected by Encoder (default) +

+
0
+
off
+

Disable Channel Coupling +

+
1
+
on
+

Enable Channel Coupling +

+
+ +
+
-cpl_start_band number
+

Coupling Start Band. Sets the channel coupling start band, from 1 to 15. If a +value higher than the bandwidth is used, it will be reduced to 1 less than the +coupling end band. If auto is used, the start band will be determined by +the encoder based on the bit rate, sample rate, and channel layout. This option +has no effect if channel coupling is disabled. +

+
-1
+
auto
+

Selected by Encoder (default) +

+
+ +
+
+ +

+

+

8.3 libfaac

+ +

libfaac AAC (Advanced Audio Coding) encoder wrapper. +

+

Requires the presence of the libfaac headers and library during +configuration. You need to explicitly configure the build with +--enable-libfaac --enable-nonfree. +

+

This encoder is considered to be of higher quality with respect to the +native experimental FFmpeg AAC encoder. +

+

For more information see the libfaac project at +http://www.audiocoding.com/faac.html/. +

+ +

8.3.1 Options

+ +

The following shared FFmpeg codec options are recognized. +

+

The following options are supported by the libfaac wrapper. The +faac-equivalent of the options are listed in parentheses. +

+
+
b (-b)
+

Set bit rate in bits/s for ABR (Average Bit Rate) mode. If the bit rate +is not explicitly specified, it is automatically set to a suitable +value depending on the selected profile. faac bitrate is +expressed in kilobits/s. +

+

Note that libfaac does not support CBR (Constant Bit Rate) but only +ABR (Average Bit Rate). +

+

If VBR mode is enabled this option is ignored. +

+
+
ar (-R)
+

Set audio sampling rate (in Hz). +

+
+
ac (-c)
+

Set the number of audio channels. +

+
+
cutoff (-C)
+

Set cutoff frequency. If not specified (or explicitly set to 0) it +will use a value automatically computed by the library. Default value +is 0. +

+
+
profile
+

Set audio profile. +

+

The following profiles are recognized: +

+
aac_main
+

Main AAC (Main) +

+
+
aac_low
+

Low Complexity AAC (LC) +

+
+
aac_ssr
+

Scalable Sample Rate (SSR) +

+
+
aac_ltp
+

Long Term Prediction (LTP) +

+
+ +

If not specified it is set to ‘aac_low’. +

+
+
flags +qscale
+

Set constant quality VBR (Variable Bit Rate) mode. +

+
+
global_quality
+

Set quality in VBR mode as an integer number of lambda units. +

+

Only relevant when VBR mode is enabled with flags +qscale. The +value is converted to QP units by dividing it by FF_QP2LAMBDA, +and used to set the quality value used by libfaac. A reasonable range +for the option value in QP units is [10-500], the higher the value the +higher the quality. +

+
+
q (-q)
+

Enable VBR mode when set to a non-negative value, and set constant +quality value as a double floating point value in QP units. +

+

The value sets the quality value used by libfaac. A reasonable range +for the option value is [10-500], the higher the value the higher the +quality. +

+

This option is valid only using the ffmpeg command-line +tool. For library interface users, use ‘global_quality’. +

+
+ + +

8.3.2 Examples

+ +
    +
  • +Use ffmpeg to convert an audio file to ABR 128 kbps AAC in an M4A (MP4) +container: +
     
    ffmpeg -i input.wav -codec:a libfaac -b:a 128k output.m4a
    +
    + +
  • +Use ffmpeg to convert an audio file to VBR AAC, using the +LTP AAC profile: +
     
    ffmpeg -i input.wav -c:a libfaac -profile:a aac_ltp -q:a 100 output.m4a
    +
    +
+ +

+

+

8.4 libfdk_aac

+ +

libfdk-aac AAC (Advanced Audio Coding) encoder wrapper. +

+

The libfdk-aac library is based on the Fraunhofer FDK AAC code from +the Android project. +

+

Requires the presence of the libfdk-aac headers and library during +configuration. You need to explicitly configure the build with +--enable-libfdk-aac. The library is also incompatible with GPL, +so if you allow the use of GPL, you should configure with +--enable-gpl --enable-nonfree --enable-libfdk-aac. +

+

This encoder is considered to be of higher quality with respect to +both the native experimental FFmpeg AAC encoder and +libfaac. +

+

VBR encoding, enabled through the ‘vbr’ or ‘flags ++qscale’ options, is experimental and only works with some +combinations of parameters. +

+

Support for encoding 7.1 audio is only available with libfdk-aac 0.1.3 or +higher. +

+

For more information see the fdk-aac project at +http://sourceforge.net/p/opencore-amr/fdk-aac/. +

+ +

8.4.1 Options

+ +

The following options are mapped on the shared FFmpeg codec options. +

+
+
b
+

Set bit rate in bits/s. If the bitrate is not explicitly specified, it +is automatically set to a suitable value depending on the selected +profile. +

+

In case VBR mode is enabled the option is ignored. +

+
+
ar
+

Set audio sampling rate (in Hz). +

+
+
channels
+

Set the number of audio channels. +

+
+
flags +qscale
+

Enable fixed quality, VBR (Variable Bit Rate) mode. +Note that VBR is implicitly enabled when the ‘vbr’ value is +positive. +

+
+
cutoff
+

Set cutoff frequency. If not specified (or explicitly set to 0) it +will use a value automatically computed by the library. Default value +is 0. +

+
+
profile
+

Set audio profile. +

+

The following profiles are recognized: +

+
aac_low
+

Low Complexity AAC (LC) +

+
+
aac_he
+

High Efficiency AAC (HE-AAC) +

+
+
aac_he_v2
+

High Efficiency AAC version 2 (HE-AACv2) +

+
+
aac_ld
+

Low Delay AAC (LD) +

+
+
aac_eld
+

Enhanced Low Delay AAC (ELD) +

+
+ +

If not specified it is set to ‘aac_low’. +

+
+ +

The following are private options of the libfdk_aac encoder. +

+
+
afterburner
+

Enable afterburner feature if set to 1, disabled if set to 0. This +improves the quality but also the required processing power. +

+

Default value is 1. +

+
+
eld_sbr
+

Enable SBR (Spectral Band Replication) for ELD if set to 1, disabled +if set to 0. +

+

Default value is 0. +

+
+
signaling
+

Set SBR/PS signaling style. +

+

It can assume one of the following values: +

+
default
+

choose signaling implicitly (explicit hierarchical by default, +implicit if global header is disabled) +

+
+
implicit
+

implicit backwards compatible signaling +

+
+
explicit_sbr
+

explicit SBR, implicit PS signaling +

+
+
explicit_hierarchical
+

explicit hierarchical signaling +

+
+ +

Default value is ‘default’. +

+
+
latm
+

Output LATM/LOAS encapsulated data if set to 1, disabled if set to 0. +

+

Default value is 0. +

+
+
header_period
+

Set StreamMuxConfig and PCE repetition period (in frames) for sending +in-band configuration buffers within LATM/LOAS transport layer. +

+

Must be a 16-bits non-negative integer. +

+

Default value is 0. +

+
+
vbr
+

Set VBR mode, from 1 to 5. 1 is lowest quality (though still pretty +good) and 5 is highest quality. A value of 0 will disable VBR, and CBR +(Constant Bit Rate) is enabled. +

+

Currently only the ‘aac_low’ profile supports VBR encoding. +

+

VBR modes 1-5 correspond to roughly the following average bit rates: +

+
+
1
+

32 kbps/channel +

+
2
+

40 kbps/channel +

+
3
+

48-56 kbps/channel +

+
4
+

64 kbps/channel +

+
5
+

about 80-96 kbps/channel +

+
+ +

Default value is 0. +

+
+ + +

8.4.2 Examples

+ +
    +
  • +Use ffmpeg to convert an audio file to VBR AAC in an M4A (MP4) +container: +
     
    ffmpeg -i input.wav -codec:a libfdk_aac -vbr 3 output.m4a
    +
    + +
  • +Use ffmpeg to convert an audio file to CBR 64k kbps AAC, using the +High-Efficiency AAC profile: +
     
    ffmpeg -i input.wav -c:a libfdk_aac -profile:a aac_he -b:a 64k output.m4a
    +
    +
+ +

+

+

8.5 libmp3lame

+ +

LAME (Lame Ain’t an MP3 Encoder) MP3 encoder wrapper. +

+

Requires the presence of the libmp3lame headers and library during +configuration. You need to explicitly configure the build with +--enable-libmp3lame. +

+

See libshine for a fixed-point MP3 encoder, although with a +lower quality. +

+ +

8.5.1 Options

+ +

The following options are supported by the libmp3lame wrapper. The +lame-equivalent of the options are listed in parentheses. +

+
+
b (-b)
+

Set bitrate expressed in bits/s for CBR or ABR. LAME bitrate is +expressed in kilobits/s. +

+
+
q (-V)
+

Set constant quality setting for VBR. This option is valid only +using the ffmpeg command-line tool. For library interface +users, use ‘global_quality’. +

+
+
compression_level (-q)
+

Set algorithm quality. Valid arguments are integers in the 0-9 range, +with 0 meaning highest quality but slowest, and 9 meaning fastest +while producing the worst quality. +

+
+
reservoir
+

Enable use of bit reservoir when set to 1. Default value is 1. LAME +has this enabled by default, but it can be overridden by using the +‘--nores’ option. +

+
+
joint_stereo (-m j)
+

Enable the encoder to use (on a frame by frame basis) either L/R +stereo or mid/side stereo. Default value is 1. +

+
+
abr (--abr)
+

Enable the encoder to use ABR when set to 1. The lame +‘--abr’ option sets the target bitrate, while this option only +tells FFmpeg to use ABR; it still relies on ‘b’ to set the bitrate. +

+
+
+ + +

8.6 libopencore-amrnb

+ +

OpenCORE Adaptive Multi-Rate Narrowband encoder. +

+

Requires the presence of the libopencore-amrnb headers and library during +configuration. You need to explicitly configure the build with +--enable-libopencore-amrnb --enable-version3. +

+

This is a mono-only encoder. Officially it only supports 8000Hz sample rate, +but you can override it by setting ‘strict’ to ‘unofficial’ or +lower. +

+ +

8.6.1 Options

+ +
+
b
+

Set bitrate in bits per second. Only the following bitrates are supported, +otherwise libavcodec will round to the nearest valid bitrate. +

+
+
4750
+
5150
+
5900
+
6700
+
7400
+
7950
+
10200
+
12200
+
+ +
+
dtx
+

Allow discontinuous transmission (generate comfort noise) when set to 1. The +default value is 0 (disabled). +

+
+
+ +

+

+

8.7 libshine

+ +

Shine Fixed-Point MP3 encoder wrapper. +

+

Shine is a fixed-point MP3 encoder. It has a far better performance on +platforms without an FPU, e.g. armel CPUs, and some phones and tablets. +However, as it is more targeted on performance than quality, it is not on par +with LAME and other production-grade encoders quality-wise. Also, according to +the project’s homepage, this encoder may not be free of bugs as the code was +written a long time ago and the project was dead for at least 5 years. +

+

This encoder only supports stereo and mono input. This is also CBR-only. +

+

The original project (last updated in early 2007) is at +http://sourceforge.net/projects/libshine-fxp/. We only support the +updated fork by the Savonet/Liquidsoap project at https://github.com/savonet/shine. +

+

Requires the presence of the libshine headers and library during +configuration. You need to explicitly configure the build with +--enable-libshine. +

+

See also libmp3lame. +

+ +

8.7.1 Options

+ +

The following options are supported by the libshine wrapper. The +shineenc-equivalent of the options are listed in parentheses. +

+
+
b (-b)
+

Set bitrate expressed in bits/s for CBR. shineenc-b’ option +is expressed in kilobits/s. +

+
+
+ + +

8.8 libtwolame

+ +

TwoLAME MP2 encoder wrapper. +

+

Requires the presence of the libtwolame headers and library during +configuration. You need to explicitly configure the build with +--enable-libtwolame. +

+ +

8.8.1 Options

+ +

The following options are supported by the libtwolame wrapper. The +twolame-equivalent options follow the FFmpeg ones and are in +parentheses. +

+
+
b (-b)
+

Set bitrate expressed in bits/s for CBR. twolameb’ +option is expressed in kilobits/s. Default value is 128k. +

+
+
q (-V)
+

Set quality for experimental VBR support. Maximum value range is +from -50 to 50, useful range is from -10 to 10. The higher the +value, the better the quality. This option is valid only using the +ffmpeg command-line tool. For library interface users, +use ‘global_quality’. +

+
+
mode (--mode)
+

Set the mode of the resulting audio. Possible values: +

+
+
auto
+

Choose mode automatically based on the input. This is the default. +

+
stereo
+

Stereo +

+
joint_stereo
+

Joint stereo +

+
dual_channel
+

Dual channel +

+
mono
+

Mono +

+
+ +
+
psymodel (--psyc-mode)
+

Set psychoacoustic model to use in encoding. The argument must be +an integer between -1 and 4, inclusive. The higher the value, the +better the quality. The default value is 3. +

+
+
energy_levels (--energy)
+

Enable energy levels extensions when set to 1. The default value is +0 (disabled). +

+
+
error_protection (--protect)
+

Enable CRC error protection when set to 1. The default value is 0 +(disabled). +

+
+
copyright (--copyright)
+

Set MPEG audio copyright flag when set to 1. The default value is 0 +(disabled). +

+
+
original (--original)
+

Set MPEG audio original flag when set to 1. The default value is 0 +(disabled). +

+
+
+ +

+

+

8.9 libvo-aacenc

+ +

VisualOn AAC encoder. +

+

Requires the presence of the libvo-aacenc headers and library during +configuration. You need to explicitly configure the build with +--enable-libvo-aacenc --enable-version3. +

+

This encoder is considered to be worse than the +native experimental FFmpeg AAC encoder, according to +multiple sources. +

+ +

8.9.1 Options

+ +

The VisualOn AAC encoder only support encoding AAC-LC and up to 2 +channels. It is also CBR-only. +

+
+
b
+

Set bit rate in bits/s. +

+
+
+ + +

8.10 libvo-amrwbenc

+ +

VisualOn Adaptive Multi-Rate Wideband encoder. +

+

Requires the presence of the libvo-amrwbenc headers and library during +configuration. You need to explicitly configure the build with +--enable-libvo-amrwbenc --enable-version3. +

+

This is a mono-only encoder. Officially it only supports 16000Hz sample +rate, but you can override it by setting ‘strict’ to +‘unofficial’ or lower. +

+ +

8.10.1 Options

+ +
+
b
+

Set bitrate in bits/s. Only the following bitrates are supported, otherwise +libavcodec will round to the nearest valid bitrate. +

+
+
6600
+
8850
+
12650
+
14250
+
15850
+
18250
+
19850
+
23050
+
23850
+
+ +
+
dtx
+

Allow discontinuous transmission (generate comfort noise) when set to 1. The +default value is 0 (disabled). +

+
+
+ + +

8.11 libopus

+ +

libopus Opus Interactive Audio Codec encoder wrapper. +

+

Requires the presence of the libopus headers and library during +configuration. You need to explicitly configure the build with +--enable-libopus. +

+ +

8.11.1 Option Mapping

+ +

Most libopus options are modeled after the opusenc utility from +opus-tools. The following is an option mapping chart describing options +supported by the libopus wrapper, and their opusenc-equivalent +in parentheses. +

+
+
b (bitrate)
+

Set the bit rate in bits/s. FFmpeg’s ‘b’ option is +expressed in bits/s, while opusenc’s ‘bitrate’ in +kilobits/s. +

+
+
vbr (vbr, hard-cbr, and cvbr)
+

Set VBR mode. The FFmpeg ‘vbr’ option has the following +valid arguments, with the their opusenc equivalent options +in parentheses: +

+
+
off (hard-cbr)
+

Use constant bit rate encoding. +

+
+
on (vbr)
+

Use variable bit rate encoding (the default). +

+
+
constrained (cvbr)
+

Use constrained variable bit rate encoding. +

+
+ +
+
compression_level (comp)
+

Set encoding algorithm complexity. Valid options are integers in +the 0-10 range. 0 gives the fastest encodes but lower quality, while 10 +gives the highest quality but slowest encoding. The default is 10. +

+
+
frame_duration (framesize)
+


Set maximum frame size, or duration of a frame in milliseconds. The +argument must be exactly one of the following: 2.5, 5, 10, 20, 40, 60. Smaller +frame sizes achieve lower latency but less quality at a given bitrate. +Sizes greater than 20ms are only interesting at fairly low bitrates. +The default is 20ms. +

+
+
packet_loss (expect-loss)
+

Set expected packet loss percentage. The default is 0. +

+
+
application (N.A.)
+

Set intended application type. Valid options are listed below: +

+
+
voip
+

Favor improved speech intelligibility. +

+
audio
+

Favor faithfulness to the input (the default). +

+
lowdelay
+

Restrict to only the lowest delay modes. +

+
+ +
+
cutoff (N.A.)
+

Set cutoff bandwidth in Hz. The argument must be exactly one of the +following: 4000, 6000, 8000, 12000, or 20000, corresponding to +narrowband, mediumband, wideband, super wideband, and fullband +respectively. The default is 0 (cutoff disabled). +

+
+
+ + +

8.12 libvorbis

+ +

libvorbis encoder wrapper. +

+

Requires the presence of the libvorbisenc headers and library during +configuration. You need to explicitly configure the build with +--enable-libvorbis. +

+ +

8.12.1 Options

+ +

The following options are supported by the libvorbis wrapper. The +oggenc-equivalent of the options are listed in parentheses. +

+

To get a more accurate and extensive documentation of the libvorbis +options, consult the libvorbisenc’s and oggenc’s documentations. +See http://xiph.org/vorbis/, +http://wiki.xiph.org/Vorbis-tools, and oggenc(1). +

+
+
b (-b)
+

Set bitrate expressed in bits/s for ABR. oggenc-b’ is +expressed in kilobits/s. +

+
+
q (-q)
+

Set constant quality setting for VBR. The value should be a float +number in the range of -1.0 to 10.0. The higher the value, the better +the quality. The default value is ‘3.0’. +

+

This option is valid only using the ffmpeg command-line tool. +For library interface users, use ‘global_quality’. +

+
+
cutoff (--advanced-encode-option lowpass_frequency=N)
+

Set cutoff bandwidth in Hz, a value of 0 disables cutoff. oggenc’s +related option is expressed in kHz. The default value is ‘0’ (cutoff +disabled). +

+
+
minrate (-m)
+

Set minimum bitrate expressed in bits/s. oggenc-m’ is +expressed in kilobits/s. +

+
+
maxrate (-M)
+

Set maximum bitrate expressed in bits/s. oggenc-M’ is +expressed in kilobits/s. This only has effect on ABR mode. +

+
+
iblock (--advanced-encode-option impulse_noisetune=N)
+

Set noise floor bias for impulse blocks. The value is a float number from +-15.0 to 0.0. A negative bias instructs the encoder to pay special attention +to the crispness of transients in the encoded audio. The tradeoff for better +transient response is a higher bitrate. +

+
+
+ +

+

+

8.13 libwavpack

+ +

A wrapper providing WavPack encoding through libwavpack. +

+

Only lossless mode using 32-bit integer samples is supported currently. +

+

Requires the presence of the libwavpack headers and library during +configuration. You need to explicitly configure the build with +--enable-libwavpack. +

+

Note that a libavcodec-native encoder for the WavPack codec exists so users can +encode audios with this codec without using this encoder. See wavpackenc. +

+ +

8.13.1 Options

+ +

wavpack command line utility’s corresponding options are listed in +parentheses, if any. +

+
+
frame_size (--blocksize)
+

Default is 32768. +

+
+
compression_level
+

Set speed vs. compression tradeoff. Acceptable arguments are listed below: +

+
+
0 (-f)
+

Fast mode. +

+
+
1
+

Normal (default) settings. +

+
+
2 (-h)
+

High quality. +

+
+
3 (-hh)
+

Very high quality. +

+
+
4-8 (-hh -xEXTRAPROC)
+

Same as ‘3’, but with extra processing enabled. +

+

4’ is the same as ‘-x2’ and ‘8’ is the same as ‘-x6’. +

+
+
+
+
+ +

+

+

8.14 wavpack

+ +

WavPack lossless audio encoder. +

+

This is a libavcodec-native WavPack encoder. There is also an encoder based on +libwavpack, but there is virtually no reason to use that encoder. +

+

See also libwavpack. +

+ +

8.14.1 Options

+ +

The equivalent options for wavpack command line utility are listed in +parentheses. +

+ +

8.14.1.1 Shared options

+ +

The following shared options are effective for this encoder. Only special notes +about this particular encoder will be documented here. For the general meaning +of the options, see the Codec Options chapter. +

+
+
frame_size (--blocksize)
+


For this encoder, the range for this option is between 128 and 131072. Default +is automatically decided based on sample rate and number of channels. +

+

For the complete formula of calculating default, see +‘libavcodec/wavpackenc.c’. +

+
+
compression_level (-f, -h, -hh, and -x)
+

This option’s syntax is consistent with libwavpack’s. +

+
+ + +

8.14.1.2 Private options

+ +
+
joint_stereo (-j)
+

Set whether to enable joint stereo. Valid values are: +

+
+
on (1)
+

Force mid/side audio encoding. +

+
off (0)
+

Force left/right audio encoding. +

+
auto
+

Let the encoder decide automatically. +

+
+ +
+
optimize_mono
+

Set whether to enable optimization for mono. This option is only effective for +non-mono streams. Available values: +

+
+
on
+

enabled +

+
off
+

disabled +

+
+ +
+
+ + + +

9. Video Encoders

+ +

A description of some of the currently available video encoders +follows. +

+ +

9.1 libtheora

+ +

libtheora Theora encoder wrapper. +

+

Requires the presence of the libtheora headers and library during +configuration. You need to explicitly configure the build with +--enable-libtheora. +

+

For more information about the libtheora project see +http://www.theora.org/. +

+ +

9.1.1 Options

+ +

The following global options are mapped to internal libtheora options +which affect the quality and the bitrate of the encoded stream. +

+
+
b
+

Set the video bitrate in bit/s for CBR (Constant Bit Rate) mode. In +case VBR (Variable Bit Rate) mode is enabled this option is ignored. +

+
+
flags
+

Used to enable constant quality mode (VBR) encoding through the +‘qscale’ flag, and to enable the pass1 and pass2 +modes. +

+
+
g
+

Set the GOP size. +

+
+
global_quality
+

Set the global quality as an integer in lambda units. +

+

Only relevant when VBR mode is enabled with flags +qscale. The +value is converted to QP units by dividing it by FF_QP2LAMBDA, +clipped in the [0 - 10] range, and then multiplied by 6.3 to get a +value in the native libtheora range [0-63]. A higher value corresponds +to a higher quality. +

+
+
q
+

Enable VBR mode when set to a non-negative value, and set constant +quality value as a double floating point value in QP units. +

+

The value is clipped in the [0-10] range, and then multiplied by 6.3 +to get a value in the native libtheora range [0-63]. +

+

This option is valid only using the ffmpeg command-line +tool. For library interface users, use ‘global_quality’. +

+
+ + +

9.1.2 Examples

+ +
    +
  • +Set maximum constant quality (VBR) encoding with ffmpeg: +
     
    ffmpeg -i INPUT -codec:v libtheora -q:v 10 OUTPUT.ogg
    +
    + +
  • +Use ffmpeg to convert a CBR 1000 kbps Theora video stream: +
     
    ffmpeg -i INPUT -codec:v libtheora -b:v 1000k OUTPUT.ogg
    +
    +
+ + +

9.2 libvpx

+ +

VP8 format supported through libvpx. +

+

Requires the presence of the libvpx headers and library during configuration. +You need to explicitly configure the build with --enable-libvpx. +

+ +

9.2.1 Options

+ +

Mapping from FFmpeg to libvpx options with conversion notes in parentheses. +

+
+
threads
+

g_threads +

+
+
profile
+

g_profile +

+
+
vb
+

rc_target_bitrate +

+
+
g
+

kf_max_dist +

+
+
keyint_min
+

kf_min_dist +

+
+
qmin
+

rc_min_quantizer +

+
+
qmax
+

rc_max_quantizer +

+
+
bufsize, vb
+

rc_buf_sz +(bufsize * 1000 / vb) +

+

rc_buf_optimal_sz +(bufsize * 1000 / vb * 5 / 6) +

+
+
rc_init_occupancy, vb
+

rc_buf_initial_sz +(rc_init_occupancy * 1000 / vb) +

+
+
rc_buffer_aggressivity
+

rc_undershoot_pct +

+
+
skip_threshold
+

rc_dropframe_thresh +

+
+
qcomp
+

rc_2pass_vbr_bias_pct +

+
+
maxrate, vb
+

rc_2pass_vbr_maxsection_pct +(maxrate * 100 / vb) +

+
+
minrate, vb
+

rc_2pass_vbr_minsection_pct +(minrate * 100 / vb) +

+
+
minrate, maxrate, vb
+

VPX_CBR +(minrate == maxrate == vb) +

+
+
crf
+

VPX_CQ, VP8E_SET_CQ_LEVEL +

+
+
quality
+
+
best
+

VPX_DL_BEST_QUALITY +

+
good
+

VPX_DL_GOOD_QUALITY +

+
realtime
+

VPX_DL_REALTIME +

+
+ +
+
speed
+

VP8E_SET_CPUUSED +

+
+
nr
+

VP8E_SET_NOISE_SENSITIVITY +

+
+
mb_threshold
+

VP8E_SET_STATIC_THRESHOLD +

+
+
slices
+

VP8E_SET_TOKEN_PARTITIONS +

+
+
max-intra-rate
+

VP8E_SET_MAX_INTRA_BITRATE_PCT +

+
+
force_key_frames
+

VPX_EFLAG_FORCE_KF +

+
+
Alternate reference frame related
+
+
vp8flags altref
+

VP8E_SET_ENABLEAUTOALTREF +

+
arnr_max_frames
+

VP8E_SET_ARNR_MAXFRAMES +

+
arnr_type
+

VP8E_SET_ARNR_TYPE +

+
arnr_strength
+

VP8E_SET_ARNR_STRENGTH +

+
rc_lookahead
+

g_lag_in_frames +

+
+ +
+
vp8flags error_resilient
+

g_error_resilient +

+
+
+ +

For more information about libvpx see: +http://www.webmproject.org/ +

+ + +

9.3 libwebp

+ +

libwebp WebP Image encoder wrapper +

+

libwebp is Google’s official encoder for WebP images. It can encode in either +lossy or lossless mode. Lossy images are essentially a wrapper around a VP8 +frame. Lossless images are a separate codec developed by Google. +

+ +

9.3.1 Pixel Format

+ +

Currently, libwebp only supports YUV420 for lossy and RGB for lossless due +to limitations of the format and libwebp. Alpha is supported for either mode. +Because of API limitations, if RGB is passed in when encoding lossy or YUV is +passed in for encoding lossless, the pixel format will automatically be +converted using functions from libwebp. This is not ideal and is done only for +convenience. +

+ +

9.3.2 Options

+ +
+
-lossless boolean
+

Enables/Disables use of lossless mode. Default is 0. +

+
+
-compression_level integer
+

For lossy, this is a quality/speed tradeoff. Higher values give better quality +for a given size at the cost of increased encoding time. For lossless, this is +a size/speed tradeoff. Higher values give smaller size at the cost of increased +encoding time. More specifically, it controls the number of extra algorithms +and compression tools used, and varies the combination of these tools. This +maps to the method option in libwebp. The valid range is 0 to 6. +Default is 4. +

+
+
-qscale float
+

For lossy encoding, this controls image quality, 0 to 100. For lossless +encoding, this controls the effort and time spent at compressing more. The +default value is 75. Note that for usage via libavcodec, this option is called +global_quality and must be multiplied by FF_QP2LAMBDA. +

+
+
-preset type
+

Configuration preset. This does some automatic settings based on the general +type of the image. +

+
none
+

Do not use a preset. +

+
default
+

Use the encoder default. +

+
picture
+

Digital picture, like portrait, inner shot +

+
photo
+

Outdoor photograph, with natural lighting +

+
drawing
+

Hand or line drawing, with high-contrast details +

+
icon
+

Small-sized colorful images +

+
text
+

Text-like +

+
+ +
+
+ + +

9.4 libx264, libx264rgb

+ +

x264 H.264/MPEG-4 AVC encoder wrapper. +

+

This encoder requires the presence of the libx264 headers and library +during configuration. You need to explicitly configure the build with +--enable-libx264. +

+

libx264 supports an impressive number of features, including 8x8 and +4x4 adaptive spatial transform, adaptive B-frame placement, CAVLC/CABAC +entropy coding, interlacing (MBAFF), lossless mode, psy optimizations +for detail retention (adaptive quantization, psy-RD, psy-trellis). +

+


Many libx264 encoder options are mapped to FFmpeg global codec +options, while unique encoder options are provided through private +options. Additionally the ‘x264opts’ and ‘x264-params’ +private options allow one to pass a list of key=value tuples as accepted +by the libx264 x264_param_parse function. +

+

The x264 project website is at +http://www.videolan.org/developers/x264.html. +

+

The libx264rgb encoder is the same as libx264, except it accepts packed RGB +pixel formats as input instead of YUV. +

+ +

9.4.1 Supported Pixel Formats

+ +

x264 supports 8- to 10-bit color spaces. The exact bit depth is controlled at +x264’s configure time. FFmpeg only supports one bit depth in one particular +build. In other words, it is not possible to build one FFmpeg with multiple +versions of x264 with different bit depths. +

+ +

9.4.2 Options

+ +

The following options are supported by the libx264 wrapper. The +x264-equivalent options or values are listed in parentheses +for easy migration. +

+

To reduce the duplication of documentation, only the private options +and some others requiring special attention are documented here. For +the documentation of the undocumented generic options, see +the Codec Options chapter. +

+

To get a more accurate and extensive documentation of the libx264 +options, invoke the command x264 --full-help or consult +the libx264 documentation. +

+
+
b (bitrate)
+

Set bitrate in bits/s. Note that FFmpeg’s ‘b’ option is +expressed in bits/s, while x264’s ‘bitrate’ is in +kilobits/s. +

+
+
bf (bframes)
+
g (keyint)
+
qmax (qpmax)
+
qmin (qpmin)
+
qdiff (qpstep)
+
qblur (qblur)
+
qcomp (qcomp)
+
refs (ref)
+
sc_threshold (scenecut)
+
trellis (trellis)
+
nr (nr)
+
me_range (merange)
+
me_method (me)
+

Set motion estimation method. Possible values in the decreasing order +of speed: +

+
+
dia (dia)
+
epzs (dia)
+

Diamond search with radius 1 (fastest). ‘epzs’ is an alias for +‘dia’. +

+
hex (hex)
+

Hexagonal search with radius 2. +

+
umh (umh)
+

Uneven multi-hexagon search. +

+
esa (esa)
+

Exhaustive search. +

+
tesa (tesa)
+

Hadamard exhaustive search (slowest). +

+
+ +
+
subq (subme)
+
b_strategy (b-adapt)
+
keyint_min (min-keyint)
+
coder
+

Set entropy encoder. Possible values: +

+
+
ac
+

Enable CABAC. +

+
+
vlc
+

Enable CAVLC and disable CABAC. It generates the same effect as +x264’s ‘--no-cabac’ option. +

+
+ +
+
cmp
+


Set full pixel motion estimation comparison algorithm. Possible values: +

+
+
chroma
+

Enable chroma in motion estimation. +

+
+
sad
+

Ignore chroma in motion estimation. It generates the same effect as +x264’s ‘--no-chroma-me’ option. +

+
+ +
+
threads (threads)
+
thread_type
+

Set multithreading technique. Possible values: +

+
+
slice
+

Slice-based multithreading. It generates the same effect as +x264’s ‘--sliced-threads’ option. +

+
frame
+

Frame-based multithreading. +

+
+ +
+
flags
+

Set encoding flags. It can be used to disable closed GOP and enable +open GOP by setting it to -cgop. The result is similar to +the behavior of x264’s ‘--open-gop’ option. +

+
+
rc_init_occupancy (vbv-init)
+
preset (preset)
+

Set the encoding preset. +

+
+
tune (tune)
+

Set tuning of the encoding params. +

+
+
profile (profile)
+

Set profile restrictions. +

+
+
fastfirstpass
+

Enable fast settings when encoding first pass, when set to 1. When set +to 0, it has the same effect of x264’s +‘--slow-firstpass’ option. +

+
+
crf (crf)
+

Set the quality for constant quality mode. +

+
+
crf_max (crf-max)
+

In CRF mode, prevents VBV from lowering quality beyond this point. +

+
+
qp (qp)
+

Set constant quantization rate control method parameter. +

+
+
aq-mode (aq-mode)
+

Set AQ method. Possible values: +

+
+
none (0)
+

Disabled. +

+
+
variance (1)
+

Variance AQ (complexity mask). +

+
+
autovariance (2)
+

Auto-variance AQ (experimental). +

+
+ +
+
aq-strength (aq-strength)
+

Set AQ strength, reduce blocking and blurring in flat and textured areas. +

+
+
psy
+

Use psychovisual optimizations when set to 1. When set to 0, it has the +same effect as x264’s ‘--no-psy’ option. +

+
+
psy-rd (psy-rd)
+

Set strength of psychovisual optimization, in +psy-rd:psy-trellis format. +

+
+
rc-lookahead (rc-lookahead)
+

Set number of frames to look ahead for frametype and ratecontrol. +

+
+
weightb
+

Enable weighted prediction for B-frames when set to 1. When set to 0, +it has the same effect as x264’s ‘--no-weightb’ option. +

+
+
weightp (weightp)
+

Set weighted prediction method for P-frames. Possible values: +

+
+
none (0)
+

Disabled +

+
simple (1)
+

Enable only weighted refs +

+
smart (2)
+

Enable both weighted refs and duplicates +

+
+ +
+
ssim (ssim)
+

Enable calculation and printing SSIM stats after the encoding. +

+
+
intra-refresh (intra-refresh)
+

Enable the use of Periodic Intra Refresh instead of IDR frames when set +to 1. +

+
+
bluray-compat (bluray-compat)
+

Configure the encoder to be compatible with the bluray standard. +It is a shorthand for setting "bluray-compat=1 force-cfr=1". +

+
+
b-bias (b-bias)
+

Set the influence on how often B-frames are used. +

+
+
b-pyramid (b-pyramid)
+

Set method for keeping of some B-frames as references. Possible values: +

+
+
none (none)
+

Disabled. +

+
strict (strict)
+

Strictly hierarchical pyramid. +

+
normal (normal)
+

Non-strict (not Blu-ray compatible). +

+
+ +
+
mixed-refs
+

Enable the use of one reference per partition, as opposed to one +reference per macroblock when set to 1. When set to 0, it has the +same effect as x264’s ‘--no-mixed-refs’ option. +

+
+
8x8dct
+

Enable adaptive spatial transform (high profile 8x8 transform) +when set to 1. When set to 0, it has the same effect as +x264’s ‘--no-8x8dct’ option. +

+
+
fast-pskip
+

Enable early SKIP detection on P-frames when set to 1. When set +to 0, it has the same effect as x264’s +‘--no-fast-pskip’ option. +

+
+
aud (aud)
+

Enable use of access unit delimiters when set to 1. +

+
+
mbtree
+

Enable use macroblock tree ratecontrol when set to 1. When set +to 0, it has the same effect as x264’s +‘--no-mbtree’ option. +

+
+
deblock (deblock)
+

Set loop filter parameters, in alpha:beta form. +

+
+
cplxblur (cplxblur)
+

Set fluctuations reduction in QP (before curve compression). +

+
+
partitions (partitions)
+

Set partitions to consider as a comma-separated list of. Possible +values in the list: +

+
+
p8x8
+

8x8 P-frame partition. +

+
p4x4
+

4x4 P-frame partition. +

+
b8x8
+


8x8 B-frame partition. +

+
i8x8
+

8x8 I-frame partition. +

+
i4x4
+

4x4 I-frame partition. +(Enabling ‘p4x4’ requires ‘p8x8’ to be enabled. Enabling +‘i8x8’ requires adaptive spatial transform (‘8x8dct’ +option) to be enabled.) +

+
none (none)
+

Do not consider any partitions. +

+
all (all)
+

Consider every partition. +

+
+ +
+
direct-pred (direct)
+

Set direct MV prediction mode. Possible values: +

+
+
none (none)
+

Disable MV prediction. +

+
spatial (spatial)
+

Enable spatial predicting. +

+
temporal (temporal)
+

Enable temporal predicting. +

+
auto (auto)
+

Automatically decided. +

+
+ +
+
slice-max-size (slice-max-size)
+

Set the limit of the size of each slice in bytes. If not specified +but RTP payload size (‘ps’) is specified, that is used. +

+
+
stats (stats)
+

Set the file name for multi-pass stats. +

+
+
nal-hrd (nal-hrd)
+

Set signal HRD information (requires ‘vbv-bufsize’ to be set). +Possible values: +

+
+
none (none)
+

Disable HRD information signaling. +

+
vbr (vbr)
+

Variable bit rate. +

+
cbr (cbr)
+

Constant bit rate (not allowed in MP4 container). +

+
+ +
+
x264opts (N.A.)
+

Set any x264 option, see x264 --fullhelp for a list. +

+

Argument is a list of key=value couples separated by +":". In filter and psy-rd options that use ":" as a separator +themselves, use "," instead. They accept it as well since long ago but this +is kept undocumented for some reason. +

+

For example to specify libx264 encoding options with ffmpeg: +

 
ffmpeg -i foo.mpg -vcodec libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
+
+ +
+
x264-params (N.A.)
+

Override the x264 configuration using a :-separated list of key=value +parameters. +

+

This option is functionally the same as the ‘x264opts’, but is +duplicated for compatibility with the Libav fork. +

+

For example to specify libx264 encoding options with ffmpeg: +

 
ffmpeg -i INPUT -c:v libx264 -x264-params level=30:bframes=0:weightp=0:\
+cabac=0:ref=1:vbv-maxrate=768:vbv-bufsize=2000:analyse=all:me=umh:\
+no-fast-pskip=1:subq=6:8x8dct=0:trellis=0 OUTPUT
+
+
+
+ +

Encoding ffpresets for common usages are provided so they can be used with the +general presets system (e.g. passing the ‘pre’ option). +

+ +

9.5 libxvid

+ +

Xvid MPEG-4 Part 2 encoder wrapper. +

+

This encoder requires the presence of the libxvidcore headers and library +during configuration. You need to explicitly configure the build with +--enable-libxvid --enable-gpl. +

+

The native mpeg4 encoder supports the MPEG-4 Part 2 format, so +users can encode to this format without this library. +

+ +

9.5.1 Options

+ +

The following options are supported by the libxvid wrapper. Some of +the following options are listed but are not documented, and +correspond to shared codec options. See the Codec Options chapter for their documentation. The other shared options +which are not listed have no effect for the libxvid encoder. +

+
+
b
+
g
+
qmin
+
qmax
+
mpeg_quant
+
threads
+
bf
+
b_qfactor
+
b_qoffset
+
flags
+

Set specific encoding flags. Possible values: +

+
+
mv4
+

Use four motion vector by macroblock. +

+
+
aic
+

Enable high quality AC prediction. +

+
+
gray
+

Only encode grayscale. +

+
+
gmc
+

Enable the use of global motion compensation (GMC). +

+
+
qpel
+

Enable quarter-pixel motion compensation. +

+
+
cgop
+

Enable closed GOP. +

+
+
global_header
+

Place global headers in extradata instead of every keyframe. +

+
+
+ +
+
trellis
+
me_method
+

Set motion estimation method. Possible values in decreasing order of +speed and increasing order of quality: +

+
+
zero
+

Use no motion estimation (default). +

+
+
phods
+
x1
+
log
+

Enable advanced diamond zonal search for 16x16 blocks and half-pixel +refinement for 16x16 blocks. ‘x1’ and ‘log’ are aliases for +‘phods’. +

+
+
epzs
+

Enable all of the things described above, plus advanced diamond zonal +search for 8x8 blocks, half-pixel refinement for 8x8 blocks, and motion +estimation on chroma planes. +

+
+
full
+

Enable all of the things described above, plus extended 16x16 and 8x8 +blocks search. +

+
+ +
+
mbd
+

Set macroblock decision algorithm. Possible values in the increasing +order of quality: +

+
+
simple
+

Use macroblock comparing function algorithm (default). +

+
+
bits
+

Enable rate distortion-based half pixel and quarter pixel refinement for +16x16 blocks. +

+
+
rd
+

Enable all of the things described above, plus rate distortion-based +half pixel and quarter pixel refinement for 8x8 blocks, and rate +distortion-based search using square pattern. +

+
+ +
+
lumi_aq
+

Enable lumi masking adaptive quantization when set to 1. Default is 0 +(disabled). +

+
+
variance_aq
+

Enable variance adaptive quantization when set to 1. Default is 0 +(disabled). +

+

When combined with ‘lumi_aq’, the resulting quality will not +be better than any of the two specified individually. In other +words, the resulting quality will be the worse one of the two +effects. +

+
+
ssim
+

Set structural similarity (SSIM) displaying method. Possible values: +

+
+
off
+

Disable displaying of SSIM information. +

+
+
avg
+

Output average SSIM at the end of encoding to stdout. The format of +showing the average SSIM is: +

+
 
Average SSIM: %f
+
+ +

For users who are not familiar with C, %f means a float number, or +a decimal (e.g. 0.939232). +

+
+
frame
+

Output both per-frame SSIM data during encoding and average SSIM at +the end of encoding to stdout. The format of per-frame information +is: +

+
 
       SSIM: avg: %1.3f min: %1.3f max: %1.3f
+
+ +

For users who are not familiar with C, %1.3f means a float number +rounded to 3 digits after the dot (e.g. 0.932). +

+
+
+ +
+
ssim_acc
+

Set SSIM accuracy. Valid options are integers within the range of +0-4, while 0 gives the most accurate result and 4 computes the +fastest. +

+
+
+ + +

9.6 png

+ +

PNG image encoder. +

+ +

9.6.1 Private options

+ +
+
dpi integer
+

Set physical density of pixels, in dots per inch, unset by default +

+
dpm integer
+

Set physical density of pixels, in dots per meter, unset by default +

+
+ + +

9.7 ProRes

+ +

Apple ProRes encoder. +

+

FFmpeg contains 2 ProRes encoders, the prores-aw and prores-ks encoder. +The used encoder can be chosen with the -vcodec option. +

+ +

9.7.1 Private Options for prores-ks

+ +
+
profile integer
+

Select the ProRes profile to encode +

+
proxy
+
lt
+
standard
+
hq
+
4444
+
+ +
+
quant_mat integer
+

Select quantization matrix. +

+
auto
+
default
+
proxy
+
lt
+
standard
+
hq
+
+

If set to auto, the matrix matching the profile will be picked. +If not set, the matrix providing the highest quality, default, will be +picked. +

+
+
bits_per_mb integer
+

How many bits to allot for coding one macroblock. Different profiles use +between 200 and 2400 bits per macroblock, the maximum is 8000. +

+
+
mbs_per_slice integer
+

Number of macroblocks in each slice (1-8); the default value (8) +should be good in almost all situations. +

+
+
vendor string
+

Override the 4-byte vendor ID. +A custom vendor ID like apl0 would claim the stream was produced by +the Apple encoder. +

+
+
alpha_bits integer
+

Specify number of bits for alpha component. +Possible values are 0, 8 and 16. +Use 0 to disable alpha plane coding. +

+
+
+ + +

9.7.2 Speed considerations

+ +


In the default mode of operation the encoder has to honor frame constraints +(i.e. not produce frames with size bigger than requested) while still making +the output picture as good as possible. +A frame containing a lot of small details is harder to compress and the encoder +would spend more time searching for appropriate quantizers for each slice. +

+

Setting a higher ‘bits_per_mb’ limit will improve the speed. +

+

For the fastest encoding speed set the ‘qscale’ parameter (4 is the +recommended value) and do not set a size constraint. +

+ + +

10. See Also

+ +

ffmpeg, ffplay, ffprobe, ffserver, +libavcodec +

+ + +

11. Authors

+ +

The FFmpeg developers. +

+

For details about the authorship, see the Git history of the project +(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command +git log in the FFmpeg source directory, or browsing the +online repository at http://source.ffmpeg.org. +

+

Maintainers for the specific components are listed in the file +‘MAINTAINERS’ in the source code tree. +

+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/ffmpeg-devices.html b/dependencies64/ffmpeg/doc/ffmpeg-devices.html new file mode 100644 index 000000000..bc5a30b59 --- /dev/null +++ b/dependencies64/ffmpeg/doc/ffmpeg-devices.html @@ -0,0 +1,1443 @@ + + + + + +FFmpeg documentation : FFmpeg Devices + + + + + + + + + + +
+
+ + +

FFmpeg Devices Documentation

+ + +

Table of Contents

+ + + +

1. Description

+ +

This document describes the input and output devices provided by the +libavdevice library. +

+ + +

2. Device Options

+ +

The libavdevice library provides the same interface as +libavformat. Namely, an input device is considered like a demuxer, and +an output device like a muxer, and the interface and generic device +options are the same provided by libavformat (see the ffmpeg-formats +manual). +

+

In addition each input or output device may support so-called private +options, which are specific for that component. +

+

Options may be set by specifying -option value in the +FFmpeg tools, or by setting the value explicitly in the device +AVFormatContext options or using the ‘libavutil/opt.h’ API +for programmatic use. +

+ + +

3. Input Devices

+ +


Input devices are configured elements in FFmpeg which allow you to access +the data coming from a multimedia device attached to your system. +

+

When you configure your FFmpeg build, all the supported input devices +are enabled by default. You can list all available ones using the +configure option "–list-indevs". +

+

You can disable all the input devices using the configure option +"–disable-indevs", and selectively enable an input device using the +option "–enable-indev=INDEV", or you can disable a particular +input device using the option "–disable-indev=INDEV". +

+

The option "-formats" of the ff* tools will display the list of +supported input devices (amongst the demuxers). +

+

A description of the currently available input devices follows. +

+ +

3.1 alsa

+ +

ALSA (Advanced Linux Sound Architecture) input device. +

+

To enable this input device during configuration you need libasound +installed on your system. +

+

This device allows capturing from an ALSA device. The name of the +device to capture has to be an ALSA card identifier. +

+

An ALSA identifier has the syntax: +

 
hw:CARD[,DEV[,SUBDEV]]
+
+ +

where the DEV and SUBDEV components are optional. +

+

The three arguments (in order: CARD,DEV,SUBDEV) +specify card number or identifier, device number and subdevice number +(-1 means any). +

+

To see the list of cards currently recognized by your system check the +files ‘/proc/asound/cards’ and ‘/proc/asound/devices’. +

+

For example to capture with ffmpeg from an ALSA device with +card id 0, you may run the command: +

 
ffmpeg -f alsa -i hw:0 alsaout.wav
+
+ +

For more information see: +http://www.alsa-project.org/alsa-doc/alsa-lib/pcm.html +

+ +

3.2 bktr

+ +

BSD video input device. +

+ +

3.3 dshow

+ +

Windows DirectShow input device. +

+

DirectShow support is enabled when FFmpeg is built with the mingw-w64 project. +Currently only audio and video devices are supported. +

+

Multiple devices may be opened as separate inputs, but they may also be +opened on the same input, which should improve synchronism between them. +

+

The input name should be in the format: +

+
 
TYPE=NAME[:TYPE=NAME]
+
+ +

where TYPE can be either audio or video, +and NAME is the device’s name. +

+ +

3.3.1 Options

+ +

If no options are specified, the device’s defaults are used. +If the device does not support the requested options, it will +fail to open. +

+
+
video_size
+

Set the video size in the captured video. +

+
+
framerate
+

Set the frame rate in the captured video. +

+
+
sample_rate
+

Set the sample rate (in Hz) of the captured audio. +

+
+
sample_size
+

Set the sample size (in bits) of the captured audio. +

+
+
channels
+

Set the number of channels in the captured audio. +

+
+
list_devices
+

If set to ‘true’, print a list of devices and exit. +

+
+
list_options
+

If set to ‘true’, print a list of selected device’s options +and exit. +

+
+
video_device_number
+

Set video device number for devices with same name (starts at 0, +defaults to 0). +

+
+
audio_device_number
+

Set audio device number for devices with same name (starts at 0, +defaults to 0). +

+
+
pixel_format
+

Select pixel format to be used by DirectShow. This may only be set when +the video codec is not set or set to rawvideo. +

+
+
audio_buffer_size
+

Set audio device buffer size in milliseconds (which can directly +impact latency, depending on the device). +Defaults to using the audio device’s +default buffer size (typically some multiple of 500ms). +Setting this value too low can degrade performance. +See also +http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx +

+
+
+ + +

3.3.2 Examples

+ +
    +
  • +Print the list of DirectShow supported devices and exit: +
     
    $ ffmpeg -list_devices true -f dshow -i dummy
    +
    + +
  • +Open video device Camera: +
     
    $ ffmpeg -f dshow -i video="Camera"
    +
    + +
  • +Open second video device with name Camera: +
     
    $ ffmpeg -f dshow -video_device_number 1 -i video="Camera"
    +
    + +
  • +Open video device Camera and audio device Microphone: +
     
    $ ffmpeg -f dshow -i video="Camera":audio="Microphone"
    +
    + +
  • +Print the list of supported options in selected device and exit: +
     
    $ ffmpeg -list_options true -f dshow -i video="Camera"
    +
    + +
+ + +

3.4 dv1394

+ +

Linux DV 1394 input device. +

+ +

3.5 fbdev

+ +

Linux framebuffer input device. +

+

The Linux framebuffer is a graphic hardware-independent abstraction +layer to show graphics on a computer monitor, typically on the +console. It is accessed through a file device node, usually +‘/dev/fb0’. +

+

For more detailed information read the file +Documentation/fb/framebuffer.txt included in the Linux source tree. +

+

To record from the framebuffer device ‘/dev/fb0’ with +ffmpeg: +

 
ffmpeg -f fbdev -r 10 -i /dev/fb0 out.avi
+
+ +

You can take a single screenshot image with the command: +

 
ffmpeg -f fbdev -frames:v 1 -r 1 -i /dev/fb0 screenshot.jpeg
+
+ +

See also http://linux-fbdev.sourceforge.net/, and fbset(1). +

+ +

3.6 iec61883

+ +

FireWire DV/HDV input device using libiec61883. +

+

To enable this input device, you need libiec61883, libraw1394 and +libavc1394 installed on your system. Use the configure option +--enable-libiec61883 to compile with the device enabled. +

+

The iec61883 capture device supports capturing from a video device +connected via IEEE1394 (FireWire), using libiec61883 and the new Linux +FireWire stack (juju). This is the default DV/HDV input method in Linux +Kernel 2.6.37 and later, since the old FireWire stack was removed. +

+

Specify the FireWire port to be used as input file, or "auto" +to choose the first port connected. +

+ +

3.6.1 Options

+ +
+
dvtype
+

Override autodetection of DV/HDV. This should only be used if auto +detection does not work, or if usage of a different device type +should be prohibited. Treating a DV device as HDV (or vice versa) will +not work and result in undefined behavior. +The values ‘auto’, ‘dv’ and ‘hdv’ are supported. +

+
+
dvbuffer
+

Set maximum size of buffer for incoming data, in frames. For DV, this +is an exact value. For HDV, it is not frame exact, since HDV does +not have a fixed frame size. +

+
+
dvguid
+

Select the capture device by specifying its GUID. Capturing will only +be performed from the specified device and fails if no device with the +given GUID is found. This is useful to select the input if multiple +devices are connected at the same time. +Look at /sys/bus/firewire/devices to find out the GUIDs. +

+
+
+ + +

3.6.2 Examples

+ +
    +
  • +Grab and show the input of a FireWire DV/HDV device. +
     
    ffplay -f iec61883 -i auto
    +
    + +
  • +Grab and record the input of a FireWire DV/HDV device, +using a packet buffer of 100000 packets if the source is HDV. +
     
    ffmpeg -f iec61883 -i auto -hdvbuffer 100000 out.mpg
    +
    + +
+ + +

3.7 jack

+ +

JACK input device. +

+

To enable this input device during configuration you need libjack +installed on your system. +

+

A JACK input device creates one or more JACK writable clients, one for +each audio channel, with name client_name:input_N, where +client_name is the name provided by the application, and N +is a number which identifies the channel. +Each writable client will send the acquired data to the FFmpeg input +device. +

+

Once you have created one or more JACK readable clients, you need to +connect them to one or more JACK writable clients. +

+

To connect or disconnect JACK clients you can use the jack_connect +and jack_disconnect programs, or do it through a graphical interface, +for example with qjackctl. +

+

To list the JACK clients and their properties you can invoke the command +jack_lsp. +

+

The following example shows how to capture a JACK readable client +with ffmpeg. +

 
# Create a JACK writable client with name "ffmpeg".
+$ ffmpeg -f jack -i ffmpeg -y out.wav
+
+# Start the sample jack_metro readable client.
+$ jack_metro -b 120 -d 0.2 -f 4000
+
+# List the current JACK clients.
+$ jack_lsp -c
+system:capture_1
+system:capture_2
+system:playback_1
+system:playback_2
+ffmpeg:input_1
+metro:120_bpm
+
+# Connect metro to the ffmpeg writable client.
+$ jack_connect metro:120_bpm ffmpeg:input_1
+
+ +

For more information read: +http://jackaudio.org/ +

+ +

3.8 lavfi

+ +

Libavfilter input virtual device. +

+

This input device reads data from the open output pads of a libavfilter +filtergraph. +

+

For each filtergraph open output, the input device will create a +corresponding stream which is mapped to the generated output. Currently +only video data is supported. The filtergraph is specified through the +option ‘graph’. +

+ +

3.8.1 Options

+ +
+
graph
+

Specify the filtergraph to use as input. Each video open output must be +labelled by a unique string of the form "outN", where N is a +number starting from 0 corresponding to the mapped input stream +generated by the device. +The first unlabelled output is automatically assigned to the "out0" +label, but all the others need to be specified explicitly. +

+

If not specified defaults to the filename specified for the input +device. +

+
+
graph_file
+

Set the filename of the filtergraph to be read and sent to the other +filters. Syntax of the filtergraph is the same as the one specified by +the option graph. +

+
+
+ + +

3.8.2 Examples

+ +
    +
  • +Create a color video stream and play it back with ffplay: +
     
    ffplay -f lavfi -graph "color=c=pink [out0]" dummy
    +
    + +
  • +As the previous example, but use filename for specifying the graph +description, and omit the "out0" label: +
     
    ffplay -f lavfi color=c=pink
    +
    + +
  • +Create three different video test filtered sources and play them: +
     
    ffplay -f lavfi -graph "testsrc [out0]; testsrc,hflip [out1]; testsrc,negate [out2]" test3
    +
    + +
  • +Read an audio stream from a file using the amovie source and play it +back with ffplay: +
     
    ffplay -f lavfi "amovie=test.wav"
    +
    + +
  • +Read an audio stream and a video stream and play it back with +ffplay: +
     
    ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
    +
    + +
+ + +

3.9 libdc1394

+ +

IIDC1394 input device, based on libdc1394 and libraw1394. +

+ +

3.10 openal

+ +

The OpenAL input device provides audio capture on all systems with a +working OpenAL 1.1 implementation. +

+

To enable this input device during configuration, you need OpenAL +headers and libraries installed on your system, and need to configure +FFmpeg with --enable-openal. +

+

OpenAL headers and libraries should be provided as part of your OpenAL +implementation, or as an additional download (an SDK). Depending on your +installation you may need to specify additional flags via the +--extra-cflags and --extra-ldflags for allowing the build +system to locate the OpenAL headers and libraries. +

+

An incomplete list of OpenAL implementations follows: +

+
+
Creative
+

The official Windows implementation, providing hardware acceleration +with supported devices and software fallback. +See http://openal.org/. +

+
OpenAL Soft
+

Portable, open source (LGPL) software implementation. Includes +backends for the most common sound APIs on the Windows, Linux, +Solaris, and BSD operating systems. +See http://kcat.strangesoft.net/openal.html. +

+
Apple
+

OpenAL is part of Core Audio, the official Mac OS X Audio interface. +See http://developer.apple.com/technologies/mac/audio-and-video.html +

+
+ +

This device allows one to capture from an audio input device handled +through OpenAL. +

+

You need to specify the name of the device to capture in the provided +filename. If the empty string is provided, the device will +automatically select the default device. You can get the list of the +supported devices by using the option list_devices. +

+ +

3.10.1 Options

+ +
+
channels
+

Set the number of channels in the captured audio. Only the values +‘1’ (monaural) and ‘2’ (stereo) are currently supported. +Defaults to ‘2’. +

+
+
sample_size
+

Set the sample size (in bits) of the captured audio. Only the values +‘8’ and ‘16’ are currently supported. Defaults to +‘16’. +

+
+
sample_rate
+

Set the sample rate (in Hz) of the captured audio. +Defaults to ‘44.1k’. +

+
+
list_devices
+

If set to ‘true’, print a list of devices and exit. +Defaults to ‘false’. +

+
+
+ + +

3.10.2 Examples

+ +

Print the list of OpenAL supported devices and exit: +

 
$ ffmpeg -list_devices true -f openal -i dummy out.ogg
+
+ +

Capture from the OpenAL device ‘DR-BT101 via PulseAudio’: +

 
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out.ogg
+
+ +

Capture from the default device (note the empty string '' as filename): +

 
$ ffmpeg -f openal -i '' out.ogg
+
+ +

Capture from two devices simultaneously, writing to two different files, +within the same ffmpeg command: +

 
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out1.ogg -f openal -i 'ALSA Default' out2.ogg
+
+

Note: not all OpenAL implementations support multiple simultaneous capture - +try the latest OpenAL Soft if the above does not work. +

+ +

3.11 oss

+ +

Open Sound System input device. +

+

The filename to provide to the input device is the device node +representing the OSS input device, and is usually set to +‘/dev/dsp’. +

+

For example to grab from ‘/dev/dsp’ using ffmpeg use the +command: +

 
ffmpeg -f oss -i /dev/dsp /tmp/oss.wav
+
+ +

For more information about OSS see: +http://manuals.opensound.com/usersguide/dsp.html +

+ +

3.12 pulse

+ +

PulseAudio input device. +

+

To enable this output device you need to configure FFmpeg with --enable-libpulse. +

+

The filename to provide to the input device is a source device or the +string "default" +

+

To list the PulseAudio source devices and their properties you can invoke +the command pactl list sources. +

+

More information about PulseAudio can be found on http://www.pulseaudio.org. +

+ +

3.12.1 Options

+
+
server
+

Connect to a specific PulseAudio server, specified by an IP address. +Default server is used when not provided. +

+
+
name
+

Specify the application name PulseAudio will use when showing active clients, +by default it is the LIBAVFORMAT_IDENT string. +

+
+
stream_name
+

Specify the stream name PulseAudio will use when showing active streams, +by default it is "record". +

+
+
sample_rate
+

Specify the samplerate in Hz, by default 48kHz is used. +

+
+
channels
+

Specify the channels in use, by default 2 (stereo) is set. +

+
+
frame_size
+

Specify the number of bytes per frame, by default it is set to 1024. +

+
+
fragment_size
+

Specify the minimal buffering fragment in PulseAudio, it will affect the +audio latency. By default it is unset. +

+
+ + +

3.12.2 Examples

+

Record a stream from default device: +

 
ffmpeg -f pulse -i default /tmp/pulse.wav
+
+ + +

3.13 sndio

+ +

sndio input device. +

+

To enable this input device during configuration you need libsndio +installed on your system. +

+

The filename to provide to the input device is the device node +representing the sndio input device, and is usually set to +‘/dev/audio0’. +

+

For example to grab from ‘/dev/audio0’ using ffmpeg use the +command: +

 
ffmpeg -f sndio -i /dev/audio0 /tmp/oss.wav
+
+ + +

3.14 video4linux2, v4l2

+ +

Video4Linux2 input video device. +

+

"v4l2" can be used as alias for "video4linux2". +

+

If FFmpeg is built with v4l-utils support (by using the +--enable-libv4l2 configure option), it is possible to use it with the +-use_libv4l2 input device option. +

+

The name of the device to grab is a file device node, usually Linux +systems tend to automatically create such nodes when the device +(e.g. an USB webcam) is plugged into the system, and has a name of the +kind ‘/dev/videoN’, where N is a number associated to +the device. +

+

Video4Linux2 devices usually support a limited set of +widthxheight sizes and frame rates. You can check which are +supported using -list_formats all for Video4Linux2 devices. +Some devices, like TV cards, support one or more standards. It is possible +to list all the supported standards using -list_standards all. +

+

The time base for the timestamps is 1 microsecond. Depending on the kernel +version and configuration, the timestamps may be derived from the real time +clock (origin at the Unix Epoch) or the monotonic clock (origin usually at +boot time, unaffected by NTP or manual changes to the clock). The +‘-timestamps abs’ or ‘-ts abs’ option can be used to force +conversion into the real time clock. +

+

Some usage examples of the video4linux2 device with ffmpeg +and ffplay: +

    +
  • +Grab and show the input of a video4linux2 device: +
     
    ffplay -f video4linux2 -framerate 30 -video_size hd720 /dev/video0
    +
    + +
  • +Grab and record the input of a video4linux2 device, leave the +frame rate and size as previously set: +
     
    ffmpeg -f video4linux2 -input_format mjpeg -i /dev/video0 out.mpeg
    +
    +
+ +

For more information about Video4Linux, check http://linuxtv.org/. +

+ +

3.14.1 Options

+ +
+
standard
+

Set the standard. Must be the name of a supported standard. To get a +list of the supported standards, use the ‘list_standards’ +option. +

+
+
channel
+

Set the input channel number. Default to -1, which means using the +previously selected channel. +

+
+
video_size
+

Set the video frame size. The argument must be a string in the form +WIDTHxHEIGHT or a valid size abbreviation. +

+
+
pixel_format
+

Select the pixel format (only valid for raw video input). +

+
+
input_format
+

Set the preferred pixel format (for raw video) or a codec name. +This option allows one to select the input format, when several are +available. +

+
+
framerate
+

Set the preferred video frame rate. +

+
+
list_formats
+

List available formats (supported pixel formats, codecs, and frame +sizes) and exit. +

+

Available values are: +

+
all
+

Show all available (compressed and non-compressed) formats. +

+
+
raw
+

Show only raw video (non-compressed) formats. +

+
+
compressed
+

Show only compressed formats. +

+
+ +
+
list_standards
+

List supported standards and exit. +

+

Available values are: +

+
all
+

Show all supported standards. +

+
+ +
+
timestamps, ts
+

Set type of timestamps for grabbed frames. +

+

Available values are: +

+
default
+

Use timestamps from the kernel. +

+
+
abs
+

Use absolute timestamps (wall clock). +

+
+
mono2abs
+

Force conversion from monotonic to absolute timestamps. +

+
+ +

Default value is default. +

+
+ + +

3.15 vfwcap

+ +

VfW (Video for Windows) capture input device. +

+

The filename passed as input is the capture driver number, ranging from +0 to 9. You may use "list" as filename to print a list of drivers. Any +other filename will be interpreted as device number 0. +

+ +

3.16 x11grab

+ +

X11 video input device. +

+

This device allows one to capture a region of an X11 display. +

+

The filename passed as input has the syntax: +

 
[hostname]:display_number.screen_number[+x_offset,y_offset]
+
+ +

hostname:display_number.screen_number specifies the +X11 display name of the screen to grab from. hostname can be +omitted, and defaults to "localhost". The environment variable +DISPLAY contains the default display name. +

+

x_offset and y_offset specify the offsets of the grabbed +area with respect to the top-left border of the X11 screen. They +default to 0. +

+

Check the X11 documentation (e.g. man X) for more detailed information. +

+

Use the dpyinfo program for getting basic information about the +properties of your X11 display (e.g. grep for "name" or "dimensions"). +

+

For example to grab from ‘:0.0’ using ffmpeg: +

 
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0 out.mpg
+
+ +

Grab at position 10,20: +

 
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
+
+ + +

3.16.1 Options

+ +
+
draw_mouse
+

Specify whether to draw the mouse pointer. A value of 0 specify +not to draw the pointer. Default value is 1. +

+
+
follow_mouse
+

Make the grabbed area follow the mouse. The argument can be +centered or a number of pixels PIXELS. +

+

When it is specified with "centered", the grabbing region follows the mouse +pointer and keeps the pointer at the center of region; otherwise, the region +follows only when the mouse pointer reaches within PIXELS (greater than +zero) to the edge of region. +

+

For example: +

 
ffmpeg -f x11grab -follow_mouse centered -framerate 25 -video_size cif -i :0.0 out.mpg
+
+ +

To follow only when the mouse pointer reaches within 100 pixels to edge: +

 
ffmpeg -f x11grab -follow_mouse 100 -framerate 25 -video_size cif -i :0.0 out.mpg
+
+ +
+
framerate
+

Set the grabbing frame rate. Default value is ntsc, +corresponding to a frame rate of 30000/1001. +

+
+
show_region
+

Show grabbed region on screen. +

+

If show_region is specified with 1, then the grabbing +region will be indicated on screen. With this option, it is easy to +know what is being grabbed if only a portion of the screen is grabbed. +

+

For example: +

 
ffmpeg -f x11grab -show_region 1 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
+
+ +

With follow_mouse: +

 
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_size cif -i :0.0 out.mpg
+
+ +
+
video_size
+

Set the video frame size. Default value is vga. +

+
+ + +

4. Output Devices

+ +

Output devices are configured elements in FFmpeg that can write +multimedia data to an output device attached to your system. +

+

When you configure your FFmpeg build, all the supported output devices +are enabled by default. You can list all available ones using the +configure option "–list-outdevs". +

+

You can disable all the output devices using the configure option +"–disable-outdevs", and selectively enable an output device using the +option "–enable-outdev=OUTDEV", or you can disable a particular +input device using the option "–disable-outdev=OUTDEV". +

+

The option "-formats" of the ff* tools will display the list of +enabled output devices (amongst the muxers). +

+

A description of the currently available output devices follows. +

+ +

4.1 alsa

+ +

ALSA (Advanced Linux Sound Architecture) output device. +

+ +

4.1.1 Examples

+ +
    +
  • +Play a file on default ALSA device: +
     
    ffmpeg -i INPUT -f alsa default
    +
    + +
  • +Play a file on soundcard 1, audio device 7: +
     
    ffmpeg -i INPUT -f alsa hw:1,7
    +
    +
+ + +

4.2 caca

+ +

CACA output device. +

+

This output device allows one to show a video stream in CACA window. +Only one CACA window is allowed per application, so you can +have only one instance of this output device in an application. +

+

To enable this output device you need to configure FFmpeg with +--enable-libcaca. +libcaca is a graphics library that outputs text instead of pixels. +

+

For more information about libcaca, check: +http://caca.zoy.org/wiki/libcaca +

+ +

4.2.1 Options

+ +
+
window_title
+

Set the CACA window title, if not specified default to the filename +specified for the output device. +

+
+
window_size
+

Set the CACA window size, can be a string of the form +widthxheight or a video size abbreviation. +If not specified it defaults to the size of the input video. +

+
+
driver
+

Set display driver. +

+
+
algorithm
+

Set dithering algorithm. Dithering is necessary +because the picture being rendered has usually far more colours than +the available palette. +The accepted values are listed with -list_dither algorithms. +

+
+
antialias
+

Set antialias method. Antialiasing smoothens the rendered +image and avoids the commonly seen staircase effect. +The accepted values are listed with -list_dither antialiases. +

+
+
charset
+

Set which characters are going to be used when rendering text. +The accepted values are listed with -list_dither charsets. +

+
+
color
+

Set color to be used when rendering text. +The accepted values are listed with -list_dither colors. +

+
+
list_drivers
+

If set to ‘true’, print a list of available drivers and exit. +

+
+
list_dither
+

List available dither options related to the argument. +The argument must be one of algorithms, antialiases, +charsets, colors. +

+
+ + +

4.2.2 Examples

+ +
    +
  • +The following command shows the ffmpeg output in a +CACA window, forcing its size to 80x25: +
     
    ffmpeg -i INPUT -vcodec rawvideo -pix_fmt rgb24 -window_size 80x25 -f caca -
    +
    + +
  • +Show the list of available drivers and exit: +
     
    ffmpeg -i INPUT -pix_fmt rgb24 -f caca -list_drivers true -
    +
    + +
  • +Show the list of available dither colors and exit: +
     
    ffmpeg -i INPUT -pix_fmt rgb24 -f caca -list_dither colors -
    +
    +
+ + +

4.3 decklink

+ +

The decklink output device provides playback capabilities for Blackmagic +DeckLink devices. +

+

To enable this output device, you need the Blackmagic DeckLink SDK and you +need to configure with the appropriate --extra-cflags +and --extra-ldflags. +On Windows, you need to run the IDL files through widl. +

+

DeckLink is very picky about the formats it supports. Pixel format is always +uyvy422, framerate and video size must be determined for your device with +-list_formats 1. Audio sample rate is always 48 kHz. +

+ +

4.3.1 Options

+ +
+
list_devices
+

If set to ‘true’, print a list of devices and exit. +Defaults to ‘false’. +

+
+
list_formats
+

If set to ‘true’, print a list of supported formats and exit. +Defaults to ‘false’. +

+
+
preroll
+

Amount of time to preroll video in seconds. +Defaults to ‘0.5’. +

+
+
+ + +

4.3.2 Examples

+ +
    +
  • +List output devices: +
     
    ffmpeg -i test.avi -f decklink -list_devices 1 dummy
    +
    + +
  • +List supported formats: +
     
    ffmpeg -i test.avi -f decklink -list_formats 1 'DeckLink Mini Monitor'
    +
    + +
  • +Play video clip: +
     
    ffmpeg -i test.avi -f decklink -pix_fmt uyvy422 'DeckLink Mini Monitor'
    +
    + +
  • +Play video clip with non-standard framerate or video size: +
     
    ffmpeg -i test.avi -f decklink -pix_fmt uyvy422 -s 720x486 -r 24000/1001 'DeckLink Mini Monitor'
    +
    + +
+ + +

4.4 fbdev

+ +

Linux framebuffer output device. +

+

The Linux framebuffer is a graphic hardware-independent abstraction +layer to show graphics on a computer monitor, typically on the +console. It is accessed through a file device node, usually +‘/dev/fb0’. +

+

For more detailed information read the file +‘Documentation/fb/framebuffer.txt’ included in the Linux source tree. +

+ +

4.4.1 Options

+
+
xoffset
+
yoffset
+

Set x/y coordinate of top left corner. Default is 0. +

+
+ + +

4.4.2 Examples

+

Play a file on framebuffer device ‘/dev/fb0’. +Required pixel format depends on current framebuffer settings. +

 
ffmpeg -re -i INPUT -vcodec rawvideo -pix_fmt bgra -f fbdev /dev/fb0
+
+ +

See also http://linux-fbdev.sourceforge.net/, and fbset(1). +

+ +

4.5 opengl

+

OpenGL output device. +

+

To enable this output device you need to configure FFmpeg with --enable-opengl. +

+

This output device allows one to render to OpenGL context. +Context may be provided by application or default SDL window is created. +

+

When device renders to external context, application must implement handlers for following messages: +AV_CTL_MESSAGE_CREATE_WINDOW_BUFFER - create OpenGL context on current thread. +AV_CTL_MESSAGE_PREPARE_WINDOW_BUFFER - make OpenGL context current. +AV_CTL_MESSAGE_DISPLAY_WINDOW_BUFFER - swap buffers. +AV_CTL_MESSAGE_DESTROY_WINDOW_BUFFER - destroy OpenGL context. +Application is also required to inform a device about current resolution by sending AV_DEVICE_WINDOW_RESIZED message. +

+ +

4.5.1 Options

+
+
background
+

Set background color. Black is the default. +

+
no_window
+

Disables default SDL window when set to non-zero value. +Application must provide OpenGL context and both window_size_cb and window_swap_buffers_cb callbacks when set. +

+
window_title
+

Set the SDL window title, if not specified default to the filename specified for the output device. +Ignored when ‘no_window’ is set. +

+
+
+ + +

4.5.2 Examples

+

Play a file on SDL window using OpenGL rendering: +

 
ffmpeg  -i INPUT -f opengl "window title"
+
+ + +

4.6 oss

+ +

OSS (Open Sound System) output device. +

+ +

4.7 pulse

+ +

PulseAudio output device. +

+

To enable this output device you need to configure FFmpeg with --enable-libpulse. +

+

More information about PulseAudio can be found on http://www.pulseaudio.org +

+ +

4.7.1 Options

+
+
server
+

Connect to a specific PulseAudio server, specified by an IP address. +Default server is used when not provided. +

+
+
name
+

Specify the application name PulseAudio will use when showing active clients, +by default it is the LIBAVFORMAT_IDENT string. +

+
+
stream_name
+

Specify the stream name PulseAudio will use when showing active streams, +by default it is set to the specified output name. +

+
+
device
+

Specify the device to use. Default device is used when not provided. +List of output devices can be obtained with command pactl list sinks. +

+
+
buffer_size
+
buffer_duration
+

Control the size and duration of the PulseAudio buffer. A small buffer +gives more control, but requires more frequent updates. +

+

buffer_size’ specifies size in bytes while +‘buffer_duration’ specifies duration in milliseconds. +

+

When both options are provided then the highest value is used +(duration is recalculated to bytes using stream parameters). If they +are set to 0 (which is default), the device will use the default +PulseAudio duration value. By default PulseAudio set buffer duration +to around 2 seconds. +

+
+ + +

4.7.2 Examples

+

Play a file on default device on default server: +

 
ffmpeg  -i INPUT -f pulse "stream name"
+
+ + +

4.8 sdl

+ +

SDL (Simple DirectMedia Layer) output device. +

+

This output device allows one to show a video stream in an SDL +window. Only one SDL window is allowed per application, so you can +have only one instance of this output device in an application. +

+

To enable this output device you need libsdl installed on your system +when configuring your build. +

+

For more information about SDL, check: +http://www.libsdl.org/ +

+ +

4.8.1 Options

+ +
+
window_title
+

Set the SDL window title, if not specified default to the filename +specified for the output device. +

+
+
icon_title
+

Set the name of the iconified SDL window, if not specified it is set +to the same value of window_title. +

+
+
window_size
+

Set the SDL window size, can be a string of the form +widthxheight or a video size abbreviation. +If not specified it defaults to the size of the input video, +downscaled according to the aspect ratio. +

+
+
window_fullscreen
+

Set fullscreen mode when non-zero value is provided. +Default value is zero. +

+
+ + +

4.8.2 Interactive commands

+ +

The window created by the device can be controlled through the +following interactive commands. +

+
+
<q, ESC>
+

Quit the device immediately. +

+
+ + +

4.8.3 Examples

+ +

The following command shows the ffmpeg output in an +SDL window, forcing its size to the qcif format: +

 
ffmpeg -i INPUT -vcodec rawvideo -pix_fmt yuv420p -window_size qcif -f sdl "SDL output"
+
+ + +

4.9 sndio

+ +

sndio audio output device. +

+ +

4.10 xv

+ +

XV (XVideo) output device. +

+

This output device allows one to show a video stream in a X Window System +window. +

+ +

4.10.1 Options

+ +
+
display_name
+

Specify the hardware display name, which determines the display and +communications domain to be used. +

+

The display name or DISPLAY environment variable can be a string in +the format hostname[:number[.screen_number]]. +

+

hostname specifies the name of the host machine on which the +display is physically attached. number specifies the number of +the display server on that host machine. screen_number specifies +the screen to be used on that server. +

+

If unspecified, it defaults to the value of the DISPLAY environment +variable. +

+

For example, dual-headed:0.1 would specify screen 1 of display +0 on the machine named “dual-headed”. +

+

Check the X11 specification for more detailed information about the +display name format. +

+
+
window_size
+

Set the created window size, can be a string of the form +widthxheight or a video size abbreviation. If not +specified it defaults to the size of the input video. +

+
+
window_x
+
window_y
+

Set the X and Y window offsets for the created window. They are both +set to 0 by default. The values may be ignored by the window manager. +

+
+
window_title
+

Set the window title, if not specified default to the filename +specified for the output device. +

+
+ +

For more information about XVideo see http://www.x.org/. +

+ +

4.10.2 Examples

+ +
    +
  • +Decode, display and encode video input with ffmpeg at the +same time: +
     
    ffmpeg -i INPUT OUTPUT -f xv display
    +
    + +
  • +Decode and display the input video to multiple X11 windows: +
     
    ffmpeg -i INPUT -f xv normal -vf negate -f xv negated
    +
    +
+ + + +

5. See Also

+ +

ffmpeg, ffplay, ffprobe, ffserver, +libavdevice +

+ + +

6. Authors

+ +

The FFmpeg developers. +

+

For details about the authorship, see the Git history of the project +(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command +git log in the FFmpeg source directory, or browsing the +online repository at http://source.ffmpeg.org. +

+

Maintainers for the specific components are listed in the file +‘MAINTAINERS’ in the source code tree. +

+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/ffmpeg-filters.html b/dependencies64/ffmpeg/doc/ffmpeg-filters.html new file mode 100644 index 000000000..1c161218b --- /dev/null +++ b/dependencies64/ffmpeg/doc/ffmpeg-filters.html @@ -0,0 +1,11807 @@ + + + + + +FFmpeg documentation : FFmpeg Filters + + + + + + + + + + +
+
+ + +

FFmpeg Filters Documentation

+ + +

Table of Contents

+
+ + +
+ + +

1. Description

+ +

This document describes filters, sources, and sinks provided by the +libavfilter library. +

+ + +

2. Filtering Introduction

+ +

Filtering in FFmpeg is enabled through the libavfilter library. +

+

In libavfilter, a filter can have multiple inputs and multiple +outputs. +To illustrate the sorts of things that are possible, we consider the +following filtergraph. +

+
 
                [main]
+input --> split ---------------------> overlay --> output
+            |                             ^
+            |[tmp]                  [flip]|
+            +-----> crop --> vflip -------+
+
+ +

This filtergraph splits the input stream in two streams, sends one +stream through the crop filter and the vflip filter before merging it +back with the other stream by overlaying it on top. You can use the +following command to achieve this: +

+
 
ffmpeg -i INPUT -vf "split [main][tmp]; [tmp] crop=iw:ih/2:0:0, vflip [flip]; [main][flip] overlay=0:H/2" OUTPUT
+
+ +

The result will be that in output the top half of the video is mirrored +onto the bottom half. +

+

Filters in the same linear chain are separated by commas, and distinct +linear chains of filters are separated by semicolons. In our example, +crop,vflip are in one linear chain, split and +overlay are separately in another. The points where the linear +chains join are labelled by names enclosed in square brackets. In the +example, the split filter generates two outputs that are associated to +the labels [main] and [tmp]. +

+

The stream sent to the second output of split, labelled as +[tmp], is processed through the crop filter, which crops +away the lower half part of the video, and then vertically flipped. The +overlay filter takes in input the first unchanged output of the +split filter (which was labelled as [main]), and overlay on its +lower half the output generated by the crop,vflip filterchain. +

+

Some filters take in input a list of parameters: they are specified +after the filter name and an equal sign, and are separated from each other +by a colon. +

+

There exist so-called source filters that do not have an +audio/video input, and sink filters that will not have audio/video +output. +

+ + +

3. graph2dot

+ +

The ‘graph2dot’ program included in the FFmpeg ‘tools’ +directory can be used to parse a filtergraph description and issue a +corresponding textual representation in the dot language. +

+

Invoke the command: +

 
graph2dot -h
+
+ +

to see how to use ‘graph2dot’. +

+

You can then pass the dot description to the ‘dot’ program (from +the graphviz suite of programs) and obtain a graphical representation +of the filtergraph. +

+

For example the sequence of commands: +

 
echo GRAPH_DESCRIPTION | \
+tools/graph2dot -o graph.tmp && \
+dot -Tpng graph.tmp -o graph.png && \
+display graph.png
+
+ +

can be used to create and display an image representing the graph +described by the GRAPH_DESCRIPTION string. Note that this string must be +a complete self-contained graph, with its inputs and outputs explicitly defined. +For example if your command line is of the form: +

 
ffmpeg -i infile -vf scale=640:360 outfile
+
+

your GRAPH_DESCRIPTION string will need to be of the form: +

 
nullsrc,scale=640:360,nullsink
+
+

you may also need to set the nullsrc parameters and add a format +filter in order to simulate a specific input file. +

+ + +

4. Filtergraph description

+ +

A filtergraph is a directed graph of connected filters. It can contain +cycles, and there can be multiple links between a pair of +filters. Each link has one input pad on one side connecting it to one +filter from which it takes its input, and one output pad on the other +side connecting it to the one filter accepting its output. +

+

Each filter in a filtergraph is an instance of a filter class +registered in the application, which defines the features and the +number of input and output pads of the filter. +

+

A filter with no input pads is called a "source", a filter with no +output pads is called a "sink". +

+

+

+

4.1 Filtergraph syntax

+ +

A filtergraph can be represented using a textual representation, which is +recognized by the ‘-filter’/‘-vf’ and ‘-filter_complex’ +options in ffmpeg and ‘-vf’ in ffplay, and by the +avfilter_graph_parse()/avfilter_graph_parse2() function defined in +‘libavfilter/avfilter.h’. +

+

A filterchain consists of a sequence of connected filters, each one +connected to the previous one in the sequence. A filterchain is +represented by a list of ","-separated filter descriptions. +

+

A filtergraph consists of a sequence of filterchains. A sequence of +filterchains is represented by a list of ";"-separated filterchain +descriptions. +

+

A filter is represented by a string of the form: +[in_link_1]...[in_link_N]filter_name=arguments[out_link_1]...[out_link_M] +

+

filter_name is the name of the filter class of which the +described filter is an instance of, and has to be the name of one of +the filter classes registered in the program. +The name of the filter class is optionally followed by a string +"=arguments". +

+

arguments is a string which contains the parameters used to +initialize the filter instance. It may have one of the following forms: +

    +
  • +A ’:’-separated list of key=value pairs. + +
  • +A ’:’-separated list of value. In this case, the keys are assumed to be +the option names in the order they are declared. E.g. the fade filter +declares three options in this order – ‘type’, ‘start_frame’ and +‘nb_frames’. Then the parameter list in:0:30 means that the value +in is assigned to the option ‘type’, 0 to +‘start_frame’ and 30 to ‘nb_frames’. + +
  • +A ’:’-separated list of mixed direct value and long key=value +pairs. The direct value must precede the key=value pairs, and +follow the same constraints order of the previous point. The following +key=value pairs can be set in any preferred order. + +
+ +

If the option value itself is a list of items (e.g. the format filter +takes a list of pixel formats), the items in the list are usually separated by +’|’. +

+

The list of arguments can be quoted using the character "’" as initial +and ending mark, and the character ’\’ for escaping the characters +within the quoted text; otherwise the argument string is considered +terminated when the next special character (belonging to the set +"[]=;,") is encountered. +

+

The name and arguments of the filter are optionally preceded and +followed by a list of link labels. +A link label allows one to name a link and associate it to a filter output +or input pad. The preceding labels in_link_1 +... in_link_N, are associated to the filter input pads, +the following labels out_link_1 ... out_link_M, are +associated to the output pads. +

+

When two link labels with the same name are found in the +filtergraph, a link between the corresponding input and output pad is +created. +

+

If an output pad is not labelled, it is linked by default to the first +unlabelled input pad of the next filter in the filterchain. +For example in the filterchain: +

 
nullsrc, split[L1], [L2]overlay, nullsink
+
+

the split filter instance has two output pads, and the overlay filter +instance two input pads. The first output pad of split is labelled +"L1", the first input pad of overlay is labelled "L2", and the second +output pad of split is linked to the second input pad of overlay, +which are both unlabelled. +

+

In a complete filterchain all the unlabelled filter input and output +pads must be connected. A filtergraph is considered valid if all the +filter input and output pads of all the filterchains are connected. +

+

Libavfilter will automatically insert scale filters where format +conversion is required. It is possible to specify swscale flags +for those automatically inserted scalers by prepending +sws_flags=flags; +to the filtergraph description. +

+

Follows a BNF description for the filtergraph syntax: +

 
NAME             ::= sequence of alphanumeric characters and '_'
+LINKLABEL        ::= "[" NAME "]"
+LINKLABELS       ::= LINKLABEL [LINKLABELS]
+FILTER_ARGUMENTS ::= sequence of chars (eventually quoted)
+FILTER           ::= [LINKLABELS] NAME ["=" FILTER_ARGUMENTS] [LINKLABELS]
+FILTERCHAIN      ::= FILTER [,FILTERCHAIN]
+FILTERGRAPH      ::= [sws_flags=flags;] FILTERCHAIN [;FILTERGRAPH]
+
+ + +

4.2 Notes on filtergraph escaping

+ +

Filtergraph description composition entails several levels of +escaping. See (ffmpeg-utils)quoting_and_escaping for more +information about the employed escaping procedure. +

+

A first level escaping affects the content of each filter option +value, which may contain the special character : used to +separate values, or one of the escaping characters \'. +

+

A second level escaping affects the whole filter description, which +may contain the escaping characters \' or the special +characters [],; used by the filtergraph description. +

+

Finally, when you specify a filtergraph on a shell commandline, you +need to perform a third level escaping for the shell special +characters contained within it. +

+

For example, consider the following string to be embedded in +the drawtext filter description ‘text’ value: +

 
this is a 'string': may contain one, or more, special characters
+
+ +

This string contains the ' special escaping character, and the +: special character, so it needs to be escaped in this way: +

 
text=this is a \'string\'\: may contain one, or more, special characters
+
+ +

A second level of escaping is required when embedding the filter +description in a filtergraph description, in order to escape all the +filtergraph special characters. Thus the example above becomes: +

 
drawtext=text=this is a \\\'string\\\'\\: may contain one\, or more\, special characters
+
+

(note that in addition to the \' escaping special characters, +also , needs to be escaped). +

+

Finally an additional level of escaping is needed when writing the +filtergraph description in a shell command, which depends on the +escaping rules of the adopted shell. For example, assuming that +\ is special and needs to be escaped with another \, the +previous string will finally result in: +

 
-vf "drawtext=text=this is a \\\\\\'string\\\\\\'\\\\: may contain one\\, or more\\, special characters"
+
+ + +

5. Timeline editing

+ +

Some filters support a generic ‘enable’ option. For the filters +supporting timeline editing, this option can be set to an expression which is +evaluated before sending a frame to the filter. If the evaluation is non-zero, +the filter will be enabled, otherwise the frame will be sent unchanged to the +next filter in the filtergraph. +

+

The expression accepts the following values: +

+
t
+

timestamp expressed in seconds, NAN if the input timestamp is unknown +

+
+
n
+

sequential number of the input frame, starting from 0 +

+
+
pos
+

the position in the file of the input frame, NAN if unknown +

+
+ +

Additionally, these filters support an ‘enable’ command that can be used +to re-define the expression. +

+

Like any other filtering option, the ‘enable’ option follows the same +rules. +

+

For example, to enable a blur filter (smartblur) from 10 seconds to 3 +minutes, and a curves filter starting at 3 seconds: +

 
smartblur = enable='between(t,10,3*60)',
+curves    = enable='gte(t,3)' : preset=cross_process
+
+ + + +

6. Audio Filters

+ +

When you configure your FFmpeg build, you can disable any of the +existing filters using --disable-filters. +The configure output will show the audio filters included in your +build. +

+

Below is a description of the currently available audio filters. +

+ +

6.1 aconvert

+ +

Convert the input audio format to the specified formats. +

+

This filter is deprecated. Use aformat instead. +

+

The filter accepts a string of the form: +"sample_format:channel_layout". +

+

sample_format specifies the sample format, and can be a string or the +corresponding numeric value defined in ‘libavutil/samplefmt.h’. Use ’p’ +suffix for a planar sample format. +

+

channel_layout specifies the channel layout, and can be a string +or the corresponding number value defined in ‘libavutil/channel_layout.h’. +

+

The special parameter "auto", signifies that the filter will +automatically select the output format depending on the output filter. +

+ +

6.1.1 Examples

+ +
    +
  • +Convert input to float, planar, stereo: +
     
    aconvert=fltp:stereo
    +
    + +
  • +Convert input to unsigned 8-bit, automatically select out channel layout: +
     
    aconvert=u8:auto
    +
    +
+ + +

6.2 adelay

+ +

Delay one or more audio channels. +

+

Samples in delayed channel are filled with silence. +

+

The filter accepts the following option: +

+
+
delays
+

Set list of delays in milliseconds for each channel separated by ’|’. +At least one delay greater than 0 should be provided. +Unused delays will be silently ignored. If number of given delays is +smaller than number of channels all remaining channels will not be delayed. +

+
+ + +

6.2.1 Examples

+ +
    +
  • +Delay first channel by 1.5 seconds, the third channel by 0.5 seconds and leave +the second channel (and any other channels that may be present) unchanged. +
     
    adelay=1500|0|500
    +
    +
+ + +

6.3 aecho

+ +

Apply echoing to the input audio. +

+

Echoes are reflected sound and can occur naturally amongst mountains +(and sometimes large buildings) when talking or shouting; digital echo +effects emulate this behaviour and are often used to help fill out the +sound of a single instrument or vocal. The time difference between the +original signal and the reflection is the delay, and the +loudness of the reflected signal is the decay. +Multiple echoes can have different delays and decays. +

+

A description of the accepted parameters follows. +

+
+
in_gain
+

Set input gain of reflected signal. Default is 0.6. +

+
+
out_gain
+

Set output gain of reflected signal. Default is 0.3. +

+
+
delays
+

Set list of time intervals in milliseconds between original signal and reflections +separated by ’|’. Allowed range for each delay is (0 - 90000.0]. +Default is 1000. +

+
+
decays
+

Set list of loudnesses of reflected signals separated by ’|’. +Allowed range for each decay is (0 - 1.0]. +Default is 0.5. +

+
+ + +

6.3.1 Examples

+ +
    +
  • +Make it sound as if there are twice as many instruments as are actually playing: +
     
    aecho=0.8:0.88:60:0.4
    +
    + +
+If delay is very short, then it sounds like a (metallic) robot playing music: +
     
    aecho=0.8:0.88:6:0.4
    +
    + +
  • +A longer delay will sound like an open air concert in the mountains: +
     
    aecho=0.8:0.9:1000:0.3
    +
    + +
  • +Same as above but with one more mountain: +
     
    aecho=0.8:0.9:1000|1800:0.3|0.25
    +
    +
+ + +

6.4 aeval

+ +

Modify an audio signal according to the specified expressions. +

+

This filter accepts one or more expressions (one for each channel), +which are evaluated and used to modify a corresponding audio signal. +

+

This filter accepts the following options: +

+
+
exprs
+

Set the ’|’-separated expressions list for each separate channel. If +the number of input channels is greater than the number of +expressions, the last specified expression is used for the remaining +output channels. +

+
+
channel_layout, c
+

Set output channel layout. If not specified, the channel layout is +specified by the number of expressions. If set to ‘same’, it will +use by default the same input channel layout. +

+
+ +

Each expression in exprs can contain the following constants and functions: +

+
+
ch
+

channel number of the current expression +

+
+
n
+

number of the evaluated sample, starting from 0 +

+
+
s
+

sample rate +

+
+
t
+

time of the evaluated sample expressed in seconds +

+
+
nb_in_channels
+
nb_out_channels
+

input and output number of channels +

+
+
val(CH)
+

the value of input channel with number CH +

+
+ +

Note: this filter is slow. For faster processing you should use a +dedicated filter. +

+ +

6.4.1 Examples

+ +
    +
  • +Half volume: +
     
    aeval=val(ch)/2:c=same
    +
    + +
  • +Invert phase of the second channel: +
     
    aeval=val(0)|-val(1)
    +
    +
+ + +

6.5 afade

+ +

Apply fade-in/out effect to input audio. +

+

A description of the accepted parameters follows. +

+
+
type, t
+

Specify the effect type, can be either in for fade-in, or +out for a fade-out effect. Default is in. +

+
+
start_sample, ss
+

Specify the number of the start sample for starting to apply the fade +effect. Default is 0. +

+
+
nb_samples, ns
+

Specify the number of samples for which the fade effect has to last. At +the end of the fade-in effect the output audio will have the same +volume as the input audio, at the end of the fade-out transition +the output audio will be silence. Default is 44100. +

+
+
start_time, st
+

Specify time for starting to apply the fade effect. Default is 0. +The accepted syntax is: +

 
[-]HH[:MM[:SS[.m...]]]
+[-]S+[.m...]
+
+

See also the function av_parse_time(). +If set this option is used instead of start_sample one. +

+
+
duration, d
+

Specify the duration for which the fade effect has to last. Default is 0. +The accepted syntax is: +

 
[-]HH[:MM[:SS[.m...]]]
+[-]S+[.m...]
+
+

See also the function av_parse_time(). +At the end of the fade-in effect the output audio will have the same +volume as the input audio, at the end of the fade-out transition +the output audio will be silence. +If set this option is used instead of nb_samples one. +

+
+
curve
+

Set curve for fade transition. +

+

It accepts the following values: +

+
tri
+

select triangular, linear slope (default) +

+
qsin
+

select quarter of sine wave +

+
hsin
+

select half of sine wave +

+
esin
+

select exponential sine wave +

+
log
+

select logarithmic +

+
par
+

select inverted parabola +

+
qua
+

select quadratic +

+
cub
+

select cubic +

+
squ
+

select square root +

+
cbr
+

select cubic root +

+
+
+
+ + +

6.5.1 Examples

+ +
    +
  • +Fade in first 15 seconds of audio: +
     
    afade=t=in:ss=0:d=15
    +
    + +
  • +Fade out last 25 seconds of a 900 seconds audio: +
     
    afade=t=out:st=875:d=25
    +
    +
+ +

+

+

6.6 aformat

+ +

Set output format constraints for the input audio. The framework will +negotiate the most appropriate format to minimize conversions. +

+

The filter accepts the following named parameters: +

+
sample_fmts
+

A ’|’-separated list of requested sample formats. +

+
+
sample_rates
+

A ’|’-separated list of requested sample rates. +

+
+
channel_layouts
+

A ’|’-separated list of requested channel layouts. +

+

See (ffmpeg-utils)channel layout syntax +for the required syntax. +

+
+ +

If a parameter is omitted, all values are allowed. +

+

For example to force the output to either unsigned 8-bit or signed 16-bit stereo: +

 
aformat=sample_fmts=u8|s16:channel_layouts=stereo
+
+ + +

6.7 allpass

+ +

Apply a two-pole all-pass filter with central frequency (in Hz) +frequency, and filter-width width. +An all-pass filter changes the audio’s frequency to phase relationship +without changing its frequency to amplitude relationship. +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set frequency in Hz. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Specify the band-width of a filter in width_type units. +

+
+ + +

6.8 amerge

+ +

Merge two or more audio streams into a single multi-channel stream. +

+

The filter accepts the following options: +

+
+
inputs
+

Set the number of inputs. Default is 2. +

+
+
+ +

If the channel layouts of the inputs are disjoint, and therefore compatible, +the channel layout of the output will be set accordingly and the channels +will be reordered as necessary. If the channel layouts of the inputs are not +disjoint, the output will have all the channels of the first input then all +the channels of the second input, in that order, and the channel layout of +the output will be the default value corresponding to the total number of +channels. +

+

For example, if the first input is in 2.1 (FL+FR+LF) and the second input +is FC+BL+BR, then the output will be in 5.1, with the channels in the +following order: a1, a2, b1, a3, b2, b3 (a1 is the first channel of the +first input, b1 is the first channel of the second input). +

+

On the other hand, if both inputs are in stereo, the output channels will be +in the default order: a1, a2, b1, b2, and the channel layout will be +arbitrarily set to 4.0, which may or may not be the expected value. +

+

All inputs must have the same sample rate, and format. +

+

If inputs do not have the same duration, the output will stop with the +shortest. +

+ +

6.8.1 Examples

+ +
    +
  • +Merge two mono files into a stereo stream: +
     
    amovie=left.wav [l] ; amovie=right.mp3 [r] ; [l] [r] amerge
    +
    + +
  • +Multiple merges assuming 1 video stream and 6 audio streams in ‘input.mkv’: +
     
    ffmpeg -i input.mkv -filter_complex "[0:1][0:2][0:3][0:4][0:5][0:6] amerge=inputs=6" -c:a pcm_s16le output.mkv
    +
    +
+ + +

6.9 amix

+ +

Mixes multiple audio inputs into a single output. +

+

For example +

 
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex amix=inputs=3:duration=first:dropout_transition=3 OUTPUT
+
+

will mix 3 input audio streams to a single output with the same duration as the +first input and a dropout transition time of 3 seconds. +

+

The filter accepts the following named parameters: +

+
inputs
+

Number of inputs. If unspecified, it defaults to 2. +

+
+
duration
+

How to determine the end-of-stream. +

+
longest
+

Duration of longest input. (default) +

+
+
shortest
+

Duration of shortest input. +

+
+
first
+

Duration of first input. +

+
+
+ +
+
dropout_transition
+

Transition time, in seconds, for volume renormalization when an input +stream ends. The default value is 2 seconds. +

+
+
+ + +

6.10 anull

+ +

Pass the audio source unchanged to the output. +

+ +

6.11 apad

+ +

Pad the end of an audio stream with silence; this can be used together with +-shortest to extend audio streams to the same length as the video stream. +

+ +

6.12 aphaser

+

Add a phasing effect to the input audio. +

+

A phaser filter creates a series of peaks and troughs in the frequency spectrum. +The position of the peaks and troughs are modulated so that they vary over time, creating a sweeping effect. +

+

A description of the accepted parameters follows. +

+
+
in_gain
+

Set input gain. Default is 0.4. +

+
+
out_gain
+

Set output gain. Default is 0.74 +

+
+
delay
+

Set delay in milliseconds. Default is 3.0. +

+
+
decay
+

Set decay. Default is 0.4. +

+
+
speed
+

Set modulation speed in Hz. Default is 0.5. +

+
+
type
+

Set modulation type. Default is triangular. +

+

It accepts the following values: +

+
triangular, t
+
sinusoidal, s
+
+
+
+ +

+

+

6.13 aresample

+ +

Resample the input audio to the specified parameters, using the +libswresample library. If none are specified then the filter will +automatically convert between its input and output. +

+

This filter is also able to stretch/squeeze the audio data to make it match +the timestamps or to inject silence / cut out audio to make it match the +timestamps, do a combination of both or do neither. +

+

The filter accepts the syntax +[sample_rate:]resampler_options, where sample_rate +expresses a sample rate and resampler_options is a list of +key=value pairs, separated by ":". See the +ffmpeg-resampler manual for the complete list of supported options. +

+ +

6.13.1 Examples

+ +
    +
  • +Resample the input audio to 44100Hz: +
     
    aresample=44100
    +
    + +
  • +Stretch/squeeze samples to the given timestamps, with a maximum of 1000 +samples per second compensation: +
     
    aresample=async=1000
    +
    +
+ + +

6.14 asetnsamples

+ +

Set the number of samples per each output audio frame. +

+

The last output packet may contain a different number of samples, as +the filter will flush all the remaining samples when the input audio +signals its end. +

+

The filter accepts the following options: +

+
+
nb_out_samples, n
+

Set the number of samples per each output audio frame. The number is +intended as the number of samples per each channel. +Default value is 1024. +

+
+
pad, p
+

If set to 1, the filter will pad the last audio frame with zeroes, so +that the last frame will contain the same number of samples as the +previous ones. Default value is 1. +

+
+ +

For example, to set the number of per-frame samples to 1234 and +disable padding for the last frame, use: +

 
asetnsamples=n=1234:p=0
+
+ + +

6.15 asetrate

+ +

Set the sample rate without altering the PCM data. +This will result in a change of speed and pitch. +

+

The filter accepts the following options: +

+
+
sample_rate, r
+

Set the output sample rate. Default is 44100 Hz. +

+
+ + +

6.16 ashowinfo

+ +

Show a line containing various information for each input audio frame. +The input audio is not modified. +

+

The shown line contains a sequence of key/value pairs of the form +key:value. +

+

A description of each shown parameter follows: +

+
+
n
+

sequential number of the input frame, starting from 0 +

+
+
pts
+

Presentation timestamp of the input frame, in time base units; the time base +depends on the filter input pad, and is usually 1/sample_rate. +

+
+
pts_time
+

presentation timestamp of the input frame in seconds +

+
+
pos
+

position of the frame in the input stream, -1 if this information is +unavailable and/or meaningless (for example in case of synthetic audio) +

+
+
fmt
+

sample format +

+
+
chlayout
+

channel layout +

+
+
rate
+

sample rate for the audio frame +

+
+
nb_samples
+

number of samples (per channel) in the frame +

+
+
checksum
+

Adler-32 checksum (printed in hexadecimal) of the audio data. For planar audio +the data is treated as if all the planes were concatenated. +

+
+
plane_checksums
+

A list of Adler-32 checksums for each data plane. +

+
+ + +

6.17 astats

+ +

Display time domain statistical information about the audio channels. +Statistics are calculated and displayed for each audio channel and, +where applicable, an overall figure is also given. +

+

The filter accepts the following option: +

+
length
+

Short window length in seconds, used for peak and trough RMS measurement. +Default is 0.05 (50 milliseconds). Allowed range is [0.1 - 10]. +

+
+ +

A description of each shown parameter follows: +

+
+
DC offset
+

Mean amplitude displacement from zero. +

+
+
Min level
+

Minimal sample level. +

+
+
Max level
+

Maximal sample level. +

+
+
Peak level dB
+
RMS level dB
+

Standard peak and RMS level measured in dBFS. +

+
+
RMS peak dB
+
RMS trough dB
+

Peak and trough values for RMS level measured over a short window. +

+
+
Crest factor
+

Standard ratio of peak to RMS level (note: not in dB). +

+
+
Flat factor
+

Flatness (i.e. consecutive samples with the same value) of the signal at its peak levels +(i.e. either Min level or Max level). +

+
+
Peak count
+

Number of occasions (not the number of samples) that the signal attained either +Min level or Max level. +

+
+ + +

6.18 astreamsync

+ +

Forward two audio streams and control the order the buffers are forwarded. +

+

The filter accepts the following options: +

+
+
expr, e
+

Set the expression deciding which stream should be +forwarded next: if the result is negative, the first stream is forwarded; if +the result is positive or zero, the second stream is forwarded. It can use +the following variables: +

+
+
b1 b2
+

number of buffers forwarded so far on each stream +

+
s1 s2
+

number of samples forwarded so far on each stream +

+
t1 t2
+

current timestamp of each stream +

+
+ +

The default value is t1-t2, which means to always forward the stream +that has a smaller timestamp. +

+
+ + +

6.18.1 Examples

+ +

Stress-test amerge by randomly sending buffers on the wrong +input, while avoiding too much of a desynchronization: +

 
amovie=file.ogg [a] ; amovie=file.mp3 [b] ;
+[a] [b] astreamsync=(2*random(1))-1+tanh(5*(t1-t2)) [a2] [b2] ;
+[a2] [b2] amerge
+
+ + +

6.19 asyncts

+ +

Synchronize audio data with timestamps by squeezing/stretching it and/or +dropping samples/adding silence when needed. +

+

This filter is not built by default, please use aresample to do squeezing/stretching. +

+

The filter accepts the following named parameters: +

+
compensate
+

Enable stretching/squeezing the data to make it match the timestamps. Disabled +by default. When disabled, time gaps are covered with silence. +

+
+
min_delta
+

Minimum difference between timestamps and audio data (in seconds) to trigger +adding/dropping samples. Default value is 0.1. If you get non-perfect sync with +this filter, try setting this parameter to 0. +

+
+
max_comp
+

Maximum compensation in samples per second. Relevant only with compensate=1. +Default value 500. +

+
+
first_pts
+

Assume the first pts should be this value. The time base is 1 / sample rate. +This allows for padding/trimming at the start of stream. By default, no +assumption is made about the first frame’s expected pts, so no padding or +trimming is done. For example, this could be set to 0 to pad the beginning with +silence if an audio stream starts after the video stream or to trim any samples +with a negative pts due to encoder delay. +

+
+
+ + +

6.20 atempo

+ +

Adjust audio tempo. +

+

The filter accepts exactly one parameter, the audio tempo. If not +specified then the filter will assume nominal 1.0 tempo. Tempo must +be in the [0.5, 2.0] range. +

+ +

6.20.1 Examples

+ +
    +
  • +Slow down audio to 80% tempo: +
     
    atempo=0.8
    +
    + +
  • +To speed up audio to 125% tempo: +
     
    atempo=1.25
    +
    +
+ + +

6.21 atrim

+ +

Trim the input so that the output contains one continuous subpart of the input. +

+

This filter accepts the following options: +

+
start
+

Specify time of the start of the kept section, i.e. the audio sample +with the timestamp start will be the first sample in the output. +

+
+
end
+

Specify time of the first audio sample that will be dropped, i.e. the +audio sample immediately preceding the one with the timestamp end will be +the last sample in the output. +

+
+
start_pts
+

Same as start, except this option sets the start timestamp in samples +instead of seconds. +

+
+
end_pts
+

Same as end, except this option sets the end timestamp in samples instead +of seconds. +

+
+
duration
+

Specify maximum duration of the output. +

+
+
start_sample
+

Number of the first sample that should be passed to output. +

+
+
end_sample
+

Number of the first sample that should be dropped. +

+
+ +

start’, ‘end’, ‘duration’ are expressed as time +duration specifications, check the "Time duration" section in the +ffmpeg-utils manual. +

+

Note that the first two sets of the start/end options and the ‘duration’ +option look at the frame timestamp, while the _sample options simply count the +samples that pass through the filter. So start/end_pts and start/end_sample will +give different results when the timestamps are wrong, inexact or do not start at +zero. Also note that this filter does not modify the timestamps. If you wish +that the output timestamps start at zero, insert the asetpts filter after the +atrim filter. +

+

If multiple start or end options are set, this filter tries to be greedy and +keep all samples that match at least one of the specified constraints. To keep +only the part that matches all the constraints at once, chain multiple atrim +filters. +

+

The defaults are such that all the input is kept. So it is possible to set e.g. +just the end values to keep everything before the specified time. +

+

Examples: +

    +
  • +drop everything except the second minute of input +
     
    ffmpeg -i INPUT -af atrim=60:120
    +
    + +
  • +keep only the first 1000 samples +
     
    ffmpeg -i INPUT -af atrim=end_sample=1000
    +
    + +
+ + +

6.22 bandpass

+ +

Apply a two-pole Butterworth band-pass filter with central +frequency frequency, and (3dB-point) band-width width. +The csg option selects a constant skirt gain (peak gain = Q) +instead of the default: constant 0dB peak gain. +The filter roll off at 6dB per octave (20dB per decade). +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set the filter’s central frequency. Default is 3000. +

+
+
csg
+

Constant skirt gain if set to 1. Defaults to 0. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Specify the band-width of a filter in width_type units. +

+
+ + +

6.23 bandreject

+ +

Apply a two-pole Butterworth band-reject filter with central +frequency frequency, and (3dB-point) band-width width. +The filter roll off at 6dB per octave (20dB per decade). +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set the filter’s central frequency. Default is 3000. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Specify the band-width of a filter in width_type units. +

+
+ + +

6.24 bass

+ +

Boost or cut the bass (lower) frequencies of the audio using a two-pole +shelving filter with a response similar to that of a standard +hi-fi’s tone-controls. This is also known as shelving equalisation (EQ). +

+

The filter accepts the following options: +

+
+
gain, g
+

Give the gain at 0 Hz. Its useful range is about -20 +(for a large cut) to +20 (for a large boost). +Beware of clipping when using a positive gain. +

+
+
frequency, f
+

Set the filter’s central frequency and so can be used +to extend or reduce the frequency range to be boosted or cut. +The default value is 100 Hz. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Determine how steep is the filter’s shelf transition. +

+
+ + +

6.25 biquad

+ +

Apply a biquad IIR filter with the given coefficients. +Where b0, b1, b2 and a0, a1, a2 +are the numerator and denominator coefficients respectively. +

+ +

6.26 channelmap

+ +

Remap input channels to new locations. +

+

This filter accepts the following named parameters: +

+
channel_layout
+

Channel layout of the output stream. +

+
+
map
+

Map channels from input to output. The argument is a ’|’-separated list of +mappings, each in the in_channel-out_channel or +in_channel form. in_channel can be either the name of the input +channel (e.g. FL for front left) or its index in the input channel layout. +out_channel is the name of the output channel or its index in the output +channel layout. If out_channel is not given then it is implicitly an +index, starting with zero and increasing by one for each mapping. +

+
+ +

If no mapping is present, the filter will implicitly map input channels to +output channels preserving index. +

+

For example, assuming a 5.1+downmix input MOV file +

 
ffmpeg -i in.mov -filter 'channelmap=map=DL-FL|DR-FR' out.wav
+
+

will create an output WAV file tagged as stereo from the downmix channels of +the input. +

+

To fix a 5.1 WAV improperly encoded in AAC’s native channel order +

 
ffmpeg -i in.wav -filter 'channelmap=1|2|0|5|3|4:channel_layout=5.1' out.wav
+
+ + +

6.27 channelsplit

+ +

Split each channel in input audio stream into a separate output stream. +

+

This filter accepts the following named parameters: +

+
channel_layout
+

Channel layout of the input stream. Default is "stereo". +

+
+ +

For example, assuming a stereo input MP3 file +

 
ffmpeg -i in.mp3 -filter_complex channelsplit out.mkv
+
+

will create an output Matroska file with two audio streams, one containing only +the left channel and the other the right channel. +

+

To split a 5.1 WAV file into per-channel files +

 
ffmpeg -i in.wav -filter_complex
+'channelsplit=channel_layout=5.1[FL][FR][FC][LFE][SL][SR]'
+-map '[FL]' front_left.wav -map '[FR]' front_right.wav -map '[FC]'
+front_center.wav -map '[LFE]' lfe.wav -map '[SL]' side_left.wav -map '[SR]'
+side_right.wav
+
+ + +

6.28 compand

+

Compress or expand audio dynamic range. +

+

A description of the accepted options follows. +

+
+
attacks
+
decays
+

Set list of times in seconds for each channel over which the instantaneous level +of the input signal is averaged to determine its volume. attacks refers to +increase of volume and decays refers to decrease of volume. For most +situations, the attack time (response to the audio getting louder) should be +shorter than the decay time because the human ear is more sensitive to sudden +loud audio than sudden soft audio. A typical value for attack is 0.3 seconds and +a typical value for decay is 0.8 seconds. +

+
+
points
+

Set list of points for the transfer function, specified in dB relative to the +maximum possible signal amplitude. Each key points list must be defined using +the following syntax: x0/y0|x1/y1|x2/y2|.... or +x0/y0 x1/y1 x2/y2 .... +

+

The input values must be in strictly increasing order but the transfer function +does not have to be monotonically rising. The point 0/0 is assumed but +may be overridden (by 0/out-dBn). Typical values for the transfer +function are -70/-70|-60/-20. +

+
+
soft-knee
+

Set the curve radius in dB for all joints. Defaults to 0.01. +

+
+
gain
+

Set additional gain in dB to be applied at all points on the transfer function. +This allows easy adjustment of the overall gain. Defaults to 0. +

+
+
volume
+

Set initial volume in dB to be assumed for each channel when filtering starts. +This permits the user to supply a nominal level initially, so that, for +example, a very large gain is not applied to initial signal levels before the +companding has begun to operate. A typical value for audio which is initially +quiet is -90 dB. Defaults to 0. +

+
+
delay
+

Set delay in seconds. The input audio is analyzed immediately, but audio is +delayed before being fed to the volume adjuster. Specifying a delay +approximately equal to the attack/decay times allows the filter to effectively +operate in predictive rather than reactive mode. Defaults to 0. +

+
+
+ + +

6.28.1 Examples

+ +
    +
  • +Make music with both quiet and loud passages suitable for listening in a noisy +environment: +
     
    compand=.3|.3:1|1:-90/-60|-60/-40|-40/-30|-20/-20:6:0:-90:0.2
    +
    + +
  • +Noise gate for when the noise is at a lower level than the signal: +
     
    compand=.1|.1:.2|.2:-900/-900|-50.1/-900|-50/-50:.01:0:-90:.1
    +
    + +
  • +Here is another noise gate, this time for when the noise is at a higher level +than the signal (making it, in some ways, similar to squelch): +
     
    compand=.1|.1:.1|.1:-45.1/-45.1|-45/-900|0/-900:.01:45:-90:.1
    +
    +
+ + +

6.29 earwax

+ +

Make audio easier to listen to on headphones. +

+

This filter adds ‘cues’ to 44.1kHz stereo (i.e. audio CD format) audio +so that when listened to on headphones the stereo image is moved from +inside your head (standard for headphones) to outside and in front of +the listener (standard for speakers). +

+

Ported from SoX. +

+ +

6.30 equalizer

+ +

Apply a two-pole peaking equalisation (EQ) filter. With this +filter, the signal-level at and around a selected frequency can +be increased or decreased, whilst (unlike bandpass and bandreject +filters) that at all other frequencies is unchanged. +

+

In order to produce complex equalisation curves, this filter can +be given several times, each with a different central frequency. +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set the filter’s central frequency in Hz. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Specify the band-width of a filter in width_type units. +

+
+
gain, g
+

Set the required gain or attenuation in dB. +Beware of clipping when using a positive gain. +

+
+ + +

6.30.1 Examples

+
    +
  • +Attenuate 10 dB at 1000 Hz, with a bandwidth of 200 Hz: +
     
    equalizer=f=1000:width_type=h:width=200:g=-10
    +
    + +
  • +Apply 2 dB gain at 1000 Hz with Q 1 and attenuate 5 dB at 100 Hz with Q 2: +
     
    equalizer=f=1000:width_type=q:width=1:g=2,equalizer=f=100:width_type=q:width=2:g=-5
    +
    +
+ + +

6.31 highpass

+ +

Apply a high-pass filter with 3dB point frequency. The filter can be either single-pole, or double-pole (the default). The filter rolls off at 6dB per pole per octave (20dB per pole per decade).

+

The filter accepts the following options: +

+
+
frequency, f
+

Set frequency in Hz. Default is 3000. +

+
+
poles, p
+

Set number of poles. Default is 2. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Specify the band-width of a filter in width_type units. +Applies only to double-pole filter. +The default is 0.707q and gives a Butterworth response. +

+
+ + +

6.32 join

+ +

Join multiple input streams into one multi-channel stream. +

+

The filter accepts the following named parameters: +

+
inputs
+

Number of input streams. Defaults to 2. +

+
+
channel_layout
+

Desired output channel layout. Defaults to stereo. +

+
+
map
+

Map channels from inputs to output. The argument is a ’|’-separated list of +mappings, each in the input_idx.in_channel-out_channel +form. input_idx is the 0-based index of the input stream. in_channel +can be either the name of the input channel (e.g. FL for front left) or its +index in the specified input stream. out_channel is the name of the output +channel. +

+
+ +

The filter will attempt to guess the mappings when those are not specified +explicitly. It does so by first trying to find an unused matching input channel +and if that fails it picks the first unused input channel. +

+

E.g. to join 3 inputs (with properly set channel layouts) +

 
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex join=inputs=3 OUTPUT
+
+ +

To build a 5.1 output from 6 single-channel streams: +

 
ffmpeg -i fl -i fr -i fc -i sl -i sr -i lfe -filter_complex
+'join=inputs=6:channel_layout=5.1:map=0.0-FL|1.0-FR|2.0-FC|3.0-SL|4.0-SR|5.0-LFE'
+out
+
+ + +

6.33 ladspa

+ +

Load a LADSPA (Linux Audio Developer’s Simple Plugin API) plugin. +

+

To enable compilation of this filter you need to configure FFmpeg with +--enable-ladspa. +

+
+
file, f
+

Specifies the name of LADSPA plugin library to load. If the environment +variable LADSPA_PATH is defined, the LADSPA plugin is searched in +each one of the directories specified by the colon separated list in +LADSPA_PATH, otherwise in the standard LADSPA paths, which are in +this order: ‘HOME/.ladspa/lib/’, ‘/usr/local/lib/ladspa/’, +‘/usr/lib/ladspa/’. +

+
+
plugin, p
+

Specifies the plugin within the library. Some libraries contain only +one plugin, but others contain many of them. If this is not set filter +will list all available plugins within the specified library. +

+
+
controls, c
+

Set the ’|’ separated list of controls which are zero or more floating point +values that determine the behavior of the loaded plugin (for example delay, +threshold or gain). +Controls need to be defined using the following syntax: +c0=value0|c1=value1|c2=value2|..., where +valuei is the value set on the i-th control. +If ‘controls’ is set to help, all available controls and +their valid ranges are printed. +

+
+
sample_rate, s
+

Specify the sample rate, default to 44100. Only used if plugin have +zero inputs. +

+
+
nb_samples, n
+

Set the number of samples per channel per each output frame, default +is 1024. Only used if plugin have zero inputs. +

+
+
duration, d
+

Set the minimum duration of the sourced audio. See the function +av_parse_time() for the accepted format, also check the "Time duration" +section in the ffmpeg-utils manual. +Note that the resulting duration may be greater than the specified duration, +as the generated audio is always cut at the end of a complete frame. +If not specified, or the expressed duration is negative, the audio is +supposed to be generated forever. +Only used if plugin have zero inputs. +

+
+
+ + +

6.33.1 Examples

+ +
    +
  • +List all available plugins within amp (LADSPA example plugin) library: +
     
    ladspa=file=amp
    +
    + +
  • +List all available controls and their valid ranges for vcf_notch +plugin from VCF library: +
     
    ladspa=f=vcf:p=vcf_notch:c=help
    +
    + +
  • +Simulate low quality audio equipment using Computer Music Toolkit (CMT) +plugin library: +
     
    ladspa=file=cmt:plugin=lofi:controls=c0=22|c1=12|c2=12
    +
    + +
  • +Add reverberation to the audio using TAP-plugins +(Tom’s Audio Processing plugins): +
     
    ladspa=file=tap_reverb:tap_reverb
    +
    + +
  • +Generate white noise, with 0.2 amplitude: +
     
    ladspa=file=cmt:noise_source_white:c=c0=.2
    +
    + +
  • +Generate 20 bpm clicks using plugin C* Click - Metronome from the +C* Audio Plugin Suite (CAPS) library: +
     
    ladspa=file=caps:Click:c=c1=20
    +
    + +
  • +Apply C* Eq10X2 - Stereo 10-band equaliser effect: +
     
    ladspa=caps:Eq10X2:c=c0=-48|c9=-24|c3=12|c4=2
    +
    +
+ + +

6.33.2 Commands

+ +

This filter supports the following commands: +

+
cN
+

Modify the N-th control value. +

+

If the specified value is not valid, it is ignored and prior one is kept. +

+
+ + +

6.34 lowpass

+ +

Apply a low-pass filter with 3dB point frequency. The filter can be either single-pole or double-pole (the default). The filter rolls off at 6dB per pole per octave (20dB per pole per decade).

+

The filter accepts the following options: +

+
+
frequency, f
+

Set frequency in Hz. Default is 500. +

+
+
poles, p
+

Set number of poles. Default is 2. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Specify the band-width of a filter in width_type units. +Applies only to double-pole filter. +The default is 0.707q and gives a Butterworth response. +

+
+ + +

6.35 pan

+ +

Mix channels with specific gain levels. The filter accepts the output +channel layout followed by a set of channels definitions. +

+

This filter is also designed to remap efficiently the channels of an audio +stream. +

+

The filter accepts parameters of the form: +"l:outdef:outdef:..." +

+
+
l
+

output channel layout or number of channels +

+
+
outdef
+

output channel specification, of the form: +"out_name=[gain*]in_name[+[gain*]in_name...]" +

+
+
out_name
+

output channel to define, either a channel name (FL, FR, etc.) or a channel +number (c0, c1, etc.) +

+
+
gain
+

multiplicative coefficient for the channel, 1 leaving the volume unchanged +

+
+
in_name
+

input channel to use, see out_name for details; it is not possible to mix +named and numbered input channels +

+
+ +

If the ‘=’ in a channel specification is replaced by ‘<’, then the gains for +that specification will be renormalized so that the total is 1, thus +avoiding clipping noise. +

+ +

6.35.1 Mixing examples

+ +

For example, if you want to down-mix from stereo to mono, but with a bigger +factor for the left channel: +

 
pan=1:c0=0.9*c0+0.1*c1
+
+ +

A customized down-mix to stereo that works automatically for 3-, 4-, 5- and +7-channels surround: +

 
pan=stereo: FL < FL + 0.5*FC + 0.6*BL + 0.6*SL : FR < FR + 0.5*FC + 0.6*BR + 0.6*SR
+
+ +

Note that ffmpeg integrates a default down-mix (and up-mix) system +that should be preferred (see "-ac" option) unless you have very specific +needs. +

+ +

6.35.2 Remapping examples

+ +

The channel remapping will be effective if, and only if: +

+
    +
  • gain coefficients are zeroes or ones, +
  • only one input per channel output, +
+ +

If all these conditions are satisfied, the filter will notify the user ("Pure +channel mapping detected"), and use an optimized and lossless method to do the +remapping. +

+

For example, if you have a 5.1 source and want a stereo audio stream by +dropping the extra channels: +

 
pan="stereo: c0=FL : c1=FR"
+
+ +

Given the same source, you can also switch front left and front right channels +and keep the input channel layout: +

 
pan="5.1: c0=c1 : c1=c0 : c2=c2 : c3=c3 : c4=c4 : c5=c5"
+
+ +

If the input is a stereo audio stream, you can mute the front left channel (and +still keep the stereo channel layout) with: +

 
pan="stereo:c1=c1"
+
+ +

Still with a stereo audio stream input, you can copy the right channel in both +front left and right: +

 
pan="stereo: c0=FR : c1=FR"
+
+ + +

6.36 replaygain

+ +

ReplayGain scanner filter. This filter takes an audio stream as an input and +outputs it unchanged. +At end of filtering it displays track_gain and track_peak. +

+ +

6.37 resample

+ +

Convert the audio sample format, sample rate and channel layout. This filter is +not meant to be used directly. +

+ +

6.38 silencedetect

+ +

Detect silence in an audio stream. +

+

This filter logs a message when it detects that the input audio volume is less +or equal to a noise tolerance value for a duration greater or equal to the +minimum detected noise duration. +

+

The printed times and duration are expressed in seconds. +

+

The filter accepts the following options: +

+
+
duration, d
+

Set silence duration until notification (default is 2 seconds). +

+
+
noise, n
+

Set noise tolerance. Can be specified in dB (in case "dB" is appended to the +specified value) or amplitude ratio. Default is -60dB, or 0.001. +

+
+ + +

6.38.1 Examples

+ +
    +
  • +Detect 5 seconds of silence with -50dB noise tolerance: +
     
    silencedetect=n=-50dB:d=5
    +
    + +
  • +Complete example with ffmpeg to detect silence with 0.0001 noise +tolerance in ‘silence.mp3’: +
     
    ffmpeg -i silence.mp3 -af silencedetect=noise=0.0001 -f null -
    +
    +
+ + +

6.39 treble

+ +

Boost or cut treble (upper) frequencies of the audio using a two-pole +shelving filter with a response similar to that of a standard +hi-fi’s tone-controls. This is also known as shelving equalisation (EQ). +

+

The filter accepts the following options: +

+
+
gain, g
+

Give the gain at whichever is the lower of ~22 kHz and the +Nyquist frequency. Its useful range is about -20 (for a large cut) +to +20 (for a large boost). Beware of clipping when using a positive gain. +

+
+
frequency, f
+

Set the filter’s central frequency and so can be used +to extend or reduce the frequency range to be boosted or cut. +The default value is 3000 Hz. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Determine how steep is the filter’s shelf transition. +

+
+ + +

6.40 volume

+ +

Adjust the input audio volume. +

+

The filter accepts the following options: +

+
+
volume
+

Set audio volume expression. +

+

Output values are clipped to the maximum value. +

+

The output audio volume is given by the relation: +

 
output_volume = volume * input_volume
+
+ +

Default value for volume is "1.0". +

+
+
precision
+

Set the mathematical precision. +

+

This determines which input sample formats will be allowed, which affects the +precision of the volume scaling. +

+
+
fixed
+

8-bit fixed-point; limits input sample format to U8, S16, and S32. +

+
float
+

32-bit floating-point; limits input sample format to FLT. (default) +

+
double
+

64-bit floating-point; limits input sample format to DBL. +

+
+ +
+
eval
+

Set when the volume expression is evaluated. +

+

It accepts the following values: +

+
once
+

only evaluate expression once during the filter initialization, or +when the ‘volume’ command is sent +

+
+
frame
+

evaluate expression for each incoming frame +

+
+ +

Default value is ‘once’. +

+
+ +

The volume expression can contain the following parameters. +

+
+
n
+

frame number (starting at zero) +

+
nb_channels
+

number of channels +

+
nb_consumed_samples
+

number of samples consumed by the filter +

+
nb_samples
+

number of samples in the current frame +

+
pos
+

original frame position in the file +

+
pts
+

frame PTS +

+
sample_rate
+

sample rate +

+
startpts
+

PTS at start of stream +

+
startt
+

time at start of stream +

+
t
+

frame time +

+
tb
+

timestamp timebase +

+
volume
+

last set volume value +

+
+ +

Note that when ‘eval’ is set to ‘once’ only the +sample_rate and tb variables are available, all other +variables will evaluate to NAN. +

+ +

6.40.1 Commands

+ +

This filter supports the following commands: +

+
volume
+

Modify the volume expression. +The command accepts the same syntax of the corresponding option. +

+

If the specified expression is not valid, it is kept at its current +value. +

+
+ + +

6.40.2 Examples

+ +
    +
  • +Halve the input audio volume: +
     
    volume=volume=0.5
    +volume=volume=1/2
    +volume=volume=-6.0206dB
    +
    + +

    In all the above example the named key for ‘volume’ can be +omitted, for example like in: +

     
    volume=0.5
    +
    + +
  • +Increase input audio power by 6 decibels using fixed-point precision: +
     
    volume=volume=6dB:precision=fixed
    +
    + +
  • +Fade volume after time 10 with an annihilation period of 5 seconds: +
     
    volume='if(lt(t,10),1,max(1-(t-10)/5,0))':eval=frame
    +
    +
+ + +

6.41 volumedetect

+ +

Detect the volume of the input audio.

+

The filter has no parameters. The input is not modified. Statistics about +the volume will be printed in the log when the input stream end is reached. +

+

In particular it will show the mean volume (root mean square), maximum +volume (on a per-sample basis), and the beginning of a histogram of the +registered volume values (from the maximum value to a cumulated 1/1000 of +the samples). +

+

All volumes are in decibels relative to the maximum PCM value. +

+ +

6.41.1 Examples

+ +

Here is an excerpt of the output: +

 
[Parsed_volumedetect_0  0xa23120] mean_volume: -27 dB
+[Parsed_volumedetect_0  0xa23120] max_volume: -4 dB
+[Parsed_volumedetect_0  0xa23120] histogram_4db: 6
+[Parsed_volumedetect_0  0xa23120] histogram_5db: 62
+[Parsed_volumedetect_0  0xa23120] histogram_6db: 286
+[Parsed_volumedetect_0  0xa23120] histogram_7db: 1042
+[Parsed_volumedetect_0  0xa23120] histogram_8db: 2551
+[Parsed_volumedetect_0  0xa23120] histogram_9db: 4609
+[Parsed_volumedetect_0  0xa23120] histogram_10db: 8409
+
+ +

It means that: +

    +
  • +The mean square energy is approximately -27 dB, or 10^-2.7. +
  • +The largest sample is at -4 dB, or more precisely between -4 dB and -5 dB. +
  • +There are 6 samples at -4 dB, 62 at -5 dB, 286 at -6 dB, etc. +
+ +

In other words, raising the volume by +4 dB does not cause any clipping, +raising it by +5 dB causes clipping for 6 samples, etc. +

+ + +

7. Audio Sources

+ +

Below is a description of the currently available audio sources. +

+ +

7.1 abuffer

+ +

Buffer audio frames, and make them available to the filter chain. +

+

This source is mainly intended for a programmatic use, in particular +through the interface defined in ‘libavfilter/asrc_abuffer.h’. +

+

It accepts the following named parameters: +

+
+
time_base
+

Timebase which will be used for timestamps of submitted frames. It must be +either a floating-point number or in numerator/denominator form. +

+
+
sample_rate
+

The sample rate of the incoming audio buffers. +

+
+
sample_fmt
+

The sample format of the incoming audio buffers. Either a sample format name or its corresponding integer representation from the enum AVSampleFormat in ‘libavutil/samplefmt.h’

+
+
channel_layout
+

The channel layout of the incoming audio buffers. +Either a channel layout name from channel_layout_map in +‘libavutil/channel_layout.c’ or its corresponding integer representation +from the AV_CH_LAYOUT_* macros in ‘libavutil/channel_layout.h’ +

+
+
channels
+

The number of channels of the incoming audio buffers. +If both channels and channel_layout are specified, then they +must be consistent. +

+
+
+ + +

7.1.1 Examples

+ +
 
abuffer=sample_rate=44100:sample_fmt=s16p:channel_layout=stereo
+
+ +

will instruct the source to accept planar 16bit signed stereo at 44100Hz. +Since the sample format with name "s16p" corresponds to the number +6 and the "stereo" channel layout corresponds to the value 0x3, this is +equivalent to: +

 
abuffer=sample_rate=44100:sample_fmt=6:channel_layout=0x3
+
+ + +

7.2 aevalsrc

+ +

Generate an audio signal specified by an expression. +

+

This source accepts in input one or more expressions (one for each +channel), which are evaluated and used to generate a corresponding +audio signal. +

+

This source accepts the following options: +

+
+
exprs
+

Set the ’|’-separated expressions list for each separate channel. In case the +‘channel_layout’ option is not specified, the selected channel layout +depends on the number of provided expressions. Otherwise the last +specified expression is applied to the remaining output channels. +

+
+
channel_layout, c
+

Set the channel layout. The number of channels in the specified layout +must be equal to the number of specified expressions. +

+
+
duration, d
+

Set the minimum duration of the sourced audio. See the function +av_parse_time() for the accepted format. +Note that the resulting duration may be greater than the specified +duration, as the generated audio is always cut at the end of a +complete frame. +

+

If not specified, or the expressed duration is negative, the audio is +supposed to be generated forever. +

+
+
nb_samples, n
+

Set the number of samples per channel per each output frame, +default to 1024. +

+
+
sample_rate, s
+

Specify the sample rate, default to 44100. +

+
+ +

Each expression in exprs can contain the following constants: +

+
+
n
+

number of the evaluated sample, starting from 0 +

+
+
t
+

time of the evaluated sample expressed in seconds, starting from 0 +

+
+
s
+

sample rate +

+
+
+ + +

7.2.1 Examples

+ +
    +
  • +Generate silence: +
     
    aevalsrc=0
    +
    + +
  • +Generate a sin signal with frequency of 440 Hz, set sample rate to +8000 Hz: +
     
    aevalsrc="sin(440*2*PI*t):s=8000"
    +
    + +
  • +Generate a two channels signal, specify the channel layout (Front +Center + Back Center) explicitly: +
     
    aevalsrc="sin(420*2*PI*t)|cos(430*2*PI*t):c=FC|BC"
    +
    + +
  • +Generate white noise: +
     
    aevalsrc="-2+random(0)"
    +
    + +
  • +Generate an amplitude modulated signal: +
     
    aevalsrc="sin(10*2*PI*t)*sin(880*2*PI*t)"
    +
    + +
  • +Generate 2.5 Hz binaural beats on a 360 Hz carrier: +
     
    aevalsrc="0.1*sin(2*PI*(360-2.5/2)*t) | 0.1*sin(2*PI*(360+2.5/2)*t)"
    +
    + +
+ + +

7.3 anullsrc

+ +

Null audio source, return unprocessed audio frames. It is mainly useful +as a template and to be employed in analysis / debugging tools, or as +the source for filters which ignore the input data (for example the sox +synth filter). +

+

This source accepts the following options: +

+
+
channel_layout, cl
+
+

Specify the channel layout, and can be either an integer or a string +representing a channel layout. The default value of channel_layout +is "stereo". +

+

Check the channel_layout_map definition in +‘libavutil/channel_layout.c’ for the mapping between strings and +channel layout values. +

+
+
sample_rate, r
+

Specify the sample rate, and defaults to 44100. +

+
+
nb_samples, n
+

Set the number of samples per requested frames. +

+
+
+ + +

7.3.1 Examples

+ +
    +
  • +Set the sample rate to 48000 Hz and the channel layout to AV_CH_LAYOUT_MONO. +
     
    anullsrc=r=48000:cl=4
    +
    + +
  • +Do the same operation with a more obvious syntax: +
     
    anullsrc=r=48000:cl=mono
    +
    +
+ +

All the parameters need to be explicitly defined. +

+ +

7.4 flite

+ +

Synthesize a voice utterance using the libflite library. +

+

To enable compilation of this filter you need to configure FFmpeg with +--enable-libflite. +

+

Note that the flite library is not thread-safe. +

+

The filter accepts the following options: +

+
+
list_voices
+

If set to 1, list the names of the available voices and exit +immediately. Default value is 0. +

+
+
nb_samples, n
+

Set the maximum number of samples per frame. Default value is 512. +

+
+
textfile
+

Set the filename containing the text to speak. +

+
+
text
+

Set the text to speak. +

+
+
voice, v
+

Set the voice to use for the speech synthesis. Default value is +kal. See also the list_voices option. +

+
+ + +

7.4.1 Examples

+ +
    +
  • Read from file ‘speech.txt’, and synthesize the text using the standard flite voice:
     
    flite=textfile=speech.txt
    +
    + +
  • +Read the specified text selecting the slt voice: +
     
    flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
    +
    + +
  • +Input text to ffmpeg: +
     
    ffmpeg -f lavfi -i flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
    +
    + +
  • +Make ‘ffplay’ speak the specified text, using flite and +the lavfi device: +
     
    ffplay -f lavfi flite=text='No more be grieved for which that thou hast done.'
    +
    +
+ +

For more information about libflite, check: +http://www.speech.cs.cmu.edu/flite/ +

+ +

7.5 sine

+ +

Generate an audio signal made of a sine wave with amplitude 1/8. +

+

The audio signal is bit-exact. +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set the carrier frequency. Default is 440 Hz. +

+
+
beep_factor, b
+

Enable a periodic beep every second with frequency beep_factor times +the carrier frequency. Default is 0, meaning the beep is disabled. +

+
+
sample_rate, r
+

Specify the sample rate, default is 44100. +

+
+
duration, d
+

Specify the duration of the generated audio stream. +

+
+
samples_per_frame
+

Set the number of samples per output frame, default is 1024. +

+
+ + +

7.5.1 Examples

+ +
    +
  • +Generate a simple 440 Hz sine wave: +
     
    sine
    +
    + +
  • +Generate a 220 Hz sine wave with a 880 Hz beep each second, for 5 seconds: +
     
    sine=220:4:d=5
    +sine=f=220:b=4:d=5
    +sine=frequency=220:beep_factor=4:duration=5
    +
    + +
+ + + +

8. Audio Sinks

+ +

Below is a description of the currently available audio sinks. +

+ +

8.1 abuffersink

+ +

Buffer audio frames, and make them available to the end of filter chain. +

+

This sink is mainly intended for programmatic use, in particular +through the interface defined in ‘libavfilter/buffersink.h’ +or the options system. +

+

It accepts a pointer to an AVABufferSinkContext structure, which +defines the incoming buffers’ formats, to be passed as the opaque +parameter to avfilter_init_filter for initialization. +

+ +

8.2 anullsink

+ +

Null audio sink, do absolutely nothing with the input audio. It is +mainly useful as a template and to be employed in analysis / debugging +tools. +

+ + +

9. Video Filters

+ +

When you configure your FFmpeg build, you can disable any of the +existing filters using --disable-filters. +The configure output will show the video filters included in your +build. +

+

Below is a description of the currently available video filters. +

+ +

9.1 alphaextract

+ +

Extract the alpha component from the input as a grayscale video. This +is especially useful with the alphamerge filter. +

+ +

9.2 alphamerge

+ +

Add or replace the alpha component of the primary input with the +grayscale value of a second input. This is intended for use with +alphaextract to allow the transmission or storage of frame +sequences that have alpha in a format that doesn’t support an alpha +channel. +

+

For example, to reconstruct full frames from a normal YUV-encoded video +and a separate video created with alphaextract, you might use: +

 
movie=in_alpha.mkv [alpha]; [in][alpha] alphamerge [out]
+
+ +

Since this filter is designed for reconstruction, it operates on frame +sequences without considering timestamps, and terminates when either +input reaches end of stream. This will cause problems if your encoding +pipeline drops frames. If you’re trying to apply an image as an +overlay to a video stream, consider the overlay filter instead. +

+ +

9.3 ass

+ +

Same as the subtitles filter, except that it doesn’t require libavcodec +and libavformat to work. On the other hand, it is limited to ASS (Advanced +Substation Alpha) subtitles files. +

+ +

9.4 bbox

+ +

Compute the bounding box for the non-black pixels in the input frame +luminance plane. +

+

This filter computes the bounding box containing all the pixels with a +luminance value greater than the minimum allowed value. +The parameters describing the bounding box are printed on the filter +log. +

+

The filter accepts the following option: +

+
+
min_val
+

Set the minimal luminance value. Default is 16. +

+
+ + +

9.5 blackdetect

+ +

Detect video intervals that are (almost) completely black. Can be +useful to detect chapter transitions, commercials, or invalid +recordings. Output lines contains the time for the start, end and +duration of the detected black interval expressed in seconds. +

+

In order to display the output lines, you need to set the loglevel at +least to the AV_LOG_INFO value. +

+

The filter accepts the following options: +

+
+
black_min_duration, d
+

Set the minimum detected black duration expressed in seconds. It must +be a non-negative floating point number. +

+

Default value is 2.0. +

+
+
picture_black_ratio_th, pic_th
+

Set the threshold for considering a picture "black". +Express the minimum value for the ratio: +

 
nb_black_pixels / nb_pixels
+
+ +

for which a picture is considered black. +Default value is 0.98. +

+
+
pixel_black_th, pix_th
+

Set the threshold for considering a pixel "black". +

+

The threshold expresses the maximum pixel luminance value for which a +pixel is considered "black". The provided value is scaled according to +the following equation: +

 
absolute_threshold = luminance_minimum_value + pixel_black_th * luminance_range_size
+
+ +

luminance_range_size and luminance_minimum_value depend on +the input video format, the range is [0-255] for YUV full-range +formats and [16-235] for YUV non full-range formats. +

+

Default value is 0.10. +

+
+ +

The following example sets the maximum pixel threshold to the minimum +value, and detects only black intervals of 2 or more seconds: +

 
blackdetect=d=2:pix_th=0.00
+
+ + +

9.6 blackframe

+ +

Detect frames that are (almost) completely black. Can be useful to +detect chapter transitions or commercials. Output lines consist of +the frame number of the detected frame, the percentage of blackness, +the position in the file if known or -1 and the timestamp in seconds. +

+

In order to display the output lines, you need to set the loglevel at +least to the AV_LOG_INFO value. +

+

The filter accepts the following options: +

+
+
amount
+

Set the percentage of the pixels that have to be below the threshold, defaults +to 98. +

+
+
threshold, thresh
+

Set the threshold below which a pixel value is considered black, defaults to +32. +

+
+
+ + +

9.7 blend

+ +

Blend two video frames into each other. +

+

It takes two input streams and outputs one stream, the first input is the +"top" layer and second input is "bottom" layer. +Output terminates when shortest input terminates. +

+

A description of the accepted options follows. +

+
+
c0_mode
+
c1_mode
+
c2_mode
+
c3_mode
+
all_mode
+

Set blend mode for specific pixel component or all pixel components in case +of all_mode. Default value is normal. +

+

Available values for component modes are: +

+
addition
+
and
+
average
+
burn
+
darken
+
difference
+
divide
+
dodge
+
exclusion
+
hardlight
+
lighten
+
multiply
+
negation
+
normal
+
or
+
overlay
+
phoenix
+
pinlight
+
reflect
+
screen
+
softlight
+
subtract
+
vividlight
+
xor
+
+ +
+
c0_opacity
+
c1_opacity
+
c2_opacity
+
c3_opacity
+
all_opacity
+

Set blend opacity for specific pixel component or all pixel components in case +of all_opacity. Only used in combination with pixel component blend modes. +

+
+
c0_expr
+
c1_expr
+
c2_expr
+
c3_expr
+
all_expr
+

Set blend expression for specific pixel component or all pixel components in case +of all_expr. Note that related mode options will be ignored if those are set. +

+

The expressions can use the following variables: +

+
+
N
+

The sequential number of the filtered frame, starting from 0. +

+
+
X
+
Y
+

the coordinates of the current sample +

+
+
W
+
H
+

the width and height of currently filtered plane +

+
+
SW
+
SH
+

Width and height scale depending on the currently filtered plane. It is the +ratio between the corresponding luma plane number of pixels and the current +plane ones. E.g. for YUV4:2:0 the values are 1,1 for the luma plane, and +0.5,0.5 for chroma planes. +

+
+
T
+

Time of the current frame, expressed in seconds. +

+
+
TOP, A
+

Value of pixel component at current location for first video frame (top layer). +

+
+
BOTTOM, B
+

Value of pixel component at current location for second video frame (bottom layer). +

+
+ +
+
shortest
+

Force termination when the shortest input terminates. Default is 0. +

+
repeatlast
+

Continue applying the last bottom frame after the end of the stream. A value of +0 disables the filter after the last frame of the bottom layer is reached. +Default is 1. +

+
+ + +

9.7.1 Examples

+ +
    +
  • +Apply transition from bottom layer to top layer in first 10 seconds: +
     
    blend=all_expr='A*(if(gte(T,10),1,T/10))+B*(1-(if(gte(T,10),1,T/10)))'
    +
    + +
  • +Apply 1x1 checkerboard effect: +
     
    blend=all_expr='if(eq(mod(X,2),mod(Y,2)),A,B)'
    +
    + +
  • +Apply uncover left effect: +
     
    blend=all_expr='if(gte(N*SW+X,W),A,B)'
    +
    + +
  • +Apply uncover down effect: +
     
    blend=all_expr='if(gte(Y-N*SH,0),A,B)'
    +
    + +
  • +Apply uncover up-left effect: +
     
    blend=all_expr='if(gte(T*SH*40+Y,H)*gte((T*40*SW+X)*W/H,W),A,B)'
    +
    +
+ + +

9.8 boxblur

+ +

Apply boxblur algorithm to the input video. +

+

The filter accepts the following options: +

+
+
luma_radius, lr
+
luma_power, lp
+
chroma_radius, cr
+
chroma_power, cp
+
alpha_radius, ar
+
alpha_power, ap
+
+ +

A description of the accepted options follows. +

+
+
luma_radius, lr
+
chroma_radius, cr
+
alpha_radius, ar
+

Set an expression for the box radius in pixels used for blurring the +corresponding input plane. +

+

The radius value must be a non-negative number, and must not be +greater than the value of the expression min(w,h)/2 for the +luma and alpha planes, and of min(cw,ch)/2 for the chroma +planes. +

+

Default value for ‘luma_radius’ is "2". If not specified, +‘chroma_radius’ and ‘alpha_radius’ default to the +corresponding value set for ‘luma_radius’. +

+

The expressions can contain the following constants: +

+
w
+
h
+

the input width and height in pixels +

+
+
cw
+
ch
+

the input chroma image width and height in pixels +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+ +
+
luma_power, lp
+
chroma_power, cp
+
alpha_power, ap
+

Specify how many times the boxblur filter is applied to the +corresponding plane. +

+

Default value for ‘luma_power’ is 2. If not specified, +‘chroma_power’ and ‘alpha_power’ default to the +corresponding value set for ‘luma_power’. +

+

A value of 0 will disable the effect. +

+
+ + +

9.8.1 Examples

+ +
    +
  • +Apply a boxblur filter with luma, chroma, and alpha radius +set to 2: +
     
    boxblur=luma_radius=2:luma_power=1
    +boxblur=2:1
    +
    + +
  • +Set luma radius to 2, alpha and chroma radius to 0: +
     
    boxblur=2:1:cr=0:ar=0
    +
    + +
  • +Set luma and chroma radius to a fraction of the video dimension: +
     
    boxblur=luma_radius=min(h\,w)/10:luma_power=1:chroma_radius=min(cw\,ch)/10:chroma_power=1
    +
    +
+ + +

9.9 colorbalance

+

Modify intensity of primary colors (red, green and blue) of input frames. +

+

The filter allows an input frame to be adjusted in the shadows, midtones or highlights +regions for the red-cyan, green-magenta or blue-yellow balance. +

+

A positive adjustment value shifts the balance towards the primary color, a negative +value towards the complementary color. +

+

The filter accepts the following options: +

+
+
rs
+
gs
+
bs
+

Adjust red, green and blue shadows (darkest pixels). +

+
+
rm
+
gm
+
bm
+

Adjust red, green and blue midtones (medium pixels). +

+
+
rh
+
gh
+
bh
+

Adjust red, green and blue highlights (brightest pixels). +

+

Allowed ranges for options are [-1.0, 1.0]. Defaults are 0. +

+
+ + +

9.9.1 Examples

+ +
    +
  • +Add red color cast to shadows: +
     
    colorbalance=rs=.3
    +
    +
+ + +

9.10 colorchannelmixer

+ +

Adjust video input frames by re-mixing color channels. +

+

This filter modifies a color channel by adding the values associated to +the other channels of the same pixels. For example if the value to +modify is red, the output value will be: +

 
red=red*rr + blue*rb + green*rg + alpha*ra
+
+ +

The filter accepts the following options: +

+
+
rr
+
rg
+
rb
+
ra
+

Adjust contribution of input red, green, blue and alpha channels for output red channel. +Default is 1 for rr, and 0 for rg, rb and ra. +

+
+
gr
+
gg
+
gb
+
ga
+

Adjust contribution of input red, green, blue and alpha channels for output green channel. +Default is 1 for gg, and 0 for gr, gb and ga. +

+
+
br
+
bg
+
bb
+
ba
+

Adjust contribution of input red, green, blue and alpha channels for output blue channel. +Default is 1 for bb, and 0 for br, bg and ba. +

+
+
ar
+
ag
+
ab
+
aa
+

Adjust contribution of input red, green, blue and alpha channels for output alpha channel. +Default is 1 for aa, and 0 for ar, ag and ab. +

+

Allowed ranges for options are [-2.0, 2.0]. +

+
+ + +

9.10.1 Examples

+ +
    +
  • +Convert source to grayscale: +
     
    colorchannelmixer=.3:.4:.3:0:.3:.4:.3:0:.3:.4:.3
    +
    +
  • +Simulate sepia tones: +
     
    colorchannelmixer=.393:.769:.189:0:.349:.686:.168:0:.272:.534:.131
    +
    +
+ + +

9.11 colormatrix

+ +

Convert color matrix. +

+

The filter accepts the following options: +

+
+
src
+
dst
+

Specify the source and destination color matrix. Both values must be +specified. +

+

The accepted values are: +

+
bt709
+

BT.709 +

+
+
bt601
+

BT.601 +

+
+
smpte240m
+

SMPTE-240M +

+
+
fcc
+

FCC +

+
+
+
+ +

For example to convert from BT.601 to SMPTE-240M, use the command: +

 
colormatrix=bt601:smpte240m
+
+ + +

9.12 copy

+ +

Copy the input source unchanged to the output. Mainly useful for +testing purposes. +

+ +

9.13 crop

+ +

Crop the input video to given dimensions. +

+

The filter accepts the following options: +

+
+
w, out_w
+

Width of the output video. It defaults to iw. +This expression is evaluated only once during the filter +configuration. +

+
+
h, out_h
+

Height of the output video. It defaults to ih. +This expression is evaluated only once during the filter +configuration. +

+
+
x
+

Horizontal position, in the input video, of the left edge of the output video. +It defaults to (in_w-out_w)/2. +This expression is evaluated per-frame. +

+
+
y
+

Vertical position, in the input video, of the top edge of the output video. +It defaults to (in_h-out_h)/2. +This expression is evaluated per-frame. +

+
+
keep_aspect
+

If set to 1 will force the output display aspect ratio +to be the same as the input, by changing the output sample aspect +ratio. It defaults to 0. +

+
+ +

The out_w, out_h, x, y parameters are +expressions containing the following constants: +

+
+
x
+
y
+

the computed values for x and y. They are evaluated for +each new frame. +

+
+
in_w
+
in_h
+

the input width and height +

+
+
iw
+
ih
+

same as in_w and in_h +

+
+
out_w
+
out_h
+

the output (cropped) width and height +

+
+
ow
+
oh
+

same as out_w and out_h +

+
+
a
+

same as iw / ih +

+
+
sar
+

input sample aspect ratio +

+
+
dar
+

input display aspect ratio, it is the same as (iw / ih) * sar +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+
n
+

the number of input frame, starting from 0 +

+
+
pos
+

the position in the file of the input frame, NAN if unknown +

+
+
t
+

timestamp expressed in seconds, NAN if the input timestamp is unknown +

+
+
+ +

The expression for out_w may depend on the value of out_h, +and the expression for out_h may depend on out_w, but they +cannot depend on x and y, as x and y are +evaluated after out_w and out_h. +

+

The x and y parameters specify the expressions for the +position of the top-left corner of the output (non-cropped) area. They +are evaluated for each frame. If the evaluated value is not valid, it +is approximated to the nearest valid value. +

+

The expression for x may depend on y, and the expression +for y may depend on x. +

+ +

9.13.1 Examples

+ +
    +
  • +Crop area with size 100x100 at position (12,34). +
     
    crop=100:100:12:34
    +
    + +

    Using named options, the example above becomes: +

     
    crop=w=100:h=100:x=12:y=34
    +
    + +
  • +Crop the central input area with size 100x100: +
     
    crop=100:100
    +
    + +
  • +Crop the central input area with size 2/3 of the input video: +
     
    crop=2/3*in_w:2/3*in_h
    +
    + +
  • +Crop the input video central square: +
     
    crop=out_w=in_h
    +crop=in_h
    +
    + +
  • +Delimit the rectangle with the top-left corner placed at position +100:100 and the right-bottom corner corresponding to the right-bottom +corner of the input image: +
     
    crop=in_w-100:in_h-100:100:100
    +
    + +
  • +Crop 10 pixels from the left and right borders, and 20 pixels from +the top and bottom borders +
     
    crop=in_w-2*10:in_h-2*20
    +
    + +
  • +Keep only the bottom right quarter of the input image: +
     
    crop=in_w/2:in_h/2:in_w/2:in_h/2
    +
    + +
  • +Crop height for getting Greek harmony: +
     
    crop=in_w:1/PHI*in_w
    +
    + +
  • +Apply trembling effect: +
     
    crop=in_w/2:in_h/2:(in_w-out_w)/2+((in_w-out_w)/2)*sin(n/10):(in_h-out_h)/2 +((in_h-out_h)/2)*sin(n/7)
    +
    + +
  • +Apply erratic camera effect depending on timestamp: +
     
    crop=in_w/2:in_h/2:(in_w-out_w)/2+((in_w-out_w)/2)*sin(t*10):(in_h-out_h)/2 +((in_h-out_h)/2)*sin(t*13)"
    +
    + +
  • +Set x depending on the value of y: +
     
    crop=in_w/2:in_h/2:y:10+10*sin(n/10)
    +
    +
+ + +

9.14 cropdetect

+ +

Auto-detect crop size. +

+

Calculate necessary cropping parameters and prints the recommended +parameters through the logging system. The detected dimensions +correspond to the non-black area of the input video. +

+

The filter accepts the following options: +

+
+
limit
+

Set higher black value threshold, which can be optionally specified +from nothing (0) to everything (255). An intensity value greater +than the set value is considered non-black. Default value is 24. +

+
+
round
+

Set the value for which the width/height should be divisible by. The +offset is automatically adjusted to center the video. Use 2 to get +only even dimensions (needed for 4:2:2 video). 16 is best when +encoding to most video codecs. Default value is 16. +

+
+
reset_count, reset
+

Set the counter that determines after how many frames cropdetect will +reset the previously detected largest video area and start over to +detect the current optimal crop area. Default value is 0. +

+

This can be useful when channel logos distort the video area. 0 +indicates never reset and return the largest area encountered during +playback. +

+
+ +

+

+

9.15 curves

+ +

Apply color adjustments using curves. +

+

This filter is similar to the Adobe Photoshop and GIMP curves tools. Each +component (red, green and blue) has its values defined by N key points +tied from each other using a smooth curve. The x-axis represents the pixel +values from the input frame, and the y-axis the new pixel values to be set for +the output frame. +

+

By default, a component curve is defined by the two points (0;0) and +(1;1). This creates a straight line where each original pixel value is +"adjusted" to its own value, which means no change to the image. +

+

The filter allows you to redefine these two points and add some more. A new +curve (using a natural cubic spline interpolation) will be defined to pass +smoothly through all these new coordinates. The newly defined points need to be +strictly increasing over the x-axis, and their x and y values must +be in the [0;1] interval. If the computed curves happen to go outside +the vector spaces, the values will be clipped accordingly. +

+

If there is no key point defined in x=0, the filter will automatically +insert a (0;0) point. In the same way, if there is no key point defined +in x=1, the filter will automatically insert a (1;1) point. +

+

The filter accepts the following options: +

+
+
preset
+

Select one of the available color presets. This option can be used in addition +to the ‘r’, ‘g’, ‘b’ parameters; in this case, the latter +options take priority over the preset values. +Available presets are: +

+
none
+
color_negative
+
cross_process
+
darker
+
increase_contrast
+
lighter
+
linear_contrast
+
medium_contrast
+
negative
+
strong_contrast
+
vintage
+
+

Default is none. +

+
master, m
+

Set the master key points. These points will define a second pass mapping. It +is sometimes called a "luminance" or "value" mapping. It can be used with +‘r’, ‘g’, ‘b’ or ‘all’ since it acts like a +post-processing LUT. +

+
red, r
+

Set the key points for the red component. +

+
green, g
+

Set the key points for the green component. +

+
blue, b
+

Set the key points for the blue component. +

+
all
+

Set the key points for all components (not including master). +Can be used in addition to the other key points component +options. In this case, the unset component(s) will fallback on this +‘all’ setting. +

+
psfile
+

Specify a Photoshop curves file (.asv) to import the settings from. +

+
+ +

To avoid some filtergraph syntax conflicts, each key points list needs to be +defined using the following syntax: x0/y0 x1/y1 x2/y2 .... +

+ +

9.15.1 Examples

+ +
    +
  • +Increase slightly the middle level of blue: +
     
    curves=blue='0.5/0.58'
    +
    + +
  • +Vintage effect: +
     
    curves=r='0/0.11 .42/.51 1/0.95':g='0.50/0.48':b='0/0.22 .49/.44 1/0.8'
    +
    +

    Here we obtain the following coordinates for each component: +

    +
    red
    +

    (0;0.11) (0.42;0.51) (1;0.95) +

    +
    green
    +

    (0;0) (0.50;0.48) (1;1) +

    +
    blue
    +

    (0;0.22) (0.49;0.44) (1;0.80) +

    +
    + +
  • +The previous example can also be achieved with the associated built-in preset: +
     
    curves=preset=vintage
    +
    + +
  • +Or simply: +
     
    curves=vintage
    +
    + +
  • +Use a Photoshop preset and redefine the points of the green component: +
     
    curves=psfile='MyCurvesPresets/purple.asv':green='0.45/0.53'
    +
    +
+ + +

9.16 dctdnoiz

+ +

Denoise frames using 2D DCT (frequency domain filtering). +

+

This filter is not designed for real time and can be extremely slow. +

+

The filter accepts the following options: +

+
+
sigma, s
+

Set the noise sigma constant. +

+

This sigma defines a hard threshold of 3 * sigma; every DCT +coefficient (absolute value) below this threshold will be dropped. +

+

If you need a more advanced filtering, see ‘expr’. +

+

Default is 0. +

+
+
overlap
+

Set the number of overlapping pixels for each block. Each block is of size +16x16. Since the filter can be slow, you may want to reduce this value, +at the cost of a less effective filter and the risk of various artefacts. +

+

If the overlapping value doesn’t allow processing the whole input width or +height, a warning will be displayed and the corresponding borders won’t be denoised. +

+

Default value is 15. +

+
+
expr, e
+

Set the coefficient factor expression. +

+

For each coefficient of a DCT block, this expression will be evaluated as a +multiplier value for the coefficient. +

+

If this option is set, the ‘sigma’ option will be ignored. +

+

The absolute value of the coefficient can be accessed through the c +variable. +

+
+ + +

9.16.1 Examples

+ +

Apply a denoise with a ‘sigma’ of 4.5: +

 
dctdnoiz=4.5
+
+ +

The same operation can be achieved using the expression system: +

 
dctdnoiz=e='gte(c, 4.5*3)'
+
+ +

+

+

9.17 decimate

+ +

Drop duplicated frames at regular intervals. +

+

The filter accepts the following options: +

+
+
cycle
+

Set the number of frames from which one will be dropped. Setting this to +N means one frame in every batch of N frames will be dropped. +Default is 5. +

+
+
dupthresh
+

Set the threshold for duplicate detection. If the difference metric for a frame +is less than or equal to this value, then it is declared as duplicate. Default +is 1.1 +

+
+
scthresh
+

Set scene change threshold. Default is 15. +

+
+
blockx
+
blocky
+

Set the size of the x and y-axis blocks used during metric calculations. +Larger blocks give better noise suppression, but also give worse detection of +small movements. Must be a power of two. Default is 32. +

+
+
ppsrc
+

Mark main input as a pre-processed input and activate clean source input +stream. This allows the input to be pre-processed with various filters to help +the metrics calculation while keeping the frame selection lossless. When set to +1, the first stream is for the pre-processed input, and the second +stream is the clean source from where the kept frames are chosen. Default is +0. +

+
+
chroma
+

Set whether or not chroma is considered in the metric calculations. Default is +1. +

+
+ + +

9.18 dejudder

+ +

Remove judder produced by partially interlaced telecined content. +

+

Judder can be introduced, for instance, by pullup filter. If the original +source was partially telecined content then the output of pullup,dejudder +will have a variable frame rate. May change the recorded frame rate of the +container. Aside from that change, this filter will not affect constant frame +rate video. +

+

The option available in this filter is: +

+
cycle
+

Specify the length of the window over which the judder repeats. +

+

Accepts any integer greater than 1. Useful values are: +

+
4
+

If the original was telecined from 24 to 30 fps (Film to NTSC). +

+
+
5
+

If the original was telecined from 25 to 30 fps (PAL to NTSC). +

+
+
20
+

If a mixture of the two. +

+
+ +

The default is ‘4’. +

+
+ + +

9.19 delogo

+ +

Suppress a TV station logo by a simple interpolation of the surrounding +pixels. Just set a rectangle covering the logo and watch it disappear +(and sometimes something even uglier appear - your mileage may vary). +

+

This filter accepts the following options: +

+
x
+
y
+

Specify the top left corner coordinates of the logo. They must be +specified. +

+
+
w
+
h
+

Specify the width and height of the logo to clear. They must be +specified. +

+
+
band, t
+

Specify the thickness of the fuzzy edge of the rectangle (added to +w and h). The default value is 4. +

+
+
show
+

When set to 1, a green rectangle is drawn on the screen to simplify +finding the right x, y, w, and h parameters. +The default value is 0. +

+

The rectangle is drawn on the outermost pixels which will be (partly) +replaced with interpolated values. The values of the next pixels +immediately outside this rectangle in each direction will be used to +compute the interpolated pixel values inside the rectangle. +

+
+
+ + +

9.19.1 Examples

+ +
    +
  • +Set a rectangle covering the area with top left corner coordinates 0,0 +and size 100x77, setting a band of size 10: +
     
    delogo=x=0:y=0:w=100:h=77:band=10
    +
    + +
+ + +

9.20 deshake

+ +

Attempt to fix small changes in horizontal and/or vertical shift. This +filter helps remove camera shake from hand-holding a camera, bumping a +tripod, moving on a vehicle, etc. +

+

The filter accepts the following options: +

+
+
x
+
y
+
w
+
h
+

Specify a rectangular area where to limit the search for motion +vectors. +If desired the search for motion vectors can be limited to a +rectangular area of the frame defined by its top left corner, width +and height. These parameters have the same meaning as the drawbox +filter which can be used to visualise the position of the bounding +box. +

+

This is useful when simultaneous movement of subjects within the frame +might be confused for camera motion by the motion vector search. +

+

If any or all of x, y, w and h are set to -1 +then the full frame is used. This allows later options to be set +without specifying the bounding box for the motion vector search. +

+

Default - search the whole frame. +

+
+
rx
+
ry
+

Specify the maximum extent of movement in x and y directions in the +range 0-64 pixels. Default 16. +

+
+
edge
+

Specify how to generate pixels to fill blanks at the edge of the +frame. Available values are: +

+
blank, 0
+

Fill zeroes at blank locations +

+
original, 1
+

Original image at blank locations +

+
clamp, 2
+

Extruded edge value at blank locations +

+
mirror, 3
+

Mirrored edge at blank locations +

+
+

Default value is ‘mirror’. +

+
+
blocksize
+

Specify the blocksize to use for motion search. Range 4-128 pixels, +default 8. +

+
+
contrast
+

Specify the contrast threshold for blocks. Only blocks with more than +the specified contrast (difference between darkest and lightest +pixels) will be considered. Range 1-255, default 125. +

+
+
search
+

Specify the search strategy. Available values are: +

+
exhaustive, 0
+

Set exhaustive search +

+
less, 1
+

Set less exhaustive search. +

+
+

Default value is ‘exhaustive’. +

+
+
filename
+

If set then a detailed log of the motion search is written to the +specified file. +

+
+
opencl
+

If set to 1, specify using OpenCL capabilities, only available if +FFmpeg was configured with --enable-opencl. Default value is 0. +

+
+
+ + +

9.21 drawbox

+ +

Draw a colored box on the input image. +

+

This filter accepts the following options: +

+
+
x
+
y
+

The expressions which specify the top left corner coordinates of the box. Default to 0. +

+
+
width, w
+
height, h
+

The expressions which specify the width and height of the box, if 0 they are interpreted as +the input width and height. Default to 0. +

+
+
color, c
+

Specify the color of the box to write. For the general syntax of this option, +check the "Color" section in the ffmpeg-utils manual. If the special +value invert is used, the box edge color is the same as the +video with inverted luma. +

+
+
thickness, t
+

The expression which sets the thickness of the box edge. Default value is 3. +

+

See below for the list of accepted constants. +

+
+ +

The parameters for x, y, w and h and t are expressions containing the +following constants: +

+
+
dar
+

The input display aspect ratio, it is the same as (w / h) * sar. +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+
in_h, ih
+
in_w, iw
+

The input width and height. +

+
+
sar
+

The input sample aspect ratio. +

+
+
x
+
y
+

The x and y offset coordinates where the box is drawn. +

+
+
w
+
h
+

The width and height of the drawn box. +

+
+
t
+

The thickness of the drawn box. +

+

These constants allow the x, y, w, h and t expressions to refer to +each other, so you may for example specify y=x/dar or h=w/dar. +

+
+
+ + +

9.21.1 Examples

+ +
    +
  • +Draw a black box around the edge of the input image: +
     
    drawbox
    +
    + +
  • +Draw a box with color red and an opacity of 50%: +
     
    drawbox=10:20:200:60:red@0.5
    +
    + +

    The previous example can be specified as: +

     
    drawbox=x=10:y=20:w=200:h=60:color=red@0.5
    +
    + +
  • +Fill the box with pink color: +
     
    drawbox=x=10:y=10:w=100:h=100:color=pink@0.5:t=max
    +
    + +
  • +Draw a 2-pixel red 2.40:1 mask: +
     
    drawbox=x=-t:y=0.5*(ih-iw/2.4)-t:w=iw+t*2:h=iw/2.4+t*2:t=2:c=red
    +
    +
+ + +

9.22 drawgrid

+ +

Draw a grid on the input image. +

+

This filter accepts the following options: +

+
+
x
+
y
+

The expressions which specify the coordinates of some point of grid intersection (meant to configure offset). Both default to 0. +

+
+
width, w
+
height, h
+

The expressions which specify the width and height of the grid cell, if 0 they are interpreted as the +input width and height, respectively, minus thickness, so image gets +framed. Default to 0. +

+
+
color, c
+

Specify the color of the grid. For the general syntax of this option, +check the "Color" section in the ffmpeg-utils manual. If the special +value invert is used, the grid color is the same as the +video with inverted luma. +

+
+
thickness, t
+

The expression which sets the thickness of the grid line. Default value is 1. +

+

See below for the list of accepted constants. +

+
+ +

The parameters for x, y, w and h and t are expressions containing the +following constants: +

+
+
dar
+

The input display aspect ratio, it is the same as (w / h) * sar. +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+
in_h, ih
+
in_w, iw
+

The input grid cell width and height. +

+
+
sar
+

The input sample aspect ratio. +

+
+
x
+
y
+

The x and y coordinates of some point of grid intersection (meant to configure offset). +

+
+
w
+
h
+

The width and height of the drawn cell. +

+
+
t
+

The thickness of the drawn cell. +

+

These constants allow the x, y, w, h and t expressions to refer to +each other, so you may for example specify y=x/dar or h=w/dar. +

+
+
+ + +

9.22.1 Examples

+ +
    +
  • +Draw a grid with cell 100x100 pixels, thickness 2 pixels, with color red and an opacity of 50%: +
     
    drawgrid=width=100:height=100:thickness=2:color=red@0.5
    +
    + +
  • +Draw a white 3x3 grid with an opacity of 50%: +
     
    drawgrid=w=iw/3:h=ih/3:t=2:c=white@0.5
    +
    +
+ +

+

+

9.23 drawtext

+ +

Draw text string or text from specified file on top of video using the +libfreetype library. +

+

To enable compilation of this filter you need to configure FFmpeg with +--enable-libfreetype. +

+ +

9.23.1 Syntax

+ +

The description of the accepted parameters follows. +

+
+
box
+

Used to draw a box around text using background color. +Value should be either 1 (enable) or 0 (disable). +The default value of box is 0. +

+
+
boxcolor
+

The color to be used for drawing box around text. For the syntax of this +option, check the "Color" section in the ffmpeg-utils manual. +

+

The default value of boxcolor is "white". +

+
+
borderw
+

Set the width of the border to be drawn around the text using bordercolor. +The default value of borderw is 0. +

+
+
bordercolor
+

Set the color to be used for drawing border around text. For the syntax of this +option, check the "Color" section in the ffmpeg-utils manual. +

+

The default value of bordercolor is "black". +

+
+
expansion
+

Select how the text is expanded. Can be either none, +strftime (deprecated) or +normal (default). See the Text expansion section +below for details. +

+
+
fix_bounds
+

If true, check and fix text coords to avoid clipping. +

+
+
fontcolor
+

The color to be used for drawing fonts. For the syntax of this option, check +the "Color" section in the ffmpeg-utils manual. +

+

The default value of fontcolor is "black". +

+
+
fontfile
+

The font file to be used for drawing text. Path must be included. +This parameter is mandatory. +

+
+
fontsize
+

The font size to be used for drawing text. +The default value of fontsize is 16. +

+
+
ft_load_flags
+

Flags to be used for loading the fonts. +

+

The flags map the corresponding flags supported by libfreetype, and are +a combination of the following values: +

+
default
+
no_scale
+
no_hinting
+
render
+
no_bitmap
+
vertical_layout
+
force_autohint
+
crop_bitmap
+
pedantic
+
ignore_global_advance_width
+
no_recurse
+
ignore_transform
+
monochrome
+
linear_design
+
no_autohint
+
+ +

Default value is "default". +

+

For more information consult the documentation for the FT_LOAD_* +libfreetype flags. +

+
+
shadowcolor
+

The color to be used for drawing a shadow behind the drawn text. For the +syntax of this option, check the "Color" section in the ffmpeg-utils manual. +

+

The default value of shadowcolor is "black". +

+
+
shadowx
+
shadowy
+

The x and y offsets for the text shadow position with respect to the +position of the text. They can be either positive or negative +values. Default value for both is "0". +

+
+
start_number
+

The starting frame number for the n/frame_num variable. The default value +is "0". +

+
+
tabsize
+

The size in number of spaces to use for rendering the tab. +Default value is 4. +

+
+
timecode
+

Set the initial timecode representation in "hh:mm:ss[:;.]ff" +format. It can be used with or without text parameter. timecode_rate +option must be specified. +

+
+
timecode_rate, rate, r
+

Set the timecode frame rate (timecode only). +

+
+
text
+

The text string to be drawn. The text must be a sequence of UTF-8 +encoded characters. +This parameter is mandatory if no file is specified with the parameter +textfile. +

+
+
textfile
+

A text file containing text to be drawn. The text must be a sequence +of UTF-8 encoded characters. +

+

This parameter is mandatory if no text string is specified with the +parameter text. +

+

If both text and textfile are specified, an error is thrown. +

+
+
reload
+

If set to 1, the textfile will be reloaded before each frame. +Be sure to update it atomically, or it may be read partially, or even fail. +

+
+
x
+
y
+

The expressions which specify the offsets where text will be drawn +within the video frame. They are relative to the top/left border of the +output image. +

+

The default value of x and y is "0". +

+

See below for the list of accepted constants and functions. +

+
+ +

The parameters for x and y are expressions containing the +following constants and functions: +

+
+
dar
+

input display aspect ratio, it is the same as (w / h) * sar +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+
line_h, lh
+

the height of each text line +

+
+
main_h, h, H
+

the input height +

+
+
main_w, w, W
+

the input width +

+
+
max_glyph_a, ascent
+

the maximum distance from the baseline to the highest/upper grid +coordinate used to place a glyph outline point, for all the rendered +glyphs. +It is a positive value, due to the grid’s orientation with the Y axis +upwards. +

+
+
max_glyph_d, descent
+

the maximum distance from the baseline to the lowest grid coordinate +used to place a glyph outline point, for all the rendered glyphs. +This is a negative value, due to the grid’s orientation, with the Y axis +upwards. +

+
+
max_glyph_h
+

maximum glyph height, that is the maximum height for all the glyphs +contained in the rendered text, it is equivalent to ascent - +descent. +

+
+
max_glyph_w
+

maximum glyph width, that is the maximum width for all the glyphs +contained in the rendered text +

+
+
n
+

the number of input frame, starting from 0 +

+
+
rand(min, max)
+

return a random number included between min and max +

+
+
sar
+

input sample aspect ratio +

+
+
t
+

timestamp expressed in seconds, NAN if the input timestamp is unknown +

+
+
text_h, th
+

the height of the rendered text +

+
+
text_w, tw
+

the width of the rendered text +

+
+
x
+
y
+

the x and y offset coordinates where the text is drawn. +

+

These parameters allow the x and y expressions to refer +each other, so you can for example specify y=x/dar. +

+
+ +

If libavfilter was built with --enable-fontconfig, then +‘fontfile’ can be a fontconfig pattern or omitted. +

+

+

+

9.23.2 Text expansion

+ +

If ‘expansion’ is set to strftime, +the filter recognizes strftime() sequences in the provided text and +expands them accordingly. Check the documentation of strftime(). This +feature is deprecated. +

+

If ‘expansion’ is set to none, the text is printed verbatim. +

+

If ‘expansion’ is set to normal (which is the default), +the following expansion mechanism is used. +

+

The backslash character ’\’, followed by any character, always expands to +the second character. +

+

Sequence of the form %{...} are expanded. The text between the +braces is a function name, possibly followed by arguments separated by ’:’. +If the arguments contain special characters or delimiters (’:’ or ’}’), +they should be escaped. +

+

Note that they probably must also be escaped as the value for the +‘text’ option in the filter argument string and as the filter +argument in the filtergraph description, and possibly also for the shell, +that makes up to four levels of escaping; using a text file avoids these +problems. +

+

The following functions are available: +

+
+
expr, e
+

The expression evaluation result. +

+

It must take one argument specifying the expression to be evaluated, +which accepts the same constants and functions as the x and +y values. Note that not all constants should be used, for +example the text size is not known when evaluating the expression, so +the constants text_w and text_h will have an undefined +value. +

+
+
gmtime
+

The time at which the filter is running, expressed in UTC. +It can accept an argument: a strftime() format string. +

+
+
localtime
+

The time at which the filter is running, expressed in the local time zone. +It can accept an argument: a strftime() format string. +

+
+
metadata
+

Frame metadata. It must take one argument specifying metadata key. +

+
+
n, frame_num
+

The frame number, starting from 0. +

+
+
pict_type
+

A 1 character description of the current picture type. +

+
+
pts
+

The timestamp of the current frame, in seconds, with microsecond accuracy. +

+
+
+ + +

9.23.3 Examples

+ +
    +
  • +Draw "Test Text" with font FreeSerif, using the default values for the +optional parameters. + +
     
    drawtext="fontfile=/usr/share/fonts/truetype/freefont/FreeSerif.ttf: text='Test Text'"
    +
    + +
  • +Draw ’Test Text’ with font FreeSerif of size 24 at position x=100 +and y=50 (counting from the top-left corner of the screen), text is +yellow with a red box around it. Both the text and the box have an +opacity of 20%. + +
     
    drawtext="fontfile=/usr/share/fonts/truetype/freefont/FreeSerif.ttf: text='Test Text':\
    +          x=100: y=50: fontsize=24: fontcolor=yellow@0.2: box=1: boxcolor=red@0.2"
    +
    + +

    Note that the double quotes are not necessary if spaces are not used +within the parameter list. +

    +
  • +Show the text at the center of the video frame: +
     
    drawtext="fontsize=30:fontfile=FreeSerif.ttf:text='hello world':x=(w-text_w)/2:y=(h-text_h-line_h)/2"
    +
    + +
  • +Show a text line sliding from right to left in the last row of the video +frame. The file ‘LONG_LINE’ is assumed to contain a single line +with no newlines. +
     
    drawtext="fontsize=15:fontfile=FreeSerif.ttf:text=LONG_LINE:y=h-line_h:x=-50*t"
    +
    + +
  • +Show the content of file ‘CREDITS’ off the bottom of the frame and scroll up. +
     
    drawtext="fontsize=20:fontfile=FreeSerif.ttf:textfile=CREDITS:y=h-20*t"
    +
    + +
  • +Draw a single green letter "g", at the center of the input video. +The glyph baseline is placed at half screen height. +
     
    drawtext="fontsize=60:fontfile=FreeSerif.ttf:fontcolor=green:text=g:x=(w-max_glyph_w)/2:y=h/2-ascent"
    +
    + +
  • +Show text for 1 second every 3 seconds: +
     
    drawtext="fontfile=FreeSerif.ttf:fontcolor=white:x=100:y=x/dar:enable=lt(mod(t\,3)\,1):text='blink'"
    +
    + +
  • +Use fontconfig to set the font. Note that the colons need to be escaped. +
     
    drawtext='fontfile=Linux Libertine O-40\:style=Semibold:text=FFmpeg'
    +
    + +
  • +Print the date of a real-time encoding (see strftime(3)): +
     
    drawtext='fontfile=FreeSans.ttf:text=%{localtime:%a %b %d %Y}'
    +
    + +
+ +

For more information about libfreetype, check: +http://www.freetype.org/. +

+

For more information about fontconfig, check: +http://freedesktop.org/software/fontconfig/fontconfig-user.html. +

+ +

9.24 edgedetect

+ +

Detect and draw edges. The filter uses the Canny Edge Detection algorithm. +

+

The filter accepts the following options: +

+
+
low
+
high
+

Set low and high threshold values used by the Canny thresholding +algorithm. +

+

The high threshold selects the "strong" edge pixels, which are then +connected through 8-connectivity with the "weak" edge pixels selected +by the low threshold. +

+

low and high threshold values must be chosen in the range +[0,1], and low should be lesser or equal to high. +

+

Default value for low is 20/255, and default value for high +is 50/255. +

+
+ +

Example: +

 
edgedetect=low=0.1:high=0.4
+
+ + +

9.25 extractplanes

+ +

Extract color channel components from input video stream into +separate grayscale video streams. +

+

The filter accepts the following option: +

+
+
planes
+

Set plane(s) to extract. +

+

Available values for planes are: +

+
y
+
u
+
v
+
a
+
r
+
g
+
b
+
+ +

Choosing planes not available in the input will result in an error. +That means you cannot select r, g, b planes +with y, u, v planes at same time. +

+
+ + +

9.25.1 Examples

+ +
    +
  • +Extract luma, u and v color channel component from input video frame +into 3 grayscale outputs: +
     
    ffmpeg -i video.avi -filter_complex 'extractplanes=y+u+v[y][u][v]' -map '[y]' y.avi -map '[u]' u.avi -map '[v]' v.avi
    +
    +
+ + +

9.26 elbg

+ +

Apply a posterize effect using the ELBG (Enhanced LBG) algorithm. +

+

For each input image, the filter will compute the optimal mapping from +the input to the output given the codebook length, that is the number +of distinct output colors. +

+

This filter accepts the following options. +

+
+
codebook_length, l
+

Set codebook length. The value must be a positive integer, and +represents the number of distinct output colors. Default value is 256. +

+
+
nb_steps, n
+

Set the maximum number of iterations to apply for computing the optimal +mapping. The higher the value the better the result and the higher the +computation time. Default value is 1. +

+
+
seed, s
+

Set a random seed, must be an integer included between 0 and +UINT32_MAX. If not specified, or if explicitly set to -1, the filter +will try to use a good random seed on a best effort basis. +

+
+ + +

9.27 fade

+ +

Apply fade-in/out effect to input video. +

+

This filter accepts the following options: +

+
+
type, t
+

The effect type – can be either "in" for fade-in, or "out" for a fade-out +effect. +Default is in. +

+
+
start_frame, s
+

Specify the number of the start frame for starting to apply the fade +effect. Default is 0. +

+
+
nb_frames, n
+

The number of frames for which the fade effect has to last. At the end of the +fade-in effect the output video will have the same intensity as the input video, +at the end of the fade-out transition the output video will be filled with the +selected ‘color’. +Default is 25. +

+
+
alpha
+

If set to 1, fade only alpha channel, if one exists on the input. +Default value is 0. +

+
+
start_time, st
+

Specify the timestamp (in seconds) of the frame to start to apply the fade +effect. If both start_frame and start_time are specified, the fade will start at +whichever comes last. Default is 0. +

+
+
duration, d
+

The number of seconds for which the fade effect has to last. At the end of the +fade-in effect the output video will have the same intensity as the input video, +at the end of the fade-out transition the output video will be filled with the +selected ‘color’. +If both duration and nb_frames are specified, duration is used. Default is 0. +

+
+
color, c
+

Specify the color of the fade. Default is "black". +

+
+ + +

9.27.1 Examples

+ +
    +
  • +Fade in first 30 frames of video: +
     
    fade=in:0:30
    +
    + +

    The command above is equivalent to: +

     
    fade=t=in:s=0:n=30
    +
    + +
  • +Fade out last 45 frames of a 200-frame video: +
     
    fade=out:155:45
    +fade=type=out:start_frame=155:nb_frames=45
    +
    + +
  • +Fade in first 25 frames and fade out last 25 frames of a 1000-frame video: +
     
    fade=in:0:25, fade=out:975:25
    +
    + +
  • +Make first 5 frames yellow, then fade in from frame 5-24: +
     
    fade=in:5:20:color=yellow
    +
    + +
  • +Fade in alpha over first 25 frames of video: +
     
    fade=in:0:25:alpha=1
    +
    + +
  • +Make first 5.5 seconds black, then fade in for 0.5 seconds: +
     
    fade=t=in:st=5.5:d=0.5
    +
    + +
+ + +

9.28 field

+ +

Extract a single field from an interlaced image using stride +arithmetic to avoid wasting CPU time. The output frames are marked as +non-interlaced. +

+

The filter accepts the following options: +

+
+
type
+

Specify whether to extract the top (if the value is 0 or +top) or the bottom field (if the value is 1 or +bottom). +

+
+ + +

9.29 fieldmatch

+ +

Field matching filter for inverse telecine. It is meant to reconstruct the +progressive frames from a telecined stream. The filter does not drop duplicated +frames, so to achieve a complete inverse telecine fieldmatch needs to be +followed by a decimation filter such as decimate in the filtergraph. +

+

The separation of the field matching and the decimation is notably motivated by +the possibility of inserting a de-interlacing filter fallback between the two. +If the source has mixed telecined and real interlaced content, +fieldmatch will not be able to match fields for the interlaced parts. +But these remaining combed frames will be marked as interlaced, and thus can be +de-interlaced by a later filter such as yadif before decimation. +

+

In addition to the various configuration options, fieldmatch can take an +optional second stream, activated through the ‘ppsrc’ option. If +enabled, the frames reconstruction will be based on the fields and frames from +this second stream. This allows the first input to be pre-processed in order to +help the various algorithms of the filter, while keeping the output lossless +(assuming the fields are matched properly). Typically, a field-aware denoiser, +or brightness/contrast adjustments can help. +

+

Note that this filter uses the same algorithms as TIVTC/TFM (AviSynth project) +and VIVTC/VFM (VapourSynth project). The latter is a light clone of TFM from +which fieldmatch is based on. While the semantics and usage are very +close, some behaviour and option names can differ. +

+

The filter accepts the following options: +

+
+
order
+

Specify the assumed field order of the input stream. Available values are: +

+
+
auto
+

Auto detect parity (use FFmpeg’s internal parity value). +

+
bff
+

Assume bottom field first. +

+
tff
+

Assume top field first. +

+
+ +

Note that it is sometimes recommended not to trust the parity announced by the +stream. +

+

Default value is auto. +

+
+
mode
+

Set the matching mode or strategy to use. ‘pc’ mode is the safest in the +sense that it won’t risk creating jerkiness due to duplicate frames when +possible, but if there are bad edits or blended fields it will end up +outputting combed frames when a good match might actually exist. On the other +hand, ‘pcn_ub’ mode is the most risky in terms of creating jerkiness, +but will almost always find a good frame if there is one. The other values are +all somewhere in between ‘pc’ and ‘pcn_ub’ in terms of risking +jerkiness and creating duplicate frames versus finding good matches in sections +with bad edits, orphaned fields, blended fields, etc. +

+

More details about p/c/n/u/b are available in p/c/n/u/b meaning section. +

+

Available values are: +

+
+
pc
+

2-way matching (p/c) +

+
pc_n
+

2-way matching, and trying 3rd match if still combed (p/c + n) +

+
pc_u
+

2-way matching, and trying 3rd match (same order) if still combed (p/c + u) +

+
pc_n_ub
+

2-way matching, trying 3rd match if still combed, and trying 4th/5th matches if +still combed (p/c + n + u/b) +

+
pcn
+

3-way matching (p/c/n) +

+
pcn_ub
+

3-way matching, and trying 4th/5th matches if all 3 of the original matches are +detected as combed (p/c/n + u/b) +

+
+ +

The parenthesis at the end indicate the matches that would be used for that +mode assuming ‘order’=tff (and ‘field’ on auto or +top). +

+

In terms of speed ‘pc’ mode is by far the fastest and ‘pcn_ub’ is +the slowest. +

+

Default value is pc_n. +

+
+
ppsrc
+

Mark the main input stream as a pre-processed input, and enable the secondary +input stream as the clean source to pick the fields from. See the filter +introduction for more details. It is similar to the ‘clip2’ feature from +VFM/TFM. +

+

Default value is 0 (disabled). +

+
+
field
+

Set the field to match from. It is recommended to set this to the same value as +‘order’ unless you experience matching failures with that setting. In +certain circumstances changing the field that is used to match from can have a +large impact on matching performance. Available values are: +

+
+
auto
+

Automatic (same value as ‘order’). +

+
bottom
+

Match from the bottom field. +

+
top
+

Match from the top field. +

+
+ +

Default value is auto. +

+
+
mchroma
+

Set whether or not chroma is included during the match comparisons. In most +cases it is recommended to leave this enabled. You should set this to 0 +only if your clip has bad chroma problems such as heavy rainbowing or other +artifacts. Setting this to 0 could also be used to speed things up at +the cost of some accuracy. +

+

Default value is 1. +

+
+
y0
+
y1
+

These define an exclusion band which excludes the lines between ‘y0’ and +‘y1’ from being included in the field matching decision. An exclusion +band can be used to ignore subtitles, a logo, or other things that may +interfere with the matching. ‘y0’ sets the starting scan line and +‘y1’ sets the ending line; all lines in between ‘y0’ and +‘y1’ (including ‘y0’ and ‘y1’) will be ignored. Setting +‘y0’ and ‘y1’ to the same value will disable the feature. +‘y0’ and ‘y1’ defaults to 0. +

+
+
scthresh
+

Set the scene change detection threshold as a percentage of maximum change on +the luma plane. Good values are in the [8.0, 14.0] range. Scene change +detection is only relevant in case ‘combmatch’=sc. The range for +‘scthresh’ is [0.0, 100.0]. +

+

Default value is 12.0. +

+
+
combmatch
+

When ‘combmatch’ is not none, fieldmatch will take into +account the combed scores of matches when deciding what match to use as the +final match. Available values are: +

+
+
none
+

No final matching based on combed scores. +

+
sc
+

Combed scores are only used when a scene change is detected. +

+
full
+

Use combed scores all the time. +

+
+ +

Default is sc. +

+
+
combdbg
+

Force fieldmatch to calculate the combed metrics for certain matches and +print them. This setting is known as ‘micout’ in TFM/VFM vocabulary. +Available values are: +

+
+
none
+

No forced calculation. +

+
pcn
+

Force p/c/n calculations. +

+
pcnub
+

Force p/c/n/u/b calculations. +

+
+ +

Default value is none. +

+
+
cthresh
+

This is the area combing threshold used for combed frame detection. This +essentially controls how "strong" or "visible" combing must be to be detected. +Larger values mean combing must be more visible and smaller values mean combing +can be less visible or strong and still be detected. Valid settings are from +-1 (every pixel will be detected as combed) to 255 (no pixel will +be detected as combed). This is basically a pixel difference value. A good +range is [8, 12]. +

+

Default value is 9. +

+
+
chroma
+

Sets whether or not chroma is considered in the combed frame decision. Only +disable this if your source has chroma problems (rainbowing, etc.) that are +causing problems for the combed frame detection with chroma enabled. Actually, +using ‘chroma’=0 is usually more reliable, except for the case +where there is chroma only combing in the source. +

+

Default value is 0. +

+
+
blockx
+
blocky
+

Respectively set the x-axis and y-axis size of the window used during combed +frame detection. This has to do with the size of the area in which +‘combpel’ pixels are required to be detected as combed for a frame to be +declared combed. See the ‘combpel’ parameter description for more info. +Possible values are any number that is a power of 2 starting at 4 and going up +to 512. +

+

Default value is 16. +

+
+
combpel
+

The number of combed pixels inside any of the ‘blocky’ by +‘blockx’ size blocks on the frame for the frame to be detected as +combed. While ‘cthresh’ controls how "visible" the combing must be, this +setting controls "how much" combing there must be in any localized area (a +window defined by the ‘blockx’ and ‘blocky’ settings) on the +frame. Minimum value is 0 and maximum is blocky x blockx (at +which point no frames will ever be detected as combed). This setting is known +as ‘MI’ in TFM/VFM vocabulary. +

+

Default value is 80. +

+
+ +

+

+

9.29.1 p/c/n/u/b meaning

+ + +

9.29.1.1 p/c/n

+ +

We assume the following telecined stream: +

+
 
Top fields:     1 2 2 3 4
+Bottom fields:  1 2 3 4 4
+
+ +

The numbers correspond to the progressive frame the fields relate to. Here, the +first two frames are progressive, the 3rd and 4th are combed, and so on. +

+

When fieldmatch is configured to run a matching from bottom +(‘field’=bottom) this is how this input stream get transformed: +

+
 
Input stream:
+                T     1 2 2 3 4
+                B     1 2 3 4 4   <-- matching reference
+
+Matches:              c c n n c
+
+Output stream:
+                T     1 2 3 4 4
+                B     1 2 3 4 4
+
+ +

As a result of the field matching, we can see that some frames get duplicated. +To perform a complete inverse telecine, you need to rely on a decimation filter +after this operation. See for instance the decimate filter. +

+

The same operation now matching from top fields (‘field’=top) +looks like this: +

+
 
Input stream:
+                T     1 2 2 3 4   <-- matching reference
+                B     1 2 3 4 4
+
+Matches:              c c p p c
+
+Output stream:
+                T     1 2 2 3 4
+                B     1 2 2 3 4
+
+ +

In these examples, we can see what p, c and n mean; +basically, they refer to the frame and field of the opposite parity: +

+
    +
  • p matches the field of the opposite parity in the previous frame +
  • c matches the field of the opposite parity in the current frame +
  • n matches the field of the opposite parity in the next frame +
+ + +

9.29.1.2 u/b

+ +

The u and b matching are a bit special in the sense that they match +from the opposite parity flag. In the following examples, we assume that we are +currently matching the 2nd frame (Top:2, bottom:2). According to the match, a +’x’ is placed above and below each matched fields. +

+

With bottom matching (‘field’=bottom): +

 
Match:           c         p           n          b          u
+
+                 x       x               x        x          x
+  Top          1 2 2     1 2 2       1 2 2      1 2 2      1 2 2
+  Bottom       1 2 3     1 2 3       1 2 3      1 2 3      1 2 3
+                 x         x           x        x              x
+
+Output frames:
+                 2          1          2          2          2
+                 2          2          2          1          3
+
+ +

With top matching (‘field’=top): +

 
Match:           c         p           n          b          u
+
+                 x         x           x        x              x
+  Top          1 2 2     1 2 2       1 2 2      1 2 2      1 2 2
+  Bottom       1 2 3     1 2 3       1 2 3      1 2 3      1 2 3
+                 x       x               x        x          x
+
+Output frames:
+                 2          2          2          1          2
+                 2          1          3          2          2
+
+ + +

9.29.2 Examples

+ +

Simple IVTC of a top field first telecined stream: +

 
fieldmatch=order=tff:combmatch=none, decimate
+
+ +

Advanced IVTC, with fallback on yadif for still combed frames: +

 
fieldmatch=order=tff:combmatch=full, yadif=deint=interlaced, decimate
+
+ + +

9.30 fieldorder

+ +

Transform the field order of the input video. +

+

This filter accepts the following options: +

+
+
order
+

Output field order. Valid values are tff for top field first or bff +for bottom field first. +

+
+ +

Default value is ‘tff’. +

+

Transformation is achieved by shifting the picture content up or down +by one line, and filling the remaining line with appropriate picture content. +This method is consistent with most broadcast field order converters. +

+

If the input video is not flagged as being interlaced, or it is already +flagged as being of the required output field order then this filter does +not alter the incoming video. +

+

This filter is very useful when converting to or from PAL DV material, +which is bottom field first. +

+

For example: +

 
ffmpeg -i in.vob -vf "fieldorder=bff" out.dv
+
+ + +

9.31 fifo

+ +

Buffer input images and send them when they are requested. +

+

This filter is mainly useful when auto-inserted by the libavfilter +framework. +

+

The filter does not take parameters. +

+

+

+

9.32 format

+ +

Convert the input video to one of the specified pixel formats. +Libavfilter will try to pick one that is supported for the input to +the next filter. +

+

This filter accepts the following parameters: +

+
pix_fmts
+

A ’|’-separated list of pixel format names, for example +"pix_fmts=yuv420p|monow|rgb24". +

+
+
+ + +

9.32.1 Examples

+ +
    +
  • +Convert the input video to the format yuv420p +
     
    format=pix_fmts=yuv420p
    +
    + +

    Convert the input video to any of the formats in the list +

     
    format=pix_fmts=yuv420p|yuv444p|yuv410p
    +
    +
+ +

+

+

9.33 fps

+ +

Convert the video to specified constant frame rate by duplicating or dropping +frames as necessary. +

+

This filter accepts the following named parameters: +

+
fps
+

Desired output frame rate. The default is 25. +

+
+
round
+

Rounding method. +

+

Possible values are: +

+
zero
+

zero round towards 0 +

+
inf
+

round away from 0 +

+
down
+

round towards -infinity +

+
up
+

round towards +infinity +

+
near
+

round to nearest +

+
+

The default is near. +

+
+
start_time
+

Assume the first PTS should be the given value, in seconds. This allows for +padding/trimming at the start of stream. By default, no assumption is made +about the first frame’s expected PTS, so no padding or trimming is done. +For example, this could be set to 0 to pad the beginning with duplicates of +the first frame if a video stream starts after the audio stream or to trim any +frames with a negative PTS. +

+
+
+ +

Alternatively, the options can be specified as a flat string: +fps[:round]. +

+

See also the setpts filter. +

+ +

9.33.1 Examples

+ +
    +
  • +A typical usage in order to set the fps to 25: +
     
    fps=fps=25
    +
    + +
  • +Sets the fps to 24, using abbreviation and rounding method to round to nearest: +
     
    fps=fps=film:round=near
    +
    +
+ + +

9.34 framepack

+ +

Pack two different video streams into a stereoscopic video, setting proper +metadata on supported codecs. The two views should have the same size and +framerate and processing will stop when the shorter video ends. Please note +that you may conveniently adjust view properties with the scale and +fps filters. +

+

This filter accepts the following named parameters: +

+
format
+

Desired packing format. Supported values are: +

+
+
sbs
+

Views are next to each other (default). +

+
+
tab
+

Views are on top of each other. +

+
+
lines
+

Views are packed by line. +

+
+
columns
+

Views are packed by column. +

+
+
frameseq
+

Views are temporally interleaved. +

+
+
+ +
+
+ +

Some examples follow: +

+
 
# Convert left and right views into a frame sequential video.
+ffmpeg -i LEFT -i RIGHT -filter_complex framepack=frameseq OUTPUT
+
+# Convert views into a side-by-side video with the same output resolution as the input.
+ffmpeg -i LEFT -i RIGHT -filter_complex [0:v]scale=w=iw/2[left],[1:v]scale=w=iw/2[right],[left][right]framepack=sbs OUTPUT
+
+ + +

9.35 framestep

+ +

Select one frame every N-th frame. +

+

This filter accepts the following option: +

+
step
+

Select frame after every step frames. +Allowed values are positive integers higher than 0. Default value is 1. +

+
+ +

+

+

9.36 frei0r

+ +

Apply a frei0r effect to the input video. +

+

To enable compilation of this filter you need to install the frei0r +header and configure FFmpeg with --enable-frei0r. +

+

This filter accepts the following options: +

+
+
filter_name
+

The name of the frei0r effect to load. If the environment variable +FREI0R_PATH is defined, the frei0r effect is searched in each one of the +directories specified by the colon separated list in FREI0R_PATH, +otherwise in the standard frei0r paths, which are in this order: +‘HOME/.frei0r-1/lib/’, ‘/usr/local/lib/frei0r-1/’, +‘/usr/lib/frei0r-1/’. +

+
+
filter_params
+

A ’|’-separated list of parameters to pass to the frei0r effect. +

+
+
+ +

A frei0r effect parameter can be a boolean (whose values are specified +with "y" and "n"), a double, a color (specified by the syntax +R/G/B, (R, G, and B being float +numbers from 0.0 to 1.0) or by a color description specified in the "Color" +section in the ffmpeg-utils manual), a position (specified by the syntax X/Y, +X and Y being float numbers) and a string. +

+

The number and kind of parameters depend on the loaded effect. If an +effect parameter is not specified the default value is set. +

+ +

9.36.1 Examples

+ +
    +
  • +Apply the distort0r effect, set the first two double parameters: +
     
    frei0r=filter_name=distort0r:filter_params=0.5|0.01
    +
    + +
  • +Apply the colordistance effect, take a color as first parameter: +
     
    frei0r=colordistance:0.2/0.3/0.4
    +frei0r=colordistance:violet
    +frei0r=colordistance:0x112233
    +
    + +
  • +Apply the perspective effect, specify the top left and top right image +positions: +
     
    frei0r=perspective:0.2/0.2|0.8/0.2
    +
    +
+ +

For more information see: +http://frei0r.dyne.org +

+ +

9.37 geq

+ +

The filter accepts the following options: +

+
+
lum_expr, lum
+

Set the luminance expression. +

+
cb_expr, cb
+

Set the chrominance blue expression. +

+
cr_expr, cr
+

Set the chrominance red expression. +

+
alpha_expr, a
+

Set the alpha expression. +

+
red_expr, r
+

Set the red expression. +

+
green_expr, g
+

Set the green expression. +

+
blue_expr, b
+

Set the blue expression. +

+
+ +

The colorspace is selected according to the specified options. If one +of the ‘lum_expr’, ‘cb_expr’, or ‘cr_expr’ +options is specified, the filter will automatically select a YCbCr +colorspace. If one of the ‘red_expr’, ‘green_expr’, or +‘blue_expr’ options is specified, it will select an RGB +colorspace. +

+

If one of the chrominance expression is not defined, it falls back on the other +one. If no alpha expression is specified it will evaluate to opaque value. +If none of chrominance expressions are specified, they will evaluate +to the luminance expression. +

+

The expressions can use the following variables and functions: +

+
+
N
+

The sequential number of the filtered frame, starting from 0. +

+
+
X
+
Y
+

The coordinates of the current sample. +

+
+
W
+
H
+

The width and height of the image. +

+
+
SW
+
SH
+

Width and height scale depending on the currently filtered plane. It is the +ratio between the corresponding luma plane number of pixels and the current +plane ones. E.g. for YUV4:2:0 the values are 1,1 for the luma plane, and +0.5,0.5 for chroma planes. +

+
+
T
+

Time of the current frame, expressed in seconds. +

+
+
p(x, y)
+

Return the value of the pixel at location (x,y) of the current +plane. +

+
+
lum(x, y)
+

Return the value of the pixel at location (x,y) of the luminance +plane. +

+
+
cb(x, y)
+

Return the value of the pixel at location (x,y) of the +blue-difference chroma plane. Return 0 if there is no such plane. +

+
+
cr(x, y)
+

Return the value of the pixel at location (x,y) of the +red-difference chroma plane. Return 0 if there is no such plane. +

+
+
r(x, y)
+
g(x, y)
+
b(x, y)
+

Return the value of the pixel at location (x,y) of the +red/green/blue component. Return 0 if there is no such component. +

+
+
alpha(x, y)
+

Return the value of the pixel at location (x,y) of the alpha +plane. Return 0 if there is no such plane. +

+
+ +

For functions, if x and y are outside the area, the value will be +automatically clipped to the closer edge. +

+ +

9.37.1 Examples

+ +
    +
  • +Flip the image horizontally: +
     
    geq=p(W-X\,Y)
    +
    + +
  • +Generate a bidimensional sine wave, with angle PI/3 and a +wavelength of 100 pixels: +
     
    geq=128 + 100*sin(2*(PI/100)*(cos(PI/3)*(X-50*T) + sin(PI/3)*Y)):128:128
    +
    + +
  • +Generate a fancy enigmatic moving light: +
     
    nullsrc=s=256x256,geq=random(1)/hypot(X-cos(N*0.07)*W/2-W/2\,Y-sin(N*0.09)*H/2-H/2)^2*1000000*sin(N*0.02):128:128
    +
    + +
  • +Generate a quick emboss effect: +
     
    format=gray,geq=lum_expr='(p(X,Y)+(256-p(X-4,Y-4)))/2'
    +
    + +
  • +Modify RGB components depending on pixel position: +
     
    geq=r='X/W*r(X,Y)':g='(1-X/W)*g(X,Y)':b='(H-Y)/H*b(X,Y)'
    +
    +
+ + +

9.38 gradfun

+ +

Fix the banding artifacts that are sometimes introduced into nearly flat +regions by truncation to 8bit color depth. +Interpolate the gradients that should go where the bands are, and +dither them. +

+

This filter is designed for playback only. Do not use it prior to +lossy compression, because compression tends to lose the dither and +bring back the bands. +

+

This filter accepts the following options: +

+
+
strength
+

The maximum amount by which the filter will change any one pixel. Also the +threshold for detecting nearly flat regions. Acceptable values range from .51 to +64, default value is 1.2, out-of-range values will be clipped to the valid +range. +

+
+
radius
+

The neighborhood to fit the gradient to. A larger radius makes for smoother +gradients, but also prevents the filter from modifying the pixels near detailed +regions. Acceptable values are 8-32, default value is 16, out-of-range values +will be clipped to the valid range. +

+
+
+ +

Alternatively, the options can be specified as a flat string: +strength[:radius] +

+ +

9.38.1 Examples

+ +
    +
  • +Apply the filter with a 3.5 strength and radius of 8: +
     
    gradfun=3.5:8
    +
    + +
  • +Specify radius, omitting the strength (which will fall-back to the default +value): +
     
    gradfun=radius=8
    +
    + +
+ +

+

+

9.39 haldclut

+ +

Apply a Hald CLUT to a video stream. +

+

First input is the video stream to process, and second one is the Hald CLUT. +The Hald CLUT input can be a simple picture or a complete video stream. +

+

The filter accepts the following options: +

+
+
shortest
+

Force termination when the shortest input terminates. Default is 0. +

+
repeatlast
+

Continue applying the last CLUT after the end of the stream. A value of +0 disables the filter after the last frame of the CLUT is reached. +Default is 1. +

+
+ +

haldclut also has the same interpolation options as lut3d (both +filters share the same internals). +

+

More information about the Hald CLUT can be found on Eskil Steenberg’s website +(Hald CLUT author) at http://www.quelsolaar.com/technology/clut.html. +

+ +

9.39.1 Workflow examples

+ + +

9.39.1.1 Hald CLUT video stream

+ +

Generate an identity Hald CLUT stream altered with various effects: +

 
ffmpeg -f lavfi -i haldclutsrc=8 -vf "hue=H=2*PI*t:s=sin(2*PI*t)+1, curves=cross_process" -t 10 -c:v ffv1 clut.nut
+
+ +

Note: make sure you use a lossless codec. +

+

Then use it with haldclut to apply it on some random stream: +

 
ffmpeg -f lavfi -i mandelbrot -i clut.nut -filter_complex '[0][1] haldclut' -t 20 mandelclut.mkv
+
+ +

The Hald CLUT will be applied to the 10 first seconds (duration of +‘clut.nut’), then the latest picture of that CLUT stream will be applied +to the remaining frames of the mandelbrot stream. +

+ +

9.39.1.2 Hald CLUT with preview

+ +

A Hald CLUT is supposed to be a squared image of Level*Level*Level by +Level*Level*Level pixels. For a given Hald CLUT, FFmpeg will select the +biggest possible square starting at the top left of the picture. The remaining +padding pixels (bottom or right) will be ignored. This area can be used to add +a preview of the Hald CLUT. +

+

Typically, the following generated Hald CLUT will be supported by the +haldclut filter: +

+
 
ffmpeg -f lavfi -i haldclutsrc=8 -vf "
+   pad=iw+320 [padded_clut];
+   smptebars=s=320x256, split [a][b];
+   [padded_clut][a] overlay=W-320:h, curves=color_negative [main];
+   [main][b] overlay=W-320" -frames:v 1 clut.png
+
+ +

It contains the original and a preview of the effect of the CLUT: SMPTE color +bars are displayed on the right-top, and below the same color bars processed by +the color changes. +

+

Then, the effect of this Hald CLUT can be visualized with: +

 
ffplay input.mkv -vf "movie=clut.png, [in] haldclut"
+
+ + +

9.40 hflip

+ +

Flip the input video horizontally. +

+

For example to horizontally flip the input video with ffmpeg: +

 
ffmpeg -i in.avi -vf "hflip" out.avi
+
+ + +

9.41 histeq

+

This filter applies a global color histogram equalization on a +per-frame basis. +

+

It can be used to correct video that has a compressed range of pixel +intensities. The filter redistributes the pixel intensities to +equalize their distribution across the intensity range. It may be +viewed as an "automatically adjusting contrast filter". This filter is +useful only for correcting degraded or poorly captured source +video. +

+

The filter accepts the following options: +

+
+
strength
+

Determine the amount of equalization to be applied. As the strength +is reduced, the distribution of pixel intensities more-and-more +approaches that of the input frame. The value must be a float number +in the range [0,1] and defaults to 0.200. +

+
+
intensity
+

Set the maximum intensity that can be generated and scale the output +values appropriately. The strength should be set as desired and then +the intensity can be limited if needed to avoid washing-out. The value +must be a float number in the range [0,1] and defaults to 0.210. +

+
+
antibanding
+

Set the antibanding level. If enabled the filter will randomly vary +the luminance of output pixels by a small amount to avoid banding of +the histogram. Possible values are none, weak or +strong. It defaults to none. +

+
+ + +

9.42 histogram

+ +

Compute and draw a color distribution histogram for the input video. +

+

The computed histogram is a representation of the color component +distribution in an image. +

+

The filter accepts the following options: +

+
+
mode
+

Set histogram mode. +

+

It accepts the following values: +

+
levels
+

Standard histogram that displays the color components distribution in an +image. Displays color graph for each color component. Shows distribution of +the Y, U, V, A or R, G, B components, depending on input format, in the +current frame. Below each graph a color component scale meter is shown. +

+
+
color
+

Displays chroma values (U/V color placement) in a two dimensional +graph (which is called a vectorscope). The brighter a pixel in the +vectorscope, the more pixels of the input frame correspond to that pixel +(i.e., more pixels have this chroma value). The V component is displayed on +the horizontal (X) axis, with the leftmost side being V = 0 and the rightmost +side being V = 255. The U component is displayed on the vertical (Y) axis, +with the top representing U = 0 and the bottom representing U = 255. +

+

The position of a white pixel in the graph corresponds to the chroma value of +a pixel of the input clip. The graph can therefore be used to read the hue +(color flavor) and the saturation (the dominance of the hue in the color). As +the hue of a color changes, it moves around the square. At the center of the +square the saturation is zero, which means that the corresponding pixel has no +color. If the amount of a specific color is increased (while leaving the other +colors unchanged) the saturation increases, and the indicator moves towards +the edge of the square. +

+
+
color2
+

Chroma values in vectorscope, similar as color but actual chroma values +are displayed. +

+
+
waveform
+

Per row/column color component graph. In row mode, the graph on the left side +represents color component value 0 and the right side represents value = 255. +In column mode, the top side represents color component value = 0 and bottom +side represents value = 255. +

+
+

Default value is levels. +

+
+
level_height
+

Set height of level in levels. Default value is 200. +Allowed range is [50, 2048]. +

+
+
scale_height
+

Set height of color scale in levels. Default value is 12. +Allowed range is [0, 40]. +

+
+
step
+

Set step for waveform mode. Smaller values are useful to find out how +many values of the same luminance are distributed across input rows/columns. +Default value is 10. Allowed range is [1, 255]. +

+
+
waveform_mode
+

Set mode for waveform. Can be either row, or column. +Default is row. +

+
+
waveform_mirror
+

Set mirroring mode for waveform. 0 means unmirrored, 1 +means mirrored. In mirrored mode, higher values will be represented on the left +side for row mode and at the top for column mode. Default is +0 (unmirrored). +

+
+
display_mode
+

Set display mode for waveform and levels. +It accepts the following values: +

+
parade
+

Display separate graph for the color components side by side in +row waveform mode or one below the other in column waveform mode +for waveform histogram mode. For levels histogram mode, +per color component graphs are placed below each other. +

+

Using this display mode in waveform histogram mode makes it easy to +spot color casts in the highlights and shadows of an image, by comparing the +contours of the top and the bottom graphs of each waveform. Since whites, +grays, and blacks are characterized by exactly equal amounts of red, green, +and blue, neutral areas of the picture should display three waveforms of +roughly equal width/height. If not, the correction is easy to perform by +making level adjustments to the three waveforms. +

+
+
overlay
+

Presents information identical to that in the parade, except +that the graphs representing color components are superimposed directly +over one another. +

+

This display mode in waveform histogram mode makes it easier to spot +relative differences or similarities in overlapping areas of the color +components that are supposed to be identical, such as neutral whites, grays, +or blacks. +

+
+

Default is parade. +

+
+
levels_mode
+

Set mode for levels. Can be either linear, or logarithmic. +Default is linear. +

+
+ + +

9.42.1 Examples

+ +
    +
  • +Calculate and draw histogram: +
     
    ffplay -i input -vf histogram
    +
    + +
+ +

+

+

9.43 hqdn3d

+ +

High precision/quality 3d denoise filter. This filter aims to reduce +image noise producing smooth images and making still images really +still. It should enhance compressibility. +

+

It accepts the following optional parameters: +

+
+
luma_spatial
+

a non-negative float number which specifies spatial luma strength, +defaults to 4.0 +

+
+
chroma_spatial
+

a non-negative float number which specifies spatial chroma strength, +defaults to 3.0*luma_spatial/4.0 +

+
+
luma_tmp
+

a float number which specifies luma temporal strength, defaults to +6.0*luma_spatial/4.0 +

+
+
chroma_tmp
+

a float number which specifies chroma temporal strength, defaults to +luma_tmp*chroma_spatial/luma_spatial +

+
+ + +

9.44 hue

+ +

Modify the hue and/or the saturation of the input. +

+

This filter accepts the following options: +

+
+
h
+

Specify the hue angle as a number of degrees. It accepts an expression, +and defaults to "0". +

+
+
s
+

Specify the saturation in the [-10,10] range. It accepts an expression and +defaults to "1". +

+
+
H
+

Specify the hue angle as a number of radians. It accepts an +expression, and defaults to "0". +

+
+
b
+

Specify the brightness in the [-10,10] range. It accepts an expression and +defaults to "0". +

+
+ +

‘h’ and ‘H’ are mutually exclusive, and can’t be +specified at the same time. +

+

The ‘b’, ‘h’, ‘H’ and ‘s’ option values are +expressions containing the following constants: +

+
+
n
+

frame count of the input frame starting from 0 +

+
+
pts
+

presentation timestamp of the input frame expressed in time base units +

+
+
r
+

frame rate of the input video, NAN if the input frame rate is unknown +

+
+
t
+

timestamp expressed in seconds, NAN if the input timestamp is unknown +

+
+
tb
+

time base of the input video +

+
+ + +

9.44.1 Examples

+ +
    +
  • +Set the hue to 90 degrees and the saturation to 1.0: +
     
    hue=h=90:s=1
    +
    + +
  • +Same command but expressing the hue in radians: +
     
    hue=H=PI/2:s=1
    +
    + +
  • +Rotate hue and make the saturation swing between 0 +and 2 over a period of 1 second: +
     
    hue="H=2*PI*t: s=sin(2*PI*t)+1"
    +
    + +
  • +Apply a 3 seconds saturation fade-in effect starting at 0: +
     
    hue="s=min(t/3\,1)"
    +
    + +

    The general fade-in expression can be written as: +

     
    hue="s=min(0\, max((t-START)/DURATION\, 1))"
    +
    + +
  • +Apply a 3 seconds saturation fade-out effect starting at 5 seconds: +
     
    hue="s=max(0\, min(1\, (8-t)/3))"
    +
    + +

    The general fade-out expression can be written as: +

     
    hue="s=max(0\, min(1\, (START+DURATION-t)/DURATION))"
    +
    + +
+ + +

9.44.2 Commands

+ +

This filter supports the following commands: +

+
b
+
s
+
h
+
H
+

Modify the hue and/or the saturation and/or brightness of the input video. +The command accepts the same syntax of the corresponding option. +

+

If the specified expression is not valid, it is kept at its current +value. +

+
+ + +

9.45 idet

+ +

Detect video interlacing type. +

+

This filter tries to detect if the input is interlaced or progressive, +top or bottom field first. +

+

The filter accepts the following options: +

+
+
intl_thres
+

Set interlacing threshold. +

+
prog_thres
+

Set progressive threshold. +

+
+ + +

9.46 il

+ +

Deinterleave or interleave fields. +

+

This filter allows one to process interlaced image fields without +deinterlacing them. Deinterleaving splits the input frame into 2 +fields (so called half pictures). Odd lines are moved to the top +half of the output image, even lines to the bottom half. +You can process (filter) them independently and then re-interleave them. +

+

The filter accepts the following options: +

+
+
luma_mode, l
+
chroma_mode, c
+
alpha_mode, a
+

Available values for luma_mode, chroma_mode and +alpha_mode are: +

+
+
none
+

Do nothing. +

+
+
deinterleave, d
+

Deinterleave fields, placing one above the other. +

+
+
interleave, i
+

Interleave fields. Reverse the effect of deinterleaving. +

+
+

Default value is none. +

+
+
luma_swap, ls
+
chroma_swap, cs
+
alpha_swap, as
+

Swap luma/chroma/alpha fields. Exchange even & odd lines. Default value is 0. +

+
+ + +

9.47 interlace

+ +

Simple interlacing filter from progressive contents. This interleaves upper (or +lower) lines from odd frames with lower (or upper) lines from even frames, +halving the frame rate and preserving image height. A vertical lowpass filter +is always applied in order to avoid twitter effects and reduce moiré patterns. +

+
 
   Original        Original             New Frame
+   Frame 'j'      Frame 'j+1'             (tff)
+  ==========      ===========       ==================
+    Line 0  -------------------->    Frame 'j' Line 0
+    Line 1          Line 1  ---->   Frame 'j+1' Line 1
+    Line 2 --------------------->    Frame 'j' Line 2
+    Line 3          Line 3  ---->   Frame 'j+1' Line 3
+     ...             ...                   ...
+New Frame + 1 will be generated by Frame 'j+2' and Frame 'j+3' and so on
+
+ +

It accepts the following optional parameters: +

+
+
scan
+

determines whether the interlaced frame is taken from the even (tff - default) +or odd (bff) lines of the progressive frame. +

+
+ + +

9.48 kerndeint

+ +

Deinterlace input video by applying Donald Graft’s adaptive kernel +deinterlacing. Work on interlaced parts of a video to produce +progressive frames. +

+

The description of the accepted parameters follows. +

+
+
thresh
+

Set the threshold which affects the filter’s tolerance when +determining if a pixel line must be processed. It must be an integer +in the range [0,255] and defaults to 10. A value of 0 will result in +applying the process on every pixel. +

+
+
map
+

Paint pixels exceeding the threshold value to white if set to 1. +Default is 0. +

+
+
order
+

Set the fields order. Swap fields if set to 1, leave fields alone if +0. Default is 0. +

+
+
sharp
+

Enable additional sharpening if set to 1. Default is 0. +

+
+
twoway
+

Enable twoway sharpening if set to 1. Default is 0. +

+
+ + +

9.48.1 Examples

+ +
    +
  • +Apply default values: +
     
    kerndeint=thresh=10:map=0:order=0:sharp=0:twoway=0
    +
    + +
  • +Enable additional sharpening: +
     
    kerndeint=sharp=1
    +
    + +
  • +Paint processed pixels in white: +
     
    kerndeint=map=1
    +
    +
+ +

+

+

9.49 lut3d

+ +

Apply a 3D LUT to an input video. +

+

The filter accepts the following options: +

+
+
file
+

Set the 3D LUT file name. +

+

Currently supported formats: +

+
3dl
+

AfterEffects +

+
cube
+

Iridas +

+
dat
+

DaVinci +

+
m3d
+

Pandora +

+
+
+
interp
+

Select interpolation mode. +

+

Available values are: +

+
+
nearest
+

Use values from the nearest defined point. +

+
trilinear
+

Interpolate values using the 8 points defining a cube. +

+
tetrahedral
+

Interpolate values using a tetrahedron. +

+
+
+
+ + +

9.50 lut, lutrgb, lutyuv

+ +

Compute a look-up table for binding each pixel component input value +to an output value, and apply it to input video. +

+

lutyuv applies a lookup table to a YUV input video, lutrgb +to an RGB input video. +

+

These filters accept the following options: +

+
c0
+

set first pixel component expression +

+
c1
+

set second pixel component expression +

+
c2
+

set third pixel component expression +

+
c3
+

set fourth pixel component expression, corresponds to the alpha component +

+
+
r
+

set red component expression +

+
g
+

set green component expression +

+
b
+

set blue component expression +

+
a
+

alpha component expression +

+
+
y
+

set Y/luminance component expression +

+
u
+

set U/Cb component expression +

+
v
+

set V/Cr component expression +

+
+ +

Each of them specifies the expression to use for computing the lookup table for +the corresponding pixel component values. +

+

The exact component associated to each of the c* options depends on the +format in input. +

+

The lut filter requires either YUV or RGB pixel formats in input, +lutrgb requires RGB pixel formats in input, and lutyuv requires YUV. +

+

The expressions can contain the following constants and functions: +

+
+
w
+
h
+

the input width and height +

+
+
val
+

input value for the pixel component +

+
+
clipval
+

the input value clipped in the minval-maxval range +

+
+
maxval
+

maximum value for the pixel component +

+
+
minval
+

minimum value for the pixel component +

+
+
negval
+

the negated value for the pixel component value clipped in the +minval-maxval range , it corresponds to the expression +"maxval-clipval+minval" +

+
+
clip(val)
+

the computed value in val clipped in the +minval-maxval range +

+
+
gammaval(gamma)
+

the computed gamma correction value of the pixel component value +clipped in the minval-maxval range, corresponds to the +expression +"pow((clipval-minval)/(maxval-minval)\,gamma)*(maxval-minval)+minval" +

+
+
+ +

All expressions default to "val". +

+ +

9.50.1 Examples

+ +
    +
  • +Negate input video: +
     
    lutrgb="r=maxval+minval-val:g=maxval+minval-val:b=maxval+minval-val"
    +lutyuv="y=maxval+minval-val:u=maxval+minval-val:v=maxval+minval-val"
    +
    + +

    The above is the same as: +

     
    lutrgb="r=negval:g=negval:b=negval"
    +lutyuv="y=negval:u=negval:v=negval"
    +
    + +
  • +Negate luminance: +
     
    lutyuv=y=negval
    +
    + +
  • +Remove chroma components, turns the video into a graytone image: +
     
    lutyuv="u=128:v=128"
    +
    + +
  • +Apply a luma burning effect: +
     
    lutyuv="y=2*val"
    +
    + +
  • +Remove green and blue components: +
     
    lutrgb="g=0:b=0"
    +
    + +
  • +Set a constant alpha channel value on input: +
     
    format=rgba,lutrgb=a="maxval-minval/2"
    +
    + +
  • +Correct luminance gamma by a 0.5 factor: +
     
    lutyuv=y=gammaval(0.5)
    +
    + +
  • +Discard least significant bits of luma: +
     
    lutyuv=y='bitand(val, 128+64+32)'
    +
    +
+ + +

9.51 mergeplanes

+ +

Merge color channel components from several video streams. +

+

The filter accepts up to 4 input streams, and merges the selected input +planes into the output video. +

+

This filter accepts the following options: +

+
mapping
+

Set input to output plane mapping. Default is 0. +

+

The mappings is specified as a bitmap. It should be specified as a +hexadecimal number in the form 0xAa[Bb[Cc[Dd]]]. ’Aa’ describes the +mapping for the first plane of the output stream. ’A’ sets the number of +the input stream to use (from 0 to 3), and ’a’ the plane number of the +corresponding input to use (from 0 to 3). The rest of the mappings is +similar, ’Bb’ describes the mapping for the output stream second +plane, ’Cc’ describes the mapping for the output stream third plane and +’Dd’ describes the mapping for the output stream fourth plane. +

+
+
format
+

Set output pixel format. Default is yuva444p. +

+
+ + +

9.51.1 Examples

+ +
    +
  • +Merge three gray video streams of same width and height into single video stream: +
     
    [a0][a1][a2]mergeplanes=0x001020:yuv444p
    +
    + +
  • +Merge 1st yuv444p stream and 2nd gray video stream into yuva444p video stream: +
     
    [a0][a1]mergeplanes=0x00010210:yuva444p
    +
    + +
  • +Swap Y and A plane in yuva444p stream: +
     
    format=yuva444p,mergeplanes=0x03010200:yuva444p
    +
    + +
  • +Swap U and V plane in yuv420p stream: +
     
    format=yuv420p,mergeplanes=0x000201:yuv420p
    +
    + +
  • +Cast a rgb24 clip to yuv444p: +
     
    format=rgb24,mergeplanes=0x000102:yuv444p
    +
    +
+ + +

9.52 mcdeint

+ +

Apply motion-compensation deinterlacing. +

+

It needs one field per frame as input and must thus be used together +with yadif=1/3 or equivalent. +

+

This filter accepts the following options: +

+
mode
+

Set the deinterlacing mode. +

+

It accepts one of the following values: +

+
fast
+
medium
+
slow
+

use iterative motion estimation +

+
extra_slow
+

like ‘slow’, but use multiple reference frames. +

+
+

Default value is ‘fast’. +

+
+
parity
+

Set the picture field parity assumed for the input video. It must be +one of the following values: +

+
+
0, tff
+

assume top field first +

+
1, bff
+

assume bottom field first +

+
+ +

Default value is ‘bff’. +

+
+
qp
+

Set per-block quantization parameter (QP) used by the internal +encoder. +

+

Higher values should result in a smoother motion vector field but less +optimal individual vectors. Default value is 1. +

+
+ + +

9.53 mp

+ +

Apply an MPlayer filter to the input video. +

+

This filter provides a wrapper around some of the filters of +MPlayer/MEncoder. +

+

This wrapper is considered experimental. Some of the wrapped filters +may not work properly and we may drop support for them, as they will +be implemented natively into FFmpeg. Thus you should avoid +depending on them when writing portable scripts. +

+

The filter accepts the parameters: +filter_name[:=]filter_params +

+

filter_name is the name of a supported MPlayer filter, +filter_params is a string containing the parameters accepted by +the named filter. +

+

The list of the currently supported filters follows: +

+
eq2
+
eq
+
fspp
+
ilpack
+
pp7
+
softpulldown
+
uspp
+
+ +

The parameter syntax and behavior for the listed filters are the same +of the corresponding MPlayer filters. For detailed instructions check +the "VIDEO FILTERS" section in the MPlayer manual. +

+ +

9.53.1 Examples

+ +
    +
  • +Adjust gamma, brightness, contrast: +
     
    mp=eq2=1.0:2:0.5
    +
    +
+ +

See also mplayer(1), http://www.mplayerhq.hu/. +

+ +

9.54 mpdecimate

+ +

Drop frames that do not differ greatly from the previous frame in +order to reduce frame rate. +

+

The main use of this filter is for very-low-bitrate encoding +(e.g. streaming over dialup modem), but it could in theory be used for +fixing movies that were inverse-telecined incorrectly. +

+

A description of the accepted options follows. +

+
+
max
+

Set the maximum number of consecutive frames which can be dropped (if +positive), or the minimum interval between dropped frames (if +negative). If the value is 0, the frame is dropped regardless of the +number of previous sequentially dropped frames. +

+

Default value is 0. +

+
+
hi
+
lo
+
frac
+

Set the dropping threshold values. +

+

Values for ‘hi’ and ‘lo’ are for 8x8 pixel blocks and +represent actual pixel value differences, so a threshold of 64 +corresponds to 1 unit of difference for each pixel, or the same spread +out differently over the block. +

+

A frame is a candidate for dropping if no 8x8 blocks differ by more +than a threshold of ‘hi’, and if no more than ‘frac’ blocks (1 +meaning the whole image) differ by more than a threshold of ‘lo’. +

+

Default value for ‘hi’ is 64*12, default value for ‘lo’ is +64*5, and default value for ‘frac’ is 0.33. +

+
+ + + +

9.55 negate

+ +

Negate input video. +

+

This filter accepts an integer in input; if non-zero, it negates the +alpha component (if available). The default value in input is 0. +

+ +

9.56 noformat

+ +

Force libavfilter not to use any of the specified pixel formats for the +input to the next filter. +

+

This filter accepts the following parameters: +

+
pix_fmts
+

A ’|’-separated list of pixel format names, for example +"pix_fmts=yuv420p|monow|rgb24". +

+
+
+ + +

9.56.1 Examples

+ +
    +
  • +Force libavfilter to use a format different from yuv420p for the +input to the vflip filter: +
     
    noformat=pix_fmts=yuv420p,vflip
    +
    + +
  • +Convert the input video to any of the formats not contained in the list: +
     
    noformat=yuv420p|yuv444p|yuv410p
    +
    +
+ + +

9.57 noise

+ +

Add noise on video input frame. +

+

The filter accepts the following options: +

+
+
all_seed
+
c0_seed
+
c1_seed
+
c2_seed
+
c3_seed
+

Set noise seed for specific pixel component or all pixel components in case +of all_seed. Default value is 123457. +

+
+
all_strength, alls
+
c0_strength, c0s
+
c1_strength, c1s
+
c2_strength, c2s
+
c3_strength, c3s
+

Set noise strength for specific pixel component or all pixel components in case +all_strength. Default value is 0. Allowed range is [0, 100]. +

+
+
all_flags, allf
+
c0_flags, c0f
+
c1_flags, c1f
+
c2_flags, c2f
+
c3_flags, c3f
+

Set pixel component flags or set flags for all components if all_flags. +Available values for component flags are: +

+
a
+

averaged temporal noise (smoother) +

+
p
+

mix random noise with a (semi)regular pattern +

+
t
+

temporal noise (noise pattern changes between frames) +

+
u
+

uniform noise (gaussian otherwise) +

+
+
+
+ + +

9.57.1 Examples

+ +

Add temporal and uniform noise to input video: +

 
noise=alls=20:allf=t+u
+
+ + +

9.58 null

+ +

Pass the video source unchanged to the output. +

+ +

9.59 ocv

+ +

Apply video transform using libopencv. +

+

To enable this filter install libopencv library and headers and +configure FFmpeg with --enable-libopencv. +

+

This filter accepts the following parameters: +

+
+
filter_name
+

The name of the libopencv filter to apply. +

+
+
filter_params
+

The parameters to pass to the libopencv filter. If not specified the default +values are assumed. +

+
+
+ +

Refer to the official libopencv documentation for more precise +information: +http://opencv.willowgarage.com/documentation/c/image_filtering.html +

+

Follows the list of supported libopencv filters. +

+

+

+

9.59.1 dilate

+ +

Dilate an image by using a specific structuring element. +This filter corresponds to the libopencv function cvDilate. +

+

It accepts the parameters: struct_el|nb_iterations. +

+

struct_el represents a structuring element, and has the syntax: +colsxrows+anchor_xxanchor_y/shape +

+

cols and rows represent the number of columns and rows of +the structuring element, anchor_x and anchor_y the anchor +point, and shape the shape for the structuring element, and +can be one of the values "rect", "cross", "ellipse", "custom". +

+

If the value for shape is "custom", it must be followed by a +string of the form "=filename". The file with name +filename is assumed to represent a binary image, with each +printable character corresponding to a bright pixel. When a custom +shape is used, cols and rows are ignored, the number +of columns and rows of the read file are assumed instead. +

+

The default value for struct_el is "3x3+0x0/rect". +

+

nb_iterations specifies the number of times the transform is +applied to the image, and defaults to 1. +

+

Some examples follow: +

 
# use the default values
+ocv=dilate
+
+# dilate using a structuring element with a 5x5 cross, iterate two times
+ocv=filter_name=dilate:filter_params=5x5+2x2/cross|2
+
+# read the shape from the file diamond.shape, iterate two times
+# the file diamond.shape may contain a pattern of characters like this:
+#   *
+#  ***
+# *****
+#  ***
+#   *
+# the specified cols and rows are ignored (but not the anchor point coordinates)
+ocv=dilate:0x0+2x2/custom=diamond.shape|2
+
+ + +

9.59.2 erode

+ +

Erode an image by using a specific structuring element. +This filter corresponds to the libopencv function cvErode. +

+

The filter accepts the parameters: struct_el:nb_iterations, +with the same syntax and semantics as the dilate filter. +

+ +

9.59.3 smooth

+ +

Smooth the input video. +

+

The filter takes the following parameters: +type|param1|param2|param3|param4. +

+

type is the type of smooth filter to apply, and can be one of +the following values: "blur", "blur_no_scale", "median", "gaussian", +"bilateral". The default value is "gaussian". +

+

param1, param2, param3, and param4 are +parameters whose meanings depend on smooth type. param1 and +param2 accept integer positive values or 0, param3 and +param4 accept float values. +

+

The default value for param1 is 3, the default value for the +other parameters is 0. +

+

These parameters correspond to the parameters assigned to the +libopencv function cvSmooth. +

+

+

+

9.60 overlay

+ +

Overlay one video on top of another. +

+

It takes two inputs and one output, the first input is the "main" +video on which the second input is overlayed. +

+

This filter accepts the following parameters: +

+

A description of the accepted options follows. +

+
+
x
+
y
+

Set the expression for the x and y coordinates of the overlayed video +on the main video. Default value is "0" for both expressions. In case +the expression is invalid, it is set to a huge value (meaning that the +overlay will not be displayed within the output visible area). +

+
+
eof_action
+

The action to take when EOF is encountered on the secondary input, accepts one +of the following values: +

+
+
repeat
+

repeat the last frame (the default) +

+
endall
+

end both streams +

+
pass
+

pass through the main input +

+
+ +
+
eval
+

Set when the expressions for ‘x’, and ‘y’ are evaluated. +

+

It accepts the following values: +

+
init
+

only evaluate expressions once during the filter initialization or +when a command is processed +

+
+
frame
+

evaluate expressions for each incoming frame +

+
+ +

Default value is ‘frame’. +

+
+
shortest
+

If set to 1, force the output to terminate when the shortest input +terminates. Default value is 0. +

+
+
format
+

Set the format for the output video. +

+

It accepts the following values: +

+
yuv420
+

force YUV420 output +

+
+
yuv422
+

force YUV422 output +

+
+
yuv444
+

force YUV444 output +

+
+
rgb
+

force RGB output +

+
+ +

Default value is ‘yuv420’. +

+
+
rgb (deprecated)
+

If set to 1, force the filter to accept inputs in the RGB +color space. Default value is 0. This option is deprecated, use +‘format’ instead. +

+
+
repeatlast
+

If set to 1, force the filter to draw the last overlay frame over the +main input until the end of the stream. A value of 0 disables this +behavior. Default value is 1. +

+
+ +

The ‘x’, and ‘y’ expressions can contain the following +parameters. +

+
+
main_w, W
+
main_h, H
+

main input width and height +

+
+
overlay_w, w
+
overlay_h, h
+

overlay input width and height +

+
+
x
+
y
+

the computed values for x and y. They are evaluated for +each new frame. +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values of the output +format. For example for the pixel format "yuv422p" hsub is 2 and +vsub is 1. +

+
+
n
+

the number of input frame, starting from 0 +

+
+
pos
+

the position in the file of the input frame, NAN if unknown +

+
+
t
+

timestamp expressed in seconds, NAN if the input timestamp is unknown +

+
+
+ +

Note that the n, pos, t variables are available only +when evaluation is done per frame, and will evaluate to NAN +when ‘eval’ is set to ‘init’. +

+

Be aware that frames are taken from each input video in timestamp +order, hence, if their initial timestamps differ, it is a good idea +to pass the two inputs through a setpts=PTS-STARTPTS filter to +have them begin in the same zero timestamp, as it does the example for +the movie filter. +

+

You can chain together more overlays but you should test the +efficiency of such approach. +

+ +

9.60.1 Commands

+ +

This filter supports the following commands: +

+
x
+
y
+

Modify the x and y of the overlay input. +The command accepts the same syntax of the corresponding option. +

+

If the specified expression is not valid, it is kept at its current +value. +

+
+ + +

9.60.2 Examples

+ +
    +
  • +Draw the overlay at 10 pixels from the bottom right corner of the main +video: +
     
    overlay=main_w-overlay_w-10:main_h-overlay_h-10
    +
    + +

    Using named options the example above becomes: +

     
    overlay=x=main_w-overlay_w-10:y=main_h-overlay_h-10
    +
    + +
  • +Insert a transparent PNG logo in the bottom left corner of the input, +using the ffmpeg tool with the -filter_complex option: +
     
    ffmpeg -i input -i logo -filter_complex 'overlay=10:main_h-overlay_h-10' output
    +
    + +
  • +Insert 2 different transparent PNG logos (second logo on bottom +right corner) using the ffmpeg tool: +
     
    ffmpeg -i input -i logo1 -i logo2 -filter_complex 'overlay=x=10:y=H-h-10,overlay=x=W-w-10:y=H-h-10' output
    +
    + +
  • +Add a transparent color layer on top of the main video, WxH +must specify the size of the main input to the overlay filter: +
     
    color=color=red@.3:size=WxH [over]; [in][over] overlay [out]
    +
    + +
  • +Play an original video and a filtered version (here with the deshake +filter) side by side using the ffplay tool: +
     
    ffplay input.avi -vf 'split[a][b]; [a]pad=iw*2:ih[src]; [b]deshake[filt]; [src][filt]overlay=w'
    +
    + +

    The above command is the same as: +

     
    ffplay input.avi -vf 'split[b], pad=iw*2[src], [b]deshake, [src]overlay=w'
    +
    + +
  • +Make a sliding overlay appearing from the left to the right top part of the +screen starting since time 2: +
     
    overlay=x='if(gte(t,2), -w+(t-2)*20, NAN)':y=0
    +
    + +
  • +Compose output by putting two input videos side to side: +
     
    ffmpeg -i left.avi -i right.avi -filter_complex "
    +nullsrc=size=200x100 [background];
    +[0:v] setpts=PTS-STARTPTS, scale=100x100 [left];
    +[1:v] setpts=PTS-STARTPTS, scale=100x100 [right];
    +[background][left]       overlay=shortest=1       [background+left];
    +[background+left][right] overlay=shortest=1:x=100 [left+right]
    +"
    +
    + +
  • +mask 10-20 seconds of a video by applying the delogo filter to a section +
     
    ffmpeg -i test.avi -codec:v:0 wmv2 -ar 11025 -b:v 9000k
    +-vf '[in]split[split_main][split_delogo];[split_delogo]trim=start=360:end=371,delogo=0:0:640:480[delogoed];[split_main][delogoed]overlay=eof_action=pass[out]'
    +masked.avi
    +
    + +
  • +Chain several overlays in cascade: +
     
    nullsrc=s=200x200 [bg];
    +testsrc=s=100x100, split=4 [in0][in1][in2][in3];
    +[in0] lutrgb=r=0, [bg]   overlay=0:0     [mid0];
    +[in1] lutrgb=g=0, [mid0] overlay=100:0   [mid1];
    +[in2] lutrgb=b=0, [mid1] overlay=0:100   [mid2];
    +[in3] null,       [mid2] overlay=100:100 [out0]
    +
    + +
+ + +

9.61 owdenoise

+ +

Apply Overcomplete Wavelet denoiser. +

+

The filter accepts the following options: +

+
+
depth
+

Set depth. +

+

Larger depth values will denoise lower frequency components more, but +slow down filtering. +

+

Must be an int in the range 8-16, default is 8. +

+
+
luma_strength, ls
+

Set luma strength. +

+

Must be a double value in the range 0-1000, default is 1.0. +

+
+
chroma_strength, cs
+

Set chroma strength. +

+

Must be a double value in the range 0-1000, default is 1.0. +

+
+ + +

9.62 pad

+ +

Add paddings to the input image, and place the original input at the +given coordinates x, y. +

+

This filter accepts the following parameters: +

+
+
width, w
+
height, h
+

Specify an expression for the size of the output image with the +paddings added. If the value for width or height is 0, the +corresponding input size is used for the output. +

+

The width expression can reference the value set by the +height expression, and vice versa. +

+

The default value of width and height is 0. +

+
+
x
+
y
+

Specify an expression for the offsets where to place the input image +in the padded area with respect to the top/left border of the output +image. +

+

The x expression can reference the value set by the y +expression, and vice versa. +

+

The default value of x and y is 0. +

+
+
color
+

Specify the color of the padded area. For the syntax of this option, +check the "Color" section in the ffmpeg-utils manual. +

+

The default value of color is "black". +

+
+ +

The value for the width, height, x, and y +options are expressions containing the following constants: +

+
+
in_w
+
in_h
+

the input video width and height +

+
+
iw
+
ih
+

same as in_w and in_h +

+
+
out_w
+
out_h
+

the output width and height, that is the size of the padded area as +specified by the width and height expressions +

+
+
ow
+
oh
+

same as out_w and out_h +

+
+
x
+
y
+

x and y offsets as specified by the x and y +expressions, or NAN if not yet specified +

+
+
a
+

same as iw / ih +

+
+
sar
+

input sample aspect ratio +

+
+
dar
+

input display aspect ratio, it is the same as (iw / ih) * sar +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+ + +

9.62.1 Examples

+ +
    +
  • +Add paddings with color "violet" to the input video. Output video +size is 640x480, the top-left corner of the input video is placed at +column 0, row 40: +
     
    pad=640:480:0:40:violet
    +
    + +

    The example above is equivalent to the following command: +

     
    pad=width=640:height=480:x=0:y=40:color=violet
    +
    + +
  • +Pad the input to get an output with dimensions increased by 3/2, +and put the input video at the center of the padded area: +
     
    pad="3/2*iw:3/2*ih:(ow-iw)/2:(oh-ih)/2"
    +
    + +
  • +Pad the input to get a squared output with size equal to the maximum +value between the input width and height, and put the input video at +the center of the padded area: +
     
    pad="max(iw\,ih):ow:(ow-iw)/2:(oh-ih)/2"
    +
    + +
  • +Pad the input to get a final w/h ratio of 16:9: +
     
    pad="ih*16/9:ih:(ow-iw)/2:(oh-ih)/2"
    +
    + +
  • +In case of anamorphic video, in order to set the output display aspect +correctly, it is necessary to use sar in the expression, +according to the relation: +
     
    (ih * X / ih) * sar = output_dar
    +X = output_dar / sar
    +
    + +

    Thus the previous example needs to be modified to: +

     
    pad="ih*16/9/sar:ih:(ow-iw)/2:(oh-ih)/2"
    +
    + +
  • +Double output size and put the input video in the bottom-right +corner of the output padded area: +
     
    pad="2*iw:2*ih:ow-iw:oh-ih"
    +
    +
+ + +

9.63 perspective

+ +

Correct perspective of video not recorded perpendicular to the screen. +

+

A description of the accepted parameters follows. +

+
+
x0
+
y0
+
x1
+
y1
+
x2
+
y2
+
x3
+
y3
+

Set coordinates expression for top left, top right, bottom left and bottom right corners. +Default values are 0:0:W:0:0:H:W:H with which perspective will remain unchanged. +

+

The expressions can use the following variables: +

+
+
W
+
H
+

the width and height of video frame. +

+
+ +
+
interpolation
+

Set interpolation for perspective correction. +

+

It accepts the following values: +

+
linear
+
cubic
+
+ +

Default value is ‘linear’. +

+
+ + +

9.64 phase

+ +

Delay interlaced video by one field time so that the field order changes. +

+

The intended use is to fix PAL movies that have been captured with the +opposite field order to the film-to-video transfer. +

+

A description of the accepted parameters follows. +

+
+
mode
+

Set phase mode. +

+

It accepts the following values: +

+
t
+

Capture field order top-first, transfer bottom-first. +Filter will delay the bottom field. +

+
+
b
+

Capture field order bottom-first, transfer top-first. +Filter will delay the top field. +

+
+
p
+

Capture and transfer with the same field order. This mode only exists +for the documentation of the other options to refer to, but if you +actually select it, the filter will faithfully do nothing. +

+
+
a
+

Capture field order determined automatically by field flags, transfer +opposite. +Filter selects among ‘t’ and ‘b’ modes on a frame by frame +basis using field flags. If no field information is available, +then this works just like ‘u’. +

+
+
u
+

Capture unknown or varying, transfer opposite. +Filter selects among ‘t’ and ‘b’ on a frame by frame basis by +analyzing the images and selecting the alternative that produces best +match between the fields. +

+
+
T
+

Capture top-first, transfer unknown or varying. +Filter selects among ‘t’ and ‘p’ using image analysis. +

+
+
B
+

Capture bottom-first, transfer unknown or varying. +Filter selects among ‘b’ and ‘p’ using image analysis. +

+
+
A
+

Capture determined by field flags, transfer unknown or varying. +Filter selects among ‘t’, ‘b’ and ‘p’ using field flags and +image analysis. If no field information is available, then this works just +like ‘U’. This is the default mode. +

+
+
U
+

Both capture and transfer unknown or varying. +Filter selects among ‘t’, ‘b’ and ‘p’ using image analysis only. +

+
+
+
+ + +

9.65 pixdesctest

+ +

Pixel format descriptor test filter, mainly useful for internal +testing. The output video should be equal to the input video. +

+

For example: +

 
format=monow, pixdesctest
+
+ +

can be used to test the monowhite pixel format descriptor definition. +

+ +

9.66 pp

+ +

Enable the specified chain of postprocessing subfilters using libpostproc. This +library should be automatically selected with a GPL build (--enable-gpl). +Subfilters must be separated by ’/’ and can be disabled by prepending a ’-’. +Each subfilter and some options have a short and a long name that can be used +interchangeably, i.e. dr/dering are the same. +

+

The filters accept the following options: +

+
+
subfilters
+

Set postprocessing subfilters string. +

+
+ +

All subfilters share common options to determine their scope: +

+
+
a/autoq
+

Honor the quality commands for this subfilter. +

+
+
c/chrom
+

Do chrominance filtering, too (default). +

+
+
y/nochrom
+

Do luminance filtering only (no chrominance). +

+
+
n/noluma
+

Do chrominance filtering only (no luminance). +

+
+ +

These options can be appended after the subfilter name, separated by a ’|’. +

+

Available subfilters are: +

+
+
hb/hdeblock[|difference[|flatness]]
+

Horizontal deblocking filter +

+
difference
+

Difference factor where higher values mean more deblocking (default: 32). +

+
flatness
+

Flatness threshold where lower values mean more deblocking (default: 39). +

+
+ +
+
vb/vdeblock[|difference[|flatness]]
+

Vertical deblocking filter +

+
difference
+

Difference factor where higher values mean more deblocking (default: 32). +

+
flatness
+

Flatness threshold where lower values mean more deblocking (default: 39). +

+
+ +
+
ha/hadeblock[|difference[|flatness]]
+

Accurate horizontal deblocking filter +

+
difference
+

Difference factor where higher values mean more deblocking (default: 32). +

+
flatness
+

Flatness threshold where lower values mean more deblocking (default: 39). +

+
+ +
+
va/vadeblock[|difference[|flatness]]
+

Accurate vertical deblocking filter +

+
difference
+

Difference factor where higher values mean more deblocking (default: 32). +

+
flatness
+

Flatness threshold where lower values mean more deblocking (default: 39). +

+
+
+
+ +

The horizontal and vertical deblocking filters share the difference and +flatness values so you cannot set different horizontal and vertical +thresholds. +

+
+
h1/x1hdeblock
+

Experimental horizontal deblocking filter +

+
+
v1/x1vdeblock
+

Experimental vertical deblocking filter +

+
+
dr/dering
+

Deringing filter +

+
+
tn/tmpnoise[|threshold1[|threshold2[|threshold3]]], temporal noise reducer
+
+
threshold1
+

larger -> stronger filtering +

+
threshold2
+

larger -> stronger filtering +

+
threshold3
+

larger -> stronger filtering +

+
+ +
+
al/autolevels[:f/fullyrange], automatic brightness / contrast correction
+
+
f/fullyrange
+

Stretch luminance to 0-255. +

+
+ +
+
lb/linblenddeint
+

Linear blend deinterlacing filter that deinterlaces the given block by +filtering all lines with a (1 2 1) filter. +

+
+
li/linipoldeint
+

Linear interpolating deinterlacing filter that deinterlaces the given block by +linearly interpolating every second line. +

+
+
ci/cubicipoldeint
+

Cubic interpolating deinterlacing filter deinterlaces the given block by +cubically interpolating every second line. +

+
+
md/mediandeint
+

Median deinterlacing filter that deinterlaces the given block by applying a +median filter to every second line. +

+
+
fd/ffmpegdeint
+

FFmpeg deinterlacing filter that deinterlaces the given block by filtering every +second line with a (-1 4 2 4 -1) filter. +

+
+
l5/lowpass5
+

Vertically applied FIR lowpass deinterlacing filter that deinterlaces the given +block by filtering all lines with a (-1 2 6 2 -1) filter. +

+
+
fq/forceQuant[|quantizer]
+

Overrides the quantizer table from the input with the constant quantizer you +specify. +

+
quantizer
+

Quantizer to use +

+
+ +
+
de/default
+

Default pp filter combination (hb|a,vb|a,dr|a) +

+
+
fa/fast
+

Fast pp filter combination (h1|a,v1|a,dr|a) +

+
+
ac
+

High quality pp filter combination (ha|a|128|7,va|a,dr|a) +

+
+ + +

9.66.1 Examples

+ +
    +
  • +Apply horizontal and vertical deblocking, deringing and automatic +brightness/contrast: +
     
    pp=hb/vb/dr/al
    +
    + +
  • +Apply default filters without brightness/contrast correction: +
     
    pp=de/-al
    +
    + +
  • +Apply default filters and temporal denoiser: +
     
    pp=default/tmpnoise|1|2|3
    +
    + +
  • +Apply deblocking on luminance only, and switch vertical deblocking on or off +automatically depending on available CPU time: +
     
    pp=hb|y/vb|a
    +
    +
+ + +

9.67 psnr

+ +

Obtain the average, maximum and minimum PSNR (Peak Signal to Noise +Ratio) between two input videos. +

+

This filter takes in input two input videos, the first input is +considered the "main" source and is passed unchanged to the +output. The second input is used as a "reference" video for computing +the PSNR. +

+

Both video inputs must have the same resolution and pixel format for +this filter to work correctly. Also it assumes that both inputs +have the same number of frames, which are compared one by one. +

+

The obtained average PSNR is printed through the logging system. +

+

The filter stores the accumulated MSE (mean squared error) of each +frame, and at the end of the processing it is averaged across all frames +equally, and the following formula is applied to obtain the PSNR: +

+
 
PSNR = 10*log10(MAX^2/MSE)
+
+ +

Where MAX is the average of the maximum values of each component of the +image. +

+

The description of the accepted parameters follows. +

+
+
stats_file, f
+

If specified the filter will use the named file to save the PSNR of +each individual frame. +

+
+ +

The file printed if stats_file is selected, contains a sequence of +key/value pairs of the form key:value for each compared +couple of frames. +

+

A description of each shown parameter follows: +

+
+
n
+

sequential number of the input frame, starting from 1 +

+
+
mse_avg
+

Mean Square Error pixel-by-pixel average difference of the compared +frames, averaged over all the image components. +

+
+
mse_y, mse_u, mse_v, mse_r, mse_g, mse_b, mse_a
+

Mean Square Error pixel-by-pixel average difference of the compared +frames for the component specified by the suffix. +

+
+
psnr_y, psnr_u, psnr_v, psnr_r, psnr_g, psnr_b, psnr_a
+

Peak Signal to Noise ratio of the compared frames for the component +specified by the suffix. +

+
+ +

For example: +

 
movie=ref_movie.mpg, setpts=PTS-STARTPTS [main];
+[main][ref] psnr="stats_file=stats.log" [out]
+
+ +


In this example the input file being processed is compared with the +reference file ‘ref_movie.mpg’. The PSNR of each individual frame +is stored in ‘stats.log’. +

+

+

+

9.68 pullup

+ +

Pulldown reversal (inverse telecine) filter, capable of handling mixed +hard-telecine, 24000/1001 fps progressive, and 30000/1001 fps progressive +content. +

+

The pullup filter is designed to take advantage of future context in making +its decisions. This filter is stateless in the sense that it does not lock +onto a pattern to follow, but it instead looks forward to the following +fields in order to identify matches and rebuild progressive frames. +

+

To produce content with an even framerate, insert the fps filter after +pullup, use fps=24000/1001 if the input frame rate is 29.97fps, +fps=24 for 30fps and the (rare) telecined 25fps input. +

+

The filter accepts the following options: +

+
+
jl
+
jr
+
jt
+
jb
+

These options set the amount of "junk" to ignore at the left, right, top, and +bottom of the image, respectively. Left and right are in units of 8 pixels, +while top and bottom are in units of 2 lines. +The default is 8 pixels on each side. +

+
+
sb
+

Set the strict breaks. Setting this option to 1 will reduce the chances of +filter generating an occasional mismatched frame, but it may also cause an +excessive number of frames to be dropped during high motion sequences. +Conversely, setting it to -1 will make filter match fields more easily. +This may help processing of video where there is slight blurring between +the fields, but may also cause there to be interlaced frames in the output. +Default value is 0. +

+
+
mp
+

Set the metric plane to use. It accepts the following values: +

+
l
+

Use luma plane. +

+
+
u
+

Use chroma blue plane. +

+
+
v
+

Use chroma red plane. +

+
+ +

This option may be set to use chroma plane instead of the default luma plane +for doing filter’s computations. This may improve accuracy on very clean +source material, but more likely will decrease accuracy, especially if there +is chroma noise (rainbow effect) or any grayscale video. +The main purpose of setting ‘mp’ to a chroma plane is to reduce CPU +load and make pullup usable in realtime on slow machines. +

+
+ +

For best results (without duplicated frames in the output file) it is +necessary to change the output frame rate. For example, to inverse +telecine NTSC input: +

 
ffmpeg -i input -vf pullup -r 24000/1001 ...
+
+ + +

9.69 removelogo

+ +

Suppress a TV station logo, using an image file to determine which +pixels comprise the logo. It works by filling in the pixels that +comprise the logo with neighboring pixels. +

+

The filter accepts the following options: +

+
+
filename, f
+

Set the filter bitmap file, which can be any image format supported by +libavformat. The width and height of the image file must match those of the +video stream being processed. +

+
+ +

Pixels in the provided bitmap image with a value of zero are not +considered part of the logo, non-zero pixels are considered part of +the logo. If you use white (255) for the logo and black (0) for the +rest, you will be safe. For making the filter bitmap, it is +recommended to take a screen capture of a black frame with the logo +visible, and then using a threshold filter followed by the erode +filter once or twice. +

+

If needed, little splotches can be fixed manually. Remember that if +logo pixels are not covered, the filter quality will be much +reduced. Marking too many pixels as part of the logo does not hurt as +much, but it will increase the amount of blurring needed to cover over +the image and will destroy more information than necessary, and extra +pixels will slow things down on a large logo. +

+ +

9.70 rotate

+ +

Rotate video by an arbitrary angle expressed in radians. +

+

The filter accepts the following options: +

+

A description of the optional parameters follows. +

+
angle, a
+

Set an expression for the angle by which to rotate the input video +clockwise, expressed as a number of radians. A negative value will +result in a counter-clockwise rotation. By default it is set to "0". +

+

This expression is evaluated for each frame. +

+
+
out_w, ow
+

Set the output width expression, default value is "iw". +This expression is evaluated just once during configuration. +

+
+
out_h, oh
+

Set the output height expression, default value is "ih". +This expression is evaluated just once during configuration. +

+
+
bilinear
+

Enable bilinear interpolation if set to 1, a value of 0 disables +it. Default value is 1. +

+
+
fillcolor, c
+


Set the color used to fill the output area not covered by the rotated +image. For the general syntax of this option, check the "Color" section in the +ffmpeg-utils manual. If the special value "none" is selected then no +background is printed (useful for example if the background is never shown). +

+

Default value is "black". +

+
+ +

The expressions for the angle and the output size can contain the +following constants and functions: +

+
+
n
+

sequential number of the input frame, starting from 0. It is always NAN +before the first frame is filtered. +

+
+
t
+

time in seconds of the input frame, it is set to 0 when the filter is +configured. It is always NAN before the first frame is filtered. +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+
in_w, iw
+
in_h, ih
+

the input video width and height +

+
+
out_w, ow
+
out_h, oh
+

the output width and height, that is the size of the padded area as +specified by the width and height expressions +

+
+
rotw(a)
+
roth(a)
+

the minimal width/height required for completely containing the input +video rotated by a radians. +

+

These are only available when computing the ‘out_w’ and +‘out_h’ expressions. +

+
+ + +

9.70.1 Examples

+ +
    +
  • +Rotate the input by PI/6 radians clockwise: +
     
    rotate=PI/6
    +
    + +
  • +Rotate the input by PI/6 radians counter-clockwise: +
     
    rotate=-PI/6
    +
    + +
  • +Rotate the input by 45 degrees clockwise: +
     
    rotate=45*PI/180
    +
    + +
  • +Apply a constant rotation with period T, starting from an angle of PI/3: +
     
    rotate=PI/3+2*PI*t/T
    +
    + +
  • +Make the input video rotation oscillating with a period of T +seconds and an amplitude of A radians: +
     
    rotate=A*sin(2*PI/T*t)
    +
    + +
  • +Rotate the video, output size is chosen so that the whole rotating +input video is always completely contained in the output: +
     
    rotate='2*PI*t:ow=hypot(iw,ih):oh=ow'
    +
    + +
  • +Rotate the video, reduce the output size so that no background is ever +shown: +
     
    rotate=2*PI*t:ow='min(iw,ih)/sqrt(2)':oh=ow:c=none
    +
    +
+ + +

9.70.2 Commands

+ +

The filter supports the following commands: +

+
+
a, angle
+

Set the angle expression. +The command accepts the same syntax of the corresponding option. +

+

If the specified expression is not valid, it is kept at its current +value. +

+
+ + +

9.71 sab

+ +

Apply Shape Adaptive Blur. +

+

The filter accepts the following options: +

+
+
luma_radius, lr
+

Set luma blur filter strength, must be a value in range 0.1-4.0, default +value is 1.0. A greater value will result in a more blurred image, and +in slower processing. +

+
+
luma_pre_filter_radius, lpfr
+

Set luma pre-filter radius, must be a value in the 0.1-2.0 range, default +value is 1.0. +

+
+
luma_strength, ls
+

Set luma maximum difference between pixels to still be considered, must +be a value in the 0.1-100.0 range, default value is 1.0. +

+
+
chroma_radius, cr
+

Set chroma blur filter strength, must be a value in range 0.1-4.0. A +greater value will result in a more blurred image, and in slower +processing. +

+
+
chroma_pre_filter_radius, cpfr
+

Set chroma pre-filter radius, must be a value in the 0.1-2.0 range. +

+
+
chroma_strength, cs
+

Set chroma maximum difference between pixels to still be considered, +must be a value in the 0.1-100.0 range. +

+
+ +

Each chroma option value, if not explicitly specified, is set to the +corresponding luma option value. +

+

+

+

9.72 scale

+ +

Scale (resize) the input video, using the libswscale library. +

+

The scale filter forces the output display aspect ratio to be the same +of the input, by changing the output sample aspect ratio. +

+

If the input image format is different from the format requested by +the next filter, the scale filter will convert the input to the +requested format. +

+ +

9.72.1 Options

+

The filter accepts the following options, or any of the options +supported by the libswscale scaler. +

+

See (ffmpeg-scaler)scaler_options for +the complete list of scaler options. +

+
+
width, w
+
height, h
+

Set the output video dimension expression. Default value is the input +dimension. +

+

If the value is 0, the input width is used for the output. +

+


If one of the values is -1, the scale filter will use a value that +maintains the aspect ratio of the input image, calculated from the +other specified dimension. If both of them are -1, the input size is +used. +

+

If one of the values is -n with n > 1, the scale filter will also use a value +that maintains the aspect ratio of the input image, calculated from the other +specified dimension. After that it will, however, make sure that the calculated +dimension is divisible by n and adjust the value if necessary. +

+

See below for the list of accepted constants for use in the dimension +expression. +

+
+
interl
+

Set the interlacing mode. It accepts the following values: +

+
+
1
+

Force interlaced aware scaling. +

+
+
0
+

Do not apply interlaced scaling. +

+
+
-1
+

Select interlaced aware scaling depending on whether the source frames +are flagged as interlaced or not. +

+
+ +

Default value is ‘0’. +

+
+
flags
+

Set libswscale scaling flags. See +(ffmpeg-scaler)sws_flags for the +complete list of values. If not explicitly specified the filter applies +the default flags. +

+
+
size, s
+

Set the video size. For the syntax of this option, check the "Video size" +section in the ffmpeg-utils manual. +

+
+
in_color_matrix
+
out_color_matrix
+

Set in/output YCbCr color space type. +

+

This allows the autodetected value to be overridden as well as allows forcing +a specific value used for the output and encoder. +

+

If not specified, the color space type depends on the pixel format. +

+

Possible values: +

+
+
auto
+

Choose automatically. +

+
+
bt709
+

Format conforming to International Telecommunication Union (ITU) +Recommendation BT.709. +

+
+
fcc
+

Set color space conforming to the United States Federal Communications +Commission (FCC) Code of Federal Regulations (CFR) Title 47 (2003) 73.682 (a). +

+
+
bt601
+

Set color space conforming to: +

+
    +
  • +ITU Radiocommunication Sector (ITU-R) Recommendation BT.601 + +
  • +ITU-R Rec. BT.470-6 (1998) Systems B, B1, and G + +
  • +Society of Motion Picture and Television Engineers (SMPTE) ST 170:2004 + +
+ +
+
smpte240m
+

Set color space conforming to SMPTE ST 240:1999. +

+
+ +
+
in_range
+
out_range
+

Set in/output YCbCr sample range. +

+

This allows the autodetected value to be overridden as well as allows forcing +a specific value used for the output and encoder. If not specified, the +range depends on the pixel format. Possible values: +

+
+
auto
+

Choose automatically. +

+
+
jpeg/full/pc
+

Set full range (0-255 in case of 8-bit luma). +

+
+
mpeg/tv
+

Set "MPEG" range (16-235 in case of 8-bit luma). +

+
+ +
+
force_original_aspect_ratio
+

Enable decreasing or increasing output video width or height if necessary to +keep the original aspect ratio. Possible values: +

+
+
disable
+

Scale the video as specified and disable this feature. +

+
+
decrease
+

The output video dimensions will automatically be decreased if needed. +

+
+
increase
+

The output video dimensions will automatically be increased if needed. +

+
+
+ +

One useful instance of this option is that when you know a specific device’s +maximum allowed resolution, you can use this to limit the output video to +that, while retaining the aspect ratio. For example, device A allows +1280x720 playback, and your video is 1920x800. Using this option (set it to +decrease) and specifying 1280x720 to the command line makes the output +1280x533. +

+

Please note that this is a different thing than specifying -1 for ‘w’ +or ‘h’, you still need to specify the output resolution for this option +to work. +

+
+
+ +

The values of the ‘w’ and ‘h’ options are expressions +containing the following constants: +

+
+
in_w
+
in_h
+

the input width and height +

+
+
iw
+
ih
+

same as in_w and in_h +

+
+
out_w
+
out_h
+

the output (scaled) width and height +

+
+
ow
+
oh
+

same as out_w and out_h +

+
+
a
+

same as iw / ih +

+
+
sar
+

input sample aspect ratio +

+
+
dar
+

input display aspect ratio. Calculated from (iw / ih) * sar. +

+
+
hsub
+
vsub
+

horizontal and vertical input chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+
ohsub
+
ovsub
+

 horizontal and vertical output chroma subsample values. For example for the +pixel format "yuv422p" ohsub is 2 and ovsub is 1. +

+
+ + +

9.72.2 Examples

+ +
    +
  • +Scale the input video to a size of 200x100: +
     
    scale=w=200:h=100
    +
    + +

    This is equivalent to: +

     
    scale=200:100
    +
    + +

    or: +

     
    scale=200x100
    +
    + +
  • +Specify a size abbreviation for the output size: +
     
    scale=qcif
    +
    + +

    which can also be written as: +

     
    scale=size=qcif
    +
    + +
  • +Scale the input to 2x: +
     
    scale=w=2*iw:h=2*ih
    +
    + +
  • +The above is the same as: +
     
    scale=2*in_w:2*in_h
    +
    + +
  • +Scale the input to 2x with forced interlaced scaling: +
     
    scale=2*iw:2*ih:interl=1
    +
    + +
  • +Scale the input to half size: +
     
    scale=w=iw/2:h=ih/2
    +
    + +
  • +Increase the width, and set the height to the same size: +
     
    scale=3/2*iw:ow
    +
    + +
  • +Seek for Greek harmony: +
     
    scale=iw:1/PHI*iw
    +scale=ih*PHI:ih
    +
    + +
  • +Increase the height, and set the width to 3/2 of the height: +
     
    scale=w=3/2*oh:h=3/5*ih
    +
    + +
  • +Increase the size, but make the size a multiple of the chroma +subsample values: +
     
    scale="trunc(3/2*iw/hsub)*hsub:trunc(3/2*ih/vsub)*vsub"
    +
    + +
  • +Increase the width to a maximum of 500 pixels, keep the same input +aspect ratio: +
     
    scale=w='min(500\, iw*3/2):h=-1'
    +
    +
+ + +

9.73 separatefields

+ +

 The separatefields takes a frame-based video input and splits +each frame into its component fields, producing a new half height clip +with twice the frame rate and twice the frame count. +

+

 This filter uses field-dominance information in the frame to decide which +of each pair of fields to place first in the output. +If it gets it wrong, use the setfield filter before the separatefields filter. +

+ +

9.74 setdar, setsar

+ +

The setdar filter sets the Display Aspect Ratio for the filter +output video. +

+

This is done by changing the specified Sample (aka Pixel) Aspect +Ratio, according to the following equation: +

 
DAR = HORIZONTAL_RESOLUTION / VERTICAL_RESOLUTION * SAR
+
+ +

Keep in mind that the setdar filter does not modify the pixel +dimensions of the video frame. Also the display aspect ratio set by +this filter may be changed by later filters in the filterchain, +e.g. in case of scaling or if another "setdar" or a "setsar" filter is +applied. +

+

The setsar filter sets the Sample (aka Pixel) Aspect Ratio for +the filter output video. +

+

Note that as a consequence of the application of this filter, the +output display aspect ratio will change according to the equation +above. +

+

Keep in mind that the sample aspect ratio set by the setsar +filter may be changed by later filters in the filterchain, e.g. if +another "setsar" or a "setdar" filter is applied. +

+

The filters accept the following options: +

+
+
r, ratio, dar (setdar only), sar (setsar only)
+

Set the aspect ratio used by the filter. +

+

The parameter can be a floating point number string, an expression, or +a string of the form num:den, where num and +den are the numerator and denominator of the aspect ratio. If +the parameter is not specified, it is assumed the value "0". +In case the form "num:den" is used, the : character +should be escaped. +

+
+
max
+

Set the maximum integer value to use for expressing numerator and +denominator when reducing the expressed aspect ratio to a rational. +Default value is 100. +

+
+
+ +

The parameter sar is an expression containing +the following constants: +

+
+
E, PI, PHI
+

the corresponding mathematical approximated values for e +(euler number), pi (greek PI), phi (golden ratio) +

+
+
w, h
+

the input width and height +

+
+
a
+

same as w / h +

+
+
sar
+

input sample aspect ratio +

+
+
dar
+

input display aspect ratio, it is the same as (w / h) * sar +

+
+
hsub, vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+ + +

9.74.1 Examples

+ +
    +
  • +To change the display aspect ratio to 16:9, specify one of the following: +
     
    setdar=dar=1.77777
    +setdar=dar=16/9
    +setdar=dar=1.77777
    +
    + +
  • +To change the sample aspect ratio to 10:11, specify: +
     
    setsar=sar=10/11
    +
    + +
  • +To set a display aspect ratio of 16:9, and specify a maximum integer value of +1000 in the aspect ratio reduction, use the command: +
     
    setdar=ratio=16/9:max=1000
    +
    + +
+ +

+

+

9.75 setfield

+ +

Force field for the output video frame. +

+

The setfield filter marks the interlace type field for the +output frames. It does not change the input frame, but only sets the +corresponding property, which affects how the frame is treated by +following filters (e.g. fieldorder or yadif). +

+

The filter accepts the following options: +

+
+
mode
+

Available values are: +

+
+
auto
+

Keep the same field property. +

+
+
bff
+

Mark the frame as bottom-field-first. +

+
+
tff
+

Mark the frame as top-field-first. +

+
+
prog
+

Mark the frame as progressive. +

+
+
+
+ + +

9.76 showinfo

+ +

Show a line containing various information for each input video frame. +The input video is not modified. +

+

The shown line contains a sequence of key/value pairs of the form +key:value. +

+

A description of each shown parameter follows: +

+
+
n
+

sequential number of the input frame, starting from 0 +

+
+
pts
+

Presentation TimeStamp of the input frame, expressed as a number of +time base units. The time base unit depends on the filter input pad. +

+
+
pts_time
+

Presentation TimeStamp of the input frame, expressed as a number of +seconds +

+
+
pos
+

 position of the frame in the input stream, -1 if this information is +unavailable and/or meaningless (for example in case of synthetic video) +

+
+
fmt
+

pixel format name +

+
+
sar
+

sample aspect ratio of the input frame, expressed in the form +num/den +

+
+
s
+

size of the input frame. For the syntax of this option, check the "Video size" +section in the ffmpeg-utils manual. +

+
+
i
+

interlaced mode ("P" for "progressive", "T" for top field first, "B" +for bottom field first) +

+
+
iskey
+

1 if the frame is a key frame, 0 otherwise +

+
+
type
+

picture type of the input frame ("I" for an I-frame, "P" for a +P-frame, "B" for a B-frame, "?" for unknown type). +Check also the documentation of the AVPictureType enum and of +the av_get_picture_type_char function defined in +‘libavutil/avutil.h’. +

+
+
checksum
+

Adler-32 checksum (printed in hexadecimal) of all the planes of the input frame +

+
+
plane_checksum
+

Adler-32 checksum (printed in hexadecimal) of each plane of the input frame, +expressed in the form "[c0 c1 c2 c3]" +

+
+ +

+

+

9.77 smartblur

+ +

Blur the input video without impacting the outlines. +

+

The filter accepts the following options: +

+
+
luma_radius, lr
+

Set the luma radius. The option value must be a float number in +the range [0.1,5.0] that specifies the variance of the gaussian filter +used to blur the image (slower if larger). Default value is 1.0. +

+
+
luma_strength, ls
+

Set the luma strength. The option value must be a float number +in the range [-1.0,1.0] that configures the blurring. A value included +in [0.0,1.0] will blur the image whereas a value included in +[-1.0,0.0] will sharpen the image. Default value is 1.0. +

+
+
luma_threshold, lt
+

Set the luma threshold used as a coefficient to determine +whether a pixel should be blurred or not. The option value must be an +integer in the range [-30,30]. A value of 0 will filter all the image, +a value included in [0,30] will filter flat areas and a value included +in [-30,0] will filter edges. Default value is 0. +

+
+
chroma_radius, cr
+

Set the chroma radius. The option value must be a float number in +the range [0.1,5.0] that specifies the variance of the gaussian filter +used to blur the image (slower if larger). Default value is 1.0. +

+
+
chroma_strength, cs
+

Set the chroma strength. The option value must be a float number +in the range [-1.0,1.0] that configures the blurring. A value included +in [0.0,1.0] will blur the image whereas a value included in +[-1.0,0.0] will sharpen the image. Default value is 1.0. +

+
+
chroma_threshold, ct
+

Set the chroma threshold used as a coefficient to determine +whether a pixel should be blurred or not. The option value must be an +integer in the range [-30,30]. A value of 0 will filter all the image, +a value included in [0,30] will filter flat areas and a value included +in [-30,0] will filter edges. Default value is 0. +

+
+ +

If a chroma option is not explicitly set, the corresponding luma value +is set. +

+ +

9.78 stereo3d

+ +

Convert between different stereoscopic image formats. +

+

The filters accept the following options: +

+
+
in
+

Set stereoscopic image format of input. +

+

Available values for input image formats are: +

+
sbsl
+

side by side parallel (left eye left, right eye right) +

+
+
sbsr
+

side by side crosseye (right eye left, left eye right) +

+
+
sbs2l
+

side by side parallel with half width resolution +(left eye left, right eye right) +

+
+
sbs2r
+

side by side crosseye with half width resolution +(right eye left, left eye right) +

+
+
abl
+

above-below (left eye above, right eye below) +

+
+
abr
+

above-below (right eye above, left eye below) +

+
+
ab2l
+

above-below with half height resolution +(left eye above, right eye below) +

+
+
ab2r
+

above-below with half height resolution +(right eye above, left eye below) +

+
+
al
+

alternating frames (left eye first, right eye second) +

+
+
ar
+

alternating frames (right eye first, left eye second) +

+

Default value is ‘sbsl’. +

+
+ +
+
out
+

Set stereoscopic image format of output. +

+

Available values for output image formats are all the input formats as well as: +

+
arbg
+

anaglyph red/blue gray +(red filter on left eye, blue filter on right eye) +

+
+
argg
+

anaglyph red/green gray +(red filter on left eye, green filter on right eye) +

+
+
arcg
+

anaglyph red/cyan gray +(red filter on left eye, cyan filter on right eye) +

+
+
arch
+

anaglyph red/cyan half colored +(red filter on left eye, cyan filter on right eye) +

+
+
arcc
+

anaglyph red/cyan color +(red filter on left eye, cyan filter on right eye) +

+
+
arcd
+

anaglyph red/cyan color optimized with the least squares projection of dubois +(red filter on left eye, cyan filter on right eye) +

+
+
agmg
+

anaglyph green/magenta gray +(green filter on left eye, magenta filter on right eye) +

+
+
agmh
+

anaglyph green/magenta half colored +(green filter on left eye, magenta filter on right eye) +

+
+
agmc
+

anaglyph green/magenta colored +(green filter on left eye, magenta filter on right eye) +

+
+
agmd
+

anaglyph green/magenta color optimized with the least squares projection of dubois +(green filter on left eye, magenta filter on right eye) +

+
+
aybg
+

anaglyph yellow/blue gray +(yellow filter on left eye, blue filter on right eye) +

+
+
aybh
+

anaglyph yellow/blue half colored +(yellow filter on left eye, blue filter on right eye) +

+
+
aybc
+

anaglyph yellow/blue colored +(yellow filter on left eye, blue filter on right eye) +

+
+
aybd
+

anaglyph yellow/blue color optimized with the least squares projection of dubois +(yellow filter on left eye, blue filter on right eye) +

+
+
irl
+

interleaved rows (left eye has top row, right eye starts on next row) +

+
+
irr
+

interleaved rows (right eye has top row, left eye starts on next row) +

+
+
ml
+

mono output (left eye only) +

+
+
mr
+

mono output (right eye only) +

+
+ +

Default value is ‘arcd’. +

+
+ + +

9.78.1 Examples

+ +
    +
  • +Convert input video from side by side parallel to anaglyph yellow/blue dubois: +
     
    stereo3d=sbsl:aybd
    +
    + +
  • +Convert input video from above-below (left eye above, right eye below) to side by side crosseye. +
     
    stereo3d=abl:sbsr
    +
    +
+ + +

9.79 spp

+ +

Apply a simple postprocessing filter that compresses and decompresses the image +at several (or - in the case of ‘quality’ level 6 - all) shifts +and average the results. +

+

The filter accepts the following options: +

+
+
quality
+

 Set quality. This option defines the number of levels for averaging. It accepts +an integer in the range 0-6. If set to 0, the filter will have no +effect. A value of 6 means the highest quality. For each increment of +that value the speed drops by a factor of approximately 2. Default value is +3. +

+
+
qp
+

Force a constant quantization parameter. If not set, the filter will use the QP +from the video stream (if available). +

+
+
mode
+

Set thresholding mode. Available modes are: +

+
+
hard
+

Set hard thresholding (default). +

+
soft
+

Set soft thresholding (better de-ringing effect, but likely blurrier). +

+
+ +
+
use_bframe_qp
+

Enable the use of the QP from the B-Frames if set to 1. Using this +option may cause flicker since the B-Frames have often larger QP. Default is +0 (not enabled). +

+
+ +

+

+

9.80 subtitles

+ +

Draw subtitles on top of input video using the libass library. +

+

To enable compilation of this filter you need to configure FFmpeg with +--enable-libass. This filter also requires a build with libavcodec and +libavformat to convert the passed subtitles file to ASS (Advanced Substation +Alpha) subtitles format. +

+

The filter accepts the following options: +

+
+
filename, f
+

Set the filename of the subtitle file to read. It must be specified. +

+
+
original_size
+

Specify the size of the original video, the video for which the ASS file +was composed. For the syntax of this option, check the "Video size" section in +the ffmpeg-utils manual. Due to a misdesign in ASS aspect ratio arithmetic, +this is necessary to correctly scale the fonts if the aspect ratio has been +changed. +

+
+
charenc
+

Set subtitles input character encoding. subtitles filter only. Only +useful if not UTF-8. +

+
+ +

If the first key is not specified, it is assumed that the first value +specifies the ‘filename’. +

+

For example, to render the file ‘sub.srt’ on top of the input +video, use the command: +

 
subtitles=sub.srt
+
+ +

which is equivalent to: +

 
subtitles=filename=sub.srt
+
+ + +

9.81 super2xsai

+ +

Scale the input by 2x and smooth using the Super2xSaI (Scale and +Interpolate) pixel art scaling algorithm. +

+

Useful for enlarging pixel art images without reducing sharpness. +

+ +

9.82 swapuv

+

Swap U & V plane. +

+ +

9.83 telecine

+ +

Apply telecine process to the video. +

+

This filter accepts the following options: +

+
+
first_field
+
+
top, t
+

top field first +

+
bottom, b
+

bottom field first +The default value is top. +

+
+ +
+
pattern
+

A string of numbers representing the pulldown pattern you wish to apply. +The default value is 23. +

+
+ +
 
Some typical patterns:
+
+NTSC output (30i):
+27.5p: 32222
+24p: 23 (classic)
+24p: 2332 (preferred)
+20p: 33
+18p: 334
+16p: 3444
+
+PAL output (25i):
+27.5p: 12222
+24p: 222222222223 ("Euro pulldown")
+16.67p: 33
+16p: 33333334
+
+ + +

9.84 thumbnail

+

Select the most representative frame in a given sequence of consecutive frames. +

+

The filter accepts the following options: +

+
+
n
+

Set the frames batch size to analyze; in a set of n frames, the filter +will pick one of them, and then handle the next batch of n frames until +the end. Default is 100. +

+
+ +

Since the filter keeps track of the whole frames sequence, a bigger n +value will result in a higher memory usage, so a high value is not recommended. +

+ +

9.84.1 Examples

+ +
    +
  • +Extract one picture each 50 frames: +
     
    thumbnail=50
    +
    + +
  • +Complete example of a thumbnail creation with ffmpeg: +
     
    ffmpeg -i in.avi -vf thumbnail,scale=300:200 -frames:v 1 out.png
    +
    +
+ + +

9.85 tile

+ +

Tile several successive frames together. +

+

The filter accepts the following options: +

+
+
layout
+

Set the grid size (i.e. the number of lines and columns). For the syntax of +this option, check the "Video size" section in the ffmpeg-utils manual. +

+
+
nb_frames
+

Set the maximum number of frames to render in the given area. It must be less +than or equal to wxh. The default value is 0, meaning all +the area will be used. +

+
+
margin
+

Set the outer border margin in pixels. +

+
+
padding
+

Set the inner border thickness (i.e. the number of pixels between frames). For +more advanced padding options (such as having different values for the edges), +refer to the pad video filter. +

+
+
color
+

 Specify the color of the unused area. For the syntax of this option, check the +"Color" section in the ffmpeg-utils manual. The default value of color +is "black". +

+
+ + +

9.85.1 Examples

+ +
    +
  • +Produce 8x8 PNG tiles of all keyframes (‘-skip_frame nokey’) in a movie: +
     
    ffmpeg -skip_frame nokey -i file.avi -vf 'scale=128:72,tile=8x8' -an -vsync 0 keyframes%03d.png
    +
    +

    The ‘-vsync 0’ is necessary to prevent ffmpeg from +duplicating each output frame to accommodate the originally detected frame +rate. +

    +
  • +Display 5 pictures in an area of 3x2 frames, +with 7 pixels between them, and 2 pixels of initial margin, using +mixed flat and named options: +
     
    tile=3x2:nb_frames=5:padding=7:margin=2
    +
    +
+ + +

9.86 tinterlace

+ +

Perform various types of temporal field interlacing. +

+

Frames are counted starting from 1, so the first input frame is +considered odd. +

+

The filter accepts the following options: +

+
+
mode
+

Specify the mode of the interlacing. This option can also be specified +as a value alone. See below for a list of values for this option. +

+

Available values are: +

+
+
merge, 0
+

Move odd frames into the upper field, even into the lower field, +generating a double height frame at half frame rate. +

+
+
drop_odd, 1
+

Only output even frames, odd frames are dropped, generating a frame with +unchanged height at half frame rate. +

+
+
drop_even, 2
+

Only output odd frames, even frames are dropped, generating a frame with +unchanged height at half frame rate. +

+
+
pad, 3
+

Expand each frame to full height, but pad alternate lines with black, +generating a frame with double height at the same input frame rate. +

+
+
interleave_top, 4
+

Interleave the upper field from odd frames with the lower field from +even frames, generating a frame with unchanged height at half frame rate. +

+
+
interleave_bottom, 5
+

Interleave the lower field from odd frames with the upper field from +even frames, generating a frame with unchanged height at half frame rate. +

+
+
interlacex2, 6
+

Double frame rate with unchanged height. Frames are inserted each +containing the second temporal field from the previous input frame and +the first temporal field from the next input frame. This mode relies on +the top_field_first flag. Useful for interlaced video displays with no +field synchronisation. +

+
+ +

Numeric values are deprecated but are accepted for backward +compatibility reasons. +

+

Default mode is merge. +

+
+
flags
+

Specify flags influencing the filter process. +

+

Available value for flags is: +

+
+
low_pass_filter, vlfp
+

Enable vertical low-pass filtering in the filter. +Vertical low-pass filtering is required when creating an interlaced +destination from a progressive source which contains high-frequency +vertical detail. Filtering will reduce interlace ’twitter’ and Moire +patterning. +

+

Vertical low-pass filtering can only be enabled for ‘mode’ +interleave_top and interleave_bottom. +

+
+
+
+
+ + +

9.87 transpose

+ +

Transpose rows with columns in the input video and optionally flip it. +

+

This filter accepts the following options: +

+
+
dir
+

Specify the transposition direction. +

+

Can assume the following values: +

+
0, 4, cclock_flip
+

Rotate by 90 degrees counterclockwise and vertically flip (default), that is: +

 
L.R     L.l
+. . ->  . .
+l.r     R.r
+
+ +
+
1, 5, clock
+

Rotate by 90 degrees clockwise, that is: +

 
L.R     l.L
+. . ->  . .
+l.r     r.R
+
+ +
+
2, 6, cclock
+

Rotate by 90 degrees counterclockwise, that is: +

 
L.R     R.r
+. . ->  . .
+l.r     L.l
+
+ +
+
3, 7, clock_flip
+

Rotate by 90 degrees clockwise and vertically flip, that is: +

 
L.R     r.R
+. . ->  . .
+l.r     l.L
+
+
+
+ +

For values between 4-7, the transposition is only done if the input +video geometry is portrait and not landscape. These values are +deprecated, the passthrough option should be used instead. +

+

Numerical values are deprecated, and should be dropped in favor of +symbolic constants. +

+
+
passthrough
+

Do not apply the transposition if the input geometry matches the one +specified by the specified value. It accepts the following values: +

+
none
+

Always apply transposition. +

+
portrait
+

Preserve portrait geometry (when height >= width). +

+
landscape
+

Preserve landscape geometry (when width >= height). +

+
+ +

Default value is none. +

+
+ +

For example to rotate by 90 degrees clockwise and preserve portrait +layout: +

 
transpose=dir=1:passthrough=portrait
+
+ +

The command above can also be specified as: +

 
transpose=1:portrait
+
+ + +

9.88 trim

+

Trim the input so that the output contains one continuous subpart of the input. +

+

This filter accepts the following options: +

+
start
+

Specify time of the start of the kept section, i.e. the frame with the +timestamp start will be the first frame in the output. +

+
+
end
+

Specify time of the first frame that will be dropped, i.e. the frame +immediately preceding the one with the timestamp end will be the last +frame in the output. +

+
+
start_pts
+

Same as start, except this option sets the start timestamp in timebase +units instead of seconds. +

+
+
end_pts
+

Same as end, except this option sets the end timestamp in timebase units +instead of seconds. +

+
+
duration
+

Specify maximum duration of the output. +

+
+
start_frame
+

Number of the first frame that should be passed to output. +

+
+
end_frame
+

Number of the first frame that should be dropped. +

+
+ +

start’, ‘end’, ‘duration’ are expressed as time +duration specifications, check the "Time duration" section in the +ffmpeg-utils manual. +

+

Note that the first two sets of the start/end options and the ‘duration’ +option look at the frame timestamp, while the _frame variants simply count the +frames that pass through the filter. Also note that this filter does not modify +the timestamps. If you wish that the output timestamps start at zero, insert a +setpts filter after the trim filter. +

+

If multiple start or end options are set, this filter tries to be greedy and +keep all the frames that match at least one of the specified constraints. To keep +only the part that matches all the constraints at once, chain multiple trim +filters. +

+

The defaults are such that all the input is kept. So it is possible to set e.g. +just the end values to keep everything before the specified time. +

+

Examples: +

    +
  • +drop everything except the second minute of input +
     
    ffmpeg -i INPUT -vf trim=60:120
    +
    + +
  • +keep only the first second +
     
    ffmpeg -i INPUT -vf trim=duration=1
    +
    + +
+ + + +

9.89 unsharp

+ +

Sharpen or blur the input video. +

+

It accepts the following parameters: +

+
+
luma_msize_x, lx
+

Set the luma matrix horizontal size. It must be an odd integer between +3 and 63, default value is 5. +

+
+
luma_msize_y, ly
+

Set the luma matrix vertical size. It must be an odd integer between 3 +and 63, default value is 5. +

+
+
luma_amount, la
+

Set the luma effect strength. It can be a float number, reasonable +values lay between -1.5 and 1.5. +

+

Negative values will blur the input video, while positive values will +sharpen it, a value of zero will disable the effect. +

+

Default value is 1.0. +

+
+
chroma_msize_x, cx
+

Set the chroma matrix horizontal size. It must be an odd integer +between 3 and 63, default value is 5. +

+
+
chroma_msize_y, cy
+

Set the chroma matrix vertical size. It must be an odd integer +between 3 and 63, default value is 5. +

+
+
chroma_amount, ca
+

Set the chroma effect strength. It can be a float number, reasonable +values lay between -1.5 and 1.5. +

+

Negative values will blur the input video, while positive values will +sharpen it, a value of zero will disable the effect. +

+

Default value is 0.0. +

+
+
opencl
+

If set to 1, specify using OpenCL capabilities, only available if +FFmpeg was configured with --enable-opencl. Default value is 0. +

+
+
+ +

All parameters are optional and default to the equivalent of the +string ’5:5:1.0:5:5:0.0’. +

+ +

9.89.1 Examples

+ +
    +
  • +Apply strong luma sharpen effect: +
     
    unsharp=luma_msize_x=7:luma_msize_y=7:luma_amount=2.5
    +
    + +
  • +Apply strong blur of both luma and chroma parameters: +
     
    unsharp=7:7:-2:7:7:-2
    +
    +
+ +

+

+

9.90 vidstabdetect

+ +

Analyze video stabilization/deshaking. Perform pass 1 of 2, see +vidstabtransform for pass 2. +

+

This filter generates a file with relative translation and rotation +transform information about subsequent frames, which is then used by +the vidstabtransform filter. +

+

To enable compilation of this filter you need to configure FFmpeg with +--enable-libvidstab. +

+

This filter accepts the following options: +

+
+
result
+

Set the path to the file used to write the transforms information. +Default value is ‘transforms.trf’. +

+
+
shakiness
+

Set how shaky the video is and how quick the camera is. It accepts an +integer in the range 1-10, a value of 1 means little shakiness, a +value of 10 means strong shakiness. Default value is 5. +

+
+
accuracy
+

Set the accuracy of the detection process. It must be a value in the +range 1-15. A value of 1 means low accuracy, a value of 15 means high +accuracy. Default value is 15. +

+
+
stepsize
+

Set stepsize of the search process. The region around minimum is +scanned with 1 pixel resolution. Default value is 6. +

+
+
mincontrast
+

Set minimum contrast. Below this value a local measurement field is +discarded. Must be a floating point value in the range 0-1. Default +value is 0.3. +

+
+
tripod
+

Set reference frame number for tripod mode. +

+

If enabled, the motion of the frames is compared to a reference frame +in the filtered stream, identified by the specified number. The idea +is to compensate all movements in a more-or-less static scene and keep +the camera view absolutely still. +

+

If set to 0, it is disabled. The frames are counted starting from 1. +

+
+
show
+

Show fields and transforms in the resulting frames. It accepts an +integer in the range 0-2. Default value is 0, which disables any +visualization. +

+
+ + +

9.90.1 Examples

+ +
    +
  • +Use default values: +
     
    vidstabdetect
    +
    + +
  • +Analyze strongly shaky movie and put the results in file +‘mytransforms.trf’: +
     
    vidstabdetect=shakiness=10:accuracy=15:result="mytransforms.trf"
    +
    + +
  • +Visualize the result of internal transformations in the resulting +video: +
     
    vidstabdetect=show=1
    +
    + +
  • +Analyze a video with medium shakiness using ffmpeg: +
     
    ffmpeg -i input -vf vidstabdetect=shakiness=5:show=1 dummy.avi
    +
    +
+ +

+

+

9.91 vidstabtransform

+ +

Video stabilization/deshaking: pass 2 of 2, +see vidstabdetect for pass 1. +

+

Read a file with transform information for each frame and +apply/compensate them. Together with the vidstabdetect +filter this can be used to deshake videos. See also +http://public.hronopik.de/vid.stab. It is important to also use +the unsharp filter, see below. +

+

To enable compilation of this filter you need to configure FFmpeg with +--enable-libvidstab. +

+ +

9.91.1 Options

+ +
+
input
+

 Set path to the file used to read the transforms. Default value is +‘transforms.trf’. +

+
+
smoothing
+

Set the number of frames (value*2 + 1) used for lowpass filtering the +camera movements. Default value is 10. +

+

 For example a number of 10 means that 21 frames are used (10 in the +past and 10 in the future) to smoothen the motion in the video. A +larger value leads to a smoother video, but limits the acceleration +of the camera (pan/tilt movements). 0 is a special case where a +static camera is simulated. +

+
+
optalgo
+

Set the camera path optimization algorithm. +

+

Accepted values are: +

+
gauss
+

gaussian kernel low-pass filter on camera motion (default) +

+
avg
+

averaging on transformations +

+
+ +
+
maxshift
+

Set maximal number of pixels to translate frames. Default value is -1, +meaning no limit. +

+
+
maxangle
+

Set maximal angle in radians (degree*PI/180) to rotate frames. Default +value is -1, meaning no limit. +

+
+
crop
+

Specify how to deal with borders that may be visible due to movement +compensation. +

+

Available values are: +

+
keep
+

keep image information from previous frame (default) +

+
black
+

fill the border black +

+
+ +
+
invert
+

Invert transforms if set to 1. Default value is 0. +

+
+
relative
+

 Consider transforms as relative to previous frame if set to 1, +absolute if set to 0. Default value is 0. +

+
+
zoom
+

Set percentage to zoom. A positive value will result in a zoom-in +effect, a negative value in a zoom-out effect. Default value is 0 (no +zoom). +

+
+
optzoom
+

Set optimal zooming to avoid borders. +

+

Accepted values are: +

+
0
+

disabled +

+
1
+

optimal static zoom value is determined (only very strong movements +will lead to visible borders) (default) +

+
2
+

optimal adaptive zoom value is determined (no borders will be +visible), see ‘zoomspeed’ +

+
+ +

Note that the value given at zoom is added to the one calculated here. +

+
+
zoomspeed
+

Set percent to zoom maximally each frame (enabled when +‘optzoom’ is set to 2). Range is from 0 to 5, default value is +0.25. +

+
+
interpol
+

Specify type of interpolation. +

+

Available values are: +

+
no
+

no interpolation +

+
linear
+

linear only horizontal +

+
bilinear
+

linear in both directions (default) +

+
bicubic
+

cubic in both directions (slow) +

+
+ +
+
tripod
+

Enable virtual tripod mode if set to 1, which is equivalent to +relative=0:smoothing=0. Default value is 0. +

+

Use also tripod option of vidstabdetect. +

+
+
debug
+

Increase log verbosity if set to 1. Also the detected global motions +are written to the temporary file ‘global_motions.trf’. Default +value is 0. +

+
+ + +

9.91.2 Examples

+ +
    +
  • +Use ffmpeg for a typical stabilization with default values: +
     
    ffmpeg -i inp.mpeg -vf vidstabtransform,unsharp=5:5:0.8:3:3:0.4 inp_stabilized.mpeg
    +
    + +

    Note the use of the unsharp filter which is always recommended. +

    +
  • +Zoom in a bit more and load transform data from a given file: +
     
    vidstabtransform=zoom=5:input="mytransforms.trf"
    +
    + +
  • +Smoothen the video even more: +
     
    vidstabtransform=smoothing=30
    +
    +
+ + +

9.92 vflip

+ +

Flip the input video vertically. +

+

For example, to vertically flip a video with ffmpeg: +

 
ffmpeg -i in.avi -vf "vflip" out.avi
+
+ + +

9.93 vignette

+ +

Make or reverse a natural vignetting effect. +

+

The filter accepts the following options: +

+
+
angle, a
+

Set lens angle expression as a number of radians. +

+

The value is clipped in the [0,PI/2] range. +

+

Default value: "PI/5" +

+
+
x0
+
y0
+

Set center coordinates expressions. Respectively "w/2" and "h/2" +by default. +

+
+
mode
+

Set forward/backward mode. +

+

Available modes are: +

+
forward
+

The larger the distance from the central point, the darker the image becomes. +

+
+
backward
+

The larger the distance from the central point, the brighter the image becomes. +This can be used to reverse a vignette effect, though there is no automatic +detection to extract the lens ‘angle’ and other settings (yet). It can +also be used to create a burning effect. +

+
+ +

Default value is ‘forward’. +

+
+
eval
+

Set evaluation mode for the expressions (‘angle’, ‘x0’, ‘y0’). +

+

It accepts the following values: +

+
init
+

Evaluate expressions only once during the filter initialization. +

+
+
frame
+

Evaluate expressions for each incoming frame. This is way slower than the +‘init’ mode since it requires all the scalers to be re-computed, but it +allows advanced dynamic expressions. +

+
+ +

Default value is ‘init’. +

+
+
dither
+

Set dithering to reduce the circular banding effects. Default is 1 +(enabled). +

+
+
aspect
+

Set vignette aspect. This setting allows one to adjust the shape of the vignette. +Setting this value to the SAR of the input will make a rectangular vignetting +following the dimensions of the video. +

+

Default is 1/1. +

+
+ + +

9.93.1 Expressions

+ +

The ‘angle’, ‘x0’ and ‘y0’ expressions can contain the +following parameters. +

+
+
w
+
h
+

input width and height +

+
+
n
+

the number of input frame, starting from 0 +

+
+
pts
+

the PTS (Presentation TimeStamp) time of the filtered video frame, expressed in +TB units, NAN if undefined +

+
+
r
+

frame rate of the input video, NAN if the input frame rate is unknown +

+
+
t
+

the PTS (Presentation TimeStamp) of the filtered video frame, +expressed in seconds, NAN if undefined +

+
+
tb
+

time base of the input video +

+
+ + + +

9.93.2 Examples

+ +
    +
  • +Apply simple strong vignetting effect: +
     
    vignette=PI/4
    +
    + +
  • +Make a flickering vignetting: +
     
    vignette='PI/4+random(1)*PI/50':eval=frame
    +
    + +
+ + +

9.94 w3fdif

+ +

Deinterlace the input video ("w3fdif" stands for "Weston 3 Field +Deinterlacing Filter"). +

+

Based on the process described by Martin Weston for BBC R&D, and +implemented based on the de-interlace algorithm written by Jim +Easterbrook for BBC R&D, the Weston 3 field deinterlacing filter +uses filter coefficients calculated by BBC R&D. +

+

There are two sets of filter coefficients, so called "simple" +and "complex". Which set of filter coefficients is used can +be set by passing an optional parameter: +

+
+
filter
+

Set the interlacing filter coefficients. Accepts one of the following values: +

+
+
simple
+

Simple filter coefficient set. +

+
complex
+

More-complex filter coefficient set. +

+
+

Default value is ‘complex’. +

+
+
deint
+

Specify which frames to deinterlace. Accept one of the following values: +

+
+
all
+

Deinterlace all frames. +

+
interlaced
+

Only deinterlace frames marked as interlaced. +

+
+ +

Default value is ‘all’. +

+
+ +

+

+

9.95 yadif

+ +

Deinterlace the input video ("yadif" means "yet another deinterlacing +filter"). +

+

This filter accepts the following options: +

+ +
+
mode
+

The interlacing mode to adopt, accepts one of the following values: +

+
+
0, send_frame
+

output 1 frame for each frame +

+
1, send_field
+

output 1 frame for each field +

+
2, send_frame_nospatial
+

like send_frame but skip spatial interlacing check +

+
3, send_field_nospatial
+

like send_field but skip spatial interlacing check +

+
+ +

Default value is send_frame. +

+
+
parity
+

The picture field parity assumed for the input interlaced video, accepts one of +the following values: +

+
+
0, tff
+

assume top field first +

+
1, bff
+

assume bottom field first +

+
-1, auto
+

enable automatic detection +

+
+ +

Default value is auto. +If interlacing is unknown or decoder does not export this information, +top field first will be assumed. +

+
+
deint
+

Specify which frames to deinterlace. Accept one of the following +values: +

+
+
0, all
+

deinterlace all frames +

+
1, interlaced
+

only deinterlace frames marked as interlaced +

+
+ +

Default value is all. +

+
+ + + +

10. Video Sources

+ +

Below is a description of the currently available video sources. +

+ +

10.1 buffer

+ +

Buffer video frames, and make them available to the filter chain. +

+

This source is mainly intended for a programmatic use, in particular +through the interface defined in ‘libavfilter/vsrc_buffer.h’. +

+

This source accepts the following options: +

+
+
video_size
+

Specify the size (width and height) of the buffered video frames. For the +syntax of this option, check the "Video size" section in the ffmpeg-utils +manual. +

+
+
width
+

Input video width. +

+
+
height
+

Input video height. +

+
+
pix_fmt
+

A string representing the pixel format of the buffered video frames. +It may be a number corresponding to a pixel format, or a pixel format +name. +

+
+
time_base
+

Specify the timebase assumed by the timestamps of the buffered frames. +

+
+
frame_rate
+

Specify the frame rate expected for the video stream. +

+
+
pixel_aspect, sar
+

Specify the sample aspect ratio assumed by the video frames. +

+
+
sws_param
+

Specify the optional parameters to be used for the scale filter which +is automatically inserted when an input change is detected in the +input size or format. +

+
+ +

For example: +

 
buffer=width=320:height=240:pix_fmt=yuv410p:time_base=1/24:sar=1
+
+ +

will instruct the source to accept video frames with size 320x240 and +with format "yuv410p", assuming 1/24 as the timestamps timebase and +square pixels (1:1 sample aspect ratio). +Since the pixel format with name "yuv410p" corresponds to the number 6 +(check the enum AVPixelFormat definition in ‘libavutil/pixfmt.h’), +this example corresponds to: +

 
buffer=size=320x240:pixfmt=6:time_base=1/24:pixel_aspect=1/1
+
+ +

Alternatively, the options can be specified as a flat string, but this +syntax is deprecated: +

+

width:height:pix_fmt:time_base.num:time_base.den:pixel_aspect.num:pixel_aspect.den[:sws_param] +

+ +

10.2 cellauto

+ +

Create a pattern generated by an elementary cellular automaton. +

+

The initial state of the cellular automaton can be defined through the +‘filename’, and ‘pattern’ options. If such options are +not specified an initial state is created randomly. +

+

At each new frame a new row in the video is filled with the result of +the cellular automaton next generation. The behavior when the whole +frame is filled is defined by the ‘scroll’ option. +

+

This source accepts the following options: +

+
+
filename, f
+

Read the initial cellular automaton state, i.e. the starting row, from +the specified file. +In the file, each non-whitespace character is considered an alive +cell, a newline will terminate the row, and further characters in the +file will be ignored. +

+
+
pattern, p
+

Read the initial cellular automaton state, i.e. the starting row, from +the specified string. +

+

Each non-whitespace character in the string is considered an alive +cell, a newline will terminate the row, and further characters in the +string will be ignored. +

+
+
rate, r
+

Set the video rate, that is the number of frames generated per second. +Default is 25. +

+
+
random_fill_ratio, ratio
+

Set the random fill ratio for the initial cellular automaton row. It +is a floating point number value ranging from 0 to 1, defaults to +1/PHI. +

+

This option is ignored when a file or a pattern is specified. +

+
+
random_seed, seed
+

Set the seed for filling randomly the initial row, must be an integer +included between 0 and UINT32_MAX. If not specified, or if explicitly +set to -1, the filter will try to use a good random seed on a best +effort basis. +

+
+
rule
+

Set the cellular automaton rule, it is a number ranging from 0 to 255. +Default value is 110. +

+
+
size, s
+

Set the size of the output video. For the syntax of this option, check +the "Video size" section in the ffmpeg-utils manual. +

+

If ‘filename’ or ‘pattern’ is specified, the size is set +by default to the width of the specified initial state row, and the +height is set to width * PHI. +

+

If ‘size’ is set, it must contain the width of the specified +pattern string, and the specified pattern will be centered in the +larger row. +

+

If a filename or a pattern string is not specified, the size value +defaults to "320x518" (used for a randomly generated initial state). +

+
+
scroll
+

If set to 1, scroll the output upward when all the rows in the output +have been already filled. If set to 0, the new generated row will be +written over the top row just after the bottom row is filled. +Defaults to 1. +

+
+
start_full, full
+

If set to 1, completely fill the output with generated rows before +outputting the first frame. +This is the default behavior, for disabling set the value to 0. +

+
+
stitch
+

If set to 1, stitch the left and right row edges together. +This is the default behavior, for disabling set the value to 0. +

+
+ + +

10.2.1 Examples

+ +
    +
  • +Read the initial state from ‘pattern’, and specify an output of +size 200x400. +
     
    cellauto=f=pattern:s=200x400
    +
    + +
  • +Generate a random initial row with a width of 200 cells, with a fill +ratio of 2/3: +
     
    cellauto=ratio=2/3:s=200x200
    +
    + +
  • +Create a pattern generated by rule 18 starting by a single alive cell +centered on an initial row with width 100: +
     
    cellauto=p=@:s=100x400:full=0:rule=18
    +
    + +
  • +Specify a more elaborated initial pattern: +
     
    cellauto=p='@@ @ @@':s=100x400:full=0:rule=18
    +
    + +
+ + +

10.3 mandelbrot

+ +

Generate a Mandelbrot set fractal, and progressively zoom towards the +point specified with start_x and start_y. +

+

This source accepts the following options: +

+
+
end_pts
+

Set the terminal pts value. Default value is 400. +

+
+
end_scale
+

Set the terminal scale value. +Must be a floating point value. Default value is 0.3. +

+
+
inner
+

Set the inner coloring mode, that is the algorithm used to draw the +Mandelbrot fractal internal region. +

+

It shall assume one of the following values: +

+
black
+

Set black mode. +

+
convergence
+

Show time until convergence. +

+
mincol
+

Set color based on point closest to the origin of the iterations. +

+
period
+

Set period mode. +

+
+ +

Default value is mincol. +

+
+
bailout
+

Set the bailout value. Default value is 10.0. +

+
+
maxiter
+

Set the maximum of iterations performed by the rendering +algorithm. Default value is 7189. +

+
+
outer
+

Set outer coloring mode. +It shall assume one of following values: +

+
iteration_count
+

Set iteration count mode. +

+
normalized_iteration_count
+

set normalized iteration count mode. +

+
+

Default value is normalized_iteration_count. +

+
+
rate, r
+

Set frame rate, expressed as number of frames per second. Default +value is "25". +

+
+
size, s
+

Set frame size. For the syntax of this option, check the "Video +size" section in the ffmpeg-utils manual. Default value is "640x480". +

+
+
start_scale
+

Set the initial scale value. Default value is 3.0. +

+
+
start_x
+

Set the initial x position. Must be a floating point value between +-100 and 100. Default value is -0.743643887037158704752191506114774. +

+
+
start_y
+

Set the initial y position. Must be a floating point value between +-100 and 100. Default value is -0.131825904205311970493132056385139. +

+
+ + +

10.4 mptestsrc

+ +

Generate various test patterns, as generated by the MPlayer test filter. +

+

The size of the generated video is fixed, and is 256x256. +This source is useful in particular for testing encoding features. +

+

This source accepts the following options: +

+
+
rate, r
+

Specify the frame rate of the sourced video, as the number of frames +generated per second. It has to be a string in the format +frame_rate_num/frame_rate_den, an integer number, a float +number or a valid video frame rate abbreviation. The default value is +"25". +

+
+
duration, d
+

Set the video duration of the sourced video. The accepted syntax is: +

 
[-]HH:MM:SS[.m...]
+[-]S+[.m...]
+
+

See also the function av_parse_time(). +

+

If not specified, or the expressed duration is negative, the video is +supposed to be generated forever. +

+
+
test, t
+
+

Set the number or the name of the test to perform. Supported tests are: +

+
dc_luma
+
dc_chroma
+
freq_luma
+
freq_chroma
+
amp_luma
+
amp_chroma
+
cbp
+
mv
+
ring1
+
ring2
+
all
+
+ +

Default value is "all", which will cycle through the list of all tests. +

+
+ +

For example the following: +

 
testsrc=t=dc_luma
+
+ +

will generate a "dc_luma" test pattern. +

+ +

10.5 frei0r_src

+ +

Provide a frei0r source. +

+

To enable compilation of this filter you need to install the frei0r +header and configure FFmpeg with --enable-frei0r. +

+

This source accepts the following options: +

+
+
size
+

The size of the video to generate. For the syntax of this option, check the +"Video size" section in the ffmpeg-utils manual. +

+
+
framerate
+

Framerate of the generated video, may be a string of the form +num/den or a frame rate abbreviation. +

+
+
filter_name
+

The name to the frei0r source to load. For more information regarding frei0r and +how to set the parameters read the section frei0r in the description of +the video filters. +

+
+
filter_params
+

A ’|’-separated list of parameters to pass to the frei0r source. +

+
+
+ +

For example, to generate a frei0r partik0l source with size 200x200 +and frame rate 10 which is overlayed on the overlay filter main input: +

 
frei0r_src=size=200x200:framerate=10:filter_name=partik0l:filter_params=1234 [overlay]; [in][overlay] overlay
+
+ + +

10.6 life

+ +

Generate a life pattern. +

+

This source is based on a generalization of John Conway’s life game. +

+

The sourced input represents a life grid, each pixel represents a cell +which can be in one of two possible states, alive or dead. Every cell +interacts with its eight neighbours, which are the cells that are +horizontally, vertically, or diagonally adjacent. +

+

At each interaction the grid evolves according to the adopted rule, +which specifies the number of neighbor alive cells which will make a +cell stay alive or born. The ‘rule’ option allows one to specify +the rule to adopt. +

+

This source accepts the following options: +

+
+
filename, f
+

Set the file from which to read the initial grid state. In the file, +each non-whitespace character is considered an alive cell, and newline +is used to delimit the end of each row. +

+

If this option is not specified, the initial grid is generated +randomly. +

+
+
rate, r
+

Set the video rate, that is the number of frames generated per second. +Default is 25. +

+
+
random_fill_ratio, ratio
+

Set the random fill ratio for the initial random grid. It is a +floating point number value ranging from 0 to 1, defaults to 1/PHI. +It is ignored when a file is specified. +

+
+
random_seed, seed
+

Set the seed for filling the initial random grid, must be an integer +included between 0 and UINT32_MAX. If not specified, or if explicitly +set to -1, the filter will try to use a good random seed on a best +effort basis. +

+
+
rule
+

Set the life rule. +

+

A rule can be specified with a code of the kind "SNS/BNB", +where NS and NB are sequences of numbers in the range 0-8, +NS specifies the number of alive neighbor cells which make a +live cell stay alive, and NB the number of alive neighbor cells +which make a dead cell to become alive (i.e. to "born"). +"s" and "b" can be used in place of "S" and "B", respectively. +

+

Alternatively a rule can be specified by an 18-bits integer. The 9 +high order bits are used to encode the next cell state if it is alive +for each number of neighbor alive cells, the low order bits specify +the rule for "borning" new cells. Higher order bits encode for an +higher number of neighbor cells. +For example the number 6153 = (12<<9)+9 specifies a stay alive +rule of 12 and a born rule of 9, which corresponds to "S23/B03". +

+

Default value is "S23/B3", which is the original Conway’s game of life +rule, and will keep a cell alive if it has 2 or 3 neighbor alive +cells, and will born a new cell if there are three alive cells around +a dead cell. +

+
+
size, s
+

Set the size of the output video. For the syntax of this option, check the +"Video size" section in the ffmpeg-utils manual. +

+

If ‘filename’ is specified, the size is set by default to the +same size of the input file. If ‘size’ is set, it must contain +the size specified in the input file, and the initial grid defined in +that file is centered in the larger resulting area. +

+

If a filename is not specified, the size value defaults to "320x240" +(used for a randomly generated initial grid). +

+
+
stitch
+

If set to 1, stitch the left and right grid edges together, and the +top and bottom edges also. Defaults to 1. +

+
+
mold
+

Set cell mold speed. If set, a dead cell will go from ‘death_color’ to +‘mold_color’ with a step of ‘mold’. ‘mold’ can have a +value from 0 to 255. +

+
+
life_color
+

Set the color of living (or new born) cells. +

+
+
death_color
+

Set the color of dead cells. If ‘mold’ is set, this is the first color +used to represent a dead cell. +

+
+
mold_color
+

Set mold color, for definitely dead and moldy cells. +

+

For the syntax of these 3 color options, check the "Color" section in the +ffmpeg-utils manual. +

+
+ + +

10.6.1 Examples

+ +
    +
  • +Read a grid from ‘pattern’, and center it on a grid of size +300x300 pixels: +
     
    life=f=pattern:s=300x300
    +
    + +
  • +Generate a random grid of size 200x200, with a fill ratio of 2/3: +
     
    life=ratio=2/3:s=200x200
    +
    + +
  • +Specify a custom rule for evolving a randomly generated grid: +
     
    life=rule=S14/B34
    +
    + +
  • +Full example with slow death effect (mold) using ffplay: +
     
    ffplay -f lavfi life=s=300x200:mold=10:r=60:ratio=0.1:death_color=#C83232:life_color=#00ff00,scale=1200:800:flags=16
    +
    +
+ +

+ + + + + + +

+

10.7 color, haldclutsrc, nullsrc, rgbtestsrc, smptebars, smptehdbars, testsrc

+ +

The color source provides an uniformly colored input. +

+

The haldclutsrc source provides an identity Hald CLUT. See also +haldclut filter. +

+

The nullsrc source returns unprocessed video frames. It is +mainly useful to be employed in analysis / debugging tools, or as the +source for filters which ignore the input data. +

+

The rgbtestsrc source generates an RGB test pattern useful for +detecting RGB vs BGR issues. You should see a red, green and blue +stripe from top to bottom. +

+

The smptebars source generates a color bars pattern, based on +the SMPTE Engineering Guideline EG 1-1990. +

+

The smptehdbars source generates a color bars pattern, based on +the SMPTE RP 219-2002. +

+

The testsrc source generates a test video pattern, showing a +color pattern, a scrolling gradient and a timestamp. This is mainly +intended for testing purposes. +

+

The sources accept the following options: +

+
+
color, c
+

Specify the color of the source, only available in the color +source. For the syntax of this option, check the "Color" section in the +ffmpeg-utils manual. +

+
+
level
+

Specify the level of the Hald CLUT, only available in the haldclutsrc +source. A level of N generates a picture of N*N*N by N*N*N +pixels to be used as identity matrix for 3D lookup tables. Each component is +coded on a 1/(N*N) scale. +

+
+
size, s
+

Specify the size of the sourced video. For the syntax of this option, check the +"Video size" section in the ffmpeg-utils manual. The default value is +"320x240". +

+

This option is not available with the haldclutsrc filter. +

+
+
rate, r
+

Specify the frame rate of the sourced video, as the number of frames +generated per second. It has to be a string in the format +frame_rate_num/frame_rate_den, an integer number, a float +number or a valid video frame rate abbreviation. The default value is +"25". +

+
+
sar
+

Set the sample aspect ratio of the sourced video. +

+
+
duration, d
+

Set the video duration of the sourced video. The accepted syntax is: +

 
[-]HH[:MM[:SS[.m...]]]
+[-]S+[.m...]
+
+

See also the function av_parse_time(). +

+

If not specified, or the expressed duration is negative, the video is +supposed to be generated forever. +

+
+
decimals, n
+

Set the number of decimals to show in the timestamp, only available in the +testsrc source. +

+

The displayed timestamp value will correspond to the original +timestamp value multiplied by the power of 10 of the specified +value. Default value is 0. +

+
+ +

For example the following: +

 
testsrc=duration=5.3:size=qcif:rate=10
+
+ +

will generate a video with a duration of 5.3 seconds, with size +176x144 and a frame rate of 10 frames per second. +

+

The following graph description will generate a red source +with an opacity of 0.2, with size "qcif" and a frame rate of 10 +frames per second. +

 
color=c=red@0.2:s=qcif:r=10
+
+ +

If the input content is to be ignored, nullsrc can be used. The +following command generates noise in the luminance plane by employing +the geq filter: +

 
nullsrc=s=256x256, geq=random(1)*255:128:128
+
+ + +

10.7.1 Commands

+ +

The color source supports the following commands: +

+
+
c, color
+

Set the color of the created image. Accepts the same syntax of the +corresponding ‘color’ option. +

+
+ + + +

11. Video Sinks

+ +

Below is a description of the currently available video sinks. +

+ +

11.1 buffersink

+ +

Buffer video frames, and make them available to the end of the filter +graph. +

+

This sink is mainly intended for a programmatic use, in particular +through the interface defined in ‘libavfilter/buffersink.h’ +or the options system. +

+

It accepts a pointer to an AVBufferSinkContext structure, which +defines the incoming buffers’ formats, to be passed as the opaque +parameter to avfilter_init_filter for initialization. +

+ +

11.2 nullsink

+ +

Null video sink, do absolutely nothing with the input video. It is +mainly useful as a template and to be employed in analysis / debugging +tools. +

+ + +

12. Multimedia Filters

+ +

Below is a description of the currently available multimedia filters. +

+ +

12.1 avectorscope

+ +

Convert input audio to a video output, representing the audio vector +scope. +

+

The filter is used to measure the difference between channels of stereo +audio stream. A monoaural signal, consisting of identical left and right +signal, results in straight vertical line. Any stereo separation is visible +as a deviation from this line, creating a Lissajous figure. +If the straight (or deviation from it) but horizontal line appears this +indicates that the left and right channels are out of phase. +

+

The filter accepts the following options: +

+
+
mode, m
+

Set the vectorscope mode. +

+

Available values are: +

+
lissajous
+

Lissajous rotated by 45 degrees. +

+
+
lissajous_xy
+

Same as above but not rotated. +

+
+ +

Default value is ‘lissajous’. +

+
+
size, s
+

Set the video size for the output. For the syntax of this option, check the "Video size" +section in the ffmpeg-utils manual. Default value is 400x400. +

+
+
rate, r
+

Set the output frame rate. Default value is 25. +

+
+
rc
+
gc
+
bc
+

Specify the red, green and blue contrast. Default values are 40, 160 and 80. +Allowed range is [0, 255]. +

+
+
rf
+
gf
+
bf
+

Specify the red, green and blue fade. Default values are 15, 10 and 5. +Allowed range is [0, 255]. +

+
+
zoom
+

Set the zoom factor. Default value is 1. Allowed range is [1, 10]. +

+
+ + +

12.1.1 Examples

+ +
    +
  • +Complete example using ffplay: +
     
    ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
    +             [a] avectorscope=zoom=1.3:rc=2:gc=200:bc=10:rf=1:gf=8:bf=7 [out0]'
    +
    +
+ + +

12.2 concat

+ +

Concatenate audio and video streams, joining them together one after the +other. +

+

The filter works on segments of synchronized video and audio streams. All +segments must have the same number of streams of each type, and that will +also be the number of streams at output. +

+

The filter accepts the following options: +

+
+
n
+

Set the number of segments. Default is 2. +

+
+
v
+

Set the number of output video streams, that is also the number of video +streams in each segment. Default is 1. +

+
+
a
+

Set the number of output audio streams, that is also the number of audio +streams in each segment. Default is 0. +

+
+
unsafe
+

Activate unsafe mode: do not fail if segments have a different format. +

+
+
+ +

The filter has v+a outputs: first v video outputs, then +a audio outputs. +

+

There are nx(v+a) inputs: first the inputs for the first +segment, in the same order as the outputs, then the inputs for the second +segment, etc. +

+

Related streams do not always have exactly the same duration, for various +reasons including codec frame size or sloppy authoring. For that reason, +related synchronized streams (e.g. a video and its audio track) should be +concatenated at once. The concat filter will use the duration of the longest +stream in each segment (except the last one), and if necessary pad shorter +audio streams with silence. +

+

For this filter to work correctly, all segments must start at timestamp 0. +

+

All corresponding streams must have the same parameters in all segments; the +filtering system will automatically select a common pixel format for video +streams, and a common sample format, sample rate and channel layout for +audio streams, but other settings, such as resolution, must be converted +explicitly by the user. +

+

Different frame rates are acceptable but will result in variable frame rate +at output; be sure to configure the output file to handle it. +

+ +

12.2.1 Examples

+ +
    +
  • +Concatenate an opening, an episode and an ending, all in bilingual version +(video in stream 0, audio in streams 1 and 2): +
     
    ffmpeg -i opening.mkv -i episode.mkv -i ending.mkv -filter_complex \
    +  '[0:0] [0:1] [0:2] [1:0] [1:1] [1:2] [2:0] [2:1] [2:2]
    +   concat=n=3:v=1:a=2 [v] [a1] [a2]' \
    +  -map '[v]' -map '[a1]' -map '[a2]' output.mkv
    +
    + +
  • +Concatenate two parts, handling audio and video separately, using the +(a)movie sources, and adjusting the resolution: +
     
    movie=part1.mp4, scale=512:288 [v1] ; amovie=part1.mp4 [a1] ;
    +movie=part2.mp4, scale=512:288 [v2] ; amovie=part2.mp4 [a2] ;
    +[v1] [v2] concat [outv] ; [a1] [a2] concat=v=0:a=1 [outa]
    +
    +

    Note that a desync will happen at the stitch if the audio and video streams +do not have exactly the same duration in the first file. +

    +
+ + +

12.3 ebur128

+ +

EBU R128 scanner filter. This filter takes an audio stream as input and outputs +it unchanged. By default, it logs a message at a frequency of 10Hz with the +Momentary loudness (identified by M), Short-term loudness (S), +Integrated loudness (I) and Loudness Range (LRA). +

+

The filter also has a video output (see the video option) with a real +time graph to observe the loudness evolution. The graphic contains the logged +message mentioned above, so it is not printed anymore when this option is set, +unless the verbose logging is set. The main graphing area contains the +short-term loudness (3 seconds of analysis), and the gauge on the right is for +the momentary loudness (400 milliseconds). +

+

More information about the Loudness Recommendation EBU R128 on +http://tech.ebu.ch/loudness. +

+

The filter accepts the following options: +

+
+
video
+

Activate the video output. The audio stream is passed unchanged whether this +option is set or no. The video stream will be the first output stream if +activated. Default is 0. +

+
+
size
+

Set the video size. This option is for video only. For the syntax of this +option, check the "Video size" section in the ffmpeg-utils manual. Default +and minimum resolution is 640x480. +

+
+
meter
+

Set the EBU scale meter. Default is 9. Common values are 9 and +18, respectively for EBU scale meter +9 and EBU scale meter +18. Any +other integer value between this range is allowed. +

+
+
metadata
+

Set metadata injection. If set to 1, the audio input will be segmented +into 100ms output frames, each of them containing various loudness information +in metadata. All the metadata keys are prefixed with lavfi.r128.. +

+

Default is 0. +

+
+
framelog
+

Force the frame logging level. +

+

Available values are: +

+
info
+

information logging level +

+
verbose
+

verbose logging level +

+
+ +

By default, the logging level is set to info. If the ‘video’ or +the ‘metadata’ options are set, it switches to verbose. +

+
+
peak
+

Set peak mode(s). +

+

Available modes can be cumulated (the option is a flag type). Possible +values are: +

+
none
+

Disable any peak mode (default). +

+
sample
+

Enable sample-peak mode. +

+

Simple peak mode looking for the higher sample value. It logs a message +for sample-peak (identified by SPK). +

+
true
+

Enable true-peak mode. +

+

If enabled, the peak lookup is done on an over-sampled version of the input +stream for better peak accuracy. It logs a message for true-peak. +(identified by TPK) and true-peak per frame (identified by FTPK). +This mode requires a build with libswresample. +

+
+ +
+
+ + +

12.3.1 Examples

+ +
    +
  • +Real-time graph using ffplay, with a EBU scale meter +18: +
     
    ffplay -f lavfi -i "amovie=input.mp3,ebur128=video=1:meter=18 [out0][out1]"
    +
    + +
  • +Run an analysis with ffmpeg: +
     
    ffmpeg -nostats -i input.mp3 -filter_complex ebur128 -f null -
    +
    +
+ + +

12.4 interleave, ainterleave

+ +

Temporally interleave frames from several inputs. +

+

interleave works with video inputs, ainterleave with audio. +

+

These filters read frames from several inputs and send the oldest +queued frame to the output. +

+

Input streams must have a well defined, monotonically increasing frame +timestamp values. +

+

In order to submit one frame to output, these filters need to enqueue +at least one frame for each input, so they cannot work in case one +input is not yet terminated and will not receive incoming frames. +

+

For example consider the case when one input is a select filter +which always drop input frames. The interleave filter will keep +reading from that input, but it will never be able to send new frames +to output until the input will send an end-of-stream signal. +

+

Also, depending on inputs synchronization, the filters will drop +frames in case one input receives more frames than the other ones, and +the queue is already filled. +

+

These filters accept the following options: +

+
+
nb_inputs, n
+

Set the number of different inputs, it is 2 by default. +

+
+ + +

12.4.1 Examples

+ +
    +
  • +Interleave frames belonging to different streams using ffmpeg: +
     
    ffmpeg -i bambi.avi -i pr0n.mkv -filter_complex "[0:v][1:v] interleave" out.avi
    +
    + +
  • +Add flickering blur effect: +
     
    select='if(gt(random(0), 0.2), 1, 2)':n=2 [tmp], boxblur=2:2, [tmp] interleave
    +
    +
+ + +

12.5 perms, aperms

+ +

Set read/write permissions for the output frames. +

+

These filters are mainly aimed at developers to test direct path in the +following filter in the filtergraph. +

+

The filters accept the following options: +

+
+
mode
+

Select the permissions mode. +

+

It accepts the following values: +

+
none
+

Do nothing. This is the default. +

+
ro
+

Set all the output frames read-only. +

+
rw
+

Set all the output frames directly writable. +

+
toggle
+

Make the frame read-only if writable, and writable if read-only. +

+
random
+

Set each output frame read-only or writable randomly. +

+
+ +
+
seed
+

Set the seed for the random mode, must be an integer included between +0 and UINT32_MAX. If not specified, or if explicitly set to +-1, the filter will try to use a good random seed on a best effort +basis. +

+
+ +

Note: in case of auto-inserted filter between the permission filter and the +following one, the permission might not be received as expected in that +following filter. Inserting a format or aformat filter before the +perms/aperms filter can avoid this problem. +

+ +

12.6 select, aselect

+ +

Select frames to pass in output. +

+

This filter accepts the following options: +

+
+
expr, e
+

Set expression, which is evaluated for each input frame. +

+

If the expression is evaluated to zero, the frame is discarded. +

+

If the evaluation result is negative or NaN, the frame is sent to the +first output; otherwise it is sent to the output with index +ceil(val)-1, assuming that the input index starts from 0. +

+

For example a value of 1.2 corresponds to the output with index +ceil(1.2)-1 = 2-1 = 1, that is the second output. +

+
+
outputs, n
+

Set the number of outputs. The output to which to send the selected +frame is based on the result of the evaluation. Default value is 1. +

+
+ +

The expression can contain the following constants: +

+
+
n
+

the sequential number of the filtered frame, starting from 0 +

+
+
selected_n
+

the sequential number of the selected frame, starting from 0 +

+
+
prev_selected_n
+

the sequential number of the last selected frame, NAN if undefined +

+
+
TB
+

timebase of the input timestamps +

+
+
pts
+

the PTS (Presentation TimeStamp) of the filtered video frame, +expressed in TB units, NAN if undefined +

+
+
t
+

the PTS (Presentation TimeStamp) of the filtered video frame, +expressed in seconds, NAN if undefined +

+
+
prev_pts
+

the PTS of the previously filtered video frame, NAN if undefined +

+
+
prev_selected_pts
+

the PTS of the last previously filtered video frame, NAN if undefined +

+
+
prev_selected_t
+

the PTS of the last previously selected video frame, NAN if undefined +

+
+
start_pts
+

the PTS of the first video frame in the video, NAN if undefined +

+
+
start_t
+

the time of the first video frame in the video, NAN if undefined +

+
+
pict_type (video only)
+

the type of the filtered frame, can assume one of the following +values: +

+
I
+
P
+
B
+
S
+
SI
+
SP
+
BI
+
+ +
+
interlace_type (video only)
+

the frame interlace type, can assume one of the following values: +

+
PROGRESSIVE
+

the frame is progressive (not interlaced) +

+
TOPFIRST
+

the frame is top-field-first +

+
BOTTOMFIRST
+

the frame is bottom-field-first +

+
+ +
+
consumed_sample_n (audio only)
+

the number of selected samples before the current frame +

+
+
samples_n (audio only)
+

the number of samples in the current frame +

+
+
sample_rate (audio only)
+

the input sample rate +

+
+
key
+

1 if the filtered frame is a key-frame, 0 otherwise +

+
+
pos
+

the position in the file of the filtered frame, -1 if the information +is not available (e.g. for synthetic video) +

+
+
scene (video only)
+

value between 0 and 1 to indicate a new scene; a low value reflects a low +probability for the current frame to introduce a new scene, while a higher +value means the current frame is more likely to be one (see the example below) +

+
+
+ +

The default value of the select expression is "1". +

+ +

12.6.1 Examples

+ +
    +
  • +Select all frames in input: +
     
    select
    +
    + +

    The example above is the same as: +

     
    select=1
    +
    + +
  • +Skip all frames: +
     
    select=0
    +
    + +
  • +Select only I-frames: +
     
    select='eq(pict_type\,I)'
    +
    + +
  • +Select one frame every 100: +
     
    select='not(mod(n\,100))'
    +
    + +
  • +Select only frames contained in the 10-20 time interval: +
     
    select=between(t\,10\,20)
    +
    + +
  • +Select only I frames contained in the 10-20 time interval: +
     
    select=between(t\,10\,20)*eq(pict_type\,I)
    +
    + +
  • +Select frames with a minimum distance of 10 seconds: +
     
    select='isnan(prev_selected_t)+gte(t-prev_selected_t\,10)'
    +
    + +
  • +Use aselect to select only audio frames with samples number > 100: +
     
    aselect='gt(samples_n\,100)'
    +
    + +
  • +Create a mosaic of the first scenes: +
     
    ffmpeg -i video.avi -vf select='gt(scene\,0.4)',scale=160:120,tile -frames:v 1 preview.png
    +
    + +

    Comparing scene against a value between 0.3 and 0.5 is generally a sane +choice. +

    +
  • +Send even and odd frames to separate outputs, and compose them: +
     
    select=n=2:e='mod(n, 2)+1' [odd][even]; [odd] pad=h=2*ih [tmp]; [tmp][even] overlay=y=h
    +
    +
+ + +

12.7 sendcmd, asendcmd

+ +

Send commands to filters in the filtergraph. +

+

These filters read commands to be sent to other filters in the +filtergraph. +

+

sendcmd must be inserted between two video filters, +asendcmd must be inserted between two audio filters, but apart +from that they act the same way. +

+

The specification of commands can be provided in the filter arguments +with the commands option, or in a file specified by the +filename option. +

+

These filters accept the following options: +

+
commands, c
+

Set the commands to be read and sent to the other filters. +

+
filename, f
+

Set the filename of the commands to be read and sent to the other +filters. +

+
+ + +

12.7.1 Commands syntax

+ +

A commands description consists of a sequence of interval +specifications, comprising a list of commands to be executed when a +particular event related to that interval occurs. The occurring event +is typically the current frame time entering or leaving a given time +interval. +

+

An interval is specified by the following syntax: +

 
START[-END] COMMANDS;
+
+ +

The time interval is specified by the START and END times. +END is optional and defaults to the maximum time. +

+

The current frame time is considered within the specified interval if +it is included in the interval [START, END), that is when +the time is greater or equal to START and is lesser than +END. +

+

COMMANDS consists of a sequence of one or more command +specifications, separated by ",", relating to that interval. The +syntax of a command specification is given by: +

 
[FLAGS] TARGET COMMAND ARG
+
+ +

FLAGS is optional and specifies the type of events relating to +the time interval which enable sending the specified command, and must +be a non-null sequence of identifier flags separated by "+" or "|" and +enclosed between "[" and "]". +

+

The following flags are recognized: +

+
enter
+

The command is sent when the current frame timestamp enters the +specified interval. In other words, the command is sent when the +previous frame timestamp was not in the given interval, and the +current is. +

+
+
leave
+

The command is sent when the current frame timestamp leaves the +specified interval. In other words, the command is sent when the +previous frame timestamp was in the given interval, and the +current is not. +

+
+ +

If FLAGS is not specified, a default value of [enter] is +assumed. +

+

TARGET specifies the target of the command, usually the name of +the filter class or a specific filter instance name. +

+

COMMAND specifies the name of the command for the target filter. +

+

ARG is optional and specifies the optional list of argument for +the given COMMAND. +

+

Between one interval specification and another, whitespaces, or +sequences of characters starting with # until the end of line, +are ignored and can be used to annotate comments. +

+

A simplified BNF description of the commands specification syntax +follows: +

 
COMMAND_FLAG  ::= "enter" | "leave"
+COMMAND_FLAGS ::= COMMAND_FLAG [(+|"|")COMMAND_FLAG]
+COMMAND       ::= ["[" COMMAND_FLAGS "]"] TARGET COMMAND [ARG]
+COMMANDS      ::= COMMAND [,COMMANDS]
+INTERVAL      ::= START[-END] COMMANDS
+INTERVALS     ::= INTERVAL[;INTERVALS]
+
+ + +

12.7.2 Examples

+ +
    +
  • +Specify audio tempo change at second 4: +
     
    asendcmd=c='4.0 atempo tempo 1.5',atempo
    +
    + +
  • +Specify a list of drawtext and hue commands in a file. +
     
    # show text in the interval 5-10
    +5.0-10.0 [enter] drawtext reinit 'fontfile=FreeSerif.ttf:text=hello world',
    +         [leave] drawtext reinit 'fontfile=FreeSerif.ttf:text=';
    +
    +# desaturate the image in the interval 15-20
    +15.0-20.0 [enter] hue s 0,
    +          [enter] drawtext reinit 'fontfile=FreeSerif.ttf:text=nocolor',
    +          [leave] hue s 1,
    +          [leave] drawtext reinit 'fontfile=FreeSerif.ttf:text=color';
    +
    +# apply an exponential saturation fade-out effect, starting from time 25
    +25 [enter] hue s exp(25-t)
    +
    + +

    A filtergraph allowing to read and process the above command list +stored in a file ‘test.cmd’, can be specified with: +

     
    sendcmd=f=test.cmd,drawtext=fontfile=FreeSerif.ttf:text='',hue
    +
    +
+ +

+

+

12.8 setpts, asetpts

+ +

Change the PTS (presentation timestamp) of the input frames. +

+

setpts works on video frames, asetpts on audio frames. +

+

This filter accepts the following options: +

+
+
expr
+

The expression which is evaluated for each frame to construct its timestamp. +

+
+
+ +

The expression is evaluated through the eval API and can contain the following +constants: +

+
+
FRAME_RATE
+

frame rate, only defined for constant frame-rate video +

+
+
PTS
+

the presentation timestamp in input +

+
+
N
+

the count of the input frame for video or the number of consumed samples, +not including the current frame for audio, starting from 0. +

+
+
NB_CONSUMED_SAMPLES
+

the number of consumed samples, not including the current frame (only +audio) +

+
+
NB_SAMPLES, S
+

the number of samples in the current frame (only audio) +

+
+
SAMPLE_RATE, SR
+

audio sample rate +

+
+
STARTPTS
+

the PTS of the first frame +

+
+
STARTT
+

the time in seconds of the first frame +

+
+
INTERLACED
+

tell if the current frame is interlaced +

+
+
T
+

the time in seconds of the current frame +

+
+
POS
+

original position in the file of the frame, or undefined if undefined +for the current frame +

+
+
PREV_INPTS
+

previous input PTS +

+
+
PREV_INT
+

previous input time in seconds +

+
+
PREV_OUTPTS
+

previous output PTS +

+
+
PREV_OUTT
+

previous output time in seconds +

+
+
RTCTIME
+

wallclock (RTC) time in microseconds. This is deprecated, use time(0) +instead. +

+
+
RTCSTART
+

wallclock (RTC) time at the start of the movie in microseconds +

+
+
TB
+

timebase of the input timestamps +

+
+
+ + +

12.8.1 Examples

+ +
    +
  • +Start counting PTS from zero +
     
    setpts=PTS-STARTPTS
    +
    + +
  • +Apply fast motion effect: +
     
    setpts=0.5*PTS
    +
    + +
  • +Apply slow motion effect: +
     
    setpts=2.0*PTS
    +
    + +
  • +Set fixed rate of 25 frames per second: +
     
    setpts=N/(25*TB)
    +
    + +
  • +Set fixed rate 25 fps with some jitter: +
     
    setpts='1/(25*TB) * (N + 0.05 * sin(N*2*PI/25))'
    +
    + +
  • +Apply an offset of 10 seconds to the input PTS: +
     
    setpts=PTS+10/TB
    +
    + +
  • +Generate timestamps from a "live source" and rebase onto the current timebase: +
     
    setpts='(RTCTIME - RTCSTART) / (TB * 1000000)'
    +
    + +
  • +Generate timestamps by counting samples: +
     
    asetpts=N/SR/TB
    +
    + +
+ + +

12.9 settb, asettb

+ +

Set the timebase to use for the output frames timestamps. +It is mainly useful for testing timebase configuration. +

+

This filter accepts the following options: +

+
+
expr, tb
+

The expression which is evaluated into the output timebase. +

+
+
+ +

The value for ‘tb’ is an arithmetic expression representing a +rational. The expression can contain the constants "AVTB" (the default +timebase), "intb" (the input timebase) and "sr" (the sample rate, +audio only). Default value is "intb". +

+ +

12.9.1 Examples

+ +
    +
  • +Set the timebase to 1/25: +
     
    settb=expr=1/25
    +
    + +
  • +Set the timebase to 1/10: +
     
    settb=expr=0.1
    +
    + +
  • +Set the timebase to 1001/1000: +
     
    settb=1+0.001
    +
    + +
  • +Set the timebase to 2*intb: +
     
    settb=2*intb
    +
    + +
  • +Set the default timebase value: +
     
    settb=AVTB
    +
    +
+ + +

12.10 showspectrum

+ +

Convert input audio to a video output, representing the audio frequency +spectrum. +

+

The filter accepts the following options: +

+
+
size, s
+

Specify the video size for the output. For the syntax of this option, check +the "Video size" section in the ffmpeg-utils manual. Default value is +640x512. +

+
+
slide
+

Specify if the spectrum should slide along the window. Default value is +0. +

+
+
mode
+

Specify display mode. +

+

It accepts the following values: +

+
combined
+

all channels are displayed in the same row +

+
separate
+

all channels are displayed in separate rows +

+
+ +

Default value is ‘combined’. +

+
+
color
+

Specify display color mode. +

+

It accepts the following values: +

+
channel
+

each channel is displayed in a separate color +

+
intensity
+


 each channel is displayed using the same color scheme +

+
+ +

Default value is ‘channel’. +

+
+
scale
+

Specify scale used for calculating intensity color values. +

+

It accepts the following values: +

+
lin
+

linear +

+
sqrt
+

square root, default +

+
cbrt
+

cubic root +

+
log
+

logarithmic +

+
+ +

Default value is ‘sqrt’. +

+
+
saturation
+

Set saturation modifier for displayed colors. Negative values provide +alternative color scheme. 0 is no saturation at all. +Saturation must be in [-10.0, 10.0] range. +Default value is 1. +

+
+
win_func
+

Set window function. +

+

It accepts the following values: +

+
none
+

No samples pre-processing (do not expect this to be faster) +

+
hann
+

Hann window +

+
hamming
+

Hamming window +

+
blackman
+

Blackman window +

+
+ +

Default value is hann. +

+
+ +

The usage is very similar to the showwaves filter; see the examples in that +section. +

+ +

12.10.1 Examples

+ +
    +
  • +Large window with logarithmic color scaling: +
     
    showspectrum=s=1280x480:scale=log
    +
    + +
  • +Complete example for a colored and sliding spectrum per channel using ffplay: +
     
    ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
    +             [a] showspectrum=mode=separate:color=intensity:slide=1:scale=cbrt [out0]'
    +
    +
+ + +

12.11 showwaves

+ +

Convert input audio to a video output, representing the samples waves. +

+

The filter accepts the following options: +

+
+
size, s
+

Specify the video size for the output. For the syntax of this option, check +the "Video size" section in the ffmpeg-utils manual. Default value +is "600x240". +

+
+
mode
+

Set display mode. +

+

Available values are: +

+
point
+

Draw a point for each sample. +

+
+
line
+

Draw a vertical line for each sample. +

+
+ +

Default value is point. +

+
+
n
+

Set the number of samples which are printed on the same column. A +larger value will decrease the frame rate. Must be a positive +integer. This option can be set only if the value for rate +is not explicitly specified. +

+
+
rate, r
+

Set the (approximate) output frame rate. This is done by setting the +option n. Default value is "25". +

+
+
+ + +

12.11.1 Examples

+ +
    +
  • +Output the input file audio and the corresponding video representation +at the same time: +
     
    amovie=a.mp3,asplit[out0],showwaves[out1]
    +
    + +
  • +Create a synthetic signal and show it with showwaves, forcing a +frame rate of 30 frames per second: +
     
    aevalsrc=sin(1*2*PI*t)*sin(880*2*PI*t):cos(2*PI*200*t),asplit[out0],showwaves=r=30[out1]
    +
    +
+ + +

12.12 split, asplit

+ +

Split input into several identical outputs. +

+

asplit works with audio input, split with video. +

+

The filter accepts a single parameter which specifies the number of outputs. If +unspecified, it defaults to 2. +

+ +

12.12.1 Examples

+ +
    +
  • +Create two separate outputs from the same input: +
     
    [in] split [out0][out1]
    +
    + +
  • +To create 3 or more outputs, you need to specify the number of +outputs, like in: +
     
    [in] asplit=3 [out0][out1][out2]
    +
    + +
  • +Create two separate outputs from the same input, one cropped and +one padded: +
     
    [in] split [splitout1][splitout2];
    +[splitout1] crop=100:100:0:0    [cropout];
    +[splitout2] pad=200:200:100:100 [padout];
    +
    + +
  • +Create 5 copies of the input audio with ffmpeg: +
     
    ffmpeg -i INPUT -filter_complex asplit=5 OUTPUT
    +
    +
+ + +

12.13 zmq, azmq

+ +

Receive commands sent through a libzmq client, and forward them to +filters in the filtergraph. +

+

zmq and azmq work as a pass-through filters. zmq +must be inserted between two video filters, azmq between two +audio filters. +

+

To enable these filters you need to install the libzmq library and +headers and configure FFmpeg with --enable-libzmq. +

+

For more information about libzmq see: +http://www.zeromq.org/ +

+

The zmq and azmq filters work as a libzmq server, which +receives messages sent through a network interface defined by the +‘bind_address’ option. +

+

The received message must be in the form: +

 
TARGET COMMAND [ARG]
+
+ +

TARGET specifies the target of the command, usually the name of +the filter class or a specific filter instance name. +

+

COMMAND specifies the name of the command for the target filter. +

+

ARG is optional and specifies the optional argument list for the +given COMMAND. +

+

Upon reception, the message is processed and the corresponding command +is injected into the filtergraph. Depending on the result, the filter +will send a reply to the client, adopting the format: +

 
ERROR_CODE ERROR_REASON
+MESSAGE
+
+ +

MESSAGE is optional. +

+ +

12.13.1 Examples

+ +

Look at ‘tools/zmqsend’ for an example of a zmq client which can +be used to send commands processed by these filters. +

+

Consider the following filtergraph generated by ffplay +

 
ffplay -dumpgraph 1 -f lavfi "
+color=s=100x100:c=red  [l];
+color=s=100x100:c=blue [r];
+nullsrc=s=200x100, zmq [bg];
+[bg][l]   overlay      [bg+l];
+[bg+l][r] overlay=x=100 "
+
+ +

To change the color of the left side of the video, the following +command can be used: +

 
echo Parsed_color_0 c yellow | tools/zmqsend
+
+ +

To change the right side: +

 
echo Parsed_color_1 c pink | tools/zmqsend
+
+ + + +

13. Multimedia Sources

+ +

Below is a description of the currently available multimedia sources. +

+ +

13.1 amovie

+ +

This is the same as movie source, except it selects an audio +stream by default. +

+

+

+

13.2 movie

+ +

Read audio and/or video stream(s) from a movie container. +

+

This filter accepts the following options: +

+
+
filename
+

The name of the resource to read (not necessarily a file but also a device or a +stream accessed through some protocol). +

+
+
format_name, f
+

Specifies the format assumed for the movie to read, and can be either +the name of a container or an input device. If not specified the +format is guessed from movie_name or by probing. +

+
+
seek_point, sp
+

Specifies the seek point in seconds, the frames will be output +starting from this seek point, the parameter is evaluated with +av_strtod so the numerical value may be suffixed by an IS +postfix. Default value is "0". +

+
+
streams, s
+

Specifies the streams to read. Several streams can be specified, +separated by "+". The source will then have as many outputs, in the +same order. The syntax is explained in the “Stream specifiers” +section in the ffmpeg manual. Two special names, "dv" and "da" specify +respectively the default (best suited) video and audio stream. Default +is "dv", or "da" if the filter is called as "amovie". +

+
+
stream_index, si
+

Specifies the index of the video stream to read. If the value is -1, +the best suited video stream will be automatically selected. Default +value is "-1". Deprecated. If the filter is called "amovie", it will select +audio instead of video. +

+
+
loop
+

Specifies how many times to read the stream in sequence. +If the value is less than 1, the stream will be read again and again. +Default value is "1". +

+

Note that when the movie is looped the source timestamps are not +changed, so it will generate non monotonically increasing timestamps. +

+
+ +

This filter allows one to overlay a second video on top of main input of +a filtergraph as shown in this graph: +

 
input -----------> deltapts0 --> overlay --> output
+                                    ^
+                                    |
+movie --> scale--> deltapts1 -------+
+
+ + +

13.2.1 Examples

+ +
    +
  • +Skip 3.2 seconds from the start of the avi file in.avi, and overlay it +on top of the input labelled as "in": +
     
    movie=in.avi:seek_point=3.2, scale=180:-1, setpts=PTS-STARTPTS [over];
    +[in] setpts=PTS-STARTPTS [main];
    +[main][over] overlay=16:16 [out]
    +
    + +
  • +Read from a video4linux2 device, and overlay it on top of the input +labelled as "in": +
     
    movie=/dev/video0:f=video4linux2, scale=180:-1, setpts=PTS-STARTPTS [over];
    +[in] setpts=PTS-STARTPTS [main];
    +[main][over] overlay=16:16 [out]
    +
    + +
  • +Read the first video stream and the audio stream with id 0x81 from +dvd.vob; the video is connected to the pad named "video" and the audio is +connected to the pad named "audio": +
     
    movie=dvd.vob:s=v:0+#0x81 [video] [audio]
    +
    +
+ + + +

14. See Also

+ +

ffmpeg, ffplay, ffprobe, ffserver, +libavfilter +

+ + +

15. Authors

+ +

The FFmpeg developers. +

+

For details about the authorship, see the Git history of the project +(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command +git log in the FFmpeg source directory, or browsing the +online repository at http://source.ffmpeg.org. +

+

Maintainers for the specific components are listed in the file +‘MAINTAINERS’ in the source code tree. +

+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/ffmpeg-formats.html b/dependencies64/ffmpeg/doc/ffmpeg-formats.html new file mode 100644 index 000000000..761ad76ae --- /dev/null +++ b/dependencies64/ffmpeg/doc/ffmpeg-formats.html @@ -0,0 +1,1993 @@ + + + + + +FFmpeg documentation : FFmpeg Formats + + + + + + + + + + +
+
+ + +

FFmpeg Formats Documentation

+ + +

Table of Contents

+ + + +

1. Description

+ +

This document describes the supported formats (muxers and demuxers) +provided by the libavformat library. +

+ + +

2. Format Options

+ +

The libavformat library provides some generic global options, which +can be set on all the muxers and demuxers. In addition each muxer or +demuxer may support so-called private options, which are specific for +that component. +

+

Options may be set by specifying -option value in the +FFmpeg tools, or by setting the value explicitly in the +AVFormatContext options or using the ‘libavutil/opt.h’ API +for programmatic use. +

+

The list of supported options follows: +

+
+
avioflags flags (input/output)
+

Possible values: +

+
direct
+

Reduce buffering. +

+
+ +
+
probesize integer (input)
+

Set probing size in bytes, i.e. the size of the data to analyze to get +stream information. A higher value will allow to detect more +information in case it is dispersed into the stream, but will increase +latency. Must be an integer not lesser than 32. It is 5000000 by default. +

+
+
packetsize integer (output)
+

Set packet size. +

+
+
fflags flags (input/output)
+

Set format flags. +

+

Possible values: +

+
ignidx
+

Ignore index. +

+
genpts
+

Generate PTS. +

+
nofillin
+

Do not fill in missing values that can be exactly calculated. +

+
noparse
+

Disable AVParsers, this needs +nofillin too. +

+
igndts
+

Ignore DTS. +

+
discardcorrupt
+

Discard corrupted frames. +

+
sortdts
+

Try to interleave output packets by DTS. +

+
keepside
+

Do not merge side data. +

+
latm
+

Enable RTP MP4A-LATM payload. +

+
nobuffer
+

Reduce the latency introduced by optional buffering +

+
+ +
+
seek2any integer (input)
+

Allow seeking to non-keyframes on demuxer level when supported if set to 1. +Default is 0. +

+
+
analyzeduration integer (input)
+

Specify how many microseconds are analyzed to probe the input. A +higher value will allow to detect more accurate information, but will +increase latency. It defaults to 5,000,000 microseconds = 5 seconds. +

+
+
cryptokey hexadecimal string (input)
+

Set decryption key. +

+
+
indexmem integer (input)
+

Set max memory used for timestamp index (per stream). +

+
+
rtbufsize integer (input)
+

Set max memory used for buffering real-time frames. +

+
+
fdebug flags (input/output)
+

Print specific debug info. +

+

Possible values: +

+
ts
+
+ +
+
max_delay integer (input/output)
+

Set maximum muxing or demuxing delay in microseconds. +

+
+
fpsprobesize integer (input)
+

Set number of frames used to probe fps. +

+
+
audio_preload integer (output)
+

Set microseconds by which audio packets should be interleaved earlier. +

+
+
chunk_duration integer (output)
+

Set microseconds for each chunk. +

+
+
chunk_size integer (output)
+

Set size in bytes for each chunk. +

+
+
err_detect, f_err_detect flags (input)
+

Set error detection flags. f_err_detect is deprecated and +should be used only via the ffmpeg tool. +

+

Possible values: +

+
crccheck
+

Verify embedded CRCs. +

+
bitstream
+

Detect bitstream specification deviations. +

+
buffer
+

Detect improper bitstream length. +

+
explode
+

Abort decoding on minor error detection. +

+
careful
+

Consider things that violate the spec and have not been seen in the +wild as errors. +

+
compliant
+

Consider all spec non compliancies as errors. +

+
aggressive
+

Consider things that a sane encoder should not do as an error. +

+
+ +
+
use_wallclock_as_timestamps integer (input)
+

Use wallclock as timestamps. +

+
+
avoid_negative_ts integer (output)
+
+

Possible values: +

+
make_non_negative
+

Shift timestamps to make them non-negative. +Also note that this affects only leading negative timestamps, and not +non-monotonic negative timestamps. +

+
make_zero
+

Shift timestamps so that the first timestamp is 0. +

+
auto (default)
+

Enables shifting when required by the target format. +

+
disabled
+

Disables shifting of timestamp. +

+
+ +

When shifting is enabled, all output timestamps are shifted by the +same amount. Audio, video, and subtitles desynching and relative +timestamp differences are preserved compared to how they would have +been without shifting. +

+
+
skip_initial_bytes integer (input)
+


 Set number of bytes to skip before reading header and frames. +Default is 0. +

+
+
correct_ts_overflow integer (input)
+

Correct single timestamp overflows if set to 1. Default is 1. +

+
+
flush_packets integer (output)
+

Flush the underlying I/O stream after each packet. Default 1 enables it, and +has the effect of reducing the latency; 0 disables it and may slightly +increase performance in some cases. +

+
+
output_ts_offset offset (output)
+

Set the output time offset. +

+

offset must be a time duration specification, +see (ffmpeg-utils)time duration syntax. +

+

The offset is added by the muxer to the output timestamps. +

+


 Specifying a positive offset means that the corresponding streams are +delayed by the time duration specified in offset. Default value +is 0 (meaning that no offset is applied). +

+
+ + +

+

+

2.1 Format stream specifiers

+ +

Format stream specifiers allow selection of one or more streams that +match specific properties. +

+

Possible forms of stream specifiers are: +

+
stream_index
+

Matches the stream with this index. +

+
+
stream_type[:stream_index]
+

stream_type is one of following: ’v’ for video, ’a’ for audio, +’s’ for subtitle, ’d’ for data, and ’t’ for attachments. If +stream_index is given, then it matches the stream number +stream_index of this type. Otherwise, it matches all streams of +this type. +

+
+
p:program_id[:stream_index]
+

If stream_index is given, then it matches the stream with number +stream_index in the program with the id +program_id. Otherwise, it matches all streams in the program. +

+
+
#stream_id
+

Matches the stream by a format-specific ID. +

+
+ +

The exact semantics of stream specifiers is defined by the +avformat_match_stream_specifier() function declared in the +‘libavformat/avformat.h’ header. +

+ +

3. Demuxers

+ +

Demuxers are configured elements in FFmpeg that can read the +multimedia streams from a particular type of file. +

+

When you configure your FFmpeg build, all the supported demuxers +are enabled by default. You can list all available ones using the +configure option --list-demuxers. +

+

You can disable all the demuxers using the configure option +--disable-demuxers, and selectively enable a single demuxer with +the option --enable-demuxer=DEMUXER, or disable it +with the option --disable-demuxer=DEMUXER. +

+

The option -formats of the ff* tools will display the list of +enabled demuxers. +

+

The description of some of the currently available demuxers follows. +

+ +

3.1 applehttp

+ +

Apple HTTP Live Streaming demuxer. +

+

This demuxer presents all AVStreams from all variant streams. +The id field is set to the bitrate variant index number. By setting +the discard flags on AVStreams (by pressing ’a’ or ’v’ in ffplay), +the caller can decide which variant streams to actually receive. +The total bitrate of the variant that the stream belongs to is +available in a metadata key named "variant_bitrate". +

+ +

3.2 asf

+ +

Advanced Systems Format demuxer. +

+

This demuxer is used to demux ASF files and MMS network streams. +

+
+
-no_resync_search bool
+

Do not try to resynchronize by looking for a certain optional start code. +

+
+ +

+

+

3.3 concat

+ +

Virtual concatenation script demuxer. +

+

This demuxer reads a list of files and other directives from a text file and +demuxes them one after the other, as if all their packet had been muxed +together. +

+

The timestamps in the files are adjusted so that the first file starts at 0 +and each next file starts where the previous one finishes. Note that it is +done globally and may cause gaps if all streams do not have exactly the same +length. +

+

All files must have the same streams (same codecs, same time base, etc.). +

+

The duration of each file is used to adjust the timestamps of the next file: +if the duration is incorrect (because it was computed using the bit-rate or +because the file is truncated, for example), it can cause artifacts. The +duration directive can be used to override the duration stored in +each file. +

+ +

3.3.1 Syntax

+ +

The script is a text file in extended-ASCII, with one directive per line. +Empty lines, leading spaces and lines starting with ’#’ are ignored. The +following directive is recognized: +

+
+
file path
+

Path to a file to read; special characters and spaces must be escaped with +backslash or single quotes. +

+

All subsequent directives apply to that file. +

+
+
ffconcat version 1.0
+

Identify the script type and version. It also sets the ‘safe’ option +to 1 if it was set to its default -1. +

+

To make FFmpeg recognize the format automatically, this directive must +appear exactly as is (no extra space or byte-order-mark) on the very first +line of the script. +

+
+
duration dur
+

Duration of the file. This information can be specified from the file; +specifying it here may be more efficient or help if the information from the +file is not available or accurate. +

+

If the duration is set for all files, then it is possible to seek in the +whole concatenated video. +

+
+
+ + +

3.3.2 Options

+ +

This demuxer accepts the following option: +

+
+
safe
+

If set to 1, reject unsafe file paths. A file path is considered safe if it +does not contain a protocol specification and is relative and all components +only contain characters from the portable character set (letters, digits, +period, underscore and hyphen) and have no period at the beginning of a +component. +

+

If set to 0, any file name is accepted. +

+

The default is -1, it is equivalent to 1 if the format was automatically +probed and 0 otherwise. +

+
+
+ + +

3.4 flv

+ +

Adobe Flash Video Format demuxer. +

+

This demuxer is used to demux FLV files and RTMP network streams. +

+
+
-flv_metadata bool
+

Allocate the streams according to the onMetaData array content. +

+
+ + +

3.5 libgme

+ +

The Game Music Emu library is a collection of video game music file emulators. +

+

See http://code.google.com/p/game-music-emu/ for more information. +

+

Some files have multiple tracks. The demuxer will pick the first track by +default. The ‘track_index’ option can be used to select a different +track. Track indexes start at 0. The demuxer exports the number of tracks as +tracks meta data entry. +

+

For very large files, the ‘max_size’ option may have to be adjusted. +

+ +

3.6 libquvi

+ +

Play media from Internet services using the quvi project. +

+

The demuxer accepts a ‘format’ option to request a specific quality. It +is by default set to best. +

+

See http://quvi.sourceforge.net/ for more information. +

+

FFmpeg needs to be built with --enable-libquvi for this demuxer to be +enabled. +

+ +

3.7 image2

+ +

Image file demuxer. +

+

This demuxer reads from a list of image files specified by a pattern. +The syntax and meaning of the pattern is specified by the +option pattern_type. +

+

The pattern may contain a suffix which is used to automatically +determine the format of the images contained in the files. +

+

The size, the pixel format, and the format of each image must be the +same for all the files in the sequence. +

+

This demuxer accepts the following options: +

+
framerate
+

Set the frame rate for the video stream. It defaults to 25. +

+
loop
+

If set to 1, loop over the input. Default value is 0. +

+
pattern_type
+

Select the pattern type used to interpret the provided filename. +

+

pattern_type accepts one of the following values. +

+
sequence
+

Select a sequence pattern type, used to specify a sequence of files +indexed by sequential numbers. +

+

A sequence pattern may contain the string "%d" or "%0Nd", which +specifies the position of the characters representing a sequential +number in each filename matched by the pattern. If the form +"%0Nd" is used, the string representing the number in each +filename is 0-padded and N is the total number of 0-padded +digits representing the number. The literal character ’%’ can be +specified in the pattern with the string "%%". +

+

If the sequence pattern contains "%d" or "%0Nd", the first filename of +the file list specified by the pattern must contain a number +inclusively contained between start_number and +start_number+start_number_range-1, and all the following +numbers must be sequential. +

+

For example the pattern "img-%03d.bmp" will match a sequence of +filenames of the form ‘img-001.bmp’, ‘img-002.bmp’, ..., +‘img-010.bmp’, etc.; the pattern "i%%m%%g-%d.jpg" will match a +sequence of filenames of the form ‘i%m%g-1.jpg’, +‘i%m%g-2.jpg’, ..., ‘i%m%g-10.jpg’, etc. +

+

Note that the pattern must not necessarily contain "%d" or +"%0Nd", for example to convert a single image file +‘img.jpeg’ you can employ the command: +

 
ffmpeg -i img.jpeg img.png
+
+ +
+
glob
+

Select a glob wildcard pattern type. +

+

The pattern is interpreted like a glob() pattern. This is only +selectable if libavformat was compiled with globbing support. +

+
+
glob_sequence (deprecated, will be removed)
+

Select a mixed glob wildcard/sequence pattern. +

+

If your version of libavformat was compiled with globbing support, and +the provided pattern contains at least one glob meta character among +%*?[]{} that is preceded by an unescaped "%", the pattern is +interpreted like a glob() pattern, otherwise it is interpreted +like a sequence pattern. +

+

All glob special characters %*?[]{} must be prefixed +with "%". To escape a literal "%" you shall use "%%". +

+

For example the pattern foo-%*.jpeg will match all the +filenames prefixed by "foo-" and terminating with ".jpeg", and +foo-%?%?%?.jpeg will match all the filenames prefixed with +"foo-", followed by a sequence of three characters, and terminating +with ".jpeg". +

+

This pattern type is deprecated in favor of glob and +sequence. +

+
+ +

Default value is glob_sequence. +

+
pixel_format
+

Set the pixel format of the images to read. If not specified the pixel +format is guessed from the first image file in the sequence. +

+
start_number
+

Set the index of the file matched by the image file pattern to start +to read from. Default value is 0. +

+
start_number_range
+

Set the index interval range to check when looking for the first image +file in the sequence, starting from start_number. Default value +is 5. +

+
ts_from_file
+

If set to 1, will set frame timestamp to modification time of image file. Note +that monotonicity of timestamps is not provided: images go in the same order as +without this option. Default value is 0. +

+
video_size
+

Set the video size of the images to read. If not specified the video +size is guessed from the first image file in the sequence. +

+
+ + +

3.7.1 Examples

+ +
    +
  • +Use ffmpeg for creating a video from the images in the file +sequence ‘img-001.jpeg’, ‘img-002.jpeg’, ..., assuming an +input frame rate of 10 frames per second: +
     
    ffmpeg -framerate 10 -i 'img-%03d.jpeg' out.mkv
    +
    + +
  • +As above, but start by reading from a file with index 100 in the sequence: +
     
    ffmpeg -framerate 10 -start_number 100 -i 'img-%03d.jpeg' out.mkv
    +
    + +
  • +Read images matching the "*.png" glob pattern, that is all the files +terminating with the ".png" suffix: +
     
    ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
    +
    +
+ + +

3.8 mpegts

+ +

MPEG-2 transport stream demuxer. +

+
+
fix_teletext_pts
+

Overrides teletext packet PTS and DTS values with the timestamps calculated +from the PCR of the first program which the teletext stream is part of and is +not discarded. Default value is 1, set this option to 0 if you want your +teletext packet PTS and DTS values untouched. +

+
+ + +

3.9 rawvideo

+ +

Raw video demuxer. +

+

This demuxer allows one to read raw video data. Since there is no header +specifying the assumed video parameters, the user must specify them +in order to be able to decode the data correctly. +

+

This demuxer accepts the following options: +

+
framerate
+

Set input video frame rate. Default value is 25. +

+
+
pixel_format
+

Set the input video pixel format. Default value is yuv420p. +

+
+
video_size
+

Set the input video size. This value must be specified explicitly. +

+
+ +

For example to read a rawvideo file ‘input.raw’ with +ffplay, assuming a pixel format of rgb24, a video +size of 320x240, and a frame rate of 10 images per second, use +the command: +

 
ffplay -f rawvideo -pixel_format rgb24 -video_size 320x240 -framerate 10 input.raw
+
+ + +

3.10 sbg

+ +

SBaGen script demuxer. +

+

This demuxer reads the script language used by SBaGen +http://uazu.net/sbagen/ to generate binaural beats sessions. A SBG +script looks like that: +

 
-SE
+a: 300-2.5/3 440+4.5/0
+b: 300-2.5/0 440+4.5/3
+off: -
+NOW      == a
++0:07:00 == b
++0:14:00 == a
++0:21:00 == b
++0:30:00    off
+
+ +

A SBG script can mix absolute and relative timestamps. If the script uses +either only absolute timestamps (including the script start time) or only +relative ones, then its layout is fixed, and the conversion is +straightforward. On the other hand, if the script mixes both kind of +timestamps, then the NOW reference for relative timestamps will be +taken from the current time of day at the time the script is read, and the +script layout will be frozen according to that reference. That means that if +the script is directly played, the actual times will match the absolute +timestamps up to the sound controller’s clock accuracy, but if the user +somehow pauses the playback or seeks, all times will be shifted accordingly. +

+ +

3.11 tedcaptions

+ +

JSON captions used for TED Talks. +

+

TED does not provide links to the captions, but they can be guessed from the +page. The file ‘tools/bookmarklets.html’ from the FFmpeg source tree +contains a bookmarklet to expose them. +

+

This demuxer accepts the following option: +

+
start_time
+

Set the start time of the TED talk, in milliseconds. The default is 15000 +(15s). It is used to sync the captions with the downloadable videos, because +they include a 15s intro. +

+
+ +

Example: convert the captions to a format most players understand: +

 
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
+
+ + +

4. Muxers

+ +

Muxers are configured elements in FFmpeg which allow writing +multimedia streams to a particular type of file. +

+

When you configure your FFmpeg build, all the supported muxers +are enabled by default. You can list all available muxers using the +configure option --list-muxers. +

+

You can disable all the muxers with the configure option +--disable-muxers and selectively enable / disable single muxers +with the options --enable-muxer=MUXER / +--disable-muxer=MUXER. +

+

The option -formats of the ff* tools will display the list of +enabled muxers. +

+

A description of some of the currently available muxers follows. +

+

+

+

4.1 aiff

+ +

Audio Interchange File Format muxer. +

+ +

4.1.1 Options

+ +

It accepts the following options: +

+
+
write_id3v2
+

Enable ID3v2 tags writing when set to 1. Default is 0 (disabled). +

+
+
id3v2_version
+

Select ID3v2 version to write. Currently only version 3 and 4 (aka. +ID3v2.3 and ID3v2.4) are supported. The default is version 4. +

+
+
+ +

+

+

4.2 crc

+ +

CRC (Cyclic Redundancy Check) testing format. +

+

This muxer computes and prints the Adler-32 CRC of all the input audio +and video frames. By default audio frames are converted to signed +16-bit raw audio and video frames to raw video before computing the +CRC. +

+

The output of the muxer consists of a single line of the form: +CRC=0xCRC, where CRC is a hexadecimal number 0-padded to +8 digits containing the CRC for all the decoded input frames. +

+

See also the framecrc muxer. +

+ +

4.2.1 Examples

+ +

For example to compute the CRC of the input, and store it in the file +‘out.crc’: +

 
ffmpeg -i INPUT -f crc out.crc
+
+ +

You can print the CRC to stdout with the command: +

 
ffmpeg -i INPUT -f crc -
+
+ +

You can select the output format of each frame with ffmpeg by +specifying the audio and video codec and format. For example to +compute the CRC of the input audio converted to PCM unsigned 8-bit +and the input video converted to MPEG-2 video, use the command: +

 
ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f crc -
+
+ +

+

+

4.3 framecrc

+ +

Per-packet CRC (Cyclic Redundancy Check) testing format. +

+

This muxer computes and prints the Adler-32 CRC for each audio +and video packet. By default audio frames are converted to signed +16-bit raw audio and video frames to raw video before computing the +CRC. +

+

The output of the muxer consists of a line for each audio and video +packet of the form: +

 
stream_index, packet_dts, packet_pts, packet_duration, packet_size, 0xCRC
+
+ +

CRC is a hexadecimal number 0-padded to 8 digits containing the +CRC of the packet. +

+ +

4.3.1 Examples

+ +

For example to compute the CRC of the audio and video frames in +‘INPUT’, converted to raw audio and video packets, and store it +in the file ‘out.crc’: +

 
ffmpeg -i INPUT -f framecrc out.crc
+
+ +

To print the information to stdout, use the command: +

 
ffmpeg -i INPUT -f framecrc -
+
+ +

With ffmpeg, you can select the output format to which the +audio and video frames are encoded before computing the CRC for each +packet by specifying the audio and video codec. For example, to +compute the CRC of each decoded input audio frame converted to PCM +unsigned 8-bit and of each decoded input video frame converted to +MPEG-2 video, use the command: +

 
ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f framecrc -
+
+ +

See also the crc muxer. +

+

+

+

4.4 framemd5

+ +

Per-packet MD5 testing format. +

+

This muxer computes and prints the MD5 hash for each audio +and video packet. By default audio frames are converted to signed +16-bit raw audio and video frames to raw video before computing the +hash. +

+

The output of the muxer consists of a line for each audio and video +packet of the form: +

 
stream_index, packet_dts, packet_pts, packet_duration, packet_size, MD5
+
+ +

MD5 is a hexadecimal number representing the computed MD5 hash +for the packet. +

+ +

4.4.1 Examples

+ +

For example to compute the MD5 of the audio and video frames in +‘INPUT’, converted to raw audio and video packets, and store it +in the file ‘out.md5’: +

 
ffmpeg -i INPUT -f framemd5 out.md5
+
+ +

To print the information to stdout, use the command: +

 
ffmpeg -i INPUT -f framemd5 -
+
+ +

See also the md5 muxer. +

+

+

+

4.5 gif

+ +

Animated GIF muxer. +

+

It accepts the following options: +

+
+
loop
+

Set the number of times to loop the output. Use -1 for no loop, 0 +for looping indefinitely (default). +

+
+
final_delay
+

Force the delay (expressed in centiseconds) after the last frame. Each frame +ends with a delay until the next frame. The default is -1, which is a +special value to tell the muxer to re-use the previous delay. In case of a +loop, you might want to customize this value to mark a pause for instance. +

+
+ +

For example, to encode a gif looping 10 times, with a 5 seconds delay between +the loops: +

 
ffmpeg -i INPUT -loop 10 -final_delay 500 out.gif
+
+ +

Note 1: if you wish to extract the frames in separate GIF files, you need to +force the image2 muxer: +

 
ffmpeg -i INPUT -c:v gif -f image2 "out%d.gif"
+
+ +

Note 2: the GIF format has a very small time base: the delay between two frames +cannot be smaller than one centisecond. +

+

+

+

4.6 hls

+ +

Apple HTTP Live Streaming muxer that segments MPEG-TS according to +the HTTP Live Streaming (HLS) specification. +

+

It creates a playlist file and numbered segment files. The output +filename specifies the playlist filename; the segment filenames +receive the same basename as the playlist, a sequential number and +a .ts extension. +

+

For example, to convert an input file with ffmpeg: +

 
ffmpeg -i in.nut out.m3u8
+
+ +

See also the segment muxer, which provides a more generic and +flexible implementation of a segmenter, and can be used to perform HLS +segmentation. +

+ +

4.6.1 Options

+ +

This muxer supports the following options: +

+
+
hls_time seconds
+

Set the segment length in seconds. Default value is 2. +

+
+
hls_list_size size
+

Set the maximum number of playlist entries. If set to 0 the list file +will contain all the segments. Default value is 5. +

+
+
hls_wrap wrap
+

Set the number after which the segment filename number (the number +specified in each segment file) wraps. If set to 0 the number will +never be wrapped. Default value is 0. +

+

This option is useful to avoid filling the disk with many segment +files, and limits the maximum number of segment files written to disk +to wrap. +

+
+
start_number number
+

Start the playlist sequence number from number. Default value is +0. +

+

Note that the playlist sequence number must be unique for each segment +and it is not to be confused with the segment filename sequence number +which can be cyclic, for example if the ‘wrap’ option is +specified. +

+
+ +

+

+

4.7 ico

+ +

ICO file muxer. +

+

Microsoft’s icon file format (ICO) has some strict limitations that should be noted: +

+
    +
  • +Size cannot exceed 256 pixels in any dimension + +
  • +Only BMP and PNG images can be stored + +
  • +If a BMP image is used, it must be one of the following pixel formats: +
     
    BMP Bit Depth      FFmpeg Pixel Format
    +1bit               pal8
    +4bit               pal8
    +8bit               pal8
    +16bit              rgb555le
    +24bit              bgr24
    +32bit              bgra
    +
    + +
  • +If a BMP image is used, it must use the BITMAPINFOHEADER DIB header + +
  • +If a PNG image is used, it must use the rgba pixel format +
+ +

+

+

4.8 image2

+ +

Image file muxer. +

+

The image file muxer writes video frames to image files. +

+

The output filenames are specified by a pattern, which can be used to +produce sequentially numbered series of files. +The pattern may contain the string "%d" or "%0Nd", this string +specifies the position of the characters representing a numbering in +the filenames. If the form "%0Nd" is used, the string +representing the number in each filename is 0-padded to N +digits. The literal character ’%’ can be specified in the pattern with +the string "%%". +

+

If the pattern contains "%d" or "%0Nd", the first filename of +the file list specified will contain the number 1, all the following +numbers will be sequential. +

+

The pattern may contain a suffix which is used to automatically +determine the format of the image files to write. +

+

For example the pattern "img-%03d.bmp" will specify a sequence of +filenames of the form ‘img-001.bmp’, ‘img-002.bmp’, ..., +‘img-010.bmp’, etc. +The pattern "img%%-%d.jpg" will specify a sequence of filenames of the +form ‘img%-1.jpg’, ‘img%-2.jpg’, ..., ‘img%-10.jpg’, +etc. +

+ +

4.8.1 Examples

+ +

The following example shows how to use ffmpeg for creating a +sequence of files ‘img-001.jpeg’, ‘img-002.jpeg’, ..., +taking one image every second from the input video: +

 
ffmpeg -i in.avi -vsync 1 -r 1 -f image2 'img-%03d.jpeg'
+
+ +

Note that with ffmpeg, if the format is not specified with the +-f option and the output filename specifies an image file +format, the image2 muxer is automatically selected, so the previous +command can be written as: +

 
ffmpeg -i in.avi -vsync 1 -r 1 'img-%03d.jpeg'
+
+ +

Note also that the pattern must not necessarily contain "%d" or +"%0Nd", for example to create a single image file +‘img.jpeg’ from the input video you can employ the command: +

 
ffmpeg -i in.avi -f image2 -frames:v 1 img.jpeg
+
+ +

The ‘strftime’ option allows you to expand the filename with +date and time information. Check the documentation of +the strftime() function for the syntax. +

+

For example to generate image files from the strftime() +"%Y-%m-%d_%H-%M-%S" pattern, the following ffmpeg command +can be used: +

 
ffmpeg -f v4l2 -r 1 -i /dev/video0 -f image2 -strftime 1 "%Y-%m-%d_%H-%M-%S.jpg"
+
+ + +

4.8.2 Options

+ +
+
start_number
+

Start the sequence from the specified number. Default value is 1. Must +be a non-negative number. +

+
+
update
+

If set to 1, the filename will always be interpreted as just a +filename, not a pattern, and the corresponding file will be continuously +overwritten with new images. Default value is 0. +

+
+
strftime
+

If set to 1, expand the filename with date and time information from +strftime(). Default value is 0. +

+
+ +

The image muxer supports the .Y.U.V image file format. This format is +special in that each image frame consists of three files, for +each of the YUV420P components. To read or write this image file format, +specify the name of the ’.Y’ file. The muxer will automatically open the +’.U’ and ’.V’ files as required. +

+ +

4.9 matroska

+ +

Matroska container muxer. +

+

This muxer implements the matroska and webm container specs. +

+ +

4.9.1 Metadata

+ +

The recognized metadata settings in this muxer are: +

+
+
title
+

Set title name provided to a single track. +

+
+
language
+

Specify the language of the track in the Matroska languages form. +

+

The language can be either the 3 letters bibliographic ISO-639-2 (ISO +639-2/B) form (like "fre" for French), or a language code mixed with a +country code for specialities in languages (like "fre-ca" for Canadian +French). +

+
+
stereo_mode
+

Set stereo 3D video layout of two views in a single video track. +

+

The following values are recognized: +

+
mono
+

video is not stereo +

+
left_right
+

Both views are arranged side by side, Left-eye view is on the left +

+
bottom_top
+

Both views are arranged in top-bottom orientation, Left-eye view is at bottom +

+
top_bottom
+

Both views are arranged in top-bottom orientation, Left-eye view is on top +

+
checkerboard_rl
+

Each view is arranged in a checkerboard interleaved pattern, Left-eye view being first +

+
checkerboard_lr
+

Each view is arranged in a checkerboard interleaved pattern, Right-eye view being first +

+
row_interleaved_rl
+

Each view is constituted by a row based interleaving, Right-eye view is first row +

+
row_interleaved_lr
+

Each view is constituted by a row based interleaving, Left-eye view is first row +

+
col_interleaved_rl
+

Both views are arranged in a column based interleaving manner, Right-eye view is first column +

+
col_interleaved_lr
+

Both views are arranged in a column based interleaving manner, Left-eye view is first column +

+
anaglyph_cyan_red
+

All frames are in anaglyph format viewable through red-cyan filters +

+
right_left
+

Both views are arranged side by side, Right-eye view is on the left +

+
anaglyph_green_magenta
+

All frames are in anaglyph format viewable through green-magenta filters +

+
block_lr
+

Both eyes laced in one Block, Left-eye view is first +

+
block_rl
+

Both eyes laced in one Block, Right-eye view is first +

+
+
+
+ +

For example a 3D WebM clip can be created using the following command line: +

 
ffmpeg -i sample_left_right_clip.mpg -an -c:v libvpx -metadata stereo_mode=left_right -y stereo_clip.webm
+
+ + +

4.9.2 Options

+ +

This muxer supports the following options: +

+
+
reserve_index_space
+

By default, this muxer writes the index for seeking (called cues in Matroska +terms) at the end of the file, because it cannot know in advance how much space +to leave for the index at the beginning of the file. However for some use cases +– e.g. streaming where seeking is possible but slow – it is useful to put the +index at the beginning of the file. +

+

If this option is set to a non-zero value, the muxer will reserve a given amount +of space in the file header and then try to write the cues there when the muxing +finishes. If the available space does not suffice, muxing will fail. A safe size +for most use cases should be about 50kB per hour of video. +

+

Note that cues are only written if the output is seekable and this option will +have no effect if it is not. +

+
+ +

+

+

4.10 md5

+ +

MD5 testing format. +

+

This muxer computes and prints the MD5 hash of all the input audio +and video frames. By default audio frames are converted to signed +16-bit raw audio and video frames to raw video before computing the +hash. +

+

The output of the muxer consists of a single line of the form: +MD5=MD5, where MD5 is a hexadecimal number representing +the computed MD5 hash. +

+

For example to compute the MD5 hash of the input converted to raw +audio and video, and store it in the file ‘out.md5’: +

 
ffmpeg -i INPUT -f md5 out.md5
+
+ +

You can print the MD5 to stdout with the command: +

 
ffmpeg -i INPUT -f md5 -
+
+ +

See also the framemd5 muxer. +

+ +

4.11 mov, mp4, ismv

+ +

MOV/MP4/ISMV (Smooth Streaming) muxer. +

+

The mov/mp4/ismv muxer supports fragmentation. Normally, a MOV/MP4 +file has all the metadata about all packets stored in one location +(written at the end of the file, it can be moved to the start for +better playback by adding faststart to the movflags, or +using the qt-faststart tool). A fragmented +file consists of a number of fragments, where packets and metadata +about these packets are stored together. Writing a fragmented +file has the advantage that the file is decodable even if the +writing is interrupted (while a normal MOV/MP4 is undecodable if +it is not properly finished), and it requires less memory when writing +very long files (since writing normal MOV/MP4 files stores info about +every single packet in memory until the file is closed). The downside +is that it is less compatible with other applications. +

+ +

4.11.1 Options

+ +

Fragmentation is enabled by setting one of the AVOptions that define +how to cut the file into fragments: +

+
+
-moov_size bytes
+

Reserves space for the moov atom at the beginning of the file instead of placing the +moov atom at the end. If the space reserved is insufficient, muxing will fail. +

+
-movflags frag_keyframe
+

Start a new fragment at each video keyframe. +

+
-frag_duration duration
+

Create fragments that are duration microseconds long. +

+
-frag_size size
+

Create fragments that contain up to size bytes of payload data. +

+
-movflags frag_custom
+

Allow the caller to manually choose when to cut fragments, by +calling av_write_frame(ctx, NULL) to write a fragment with +the packets written so far. (This is only useful with other +applications integrating libavformat, not from ffmpeg.) +

+
-min_frag_duration duration
+

Don’t create fragments that are shorter than duration microseconds long. +

+
+ +

If more than one condition is specified, fragments are cut when +one of the specified conditions is fulfilled. The exception to this is +-min_frag_duration, which has to be fulfilled for any of the other +conditions to apply. +

+

Additionally, the way the output file is written can be adjusted +through a few other options: +

+
+
-movflags empty_moov
+

Write an initial moov atom directly at the start of the file, without +describing any samples in it. Generally, an mdat/moov pair is written +at the start of the file, as a normal MOV/MP4 file, containing only +a short portion of the file. With this option set, there is no initial +mdat atom, and the moov atom only describes the tracks but has +a zero duration. +

+

Files written with this option set do not work in QuickTime. +This option is implicitly set when writing ismv (Smooth Streaming) files. +

+
-movflags separate_moof
+

Write a separate moof (movie fragment) atom for each track. Normally, +packets for all tracks are written in a moof atom (which is slightly +more efficient), but with this option set, the muxer writes one moof/mdat +pair for each track, making it easier to separate tracks. +

+

This option is implicitly set when writing ismv (Smooth Streaming) files. +

+
-movflags faststart
+

Run a second pass moving the index (moov atom) to the beginning of the file. +This operation can take a while, and will not work in various situations such +as fragmented output, thus it is not enabled by default. +

+
-movflags rtphint
+

Add RTP hinting tracks to the output file. +

+
+ + +

4.11.2 Example

+ +

Smooth Streaming content can be pushed in real time to a publishing +point on IIS with this muxer. Example: +

 
ffmpeg -re <normal input/transcoding options> -movflags isml+frag_keyframe -f ismv http://server/publishingpoint.isml/Streams(Encoder1)
+
+ + +

4.12 mp3

+ +

The MP3 muxer writes a raw MP3 stream with an ID3v2 header at the beginning and +optionally an ID3v1 tag at the end. ID3v2.3 and ID3v2.4 are supported, the +id3v2_version option controls which one is used. Setting +id3v2_version to 0 will disable the ID3v2 header completely. The legacy +ID3v1 tag is not written by default, but may be enabled with the +write_id3v1 option. +

+

The muxer may also write a Xing frame at the beginning, which contains the +number of frames in the file. It is useful for computing duration of VBR files. +The Xing frame is written if the output stream is seekable and if the +write_xing option is set to 1 (the default). +

+

The muxer supports writing ID3v2 attached pictures (APIC frames). The pictures +are supplied to the muxer in form of a video stream with a single packet. There +can be any number of those streams, each will correspond to a single APIC frame. +The stream metadata tags title and comment map to APIC +description and picture type respectively. See +http://id3.org/id3v2.4.0-frames for allowed picture types. +

+

Note that the APIC frames must be written at the beginning, so the muxer will +buffer the audio frames until it gets all the pictures. It is therefore advised +to provide the pictures as soon as possible to avoid excessive buffering. +

+

Examples: +

+

Write an mp3 with an ID3v2.3 header and an ID3v1 footer: +

 
ffmpeg -i INPUT -id3v2_version 3 -write_id3v1 1 out.mp3
+
+ +

To attach a picture to an mp3 file select both the audio and the picture stream +with map: +

 
ffmpeg -i input.mp3 -i cover.png -c copy -map 0 -map 1
+-metadata:s:v title="Album cover" -metadata:s:v comment="Cover (Front)" out.mp3
+
+ +

Write a "clean" MP3 without any extra features: +

 
ffmpeg -i input.wav -write_xing 0 -id3v2_version 0 out.mp3
+
+ + +

4.13 mpegts

+ +

MPEG transport stream muxer. +

+

This muxer implements ISO 13818-1 and part of ETSI EN 300 468. +

+

The recognized metadata settings in mpegts muxer are service_provider +and service_name. If they are not set the default for +service_provider is "FFmpeg" and the default for +service_name is "Service01". +

+ +

4.13.1 Options

+ +

The muxer options are: +

+
+
-mpegts_original_network_id number
+

Set the original_network_id (default 0x0001). This is unique identifier +of a network in DVB. Its main use is in the unique identification of a +service through the path Original_Network_ID, Transport_Stream_ID. +

+
-mpegts_transport_stream_id number
+

Set the transport_stream_id (default 0x0001). This identifies a +transponder in DVB. +

+
-mpegts_service_id number
+

Set the service_id (default 0x0001) also known as program in DVB. +

+
-mpegts_pmt_start_pid number
+

Set the first PID for PMT (default 0x1000, max 0x1f00). +

+
-mpegts_start_pid number
+

Set the first PID for data packets (default 0x0100, max 0x0f00). +

+
-mpegts_m2ts_mode number
+

Enable m2ts mode if set to 1. Default value is -1 which disables m2ts mode. +

+
-muxrate number
+

Set muxrate. +

+
-pes_payload_size number
+

Set minimum PES packet payload in bytes. +

+
-mpegts_flags flags
+

Set flags (see below). +

+
-mpegts_copyts number
+

Preserve original timestamps, if value is set to 1. Default value is -1, which +results in shifting timestamps so that they start from 0. +

+
-tables_version number
+

Set PAT, PMT and SDT version (default 0, valid values are from 0 to 31, inclusively). +This option allows updating stream structure so that standard consumer may +detect the change. To do so, reopen output AVFormatContext (in case of API +usage) or restart ffmpeg instance, cyclically changing tables_version value: +

 
ffmpeg -i source1.ts -codec copy -f mpegts -tables_version 0 udp://1.1.1.1:1111
+ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111
+...
+ffmpeg -i source3.ts -codec copy -f mpegts -tables_version 31 udp://1.1.1.1:1111
+ffmpeg -i source1.ts -codec copy -f mpegts -tables_version 0 udp://1.1.1.1:1111
+ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111
+...
+
+
+
+ +

Option mpegts_flags may take a set of such flags: +

+
+
resend_headers
+

Reemit PAT/PMT before writing the next packet. +

+
latm
+

Use LATM packetization for AAC. +

+
+ + +

4.13.2 Example

+ +
 
ffmpeg -i file.mpg -c copy \
+     -mpegts_original_network_id 0x1122 \
+     -mpegts_transport_stream_id 0x3344 \
+     -mpegts_service_id 0x5566 \
+     -mpegts_pmt_start_pid 0x1500 \
+     -mpegts_start_pid 0x150 \
+     -metadata service_provider="Some provider" \
+     -metadata service_name="Some Channel" \
+     -y out.ts
+
+ + +

4.14 null

+ +

Null muxer. +

+

This muxer does not generate any output file, it is mainly useful for +testing or benchmarking purposes. +

+

For example to benchmark decoding with ffmpeg you can use the +command: +

 
ffmpeg -benchmark -i INPUT -f null out.null
+
+ +

Note that the above command does not read or write the ‘out.null’ +file, but specifying the output file is required by the ffmpeg +syntax. +

+

Alternatively you can write the command as: +

 
ffmpeg -benchmark -i INPUT -f null -
+
+ + +

4.15 ogg

+ +

Ogg container muxer. +

+
+
-page_duration duration
+

Preferred page duration, in microseconds. The muxer will attempt to create +pages that are approximately duration microseconds long. This allows the +user to compromise between seek granularity and container overhead. The default +is 1 second. A value of 0 will fill all segments, making pages as large as +possible. A value of 1 will effectively use 1 packet-per-page in most +situations, giving a small seek granularity at the cost of additional container +overhead. +

+
+ +

+

+

4.16 segment, stream_segment, ssegment

+ +

Basic stream segmenter. +

+

This muxer outputs streams to a number of separate files of nearly +fixed duration. Output filename pattern can be set in a fashion similar to +image2. +

+

stream_segment is a variant of the muxer used to write to +streaming output formats, i.e. which do not require global headers, +and is recommended for outputting e.g. to MPEG transport stream segments. +ssegment is a shorter alias for stream_segment. +

+

Every segment starts with a keyframe of the selected reference stream, +which is set through the ‘reference_stream’ option. +

+

Note that if you want accurate splitting for a video file, you need to +make the input key frames correspond to the exact splitting times +expected by the segmenter, or the segment muxer will start the new +segment with the key frame found next after the specified start +time. +

+

The segment muxer works best with a single constant frame rate video. +

+

Optionally it can generate a list of the created segments, by setting +the option segment_list. The list type is specified by the +segment_list_type option. The entry filenames in the segment +list are set by default to the basename of the corresponding segment +files. +

+

See also the hls muxer, which provides a more specific +implementation for HLS segmentation. +

+ +

4.16.1 Options

+ +

The segment muxer supports the following options: +

+
+
reference_stream specifier
+

Set the reference stream, as specified by the string specifier. +If specifier is set to auto, the reference is chosen +automatically. Otherwise it must be a stream specifier (see the “Stream +specifiers” chapter in the ffmpeg manual) which specifies the +reference stream. The default value is auto. +

+
+
segment_format format
+

Override the inner container format, by default it is guessed by the filename +extension. +

+
+
segment_list name
+

Generate also a listfile named name. If not specified no +listfile is generated. +

+
+
segment_list_flags flags
+

Set flags affecting the segment list generation. +

+

It currently supports the following flags: +

+
cache
+

Allow caching (only affects M3U8 list files). +

+
+
live
+

Allow live-friendly file generation. +

+
+ +
+
segment_list_size size
+

Update the list file so that it contains at most the last size +segments. If 0 the list file will contain all the segments. Default +value is 0. +

+
+
segment_list_entry_prefix prefix
+

Set prefix to prepend to the name of each entry filename. By +default no prefix is applied. +

+
+
segment_list_type type
+

Specify the format for the segment list file. +

+

The following values are recognized: +

+
flat
+

Generate a flat list for the created segments, one segment per line. +

+
+
csv, ext
+

Generate a list for the created segments, one segment per line, +each line matching the format (comma-separated values): +

 
segment_filename,segment_start_time,segment_end_time
+
+ +

segment_filename is the name of the output file generated by the +muxer according to the provided pattern. CSV escaping (according to +RFC4180) is applied if required. +

+

segment_start_time and segment_end_time specify +the segment start and end time expressed in seconds. +

+

A list file with the suffix ".csv" or ".ext" will +auto-select this format. +

+

ext’ is deprecated in favor of ‘csv’. +

+
+
ffconcat
+

Generate an ffconcat file for the created segments. The resulting file +can be read using the FFmpeg concat demuxer. +

+

A list file with the suffix ".ffcat" or ".ffconcat" will +auto-select this format. +

+
+
m3u8
+

Generate an extended M3U8 file, version 3, compliant with +http://tools.ietf.org/id/draft-pantos-http-live-streaming. +

+

A list file with the suffix ".m3u8" will auto-select this format. +

+
+ +

If not specified the type is guessed from the list file name suffix. +

+
+
segment_time time
+

Set segment duration to time, the value must be a duration +specification. Default value is "2". See also the +‘segment_times’ option. +

+

Note that splitting may not be accurate, unless you force the +reference stream key-frames at the given time. See the introductory +notice and the examples below. +

+
+
segment_time_delta delta
+

Specify the accuracy time when selecting the start time for a +segment, expressed as a duration specification. Default value is "0". +

+

When delta is specified a key-frame will start a new segment if its +PTS satisfies the relation: +

 
PTS >= start_time - time_delta
+
+ +

This option is useful when splitting video content, which is always +split at GOP boundaries, in case a key frame is found just before the +specified split time. +

+

In particular may be used in combination with the ‘ffmpeg’ option +force_key_frames. The key frame times specified by +force_key_frames may not be set accurately because of rounding +issues, with the consequence that a key frame time may result set just +before the specified time. For constant frame rate videos a value of +1/(2*frame_rate) should address the worst case mismatch between +the specified time and the time set by force_key_frames. +

+
+
segment_times times
+

Specify a list of split points. times contains a list of comma +separated duration specifications, in increasing order. See also +the ‘segment_time’ option. +

+
+
segment_frames frames
+

Specify a list of split video frame numbers. frames contains a +list of comma separated integer numbers, in increasing order. +

+

This option specifies to start a new segment whenever a reference +stream key frame is found and the sequential number (starting from 0) +of the frame is greater or equal to the next value in the list. +

+
+
segment_wrap limit
+

Wrap around segment index once it reaches limit. +

+
+
segment_start_number number
+

Set the sequence number of the first segment. Defaults to 0. +

+
+
reset_timestamps 1|0
+

Reset timestamps at the begin of each segment, so that each segment +will start with near-zero timestamps. It is meant to ease the playback +of the generated segments. May not work with some combinations of +muxers/codecs. It is set to 0 by default. +

+
+
initial_offset offset
+

Specify timestamp offset to apply to the output packet timestamps. The +argument must be a time duration specification, and defaults to 0. +

+
+ + +

4.16.2 Examples

+ +
    +
  • +To remux the content of file ‘in.mkv’ to a list of segments +‘out-000.nut’, ‘out-001.nut’, etc., and write the list of +generated segments to ‘out.list’: +
     
    ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.list out%03d.nut
    +
    + +
  • +As the example above, but segment the input file according to the split +points specified by the segment_times option: +
     
    ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.csv -segment_times 1,2,3,5,8,13,21 out%03d.nut
    +
    + +
  • +As the example above, but use the ffmpegforce_key_frames’ +option to force key frames in the input at the specified location, together +with the segment option ‘segment_time_delta’ to account for +possible roundings operated when setting key frame times. +
     
    ffmpeg -i in.mkv -force_key_frames 1,2,3,5,8,13,21 -codec:v mpeg4 -codec:a pcm_s16le -map 0 \
    +-f segment -segment_list out.csv -segment_times 1,2,3,5,8,13,21 -segment_time_delta 0.05 out%03d.nut
    +
    +

    In order to force key frames on the input file, transcoding is +required. +

    +
  • +Segment the input file by splitting the input file according to the +frame numbers sequence specified with the ‘segment_frames’ option: +
     
    ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.csv -segment_frames 100,200,300,500,800 out%03d.nut
    +
    + +
  • +To convert the ‘in.mkv’ to TS segments using the libx264 +and libfaac encoders: +
     
    ffmpeg -i in.mkv -map 0 -codec:v libx264 -codec:a libfaac -f ssegment -segment_list out.list out%03d.ts
    +
    + +
  • +Segment the input file, and create an M3U8 live playlist (can be used +as live HLS source): +
     
    ffmpeg -re -i in.mkv -codec copy -map 0 -f segment -segment_list playlist.m3u8 \
    +-segment_list_flags +live -segment_time 10 out%03d.mkv
    +
    +
+ + +

4.17 tee

+ +

The tee muxer can be used to write the same data to several files or any +other kind of muxer. It can be used, for example, to both stream a video to +the network and save it to disk at the same time. +

+

It is different from specifying several outputs to the ffmpeg +command-line tool because the audio and video data will be encoded only once +with the tee muxer; encoding can be a very expensive process. It is not +useful when using the libavformat API directly because it is then possible +to feed the same packets to several muxers directly. +

+

The slave outputs are specified in the file name given to the muxer, +separated by ’|’. If any of the slave name contains the ’|’ separator, +leading or trailing spaces or any special character, it must be +escaped (see (ffmpeg-utils)quoting_and_escaping). +

+

Muxer options can be specified for each slave by prepending them as a list of +key=value pairs separated by ’:’, between square brackets. If +the options values contain a special character or the ’:’ separator, they +must be escaped; note that this is a second level escaping. +

+

The following special options are also recognized: +

+
f
+

Specify the format name. Useful if it cannot be guessed from the +output name suffix. +

+
+
bsfs[/spec]
+

Specify a list of bitstream filters to apply to the specified +output. +

+

It is possible to specify to which streams a given bitstream filter +applies, by appending a stream specifier to the option separated by +/. spec must be a stream specifier (see Format stream specifiers). If the stream specifier is not specified, the +bitstream filters will be applied to all streams in the output. +

+

Several bitstream filters can be specified, separated by ",". +

+
+
select
+

Select the streams that should be mapped to the slave output, +specified by a stream specifier. If not specified, this defaults to +all the input streams. +

+
+ + +

4.17.1 Examples

+ +
    +
  • +Encode something and both archive it in a WebM file and stream it +as MPEG-TS over UDP (the streams need to be explicitly mapped): +
     
    ffmpeg -i ... -c:v libx264 -c:a mp2 -f tee -map 0:v -map 0:a
    +  "archive-20121107.mkv|[f=mpegts]udp://10.0.1.255:1234/"
    +
    + +
  • +Use ffmpeg to encode the input, and send the output +to three different destinations. The dump_extra bitstream +filter is used to add extradata information to all the output video +keyframes packets, as requested by the MPEG-TS format. The select +option is applied to ‘out.aac’ in order to make it contain only +audio packets. +
     
    ffmpeg -i ... -map 0 -flags +global_header -c:v libx264 -c:a aac -strict experimental
    +       -f tee "[bsfs/v=dump_extra]out.ts|[movflags=+faststart]out.mp4|[select=a]out.aac"
    +
    + +
  • +As above, but select only stream a:1 for the audio output. Note +that a second level escaping must be performed, as ":" is a special +character used to separate options. +
     
    ffmpeg -i ... -map 0 -flags +global_header -c:v libx264 -c:a aac -strict experimental
    +       -f tee "[bsfs/v=dump_extra]out.ts|[movflags=+faststart]out.mp4|[select=\'a:1\']out.aac"
    +
    +
+ +

Note: some codecs may need different options depending on the output format; +the auto-detection of this can not work with the tee muxer. The main example +is the ‘global_header’ flag. +

+ +

5. Metadata

+ +

FFmpeg is able to dump metadata from media files into a simple UTF-8-encoded +INI-like text file and then load it back using the metadata muxer/demuxer. +

+

The file format is as follows: +

    +
  1. +A file consists of a header and a number of metadata tags divided into sections, +each on its own line. + +
  2. +The header is a ’;FFMETADATA’ string, followed by a version number (now 1). + +
  3. +Metadata tags are of the form ’key=value’ + +
  4. +Immediately after header follows global metadata + +
  5. +After global metadata there may be sections with per-stream/per-chapter +metadata. + +
  6. +A section starts with the section name in uppercase (i.e. STREAM or CHAPTER) in +brackets (’[’, ’]’) and ends with next section or end of file. + +
  7. +At the beginning of a chapter section there may be an optional timebase to be +used for start/end values. It must be in form ’TIMEBASE=num/den’, where num and +den are integers. If the timebase is missing then start/end times are assumed to +be in milliseconds. +Next a chapter section must contain chapter start and end times in form +’START=num’, ’END=num’, where num is a positive integer. + +
  8. +Empty lines and lines starting with ’;’ or ’#’ are ignored. + +
  9. +Metadata keys or values containing special characters (’=’, ’;’, ’#’, ’\’ and a +newline) must be escaped with a backslash ’\’. + +
  10. +Note that whitespace in metadata (e.g. foo = bar) is considered to be a part of +the tag (in the example above key is ’foo ’, value is ’ bar’). +
+ +

A ffmetadata file might look like this: +

 
;FFMETADATA1
+title=bike\\shed
+;this is a comment
+artist=FFmpeg troll team
+
+[CHAPTER]
+TIMEBASE=1/1000
+START=0
+#chapter ends at 0:01:00
+END=60000
+title=chapter \#1
+[STREAM]
+title=multi\
+line
+
+ +

By using the ffmetadata muxer and demuxer it is possible to extract +metadata from an input file to an ffmetadata file, and then transcode +the file into an output file with the edited ffmetadata file. +

+

Extracting an ffmetadata file with ‘ffmpeg’ goes as follows: +

 
ffmpeg -i INPUT -f ffmetadata FFMETADATAFILE
+
+ +

Reinserting edited metadata information from the FFMETADATAFILE file can +be done as: +

 
ffmpeg -i INPUT -i FFMETADATAFILE -map_metadata 1 -codec copy OUTPUT
+
+ + + +

6. See Also

+ +

ffmpeg, ffplay, ffprobe, ffserver, +libavformat +

+ + +

7. Authors

+ +

The FFmpeg developers. +

+

For details about the authorship, see the Git history of the project +(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command +git log in the FFmpeg source directory, or browsing the +online repository at http://source.ffmpeg.org. +

+

Maintainers for the specific components are listed in the file +‘MAINTAINERS’ in the source code tree. +

+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/ffmpeg-protocols.html b/dependencies64/ffmpeg/doc/ffmpeg-protocols.html new file mode 100644 index 000000000..6be426ade --- /dev/null +++ b/dependencies64/ffmpeg/doc/ffmpeg-protocols.html @@ -0,0 +1,1381 @@ + + + + + +FFmpeg documentation : FFmpeg Protocols + + + + + + + + + + +
+
+ + +

FFmpeg Protocols Documentation

+ + +

Table of Contents

+ + + +

1. Description

+ +

This document describes the input and output protocols provided by the +libavformat library. +

+ + +

2. Protocols

+ +

Protocols are configured elements in FFmpeg that enable access to +resources that require specific protocols. +

+

When you configure your FFmpeg build, all the supported protocols are +enabled by default. You can list all available ones using the +configure option "–list-protocols". +

+

You can disable all the protocols using the configure option +"–disable-protocols", and selectively enable a protocol using the +option "–enable-protocol=PROTOCOL", or you can disable a +particular protocol using the option +"–disable-protocol=PROTOCOL". +

+

The option "-protocols" of the ff* tools will display the list of +supported protocols. +

+

A description of the currently available protocols follows. +

+ +

2.1 bluray

+ +

Read BluRay playlist. +

+

The accepted options are: +

+
angle
+

BluRay angle +

+
+
chapter
+

Start chapter (1...N) +

+
+
playlist
+

Playlist to read (BDMV/PLAYLIST/?????.mpls) +

+
+
+ +

Examples: +

+

Read longest playlist from BluRay mounted to /mnt/bluray: +

 
bluray:/mnt/bluray
+
+ +

Read angle 2 of playlist 4 from BluRay mounted to /mnt/bluray, start from chapter 2: +

 
-playlist 4 -angle 2 -chapter 2 bluray:/mnt/bluray
+
+ + +

2.2 cache

+ +

Caching wrapper for input stream. +

+

Cache the input stream to temporary file. It brings seeking capability to live streams. +

+
 
cache:URL
+
+ + +

2.3 concat

+ +

Physical concatenation protocol. +

+

Allow to read and seek from many resources in sequence as if they were +a unique resource. +

+

A URL accepted by this protocol has the syntax: +

 
concat:URL1|URL2|...|URLN
+
+ +

where URL1, URL2, ..., URLN are the urls of the +resource to be concatenated, each one possibly specifying a distinct +protocol. +

+

For example to read a sequence of files ‘split1.mpeg’, +‘split2.mpeg’, ‘split3.mpeg’ with ffplay use the +command: +

 
ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
+
+ +

Note that you may need to escape the character "|" which is special for +many shells. +

+ +

2.4 crypto

+ +

AES-encrypted stream reading protocol. +

+

The accepted options are: +

+
key
+

Set the AES decryption key binary block from given hexadecimal representation. +

+
+
iv
+

Set the AES decryption initialization vector binary block from given hexadecimal representation. +

+
+ +

Accepted URL formats: +

 
crypto:URL
+crypto+URL
+
+ + +

2.5 data

+ +

Data in-line in the URI. See http://en.wikipedia.org/wiki/Data_URI_scheme. +

+

For example, to convert a GIF file given inline with ffmpeg: +

 
ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP///////////////ywAAAAACAAIAAADF0gEDLojDgdGiJdJqUX02iB4E8Q9jUMkADs=" smiley.png
+
+ + +

2.6 file

+ +

File access protocol. +

+

Allow to read from or write to a file. +

+

A file URL can have the form: +

 
file:filename
+
+ +

where filename is the path of the file to read. +

+

An URL that does not have a protocol prefix will be assumed to be a +file URL. Depending on the build, an URL that looks like a Windows +path with the drive letter at the beginning will also be assumed to be +a file URL (usually not the case in builds for unix-like systems). +

+

For example to read from a file ‘input.mpeg’ with ffmpeg +use the command: +

 
ffmpeg -i file:input.mpeg output.mpeg
+
+ +

This protocol accepts the following options: +

+
+
truncate
+

Truncate existing files on write, if set to 1. A value of 0 prevents +truncating. Default value is 1. +

+
+
blocksize
+

Set I/O operation maximum block size, in bytes. Default value is +INT_MAX, which results in not limiting the requested block size. +Setting this value reasonably low improves user termination request reaction +time, which is valuable for files on slow medium. +

+
+ + +

2.7 ftp

+ +

FTP (File Transfer Protocol). +

+

Allow to read from or write to remote resources using FTP protocol. +

+

Following syntax is required. +

 
ftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
+
+ +

This protocol accepts the following options. +

+
+
timeout
+

Set timeout of socket I/O operations used by the underlying low level +operation. By default it is set to -1, which means that the timeout is +not specified. +

+
+
ftp-anonymous-password
+

Password used when login as anonymous user. Typically an e-mail address +should be used. +

+
+
ftp-write-seekable
+

Control seekability of connection during encoding. If set to 1 the +resource is supposed to be seekable, if set to 0 it is assumed not +to be seekable. Default value is 0. +

+
+ +

NOTE: Protocol can be used as output, but it is recommended to not do +it, unless special care is taken (tests, customized server configuration +etc.). Different FTP servers behave in different way during seek +operation. ff* tools may produce incomplete content due to server limitations. +

+ +

2.8 gopher

+ +

Gopher protocol. +

+ +

2.9 hls

+ +

Read Apple HTTP Live Streaming compliant segmented stream as +a uniform one. The M3U8 playlists describing the segments can be +remote HTTP resources or local files, accessed using the standard +file protocol. +The nested protocol is declared by specifying +"+proto" after the hls URI scheme name, where proto +is either "file" or "http". +

+
 
hls+http://host/path/to/remote/resource.m3u8
+hls+file://path/to/local/resource.m3u8
+
+ +

Using this protocol is discouraged - the hls demuxer should work +just as well (if not, please report the issues) and is more complete. +To use the hls demuxer instead, simply use the direct URLs to the +m3u8 files. +

+ +

2.10 http

+ +

HTTP (Hyper Text Transfer Protocol). +

+

This protocol accepts the following options: +

+
+
seekable
+

Control seekability of connection. If set to 1 the resource is +supposed to be seekable, if set to 0 it is assumed not to be seekable, +if set to -1 it will try to autodetect if it is seekable. Default +value is -1. +

+
+
chunked_post
+

If set to 1 use chunked Transfer-Encoding for posts, default is 1. +

+
+
content_type
+

Set a specific content type for the POST messages. +

+
+
headers
+

Set custom HTTP headers, can override built in default headers. The +value must be a string encoding the headers. +

+
+
multiple_requests
+

Use persistent connections if set to 1, default is 0. +

+
+
post_data
+

Set custom HTTP post data. +

+
+
user-agent
+
user_agent
+

Override the User-Agent header. If not specified the protocol will use a +string describing the libavformat build. ("Lavf/<version>") +

+
+
timeout
+

Set timeout of socket I/O operations used by the underlying low level +operation. By default it is set to -1, which means that the timeout is +not specified. +

+
+
mime_type
+

Export the MIME type. +

+
+
icy
+

If set to 1 request ICY (SHOUTcast) metadata from the server. If the server +supports this, the metadata has to be retrieved by the application by reading +the ‘icy_metadata_headers’ and ‘icy_metadata_packet’ options. +The default is 0. +

+
+
icy_metadata_headers
+

If the server supports ICY metadata, this contains the ICY-specific HTTP reply +headers, separated by newline characters. +

+
+
icy_metadata_packet
+

If the server supports ICY metadata, and ‘icy’ was set to 1, this +contains the last non-empty metadata packet sent by the server. It should be +polled in regular intervals by applications interested in mid-stream metadata +updates. +

+
+
cookies
+

Set the cookies to be sent in future requests. The format of each cookie is the +same as the value of a Set-Cookie HTTP response field. Multiple cookies can be +delimited by a newline character. +

+
+
offset
+

Set initial byte offset. +

+
+
end_offset
+

Try to limit the request to bytes preceding this offset. +

+
+ + +

2.10.1 HTTP Cookies

+ +

Some HTTP requests will be denied unless cookie values are passed in with the +request. The ‘cookies’ option allows these cookies to be specified. At +the very least, each cookie must specify a value along with a path and domain. +HTTP requests that match both the domain and path will automatically include the +cookie value in the HTTP Cookie header field. Multiple cookies can be delimited +by a newline. +

+

The required syntax to play a stream specifying a cookie is: +

 
ffplay -cookies "nlqptid=nltid=tsn; path=/; domain=somedomain.com;" http://somedomain.com/somestream.m3u8
+
+ + +

2.11 mmst

+ +

MMS (Microsoft Media Server) protocol over TCP. +

+ +

2.12 mmsh

+ +

MMS (Microsoft Media Server) protocol over HTTP. +

+

The required syntax is: +

 
mmsh://server[:port][/app][/playpath]
+
+ + +

2.13 md5

+ +

MD5 output protocol. +

+

Computes the MD5 hash of the data to be written, and on close writes +this to the designated output or stdout if none is specified. It can +be used to test muxers without writing an actual file. +

+

Some examples follow. +

 
# Write the MD5 hash of the encoded AVI file to the file output.avi.md5.
+ffmpeg -i input.flv -f avi -y md5:output.avi.md5
+
+# Write the MD5 hash of the encoded AVI file to stdout.
+ffmpeg -i input.flv -f avi -y md5:
+
+ +

Note that some formats (typically MOV) require the output protocol to +be seekable, so they will fail with the MD5 output protocol. +

+ +

2.14 pipe

+ +

UNIX pipe access protocol. +

+

Allow to read and write from UNIX pipes. +

+

The accepted syntax is: +

 
pipe:[number]
+
+ +

number is the number corresponding to the file descriptor of the +pipe (e.g. 0 for stdin, 1 for stdout, 2 for stderr). If number +is not specified, by default the stdout file descriptor will be used +for writing, stdin for reading. +

+

For example to read from stdin with ffmpeg: +

 
cat test.wav | ffmpeg -i pipe:0
+# ...this is the same as...
+cat test.wav | ffmpeg -i pipe:
+
+ +

For writing to stdout with ffmpeg: +

 
ffmpeg -i test.wav -f avi pipe:1 | cat > test.avi
+# ...this is the same as...
+ffmpeg -i test.wav -f avi pipe: | cat > test.avi
+
+ +

This protocol accepts the following options: +

+
+
blocksize
+

Set I/O operation maximum block size, in bytes. Default value is +INT_MAX, which results in not limiting the requested block size. +Setting this value reasonably low improves user termination request reaction +time, which is valuable if data transmission is slow. +

+
+ +

Note that some formats (typically MOV), require the output protocol to +be seekable, so they will fail with the pipe output protocol. +

+ +

2.15 rtmp

+ +

Real-Time Messaging Protocol. +

+

The Real-Time Messaging Protocol (RTMP) is used for streaming multimedia +content across a TCP/IP network. +

+

The required syntax is: +

 
rtmp://[username:password@]server[:port][/app][/instance][/playpath]
+
+ +

The accepted parameters are: +

+
username
+

An optional username (mostly for publishing). +

+
+
password
+

An optional password (mostly for publishing). +

+
+
server
+

The address of the RTMP server. +

+
+
port
+

The number of the TCP port to use (by default is 1935). +

+
+
app
+

It is the name of the application to access. It usually corresponds to +the path where the application is installed on the RTMP server +(e.g. ‘/ondemand/’, ‘/flash/live/’, etc.). You can override +the value parsed from the URI through the rtmp_app option, too. +

+
+
playpath
+

It is the path or name of the resource to play with reference to the +application specified in app, may be prefixed by "mp4:". You +can override the value parsed from the URI through the rtmp_playpath +option, too. +

+
+
listen
+

Act as a server, listening for an incoming connection. +

+
+
timeout
+

Maximum time to wait for the incoming connection. Implies listen. +

+
+ +

Additionally, the following parameters can be set via command line options +(or in code via AVOptions): +

+
rtmp_app
+

Name of application to connect on the RTMP server. This option +overrides the parameter specified in the URI. +

+
+
rtmp_buffer
+

Set the client buffer time in milliseconds. The default is 3000. +

+
+
rtmp_conn
+

Extra arbitrary AMF connection parameters, parsed from a string, +e.g. like B:1 S:authMe O:1 NN:code:1.23 NS:flag:ok O:0. +Each value is prefixed by a single character denoting the type, +B for Boolean, N for number, S for string, O for object, or Z for null, +followed by a colon. For Booleans the data must be either 0 or 1 for +FALSE or TRUE, respectively. Likewise for Objects the data must be 0 or +1 to end or begin an object, respectively. Data items in subobjects may +be named, by prefixing the type with ’N’ and specifying the name before +the value (i.e. NB:myFlag:1). This option may be used multiple +times to construct arbitrary AMF sequences. +

+
+
rtmp_flashver
+

Version of the Flash plugin used to run the SWF player. The default +is LNX 9,0,124,2. (When publishing, the default is FMLE/3.0 (compatible; +<libavformat version>).) +

+
+
rtmp_flush_interval
+

Number of packets flushed in the same request (RTMPT only). The default +is 10. +

+
+
rtmp_live
+

Specify that the media is a live stream. No resuming or seeking in +live streams is possible. The default value is any, which means the +subscriber first tries to play the live stream specified in the +playpath. If a live stream of that name is not found, it plays the +recorded stream. The other possible values are live and +recorded. +

+
+
rtmp_pageurl
+

URL of the web page in which the media was embedded. By default no +value will be sent. +

+
+
rtmp_playpath
+

Stream identifier to play or to publish. This option overrides the +parameter specified in the URI. +

+
+
rtmp_subscribe
+

Name of live stream to subscribe to. By default no value will be sent. +It is only sent if the option is specified or if rtmp_live +is set to live. +

+
+
rtmp_swfhash
+

SHA256 hash of the decompressed SWF file (32 bytes). +

+
+
rtmp_swfsize
+

Size of the decompressed SWF file, required for SWFVerification. +

+
+
rtmp_swfurl
+

URL of the SWF player for the media. By default no value will be sent. +

+
+
rtmp_swfverify
+

URL to player swf file, compute hash/size automatically. +

+
+
rtmp_tcurl
+

URL of the target stream. Defaults to proto://host[:port]/app. +

+
+
+ +

For example to read with ffplay a multimedia resource named +"sample" from the application "vod" from an RTMP server "myserver": +

 
ffplay rtmp://myserver/vod/sample
+
+ +

To publish to a password protected server, passing the playpath and +app names separately: +

 
ffmpeg -re -i <input> -f flv -rtmp_playpath some/long/path -rtmp_app long/app/name rtmp://username:password@myserver/
+
+ + +

2.16 rtmpe

+ +

Encrypted Real-Time Messaging Protocol. +

+

The Encrypted Real-Time Messaging Protocol (RTMPE) is used for +streaming multimedia content within standard cryptographic primitives, +consisting of Diffie-Hellman key exchange and HMACSHA256, generating +a pair of RC4 keys. +

+ +

2.17 rtmps

+ +

Real-Time Messaging Protocol over a secure SSL connection. +

+

The Real-Time Messaging Protocol (RTMPS) is used for streaming +multimedia content across an encrypted connection. +

+ +

2.18 rtmpt

+ +

Real-Time Messaging Protocol tunneled through HTTP. +

+

The Real-Time Messaging Protocol tunneled through HTTP (RTMPT) is used +for streaming multimedia content within HTTP requests to traverse +firewalls. +

+ +

2.19 rtmpte

+ +

Encrypted Real-Time Messaging Protocol tunneled through HTTP. +

+

The Encrypted Real-Time Messaging Protocol tunneled through HTTP (RTMPTE) +is used for streaming multimedia content within HTTP requests to traverse +firewalls. +

+ +

2.20 rtmpts

+ +

Real-Time Messaging Protocol tunneled through HTTPS. +

+

The Real-Time Messaging Protocol tunneled through HTTPS (RTMPTS) is used +for streaming multimedia content within HTTPS requests to traverse +firewalls. +

+ +

2.21 libssh

+ +

Secure File Transfer Protocol via libssh +

+

Allow reading from or writing to remote resources using SFTP protocol. +

+

The following syntax is required. +

+
 
sftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
+
+ +

This protocol accepts the following options. +

+
+
timeout
+

Set timeout of socket I/O operations used by the underlying low level +operation. By default it is set to -1, which means that the timeout +is not specified. +

+
+
truncate
+

Truncate existing files on write, if set to 1. A value of 0 prevents +truncating. Default value is 1. +

+
+
private_key
+

Specify the path of the file containing private key to use during authorization. +By default libssh searches for keys in the ‘~/.ssh/’ directory. +

+
+
+ +

Example: Play a file stored on remote server. +

+
 
ffplay sftp://user:password@server_address:22/home/user/resource.mpeg
+
+ + +

2.22 librtmp rtmp, rtmpe, rtmps, rtmpt, rtmpte

+ +

Real-Time Messaging Protocol and its variants supported through +librtmp. +

+

Requires the presence of the librtmp headers and library during +configuration. You need to explicitly configure the build with +"–enable-librtmp". If enabled this will replace the native RTMP +protocol. +

+

This protocol provides most client functions and a few server +functions needed to support RTMP, RTMP tunneled in HTTP (RTMPT), +encrypted RTMP (RTMPE), RTMP over SSL/TLS (RTMPS) and tunneled +variants of these encrypted types (RTMPTE, RTMPTS). +

+

The required syntax is: +

 
rtmp_proto://server[:port][/app][/playpath] options
+
+ +

where rtmp_proto is one of the strings "rtmp", "rtmpt", "rtmpe", +"rtmps", "rtmpte", "rtmpts" corresponding to each RTMP variant, and +server, port, app and playpath have the same +meaning as specified for the RTMP native protocol. +options contains a list of space-separated options of the form +key=val. +

+

See the librtmp manual page (man 3 librtmp) for more information. +

+

For example, to stream a file in real-time to an RTMP server using +ffmpeg: +

 
ffmpeg -re -i myfile -f flv rtmp://myserver/live/mystream
+
+ +

To play the same stream using ffplay: +

 
ffplay "rtmp://myserver/live/mystream live=1"
+
+ + +

2.23 rtp

+ +

Real-time Transport Protocol. +

+

The required syntax for an RTP URL is: +rtp://hostname[:port][?option=val...] +

+

port specifies the RTP port to use. +

+

The following URL options are supported: +

+
+
ttl=n
+

Set the TTL (Time-To-Live) value (for multicast only). +

+
+
rtcpport=n
+

Set the remote RTCP port to n. +

+
+
localrtpport=n
+

Set the local RTP port to n. +

+
+
localrtcpport=n'
+

Set the local RTCP port to n. +

+
+
pkt_size=n
+

Set max packet size (in bytes) to n. +

+
+
connect=0|1
+

Do a connect() on the UDP socket (if set to 1) or not (if set +to 0). +

+
+
sources=ip[,ip]
+

List allowed source IP addresses. +

+
+
block=ip[,ip]
+

List disallowed (blocked) source IP addresses. +

+
+
write_to_source=0|1
+

Send packets to the source address of the latest received packet (if +set to 1) or to a default remote address (if set to 0). +

+
+
localport=n
+

Set the local RTP port to n. +

+

This is a deprecated option. Instead, ‘localrtpport’ should be +used. +

+
+
+ +

Important notes: +

+
    +
  1. +If ‘rtcpport’ is not set the RTCP port will be set to the RTP +port value plus 1. + +
  2. +If ‘localrtpport’ (the local RTP port) is not set any available +port will be used for the local RTP and RTCP ports. + +
  3. +If ‘localrtcpport’ (the local RTCP port) is not set it will be +set to the local RTP port value plus 1. +
+ + +

2.24 rtsp

+ +

Real-Time Streaming Protocol. +

+

RTSP is not technically a protocol handler in libavformat, it is a demuxer +and muxer. The demuxer supports both normal RTSP (with data transferred +over RTP; this is used by e.g. Apple and Microsoft) and Real-RTSP (with +data transferred over RDT). +

+

The muxer can be used to send a stream using RTSP ANNOUNCE to a server +supporting it (currently Darwin Streaming Server and Mischa Spiegelmock’s +RTSP server). +

+

The required syntax for a RTSP url is: +

 
rtsp://hostname[:port]/path
+
+ +

Options can be set on the ffmpeg/ffplay command +line, or set in code via AVOptions or in +avformat_open_input. +

+

The following options are supported. +

+
+
initial_pause
+

Do not start playing the stream immediately if set to 1. Default value +is 0. +

+
+
rtsp_transport
+

Set RTSP transport protocols. +

+

It accepts the following values: +

+
udp
+

Use UDP as lower transport protocol. +

+
+
tcp
+

Use TCP (interleaving within the RTSP control channel) as lower +transport protocol. +

+
+
udp_multicast
+

Use UDP multicast as lower transport protocol. +

+
+
http
+

Use HTTP tunneling as lower transport protocol, which is useful for +passing proxies. +

+
+ +

Multiple lower transport protocols may be specified, in that case they are +tried one at a time (if the setup of one fails, the next one is tried). +For the muxer, only the ‘tcp’ and ‘udp’ options are supported. +

+
+
rtsp_flags
+

Set RTSP flags. +

+

The following values are accepted: +

+
filter_src
+

Accept packets only from negotiated peer address and port. +

+
listen
+

Act as a server, listening for an incoming connection. +

+
+ +

Default value is ‘none’. +

+
+
allowed_media_types
+

Set media types to accept from the server. +

+

The following flags are accepted: +

+
video
+
audio
+
data
+
+ +

By default it accepts all media types. +

+
+
min_port
+

Set minimum local UDP port. Default value is 5000. +

+
+
max_port
+

Set maximum local UDP port. Default value is 65000. +

+
+
timeout
+

Set maximum timeout (in seconds) to wait for incoming connections. +

+

A value of -1 means infinite (default). This option implies the +‘rtsp_flags’ set to ‘listen’. +

+
+
reorder_queue_size
+

Set number of packets to buffer for handling of reordered packets. +

+
+
stimeout
+

Set socket TCP I/O timeout in micro seconds. +

+
+
user-agent
+

Override User-Agent header. If not specified, it defaults to the +libavformat identifier string. +

+
+ +

When receiving data over UDP, the demuxer tries to reorder received packets +(since they may arrive out of order, or packets may get lost totally). This +can be disabled by setting the maximum demuxing delay to zero (via +the max_delay field of AVFormatContext). +

+

When watching multi-bitrate Real-RTSP streams with ffplay, the +streams to display can be chosen with -vst n and +-ast n for video and audio respectively, and can be switched +on the fly by pressing v and a. +

+ +

2.24.1 Examples

+ +

The following examples all make use of the ffplay and +ffmpeg tools. +

+
    +
  • +Watch a stream over UDP, with a max reordering delay of 0.5 seconds: +
     
    ffplay -max_delay 500000 -rtsp_transport udp rtsp://server/video.mp4
    +
    + +
  • +Watch a stream tunneled over HTTP: +
     
    ffplay -rtsp_transport http rtsp://server/video.mp4
    +
    + +
  • +Send a stream in realtime to a RTSP server, for others to watch: +
     
    ffmpeg -re -i input -f rtsp -muxdelay 0.1 rtsp://server/live.sdp
    +
    + +
  • +Receive a stream in realtime: +
     
    ffmpeg -rtsp_flags listen -i rtsp://ownaddress/live.sdp output
    +
    +
+ + +

2.25 sap

+ +

Session Announcement Protocol (RFC 2974). This is not technically a +protocol handler in libavformat, it is a muxer and demuxer. +It is used for signalling of RTP streams, by announcing the SDP for the +streams regularly on a separate port. +

+ +

2.25.1 Muxer

+ +

The syntax for a SAP url given to the muxer is: +

 
sap://destination[:port][?options]
+
+ +

The RTP packets are sent to destination on port port, +or to port 5004 if no port is specified. +options is a &-separated list. The following options +are supported: +

+
+
announce_addr=address
+

Specify the destination IP address for sending the announcements to. +If omitted, the announcements are sent to the commonly used SAP +announcement multicast address 224.2.127.254 (sap.mcast.net), or +ff0e::2:7ffe if destination is an IPv6 address. +

+
+
announce_port=port
+

Specify the port to send the announcements on, defaults to +9875 if not specified. +

+
+
ttl=ttl
+

Specify the time to live value for the announcements and RTP packets, +defaults to 255. +

+
+
same_port=0|1
+

If set to 1, send all RTP streams on the same port pair. If zero (the +default), all streams are sent on unique ports, with each stream on a +port 2 numbers higher than the previous. +VLC/Live555 requires this to be set to 1, to be able to receive the stream. +The RTP stack in libavformat for receiving requires all streams to be sent +on unique ports. +

+
+ +

Example command lines follow. +

+

To broadcast a stream on the local subnet, for watching in VLC: +

+
 
ffmpeg -re -i input -f sap sap://224.0.0.255?same_port=1
+
+ +

Similarly, for watching in ffplay: +

+
 
ffmpeg -re -i input -f sap sap://224.0.0.255
+
+ +

And for watching in ffplay, over IPv6: +

+
 
ffmpeg -re -i input -f sap sap://[ff0e::1:2:3:4]
+
+ + +

2.25.2 Demuxer

+ +

The syntax for a SAP url given to the demuxer is: +

 
sap://[address][:port]
+
+ +

address is the multicast address to listen for announcements on, +if omitted, the default 224.2.127.254 (sap.mcast.net) is used. port +is the port that is listened on, 9875 if omitted. +

+

The demuxer listens for announcements on the given address and port. +Once an announcement is received, it tries to receive that particular stream. +

+

Example command lines follow. +

+

To play back the first stream announced on the normal SAP multicast address: +

+
 
ffplay sap://
+
+ +

To play back the first stream announced on the default IPv6 SAP multicast address: +

+
 
ffplay sap://[ff0e::2:7ffe]
+
+ + +

2.26 sctp

+ +

Stream Control Transmission Protocol. +

+

The accepted URL syntax is: +

 
sctp://host:port[?options]
+
+ +

The protocol accepts the following options: +

+
listen
+

If set to any value, listen for an incoming connection. Outgoing connection is done by default. +

+
+
max_streams
+

Set the maximum number of streams. By default no limit is set. +

+
+ + +

2.27 srtp

+ +

Secure Real-time Transport Protocol. +

+

The accepted options are: +

+
srtp_in_suite
+
srtp_out_suite
+

Select input and output encoding suites. +

+

Supported values: +

+
AES_CM_128_HMAC_SHA1_80
+
SRTP_AES128_CM_HMAC_SHA1_80
+
AES_CM_128_HMAC_SHA1_32
+
SRTP_AES128_CM_HMAC_SHA1_32
+
+ +
+
srtp_in_params
+
srtp_out_params
+

Set input and output encoding parameters, which are expressed by a +base64-encoded representation of a binary block. The first 16 bytes of +this binary block are used as master key, the following 14 bytes are +used as master salt. +

+
+ + +

2.28 tcp

+ +

Transmission Control Protocol. +

+

The required syntax for a TCP url is: +

 
tcp://hostname:port[?options]
+
+ +

options contains a list of &-separated options of the form +key=val. +

+

The list of supported options follows. +

+
+
listen=1|0
+

Listen for an incoming connection. Default value is 0. +

+
+
timeout=microseconds
+

Set raise error timeout, expressed in microseconds. +

+

This option is only relevant in read mode: if no data arrived in more +than this time interval, raise error. +

+
+
listen_timeout=microseconds
+

Set listen timeout, expressed in microseconds. +

+
+ +

The following example shows how to setup a listening TCP connection +with ffmpeg, which is then accessed with ffplay: +

 
ffmpeg -i input -f format tcp://hostname:port?listen
+ffplay tcp://hostname:port
+
+ + +

2.29 tls

+ +

Transport Layer Security (TLS) / Secure Sockets Layer (SSL) +

+

The required syntax for a TLS/SSL url is: +

 
tls://hostname:port[?options]
+
+ +

The following parameters can be set via command line options +(or in code via AVOptions): +

+
+
ca_file, cafile=filename
+

A file containing certificate authority (CA) root certificates to treat +as trusted. If the linked TLS library contains a default this might not +need to be specified for verification to work, but not all libraries and +setups have defaults built in. +The file must be in OpenSSL PEM format. +

+
+
tls_verify=1|0
+

If enabled, try to verify the peer that we are communicating with. +Note, if using OpenSSL, this currently only makes sure that the +peer certificate is signed by one of the root certificates in the CA +database, but it does not validate that the certificate actually +matches the host name we are trying to connect to. (With GnuTLS, +the host name is validated as well.) +

+

This is disabled by default since it requires a CA database to be +provided by the caller in many cases. +

+
+
cert_file, cert=filename
+

A file containing a certificate to use in the handshake with the peer. +(When operating as server, in listen mode, this is more often required +by the peer, while client certificates only are mandated in certain +setups.) +

+
+
key_file, key=filename
+

A file containing the private key for the certificate. +

+
+
listen=1|0
+

If enabled, listen for connections on the provided port, and assume +the server role in the handshake instead of the client role. +

+
+
+ +

Example command lines: +

+

To create a TLS/SSL server that serves an input stream. +

+
 
ffmpeg -i input -f format tls://hostname:port?listen&cert=server.crt&key=server.key
+
+ +

To play back a stream from the TLS/SSL server using ffplay: +

+
 
ffplay tls://hostname:port
+
+ + +

2.30 udp

+ +

User Datagram Protocol. +

+

The required syntax for an UDP URL is: +

 
udp://hostname:port[?options]
+
+ +

options contains a list of &-separated options of the form key=val. +

+

In case threading is enabled on the system, a circular buffer is used +to store the incoming data, which allows one to reduce loss of data due to +UDP socket buffer overruns. The fifo_size and +overrun_nonfatal options are related to this buffer. +

+

The list of supported options follows. +

+
+
buffer_size=size
+

Set the UDP socket buffer size in bytes. This is used both for the +receiving and the sending buffer size. +

+
+
localport=port
+

Override the local UDP port to bind with. +

+
+
localaddr=addr
+

Choose the local IP address. This is useful e.g. if sending multicast +and the host has multiple interfaces, where the user can choose +which interface to send on by specifying the IP address of that interface. +

+
+
pkt_size=size
+

Set the size in bytes of UDP packets. +

+
+
reuse=1|0
+

Explicitly allow or disallow reusing UDP sockets. +

+
+
ttl=ttl
+

Set the time to live value (for multicast only). +

+
+
connect=1|0
+

Initialize the UDP socket with connect(). In this case, the +destination address can’t be changed with ff_udp_set_remote_url later. +If the destination address isn’t known at the start, this option can +be specified in ff_udp_set_remote_url, too. +This allows finding out the source address for the packets with getsockname, +and makes writes return with AVERROR(ECONNREFUSED) if "destination +unreachable" is received. +For receiving, this gives the benefit of only receiving packets from +the specified peer address/port. +

+
+
sources=address[,address]
+

Only receive packets sent to the multicast group from one of the +specified sender IP addresses. +

+
+
block=address[,address]
+

Ignore packets sent to the multicast group from the specified +sender IP addresses. +

+
+
fifo_size=units
+

Set the UDP receiving circular buffer size, expressed as a number of +packets with size of 188 bytes. If not specified defaults to 7*4096. +

+
+
overrun_nonfatal=1|0
+

Survive in case of UDP receiving circular buffer overrun. Default +value is 0. +

+
+
timeout=microseconds
+

Set raise error timeout, expressed in microseconds. +

+

This option is only relevant in read mode: if no data arrived in more +than this time interval, raise error. +

+
+ + +

2.30.1 Examples

+ +
    +
  • +Use ffmpeg to stream over UDP to a remote endpoint: +
     
    ffmpeg -i input -f format udp://hostname:port
    +
    + +
  • +Use ffmpeg to stream in mpegts format over UDP using 188 +sized UDP packets, using a large input buffer: +
     
    ffmpeg -i input -f mpegts udp://hostname:port?pkt_size=188&buffer_size=65535
    +
    + +
  • +Use ffmpeg to receive over UDP from a remote endpoint: +
     
    ffmpeg -i udp://[multicast-address]:port ...
    +
    +
+ + +

2.31 unix

+ +

Unix local socket +

+

The required syntax for a Unix socket URL is: +

+
 
unix://filepath
+
+ +

The following parameters can be set via command line options +(or in code via AVOptions): +

+
+
timeout
+

Timeout in ms. +

+
listen
+

Create the Unix socket in listening mode. +

+
+ + + +

3. See Also

+ +

ffmpeg, ffplay, ffprobe, ffserver, +libavformat +

+ + +

4. Authors

+ +

The FFmpeg developers. +

+

For details about the authorship, see the Git history of the project +(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command +git log in the FFmpeg source directory, or browsing the +online repository at http://source.ffmpeg.org. +

+

Maintainers for the specific components are listed in the file +‘MAINTAINERS’ in the source code tree. +

+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/ffmpeg-resampler.html b/dependencies64/ffmpeg/doc/ffmpeg-resampler.html new file mode 100644 index 000000000..c620b972a --- /dev/null +++ b/dependencies64/ffmpeg/doc/ffmpeg-resampler.html @@ -0,0 +1,359 @@ + + + + + +FFmpeg documentation : FFmpeg Resampler + + + + + + + + + + +
+
+ + +

FFmpeg Resampler Documentation

+ + +

Table of Contents

+ + + +

1. Description

+ +

The FFmpeg resampler provides a high-level interface to the +libswresample library audio resampling utilities. In particular it +allows one to perform audio resampling, audio channel layout rematrixing, +and convert audio format and packing layout. +

+ + +

2. Resampler Options

+ +

The audio resampler supports the following named options. +

+

Options may be set by specifying -option value in the +FFmpeg tools, option=value for the aresample filter, +by setting the value explicitly in the +SwrContext options or using the ‘libavutil/opt.h’ API for +programmatic use. +

+
+
ich, in_channel_count
+

Set the number of input channels. Default value is 0. Setting this +value is not mandatory if the corresponding channel layout +‘in_channel_layout’ is set. +

+
+
och, out_channel_count
+

Set the number of output channels. Default value is 0. Setting this +value is not mandatory if the corresponding channel layout +‘out_channel_layout’ is set. +

+
+
uch, used_channel_count
+

Set the number of used input channels. Default value is 0. This option is +only used for special remapping. +

+
+
isr, in_sample_rate
+

Set the input sample rate. Default value is 0. +

+
+
osr, out_sample_rate
+

Set the output sample rate. Default value is 0. +

+
+
isf, in_sample_fmt
+

Specify the input sample format. It is set by default to none. +

+
+
osf, out_sample_fmt
+

Specify the output sample format. It is set by default to none. +

+
+
tsf, internal_sample_fmt
+

Set the internal sample format. Default value is none. +This will automatically be chosen when it is not explicitly set. +

+
+
icl, in_channel_layout
+
ocl, out_channel_layout
+

Set the input/output channel layout. +

+

See (ffmpeg-utils)channel layout syntax +for the required syntax. +

+
+
clev, center_mix_level
+

Set the center mix level. It is a value expressed in deciBel, and must be +in the interval [-32,32]. +

+
+
slev, surround_mix_level
+

Set the surround mix level. It is a value expressed in deciBel, and must +be in the interval [-32,32]. +

+
+
lfe_mix_level
+

Set LFE mix into non LFE level. It is used when there is a LFE input but no +LFE output. It is a value expressed in deciBel, and must +be in the interval [-32,32]. +

+
+
rmvol, rematrix_volume
+

Set rematrix volume. Default value is 1.0. +

+
+
rematrix_maxval
+

Set maximum output value for rematrixing. +This can be used to prevent clipping vs. preventing volume reduction. +A value of 1.0 prevents clipping. +

+
+
flags, swr_flags
+

Set flags used by the converter. Default value is 0. +

+

It supports the following individual flags: +

+
res
+

force resampling, this flag forces resampling to be used even when the +input and output sample rates match. +

+
+ +
+
dither_scale
+

Set the dither scale. Default value is 1. +

+
+
dither_method
+

Set dither method. Default value is 0. +

+

Supported values: +

+
rectangular
+

select rectangular dither +

+
triangular
+

select triangular dither +

+
triangular_hp
+

select triangular dither with high pass +

+
lipshitz
+

select lipshitz noise shaping dither +

+
shibata
+

select shibata noise shaping dither +

+
low_shibata
+

select low shibata noise shaping dither +

+
high_shibata
+

select high shibata noise shaping dither +

+
f_weighted
+

select f-weighted noise shaping dither +

+
modified_e_weighted
+

select modified-e-weighted noise shaping dither +

+
improved_e_weighted
+

select improved-e-weighted noise shaping dither +

+
+
+ +
+
resampler
+

Set resampling engine. Default value is swr. +

+

Supported values: +

+
swr
+

select the native SW Resampler; filter options precision and cheby are not +applicable in this case. +

+
soxr
+

select the SoX Resampler (where available); compensation, and filter options +filter_size, phase_shift, filter_type & kaiser_beta, are not applicable in this +case. +

+
+ +
+
filter_size
+

For swr only, set resampling filter size, default value is 32. +

+
+
phase_shift
+

For swr only, set resampling phase shift, default value is 10, and must be in +the interval [0,30]. +

+
+
linear_interp
+

Use Linear Interpolation if set to 1, default value is 0. +

+
+
cutoff
+

Set cutoff frequency (swr: 6dB point; soxr: 0dB point) ratio; must be a float +value between 0 and 1. Default value is 0.97 with swr, and 0.91 with soxr +(which, with a sample-rate of 44100, preserves the entire audio band to 20kHz). +

+
+
precision
+

For soxr only, the precision in bits to which the resampled signal will be +calculated. The default value of 20 (which, with suitable dithering, is +appropriate for a destination bit-depth of 16) gives SoX’s ’High Quality’; a +value of 28 gives SoX’s ’Very High Quality’. +

+
+
cheby
+

For soxr only, selects passband rolloff none (Chebyshev) & higher-precision +approximation for ’irrational’ ratios. Default value is 0. +

+
+
async
+

For swr only, simple 1 parameter audio sync to timestamps using stretching, +squeezing, filling and trimming. Setting this to 1 will enable filling and +trimming, larger values represent the maximum amount in samples that the data +may be stretched or squeezed for each second. +Default value is 0, thus no compensation is applied to make the samples match +the audio timestamps. +

+
+
first_pts
+

For swr only, assume the first pts should be this value. The time unit is 1 / sample rate. +This allows for padding/trimming at the start of stream. By default, no +assumption is made about the first frame’s expected pts, so no padding or +trimming is done. For example, this could be set to 0 to pad the beginning with +silence if an audio stream starts after the video stream or to trim any samples +with a negative pts due to encoder delay. +

+
+
min_comp
+

For swr only, set the minimum difference between timestamps and audio data (in +seconds) to trigger stretching/squeezing/filling or trimming of the +data to make it match the timestamps. The default is that +stretching/squeezing/filling and trimming is disabled +(‘min_comp’ = FLT_MAX). +

+
+
min_hard_comp
+

For swr only, set the minimum difference between timestamps and audio data (in +seconds) to trigger adding/dropping samples to make it match the +timestamps. This option effectively is a threshold to select between +hard (trim/fill) and soft (squeeze/stretch) compensation. Note that +all compensation is by default disabled through ‘min_comp’. +The default is 0.1. +

+
+
comp_duration
+

For swr only, set duration (in seconds) over which data is stretched/squeezed +to make it match the timestamps. Must be a non-negative double float value, +default value is 1.0. +

+
+
max_soft_comp
+

For swr only, set maximum factor by which data is stretched/squeezed to make it +match the timestamps. Must be a non-negative double float value, default value +is 0. +

+
+
matrix_encoding
+

Select matrixed stereo encoding. +

+

It accepts the following values: +

+
none
+

select none +

+
dolby
+

select Dolby +

+
dplii
+

select Dolby Pro Logic II +

+
+ +

Default value is none. +

+
+
filter_type
+

For swr only, select resampling filter type. This only affects resampling +operations. +

+

It accepts the following values: +

+
cubic
+

select cubic +

+
blackman_nuttall
+

select Blackman Nuttall Windowed Sinc +

+
kaiser
+

select Kaiser Windowed Sinc +

+
+ +
+
kaiser_beta
+

For swr only, set Kaiser Window Beta value. Must be an integer in the +interval [2,16], default value is 9. +

+
+
output_sample_bits
+

For swr only, set number of used output sample bits for dithering. Must be an integer in the +interval [0,64], default value is 0, which means it’s not used. +

+
+
+ + + +

3. See Also

+ +

ffmpeg, ffplay, ffprobe, ffserver, +libswresample +

+ + +

4. Authors

+ +

The FFmpeg developers. +

+

For details about the authorship, see the Git history of the project +(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command +git log in the FFmpeg source directory, or browsing the +online repository at http://source.ffmpeg.org. +

+

Maintainers for the specific components are listed in the file +‘MAINTAINERS’ in the source code tree. +

+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/ffmpeg-scaler.html b/dependencies64/ffmpeg/doc/ffmpeg-scaler.html new file mode 100644 index 000000000..34e4ee642 --- /dev/null +++ b/dependencies64/ffmpeg/doc/ffmpeg-scaler.html @@ -0,0 +1,225 @@ + + + + + +FFmpeg documentation : FFmpeg Scaler + + + + + + + + + + +
+
+ + +

FFmpeg Scaler Documentation

+ + +

Table of Contents

+ + + +

1. Description

+ +

The FFmpeg rescaler provides a high-level interface to the libswscale +library image conversion utilities. In particular it allows one to perform +image rescaling and pixel format conversion. +

+ +

+

+

2. Scaler Options

+ +

The video scaler supports the following named options. +

+

Options may be set by specifying -option value in the +FFmpeg tools. For programmatic use, they can be set explicitly in the +SwsContext options or through the ‘libavutil/opt.h’ API. +

+
+
+

+

+
sws_flags
+

Set the scaler flags. This is also used to set the scaling +algorithm. Only a single algorithm should be selected. +

+

It accepts the following values: +

+
fast_bilinear
+

Select fast bilinear scaling algorithm. +

+
+
bilinear
+

Select bilinear scaling algorithm. +

+
+
bicubic
+

Select bicubic scaling algorithm. +

+
+
experimental
+

Select experimental scaling algorithm. +

+
+
neighbor
+

Select nearest neighbor rescaling algorithm. +

+
+
area
+

Select averaging area rescaling algorithm. +

+
+
bicublin
+

Select bicubic scaling algorithm for the luma component, bilinear for +chroma components. +

+
+
gauss
+

Select Gaussian rescaling algorithm. +

+
+
sinc
+

Select sinc rescaling algorithm. +

+
+
lanczos
+

Select lanczos rescaling algorithm. +

+
+
spline
+

Select natural bicubic spline rescaling algorithm. +

+
+
print_info
+

Enable printing/debug logging. +

+
+
accurate_rnd
+

Enable accurate rounding. +

+
+
full_chroma_int
+

Enable full chroma interpolation. +

+
+
full_chroma_inp
+

Select full chroma input. +

+
+
bitexact
+

Enable bitexact output. +

+
+ +
+
srcw
+

Set source width. +

+
+
srch
+

Set source height. +

+
+
dstw
+

Set destination width. +

+
+
dsth
+

Set destination height. +

+
+
src_format
+

Set source pixel format (must be expressed as an integer). +

+
+
dst_format
+

Set destination pixel format (must be expressed as an integer). +

+
+
src_range
+

Select source range. +

+
+
dst_range
+

Select destination range. +

+
+
param0, param1
+

Set scaling algorithm parameters. The specified values are specific of +some scaling algorithms and ignored by others. The specified values +are floating point number values. +

+
+
sws_dither
+

Set the dithering algorithm. Accepts one of the following +values. Default value is ‘auto’. +

+
+
auto
+

automatic choice +

+
+
none
+

no dithering +

+
+
bayer
+

bayer dither +

+
+
ed
+

error diffusion dither +

+
+ +
+
+ + + +

3. See Also

+ +

ffmpeg, ffplay, ffprobe, ffserver, +libswscale +

+ + +

4. Authors

+ +

The FFmpeg developers. +

+

For details about the authorship, see the Git history of the project +(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command +git log in the FFmpeg source directory, or browsing the +online repository at http://source.ffmpeg.org. +

+

Maintainers for the specific components are listed in the file +‘MAINTAINERS’ in the source code tree. +

+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/ffmpeg-utils.html b/dependencies64/ffmpeg/doc/ffmpeg-utils.html new file mode 100644 index 000000000..08840fce1 --- /dev/null +++ b/dependencies64/ffmpeg/doc/ffmpeg-utils.html @@ -0,0 +1,1477 @@ + + + + + +FFmpeg documentation : FFmpeg Utilities + + + + + + + + + + +
+
+ + +

FFmpeg Utilities Documentation

+ + +

Table of Contents

+ + + +

1. Description

+ +

This document describes some generic features and utilities provided +by the libavutil library. +

+ + +

2. Syntax

+ +

This section documents the syntax and formats employed by the FFmpeg +libraries and tools. +

+

+

+

2.1 Quoting and escaping

+ +

FFmpeg adopts the following quoting and escaping mechanism, unless +explicitly specified. The following rules are applied: +

+
    +
  • +' and \ are special characters (respectively used for +quoting and escaping). In addition to them, there might be other +special characters depending on the specific syntax where the escaping +and quoting are employed. + +
  • +A special character is escaped by prefixing it with a ’\’. + +
  • +All characters enclosed between '' are included literally in the +parsed string. The quote character ' itself cannot be quoted, +so you may need to close the quote and escape it. + 
  • +Leading and trailing whitespaces, unless escaped or quoted, are +removed from the parsed string. +
+ +

Note that you may need to add a second level of escaping when using +the command line or a script, which depends on the syntax of the +adopted shell language. +

+

The function av_get_token defined in +‘libavutil/avstring.h’ can be used to parse a token quoted or +escaped according to the rules defined above. +

+

The tool ‘tools/ffescape’ in the FFmpeg source tree can be used +to automatically quote or escape a string in a script. +

+ +

2.1.1 Examples

+ +
    +
  • +Escape the string Crime d'Amour containing the ' special +character: +
     
    Crime d\'Amour
    +
    + +
  • +The string above contains a quote, so the ' needs to be escaped +when quoting it: +
     
    'Crime d'\''Amour'
    +
    + +
  • +Include leading or trailing whitespaces using quoting: +
     
    '  this string starts and ends with whitespaces  '
    +
    + +
  • +Escaping and quoting can be mixed together: +
     
    ' The string '\'string\'' is a string '
    +
    + +
  • +To include a literal \ you can use either escaping or quoting: +
     
    'c:\foo' can be written as c:\\foo
    +
    +
+ +

+

+

2.2 Date

+ +

The accepted syntax is: +

 
[(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH:MM:SS[.m...]]])|(HHMMSS[.m...]]]))[Z]
+now
+
+ +

If the value is "now" it takes the current time. +

+

Time is local time unless Z is appended, in which case it is +interpreted as UTC. +If the year-month-day part is not specified it takes the current +year-month-day. +

+

+

+

2.3 Time duration

+ +

There are two accepted syntaxes for expressing time duration. +

+
 
[-][HH:]MM:SS[.m...]
+
+ +

HH expresses the number of hours, MM the number of minutes +for a maximum of 2 digits, and SS the number of seconds for a +maximum of 2 digits. The m at the end expresses decimal value for +SS. +

+

or +

+
 
[-]S+[.m...]
+
+ +

S expresses the number of seconds, with the optional decimal part +m. +

+

In both expressions, the optional ‘-’ indicates negative duration. +

+ +

2.3.1 Examples

+ +

The following examples are all valid time duration: +

+
+
55
+

55 seconds +

+
+
12:03:45
+

12 hours, 03 minutes and 45 seconds +

+
+
23.189
+

23.189 seconds +

+
+ +

+

+

2.4 Video size

+

Specify the size of the sourced video, it may be a string of the form +widthxheight, or the name of a size abbreviation. +

+

The following abbreviations are recognized: +

+
ntsc
+

720x480 +

+
pal
+

720x576 +

+
qntsc
+

352x240 +

+
qpal
+

352x288 +

+
sntsc
+

640x480 +

+
spal
+

768x576 +

+
film
+

352x240 +

+
ntsc-film
+

352x240 +

+
sqcif
+

128x96 +

+
qcif
+

176x144 +

+
cif
+

352x288 +

+
4cif
+

704x576 +

+
16cif
+

1408x1152 +

+
qqvga
+

160x120 +

+
qvga
+

320x240 +

+
vga
+

640x480 +

+
svga
+

800x600 +

+
xga
+

1024x768 +

+
uxga
+

1600x1200 +

+
qxga
+

2048x1536 +

+
sxga
+

1280x1024 +

+
qsxga
+

2560x2048 +

+
hsxga
+

5120x4096 +

+
wvga
+

852x480 +

+
wxga
+

1366x768 +

+
wsxga
+

1600x1024 +

+
wuxga
+

1920x1200 +

+
woxga
+

2560x1600 +

+
wqsxga
+

3200x2048 +

+
wquxga
+

3840x2400 +

+
whsxga
+

6400x4096 +

+
whuxga
+

7680x4800 +

+
cga
+

320x200 +

+
ega
+

640x350 +

+
hd480
+

852x480 +

+
hd720
+

1280x720 +

+
hd1080
+

1920x1080 +

+
2k
+

2048x1080 +

+
2kflat
+

1998x1080 +

+
2kscope
+

2048x858 +

+
4k
+

4096x2160 +

+
4kflat
+

3996x2160 +

+
4kscope
+

4096x1716 +

+
nhd
+

640x360 +

+
hqvga
+

240x160 +

+
wqvga
+

400x240 +

+
fwqvga
+

432x240 +

+
hvga
+

480x320 +

+
qhd
+

960x540 +

+
+ +

+

+

2.5 Video rate

+ +

Specify the frame rate of a video, expressed as the number of frames +generated per second. It has to be a string in the format +frame_rate_num/frame_rate_den, an integer number, a float +number or a valid video frame rate abbreviation. +

+

The following abbreviations are recognized: +

+
ntsc
+

30000/1001 +

+
pal
+

25/1 +

+
qntsc
+

30000/1001 +

+
qpal
+

25/1 +

+
sntsc
+

30000/1001 +

+
spal
+

25/1 +

+
film
+

24/1 +

+
ntsc-film
+

24000/1001 +

+
+ +

+

+

2.6 Ratio

+ +

A ratio can be expressed as an expression, or in the form +numerator:denominator. +

+

Note that a ratio with infinite (1/0) or negative value is +considered valid, so you should check on the returned value if you +want to exclude those values. +

+

The undefined value can be expressed using the "0:0" string. +

+

+

+

2.7 Color

+ +

It can be the name of a color as defined below (case insensitive match) or a +[0x|#]RRGGBB[AA] sequence, possibly followed by @ and a string +representing the alpha component. +

+

The alpha component may be a string composed by "0x" followed by an +hexadecimal number or a decimal number between 0.0 and 1.0, which +represents the opacity value (‘0x00’ or ‘0.0’ means completely +transparent, ‘0xff’ or ‘1.0’ completely opaque). If the alpha +component is not specified then ‘0xff’ is assumed. +

+

The string ‘random’ will result in a random color. +

+

The following names of colors are recognized: +

+
AliceBlue
+

0xF0F8FF +

+
AntiqueWhite
+

0xFAEBD7 +

+
Aqua
+

0x00FFFF +

+
Aquamarine
+

0x7FFFD4 +

+
Azure
+

0xF0FFFF +

+
Beige
+

0xF5F5DC +

+
Bisque
+

0xFFE4C4 +

+
Black
+

0x000000 +

+
BlanchedAlmond
+

0xFFEBCD +

+
Blue
+

0x0000FF +

+
BlueViolet
+

0x8A2BE2 +

+
Brown
+

0xA52A2A +

+
BurlyWood
+

0xDEB887 +

+
CadetBlue
+

0x5F9EA0 +

+
Chartreuse
+

0x7FFF00 +

+
Chocolate
+

0xD2691E +

+
Coral
+

0xFF7F50 +

+
CornflowerBlue
+

0x6495ED +

+
Cornsilk
+

0xFFF8DC +

+
Crimson
+

0xDC143C +

+
Cyan
+

0x00FFFF +

+
DarkBlue
+

0x00008B +

+
DarkCyan
+

0x008B8B +

+
DarkGoldenRod
+

0xB8860B +

+
DarkGray
+

0xA9A9A9 +

+
DarkGreen
+

0x006400 +

+
DarkKhaki
+

0xBDB76B +

+
DarkMagenta
+

0x8B008B +

+
DarkOliveGreen
+

0x556B2F +

+
Darkorange
+

0xFF8C00 +

+
DarkOrchid
+

0x9932CC +

+
DarkRed
+

0x8B0000 +

+
DarkSalmon
+

0xE9967A +

+
DarkSeaGreen
+

0x8FBC8F +

+
DarkSlateBlue
+

0x483D8B +

+
DarkSlateGray
+

0x2F4F4F +

+
DarkTurquoise
+

0x00CED1 +

+
DarkViolet
+

0x9400D3 +

+
DeepPink
+

0xFF1493 +

+
DeepSkyBlue
+

0x00BFFF +

+
DimGray
+

0x696969 +

+
DodgerBlue
+

0x1E90FF +

+
FireBrick
+

0xB22222 +

+
FloralWhite
+

0xFFFAF0 +

+
ForestGreen
+

0x228B22 +

+
Fuchsia
+

0xFF00FF +

+
Gainsboro
+

0xDCDCDC +

+
GhostWhite
+

0xF8F8FF +

+
Gold
+

0xFFD700 +

+
GoldenRod
+

0xDAA520 +

+
Gray
+

0x808080 +

+
Green
+

0x008000 +

+
GreenYellow
+

0xADFF2F +

+
HoneyDew
+

0xF0FFF0 +

+
HotPink
+

0xFF69B4 +

+
IndianRed
+

0xCD5C5C +

+
Indigo
+

0x4B0082 +

+
Ivory
+

0xFFFFF0 +

+
Khaki
+

0xF0E68C +

+
Lavender
+

0xE6E6FA +

+
LavenderBlush
+

0xFFF0F5 +

+
LawnGreen
+

0x7CFC00 +

+
LemonChiffon
+

0xFFFACD +

+
LightBlue
+

0xADD8E6 +

+
LightCoral
+

0xF08080 +

+
LightCyan
+

0xE0FFFF +

+
LightGoldenRodYellow
+

0xFAFAD2 +

+
LightGreen
+

0x90EE90 +

+
LightGrey
+

0xD3D3D3 +

+
LightPink
+

0xFFB6C1 +

+
LightSalmon
+

0xFFA07A +

+
LightSeaGreen
+

0x20B2AA +

+
LightSkyBlue
+

0x87CEFA +

+
LightSlateGray
+

0x778899 +

+
LightSteelBlue
+

0xB0C4DE +

+
LightYellow
+

0xFFFFE0 +

+
Lime
+

0x00FF00 +

+
LimeGreen
+

0x32CD32 +

+
Linen
+

0xFAF0E6 +

+
Magenta
+

0xFF00FF +

+
Maroon
+

0x800000 +

+
MediumAquaMarine
+

0x66CDAA +

+
MediumBlue
+

0x0000CD +

+
MediumOrchid
+

0xBA55D3 +

+
MediumPurple
+

0x9370D8 +

+
MediumSeaGreen
+

0x3CB371 +

+
MediumSlateBlue
+

0x7B68EE +

+
MediumSpringGreen
+

0x00FA9A +

+
MediumTurquoise
+

0x48D1CC +

+
MediumVioletRed
+

0xC71585 +

+
MidnightBlue
+

0x191970 +

+
MintCream
+

0xF5FFFA +

+
MistyRose
+

0xFFE4E1 +

+
Moccasin
+

0xFFE4B5 +

+
NavajoWhite
+

0xFFDEAD +

+
Navy
+

0x000080 +

+
OldLace
+

0xFDF5E6 +

+
Olive
+

0x808000 +

+
OliveDrab
+

0x6B8E23 +

+
Orange
+

0xFFA500 +

+
OrangeRed
+

0xFF4500 +

+
Orchid
+

0xDA70D6 +

+
PaleGoldenRod
+

0xEEE8AA +

+
PaleGreen
+

0x98FB98 +

+
PaleTurquoise
+

0xAFEEEE +

+
PaleVioletRed
+

0xD87093 +

+
PapayaWhip
+

0xFFEFD5 +

+
PeachPuff
+

0xFFDAB9 +

+
Peru
+

0xCD853F +

+
Pink
+

0xFFC0CB +

+
Plum
+

0xDDA0DD +

+
PowderBlue
+

0xB0E0E6 +

+
Purple
+

0x800080 +

+
Red
+

0xFF0000 +

+
RosyBrown
+

0xBC8F8F +

+
RoyalBlue
+

0x4169E1 +

+
SaddleBrown
+

0x8B4513 +

+
Salmon
+

0xFA8072 +

+
SandyBrown
+

0xF4A460 +

+
SeaGreen
+

0x2E8B57 +

+
SeaShell
+

0xFFF5EE +

+
Sienna
+

0xA0522D +

+
Silver
+

0xC0C0C0 +

+
SkyBlue
+

0x87CEEB +

+
SlateBlue
+

0x6A5ACD +

+
SlateGray
+

0x708090 +

+
Snow
+

0xFFFAFA +

+
SpringGreen
+

0x00FF7F +

+
SteelBlue
+

0x4682B4 +

+
Tan
+

0xD2B48C +

+
Teal
+

0x008080 +

+
Thistle
+

0xD8BFD8 +

+
Tomato
+

0xFF6347 +

+
Turquoise
+

0x40E0D0 +

+
Violet
+

0xEE82EE +

+
Wheat
+

0xF5DEB3 +

+
White
+

0xFFFFFF +

+
WhiteSmoke
+

0xF5F5F5 +

+
Yellow
+

0xFFFF00 +

+
YellowGreen
+

0x9ACD32 +

+
+ +

+

+

2.8 Channel Layout

+ +

A channel layout specifies the spatial disposition of the channels in +a multi-channel audio stream. To specify a channel layout, FFmpeg +makes use of a special syntax. +

+

Individual channels are identified by an id, as given by the table +below: +

+
FL
+

front left +

+
FR
+

front right +

+
FC
+

front center +

+
LFE
+

low frequency +

+
BL
+

back left +

+
BR
+

back right +

+
FLC
+

front left-of-center +

+
FRC
+

front right-of-center +

+
BC
+

back center +

+
SL
+

side left +

+
SR
+

side right +

+
TC
+

top center +

+
TFL
+

top front left +

+
TFC
+

top front center +

+
TFR
+

top front right +

+
TBL
+

top back left +

+
TBC
+

top back center +

+
TBR
+

top back right +

+
DL
+

downmix left +

+
DR
+

downmix right +

+
WL
+

wide left +

+
WR
+

wide right +

+
SDL
+

surround direct left +

+
SDR
+

surround direct right +

+
LFE2
+

low frequency 2 +

+
+ +

Standard channel layout compositions can be specified by using the +following identifiers: +

+
mono
+

FC +

+
stereo
+

FL+FR +

+
2.1
+

FL+FR+LFE +

+
3.0
+

FL+FR+FC +

+
3.0(back)
+

FL+FR+BC +

+
4.0
+

FL+FR+FC+BC +

+
quad
+

FL+FR+BL+BR +

+
quad(side)
+

FL+FR+SL+SR +

+
3.1
+

FL+FR+FC+LFE +

+
5.0
+

FL+FR+FC+BL+BR +

+
5.0(side)
+

FL+FR+FC+SL+SR +

+
4.1
+

FL+FR+FC+LFE+BC +

+
5.1
+

FL+FR+FC+LFE+BL+BR +

+
5.1(side)
+

FL+FR+FC+LFE+SL+SR +

+
6.0
+

FL+FR+FC+BC+SL+SR +

+
6.0(front)
+

FL+FR+FLC+FRC+SL+SR +

+
hexagonal
+

FL+FR+FC+BL+BR+BC +

+
6.1
+

FL+FR+FC+LFE+BC+SL+SR +

+
6.1(back)
+

FL+FR+FC+LFE+BL+BR+BC +

+
6.1(front)
+

FL+FR+LFE+FLC+FRC+SL+SR +

+
7.0
+

FL+FR+FC+BL+BR+SL+SR +

+
7.0(front)
+

FL+FR+FC+FLC+FRC+SL+SR +

+
7.1
+

FL+FR+FC+LFE+BL+BR+SL+SR +

+
7.1(wide)
+

FL+FR+FC+LFE+BL+BR+FLC+FRC +

+
7.1(wide-side)
+

FL+FR+FC+LFE+FLC+FRC+SL+SR +

+
octagonal
+

FL+FR+FC+BL+BR+BC+SL+SR +

+
downmix
+

DL+DR +

+
+ +

A custom channel layout can be specified as a sequence of terms, separated by +’+’ or ’|’. Each term can be: +

    +
  • +the name of a standard channel layout (e.g. ‘mono’, +‘stereo’, ‘4.0’, ‘quad’, ‘5.0’, etc.) + +
  • +the name of a single channel (e.g. ‘FL’, ‘FR’, ‘FC’, ‘LFE’, etc.) + +
  • +a number of channels, in decimal, optionally followed by ’c’, yielding +the default channel layout for that number of channels (see the +function av_get_default_channel_layout) + +
  • +a channel layout mask, in hexadecimal starting with "0x" (see the +AV_CH_* macros in ‘libavutil/channel_layout.h’. +
+ +

Starting from libavutil version 53 the trailing character "c" to +specify a number of channels will be required, while a channel layout +mask could also be specified as a decimal number (if and only if not +followed by "c"). +

+

See also the function av_get_channel_layout defined in +‘libavutil/channel_layout.h’. +

+ +

3. Expression Evaluation

+ +

When evaluating an arithmetic expression, FFmpeg uses an internal +formula evaluator, implemented through the ‘libavutil/eval.h’ +interface. +

+

An expression may contain unary, binary operators, constants, and +functions. +

+

Two expressions expr1 and expr2 can be combined to form +another expression "expr1;expr2". +expr1 and expr2 are evaluated in turn, and the new +expression evaluates to the value of expr2. +

+

The following binary operators are available: +, -, +*, /, ^. +

+

The following unary operators are available: +, -. +

+

The following functions are available: +

+
abs(x)
+

Compute absolute value of x. +

+
+
acos(x)
+

Compute arccosine of x. +

+
+
asin(x)
+

Compute arcsine of x. +

+
+
atan(x)
+

Compute arctangent of x. +

+
+
between(x, min, max)
+

Return 1 if x is greater than or equal to min and lesser than or +equal to max, 0 otherwise. +

+
+
bitand(x, y)
+
bitor(x, y)
+

Compute bitwise and/or operation on x and y. +

+

The results of the evaluation of x and y are converted to +integers before executing the bitwise operation. +

+

Note that both the conversion to integer and the conversion back to +floating point can lose precision. Beware of unexpected results for +large numbers (usually 2^53 and larger). +

+
+
ceil(expr)
+

Round the value of expression expr upwards to the nearest +integer. For example, "ceil(1.5)" is "2.0". +

+
+
cos(x)
+

Compute cosine of x. +

+
+
cosh(x)
+

Compute hyperbolic cosine of x. +

+
+
eq(x, y)
+

Return 1 if x and y are equivalent, 0 otherwise. +

+
+
exp(x)
+

Compute exponential of x (with base e, the Euler’s number). +

+
+
floor(expr)
+

Round the value of expression expr downwards to the nearest +integer. For example, "floor(-1.5)" is "-2.0". +

+
+
gauss(x)
+

Compute Gauss function of x, corresponding to +exp(-x*x/2) / sqrt(2*PI). +

+
+
gcd(x, y)
+

Return the greatest common divisor of x and y. If both x and +y are 0 or either or both are less than zero then behavior is undefined. +

+
+
gt(x, y)
+

Return 1 if x is greater than y, 0 otherwise. +

+
+
gte(x, y)
+

Return 1 if x is greater than or equal to y, 0 otherwise. +

+
+
hypot(x, y)
+

This function is similar to the C function with the same name; it returns +"sqrt(x*x + y*y)", the length of the hypotenuse of a +right triangle with sides of length x and y, or the distance of the +point (x, y) from the origin. +

+
+
if(x, y)
+

Evaluate x, and if the result is non-zero return the result of +the evaluation of y, return 0 otherwise. +

+
+
if(x, y, z)
+

Evaluate x, and if the result is non-zero return the evaluation +result of y, otherwise the evaluation result of z. +

+
+
ifnot(x, y)
+

Evaluate x, and if the result is zero return the result of the +evaluation of y, return 0 otherwise. +

+
+
ifnot(x, y, z)
+

Evaluate x, and if the result is zero return the evaluation +result of y, otherwise the evaluation result of z. +

+
+
isinf(x)
+

Return 1.0 if x is +/-INFINITY, 0.0 otherwise. +

+
+
isnan(x)
+

Return 1.0 if x is NAN, 0.0 otherwise. +

+
+
ld(var)
+

Allow to load the value of the internal variable with number +var, which was previously stored with st(var, expr). +The function returns the loaded value. +

+
+
log(x)
+

Compute natural logarithm of x. +

+
+
lt(x, y)
+

Return 1 if x is lesser than y, 0 otherwise. +

+
+
lte(x, y)
+

Return 1 if x is lesser than or equal to y, 0 otherwise. +

+
+
max(x, y)
+

Return the maximum between x and y. +

+
+
min(x, y)
+

Return the minimum between x and y. +

+
+
mod(x, y)
+

Compute the remainder of division of x by y. +

+
+
not(expr)
+

Return 1.0 if expr is zero, 0.0 otherwise. +

+
+
pow(x, y)
+

Compute the power of x elevated y, it is equivalent to +"(x)^(y)". +

+
+
print(t)
+
print(t, l)
+

Print the value of expression t with loglevel l. If +l is not specified then a default log level is used. +Returns the value of the expression printed. +

+

Prints t with loglevel l +

+
+
random(x)
+

Return a pseudo random value between 0.0 and 1.0. x is the index of the +internal variable which will be used to save the seed/state. +

+
+
root(expr, max)
+

Find an input value for which the function represented by expr +with argument ld(0) is 0 in the interval 0..max. +

+

The expression in expr must denote a continuous function or the +result is undefined. +

+

ld(0) is used to represent the function input value, which means +that the given expression will be evaluated multiple times with +various input values that the expression can access through +ld(0). When the expression evaluates to 0 then the +corresponding input value will be returned. +

+
+
sin(x)
+

Compute sine of x. +

+
+
sinh(x)
+

Compute hyperbolic sine of x. +

+
+
sqrt(expr)
+

Compute the square root of expr. This is equivalent to +"(expr)^.5". +

+
+
squish(x)
+

Compute expression 1/(1 + exp(4*x)). +

+
+
st(var, expr)
+

Allow to store the value of the expression expr in an internal +variable. var specifies the number of the variable where to +store the value, and it is a value ranging from 0 to 9. The function +returns the value stored in the internal variable. +Note, Variables are currently not shared between expressions. +

+
+
tan(x)
+

Compute tangent of x. +

+
+
tanh(x)
+

Compute hyperbolic tangent of x. +

+
+
taylor(expr, x)
+
taylor(expr, x, id)
+

Evaluate a Taylor series at x, given an expression representing +the ld(id)-th derivative of a function at 0. +

+

When the series does not converge the result is undefined. +

+

ld(id) is used to represent the derivative order in expr, +which means that the given expression will be evaluated multiple times +with various input values that the expression can access through +ld(id). If id is not specified then 0 is assumed. +

+

Note, when you have the derivatives at y instead of 0, +taylor(expr, x-y) can be used. +

+
+
time(0)
+

Return the current (wallclock) time in seconds. +

+
+
trunc(expr)
+

Round the value of expression expr towards zero to the nearest +integer. For example, "trunc(-1.5)" is "-1.0". +

+
+
while(cond, expr)
+

Evaluate expression expr while the expression cond is +non-zero, and returns the value of the last expr evaluation, or +NAN if cond was always false. +

+
+ +

The following constants are available: +

+
PI
+

area of the unit disc, approximately 3.14 +

+
E
+

exp(1) (Euler’s number), approximately 2.718 +

+
PHI
+

golden ratio (1+sqrt(5))/2, approximately 1.618 +

+
+ +

Assuming that an expression is considered "true" if it has a non-zero +value, note that: +

+

* works like AND +

+

+ works like OR +

+

For example the construct: +

 
if (A AND B) then C
+
+

is equivalent to: +

 
if(A*B, C)
+
+ +

In your C code, you can extend the list of unary and binary functions, +and define recognized constants, so that they are available for your +expressions. +

+

The evaluator also recognizes the International System unit prefixes. +If ’i’ is appended after the prefix, binary prefixes are used, which +are based on powers of 1024 instead of powers of 1000. +The ’B’ postfix multiplies the value by 8, and can be appended after a +unit prefix or used alone. This allows using for example ’KB’, ’MiB’, +’G’ and ’B’ as number postfix. +

+

The list of available International System prefixes follows, with +indication of the corresponding powers of 10 and of 2. +

+
y
+

10^-24 / 2^-80 +

+
z
+

10^-21 / 2^-70 +

+
a
+

10^-18 / 2^-60 +

+
f
+

10^-15 / 2^-50 +

+
p
+

10^-12 / 2^-40 +

+
n
+

10^-9 / 2^-30 +

+
u
+

10^-6 / 2^-20 +

+
m
+

10^-3 / 2^-10 +

+
c
+

10^-2 +

+
d
+

10^-1 +

+
h
+

10^2 +

+
k
+

10^3 / 2^10 +

+
K
+

10^3 / 2^10 +

+
M
+

10^6 / 2^20 +

+
G
+

10^9 / 2^30 +

+
T
+

10^12 / 2^40 +

+
P
+

10^15 / 2^50 +

+
E
+

10^18 / 2^60 +

+
Z
+

10^21 / 2^70 +

+
Y
+

10^24 / 2^80 +

+
+ + + +

4. OpenCL Options

+ +

When FFmpeg is configured with --enable-opencl, it is possible +to set the options for the global OpenCL context. +

+

The list of supported options follows: +

+
+
build_options
+

Set build options used to compile the registered kernels. +

+

See reference "OpenCL Specification Version: 1.2 chapter 5.6.4". +

+
+
platform_idx
+

Select the index of the platform to run OpenCL code. +

+

The specified index must be one of the indexes in the device list +which can be obtained with ffmpeg -opencl_bench or av_opencl_get_device_list(). +

+
+
device_idx
+

Select the index of the device used to run OpenCL code. +

+

The specified index must be one of the indexes in the device list which +can be obtained with ffmpeg -opencl_bench or av_opencl_get_device_list(). +

+
+
+ + + +

5. See Also

+ +

ffmpeg, ffplay, ffprobe, ffserver, +libavutil +

+ + +

6. Authors

+ +

The FFmpeg developers. +

+

For details about the authorship, see the Git history of the project +(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command +git log in the FFmpeg source directory, or browsing the +online repository at http://source.ffmpeg.org. +

+

Maintainers for the specific components are listed in the file +‘MAINTAINERS’ in the source code tree. +

+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/ffmpeg.html b/dependencies64/ffmpeg/doc/ffmpeg.html new file mode 100644 index 000000000..95cbfb1b6 --- /dev/null +++ b/dependencies64/ffmpeg/doc/ffmpeg.html @@ -0,0 +1,1941 @@ + + + + + +FFmpeg documentation : ffmpeg + + + + + + + + + + +
+
+ + +

ffmpeg Documentation

+ + +

Table of Contents

+ + + +

1. Synopsis

+ +

ffmpeg [global_options] {[input_file_options] -i ‘input_file’} ... {[output_file_options] ‘output_file’} ... +

+ +

2. Description

+ +

ffmpeg is a very fast video and audio converter that can also grab from +a live audio/video source. It can also convert between arbitrary sample +rates and resize video on the fly with a high quality polyphase filter. +

+

ffmpeg reads from an arbitrary number of input "files" (which can be regular +files, pipes, network streams, grabbing devices, etc.), specified by the +-i option, and writes to an arbitrary number of output "files", which are +specified by a plain output filename. Anything found on the command line which +cannot be interpreted as an option is considered to be an output filename. +

+

Each input or output file can, in principle, contain any number of streams of +different types (video/audio/subtitle/attachment/data). The allowed number and/or +types of streams may be limited by the container format. Selecting which +streams from which inputs will go into which output is either done automatically +or with the -map option (see the Stream selection chapter). +

+

To refer to input files in options, you must use their indices (0-based). E.g. +the first input file is 0, the second is 1, etc. Similarly, streams +within a file are referred to by their indices. E.g. 2:3 refers to the +fourth stream in the third input file. Also see the Stream specifiers chapter. +

+

As a general rule, options are applied to the next specified +file. Therefore, order is important, and you can have the same +option on the command line multiple times. Each occurrence is +then applied to the next input or output file. +Exceptions from this rule are the global options (e.g. verbosity level), +which should be specified first. +

+

Do not mix input and output files – first specify all input files, then all +output files. Also do not mix options which belong to different files. All +options apply ONLY to the next input or output file and are reset between files. +

+
    +
  • +To set the video bitrate of the output file to 64 kbit/s: +
     
    ffmpeg -i input.avi -b:v 64k -bufsize 64k output.avi
    +
    + +
  • +To force the frame rate of the output file to 24 fps: +
     
    ffmpeg -i input.avi -r 24 output.avi
    +
    + +
  • +To force the frame rate of the input file (valid for raw formats only) +to 1 fps and the frame rate of the output file to 24 fps: +
     
    ffmpeg -r 1 -i input.m2v -r 24 output.avi
    +
    +
+ +

The format option may be needed for raw input files. +

+ + +

3. Detailed description

+ +

The transcoding process in ffmpeg for each output can be described by +the following diagram: +

+
 
 _______              ______________
+|       |            |              |
+| input |  demuxer   | encoded data |   decoder
+| file  | ---------> | packets      | -----+
+|_______|            |______________|      |
+                                           v
+                                       _________
+                                      |         |
+                                      | decoded |
+                                      | frames  |
+ ________             ______________  |_________|
+|        |           |              |      |
+| output | <-------- | encoded data | <----+
+| file   |   muxer   | packets      |   encoder
+|________|           |______________|
+
+
+
+ +

ffmpeg calls the libavformat library (containing demuxers) to read +input files and get packets containing encoded data from them. When there are +multiple input files, ffmpeg tries to keep them synchronized by +tracking lowest timestamp on any active input stream. +

+

Encoded packets are then passed to the decoder (unless streamcopy is selected +for the stream, see further for a description). The decoder produces +uncompressed frames (raw video/PCM audio/...) which can be processed further by +filtering (see next section). After filtering, the frames are passed to the +encoder, which encodes them and outputs encoded packets. Finally those are +passed to the muxer, which writes the encoded packets to the output file. +

+ +

3.1 Filtering

+

Before encoding, ffmpeg can process raw audio and video frames using +filters from the libavfilter library. Several chained filters form a filter +graph. ffmpeg distinguishes between two types of filtergraphs: +simple and complex. +

+ +

3.1.1 Simple filtergraphs

+

Simple filtergraphs are those that have exactly one input and output, both of +the same type. In the above diagram they can be represented by simply inserting +an additional step between decoding and encoding: +

+
 
 _________               __________              ______________
+|         |  simple     |          |            |              |
+| decoded |  fltrgrph   | filtered |  encoder   | encoded data |
+| frames  | ----------> | frames   | ---------> | packets      |
+|_________|             |__________|            |______________|
+
+
+ +

Simple filtergraphs are configured with the per-stream ‘-filter’ option +(with ‘-vf’ and ‘-af’ aliases for video and audio respectively). +A simple filtergraph for video can look for example like this: +

+
 
 _______        _____________        _______        ________
+|       |      |             |      |       |      |        |
+| input | ---> | deinterlace | ---> | scale | ---> | output |
+|_______|      |_____________|      |_______|      |________|
+
+
+ +

Note that some filters change frame properties but not frame contents. E.g. the +fps filter in the example above changes number of frames, but does not +touch the frame contents. Another example is the setpts filter, which +only sets timestamps and otherwise passes the frames unchanged. +

+ +

3.1.2 Complex filtergraphs

+

Complex filtergraphs are those which cannot be described as simply a linear +processing chain applied to one stream. This is the case, for example, when the graph has +more than one input and/or output, or when output stream type is different from +input. They can be represented with the following diagram: +

+
 
 _________
+|         |
+| input 0 |\                    __________
+|_________| \                  |          |
+             \   _________    /| output 0 |
+              \ |         |  / |__________|
+ _________     \| complex | /
+|         |     |         |/
+| input 1 |---->| filter  |\
+|_________|     |         | \   __________
+               /| graph   |  \ |          |
+              / |         |   \| output 1 |
+ _________   /  |_________|    |__________|
+|         | /
+| input 2 |/
+|_________|
+
+
+ +

Complex filtergraphs are configured with the ‘-filter_complex’ option. +Note that this option is global, since a complex filtergraph, by its nature, +cannot be unambiguously associated with a single stream or file. +

+

The ‘-lavfi’ option is equivalent to ‘-filter_complex’. +

+

A trivial example of a complex filtergraph is the overlay filter, which +has two video inputs and one video output, containing one video overlaid on top +of the other. Its audio counterpart is the amix filter. +

+ +

3.2 Stream copy

+

Stream copy is a mode selected by supplying the copy parameter to the +‘-codec’ option. It makes ffmpeg omit the decoding and encoding +step for the specified stream, so it does only demuxing and muxing. It is useful +for changing the container format or modifying container-level metadata. The +diagram above will, in this case, simplify to this: +

+
 
 _______              ______________            ________
+|       |            |              |          |        |
+| input |  demuxer   | encoded data |  muxer   | output |
+| file  | ---------> | packets      | -------> | file   |
+|_______|            |______________|          |________|
+
+
+ +

Since there is no decoding or encoding, it is very fast and there is no quality +loss. However, it might not work in some cases because of many factors. Applying +filters is obviously also impossible, since filters work on uncompressed data. +

+ + +

4. Stream selection

+ +

By default, ffmpeg includes only one stream of each type (video, audio, subtitle) +present in the input files and adds them to each output file. It picks the +"best" of each based upon the following criteria: for video, it is the stream +with the highest resolution, for audio, it is the stream with the most channels, for +subtitles, it is the first subtitle stream. In the case where several streams of +the same type rate equally, the stream with the lowest index is chosen. +

+

You can disable some of those defaults by using the -vn/-an/-sn options. For +full manual control, use the -map option, which disables the defaults just +described. +

+ + +

5. Options

+ +

All the numerical options, if not specified otherwise, accept a string +representing a number as input, which may be followed by one of the SI +unit prefixes, for example: ’K’, ’M’, or ’G’. +

+

If ’i’ is appended to the SI unit prefix, the complete prefix will be +interpreted as a unit prefix for binary multiplies, which are based on +powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit +prefix multiplies the value by 8. This allows using, for example: +’KB’, ’MiB’, ’G’ and ’B’ as number suffixes. +

+

Options which do not take arguments are boolean options, and set the +corresponding value to true. They can be set to false by prefixing +the option name with "no". For example using "-nofoo" +will set the boolean option with name "foo" to false. +

+

+

+

5.1 Stream specifiers

+

Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers +are used to precisely specify which stream(s) a given option belongs to. +

+

A stream specifier is a string generally appended to the option name and +separated from it by a colon. E.g. -codec:a:1 ac3 contains the +a:1 stream specifier, which matches the second audio stream. Therefore, it +would select the ac3 codec for the second audio stream. +

+

A stream specifier can match several streams, so that the option is applied to all +of them. E.g. the stream specifier in -b:a 128k matches all audio +streams. +

+

An empty stream specifier matches all streams. For example, -codec copy +or -codec: copy would copy all the streams without reencoding. +

+

Possible forms of stream specifiers are: +

+
stream_index
+

Matches the stream with this index. E.g. -threads:1 4 would set the +thread count for the second stream to 4. +

+
stream_type[:stream_index]
+

stream_type is one of following: ’v’ for video, ’a’ for audio, ’s’ for subtitle, +’d’ for data, and ’t’ for attachments. If stream_index is given, then it matches +stream number stream_index of this type. Otherwise, it matches all +streams of this type. +

+
p:program_id[:stream_index]
+

If stream_index is given, then it matches the stream with number stream_index +in the program with the id program_id. Otherwise, it matches all streams in the +program. +

+
#stream_id
+

Matches the stream by a format-specific ID. +

+
+ + +

5.2 Generic options

+ +

These options are shared amongst the ff* tools. +

+
+
-L
+

Show license. +

+
+
-h, -?, -help, --help [arg]
+

Show help. An optional parameter may be specified to print help about a specific +item. If no argument is specified, only basic (non advanced) tool +options are shown. +

+

Possible values of arg are: +

+
long
+

Print advanced tool options in addition to the basic tool options. +

+
+
full
+

Print complete list of options, including shared and private options +for encoders, decoders, demuxers, muxers, filters, etc. +

+
+
decoder=decoder_name
+

Print detailed information about the decoder named decoder_name. Use the +‘-decoders’ option to get a list of all decoders. +

+
+
encoder=encoder_name
+

Print detailed information about the encoder named encoder_name. Use the +‘-encoders’ option to get a list of all encoders. +

+
+
demuxer=demuxer_name
+

Print detailed information about the demuxer named demuxer_name. Use the +‘-formats’ option to get a list of all demuxers and muxers. +

+
+
muxer=muxer_name
+

Print detailed information about the muxer named muxer_name. Use the +‘-formats’ option to get a list of all muxers and demuxers. +

+
+
filter=filter_name
+

Print detailed information about the filter named filter_name. Use the '-filters' option to get a list of all filters.

+
+ +
+
-version
+

Show version. +

+
+
-formats
+

Show available formats. +

+
+
-codecs
+

Show all codecs known to libavcodec. +

+

Note that the term ’codec’ is used throughout this documentation as a shortcut +for what is more correctly called a media bitstream format. +

+
+
-decoders
+

Show available decoders. +

+
+
-encoders
+

Show all available encoders. +

+
+
-bsfs
+

Show available bitstream filters. +

+
+
-protocols
+

Show available protocols. +

+
+
-filters
+

Show available libavfilter filters. +

+
+
-pix_fmts
+

Show available pixel formats. +

+
+
-sample_fmts
+

Show available sample formats. +

+
+
-layouts
+

Show channel names and standard channel layouts. +

+
+
-colors
+

Show recognized color names. +

+
+
-loglevel [repeat+]loglevel | -v [repeat+]loglevel
+

Set the logging level used by the library. +Adding "repeat+" indicates that repeated log output should not be compressed +to the first line and the "Last message repeated n times" line will be +omitted. "repeat" can also be used alone. +If "repeat" is used alone, and with no prior loglevel set, the default +loglevel will be used. If multiple loglevel parameters are given, using +’repeat’ will not change the loglevel. +loglevel is a number or a string containing one of the following values: +

+
quiet
+

Show nothing at all; be silent. +

+
panic
+

Only show fatal errors which could lead the process to crash, such as an assert failure. This is not currently used for anything.

+
fatal
+

Only show fatal errors. These are errors after which the process absolutely cannot continue.

+
error
+

Show all errors, including ones which can be recovered from. +

+
warning
+

Show all warnings and errors. Any message related to possibly +incorrect or unexpected events will be shown. +

+
info
+

Show informative messages during processing. This is in addition to +warnings and errors. This is the default value. +

+
verbose
+

Same as info, except more verbose. +

+
debug
+

Show everything, including debugging information. +

+
+ +

By default the program logs to stderr, if coloring is supported by the +terminal, colors are used to mark errors and warnings. Log coloring +can be disabled setting the environment variable +AV_LOG_FORCE_NOCOLOR or NO_COLOR, or can be forced setting +the environment variable AV_LOG_FORCE_COLOR. +The use of the environment variable NO_COLOR is deprecated and +will be dropped in a following FFmpeg version. +

+
+
-report
+

Dump full command line and console output to a file named +program-YYYYMMDD-HHMMSS.log in the current +directory. +This file can be useful for bug reports. +It also implies -loglevel verbose. +

+

Setting the environment variable FFREPORT to any value has the +same effect. If the value is a ’:’-separated key=value sequence, these +options will affect the report; options values must be escaped if they +contain special characters or the options delimiter ’:’ (see the +“Quoting and escaping” section in the ffmpeg-utils manual). The +following option is recognized: +

+
file
+

set the file name to use for the report; %p is expanded to the name +of the program, %t is expanded to a timestamp, %% is expanded +to a plain % +

+
+ +

Errors in parsing the environment variable are not fatal, and will not +appear in the report. +

+
+
-hide_banner
+

Suppress printing banner. +

+

All FFmpeg tools will normally show a copyright notice, build options +and library versions. This option can be used to suppress printing +this information. +

+
+
-cpuflags flags (global)
+

Allows setting and clearing cpu flags. This option is intended for testing. Do not use it unless you know what you're doing.

 
ffmpeg -cpuflags -sse+mmx ...
+ffmpeg -cpuflags mmx ...
+ffmpeg -cpuflags 0 ...
+
+

Possible flags for this option are: +

+
x86
+
+
mmx
+
mmxext
+
sse
+
sse2
+
sse2slow
+
sse3
+
sse3slow
+
ssse3
+
atom
+
sse4.1
+
sse4.2
+
avx
+
xop
+
fma4
+
3dnow
+
3dnowext
+
cmov
+
+
+
ARM
+
+
armv5te
+
armv6
+
armv6t2
+
vfp
+
vfpv3
+
neon
+
+
+
PowerPC
+
+
altivec
+
+
+
Specific Processors
+
+
pentium2
+
pentium3
+
pentium4
+
k6
+
k62
+
athlon
+
athlonxp
+
k8
+
+
+
+ +
+
-opencl_bench
+

Benchmark all available OpenCL devices and show the results. This option +is only available when FFmpeg has been compiled with --enable-opencl. +

+
+
-opencl_options options (global)
+

Set OpenCL environment options. This option is only available when +FFmpeg has been compiled with --enable-opencl. +

+

options must be a list of key=value option pairs +separated by ’:’. See the “OpenCL Options” section in the +ffmpeg-utils manual for the list of supported options. +

+
+ + +

5.3 AVOptions

+ +

These options are provided directly by the libavformat, libavdevice and +libavcodec libraries. To see the list of available AVOptions, use the +‘-help’ option. They are separated into two categories: +

+
generic
+

These options can be set for any container, codec or device. Generic options +are listed under AVFormatContext options for containers/devices and under +AVCodecContext options for codecs. +

+
private
+

These options are specific to the given container, device or codec. Private +options are listed under their corresponding containers/devices/codecs. +

+
+ +

For example to write an ID3v2.3 header instead of a default ID3v2.4 to +an MP3 file, use the ‘id3v2_version’ private option of the MP3 +muxer: +

 
ffmpeg -i input.flac -id3v2_version 3 out.mp3
+
+ +

All codec AVOptions are per-stream, and thus a stream specifier +should be attached to them. +

+

Note: the ‘-nooption’ syntax cannot be used for boolean +AVOptions, use ‘-option 0’/‘-option 1’. +

+

Note: the old undocumented way of specifying per-stream AVOptions by +prepending v/a/s to the options name is now obsolete and will be +removed soon. +

+ +

5.4 Main options

+ +
+
-f fmt (input/output)
+

Force input or output file format. The format is normally auto detected for input +files and guessed from the file extension for output files, so this option is not +needed in most cases. +

+
+
-i filename (input)
+

input file name +

+
+
-y (global)
+

Overwrite output files without asking. +

+
+
-n (global)
+

Do not overwrite output files, and exit immediately if a specified +output file already exists. +

+
+
-c[:stream_specifier] codec (input/output,per-stream)
+
-codec[:stream_specifier] codec (input/output,per-stream)
+

Select an encoder (when used before an output file) or a decoder (when used +before an input file) for one or more streams. codec is the name of a +decoder/encoder or a special value copy (output only) to indicate that +the stream is not to be re-encoded. +

+

For example +

 
ffmpeg -i INPUT -map 0 -c:v libx264 -c:a copy OUTPUT
+
+

encodes all video streams with libx264 and copies all audio streams. +

+

For each stream, the last matching c option is applied, so +

 
ffmpeg -i INPUT -map 0 -c copy -c:v:1 libx264 -c:a:137 libvorbis OUTPUT
+
+

will copy all the streams except the second video, which will be encoded with +libx264, and the 138th audio, which will be encoded with libvorbis. +

+
+
-t duration (output)
+

Stop writing the output after its duration reaches duration. +duration may be a number in seconds, or in hh:mm:ss[.xxx] form. +

+

-to and -t are mutually exclusive and -t has priority. +

+
+
-to position (output)
+

Stop writing the output at position. +position may be a number in seconds, or in hh:mm:ss[.xxx] form. +

+

-to and -t are mutually exclusive and -t has priority. +

+
+
-fs limit_size (output)
+

Set the file size limit, expressed in bytes. +

+
+
-ss position (input/output)
+

When used as an input option (before -i), seeks in this input file to position. Note that in most formats it is not possible to seek exactly, so ffmpeg will seek to the closest seek point before position. When transcoding and '-accurate_seek' is enabled (the default), this extra segment between the seek point and position will be decoded and discarded. When doing stream copy or when '-noaccurate_seek' is used, it will be preserved.

+

When used as an output option (before an output filename), decodes but discards +input until the timestamps reach position. +

+

position may be either in seconds or in hh:mm:ss[.xxx] form. +

+
+
-itsoffset offset (input)
+

Set the input time offset. +

+

offset must be a time duration specification, +see (ffmpeg-utils)time duration syntax. +

+

The offset is added to the timestamps of the input files. Specifying +a positive offset means that the corresponding streams are delayed by +the time duration specified in offset. +

+
+
-timestamp date (output)
+

Set the recording timestamp in the container. +

+

date must be a time duration specification, +see (ffmpeg-utils)date syntax. +

+
+
-metadata[:metadata_specifier] key=value (output,per-metadata)
+

Set a metadata key/value pair. +

+

An optional metadata_specifier may be given to set metadata +on streams or chapters. See -map_metadata documentation for +details. +

+

This option overrides metadata set with -map_metadata. It is +also possible to delete metadata by using an empty value. +

+

For example, for setting the title in the output file: +

 
ffmpeg -i in.avi -metadata title="my title" out.flv
+
+ +

To set the language of the first audio stream: +

 
ffmpeg -i INPUT -metadata:s:a:1 language=eng OUTPUT
+
+ +
+
-target type (output)
+

Specify target file type (vcd, svcd, dvd, dv, +dv50). type may be prefixed with pal-, ntsc- or +film- to use the corresponding standard. All the format options +(bitrate, codecs, buffer sizes) are then set automatically. You can just type: +

+
 
ffmpeg -i myfile.avi -target vcd /tmp/vcd.mpg
+
+ +

Nevertheless you can specify additional options as long as you know +they do not conflict with the standard, as in: +

+
 
ffmpeg -i myfile.avi -target vcd -bf 2 /tmp/vcd.mpg
+
+ +
+
-dframes number (output)
+

Set the number of data frames to record. This is an alias for -frames:d. +

+
+
-frames[:stream_specifier] framecount (output,per-stream)
+

Stop writing to the stream after framecount frames. +

+
+
-q[:stream_specifier] q (output,per-stream)
+
-qscale[:stream_specifier] q (output,per-stream)
+

Use fixed quality scale (VBR). The meaning of q/qscale is +codec-dependent. +If qscale is used without a stream_specifier then it applies only +to the video stream, this is to maintain compatibility with previous behavior +and as specifying the same codec specific value to 2 different codecs that is +audio and video generally is not what is intended when no stream_specifier is +used. +

+

+

+
-filter[:stream_specifier] filtergraph (output,per-stream)
+

Create the filtergraph specified by filtergraph and use it to +filter the stream. +

+

filtergraph is a description of the filtergraph to apply to +the stream, and must have a single input and a single output of the +same type of the stream. In the filtergraph, the input is associated +to the label in, and the output to the label out. See +the ffmpeg-filters manual for more information about the filtergraph +syntax. +

+

See the -filter_complex option if you +want to create filtergraphs with multiple inputs and/or outputs. +

+
+
-filter_script[:stream_specifier] filename (output,per-stream)
+

This option is similar to ‘-filter’, the only difference is that its +argument is the name of the file from which a filtergraph description is to be +read. +

+
+
-pre[:stream_specifier] preset_name (output,per-stream)
+

Specify the preset for matching stream(s). +

+
+
-stats (global)
+

Print encoding progress/statistics. It is on by default, to explicitly +disable it you need to specify -nostats. +

+
+
-progress url (global)
+

Send program-friendly progress information to url. +

+

Progress information is written approximately every second and at the end of +the encoding process. It is made of "key=value" lines. key +consists of only alphanumeric characters. The last key of a sequence of +progress information is always "progress". +

+
+
-stdin
+

Enable interaction on standard input. On by default unless standard input is +used as an input. To explicitly disable interaction you need to specify +-nostdin. +

+

Disabling interaction on standard input is useful, for example, if +ffmpeg is in the background process group. Roughly the same result can +be achieved with ffmpeg ... < /dev/null but it requires a +shell. +

+
+
-debug_ts (global)
+

Print timestamp information. It is off by default. This option is +mostly useful for testing and debugging purposes, and the output +format may change from one version to another, so it should not be +employed by portable scripts. +

+

See also the option -fdebug ts. +

+
+
-attach filename (output)
+

Add an attachment to the output file. This is supported by a few formats +like Matroska for e.g. fonts used in rendering subtitles. Attachments +are implemented as a specific type of stream, so this option will add +a new stream to the file. It is then possible to use per-stream options +on this stream in the usual way. Attachment streams created with this +option will be created after all the other streams (i.e. those created +with -map or automatic mappings). +

+

Note that for Matroska you also have to set the mimetype metadata tag: +

 
ffmpeg -i INPUT -attach DejaVuSans.ttf -metadata:s:2 mimetype=application/x-truetype-font out.mkv
+
+

(assuming that the attachment stream will be third in the output file). +

+
+
-dump_attachment[:stream_specifier] filename (input,per-stream)
+

Extract the matching attachment stream into a file named filename. If +filename is empty, then the value of the filename metadata tag +will be used. +

+

E.g. to extract the first attachment to a file named ’out.ttf’: +

 
ffmpeg -dump_attachment:t:0 out.ttf -i INPUT
+
+

To extract all attachments to files determined by the filename tag: +

 
ffmpeg -dump_attachment:t "" -i INPUT
+
+ +

Technical note – attachments are implemented as codec extradata, so this +option can actually be used to extract extradata from any stream, not just +attachments. +

+
+
+ + +

5.5 Video Options

+ +
+
-vframes number (output)
+

Set the number of video frames to record. This is an alias for -frames:v. +

+
-r[:stream_specifier] fps (input/output,per-stream)
+

Set frame rate (Hz value, fraction or abbreviation). +

+

As an input option, ignore any timestamps stored in the file and instead +generate timestamps assuming constant frame rate fps. +

+

As an output option, duplicate or drop input frames to achieve constant output +frame rate fps. +

+
+
-s[:stream_specifier] size (input/output,per-stream)
+

Set frame size. +

+

As an input option, this is a shortcut for the ‘video_size’ private +option, recognized by some demuxers for which the frame size is either not +stored in the file or is configurable – e.g. raw video or video grabbers. +

+

As an output option, this inserts the scale video filter to the +end of the corresponding filtergraph. Please use the scale filter +directly to insert it at the beginning or some other place. +

+

The format is ‘wxh’ (default - same as source). +

+
+
-aspect[:stream_specifier] aspect (output,per-stream)
+

Set the video display aspect ratio specified by aspect. +

+

aspect can be a floating point number string, or a string of the +form num:den, where num and den are the +numerator and denominator of the aspect ratio. For example "4:3", +"16:9", "1.3333", and "1.7777" are valid argument values. +

+

If used together with ‘-vcodec copy’, it will affect the aspect ratio +stored at container level, but not the aspect ratio stored in encoded +frames, if it exists. +

+
+
-vn (output)
+

Disable video recording. +

+
+
-vcodec codec (output)
+

Set the video codec. This is an alias for -codec:v. +

+
+
-pass[:stream_specifier] n (output,per-stream)
+

Select the pass number (1 or 2). It is used to do two-pass +video encoding. The statistics of the video are recorded in the first +pass into a log file (see also the option -passlogfile), +and in the second pass that log file is used to generate the video +at the exact requested bitrate. +On pass 1, you may just deactivate audio and set output to null, +examples for Windows and Unix: +

 
ffmpeg -i foo.mov -c:v libxvid -pass 1 -an -f rawvideo -y NUL
+ffmpeg -i foo.mov -c:v libxvid -pass 1 -an -f rawvideo -y /dev/null
+
+ +
+
-passlogfile[:stream_specifier] prefix (output,per-stream)
+

Set the two-pass log file name prefix to prefix; the default file name prefix is "ffmpeg2pass". The complete file name will be 'PREFIX-N.log', where N is a number specific to the output stream.

+
+
-vf filtergraph (output)
+

Create the filtergraph specified by filtergraph and use it to +filter the stream. +

+

This is an alias for -filter:v, see the -filter option. +

+
+ + +

5.6 Advanced Video Options

+ +
+
-pix_fmt[:stream_specifier] format (input/output,per-stream)
+

Set pixel format. Use -pix_fmts to show all the supported +pixel formats. +If the selected pixel format can not be selected, ffmpeg will print a +warning and select the best pixel format supported by the encoder. +If pix_fmt is prefixed by a +, ffmpeg will exit with an error +if the requested pixel format can not be selected, and automatic conversions +inside filtergraphs are disabled. +If pix_fmt is a single +, ffmpeg selects the same pixel format +as the input (or graph output) and automatic conversions are disabled. +

+
+
-sws_flags flags (input/output)
+

Set SwScaler flags. +

+
-vdt n
+

Discard threshold. +

+
+
-rc_override[:stream_specifier] override (output,per-stream)
+

Rate control override for specific intervals, formatted as "int,int,int" +list separated with slashes. Two first values are the beginning and +end frame numbers, last one is quantizer to use if positive, or quality +factor if negative. +

+
+
-ilme
+

Force interlacing support in encoder (MPEG-2 and MPEG-4 only). +Use this option if your input file is interlaced and you want +to keep the interlaced format for minimum losses. +The alternative is to deinterlace the input stream with +‘-deinterlace’, but deinterlacing introduces losses. +

+
-psnr
+

Calculate PSNR of compressed frames. +

+
-vstats
+

Dump video coding statistics to ‘vstats_HHMMSS.log’. +

+
-vstats_file file
+

Dump video coding statistics to file. +

+
-top[:stream_specifier] n (output,per-stream)
+

top=1/bottom=0/auto=-1 field first +

+
-dc precision
+

Intra_dc_precision. +

+
-vtag fourcc/tag (output)
+

Force video tag/fourcc. This is an alias for -tag:v. +

+
-qphist (global)
+

Show QP histogram +

+
-vbsf bitstream_filter
+

Deprecated see -bsf +

+
+
-force_key_frames[:stream_specifier] time[,time...] (output,per-stream)
+
-force_key_frames[:stream_specifier] expr:expr (output,per-stream)
+

Force key frames at the specified timestamps, more precisely at the first +frames after each specified time. +

+

If the argument is prefixed with expr:, the string expr +is interpreted like an expression and is evaluated for each frame. A +key frame is forced in case the evaluation is non-zero. +

+

If one of the times is "chapters[delta]", it is expanded into +the time of the beginning of all chapters in the file, shifted by +delta, expressed as a time in seconds. +This option can be useful to ensure that a seek point is present at a +chapter mark or any other designated place in the output file. +

+

For example, to insert a key frame at 5 minutes, plus key frames 0.1 second +before the beginning of every chapter: +

 
-force_key_frames 0:05:00,chapters-0.1
+
+ +

The expression in expr can contain the following constants: +

+
n
+

the number of current processed frame, starting from 0 +

+
n_forced
+

the number of forced frames +

+
prev_forced_n
+

the number of the previous forced frame, it is NAN when no +keyframe was forced yet +

+
prev_forced_t
+

the time of the previous forced frame, it is NAN when no +keyframe was forced yet +

+
t
+

the time of the current processed frame +

+
+ +

For example to force a key frame every 5 seconds, you can specify: +

 
-force_key_frames expr:gte(t,n_forced*5)
+
+ +

To force a key frame 5 seconds after the time of the last forced one, +starting from second 13: +

 
-force_key_frames expr:if(isnan(prev_forced_t),gte(t,13),gte(t,prev_forced_t+5))
+
+ +

Note that forcing too many keyframes is very harmful for the lookahead +algorithms of certain encoders: using fixed-GOP options or similar +would be more efficient. +

+
+
-copyinkf[:stream_specifier] (output,per-stream)
+

When doing stream copy, copy also non-key frames found at the +beginning. +

+
+
-hwaccel[:stream_specifier] hwaccel (input,per-stream)
+

Use hardware acceleration to decode the matching stream(s). The allowed values +of hwaccel are: +

+
none
+

Do not use any hardware acceleration (the default). +

+
+
auto
+

Automatically select the hardware acceleration method. +

+
+
vdpau
+

Use VDPAU (Video Decode and Presentation API for Unix) hardware acceleration. +

+
+ +

This option has no effect if the selected hwaccel is not available or not +supported by the chosen decoder. +

+

Note that most acceleration methods are intended for playback and will not be +faster than software decoding on modern CPUs. Additionally, ffmpeg +will usually need to copy the decoded frames from the GPU memory into the system +memory, resulting in further performance loss. This option is thus mainly +useful for testing. +

+
+
-hwaccel_device[:stream_specifier] hwaccel_device (input,per-stream)
+

Select a device to use for hardware acceleration. +

+

This option only makes sense when the ‘-hwaccel’ option is also +specified. Its exact meaning depends on the specific hardware acceleration +method chosen. +

+
+
vdpau
+

For VDPAU, this option specifies the X11 display/screen to use. If this option +is not specified, the value of the DISPLAY environment variable is used +

+
+
+
+ + +

5.7 Audio Options

+ +
+
-aframes number (output)
+

Set the number of audio frames to record. This is an alias for -frames:a. +

+
-ar[:stream_specifier] freq (input/output,per-stream)
+

Set the audio sampling frequency. For output streams it is set by +default to the frequency of the corresponding input stream. For input +streams this option only makes sense for audio grabbing devices and raw +demuxers and is mapped to the corresponding demuxer options. +

+
-aq q (output)
+

Set the audio quality (codec-specific, VBR). This is an alias for -q:a. +

+
-ac[:stream_specifier] channels (input/output,per-stream)
+

Set the number of audio channels. For output streams it is set by +default to the number of input audio channels. For input streams +this option only makes sense for audio grabbing devices and raw demuxers +and is mapped to the corresponding demuxer options. +

+
-an (output)
+

Disable audio recording. +

+
-acodec codec (input/output)
+

Set the audio codec. This is an alias for -codec:a. +

+
-sample_fmt[:stream_specifier] sample_fmt (output,per-stream)
+

Set the audio sample format. Use -sample_fmts to get a list +of supported sample formats. +

+
+
-af filtergraph (output)
+

Create the filtergraph specified by filtergraph and use it to +filter the stream. +

+

This is an alias for -filter:a, see the -filter option. +

+
+ + +

5.8 Advanced Audio options:

+ +
+
-atag fourcc/tag (output)
+

Force audio tag/fourcc. This is an alias for -tag:a. +

+
-absf bitstream_filter
+

Deprecated, see -bsf +

+
-guess_layout_max channels (input,per-stream)
+

If some input channel layout is not known, try to guess only if it +corresponds to at most the specified number of channels. For example, 2 +tells to ffmpeg to recognize 1 channel as mono and 2 channels as +stereo but not 6 channels as 5.1. The default is to always try to guess. Use +0 to disable all guessing. +

+
+ + +

5.9 Subtitle options:

+ +
+
-scodec codec (input/output)
+

Set the subtitle codec. This is an alias for -codec:s. +

+
-sn (output)
+

Disable subtitle recording. +

+
-sbsf bitstream_filter
+

Deprecated, see -bsf +

+
+ + +

5.10 Advanced Subtitle options:

+ +
+
-fix_sub_duration
+

Fix subtitles durations. For each subtitle, wait for the next packet in the +same stream and adjust the duration of the first to avoid overlap. This is +necessary with some subtitles codecs, especially DVB subtitles, because the +duration in the original packet is only a rough estimate and the end is +actually marked by an empty subtitle frame. Failing to use this option when +necessary can result in exaggerated durations or muxing failures due to +non-monotonic timestamps. +

+

Note that this option will delay the output of all data until the next +subtitle packet is decoded: it may increase memory consumption and latency a +lot. +

+
+
-canvas_size size
+

Set the size of the canvas used to render subtitles. +

+
+
+ + +

5.11 Advanced options

+ +
+
-map [-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]] | [linklabel] (output)
+
+

Designate one or more input streams as a source for the output file. Each input +stream is identified by the input file index input_file_id and +the input stream index input_stream_id within the input +file. Both indices start at 0. If specified, +sync_file_id:stream_specifier sets which input stream +is used as a presentation sync reference. +

+

The first -map option on the command line specifies the +source for output stream 0, the second -map option specifies +the source for output stream 1, etc. +

+

A - character before the stream identifier creates a "negative" mapping. +It disables matching streams from already created mappings. +

+

An alternative [linklabel] form will map outputs from complex filter +graphs (see the ‘-filter_complex’ option) to the output file. +linklabel must correspond to a defined output link label in the graph. +

+

For example, to map ALL streams from the first input file to output +

 
ffmpeg -i INPUT -map 0 output
+
+ +

For example, if you have two audio streams in the first input file, +these streams are identified by "0:0" and "0:1". You can use +-map to select which streams to place in an output file. For +example: +

 
ffmpeg -i INPUT -map 0:1 out.wav
+
+

will map the input stream in ‘INPUT’ identified by "0:1" to +the (single) output stream in ‘out.wav’. +

+

For example, to select the stream with index 2 from input file +‘a.mov’ (specified by the identifier "0:2"), and stream with +index 6 from input ‘b.mov’ (specified by the identifier "1:6"), +and copy them to the output file ‘out.mov’: +

 
ffmpeg -i a.mov -i b.mov -c copy -map 0:2 -map 1:6 out.mov
+
+ +

To select all video and the third audio stream from an input file: +

 
ffmpeg -i INPUT -map 0:v -map 0:a:2 OUTPUT
+
+ +

To map all the streams except the second audio, use negative mappings +

 
ffmpeg -i INPUT -map 0 -map -0:a:1 OUTPUT
+
+ +

Note that using this option disables the default mappings for this output file. +

+
+
-map_channel [input_file_id.stream_specifier.channel_id|-1][:output_file_id.stream_specifier]
+

Map an audio channel from a given input to an output. If +output_file_id.stream_specifier is not set, the audio channel will +be mapped on all the audio streams. +

+

Using "-1" instead of +input_file_id.stream_specifier.channel_id will map a muted +channel. +

+

For example, assuming INPUT is a stereo audio file, you can switch the +two audio channels with the following command: +

 
ffmpeg -i INPUT -map_channel 0.0.1 -map_channel 0.0.0 OUTPUT
+
+ +

If you want to mute the first channel and keep the second: +

 
ffmpeg -i INPUT -map_channel -1 -map_channel 0.0.1 OUTPUT
+
+ +

The order of the "-map_channel" option specifies the order of the channels in +the output stream. The output channel layout is guessed from the number of +channels mapped (mono if one "-map_channel", stereo if two, etc.). Using "-ac" +in combination of "-map_channel" makes the channel gain levels to be updated if +input and output channel layouts don’t match (for instance two "-map_channel" +options and "-ac 6"). +

+

You can also extract each channel of an input to specific outputs; the following +command extracts two channels of the INPUT audio stream (file 0, stream 0) +to the respective OUTPUT_CH0 and OUTPUT_CH1 outputs: +

 
ffmpeg -i INPUT -map_channel 0.0.0 OUTPUT_CH0 -map_channel 0.0.1 OUTPUT_CH1
+
+ +

The following example splits the channels of a stereo input into two separate +streams, which are put into the same output file: +

 
ffmpeg -i stereo.wav -map 0:0 -map 0:0 -map_channel 0.0.0:0.0 -map_channel 0.0.1:0.1 -y out.ogg
+
+ +

Note that currently each output stream can only contain channels from a single +input stream; you can’t for example use "-map_channel" to pick multiple input +audio channels contained in different streams (from the same or different files) +and merge them into a single output stream. It is therefore not currently +possible, for example, to turn two separate mono streams into a single stereo +stream. However splitting a stereo stream into two single channel mono streams +is possible. +

+

If you need this feature, a possible workaround is to use the amerge +filter. For example, if you need to merge a media (here ‘input.mkv’) with 2 +mono audio streams into one single stereo channel audio stream (and keep the +video stream), you can use the following command: +

 
ffmpeg -i input.mkv -filter_complex "[0:1] [0:2] amerge" -c:a pcm_s16le -c:v copy output.mkv
+
+ +
+
-map_metadata[:metadata_spec_out] infile[:metadata_spec_in] (output,per-metadata)
+

Set metadata information of the next output file from infile. Note that +those are file indices (zero-based), not filenames. +Optional metadata_spec_in/out parameters specify, which metadata to copy. +A metadata specifier can have the following forms: +

+
g
+

global metadata, i.e. metadata that applies to the whole file +

+
+
s[:stream_spec]
+

per-stream metadata. stream_spec is a stream specifier as described +in the Stream specifiers chapter. In an input metadata specifier, the first +matching stream is copied from. In an output metadata specifier, all matching +streams are copied to. +

+
+
c:chapter_index
+

per-chapter metadata. chapter_index is the zero-based chapter index. +

+
+
p:program_index
+

per-program metadata. program_index is the zero-based program index. +

+
+

If metadata specifier is omitted, it defaults to global. +

+

By default, global metadata is copied from the first input file, +per-stream and per-chapter metadata is copied along with streams/chapters. These +default mappings are disabled by creating any mapping of the relevant type. A negative +file index can be used to create a dummy mapping that just disables automatic copying. +

+

For example to copy metadata from the first stream of the input file to global metadata +of the output file: +

 
ffmpeg -i in.ogg -map_metadata 0:s:0 out.mp3
+
+ +

To do the reverse, i.e. copy global metadata to all audio streams: +

 
ffmpeg -i in.mkv -map_metadata:s:a 0:g out.mkv
+
+

Note that simple 0 would work as well in this example, since global +metadata is assumed by default. +

+
+
-map_chapters input_file_index (output)
+

Copy chapters from input file with index input_file_index to the next +output file. If no chapter mapping is specified, then chapters are copied from +the first input file with at least one chapter. Use a negative file index to +disable any chapter copying. +

+
+
-benchmark (global)
+

Show benchmarking information at the end of an encode. +Shows CPU time used and maximum memory consumption. +Maximum memory consumption is not supported on all systems, +it will usually display as 0 if not supported. +

+
-benchmark_all (global)
+

Show benchmarking information during the encode. +Shows CPU time used in various steps (audio/video encode/decode). +

+
-timelimit duration (global)
+

Exit after ffmpeg has been running for duration seconds. +

+
-dump (global)
+

Dump each input packet to stderr. +

+
-hex (global)
+

When dumping packets, also dump the payload. +

+
-re (input)
+

Read input at native frame rate. Mainly used to simulate a grab device, +or live input stream (e.g. when reading from a file). Should not be used +with actual grab devices or live input streams (where it can cause packet +loss). +By default ffmpeg attempts to read the input(s) as fast as possible. +This option will slow down the reading of the input(s) to the native frame rate +of the input(s). It is useful for real-time output (e.g. live streaming). +

+
-loop_input
+

Loop over the input stream. Currently it works only for image +streams. This option is used for automatic FFserver testing. +This option is deprecated, use -loop 1. +

+
-loop_output number_of_times
+

Repeatedly loop output for formats that support looping such as animated GIF +(0 will loop the output infinitely). +This option is deprecated, use -loop. +

+
-vsync parameter
+

Video sync method. +For compatibility reasons old values can be specified as numbers. +Newly added values will have to be specified as strings always. +

+
+
0, passthrough
+

Each frame is passed with its timestamp from the demuxer to the muxer. +

+
1, cfr
+

Frames will be duplicated and dropped to achieve exactly the requested +constant frame rate. +

+
2, vfr
+

Frames are passed through with their timestamp or dropped so as to +prevent 2 frames from having the same timestamp. +

+
drop
+

As passthrough but destroys all timestamps, making the muxer generate +fresh timestamps based on frame-rate. +

+
-1, auto
+

Chooses between 1 and 2 depending on muxer capabilities. This is the +default method. +

+
+ +

Note that the timestamps may be further modified by the muxer, after this. +For example, in the case that the format option ‘avoid_negative_ts’ +is enabled. +

+

With -map you can select from which stream the timestamps should be +taken. You can leave either video or audio unchanged and sync the +remaining stream(s) to the unchanged one. +

+
+
-async samples_per_second
+

Audio sync method. "Stretches/squeezes" the audio stream to match the timestamps, +the parameter is the maximum samples per second by which the audio is changed. +-async 1 is a special case where only the start of the audio stream is corrected +without any later correction. +

+

Note that the timestamps may be further modified by the muxer, after this. +For example, in the case that the format option ‘avoid_negative_ts’ +is enabled. +

+

This option has been deprecated. Use the aresample audio filter instead. +

+
+
-copyts
+

Do not process input timestamps, but keep their values without trying +to sanitize them. In particular, do not remove the initial start time +offset value. +

+

Note that, depending on the ‘vsync’ option or on specific muxer +processing (e.g. in case the format option ‘avoid_negative_ts’ +is enabled) the output timestamps may mismatch with the input +timestamps even when this option is selected. +

+
+
-copytb mode
+

Specify how to set the encoder timebase when stream copying. mode is an +integer numeric value, and can assume one of the following values: +

+
+
1
+

Use the demuxer timebase. +

+

The time base is copied to the output encoder from the corresponding input +demuxer. This is sometimes required to avoid non monotonically increasing +timestamps when copying video streams with variable frame rate. +

+
+
0
+

Use the decoder timebase. +

+

The time base is copied to the output encoder from the corresponding input +decoder. +

+
+
-1
+

Try to make the choice automatically, in order to generate a sane output. +

+
+ +

Default value is -1. +

+
+
-shortest (output)
+

Finish encoding when the shortest input stream ends. +

+
-dts_delta_threshold
+

Timestamp discontinuity delta threshold. +

+
-muxdelay seconds (input)
+

Set the maximum demux-decode delay. +

+
-muxpreload seconds (input)
+

Set the initial demux-decode delay. +

+
-streamid output-stream-index:new-value (output)
+

Assign a new stream-id value to an output stream. This option should be +specified prior to the output filename to which it applies. +For the situation where multiple output files exist, a streamid +may be reassigned to a different value. +

+

For example, to set the stream 0 PID to 33 and the stream 1 PID to 36 for +an output mpegts file: +

 
ffmpeg -i infile -streamid 0:33 -streamid 1:36 out.ts
+
+ +
+
-bsf[:stream_specifier] bitstream_filters (output,per-stream)
+

Set bitstream filters for matching streams. bitstream_filters is +a comma-separated list of bitstream filters. Use the -bsfs option +to get the list of bitstream filters. +

 
ffmpeg -i h264.mp4 -c:v copy -bsf:v h264_mp4toannexb -an out.h264
+
+
 
ffmpeg -i file.mov -an -vn -bsf:s mov2textsub -c:s copy -f rawvideo sub.txt
+
+ +
+
-tag[:stream_specifier] codec_tag (input/output,per-stream)
+

Force a tag/fourcc for matching streams. +

+
+
-timecode hh:mm:ssSEPff
+

Specify Timecode for writing. SEP is ’:’ for non drop timecode and ’;’ +(or ’.’) for drop. +

 
ffmpeg -i input.mpg -timecode 01:02:03.04 -r 30000/1001 -s ntsc output.mpg
+
+ +

+

+
-filter_complex filtergraph (global)
+

Define a complex filtergraph, i.e. one with arbitrary number of inputs and/or +outputs. For simple graphs – those with one input and one output of the same +type – see the ‘-filter’ options. filtergraph is a description of +the filtergraph, as described in the “Filtergraph syntax” section of the +ffmpeg-filters manual. +

+

Input link labels must refer to input streams using the +[file_index:stream_specifier] syntax (i.e. the same as ‘-map’ +uses). If stream_specifier matches multiple streams, the first one will be +used. An unlabeled input will be connected to the first unused input stream of +the matching type. +

+

Output link labels are referred to with ‘-map’. Unlabeled outputs are +added to the first output file. +

+

Note that with this option it is possible to use only lavfi sources without +normal input files. +

+

For example, to overlay an image over video +

 
ffmpeg -i video.mkv -i image.png -filter_complex '[0:v][1:v]overlay[out]' -map
+'[out]' out.mkv
+
+

Here [0:v] refers to the first video stream in the first input file, +which is linked to the first (main) input of the overlay filter. Similarly the +first video stream in the second input is linked to the second (overlay) input +of overlay. +

+

Assuming there is only one video stream in each input file, we can omit input +labels, so the above is equivalent to +

 
ffmpeg -i video.mkv -i image.png -filter_complex 'overlay[out]' -map
+'[out]' out.mkv
+
+ +

Furthermore we can omit the output label and the single output from the filter +graph will be added to the output file automatically, so we can simply write +

 
ffmpeg -i video.mkv -i image.png -filter_complex 'overlay' out.mkv
+
+ +

To generate 5 seconds of pure red video using lavfi color source: +

 
ffmpeg -filter_complex 'color=c=red' -t 5 out.mkv
+
+ +
+
-lavfi filtergraph (global)
+

Define a complex filtergraph, i.e. one with arbitrary number of inputs and/or +outputs. Equivalent to ‘-filter_complex’. +

+
+
-filter_complex_script filename (global)
+

This option is similar to ‘-filter_complex’, the only difference is that +its argument is the name of the file from which a complex filtergraph +description is to be read. +

+
+
-accurate_seek (input)
+

This option enables or disables accurate seeking in input files with the +‘-ss’ option. It is enabled by default, so seeking is accurate when +transcoding. Use ‘-noaccurate_seek’ to disable it, which may be useful +e.g. when copying some streams and transcoding the others. +

+
+
-override_ffserver (global)
+

Overrides the input specifications from ffserver. Using this +option you can map any input stream to ffserver and control +many aspects of the encoding from ffmpeg. Without this +option ffmpeg will transmit to ffserver what is +requested by ffserver. +

+

The option is intended for cases where features are needed that cannot be +specified to ffserver but can be to ffmpeg. +

+
+
+ +

As a special exception, you can use a bitmap subtitle stream as input: it +will be converted into a video with the same size as the largest video in +the file, or 720x576 if no video is present. Note that this is an +experimental and temporary solution. It will be removed once libavfilter has +proper support for subtitles. +

+

For example, to hardcode subtitles on top of a DVB-T recording stored in +MPEG-TS format, delaying the subtitles by 1 second: +

 
ffmpeg -i input.ts -filter_complex \
+  '[#0x2ef] setpts=PTS+1/TB [sub] ; [#0x2d0] [sub] overlay' \
+  -sn -map '#0x2dc' output.mkv
+
+

(0x2d0, 0x2dc and 0x2ef are the MPEG-TS PIDs of respectively the video, +audio and subtitles streams; 0:0, 0:3 and 0:7 would have worked too) +

+ +

5.12 Preset files

+

A preset file contains a sequence of option=value pairs, +one for each line, specifying a sequence of options which would be +awkward to specify on the command line. Lines starting with the hash +(’#’) character are ignored and are used to provide comments. Check +the ‘presets’ directory in the FFmpeg source tree for examples. +

+

Preset files are specified with the vpre, apre, +spre, and fpre options. The fpre option takes the +filename of the preset instead of a preset name as input and can be +used for any kind of codec. For the vpre, apre, and +spre options, the options specified in a preset file are +applied to the currently selected codec of the same type as the preset +option. +

+

The argument passed to the vpre, apre, and spre +preset options identifies the preset file to use according to the +following rules: +

+

First ffmpeg searches for a file named arg.ffpreset in the +directories ‘$FFMPEG_DATADIR’ (if set), and ‘$HOME/.ffmpeg’, and in +the datadir defined at configuration time (usually ‘PREFIX/share/ffmpeg’) +or in a ‘ffpresets’ folder along the executable on win32, +in that order. For example, if the argument is libvpx-1080p, it will +search for the file ‘libvpx-1080p.ffpreset’. +

+

If no such file is found, then ffmpeg will search for a file named +codec_name-arg.ffpreset in the above-mentioned +directories, where codec_name is the name of the codec to which +the preset file options will be applied. For example, if you select +the video codec with -vcodec libvpx and use -vpre 1080p, +then it will search for the file ‘libvpx-1080p.ffpreset’. +

+ +

6. Tips

+ +
    +
  • +For streaming at very low bitrates, use a low frame rate +and a small GOP size. This is especially true for RealVideo where +the Linux player does not seem to be very fast, so it can miss +frames. An example is: + +
     
    ffmpeg -g 3 -r 3 -t 10 -b:v 50k -s qcif -f rv10 /tmp/b.rm
    +
    + +
  • +The parameter ’q’ which is displayed while encoding is the current +quantizer. The value 1 indicates that a very good quality could +be achieved. The value 31 indicates the worst quality. If q=31 appears +too often, it means that the encoder cannot compress enough to meet +your bitrate. You must either increase the bitrate, decrease the +frame rate or decrease the frame size. + +
  • +If your computer is not fast enough, you can speed up the +compression at the expense of the compression ratio. You can use +’-me zero’ to speed up motion estimation, and ’-g 0’ to disable +motion estimation completely (you have only I-frames, which means it +is about as good as JPEG compression). + +
  • +To have very low audio bitrates, reduce the sampling frequency +(down to 22050 Hz for MPEG audio, 22050 or 11025 for AC-3). + +
  • +To have a constant quality (but a variable bitrate), use the option +’-qscale n’ when ’n’ is between 1 (excellent quality) and 31 (worst +quality). + +
+ + +

7. Examples

+ + +

7.1 Preset files

+ +

A preset file contains a sequence of option=value pairs, one for +each line, specifying a sequence of options which can be specified also on +the command line. Lines starting with the hash (’#’) character are ignored and +are used to provide comments. Empty lines are also ignored. Check the +‘presets’ directory in the FFmpeg source tree for examples. +

+

Preset files are specified with the pre option, this option takes a +preset name as input. FFmpeg searches for a file named preset_name.avpreset in +the directories ‘$AVCONV_DATADIR’ (if set), and ‘$HOME/.ffmpeg’, and in +the data directory defined at configuration time (usually ‘$PREFIX/share/ffmpeg’) +in that order. For example, if the argument is libx264-max, it will +search for the file ‘libx264-max.avpreset’. +

+ +

7.2 Video and Audio grabbing

+ +

If you specify the input format and device then ffmpeg can grab video +and audio directly. +

+
 
ffmpeg -f oss -i /dev/dsp -f video4linux2 -i /dev/video0 /tmp/out.mpg
+
+ +

Or with an ALSA audio source (mono input, card id 1) instead of OSS: +

 
ffmpeg -f alsa -ac 1 -i hw:1 -f video4linux2 -i /dev/video0 /tmp/out.mpg
+
+ +

Note that you must activate the right video source and channel before +launching ffmpeg with any TV viewer such as +xawtv by Gerd Knorr. You also +have to set the audio recording levels correctly with a +standard mixer. +

+ +

7.3 X11 grabbing

+ +

Grab the X11 display with ffmpeg via +

+
 
ffmpeg -f x11grab -video_size cif -framerate 25 -i :0.0 /tmp/out.mpg
+
+ +

0.0 is display.screen number of your X11 server, same as +the DISPLAY environment variable. +

+
 
ffmpeg -f x11grab -video_size cif -framerate 25 -i :0.0+10,20 /tmp/out.mpg
+
+ +

0.0 is display.screen number of your X11 server, same as the DISPLAY environment +variable. 10 is the x-offset and 20 the y-offset for the grabbing. +

+ +

7.4 Video and Audio file format conversion

+ +

Any supported file format and protocol can serve as input to ffmpeg: +

+

Examples: +

    +
  • +You can use YUV files as input: + +
     
    ffmpeg -i /tmp/test%d.Y /tmp/out.mpg
    +
    + +

    It will use the files: +

     
    /tmp/test0.Y, /tmp/test0.U, /tmp/test0.V,
    +/tmp/test1.Y, /tmp/test1.U, /tmp/test1.V, etc...
    +
    + +

    The Y files use twice the resolution of the U and V files. They are +raw files, without header. They can be generated by all decent video +decoders. You must specify the size of the image with the ‘-s’ option +if ffmpeg cannot guess it. +

    +
  • +You can input from a raw YUV420P file: + +
     
    ffmpeg -i /tmp/test.yuv /tmp/out.avi
    +
    + +

    test.yuv is a file containing raw YUV planar data. Each frame is composed +of the Y plane followed by the U and V planes at half vertical and +horizontal resolution. +

    +
  • +You can output to a raw YUV420P file: + +
     
    ffmpeg -i mydivx.avi hugefile.yuv
    +
    + +
  • +You can set several input files and output files: + +
     
    ffmpeg -i /tmp/a.wav -s 640x480 -i /tmp/a.yuv /tmp/a.mpg
    +
    + +

    Converts the audio file a.wav and the raw YUV video file a.yuv +to MPEG file a.mpg. +

    +
  • +You can also do audio and video conversions at the same time: + +
     
    ffmpeg -i /tmp/a.wav -ar 22050 /tmp/a.mp2
    +
    + +

    Converts a.wav to MPEG audio at 22050 Hz sample rate. +

    +
  • +You can encode to several formats at the same time and define a +mapping from input stream to output streams: + +
     
    ffmpeg -i /tmp/a.wav -map 0:a -b:a 64k /tmp/a.mp2 -map 0:a -b:a 128k /tmp/b.mp2
    +
    + +

    Converts a.wav to a.mp2 at 64 kbits and to b.mp2 at 128 kbits. ’-map +file:index’ specifies which input stream is used for each output +stream, in the order of the definition of output streams. +

    +
  • +You can transcode decrypted VOBs: + +
     
    ffmpeg -i snatch_1.vob -f avi -c:v mpeg4 -b:v 800k -g 300 -bf 2 -c:a libmp3lame -b:a 128k snatch.avi
    +
    + +

    This is a typical DVD ripping example; the input is a VOB file, the +output an AVI file with MPEG-4 video and MP3 audio. Note that in this +command we use B-frames so the MPEG-4 stream is DivX5 compatible, and +GOP size is 300 which means one intra frame every 10 seconds for 29.97fps +input video. Furthermore, the audio stream is MP3-encoded so you need +to enable LAME support by passing --enable-libmp3lame to configure. +The mapping is particularly useful for DVD transcoding +to get the desired audio language. +

    +

    NOTE: To see the supported input formats, use ffmpeg -formats. +

    +
  • +You can extract images from a video, or create a video from many images: + +

    For extracting images from a video: +

     
    ffmpeg -i foo.avi -r 1 -s WxH -f image2 foo-%03d.jpeg
    +
    + +

    This will extract one video frame per second from the video and will +output them in files named ‘foo-001.jpeg’, ‘foo-002.jpeg’, +etc. Images will be rescaled to fit the new WxH values. +

    +

    If you want to extract just a limited number of frames, you can use the +above command in combination with the -vframes or -t option, or in +combination with -ss to start extracting from a certain point in time. +

    +

    For creating a video from many images: +

     
    ffmpeg -f image2 -i foo-%03d.jpeg -r 12 -s WxH foo.avi
    +
    + +

    The syntax foo-%03d.jpeg specifies to use a decimal number +composed of three digits padded with zeroes to express the sequence +number. It is the same syntax supported by the C printf function, but +only formats accepting a normal integer are suitable. +

    +

    When importing an image sequence, -i also supports expanding +shell-like wildcard patterns (globbing) internally, by selecting the +image2-specific -pattern_type glob option. +

    +

    For example, for creating a video from filenames matching the glob pattern +foo-*.jpeg: +

     
    ffmpeg -f image2 -pattern_type glob -i 'foo-*.jpeg' -r 12 -s WxH foo.avi
    +
    + +
  • +You can put many streams of the same type in the output: + +
     
    ffmpeg -i test1.avi -i test2.avi -map 0:3 -map 0:2 -map 0:1 -map 0:0 -c copy test12.nut
    +
    + +

    The resulting output file ‘test12.nut’ will contain the first four streams from +the input file in reverse order. +

    +
  • +To force CBR video output: +
     
    ffmpeg -i myfile.avi -b 4000k -minrate 4000k -maxrate 4000k -bufsize 1835k out.m2v
    +
    + +
  • +The four options lmin, lmax, mblmin and mblmax use ’lambda’ units, +but you may use the QP2LAMBDA constant to easily convert from ’q’ units: +
     
    ffmpeg -i src.ext -lmax 21*QP2LAMBDA dst.ext
    +
    + +
+ + + +

8. See Also

+ +

ffmpeg-all, +ffplay, ffprobe, ffserver, +ffmpeg-utils, +ffmpeg-scaler, +ffmpeg-resampler, +ffmpeg-codecs, +ffmpeg-bitstream-filters, +ffmpeg-formats, +ffmpeg-devices, +ffmpeg-protocols, +ffmpeg-filters +

+ + +

9. Authors

+ +

The FFmpeg developers. +

+

For details about the authorship, see the Git history of the project +(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command +git log in the FFmpeg source directory, or browsing the +online repository at http://source.ffmpeg.org. +

+

Maintainers for the specific components are listed in the file +‘MAINTAINERS’ in the source code tree. +

+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/ffplay-all.html b/dependencies64/ffmpeg/doc/ffplay-all.html new file mode 100644 index 000000000..e3439afcc --- /dev/null +++ b/dependencies64/ffmpeg/doc/ffplay-all.html @@ -0,0 +1,18956 @@ + + + + + +FFmpeg documentation : ffplay + + + + + + + + + + +
+
+ + +

ffplay Documentation

+ + +

Table of Contents

+
+ + +
+ + +

1. Synopsis

+ +

ffplay [options] [‘input_file’] +

+ +

2. Description

+ +

FFplay is a very simple and portable media player using the FFmpeg +libraries and the SDL library. It is mostly used as a testbed for the +various FFmpeg APIs. +

+ +

3. Options

+ +

All the numerical options, if not specified otherwise, accept a string +representing a number as input, which may be followed by one of the SI +unit prefixes, for example: ’K’, ’M’, or ’G’. +

+

If ’i’ is appended to the SI unit prefix, the complete prefix will be +interpreted as a unit prefix for binary multiplies, which are based on +powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit +prefix multiplies the value by 8. This allows using, for example: +’KB’, ’MiB’, ’G’ and ’B’ as number suffixes. +

+

Options which do not take arguments are boolean options, and set the +corresponding value to true. They can be set to false by prefixing +the option name with "no". For example using "-nofoo" +will set the boolean option with name "foo" to false. +

+

+

+

3.1 Stream specifiers

+

Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers +are used to precisely specify which stream(s) a given option belongs to. +

+

A stream specifier is a string generally appended to the option name and +separated from it by a colon. E.g. -codec:a:1 ac3 contains the +a:1 stream specifier, which matches the second audio stream. Therefore, it +would select the ac3 codec for the second audio stream. +

+

A stream specifier can match several streams, so that the option is applied to all +of them. E.g. the stream specifier in -b:a 128k matches all audio +streams. +

+

An empty stream specifier matches all streams. For example, -codec copy +or -codec: copy would copy all the streams without reencoding. +

+

Possible forms of stream specifiers are: +

+
stream_index
+

Matches the stream with this index. E.g. -threads:1 4 would set the +thread count for the second stream to 4. +

+
stream_type[:stream_index]
+

stream_type is one of following: ’v’ for video, ’a’ for audio, ’s’ for subtitle, +’d’ for data, and ’t’ for attachments. If stream_index is given, then it matches +stream number stream_index of this type. Otherwise, it matches all +streams of this type. +

+
p:program_id[:stream_index]
+

If stream_index is given, then it matches the stream with number stream_index +in the program with the id program_id. Otherwise, it matches all streams in the +program. +

+
#stream_id
+

Matches the stream by a format-specific ID. +

+
+ + +

3.2 Generic options

+ +

These options are shared amongst the ff* tools. +

+
+
-L
+

Show license. +

+
+
-h, -?, -help, --help [arg]
+

Show help. An optional parameter may be specified to print help about a specific +item. If no argument is specified, only basic (non advanced) tool +options are shown. +

+

Possible values of arg are: +

+
long
+

Print advanced tool options in addition to the basic tool options. +

+
+
full
+

Print complete list of options, including shared and private options +for encoders, decoders, demuxers, muxers, filters, etc. +

+
+
decoder=decoder_name
+

Print detailed information about the decoder named decoder_name. Use the +‘-decoders’ option to get a list of all decoders. +

+
+
encoder=encoder_name
+

Print detailed information about the encoder named encoder_name. Use the +‘-encoders’ option to get a list of all encoders. +

+
+
demuxer=demuxer_name
+

Print detailed information about the demuxer named demuxer_name. Use the +‘-formats’ option to get a list of all demuxers and muxers. +

+
+
muxer=muxer_name
+

Print detailed information about the muxer named muxer_name. Use the +‘-formats’ option to get a list of all muxers and demuxers. +

+
+
filter=filter_name
+

Print detailed information about the filter name filter_name. Use the +‘-filters’ option to get a list of all filters. +

+
+ +
+
-version
+

Show version. +

+
+
-formats
+

Show available formats. +

+
+
-codecs
+

Show all codecs known to libavcodec. +

+

Note that the term ’codec’ is used throughout this documentation as a shortcut +for what is more correctly called a media bitstream format. +

+
+
-decoders
+

Show available decoders. +

+
+
-encoders
+

Show all available encoders. +

+
+
-bsfs
+

Show available bitstream filters. +

+
+
-protocols
+

Show available protocols. +

+
+
-filters
+

Show available libavfilter filters. +

+
+
-pix_fmts
+

Show available pixel formats. +

+
+
-sample_fmts
+

Show available sample formats. +

+
+
-layouts
+

Show channel names and standard channel layouts. +

+
+
-colors
+

Show recognized color names. +

+
+
-loglevel [repeat+]loglevel | -v [repeat+]loglevel
+

Set the logging level used by the library. +Adding "repeat+" indicates that repeated log output should not be compressed +to the first line and the "Last message repeated n times" line will be +omitted. "repeat" can also be used alone. +If "repeat" is used alone, and with no prior loglevel set, the default +loglevel will be used. If multiple loglevel parameters are given, using +’repeat’ will not change the loglevel. +loglevel is a number or a string containing one of the following values: +

+
quiet
+

Show nothing at all; be silent. +

+
panic
+

Only show fatal errors which could lead the process to crash, such as +an assert failure. This is not currently used for anything. +

+
fatal
+

Only show fatal errors. These are errors after which the process absolutely +cannot continue. +

+
error
+

Show all errors, including ones which can be recovered from. +

+
warning
+

Show all warnings and errors. Any message related to possibly +incorrect or unexpected events will be shown. +

+
info
+

Show informative messages during processing. This is in addition to +warnings and errors. This is the default value. +

+
verbose
+

Same as info, except more verbose. +

+
debug
+

Show everything, including debugging information. +

+
+ +

By default the program logs to stderr, if coloring is supported by the +terminal, colors are used to mark errors and warnings. Log coloring +can be disabled setting the environment variable +AV_LOG_FORCE_NOCOLOR or NO_COLOR, or can be forced setting +the environment variable AV_LOG_FORCE_COLOR. +The use of the environment variable NO_COLOR is deprecated and +will be dropped in a following FFmpeg version. +

+
+
-report
+

Dump full command line and console output to a file named +program-YYYYMMDD-HHMMSS.log in the current +directory. +This file can be useful for bug reports. +It also implies -loglevel verbose. +

+

Setting the environment variable FFREPORT to any value has the +same effect. If the value is a ’:’-separated key=value sequence, these +options will affect the report; options values must be escaped if they +contain special characters or the options delimiter ’:’ (see the +“Quoting and escaping” section in the ffmpeg-utils manual). The +following option is recognized: +

+
file
+

set the file name to use for the report; %p is expanded to the name +of the program, %t is expanded to a timestamp, %% is expanded +to a plain % +

+
+ +

Errors in parsing the environment variable are not fatal, and will not +appear in the report. +

+
+
-hide_banner
+

Suppress printing banner. +

+

All FFmpeg tools will normally show a copyright notice, build options +and library versions. This option can be used to suppress printing +this information. +

+
+
-cpuflags flags (global)
+

Allows setting and clearing cpu flags. This option is intended +for testing. Do not use it unless you know what you’re doing. +

 
ffmpeg -cpuflags -sse+mmx ...
+ffmpeg -cpuflags mmx ...
+ffmpeg -cpuflags 0 ...
+
+

Possible flags for this option are: +

+
x86
+
+
mmx
+
mmxext
+
sse
+
sse2
+
sse2slow
+
sse3
+
sse3slow
+
ssse3
+
atom
+
sse4.1
+
sse4.2
+
avx
+
xop
+
fma4
+
3dnow
+
3dnowext
+
cmov
+
+
+
ARM
+
+
armv5te
+
armv6
+
armv6t2
+
vfp
+
vfpv3
+
neon
+
+
+
PowerPC
+
+
altivec
+
+
+
Specific Processors
+
+
pentium2
+
pentium3
+
pentium4
+
k6
+
k62
+
athlon
+
athlonxp
+
k8
+
+
+
+ +
+
-opencl_bench
+

Benchmark all available OpenCL devices and show the results. This option +is only available when FFmpeg has been compiled with --enable-opencl. +

+
+
-opencl_options options (global)
+

Set OpenCL environment options. This option is only available when +FFmpeg has been compiled with --enable-opencl. +

+

options must be a list of key=value option pairs +separated by ’:’. See the “OpenCL Options” section in the +ffmpeg-utils manual for the list of supported options. +

+
+ + +

3.3 AVOptions

+ +

These options are provided directly by the libavformat, libavdevice and +libavcodec libraries. To see the list of available AVOptions, use the +‘-help’ option. They are separated into two categories: +

+
generic
+

These options can be set for any container, codec or device. Generic options +are listed under AVFormatContext options for containers/devices and under +AVCodecContext options for codecs. +

+
private
+

These options are specific to the given container, device or codec. Private +options are listed under their corresponding containers/devices/codecs. +

+
+ +

For example to write an ID3v2.3 header instead of a default ID3v2.4 to +an MP3 file, use the ‘id3v2_version’ private option of the MP3 +muxer: +

 
ffmpeg -i input.flac -id3v2_version 3 out.mp3
+
+ +

All codec AVOptions are per-stream, and thus a stream specifier +should be attached to them. +

+

Note: the ‘-nooption’ syntax cannot be used for boolean +AVOptions, use ‘-option 0’/‘-option 1’. +

+

Note: the old undocumented way of specifying per-stream AVOptions by +prepending v/a/s to the options name is now obsolete and will be +removed soon. +

+ +

3.4 Main options

+ +
+
-x width
+

Force displayed width. +

+
-y height
+

Force displayed height. +

+
-s size
+

Set frame size (WxH or abbreviation), needed for videos which do +not contain a header with the frame size like raw YUV. This option +has been deprecated in favor of private options, try -video_size. +

+
-an
+

Disable audio. +

+
-vn
+

Disable video. +

+
-ss pos
+

Seek to a given position in seconds. +

+
-t duration
+

play <duration> seconds of audio/video +

+
-bytes
+

Seek by bytes. +

+
-nodisp
+

Disable graphical display. +

+
-f fmt
+

Force format. +

+
-window_title title
+

Set window title (default is the input filename). +

+
-loop number
+

Loops movie playback <number> times. 0 means forever. +

+
-showmode mode
+

Set the show mode to use. +Available values for mode are: +

+
0, video
+

show video +

+
1, waves
+

show audio waves +

+
2, rdft
+

show audio frequency band using RDFT ((Inverse) Real Discrete Fourier Transform) +

+
+ +

Default value is "video", if video is not present or cannot be played +"rdft" is automatically selected. +

+

You can interactively cycle through the available show modes by +pressing the key <w>. +

+
+
-vf filtergraph
+

Create the filtergraph specified by filtergraph and use it to +filter the video stream. +

+

filtergraph is a description of the filtergraph to apply to +the stream, and must have a single video input and a single video +output. In the filtergraph, the input is associated to the label +in, and the output to the label out. See the +ffmpeg-filters manual for more information about the filtergraph +syntax. +

+
+
-af filtergraph
+

filtergraph is a description of the filtergraph to apply to +the input audio. +Use the option "-filters" to show all the available filters (including +sources and sinks). +

+
+
-i input_file
+

Read input_file. +

+
+ + +

3.5 Advanced options

+
+
-pix_fmt format
+

Set pixel format. +This option has been deprecated in favor of private options, try -pixel_format. +

+
+
-stats
+

Print several playback statistics, in particular show the stream +duration, the codec parameters, the current position in the stream and +the audio/video synchronisation drift. It is on by default, to +explicitly disable it you need to specify -nostats. +

+
+
-bug
+

Work around bugs. +

+
-fast
+

Non-spec-compliant optimizations. +

+
-genpts
+

Generate pts. +

+
-rtp_tcp
+

Force RTP/TCP protocol usage instead of RTP/UDP. It is only meaningful +if you are streaming with the RTSP protocol. +

+
-sync type
+

Set the master clock to audio (type=audio), video +(type=video) or external (type=ext). Default is audio. The +master clock is used to control audio-video synchronization. Most media +players use audio as master clock, but in some cases (streaming or high +quality broadcast) it is necessary to change that. This option is mainly +used for debugging purposes. +

+
-threads count
+

Set the thread count. +

+
-ast audio_stream_number
+

Select the desired audio stream number, counting from 0. The number +refers to the list of all the input audio streams. If it is greater +than the number of audio streams minus one, then the last one is +selected, if it is negative the audio playback is disabled. +

+
-vst video_stream_number
+

Select the desired video stream number, counting from 0. The number +refers to the list of all the input video streams. If it is greater +than the number of video streams minus one, then the last one is +selected, if it is negative the video playback is disabled. +

+
-sst subtitle_stream_number
+

Select the desired subtitle stream number, counting from 0. The number +refers to the list of all the input subtitle streams. If it is greater +than the number of subtitle streams minus one, then the last one is +selected, if it is negative the subtitle rendering is disabled. +

+
-autoexit
+

Exit when video is done playing. +

+
-exitonkeydown
+

Exit if any key is pressed. +

+
-exitonmousedown
+

Exit if any mouse button is pressed. +

+
+
-codec:media_specifier codec_name
+

Force a specific decoder implementation for the stream identified by media_specifier, which can assume the values a (audio), v (video), and s (subtitle).

+
+
-acodec codec_name
+

Force a specific audio decoder. +

+
+
-vcodec codec_name
+

Force a specific video decoder. +

+
+
-scodec codec_name
+

Force a specific subtitle decoder. +

+
+ + +

3.6 While playing

+ +
+
<q, ESC>
+

Quit. +

+
+
<f>
+

Toggle full screen. +

+
+
<p, SPC>
+

Pause. +

+
+
<a>
+

Cycle audio channel in the current program.

+
+
<v>
+

Cycle video channel. +

+
+
<t>
+

Cycle subtitle channel in the current program. +

+
+
<c>
+

Cycle program. +

+
+
<w>
+

Show audio waves. +

+
+
<s>
+

Step to the next frame. +

+

Pause if the stream is not already paused, step to the next video +frame, and pause. +

+
+
<left/right>
+

Seek backward/forward 10 seconds. +

+
+
<down/up>
+

Seek backward/forward 1 minute. +

+
+
<page down/page up>
+

Seek to the previous/next chapter. +or if there are no chapters +Seek backward/forward 10 minutes. +

+
+
<mouse click>
+

Seek to percentage in file corresponding to fraction of width. +

+
+
+ + + +

4. Syntax

+ +

This section documents the syntax and formats employed by the FFmpeg +libraries and tools. +

+

+

+

4.1 Quoting and escaping

+ +

FFmpeg adopts the following quoting and escaping mechanism, unless +explicitly specified. The following rules are applied: +

+
    +
  • +' and \ are special characters (respectively used for +quoting and escaping). In addition to them, there might be other +special characters depending on the specific syntax where the escaping +and quoting are employed. + +
  • +A special character is escaped by prefixing it with a ’\’. + +
  • All characters enclosed between '' are included literally in the parsed string. The quote character ' itself cannot be quoted, so you may need to close the quote and escape it.
  • +Leading and trailing whitespaces, unless escaped or quoted, are +removed from the parsed string. +
+ +

Note that you may need to add a second level of escaping when using +the command line or a script, which depends on the syntax of the +adopted shell language. +

+

The function av_get_token defined in +‘libavutil/avstring.h’ can be used to parse a token quoted or +escaped according to the rules defined above. +

+

The tool ‘tools/ffescape’ in the FFmpeg source tree can be used +to automatically quote or escape a string in a script. +

+ +

4.1.1 Examples

+ +
    +
  • +Escape the string Crime d'Amour containing the ' special +character: +
     
    Crime d\'Amour
    +
    + +
  • +The string above contains a quote, so the ' needs to be escaped +when quoting it: +
     
    'Crime d'\''Amour'
    +
    + +
  • +Include leading or trailing whitespaces using quoting: +
     
    '  this string starts and ends with whitespaces  '
    +
    + +
  • +Escaping and quoting can be mixed together: +
     
    ' The string '\'string\'' is a string '
    +
    + +
  • +To include a literal \ you can use either escaping or quoting: +
     
    'c:\foo' can be written as c:\\foo
    +
    +
+ +

+

+

4.2 Date

+ +

The accepted syntax is: +

 
[(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH:MM:SS[.m...]]])|(HHMMSS[.m...]]]))[Z]
+now
+
+ +

If the value is "now" it takes the current time. +

+

Time is local time unless Z is appended, in which case it is +interpreted as UTC. +If the year-month-day part is not specified it takes the current +year-month-day. +

+

+

+

4.3 Time duration

+ +

There are two accepted syntaxes for expressing time duration. +

+
 
[-][HH:]MM:SS[.m...]
+
+ +

HH expresses the number of hours, MM the number of minutes +for a maximum of 2 digits, and SS the number of seconds for a +maximum of 2 digits. The m at the end expresses decimal value for +SS. +

+

or +

+
 
[-]S+[.m...]
+
+ +

S expresses the number of seconds, with the optional decimal part +m. +

+

In both expressions, the optional ‘-’ indicates negative duration. +

+ +

4.3.1 Examples

+ +

The following examples are all valid time duration: +

+
+
55
+

55 seconds +

+
+
12:03:45
+

12 hours, 03 minutes and 45 seconds +

+
+
23.189
+

23.189 seconds +

+
+ +

+

+

4.4 Video size

+

Specify the size of the sourced video, it may be a string of the form +widthxheight, or the name of a size abbreviation. +

+

The following abbreviations are recognized: +

+
ntsc
+

720x480 +

+
pal
+

720x576 +

+
qntsc
+

352x240 +

+
qpal
+

352x288 +

+
sntsc
+

640x480 +

+
spal
+

768x576 +

+
film
+

352x240 +

+
ntsc-film
+

352x240 +

+
sqcif
+

128x96 +

+
qcif
+

176x144 +

+
cif
+

352x288 +

+
4cif
+

704x576 +

+
16cif
+

1408x1152 +

+
qqvga
+

160x120 +

+
qvga
+

320x240 +

+
vga
+

640x480 +

+
svga
+

800x600 +

+
xga
+

1024x768 +

+
uxga
+

1600x1200 +

+
qxga
+

2048x1536 +

+
sxga
+

1280x1024 +

+
qsxga
+

2560x2048 +

+
hsxga
+

5120x4096 +

+
wvga
+

852x480 +

+
wxga
+

1366x768 +

+
wsxga
+

1600x1024 +

+
wuxga
+

1920x1200 +

+
woxga
+

2560x1600 +

+
wqsxga
+

3200x2048 +

+
wquxga
+

3840x2400 +

+
whsxga
+

6400x4096 +

+
whuxga
+

7680x4800 +

+
cga
+

320x200 +

+
ega
+

640x350 +

+
hd480
+

852x480 +

+
hd720
+

1280x720 +

+
hd1080
+

1920x1080 +

+
2k
+

2048x1080 +

+
2kflat
+

1998x1080 +

+
2kscope
+

2048x858 +

+
4k
+

4096x2160 +

+
4kflat
+

3996x2160 +

+
4kscope
+

4096x1716 +

+
nhd
+

640x360 +

+
hqvga
+

240x160 +

+
wqvga
+

400x240 +

+
fwqvga
+

432x240 +

+
hvga
+

480x320 +

+
qhd
+

960x540 +

+
+ +

+

+

4.5 Video rate

+ +

Specify the frame rate of a video, expressed as the number of frames +generated per second. It has to be a string in the format +frame_rate_num/frame_rate_den, an integer number, a float +number or a valid video frame rate abbreviation. +

+

The following abbreviations are recognized: +

+
ntsc
+

30000/1001 +

+
pal
+

25/1 +

+
qntsc
+

30000/1001 +

+
qpal
+

25/1 +

+
sntsc
+

30000/1001 +

+
spal
+

25/1 +

+
film
+

24/1 +

+
ntsc-film
+

24000/1001 +

+
+ +

+

+

4.6 Ratio

+ +

A ratio can be expressed as an expression, or in the form +numerator:denominator. +

+

Note that a ratio with infinite (1/0) or negative value is +considered valid, so you should check on the returned value if you +want to exclude those values. +

+

The undefined value can be expressed using the "0:0" string. +

+

+

+

4.7 Color

+ +

It can be the name of a color as defined below (case insensitive match) or a +[0x|#]RRGGBB[AA] sequence, possibly followed by @ and a string +representing the alpha component. +

+

The alpha component may be a string composed by "0x" followed by an +hexadecimal number or a decimal number between 0.0 and 1.0, which +represents the opacity value (‘0x00’ or ‘0.0’ means completely +transparent, ‘0xff’ or ‘1.0’ completely opaque). If the alpha +component is not specified then ‘0xff’ is assumed. +

+

The string ‘random’ will result in a random color. +

+

The following names of colors are recognized: +

+
AliceBlue
+

0xF0F8FF +

+
AntiqueWhite
+

0xFAEBD7 +

+
Aqua
+

0x00FFFF +

+
Aquamarine
+

0x7FFFD4 +

+
Azure
+

0xF0FFFF +

+
Beige
+

0xF5F5DC +

+
Bisque
+

0xFFE4C4 +

+
Black
+

0x000000 +

+
BlanchedAlmond
+

0xFFEBCD +

+
Blue
+

0x0000FF +

+
BlueViolet
+

0x8A2BE2 +

+
Brown
+

0xA52A2A +

+
BurlyWood
+

0xDEB887 +

+
CadetBlue
+

0x5F9EA0 +

+
Chartreuse
+

0x7FFF00 +

+
Chocolate
+

0xD2691E +

+
Coral
+

0xFF7F50 +

+
CornflowerBlue
+

0x6495ED +

+
Cornsilk
+

0xFFF8DC +

+
Crimson
+

0xDC143C +

+
Cyan
+

0x00FFFF +

+
DarkBlue
+

0x00008B +

+
DarkCyan
+

0x008B8B +

+
DarkGoldenRod
+

0xB8860B +

+
DarkGray
+

0xA9A9A9 +

+
DarkGreen
+

0x006400 +

+
DarkKhaki
+

0xBDB76B +

+
DarkMagenta
+

0x8B008B +

+
DarkOliveGreen
+

0x556B2F +

+
Darkorange
+

0xFF8C00 +

+
DarkOrchid
+

0x9932CC +

+
DarkRed
+

0x8B0000 +

+
DarkSalmon
+

0xE9967A +

+
DarkSeaGreen
+

0x8FBC8F +

+
DarkSlateBlue
+

0x483D8B +

+
DarkSlateGray
+

0x2F4F4F +

+
DarkTurquoise
+

0x00CED1 +

+
DarkViolet
+

0x9400D3 +

+
DeepPink
+

0xFF1493 +

+
DeepSkyBlue
+

0x00BFFF +

+
DimGray
+

0x696969 +

+
DodgerBlue
+

0x1E90FF +

+
FireBrick
+

0xB22222 +

+
FloralWhite
+

0xFFFAF0 +

+
ForestGreen
+

0x228B22 +

+
Fuchsia
+

0xFF00FF +

+
Gainsboro
+

0xDCDCDC +

+
GhostWhite
+

0xF8F8FF +

+
Gold
+

0xFFD700 +

+
GoldenRod
+

0xDAA520 +

+
Gray
+

0x808080 +

+
Green
+

0x008000 +

+
GreenYellow
+

0xADFF2F +

+
HoneyDew
+

0xF0FFF0 +

+
HotPink
+

0xFF69B4 +

+
IndianRed
+

0xCD5C5C +

+
Indigo
+

0x4B0082 +

+
Ivory
+

0xFFFFF0 +

+
Khaki
+

0xF0E68C +

+
Lavender
+

0xE6E6FA +

+
LavenderBlush
+

0xFFF0F5 +

+
LawnGreen
+

0x7CFC00 +

+
LemonChiffon
+

0xFFFACD +

+
LightBlue
+

0xADD8E6 +

+
LightCoral
+

0xF08080 +

+
LightCyan
+

0xE0FFFF +

+
LightGoldenRodYellow
+

0xFAFAD2 +

+
LightGreen
+

0x90EE90 +

+
LightGrey
+

0xD3D3D3 +

+
LightPink
+

0xFFB6C1 +

+
LightSalmon
+

0xFFA07A +

+
LightSeaGreen
+

0x20B2AA +

+
LightSkyBlue
+

0x87CEFA +

+
LightSlateGray
+

0x778899 +

+
LightSteelBlue
+

0xB0C4DE +

+
LightYellow
+

0xFFFFE0 +

+
Lime
+

0x00FF00 +

+
LimeGreen
+

0x32CD32 +

+
Linen
+

0xFAF0E6 +

+
Magenta
+

0xFF00FF +

+
Maroon
+

0x800000 +

+
MediumAquaMarine
+

0x66CDAA +

+
MediumBlue
+

0x0000CD +

+
MediumOrchid
+

0xBA55D3 +

+
MediumPurple
+

0x9370D8 +

+
MediumSeaGreen
+

0x3CB371 +

+
MediumSlateBlue
+

0x7B68EE +

+
MediumSpringGreen
+

0x00FA9A +

+
MediumTurquoise
+

0x48D1CC +

+
MediumVioletRed
+

0xC71585 +

+
MidnightBlue
+

0x191970 +

+
MintCream
+

0xF5FFFA +

+
MistyRose
+

0xFFE4E1 +

+
Moccasin
+

0xFFE4B5 +

+
NavajoWhite
+

0xFFDEAD +

+
Navy
+

0x000080 +

+
OldLace
+

0xFDF5E6 +

+
Olive
+

0x808000 +

+
OliveDrab
+

0x6B8E23 +

+
Orange
+

0xFFA500 +

+
OrangeRed
+

0xFF4500 +

+
Orchid
+

0xDA70D6 +

+
PaleGoldenRod
+

0xEEE8AA +

+
PaleGreen
+

0x98FB98 +

+
PaleTurquoise
+

0xAFEEEE +

+
PaleVioletRed
+

0xD87093 +

+
PapayaWhip
+

0xFFEFD5 +

+
PeachPuff
+

0xFFDAB9 +

+
Peru
+

0xCD853F +

+
Pink
+

0xFFC0CB +

+
Plum
+

0xDDA0DD +

+
PowderBlue
+

0xB0E0E6 +

+
Purple
+

0x800080 +

+
Red
+

0xFF0000 +

+
RosyBrown
+

0xBC8F8F +

+
RoyalBlue
+

0x4169E1 +

+
SaddleBrown
+

0x8B4513 +

+
Salmon
+

0xFA8072 +

+
SandyBrown
+

0xF4A460 +

+
SeaGreen
+

0x2E8B57 +

+
SeaShell
+

0xFFF5EE +

+
Sienna
+

0xA0522D +

+
Silver
+

0xC0C0C0 +

+
SkyBlue
+

0x87CEEB +

+
SlateBlue
+

0x6A5ACD +

+
SlateGray
+

0x708090 +

+
Snow
+

0xFFFAFA +

+
SpringGreen
+

0x00FF7F +

+
SteelBlue
+

0x4682B4 +

+
Tan
+

0xD2B48C +

+
Teal
+

0x008080 +

+
Thistle
+

0xD8BFD8 +

+
Tomato
+

0xFF6347 +

+
Turquoise
+

0x40E0D0 +

+
Violet
+

0xEE82EE +

+
Wheat
+

0xF5DEB3 +

+
White
+

0xFFFFFF +

+
WhiteSmoke
+

0xF5F5F5 +

+
Yellow
+

0xFFFF00 +

+
YellowGreen
+

0x9ACD32 +

+
+ +

+

+

4.8 Channel Layout

+ +

A channel layout specifies the spatial disposition of the channels in +a multi-channel audio stream. To specify a channel layout, FFmpeg +makes use of a special syntax. +

+

Individual channels are identified by an id, as given by the table +below: +

+
FL
+

front left +

+
FR
+

front right +

+
FC
+

front center +

+
LFE
+

low frequency +

+
BL
+

back left +

+
BR
+

back right +

+
FLC
+

front left-of-center +

+
FRC
+

front right-of-center +

+
BC
+

back center +

+
SL
+

side left +

+
SR
+

side right +

+
TC
+

top center +

+
TFL
+

top front left +

+
TFC
+

top front center +

+
TFR
+

top front right +

+
TBL
+

top back left +

+
TBC
+

top back center +

+
TBR
+

top back right +

+
DL
+

downmix left +

+
DR
+

downmix right +

+
WL
+

wide left +

+
WR
+

wide right +

+
SDL
+

surround direct left +

+
SDR
+

surround direct right +

+
LFE2
+

low frequency 2 +

+
+ +

Standard channel layout compositions can be specified by using the +following identifiers: +

+
mono
+

FC +

+
stereo
+

FL+FR +

+
2.1
+

FL+FR+LFE +

+
3.0
+

FL+FR+FC +

+
3.0(back)
+

FL+FR+BC +

+
4.0
+

FL+FR+FC+BC +

+
quad
+

FL+FR+BL+BR +

+
quad(side)
+

FL+FR+SL+SR +

+
3.1
+

FL+FR+FC+LFE +

+
5.0
+

FL+FR+FC+BL+BR +

+
5.0(side)
+

FL+FR+FC+SL+SR +

+
4.1
+

FL+FR+FC+LFE+BC +

+
5.1
+

FL+FR+FC+LFE+BL+BR +

+
5.1(side)
+

FL+FR+FC+LFE+SL+SR +

+
6.0
+

FL+FR+FC+BC+SL+SR +

+
6.0(front)
+

FL+FR+FLC+FRC+SL+SR +

+
hexagonal
+

FL+FR+FC+BL+BR+BC +

+
6.1
+

FL+FR+FC+LFE+BC+SL+SR +

+
6.1(back)
+

FL+FR+FC+LFE+BL+BR+BC +

+
6.1(front)
+

FL+FR+LFE+FLC+FRC+SL+SR +

+
7.0
+

FL+FR+FC+BL+BR+SL+SR +

+
7.0(front)
+

FL+FR+FC+FLC+FRC+SL+SR +

+
7.1
+

FL+FR+FC+LFE+BL+BR+SL+SR +

+
7.1(wide)
+

FL+FR+FC+LFE+BL+BR+FLC+FRC +

+
7.1(wide-side)
+

FL+FR+FC+LFE+FLC+FRC+SL+SR +

+
octagonal
+

FL+FR+FC+BL+BR+BC+SL+SR +

+
downmix
+

DL+DR +

+
+ +

A custom channel layout can be specified as a sequence of terms, separated by +’+’ or ’|’. Each term can be: +

    +
  • +the name of a standard channel layout (e.g. ‘mono’, +‘stereo’, ‘4.0’, ‘quad’, ‘5.0’, etc.) + +
  • +the name of a single channel (e.g. ‘FL’, ‘FR’, ‘FC’, ‘LFE’, etc.) + +
  • +a number of channels, in decimal, optionally followed by ’c’, yielding +the default channel layout for that number of channels (see the +function av_get_default_channel_layout) + +
  • +a channel layout mask, in hexadecimal starting with "0x" (see the +AV_CH_* macros in ‘libavutil/channel_layout.h’. +
+ +

Starting from libavutil version 53 the trailing character "c" to +specify a number of channels will be required, while a channel layout +mask could also be specified as a decimal number (if and only if not +followed by "c"). +

+

See also the function av_get_channel_layout defined in +‘libavutil/channel_layout.h’. +

+ +

5. Expression Evaluation

+ +

When evaluating an arithmetic expression, FFmpeg uses an internal +formula evaluator, implemented through the ‘libavutil/eval.h’ +interface. +

+

An expression may contain unary, binary operators, constants, and +functions. +

+

Two expressions expr1 and expr2 can be combined to form +another expression "expr1;expr2". +expr1 and expr2 are evaluated in turn, and the new +expression evaluates to the value of expr2. +

+

The following binary operators are available: +, -, +*, /, ^. +

+

The following unary operators are available: +, -. +

+

The following functions are available: +

+
abs(x)
+

Compute absolute value of x. +

+
+
acos(x)
+

Compute arccosine of x. +

+
+
asin(x)
+

Compute arcsine of x. +

+
+
atan(x)
+

Compute arctangent of x. +

+
+
between(x, min, max)
+

Return 1 if x is greater than or equal to min and lesser than or +equal to max, 0 otherwise. +

+
+
bitand(x, y)
+
bitor(x, y)
+

Compute bitwise and/or operation on x and y. +

+

The results of the evaluation of x and y are converted to +integers before executing the bitwise operation. +

+

Note that both the conversion to integer and the conversion back to +floating point can lose precision. Beware of unexpected results for +large numbers (usually 2^53 and larger). +

+
+
ceil(expr)
+

Round the value of expression expr upwards to the nearest +integer. For example, "ceil(1.5)" is "2.0". +

+
+
cos(x)
+

Compute cosine of x. +

+
+
cosh(x)
+

Compute hyperbolic cosine of x. +

+
+
eq(x, y)
+

Return 1 if x and y are equivalent, 0 otherwise. +

+
+
exp(x)
+

Compute exponential of x (with base e, the Euler’s number). +

+
+
floor(expr)
+

Round the value of expression expr downwards to the nearest +integer. For example, "floor(-1.5)" is "-2.0". +

+
+
gauss(x)
+

Compute Gauss function of x, corresponding to +exp(-x*x/2) / sqrt(2*PI). +

+
+
gcd(x, y)
+

Return the greatest common divisor of x and y. If both x and +y are 0 or either or both are less than zero then behavior is undefined. +

+
+
gt(x, y)
+

Return 1 if x is greater than y, 0 otherwise. +

+
+
gte(x, y)
+

Return 1 if x is greater than or equal to y, 0 otherwise. +

+
+
hypot(x, y)
+

This function is similar to the C function with the same name; it returns +"sqrt(x*x + y*y)", the length of the hypotenuse of a +right triangle with sides of length x and y, or the distance of the +point (x, y) from the origin. +

+
+
if(x, y)
+

Evaluate x, and if the result is non-zero return the result of +the evaluation of y, return 0 otherwise. +

+
+
if(x, y, z)
+

Evaluate x, and if the result is non-zero return the evaluation +result of y, otherwise the evaluation result of z. +

+
+
ifnot(x, y)
+

Evaluate x, and if the result is zero return the result of the +evaluation of y, return 0 otherwise. +

+
+
ifnot(x, y, z)
+

Evaluate x, and if the result is zero return the evaluation +result of y, otherwise the evaluation result of z. +

+
+
isinf(x)
+

Return 1.0 if x is +/-INFINITY, 0.0 otherwise. +

+
+
isnan(x)
+

Return 1.0 if x is NAN, 0.0 otherwise. +

+
+
ld(var)
+

Allow to load the value of the internal variable with number +var, which was previously stored with st(var, expr). +The function returns the loaded value. +

+
+
log(x)
+

Compute natural logarithm of x. +

+
+
lt(x, y)
+

Return 1 if x is lesser than y, 0 otherwise. +

+
+
lte(x, y)
+

Return 1 if x is lesser than or equal to y, 0 otherwise. +

+
+
max(x, y)
+

Return the maximum between x and y. +

+
+
min(x, y)
+

Return the minimum between x and y.

+
+
mod(x, y)
+

Compute the remainder of division of x by y. +

+
+
not(expr)
+

Return 1.0 if expr is zero, 0.0 otherwise. +

+
+
pow(x, y)
+

Compute x raised to the power of y; it is equivalent to "(x)^(y)".

+
+
print(t)
+
print(t, l)
+

Print the value of expression t with loglevel l. If +l is not specified then a default log level is used. +Returns the value of the expression printed. +

+

Prints t with loglevel l +

+
+
random(x)
+

Return a pseudo random value between 0.0 and 1.0. x is the index of the +internal variable which will be used to save the seed/state. +

+
+
root(expr, max)
+

Find an input value for which the function represented by expr +with argument ld(0) is 0 in the interval 0..max. +

+

The expression in expr must denote a continuous function or the +result is undefined. +

+

ld(0) is used to represent the function input value, which means +that the given expression will be evaluated multiple times with +various input values that the expression can access through +ld(0). When the expression evaluates to 0 then the +corresponding input value will be returned. +

+
+
sin(x)
+

Compute sine of x. +

+
+
sinh(x)
+

Compute hyperbolic sine of x. +

+
+
sqrt(expr)
+

Compute the square root of expr. This is equivalent to +"(expr)^.5". +

+
+
squish(x)
+

Compute expression 1/(1 + exp(4*x)). +

+
+
st(var, expr)
+

Allow to store the value of the expression expr in an internal +variable. var specifies the number of the variable where to +store the value, and it is a value ranging from 0 to 9. The function +returns the value stored in the internal variable. +Note, Variables are currently not shared between expressions. +

+
+
tan(x)
+

Compute tangent of x. +

+
+
tanh(x)
+

Compute hyperbolic tangent of x. +

+
+
taylor(expr, x)
+
taylor(expr, x, id)
+

Evaluate a Taylor series at x, given an expression representing +the ld(id)-th derivative of a function at 0. +

+

When the series does not converge the result is undefined. +

+

ld(id) is used to represent the derivative order in expr, +which means that the given expression will be evaluated multiple times +with various input values that the expression can access through +ld(id). If id is not specified then 0 is assumed. +

+

Note, when you have the derivatives at y instead of 0, +taylor(expr, x-y) can be used. +

+
+
time(0)
+

Return the current (wallclock) time in seconds. +

+
+
trunc(expr)
+

Round the value of expression expr towards zero to the nearest +integer. For example, "trunc(-1.5)" is "-1.0". +

+
+
while(cond, expr)
+

Evaluate expression expr while the expression cond is +non-zero, and returns the value of the last expr evaluation, or +NAN if cond was always false. +

+
+ +

The following constants are available: +

+
PI
+

area of the unit disc, approximately 3.14 +

+
E
+

exp(1) (Euler’s number), approximately 2.718 +

+
PHI
+

golden ratio (1+sqrt(5))/2, approximately 1.618 +

+
+ +

Assuming that an expression is considered "true" if it has a non-zero +value, note that: +

+

* works like AND +

+

+ works like OR +

+

For example the construct: +

 
if (A AND B) then C
+
+

is equivalent to: +

 
if(A*B, C)
+
+ +

In your C code, you can extend the list of unary and binary functions, +and define recognized constants, so that they are available for your +expressions. +

+

The evaluator also recognizes the International System unit prefixes. +If ’i’ is appended after the prefix, binary prefixes are used, which +are based on powers of 1024 instead of powers of 1000. +The ’B’ postfix multiplies the value by 8, and can be appended after a +unit prefix or used alone. This allows using for example ’KB’, ’MiB’, +’G’ and ’B’ as number postfix. +

+

The list of available International System prefixes follows, with +indication of the corresponding powers of 10 and of 2. +

+
y
+

10^-24 / 2^-80 +

+
z
+

10^-21 / 2^-70 +

+
a
+

10^-18 / 2^-60 +

+
f
+

10^-15 / 2^-50 +

+
p
+

10^-12 / 2^-40 +

+
n
+

10^-9 / 2^-30 +

+
u
+

10^-6 / 2^-20 +

+
m
+

10^-3 / 2^-10 +

+
c
+

10^-2 +

+
d
+

10^-1 +

+
h
+

10^2 +

+
k
+

10^3 / 2^10 +

+
K
+

10^3 / 2^10 +

+
M
+

10^6 / 2^20 +

+
G
+

10^9 / 2^30 +

+
T
+

10^12 / 2^40 +

+
P
+

10^15 / 2^50

+
E
+

10^18 / 2^60

+
Z
+

10^21 / 2^70

+
Y
+

10^24 / 2^80

+
+ + + +

6. OpenCL Options

+ +

When FFmpeg is configured with --enable-opencl, it is possible +to set the options for the global OpenCL context. +

+

The list of supported options follows: +

+
+
build_options
+

Set build options used to compile the registered kernels. +

+

See reference "OpenCL Specification Version: 1.2 chapter 5.6.4". +

+
+
platform_idx
+

Select the index of the platform to run OpenCL code. +

+

The specified index must be one of the indexes in the device list +which can be obtained with ffmpeg -opencl_bench or av_opencl_get_device_list(). +

+
+
device_idx
+

Select the index of the device used to run OpenCL code. +

+

The specified index must be one of the indexes in the device list which +can be obtained with ffmpeg -opencl_bench or av_opencl_get_device_list(). +

+
+
+ +

+

+

7. Codec Options

+ +

libavcodec provides some generic global options, which can be set on +all the encoders and decoders. In addition each codec may support +so-called private options, which are specific for a given codec. +

+

Sometimes, a global option may only affect a specific kind of codec, +and may be unsensical or ignored by another, so you need to be aware +of the meaning of the specified options. Also some options are +meant only for decoding or encoding. +

+

Options may be set by specifying -option value in the +FFmpeg tools, or by setting the value explicitly in the +AVCodecContext options or using the ‘libavutil/opt.h’ API +for programmatic use. +

+

The list of supported options follow: +

+
+
b integer (encoding,audio,video)
+

Set bitrate in bits/s. Default value is 200K. +

+
+
ab integer (encoding,audio)
+

Set audio bitrate (in bits/s). Default value is 128K. +

+
+
bt integer (encoding,video)
+

Set video bitrate tolerance (in bits/s). In 1-pass mode, bitrate +tolerance specifies how far ratecontrol is willing to deviate from the +target average bitrate value. This is not related to min/max +bitrate. Lowering tolerance too much has an adverse effect on quality. +

+
+
flags flags (decoding/encoding,audio,video,subtitles)
+

Set generic flags. +

+

Possible values: +

+
mv4
+

Use four motion vector by macroblock (mpeg4). +

+
qpel
+

Use 1/4 pel motion compensation. +

+
loop
+

Use loop filter. +

+
qscale
+

Use fixed qscale. +

+
gmc
+

Use gmc. +

+
mv0
+

Always try a mb with mv=<0,0>. +

+
input_preserved
+
pass1
+

Use internal 2pass ratecontrol in first pass mode. +

+
pass2
+

Use internal 2pass ratecontrol in second pass mode. +

+
gray
+

Only decode/encode grayscale. +

+
emu_edge
+

Do not draw edges. +

+
psnr
+

Set error[?] variables during encoding. +

+
truncated
+
naq
+

Normalize adaptive quantization. +

+
ildct
+

Use interlaced DCT. +

+
low_delay
+

Force low delay. +

+
global_header
+

Place global headers in extradata instead of every keyframe. +

+
bitexact
+

Use only bitexact stuff (except (I)DCT). +

+
aic
+

Apply H263 advanced intra coding / mpeg4 ac prediction. +

+
cbp
+

Deprecated, use mpegvideo private options instead. +

+
qprd
+

Deprecated, use mpegvideo private options instead. +

+
ilme
+

Apply interlaced motion estimation. +

+
cgop
+

Use closed gop. +

+
+ +
+
me_method integer (encoding,video)
+

Set motion estimation method. +

+

Possible values: +

+
zero
+

zero motion estimation (fastest) +

+
full
+

full motion estimation (slowest) +

+
epzs
+

EPZS motion estimation (default) +

+
esa
+

esa motion estimation (alias for full) +

+
tesa
+

tesa motion estimation +

+
dia
+

dia motion estimation (alias for epzs) +

+
log
+

log motion estimation +

+
phods
+

phods motion estimation +

+
x1
+

X1 motion estimation +

+
hex
+

hex motion estimation +

+
umh
+

umh motion estimation +

+
iter
+

iter motion estimation +

+
+ +
+
extradata_size integer
+

Set extradata size. +

+
+
time_base rational number
+

Set codec time base. +

+

It is the fundamental unit of time (in seconds) in terms of which +frame timestamps are represented. For fixed-fps content, timebase +should be 1 / frame_rate and timestamp increments should be +identically 1. +

+
+
g integer (encoding,video)
+

Set the group of picture size. Default value is 12. +

+
+
ar integer (decoding/encoding,audio)
+

Set audio sampling rate (in Hz). +

+
+
ac integer (decoding/encoding,audio)
+

Set number of audio channels. +

+
+
cutoff integer (encoding,audio)
+

Set cutoff bandwidth. +

+
+
frame_size integer (encoding,audio)
+

Set audio frame size. +

+

Each submitted frame except the last must contain exactly frame_size +samples per channel. May be 0 when the codec has +CODEC_CAP_VARIABLE_FRAME_SIZE set, in that case the frame size is not +restricted. It is set by some decoders to indicate constant frame +size. +

+
+
frame_number integer
+

Set the frame number. +

+
+
delay integer
+
qcomp float (encoding,video)
+

Set video quantizer scale compression (VBR). It is used as a constant +in the ratecontrol equation. Recommended range for default rc_eq: +0.0-1.0. +

+
+
qblur float (encoding,video)
+

Set video quantizer scale blur (VBR). +

+
+
qmin integer (encoding,video)
+

Set min video quantizer scale (VBR). Must be included between -1 and +69, default value is 2. +

+
+
qmax integer (encoding,video)
+

Set max video quantizer scale (VBR). Must be included between -1 and +1024, default value is 31. +

+
+
qdiff integer (encoding,video)
+

Set max difference between the quantizer scale (VBR). +

+
+
bf integer (encoding,video)
+

Set max number of B frames between non-B-frames. +

+

Must be an integer between -1 and 16. 0 means that B-frames are +disabled. If a value of -1 is used, it will choose an automatic value +depending on the encoder. +

+

Default value is 0. +

+
+
b_qfactor float (encoding,video)
+

Set qp factor between P and B frames. +

+
+
rc_strategy integer (encoding,video)
+

Set ratecontrol method. +

+
+
b_strategy integer (encoding,video)
+

Set strategy to choose between I/P/B-frames. +

+
+
ps integer (encoding,video)
+

Set RTP payload size in bytes. +

+
+
mv_bits integer
+
header_bits integer
+
i_tex_bits integer
+
p_tex_bits integer
+
i_count integer
+
p_count integer
+
skip_count integer
+
misc_bits integer
+
frame_bits integer
+
codec_tag integer
+
bug flags (decoding,video)
+

Workaround not auto detected encoder bugs. +

+

Possible values: +

+
autodetect
+
old_msmpeg4
+

some old lavc generated msmpeg4v3 files (no autodetection) +

+
xvid_ilace
+

Xvid interlacing bug (autodetected if fourcc==XVIX) +

+
ump4
+

(autodetected if fourcc==UMP4) +

+
no_padding
+

padding bug (autodetected) +

+
amv
+
ac_vlc
+

illegal vlc bug (autodetected per fourcc) +

+
qpel_chroma
+
std_qpel
+

old standard qpel (autodetected per fourcc/version) +

+
qpel_chroma2
+
direct_blocksize
+

direct-qpel-blocksize bug (autodetected per fourcc/version) +

+
edge
+

edge padding bug (autodetected per fourcc/version) +

+
hpel_chroma
+
dc_clip
+
ms
+

Workaround various bugs in microsoft broken decoders. +

+
trunc
+

truncated frames

+
+ +
+
lelim integer (encoding,video)
+

Set single coefficient elimination threshold for luminance (negative +values also consider DC coefficient). +

+
+
celim integer (encoding,video)
+

Set single coefficient elimination threshold for chrominance (negative +values also consider dc coefficient) +

+
+
strict integer (decoding/encoding,audio,video)
+

Specify how strictly to follow the standards. +

+

Possible values: +

+
very
+

strictly conform to a older more strict version of the spec or reference software +

+
strict
+

strictly conform to all the things in the spec no matter what consequences +

+
normal
+
unofficial
+

allow unofficial extensions +

+
experimental
+

allow non standardized experimental things, experimental +(unfinished/work in progress/not well tested) decoders and encoders. +Note: experimental decoders can pose a security risk, do not use this for +decoding untrusted input. +

+
+ +
+
b_qoffset float (encoding,video)
+

Set QP offset between P and B frames. +

+
+
err_detect flags (decoding,audio,video)
+

Set error detection flags. +

+

Possible values: +

+
crccheck
+

verify embedded CRCs +

+
bitstream
+

detect bitstream specification deviations +

+
buffer
+

detect improper bitstream length +

+
explode
+

abort decoding on minor error detection +

+
careful
+

consider things that violate the spec and have not been seen in the wild as errors +

+
compliant
+

consider all spec non compliancies as errors +

+
aggressive
+

consider things that a sane encoder should not do as an error +

+
+ +
+
has_b_frames integer
+
block_align integer
+
mpeg_quant integer (encoding,video)
+

Use MPEG quantizers instead of H.263. +

+
+
qsquish float (encoding,video)
+

How to keep quantizer between qmin and qmax (0 = clip, 1 = use +differentiable function). +

+
+
rc_qmod_amp float (encoding,video)
+

Set experimental quantizer modulation. +

+
+
rc_qmod_freq integer (encoding,video)
+

Set experimental quantizer modulation. +

+
+
rc_override_count integer
+
rc_eq string (encoding,video)
+

Set rate control equation. When computing the expression, besides the +standard functions defined in the section ’Expression Evaluation’, the +following functions are available: bits2qp(bits), qp2bits(qp). Also +the following constants are available: iTex pTex tex mv fCode iCount +mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex +avgTex. +

+
+
maxrate integer (encoding,audio,video)
+

Set max bitrate tolerance (in bits/s). Requires bufsize to be set. +

+
+
minrate integer (encoding,audio,video)
+

Set min bitrate tolerance (in bits/s). Most useful in setting up a CBR +encode. It is of little use elsewise. +

+
+
bufsize integer (encoding,audio,video)
+

Set ratecontrol buffer size (in bits). +

+
+
rc_buf_aggressivity float (encoding,video)
+

Currently useless. +

+
+
i_qfactor float (encoding,video)
+

Set QP factor between P and I frames. +

+
+
i_qoffset float (encoding,video)
+

Set QP offset between P and I frames. +

+
+
rc_init_cplx float (encoding,video)
+

Set initial complexity for 1-pass encoding. +

+
+
dct integer (encoding,video)
+

Set DCT algorithm. +

+

Possible values: +

+
auto
+

autoselect a good one (default) +

+
fastint
+

fast integer +

+
int
+

accurate integer +

+
mmx
+
altivec
+
faan
+

floating point AAN DCT +

+
+ +
+
lumi_mask float (encoding,video)
+

Compress bright areas stronger than medium ones. +

+
+
tcplx_mask float (encoding,video)
+

Set temporal complexity masking. +

+
+
scplx_mask float (encoding,video)
+

Set spatial complexity masking. +

+
+
p_mask float (encoding,video)
+

Set inter masking. +

+
+
dark_mask float (encoding,video)
+

Compress dark areas stronger than medium ones. +

+
+
idct integer (decoding/encoding,video)
+

Select IDCT implementation. +

+

Possible values: +

+
auto
+
int
+
simple
+
simplemmx
+
arm
+
altivec
+
sh4
+
simplearm
+
simplearmv5te
+
simplearmv6
+
simpleneon
+
simplealpha
+
ipp
+
xvidmmx
+
faani
+

floating point AAN IDCT +

+
+ +
+
slice_count integer
+
ec flags (decoding,video)
+

Set error concealment strategy. +

+

Possible values: +

+
guess_mvs
+

iterative motion vector (MV) search (slow) +

+
deblock
+

use strong deblock filter for damaged MBs +

+
+ +
+
bits_per_coded_sample integer
+
pred integer (encoding,video)
+

Set prediction method. +

+

Possible values: +

+
left
+
plane
+
median
+
+ +
+
aspect rational number (encoding,video)
+

Set sample aspect ratio. +

+
+
debug flags (decoding/encoding,audio,video,subtitles)
+

Print specific debug info. +

+

Possible values: +

+
pict
+

picture info +

+
rc
+

rate control +

+
bitstream
+
mb_type
+

macroblock (MB) type +

+
qp
+

per-block quantization parameter (QP) +

+
mv
+

motion vector +

+
dct_coeff
+
skip
+
startcode
+
pts
+
er
+

error recognition +

+
mmco
+

memory management control operations (H.264) +

+
bugs
+
vis_qp
+

visualize quantization parameter (QP), lower QP are tinted greener +

+
vis_mb_type
+

visualize block types +

+
buffers
+

picture buffer allocations +

+
thread_ops
+

threading operations +

+
+ +
+
vismv integer (decoding,video)
+

Visualize motion vectors (MVs). +

+

Possible values: +

+
pf
+

forward predicted MVs of P-frames +

+
bf
+

forward predicted MVs of B-frames +

+
bb
+

backward predicted MVs of B-frames +

+
+ +
+
cmp integer (encoding,video)
+

Set full pel me compare function. +

+

Possible values: +

+
sad
+

sum of absolute differences, fast (default) +

+
sse
+

sum of squared errors +

+
satd
+

sum of absolute Hadamard transformed differences +

+
dct
+

sum of absolute DCT transformed differences +

+
psnr
+

sum of squared quantization errors (avoid, low quality) +

+
bit
+

number of bits needed for the block +

+
rd
+

rate distortion optimal, slow +

+
zero
+

0 +

+
vsad
+

sum of absolute vertical differences +

+
vsse
+

sum of squared vertical differences +

+
nsse
+

noise preserving sum of squared differences +

+
w53
+

5/3 wavelet, only used in snow +

+
w97
+

9/7 wavelet, only used in snow +

+
dctmax
+
chroma
+
+ +
+
subcmp integer (encoding,video)
+

Set sub pel me compare function. +

+

Possible values: +

+
sad
+

sum of absolute differences, fast (default) +

+
sse
+

sum of squared errors +

+
satd
+

sum of absolute Hadamard transformed differences +

+
dct
+

sum of absolute DCT transformed differences +

+
psnr
+

sum of squared quantization errors (avoid, low quality) +

+
bit
+

number of bits needed for the block +

+
rd
+

rate distortion optimal, slow +

+
zero
+

0 +

+
vsad
+

sum of absolute vertical differences +

+
vsse
+

sum of squared vertical differences +

+
nsse
+

noise preserving sum of squared differences +

+
w53
+

5/3 wavelet, only used in snow +

+
w97
+

9/7 wavelet, only used in snow +

+
dctmax
+
chroma
+
+ +
+
mbcmp integer (encoding,video)
+

Set macroblock compare function. +

+

Possible values: +

+
sad
+

sum of absolute differences, fast (default) +

+
sse
+

sum of squared errors +

+
satd
+

sum of absolute Hadamard transformed differences +

+
dct
+

sum of absolute DCT transformed differences +

+
psnr
+

sum of squared quantization errors (avoid, low quality) +

+
bit
+

number of bits needed for the block +

+
rd
+

rate distortion optimal, slow +

+
zero
+

0 +

+
vsad
+

sum of absolute vertical differences +

+
vsse
+

sum of squared vertical differences +

+
nsse
+

noise preserving sum of squared differences +

+
w53
+

5/3 wavelet, only used in snow +

+
w97
+

9/7 wavelet, only used in snow +

+
dctmax
+
chroma
+
+ +
+
ildctcmp integer (encoding,video)
+

Set interlaced dct compare function. +

+

Possible values: +

+
sad
+

sum of absolute differences, fast (default) +

+
sse
+

sum of squared errors +

+
satd
+

sum of absolute Hadamard transformed differences +

+
dct
+

sum of absolute DCT transformed differences +

+
psnr
+

sum of squared quantization errors (avoid, low quality) +

+
bit
+

number of bits needed for the block +

+
rd
+

rate distortion optimal, slow +

+
zero
+

0 +

+
vsad
+

sum of absolute vertical differences +

+
vsse
+

sum of squared vertical differences +

+
nsse
+

noise preserving sum of squared differences +

+
w53
+

5/3 wavelet, only used in snow +

+
w97
+

9/7 wavelet, only used in snow +

+
dctmax
+
chroma
+
+ +
+
dia_size integer (encoding,video)
+

Set diamond type & size for motion estimation. +

+
+
last_pred integer (encoding,video)
+

Set amount of motion predictors from the previous frame. +

+
+
preme integer (encoding,video)
+

Set pre motion estimation. +

+
+
precmp integer (encoding,video)
+

Set pre motion estimation compare function. +

+

Possible values: +

+
sad
+

sum of absolute differences, fast (default) +

+
sse
+

sum of squared errors +

+
satd
+

sum of absolute Hadamard transformed differences +

+
dct
+

sum of absolute DCT transformed differences +

+
psnr
+

sum of squared quantization errors (avoid, low quality) +

+
bit
+

number of bits needed for the block +

+
rd
+

rate distortion optimal, slow +

+
zero
+

0 +

+
vsad
+

sum of absolute vertical differences +

+
vsse
+

sum of squared vertical differences +

+
nsse
+

noise preserving sum of squared differences +

+
w53
+

5/3 wavelet, only used in snow +

+
w97
+

9/7 wavelet, only used in snow +

+
dctmax
+
chroma
+
+ +
+
pre_dia_size integer (encoding,video)
+

Set diamond type & size for motion estimation pre-pass. +

+
+
subq integer (encoding,video)
+

Set sub pel motion estimation quality. +

+
+
dtg_active_format integer
+
me_range integer (encoding,video)
+

Set limit motion vectors range (1023 for DivX player). +

+
+
ibias integer (encoding,video)
+

Set intra quant bias. +

+
+
pbias integer (encoding,video)
+

Set inter quant bias. +

+
+
color_table_id integer
+
global_quality integer (encoding,audio,video)
+
coder integer (encoding,video)
+
+

Possible values: +

+
vlc
+

variable length coder / huffman coder +

+
ac
+

arithmetic coder +

+
raw
+

raw (no encoding) +

+
rle
+

run-length coder +

+
deflate
+

deflate-based coder +

+
+ +
+
context integer (encoding,video)
+

Set context model. +

+
+
slice_flags integer
+
xvmc_acceleration integer
+
mbd integer (encoding,video)
+

Set macroblock decision algorithm (high quality mode). +

+

Possible values: +

+
simple
+

use mbcmp (default) +

+
bits
+

use fewest bits +

+
rd
+

use best rate distortion +

+
+ +
+
stream_codec_tag integer
+
sc_threshold integer (encoding,video)
+

Set scene change threshold. +

+
+
lmin integer (encoding,video)
+

Set min lagrange factor (VBR). +

+
+
lmax integer (encoding,video)
+

Set max lagrange factor (VBR). +

+
+
nr integer (encoding,video)
+

Set noise reduction. +

+
+
rc_init_occupancy integer (encoding,video)
+

Set number of bits which should be loaded into the rc buffer before +decoding starts. +

+
+
flags2 flags (decoding/encoding,audio,video)
+
+

Possible values: +

+
fast
+

Allow non spec compliant speedup tricks. +

+
sgop
+

Deprecated, use mpegvideo private options instead. +

+
noout
+

Skip bitstream encoding. +

+
ignorecrop
+

Ignore cropping information from sps. +

+
local_header
+

Place global headers at every keyframe instead of in extradata. +

+
chunks
+

Frame data might be split into multiple chunks. +

+
showall
+

Show all frames before the first keyframe. +

+
skiprd
+

Deprecated, use mpegvideo private options instead. +

+
+ +
+
error integer (encoding,video)
+
qns integer (encoding,video)
+

Deprecated, use mpegvideo private options instead. +

+
+
threads integer (decoding/encoding,video)
+
+

Possible values: +

+
auto
+

detect a good number of threads +

+
+ +
+
me_threshold integer (encoding,video)
+

Set motion estimation threshold. +

+
+
mb_threshold integer (encoding,video)
+

Set macroblock threshold. +

+
+
dc integer (encoding,video)
+

Set intra_dc_precision. +

+
+
nssew integer (encoding,video)
+

Set nsse weight. +

+
+
skip_top integer (decoding,video)
+

Set number of macroblock rows at the top which are skipped. +

+
+
skip_bottom integer (decoding,video)
+

Set number of macroblock rows at the bottom which are skipped. +

+
+
profile integer (encoding,audio,video)
+
+

Possible values: +

+
unknown
+
aac_main
+
aac_low
+
aac_ssr
+
aac_ltp
+
aac_he
+
aac_he_v2
+
aac_ld
+
aac_eld
+
mpeg2_aac_low
+
mpeg2_aac_he
+
dts
+
dts_es
+
dts_96_24
+
dts_hd_hra
+
dts_hd_ma
+
+ +
+
level integer (encoding,audio,video)
+
+

Possible values: +

+
unknown
+
+ +
+
lowres integer (decoding,audio,video)
+

Decode at 1=1/2, 2=1/4, 3=1/8 resolutions. +

+
+
skip_threshold integer (encoding,video)
+

Set frame skip threshold. +

+
+
skip_factor integer (encoding,video)
+

Set frame skip factor. +

+
+
skip_exp integer (encoding,video)
+

Set frame skip exponent. +Negative values behave identically to the corresponding positive ones, except +that the score is normalized. +Positive values exist primarily for compatibility reasons and are not so useful. +

+
+
skipcmp integer (encoding,video)
+

Set frame skip compare function. +

+

Possible values: +

+
sad
+

sum of absolute differences, fast (default) +

+
sse
+

sum of squared errors +

+
satd
+

sum of absolute Hadamard transformed differences +

+
dct
+

sum of absolute DCT transformed differences +

+
psnr
+

sum of squared quantization errors (avoid, low quality) +

+
bit
+

number of bits needed for the block +

+
rd
+

rate distortion optimal, slow +

+
zero
+

0 +

+
vsad
+

sum of absolute vertical differences +

+
vsse
+

sum of squared vertical differences +

+
nsse
+

noise preserving sum of squared differences +

+
w53
+

5/3 wavelet, only used in snow +

+
w97
+

9/7 wavelet, only used in snow +

+
dctmax
+
chroma
+
+ +
+
border_mask float (encoding,video)
+

Increase the quantizer for macroblocks close to borders. +

+
+
mblmin integer (encoding,video)
+

Set min macroblock lagrange factor (VBR). +

+
+
mblmax integer (encoding,video)
+

Set max macroblock lagrange factor (VBR). +

+
+
mepc integer (encoding,video)
+

Set motion estimation bitrate penalty compensation (1.0 = 256). +

+
+
skip_loop_filter integer (decoding,video)
+
skip_idct integer (decoding,video)
+
skip_frame integer (decoding,video)
+
+

Make decoder discard processing depending on the frame type selected +by the option value. +

+

‘skip_loop_filter’ skips frame loop filtering, ‘skip_idct’ +skips frame IDCT/dequantization, ‘skip_frame’ skips decoding. +

+

Possible values: +

+
none
+

Discard no frame. +

+
+
default
+

Discard useless frames like 0-sized frames. +

+
+
noref
+

Discard all non-reference frames. +

+
+
bidir
+

Discard all bidirectional frames. +

+
+
nokey
+

Discard all frames excepts keyframes. +

+
+
all
+

Discard all frames. +

+
+ +

Default value is ‘default’. +

+
+
bidir_refine integer (encoding,video)
+

Refine the two motion vectors used in bidirectional macroblocks. +

+
+
brd_scale integer (encoding,video)
+

Downscale frames for dynamic B-frame decision. +

+
+
keyint_min integer (encoding,video)
+

Set minimum interval between IDR-frames. +

+
+
refs integer (encoding,video)
+

Set reference frames to consider for motion compensation. +

+
+
chromaoffset integer (encoding,video)
+

Set chroma qp offset from luma. +

+
+
trellis integer (encoding,audio,video)
+

Set rate-distortion optimal quantization. +

+
+
sc_factor integer (encoding,video)
+

Set value multiplied by qscale for each frame and added to +scene_change_score. +

+
+
mv0_threshold integer (encoding,video)
+
b_sensitivity integer (encoding,video)
+

Adjust sensitivity of b_frame_strategy 1. +

+
+
compression_level integer (encoding,audio,video)
+
min_prediction_order integer (encoding,audio)
+
max_prediction_order integer (encoding,audio)
+
timecode_frame_start integer (encoding,video)
+

Set GOP timecode frame start number, in non drop frame format. +

+
+
request_channels integer (decoding,audio)
+

Set desired number of audio channels. +

+
+
bits_per_raw_sample integer
+
channel_layout integer (decoding/encoding,audio)
+
+

Possible values: +

+
request_channel_layout integer (decoding,audio)
+
+

Possible values: +

+
rc_max_vbv_use float (encoding,video)
+
rc_min_vbv_use float (encoding,video)
+
ticks_per_frame integer (decoding/encoding,audio,video)
+
color_primaries integer (decoding/encoding,video)
+
color_trc integer (decoding/encoding,video)
+
colorspace integer (decoding/encoding,video)
+
color_range integer (decoding/encoding,video)
+
chroma_sample_location integer (decoding/encoding,video)
+
log_level_offset integer
+

Set the log level offset. +

+
+
slices integer (encoding,video)
+

Number of slices, used in parallelized encoding. +

+
+
thread_type flags (decoding/encoding,video)
+

Select multithreading type. +

+

Possible values: +

+
slice
+
frame
+
+
+
audio_service_type integer (encoding,audio)
+

Set audio service type. +

+

Possible values: +

+
ma
+

Main Audio Service +

+
ef
+

Effects +

+
vi
+

Visually Impaired +

+
hi
+

Hearing Impaired +

+
di
+

Dialogue +

+
co
+

Commentary +

+
em
+

Emergency +

+
vo
+

Voice Over +

+
ka
+

Karaoke +

+
+ +
+
request_sample_fmt sample_fmt (decoding,audio)
+

Set sample format audio decoders should prefer. Default value is +none. +

+
+
pkt_timebase rational number
+
sub_charenc encoding (decoding,subtitles)
+

Set the input subtitles character encoding. +

+
+
field_order field_order (video)
+

Set/override the field order of the video. +Possible values: +

+
progressive
+

Progressive video +

+
tt
+

Interlaced video, top field coded and displayed first +

+
bb
+

Interlaced video, bottom field coded and displayed first +

+
tb
+

Interlaced video, top coded first, bottom displayed first +

+
bt
+

Interlaced video, bottom coded first, top displayed first +

+
+ +
+
skip_alpha integer (decoding,video)
+

Set to 1 to disable processing alpha (transparency). This works like the +‘gray’ flag in the ‘flags’ option which skips chroma information +instead of alpha. Default is 0. +

+
+ + + +

8. Decoders

+ +

Decoders are configured elements in FFmpeg which allow the decoding of +multimedia streams. +

+

When you configure your FFmpeg build, all the supported native decoders +are enabled by default. Decoders requiring an external library must be enabled +manually via the corresponding --enable-lib option. You can list all +available decoders using the configure option --list-decoders. +

+

You can disable all the decoders with the configure option +--disable-decoders and selectively enable / disable single decoders +with the options --enable-decoder=DECODER / +--disable-decoder=DECODER. +

+

The option -decoders of the ff* tools will display the list of +enabled decoders. +

+ + +

9. Video Decoders

+ +

A description of some of the currently available video decoders +follows. +

+ +

9.1 rawvideo

+ +

Raw video decoder. +

+

This decoder decodes rawvideo streams. +

+ +

9.1.1 Options

+ +
+
top top_field_first
+

Specify the assumed field type of the input video. +

+
-1
+

the video is assumed to be progressive (default) +

+
0
+

bottom-field-first is assumed +

+
1
+

top-field-first is assumed +

+
+ +
+
+ + + +

10. Audio Decoders

+ +

A description of some of the currently available audio decoders +follows. +

+ +

10.1 ac3

+ +

AC-3 audio decoder. +

+

This decoder implements part of ATSC A/52:2010 and ETSI TS 102 366, as well as +the undocumented RealAudio 3 (a.k.a. dnet). +

+ +

10.1.1 AC-3 Decoder Options

+ +
+
-drc_scale value
+

Dynamic Range Scale Factor. The factor to apply to dynamic range values +from the AC-3 stream. This factor is applied exponentially. +There are 3 notable scale factor ranges: +

+
drc_scale == 0
+

DRC disabled. Produces full range audio. +

+
0 < drc_scale <= 1
+

DRC enabled. Applies a fraction of the stream DRC value. +Audio reproduction is between full range and full compression. +

+
drc_scale > 1
+

DRC enabled. Applies drc_scale asymmetrically. +Loud sounds are fully compressed. Soft sounds are enhanced. +

+
+ +
+
+ + +

10.2 ffwavesynth

+ +

Internal wave synthesizer. +

+

This decoder generates wave patterns according to predefined sequences. Its +use is purely internal and the format of the data it accepts is not publicly +documented. +

+ +

10.3 libcelt

+ +

libcelt decoder wrapper. +

+

libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec. +Requires the presence of the libcelt headers and library during configuration. +You need to explicitly configure the build with --enable-libcelt. +

+ +

10.4 libgsm

+ +

libgsm decoder wrapper. +

+

libgsm allows libavcodec to decode the GSM full rate audio codec. Requires +the presence of the libgsm headers and library during configuration. You need +to explicitly configure the build with --enable-libgsm. +

+

This decoder supports both the ordinary GSM and the Microsoft variant. +

+ +

10.5 libilbc

+ +

libilbc decoder wrapper. +

+

libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC) +audio codec. Requires the presence of the libilbc headers and library during +configuration. You need to explicitly configure the build with +--enable-libilbc. +

+ +

10.5.1 Options

+ +

The following option is supported by the libilbc wrapper. +

+
+
enhance
+
+

Enable the enhancement of the decoded audio when set to 1. The default +value is 0 (disabled). +

+
+
+ + +

10.6 libopencore-amrnb

+ +

libopencore-amrnb decoder wrapper. +

+

libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate +Narrowband audio codec. Using it requires the presence of the +libopencore-amrnb headers and library during configuration. You need to +explicitly configure the build with --enable-libopencore-amrnb. +

+

An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB +without this library. +

+ +

10.7 libopencore-amrwb

+ +

libopencore-amrwb decoder wrapper. +

+

libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate +Wideband audio codec. Using it requires the presence of the +libopencore-amrwb headers and library during configuration. You need to +explicitly configure the build with --enable-libopencore-amrwb. +

+

An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB +without this library. +

+ +

10.8 libopus

+ +

libopus decoder wrapper. +

+

libopus allows libavcodec to decode the Opus Interactive Audio Codec. +Requires the presence of the libopus headers and library during +configuration. You need to explicitly configure the build with +--enable-libopus. +

+ + +

11. Subtitles Decoders

+ + +

11.1 dvdsub

+ +

This codec decodes the bitmap subtitles used in DVDs; the same subtitles can +also be found in VobSub file pairs and in some Matroska files. +

+ +

11.1.1 Options

+ +
+
palette
+

Specify the global palette used by the bitmaps. When stored in VobSub, the +palette is normally specified in the index file; in Matroska, the palette is +stored in the codec extra-data in the same format as in VobSub. In DVDs, the +palette is stored in the IFO file, and therefore not available when reading +from dumped VOB files. +

+

The format for this option is a string containing 16 24-bits hexadecimal +numbers (without 0x prefix) separated by commas, for example 0d00ee, +ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1, +7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b. +

+
+ + +

11.2 libzvbi-teletext

+ +

Libzvbi allows libavcodec to decode DVB teletext pages and DVB teletext +subtitles. Requires the presence of the libzvbi headers and library during +configuration. You need to explicitly configure the build with +--enable-libzvbi. +

+ +

11.2.1 Options

+ +
+
txt_page
+

List of teletext page numbers to decode. You may use the special * string to +match all pages. Pages that do not match the specified list are dropped. +Default value is *. +

+
txt_chop_top
+

Discards the top teletext line. Default value is 1. +

+
txt_format
+

Specifies the format of the decoded subtitles. The teletext decoder is capable +of decoding the teletext pages to bitmaps or to simple text, you should use +"bitmap" for teletext pages, because certain graphics and colors cannot be +expressed in simple text. You might use "text" for teletext based subtitles if +your application can handle simple text based subtitles. Default value is +bitmap. +

+
txt_left
+

X offset of generated bitmaps, default is 0. +

+
txt_top
+

Y offset of generated bitmaps, default is 0. +

+
txt_chop_spaces
+

Chops leading and trailing spaces and removes empty lines from the generated +text. This option is useful for teletext based subtitles where empty spaces may +be present at the start or at the end of the lines or empty lines may be +present between the subtitle lines because of double-sized teletext characters. +Default value is 1. +

+
txt_duration
+

Sets the display duration of the decoded teletext pages or subtitles in +milliseconds. Default value is 30000 which is 30 seconds. +

+
txt_transparent
+

Force transparent background of the generated teletext bitmaps. Default value +is 0 which means an opaque (black) background. +

+
+ + +

12. Bitstream Filters

+ +

When you configure your FFmpeg build, all the supported bitstream +filters are enabled by default. You can list all available ones using +the configure option --list-bsfs. +

+

You can disable all the bitstream filters using the configure option +--disable-bsfs, and selectively enable any bitstream filter using +the option --enable-bsf=BSF, or you can disable a particular +bitstream filter using the option --disable-bsf=BSF. +

+

The option -bsfs of the ff* tools will display the list of +all the supported bitstream filters included in your build. +

+

Below is a description of the currently available bitstream filters. +

+ +

12.1 aac_adtstoasc

+ +

Convert MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration +bitstream filter. +

+

This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4 +ADTS header and removes the ADTS header. +

+

This is required for example when copying an AAC stream from a raw +ADTS AAC container to a FLV or a MOV/MP4 file. +

+ +

12.2 chomp

+ +

Remove zero padding at the end of a packet. +

+ +

12.3 dump_extra

+ +

Add extradata to the beginning of the filtered packets. +

+

The additional argument specifies which packets should be filtered. +It accepts the values: +

+
a
+

add extradata to all key packets, but only if local_header is +set in the ‘flags2’ codec context field +

+
+
k
+

add extradata to all key packets +

+
+
e
+

add extradata to all packets +

+
+ +

If not specified it is assumed ‘k’. +

+

For example the following ffmpeg command forces a global +header (thus disabling individual packet headers) in the H.264 packets +generated by the libx264 encoder, but corrects them by adding +the header stored in extradata to the key packets: +

 
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
+
+ + +

12.4 h264_mp4toannexb

+ +

Convert an H.264 bitstream from length prefixed mode to start code +prefixed mode (as defined in the Annex B of the ITU-T H.264 +specification). +

+

This is required by some streaming formats, typically the MPEG-2 +transport stream format ("mpegts"). +

+

For example to remux an MP4 file containing an H.264 stream to mpegts +format with ffmpeg, you can use the command: +

+
 
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
+
+ + +

12.5 imx_dump_header

+ + +

12.6 mjpeg2jpeg

+ +

Convert MJPEG/AVI1 packets to full JPEG/JFIF packets. +

+

MJPEG is a video codec wherein each video frame is essentially a +JPEG image. The individual frames can be extracted without loss, +e.g. by +

+
 
ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg
+
+ +

Unfortunately, these chunks are incomplete JPEG images, because +they lack the DHT segment required for decoding. Quoting from +http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml: +

+

Avery Lee, writing in the rec.video.desktop newsgroup in 2001, +commented that "MJPEG, or at least the MJPEG in AVIs having the +MJPG fourcc, is restricted JPEG with a fixed – and *omitted* – +Huffman table. The JPEG must be YCbCr colorspace, it must be 4:2:2, +and it must use basic Huffman encoding, not arithmetic or +progressive. . . . You can indeed extract the MJPEG frames and +decode them with a regular JPEG decoder, but you have to prepend +the DHT segment to them, or else the decoder won’t have any idea +how to decompress the data. The exact table necessary is given in +the OpenDML spec." +

+

This bitstream filter patches the header of frames extracted from an MJPEG +stream (carrying the AVI1 header ID and lacking a DHT segment) to +produce fully qualified JPEG images. +

+
 
ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
+exiftran -i -9 frame*.jpg
+ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
+
+ + +

12.7 mjpega_dump_header

+ + +

12.8 movsub

+ + +

12.9 mp3_header_decompress

+ + +

12.10 noise

+ + +

12.11 remove_extra

+ + +

13. Format Options

+ +

The libavformat library provides some generic global options, which +can be set on all the muxers and demuxers. In addition each muxer or +demuxer may support so-called private options, which are specific for +that component. +

+

Options may be set by specifying -option value in the +FFmpeg tools, or by setting the value explicitly in the +AVFormatContext options or using the ‘libavutil/opt.h’ API +for programmatic use. +

+

The list of supported options follows: +

+
+
avioflags flags (input/output)
+

Possible values: +

+
direct
+

Reduce buffering. +

+
+ +
+
probesize integer (input)
+

Set probing size in bytes, i.e. the size of the data to analyze to get +stream information. A higher value will allow to detect more +information in case it is dispersed into the stream, but will increase +latency. Must be an integer not lesser than 32. It is 5000000 by default. +

+
+
packetsize integer (output)
+

Set packet size. +

+
+
fflags flags (input/output)
+

Set format flags. +

+

Possible values: +

+
ignidx
+

Ignore index. +

+
genpts
+

Generate PTS. +

+
nofillin
+

Do not fill in missing values that can be exactly calculated. +

+
noparse
+

Disable AVParsers, this needs +nofillin too. +

+
igndts
+

Ignore DTS. +

+
discardcorrupt
+

Discard corrupted frames. +

+
sortdts
+

Try to interleave output packets by DTS. +

+
keepside
+

Do not merge side data. +

+
latm
+

Enable RTP MP4A-LATM payload. +

+
nobuffer
+

Reduce the latency introduced by optional buffering +

+
+ +
+
seek2any integer (input)
+

Allow seeking to non-keyframes on demuxer level when supported if set to 1. +Default is 0. +

+
+
analyzeduration integer (input)
+

Specify how many microseconds are analyzed to probe the input. A +higher value will allow to detect more accurate information, but will +increase latency. It defaults to 5,000,000 microseconds = 5 seconds. +

+
+
cryptokey hexadecimal string (input)
+

Set decryption key. +

+
+
indexmem integer (input)
+

Set max memory used for timestamp index (per stream). +

+
+
rtbufsize integer (input)
+

Set max memory used for buffering real-time frames. +

+
+
fdebug flags (input/output)
+

Print specific debug info. +

+

Possible values: +

+
ts
+
+ +
+
max_delay integer (input/output)
+

Set maximum muxing or demuxing delay in microseconds. +

+
+
fpsprobesize integer (input)
+

Set number of frames used to probe fps. +

+
+
audio_preload integer (output)
+

Set microseconds by which audio packets should be interleaved earlier. +

+
+
chunk_duration integer (output)
+

Set microseconds for each chunk. +

+
+
chunk_size integer (output)
+

Set size in bytes for each chunk. +

+
+
err_detect, f_err_detect flags (input)
+

Set error detection flags. f_err_detect is deprecated and +should be used only via the ffmpeg tool. +

+

Possible values: +

+
crccheck
+

Verify embedded CRCs. +

+
bitstream
+

Detect bitstream specification deviations. +

+
buffer
+

Detect improper bitstream length. +

+
explode
+

Abort decoding on minor error detection. +

+
careful
+

Consider things that violate the spec and have not been seen in the +wild as errors. +

+
compliant
+

Consider all spec non compliancies as errors. +

+
aggressive
+

Consider things that a sane encoder should not do as an error. +

+
+ +
+
use_wallclock_as_timestamps integer (input)
+

Use wallclock as timestamps. +

+
+
avoid_negative_ts integer (output)
+
+

Possible values: +

+
make_non_negative
+

Shift timestamps to make them non-negative. +Also note that this affects only leading negative timestamps, and not +non-monotonic negative timestamps. +

+
make_zero
+

Shift timestamps so that the first timestamp is 0. +

+
auto (default)
+

Enables shifting when required by the target format. +

+
disabled
+

Disables shifting of timestamp. +

+
+ +

When shifting is enabled, all output timestamps are shifted by the +same amount. Audio, video, and subtitles desynching and relative +timestamp differences are preserved compared to how they would have +been without shifting. +

+
+
skip_initial_bytes integer (input)
+

Set number of bytes to skip before reading header and frames if set to 1. +Default is 0. +

+
+
correct_ts_overflow integer (input)
+

Correct single timestamp overflows if set to 1. Default is 1. +

+
+
flush_packets integer (output)
+

Flush the underlying I/O stream after each packet. Default 1 enables it, and +has the effect of reducing the latency; 0 disables it and may slightly +increase performance in some cases. +

+
+
output_ts_offset offset (output)
+

Set the output time offset. +

+

offset must be a time duration specification, +see (ffmpeg-utils)time duration syntax. +

+

The offset is added by the muxer to the output timestamps. +

+

Specifying a positive offset means that the corresponding streams are +delayed by the time duration specified in offset. Default value +is 0 (meaning that no offset is applied). +

+
+ + +

+

+

13.1 Format stream specifiers

+ +

Format stream specifiers allow selection of one or more streams that +match specific properties. +

+

Possible forms of stream specifiers are: +

+
stream_index
+

Matches the stream with this index. +

+
+
stream_type[:stream_index]
+

stream_type is one of following: ’v’ for video, ’a’ for audio, +’s’ for subtitle, ’d’ for data, and ’t’ for attachments. If +stream_index is given, then it matches the stream number +stream_index of this type. Otherwise, it matches all streams of +this type. +

+
+
p:program_id[:stream_index]
+

If stream_index is given, then it matches the stream with number +stream_index in the program with the id +program_id. Otherwise, it matches all streams in the program. +

+
+
#stream_id
+

Matches the stream by a format-specific ID. +

+
+ +

The exact semantics of stream specifiers is defined by the +avformat_match_stream_specifier() function declared in the +‘libavformat/avformat.h’ header. +

+ +

14. Demuxers

+ +

Demuxers are configured elements in FFmpeg that can read the +multimedia streams from a particular type of file. +

+

When you configure your FFmpeg build, all the supported demuxers +are enabled by default. You can list all available ones using the +configure option --list-demuxers. +

+

You can disable all the demuxers using the configure option +--disable-demuxers, and selectively enable a single demuxer with +the option --enable-demuxer=DEMUXER, or disable it +with the option --disable-demuxer=DEMUXER. +

+

The option -formats of the ff* tools will display the list of +enabled demuxers. +

+

The description of some of the currently available demuxers follows. +

+ +

14.1 applehttp

+ +

Apple HTTP Live Streaming demuxer. +

+

This demuxer presents all AVStreams from all variant streams. +The id field is set to the bitrate variant index number. By setting +the discard flags on AVStreams (by pressing ’a’ or ’v’ in ffplay), +the caller can decide which variant streams to actually receive. +The total bitrate of the variant that the stream belongs to is +available in a metadata key named "variant_bitrate". +

+ +

14.2 asf

+ +

Advanced Systems Format demuxer. +

+

This demuxer is used to demux ASF files and MMS network streams. +

+
+
-no_resync_search bool
+

Do not try to resynchronize by looking for a certain optional start code. +

+
+ +

+

+

14.3 concat

+ +

Virtual concatenation script demuxer. +

+

This demuxer reads a list of files and other directives from a text file and +demuxes them one after the other, as if all their packet had been muxed +together. +

+

The timestamps in the files are adjusted so that the first file starts at 0 +and each next file starts where the previous one finishes. Note that it is +done globally and may cause gaps if all streams do not have exactly the same +length. +

+

All files must have the same streams (same codecs, same time base, etc.). +

+

The duration of each file is used to adjust the timestamps of the next file: +if the duration is incorrect (because it was computed using the bit-rate or +because the file is truncated, for example), it can cause artifacts. The +duration directive can be used to override the duration stored in +each file. +

+ +

14.3.1 Syntax

+ +

The script is a text file in extended-ASCII, with one directive per line. +Empty lines, leading spaces and lines starting with ’#’ are ignored. The +following directive is recognized: +

+
+
file path
+

Path to a file to read; special characters and spaces must be escaped with +backslash or single quotes. +

+

All subsequent directives apply to that file. +

+
+
ffconcat version 1.0
+

Identify the script type and version. It also sets the ‘safe’ option +to 1 if it was set to its default -1. +

+

To make FFmpeg recognize the format automatically, this directive must +appear exactly as is (no extra space or byte-order-mark) on the very first +line of the script. +

+
+
duration dur
+

Duration of the file. This information can be specified from the file; +specifying it here may be more efficient or help if the information from the +file is not available or accurate. +

+

If the duration is set for all files, then it is possible to seek in the +whole concatenated video. +

+
+
+ + +

14.3.2 Options

+ +

This demuxer accepts the following option: +

+
+
safe
+

If set to 1, reject unsafe file paths. A file path is considered safe if it +does not contain a protocol specification and is relative and all components +only contain characters from the portable character set (letters, digits, +period, underscore and hyphen) and have no period at the beginning of a +component. +

+

If set to 0, any file name is accepted. +

+

The default is -1, it is equivalent to 1 if the format was automatically +probed and 0 otherwise. +

+
+
+ + +

14.4 flv

+ +

Adobe Flash Video Format demuxer. +

+

This demuxer is used to demux FLV files and RTMP network streams. +

+
+
-flv_metadata bool
+

Allocate the streams according to the onMetaData array content. +

+
+ + +

14.5 libgme

+ +

The Game Music Emu library is a collection of video game music file emulators. +

+

See http://code.google.com/p/game-music-emu/ for more information. +

+

Some files have multiple tracks. The demuxer will pick the first track by +default. The ‘track_index’ option can be used to select a different +track. Track indexes start at 0. The demuxer exports the number of tracks as +tracks meta data entry. +

+

For very large files, the ‘max_size’ option may have to be adjusted. +

+ +

14.6 libquvi

+ +

Play media from Internet services using the quvi project. +

+

The demuxer accepts a ‘format’ option to request a specific quality. It +is by default set to best. +

+

See http://quvi.sourceforge.net/ for more information. +

+

FFmpeg needs to be built with --enable-libquvi for this demuxer to be +enabled. +

+ +

14.7 image2

+ +

Image file demuxer. +

+

This demuxer reads from a list of image files specified by a pattern. +The syntax and meaning of the pattern is specified by the +option pattern_type. +

+

The pattern may contain a suffix which is used to automatically +determine the format of the images contained in the files. +

+

The size, the pixel format, and the format of each image must be the +same for all the files in the sequence. +

+

This demuxer accepts the following options: +

+
framerate
+

Set the frame rate for the video stream. It defaults to 25. +

+
loop
+

If set to 1, loop over the input. Default value is 0. +

+
pattern_type
+

Select the pattern type used to interpret the provided filename. +

+

pattern_type accepts one of the following values. +

+
sequence
+

Select a sequence pattern type, used to specify a sequence of files +indexed by sequential numbers. +

+

A sequence pattern may contain the string "%d" or "%0Nd", which +specifies the position of the characters representing a sequential +number in each filename matched by the pattern. If the form +"%0Nd" is used, the string representing the number in each +filename is 0-padded and N is the total number of 0-padded +digits representing the number. The literal character ’%’ can be +specified in the pattern with the string "%%". +

+

If the sequence pattern contains "%d" or "%0Nd", the first filename of +the file list specified by the pattern must contain a number +inclusively contained between start_number and +start_number+start_number_range-1, and all the following +numbers must be sequential. +

+

For example the pattern "img-%03d.bmp" will match a sequence of +filenames of the form ‘img-001.bmp’, ‘img-002.bmp’, ..., +‘img-010.bmp’, etc.; the pattern "i%%m%%g-%d.jpg" will match a +sequence of filenames of the form ‘i%m%g-1.jpg’, +‘i%m%g-2.jpg’, ..., ‘i%m%g-10.jpg’, etc. +

+

Note that the pattern must not necessarily contain "%d" or +"%0Nd", for example to convert a single image file +‘img.jpeg’ you can employ the command: +

 
ffmpeg -i img.jpeg img.png
+
+ +
+
glob
+

Select a glob wildcard pattern type. +

+

The pattern is interpreted like a glob() pattern. This is only +selectable if libavformat was compiled with globbing support. +

+
+
glob_sequence (deprecated, will be removed)
+

Select a mixed glob wildcard/sequence pattern. +

+

If your version of libavformat was compiled with globbing support, and +the provided pattern contains at least one glob meta character among +%*?[]{} that is preceded by an unescaped "%", the pattern is +interpreted like a glob() pattern, otherwise it is interpreted +like a sequence pattern. +

+

All glob special characters %*?[]{} must be prefixed +with "%". To escape a literal "%" you shall use "%%". +

+

For example the pattern foo-%*.jpeg will match all the +filenames prefixed by "foo-" and terminating with ".jpeg", and +foo-%?%?%?.jpeg will match all the filenames prefixed with +"foo-", followed by a sequence of three characters, and terminating +with ".jpeg". +

+

This pattern type is deprecated in favor of glob and +sequence. +

+
+ +

Default value is glob_sequence. +

+
pixel_format
+

Set the pixel format of the images to read. If not specified the pixel +format is guessed from the first image file in the sequence. +

+
start_number
+

Set the index of the file matched by the image file pattern to start +to read from. Default value is 0. +

+
start_number_range
+

Set the index interval range to check when looking for the first image +file in the sequence, starting from start_number. Default value +is 5. +

+
ts_from_file
+

If set to 1, will set frame timestamp to modification time of image file. Note +that monotonicity of timestamps is not provided: images go in the same order as +without this option. Default value is 0. +

+
video_size
+

Set the video size of the images to read. If not specified the video +size is guessed from the first image file in the sequence. +

+
+ + +

14.7.1 Examples

+ +
    +
  • +Use ffmpeg for creating a video from the images in the file +sequence ‘img-001.jpeg’, ‘img-002.jpeg’, ..., assuming an +input frame rate of 10 frames per second: +
     
    ffmpeg -framerate 10 -i 'img-%03d.jpeg' out.mkv
    +
    + +
  • +As above, but start by reading from a file with index 100 in the sequence: +
     
    ffmpeg -framerate 10 -start_number 100 -i 'img-%03d.jpeg' out.mkv
    +
    + +
  • +Read images matching the "*.png" glob pattern, that is all the files +terminating with the ".png" suffix: +
     
    ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
    +
    +
+ + +

14.8 mpegts

+ +

MPEG-2 transport stream demuxer. +

+
+
fix_teletext_pts
+

Overrides teletext packet PTS and DTS values with the timestamps calculated +from the PCR of the first program which the teletext stream is part of and is +not discarded. Default value is 1, set this option to 0 if you want your +teletext packet PTS and DTS values untouched. +

+
+ + +

14.9 rawvideo

+ +

Raw video demuxer. +

+

This demuxer allows one to read raw video data. Since there is no header +specifying the assumed video parameters, the user must specify them +in order to be able to decode the data correctly. +

+

This demuxer accepts the following options: +

+
framerate
+

Set input video frame rate. Default value is 25. +

+
+
pixel_format
+

Set the input video pixel format. Default value is yuv420p. +

+
+
video_size
+

Set the input video size. This value must be specified explicitly. +

+
+ +

For example to read a rawvideo file ‘input.raw’ with +ffplay, assuming a pixel format of rgb24, a video +size of 320x240, and a frame rate of 10 images per second, use +the command: +

 
ffplay -f rawvideo -pixel_format rgb24 -video_size 320x240 -framerate 10 input.raw
+
+ + +

14.10 sbg

+ +

SBaGen script demuxer. +

+

This demuxer reads the script language used by SBaGen +http://uazu.net/sbagen/ to generate binaural beats sessions. A SBG +script looks like that: +

 
-SE
+a: 300-2.5/3 440+4.5/0
+b: 300-2.5/0 440+4.5/3
+off: -
+NOW      == a
++0:07:00 == b
++0:14:00 == a
++0:21:00 == b
++0:30:00    off
+
+ +

A SBG script can mix absolute and relative timestamps. If the script uses +either only absolute timestamps (including the script start time) or only +relative ones, then its layout is fixed, and the conversion is +straightforward. On the other hand, if the script mixes both kind of +timestamps, then the NOW reference for relative timestamps will be +taken from the current time of day at the time the script is read, and the +script layout will be frozen according to that reference. That means that if +the script is directly played, the actual times will match the absolute +timestamps up to the sound controller’s clock accuracy, but if the user +somehow pauses the playback or seeks, all times will be shifted accordingly. +

+ +

14.11 tedcaptions

+ +

JSON captions used for TED Talks. +

+

TED does not provide links to the captions, but they can be guessed from the +page. The file ‘tools/bookmarklets.html’ from the FFmpeg source tree +contains a bookmarklet to expose them. +

+

This demuxer accepts the following option: +

+
start_time
+

Set the start time of the TED talk, in milliseconds. The default is 15000 +(15s). It is used to sync the captions with the downloadable videos, because +they include a 15s intro. +

+
+ +

Example: convert the captions to a format most players understand: +

 
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
+
+ + +

15. Metadata

+ +

FFmpeg is able to dump metadata from media files into a simple UTF-8-encoded +INI-like text file and then load it back using the metadata muxer/demuxer. +

+

The file format is as follows: +

    +
  1. +A file consists of a header and a number of metadata tags divided into sections, +each on its own line. + +
  2. +The header is a ’;FFMETADATA’ string, followed by a version number (now 1). + +
  3. +Metadata tags are of the form ’key=value’ + +
  4. +Immediately after header follows global metadata + +
  5. +After global metadata there may be sections with per-stream/per-chapter +metadata. + +
  6. +A section starts with the section name in uppercase (i.e. STREAM or CHAPTER) in +brackets (’[’, ’]’) and ends with next section or end of file. + +
  7. +At the beginning of a chapter section there may be an optional timebase to be +used for start/end values. It must be in form ’TIMEBASE=num/den’, where num and +den are integers. If the timebase is missing then start/end times are assumed to +be in milliseconds. +Next a chapter section must contain chapter start and end times in form +’START=num’, ’END=num’, where num is a positive integer. + +
  8. +Empty lines and lines starting with ’;’ or ’#’ are ignored. + +
  9. +Metadata keys or values containing special characters (’=’, ’;’, ’#’, ’\’ and a +newline) must be escaped with a backslash ’\’. + +
  10. +Note that whitespace in metadata (e.g. foo = bar) is considered to be a part of +the tag (in the example above key is ’foo ’, value is ’ bar’). +
+ +

A ffmetadata file might look like this: +

 
;FFMETADATA1
+title=bike\\shed
+;this is a comment
+artist=FFmpeg troll team
+
+[CHAPTER]
+TIMEBASE=1/1000
+START=0
+#chapter ends at 0:01:00
+END=60000
+title=chapter \#1
+[STREAM]
+title=multi\
+line
+
+ +

By using the ffmetadata muxer and demuxer it is possible to extract +metadata from an input file to an ffmetadata file, and then transcode +the file into an output file with the edited ffmetadata file. +

+

Extracting an ffmetadata file with ‘ffmpeg’ goes as follows: +

 
ffmpeg -i INPUT -f ffmetadata FFMETADATAFILE
+
+ +

Reinserting edited metadata information from the FFMETADATAFILE file can +be done as: +

 
ffmpeg -i INPUT -i FFMETADATAFILE -map_metadata 1 -codec copy OUTPUT
+
+ + +

16. Protocols

+ +

Protocols are configured elements in FFmpeg that enable access to +resources that require specific protocols. +

+

When you configure your FFmpeg build, all the supported protocols are +enabled by default. You can list all available ones using the +configure option "–list-protocols". +

+

You can disable all the protocols using the configure option +"–disable-protocols", and selectively enable a protocol using the +option "–enable-protocol=PROTOCOL", or you can disable a +particular protocol using the option +"–disable-protocol=PROTOCOL". +

+

The option "-protocols" of the ff* tools will display the list of +supported protocols. +

+

A description of the currently available protocols follows. +

+ +

16.1 bluray

+ +

Read BluRay playlist. +

+

The accepted options are: +

+
angle
+

BluRay angle +

+
+
chapter
+

Start chapter (1...N) +

+
+
playlist
+

Playlist to read (BDMV/PLAYLIST/?????.mpls) +

+
+
+ +

Examples: +

+

Read longest playlist from BluRay mounted to /mnt/bluray: +

 
bluray:/mnt/bluray
+
+ +

Read angle 2 of playlist 4 from BluRay mounted to /mnt/bluray, start from chapter 2: +

 
-playlist 4 -angle 2 -chapter 2 bluray:/mnt/bluray
+
+ + +

16.2 cache

+ +

Caching wrapper for input stream. +

+

Cache the input stream to temporary file. It brings seeking capability to live streams. +

+
 
cache:URL
+
+ + +

16.3 concat

+ +

Physical concatenation protocol. +

+

Allows reading and seeking from many resources in sequence as if they were +a unique resource. +

+

A URL accepted by this protocol has the syntax: +

 
concat:URL1|URL2|...|URLN
+
+ +

where URL1, URL2, ..., URLN are the urls of the +resource to be concatenated, each one possibly specifying a distinct +protocol. +

+

For example to read a sequence of files ‘split1.mpeg’, +‘split2.mpeg’, ‘split3.mpeg’ with ffplay use the +command: +

 
ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
+
+ +

Note that you may need to escape the character "|" which is special for +many shells. +

+ +

16.4 crypto

+ +

AES-encrypted stream reading protocol. +

+

The accepted options are: +

+
key
+

Set the AES decryption key binary block from given hexadecimal representation. +

+
+
iv
+

Set the AES decryption initialization vector binary block from given hexadecimal representation. +

+
+ +

Accepted URL formats: +

 
crypto:URL
+crypto+URL
+
+ + +

16.5 data

+ +

Data in-line in the URI. See http://en.wikipedia.org/wiki/Data_URI_scheme. +

+

For example, to convert a GIF file given inline with ffmpeg: +

 
ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP///////////////ywAAAAACAAIAAADF0gEDLojDgdGiJdJqUX02iB4E8Q9jUMkADs=" smiley.png
+
+ + +

16.6 file

+ +

File access protocol. +

+

Allow to read from or write to a file. +

+

A file URL can have the form: +

 
file:filename
+
+ +

where filename is the path of the file to read. +

+

An URL that does not have a protocol prefix will be assumed to be a +file URL. Depending on the build, an URL that looks like a Windows +path with the drive letter at the beginning will also be assumed to be +a file URL (usually not the case in builds for unix-like systems). +

+

For example to read from a file ‘input.mpeg’ with ffmpeg +use the command: +

 
ffmpeg -i file:input.mpeg output.mpeg
+
+ +

This protocol accepts the following options: +

+
+
truncate
+

Truncate existing files on write, if set to 1. A value of 0 prevents +truncating. Default value is 1. +

+
+
blocksize
+

Set I/O operation maximum block size, in bytes. Default value is +INT_MAX, which results in not limiting the requested block size. +Setting this value reasonably low improves user termination request reaction +time, which is valuable for files on slow medium. +

+
+ + +

16.7 ftp

+ +

FTP (File Transfer Protocol). +

+

Allows reading from or writing to remote resources using the FTP protocol. +

+

Following syntax is required. +

 
ftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
+
+ +

This protocol accepts the following options. +

+
+
timeout
+

Set timeout of socket I/O operations used by the underlying low level +operation. By default it is set to -1, which means that the timeout is +not specified. +

+
+
ftp-anonymous-password
+

Password used when login as anonymous user. Typically an e-mail address +should be used. +

+
+
ftp-write-seekable
+

Control seekability of connection during encoding. If set to 1 the +resource is supposed to be seekable, if set to 0 it is assumed not +to be seekable. Default value is 0. +

+
+ +

NOTE: Protocol can be used as output, but it is recommended to not do +it, unless special care is taken (tests, customized server configuration +etc.). Different FTP servers behave in different way during seek +operation. ff* tools may produce incomplete content due to server limitations. +

+ +

16.8 gopher

+ +

Gopher protocol. +

+ +

16.9 hls

+ +

Read Apple HTTP Live Streaming compliant segmented stream as +a uniform one. The M3U8 playlists describing the segments can be +remote HTTP resources or local files, accessed using the standard +file protocol. +The nested protocol is declared by specifying +"+proto" after the hls URI scheme name, where proto +is either "file" or "http". +

+
 
hls+http://host/path/to/remote/resource.m3u8
+hls+file://path/to/local/resource.m3u8
+
+ +

Using this protocol is discouraged - the hls demuxer should work +just as well (if not, please report the issues) and is more complete. +To use the hls demuxer instead, simply use the direct URLs to the +m3u8 files. +

+ +

16.10 http

+ +

HTTP (Hyper Text Transfer Protocol). +

+

This protocol accepts the following options: +

+
+
seekable
+

Control seekability of connection. If set to 1 the resource is +supposed to be seekable, if set to 0 it is assumed not to be seekable, +if set to -1 it will try to autodetect if it is seekable. Default +value is -1. +

+
+
chunked_post
+

If set to 1 use chunked Transfer-Encoding for posts, default is 1. +

+
+
content_type
+

Set a specific content type for the POST messages. +

+
+
headers
+

Set custom HTTP headers, can override built in default headers. The +value must be a string encoding the headers. +

+
+
multiple_requests
+

Use persistent connections if set to 1, default is 0. +

+
+
post_data
+

Set custom HTTP post data. +

+
+
user-agent
+
user_agent
+

Override the User-Agent header. If not specified the protocol will use a +string describing the libavformat build. ("Lavf/<version>") +

+
+
timeout
+

Set timeout of socket I/O operations used by the underlying low level +operation. By default it is set to -1, which means that the timeout is +not specified. +

+
+
mime_type
+

Export the MIME type. +

+
+
icy
+

If set to 1 request ICY (SHOUTcast) metadata from the server. If the server +supports this, the metadata has to be retrieved by the application by reading +the ‘icy_metadata_headers’ and ‘icy_metadata_packet’ options. +The default is 0. +

+
+
icy_metadata_headers
+

If the server supports ICY metadata, this contains the ICY-specific HTTP reply +headers, separated by newline characters. +

+
+
icy_metadata_packet
+

If the server supports ICY metadata, and ‘icy’ was set to 1, this +contains the last non-empty metadata packet sent by the server. It should be +polled in regular intervals by applications interested in mid-stream metadata +updates. +

+
+
cookies
+

Set the cookies to be sent in future requests. The format of each cookie is the +same as the value of a Set-Cookie HTTP response field. Multiple cookies can be +delimited by a newline character. +

+
+
offset
+

Set initial byte offset. +

+
+
end_offset
+

Try to limit the request to bytes preceding this offset. +

+
+ + +

16.10.1 HTTP Cookies

+ +

Some HTTP requests will be denied unless cookie values are passed in with the +request. The ‘cookies’ option allows these cookies to be specified. At +the very least, each cookie must specify a value along with a path and domain. +HTTP requests that match both the domain and path will automatically include the +cookie value in the HTTP Cookie header field. Multiple cookies can be delimited +by a newline. +

+

The required syntax to play a stream specifying a cookie is: +

 
ffplay -cookies "nlqptid=nltid=tsn; path=/; domain=somedomain.com;" http://somedomain.com/somestream.m3u8
+
+ + +

16.11 mmst

+ +

MMS (Microsoft Media Server) protocol over TCP. +

+ +

16.12 mmsh

+ +

MMS (Microsoft Media Server) protocol over HTTP. +

+

The required syntax is: +

 
mmsh://server[:port][/app][/playpath]
+
+ + +

16.13 md5

+ +

MD5 output protocol. +

+

Computes the MD5 hash of the data to be written, and on close writes +this to the designated output or stdout if none is specified. It can +be used to test muxers without writing an actual file. +

+

Some examples follow. +

 
# Write the MD5 hash of the encoded AVI file to the file output.avi.md5.
+ffmpeg -i input.flv -f avi -y md5:output.avi.md5
+
+# Write the MD5 hash of the encoded AVI file to stdout.
+ffmpeg -i input.flv -f avi -y md5:
+
+ +

Note that some formats (typically MOV) require the output protocol to +be seekable, so they will fail with the MD5 output protocol. +

+ +

16.14 pipe

+ +

UNIX pipe access protocol. +

+

Allow to read and write from UNIX pipes. +

+

The accepted syntax is: +

 
pipe:[number]
+
+ +

number is the number corresponding to the file descriptor of the +pipe (e.g. 0 for stdin, 1 for stdout, 2 for stderr). If number +is not specified, by default the stdout file descriptor will be used +for writing, stdin for reading. +

+

For example to read from stdin with ffmpeg: +

 
cat test.wav | ffmpeg -i pipe:0
+# ...this is the same as...
+cat test.wav | ffmpeg -i pipe:
+
+ +

For writing to stdout with ffmpeg: +

 
ffmpeg -i test.wav -f avi pipe:1 | cat > test.avi
+# ...this is the same as...
+ffmpeg -i test.wav -f avi pipe: | cat > test.avi
+
+ +

This protocol accepts the following options: +

+
+
blocksize
+

Set I/O operation maximum block size, in bytes. Default value is +INT_MAX, which results in not limiting the requested block size. +Setting this value reasonably low improves user termination request reaction +time, which is valuable if data transmission is slow. +

+
+ +

Note that some formats (typically MOV), require the output protocol to +be seekable, so they will fail with the pipe output protocol. +

+ +

16.15 rtmp

+ +

Real-Time Messaging Protocol. +

+

The Real-Time Messaging Protocol (RTMP) is used for streaming multimedia +content across a TCP/IP network. +

+

The required syntax is: +

 
rtmp://[username:password@]server[:port][/app][/instance][/playpath]
+
+ +

The accepted parameters are: +

+
username
+

An optional username (mostly for publishing). +

+
+
password
+

An optional password (mostly for publishing). +

+
+
server
+

The address of the RTMP server. +

+
+
port
+

The number of the TCP port to use (by default is 1935). +

+
+
app
+

It is the name of the application to access. It usually corresponds to +the path where the application is installed on the RTMP server +(e.g. ‘/ondemand/’, ‘/flash/live/’, etc.). You can override +the value parsed from the URI through the rtmp_app option, too. +

+
+
playpath
+

It is the path or name of the resource to play with reference to the +application specified in app, may be prefixed by "mp4:". You +can override the value parsed from the URI through the rtmp_playpath +option, too. +

+
+
listen
+

Act as a server, listening for an incoming connection. +

+
+
timeout
+

Maximum time to wait for the incoming connection. Implies listen. +

+
+ +

Additionally, the following parameters can be set via command line options +(or in code via AVOptions): +

+
rtmp_app
+

Name of application to connect on the RTMP server. This option +overrides the parameter specified in the URI. +

+
+
rtmp_buffer
+

Set the client buffer time in milliseconds. The default is 3000. +

+
+
rtmp_conn
+

Extra arbitrary AMF connection parameters, parsed from a string, +e.g. like B:1 S:authMe O:1 NN:code:1.23 NS:flag:ok O:0. +Each value is prefixed by a single character denoting the type, +B for Boolean, N for number, S for string, O for object, or Z for null, +followed by a colon. For Booleans the data must be either 0 or 1 for +FALSE or TRUE, respectively. Likewise for Objects the data must be 0 or +1 to end or begin an object, respectively. Data items in subobjects may +be named, by prefixing the type with ’N’ and specifying the name before +the value (i.e. NB:myFlag:1). This option may be used multiple +times to construct arbitrary AMF sequences. +

+
+
rtmp_flashver
+

Version of the Flash plugin used to run the SWF player. The default +is LNX 9,0,124,2. (When publishing, the default is FMLE/3.0 (compatible; +<libavformat version>).) +

+
+
rtmp_flush_interval
+

Number of packets flushed in the same request (RTMPT only). The default +is 10. +

+
+
rtmp_live
+

Specify that the media is a live stream. No resuming or seeking in +live streams is possible. The default value is any, which means the +subscriber first tries to play the live stream specified in the +playpath. If a live stream of that name is not found, it plays the +recorded stream. The other possible values are live and +recorded. +

+
+
rtmp_pageurl
+

URL of the web page in which the media was embedded. By default no +value will be sent. +

+
+
rtmp_playpath
+

Stream identifier to play or to publish. This option overrides the +parameter specified in the URI. +

+
+
rtmp_subscribe
+

Name of live stream to subscribe to. By default no value will be sent. +It is only sent if the option is specified or if rtmp_live +is set to live. +

+
+
rtmp_swfhash
+

SHA256 hash of the decompressed SWF file (32 bytes). +

+
+
rtmp_swfsize
+

Size of the decompressed SWF file, required for SWFVerification. +

+
+
rtmp_swfurl
+

URL of the SWF player for the media. By default no value will be sent. +

+
+
rtmp_swfverify
+

URL to player swf file, compute hash/size automatically. +

+
+
rtmp_tcurl
+

URL of the target stream. Defaults to proto://host[:port]/app. +

+
+
+ +

For example to read with ffplay a multimedia resource named +"sample" from the application "vod" from an RTMP server "myserver": +

 
ffplay rtmp://myserver/vod/sample
+
+ +

To publish to a password protected server, passing the playpath and +app names separately: +

 
ffmpeg -re -i <input> -f flv -rtmp_playpath some/long/path -rtmp_app long/app/name rtmp://username:password@myserver/
+
+ + +

16.16 rtmpe

+ +

Encrypted Real-Time Messaging Protocol. +

+

The Encrypted Real-Time Messaging Protocol (RTMPE) is used for +streaming multimedia content within standard cryptographic primitives, +consisting of Diffie-Hellman key exchange and HMACSHA256, generating +a pair of RC4 keys. +

+ +

16.17 rtmps

+ +

Real-Time Messaging Protocol over a secure SSL connection. +

+

The Real-Time Messaging Protocol (RTMPS) is used for streaming +multimedia content across an encrypted connection. +

+ +

16.18 rtmpt

+ +

Real-Time Messaging Protocol tunneled through HTTP. +

+

The Real-Time Messaging Protocol tunneled through HTTP (RTMPT) is used +for streaming multimedia content within HTTP requests to traverse +firewalls. +

+ +

16.19 rtmpte

+ +

Encrypted Real-Time Messaging Protocol tunneled through HTTP. +

+

The Encrypted Real-Time Messaging Protocol tunneled through HTTP (RTMPTE) +is used for streaming multimedia content within HTTP requests to traverse +firewalls. +

+ +

16.20 rtmpts

+ +

Real-Time Messaging Protocol tunneled through HTTPS. +

+

The Real-Time Messaging Protocol tunneled through HTTPS (RTMPTS) is used +for streaming multimedia content within HTTPS requests to traverse +firewalls. +

+ +

16.21 libssh

+ +

Secure File Transfer Protocol via libssh +

+

Allow reading from or writing to remote resources using the SFTP protocol. +

+

Following syntax is required. +

+
 
sftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
+
+ +

This protocol accepts the following options. +

+
+
timeout
+

Set timeout of socket I/O operations used by the underlying low level +operation. By default it is set to -1, which means that the timeout +is not specified. +

+
+
truncate
+

Truncate existing files on write, if set to 1. A value of 0 prevents +truncating. Default value is 1. +

+
+
private_key
+

Specify the path of the file containing private key to use during authorization. +By default libssh searches for keys in the ‘~/.ssh/’ directory. +

+
+
+ +

Example: Play a file stored on remote server. +

+
 
ffplay sftp://user:password@server_address:22/home/user/resource.mpeg
+
+ + +

16.22 librtmp rtmp, rtmpe, rtmps, rtmpt, rtmpte

+ +

Real-Time Messaging Protocol and its variants supported through +librtmp. +

+

Requires the presence of the librtmp headers and library during +configuration. You need to explicitly configure the build with +"–enable-librtmp". If enabled this will replace the native RTMP +protocol. +

+

This protocol provides most client functions and a few server +functions needed to support RTMP, RTMP tunneled in HTTP (RTMPT), +encrypted RTMP (RTMPE), RTMP over SSL/TLS (RTMPS) and tunneled +variants of these encrypted types (RTMPTE, RTMPTS). +

+

The required syntax is: +

 
rtmp_proto://server[:port][/app][/playpath] options
+
+ +

where rtmp_proto is one of the strings "rtmp", "rtmpt", "rtmpe", +"rtmps", "rtmpte", "rtmpts" corresponding to each RTMP variant, and +server, port, app and playpath have the same +meaning as specified for the RTMP native protocol. +options contains a list of space-separated options of the form +key=val. +

+

See the librtmp manual page (man 3 librtmp) for more information. +

+

For example, to stream a file in real-time to an RTMP server using +ffmpeg: +

 
ffmpeg -re -i myfile -f flv rtmp://myserver/live/mystream
+
+ +

To play the same stream using ffplay: +

 
ffplay "rtmp://myserver/live/mystream live=1"
+
+ + +

16.23 rtp

+ +

Real-time Transport Protocol. +

+

The required syntax for an RTP URL is: +rtp://hostname[:port][?option=val...] +

+

port specifies the RTP port to use. +

+

The following URL options are supported: +

+
+
ttl=n
+

Set the TTL (Time-To-Live) value (for multicast only). +

+
+
rtcpport=n
+

Set the remote RTCP port to n. +

+
+
localrtpport=n
+

Set the local RTP port to n. +

+
+
localrtcpport=n
+

Set the local RTCP port to n. +

+
+
pkt_size=n
+

Set max packet size (in bytes) to n. +

+
+
connect=0|1
+

Do a connect() on the UDP socket (if set to 1) or not (if set +to 0). +

+
+
sources=ip[,ip]
+

List allowed source IP addresses. +

+
+
block=ip[,ip]
+

List disallowed (blocked) source IP addresses. +

+
+
write_to_source=0|1
+

Send packets to the source address of the latest received packet (if +set to 1) or to a default remote address (if set to 0). +

+
+
localport=n
+

Set the local RTP port to n. +

+

This is a deprecated option. Instead, ‘localrtpport’ should be +used. +

+
+
+ +

Important notes: +

+
    +
  1. +If ‘rtcpport’ is not set the RTCP port will be set to the RTP +port value plus 1. + +
  2. +If ‘localrtpport’ (the local RTP port) is not set any available +port will be used for the local RTP and RTCP ports. + +
  3. +If ‘localrtcpport’ (the local RTCP port) is not set it will be +set to the local RTP port value plus 1. +
+ + +

16.24 rtsp

+ +

Real-Time Streaming Protocol. +

+

RTSP is not technically a protocol handler in libavformat, it is a demuxer +and muxer. The demuxer supports both normal RTSP (with data transferred +over RTP; this is used by e.g. Apple and Microsoft) and Real-RTSP (with +data transferred over RDT). +

+

The muxer can be used to send a stream using RTSP ANNOUNCE to a server +supporting it (currently Darwin Streaming Server and Mischa Spiegelmock’s +RTSP server). +

+

The required syntax for a RTSP url is: +

 
rtsp://hostname[:port]/path
+
+ +

Options can be set on the ffmpeg/ffplay command +line, or set in code via AVOptions or in +avformat_open_input. +

+

The following options are supported. +

+
+
initial_pause
+

Do not start playing the stream immediately if set to 1. Default value +is 0. +

+
+
rtsp_transport
+

Set RTSP transport protocols. +

+

It accepts the following values: +

+
udp
+

Use UDP as lower transport protocol. +

+
+
tcp
+

Use TCP (interleaving within the RTSP control channel) as lower +transport protocol. +

+
+
udp_multicast
+

Use UDP multicast as lower transport protocol. +

+
+
http
+

Use HTTP tunneling as lower transport protocol, which is useful for +passing proxies. +

+
+ +

Multiple lower transport protocols may be specified, in that case they are +tried one at a time (if the setup of one fails, the next one is tried). +For the muxer, only the ‘tcp’ and ‘udp’ options are supported. +

+
+
rtsp_flags
+

Set RTSP flags. +

+

The following values are accepted: +

+
filter_src
+

Accept packets only from negotiated peer address and port. +

+
listen
+

Act as a server, listening for an incoming connection. +

+
+ +

Default value is ‘none’. +

+
+
allowed_media_types
+

Set media types to accept from the server. +

+

The following flags are accepted: +

+
video
+
audio
+
data
+
+ +

By default it accepts all media types. +

+
+
min_port
+

Set minimum local UDP port. Default value is 5000. +

+
+
max_port
+

Set maximum local UDP port. Default value is 65000. +

+
+
timeout
+

Set maximum timeout (in seconds) to wait for incoming connections. +

+

A value of -1 mean infinite (default). This option implies the +‘rtsp_flags’ set to ‘listen’. +

+
+
reorder_queue_size
+

Set number of packets to buffer for handling of reordered packets. +

+
+
stimeout
+

Set socket TCP I/O timeout in micro seconds. +

+
+
user-agent
+

Override User-Agent header. If not specified, it default to the +libavformat identifier string. +

+
+ +

When receiving data over UDP, the demuxer tries to reorder received packets +(since they may arrive out of order, or packets may get lost totally). This +can be disabled by setting the maximum demuxing delay to zero (via +the max_delay field of AVFormatContext). +

+

When watching multi-bitrate Real-RTSP streams with ffplay, the +streams to display can be chosen with -vst n and +-ast n for video and audio respectively, and can be switched +on the fly by pressing v and a. +

+ +

16.24.1 Examples

+ +

The following examples all make use of the ffplay and +ffmpeg tools. +

+
    +
  • +Watch a stream over UDP, with a max reordering delay of 0.5 seconds: +
     
    ffplay -max_delay 500000 -rtsp_transport udp rtsp://server/video.mp4
    +
    + +
  • +Watch a stream tunneled over HTTP: +
     
    ffplay -rtsp_transport http rtsp://server/video.mp4
    +
    + +
  • +Send a stream in realtime to a RTSP server, for others to watch: +
     
    ffmpeg -re -i input -f rtsp -muxdelay 0.1 rtsp://server/live.sdp
    +
    + +
  • +Receive a stream in realtime: +
     
    ffmpeg -rtsp_flags listen -i rtsp://ownaddress/live.sdp output
    +
    +
+ + +

16.25 sap

+ +

Session Announcement Protocol (RFC 2974). This is not technically a +protocol handler in libavformat, it is a muxer and demuxer. +It is used for signalling of RTP streams, by announcing the SDP for the +streams regularly on a separate port. +

+ +

16.25.1 Muxer

+ +

The syntax for a SAP url given to the muxer is: +

 
sap://destination[:port][?options]
+
+ +

The RTP packets are sent to destination on port port, +or to port 5004 if no port is specified. +options is a &-separated list. The following options +are supported: +

+
+
announce_addr=address
+

Specify the destination IP address for sending the announcements to. +If omitted, the announcements are sent to the commonly used SAP +announcement multicast address 224.2.127.254 (sap.mcast.net), or +ff0e::2:7ffe if destination is an IPv6 address. +

+
+
announce_port=port
+

Specify the port to send the announcements on, defaults to +9875 if not specified. +

+
+
ttl=ttl
+

Specify the time to live value for the announcements and RTP packets, +defaults to 255. +

+
+
same_port=0|1
+

If set to 1, send all RTP streams on the same port pair. If zero (the +default), all streams are sent on unique ports, with each stream on a +port 2 numbers higher than the previous. +VLC/Live555 requires this to be set to 1, to be able to receive the stream. +The RTP stack in libavformat for receiving requires all streams to be sent +on unique ports. +

+
+ +

Example command lines follow. +

+

To broadcast a stream on the local subnet, for watching in VLC: +

+
 
ffmpeg -re -i input -f sap sap://224.0.0.255?same_port=1
+
+ +

Similarly, for watching in ffplay: +

+
 
ffmpeg -re -i input -f sap sap://224.0.0.255
+
+ +

And for watching in ffplay, over IPv6: +

+
 
ffmpeg -re -i input -f sap sap://[ff0e::1:2:3:4]
+
+ + +

16.25.2 Demuxer

+ +

The syntax for a SAP url given to the demuxer is: +

 
sap://[address][:port]
+
+ +

address is the multicast address to listen for announcements on, +if omitted, the default 224.2.127.254 (sap.mcast.net) is used. port +is the port that is listened on, 9875 if omitted. +

+

The demuxer listens for announcements on the given address and port. +Once an announcement is received, it tries to receive that particular stream. +

+

Example command lines follow. +

+

To play back the first stream announced on the normal SAP multicast address: +

+
 
ffplay sap://
+
+ +

To play back the first stream announced on the default IPv6 SAP multicast address: +

+
 
ffplay sap://[ff0e::2:7ffe]
+
+ + +

16.26 sctp

+ +

Stream Control Transmission Protocol. +

+

The accepted URL syntax is: +

 
sctp://host:port[?options]
+
+ +

The protocol accepts the following options: +

+
listen
+

If set to any value, listen for an incoming connection. Outgoing connection is done by default. +

+
+
max_streams
+

Set the maximum number of streams. By default no limit is set. +

+
+ + +

16.27 srtp

+ +

Secure Real-time Transport Protocol. +

+

The accepted options are: +

+
srtp_in_suite
+
srtp_out_suite
+

Select input and output encoding suites. +

+

Supported values: +

+
AES_CM_128_HMAC_SHA1_80
+
SRTP_AES128_CM_HMAC_SHA1_80
+
AES_CM_128_HMAC_SHA1_32
+
SRTP_AES128_CM_HMAC_SHA1_32
+
+ +
+
srtp_in_params
+
srtp_out_params
+

Set input and output encoding parameters, which are expressed by a +base64-encoded representation of a binary block. The first 16 bytes of +this binary block are used as master key, the following 14 bytes are +used as master salt. +

+
+ + +

16.28 tcp

+ +

Transmission Control Protocol. +

+

The required syntax for a TCP url is: +

 
tcp://hostname:port[?options]
+
+ +

options contains a list of &-separated options of the form +key=val. +

+

The list of supported options follows. +

+
+
listen=1|0
+

Listen for an incoming connection. Default value is 0. +

+
+
timeout=microseconds
+

Set raise error timeout, expressed in microseconds. +

+

This option is only relevant in read mode: if no data arrived in more +than this time interval, raise error. +

+
+
listen_timeout=microseconds
+

Set listen timeout, expressed in microseconds. +

+
+ +

The following example shows how to setup a listening TCP connection +with ffmpeg, which is then accessed with ffplay: +

 
ffmpeg -i input -f format tcp://hostname:port?listen
+ffplay tcp://hostname:port
+
+ + +

16.29 tls

+ +

Transport Layer Security (TLS) / Secure Sockets Layer (SSL) +

+

The required syntax for a TLS/SSL url is: +

 
tls://hostname:port[?options]
+
+ +

The following parameters can be set via command line options +(or in code via AVOptions): +

+
+
ca_file, cafile=filename
+

A file containing certificate authority (CA) root certificates to treat +as trusted. If the linked TLS library contains a default this might not +need to be specified for verification to work, but not all libraries and +setups have defaults built in. +The file must be in OpenSSL PEM format. +

+
+
tls_verify=1|0
+

If enabled, try to verify the peer that we are communicating with. +Note, if using OpenSSL, this currently only makes sure that the +peer certificate is signed by one of the root certificates in the CA +database, but it does not validate that the certificate actually +matches the host name we are trying to connect to. (With GnuTLS, +the host name is validated as well.) +

+

This is disabled by default since it requires a CA database to be +provided by the caller in many cases. +

+
+
cert_file, cert=filename
+

A file containing a certificate to use in the handshake with the peer. +(When operating as server, in listen mode, this is more often required +by the peer, while client certificates only are mandated in certain +setups.) +

+
+
key_file, key=filename
+

A file containing the private key for the certificate. +

+
+
listen=1|0
+

If enabled, listen for connections on the provided port, and assume +the server role in the handshake instead of the client role. +

+
+
+ +

Example command lines: +

+

To create a TLS/SSL server that serves an input stream. +

+
 
ffmpeg -i input -f format tls://hostname:port?listen&cert=server.crt&key=server.key
+
+ +

To play back a stream from the TLS/SSL server using ffplay: +

+
 
ffplay tls://hostname:port
+
+ + +

16.30 udp

+ +

User Datagram Protocol. +

+

The required syntax for an UDP URL is: +

 
udp://hostname:port[?options]
+
+ +

options contains a list of &-separated options of the form key=val. +

+

In case threading is enabled on the system, a circular buffer is used +to store the incoming data, which allows one to reduce loss of data due to +UDP socket buffer overruns. The fifo_size and +overrun_nonfatal options are related to this buffer. +

+

The list of supported options follows. +

+
+
buffer_size=size
+

Set the UDP socket buffer size in bytes. This is used both for the +receiving and the sending buffer size. +

+
+
localport=port
+

Override the local UDP port to bind with. +

+
+
localaddr=addr
+

Choose the local IP address. This is useful e.g. if sending multicast +and the host has multiple interfaces, where the user can choose +which interface to send on by specifying the IP address of that interface. +

+
+
pkt_size=size
+

Set the size in bytes of UDP packets. +

+
+
reuse=1|0
+

Explicitly allow or disallow reusing UDP sockets. +

+
+
ttl=ttl
+

Set the time to live value (for multicast only). +

+
+
connect=1|0
+

Initialize the UDP socket with connect(). In this case, the +destination address can’t be changed with ff_udp_set_remote_url later. +If the destination address isn’t known at the start, this option can +be specified in ff_udp_set_remote_url, too. +This allows finding out the source address for the packets with getsockname, +and makes writes return with AVERROR(ECONNREFUSED) if "destination +unreachable" is received. +For receiving, this gives the benefit of only receiving packets from +the specified peer address/port. +

+
+
sources=address[,address]
+

Only receive packets sent to the multicast group from one of the +specified sender IP addresses. +

+
+
block=address[,address]
+

Ignore packets sent to the multicast group from the specified +sender IP addresses. +

+
+
fifo_size=units
+

Set the UDP receiving circular buffer size, expressed as a number of +packets with size of 188 bytes. If not specified defaults to 7*4096. +

+
+
overrun_nonfatal=1|0
+

Survive in case of UDP receiving circular buffer overrun. Default +value is 0. +

+
+
timeout=microseconds
+

Set raise error timeout, expressed in microseconds. +

+

This option is only relevant in read mode: if no data arrived in more +than this time interval, raise error. +

+
+ + +

16.30.1 Examples

+ +
    +
  • +Use ffmpeg to stream over UDP to a remote endpoint: +
     
    ffmpeg -i input -f format udp://hostname:port
    +
    + +
  • +Use ffmpeg to stream in mpegts format over UDP using 188 +sized UDP packets, using a large input buffer: +
     
    ffmpeg -i input -f mpegts udp://hostname:port?pkt_size=188&buffer_size=65535
    +
    + +
  • +Use ffmpeg to receive over UDP from a remote endpoint: +
     
    ffmpeg -i udp://[multicast-address]:port ...
    +
    +
+ + +

16.31 unix

+ +

Unix local socket +

+

The required syntax for a Unix socket URL is: +

+
 
unix://filepath
+
+ +

The following parameters can be set via command line options +(or in code via AVOptions): +

+
+
timeout
+

Timeout in ms. +

+
listen
+

Create the Unix socket in listening mode. +

+
+ + +

17. Device Options

+ +

The libavdevice library provides the same interface as +libavformat. Namely, an input device is considered like a demuxer, and +an output device like a muxer, and the interface and generic device +options are the same provided by libavformat (see the ffmpeg-formats +manual). +

+

In addition each input or output device may support so-called private +options, which are specific for that component. +

+

Options may be set by specifying -option value in the +FFmpeg tools, or by setting the value explicitly in the device +AVFormatContext options or using the ‘libavutil/opt.h’ API +for programmatic use. +

+ + +

18. Input Devices

+ +

Input devices are configured elements in FFmpeg which allow access to +the data coming from a multimedia device attached to your system. +

+

When you configure your FFmpeg build, all the supported input devices +are enabled by default. You can list all available ones using the +configure option "–list-indevs". +

+

You can disable all the input devices using the configure option +"–disable-indevs", and selectively enable an input device using the +option "–enable-indev=INDEV", or you can disable a particular +input device using the option "–disable-indev=INDEV". +

+

The option "-formats" of the ff* tools will display the list of +supported input devices (amongst the demuxers). +

+

A description of the currently available input devices follows. +

+ +

18.1 alsa

+ +

ALSA (Advanced Linux Sound Architecture) input device. +

+

To enable this input device during configuration you need libasound +installed on your system. +

+

This device allows capturing from an ALSA device. The name of the +device to capture has to be an ALSA card identifier. +

+

An ALSA identifier has the syntax: +

 
hw:CARD[,DEV[,SUBDEV]]
+
+ +

where the DEV and SUBDEV components are optional. +

+

The three arguments (in order: CARD,DEV,SUBDEV) +specify card number or identifier, device number and subdevice number +(-1 means any). +

+

To see the list of cards currently recognized by your system check the +files ‘/proc/asound/cards’ and ‘/proc/asound/devices’. +

+

For example to capture with ffmpeg from an ALSA device with +card id 0, you may run the command: +

 
ffmpeg -f alsa -i hw:0 alsaout.wav
+
+ +

For more information see: +http://www.alsa-project.org/alsa-doc/alsa-lib/pcm.html +

+ +

18.2 bktr

+ +

BSD video input device. +

+ +

18.3 dshow

+ +

Windows DirectShow input device. +

+

DirectShow support is enabled when FFmpeg is built with the mingw-w64 project. +Currently only audio and video devices are supported. +

+

Multiple devices may be opened as separate inputs, but they may also be +opened on the same input, which should improve synchronism between them. +

+

The input name should be in the format: +

+
 
TYPE=NAME[:TYPE=NAME]
+
+ +

where TYPE can be either audio or video, +and NAME is the device’s name. +

+ +

18.3.1 Options

+ +

If no options are specified, the device’s defaults are used. +If the device does not support the requested options, it will +fail to open. +

+
+
video_size
+

Set the video size in the captured video. +

+
+
framerate
+

Set the frame rate in the captured video. +

+
+
sample_rate
+

Set the sample rate (in Hz) of the captured audio. +

+
+
sample_size
+

Set the sample size (in bits) of the captured audio. +

+
+
channels
+

Set the number of channels in the captured audio. +

+
+
list_devices
+

If set to ‘true’, print a list of devices and exit. +

+
+
list_options
+

If set to ‘true’, print a list of selected device’s options +and exit. +

+
+
video_device_number
+

Set video device number for devices with same name (starts at 0, +defaults to 0). +

+
+
audio_device_number
+

Set audio device number for devices with same name (starts at 0, +defaults to 0). +

+
+
pixel_format
+

Select pixel format to be used by DirectShow. This may only be set when +the video codec is not set or set to rawvideo. +

+
+
audio_buffer_size
+

Set audio device buffer size in milliseconds (which can directly +impact latency, depending on the device). +Defaults to using the audio device’s +default buffer size (typically some multiple of 500ms). +Setting this value too low can degrade performance. +See also +http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx +

+
+
+ + +

18.3.2 Examples

+ +
    +
  • +Print the list of DirectShow supported devices and exit: +
     
    $ ffmpeg -list_devices true -f dshow -i dummy
    +
    + +
  • +Open video device Camera: +
     
    $ ffmpeg -f dshow -i video="Camera"
    +
    + +
  • +Open second video device with name Camera: +
     
    $ ffmpeg -f dshow -video_device_number 1 -i video="Camera"
    +
    + +
  • +Open video device Camera and audio device Microphone: +
     
    $ ffmpeg -f dshow -i video="Camera":audio="Microphone"
    +
    + +
  • +Print the list of supported options in selected device and exit: +
     
    $ ffmpeg -list_options true -f dshow -i video="Camera"
    +
    + +
+ + +

18.4 dv1394

+ +

Linux DV 1394 input device. +

+ +

18.5 fbdev

+ +

Linux framebuffer input device. +

+

The Linux framebuffer is a graphic hardware-independent abstraction +layer to show graphics on a computer monitor, typically on the +console. It is accessed through a file device node, usually +‘/dev/fb0’. +

+

For more detailed information read the file +Documentation/fb/framebuffer.txt included in the Linux source tree. +

+

To record from the framebuffer device ‘/dev/fb0’ with +ffmpeg: +

 
ffmpeg -f fbdev -r 10 -i /dev/fb0 out.avi
+
+ +

You can take a single screenshot image with the command: +

 
ffmpeg -f fbdev -frames:v 1 -r 1 -i /dev/fb0 screenshot.jpeg
+
+ +

See also http://linux-fbdev.sourceforge.net/, and fbset(1). +

+ +

18.6 iec61883

+ +

FireWire DV/HDV input device using libiec61883. +

+

To enable this input device, you need libiec61883, libraw1394 and +libavc1394 installed on your system. Use the configure option +--enable-libiec61883 to compile with the device enabled. +

+

The iec61883 capture device supports capturing from a video device +connected via IEEE1394 (FireWire), using libiec61883 and the new Linux +FireWire stack (juju). This is the default DV/HDV input method in Linux +Kernel 2.6.37 and later, since the old FireWire stack was removed. +

+

Specify the FireWire port to be used as input file, or "auto" +to choose the first port connected. +

+ +

18.6.1 Options

+ +
+
dvtype
+

Override autodetection of DV/HDV. This should only be used if auto +detection does not work, or if usage of a different device type +should be prohibited. Treating a DV device as HDV (or vice versa) will +not work and result in undefined behavior. +The values ‘auto’, ‘dv’ and ‘hdv’ are supported. +

+
+
dvbuffer
+

Set maximum size of buffer for incoming data, in frames. For DV, this +is an exact value. For HDV, it is not frame exact, since HDV does +not have a fixed frame size. +

+
+
dvguid
+

Select the capture device by specifying its GUID. Capturing will only +be performed from the specified device and fails if no device with the +given GUID is found. This is useful to select the input if multiple +devices are connected at the same time. +Look at /sys/bus/firewire/devices to find out the GUIDs. +

+
+
+ + +

18.6.2 Examples

+ +
    +
  • +Grab and show the input of a FireWire DV/HDV device. +
     
    ffplay -f iec61883 -i auto
    +
    + +
  • +Grab and record the input of a FireWire DV/HDV device, +using a packet buffer of 100000 packets if the source is HDV. +
     
    ffmpeg -f iec61883 -i auto -hdvbuffer 100000 out.mpg
    +
    + +
+ + +

18.7 jack

+ +

JACK input device. +

+

To enable this input device during configuration you need libjack +installed on your system. +

+

A JACK input device creates one or more JACK writable clients, one for +each audio channel, with name client_name:input_N, where +client_name is the name provided by the application, and N +is a number which identifies the channel. +Each writable client will send the acquired data to the FFmpeg input +device. +

+

Once you have created one or more JACK readable clients, you need to +connect them to one or more JACK writable clients. +

+

To connect or disconnect JACK clients you can use the jack_connect +and jack_disconnect programs, or do it through a graphical interface, +for example with qjackctl. +

+

To list the JACK clients and their properties you can invoke the command +jack_lsp. +

+

Follows an example which shows how to capture a JACK readable client +with ffmpeg. +

 
# Create a JACK writable client with name "ffmpeg".
+$ ffmpeg -f jack -i ffmpeg -y out.wav
+
+# Start the sample jack_metro readable client.
+$ jack_metro -b 120 -d 0.2 -f 4000
+
+# List the current JACK clients.
+$ jack_lsp -c
+system:capture_1
+system:capture_2
+system:playback_1
+system:playback_2
+ffmpeg:input_1
+metro:120_bpm
+
+# Connect metro to the ffmpeg writable client.
+$ jack_connect metro:120_bpm ffmpeg:input_1
+
+ +

For more information read: +http://jackaudio.org/ +

+ +

18.8 lavfi

+ +

Libavfilter input virtual device. +

+

This input device reads data from the open output pads of a libavfilter +filtergraph. +

+

For each filtergraph open output, the input device will create a +corresponding stream which is mapped to the generated output. Currently +only video data is supported. The filtergraph is specified through the +option ‘graph’. +

+ +

18.8.1 Options

+ +
+
graph
+

Specify the filtergraph to use as input. Each video open output must be +labelled by a unique string of the form "outN", where N is a +number starting from 0 corresponding to the mapped input stream +generated by the device. +The first unlabelled output is automatically assigned to the "out0" +label, but all the others need to be specified explicitly. +

+

If not specified defaults to the filename specified for the input +device. +

+
+
graph_file
+

Set the filename of the filtergraph to be read and sent to the other +filters. Syntax of the filtergraph is the same as the one specified by +the option graph. +

+
+
+ + +

18.8.2 Examples

+ +
    +
  • +Create a color video stream and play it back with ffplay: +
     
    ffplay -f lavfi -graph "color=c=pink [out0]" dummy
    +
    + +
  • +As the previous example, but use filename for specifying the graph +description, and omit the "out0" label: +
     
    ffplay -f lavfi color=c=pink
    +
    + +
  • +Create three different video test filtered sources and play them: +
     
    ffplay -f lavfi -graph "testsrc [out0]; testsrc,hflip [out1]; testsrc,negate [out2]" test3
    +
    + +
  • +Read an audio stream from a file using the amovie source and play it +back with ffplay: +
     
    ffplay -f lavfi "amovie=test.wav"
    +
    + +
  • +Read an audio stream and a video stream and play it back with +ffplay: +
     
    ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
    +
    + +
+ + +

18.9 libdc1394

+ +

IIDC1394 input device, based on libdc1394 and libraw1394. +

+ +

18.10 openal

+ +

The OpenAL input device provides audio capture on all systems with a +working OpenAL 1.1 implementation. +

+

To enable this input device during configuration, you need OpenAL +headers and libraries installed on your system, and need to configure +FFmpeg with --enable-openal. +

+

OpenAL headers and libraries should be provided as part of your OpenAL +implementation, or as an additional download (an SDK). Depending on your +installation you may need to specify additional flags via the +--extra-cflags and --extra-ldflags for allowing the build +system to locate the OpenAL headers and libraries. +

+

An incomplete list of OpenAL implementations follows: +

+
+
Creative
+

The official Windows implementation, providing hardware acceleration +with supported devices and software fallback. +See http://openal.org/. +

+
OpenAL Soft
+

Portable, open source (LGPL) software implementation. Includes +backends for the most common sound APIs on the Windows, Linux, +Solaris, and BSD operating systems. +See http://kcat.strangesoft.net/openal.html. +

+
Apple
+

OpenAL is part of Core Audio, the official Mac OS X Audio interface. +See http://developer.apple.com/technologies/mac/audio-and-video.html +

+
+ +

This device allows one to capture from an audio input device handled +through OpenAL. +

+

You need to specify the name of the device to capture in the provided +filename. If the empty string is provided, the device will +automatically select the default device. You can get the list of the +supported devices by using the option list_devices. +

+ +

18.10.1 Options

+ +
+
channels
+

Set the number of channels in the captured audio. Only the values +‘1’ (monaural) and ‘2’ (stereo) are currently supported. +Defaults to ‘2’. +

+
+
sample_size
+

Set the sample size (in bits) of the captured audio. Only the values +‘8’ and ‘16’ are currently supported. Defaults to +‘16’. +

+
+
sample_rate
+

Set the sample rate (in Hz) of the captured audio. +Defaults to ‘44.1k’. +

+
+
list_devices
+

If set to ‘true’, print a list of devices and exit. +Defaults to ‘false’. +

+
+
+ + +

18.10.2 Examples

+ +

Print the list of OpenAL supported devices and exit: +

 
$ ffmpeg -list_devices true -f openal -i dummy out.ogg
+
+ +

Capture from the OpenAL device ‘DR-BT101 via PulseAudio’: +

 
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out.ogg
+
+ +

Capture from the default device (note the empty string ” as filename): +

 
$ ffmpeg -f openal -i '' out.ogg
+
+ +

Capture from two devices simultaneously, writing to two different files, +within the same ffmpeg command: +

 
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out1.ogg -f openal -i 'ALSA Default' out2.ogg
+
+

Note: not all OpenAL implementations support multiple simultaneous capture - +try the latest OpenAL Soft if the above does not work. +

+ +

18.11 oss

+ +

Open Sound System input device. +

+

The filename to provide to the input device is the device node +representing the OSS input device, and is usually set to +‘/dev/dsp’. +

+

For example to grab from ‘/dev/dsp’ using ffmpeg use the +command: +

 
ffmpeg -f oss -i /dev/dsp /tmp/oss.wav
+
+ +

For more information about OSS see: +http://manuals.opensound.com/usersguide/dsp.html +

+ +

18.12 pulse

+ +

PulseAudio input device. +

+

To enable this output device you need to configure FFmpeg with --enable-libpulse. +

+

The filename to provide to the input device is a source device or the +string "default" +

+

To list the PulseAudio source devices and their properties you can invoke +the command pactl list sources. +

+

More information about PulseAudio can be found on http://www.pulseaudio.org. +

+ +

18.12.1 Options

+
+
server
+

Connect to a specific PulseAudio server, specified by an IP address. +Default server is used when not provided. +

+
+
name
+

Specify the application name PulseAudio will use when showing active clients, +by default it is the LIBAVFORMAT_IDENT string. +

+
+
stream_name
+

Specify the stream name PulseAudio will use when showing active streams, +by default it is "record". +

+
+
sample_rate
+

Specify the sample rate in Hz; by default 48kHz is used.

+
+
channels
+

Specify the channels in use, by default 2 (stereo) is set. +

+
+
frame_size
+

Specify the number of bytes per frame, by default it is set to 1024. +

+
+
fragment_size
+

Specify the minimal buffering fragment in PulseAudio, it will affect the +audio latency. By default it is unset. +

+
+ + +

18.12.2 Examples

+

Record a stream from default device: +

 
ffmpeg -f pulse -i default /tmp/pulse.wav
+
+ + +

18.13 sndio

+ +

sndio input device. +

+

To enable this input device during configuration you need libsndio +installed on your system. +

+

The filename to provide to the input device is the device node +representing the sndio input device, and is usually set to +‘/dev/audio0’. +

+

For example to grab from ‘/dev/audio0’ using ffmpeg use the +command: +

 
ffmpeg -f sndio -i /dev/audio0 /tmp/oss.wav
+
+ + +

18.14 video4linux2, v4l2

+ +

Video4Linux2 input video device. +

+

"v4l2" can be used as alias for "video4linux2". +

+

If FFmpeg is built with v4l-utils support (by using the +--enable-libv4l2 configure option), it is possible to use it with the +-use_libv4l2 input device option. +

+

The name of the device to grab is a file device node, usually Linux +systems tend to automatically create such nodes when the device +(e.g. an USB webcam) is plugged into the system, and has a name of the +kind ‘/dev/videoN’, where N is a number associated to +the device. +

+

Video4Linux2 devices usually support a limited set of +widthxheight sizes and frame rates. You can check which are +supported using -list_formats all for Video4Linux2 devices. +Some devices, like TV cards, support one or more standards. It is possible +to list all the supported standards using -list_standards all. +

+

The time base for the timestamps is 1 microsecond. Depending on the kernel +version and configuration, the timestamps may be derived from the real time +clock (origin at the Unix Epoch) or the monotonic clock (origin usually at +boot time, unaffected by NTP or manual changes to the clock). The +‘-timestamps abs’ or ‘-ts abs’ option can be used to force +conversion into the real time clock. +

+

Some usage examples of the video4linux2 device with ffmpeg +and ffplay: +

    +
  • +Grab and show the input of a video4linux2 device: +
     
    ffplay -f video4linux2 -framerate 30 -video_size hd720 /dev/video0
    +
    + +
  • +Grab and record the input of a video4linux2 device, leave the +frame rate and size as previously set: +
     
    ffmpeg -f video4linux2 -input_format mjpeg -i /dev/video0 out.mpeg
    +
    +
+ +

For more information about Video4Linux, check http://linuxtv.org/. +

+ +

18.14.1 Options

+ +
+
standard
+

Set the standard. Must be the name of a supported standard. To get a +list of the supported standards, use the ‘list_standards’ +option. +

+
+
channel
+

Set the input channel number. Default to -1, which means using the +previously selected channel. +

+
+
video_size
+

Set the video frame size. The argument must be a string in the form +WIDTHxHEIGHT or a valid size abbreviation. +

+
+
pixel_format
+

Select the pixel format (only valid for raw video input). +

+
+
input_format
+

Set the preferred pixel format (for raw video) or a codec name. +This option allows one to select the input format, when several are +available. +

+
+
framerate
+

Set the preferred video frame rate. +

+
+
list_formats
+

List available formats (supported pixel formats, codecs, and frame +sizes) and exit. +

+

Available values are: +

+
all
+

Show all available (compressed and non-compressed) formats. +

+
+
raw
+

Show only raw video (non-compressed) formats. +

+
+
compressed
+

Show only compressed formats. +

+
+ +
+
list_standards
+

List supported standards and exit. +

+

Available values are: +

+
all
+

Show all supported standards. +

+
+ +
+
timestamps, ts
+

Set type of timestamps for grabbed frames. +

+

Available values are: +

+
default
+

Use timestamps from the kernel. +

+
+
abs
+

Use absolute timestamps (wall clock). +

+
+
mono2abs
+

Force conversion from monotonic to absolute timestamps. +

+
+ +

Default value is default. +

+
+ + +

18.15 vfwcap

+ +

VfW (Video for Windows) capture input device. +

+

The filename passed as input is the capture driver number, ranging from +0 to 9. You may use "list" as filename to print a list of drivers. Any +other filename will be interpreted as device number 0. +

+ +

18.16 x11grab

+ +

X11 video input device. +

+

This device allows one to capture a region of an X11 display. +

+

The filename passed as input has the syntax: +

 
[hostname]:display_number.screen_number[+x_offset,y_offset]
+
+ +

hostname:display_number.screen_number specifies the +X11 display name of the screen to grab from. hostname can be +omitted, and defaults to "localhost". The environment variable +DISPLAY contains the default display name. +

+

x_offset and y_offset specify the offsets of the grabbed +area with respect to the top-left border of the X11 screen. They +default to 0. +

+

Check the X11 documentation (e.g. man X) for more detailed information. +

+

Use the dpyinfo program for getting basic information about the +properties of your X11 display (e.g. grep for "name" or "dimensions"). +

+

For example to grab from ‘:0.0’ using ffmpeg: +

 
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0 out.mpg
+
+ +

Grab at position 10,20: +

 
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
+
+ + +

18.16.1 Options

+ +
+
draw_mouse
+

Specify whether to draw the mouse pointer. A value of 0 specify +not to draw the pointer. Default value is 1. +

+
+
follow_mouse
+

Make the grabbed area follow the mouse. The argument can be +centered or a number of pixels PIXELS. +

+

When it is specified with "centered", the grabbing region follows the mouse +pointer and keeps the pointer at the center of region; otherwise, the region +follows only when the mouse pointer reaches within PIXELS (greater than +zero) to the edge of region. +

+

For example: +

 
ffmpeg -f x11grab -follow_mouse centered -framerate 25 -video_size cif -i :0.0 out.mpg
+
+ +

To follow only when the mouse pointer reaches within 100 pixels to edge: +

 
ffmpeg -f x11grab -follow_mouse 100 -framerate 25 -video_size cif -i :0.0 out.mpg
+
+ +
+
framerate
+

Set the grabbing frame rate. Default value is ntsc, +corresponding to a frame rate of 30000/1001. +

+
+
show_region
+

Show grabbed region on screen. +

+

If show_region is specified with 1, then the grabbing +region will be indicated on screen. With this option, it is easy to +know what is being grabbed if only a portion of the screen is grabbed. +

+

For example: +

 
ffmpeg -f x11grab -show_region 1 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
+
+ +

With follow_mouse: +

 
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_size cif -i :0.0 out.mpg
+
+ +
+
video_size
+

Set the video frame size. Default value is vga. +

+
+ + +

19. Resampler Options

+ +

The audio resampler supports the following named options. +

+

Options may be set by specifying -option value in the +FFmpeg tools, option=value for the aresample filter, +by setting the value explicitly in the +SwrContext options or using the ‘libavutil/opt.h’ API for +programmatic use. +

+
+
ich, in_channel_count
+

Set the number of input channels. Default value is 0. Setting this +value is not mandatory if the corresponding channel layout +‘in_channel_layout’ is set. +

+
+
och, out_channel_count
+

Set the number of output channels. Default value is 0. Setting this +value is not mandatory if the corresponding channel layout +‘out_channel_layout’ is set. +

+
+
uch, used_channel_count
+

Set the number of used input channels. Default value is 0. This option is +only used for special remapping. +

+
+
isr, in_sample_rate
+

Set the input sample rate. Default value is 0. +

+
+
osr, out_sample_rate
+

Set the output sample rate. Default value is 0. +

+
+
isf, in_sample_fmt
+

Specify the input sample format. It is set by default to none. +

+
+
osf, out_sample_fmt
+

Specify the output sample format. It is set by default to none. +

+
+
tsf, internal_sample_fmt
+

Set the internal sample format. Default value is none. +This will automatically be chosen when it is not explicitly set. +

+
+
icl, in_channel_layout
+
ocl, out_channel_layout
+

Set the input/output channel layout. +

+

See (ffmpeg-utils)channel layout syntax +for the required syntax. +

+
+
clev, center_mix_level
+

Set the center mix level. It is a value expressed in deciBel, and must be +in the interval [-32,32]. +

+
+
slev, surround_mix_level
+

Set the surround mix level. It is a value expressed in deciBel, and must +be in the interval [-32,32]. +

+
+
lfe_mix_level
+

Set LFE mix into non LFE level. It is used when there is a LFE input but no +LFE output. It is a value expressed in deciBel, and must +be in the interval [-32,32]. +

+
+
rmvol, rematrix_volume
+

Set rematrix volume. Default value is 1.0. +

+
+
rematrix_maxval
+

Set maximum output value for rematrixing. This can be used to prevent clipping versus preventing volume reduction. A value of 1.0 prevents clipping.

+
+
flags, swr_flags
+

Set flags used by the converter. Default value is 0. +

+

It supports the following individual flags: +

+
res
+

force resampling, this flag forces resampling to be used even when the +input and output sample rates match. +

+
+ +
+
dither_scale
+

Set the dither scale. Default value is 1. +

+
+
dither_method
+

Set dither method. Default value is 0. +

+

Supported values: +

+
rectangular
+

select rectangular dither +

+
triangular
+

select triangular dither +

+
triangular_hp
+

select triangular dither with high pass +

+
lipshitz
+

select lipshitz noise shaping dither +

+
shibata
+

select shibata noise shaping dither +

+
low_shibata
+

select low shibata noise shaping dither +

+
high_shibata
+

select high shibata noise shaping dither +

+
f_weighted
+

select f-weighted noise shaping dither +

+
modified_e_weighted
+

select modified-e-weighted noise shaping dither +

+
improved_e_weighted
+

select improved-e-weighted noise shaping dither +

+
+
+ +
+
resampler
+

Set resampling engine. Default value is swr. +

+

Supported values: +

+
swr
+

select the native SW Resampler; filter options precision and cheby are not +applicable in this case. +

+
soxr
+

select the SoX Resampler (where available); compensation, and filter options +filter_size, phase_shift, filter_type & kaiser_beta, are not applicable in this +case. +

+
+ +
+
filter_size
+

For swr only, set resampling filter size, default value is 32. +

+
+
phase_shift
+

For swr only, set resampling phase shift, default value is 10, and must be in +the interval [0,30]. +

+
+
linear_interp
+

Use Linear Interpolation if set to 1, default value is 0. +

+
+
cutoff
+

Set cutoff frequency (swr: 6dB point; soxr: 0dB point) ratio; must be a float +value between 0 and 1. Default value is 0.97 with swr, and 0.91 with soxr +(which, with a sample-rate of 44100, preserves the entire audio band to 20kHz). +

+
+
precision
+

For soxr only, the precision in bits to which the resampled signal will be +calculated. The default value of 20 (which, with suitable dithering, is +appropriate for a destination bit-depth of 16) gives SoX’s ’High Quality’; a +value of 28 gives SoX’s ’Very High Quality’. +

+
+
cheby
+

For soxr only, selects passband rolloff none (Chebyshev) & higher-precision +approximation for ’irrational’ ratios. Default value is 0. +

+
+
async
+

For swr only, simple 1 parameter audio sync to timestamps using stretching, +squeezing, filling and trimming. Setting this to 1 will enable filling and +trimming, larger values represent the maximum amount in samples that the data +may be stretched or squeezed for each second. +Default value is 0, thus no compensation is applied to make the samples match +the audio timestamps. +

+
+
first_pts
+

For swr only, assume the first pts should be this value. The time unit is 1 / sample rate. +This allows for padding/trimming at the start of stream. By default, no +assumption is made about the first frame’s expected pts, so no padding or +trimming is done. For example, this could be set to 0 to pad the beginning with +silence if an audio stream starts after the video stream or to trim any samples +with a negative pts due to encoder delay. +

+
+
min_comp
+

For swr only, set the minimum difference between timestamps and audio data (in +seconds) to trigger stretching/squeezing/filling or trimming of the +data to make it match the timestamps. The default is that +stretching/squeezing/filling and trimming is disabled +(‘min_comp’ = FLT_MAX). +

+
+
min_hard_comp
+

For swr only, set the minimum difference between timestamps and audio data (in +seconds) to trigger adding/dropping samples to make it match the +timestamps. This option effectively is a threshold to select between +hard (trim/fill) and soft (squeeze/stretch) compensation. Note that +all compensation is by default disabled through ‘min_comp’. +The default is 0.1. +

+
+
comp_duration
+

For swr only, set duration (in seconds) over which data is stretched/squeezed +to make it match the timestamps. Must be a non-negative double float value, +default value is 1.0. +

+
+
max_soft_comp
+

For swr only, set maximum factor by which data is stretched/squeezed to make it +match the timestamps. Must be a non-negative double float value, default value +is 0. +

+
+
matrix_encoding
+

Select matrixed stereo encoding. +

+

It accepts the following values: +

+
none
+

select none +

+
dolby
+

select Dolby +

+
dplii
+

select Dolby Pro Logic II +

+
+ +

Default value is none. +

+
+
filter_type
+

For swr only, select resampling filter type. This only affects resampling +operations. +

+

It accepts the following values: +

+
cubic
+

select cubic +

+
blackman_nuttall
+

select Blackman Nuttall Windowed Sinc +

+
kaiser
+

select Kaiser Windowed Sinc +

+
+ +
+
kaiser_beta
+

For swr only, set Kaiser Window Beta value. Must be an integer in the +interval [2,16], default value is 9. +

+
+
output_sample_bits
+

For swr only, set number of used output sample bits for dithering. Must be an integer in the +interval [0,64], default value is 0, which means it’s not used. +

+
+
+ +

+

+

20. Scaler Options

+ +

The video scaler supports the following named options. +

+

Options may be set by specifying -option value in the +FFmpeg tools. For programmatic use, they can be set explicitly in the +SwsContext options or through the ‘libavutil/opt.h’ API. +

+
+
+

+

+
sws_flags
+

Set the scaler flags. This is also used to set the scaling +algorithm. Only a single algorithm should be selected. +

+

It accepts the following values: +

+
fast_bilinear
+

Select fast bilinear scaling algorithm. +

+
+
bilinear
+

Select bilinear scaling algorithm. +

+
+
bicubic
+

Select bicubic scaling algorithm. +

+
+
experimental
+

Select experimental scaling algorithm. +

+
+
neighbor
+

Select nearest neighbor rescaling algorithm. +

+
+
area
+

Select averaging area rescaling algorithm. +

+
+
bicublin
+

Select bicubic scaling algorithm for the luma component, bilinear for +chroma components. +

+
+
gauss
+

Select Gaussian rescaling algorithm. +

+
+
sinc
+

Select sinc rescaling algorithm. +

+
+
lanczos
+

Select lanczos rescaling algorithm. +

+
+
spline
+

Select natural bicubic spline rescaling algorithm. +

+
+
print_info
+

Enable printing/debug logging. +

+
+
accurate_rnd
+

Enable accurate rounding. +

+
+
full_chroma_int
+

Enable full chroma interpolation. +

+
+
full_chroma_inp
+

Select full chroma input. +

+
+
bitexact
+

Enable bitexact output. +

+
+ +
+
srcw
+

Set source width. +

+
+
srch
+

Set source height. +

+
+
dstw
+

Set destination width. +

+
+
dsth
+

Set destination height. +

+
+
src_format
+

Set source pixel format (must be expressed as an integer). +

+
+
dst_format
+

Set destination pixel format (must be expressed as an integer). +

+
+
src_range
+

Select source range. +

+
+
dst_range
+

Select destination range. +

+
+
param0, param1
+

Set scaling algorithm parameters. The specified values are specific of +some scaling algorithms and ignored by others. The specified values +are floating point number values. +

+
+
sws_dither
+

Set the dithering algorithm. Accepts one of the following +values. Default value is ‘auto’. +

+
+
auto
+

automatic choice +

+
+
none
+

no dithering +

+
+
bayer
+

bayer dither +

+
+
ed
+

error diffusion dither +

+
+ +
+
+ + +

21. Filtering Introduction

+ +

Filtering in FFmpeg is enabled through the libavfilter library. +

+

In libavfilter, a filter can have multiple inputs and multiple +outputs. +To illustrate the sorts of things that are possible, we consider the +following filtergraph. +

+
 
                [main]
+input --> split ---------------------> overlay --> output
+            |                             ^
+            |[tmp]                  [flip]|
+            +-----> crop --> vflip -------+
+
+ +

This filtergraph splits the input stream in two streams, sends one +stream through the crop filter and the vflip filter before merging it +back with the other stream by overlaying it on top. You can use the +following command to achieve this: +

+
 
ffmpeg -i INPUT -vf "split [main][tmp]; [tmp] crop=iw:ih/2:0:0, vflip [flip]; [main][flip] overlay=0:H/2" OUTPUT
+
+ +

The result will be that in output the top half of the video is mirrored +onto the bottom half. +

+

Filters in the same linear chain are separated by commas, and distinct +linear chains of filters are separated by semicolons. In our example, +crop,vflip are in one linear chain, split and +overlay are separately in another. The points where the linear +chains join are labelled by names enclosed in square brackets. In the +example, the split filter generates two outputs that are associated to +the labels [main] and [tmp]. +

+

The stream sent to the second output of split, labelled as +[tmp], is processed through the crop filter, which crops +away the lower half part of the video, and then vertically flipped. The +overlay filter takes in input the first unchanged output of the +split filter (which was labelled as [main]), and overlay on its +lower half the output generated by the crop,vflip filterchain. +

+

Some filters take in input a list of parameters: they are specified +after the filter name and an equal sign, and are separated from each other +by a colon. +

+

There exist so-called source filters that do not have an +audio/video input, and sink filters that will not have audio/video +output. +

+ + +

22. graph2dot

+ +

The ‘graph2dot’ program included in the FFmpeg ‘tools’ +directory can be used to parse a filtergraph description and issue a +corresponding textual representation in the dot language. +

+

Invoke the command: +

 
graph2dot -h
+
+ +

to see how to use ‘graph2dot’. +

+

You can then pass the dot description to the ‘dot’ program (from +the graphviz suite of programs) and obtain a graphical representation +of the filtergraph. +

+

For example the sequence of commands: +

 
echo GRAPH_DESCRIPTION | \
+tools/graph2dot -o graph.tmp && \
+dot -Tpng graph.tmp -o graph.png && \
+display graph.png
+
+ +

can be used to create and display an image representing the graph +described by the GRAPH_DESCRIPTION string. Note that this string must be +a complete self-contained graph, with its inputs and outputs explicitly defined. +For example if your command line is of the form: +

 
ffmpeg -i infile -vf scale=640:360 outfile
+
+

your GRAPH_DESCRIPTION string will need to be of the form: +

 
nullsrc,scale=640:360,nullsink
+
+

you may also need to set the nullsrc parameters and add a format +filter in order to simulate a specific input file. +

+ + +

23. Filtergraph description

+ +

A filtergraph is a directed graph of connected filters. It can contain +cycles, and there can be multiple links between a pair of +filters. Each link has one input pad on one side connecting it to one +filter from which it takes its input, and one output pad on the other +side connecting it to the one filter accepting its output. +

+

Each filter in a filtergraph is an instance of a filter class +registered in the application, which defines the features and the +number of input and output pads of the filter. +

+

A filter with no input pads is called a "source", a filter with no +output pads is called a "sink". +

+

+

+

23.1 Filtergraph syntax

+ +

A filtergraph can be represented using a textual representation, which is +recognized by the ‘-filter’/‘-vf’ and ‘-filter_complex’ +options in ffmpeg and ‘-vf’ in ffplay, and by the +avfilter_graph_parse()/avfilter_graph_parse2() function defined in +‘libavfilter/avfilter.h’. +

+

A filterchain consists of a sequence of connected filters, each one +connected to the previous one in the sequence. A filterchain is +represented by a list of ","-separated filter descriptions. +

+

A filtergraph consists of a sequence of filterchains. A sequence of +filterchains is represented by a list of ";"-separated filterchain +descriptions. +

+

A filter is represented by a string of the form: +[in_link_1]...[in_link_N]filter_name=arguments[out_link_1]...[out_link_M] +

+

filter_name is the name of the filter class of which the +described filter is an instance of, and has to be the name of one of +the filter classes registered in the program. +The name of the filter class is optionally followed by a string +"=arguments". +

+

arguments is a string which contains the parameters used to +initialize the filter instance. It may have one of the following forms: +

    +
  • +A ’:’-separated list of key=value pairs. + +
  • +A ’:’-separated list of value. In this case, the keys are assumed to be +the option names in the order they are declared. E.g. the fade filter +declares three options in this order – ‘type’, ‘start_frame’ and +‘nb_frames’. Then the parameter list in:0:30 means that the value +in is assigned to the option ‘type’, 0 to +‘start_frame’ and 30 to ‘nb_frames’. + +
  • +A ’:’-separated list of mixed direct value and long key=value +pairs. The direct value must precede the key=value pairs, and +follow the same constraints order of the previous point. The following +key=value pairs can be set in any preferred order. + +
+ +

If the option value itself is a list of items (e.g. the format filter +takes a list of pixel formats), the items in the list are usually separated by +’|’. +

+

The list of arguments can be quoted using the character "’" as initial +and ending mark, and the character ’\’ for escaping the characters +within the quoted text; otherwise the argument string is considered +terminated when the next special character (belonging to the set +"[]=;,") is encountered. +

+

The name and arguments of the filter are optionally preceded and +followed by a list of link labels. +A link label allows one to name a link and associate it to a filter output +or input pad. The preceding labels in_link_1 +... in_link_N, are associated to the filter input pads, +the following labels out_link_1 ... out_link_M, are +associated to the output pads. +

+

When two link labels with the same name are found in the +filtergraph, a link between the corresponding input and output pad is +created. +

+

If an output pad is not labelled, it is linked by default to the first +unlabelled input pad of the next filter in the filterchain. +For example in the filterchain: +

 
nullsrc, split[L1], [L2]overlay, nullsink
+
+

the split filter instance has two output pads, and the overlay filter +instance two input pads. The first output pad of split is labelled +"L1", the first input pad of overlay is labelled "L2", and the second +output pad of split is linked to the second input pad of overlay, +which are both unlabelled. +

+

In a complete filterchain all the unlabelled filter input and output +pads must be connected. A filtergraph is considered valid if all the +filter input and output pads of all the filterchains are connected. +

+

Libavfilter will automatically insert scale filters where format +conversion is required. It is possible to specify swscale flags +for those automatically inserted scalers by prepending +sws_flags=flags; +to the filtergraph description. +

+

Follows a BNF description for the filtergraph syntax: +

 
NAME             ::= sequence of alphanumeric characters and '_'
+LINKLABEL        ::= "[" NAME "]"
+LINKLABELS       ::= LINKLABEL [LINKLABELS]
+FILTER_ARGUMENTS ::= sequence of chars (eventually quoted)
+FILTER           ::= [LINKLABELS] NAME ["=" FILTER_ARGUMENTS] [LINKLABELS]
+FILTERCHAIN      ::= FILTER [,FILTERCHAIN]
+FILTERGRAPH      ::= [sws_flags=flags;] FILTERCHAIN [;FILTERGRAPH]
+
+ + +

23.2 Notes on filtergraph escaping

+ +

Filtergraph description composition entails several levels of +escaping. See (ffmpeg-utils)quoting_and_escaping for more +information about the employed escaping procedure. +

+

A first level escaping affects the content of each filter option +value, which may contain the special character : used to +separate values, or one of the escaping characters \'. +

+

A second level escaping affects the whole filter description, which +may contain the escaping characters \' or the special +characters [],; used by the filtergraph description. +

+

Finally, when you specify a filtergraph on a shell commandline, you +need to perform a third level escaping for the shell special +characters contained within it. +

+

For example, consider the following string to be embedded in +the drawtext filter description ‘text’ value: +

 
this is a 'string': may contain one, or more, special characters
+
+ +

This string contains the ' special escaping character, and the +: special character, so it needs to be escaped in this way: +

 
text=this is a \'string\'\: may contain one, or more, special characters
+
+ +

A second level of escaping is required when embedding the filter +description in a filtergraph description, in order to escape all the +filtergraph special characters. Thus the example above becomes: +

 
drawtext=text=this is a \\\'string\\\'\\: may contain one\, or more\, special characters
+
+

(note that in addition to the \' escaping special characters, +also , needs to be escaped). +

+

Finally an additional level of escaping is needed when writing the +filtergraph description in a shell command, which depends on the +escaping rules of the adopted shell. For example, assuming that +\ is special and needs to be escaped with another \, the +previous string will finally result in: +

 
-vf "drawtext=text=this is a \\\\\\'string\\\\\\'\\\\: may contain one\\, or more\\, special characters"
+
+ + +

24. Timeline editing

+ +

Some filters support a generic ‘enable’ option. For the filters +supporting timeline editing, this option can be set to an expression which is +evaluated before sending a frame to the filter. If the evaluation is non-zero, +the filter will be enabled, otherwise the frame will be sent unchanged to the +next filter in the filtergraph. +

+

The expression accepts the following values: +

+
t
+

timestamp expressed in seconds, NAN if the input timestamp is unknown +

+
+
n
+

sequential number of the input frame, starting from 0 +

+
+
pos
+

the position in the file of the input frame, NAN if unknown +

+
+ +

Additionally, these filters support an ‘enable’ command that can be used +to re-define the expression. +

+

Like any other filtering option, the ‘enable’ option follows the same +rules. +

+

For example, to enable a blur filter (smartblur) from 10 seconds to 3 +minutes, and a curves filter starting at 3 seconds: +

 
smartblur = enable='between(t,10,3*60)',
+curves    = enable='gte(t,3)' : preset=cross_process
+
+ + + +

25. Audio Filters

+ +

When you configure your FFmpeg build, you can disable any of the +existing filters using --disable-filters. +The configure output will show the audio filters included in your +build. +

+

Below is a description of the currently available audio filters. +

+ +

25.1 aconvert

+ +

Convert the input audio format to the specified formats. +

+

This filter is deprecated. Use aformat instead. +

+

The filter accepts a string of the form: +"sample_format:channel_layout". +

+

sample_format specifies the sample format, and can be a string or the +corresponding numeric value defined in ‘libavutil/samplefmt.h’. Use ’p’ +suffix for a planar sample format. +

+

channel_layout specifies the channel layout, and can be a string +or the corresponding number value defined in ‘libavutil/channel_layout.h’. +

+

The special parameter "auto", signifies that the filter will +automatically select the output format depending on the output filter. +

+ +

25.1.1 Examples

+ +
    +
  • +Convert input to float, planar, stereo: +
     
    aconvert=fltp:stereo
    +
    + +
  • +Convert input to unsigned 8-bit, automatically select out channel layout: +
     
    aconvert=u8:auto
    +
    +
+ + +

25.2 adelay

+ +

Delay one or more audio channels. +

+

Samples in delayed channel are filled with silence. +

+

The filter accepts the following option: +

+
+
delays
+

Set list of delays in milliseconds for each channel separated by ’|’. +At least one delay greater than 0 should be provided. +Unused delays will be silently ignored. If number of given delays is +smaller than number of channels all remaining channels will not be delayed. +

+
+ + +

25.2.1 Examples

+ +
    +
  • +Delay first channel by 1.5 seconds, the third channel by 0.5 seconds and leave +the second channel (and any other channels that may be present) unchanged. +
     
    adelay=1500|0|500
    +
    +
+ + +

25.3 aecho

+ +

Apply echoing to the input audio. +

+

Echoes are reflected sound and can occur naturally amongst mountains +(and sometimes large buildings) when talking or shouting; digital echo +effects emulate this behaviour and are often used to help fill out the +sound of a single instrument or vocal. The time difference between the +original signal and the reflection is the delay, and the +loudness of the reflected signal is the decay. +Multiple echoes can have different delays and decays. +

+

A description of the accepted parameters follows. +

+
+
in_gain
+

Set input gain of reflected signal. Default is 0.6. +

+
+
out_gain
+

Set output gain of reflected signal. Default is 0.3. +

+
+
delays
+

Set list of time intervals in milliseconds between original signal and reflections +separated by ’|’. Allowed range for each delay is (0 - 90000.0]. +Default is 1000. +

+
+
decays
+

Set list of loudnesses of reflected signals separated by ’|’. +Allowed range for each decay is (0 - 1.0]. +Default is 0.5. +

+
+ + +

25.3.1 Examples

+ +
    +
  • +Make it sound as if there are twice as many instruments as are actually playing: +
     
    aecho=0.8:0.88:60:0.4
    +
    + +
  • If delay is very short, then it sounds like a (metallic) robot playing music:
     
    aecho=0.8:0.88:6:0.4
    +
    + +
  • +A longer delay will sound like an open air concert in the mountains: +
     
    aecho=0.8:0.9:1000:0.3
    +
    + +
  • +Same as above but with one more mountain: +
     
    aecho=0.8:0.9:1000|1800:0.3|0.25
    +
    +
+ + +

25.4 aeval

+ +

Modify an audio signal according to the specified expressions. +

+

This filter accepts one or more expressions (one for each channel), +which are evaluated and used to modify a corresponding audio signal. +

+

This filter accepts the following options: +

+
+
exprs
+

Set the ’|’-separated expressions list for each separate channel. If +the number of input channels is greater than the number of +expressions, the last specified expression is used for the remaining +output channels. +

+
+
channel_layout, c
+

Set output channel layout. If not specified, the channel layout is +specified by the number of expressions. If set to ‘same’, it will +use by default the same input channel layout. +

+
+ +

Each expression in exprs can contain the following constants and functions: +

+
+
ch
+

channel number of the current expression +

+
+
n
+

number of the evaluated sample, starting from 0 +

+
+
s
+

sample rate +

+
+
t
+

time of the evaluated sample expressed in seconds +

+
+
nb_in_channels
+
nb_out_channels
+

input and output number of channels +

+
+
val(CH)
+

the value of input channel with number CH +

+
+ +

Note: this filter is slow. For faster processing you should use a +dedicated filter. +

+ +

25.4.1 Examples

+ +
    +
  • +Half volume: +
     
    aeval=val(ch)/2:c=same
    +
    + +
  • +Invert phase of the second channel: +
     
    aeval=val(0)|-val(1)
    +
    +
+ + +

25.5 afade

+ +

Apply fade-in/out effect to input audio. +

+

A description of the accepted parameters follows. +

+
+
type, t
+

Specify the effect type, can be either in for fade-in, or +out for a fade-out effect. Default is in. +

+
+
start_sample, ss
+

Specify the number of the start sample for starting to apply the fade +effect. Default is 0. +

+
+
nb_samples, ns
+

Specify the number of samples for which the fade effect has to last. At +the end of the fade-in effect the output audio will have the same +volume as the input audio, at the end of the fade-out transition +the output audio will be silence. Default is 44100. +

+
+
start_time, st
+

Specify time for starting to apply the fade effect. Default is 0. +The accepted syntax is: +

 
[-]HH[:MM[:SS[.m...]]]
+[-]S+[.m...]
+
+

See also the function av_parse_time(). +If set this option is used instead of start_sample one. +

+
+
duration, d
+

Specify the duration for which the fade effect has to last. Default is 0. +The accepted syntax is: +

 
[-]HH[:MM[:SS[.m...]]]
+[-]S+[.m...]
+
+

See also the function av_parse_time(). +At the end of the fade-in effect the output audio will have the same +volume as the input audio, at the end of the fade-out transition +the output audio will be silence. +If set this option is used instead of nb_samples one. +

+
+
curve
+

Set curve for fade transition. +

+

It accepts the following values: +

+
tri
+

select triangular, linear slope (default) +

+
qsin
+

select quarter of sine wave +

+
hsin
+

select half of sine wave +

+
esin
+

select exponential sine wave +

+
log
+

select logarithmic +

+
par
+

select inverted parabola +

+
qua
+

select quadratic +

+
cub
+

select cubic +

+
squ
+

select square root +

+
cbr
+

select cubic root +

+
+
+
+ + +

25.5.1 Examples

+ +
    +
  • +Fade in first 15 seconds of audio: +
     
    afade=t=in:ss=0:d=15
    +
    + +
  • +Fade out last 25 seconds of a 900 seconds audio: +
     
    afade=t=out:st=875:d=25
    +
    +
+ +

+

+

25.6 aformat

+ +

Set output format constraints for the input audio. The framework will +negotiate the most appropriate format to minimize conversions. +

+

The filter accepts the following named parameters: +

+
sample_fmts
+

A ’|’-separated list of requested sample formats. +

+
+
sample_rates
+

A ’|’-separated list of requested sample rates. +

+
+
channel_layouts
+

A ’|’-separated list of requested channel layouts. +

+

See (ffmpeg-utils)channel layout syntax +for the required syntax. +

+
+ +

If a parameter is omitted, all values are allowed. +

+

For example to force the output to either unsigned 8-bit or signed 16-bit stereo: +

 
aformat=sample_fmts=u8|s16:channel_layouts=stereo
+
+ + +

25.7 allpass

+ +

Apply a two-pole all-pass filter with central frequency (in Hz) +frequency, and filter-width width. +An all-pass filter changes the audio’s frequency to phase relationship +without changing its frequency to amplitude relationship. +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set frequency in Hz. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Specify the band-width of a filter in width_type units. +

+
+ + +

25.8 amerge

+ +

Merge two or more audio streams into a single multi-channel stream. +

+

The filter accepts the following options: +

+
+
inputs
+

Set the number of inputs. Default is 2. +

+
+
+ +

If the channel layouts of the inputs are disjoint, and therefore compatible, +the channel layout of the output will be set accordingly and the channels +will be reordered as necessary. If the channel layouts of the inputs are not +disjoint, the output will have all the channels of the first input then all +the channels of the second input, in that order, and the channel layout of +the output will be the default value corresponding to the total number of +channels. +

+

For example, if the first input is in 2.1 (FL+FR+LF) and the second input +is FC+BL+BR, then the output will be in 5.1, with the channels in the +following order: a1, a2, b1, a3, b2, b3 (a1 is the first channel of the +first input, b1 is the first channel of the second input). +

+


On the other hand, if both inputs are in stereo, the output channels will be +in the default order: a1, a2, b1, b2, and the channel layout will be +arbitrarily set to 4.0, which may or may not be the expected value. +

+

All inputs must have the same sample rate, and format. +

+

If inputs do not have the same duration, the output will stop with the +shortest. +

+ +

25.8.1 Examples

+ +
    +
  • +Merge two mono files into a stereo stream: +
     
    amovie=left.wav [l] ; amovie=right.mp3 [r] ; [l] [r] amerge
    +
    + +
  • +Multiple merges assuming 1 video stream and 6 audio streams in ‘input.mkv’: +
     
    ffmpeg -i input.mkv -filter_complex "[0:1][0:2][0:3][0:4][0:5][0:6] amerge=inputs=6" -c:a pcm_s16le output.mkv
    +
    +
+ + +

25.9 amix

+ +

Mixes multiple audio inputs into a single output. +

+

For example +

 
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex amix=inputs=3:duration=first:dropout_transition=3 OUTPUT
+
+

will mix 3 input audio streams to a single output with the same duration as the +first input and a dropout transition time of 3 seconds. +

+

The filter accepts the following named parameters: +

+
inputs
+

Number of inputs. If unspecified, it defaults to 2. +

+
+
duration
+

How to determine the end-of-stream. +

+
longest
+

Duration of longest input. (default) +

+
+
shortest
+

Duration of shortest input. +

+
+
first
+

Duration of first input. +

+
+
+ +
+
dropout_transition
+

Transition time, in seconds, for volume renormalization when an input +stream ends. The default value is 2 seconds. +

+
+
+ + +

25.10 anull

+ +

Pass the audio source unchanged to the output. +

+ +

25.11 apad

+ +


Pad the end of an audio stream with silence; this can be used together with +-shortest to extend audio streams to the same length as the video stream. +

+ +

25.12 aphaser

+

Add a phasing effect to the input audio. +

+

A phaser filter creates series of peaks and troughs in the frequency spectrum. +The position of the peaks and troughs are modulated so that they vary over time, creating a sweeping effect. +

+

A description of the accepted parameters follows. +

+
+
in_gain
+

Set input gain. Default is 0.4. +

+
+
out_gain
+


Set output gain. Default is 0.74. +

+
+
delay
+

Set delay in milliseconds. Default is 3.0. +

+
+
decay
+

Set decay. Default is 0.4. +

+
+
speed
+

Set modulation speed in Hz. Default is 0.5. +

+
+
type
+

Set modulation type. Default is triangular. +

+

It accepts the following values: +

+
triangular, t
+
sinusoidal, s
+
+
+
+ +

+

+

25.13 aresample

+ +

Resample the input audio to the specified parameters, using the +libswresample library. If none are specified then the filter will +automatically convert between its input and output. +

+

This filter is also able to stretch/squeeze the audio data to make it match +the timestamps or to inject silence / cut out audio to make it match the +timestamps, do a combination of both or do neither. +

+

The filter accepts the syntax +[sample_rate:]resampler_options, where sample_rate +expresses a sample rate and resampler_options is a list of +key=value pairs, separated by ":". See the +ffmpeg-resampler manual for the complete list of supported options. +

+ +

25.13.1 Examples

+ +
    +
  • +Resample the input audio to 44100Hz: +
     
    aresample=44100
    +
    + +
  • +Stretch/squeeze samples to the given timestamps, with a maximum of 1000 +samples per second compensation: +
     
    aresample=async=1000
    +
    +
+ + +

25.14 asetnsamples

+ +

Set the number of samples per each output audio frame. +

+


The last output packet may contain a different number of samples, as +the filter will flush all the remaining samples when the input audio +signal reaches its end. +

+

The filter accepts the following options: +

+
+
nb_out_samples, n
+

Set the number of frames per each output audio frame. The number is +intended as the number of samples per each channel. +Default value is 1024. +

+
+
pad, p
+

If set to 1, the filter will pad the last audio frame with zeroes, so +that the last frame will contain the same number of samples as the +previous ones. Default value is 1. +

+
+ +

For example, to set the number of per-frame samples to 1234 and +disable padding for the last frame, use: +

 
asetnsamples=n=1234:p=0
+
+ + +

25.15 asetrate

+ +

Set the sample rate without altering the PCM data. +This will result in a change of speed and pitch. +

+

The filter accepts the following options: +

+
+
sample_rate, r
+

Set the output sample rate. Default is 44100 Hz. +

+
+ + +

25.16 ashowinfo

+ +

Show a line containing various information for each input audio frame. +The input audio is not modified. +

+

The shown line contains a sequence of key/value pairs of the form +key:value. +

+

A description of each shown parameter follows: +

+
+
n
+

sequential number of the input frame, starting from 0 +

+
+
pts
+

Presentation timestamp of the input frame, in time base units; the time base +depends on the filter input pad, and is usually 1/sample_rate. +

+
+
pts_time
+

presentation timestamp of the input frame in seconds +

+
+
pos
+


position of the frame in the input stream, -1 if this information is +unavailable and/or meaningless (for example in case of synthetic audio) +

+
+
fmt
+

sample format +

+
+
chlayout
+

channel layout +

+
+
rate
+

sample rate for the audio frame +

+
+
nb_samples
+

number of samples (per channel) in the frame +

+
+
checksum
+

Adler-32 checksum (printed in hexadecimal) of the audio data. For planar audio +the data is treated as if all the planes were concatenated. +

+
+
plane_checksums
+

A list of Adler-32 checksums for each data plane. +

+
+ + +

25.17 astats

+ +

Display time domain statistical information about the audio channels. +Statistics are calculated and displayed for each audio channel and, +where applicable, an overall figure is also given. +

+

The filter accepts the following option: +

+
length
+


Short window length in seconds, used for peak and trough RMS measurement. +Default is 0.05 (50 milliseconds). Allowed range is [0.1 - 10]. +

+
+ +

A description of each shown parameter follows: +

+
+
DC offset
+

Mean amplitude displacement from zero. +

+
+
Min level
+

Minimal sample level. +

+
+
Max level
+

Maximal sample level. +

+
+
Peak level dB
+
RMS level dB
+

Standard peak and RMS level measured in dBFS. +

+
+
RMS peak dB
+
RMS trough dB
+

Peak and trough values for RMS level measured over a short window. +

+
+
Crest factor
+

Standard ratio of peak to RMS level (note: not in dB). +

+
+
Flat factor
+

Flatness (i.e. consecutive samples with the same value) of the signal at its peak levels +(i.e. either Min level or Max level). +

+
+
Peak count
+

Number of occasions (not the number of samples) that the signal attained either +Min level or Max level. +

+
+ + +

25.18 astreamsync

+ +

Forward two audio streams and control the order the buffers are forwarded. +

+

The filter accepts the following options: +

+
+
expr, e
+

Set the expression deciding which stream should be +forwarded next: if the result is negative, the first stream is forwarded; if +the result is positive or zero, the second stream is forwarded. It can use +the following variables: +

+
+
b1 b2
+

number of buffers forwarded so far on each stream +

+
s1 s2
+

number of samples forwarded so far on each stream +

+
t1 t2
+

current timestamp of each stream +

+
+ +

The default value is t1-t2, which means to always forward the stream +that has a smaller timestamp. +

+
+ + +

25.18.1 Examples

+ +

Stress-test amerge by randomly sending buffers on the wrong +input, while avoiding too much of a desynchronization: +

 
amovie=file.ogg [a] ; amovie=file.mp3 [b] ;
+[a] [b] astreamsync=(2*random(1))-1+tanh(5*(t1-t2)) [a2] [b2] ;
+[a2] [b2] amerge
+
+ + +

25.19 asyncts

+ +

Synchronize audio data with timestamps by squeezing/stretching it and/or +dropping samples/adding silence when needed. +

+

This filter is not built by default, please use aresample to do squeezing/stretching. +

+

The filter accepts the following named parameters: +

+
compensate
+

Enable stretching/squeezing the data to make it match the timestamps. Disabled +by default. When disabled, time gaps are covered with silence. +

+
+
min_delta
+

Minimum difference between timestamps and audio data (in seconds) to trigger +adding/dropping samples. Default value is 0.1. If you get non-perfect sync with +this filter, try setting this parameter to 0. +

+
+
max_comp
+

Maximum compensation in samples per second. Relevant only with compensate=1. +Default value 500. +

+
+
first_pts
+

Assume the first pts should be this value. The time base is 1 / sample rate. +This allows for padding/trimming at the start of stream. By default, no +assumption is made about the first frame’s expected pts, so no padding or +trimming is done. For example, this could be set to 0 to pad the beginning with +silence if an audio stream starts after the video stream or to trim any samples +with a negative pts due to encoder delay. +

+
+
+ + +

25.20 atempo

+ +

Adjust audio tempo. +

+

The filter accepts exactly one parameter, the audio tempo. If not +specified then the filter will assume nominal 1.0 tempo. Tempo must +be in the [0.5, 2.0] range. +

+ +

25.20.1 Examples

+ +
    +
  • +Slow down audio to 80% tempo: +
     
    atempo=0.8
    +
    + +
  • +To speed up audio to 125% tempo: +
     
    atempo=1.25
    +
    +
+ + +

25.21 atrim

+ +

Trim the input so that the output contains one continuous subpart of the input. +

+

This filter accepts the following options: +

+
start
+

Specify time of the start of the kept section, i.e. the audio sample +with the timestamp start will be the first sample in the output. +

+
+
end
+

Specify time of the first audio sample that will be dropped, i.e. the +audio sample immediately preceding the one with the timestamp end will be +the last sample in the output. +

+
+
start_pts
+

Same as start, except this option sets the start timestamp in samples +instead of seconds. +

+
+
end_pts
+

Same as end, except this option sets the end timestamp in samples instead +of seconds. +

+
+
duration
+

Specify maximum duration of the output. +

+
+
start_sample
+

Number of the first sample that should be passed to output. +

+
+
end_sample
+

Number of the first sample that should be dropped. +

+
+ +


‘start’, ‘end’, ‘duration’ are expressed as time +duration specifications; check the "Time duration" section in the +ffmpeg-utils manual. +

+

Note that the first two sets of the start/end options and the ‘duration’ +option look at the frame timestamp, while the _sample options simply count the +samples that pass through the filter. So start/end_pts and start/end_sample will +give different results when the timestamps are wrong, inexact or do not start at +zero. Also note that this filter does not modify the timestamps. If you wish +that the output timestamps start at zero, insert the asetpts filter after the +atrim filter. +

+

If multiple start or end options are set, this filter tries to be greedy and +keep all samples that match at least one of the specified constraints. To keep +only the part that matches all the constraints at once, chain multiple atrim +filters. +

+

The defaults are such that all the input is kept. So it is possible to set e.g. +just the end values to keep everything before the specified time. +

+

Examples: +

    +
  • +drop everything except the second minute of input +
     
    ffmpeg -i INPUT -af atrim=60:120
    +
    + +
  • +keep only the first 1000 samples +
     
    ffmpeg -i INPUT -af atrim=end_sample=1000
    +
    + +
+ + +

25.22 bandpass

+ +


Apply a two-pole Butterworth band-pass filter with central +frequency frequency, and (3dB-point) band-width width. +The csg option selects a constant skirt gain (peak gain = Q) +instead of the default: constant 0dB peak gain. +The filter rolls off at 6dB per octave (20dB per decade). +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set the filter’s central frequency. Default is 3000. +

+
+
csg
+

Constant skirt gain if set to 1. Defaults to 0. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Specify the band-width of a filter in width_type units. +

+
+ + +

25.23 bandreject

+ +


Apply a two-pole Butterworth band-reject filter with central +frequency frequency, and (3dB-point) band-width width. +The filter rolls off at 6dB per octave (20dB per decade). +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set the filter’s central frequency. Default is 3000. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Specify the band-width of a filter in width_type units. +

+
+ + +

25.24 bass

+ +

Boost or cut the bass (lower) frequencies of the audio using a two-pole +shelving filter with a response similar to that of a standard +hi-fi’s tone-controls. This is also known as shelving equalisation (EQ). +

+

The filter accepts the following options: +

+
+
gain, g
+

Give the gain at 0 Hz. Its useful range is about -20 +(for a large cut) to +20 (for a large boost). +Beware of clipping when using a positive gain. +

+
+
frequency, f
+

Set the filter’s central frequency and so can be used +to extend or reduce the frequency range to be boosted or cut. +The default value is 100 Hz. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Determine how steep is the filter’s shelf transition. +

+
+ + +

25.25 biquad

+ +

Apply a biquad IIR filter with the given coefficients. +Where b0, b1, b2 and a0, a1, a2 +are the numerator and denominator coefficients respectively. +

+ +

25.26 channelmap

+ +

Remap input channels to new locations. +

+

This filter accepts the following named parameters: +

+
channel_layout
+

Channel layout of the output stream. +

+
+
map
+

Map channels from input to output. The argument is a ’|’-separated list of +mappings, each in the in_channel-out_channel or +in_channel form. in_channel can be either the name of the input +channel (e.g. FL for front left) or its index in the input channel layout. +out_channel is the name of the output channel or its index in the output +channel layout. If out_channel is not given then it is implicitly an +index, starting with zero and increasing by one for each mapping. +

+
+ +

If no mapping is present, the filter will implicitly map input channels to +output channels preserving index. +

+

For example, assuming a 5.1+downmix input MOV file +

 
ffmpeg -i in.mov -filter 'channelmap=map=DL-FL|DR-FR' out.wav
+
+

will create an output WAV file tagged as stereo from the downmix channels of +the input. +

+

To fix a 5.1 WAV improperly encoded in AAC’s native channel order +

 
ffmpeg -i in.wav -filter 'channelmap=1|2|0|5|3|4:channel_layout=5.1' out.wav
+
+ + +

25.27 channelsplit

+ +

Split each channel in input audio stream into a separate output stream. +

+

This filter accepts the following named parameters: +

+
channel_layout
+

Channel layout of the input stream. Default is "stereo". +

+
+ +

For example, assuming a stereo input MP3 file +

 
ffmpeg -i in.mp3 -filter_complex channelsplit out.mkv
+
+

will create an output Matroska file with two audio streams, one containing only +the left channel and the other the right channel. +

+

To split a 5.1 WAV file into per-channel files +

 
ffmpeg -i in.wav -filter_complex
+'channelsplit=channel_layout=5.1[FL][FR][FC][LFE][SL][SR]'
+-map '[FL]' front_left.wav -map '[FR]' front_right.wav -map '[FC]'
+front_center.wav -map '[LFE]' lfe.wav -map '[SL]' side_left.wav -map '[SR]'
+side_right.wav
+
+ + +

25.28 compand

+

Compress or expand audio dynamic range. +

+

A description of the accepted options follows. +

+
+
attacks
+
decays
+

Set list of times in seconds for each channel over which the instantaneous level +of the input signal is averaged to determine its volume. attacks refers to +increase of volume and decays refers to decrease of volume. For most +situations, the attack time (response to the audio getting louder) should be +shorter than the decay time because the human ear is more sensitive to sudden +loud audio than sudden soft audio. A typical value for attack is 0.3 seconds and +a typical value for decay is 0.8 seconds. +

+
+
points
+

Set list of points for the transfer function, specified in dB relative to the +maximum possible signal amplitude. Each key points list must be defined using +the following syntax: x0/y0|x1/y1|x2/y2|.... or +x0/y0 x1/y1 x2/y2 .... +

+

The input values must be in strictly increasing order but the transfer function +does not have to be monotonically rising. The point 0/0 is assumed but +may be overridden (by 0/out-dBn). Typical values for the transfer +function are -70/-70|-60/-20. +

+
+
soft-knee
+

Set the curve radius in dB for all joints. Defaults to 0.01. +

+
+
gain
+

Set additional gain in dB to be applied at all points on the transfer function. +This allows easy adjustment of the overall gain. Defaults to 0. +

+
+
volume
+

Set initial volume in dB to be assumed for each channel when filtering starts. +This permits the user to supply a nominal level initially, so that, for +example, a very large gain is not applied to initial signal levels before the +companding has begun to operate. A typical value for audio which is initially +quiet is -90 dB. Defaults to 0. +

+
+
delay
+

Set delay in seconds. The input audio is analyzed immediately, but audio is +delayed before being fed to the volume adjuster. Specifying a delay +approximately equal to the attack/decay times allows the filter to effectively +operate in predictive rather than reactive mode. Defaults to 0. +

+
+
+ + +

25.28.1 Examples

+ +
    +
  • +Make music with both quiet and loud passages suitable for listening in a noisy +environment: +
     
    compand=.3|.3:1|1:-90/-60|-60/-40|-40/-30|-20/-20:6:0:-90:0.2
    +
    + +
  • +Noise gate for when the noise is at a lower level than the signal: +
     
    compand=.1|.1:.2|.2:-900/-900|-50.1/-900|-50/-50:.01:0:-90:.1
    +
    + +
  • +Here is another noise gate, this time for when the noise is at a higher level +than the signal (making it, in some ways, similar to squelch): +
     
    compand=.1|.1:.1|.1:-45.1/-45.1|-45/-900|0/-900:.01:45:-90:.1
    +
    +
+ + +

25.29 earwax

+ +

Make audio easier to listen to on headphones. +

+

This filter adds ‘cues’ to 44.1kHz stereo (i.e. audio CD format) audio +so that when listened to on headphones the stereo image is moved from +inside your head (standard for headphones) to outside and in front of +the listener (standard for speakers). +

+

Ported from SoX. +

+ +

25.30 equalizer

+ +

Apply a two-pole peaking equalisation (EQ) filter. With this +filter, the signal-level at and around a selected frequency can +be increased or decreased, whilst (unlike bandpass and bandreject +filters) that at all other frequencies is unchanged. +

+

In order to produce complex equalisation curves, this filter can +be given several times, each with a different central frequency. +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set the filter’s central frequency in Hz. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Specify the band-width of a filter in width_type units. +

+
+
gain, g
+

Set the required gain or attenuation in dB. +Beware of clipping when using a positive gain. +

+
+ + +

25.30.1 Examples

+
    +
  • +Attenuate 10 dB at 1000 Hz, with a bandwidth of 200 Hz: +
     
    equalizer=f=1000:width_type=h:width=200:g=-10
    +
    + +
  • +Apply 2 dB gain at 1000 Hz with Q 1 and attenuate 5 dB at 100 Hz with Q 2: +
     
    equalizer=f=1000:width_type=q:width=1:g=2,equalizer=f=100:width_type=q:width=2:g=-5
    +
    +
+ + +

25.31 highpass

+ +


Apply a high-pass filter with 3dB point frequency. +The filter can be either single-pole, or double-pole (the default). +The filter rolls off at 6dB per pole per octave (20dB per pole per decade). +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set frequency in Hz. Default is 3000. +

+
+
poles, p
+

Set number of poles. Default is 2. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Specify the band-width of a filter in width_type units. +Applies only to double-pole filter. +The default is 0.707q and gives a Butterworth response. +

+
+ + +

25.32 join

+ +

Join multiple input streams into one multi-channel stream. +

+

The filter accepts the following named parameters: +

+
inputs
+

Number of input streams. Defaults to 2. +

+
+
channel_layout
+

Desired output channel layout. Defaults to stereo. +

+
+
map
+

Map channels from inputs to output. The argument is a ’|’-separated list of +mappings, each in the input_idx.in_channel-out_channel +form. input_idx is the 0-based index of the input stream. in_channel +can be either the name of the input channel (e.g. FL for front left) or its +index in the specified input stream. out_channel is the name of the output +channel. +

+
+ +

The filter will attempt to guess the mappings when those are not specified +explicitly. It does so by first trying to find an unused matching input channel +and if that fails it picks the first unused input channel. +

+

E.g. to join 3 inputs (with properly set channel layouts) +

 
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex join=inputs=3 OUTPUT
+
+ +

To build a 5.1 output from 6 single-channel streams: +

 
ffmpeg -i fl -i fr -i fc -i sl -i sr -i lfe -filter_complex
+'join=inputs=6:channel_layout=5.1:map=0.0-FL|1.0-FR|2.0-FC|3.0-SL|4.0-SR|5.0-LFE'
+out
+
+ + +

25.33 ladspa

+ +

Load a LADSPA (Linux Audio Developer’s Simple Plugin API) plugin. +

+

To enable compilation of this filter you need to configure FFmpeg with +--enable-ladspa. +

+
+
file, f
+

Specifies the name of LADSPA plugin library to load. If the environment +variable LADSPA_PATH is defined, the LADSPA plugin is searched in +each one of the directories specified by the colon separated list in +LADSPA_PATH, otherwise in the standard LADSPA paths, which are in +this order: ‘HOME/.ladspa/lib/’, ‘/usr/local/lib/ladspa/’, +‘/usr/lib/ladspa/’. +

+
+
plugin, p
+

Specifies the plugin within the library. Some libraries contain only +one plugin, but others contain many of them. If this is not set filter +will list all available plugins within the specified library. +

+
+
controls, c
+

Set the ’|’ separated list of controls which are zero or more floating point +values that determine the behavior of the loaded plugin (for example delay, +threshold or gain). +Controls need to be defined using the following syntax: +c0=value0|c1=value1|c2=value2|..., where +valuei is the value set on the i-th control. +If ‘controls’ is set to help, all available controls and +their valid ranges are printed. +

+
+
sample_rate, s
+


Specify the sample rate; defaults to 44100. Only used if the plugin has +zero inputs. +

+
+
nb_samples, n
+


Set the number of samples per channel per each output frame, default +is 1024. Only used if the plugin has zero inputs. +

+
+
duration, d
+


Set the minimum duration of the sourced audio. See the function +av_parse_time() for the accepted format, also check the "Time duration" +section in the ffmpeg-utils manual. +Note that the resulting duration may be greater than the specified duration, +as the generated audio is always cut at the end of a complete frame. +If not specified, or the expressed duration is negative, the audio is +supposed to be generated forever. +Only used if the plugin has zero inputs. +

+
+
+ + +

25.33.1 Examples

+ +
    +
  • +List all available plugins within amp (LADSPA example plugin) library: +
     
    ladspa=file=amp
    +
    + +
  • +List all available controls and their valid ranges for vcf_notch +plugin from VCF library: +
     
    ladspa=f=vcf:p=vcf_notch:c=help
    +
    + +
  • +Simulate low quality audio equipment using Computer Music Toolkit (CMT) +plugin library: +
     
    ladspa=file=cmt:plugin=lofi:controls=c0=22|c1=12|c2=12
    +
    + +
  • +Add reverberation to the audio using TAP-plugins +(Tom’s Audio Processing plugins): +
     
    ladspa=file=tap_reverb:tap_reverb
    +
    + +
  • +Generate white noise, with 0.2 amplitude: +
     
    ladspa=file=cmt:noise_source_white:c=c0=.2
    +
    + +
  • +Generate 20 bpm clicks using plugin C* Click - Metronome from the +C* Audio Plugin Suite (CAPS) library: +
     
    ladspa=file=caps:Click:c=c1=20
    +
    + +
  • +Apply C* Eq10X2 - Stereo 10-band equaliser effect: +
     
    ladspa=caps:Eq10X2:c=c0=-48|c9=-24|c3=12|c4=2
    +
    +
+ + +

25.33.2 Commands

+ +

This filter supports the following commands: +

+
cN
+

Modify the N-th control value. +

+

If the specified value is not valid, it is ignored and prior one is kept. +

+
+ + +

25.34 lowpass

+ +


Apply a low-pass filter with 3dB point frequency. +The filter can be either single-pole or double-pole (the default). +The filter rolls off at 6dB per pole per octave (20dB per pole per decade). +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set frequency in Hz. Default is 500. +

+
+
poles, p
+

Set number of poles. Default is 2. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Specify the band-width of a filter in width_type units. +Applies only to double-pole filter. +The default is 0.707q and gives a Butterworth response. +

+
+ + +

25.35 pan

+ +

Mix channels with specific gain levels. The filter accepts the output +channel layout followed by a set of channels definitions. +

+

This filter is also designed to remap efficiently the channels of an audio +stream. +

+

The filter accepts parameters of the form: +"l:outdef:outdef:..." +

+
+
l
+

output channel layout or number of channels +

+
+
outdef
+

output channel specification, of the form: +"out_name=[gain*]in_name[+[gain*]in_name...]" +

+
+
out_name
+

output channel to define, either a channel name (FL, FR, etc.) or a channel +number (c0, c1, etc.) +

+
+
gain
+

multiplicative coefficient for the channel, 1 leaving the volume unchanged +

+
+
in_name
+

input channel to use, see out_name for details; it is not possible to mix +named and numbered input channels +

+
+ +

If the ‘=’ in a channel specification is replaced by ‘<’, then the gains for +that specification will be renormalized so that the total is 1, thus +avoiding clipping noise. +

+ +

25.35.1 Mixing examples

+ +

For example, if you want to down-mix from stereo to mono, but with a bigger +factor for the left channel: +

 
pan=1:c0=0.9*c0+0.1*c1
+
+ +

A customized down-mix to stereo that works automatically for 3-, 4-, 5- and +7-channels surround: +

 
pan=stereo: FL < FL + 0.5*FC + 0.6*BL + 0.6*SL : FR < FR + 0.5*FC + 0.6*BR + 0.6*SR
+
+ +

Note that ffmpeg integrates a default down-mix (and up-mix) system +that should be preferred (see "-ac" option) unless you have very specific +needs. +

+ +

25.35.2 Remapping examples

+ +

The channel remapping will be effective if, and only if: +

+
    +
  • gain coefficients are zeroes or ones, +
  • only one input per channel output, +
+ +

If all these conditions are satisfied, the filter will notify the user ("Pure +channel mapping detected"), and use an optimized and lossless method to do the +remapping. +

+

For example, if you have a 5.1 source and want a stereo audio stream by +dropping the extra channels: +

 
pan="stereo: c0=FL : c1=FR"
+
+ +

Given the same source, you can also switch front left and front right channels +and keep the input channel layout: +

 
pan="5.1: c0=c1 : c1=c0 : c2=c2 : c3=c3 : c4=c4 : c5=c5"
+
+ +

If the input is a stereo audio stream, you can mute the front left channel (and +still keep the stereo channel layout) with: +

 
pan="stereo:c1=c1"
+
+ +

Still with a stereo audio stream input, you can copy the right channel in both +front left and right: +

 
pan="stereo: c0=FR : c1=FR"
+
+ + +

25.36 replaygain

+ +

ReplayGain scanner filter. This filter takes an audio stream as an input and +outputs it unchanged. +At end of filtering it displays track_gain and track_peak. +

+ +

25.37 resample

+ +

Convert the audio sample format, sample rate and channel layout. This filter is +not meant to be used directly. +

+ +

25.38 silencedetect

+ +

Detect silence in an audio stream. +

+

This filter logs a message when it detects that the input audio volume is less +or equal to a noise tolerance value for a duration greater or equal to the +minimum detected noise duration. +

+

The printed times and duration are expressed in seconds. +

+

The filter accepts the following options: +

+
+
duration, d
+

Set silence duration until notification (default is 2 seconds). +

+
+
noise, n
+

Set noise tolerance. Can be specified in dB (in case "dB" is appended to the +specified value) or amplitude ratio. Default is -60dB, or 0.001. +

+
+ + +

25.38.1 Examples

+ +
    +
  • +Detect 5 seconds of silence with -50dB noise tolerance: +
     
    silencedetect=n=-50dB:d=5
    +
    + +
  • +Complete example with ffmpeg to detect silence with 0.0001 noise +tolerance in ‘silence.mp3’: +
     
    ffmpeg -i silence.mp3 -af silencedetect=noise=0.0001 -f null -
    +
    +
+ + +

25.39 treble

+ +

Boost or cut treble (upper) frequencies of the audio using a two-pole +shelving filter with a response similar to that of a standard +hi-fi’s tone-controls. This is also known as shelving equalisation (EQ). +

+

The filter accepts the following options: +

+
+
gain, g
+

Give the gain at whichever is the lower of ~22 kHz and the +Nyquist frequency. Its useful range is about -20 (for a large cut) +to +20 (for a large boost). Beware of clipping when using a positive gain. +

+
+
frequency, f
+

Set the filter’s central frequency and so can be used +to extend or reduce the frequency range to be boosted or cut. +The default value is 3000 Hz. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Determine how steep is the filter’s shelf transition. +

+
+ + +

25.40 volume

+ +

Adjust the input audio volume. +

+

The filter accepts the following options: +

+
+
volume
+

Set audio volume expression. +

+

Output values are clipped to the maximum value. +

+

The output audio volume is given by the relation: +

 
output_volume = volume * input_volume
+
+ +

Default value for volume is "1.0". +

+
+
precision
+

Set the mathematical precision. +

+

This determines which input sample formats will be allowed, which affects the +precision of the volume scaling. +

+
+
fixed
+

8-bit fixed-point; limits input sample format to U8, S16, and S32. +

+
float
+

32-bit floating-point; limits input sample format to FLT. (default) +

+
double
+

64-bit floating-point; limits input sample format to DBL. +

+
+ +
+
eval
+

Set when the volume expression is evaluated. +

+

It accepts the following values: +

+
once
+

only evaluate expression once during the filter initialization, or +when the ‘volume’ command is sent +

+
+
frame
+

evaluate expression for each incoming frame +

+
+ +

Default value is ‘once’. +

+
+ +

The volume expression can contain the following parameters. +

+
+
n
+

frame number (starting at zero) +

+
nb_channels
+

number of channels +

+
nb_consumed_samples
+

number of samples consumed by the filter +

+
nb_samples
+

number of samples in the current frame +

+
pos
+

original frame position in the file +

+
pts
+

frame PTS +

+
sample_rate
+

sample rate +

+
startpts
+

PTS at start of stream +

+
startt
+

time at start of stream +

+
t
+

frame time +

+
tb
+

timestamp timebase +

+
volume
+

last set volume value +

+
+ +

Note that when ‘eval’ is set to ‘once’ only the +sample_rate and tb variables are available, all other +variables will evaluate to NAN. +

+ +

25.40.1 Commands

+ +

This filter supports the following commands: +

+
volume
+

Modify the volume expression. +The command accepts the same syntax of the corresponding option. +

+

If the specified expression is not valid, it is kept at its current +value. +

+
+ + +

25.40.2 Examples

+ +
    +
  • +Halve the input audio volume: +
     
    volume=volume=0.5
    +volume=volume=1/2
    +volume=volume=-6.0206dB
    +
    + +

    In all the above examples the named key for ‘volume’ can be +omitted, for example like in: +

     
    volume=0.5
    +
    + +
  • +Increase input audio power by 6 decibels using fixed-point precision: +
     
    volume=volume=6dB:precision=fixed
    +
    + +
  • +Fade volume after time 10 with an annihilation period of 5 seconds: +
     
    volume='if(lt(t,10),1,max(1-(t-10)/5,0))':eval=frame
    +
    +
+ + +

25.41 volumedetect

+ +

Detect the volume of the input audio. +

+

The filter has no parameters. The input is not modified. Statistics about +the volume will be printed in the log when the input stream end is reached. +

+

In particular it will show the mean volume (root mean square), maximum +volume (on a per-sample basis), and the beginning of a histogram of the +registered volume values (from the maximum value to a cumulated 1/1000 of +the samples). +

+

All volumes are in decibels relative to the maximum PCM value. +

+ +

25.41.1 Examples

+ +

Here is an excerpt of the output: +

 
[Parsed_volumedetect_0  0xa23120] mean_volume: -27 dB
+[Parsed_volumedetect_0  0xa23120] max_volume: -4 dB
+[Parsed_volumedetect_0  0xa23120] histogram_4db: 6
+[Parsed_volumedetect_0  0xa23120] histogram_5db: 62
+[Parsed_volumedetect_0  0xa23120] histogram_6db: 286
+[Parsed_volumedetect_0  0xa23120] histogram_7db: 1042
+[Parsed_volumedetect_0  0xa23120] histogram_8db: 2551
+[Parsed_volumedetect_0  0xa23120] histogram_9db: 4609
+[Parsed_volumedetect_0  0xa23120] histogram_10db: 8409
+
+ +

It means that: +

    +
  • +The mean square energy is approximately -27 dB, or 10^-2.7. +
  • +The largest sample is at -4 dB, or more precisely between -4 dB and -5 dB. +
  • +There are 6 samples at -4 dB, 62 at -5 dB, 286 at -6 dB, etc. +
+ +

In other words, raising the volume by +4 dB does not cause any clipping, +raising it by +5 dB causes clipping for 6 samples, etc. +

+ + +

26. Audio Sources

+ +

Below is a description of the currently available audio sources. +

+ +

26.1 abuffer

+ +

Buffer audio frames, and make them available to the filter chain. +

+

This source is mainly intended for a programmatic use, in particular +through the interface defined in ‘libavfilter/asrc_abuffer.h’. +

+

It accepts the following named parameters: +

+
+
time_base
+

Timebase which will be used for timestamps of submitted frames. It must be +either a floating-point number or in numerator/denominator form. +

+
+
sample_rate
+

The sample rate of the incoming audio buffers. +

+
+
sample_fmt
+

The sample format of the incoming audio buffers. +Either a sample format name or its corresponding integer representation from +the enum AVSampleFormat in ‘libavutil/samplefmt.h’ +

+
+
channel_layout
+

The channel layout of the incoming audio buffers. +Either a channel layout name from channel_layout_map in +‘libavutil/channel_layout.c’ or its corresponding integer representation +from the AV_CH_LAYOUT_* macros in ‘libavutil/channel_layout.h’ +

+
+
channels
+

The number of channels of the incoming audio buffers. +If both channels and channel_layout are specified, then they +must be consistent. +

+
+
+ + +

26.1.1 Examples

+ +
 
abuffer=sample_rate=44100:sample_fmt=s16p:channel_layout=stereo
+
+ +

will instruct the source to accept planar 16bit signed stereo at 44100Hz. +Since the sample format with name "s16p" corresponds to the number +6 and the "stereo" channel layout corresponds to the value 0x3, this is +equivalent to: +

 
abuffer=sample_rate=44100:sample_fmt=6:channel_layout=0x3
+
+ + +

26.2 aevalsrc

+ +

Generate an audio signal specified by an expression. +

+

This source accepts in input one or more expressions (one for each +channel), which are evaluated and used to generate a corresponding +audio signal. +

+

This source accepts the following options: +

+
+
exprs
+

Set the ’|’-separated expressions list for each separate channel. In case the +‘channel_layout’ option is not specified, the selected channel layout +depends on the number of provided expressions. Otherwise the last +specified expression is applied to the remaining output channels. +

+
+
channel_layout, c
+

Set the channel layout. The number of channels in the specified layout +must be equal to the number of specified expressions. +

+
+
duration, d
+

Set the minimum duration of the sourced audio. See the function +av_parse_time() for the accepted format. +Note that the resulting duration may be greater than the specified +duration, as the generated audio is always cut at the end of a +complete frame. +

+

If not specified, or the expressed duration is negative, the audio is +supposed to be generated forever. +

+
+
nb_samples, n
+

Set the number of samples per channel per each output frame, +default to 1024. +

+
+
sample_rate, s
+

Specify the sample rate, default to 44100. +

+
+ +

Each expression in exprs can contain the following constants: +

+
+
n
+

number of the evaluated sample, starting from 0 +

+
+
t
+

time of the evaluated sample expressed in seconds, starting from 0 +

+
+
s
+

sample rate +

+
+
+ + +

26.2.1 Examples

+ +
    +
  • +Generate silence: +
     
    aevalsrc=0
    +
    + +
  • +Generate a sin signal with frequency of 440 Hz, set sample rate to +8000 Hz: +
     
    aevalsrc="sin(440*2*PI*t):s=8000"
    +
    + +
  • +Generate a two channels signal, specify the channel layout (Front +Center + Back Center) explicitly: +
     
    aevalsrc="sin(420*2*PI*t)|cos(430*2*PI*t):c=FC|BC"
    +
    + +
  • +Generate white noise: +
     
    aevalsrc="-2+random(0)"
    +
    + +
  • +Generate an amplitude modulated signal: +
     
    aevalsrc="sin(10*2*PI*t)*sin(880*2*PI*t)"
    +
    + +
  • +Generate 2.5 Hz binaural beats on a 360 Hz carrier: +
     
    aevalsrc="0.1*sin(2*PI*(360-2.5/2)*t) | 0.1*sin(2*PI*(360+2.5/2)*t)"
    +
    + +
+ + +

26.3 anullsrc

+ +

Null audio source, return unprocessed audio frames. It is mainly useful +as a template and to be employed in analysis / debugging tools, or as +the source for filters which ignore the input data (for example the sox +synth filter). +

+

This source accepts the following options: +

+
+
channel_layout, cl
+
+

Specify the channel layout, and can be either an integer or a string +representing a channel layout. The default value of channel_layout +is "stereo". +

+

Check the channel_layout_map definition in +‘libavutil/channel_layout.c’ for the mapping between strings and +channel layout values. +

+
+
sample_rate, r
+

Specify the sample rate, and defaults to 44100. +

+
+
nb_samples, n
+

Set the number of samples per requested frames. +

+
+
+ + +

26.3.1 Examples

+ +
    +
  • +Set the sample rate to 48000 Hz and the channel layout to AV_CH_LAYOUT_MONO. +
     
    anullsrc=r=48000:cl=4
    +
    + +
  • +Do the same operation with a more obvious syntax: +
     
    anullsrc=r=48000:cl=mono
    +
    +
+ +

All the parameters need to be explicitly defined. +

+ +

26.4 flite

+ +

Synthesize a voice utterance using the libflite library. +

+

To enable compilation of this filter you need to configure FFmpeg with +--enable-libflite. +

+

Note that the flite library is not thread-safe. +

+

The filter accepts the following options: +

+
+
list_voices
+

If set to 1, list the names of the available voices and exit +immediately. Default value is 0. +

+
+
nb_samples, n
+

Set the maximum number of samples per frame. Default value is 512. +

+
+
textfile
+

Set the filename containing the text to speak. +

+
+
text
+

Set the text to speak. +

+
+
voice, v
+

Set the voice to use for the speech synthesis. Default value is +kal. See also the list_voices option. +

+
+ + +

26.4.1 Examples

+ +
    +
  • +Read from file ‘speech.txt’, and synthesize the text using the +standard flite voice: +
     
    flite=textfile=speech.txt
    +
    + +
  • +Read the specified text selecting the slt voice: +
     
    flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
    +
    + +
  • +Input text to ffmpeg: +
     
    ffmpeg -f lavfi -i flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
    +
    + +
  • +Make ‘ffplay’ speak the specified text, using flite and +the lavfi device: +
     
    ffplay -f lavfi flite=text='No more be grieved for which that thou hast done.'
    +
    +
+ +

For more information about libflite, check: +http://www.speech.cs.cmu.edu/flite/ +

+ +

26.5 sine

+ +

Generate an audio signal made of a sine wave with amplitude 1/8. +

+

The audio signal is bit-exact. +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set the carrier frequency. Default is 440 Hz. +

+
+
beep_factor, b
+

Enable a periodic beep every second with frequency beep_factor times +the carrier frequency. Default is 0, meaning the beep is disabled. +

+
+
sample_rate, r
+

Specify the sample rate, default is 44100. +

+
+
duration, d
+

Specify the duration of the generated audio stream. +

+
+
samples_per_frame
+

Set the number of samples per output frame, default is 1024. +

+
+ + +

26.5.1 Examples

+ +
    +
  • +Generate a simple 440 Hz sine wave: +
     
    sine
    +
    + +
  • +Generate a 220 Hz sine wave with a 880 Hz beep each second, for 5 seconds: +
     
    sine=220:4:d=5
    +sine=f=220:b=4:d=5
    +sine=frequency=220:beep_factor=4:duration=5
    +
    + +
+ + + +

27. Audio Sinks

+ +

Below is a description of the currently available audio sinks. +

+ +

27.1 abuffersink

+ +

Buffer audio frames, and make them available to the end of filter chain. +

+

This sink is mainly intended for programmatic use, in particular +through the interface defined in ‘libavfilter/buffersink.h’ +or the options system. +

+

It accepts a pointer to an AVABufferSinkContext structure, which +defines the incoming buffers’ formats, to be passed as the opaque +parameter to avfilter_init_filter for initialization. +

+ +

27.2 anullsink

+ +

Null audio sink, do absolutely nothing with the input audio. It is +mainly useful as a template and to be employed in analysis / debugging +tools. +

+ + +

28. Video Filters

+ +

When you configure your FFmpeg build, you can disable any of the +existing filters using --disable-filters. +The configure output will show the video filters included in your +build. +

+

Below is a description of the currently available video filters. +

+ +

28.1 alphaextract

+ +

Extract the alpha component from the input as a grayscale video. This +is especially useful with the alphamerge filter. +

+ +

28.2 alphamerge

+ +

Add or replace the alpha component of the primary input with the +grayscale value of a second input. This is intended for use with +alphaextract to allow the transmission or storage of frame +sequences that have alpha in a format that doesn’t support an alpha +channel. +

+

For example, to reconstruct full frames from a normal YUV-encoded video +and a separate video created with alphaextract, you might use: +

 
movie=in_alpha.mkv [alpha]; [in][alpha] alphamerge [out]
+
+ +

Since this filter is designed for reconstruction, it operates on frame +sequences without considering timestamps, and terminates when either +input reaches end of stream. This will cause problems if your encoding +pipeline drops frames. If you’re trying to apply an image as an +overlay to a video stream, consider the overlay filter instead. +

+ +

28.3 ass

+ +

Same as the subtitles filter, except that it doesn’t require libavcodec +and libavformat to work. On the other hand, it is limited to ASS (Advanced +Substation Alpha) subtitles files. +

+ +

28.4 bbox

+ +

Compute the bounding box for the non-black pixels in the input frame +luminance plane. +

+

This filter computes the bounding box containing all the pixels with a +luminance value greater than the minimum allowed value. +The parameters describing the bounding box are printed on the filter +log. +

+

The filter accepts the following option: +

+
+
min_val
+

Set the minimal luminance value. Default is 16. +

+
+ + +

28.5 blackdetect

+ +

Detect video intervals that are (almost) completely black. Can be +useful to detect chapter transitions, commercials, or invalid +recordings. Output lines contain the time for the start, end and +duration of the detected black interval expressed in seconds. +

+

In order to display the output lines, you need to set the loglevel at +least to the AV_LOG_INFO value. +

+

The filter accepts the following options: +

+
+
black_min_duration, d
+

Set the minimum detected black duration expressed in seconds. It must +be a non-negative floating point number. +

+

Default value is 2.0. +

+
+
picture_black_ratio_th, pic_th
+

Set the threshold for considering a picture "black". +Express the minimum value for the ratio: +

 
nb_black_pixels / nb_pixels
+
+ +

for which a picture is considered black. +Default value is 0.98. +

+
+
pixel_black_th, pix_th
+

Set the threshold for considering a pixel "black". +

+

The threshold expresses the maximum pixel luminance value for which a +pixel is considered "black". The provided value is scaled according to +the following equation: +

 
absolute_threshold = luminance_minimum_value + pixel_black_th * luminance_range_size
+
+ +

luminance_range_size and luminance_minimum_value depend on +the input video format, the range is [0-255] for YUV full-range +formats and [16-235] for YUV non full-range formats. +

+

Default value is 0.10. +

+
+ +

The following example sets the maximum pixel threshold to the minimum +value, and detects only black intervals of 2 or more seconds: +

 
blackdetect=d=2:pix_th=0.00
+
+ + +

28.6 blackframe

+ +

Detect frames that are (almost) completely black. Can be useful to +detect chapter transitions or commercials. Output lines consist of +the frame number of the detected frame, the percentage of blackness, +the position in the file if known or -1 and the timestamp in seconds. +

+

In order to display the output lines, you need to set the loglevel at +least to the AV_LOG_INFO value. +

+

The filter accepts the following options: +

+
+
amount
+

Set the percentage of the pixels that have to be below the threshold, defaults +to 98. +

+
+
threshold, thresh
+

Set the threshold below which a pixel value is considered black, defaults to +32. +

+
+
+ + +

28.7 blend

+ +

Blend two video frames into each other. +

+

It takes two input streams and outputs one stream, the first input is the +"top" layer and second input is "bottom" layer. +Output terminates when shortest input terminates. +

+

A description of the accepted options follows. +

+
+
c0_mode
+
c1_mode
+
c2_mode
+
c3_mode
+
all_mode
+

Set blend mode for specific pixel component or all pixel components in case +of all_mode. Default value is normal. +

+

Available values for component modes are: +

+
addition
+
and
+
average
+
burn
+
darken
+
difference
+
divide
+
dodge
+
exclusion
+
hardlight
+
lighten
+
multiply
+
negation
+
normal
+
or
+
overlay
+
phoenix
+
pinlight
+
reflect
+
screen
+
softlight
+
subtract
+
vividlight
+
xor
+
+ +
+
c0_opacity
+
c1_opacity
+
c2_opacity
+
c3_opacity
+
all_opacity
+

Set blend opacity for specific pixel component or all pixel components in case +of all_opacity. Only used in combination with pixel component blend modes. +

+
+
c0_expr
+
c1_expr
+
c2_expr
+
c3_expr
+
all_expr
+

Set blend expression for specific pixel component or all pixel components in case +of all_expr. Note that related mode options will be ignored if those are set. +

+

The expressions can use the following variables: +

+
+
N
+

The sequential number of the filtered frame, starting from 0. +

+
+
X
+
Y
+

the coordinates of the current sample +

+
+
W
+
H
+

the width and height of currently filtered plane +

+
+
SW
+
SH
+

Width and height scale depending on the currently filtered plane. It is the +ratio between the corresponding luma plane number of pixels and the current +plane ones. E.g. for YUV4:2:0 the values are 1,1 for the luma plane, and +0.5,0.5 for chroma planes. +

+
+
T
+

Time of the current frame, expressed in seconds. +

+
+
TOP, A
+

Value of pixel component at current location for first video frame (top layer). +

+
+
BOTTOM, B
+

Value of pixel component at current location for second video frame (bottom layer). +

+
+ +
+
shortest
+

Force termination when the shortest input terminates. Default is 0. +

+
repeatlast
+

Continue applying the last bottom frame after the end of the stream. A value of +0 disables the filter after the last frame of the bottom layer is reached. +Default is 1. +

+
+ + +

28.7.1 Examples

+ +
    +
  • +Apply transition from bottom layer to top layer in first 10 seconds: +
     
    blend=all_expr='A*(if(gte(T,10),1,T/10))+B*(1-(if(gte(T,10),1,T/10)))'
    +
    + +
  • +Apply 1x1 checkerboard effect: +
     
    blend=all_expr='if(eq(mod(X,2),mod(Y,2)),A,B)'
    +
    + +
  • +Apply uncover left effect: +
     
    blend=all_expr='if(gte(N*SW+X,W),A,B)'
    +
    + +
  • +Apply uncover down effect: +
     
    blend=all_expr='if(gte(Y-N*SH,0),A,B)'
    +
    + +
  • +Apply uncover up-left effect: +
     
    blend=all_expr='if(gte(T*SH*40+Y,H)*gte((T*40*SW+X)*W/H,W),A,B)'
    +
    +
+ + +

28.8 boxblur

+ +

Apply boxblur algorithm to the input video. +

+

The filter accepts the following options: +

+
+
luma_radius, lr
+
luma_power, lp
+
chroma_radius, cr
+
chroma_power, cp
+
alpha_radius, ar
+
alpha_power, ap
+
+ +

A description of the accepted options follows. +

+
+
luma_radius, lr
+
chroma_radius, cr
+
alpha_radius, ar
+

Set an expression for the box radius in pixels used for blurring the +corresponding input plane. +

+

The radius value must be a non-negative number, and must not be +greater than the value of the expression min(w,h)/2 for the +luma and alpha planes, and of min(cw,ch)/2 for the chroma +planes. +

+

Default value for ‘luma_radius’ is "2". If not specified, +‘chroma_radius’ and ‘alpha_radius’ default to the +corresponding value set for ‘luma_radius’. +

+

The expressions can contain the following constants: +

+
w
+
h
+

the input width and height in pixels +

+
+
cw
+
ch
+

the input chroma image width and height in pixels +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+ +
+
luma_power, lp
+
chroma_power, cp
+
alpha_power, ap
+

Specify how many times the boxblur filter is applied to the +corresponding plane. +

+

Default value for ‘luma_power’ is 2. If not specified, +‘chroma_power’ and ‘alpha_power’ default to the +corresponding value set for ‘luma_power’. +

+

A value of 0 will disable the effect. +

+
+ + +

28.8.1 Examples

+ +
    +
  • +Apply a boxblur filter with luma, chroma, and alpha radius +set to 2: +
     
    boxblur=luma_radius=2:luma_power=1
    +boxblur=2:1
    +
    + +
  • +Set luma radius to 2, alpha and chroma radius to 0: +
     
    boxblur=2:1:cr=0:ar=0
    +
    + +
  • +Set luma and chroma radius to a fraction of the video dimension: +
     
    boxblur=luma_radius=min(h\,w)/10:luma_power=1:chroma_radius=min(cw\,ch)/10:chroma_power=1
    +
    +
+ + +

28.9 colorbalance

+

Modify intensity of primary colors (red, green and blue) of input frames. +

+

The filter allows an input frame to be adjusted in the shadows, midtones or highlights +regions for the red-cyan, green-magenta or blue-yellow balance. +

+

A positive adjustment value shifts the balance towards the primary color, a negative +value towards the complementary color. +

+

The filter accepts the following options: +

+
+
rs
+
gs
+
bs
+

Adjust red, green and blue shadows (darkest pixels). +

+
+
rm
+
gm
+
bm
+

Adjust red, green and blue midtones (medium pixels). +

+
+
rh
+
gh
+
bh
+

Adjust red, green and blue highlights (brightest pixels). +

+

Allowed ranges for options are [-1.0, 1.0]. Defaults are 0. +

+
+ + +

28.9.1 Examples

+ +
    +
  • +Add red color cast to shadows: +
     
    colorbalance=rs=.3
    +
    +
+ + +

28.10 colorchannelmixer

+ +

Adjust video input frames by re-mixing color channels. +

+

This filter modifies a color channel by adding the values associated to +the other channels of the same pixels. For example if the value to +modify is red, the output value will be: +

 
red=red*rr + blue*rb + green*rg + alpha*ra
+
+ +

The filter accepts the following options: +

+
+
rr
+
rg
+
rb
+
ra
+

Adjust contribution of input red, green, blue and alpha channels for output red channel. +Default is 1 for rr, and 0 for rg, rb and ra. +

+
+
gr
+
gg
+
gb
+
ga
+

Adjust contribution of input red, green, blue and alpha channels for output green channel. +Default is 1 for gg, and 0 for gr, gb and ga. +

+
+
br
+
bg
+
bb
+
ba
+

Adjust contribution of input red, green, blue and alpha channels for output blue channel. +Default is 1 for bb, and 0 for br, bg and ba. +

+
+
ar
+
ag
+
ab
+
aa
+

Adjust contribution of input red, green, blue and alpha channels for output alpha channel. +Default is 1 for aa, and 0 for ar, ag and ab. +

+

Allowed ranges for options are [-2.0, 2.0]. +

+
+ + +

28.10.1 Examples

+ +
    +
  • +Convert source to grayscale: +
     
    colorchannelmixer=.3:.4:.3:0:.3:.4:.3:0:.3:.4:.3
    +
    +
  • +Simulate sepia tones: +
     
    colorchannelmixer=.393:.769:.189:0:.349:.686:.168:0:.272:.534:.131
    +
    +
+ + +

28.11 colormatrix

+ +

Convert color matrix. +

+

The filter accepts the following options: +

+
+
src
+
dst
+

Specify the source and destination color matrix. Both values must be +specified. +

+

The accepted values are: +

+
bt709
+

BT.709 +

+
+
bt601
+

BT.601 +

+
+
smpte240m
+

SMPTE-240M +

+
+
fcc
+

FCC +

+
+
+
+ +

For example to convert from BT.601 to SMPTE-240M, use the command: +

 
colormatrix=bt601:smpte240m
+
+ + +

28.12 copy

+ +

Copy the input source unchanged to the output. Mainly useful for +testing purposes. +

+ +

28.13 crop

+ +

Crop the input video to given dimensions. +

+

The filter accepts the following options: +

+
+
w, out_w
+

Width of the output video. It defaults to iw. +This expression is evaluated only once during the filter +configuration. +

+
+
h, out_h
+

Height of the output video. It defaults to ih. +This expression is evaluated only once during the filter +configuration. +

+
+
x
+

Horizontal position, in the input video, of the left edge of the output video. +It defaults to (in_w-out_w)/2. +This expression is evaluated per-frame. +

+
+
y
+

Vertical position, in the input video, of the top edge of the output video. +It defaults to (in_h-out_h)/2. +This expression is evaluated per-frame. +

+
+
keep_aspect
+

If set to 1 will force the output display aspect ratio +to be the same of the input, by changing the output sample aspect +ratio. It defaults to 0. +

+
+ +

The out_w, out_h, x, y parameters are +expressions containing the following constants: +

+
+
x
+
y
+

the computed values for x and y. They are evaluated for +each new frame. +

+
+
in_w
+
in_h
+

the input width and height +

+
+
iw
+
ih
+

same as in_w and in_h +

+
+
out_w
+
out_h
+

the output (cropped) width and height +

+
+
ow
+
oh
+

same as out_w and out_h +

+
+
a
+

same as iw / ih +

+
+
sar
+

input sample aspect ratio +

+
+
dar
+

input display aspect ratio, it is the same as (iw / ih) * sar +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+
n
+

the number of input frame, starting from 0 +

+
+
pos
+

the position in the file of the input frame, NAN if unknown +

+
+
t
+

timestamp expressed in seconds, NAN if the input timestamp is unknown +

+
+
+ +

The expression for out_w may depend on the value of out_h, +and the expression for out_h may depend on out_w, but they +cannot depend on x and y, as x and y are +evaluated after out_w and out_h. +

+

The x and y parameters specify the expressions for the +position of the top-left corner of the output (non-cropped) area. They +are evaluated for each frame. If the evaluated value is not valid, it +is approximated to the nearest valid value. +

+

The expression for x may depend on y, and the expression +for y may depend on x. +

+ +

28.13.1 Examples

+ +
    +
  • +Crop area with size 100x100 at position (12,34). +
     
    crop=100:100:12:34
    +
    + +

    Using named options, the example above becomes: +

     
    crop=w=100:h=100:x=12:y=34
    +
    + +
  • +Crop the central input area with size 100x100: +
     
    crop=100:100
    +
    + +
  • +Crop the central input area with size 2/3 of the input video: +
     
    crop=2/3*in_w:2/3*in_h
    +
    + +
  • +Crop the input video central square: +
     
    crop=out_w=in_h
    +crop=in_h
    +
    + +
  • +Delimit the rectangle with the top-left corner placed at position +100:100 and the right-bottom corner corresponding to the right-bottom +corner of the input image: +
     
    crop=in_w-100:in_h-100:100:100
    +
    + +
  • +Crop 10 pixels from the left and right borders, and 20 pixels from +the top and bottom borders +
     
    crop=in_w-2*10:in_h-2*20
    +
    + +
  • +Keep only the bottom right quarter of the input image: +
     
    crop=in_w/2:in_h/2:in_w/2:in_h/2
    +
    + +
  • +Crop height for getting Greek harmony: +
     
    crop=in_w:1/PHI*in_w
    +
    + +
  • +Apply trembling effect: +
     
    crop=in_w/2:in_h/2:(in_w-out_w)/2+((in_w-out_w)/2)*sin(n/10):(in_h-out_h)/2 +((in_h-out_h)/2)*sin(n/7)
    +
    + +
  • +Apply erratic camera effect depending on timestamp: +
     
    crop=in_w/2:in_h/2:(in_w-out_w)/2+((in_w-out_w)/2)*sin(t*10):(in_h-out_h)/2 +((in_h-out_h)/2)*sin(t*13)"
    +
    + +
  • +Set x depending on the value of y: +
     
    crop=in_w/2:in_h/2:y:10+10*sin(n/10)
    +
    +
+ + +

28.14 cropdetect

+ +

Auto-detect crop size. +

+

Calculate necessary cropping parameters and prints the recommended +parameters through the logging system. The detected dimensions +correspond to the non-black area of the input video. +

+

The filter accepts the following options: +

+
+
limit
+

Set higher black value threshold, which can be optionally specified +from nothing (0) to everything (255). An intensity value greater +than the set value is considered non-black. Default value is 24. +

+
+
round
+

Set the value for which the width/height should be divisible by. The +offset is automatically adjusted to center the video. Use 2 to get +only even dimensions (needed for 4:2:2 video). 16 is best when +encoding to most video codecs. Default value is 16. +

+
+
reset_count, reset
+

Set the counter that determines after how many frames cropdetect will +reset the previously detected largest video area and start over to +detect the current optimal crop area. Default value is 0. +

+

This can be useful when channel logos distort the video area. 0 +indicates never reset and return the largest area encountered during +playback. +

+
+ +

+

+

28.15 curves

+ +

Apply color adjustments using curves. +

+

This filter is similar to the Adobe Photoshop and GIMP curves tools. Each +component (red, green and blue) has its values defined by N key points +tied from each other using a smooth curve. The x-axis represents the pixel +values from the input frame, and the y-axis the new pixel values to be set for +the output frame. +

+

By default, a component curve is defined by the two points (0;0) and +(1;1). This creates a straight line where each original pixel value is +"adjusted" to its own value, which means no change to the image. +

+

The filter allows you to redefine these two points and add some more. A new +curve (using a natural cubic spline interpolation) will be defined to pass +smoothly through all these new coordinates. The newly defined points need to be +strictly increasing over the x-axis, and their x and y values must +be in the [0;1] interval. If the computed curves happened to go outside +the vector spaces, the values will be clipped accordingly. +

+

If there is no key point defined in x=0, the filter will automatically +insert a (0;0) point. In the same way, if there is no key point defined +in x=1, the filter will automatically insert a (1;1) point. +

+

The filter accepts the following options: +

+
+
preset
+

Select one of the available color presets. This option can be used in addition +to the ‘r’, ‘g’, ‘b’ parameters; in this case, the latter +options take priority over the preset values. +Available presets are: +

+
none
+
color_negative
+
cross_process
+
darker
+
increase_contrast
+
lighter
+
linear_contrast
+
medium_contrast
+
negative
+
strong_contrast
+
vintage
+
+

Default is none. +

+
master, m
+

Set the master key points. These points will define a second pass mapping. It +is sometimes called a "luminance" or "value" mapping. It can be used with +‘r’, ‘g’, ‘b’ or ‘all’ since it acts like a +post-processing LUT. +

+
red, r
+

Set the key points for the red component. +

+
green, g
+

Set the key points for the green component. +

+
blue, b
+

Set the key points for the blue component. +

+
all
+

Set the key points for all components (not including master). +Can be used in addition to the other key points component +options. In this case, the unset component(s) will fallback on this +‘all’ setting. +

+
psfile
+

Specify a Photoshop curves file (.asv) to import the settings from. +

+
+ +

To avoid some filtergraph syntax conflicts, each key points list need to be +defined using the following syntax: x0/y0 x1/y1 x2/y2 .... +

+ +

28.15.1 Examples

+ +
    +
  • +Increase slightly the middle level of blue: +
     
    curves=blue='0.5/0.58'
    +
    + +
  • +Vintage effect: +
     
    curves=r='0/0.11 .42/.51 1/0.95':g='0.50/0.48':b='0/0.22 .49/.44 1/0.8'
    +
    +

    Here we obtain the following coordinates for each components: +

    +
    red
    +

    (0;0.11) (0.42;0.51) (1;0.95) +

    +
    green
    +

    (0;0) (0.50;0.48) (1;1) +

    +
    blue
    +

    (0;0.22) (0.49;0.44) (1;0.80) +

    +
    + +
  • +The previous example can also be achieved with the associated built-in preset: +
     
    curves=preset=vintage
    +
    + +
  • +Or simply: +
     
    curves=vintage
    +
    + +
  • +Use a Photoshop preset and redefine the points of the green component: +
     
    curves=psfile='MyCurvesPresets/purple.asv':green='0.45/0.53'
    +
    +
+ + +

28.16 dctdnoiz

+ +

Denoise frames using 2D DCT (frequency domain filtering). +

+

This filter is not designed for real time and can be extremely slow. +

+

The filter accepts the following options: +

+
+
sigma, s
+

Set the noise sigma constant. +

+

This sigma defines a hard threshold of 3 * sigma; every DCT +coefficient (absolute value) below this threshold will be dropped. +

+

If you need a more advanced filtering, see ‘expr’. +

+

Default is 0. +

+
+
overlap
+

Set number overlapping pixels for each block. Each block is of size +16x16. Since the filter can be slow, you may want to reduce this value, +at the cost of a less effective filter and the risk of various artefacts. +

+

If the overlapping value doesn’t allow to process the whole input width or +height, a warning will be displayed and according borders won’t be denoised. +

+

Default value is 15. +

+
+
expr, e
+

Set the coefficient factor expression. +

+

For each coefficient of a DCT block, this expression will be evaluated as a +multiplier value for the coefficient. +

+

If this option is set, the ‘sigma’ option will be ignored. +

+

The absolute value of the coefficient can be accessed through the c +variable. +

+
+ + +

28.16.1 Examples

+ +

Apply a denoise with a ‘sigma’ of 4.5: +

 
dctdnoiz=4.5
+
+ +

The same operation can be achieved using the expression system: +

 
dctdnoiz=e='gte(c, 4.5*3)'
+
+ +

+

+

28.17 decimate

+ +

Drop duplicated frames at regular intervals. +

+

The filter accepts the following options: +

+
+
cycle
+

Set the number of frames from which one will be dropped. Setting this to +N means one frame in every batch of N frames will be dropped. +Default is 5. +

+
+
dupthresh
+

Set the threshold for duplicate detection. If the difference metric for a frame +is less than or equal to this value, then it is declared as duplicate. Default +is 1.1 +

+
+
scthresh
+

Set scene change threshold. Default is 15. +

+
+
blockx
+
blocky
+

Set the size of the x and y-axis blocks used during metric calculations. +Larger blocks give better noise suppression, but also give worse detection of +small movements. Must be a power of two. Default is 32. +

+
+
ppsrc
+

Mark main input as a pre-processed input and activate clean source input +stream. This allows the input to be pre-processed with various filters to help +the metrics calculation while keeping the frame selection lossless. When set to +1, the first stream is for the pre-processed input, and the second +stream is the clean source from where the kept frames are chosen. Default is +0. +

+
+
chroma
+

Set whether or not chroma is considered in the metric calculations. Default is +1. +

+
+ + +

28.18 dejudder

+ +

Remove judder produced by partially interlaced telecined content. +

+

Judder can be introduced, for instance, by pullup filter. If the original +source was partially telecined content then the output of pullup,dejudder +will have a variable frame rate. May change the recorded frame rate of the +container. Aside from that change, this filter will not affect constant frame +rate video. +

+

The option available in this filter is: +

+
cycle
+

Specify the length of the window over which the judder repeats. +

+

Accepts any integer greater than 1. Useful values are: +

+
4
+

If the original was telecined from 24 to 30 fps (Film to NTSC). +

+
+
5
+

If the original was telecined from 25 to 30 fps (PAL to NTSC). +

+
+
20
+

If a mixture of the two. +

+
+ +

The default is ‘4’. +

+
+ + +

28.19 delogo

+ +

Suppress a TV station logo by a simple interpolation of the surrounding +pixels. Just set a rectangle covering the logo and watch it disappear +(and sometimes something even uglier appear - your mileage may vary). +

+

This filter accepts the following options: +

+
x
+
y
+

Specify the top left corner coordinates of the logo. They must be +specified. +

+
+
w
+
h
+

Specify the width and height of the logo to clear. They must be +specified. +

+
+
band, t
+

Specify the thickness of the fuzzy edge of the rectangle (added to +w and h). The default value is 4. +

+
+
show
+

When set to 1, a green rectangle is drawn on the screen to simplify +finding the right x, y, w, and h parameters. +The default value is 0. +

+

The rectangle is drawn on the outermost pixels which will be (partly) +replaced with interpolated values. The values of the next pixels +immediately outside this rectangle in each direction will be used to +compute the interpolated pixel values inside the rectangle. +

+
+
+ + +

28.19.1 Examples

+ +
    +
  • +Set a rectangle covering the area with top left corner coordinates 0,0 +and size 100x77, setting a band of size 10: +
     
    delogo=x=0:y=0:w=100:h=77:band=10
    +
    + +
+ + +

28.20 deshake

+ +

Attempt to fix small changes in horizontal and/or vertical shift. This +filter helps remove camera shake from hand-holding a camera, bumping a +tripod, moving on a vehicle, etc. +

+

The filter accepts the following options: +

+
+
x
+
y
+
w
+
h
+

Specify a rectangular area where to limit the search for motion +vectors. +If desired the search for motion vectors can be limited to a +rectangular area of the frame defined by its top left corner, width +and height. These parameters have the same meaning as the drawbox +filter which can be used to visualise the position of the bounding +box. +

+

This is useful when simultaneous movement of subjects within the frame +might be confused for camera motion by the motion vector search. +

+

If any or all of x, y, w and h are set to -1 +then the full frame is used. This allows later options to be set +without specifying the bounding box for the motion vector search. +

+

Default - search the whole frame. +

+
+
rx
+
ry
+

Specify the maximum extent of movement in x and y directions in the +range 0-64 pixels. Default 16. +

+
+
edge
+

Specify how to generate pixels to fill blanks at the edge of the +frame. Available values are: +

+
blank, 0
+

Fill zeroes at blank locations +

+
original, 1
+

Original image at blank locations +

+
clamp, 2
+

Extruded edge value at blank locations +

+
mirror, 3
+

Mirrored edge at blank locations +

+
+

Default value is ‘mirror’. +

+
+
blocksize
+

Specify the blocksize to use for motion search. Range 4-128 pixels, +default 8. +

+
+
contrast
+

Specify the contrast threshold for blocks. Only blocks with more than +the specified contrast (difference between darkest and lightest +pixels) will be considered. Range 1-255, default 125. +

+
+
search
+

Specify the search strategy. Available values are: +

+
exhaustive, 0
+

Set exhaustive search +

+
less, 1
+

Set less exhaustive search. +

+
+

Default value is ‘exhaustive’. +

+
+
filename
+

If set then a detailed log of the motion search is written to the +specified file. +

+
+
opencl
+

If set to 1, specify using OpenCL capabilities, only available if +FFmpeg was configured with --enable-opencl. Default value is 0. +

+
+
+ + +

28.21 drawbox

+ +

Draw a colored box on the input image. +

+

This filter accepts the following options: +

+
+
x
+
y
+

The expressions which specify the top left corner coordinates of the box. Default to 0. +

+
+
width, w
+
height, h
+

The expressions which specify the width and height of the box, if 0 they are interpreted as +the input width and height. Default to 0. +

+
+
color, c
+

Specify the color of the box to write. For the general syntax of this option, +check the "Color" section in the ffmpeg-utils manual. If the special +value invert is used, the box edge color is the same as the +video with inverted luma. +

+
+
thickness, t
+

The expression which sets the thickness of the box edge. Default value is 3. +

+

See below for the list of accepted constants. +

+
+ +

The parameters for x, y, w and h and t are expressions containing the +following constants: +

+
+
dar
+

The input display aspect ratio, it is the same as (w / h) * sar. +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+
in_h, ih
+
in_w, iw
+

The input width and height. +

+
+
sar
+

The input sample aspect ratio. +

+
+
x
+
y
+

The x and y offset coordinates where the box is drawn. +

+
+
w
+
h
+

The width and height of the drawn box. +

+
+
t
+

The thickness of the drawn box. +

+

These constants allow the x, y, w, h and t expressions to refer to +each other, so you may for example specify y=x/dar or h=w/dar. +

+
+
+ + +

28.21.1 Examples

+ +
    +
  • +Draw a black box around the edge of the input image: +
     
    drawbox
    +
    + +
  • +Draw a box with color red and an opacity of 50%: +
     
    drawbox=10:20:200:60:red@0.5
    +
    + +

    The previous example can be specified as: +

     
    drawbox=x=10:y=20:w=200:h=60:color=red@0.5
    +
    + +
  • +Fill the box with pink color: +
     
    drawbox=x=10:y=10:w=100:h=100:color=pink@0.5:t=max
    +
    + +
  • +Draw a 2-pixel red 2.40:1 mask: +
     
    drawbox=x=-t:y=0.5*(ih-iw/2.4)-t:w=iw+t*2:h=iw/2.4+t*2:t=2:c=red
    +
    +
+ + +

28.22 drawgrid

+ +

Draw a grid on the input image. +

+

This filter accepts the following options: +

+
+
x
+
y
+

The expressions which specify the coordinates of some point of grid intersection (meant to configure offset). Both default to 0. +

+
+
width, w
+
height, h
+

The expressions which specify the width and height of the grid cell, if 0 they are interpreted as the +input width and height, respectively, minus thickness, so image gets +framed. Default to 0. +

+
+
color, c
+

Specify the color of the grid. For the general syntax of this option, +check the "Color" section in the ffmpeg-utils manual. If the special +value invert is used, the grid color is the same as the +video with inverted luma. +

+
+
thickness, t
+

The expression which sets the thickness of the grid line. Default value is 1. +

+

See below for the list of accepted constants. +

+
+ +

The parameters for x, y, w and h and t are expressions containing the +following constants: +

+
+
dar
+

The input display aspect ratio, it is the same as (w / h) * sar. +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+
in_h, ih
+
in_w, iw
+

The input grid cell width and height. +

+
+
sar
+

The input sample aspect ratio. +

+
+
x
+
y
+

The x and y coordinates of some point of grid intersection (meant to configure offset). +

+
+
w
+
h
+

The width and height of the drawn cell. +

+
+
t
+

The thickness of the drawn cell. +

+

These constants allow the x, y, w, h and t expressions to refer to +each other, so you may for example specify y=x/dar or h=w/dar. +

+
+
+ + +

28.22.1 Examples

+ +
    +
  • +Draw a grid with cell 100x100 pixels, thickness 2 pixels, with color red and an opacity of 50%: +
     
    drawgrid=width=100:height=100:thickness=2:color=red@0.5
    +
    + +
  • +Draw a white 3x3 grid with an opacity of 50%: +
     
    drawgrid=w=iw/3:h=ih/3:t=2:c=white@0.5
    +
    +
+ +

+

+

28.23 drawtext

+ +

Draw text string or text from specified file on top of video using the +libfreetype library. +

+

To enable compilation of this filter you need to configure FFmpeg with +--enable-libfreetype. +

+ +

28.23.1 Syntax

+ +

The description of the accepted parameters follows. +

+
+
box
+

Used to draw a box around text using background color. +Value should be either 1 (enable) or 0 (disable). +The default value of box is 0. +

+
+
boxcolor
+

The color to be used for drawing box around text. For the syntax of this +option, check the "Color" section in the ffmpeg-utils manual. +

+

The default value of boxcolor is "white". +

+
+
borderw
+

Set the width of the border to be drawn around the text using bordercolor. +The default value of borderw is 0. +

+
+
bordercolor
+

Set the color to be used for drawing border around text. For the syntax of this +option, check the "Color" section in the ffmpeg-utils manual. +

+

The default value of bordercolor is "black". +

+
+
expansion
+

Select how the text is expanded. Can be either none, +strftime (deprecated) or +normal (default). See the Text expansion section +below for details. +

+
+
fix_bounds
+

If true, check and fix text coords to avoid clipping. +

+
+
fontcolor
+

The color to be used for drawing fonts. For the syntax of this option, check +the "Color" section in the ffmpeg-utils manual. +

+

The default value of fontcolor is "black". +

+
+
fontfile
+

The font file to be used for drawing text. Path must be included. +This parameter is mandatory. +

+
+
fontsize
+

The font size to be used for drawing text. +The default value of fontsize is 16. +

+
+
ft_load_flags
+

Flags to be used for loading the fonts. +

+

The flags map the corresponding flags supported by libfreetype, and are +a combination of the following values: +

+
default
+
no_scale
+
no_hinting
+
render
+
no_bitmap
+
vertical_layout
+
force_autohint
+
crop_bitmap
+
pedantic
+
ignore_global_advance_width
+
no_recurse
+
ignore_transform
+
monochrome
+
linear_design
+
no_autohint
+
+ +

Default value is "default". +

+

For more information consult the documentation for the FT_LOAD_* +libfreetype flags. +

+
+
shadowcolor
+

The color to be used for drawing a shadow behind the drawn text. For the +syntax of this option, check the "Color" section in the ffmpeg-utils manual. +

+

The default value of shadowcolor is "black". +

+
+
shadowx
+
shadowy
+

The x and y offsets for the text shadow position with respect to the +position of the text. They can be either positive or negative +values. Default value for both is "0". +

+
+
start_number
+

The starting frame number for the n/frame_num variable. The default value +is "0". +

+
+
tabsize
+

The size in number of spaces to use for rendering the tab. +Default value is 4. +

+
+
timecode
+

Set the initial timecode representation in "hh:mm:ss[:;.]ff" +format. It can be used with or without text parameter. timecode_rate +option must be specified. +

+
+
timecode_rate, rate, r
+

Set the timecode frame rate (timecode only). +

+
+
text
+

The text string to be drawn. The text must be a sequence of UTF-8 +encoded characters. +This parameter is mandatory if no file is specified with the parameter +textfile. +

+
+
textfile
+

A text file containing text to be drawn. The text must be a sequence +of UTF-8 encoded characters. +

+

This parameter is mandatory if no text string is specified with the +parameter text. +

+

If both text and textfile are specified, an error is thrown. +

+
+
reload
+

If set to 1, the textfile will be reloaded before each frame. +Be sure to update it atomically, or it may be read partially, or even fail. +

+
+
x
+
y
+

The expressions which specify the offsets where text will be drawn +within the video frame. They are relative to the top/left border of the +output image. +

+

The default value of x and y is "0". +

+

See below for the list of accepted constants and functions. +

+
+ +

The parameters for x and y are expressions containing the +following constants and functions: +

+
+
dar
+

input display aspect ratio, it is the same as (w / h) * sar +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+
line_h, lh
+

the height of each text line +

+
+
main_h, h, H
+

the input height +

+
+
main_w, w, W
+

the input width +

+
+
max_glyph_a, ascent
+

the maximum distance from the baseline to the highest/upper grid +coordinate used to place a glyph outline point, for all the rendered +glyphs. +It is a positive value, due to the grid’s orientation with the Y axis +upwards. +

+
+
max_glyph_d, descent
+

the maximum distance from the baseline to the lowest grid coordinate +used to place a glyph outline point, for all the rendered glyphs. +This is a negative value, due to the grid’s orientation, with the Y axis +upwards. +

+
+
max_glyph_h
+

maximum glyph height, that is the maximum height for all the glyphs +contained in the rendered text, it is equivalent to ascent - +descent. +

+
+
max_glyph_w
+

maximum glyph width, that is the maximum width for all the glyphs +contained in the rendered text +

+
+
n
+

the number of input frame, starting from 0 +

+
+
rand(min, max)
+

return a random number included between min and max +

+
+
sar
+

input sample aspect ratio +

+
+
t
+

timestamp expressed in seconds, NAN if the input timestamp is unknown +

+
+
text_h, th
+

the height of the rendered text +

+
+
text_w, tw
+

the width of the rendered text +

+
+
x
+
y
+

the x and y offset coordinates where the text is drawn. +

+

These parameters allow the x and y expressions to refer +each other, so you can for example specify y=x/dar. +

+
+ +

If libavfilter was built with --enable-fontconfig, then +‘fontfile’ can be a fontconfig pattern or omitted. +

+

+

+

28.23.2 Text expansion

+ +

If ‘expansion’ is set to strftime, +the filter recognizes strftime() sequences in the provided text and +expands them accordingly. Check the documentation of strftime(). This +feature is deprecated. +

+

If ‘expansion’ is set to none, the text is printed verbatim. +

+

If ‘expansion’ is set to normal (which is the default), +the following expansion mechanism is used. +

+

The backslash character ’\’, followed by any character, always expands to +the second character. +

+

Sequence of the form %{...} are expanded. The text between the +braces is a function name, possibly followed by arguments separated by ’:’. +If the arguments contain special characters or delimiters (’:’ or ’}’), +they should be escaped. +

+

Note that they probably must also be escaped as the value for the +‘text’ option in the filter argument string and as the filter +argument in the filtergraph description, and possibly also for the shell, +that makes up to four levels of escaping; using a text file avoids these +problems. +

+

The following functions are available: +

+
+
expr, e
+

The expression evaluation result. +

+

It must take one argument specifying the expression to be evaluated, +which accepts the same constants and functions as the x and +y values. Note that not all constants should be used, for +example the text size is not known when evaluating the expression, so +the constants text_w and text_h will have an undefined +value. +

+
+
gmtime
+

The time at which the filter is running, expressed in UTC. +It can accept an argument: a strftime() format string. +

+
+
localtime
+

The time at which the filter is running, expressed in the local time zone. +It can accept an argument: a strftime() format string. +

+
+
metadata
+

Frame metadata. It must take one argument specifying metadata key. +

+
+
n, frame_num
+

The frame number, starting from 0. +

+
+
pict_type
+

A 1 character description of the current picture type. +

+
+
pts
+

The timestamp of the current frame, in seconds, with microsecond accuracy. +

+
+
+ + +

28.23.3 Examples

+ +
    +
  • +Draw "Test Text" with font FreeSerif, using the default values for the +optional parameters. + +
     
    drawtext="fontfile=/usr/share/fonts/truetype/freefont/FreeSerif.ttf: text='Test Text'"
    +
    + +
  • +Draw ’Test Text’ with font FreeSerif of size 24 at position x=100 +and y=50 (counting from the top-left corner of the screen), text is +yellow with a red box around it. Both the text and the box have an +opacity of 20%. + +
     
    drawtext="fontfile=/usr/share/fonts/truetype/freefont/FreeSerif.ttf: text='Test Text':\
    +          x=100: y=50: fontsize=24: fontcolor=yellow@0.2: box=1: boxcolor=red@0.2"
    +
    + +

    Note that the double quotes are not necessary if spaces are not used +within the parameter list. +

    +
  • +Show the text at the center of the video frame: +
     
    drawtext="fontsize=30:fontfile=FreeSerif.ttf:text='hello world':x=(w-text_w)/2:y=(h-text_h-line_h)/2"
    +
    + +
  • +Show a text line sliding from right to left in the last row of the video +frame. The file ‘LONG_LINE’ is assumed to contain a single line +with no newlines. +
     
    drawtext="fontsize=15:fontfile=FreeSerif.ttf:text=LONG_LINE:y=h-line_h:x=-50*t"
    +
    + +
  • +Show the content of file ‘CREDITS’ off the bottom of the frame and scroll up. +
     
    drawtext="fontsize=20:fontfile=FreeSerif.ttf:textfile=CREDITS:y=h-20*t"
    +
    + +
  • +Draw a single green letter "g", at the center of the input video. +The glyph baseline is placed at half screen height. +
     
    drawtext="fontsize=60:fontfile=FreeSerif.ttf:fontcolor=green:text=g:x=(w-max_glyph_w)/2:y=h/2-ascent"
    +
    + +
  • +Show text for 1 second every 3 seconds: +
     
    drawtext="fontfile=FreeSerif.ttf:fontcolor=white:x=100:y=x/dar:enable=lt(mod(t\,3)\,1):text='blink'"
    +
    + +
  • +Use fontconfig to set the font. Note that the colons need to be escaped. +
     
    drawtext='fontfile=Linux Libertine O-40\:style=Semibold:text=FFmpeg'
    +
    + +
  • +Print the date of a real-time encoding (see strftime(3)): +
     
    drawtext='fontfile=FreeSans.ttf:text=%{localtime:%a %b %d %Y}'
    +
    + +
+ +

For more information about libfreetype, check: +http://www.freetype.org/. +

+

For more information about fontconfig, check: +http://freedesktop.org/software/fontconfig/fontconfig-user.html. +

+ +

28.24 edgedetect

+ +

Detect and draw edges. The filter uses the Canny Edge Detection algorithm. +

+

The filter accepts the following options: +

+
+
low
+
high
+

Set low and high threshold values used by the Canny thresholding +algorithm. +

+

The high threshold selects the "strong" edge pixels, which are then +connected through 8-connectivity with the "weak" edge pixels selected +by the low threshold. +

+

low and high threshold values must be chosen in the range +[0,1], and low should be lesser or equal to high. +

+

Default value for low is 20/255, and default value for high +is 50/255. +

+
+ +

Example: +

 
edgedetect=low=0.1:high=0.4
+
+ + +

28.25 extractplanes

+ +

Extract color channel components from input video stream into +separate grayscale video streams. +

+

The filter accepts the following option: +

+
+
planes
+

Set plane(s) to extract. +

+

Available values for planes are: +

+
y
+
u
+
v
+
a
+
r
+
g
+
b
+
+ +

Choosing planes not available in the input will result in an error. +That means you cannot select r, g, b planes +with y, u, v planes at the same time. +

+
+ + +

28.25.1 Examples

+ +
    +
  • +Extract luma, u and v color channel component from input video frame +into 3 grayscale outputs: +
     
    ffmpeg -i video.avi -filter_complex 'extractplanes=y+u+v[y][u][v]' -map '[y]' y.avi -map '[u]' u.avi -map '[v]' v.avi
    +
    +
+ + +

28.26 elbg

+ +

Apply a posterize effect using the ELBG (Enhanced LBG) algorithm. +

+

For each input image, the filter will compute the optimal mapping from +the input to the output given the codebook length, that is the number +of distinct output colors. +

+

This filter accepts the following options. +

+
+
codebook_length, l
+

Set codebook length. The value must be a positive integer, and +represents the number of distinct output colors. Default value is 256. +

+
+
nb_steps, n
+

Set the maximum number of iterations to apply for computing the optimal +mapping. The higher the value the better the result and the higher the +computation time. Default value is 1. +

+
+
seed, s
+

Set a random seed, must be an integer included between 0 and +UINT32_MAX. If not specified, or if explicitly set to -1, the filter +will try to use a good random seed on a best effort basis. +

+
+ + +

28.27 fade

+ +

Apply fade-in/out effect to input video. +

+

This filter accepts the following options: +

+
+
type, t
+

The effect type – can be either "in" for fade-in, or "out" for a fade-out +effect. +Default is in. +

+
+
start_frame, s
+

Specify the number of the start frame for starting to apply the fade +effect. Default is 0. +

+
+
nb_frames, n
+

The number of frames for which the fade effect has to last. At the end of the +fade-in effect the output video will have the same intensity as the input video, +at the end of the fade-out transition the output video will be filled with the +selected ‘color’. +Default is 25. +

+
+
alpha
+

If set to 1, fade only alpha channel, if one exists on the input. +Default value is 0. +

+
+
start_time, st
+

Specify the timestamp (in seconds) of the frame to start to apply the fade +effect. If both start_frame and start_time are specified, the fade will start at +whichever comes last. Default is 0. +

+
+
duration, d
+

The number of seconds for which the fade effect has to last. At the end of the +fade-in effect the output video will have the same intensity as the input video, +at the end of the fade-out transition the output video will be filled with the +selected ‘color’. +If both duration and nb_frames are specified, duration is used. Default is 0. +

+
+
color, c
+

Specify the color of the fade. Default is "black". +

+
+ + +

28.27.1 Examples

+ +
    +
  • +Fade in first 30 frames of video: +
     
    fade=in:0:30
    +
    + +

    The command above is equivalent to: +

     
    fade=t=in:s=0:n=30
    +
    + +
  • +Fade out last 45 frames of a 200-frame video: +
     
    fade=out:155:45
    +fade=type=out:start_frame=155:nb_frames=45
    +
    + +
  • +Fade in first 25 frames and fade out last 25 frames of a 1000-frame video: +
     
    fade=in:0:25, fade=out:975:25
    +
    + +
  • +Make first 5 frames yellow, then fade in from frame 5-24: +
     
    fade=in:5:20:color=yellow
    +
    + +
  • +Fade in alpha over first 25 frames of video: +
     
    fade=in:0:25:alpha=1
    +
    + +
  • +Make first 5.5 seconds black, then fade in for 0.5 seconds: +
     
    fade=t=in:st=5.5:d=0.5
    +
    + +
+ + +

28.28 field

+ +

Extract a single field from an interlaced image using stride +arithmetic to avoid wasting CPU time. The output frames are marked as +non-interlaced. +

+

The filter accepts the following options: +

+
+
type
+

Specify whether to extract the top (if the value is 0 or +top) or the bottom field (if the value is 1 or +bottom). +

+
+ + +

28.29 fieldmatch

+ +

Field matching filter for inverse telecine. It is meant to reconstruct the +progressive frames from a telecined stream. The filter does not drop duplicated +frames, so to achieve a complete inverse telecine fieldmatch needs to be +followed by a decimation filter such as decimate in the filtergraph. +

+

The separation of the field matching and the decimation is notably motivated by +the possibility of inserting a de-interlacing filter fallback between the two. +If the source has mixed telecined and real interlaced content, +fieldmatch will not be able to match fields for the interlaced parts. +But these remaining combed frames will be marked as interlaced, and thus can be +de-interlaced by a later filter such as yadif before decimation. +

+

In addition to the various configuration options, fieldmatch can take an +optional second stream, activated through the ‘ppsrc’ option. If +enabled, the frames reconstruction will be based on the fields and frames from +this second stream. This allows the first input to be pre-processed in order to +help the various algorithms of the filter, while keeping the output lossless +(assuming the fields are matched properly). Typically, a field-aware denoiser, +or brightness/contrast adjustments can help. +

+

Note that this filter uses the same algorithms as TIVTC/TFM (AviSynth project) +and VIVTC/VFM (VapourSynth project). The latter is a light clone of TFM, on +which fieldmatch is based. While the semantics and usage are very +close, some behaviour and option names can differ. +

+

The filter accepts the following options: +

+
+
order
+

Specify the assumed field order of the input stream. Available values are: +

+
+
auto
+

Auto detect parity (use FFmpeg’s internal parity value). +

+
bff
+

Assume bottom field first. +

+
tff
+

Assume top field first. +

+
+ +

Note that it is sometimes recommended not to trust the parity announced by the +stream. +

+

Default value is auto. +

+
+
mode
+

Set the matching mode or strategy to use. ‘pc’ mode is the safest in the +sense that it won’t risk creating jerkiness due to duplicate frames when +possible, but if there are bad edits or blended fields it will end up +outputting combed frames when a good match might actually exist. On the other +hand, ‘pcn_ub’ mode is the most risky in terms of creating jerkiness, +but will almost always find a good frame if there is one. The other values are +all somewhere in between ‘pc’ and ‘pcn_ub’ in terms of risking +jerkiness and creating duplicate frames versus finding good matches in sections +with bad edits, orphaned fields, blended fields, etc. +

+

More details about p/c/n/u/b are available in p/c/n/u/b meaning section. +

+

Available values are: +

+
+
pc
+

2-way matching (p/c) +

+
pc_n
+

2-way matching, and trying 3rd match if still combed (p/c + n) +

+
pc_u
+

2-way matching, and trying 3rd match (same order) if still combed (p/c + u) +

+
pc_n_ub
+

2-way matching, trying 3rd match if still combed, and trying 4th/5th matches if +still combed (p/c + n + u/b) +

+
pcn
+

3-way matching (p/c/n) +

+
pcn_ub
+

3-way matching, and trying 4th/5th matches if all 3 of the original matches are +detected as combed (p/c/n + u/b) +

+
+ +

The parentheses at the end indicate the matches that would be used for that +mode assuming ‘order’=tff (and ‘field’ on auto or +top). +

+

In terms of speed ‘pc’ mode is by far the fastest and ‘pcn_ub’ is +the slowest. +

+

Default value is pc_n. +

+
+
ppsrc
+

Mark the main input stream as a pre-processed input, and enable the secondary +input stream as the clean source to pick the fields from. See the filter +introduction for more details. It is similar to the ‘clip2’ feature from +VFM/TFM. +

+

Default value is 0 (disabled). +

+
+
field
+

Set the field to match from. It is recommended to set this to the same value as +‘order’ unless you experience matching failures with that setting. In +certain circumstances changing the field that is used to match from can have a +large impact on matching performance. Available values are: +

+
+
auto
+

Automatic (same value as ‘order’). +

+
bottom
+

Match from the bottom field. +

+
top
+

Match from the top field. +

+
+ +

Default value is auto. +

+
+
mchroma
+

Set whether or not chroma is included during the match comparisons. In most +cases it is recommended to leave this enabled. You should set this to 0 +only if your clip has bad chroma problems such as heavy rainbowing or other +artifacts. Setting this to 0 could also be used to speed things up at +the cost of some accuracy. +

+

Default value is 1. +

+
+
y0
+
y1
+

These define an exclusion band which excludes the lines between ‘y0’ and +‘y1’ from being included in the field matching decision. An exclusion +band can be used to ignore subtitles, a logo, or other things that may +interfere with the matching. ‘y0’ sets the starting scan line and +‘y1’ sets the ending line; all lines in between ‘y0’ and +‘y1’ (including ‘y0’ and ‘y1’) will be ignored. Setting +‘y0’ and ‘y1’ to the same value will disable the feature. +‘y0’ and ‘y1’ defaults to 0. +

+
+
scthresh
+

Set the scene change detection threshold as a percentage of maximum change on +the luma plane. Good values are in the [8.0, 14.0] range. Scene change +detection is only relevant in case ‘combmatch’=sc. The range for +‘scthresh’ is [0.0, 100.0]. +

+

Default value is 12.0. +

+
+
combmatch
+

When ‘combmatch’ is not none, fieldmatch will take into +account the combed scores of matches when deciding what match to use as the +final match. Available values are: +

+
+
none
+

No final matching based on combed scores. +

+
sc
+

Combed scores are only used when a scene change is detected. +

+
full
+

Use combed scores all the time. +

+
+ +

Default is sc. +

+
+
combdbg
+

Force fieldmatch to calculate the combed metrics for certain matches and +print them. This setting is known as ‘micout’ in TFM/VFM vocabulary. +Available values are: +

+
+
none
+

No forced calculation. +

+
pcn
+

Force p/c/n calculations. +

+
pcnub
+

Force p/c/n/u/b calculations. +

+
+ +

Default value is none. +

+
+
cthresh
+

This is the area combing threshold used for combed frame detection. This +essentially controls how "strong" or "visible" combing must be to be detected. +Larger values mean combing must be more visible and smaller values mean combing +can be less visible or strong and still be detected. Valid settings are from +-1 (every pixel will be detected as combed) to 255 (no pixel will +be detected as combed). This is basically a pixel difference value. A good +range is [8, 12]. +

+

Default value is 9. +

+
+
chroma
+

Sets whether or not chroma is considered in the combed frame decision. Only +disable this if your source has chroma problems (rainbowing, etc.) that are +causing problems for the combed frame detection with chroma enabled. Actually, +using ‘chroma’=0 is usually more reliable, except for the case +where there is chroma only combing in the source. +

+

Default value is 0. +

+
+
blockx
+
blocky
+

Respectively set the x-axis and y-axis size of the window used during combed +frame detection. This has to do with the size of the area in which +‘combpel’ pixels are required to be detected as combed for a frame to be +declared combed. See the ‘combpel’ parameter description for more info. +Possible values are any number that is a power of 2 starting at 4 and going up +to 512. +

+

Default value is 16. +

+
+
combpel
+

The number of combed pixels inside any of the ‘blocky’ by +‘blockx’ size blocks on the frame for the frame to be detected as +combed. While ‘cthresh’ controls how "visible" the combing must be, this +setting controls "how much" combing there must be in any localized area (a +window defined by the ‘blockx’ and ‘blocky’ settings) on the +frame. Minimum value is 0 and maximum is blocky x blockx (at +which point no frames will ever be detected as combed). This setting is known +as ‘MI’ in TFM/VFM vocabulary. +

+

Default value is 80. +

+
+ +

+

+

28.29.1 p/c/n/u/b meaning

+ + +

28.29.1.1 p/c/n

+ +

We assume the following telecined stream: +

+
 
Top fields:     1 2 2 3 4
+Bottom fields:  1 2 3 4 4
+
+ +

The numbers correspond to the progressive frame the fields relate to. Here, the +first two frames are progressive, the 3rd and 4th are combed, and so on. +

+

When fieldmatch is configured to run a matching from bottom +(‘field’=bottom) this is how this input stream gets transformed: +

+
 
Input stream:
+                T     1 2 2 3 4
+                B     1 2 3 4 4   <-- matching reference
+
+Matches:              c c n n c
+
+Output stream:
+                T     1 2 3 4 4
+                B     1 2 3 4 4
+
+ +

As a result of the field matching, we can see that some frames get duplicated. +To perform a complete inverse telecine, you need to rely on a decimation filter +after this operation. See for instance the decimate filter. +

+

The same operation now matching from top fields (‘field’=top) +looks like this: +

+
 
Input stream:
+                T     1 2 2 3 4   <-- matching reference
+                B     1 2 3 4 4
+
+Matches:              c c p p c
+
+Output stream:
+                T     1 2 2 3 4
+                B     1 2 2 3 4
+
+ +

In these examples, we can see what p, c and n mean; +basically, they refer to the frame and field of the opposite parity: +

+
    +
  • p matches the field of the opposite parity in the previous frame +
  • c matches the field of the opposite parity in the current frame +
  • n matches the field of the opposite parity in the next frame +
+ + +

28.29.1.2 u/b

+ +

The u and b matching are a bit special in the sense that they match +from the opposite parity flag. In the following examples, we assume that we are +currently matching the 2nd frame (Top:2, bottom:2). According to the match, an +’x’ is placed above and below each matched field. +

+

With bottom matching (‘field’=bottom): +

 
Match:           c         p           n          b          u
+
+                 x       x               x        x          x
+  Top          1 2 2     1 2 2       1 2 2      1 2 2      1 2 2
+  Bottom       1 2 3     1 2 3       1 2 3      1 2 3      1 2 3
+                 x         x           x        x              x
+
+Output frames:
+                 2          1          2          2          2
+                 2          2          2          1          3
+
+ +

With top matching (‘field’=top): +

 
Match:           c         p           n          b          u
+
+                 x         x           x        x              x
+  Top          1 2 2     1 2 2       1 2 2      1 2 2      1 2 2
+  Bottom       1 2 3     1 2 3       1 2 3      1 2 3      1 2 3
+                 x       x               x        x          x
+
+Output frames:
+                 2          2          2          1          2
+                 2          1          3          2          2
+
+ + +

28.29.2 Examples

+ +

Simple IVTC of a top field first telecined stream: +

 
fieldmatch=order=tff:combmatch=none, decimate
+
+ +

Advanced IVTC, with fallback on yadif for still combed frames: +

 
fieldmatch=order=tff:combmatch=full, yadif=deint=interlaced, decimate
+
+ + +

28.30 fieldorder

+ +

Transform the field order of the input video. +

+

This filter accepts the following options: +

+
+
order
+

Output field order. Valid values are tff for top field first or bff +for bottom field first. +

+
+ +

Default value is ‘tff’. +

+

Transformation is achieved by shifting the picture content up or down +by one line, and filling the remaining line with appropriate picture content. +This method is consistent with most broadcast field order converters. +

+

If the input video is not flagged as being interlaced, or it is already +flagged as being of the required output field order then this filter does +not alter the incoming video. +

+

This filter is very useful when converting to or from PAL DV material, +which is bottom field first. +

+

For example: +

 
ffmpeg -i in.vob -vf "fieldorder=bff" out.dv
+
+ + +

28.31 fifo

+ +

Buffer input images and send them when they are requested. +

+

This filter is mainly useful when auto-inserted by the libavfilter +framework. +

+

The filter does not take parameters. +

+

+

+

28.32 format

+ +

Convert the input video to one of the specified pixel formats. +Libavfilter will try to pick one that is supported for the input to +the next filter. +

+

This filter accepts the following parameters: +

+
pix_fmts
+

A ’|’-separated list of pixel format names, for example +"pix_fmts=yuv420p|monow|rgb24". +

+
+
+ + +

28.32.1 Examples

+ +
    +
  • +Convert the input video to the format yuv420p +
     
    format=pix_fmts=yuv420p
    +
    + +

    Convert the input video to any of the formats in the list +

     
    format=pix_fmts=yuv420p|yuv444p|yuv410p
    +
    +
+ +

+

+

28.33 fps

+ +

Convert the video to specified constant frame rate by duplicating or dropping +frames as necessary. +

+

This filter accepts the following named parameters: +

+
fps
+

Desired output frame rate. The default is 25. +

+
+
round
+

Rounding method. +

+

Possible values are: +

+
zero
+

zero round towards 0 +

+
inf
+

round away from 0 +

+
down
+

round towards -infinity +

+
up
+

round towards +infinity +

+
near
+

round to nearest +

+
+

The default is near. +

+
+
start_time
+

Assume the first PTS should be the given value, in seconds. This allows for +padding/trimming at the start of stream. By default, no assumption is made +about the first frame’s expected PTS, so no padding or trimming is done. +For example, this could be set to 0 to pad the beginning with duplicates of +the first frame if a video stream starts after the audio stream or to trim any +frames with a negative PTS. +

+
+
+ +

Alternatively, the options can be specified as a flat string: +fps[:round]. +

+

See also the setpts filter. +

+ +

28.33.1 Examples

+ +
    +
  • +A typical usage in order to set the fps to 25: +
     
    fps=fps=25
    +
    + +
  • +Sets the fps to 24, using abbreviation and rounding method to round to nearest: +
     
    fps=fps=film:round=near
    +
    +
+ + +

28.34 framepack

+ +

Pack two different video streams into a stereoscopic video, setting proper +metadata on supported codecs. The two views should have the same size and +framerate and processing will stop when the shorter video ends. Please note +that you may conveniently adjust view properties with the scale and +fps filters. +

+

This filter accepts the following named parameters: +

+
format
+

Desired packing format. Supported values are: +

+
+
sbs
+

Views are next to each other (default). +

+
+
tab
+

Views are on top of each other. +

+
+
lines
+

Views are packed by line. +

+
+
columns
+

Views are packed by column. +

+
+
frameseq
+

Views are temporally interleaved. +

+
+
+ +
+
+ +

Some examples follow: +

+
 
# Convert left and right views into a frame sequential video.
+ffmpeg -i LEFT -i RIGHT -filter_complex framepack=frameseq OUTPUT
+
+# Convert views into a side-by-side video with the same output resolution as the input.
+ffmpeg -i LEFT -i RIGHT -filter_complex [0:v]scale=w=iw/2[left],[1:v]scale=w=iw/2[right],[left][right]framepack=sbs OUTPUT
+
+ + +

28.35 framestep

+ +

Select one frame every N-th frame. +

+

This filter accepts the following option: +

+
step
+

Select frame after every step frames. +Allowed values are positive integers higher than 0. Default value is 1. +

+
+ +

+

+

28.36 frei0r

+ +

Apply a frei0r effect to the input video. +

+

To enable compilation of this filter you need to install the frei0r +header and configure FFmpeg with --enable-frei0r. +

+

This filter accepts the following options: +

+
+
filter_name
+

The name of the frei0r effect to load. If the environment variable +FREI0R_PATH is defined, the frei0r effect is searched in each one of the +directories specified by the colon separated list in FREI0R_PATH, +otherwise in the standard frei0r paths, which are in this order: +‘HOME/.frei0r-1/lib/’, ‘/usr/local/lib/frei0r-1/’, +‘/usr/lib/frei0r-1/’. +

+
+
filter_params
+

A ’|’-separated list of parameters to pass to the frei0r effect. +

+
+
+ +

A frei0r effect parameter can be a boolean (whose values are specified +with "y" and "n"), a double, a color (specified by the syntax +R/G/B, (R, G, and B being float +numbers from 0.0 to 1.0) or by a color description specified in the "Color" +section in the ffmpeg-utils manual), a position (specified by the syntax X/Y, +X and Y being float numbers) and a string. +

+

The number and kind of parameters depend on the loaded effect. If an +effect parameter is not specified the default value is set. +

+ +

28.36.1 Examples

+ +
    +
  • +Apply the distort0r effect, set the first two double parameters: +
     
    frei0r=filter_name=distort0r:filter_params=0.5|0.01
    +
    + +
  • +Apply the colordistance effect, take a color as first parameter: +
     
    frei0r=colordistance:0.2/0.3/0.4
    +frei0r=colordistance:violet
    +frei0r=colordistance:0x112233
    +
    + +
  • +Apply the perspective effect, specify the top left and top right image +positions: +
     
    frei0r=perspective:0.2/0.2|0.8/0.2
    +
    +
+ +

For more information see: +http://frei0r.dyne.org +

+ +

28.37 geq

+ +

The filter accepts the following options: +

+
+
lum_expr, lum
+

Set the luminance expression. +

+
cb_expr, cb
+

Set the chrominance blue expression. +

+
cr_expr, cr
+

Set the chrominance red expression. +

+
alpha_expr, a
+

Set the alpha expression. +

+
red_expr, r
+

Set the red expression. +

+
green_expr, g
+

Set the green expression. +

+
blue_expr, b
+

Set the blue expression. +

+
+ +

The colorspace is selected according to the specified options. If one +of the ‘lum_expr’, ‘cb_expr’, or ‘cr_expr’ +options is specified, the filter will automatically select a YCbCr +colorspace. If one of the ‘red_expr’, ‘green_expr’, or +‘blue_expr’ options is specified, it will select an RGB +colorspace. +

+

If one of the chrominance expressions is not defined, it falls back on the other +one. If no alpha expression is specified it will evaluate to the opaque value. +If neither of the chrominance expressions is specified, they will evaluate +to the luminance expression. +

+

The expressions can use the following variables and functions: +

+
+
N
+

The sequential number of the filtered frame, starting from 0. +

+
+
X
+
Y
+

The coordinates of the current sample. +

+
+
W
+
H
+

The width and height of the image. +

+
+
SW
+
SH
+

Width and height scale depending on the currently filtered plane. It is the +ratio between the corresponding luma plane number of pixels and the current +plane ones. E.g. for YUV4:2:0 the values are 1,1 for the luma plane, and +0.5,0.5 for chroma planes. +

+
+
T
+

Time of the current frame, expressed in seconds. +

+
+
p(x, y)
+

Return the value of the pixel at location (x,y) of the current +plane. +

+
+
lum(x, y)
+

Return the value of the pixel at location (x,y) of the luminance +plane. +

+
+
cb(x, y)
+

Return the value of the pixel at location (x,y) of the +blue-difference chroma plane. Return 0 if there is no such plane. +

+
+
cr(x, y)
+

Return the value of the pixel at location (x,y) of the +red-difference chroma plane. Return 0 if there is no such plane. +

+
+
r(x, y)
+
g(x, y)
+
b(x, y)
+

Return the value of the pixel at location (x,y) of the +red/green/blue component. Return 0 if there is no such component. +

+
+
alpha(x, y)
+

Return the value of the pixel at location (x,y) of the alpha +plane. Return 0 if there is no such plane. +

+
+ +

For functions, if x and y are outside the area, the value will be +automatically clipped to the closer edge. +

+ +

28.37.1 Examples

+ +
    +
  • +Flip the image horizontally: +
     
    geq=p(W-X\,Y)
    +
    + +
  • +Generate a bidimensional sine wave, with angle PI/3 and a +wavelength of 100 pixels: +
     
    geq=128 + 100*sin(2*(PI/100)*(cos(PI/3)*(X-50*T) + sin(PI/3)*Y)):128:128
    +
    + +
  • +Generate a fancy enigmatic moving light: +
     
    nullsrc=s=256x256,geq=random(1)/hypot(X-cos(N*0.07)*W/2-W/2\,Y-sin(N*0.09)*H/2-H/2)^2*1000000*sin(N*0.02):128:128
    +
    + +
  • +Generate a quick emboss effect: +
     
    format=gray,geq=lum_expr='(p(X,Y)+(256-p(X-4,Y-4)))/2'
    +
    + +
  • +Modify RGB components depending on pixel position: +
     
    geq=r='X/W*r(X,Y)':g='(1-X/W)*g(X,Y)':b='(H-Y)/H*b(X,Y)'
    +
    +
+ + +

28.38 gradfun

+ +

Fix the banding artifacts that are sometimes introduced into nearly flat +regions by truncation to 8bit color depth. +Interpolate the gradients that should go where the bands are, and +dither them. +

+

This filter is designed for playback only. Do not use it prior to +lossy compression, because compression tends to lose the dither and +bring back the bands. +

+

This filter accepts the following options: +

+
+
strength
+

The maximum amount by which the filter will change any one pixel. Also the +threshold for detecting nearly flat regions. Acceptable values range from .51 to +64, default value is 1.2, out-of-range values will be clipped to the valid +range. +

+
+
radius
+

The neighborhood to fit the gradient to. A larger radius makes for smoother +gradients, but also prevents the filter from modifying the pixels near detailed +regions. Acceptable values are 8-32, default value is 16, out-of-range values +will be clipped to the valid range. +

+
+
+ +

Alternatively, the options can be specified as a flat string: +strength[:radius] +

+ +

28.38.1 Examples

+ +
    +
  • +Apply the filter with a 3.5 strength and radius of 8: +
     
    gradfun=3.5:8
    +
    + +
  • +Specify radius, omitting the strength (which will fall-back to the default +value): +
     
    gradfun=radius=8
    +
    + +
+ +

+

+

28.39 haldclut

+ +

Apply a Hald CLUT to a video stream. +

+

First input is the video stream to process, and second one is the Hald CLUT. +The Hald CLUT input can be a simple picture or a complete video stream. +

+

The filter accepts the following options: +

+
+
shortest
+

Force termination when the shortest input terminates. Default is 0. +

+
repeatlast
+

Continue applying the last CLUT after the end of the stream. A value of +0 disables the filter after the last frame of the CLUT is reached. +Default is 1. +

+
+ +

haldclut also has the same interpolation options as lut3d (both +filters share the same internals). +

+

More information about the Hald CLUT can be found on Eskil Steenberg’s website +(Hald CLUT author) at http://www.quelsolaar.com/technology/clut.html. +

+ +

28.39.1 Workflow examples

+ + +

28.39.1.1 Hald CLUT video stream

+ +

Generate an identity Hald CLUT stream altered with various effects: +

 
ffmpeg -f lavfi -i haldclutsrc=8 -vf "hue=H=2*PI*t:s=sin(2*PI*t)+1, curves=cross_process" -t 10 -c:v ffv1 clut.nut
+
+ +

Note: make sure you use a lossless codec. +

+

Then use it with haldclut to apply it on some random stream: +

 
ffmpeg -f lavfi -i mandelbrot -i clut.nut -filter_complex '[0][1] haldclut' -t 20 mandelclut.mkv
+
+ +

The Hald CLUT will be applied to the 10 first seconds (duration of +‘clut.nut’), then the latest picture of that CLUT stream will be applied +to the remaining frames of the mandelbrot stream. +

+ +

28.39.1.2 Hald CLUT with preview

+ +

A Hald CLUT is supposed to be a squared image of Level*Level*Level by +Level*Level*Level pixels. For a given Hald CLUT, FFmpeg will select the +biggest possible square starting at the top left of the picture. The remaining +padding pixels (bottom or right) will be ignored. This area can be used to add +a preview of the Hald CLUT. +

+

Typically, the following generated Hald CLUT will be supported by the +haldclut filter: +

+
 
ffmpeg -f lavfi -i haldclutsrc=8 -vf "
+   pad=iw+320 [padded_clut];
+   smptebars=s=320x256, split [a][b];
+   [padded_clut][a] overlay=W-320:h, curves=color_negative [main];
+   [main][b] overlay=W-320" -frames:v 1 clut.png
+
+ +

It contains the original and a preview of the effect of the CLUT: SMPTE color +bars are displayed on the right-top, and below the same color bars processed by +the color changes. +

+

Then, the effect of this Hald CLUT can be visualized with: +

 
ffplay input.mkv -vf "movie=clut.png, [in] haldclut"
+
+ + +

28.40 hflip

+ +

Flip the input video horizontally. +

+

For example to horizontally flip the input video with ffmpeg: +

 
ffmpeg -i in.avi -vf "hflip" out.avi
+
+ + +

28.41 histeq

+

This filter applies a global color histogram equalization on a +per-frame basis. +

+

It can be used to correct video that has a compressed range of pixel +intensities. The filter redistributes the pixel intensities to +equalize their distribution across the intensity range. It may be +viewed as an "automatically adjusting contrast filter". This filter is +useful only for correcting degraded or poorly captured source +video. +

+

The filter accepts the following options: +

+
+
strength
+

Determine the amount of equalization to be applied. As the strength +is reduced, the distribution of pixel intensities more-and-more +approaches that of the input frame. The value must be a float number +in the range [0,1] and defaults to 0.200. +

+
+
intensity
+

Set the maximum intensity that can be generated and scale the output +values appropriately. The strength should be set as desired and then +the intensity can be limited if needed to avoid washing-out. The value +must be a float number in the range [0,1] and defaults to 0.210. +

+
+
antibanding
+

Set the antibanding level. If enabled the filter will randomly vary +the luminance of output pixels by a small amount to avoid banding of +the histogram. Possible values are none, weak or +strong. It defaults to none. +

+
+ + +

28.42 histogram

+ +

Compute and draw a color distribution histogram for the input video. +

+

The computed histogram is a representation of the color component +distribution in an image. +

+

The filter accepts the following options: +

+
+
mode
+

Set histogram mode. +

+

It accepts the following values: +

+
levels
+

Standard histogram that displays the color components distribution in an +image. Displays color graph for each color component. Shows distribution of +the Y, U, V, A or R, G, B components, depending on input format, in the +current frame. Below each graph a color component scale meter is shown. +

+
+
color
+

Displays chroma values (U/V color placement) in a two dimensional +graph (which is called a vectorscope). The brighter a pixel in the +vectorscope, the more pixels of the input frame correspond to that pixel +(i.e., more pixels have this chroma value). The V component is displayed on +the horizontal (X) axis, with the leftmost side being V = 0 and the rightmost +side being V = 255. The U component is displayed on the vertical (Y) axis, +with the top representing U = 0 and the bottom representing U = 255. +

+

The position of a white pixel in the graph corresponds to the chroma value of +a pixel of the input clip. The graph can therefore be used to read the hue +(color flavor) and the saturation (the dominance of the hue in the color). As +the hue of a color changes, it moves around the square. At the center of the +square the saturation is zero, which means that the corresponding pixel has no +color. If the amount of a specific color is increased (while leaving the other +colors unchanged) the saturation increases, and the indicator moves towards +the edge of the square. +

+
+
color2
+

Chroma values in vectorscope, similar as color but actual chroma values +are displayed. +

+
+
waveform
+

Per row/column color component graph. In row mode, the graph on the left side +represents color component value 0 and the right side represents value = 255. +In column mode, the top side represents color component value = 0 and bottom +side represents value = 255. +

+
+

Default value is levels. +

+
+
level_height
+

Set height of level in levels. Default value is 200. +Allowed range is [50, 2048]. +

+
+
scale_height
+

Set height of color scale in levels. Default value is 12. +Allowed range is [0, 40]. +

+
+
step
+

Set step for waveform mode. Smaller values are useful to find out how +many values of the same luminance are distributed across input rows/columns. +Default value is 10. Allowed range is [1, 255]. +

+
+
waveform_mode
+

Set mode for waveform. Can be either row, or column. +Default is row. +

+
+
waveform_mirror
+

Set mirroring mode for waveform. 0 means unmirrored, 1 +means mirrored. In mirrored mode, higher values will be represented on the left +side for row mode and at the top for column mode. Default is +0 (unmirrored). +

+
+
display_mode
+

Set display mode for waveform and levels. +It accepts the following values: +

+
parade
+

Display separate graph for the color components side by side in +row waveform mode or one below the other in column waveform mode +for waveform histogram mode. For levels histogram mode, +per color component graphs are placed below each other. +

+

Using this display mode in waveform histogram mode makes it easy to +spot color casts in the highlights and shadows of an image, by comparing the +contours of the top and the bottom graphs of each waveform. Since whites, +grays, and blacks are characterized by exactly equal amounts of red, green, +and blue, neutral areas of the picture should display three waveforms of +roughly equal width/height. If not, the correction is easy to perform by +making level adjustments to the three waveforms. +

+
+
overlay
+

Presents information identical to that in the parade, except +that the graphs representing color components are superimposed directly +over one another. +

+

This display mode in waveform histogram mode makes it easier to spot +relative differences or similarities in overlapping areas of the color +components that are supposed to be identical, such as neutral whites, grays, +or blacks. +

+
+

Default is parade. +

+
+
levels_mode
+

Set mode for levels. Can be either linear, or logarithmic. +Default is linear. +

+
+ + +

28.42.1 Examples

+ +
    +
  • +Calculate and draw histogram: +
     
    ffplay -i input -vf histogram
    +
    + +
+ +

+

+

28.43 hqdn3d

+ +

High precision/quality 3d denoise filter. This filter aims to reduce +image noise producing smooth images and making still images really +still. It should enhance compressibility. +

+

It accepts the following optional parameters: +

+
+
luma_spatial
+

a non-negative float number which specifies spatial luma strength, +defaults to 4.0 +

+
+
chroma_spatial
+

a non-negative float number which specifies spatial chroma strength, +defaults to 3.0*luma_spatial/4.0 +

+
+
luma_tmp
+

a float number which specifies luma temporal strength, defaults to +6.0*luma_spatial/4.0 +

+
+
chroma_tmp
+

a float number which specifies chroma temporal strength, defaults to +luma_tmp*chroma_spatial/luma_spatial +

+
+ + +

28.44 hue

+ +

Modify the hue and/or the saturation of the input. +

+

This filter accepts the following options: +

+
+
h
+

Specify the hue angle as a number of degrees. It accepts an expression, +and defaults to "0". +

+
+
s
+

Specify the saturation in the [-10,10] range. It accepts an expression and +defaults to "1". +

+
+
H
+

Specify the hue angle as a number of radians. It accepts an +expression, and defaults to "0". +

+
+
b
+

Specify the brightness in the [-10,10] range. It accepts an expression and +defaults to "0". +

+
+ +

‘h’ and ‘H’ are mutually exclusive, and can’t be +specified at the same time. +

+

The ‘b’, ‘h’, ‘H’ and ‘s’ option values are +expressions containing the following constants: +

+
+
n
+

frame count of the input frame starting from 0 +

+
+
pts
+

presentation timestamp of the input frame expressed in time base units +

+
+
r
+

frame rate of the input video, NAN if the input frame rate is unknown +

+
+
t
+

timestamp expressed in seconds, NAN if the input timestamp is unknown +

+
+
tb
+

time base of the input video +

+
+ + +

28.44.1 Examples

+ +
    +
  • +Set the hue to 90 degrees and the saturation to 1.0: +
     
    hue=h=90:s=1
    +
    + +
  • +Same command but expressing the hue in radians: +
     
    hue=H=PI/2:s=1
    +
    + +
  • +Rotate hue and make the saturation swing between 0 +and 2 over a period of 1 second: +
     
    hue="H=2*PI*t: s=sin(2*PI*t)+1"
    +
    + +
  • +Apply a 3 seconds saturation fade-in effect starting at 0: +
     
    hue="s=min(t/3\,1)"
    +
    + +

    The general fade-in expression can be written as: +

     
    hue="s=max(0\, min(1\, (t-START)/DURATION))"
    +
    + +
  • +Apply a 3 seconds saturation fade-out effect starting at 5 seconds: +
     
    hue="s=max(0\, min(1\, (8-t)/3))"
    +
    + +

    The general fade-out expression can be written as: +

     
    hue="s=max(0\, min(1\, (START+DURATION-t)/DURATION))"
    +
    + +
+ + +

28.44.2 Commands

+ +

This filter supports the following commands: +

+
b
+
s
+
h
+
H
+

Modify the hue and/or the saturation and/or brightness of the input video. +The command accepts the same syntax of the corresponding option. +

+

If the specified expression is not valid, it is kept at its current +value. +

+
+ + +

28.45 idet

+ +

Detect video interlacing type. +

+

This filter tries to detect if the input is interlaced or progressive, +top or bottom field first. +

+

The filter accepts the following options: +

+
+
intl_thres
+

Set interlacing threshold. +

+
prog_thres
+

Set progressive threshold. +

+
+ + +

28.46 il

+ +

Deinterleave or interleave fields. +

+

This filter allows one to process interlaced image fields without +deinterlacing them. Deinterleaving splits the input frame into 2 +fields (so-called half pictures). Odd lines are moved to the top +half of the output image, even lines to the bottom half. +You can process (filter) them independently and then re-interleave them. +

+

The filter accepts the following options: +

+
+
luma_mode, l
+
chroma_mode, c
+
alpha_mode, a
+

Available values for luma_mode, chroma_mode and +alpha_mode are: +

+
+
none
+

Do nothing. +

+
+
deinterleave, d
+

Deinterleave fields, placing one above the other. +

+
+
interleave, i
+

Interleave fields. Reverse the effect of deinterleaving. +

+
+

Default value is none. +

+
+
luma_swap, ls
+
chroma_swap, cs
+
alpha_swap, as
+

Swap luma/chroma/alpha fields. Exchange even & odd lines. Default value is 0. +

+
+ + +

28.47 interlace

+ +

Simple interlacing filter from progressive contents. This interleaves upper (or +lower) lines from odd frames with lower (or upper) lines from even frames, +halving the frame rate and preserving image height. A vertical lowpass filter +is always applied in order to avoid twitter effects and reduce moiré patterns. +

+
 
   Original        Original             New Frame
+   Frame 'j'      Frame 'j+1'             (tff)
+  ==========      ===========       ==================
+    Line 0  -------------------->    Frame 'j' Line 0
+    Line 1          Line 1  ---->   Frame 'j+1' Line 1
+    Line 2 --------------------->    Frame 'j' Line 2
+    Line 3          Line 3  ---->   Frame 'j+1' Line 3
+     ...             ...                   ...
+New Frame + 1 will be generated by Frame 'j+2' and Frame 'j+3' and so on
+
+ +

It accepts the following optional parameters: +

+
+
scan
+

determines whether the interlaced frame is taken from the even (tff - default) +or odd (bff) lines of the progressive frame. +

+
+ + +

28.48 kerndeint

+ +

Deinterlace input video by applying Donald Graft’s adaptive kernel +deinterlacing. It works on interlaced parts of a video to produce +progressive frames. +

+

The description of the accepted parameters follows. +

+
+
thresh
+

Set the threshold which affects the filter’s tolerance when +determining if a pixel line must be processed. It must be an integer +in the range [0,255] and defaults to 10. A value of 0 will result in +applying the process to every pixel. +

+
+
map
+

Paint pixels exceeding the threshold value to white if set to 1. +Default is 0. +

+
+
order
+

Set the fields order. Swap fields if set to 1, leave fields alone if +0. Default is 0. +

+
+
sharp
+

Enable additional sharpening if set to 1. Default is 0. +

+
+
twoway
+

Enable twoway sharpening if set to 1. Default is 0. +

+
+ + +

28.48.1 Examples

+ +
    +
  • +Apply default values: +
     
    kerndeint=thresh=10:map=0:order=0:sharp=0:twoway=0
    +
    + +
  • +Enable additional sharpening: +
     
    kerndeint=sharp=1
    +
    + +
  • +Paint processed pixels in white: +
     
    kerndeint=map=1
    +
    +
+ +

+

+

28.49 lut3d

+ +

Apply a 3D LUT to an input video. +

+

The filter accepts the following options: +

+
+
file
+

Set the 3D LUT file name. +

+

Currently supported formats: +

+
3dl
+

AfterEffects +

+
cube
+

Iridas +

+
dat
+

DaVinci +

+
m3d
+

Pandora +

+
+
+
interp
+

Select interpolation mode. +

+

Available values are: +

+
+
nearest
+

Use values from the nearest defined point. +

+
trilinear
+

Interpolate values using the 8 points defining a cube. +

+
tetrahedral
+

Interpolate values using a tetrahedron. +

+
+
+
+ + +

28.50 lut, lutrgb, lutyuv

+ +

Compute a look-up table for binding each pixel component input value +to an output value, and apply it to input video. +

+

lutyuv applies a lookup table to a YUV input video, lutrgb +to an RGB input video. +

+

These filters accept the following options: +

+
c0
+

set first pixel component expression +

+
c1
+

set second pixel component expression +

+
c2
+

set third pixel component expression +

+
c3
+

set fourth pixel component expression, corresponds to the alpha component +

+
+
r
+

set red component expression +

+
g
+

set green component expression +

+
b
+

set blue component expression +

+
a
+

alpha component expression +

+
+
y
+

set Y/luminance component expression +

+
u
+

set U/Cb component expression +

+
v
+

set V/Cr component expression +

+
+ +

Each of them specifies the expression to use for computing the lookup table for +the corresponding pixel component values. +

+

The exact component associated to each of the c* options depends on the +format in input. +

+

The lut filter requires either YUV or RGB pixel formats in input, +lutrgb requires RGB pixel formats in input, and lutyuv requires YUV. +

+

The expressions can contain the following constants and functions: +

+
+
w
+
h
+

the input width and height +

+
+
val
+

input value for the pixel component +

+
+
clipval
+

the input value clipped in the minval-maxval range +

+
+
maxval
+

maximum value for the pixel component +

+
+
minval
+

minimum value for the pixel component +

+
+
negval
+

the negated value for the pixel component value clipped in the +minval-maxval range , it corresponds to the expression +"maxval-clipval+minval" +

+
+
clip(val)
+

the computed value in val clipped in the +minval-maxval range +

+
+
gammaval(gamma)
+

the computed gamma correction value of the pixel component value +clipped in the minval-maxval range, corresponds to the +expression +"pow((clipval-minval)/(maxval-minval)\,gamma)*(maxval-minval)+minval" +

+
+
+ +

All expressions default to "val". +

+ +

28.50.1 Examples

+ +
    +
  • +Negate input video: +
     
    lutrgb="r=maxval+minval-val:g=maxval+minval-val:b=maxval+minval-val"
    +lutyuv="y=maxval+minval-val:u=maxval+minval-val:v=maxval+minval-val"
    +
    + +

    The above is the same as: +

     
    lutrgb="r=negval:g=negval:b=negval"
    +lutyuv="y=negval:u=negval:v=negval"
    +
    + +
  • +Negate luminance: +
     
    lutyuv=y=negval
    +
    + +
  • +Remove chroma components, turns the video into a graytone image: +
     
    lutyuv="u=128:v=128"
    +
    + +
  • +Apply a luma burning effect: +
     
    lutyuv="y=2*val"
    +
    + +
  • +Remove green and blue components: +
     
    lutrgb="g=0:b=0"
    +
    + +
  • +Set a constant alpha channel value on input: +
     
    format=rgba,lutrgb=a="maxval-minval/2"
    +
    + +
  • +Correct luminance gamma by a 0.5 factor: +
     
    lutyuv=y=gammaval(0.5)
    +
    + +
  • +Discard least significant bits of luma: +
     
    lutyuv=y='bitand(val, 128+64+32)'
    +
    +
+ + +

28.51 mergeplanes

+ +

Merge color channel components from several video streams. +

+

The filter accepts up to 4 input streams, and merges selected input +planes to the output video. +

+

This filter accepts the following options: +

+
mapping
+

Set input to output plane mapping. Default is 0. +

+

The mappings is specified as a bitmap. It should be specified as a +hexadecimal number in the form 0xAa[Bb[Cc[Dd]]]. ’Aa’ describes the +mapping for the first plane of the output stream. ’A’ sets the number of +the input stream to use (from 0 to 3), and ’a’ the plane number of the +corresponding input to use (from 0 to 3). The rest of the mappings is +similar, ’Bb’ describes the mapping for the output stream second +plane, ’Cc’ describes the mapping for the output stream third plane and +’Dd’ describes the mapping for the output stream fourth plane. +

+
+
format
+

Set output pixel format. Default is yuva444p. +

+
+ + +

28.51.1 Examples

+ +
    +
  • +Merge three gray video streams of same width and height into single video stream: +
     
    [a0][a1][a2]mergeplanes=0x001020:yuv444p
    +
    + +
  • +Merge 1st yuv444p stream and 2nd gray video stream into yuva444p video stream: +
     
    [a0][a1]mergeplanes=0x00010210:yuva444p
    +
    + +
  • +Swap Y and A plane in yuva444p stream: +
     
    format=yuva444p,mergeplanes=0x03010200:yuva444p
    +
    + +
  • +Swap U and V plane in yuv420p stream: +
     
    format=yuv420p,mergeplanes=0x000201:yuv420p
    +
    + +
  • +Cast a rgb24 clip to yuv444p: +
     
    format=rgb24,mergeplanes=0x000102:yuv444p
    +
    +
+ + +

28.52 mcdeint

+ +

Apply motion-compensation deinterlacing. +

+

It needs one field per frame as input and must thus be used together +with yadif=1/3 or equivalent. +

+

This filter accepts the following options: +

+
mode
+

Set the deinterlacing mode. +

+

It accepts one of the following values: +

+
fast
+
medium
+
slow
+

use iterative motion estimation +

+
extra_slow
+

like ‘slow’, but use multiple reference frames. +

+
+

Default value is ‘fast’. +

+
+
parity
+

Set the picture field parity assumed for the input video. It must be +one of the following values: +

+
+
0, tff
+

assume top field first +

+
1, bff
+

assume bottom field first +

+
+ +

Default value is ‘bff’. +

+
+
qp
+

Set per-block quantization parameter (QP) used by the internal +encoder. +

+

Higher values should result in a smoother motion vector field but less +optimal individual vectors. Default value is 1. +

+
+ + +

28.53 mp

+ +

Apply an MPlayer filter to the input video. +

+

This filter provides a wrapper around some of the filters of +MPlayer/MEncoder. +

+

This wrapper is considered experimental. Some of the wrapped filters +may not work properly and we may drop support for them, as they will +be implemented natively into FFmpeg. Thus you should avoid +depending on them when writing portable scripts. +

+

The filter accepts the parameters: +filter_name[:=]filter_params +

+

filter_name is the name of a supported MPlayer filter, +filter_params is a string containing the parameters accepted by +the named filter. +

+

The list of the currently supported filters follows: +

+
eq2
+
eq
+
fspp
+
ilpack
+
pp7
+
softpulldown
+
uspp
+
+ +

The parameter syntax and behavior for the listed filters are the same +of the corresponding MPlayer filters. For detailed instructions check +the "VIDEO FILTERS" section in the MPlayer manual. +

+ +

28.53.1 Examples

+ +
    +
  • +Adjust gamma, brightness, contrast: +
     
    mp=eq2=1.0:2:0.5
    +
    +
+ +

See also mplayer(1), http://www.mplayerhq.hu/. +

+ +

28.54 mpdecimate

+ +

Drop frames that do not differ greatly from the previous frame in +order to reduce frame rate. +

+

The main use of this filter is for very-low-bitrate encoding +(e.g. streaming over dialup modem), but it could in theory be used for +fixing movies that were inverse-telecined incorrectly. +

+

A description of the accepted options follows. +

+
+
max
+

Set the maximum number of consecutive frames which can be dropped (if +positive), or the minimum interval between dropped frames (if +negative). If the value is 0, the frame is dropped regardless of the +number of previous sequentially dropped frames. +

+

Default value is 0. +

+
+
hi
+
lo
+
frac
+

Set the dropping threshold values. +

+

Values for ‘hi’ and ‘lo’ are for 8x8 pixel blocks and +represent actual pixel value differences, so a threshold of 64 +corresponds to 1 unit of difference for each pixel, or the same spread +out differently over the block. +

+

A frame is a candidate for dropping if no 8x8 blocks differ by more +than a threshold of ‘hi’, and if no more than ‘frac’ blocks (1 +meaning the whole image) differ by more than a threshold of ‘lo’. +

+

Default value for ‘hi’ is 64*12, default value for ‘lo’ is +64*5, and default value for ‘frac’ is 0.33. +

+
+ + + +

28.55 negate

+ +

Negate input video. +

+

This filter accepts an integer in input, if non-zero it negates the +alpha component (if available). The default value in input is 0. +

+ +

28.56 noformat

+ +

Force libavfilter not to use any of the specified pixel formats for the +input to the next filter. +

+

This filter accepts the following parameters: +

+
pix_fmts
+

A ’|’-separated list of pixel format names, for example +"pix_fmts=yuv420p|monow|rgb24". +

+
+
+ + +

28.56.1 Examples

+ +
    +
  • +Force libavfilter to use a format different from yuv420p for the +input to the vflip filter: +
     
    noformat=pix_fmts=yuv420p,vflip
    +
    + +
  • +Convert the input video to any of the formats not contained in the list: +
     
    noformat=yuv420p|yuv444p|yuv410p
    +
    +
+ + +

28.57 noise

+ +

Add noise on video input frame. +

+

The filter accepts the following options: +

+
+
all_seed
+
c0_seed
+
c1_seed
+
c2_seed
+
c3_seed
+

Set noise seed for specific pixel component or all pixel components in case +of all_seed. Default value is 123457. +

+
+
all_strength, alls
+
c0_strength, c0s
+
c1_strength, c1s
+
c2_strength, c2s
+
c3_strength, c3s
+

Set noise strength for specific pixel component or all pixel components in case +all_strength. Default value is 0. Allowed range is [0, 100]. +

+
+
all_flags, allf
+
c0_flags, c0f
+
c1_flags, c1f
+
c2_flags, c2f
+
c3_flags, c3f
+

Set pixel component flags or set flags for all components if all_flags. +Available values for component flags are: +

+
a
+

averaged temporal noise (smoother) +

+
p
+

mix random noise with a (semi)regular pattern +

+
t
+

temporal noise (noise pattern changes between frames) +

+
u
+

uniform noise (gaussian otherwise) +

+
+
+
+ + +

28.57.1 Examples

+ +

Add temporal and uniform noise to input video: +

 
noise=alls=20:allf=t+u
+
+ + +

28.58 null

+ +

Pass the video source unchanged to the output. +

+ +

28.59 ocv

+ +

Apply video transform using libopencv. +

+

To enable this filter install libopencv library and headers and +configure FFmpeg with --enable-libopencv. +

+

This filter accepts the following parameters: +

+
+
filter_name
+

The name of the libopencv filter to apply. +

+
+
filter_params
+

The parameters to pass to the libopencv filter. If not specified the default +values are assumed. +

+
+
+ +

Refer to the official libopencv documentation for more precise +information: +http://opencv.willowgarage.com/documentation/c/image_filtering.html +

+

Follows the list of supported libopencv filters. +

+

+

+

28.59.1 dilate

+ +

Dilate an image by using a specific structuring element. +This filter corresponds to the libopencv function cvDilate. +

+

It accepts the parameters: struct_el|nb_iterations. +

+

struct_el represents a structuring element, and has the syntax: +colsxrows+anchor_xxanchor_y/shape +

+

cols and rows represent the number of columns and rows of +the structuring element, anchor_x and anchor_y the anchor +point, and shape the shape for the structuring element, and +can be one of the values "rect", "cross", "ellipse", "custom". +

+

If the value for shape is "custom", it must be followed by a +string of the form "=filename". The file with name +filename is assumed to represent a binary image, with each +printable character corresponding to a bright pixel. When a custom +shape is used, cols and rows are ignored, the number +or columns and rows of the read file are assumed instead. +

+

The default value for struct_el is "3x3+0x0/rect". +

+

nb_iterations specifies the number of times the transform is +applied to the image, and defaults to 1. +

+

Follow some example: +

 
# use the default values
+ocv=dilate
+
+# dilate using a structuring element with a 5x5 cross, iterate two times
+ocv=filter_name=dilate:filter_params=5x5+2x2/cross|2
+
+# read the shape from the file diamond.shape, iterate two times
+# the file diamond.shape may contain a pattern of characters like this:
+#   *
+#  ***
+# *****
+#  ***
+#   *
+# the specified cols and rows are ignored (but not the anchor point coordinates)
+ocv=dilate:0x0+2x2/custom=diamond.shape|2
+
+ + +

28.59.2 erode

+ +

Erode an image by using a specific structuring element. +This filter corresponds to the libopencv function cvErode. +

+

The filter accepts the parameters: struct_el:nb_iterations, +with the same syntax and semantics as the dilate filter. +

+ +

28.59.3 smooth

+ +

Smooth the input video. +

+

The filter takes the following parameters: +type|param1|param2|param3|param4. +

+

type is the type of smooth filter to apply, and can be one of +the following values: "blur", "blur_no_scale", "median", "gaussian", +"bilateral". The default value is "gaussian". +

+

param1, param2, param3, and param4 are +parameters whose meanings depend on smooth type. param1 and +param2 accept integer positive values or 0, param3 and +param4 accept float values. +

+

The default value for param1 is 3, the default value for the +other parameters is 0. +

+

These parameters correspond to the parameters assigned to the +libopencv function cvSmooth. +

+

+

+

28.60 overlay

+ +

Overlay one video on top of another. +

+

It takes two inputs and one output, the first input is the "main" +video on which the second input is overlayed. +

+

This filter accepts the following parameters: +

+

A description of the accepted options follows. +

+
+
x
+
y
+

Set the expression for the x and y coordinates of the overlayed video +on the main video. Default value is "0" for both expressions. In case +the expression is invalid, it is set to a huge value (meaning that the +overlay will not be displayed within the output visible area). +

+
+
eof_action
+

The action to take when EOF is encountered on the secondary input, accepts one +of the following values: +

+
+
repeat
+

repeat the last frame (the default) +

+
endall
+

end both streams +

+
pass
+

pass through the main input +

+
+ +
+
eval
+

Set when the expressions for ‘x’, and ‘y’ are evaluated. +

+

It accepts the following values: +

+
init
+

only evaluate expressions once during the filter initialization or +when a command is processed +

+
+
frame
+

evaluate expressions for each incoming frame +

+
+ +

Default value is ‘frame’. +

+
+
shortest
+

If set to 1, force the output to terminate when the shortest input +terminates. Default value is 0. +

+
+
format
+

Set the format for the output video. +

+

It accepts the following values: +

+
yuv420
+

force YUV420 output +

+
+
yuv422
+

force YUV422 output +

+
+
yuv444
+

force YUV444 output +

+
+
rgb
+

force RGB output +

+
+ +

Default value is ‘yuv420’. +

+
+
rgb (deprecated)
+

If set to 1, force the filter to accept inputs in the RGB +color space. Default value is 0. This option is deprecated, use +‘format’ instead. +

+
+
repeatlast
+

If set to 1, force the filter to draw the last overlay frame over the +main input until the end of the stream. A value of 0 disables this +behavior. Default value is 1. +

+
+ +

The ‘x’, and ‘y’ expressions can contain the following +parameters. +

+
+
main_w, W
+
main_h, H
+

main input width and height +

+
+
overlay_w, w
+
overlay_h, h
+

overlay input width and height +

+
+
x
+
y
+

the computed values for x and y. They are evaluated for +each new frame. +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values of the output +format. For example for the pixel format "yuv422p" hsub is 2 and +vsub is 1. +

+
+
n
+

the number of input frame, starting from 0 +

+
+
pos
+

the position in the file of the input frame, NAN if unknown +

+
+
t
+

timestamp expressed in seconds, NAN if the input timestamp is unknown +

+
+
+ +

Note that the n, pos, t variables are available only +when evaluation is done per frame, and will evaluate to NAN +when ‘eval’ is set to ‘init’. +

+

Be aware that frames are taken from each input video in timestamp +order, hence, if their initial timestamps differ, it is a good idea +to pass the two inputs through a setpts=PTS-STARTPTS filter to +have them begin at the same zero timestamp, as the example for +the movie filter does. +

+

You can chain together more overlays but you should test the +efficiency of such approach. +

+ +

28.60.1 Commands

+ +

This filter supports the following commands: +

+
x
+
y
+

Modify the x and y of the overlay input. +The command accepts the same syntax of the corresponding option. +

+

If the specified expression is not valid, it is kept at its current +value. +

+
+ + +

28.60.2 Examples

+ +
    +
  • +Draw the overlay at 10 pixels from the bottom right corner of the main +video: +
     
    overlay=main_w-overlay_w-10:main_h-overlay_h-10
    +
    + +

    Using named options the example above becomes: +

     
    overlay=x=main_w-overlay_w-10:y=main_h-overlay_h-10
    +
    + +
  • +Insert a transparent PNG logo in the bottom left corner of the input, +using the ffmpeg tool with the -filter_complex option: +
     
    ffmpeg -i input -i logo -filter_complex 'overlay=10:main_h-overlay_h-10' output
    +
    + +
  • +Insert 2 different transparent PNG logos (second logo on bottom +right corner) using the ffmpeg tool: +
     
    ffmpeg -i input -i logo1 -i logo2 -filter_complex 'overlay=x=10:y=H-h-10,overlay=x=W-w-10:y=H-h-10' output
    +
    + +
  • +Add a transparent color layer on top of the main video, WxH +must specify the size of the main input to the overlay filter: +
     
    color=color=red@.3:size=WxH [over]; [in][over] overlay [out]
    +
    + +
  • +Play an original video and a filtered version (here with the deshake +filter) side by side using the ffplay tool: +
     
    ffplay input.avi -vf 'split[a][b]; [a]pad=iw*2:ih[src]; [b]deshake[filt]; [src][filt]overlay=w'
    +
    + +

    The above command is the same as: +

     
    ffplay input.avi -vf 'split[b], pad=iw*2[src], [b]deshake, [src]overlay=w'
    +
    + +
  • +Make a sliding overlay appearing from the left to the right top part of the +screen starting since time 2: +
     
    overlay=x='if(gte(t,2), -w+(t-2)*20, NAN)':y=0
    +
    + +
  • +Compose output by putting two input videos side to side: +
     
    ffmpeg -i left.avi -i right.avi -filter_complex "
    +nullsrc=size=200x100 [background];
    +[0:v] setpts=PTS-STARTPTS, scale=100x100 [left];
    +[1:v] setpts=PTS-STARTPTS, scale=100x100 [right];
    +[background][left]       overlay=shortest=1       [background+left];
    +[background+left][right] overlay=shortest=1:x=100 [left+right]
    +"
    +
    + +
  • +mask 10-20 seconds of a video by applying the delogo filter to a section +
     
    ffmpeg -i test.avi -codec:v:0 wmv2 -ar 11025 -b:v 9000k
    +-vf '[in]split[split_main][split_delogo];[split_delogo]trim=start=360:end=371,delogo=0:0:640:480[delogoed];[split_main][delogoed]overlay=eof_action=pass[out]'
    +masked.avi
    +
    + +
  • +Chain several overlays in cascade: +
     
    nullsrc=s=200x200 [bg];
    +testsrc=s=100x100, split=4 [in0][in1][in2][in3];
    +[in0] lutrgb=r=0, [bg]   overlay=0:0     [mid0];
    +[in1] lutrgb=g=0, [mid0] overlay=100:0   [mid1];
    +[in2] lutrgb=b=0, [mid1] overlay=0:100   [mid2];
    +[in3] null,       [mid2] overlay=100:100 [out0]
    +
    + +
+ + +

28.61 owdenoise

+ +

Apply Overcomplete Wavelet denoiser. +

+

The filter accepts the following options: +

+
+
depth
+

Set depth. +

+

Larger depth values will denoise lower frequency components more, but +slow down filtering. +

+

Must be an int in the range 8-16, default is 8. +

+
+
luma_strength, ls
+

Set luma strength. +

+

Must be a double value in the range 0-1000, default is 1.0. +

+
+
chroma_strength, cs
+

Set chroma strength. +

+

Must be a double value in the range 0-1000, default is 1.0. +

+
+ + +

28.62 pad

+ +

Add paddings to the input image, and place the original input at the +given coordinates x, y. +

+

This filter accepts the following parameters: +

+
+
width, w
+
height, h
+

Specify an expression for the size of the output image with the +paddings added. If the value for width or height is 0, the +corresponding input size is used for the output. +

+

The width expression can reference the value set by the +height expression, and vice versa. +

+

The default value of width and height is 0. +

+
+
x
+
y
+

Specify an expression for the offsets where to place the input image +in the padded area with respect to the top/left border of the output +image. +

+

The x expression can reference the value set by the y +expression, and vice versa. +

+

The default value of x and y is 0. +

+
+
color
+

Specify the color of the padded area. For the syntax of this option, +check the "Color" section in the ffmpeg-utils manual. +

+

The default value of color is "black". +

+
+ +

The value for the width, height, x, and y +options are expressions containing the following constants: +

+
+
in_w
+
in_h
+

the input video width and height +

+
+
iw
+
ih
+

same as in_w and in_h +

+
+
out_w
+
out_h
+

the output width and height, that is the size of the padded area as +specified by the width and height expressions +

+
+
ow
+
oh
+

same as out_w and out_h +

+
+
x
+
y
+

x and y offsets as specified by the x and y +expressions, or NAN if not yet specified +

+
+
a
+

same as iw / ih +

+
+
sar
+

input sample aspect ratio +

+
+
dar
+

input display aspect ratio, it is the same as (iw / ih) * sar +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+ + +

28.62.1 Examples

+ +
    +
  • +Add paddings with color "violet" to the input video. Output video +size is 640x480, the top-left corner of the input video is placed at +column 0, row 40: +
     
    pad=640:480:0:40:violet
    +
    + +

    The example above is equivalent to the following command: +

     
    pad=width=640:height=480:x=0:y=40:color=violet
    +
    + +
  • +Pad the input to get an output with dimensions increased by 3/2, +and put the input video at the center of the padded area: +
     
    pad="3/2*iw:3/2*ih:(ow-iw)/2:(oh-ih)/2"
    +
    + +
  • +Pad the input to get a squared output with size equal to the maximum +value between the input width and height, and put the input video at +the center of the padded area: +
     
    pad="max(iw\,ih):ow:(ow-iw)/2:(oh-ih)/2"
    +
    + +
  • +Pad the input to get a final w/h ratio of 16:9: +
     
    pad="ih*16/9:ih:(ow-iw)/2:(oh-ih)/2"
    +
    + +
  • +In case of anamorphic video, in order to set the output display aspect +correctly, it is necessary to use sar in the expression, +according to the relation: +
     
    (ih * X / ih) * sar = output_dar
    +X = output_dar / sar
    +
    + +

    Thus the previous example needs to be modified to: +

     
    pad="ih*16/9/sar:ih:(ow-iw)/2:(oh-ih)/2"
    +
    + +
  • +Double output size and put the input video in the bottom-right +corner of the output padded area: +
     
    pad="2*iw:2*ih:ow-iw:oh-ih"
    +
    +
+ + +

28.63 perspective

+ +

Correct perspective of video not recorded perpendicular to the screen. +

+

A description of the accepted parameters follows. +

+
+
x0
+
y0
+
x1
+
y1
+
x2
+
y2
+
x3
+
y3
+

Set coordinates expression for top left, top right, bottom left and bottom right corners. +Default values are 0:0:W:0:0:H:W:H with which perspective will remain unchanged. +

+

The expressions can use the following variables: +

+
+
W
+
H
+

the width and height of video frame. +

+
+ +
+
interpolation
+

Set interpolation for perspective correction. +

+

It accepts the following values: +

+
linear
+
cubic
+
+ +

Default value is ‘linear’. +

+
+ + +

28.64 phase

+ +

Delay interlaced video by one field time so that the field order changes. +

+

The intended use is to fix PAL movies that have been captured with the +opposite field order to the film-to-video transfer. +

+

A description of the accepted parameters follows. +

+
+
mode
+

Set phase mode. +

+

It accepts the following values: +

+
t
+

Capture field order top-first, transfer bottom-first. +Filter will delay the bottom field. +

+
+
b
+

Capture field order bottom-first, transfer top-first. +Filter will delay the top field. +

+
+
p
+

Capture and transfer with the same field order. This mode only exists +for the documentation of the other options to refer to, but if you +actually select it, the filter will faithfully do nothing. +

+
+
a
+

Capture field order determined automatically by field flags, transfer +opposite. +Filter selects among ‘t’ and ‘b’ modes on a frame by frame +basis using field flags. If no field information is available, +then this works just like ‘u’. +

+
+
u
+

Capture unknown or varying, transfer opposite. +Filter selects among ‘t’ and ‘b’ on a frame by frame basis by +analyzing the images and selecting the alternative that produces best +match between the fields. +

+
+
T
+

Capture top-first, transfer unknown or varying. +Filter selects among ‘t’ and ‘p’ using image analysis. +

+
+
B
+

Capture bottom-first, transfer unknown or varying. +Filter selects among ‘b’ and ‘p’ using image analysis. +

+
+
A
+

Capture determined by field flags, transfer unknown or varying. +Filter selects among ‘t’, ‘b’ and ‘p’ using field flags and +image analysis. If no field information is available, then this works just +like ‘U’. This is the default mode. +

+
+
U
+

Both capture and transfer unknown or varying. +Filter selects among ‘t’, ‘b’ and ‘p’ using image analysis only. +

+
+
+
+ + +

28.65 pixdesctest

+ +

Pixel format descriptor test filter, mainly useful for internal +testing. The output video should be equal to the input video. +

+

For example: +

 
format=monow, pixdesctest
+
+ +

can be used to test the monowhite pixel format descriptor definition. +

+ +

28.66 pp

+ +

Enable the specified chain of postprocessing subfilters using libpostproc. This +library should be automatically selected with a GPL build (--enable-gpl). +Subfilters must be separated by ’/’ and can be disabled by prepending a ’-’. +Each subfilter and some options have a short and a long name that can be used +interchangeably, i.e. dr/dering are the same. +

+

The filters accept the following options: +

+
+
subfilters
+

Set postprocessing subfilters string. +

+
+ +

All subfilters share common options to determine their scope: +

+
+
a/autoq
+

Honor the quality commands for this subfilter. +

+
+
c/chrom
+

Do chrominance filtering, too (default). +

+
+
y/nochrom
+

Do luminance filtering only (no chrominance). +

+
+
n/noluma
+

Do chrominance filtering only (no luminance). +

+
+ +

These options can be appended after the subfilter name, separated by a ’|’. +

+

Available subfilters are: +

+
+
hb/hdeblock[|difference[|flatness]]
+

Horizontal deblocking filter +

+
difference
+

Difference factor where higher values mean more deblocking (default: 32). +

+
flatness
+

Flatness threshold where lower values mean more deblocking (default: 39). +

+
+ +
+
vb/vdeblock[|difference[|flatness]]
+

Vertical deblocking filter +

+
difference
+

Difference factor where higher values mean more deblocking (default: 32). +

+
flatness
+

Flatness threshold where lower values mean more deblocking (default: 39). +

+
+ +
+
ha/hadeblock[|difference[|flatness]]
+

Accurate horizontal deblocking filter +

+
difference
+

Difference factor where higher values mean more deblocking (default: 32). +

+
flatness
+

Flatness threshold where lower values mean more deblocking (default: 39). +

+
+ +
+
va/vadeblock[|difference[|flatness]]
+

Accurate vertical deblocking filter +

+
difference
+

Difference factor where higher values mean more deblocking (default: 32). +

+
flatness
+

Flatness threshold where lower values mean more deblocking (default: 39). +

+
+
+
+ +

The horizontal and vertical deblocking filters share the difference and +flatness values so you cannot set different horizontal and vertical +thresholds. +

+
+
h1/x1hdeblock
+

Experimental horizontal deblocking filter +

+
+
v1/x1vdeblock
+

Experimental vertical deblocking filter +

+
+
dr/dering
+

Deringing filter +

+
+
tn/tmpnoise[|threshold1[|threshold2[|threshold3]]], temporal noise reducer
+
+
threshold1
+

larger -> stronger filtering +

+
threshold2
+

larger -> stronger filtering +

+
threshold3
+

larger -> stronger filtering +

+
+ +
+
al/autolevels[:f/fullyrange], automatic brightness / contrast correction
+
+
f/fullyrange
+

Stretch luminance to 0-255. +

+
+ +
+
lb/linblenddeint
+

Linear blend deinterlacing filter that deinterlaces the given block by +filtering all lines with a (1 2 1) filter. +

+
+
li/linipoldeint
+

Linear interpolating deinterlacing filter that deinterlaces the given block by +linearly interpolating every second line. +

+
+
ci/cubicipoldeint
+

Cubic interpolating deinterlacing filter deinterlaces the given block by +cubically interpolating every second line. +

+
+
md/mediandeint
+

Median deinterlacing filter that deinterlaces the given block by applying a +median filter to every second line. +

+
+
fd/ffmpegdeint
+

FFmpeg deinterlacing filter that deinterlaces the given block by filtering every +second line with a (-1 4 2 4 -1) filter. +

+
+
l5/lowpass5
+

Vertically applied FIR lowpass deinterlacing filter that deinterlaces the given +block by filtering all lines with a (-1 2 6 2 -1) filter. +

+
+
fq/forceQuant[|quantizer]
+

Overrides the quantizer table from the input with the constant quantizer you +specify. +

+
quantizer
+

Quantizer to use +

+
+ +
+
de/default
+

Default pp filter combination (hb|a,vb|a,dr|a) +

+
+
fa/fast
+

Fast pp filter combination (h1|a,v1|a,dr|a) +

+
+
ac
+

High quality pp filter combination (ha|a|128|7,va|a,dr|a) +

+
+ + +

28.66.1 Examples

+ +
    +
  • +Apply horizontal and vertical deblocking, deringing and automatic +brightness/contrast: +
     
    pp=hb/vb/dr/al
    +
    + +
  • +Apply default filters without brightness/contrast correction: +
     
    pp=de/-al
    +
    + +
  • +Apply default filters and temporal denoiser: +
     
    pp=default/tmpnoise|1|2|3
    +
    + +
  • +Apply deblocking on luminance only, and switch vertical deblocking on or off +automatically depending on available CPU time: +
     
    pp=hb|y/vb|a
    +
    +
+ + +

28.67 psnr

+ +

Obtain the average, maximum and minimum PSNR (Peak Signal to Noise +Ratio) between two input videos. +

+

This filter takes in input two input videos, the first input is +considered the "main" source and is passed unchanged to the +output. The second input is used as a "reference" video for computing +the PSNR. +

+

Both video inputs must have the same resolution and pixel format for +this filter to work correctly. Also it assumes that both inputs +have the same number of frames, which are compared one by one. +

+

The obtained average PSNR is printed through the logging system. +

+

The filter stores the accumulated MSE (mean squared error) of each +frame, and at the end of the processing it is averaged across all frames +equally, and the following formula is applied to obtain the PSNR: +

+
 
PSNR = 10*log10(MAX^2/MSE)
+
+ +

Where MAX is the average of the maximum values of each component of the +image. +

+

The description of the accepted parameters follows. +

+
+
stats_file, f
+

If specified the filter will use the named file to save the PSNR of +each individual frame. +

+
+ +

The file printed if stats_file is selected contains a sequence of +key/value pairs of the form key:value for each compared +couple of frames. +

+

A description of each shown parameter follows: +

+
+
n
+

sequential number of the input frame, starting from 1 +

+
+
mse_avg
+

Mean Square Error pixel-by-pixel average difference of the compared +frames, averaged over all the image components. +

+
+
mse_y, mse_u, mse_v, mse_r, mse_g, mse_b, mse_a
+

Mean Square Error pixel-by-pixel average difference of the compared +frames for the component specified by the suffix. +

+
+
psnr_y, psnr_u, psnr_v, psnr_r, psnr_g, psnr_b, psnr_a
+

Peak Signal to Noise ratio of the compared frames for the component +specified by the suffix. +

+
+ +

For example: +

 
movie=ref_movie.mpg, setpts=PTS-STARTPTS [main];
+[main][ref] psnr="stats_file=stats.log" [out]
+
+ +

In this example the input file being processed is compared with the +reference file ‘ref_movie.mpg’. The PSNR of each individual frame +is stored in ‘stats.log’. +

+

+

+

28.68 pullup

+ +

Pulldown reversal (inverse telecine) filter, capable of handling mixed +hard-telecine, 24000/1001 fps progressive, and 30000/1001 fps progressive +content. +

+

The pullup filter is designed to take advantage of future context in making +its decisions. This filter is stateless in the sense that it does not lock +onto a pattern to follow, but it instead looks forward to the following +fields in order to identify matches and rebuild progressive frames. +

+

To produce content with an even framerate, insert the fps filter after +pullup, use fps=24000/1001 if the input frame rate is 29.97fps, +fps=24 for 30fps and the (rare) telecined 25fps input. +

+

The filter accepts the following options: +

+
+
jl
+
jr
+
jt
+
jb
+

These options set the amount of "junk" to ignore at the left, right, top, and +bottom of the image, respectively. Left and right are in units of 8 pixels, +while top and bottom are in units of 2 lines. +The default is 8 pixels on each side. +

+
+
sb
+

Set the strict breaks. Setting this option to 1 will reduce the chances of +filter generating an occasional mismatched frame, but it may also cause an +excessive number of frames to be dropped during high motion sequences. +Conversely, setting it to -1 will make filter match fields more easily. +This may help processing of video where there is slight blurring between +the fields, but may also cause there to be interlaced frames in the output. +Default value is 0. +

+
+
mp
+

Set the metric plane to use. It accepts the following values: +

+
l
+

Use luma plane. +

+
+
u
+

Use chroma blue plane. +

+
+
v
+

Use chroma red plane. +

+
+ +

This option may be set to use chroma plane instead of the default luma plane +for doing filter’s computations. This may improve accuracy on very clean +source material, but more likely will decrease accuracy, especially if there +is chroma noise (rainbow effect) or any grayscale video. +The main purpose of setting ‘mp’ to a chroma plane is to reduce CPU +load and make pullup usable in realtime on slow machines. +

+
+ +

For best results (without duplicated frames in the output file) it is +necessary to change the output frame rate. For example, to inverse +telecine NTSC input: +

 
ffmpeg -i input -vf pullup -r 24000/1001 ...
+
+ + +

28.69 removelogo

+ +

Suppress a TV station logo, using an image file to determine which +pixels comprise the logo. It works by filling in the pixels that +comprise the logo with neighboring pixels. +

+

The filter accepts the following options: +

+
+
filename, f
+

Set the filter bitmap file, which can be any image format supported by +libavformat. The width and height of the image file must match those of the +video stream being processed. +

+
+ +

Pixels in the provided bitmap image with a value of zero are not +considered part of the logo, non-zero pixels are considered part of +the logo. If you use white (255) for the logo and black (0) for the +rest, you will be safe. For making the filter bitmap, it is +recommended to take a screen capture of a black frame with the logo +visible, and then using a threshold filter followed by the erode +filter once or twice. +

+

If needed, little splotches can be fixed manually. Remember that if +logo pixels are not covered, the filter quality will be much +reduced. Marking too many pixels as part of the logo does not hurt as +much, but it will increase the amount of blurring needed to cover over +the image and will destroy more information than necessary, and extra +pixels will slow things down on a large logo. +

+ +

28.70 rotate

+ +

Rotate video by an arbitrary angle expressed in radians. +

+

The filter accepts the following options: +

+

A description of the optional parameters follows. +

+
angle, a
+

Set an expression for the angle by which to rotate the input video +clockwise, expressed as a number of radians. A negative value will +result in a counter-clockwise rotation. By default it is set to "0". +

+

This expression is evaluated for each frame. +

+
+
out_w, ow
+

Set the output width expression, default value is "iw". +This expression is evaluated just once during configuration. +

+
+
out_h, oh
+

Set the output height expression, default value is "ih". +This expression is evaluated just once during configuration. +

+
+
bilinear
+

Enable bilinear interpolation if set to 1, a value of 0 disables +it. Default value is 1. +

+
+
fillcolor, c
+

Set the color used to fill the output area not covered by the rotated +image. For the general syntax of this option, check the "Color" section in the +ffmpeg-utils manual. If the special value "none" is selected then no +background is printed (useful for example if the background is never shown). +

+

Default value is "black". +

+
+ +

The expressions for the angle and the output size can contain the +following constants and functions: +

+
+
n
+

sequential number of the input frame, starting from 0. It is always NAN +before the first frame is filtered. +

+
+
t
+

time in seconds of the input frame, it is set to 0 when the filter is +configured. It is always NAN before the first frame is filtered. +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+
in_w, iw
+
in_h, ih
+

the input video width and height +

+
+
out_w, ow
+
out_h, oh
+

the output width and height, that is the size of the padded area as +specified by the width and height expressions +

+
+
rotw(a)
+
roth(a)
+

the minimal width/height required for completely containing the input +video rotated by a radians. +

+

These are only available when computing the ‘out_w’ and +‘out_h’ expressions. +

+
+ + +

28.70.1 Examples

+ +
    +
  • +Rotate the input by PI/6 radians clockwise: +
     
    rotate=PI/6
    +
    + +
  • +Rotate the input by PI/6 radians counter-clockwise: +
     
    rotate=-PI/6
    +
    + +
  • +Rotate the input by 45 degrees clockwise: +
     
    rotate=45*PI/180
    +
    + +
  • +Apply a constant rotation with period T, starting from an angle of PI/3: +
     
    rotate=PI/3+2*PI*t/T
    +
    + +
  • +Make the input video rotation oscillating with a period of T +seconds and an amplitude of A radians: +
     
    rotate=A*sin(2*PI/T*t)
    +
    + +
  • +Rotate the video, output size is chosen so that the whole rotating +input video is always completely contained in the output: +
     
    rotate='2*PI*t:ow=hypot(iw,ih):oh=ow'
    +
    + +
  • +Rotate the video, reduce the output size so that no background is ever +shown: +
     
    rotate=2*PI*t:ow='min(iw,ih)/sqrt(2)':oh=ow:c=none
    +
    +
+ + +

28.70.2 Commands

+ +

The filter supports the following commands: +

+
+
a, angle
+

Set the angle expression. +The command accepts the same syntax of the corresponding option. +

+

If the specified expression is not valid, it is kept at its current +value. +

+
+ + +

28.71 sab

+ +

Apply Shape Adaptive Blur. +

+

The filter accepts the following options: +

+
+
luma_radius, lr
+

Set luma blur filter strength, must be a value in range 0.1-4.0, default +value is 1.0. A greater value will result in a more blurred image, and +in slower processing. +

+
+
luma_pre_filter_radius, lpfr
+

Set luma pre-filter radius, must be a value in the 0.1-2.0 range, default +value is 1.0. +

+
+
luma_strength, ls
+

Set luma maximum difference between pixels to still be considered, must +be a value in the 0.1-100.0 range, default value is 1.0. +

+
+
chroma_radius, cr
+

Set chroma blur filter strength, must be a value in range 0.1-4.0. A +greater value will result in a more blurred image, and in slower +processing. +

+
+
chroma_pre_filter_radius, cpfr
+

Set chroma pre-filter radius, must be a value in the 0.1-2.0 range. +

+
+
chroma_strength, cs
+

Set chroma maximum difference between pixels to still be considered, +must be a value in the 0.1-100.0 range. +

+
+ +

Each chroma option value, if not explicitly specified, is set to the +corresponding luma option value. +

+

+

+

28.72 scale

+ +

Scale (resize) the input video, using the libswscale library. +

+

The scale filter forces the output display aspect ratio to be the same +of the input, by changing the output sample aspect ratio. +

+

If the input image format is different from the format requested by +the next filter, the scale filter will convert the input to the +requested format. +

+ +

28.72.1 Options

+

The filter accepts the following options, or any of the options +supported by the libswscale scaler. +

+

See (ffmpeg-scaler)scaler_options for +the complete list of scaler options. +

+
+
width, w
+
height, h
+

Set the output video dimension expression. Default value is the input +dimension. +

+

If the value is 0, the input width is used for the output. +

+

If one of the values is -1, the scale filter will use a value that +maintains the aspect ratio of the input image, calculated from the +other specified dimension. If both of them are -1, the input size is +used. +

+

If one of the values is -n with n > 1, the scale filter will also use a value +that maintains the aspect ratio of the input image, calculated from the other +specified dimension. After that it will, however, make sure that the calculated +dimension is divisible by n and adjust the value if necessary. +

+

See below for the list of accepted constants for use in the dimension +expression. +

+
+
interl
+

Set the interlacing mode. It accepts the following values: +

+
+
1
+

Force interlaced aware scaling. +

+
+
0
+

Do not apply interlaced scaling. +

+
+
-1
+

Select interlaced aware scaling depending on whether the source frames +are flagged as interlaced or not. +

+
+ +

Default value is ‘0’. +

+
+
flags
+

Set libswscale scaling flags. See +(ffmpeg-scaler)sws_flags for the +complete list of values. If not explicitly specified the filter applies +the default flags. +

+
+
size, s
+

Set the video size. For the syntax of this option, check the "Video size" +section in the ffmpeg-utils manual. +

+
+
in_color_matrix
+
out_color_matrix
+

Set in/output YCbCr color space type. +

+

This allows the autodetected value to be overridden as well as allows forcing +a specific value used for the output and encoder. +

+

If not specified, the color space type depends on the pixel format. +

+

Possible values: +

+
+
auto
+

Choose automatically. +

+
+
bt709
+

Format conforming to International Telecommunication Union (ITU) +Recommendation BT.709. +

+
+
fcc
+

Set color space conforming to the United States Federal Communications +Commission (FCC) Code of Federal Regulations (CFR) Title 47 (2003) 73.682 (a). +

+
+
bt601
+

Set color space conforming to: +

+
    +
  • +ITU Radiocommunication Sector (ITU-R) Recommendation BT.601 + +
  • +ITU-R Rec. BT.470-6 (1998) Systems B, B1, and G + +
  • +Society of Motion Picture and Television Engineers (SMPTE) ST 170:2004 + +
+ +
+
smpte240m
+

Set color space conforming to SMPTE ST 240:1999. +

+
+ +
+
in_range
+
out_range
+

Set in/output YCbCr sample range. +

+

This allows the autodetected value to be overridden as well as allows forcing +a specific value used for the output and encoder. If not specified, the +range depends on the pixel format. Possible values: +

+
+
auto
+

Choose automatically. +

+
+
jpeg/full/pc
+

Set full range (0-255 in case of 8-bit luma). +

+
+
mpeg/tv
+

Set "MPEG" range (16-235 in case of 8-bit luma). +

+
+ +
+
force_original_aspect_ratio
+

Enable decreasing or increasing output video width or height if necessary to +keep the original aspect ratio. Possible values: +

+
+
disable
+

Scale the video as specified and disable this feature. +

+
+
decrease
+

The output video dimensions will automatically be decreased if needed. +

+
+
increase
+

The output video dimensions will automatically be increased if needed. +

+
+
+ +

One useful instance of this option is that when you know a specific device’s +maximum allowed resolution, you can use this to limit the output video to +that, while retaining the aspect ratio. For example, device A allows +1280x720 playback, and your video is 1920x800. Using this option (set it to +decrease) and specifying 1280x720 to the command line makes the output +1280x533. +

+

Please note that this is a different thing than specifying -1 for ‘w’ +or ‘h’, you still need to specify the output resolution for this option +to work. +

+
+
+ +

The values of the ‘w’ and ‘h’ options are expressions +containing the following constants: +

+
+
in_w
+
in_h
+

the input width and height +

+
+
iw
+
ih
+

same as in_w and in_h +

+
+
out_w
+
out_h
+

the output (scaled) width and height +

+
+
ow
+
oh
+

same as out_w and out_h +

+
+
a
+

same as iw / ih +

+
+
sar
+

input sample aspect ratio +

+
+
dar
+

input display aspect ratio. Calculated from (iw / ih) * sar. +

+
+
hsub
+
vsub
+

horizontal and vertical input chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+
ohsub
+
ovsub
+

horizontal and vertical output chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+ + +

28.72.2 Examples

+ +
    +
  • +Scale the input video to a size of 200x100: +
     
    scale=w=200:h=100
    +
    + +

    This is equivalent to: +

     
    scale=200:100
    +
    + +

    or: +

     
    scale=200x100
    +
    + +
  • +Specify a size abbreviation for the output size: +
     
    scale=qcif
    +
    + +

    which can also be written as: +

     
    scale=size=qcif
    +
    + +
  • +Scale the input to 2x: +
     
    scale=w=2*iw:h=2*ih
    +
    + +
  • +The above is the same as: +
     
    scale=2*in_w:2*in_h
    +
    + +
  • +Scale the input to 2x with forced interlaced scaling: +
     
    scale=2*iw:2*ih:interl=1
    +
    + +
  • +Scale the input to half size: +
     
    scale=w=iw/2:h=ih/2
    +
    + +
  • +Increase the width, and set the height to the same size: +
     
    scale=3/2*iw:ow
    +
    + +
  • +Seek for Greek harmony: +
     
    scale=iw:1/PHI*iw
    +scale=ih*PHI:ih
    +
    + +
  • +Increase the height, and set the width to 3/2 of the height: +
     
    scale=w=3/2*oh:h=3/5*ih
    +
    + +
  • +Increase the size, but make the size a multiple of the chroma +subsample values: +
     
    scale="trunc(3/2*iw/hsub)*hsub:trunc(3/2*ih/vsub)*vsub"
    +
    + +
  • +Increase the width to a maximum of 500 pixels, keep the same input +aspect ratio: +
     
    scale=w='min(500\, iw*3/2):h=-1'
    +
    +
+ + +

28.73 separatefields

+ +

The separatefields takes a frame-based video input and splits +each frame into its components fields, producing a new half height clip +with twice the frame rate and twice the frame count. +

+

This filter uses field-dominance information in the frame to decide which +of each pair of fields to place first in the output. +If it gets it wrong use setfield filter before separatefields filter. +

+ +

28.74 setdar, setsar

+ +

The setdar filter sets the Display Aspect Ratio for the filter +output video. +

+

This is done by changing the specified Sample (aka Pixel) Aspect +Ratio, according to the following equation: +

 
DAR = HORIZONTAL_RESOLUTION / VERTICAL_RESOLUTION * SAR
+
+ +

Keep in mind that the setdar filter does not modify the pixel +dimensions of the video frame. Also the display aspect ratio set by +this filter may be changed by later filters in the filterchain, +e.g. in case of scaling or if another "setdar" or a "setsar" filter is +applied. +

+

The setsar filter sets the Sample (aka Pixel) Aspect Ratio for +the filter output video. +

+

Note that as a consequence of the application of this filter, the +output display aspect ratio will change according to the equation +above. +

+

Keep in mind that the sample aspect ratio set by the setsar +filter may be changed by later filters in the filterchain, e.g. if +another "setsar" or a "setdar" filter is applied. +

+

The filters accept the following options: +

+
+
r, ratio, dar (setdar only), sar (setsar only)
+

Set the aspect ratio used by the filter. +

+

The parameter can be a floating point number string, an expression, or +a string of the form num:den, where num and +den are the numerator and denominator of the aspect ratio. If +the parameter is not specified, it is assumed the value "0". +In case the form "num:den" is used, the : character +should be escaped. +

+
+
max
+

Set the maximum integer value to use for expressing numerator and +denominator when reducing the expressed aspect ratio to a rational. +Default value is 100. +

+
+
+ +

The parameter sar is an expression containing +the following constants: +

+
+
E, PI, PHI
+

the corresponding mathematical approximated values for e +(euler number), pi (greek PI), phi (golden ratio) +

+
+
w, h
+

the input width and height +

+
+
a
+

same as w / h +

+
+
sar
+

input sample aspect ratio +

+
+
dar
+

input display aspect ratio, it is the same as (w / h) * sar +

+
+
hsub, vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+ + +

28.74.1 Examples

+ +
    +
  • +To change the display aspect ratio to 16:9, specify one of the following: +
     
    setdar=dar=1.77777
    +setdar=dar=16/9
    +setdar=dar=1.77777
    +
    + +
  • +To change the sample aspect ratio to 10:11, specify: +
     
    setsar=sar=10/11
    +
    + +
  • +To set a display aspect ratio of 16:9, and specify a maximum integer value of +1000 in the aspect ratio reduction, use the command: +
     
    setdar=ratio=16/9:max=1000
    +
    + +
+ +

+

+

28.75 setfield

+ +

Force field for the output video frame. +

+

The setfield filter marks the interlace type field for the +output frames. It does not change the input frame, but only sets the +corresponding property, which affects how the frame is treated by +following filters (e.g. fieldorder or yadif). +

+

The filter accepts the following options: +

+
+
mode
+

Available values are: +

+
+
auto
+

Keep the same field property. +

+
+
bff
+

Mark the frame as bottom-field-first. +

+
+
tff
+

Mark the frame as top-field-first. +

+
+
prog
+

Mark the frame as progressive. +

+
+
+
+ + +

28.76 showinfo

+ +

Show a line containing various information for each input video frame. +The input video is not modified. +

+

The shown line contains a sequence of key/value pairs of the form +key:value. +

+

A description of each shown parameter follows: +

+
+
n
+

sequential number of the input frame, starting from 0 +

+
+
pts
+

Presentation TimeStamp of the input frame, expressed as a number of +time base units. The time base unit depends on the filter input pad. +

+
+
pts_time
+

Presentation TimeStamp of the input frame, expressed as a number of +seconds +

+
+
pos
+

position of the frame in the input stream, -1 if this information is +unavailable and/or meaningless (for example in case of synthetic video) +

+
+
fmt
+

pixel format name +

+
+
sar
+

sample aspect ratio of the input frame, expressed in the form +num/den +

+
+
s
+

size of the input frame. For the syntax of this option, check the "Video size" +section in the ffmpeg-utils manual. +

+
+
i
+

interlaced mode ("P" for "progressive", "T" for top field first, "B" +for bottom field first) +

+
+
iskey
+

1 if the frame is a key frame, 0 otherwise +

+
+
type
+

picture type of the input frame ("I" for an I-frame, "P" for a +P-frame, "B" for a B-frame, "?" for unknown type). +Check also the documentation of the AVPictureType enum and of +the av_get_picture_type_char function defined in +‘libavutil/avutil.h’. +

+
+
checksum
+

Adler-32 checksum (printed in hexadecimal) of all the planes of the input frame +

+
+
plane_checksum
+

Adler-32 checksum (printed in hexadecimal) of each plane of the input frame, +expressed in the form "[c0 c1 c2 c3]" +

+
+ +

+

+

28.77 smartblur

+ +

Blur the input video without impacting the outlines. +

+

The filter accepts the following options: +

+
+
luma_radius, lr
+

Set the luma radius. The option value must be a float number in +the range [0.1,5.0] that specifies the variance of the gaussian filter +used to blur the image (slower if larger). Default value is 1.0. +

+
+
luma_strength, ls
+

Set the luma strength. The option value must be a float number +in the range [-1.0,1.0] that configures the blurring. A value included +in [0.0,1.0] will blur the image whereas a value included in +[-1.0,0.0] will sharpen the image. Default value is 1.0. +

+
+
luma_threshold, lt
+

Set the luma threshold used as a coefficient to determine +whether a pixel should be blurred or not. The option value must be an +integer in the range [-30,30]. A value of 0 will filter all the image, +a value included in [0,30] will filter flat areas and a value included +in [-30,0] will filter edges. Default value is 0. +

+
+
chroma_radius, cr
+

Set the chroma radius. The option value must be a float number in +the range [0.1,5.0] that specifies the variance of the gaussian filter +used to blur the image (slower if larger). Default value is 1.0. +

+
+
chroma_strength, cs
+

Set the chroma strength. The option value must be a float number +in the range [-1.0,1.0] that configures the blurring. A value included +in [0.0,1.0] will blur the image whereas a value included in +[-1.0,0.0] will sharpen the image. Default value is 1.0. +

+
+
chroma_threshold, ct
+

Set the chroma threshold used as a coefficient to determine +whether a pixel should be blurred or not. The option value must be an +integer in the range [-30,30]. A value of 0 will filter all the image, +a value included in [0,30] will filter flat areas and a value included +in [-30,0] will filter edges. Default value is 0. +

+
+ +

If a chroma option is not explicitly set, the corresponding luma value +is set. +

+ +

28.78 stereo3d

+ +

Convert between different stereoscopic image formats. +

+

The filters accept the following options: +

+
+
in
+

Set stereoscopic image format of input. +

+

Available values for input image formats are: +

+
sbsl
+

side by side parallel (left eye left, right eye right) +

+
+
sbsr
+

side by side crosseye (right eye left, left eye right) +

+
+
sbs2l
+

side by side parallel with half width resolution +(left eye left, right eye right) +

+
+
sbs2r
+

side by side crosseye with half width resolution +(right eye left, left eye right) +

+
+
abl
+

above-below (left eye above, right eye below) +

+
+
abr
+

above-below (right eye above, left eye below) +

+
+
ab2l
+

above-below with half height resolution +(left eye above, right eye below) +

+
+
ab2r
+

above-below with half height resolution +(right eye above, left eye below) +

+
+
al
+

alternating frames (left eye first, right eye second) +

+
+
ar
+

alternating frames (right eye first, left eye second) +

+

Default value is ‘sbsl’. +

+
+ +
+
out
+

Set stereoscopic image format of output. +

+

Available values for output image formats are all the input formats as well as: +

+
arbg
+

anaglyph red/blue gray +(red filter on left eye, blue filter on right eye) +

+
+
argg
+

anaglyph red/green gray +(red filter on left eye, green filter on right eye) +

+
+
arcg
+

anaglyph red/cyan gray +(red filter on left eye, cyan filter on right eye) +

+
+
arch
+

anaglyph red/cyan half colored +(red filter on left eye, cyan filter on right eye) +

+
+
arcc
+

anaglyph red/cyan color +(red filter on left eye, cyan filter on right eye) +

+
+
arcd
+

anaglyph red/cyan color optimized with the least squares projection of dubois +(red filter on left eye, cyan filter on right eye) +

+
+
agmg
+

anaglyph green/magenta gray +(green filter on left eye, magenta filter on right eye) +

+
+
agmh
+

anaglyph green/magenta half colored +(green filter on left eye, magenta filter on right eye) +

+
+
agmc
+

anaglyph green/magenta colored +(green filter on left eye, magenta filter on right eye) +

+
+
agmd
+

anaglyph green/magenta color optimized with the least squares projection of dubois +(green filter on left eye, magenta filter on right eye) +

+
+
aybg
+

anaglyph yellow/blue gray +(yellow filter on left eye, blue filter on right eye) +

+
+
aybh
+

anaglyph yellow/blue half colored +(yellow filter on left eye, blue filter on right eye) +

+
+
aybc
+

anaglyph yellow/blue colored +(yellow filter on left eye, blue filter on right eye) +

+
+
aybd
+

anaglyph yellow/blue color optimized with the least squares projection of dubois +(yellow filter on left eye, blue filter on right eye) +

+
+
irl
+

interleaved rows (left eye has top row, right eye starts on next row) +

+
+
irr
+

interleaved rows (right eye has top row, left eye starts on next row) +

+
+
ml
+

mono output (left eye only) +

+
+
mr
+

mono output (right eye only) +

+
+ +

Default value is ‘arcd’. +

+
+ + +

28.78.1 Examples

+ +
    +
  • +Convert input video from side by side parallel to anaglyph yellow/blue dubois: +
     
    stereo3d=sbsl:aybd
    +
    + +
  • +Convert input video from above-below (left eye above, right eye below) to side by side crosseye. +
     
    stereo3d=abl:sbsr
    +
    +
+ + +

28.79 spp

+ +

Apply a simple postprocessing filter that compresses and decompresses the image +at several (or - in the case of ‘quality’ level 6 - all) shifts +and average the results. +

+

The filter accepts the following options: +

+
+
quality
+

Set quality. This option defines the number of levels for averaging. It accepts +an integer in the range 0-6. If set to 0, the filter will have no +effect. A value of 6 means the highest quality. For each increment of +that value the speed drops by a factor of approximately 2. Default value is +3. +

+
+
qp
+

Force a constant quantization parameter. If not set, the filter will use the QP +from the video stream (if available). +

+
+
mode
+

Set thresholding mode. Available modes are: +

+
+
hard
+

Set hard thresholding (default). +

+
soft
+

Set soft thresholding (better de-ringing effect, but likely blurrier). +

+
+ +
+
use_bframe_qp
+

Enable the use of the QP from the B-Frames if set to 1. Using this +option may cause flicker since the B-Frames have often larger QP. Default is +0 (not enabled). +

+
+ +

+

+

28.80 subtitles

+ +

Draw subtitles on top of input video using the libass library. +

+

To enable compilation of this filter you need to configure FFmpeg with +--enable-libass. This filter also requires a build with libavcodec and +libavformat to convert the passed subtitles file to ASS (Advanced Substation +Alpha) subtitles format. +

+

The filter accepts the following options: +

+
+
filename, f
+

Set the filename of the subtitle file to read. It must be specified. +

+
+
original_size
+

Specify the size of the original video, the video for which the ASS file +was composed. For the syntax of this option, check the "Video size" section in +the ffmpeg-utils manual. Due to a misdesign in ASS aspect ratio arithmetic, +this is necessary to correctly scale the fonts if the aspect ratio has been +changed. +

+
+
charenc
+

Set subtitles input character encoding. subtitles filter only. Only +useful if not UTF-8. +

+
+ +

If the first key is not specified, it is assumed that the first value +specifies the ‘filename’. +

+

For example, to render the file ‘sub.srt’ on top of the input +video, use the command: +

 
subtitles=sub.srt
+
+ +

which is equivalent to: +

 
subtitles=filename=sub.srt
+
+ + +

28.81 super2xsai

+ +

Scale the input by 2x and smooth using the Super2xSaI (Scale and +Interpolate) pixel art scaling algorithm. +

+

Useful for enlarging pixel art images without reducing sharpness. +

+ +

28.82 swapuv

+

Swap U & V plane. +

+ +

28.83 telecine

+ +

Apply telecine process to the video. +

+

This filter accepts the following options: +

+
+
first_field
+
+
top, t
+

top field first +

+
bottom, b
+

bottom field first +The default value is top. +

+
+ +
+
pattern
+

A string of numbers representing the pulldown pattern you wish to apply. +The default value is 23. +

+
+ +
 
Some typical patterns:
+
+NTSC output (30i):
+27.5p: 32222
+24p: 23 (classic)
+24p: 2332 (preferred)
+20p: 33
+18p: 334
+16p: 3444
+
+PAL output (25i):
+27.5p: 12222
+24p: 222222222223 ("Euro pulldown")
+16.67p: 33
+16p: 33333334
+
+ + +

28.84 thumbnail

+

Select the most representative frame in a given sequence of consecutive frames. +

+

The filter accepts the following options: +

+
+
n
+

Set the frames batch size to analyze; in a set of n frames, the filter +will pick one of them, and then handle the next batch of n frames until +the end. Default is 100. +

+
+ +

Since the filter keeps track of the whole frames sequence, a bigger n +value will result in a higher memory usage, so a high value is not recommended. +

+ +

28.84.1 Examples

+ +
    +
  • +Extract one picture each 50 frames: +
     
    thumbnail=50
    +
    + +
  • +Complete example of a thumbnail creation with ffmpeg: +
     
    ffmpeg -i in.avi -vf thumbnail,scale=300:200 -frames:v 1 out.png
    +
    +
+ + +

28.85 tile

+ +

Tile several successive frames together. +

+

The filter accepts the following options: +

+
+
layout
+

Set the grid size (i.e. the number of lines and columns). For the syntax of +this option, check the "Video size" section in the ffmpeg-utils manual. +

+
+
nb_frames
+

Set the maximum number of frames to render in the given area. It must be less +than or equal to wxh. The default value is 0, meaning all +the area will be used. +

+
+
margin
+

Set the outer border margin in pixels. +

+
+
padding
+

Set the inner border thickness (i.e. the number of pixels between frames). For +more advanced padding options (such as having different values for the edges), +refer to the pad video filter. +

+
+
color
+

Specify the color of the unused area. For the syntax of this option, check the +"Color" section in the ffmpeg-utils manual. The default value of color +is "black". +

+
+ + +

28.85.1 Examples

+ +
    +
  • +Produce 8x8 PNG tiles of all keyframes (‘-skip_frame nokey’) in a movie: +
     
    ffmpeg -skip_frame nokey -i file.avi -vf 'scale=128:72,tile=8x8' -an -vsync 0 keyframes%03d.png
    +
    +

    The ‘-vsync 0’ is necessary to prevent ffmpeg from +duplicating each output frame to accommodate the originally detected frame +rate. +

    +
  • +Display 5 pictures in an area of 3x2 frames, +with 7 pixels between them, and 2 pixels of initial margin, using +mixed flat and named options: +
     
    tile=3x2:nb_frames=5:padding=7:margin=2
    +
    +
+ + +

28.86 tinterlace

+ +

Perform various types of temporal field interlacing. +

+

Frames are counted starting from 1, so the first input frame is +considered odd. +

+

The filter accepts the following options: +

+
+
mode
+

Specify the mode of the interlacing. This option can also be specified +as a value alone. See below for a list of values for this option. +

+

Available values are: +

+
+
merge, 0
+

Move odd frames into the upper field, even into the lower field, +generating a double height frame at half frame rate. +

+
+
drop_odd, 1
+

Only output even frames, odd frames are dropped, generating a frame with +unchanged height at half frame rate. +

+
+
drop_even, 2
+

Only output odd frames, even frames are dropped, generating a frame with +unchanged height at half frame rate. +

+
+
pad, 3
+

Expand each frame to full height, but pad alternate lines with black, +generating a frame with double height at the same input frame rate. +

+
+
interleave_top, 4
+

Interleave the upper field from odd frames with the lower field from +even frames, generating a frame with unchanged height at half frame rate. +

+
+
interleave_bottom, 5
+

Interleave the lower field from odd frames with the upper field from +even frames, generating a frame with unchanged height at half frame rate. +

+
+
interlacex2, 6
+

Double frame rate with unchanged height. Frames are inserted each +containing the second temporal field from the previous input frame and +the first temporal field from the next input frame. This mode relies on +the top_field_first flag. Useful for interlaced video displays with no +field synchronisation. +

+
+ +

Numeric values are deprecated but are accepted for backward +compatibility reasons. +

+

Default mode is merge. +

+
+
flags
+

Specify flags influencing the filter process. +

+

Available value for flags is: +

+
+
low_pass_filter, vlfp
+

Enable vertical low-pass filtering in the filter. +Vertical low-pass filtering is required when creating an interlaced +destination from a progressive source which contains high-frequency +vertical detail. Filtering will reduce interlace ’twitter’ and Moire +patterning. +

+

Vertical low-pass filtering can only be enabled for ‘mode’ +interleave_top and interleave_bottom. +

+
+
+
+
+ + +

28.87 transpose

+ +

Transpose rows with columns in the input video and optionally flip it. +

+

This filter accepts the following options: +

+
+
dir
+

Specify the transposition direction. +

+

Can assume the following values: +

+
0, 4, cclock_flip
+

Rotate by 90 degrees counterclockwise and vertically flip (default), that is: +

 
L.R     L.l
+. . ->  . .
+l.r     R.r
+
+ +
+
1, 5, clock
+

Rotate by 90 degrees clockwise, that is: +

 
L.R     l.L
+. . ->  . .
+l.r     r.R
+
+ +
+
2, 6, cclock
+

Rotate by 90 degrees counterclockwise, that is: +

 
L.R     R.r
+. . ->  . .
+l.r     L.l
+
+ +
+
3, 7, clock_flip
+

Rotate by 90 degrees clockwise and vertically flip, that is: +

 
L.R     r.R
+. . ->  . .
+l.r     l.L
+
+
+
+ +

For values between 4-7, the transposition is only done if the input +video geometry is portrait and not landscape. These values are +deprecated, the passthrough option should be used instead. +

+

Numerical values are deprecated, and should be dropped in favor of +symbolic constants. +

+
+
passthrough
+

Do not apply the transposition if the input geometry matches the one +specified by the specified value. It accepts the following values: +

+
none
+

Always apply transposition. +

+
portrait
+

Preserve portrait geometry (when height >= width). +

+
landscape
+

Preserve landscape geometry (when width >= height). +

+
+ +

Default value is none. +

+
+ +

For example to rotate by 90 degrees clockwise and preserve portrait +layout: +

 
transpose=dir=1:passthrough=portrait
+
+ +

The command above can also be specified as: +

 
transpose=1:portrait
+
+ + +

28.88 trim

+

Trim the input so that the output contains one continuous subpart of the input. +

+

This filter accepts the following options: +

+
start
+

Specify time of the start of the kept section, i.e. the frame with the +timestamp start will be the first frame in the output. +

+
+
end
+

Specify time of the first frame that will be dropped, i.e. the frame +immediately preceding the one with the timestamp end will be the last +frame in the output. +

+
+
start_pts
+

Same as start, except this option sets the start timestamp in timebase +units instead of seconds. +

+
+
end_pts
+

Same as end, except this option sets the end timestamp in timebase units +instead of seconds. +

+
+
duration
+

Specify maximum duration of the output. +

+
+
start_frame
+

Number of the first frame that should be passed to output. +

+
+
end_frame
+

Number of the first frame that should be dropped. +

+
+ +

start’, ‘end’, ‘duration’ are expressed as time +duration specifications, check the "Time duration" section in the +ffmpeg-utils manual. +

+

Note that the first two sets of the start/end options and the ‘duration’ +option look at the frame timestamp, while the _frame variants simply count the +frames that pass through the filter. Also note that this filter does not modify +the timestamps. If you wish that the output timestamps start at zero, insert a +setpts filter after the trim filter. +

+

If multiple start or end options are set, this filter tries to be greedy and +keep all the frames that match at least one of the specified constraints. To keep +only the part that matches all the constraints at once, chain multiple trim +filters. +

+

The defaults are such that all the input is kept. So it is possible to set e.g. +just the end values to keep everything before the specified time. +

+

Examples: +

    +
  • +drop everything except the second minute of input +
     
    ffmpeg -i INPUT -vf trim=60:120
    +
    + +
  • +keep only the first second +
     
    ffmpeg -i INPUT -vf trim=duration=1
    +
    + +
+ + + +

28.89 unsharp

+ +

Sharpen or blur the input video. +

+

It accepts the following parameters: +

+
+
luma_msize_x, lx
+

Set the luma matrix horizontal size. It must be an odd integer between +3 and 63, default value is 5. +

+
+
luma_msize_y, ly
+

Set the luma matrix vertical size. It must be an odd integer between 3 +and 63, default value is 5. +

+
+
luma_amount, la
+

Set the luma effect strength. It can be a float number, reasonable +values lay between -1.5 and 1.5. +

+

Negative values will blur the input video, while positive values will +sharpen it, a value of zero will disable the effect. +

+

Default value is 1.0. +

+
+
chroma_msize_x, cx
+

Set the chroma matrix horizontal size. It must be an odd integer +between 3 and 63, default value is 5. +

+
+
chroma_msize_y, cy
+

Set the chroma matrix vertical size. It must be an odd integer +between 3 and 63, default value is 5. +

+
+
chroma_amount, ca
+

Set the chroma effect strength. It can be a float number, reasonable +values lay between -1.5 and 1.5. +

+

Negative values will blur the input video, while positive values will +sharpen it, a value of zero will disable the effect. +

+

Default value is 0.0. +

+
+
opencl
+

If set to 1, specify using OpenCL capabilities, only available if +FFmpeg was configured with --enable-opencl. Default value is 0. +

+
+
+ +

All parameters are optional and default to the equivalent of the +string ’5:5:1.0:5:5:0.0’. +

+ +

28.89.1 Examples

+ +
    +
  • +Apply strong luma sharpen effect: +
     
    unsharp=luma_msize_x=7:luma_msize_y=7:luma_amount=2.5
    +
    + +
  • +Apply strong blur of both luma and chroma parameters: +
     
    unsharp=7:7:-2:7:7:-2
    +
    +
+ +

+

+

28.90 vidstabdetect

+ +

Analyze video stabilization/deshaking. Perform pass 1 of 2, see +vidstabtransform for pass 2. +

+

This filter generates a file with relative translation and rotation +transform information about subsequent frames, which is then used by +the vidstabtransform filter. +

+

To enable compilation of this filter you need to configure FFmpeg with +--enable-libvidstab. +

+

This filter accepts the following options: +

+
+
result
+

Set the path to the file used to write the transforms information. +Default value is ‘transforms.trf’. +

+
+
shakiness
+

Set how shaky the video is and how quick the camera is. It accepts an +integer in the range 1-10, a value of 1 means little shakiness, a +value of 10 means strong shakiness. Default value is 5. +

+
+
accuracy
+

Set the accuracy of the detection process. It must be a value in the +range 1-15. A value of 1 means low accuracy, a value of 15 means high +accuracy. Default value is 15. +

+
+
stepsize
+

Set stepsize of the search process. The region around minimum is +scanned with 1 pixel resolution. Default value is 6. +

+
+
mincontrast
+

Set minimum contrast. Below this value a local measurement field is +discarded. Must be a floating point value in the range 0-1. Default +value is 0.3. +

+
+
tripod
+

Set reference frame number for tripod mode. +

+

If enabled, the motion of the frames is compared to a reference frame +in the filtered stream, identified by the specified number. The idea +is to compensate all movements in a more-or-less static scene and keep +the camera view absolutely still. +

+

If set to 0, it is disabled. The frames are counted starting from 1. +

+
+
show
+

Show fields and transforms in the resulting frames. It accepts an +integer in the range 0-2. Default value is 0, which disables any +visualization. +

+
+ + +

28.90.1 Examples

+ +
    +
  • +Use default values: +
     
    vidstabdetect
    +
    + +
  • +Analyze strongly shaky movie and put the results in file +‘mytransforms.trf’: +
     
    vidstabdetect=shakiness=10:accuracy=15:result="mytransforms.trf"
    +
    + +
  • +Visualize the result of internal transformations in the resulting +video: +
     
    vidstabdetect=show=1
    +
    + +
  • +Analyze a video with medium shakiness using ffmpeg: +
     
    ffmpeg -i input -vf vidstabdetect=shakiness=5:show=1 dummy.avi
    +
    +
+ +

+

+

28.91 vidstabtransform

+ +

Video stabilization/deshaking: pass 2 of 2, +see vidstabdetect for pass 1. +

+

Read a file with transform information for each frame and +apply/compensate them. Together with the vidstabdetect +filter this can be used to deshake videos. See also +http://public.hronopik.de/vid.stab. It is important to also use +the unsharp filter, see below. +

+

To enable compilation of this filter you need to configure FFmpeg with +--enable-libvidstab. +

+ +

28.91.1 Options

+ +
+
input
+

Set path to the file used to read the transforms. Default value is +‘transforms.trf’. +

+
+
smoothing
+

Set the number of frames (value*2 + 1) used for lowpass filtering the +camera movements. Default value is 10. +

+

For example a number of 10 means that 21 frames are used (10 in the +past and 10 in the future) to smoothen the motion in the video. A +larger value leads to a smoother video, but limits the acceleration +of the camera (pan/tilt movements). 0 is a special case where a +static camera is simulated. +

+
+
optalgo
+

Set the camera path optimization algorithm. +

+

Accepted values are: +

+
gauss
+

gaussian kernel low-pass filter on camera motion (default) +

+
avg
+

averaging on transformations +

+
+ +
+
maxshift
+

Set maximal number of pixels to translate frames. Default value is -1, +meaning no limit. +

+
+
maxangle
+

Set maximal angle in radians (degree*PI/180) to rotate frames. Default +value is -1, meaning no limit. +

+
+
crop
+

Specify how to deal with borders that may be visible due to movement +compensation. +

+

Available values are: +

+
keep
+

keep image information from previous frame (default) +

+
black
+

fill the border black +

+
+ +
+
invert
+

Invert transforms if set to 1. Default value is 0. +

+
+
relative
+

Consider transforms as relative to previous frame if set to 1, +absolute if set to 0. Default value is 0. +

+
+
zoom
+

Set percentage to zoom. A positive value will result in a zoom-in +effect, a negative value in a zoom-out effect. Default value is 0 (no +zoom). +

+
+
optzoom
+

Set optimal zooming to avoid borders. +

+

Accepted values are: +

+
0
+

disabled +

+
1
+

optimal static zoom value is determined (only very strong movements +will lead to visible borders) (default) +

+
2
+

optimal adaptive zoom value is determined (no borders will be +visible), see ‘zoomspeed’ +

+
+ +

Note that the value given at zoom is added to the one calculated here. +

+
+
zoomspeed
+

Set percent to zoom maximally each frame (enabled when +‘optzoom’ is set to 2). Range is from 0 to 5, default value is +0.25. +

+
+
interpol
+

Specify type of interpolation. +

+

Available values are: +

+
no
+

no interpolation +

+
linear
+

linear only horizontal +

+
bilinear
+

linear in both directions (default) +

+
bicubic
+

cubic in both directions (slow) +

+
+ +
+
tripod
+

Enable virtual tripod mode if set to 1, which is equivalent to +relative=0:smoothing=0. Default value is 0. +

+

Use also tripod option of vidstabdetect. +

+
+
debug
+

Increase log verbosity if set to 1. Also the detected global motions +are written to the temporary file ‘global_motions.trf’. Default +value is 0. +

+
+ + +

28.91.2 Examples

+ +
    +
  • +Use ffmpeg for a typical stabilization with default values: +
     
    ffmpeg -i inp.mpeg -vf vidstabtransform,unsharp=5:5:0.8:3:3:0.4 inp_stabilized.mpeg
    +
    + +

    Note the use of the unsharp filter which is always recommended. +

    +
  • +Zoom in a bit more and load transform data from a given file: +
     
    vidstabtransform=zoom=5:input="mytransforms.trf"
    +
    + +
  • +Smoothen the video even more: +
     
    vidstabtransform=smoothing=30
    +
    +
+ + +

28.92 vflip

+ +

Flip the input video vertically. +

+

For example, to vertically flip a video with ffmpeg: +

 
ffmpeg -i in.avi -vf "vflip" out.avi
+
+ + +

28.93 vignette

+ +

Make or reverse a natural vignetting effect. +

+

The filter accepts the following options: +

+
+
angle, a
+

Set lens angle expression as a number of radians. +

+

The value is clipped in the [0,PI/2] range. +

+

Default value: "PI/5" +

+
+
x0
+
y0
+

Set center coordinates expressions. Respectively "w/2" and "h/2" +by default. +

+
+
mode
+

Set forward/backward mode. +

+

Available modes are: +

+
forward
+

The larger the distance from the central point, the darker the image becomes. +

+
+
backward
+

The larger the distance from the central point, the brighter the image becomes. +This can be used to reverse a vignette effect, though there is no automatic +detection to extract the lens ‘angle’ and other settings (yet). It can +also be used to create a burning effect. +

+
+ +

Default value is ‘forward’. +

+
+
eval
+

Set evaluation mode for the expressions (‘angle’, ‘x0’, ‘y0’). +

+

It accepts the following values: +

+
init
+

Evaluate expressions only once during the filter initialization. +

+
+
frame
+

Evaluate expressions for each incoming frame. This is way slower than the +‘init’ mode since it requires all the scalers to be re-computed, but it +allows advanced dynamic expressions. +

+
+ +

Default value is ‘init’. +

+
+
dither
+

Set dithering to reduce the circular banding effects. Default is 1 +(enabled). +

+
+
aspect
+

Set vignette aspect. This setting allows one to adjust the shape of the vignette. +Setting this value to the SAR of the input will make a rectangular vignetting +following the dimensions of the video. +

+

Default is 1/1. +

+
+ + +

28.93.1 Expressions

+ +

The ‘angle’, ‘x0’ and ‘y0’ expressions can contain the +following parameters. +

+
+
w
+
h
+

input width and height +

+
+
n
+

the number of input frame, starting from 0 +

+
+
pts
+

the PTS (Presentation TimeStamp) time of the filtered video frame, expressed in +TB units, NAN if undefined +

+
+
r
+

frame rate of the input video, NAN if the input frame rate is unknown +

+
+
t
+

the PTS (Presentation TimeStamp) of the filtered video frame, +expressed in seconds, NAN if undefined +

+
+
tb
+

time base of the input video +

+
+ + + +

28.93.2 Examples

+ +
    +
  • +Apply simple strong vignetting effect: +
     
    vignette=PI/4
    +
    + +
  • +Make a flickering vignetting: +
     
    vignette='PI/4+random(1)*PI/50':eval=frame
    +
    + +
+ + +

28.94 w3fdif

+ +

Deinterlace the input video ("w3fdif" stands for "Weston 3 Field +Deinterlacing Filter"). +

+

Based on the process described by Martin Weston for BBC R&D, and +implemented based on the de-interlace algorithm written by Jim +Easterbrook for BBC R&D, the Weston 3 field deinterlacing filter +uses filter coefficients calculated by BBC R&D. +

+

There are two sets of filter coefficients, so-called "simple" +and "complex". Which set of filter coefficients is used can +be set by passing an optional parameter: +

+
+
filter
+

Set the interlacing filter coefficients. Accepts one of the following values: +

+
+
simple
+

Simple filter coefficient set. +

+
complex
+

More-complex filter coefficient set. +

+
+

Default value is ‘complex’. +

+
+
deint
+

Specify which frames to deinterlace. Accepts one of the following values: +

+
+
all
+

Deinterlace all frames, +

+
interlaced
+

Only deinterlace frames marked as interlaced. +

+
+ +

Default value is ‘all’. +

+
+ +

+

+

28.95 yadif

+ +

Deinterlace the input video ("yadif" means "yet another deinterlacing +filter"). +

+

This filter accepts the following options: +

+ +
+
mode
+

The interlacing mode to adopt, accepts one of the following values: +

+
+
0, send_frame
+

output 1 frame for each frame +

+
1, send_field
+

output 1 frame for each field +

+
2, send_frame_nospatial
+

like send_frame but skip spatial interlacing check +

+
3, send_field_nospatial
+

like send_field but skip spatial interlacing check +

+
+ +

Default value is send_frame. +

+
+
parity
+

The picture field parity assumed for the input interlaced video, accepts one of +the following values: +

+
+
0, tff
+

assume top field first +

+
1, bff
+

assume bottom field first +

+
-1, auto
+

enable automatic detection +

+
+ +

Default value is auto. +If interlacing is unknown or decoder does not export this information, +top field first will be assumed. +

+
+
deint
+

Specify which frames to deinterlace. Accepts one of the following +values: +

+
+
0, all
+

deinterlace all frames +

+
1, interlaced
+

only deinterlace frames marked as interlaced +

+
+ +

Default value is all. +

+
+ + + +

29. Video Sources

+ +

Below is a description of the currently available video sources. +

+ +

29.1 buffer

+ +

Buffer video frames, and make them available to the filter chain. +

+

This source is mainly intended for a programmatic use, in particular +through the interface defined in ‘libavfilter/vsrc_buffer.h’. +

+

This source accepts the following options: +

+
+
video_size
+

Specify the size (width and height) of the buffered video frames. For the +syntax of this option, check the "Video size" section in the ffmpeg-utils +manual. +

+
+
width
+

Input video width. +

+
+
height
+

Input video height. +

+
+
pix_fmt
+

A string representing the pixel format of the buffered video frames. +It may be a number corresponding to a pixel format, or a pixel format +name. +

+
+
time_base
+

Specify the timebase assumed by the timestamps of the buffered frames. +

+
+
frame_rate
+

Specify the frame rate expected for the video stream. +

+
+
pixel_aspect, sar
+

Specify the sample aspect ratio assumed by the video frames. +

+
+
sws_param
+

Specify the optional parameters to be used for the scale filter which +is automatically inserted when an input change is detected in the +input size or format. +

+
+ +

For example: +

 
buffer=width=320:height=240:pix_fmt=yuv410p:time_base=1/24:sar=1
+
+ +

will instruct the source to accept video frames with size 320x240 and +with format "yuv410p", assuming 1/24 as the timestamps timebase and +square pixels (1:1 sample aspect ratio). +Since the pixel format with name "yuv410p" corresponds to the number 6 +(check the enum AVPixelFormat definition in ‘libavutil/pixfmt.h’), +this example corresponds to: +

 
buffer=size=320x240:pixfmt=6:time_base=1/24:pixel_aspect=1/1
+
+ +

Alternatively, the options can be specified as a flat string, but this +syntax is deprecated: +

+

width:height:pix_fmt:time_base.num:time_base.den:pixel_aspect.num:pixel_aspect.den[:sws_param] +

+ +

29.2 cellauto

+ +

Create a pattern generated by an elementary cellular automaton. +

+

The initial state of the cellular automaton can be defined through the +‘filename’, and ‘pattern’ options. If such options are +not specified an initial state is created randomly. +

+

At each new frame a new row in the video is filled with the result of +the cellular automaton next generation. The behavior when the whole +frame is filled is defined by the ‘scroll’ option. +

+

This source accepts the following options: +

+
+
filename, f
+

Read the initial cellular automaton state, i.e. the starting row, from +the specified file. +In the file, each non-whitespace character is considered an alive +cell, a newline will terminate the row, and further characters in the +file will be ignored. +

+
+
pattern, p
+

Read the initial cellular automaton state, i.e. the starting row, from +the specified string. +

+

Each non-whitespace character in the string is considered an alive +cell, a newline will terminate the row, and further characters in the +string will be ignored. +

+
+
rate, r
+

Set the video rate, that is the number of frames generated per second. +Default is 25. +

+
+
random_fill_ratio, ratio
+

Set the random fill ratio for the initial cellular automaton row. It +is a floating point number value ranging from 0 to 1, defaults to +1/PHI. +

+

This option is ignored when a file or a pattern is specified. +

+
+
random_seed, seed
+

Set the seed for filling randomly the initial row, must be an integer +included between 0 and UINT32_MAX. If not specified, or if explicitly +set to -1, the filter will try to use a good random seed on a best +effort basis. +

+
+
rule
+

Set the cellular automaton rule, it is a number ranging from 0 to 255. +Default value is 110. +

+
+
size, s
+

Set the size of the output video. For the syntax of this option, check +the "Video size" section in the ffmpeg-utils manual. +

+

If ‘filename’ or ‘pattern’ is specified, the size is set +by default to the width of the specified initial state row, and the +height is set to width * PHI. +

+

If ‘size’ is set, it must contain the width of the specified +pattern string, and the specified pattern will be centered in the +larger row. +

+

If a filename or a pattern string is not specified, the size value +defaults to "320x518" (used for a randomly generated initial state). +

+
+
scroll
+

If set to 1, scroll the output upward when all the rows in the output +have been already filled. If set to 0, the new generated row will be +written over the top row just after the bottom row is filled. +Defaults to 1. +

+
+
start_full, full
+

If set to 1, completely fill the output with generated rows before +outputting the first frame. +This is the default behavior, for disabling set the value to 0. +

+
+
stitch
+

If set to 1, stitch the left and right row edges together. +This is the default behavior, for disabling set the value to 0. +

+
+ + +

29.2.1 Examples

+ +
    +
  • +Read the initial state from ‘pattern’, and specify an output of +size 200x400. +
     
    cellauto=f=pattern:s=200x400
    +
    + +
  • +Generate a random initial row with a width of 200 cells, with a fill +ratio of 2/3: +
     
    cellauto=ratio=2/3:s=200x200
    +
    + +
  • +Create a pattern generated by rule 18 starting by a single alive cell +centered on an initial row with width 100: +
     
    cellauto=p=@:s=100x400:full=0:rule=18
    +
    + +
  • +Specify a more elaborated initial pattern: +
     
    cellauto=p='@@ @ @@':s=100x400:full=0:rule=18
    +
    + +
+ + +

29.3 mandelbrot

+ +

Generate a Mandelbrot set fractal, and progressively zoom towards the +point specified with start_x and start_y. +

+

This source accepts the following options: +

+
+
end_pts
+

Set the terminal pts value. Default value is 400. +

+
+
end_scale
+

Set the terminal scale value. +Must be a floating point value. Default value is 0.3. +

+
+
inner
+

Set the inner coloring mode, that is the algorithm used to draw the +Mandelbrot fractal internal region. +

+

It shall assume one of the following values: +

+
black
+

Set black mode. +

+
convergence
+

Show time until convergence. +

+
mincol
+

Set color based on point closest to the origin of the iterations. +

+
period
+

Set period mode. +

+
+ +

Default value is mincol. +

+
+
bailout
+

Set the bailout value. Default value is 10.0. +

+
+
maxiter
+

Set the maximum of iterations performed by the rendering +algorithm. Default value is 7189. +

+
+
outer
+

Set outer coloring mode. +It shall assume one of following values: +

+
iteration_count
+

Set iteration count mode. +

+
normalized_iteration_count
+

set normalized iteration count mode. +

+
+

Default value is normalized_iteration_count. +

+
+
rate, r
+

Set frame rate, expressed as number of frames per second. Default +value is "25". +

+
+
size, s
+

Set frame size. For the syntax of this option, check the "Video +size" section in the ffmpeg-utils manual. Default value is "640x480". +

+
+
start_scale
+

Set the initial scale value. Default value is 3.0. +

+
+
start_x
+

Set the initial x position. Must be a floating point value between +-100 and 100. Default value is -0.743643887037158704752191506114774. +

+
+
start_y
+

Set the initial y position. Must be a floating point value between +-100 and 100. Default value is -0.131825904205311970493132056385139. +

+
+ + +

29.4 mptestsrc

+ +

Generate various test patterns, as generated by the MPlayer test filter. +

+

The size of the generated video is fixed, and is 256x256. +This source is useful in particular for testing encoding features. +

+

This source accepts the following options: +

+
+
rate, r
+

Specify the frame rate of the sourced video, as the number of frames +generated per second. It has to be a string in the format +frame_rate_num/frame_rate_den, an integer number, a float +number or a valid video frame rate abbreviation. The default value is +"25". +

+
+
duration, d
+

Set the video duration of the sourced video. The accepted syntax is: +

 
[-]HH:MM:SS[.m...]
+[-]S+[.m...]
+
+

See also the function av_parse_time(). +

+

If not specified, or the expressed duration is negative, the video is +supposed to be generated forever. +

+
+
test, t
+
+

Set the number or the name of the test to perform. Supported tests are: +

+
dc_luma
+
dc_chroma
+
freq_luma
+
freq_chroma
+
amp_luma
+
amp_chroma
+
cbp
+
mv
+
ring1
+
ring2
+
all
+
+ +

Default value is "all", which will cycle through the list of all tests. +

+
+ +

For example the following: +

 
testsrc=t=dc_luma
+
+ +

will generate a "dc_luma" test pattern. +

+ +

29.5 frei0r_src

+ +

Provide a frei0r source. +

+

To enable compilation of this filter you need to install the frei0r +header and configure FFmpeg with --enable-frei0r. +

+

This source accepts the following options: +

+
+
size
+

The size of the video to generate. For the syntax of this option, check the +"Video size" section in the ffmpeg-utils manual. +

+
+
framerate
+

Framerate of the generated video, may be a string of the form +num/den or a frame rate abbreviation. +

+
+
filter_name
+

The name to the frei0r source to load. For more information regarding frei0r and +how to set the parameters read the section frei0r in the description of +the video filters. +

+
+
filter_params
+

A ’|’-separated list of parameters to pass to the frei0r source. +

+
+
+ +

For example, to generate a frei0r partik0l source with size 200x200 +and frame rate 10 which is overlayed on the overlay filter main input: +

 
frei0r_src=size=200x200:framerate=10:filter_name=partik0l:filter_params=1234 [overlay]; [in][overlay] overlay
+
+ + +

29.6 life

+ +

Generate a life pattern. +

+

This source is based on a generalization of John Conway’s life game. +

+

The sourced input represents a life grid, each pixel represents a cell +which can be in one of two possible states, alive or dead. Every cell +interacts with its eight neighbours, which are the cells that are +horizontally, vertically, or diagonally adjacent. +

+

At each interaction the grid evolves according to the adopted rule, +which specifies the number of neighbor alive cells which will make a +cell stay alive or born. The ‘rule’ option allows one to specify +the rule to adopt. +

+

This source accepts the following options: +

+
+
filename, f
+

Set the file from which to read the initial grid state. In the file, +each non-whitespace character is considered an alive cell, and newline +is used to delimit the end of each row. +

+

If this option is not specified, the initial grid is generated +randomly. +

+
+
rate, r
+

Set the video rate, that is the number of frames generated per second. +Default is 25. +

+
+
random_fill_ratio, ratio
+

Set the random fill ratio for the initial random grid. It is a +floating point number value ranging from 0 to 1, defaults to 1/PHI. +It is ignored when a file is specified. +

+
+
random_seed, seed
+

Set the seed for filling the initial random grid, must be an integer +included between 0 and UINT32_MAX. If not specified, or if explicitly +set to -1, the filter will try to use a good random seed on a best +effort basis. +

+
+
rule
+

Set the life rule. +

+

A rule can be specified with a code of the kind "SNS/BNB", +where NS and NB are sequences of numbers in the range 0-8, +NS specifies the number of alive neighbor cells which make a +live cell stay alive, and NB the number of alive neighbor cells +which make a dead cell to become alive (i.e. to "born"). +"s" and "b" can be used in place of "S" and "B", respectively. +

+

Alternatively a rule can be specified by an 18-bits integer. The 9 +high order bits are used to encode the next cell state if it is alive +for each number of neighbor alive cells, the low order bits specify +the rule for "borning" new cells. Higher order bits encode for an +higher number of neighbor cells. +For example the number 6153 = (12<<9)+9 specifies a stay alive +rule of 12 and a born rule of 9, which corresponds to "S23/B03". +

+

Default value is "S23/B3", which is the original Conway’s game of life +rule, and will keep a cell alive if it has 2 or 3 neighbor alive +cells, and will born a new cell if there are three alive cells around +a dead cell. +

+
+
size, s
+

Set the size of the output video. For the syntax of this option, check the +"Video size" section in the ffmpeg-utils manual. +

+

If ‘filename’ is specified, the size is set by default to the +same size of the input file. If ‘size’ is set, it must contain +the size specified in the input file, and the initial grid defined in +that file is centered in the larger resulting area. +

+

If a filename is not specified, the size value defaults to "320x240" +(used for a randomly generated initial grid). +

+
+
stitch
+

If set to 1, stitch the left and right grid edges together, and the +top and bottom edges also. Defaults to 1. +

+
+
mold
+

Set cell mold speed. If set, a dead cell will go from ‘death_color’ to +‘mold_color’ with a step of ‘mold’. ‘mold’ can have a +value from 0 to 255. +

+
+
life_color
+

Set the color of living (or new born) cells. +

+
+
death_color
+

Set the color of dead cells. If ‘mold’ is set, this is the first color +used to represent a dead cell. +

+
+
mold_color
+

Set mold color, for definitely dead and moldy cells. +

+

For the syntax of these 3 color options, check the "Color" section in the +ffmpeg-utils manual. +

+
+ + +

29.6.1 Examples

+ +
    +
  • +Read a grid from ‘pattern’, and center it on a grid of size +300x300 pixels: +
     
    life=f=pattern:s=300x300
    +
    + +
  • +Generate a random grid of size 200x200, with a fill ratio of 2/3: +
     
    life=ratio=2/3:s=200x200
    +
    + +
  • +Specify a custom rule for evolving a randomly generated grid: +
     
    life=rule=S14/B34
    +
    + +
  • +Full example with slow death effect (mold) using ffplay: +
     
    ffplay -f lavfi life=s=300x200:mold=10:r=60:ratio=0.1:death_color=#C83232:life_color=#00ff00,scale=1200:800:flags=16
    +
    +
+ +

+ + + + + + +

+

29.7 color, haldclutsrc, nullsrc, rgbtestsrc, smptebars, smptehdbars, testsrc

+ +

The color source provides a uniformly colored input. +

+

The haldclutsrc source provides an identity Hald CLUT. See also +haldclut filter. +

+

The nullsrc source returns unprocessed video frames. It is +mainly useful to be employed in analysis / debugging tools, or as the +source for filters which ignore the input data. +

+

The rgbtestsrc source generates an RGB test pattern useful for +detecting RGB vs BGR issues. You should see a red, green and blue +stripe from top to bottom. +

+

The smptebars source generates a color bars pattern, based on +the SMPTE Engineering Guideline EG 1-1990. +

+

The smptehdbars source generates a color bars pattern, based on +the SMPTE RP 219-2002. +

+

The testsrc source generates a test video pattern, showing a +color pattern, a scrolling gradient and a timestamp. This is mainly +intended for testing purposes. +

+

The sources accept the following options: +

+
+
color, c
+

Specify the color of the source, only available in the color +source. For the syntax of this option, check the "Color" section in the +ffmpeg-utils manual. +

+
+
level
+

Specify the level of the Hald CLUT, only available in the haldclutsrc +source. A level of N generates a picture of N*N*N by N*N*N +pixels to be used as identity matrix for 3D lookup tables. Each component is +coded on a 1/(N*N) scale. +

+
+
size, s
+

Specify the size of the sourced video. For the syntax of this option, check the +"Video size" section in the ffmpeg-utils manual. The default value is +"320x240". +

+

This option is not available with the haldclutsrc filter. +

+
+
rate, r
+

Specify the frame rate of the sourced video, as the number of frames +generated per second. It has to be a string in the format +frame_rate_num/frame_rate_den, an integer number, a float +number or a valid video frame rate abbreviation. The default value is +"25". +

+
+
sar
+

Set the sample aspect ratio of the sourced video. +

+
+
duration, d
+

Set the video duration of the sourced video. The accepted syntax is: +

 
[-]HH[:MM[:SS[.m...]]]
+[-]S+[.m...]
+
+

See also the function av_parse_time(). +

+

If not specified, or the expressed duration is negative, the video is +supposed to be generated forever. +

+
+
decimals, n
+

Set the number of decimals to show in the timestamp, only available in the +testsrc source. +

+

The displayed timestamp value will correspond to the original +timestamp value multiplied by the power of 10 of the specified +value. Default value is 0. +

+
+ +

For example the following: +

 
testsrc=duration=5.3:size=qcif:rate=10
+
+ +

will generate a video with a duration of 5.3 seconds, with size +176x144 and a frame rate of 10 frames per second. +

+

The following graph description will generate a red source +with an opacity of 0.2, with size "qcif" and a frame rate of 10 +frames per second. +

 
color=c=red@0.2:s=qcif:r=10
+
+ +

If the input content is to be ignored, nullsrc can be used. The +following command generates noise in the luminance plane by employing +the geq filter: +

 
nullsrc=s=256x256, geq=random(1)*255:128:128
+
+ + +

29.7.1 Commands

+ +

The color source supports the following commands: +

+
+
c, color
+

Set the color of the created image. Accepts the same syntax of the +corresponding ‘color’ option. +

+
+ + + +

30. Video Sinks

+ +

Below is a description of the currently available video sinks. +

+ +

30.1 buffersink

+ +

Buffer video frames, and make them available to the end of the filter +graph. +

+

This sink is mainly intended for a programmatic use, in particular +through the interface defined in ‘libavfilter/buffersink.h’ +or the options system. +

+

It accepts a pointer to an AVBufferSinkContext structure, which +defines the incoming buffers’ formats, to be passed as the opaque +parameter to avfilter_init_filter for initialization. +

+ +

30.2 nullsink

+ +

Null video sink, do absolutely nothing with the input video. It is +mainly useful as a template and to be employed in analysis / debugging +tools. +

+ + +

31. Multimedia Filters

+ +

Below is a description of the currently available multimedia filters. +

+ +

31.1 avectorscope

+ +

Convert input audio to a video output, representing the audio vector +scope. +

+

The filter is used to measure the difference between channels of stereo +audio stream. A monoaural signal, consisting of identical left and right +signal, results in straight vertical line. Any stereo separation is visible +as a deviation from this line, creating a Lissajous figure. +If the straight (or deviation from it) but horizontal line appears this +indicates that the left and right channels are out of phase. +

+

The filter accepts the following options: +

+
+
mode, m
+

Set the vectorscope mode. +

+

Available values are: +

+
lissajous
+

Lissajous rotated by 45 degrees. +

+
+
lissajous_xy
+

Same as above but not rotated. +

+
+ +

Default value is ‘lissajous’. +

+
+
size, s
+

Set the video size for the output. For the syntax of this option, check the "Video size" +section in the ffmpeg-utils manual. Default value is 400x400. +

+
+
rate, r
+

Set the output frame rate. Default value is 25. +

+
+
rc
+
gc
+
bc
+

Specify the red, green and blue contrast. Default values are 40, 160 and 80. +Allowed range is [0, 255]. +

+
+
rf
+
gf
+
bf
+

Specify the red, green and blue fade. Default values are 15, 10 and 5. +Allowed range is [0, 255]. +

+
+
zoom
+

Set the zoom factor. Default value is 1. Allowed range is [1, 10]. +

+
+ + +

31.1.1 Examples

+ +
    +
  • +Complete example using ffplay: +
     
    ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
    +             [a] avectorscope=zoom=1.3:rc=2:gc=200:bc=10:rf=1:gf=8:bf=7 [out0]'
    +
    +
+ + +

31.2 concat

+ +

Concatenate audio and video streams, joining them together one after the +other. +

+

The filter works on segments of synchronized video and audio streams. All +segments must have the same number of streams of each type, and that will +also be the number of streams at output. +

+

The filter accepts the following options: +

+
+
n
+

Set the number of segments. Default is 2. +

+
+
v
+

Set the number of output video streams, that is also the number of video +streams in each segment. Default is 1. +

+
+
a
+

Set the number of output audio streams, that is also the number of audio +streams in each segment. Default is 0. +

+
+
unsafe
+

Activate unsafe mode: do not fail if segments have a different format. +

+
+
+ +

The filter has v+a outputs: first v video outputs, then +a audio outputs. +

+

There are nx(v+a) inputs: first the inputs for the first +segment, in the same order as the outputs, then the inputs for the second +segment, etc. +

+

Related streams do not always have exactly the same duration, for various +reasons including codec frame size or sloppy authoring. For that reason, +related synchronized streams (e.g. a video and its audio track) should be +concatenated at once. The concat filter will use the duration of the longest +stream in each segment (except the last one), and if necessary pad shorter +audio streams with silence. +

+

For this filter to work correctly, all segments must start at timestamp 0. +

+

All corresponding streams must have the same parameters in all segments; the +filtering system will automatically select a common pixel format for video +streams, and a common sample format, sample rate and channel layout for +audio streams, but other settings, such as resolution, must be converted +explicitly by the user. +

+

Different frame rates are acceptable but will result in variable frame rate +at output; be sure to configure the output file to handle it. +

+ +

31.2.1 Examples

+ +
    +
  • +Concatenate an opening, an episode and an ending, all in bilingual version +(video in stream 0, audio in streams 1 and 2): +
     
    ffmpeg -i opening.mkv -i episode.mkv -i ending.mkv -filter_complex \
    +  '[0:0] [0:1] [0:2] [1:0] [1:1] [1:2] [2:0] [2:1] [2:2]
    +   concat=n=3:v=1:a=2 [v] [a1] [a2]' \
    +  -map '[v]' -map '[a1]' -map '[a2]' output.mkv
    +
    + +
  • +Concatenate two parts, handling audio and video separately, using the +(a)movie sources, and adjusting the resolution: +
     
    movie=part1.mp4, scale=512:288 [v1] ; amovie=part1.mp4 [a1] ;
    +movie=part2.mp4, scale=512:288 [v2] ; amovie=part2.mp4 [a2] ;
    +[v1] [v2] concat [outv] ; [a1] [a2] concat=v=0:a=1 [outa]
    +
    +

    Note that a desync will happen at the stitch if the audio and video streams +do not have exactly the same duration in the first file. +

    +
+ + +

31.3 ebur128

+ +

EBU R128 scanner filter. This filter takes an audio stream as input and outputs +it unchanged. By default, it logs a message at a frequency of 10Hz with the +Momentary loudness (identified by M), Short-term loudness (S), +Integrated loudness (I) and Loudness Range (LRA). +

+

The filter also has a video output (see the video option) with a real +time graph to observe the loudness evolution. The graphic contains the logged +message mentioned above, so it is not printed anymore when this option is set, +unless the verbose logging is set. The main graphing area contains the +short-term loudness (3 seconds of analysis), and the gauge on the right is for +the momentary loudness (400 milliseconds). +

+

More information about the Loudness Recommendation EBU R128 on +http://tech.ebu.ch/loudness. +

+

The filter accepts the following options: +

+
+
video
+

Activate the video output. The audio stream is passed unchanged whether this +option is set or not. The video stream will be the first output stream if +activated. Default is 0. +

+
+
size
+

Set the video size. This option is for video only. For the syntax of this +option, check the "Video size" section in the ffmpeg-utils manual. Default +and minimum resolution is 640x480. +

+
+
meter
+

Set the EBU scale meter. Default is 9. Common values are 9 and +18, respectively for EBU scale meter +9 and EBU scale meter +18. Any +other integer value between this range is allowed. +

+
+
metadata
+

Set metadata injection. If set to 1, the audio input will be segmented +into 100ms output frames, each of them containing various loudness information +in metadata. All the metadata keys are prefixed with lavfi.r128.. +

+

Default is 0. +

+
+
framelog
+

Force the frame logging level. +

+

Available values are: +

+
info
+

information logging level +

+
verbose
+

verbose logging level +

+
+ +

By default, the logging level is set to info. If the ‘video’ or +the ‘metadata’ options are set, it switches to verbose. +

+
+
peak
+

Set peak mode(s). +

+

Available modes can be cumulated (the option is a flag type). Possible +values are: +

+
none
+

Disable any peak mode (default). +

+
sample
+

Enable sample-peak mode. +

+

Simple peak mode looking for the higher sample value. It logs a message +for sample-peak (identified by SPK). +

+
true
+

Enable true-peak mode. +

+

If enabled, the peak lookup is done on an over-sampled version of the input +stream for better peak accuracy. It logs a message for true-peak. +(identified by TPK) and true-peak per frame (identified by FTPK). +This mode requires a build with libswresample. +

+
+ +
+
+ + +

31.3.1 Examples

+ +
    +
  • +Real-time graph using ffplay, with a EBU scale meter +18: +
     
    ffplay -f lavfi -i "amovie=input.mp3,ebur128=video=1:meter=18 [out0][out1]"
    +
    + +
  • +Run an analysis with ffmpeg: +
     
    ffmpeg -nostats -i input.mp3 -filter_complex ebur128 -f null -
    +
    +
+ + +

31.4 interleave, ainterleave

+ +

Temporally interleave frames from several inputs. +

+

interleave works with video inputs, ainterleave with audio. +

+

These filters read frames from several inputs and send the oldest +queued frame to the output. +

+

Input streams must have well defined, monotonically increasing frame +timestamp values. +

+

In order to submit one frame to output, these filters need to enqueue +at least one frame for each input, so they cannot work in case one +input is not yet terminated and will not receive incoming frames. +

+

For example consider the case when one input is a select filter +which always drops input frames. The interleave filter will keep +reading from that input, but it will never be able to send new frames +to output until the input sends an end-of-stream signal. +

+

Also, depending on inputs synchronization, the filters will drop +frames in case one input receives more frames than the other ones, and +the queue is already filled. +

+

These filters accept the following options: +

+
+
nb_inputs, n
+

Set the number of different inputs, it is 2 by default. +

+
+ + +

31.4.1 Examples

+ +
    +
  • +Interleave frames belonging to different streams using ffmpeg: +
     
    ffmpeg -i bambi.avi -i pr0n.mkv -filter_complex "[0:v][1:v] interleave" out.avi
    +
    + +
  • +Add flickering blur effect: +
     
    select='if(gt(random(0), 0.2), 1, 2)':n=2 [tmp], boxblur=2:2, [tmp] interleave
    +
    +
+ + +

31.5 perms, aperms

+ +

Set read/write permissions for the output frames. +

+

These filters are mainly aimed at developers to test direct path in the +following filter in the filtergraph. +

+

The filters accept the following options: +

+
+
mode
+

Select the permissions mode. +

+

It accepts the following values: +

+
none
+

Do nothing. This is the default. +

+
ro
+

Set all the output frames read-only. +

+
rw
+

Set all the output frames directly writable. +

+
toggle
+

Make the frame read-only if writable, and writable if read-only. +

+
random
+

Set each output frame read-only or writable randomly. +

+
+ +
+
seed
+

Set the seed for the random mode, must be an integer included between +0 and UINT32_MAX. If not specified, or if explicitly set to +-1, the filter will try to use a good random seed on a best effort +basis. +

+
+ +

Note: in case of auto-inserted filter between the permission filter and the +following one, the permission might not be received as expected in that +following filter. Inserting a format or aformat filter before the +perms/aperms filter can avoid this problem. +

+ +

31.6 select, aselect

+ +

Select frames to pass in output. +

+

This filter accepts the following options: +

+
+
expr, e
+

Set expression, which is evaluated for each input frame. +

+

If the expression is evaluated to zero, the frame is discarded. +

+

If the evaluation result is negative or NaN, the frame is sent to the +first output; otherwise it is sent to the output with index +ceil(val)-1, assuming that the input index starts from 0. +

+

For example a value of 1.2 corresponds to the output with index +ceil(1.2)-1 = 2-1 = 1, that is the second output. +

+
+
outputs, n
+

Set the number of outputs. The output to which to send the selected +frame is based on the result of the evaluation. Default value is 1. +

+
+ +

The expression can contain the following constants: +

+
+
n
+

the sequential number of the filtered frame, starting from 0 +

+
+
selected_n
+

the sequential number of the selected frame, starting from 0 +

+
+
prev_selected_n
+

the sequential number of the last selected frame, NAN if undefined +

+
+
TB
+

timebase of the input timestamps +

+
+
pts
+

the PTS (Presentation TimeStamp) of the filtered video frame, +expressed in TB units, NAN if undefined +

+
+
t
+

the PTS (Presentation TimeStamp) of the filtered video frame, +expressed in seconds, NAN if undefined +

+
+
prev_pts
+

the PTS of the previously filtered video frame, NAN if undefined +

+
+
prev_selected_pts
+

the PTS of the last previously filtered video frame, NAN if undefined +

+
+
prev_selected_t
+

the PTS of the last previously selected video frame, NAN if undefined +

+
+
start_pts
+

the PTS of the first video frame in the video, NAN if undefined +

+
+
start_t
+

the time of the first video frame in the video, NAN if undefined +

+
+
pict_type (video only)
+

the type of the filtered frame, can assume one of the following +values: +

+
I
+
P
+
B
+
S
+
SI
+
SP
+
BI
+
+ +
+
interlace_type (video only)
+

the frame interlace type, can assume one of the following values: +

+
PROGRESSIVE
+

the frame is progressive (not interlaced) +

+
TOPFIRST
+

the frame is top-field-first +

+
BOTTOMFIRST
+

the frame is bottom-field-first +

+
+ +
+
consumed_sample_n (audio only)
+

the number of selected samples before the current frame +

+
+
samples_n (audio only)
+

the number of samples in the current frame +

+
+
sample_rate (audio only)
+

the input sample rate +

+
+
key
+

1 if the filtered frame is a key-frame, 0 otherwise +

+
+
pos
+

the position in the file of the filtered frame, -1 if the information +is not available (e.g. for synthetic video) +

+
+
scene (video only)
+

value between 0 and 1 to indicate a new scene; a low value reflects a low +probability for the current frame to introduce a new scene, while a higher +value means the current frame is more likely to be one (see the example below) +

+
+
+ +

The default value of the select expression is "1". +

+ +

31.6.1 Examples

+ +
    +
  • +Select all frames in input: +
     
    select
    +
    + +

    The example above is the same as: +

     
    select=1
    +
    + +
  • +Skip all frames: +
     
    select=0
    +
    + +
  • +Select only I-frames: +
     
    select='eq(pict_type\,I)'
    +
    + +
  • +Select one frame every 100: +
     
    select='not(mod(n\,100))'
    +
    + +
  • +Select only frames contained in the 10-20 time interval: +
     
    select=between(t\,10\,20)
    +
    + +
  • +Select only I frames contained in the 10-20 time interval: +
     
    select=between(t\,10\,20)*eq(pict_type\,I)
    +
    + +
  • +Select frames with a minimum distance of 10 seconds: +
     
    select='isnan(prev_selected_t)+gte(t-prev_selected_t\,10)'
    +
    + +
  • +Use aselect to select only audio frames with samples number > 100: +
     
    aselect='gt(samples_n\,100)'
    +
    + +
  • +Create a mosaic of the first scenes: +
     
    ffmpeg -i video.avi -vf select='gt(scene\,0.4)',scale=160:120,tile -frames:v 1 preview.png
    +
    + +

    Comparing scene against a value between 0.3 and 0.5 is generally a sane +choice. +

    +
  • +Send even and odd frames to separate outputs, and compose them: +
     
    select=n=2:e='mod(n, 2)+1' [odd][even]; [odd] pad=h=2*ih [tmp]; [tmp][even] overlay=y=h
    +
    +
+ + +

31.7 sendcmd, asendcmd

+ +

Send commands to filters in the filtergraph. +

+

These filters read commands to be sent to other filters in the +filtergraph. +

+

sendcmd must be inserted between two video filters, +asendcmd must be inserted between two audio filters, but apart +from that they act the same way. +

+

The specification of commands can be provided in the filter arguments +with the commands option, or in a file specified by the +filename option. +

+

These filters accept the following options: +

+
commands, c
+

Set the commands to be read and sent to the other filters. +

+
filename, f
+

Set the filename of the commands to be read and sent to the other +filters. +

+
+ + +

31.7.1 Commands syntax

+ +

A commands description consists of a sequence of interval +specifications, comprising a list of commands to be executed when a +particular event related to that interval occurs. The occurring event +is typically the current frame time entering or leaving a given time +interval. +

+

An interval is specified by the following syntax: +

 
START[-END] COMMANDS;
+
+ +

The time interval is specified by the START and END times. +END is optional and defaults to the maximum time. +

+

The current frame time is considered within the specified interval if +it is included in the interval [START, END), that is when +the time is greater or equal to START and is lesser than +END. +

+

COMMANDS consists of a sequence of one or more command +specifications, separated by ",", relating to that interval. The +syntax of a command specification is given by: +

 
[FLAGS] TARGET COMMAND ARG
+
+ +

FLAGS is optional and specifies the type of events relating to +the time interval which enable sending the specified command, and must +be a non-null sequence of identifier flags separated by "+" or "|" and +enclosed between "[" and "]". +

+

The following flags are recognized: +

+
enter
+

The command is sent when the current frame timestamp enters the +specified interval. In other words, the command is sent when the +previous frame timestamp was not in the given interval, and the +current is. +

+
+
leave
+

The command is sent when the current frame timestamp leaves the +specified interval. In other words, the command is sent when the +previous frame timestamp was in the given interval, and the +current is not. +

+
+ +

If FLAGS is not specified, a default value of [enter] is +assumed. +

+

TARGET specifies the target of the command, usually the name of +the filter class or a specific filter instance name. +

+

COMMAND specifies the name of the command for the target filter. +

+

ARG is optional and specifies the optional list of argument for +the given COMMAND. +

+

Between one interval specification and another, whitespaces, or +sequences of characters starting with # until the end of line, +are ignored and can be used to annotate comments. +

+

A simplified BNF description of the commands specification syntax +follows: +

 
COMMAND_FLAG  ::= "enter" | "leave"
+COMMAND_FLAGS ::= COMMAND_FLAG [(+|"|")COMMAND_FLAG]
+COMMAND       ::= ["[" COMMAND_FLAGS "]"] TARGET COMMAND [ARG]
+COMMANDS      ::= COMMAND [,COMMANDS]
+INTERVAL      ::= START[-END] COMMANDS
+INTERVALS     ::= INTERVAL[;INTERVALS]
+
+ + +

31.7.2 Examples

+ +
    +
  • +Specify audio tempo change at second 4: +
     
    asendcmd=c='4.0 atempo tempo 1.5',atempo
    +
    + +
  • +Specify a list of drawtext and hue commands in a file. +
     
    # show text in the interval 5-10
    +5.0-10.0 [enter] drawtext reinit 'fontfile=FreeSerif.ttf:text=hello world',
    +         [leave] drawtext reinit 'fontfile=FreeSerif.ttf:text=';
    +
    +# desaturate the image in the interval 15-20
    +15.0-20.0 [enter] hue s 0,
    +          [enter] drawtext reinit 'fontfile=FreeSerif.ttf:text=nocolor',
    +          [leave] hue s 1,
    +          [leave] drawtext reinit 'fontfile=FreeSerif.ttf:text=color';
    +
    +# apply an exponential saturation fade-out effect, starting from time 25
    +25 [enter] hue s exp(25-t)
    +
    + +

    A filtergraph allowing to read and process the above command list +stored in a file ‘test.cmd’, can be specified with: +

     
    sendcmd=f=test.cmd,drawtext=fontfile=FreeSerif.ttf:text='',hue
    +
    +
+ +

+

+

31.8 setpts, asetpts

+ +

Change the PTS (presentation timestamp) of the input frames. +

+

setpts works on video frames, asetpts on audio frames. +

+

This filter accepts the following options: +

+
+
expr
+

The expression which is evaluated for each frame to construct its timestamp. +

+
+
+ +

The expression is evaluated through the eval API and can contain the following +constants: +

+
+
FRAME_RATE
+

frame rate, only defined for constant frame-rate video +

+
+
PTS
+

the presentation timestamp in input +

+
+
N
+

the count of the input frame for video or the number of consumed samples, +not including the current frame for audio, starting from 0. +

+
+
NB_CONSUMED_SAMPLES
+

the number of consumed samples, not including the current frame (only +audio) +

+
+
NB_SAMPLES, S
+

the number of samples in the current frame (only audio) +

+
+
SAMPLE_RATE, SR
+

audio sample rate +

+
+
STARTPTS
+

the PTS of the first frame +

+
+
STARTT
+

the time in seconds of the first frame +

+
+
INTERLACED
+

tell if the current frame is interlaced +

+
+
T
+

the time in seconds of the current frame +

+
+
POS
+

original position in the file of the frame, or undefined if undefined +for the current frame +

+
+
PREV_INPTS
+

previous input PTS +

+
+
PREV_INT
+

previous input time in seconds +

+
+
PREV_OUTPTS
+

previous output PTS +

+
+
PREV_OUTT
+

previous output time in seconds +

+
+
RTCTIME
+

wallclock (RTC) time in microseconds. This is deprecated, use time(0) +instead. +

+
+
RTCSTART
+

wallclock (RTC) time at the start of the movie in microseconds +

+
+
TB
+

timebase of the input timestamps +

+
+
+ + +

31.8.1 Examples

+ +
    +
  • +Start counting PTS from zero +
     
    setpts=PTS-STARTPTS
    +
    + +
  • +Apply fast motion effect: +
     
    setpts=0.5*PTS
    +
    + +
  • +Apply slow motion effect: +
     
    setpts=2.0*PTS
    +
    + +
  • +Set fixed rate of 25 frames per second: +
     
    setpts=N/(25*TB)
    +
    + +
  • +Set fixed rate 25 fps with some jitter: +
     
    setpts='1/(25*TB) * (N + 0.05 * sin(N*2*PI/25))'
    +
    + +
  • +Apply an offset of 10 seconds to the input PTS: +
     
    setpts=PTS+10/TB
    +
    + +
  • +Generate timestamps from a "live source" and rebase onto the current timebase: +
     
    setpts='(RTCTIME - RTCSTART) / (TB * 1000000)'
    +
    + +
  • +Generate timestamps by counting samples: +
     
    asetpts=N/SR/TB
    +
    + +
+ + +

31.9 settb, asettb

+ +

Set the timebase to use for the output frames timestamps. +It is mainly useful for testing timebase configuration. +

+

This filter accepts the following options: +

+
+
expr, tb
+

The expression which is evaluated into the output timebase. +

+
+
+ +

The value for ‘tb’ is an arithmetic expression representing a +rational. The expression can contain the constants "AVTB" (the default +timebase), "intb" (the input timebase) and "sr" (the sample rate, +audio only). Default value is "intb". +

+ +

31.9.1 Examples

+ +
    +
  • +Set the timebase to 1/25: +
     
    settb=expr=1/25
    +
    + +
  • +Set the timebase to 1/10: +
     
    settb=expr=0.1
    +
    + +
  • +Set the timebase to 1001/1000: +
     
    settb=1+0.001
    +
    + +
  • +Set the timebase to 2*intb: +
     
    settb=2*intb
    +
    + +
  • +Set the default timebase value: +
     
    settb=AVTB
    +
    +
+ + +

31.10 showspectrum

+ +

Convert input audio to a video output, representing the audio frequency +spectrum. +

+

The filter accepts the following options: +

+
+
size, s
+

Specify the video size for the output. For the syntax of this option, check +the "Video size" section in the ffmpeg-utils manual. Default value is +640x512. +

+
+
slide
+

Specify if the spectrum should slide along the window. Default value is +0. +

+
+
mode
+

Specify display mode. +

+

It accepts the following values: +

+
combined
+

all channels are displayed in the same row +

+
separate
+

all channels are displayed in separate rows +

+
+ +

Default value is ‘combined’. +

+
+
color
+

Specify display color mode. +

+

It accepts the following values: +

+
channel
+

each channel is displayed in a separate color +

+
intensity
+

each channel is displayed using the same color scheme +

+
+ +

Default value is ‘channel’. +

+
+
scale
+

Specify scale used for calculating intensity color values. +

+

It accepts the following values: +

+
lin
+

linear +

+
sqrt
+

square root, default +

+
cbrt
+

cubic root +

+
log
+

logarithmic +

+
+ +

Default value is ‘sqrt’. +

+
+
saturation
+

Set saturation modifier for displayed colors. Negative values provide +alternative color scheme. 0 is no saturation at all. +Saturation must be in [-10.0, 10.0] range. +Default value is 1. +

+
+
win_func
+

Set window function. +

+

It accepts the following values: +

+
none
+

No samples pre-processing (do not expect this to be faster) +

+
hann
+

Hann window +

+
hamming
+

Hamming window +

+
blackman
+

Blackman window +

+
+ +

Default value is hann. +

+
+ +

The usage is very similar to the showwaves filter; see the examples in that +section. +

+ +

31.10.1 Examples

+ +
    +
  • +Large window with logarithmic color scaling: +
     
    showspectrum=s=1280x480:scale=log
    +
    + +
  • +Complete example for a colored and sliding spectrum per channel using ffplay: +
     
    ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
    +             [a] showspectrum=mode=separate:color=intensity:slide=1:scale=cbrt [out0]'
    +
    +
+ + +

31.11 showwaves

+ +

Convert input audio to a video output, representing the samples waves. +

+

The filter accepts the following options: +

+
+
size, s
+

Specify the video size for the output. For the syntax of this option, check +the "Video size" section in the ffmpeg-utils manual. Default value +is "600x240". +

+
+
mode
+

Set display mode. +

+

Available values are: +

+
point
+

Draw a point for each sample. +

+
+
line
+

Draw a vertical line for each sample. +

+
+ +

Default value is point. +

+
+
n
+

Set the number of samples which are printed on the same column. A +larger value will decrease the frame rate. Must be a positive +integer. This option can be set only if the value for rate +is not explicitly specified. +

+
+
rate, r
+

Set the (approximate) output frame rate. This is done by setting the +option n. Default value is "25". +

+
+
+ + +

31.11.1 Examples

+ +
    +
  • +Output the input file audio and the corresponding video representation +at the same time: +
     
    amovie=a.mp3,asplit[out0],showwaves[out1]
    +
    + +
  • +Create a synthetic signal and show it with showwaves, forcing a +frame rate of 30 frames per second: +
     
    aevalsrc=sin(1*2*PI*t)*sin(880*2*PI*t):cos(2*PI*200*t),asplit[out0],showwaves=r=30[out1]
    +
    +
+ + +

31.12 split, asplit

+ +

Split input into several identical outputs. +

+

asplit works with audio input, split with video. +

+

The filter accepts a single parameter which specifies the number of outputs. If +unspecified, it defaults to 2. +

+ +

31.12.1 Examples

+ +
    +
  • +Create two separate outputs from the same input: +
     
    [in] split [out0][out1]
    +
    + +
  • +To create 3 or more outputs, you need to specify the number of +outputs, like in: +
     
    [in] asplit=3 [out0][out1][out2]
    +
    + +
  • +Create two separate outputs from the same input, one cropped and +one padded: +
     
    [in] split [splitout1][splitout2];
    +[splitout1] crop=100:100:0:0    [cropout];
    +[splitout2] pad=200:200:100:100 [padout];
    +
    + +
  • +Create 5 copies of the input audio with ffmpeg: +
     
    ffmpeg -i INPUT -filter_complex asplit=5 OUTPUT
    +
    +
+ + +

31.13 zmq, azmq

+ +

Receive commands sent through a libzmq client, and forward them to +filters in the filtergraph. +

+

zmq and azmq work as a pass-through filters. zmq +must be inserted between two video filters, azmq between two +audio filters. +

+

To enable these filters you need to install the libzmq library and +headers and configure FFmpeg with --enable-libzmq. +

+

For more information about libzmq see: +http://www.zeromq.org/ +

+

The zmq and azmq filters work as a libzmq server, which +receives messages sent through a network interface defined by the +‘bind_address’ option. +

+

The received message must be in the form: +

 
TARGET COMMAND [ARG]
+
+ +

TARGET specifies the target of the command, usually the name of +the filter class or a specific filter instance name. +

+

COMMAND specifies the name of the command for the target filter. +

+

ARG is optional and specifies the optional argument list for the +given COMMAND. +

+

Upon reception, the message is processed and the corresponding command +is injected into the filtergraph. Depending on the result, the filter +will send a reply to the client, adopting the format: +

 
ERROR_CODE ERROR_REASON
+MESSAGE
+
+ +

MESSAGE is optional. +

+ +

31.13.1 Examples

+ +

Look at ‘tools/zmqsend’ for an example of a zmq client which can +be used to send commands processed by these filters. +

+

Consider the following filtergraph generated by ffplay +

 
ffplay -dumpgraph 1 -f lavfi "
+color=s=100x100:c=red  [l];
+color=s=100x100:c=blue [r];
+nullsrc=s=200x100, zmq [bg];
+[bg][l]   overlay      [bg+l];
+[bg+l][r] overlay=x=100 "
+
+ +

To change the color of the left side of the video, the following +command can be used: +

 
echo Parsed_color_0 c yellow | tools/zmqsend
+
+ +

To change the right side: +

 
echo Parsed_color_1 c pink | tools/zmqsend
+
+ + + +

32. Multimedia Sources

+ +

Below is a description of the currently available multimedia sources. +

+ +

32.1 amovie

+ +

This is the same as movie source, except it selects an audio +stream by default. +

+

+

+

32.2 movie

+ +

Read audio and/or video stream(s) from a movie container. +

+

This filter accepts the following options: +

+
+
filename
+

The name of the resource to read (not necessarily a file but also a device or a +stream accessed through some protocol). +

+
+
format_name, f
+

Specifies the format assumed for the movie to read, and can be either +the name of a container or an input device. If not specified the +format is guessed from movie_name or by probing. +

+
+
seek_point, sp
+

Specifies the seek point in seconds, the frames will be output +starting from this seek point, the parameter is evaluated with +av_strtod so the numerical value may be suffixed by an IS +postfix. Default value is "0". +

+
+
streams, s
+

Specifies the streams to read. Several streams can be specified, +separated by "+". The source will then have as many outputs, in the +same order. The syntax is explained in the “Stream specifiers” +section in the ffmpeg manual. Two special names, "dv" and "da" specify +respectively the default (best suited) video and audio stream. Default +is "dv", or "da" if the filter is called as "amovie". +

+
+
stream_index, si
+

Specifies the index of the video stream to read. If the value is -1, +the best suited video stream will be automatically selected. Default +value is "-1". Deprecated. If the filter is called "amovie", it will select +audio instead of video. +

+
+
loop
+

Specifies how many times to read the stream in sequence. +If the value is less than 1, the stream will be read again and again. +Default value is "1". +

+

Note that when the movie is looped the source timestamps are not +changed, so it will generate non monotonically increasing timestamps. +

+
+ +

This filter allows one to overlay a second video on top of main input of +a filtergraph as shown in this graph: +

 
input -----------> deltapts0 --> overlay --> output
+                                    ^
+                                    |
+movie --> scale--> deltapts1 -------+
+
+ + +

32.2.1 Examples

+ +
    +
  • +Skip 3.2 seconds from the start of the avi file in.avi, and overlay it +on top of the input labelled as "in": +
     
    movie=in.avi:seek_point=3.2, scale=180:-1, setpts=PTS-STARTPTS [over];
    +[in] setpts=PTS-STARTPTS [main];
    +[main][over] overlay=16:16 [out]
    +
    + +
  • +Read from a video4linux2 device, and overlay it on top of the input +labelled as "in": +
     
    movie=/dev/video0:f=video4linux2, scale=180:-1, setpts=PTS-STARTPTS [over];
    +[in] setpts=PTS-STARTPTS [main];
    +[main][over] overlay=16:16 [out]
    +
    + +
  • +Read the first video stream and the audio stream with id 0x81 from +dvd.vob; the video is connected to the pad named "video" and the audio is +connected to the pad named "audio": +
     
    movie=dvd.vob:s=v:0+#0x81 [video] [audio]
    +
    +
+ + + +

33. See Also

+ +

ffplay, +ffmpeg, ffprobe, ffserver, +ffmpeg-utils, +ffmpeg-scaler, +ffmpeg-resampler, +ffmpeg-codecs, +ffmpeg-bitstream-filters, +ffmpeg-formats, +ffmpeg-devices, +ffmpeg-protocols, +ffmpeg-filters +

+ + +

34. Authors

+ +

The FFmpeg developers. +

+

For details about the authorship, see the Git history of the project +(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command +git log in the FFmpeg source directory, or browsing the +online repository at http://source.ffmpeg.org. +

+

Maintainers for the specific components are listed in the file +‘MAINTAINERS’ in the source code tree. +

+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/ffplay.html b/dependencies64/ffmpeg/doc/ffplay.html new file mode 100644 index 000000000..c44c5997c --- /dev/null +++ b/dependencies64/ffmpeg/doc/ffplay.html @@ -0,0 +1,688 @@ + + + + + +FFmpeg documentation : ffplay + + + + + + + + + + +
+
+ + +

ffplay Documentation

+ + +

Table of Contents

+ + + +

1. Synopsis

+ +

ffplay [options] [‘input_file’] +

+ +

2. Description

+ +

FFplay is a very simple and portable media player using the FFmpeg +libraries and the SDL library. It is mostly used as a testbed for the +various FFmpeg APIs. +

+ +

3. Options

+ +

All the numerical options, if not specified otherwise, accept a string +representing a number as input, which may be followed by one of the SI +unit prefixes, for example: ’K’, ’M’, or ’G’. +

+

If ’i’ is appended to the SI unit prefix, the complete prefix will be +interpreted as a unit prefix for binary multiplies, which are based on +powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit +prefix multiplies the value by 8. This allows using, for example: +’KB’, ’MiB’, ’G’ and ’B’ as number suffixes. +

+

Options which do not take arguments are boolean options, and set the +corresponding value to true. They can be set to false by prefixing +the option name with "no". For example using "-nofoo" +will set the boolean option with name "foo" to false. +

+

+

+

3.1 Stream specifiers

+

Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers +are used to precisely specify which stream(s) a given option belongs to. +

+

A stream specifier is a string generally appended to the option name and +separated from it by a colon. E.g. -codec:a:1 ac3 contains the +a:1 stream specifier, which matches the second audio stream. Therefore, it +would select the ac3 codec for the second audio stream. +

+

A stream specifier can match several streams, so that the option is applied to all +of them. E.g. the stream specifier in -b:a 128k matches all audio +streams. +

+

An empty stream specifier matches all streams. For example, -codec copy +or -codec: copy would copy all the streams without reencoding. +

+

Possible forms of stream specifiers are: +

+
stream_index
+

Matches the stream with this index. E.g. -threads:1 4 would set the +thread count for the second stream to 4. +

+
stream_type[:stream_index]
+

stream_type is one of following: ’v’ for video, ’a’ for audio, ’s’ for subtitle, +’d’ for data, and ’t’ for attachments. If stream_index is given, then it matches +stream number stream_index of this type. Otherwise, it matches all +streams of this type. +

+
p:program_id[:stream_index]
+

If stream_index is given, then it matches the stream with number stream_index +in the program with the id program_id. Otherwise, it matches all streams in the +program. +

+
#stream_id
+

Matches the stream by a format-specific ID. +

+
+ + +

3.2 Generic options

+ +

These options are shared amongst the ff* tools. +

+
+
-L
+

Show license. +

+
+
-h, -?, -help, --help [arg]
+

Show help. An optional parameter may be specified to print help about a specific +item. If no argument is specified, only basic (non advanced) tool +options are shown. +

+

Possible values of arg are: +

+
long
+

Print advanced tool options in addition to the basic tool options. +

+
+
full
+

Print complete list of options, including shared and private options +for encoders, decoders, demuxers, muxers, filters, etc. +

+
+
decoder=decoder_name
+

Print detailed information about the decoder named decoder_name. Use the +‘-decoders’ option to get a list of all decoders. +

+
+
encoder=encoder_name
+

Print detailed information about the encoder named encoder_name. Use the +‘-encoders’ option to get a list of all encoders. +

+
+
demuxer=demuxer_name
+

Print detailed information about the demuxer named demuxer_name. Use the +‘-formats’ option to get a list of all demuxers and muxers. +

+
+
muxer=muxer_name
+

Print detailed information about the muxer named muxer_name. Use the +‘-formats’ option to get a list of all muxers and demuxers. +

+
+
filter=filter_name
+

Print detailed information about the filter name filter_name. Use the +‘-filters’ option to get a list of all filters. +

+
+ +
+
-version
+

Show version. +

+
+
-formats
+

Show available formats. +

+
+
-codecs
+

Show all codecs known to libavcodec. +

+

Note that the term ’codec’ is used throughout this documentation as a shortcut +for what is more correctly called a media bitstream format. +

+
+
-decoders
+

Show available decoders. +

+
+
-encoders
+

Show all available encoders. +

+
+
-bsfs
+

Show available bitstream filters. +

+
+
-protocols
+

Show available protocols. +

+
+
-filters
+

Show available libavfilter filters. +

+
+
-pix_fmts
+

Show available pixel formats. +

+
+
-sample_fmts
+

Show available sample formats. +

+
+
-layouts
+

Show channel names and standard channel layouts. +

+
+
-colors
+

Show recognized color names. +

+
+
-loglevel [repeat+]loglevel | -v [repeat+]loglevel
+

Set the logging level used by the library. +Adding "repeat+" indicates that repeated log output should not be compressed +to the first line and the "Last message repeated n times" line will be +omitted. "repeat" can also be used alone. +If "repeat" is used alone, and with no prior loglevel set, the default +loglevel will be used. If multiple loglevel parameters are given, using +’repeat’ will not change the loglevel. +loglevel is a number or a string containing one of the following values: +

+
quiet
+

Show nothing at all; be silent. +

+
panic
+

Only show fatal errors which could lead the process to crash, such as +an assert failure. This is not currently used for anything. +

+
fatal
+

Only show fatal errors. These are errors after which the process absolutely +cannot continue. +

+
error
+

Show all errors, including ones which can be recovered from. +

+
warning
+

Show all warnings and errors. Any message related to possibly +incorrect or unexpected events will be shown. +

+
info
+

Show informative messages during processing. This is in addition to +warnings and errors. This is the default value. +

+
verbose
+

Same as info, except more verbose. +

+
debug
+

Show everything, including debugging information. +

+
+ +

By default the program logs to stderr, if coloring is supported by the +terminal, colors are used to mark errors and warnings. Log coloring +can be disabled setting the environment variable +AV_LOG_FORCE_NOCOLOR or NO_COLOR, or can be forced setting +the environment variable AV_LOG_FORCE_COLOR. +The use of the environment variable NO_COLOR is deprecated and +will be dropped in a following FFmpeg version. +

+
+
-report
+

Dump full command line and console output to a file named +program-YYYYMMDD-HHMMSS.log in the current +directory. +This file can be useful for bug reports. +It also implies -loglevel verbose. +

+

Setting the environment variable FFREPORT to any value has the +same effect. If the value is a ’:’-separated key=value sequence, these +options will affect the report; options values must be escaped if they +contain special characters or the options delimiter ’:’ (see the +“Quoting and escaping” section in the ffmpeg-utils manual). The +following option is recognized: +

+
file
+

set the file name to use for the report; %p is expanded to the name +of the program, %t is expanded to a timestamp, %% is expanded +to a plain % +

+
+ +

Errors in parsing the environment variable are not fatal, and will not +appear in the report. +

+
+
-hide_banner
+

Suppress printing banner. +

+

All FFmpeg tools will normally show a copyright notice, build options +and library versions. This option can be used to suppress printing +this information. +

+
+
-cpuflags flags (global)
+

Allows setting and clearing cpu flags. This option is intended +for testing. Do not use it unless you know what you’re doing. +

 
ffmpeg -cpuflags -sse+mmx ...
+ffmpeg -cpuflags mmx ...
+ffmpeg -cpuflags 0 ...
+
+

Possible flags for this option are: +

+
x86
+
+
mmx
+
mmxext
+
sse
+
sse2
+
sse2slow
+
sse3
+
sse3slow
+
ssse3
+
atom
+
sse4.1
+
sse4.2
+
avx
+
xop
+
fma4
+
3dnow
+
3dnowext
+
cmov
+
+
+
ARM
+
+
armv5te
+
armv6
+
armv6t2
+
vfp
+
vfpv3
+
neon
+
+
+
PowerPC
+
+
altivec
+
+
+
Specific Processors
+
+
pentium2
+
pentium3
+
pentium4
+
k6
+
k62
+
athlon
+
athlonxp
+
k8
+
+
+
+ +
+
-opencl_bench
+

Benchmark all available OpenCL devices and show the results. This option +is only available when FFmpeg has been compiled with --enable-opencl. +

+
+
-opencl_options options (global)
+

Set OpenCL environment options. This option is only available when +FFmpeg has been compiled with --enable-opencl. +

+

options must be a list of key=value option pairs +separated by ’:’. See the “OpenCL Options” section in the +ffmpeg-utils manual for the list of supported options. +

+
+ + +

3.3 AVOptions

+ +

These options are provided directly by the libavformat, libavdevice and +libavcodec libraries. To see the list of available AVOptions, use the +‘-help’ option. They are separated into two categories: +

+
generic
+

These options can be set for any container, codec or device. Generic options +are listed under AVFormatContext options for containers/devices and under +AVCodecContext options for codecs. +

+
private
+

These options are specific to the given container, device or codec. Private +options are listed under their corresponding containers/devices/codecs. +

+
+ +

For example to write an ID3v2.3 header instead of a default ID3v2.4 to +an MP3 file, use the ‘id3v2_version’ private option of the MP3 +muxer: +

 
ffmpeg -i input.flac -id3v2_version 3 out.mp3
+
+ +

All codec AVOptions are per-stream, and thus a stream specifier +should be attached to them. +

+

Note: the ‘-nooption’ syntax cannot be used for boolean +AVOptions, use ‘-option 0’/‘-option 1’. +

+

Note: the old undocumented way of specifying per-stream AVOptions by +prepending v/a/s to the options name is now obsolete and will be +removed soon. +

+ +

3.4 Main options

+ +
+
-x width
+

Force displayed width. +

+
-y height
+

Force displayed height. +

+
-s size
+

Set frame size (WxH or abbreviation), needed for videos which do +not contain a header with the frame size like raw YUV. This option +has been deprecated in favor of private options, try -video_size. +

+
-an
+

Disable audio. +

+
-vn
+

Disable video. +

+
-ss pos
+

Seek to a given position in seconds. +

+
-t duration
+

play <duration> seconds of audio/video +

+
-bytes
+

Seek by bytes. +

+
-nodisp
+

Disable graphical display. +

+
-f fmt
+

Force format. +

+
-window_title title
+

Set window title (default is the input filename). +

+
-loop number
+

Loops movie playback <number> times. 0 means forever. +

+
-showmode mode
+

Set the show mode to use. +Available values for mode are: +

+
0, video
+

show video +

+
1, waves
+

show audio waves +

+
2, rdft
+

show audio frequency band using RDFT ((Inverse) Real Discrete Fourier Transform) +

+
+ +

Default value is "video", if video is not present or cannot be played +"rdft" is automatically selected. +

+

You can interactively cycle through the available show modes by +pressing the key <w>. +

+
+
-vf filtergraph
+

Create the filtergraph specified by filtergraph and use it to +filter the video stream. +

+

filtergraph is a description of the filtergraph to apply to +the stream, and must have a single video input and a single video +output. In the filtergraph, the input is associated to the label +in, and the output to the label out. See the +ffmpeg-filters manual for more information about the filtergraph +syntax. +

+
+
-af filtergraph
+

filtergraph is a description of the filtergraph to apply to +the input audio. +Use the option "-filters" to show all the available filters (including +sources and sinks). +

+
+
-i input_file
+

Read input_file. +

+
+ + +

3.5 Advanced options

+
+
-pix_fmt format
+

Set pixel format. +This option has been deprecated in favor of private options, try -pixel_format. +

+
+
-stats
+

Print several playback statistics, in particular show the stream +duration, the codec parameters, the current position in the stream and +the audio/video synchronisation drift. It is on by default, to +explicitly disable it you need to specify -nostats. +

+
+
-bug
+

Work around bugs. +

+
-fast
+

Non-spec-compliant optimizations. +

+
-genpts
+

Generate pts. +

+
-rtp_tcp
+

Force RTP/TCP protocol usage instead of RTP/UDP. It is only meaningful +if you are streaming with the RTSP protocol. +

+
-sync type
+

Set the master clock to audio (type=audio), video +(type=video) or external (type=ext). Default is audio. The +master clock is used to control audio-video synchronization. Most media +players use audio as master clock, but in some cases (streaming or high +quality broadcast) it is necessary to change that. This option is mainly +used for debugging purposes. +

+
-threads count
+

Set the thread count. +

+
-ast audio_stream_number
+

Select the desired audio stream number, counting from 0. The number +refers to the list of all the input audio streams. If it is greater +than the number of audio streams minus one, then the last one is +selected, if it is negative the audio playback is disabled. +

+
-vst video_stream_number
+

Select the desired video stream number, counting from 0. The number +refers to the list of all the input video streams. If it is greater +than the number of video streams minus one, then the last one is +selected, if it is negative the video playback is disabled. +

+
-sst subtitle_stream_number
+

Select the desired subtitle stream number, counting from 0. The number +refers to the list of all the input subtitle streams. If it is greater +than the number of subtitle streams minus one, then the last one is +selected, if it is negative the subtitle rendering is disabled. +

+
-autoexit
+

Exit when video is done playing. +

+
-exitonkeydown
+

Exit if any key is pressed. +

+
-exitonmousedown
+

Exit if any mouse button is pressed. +

+
+
-codec:media_specifier codec_name
+

Force a specific decoder implementation for the stream identified by +media_specifier, which can assume the values a (audio), +v (video), and s subtitle. +

+
+
-acodec codec_name
+

Force a specific audio decoder. +

+
+
-vcodec codec_name
+

Force a specific video decoder. +

+
+
-scodec codec_name
+

Force a specific subtitle decoder. +

+
+ + +

3.6 While playing

+ +
+
<q, ESC>
+

Quit. +

+
+
<f>
+

Toggle full screen. +

+
+
<p, SPC>
+

Pause. +

+
+
<a>
+

Cycle audio channel in the current program. +

+
+
<v>
+

Cycle video channel. +

+
+
<t>
+

Cycle subtitle channel in the current program. +

+
+
<c>
+

Cycle program. +

+
+
<w>
+

Show audio waves. +

+
+
<s>
+

Step to the next frame. +

+

Pause if the stream is not already paused, step to the next video +frame, and pause. +

+
+
<left/right>
+

Seek backward/forward 10 seconds. +

+
+
<down/up>
+

Seek backward/forward 1 minute. +

+
+
<page down/page up>
+

Seek to the previous/next chapter. +or if there are no chapters +Seek backward/forward 10 minutes. +

+
+
<mouse click>
+

Seek to percentage in file corresponding to fraction of width. +

+
+
+ + + + +

4. See Also

+ +

ffmpeg-all, +ffmpeg, ffprobe, ffserver, +ffmpeg-utils, +ffmpeg-scaler, +ffmpeg-resampler, +ffmpeg-codecs, +ffmpeg-bitstream-filters, +ffmpeg-formats, +ffmpeg-devices, +ffmpeg-protocols, +ffmpeg-filters +

+ + +

5. Authors

+ +

The FFmpeg developers. +

+

For details about the authorship, see the Git history of the project +(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command +git log in the FFmpeg source directory, or browsing the +online repository at http://source.ffmpeg.org. +

+

Maintainers for the specific components are listed in the file +‘MAINTAINERS’ in the source code tree. +

+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/ffprobe-all.html b/dependencies64/ffmpeg/doc/ffprobe-all.html new file mode 100644 index 000000000..83de20180 --- /dev/null +++ b/dependencies64/ffmpeg/doc/ffprobe-all.html @@ -0,0 +1,19328 @@ + + + + + +FFmpeg documentation : ffprobe + + + + + + + + + + +
+
+ + +

ffprobe Documentation

+ + +

Table of Contents

+
+ + +
+ + +

1. Synopsis

+ +

ffprobe [options] [‘input_file’] +

+ +

2. Description

+ +

ffprobe gathers information from multimedia streams and prints it in +human- and machine-readable fashion. +

+

For example it can be used to check the format of the container used +by a multimedia stream and the format and type of each media stream +contained in it. +

+

If a filename is specified in input, ffprobe will try to open and +probe the file content. If the file cannot be opened or recognized as +a multimedia file, a positive exit code is returned. +

+

ffprobe may be employed both as a standalone application or in +combination with a textual filter, which may perform more +sophisticated processing, e.g. statistical processing or plotting. +

+

Options are used to list some of the formats supported by ffprobe or +for specifying which information to display, and for setting how +ffprobe will show it. +

+

ffprobe output is designed to be easily parsable by a textual filter, +and consists of one or more sections of a form defined by the selected +writer, which is specified by the ‘print_format’ option. +

+

Sections may contain other nested sections, and are identified by a +name (which may be shared by other sections), and an unique +name. See the output of ‘sections’. +

+

Metadata tags stored in the container or in the streams are recognized +and printed in the corresponding "FORMAT", "STREAM" or "PROGRAM_STREAM" +section. +

+ + +

3. Options

+ +

All the numerical options, if not specified otherwise, accept a string +representing a number as input, which may be followed by one of the SI +unit prefixes, for example: ’K’, ’M’, or ’G’. +

+

If ’i’ is appended to the SI unit prefix, the complete prefix will be +interpreted as a unit prefix for binary multiplies, which are based on +powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit +prefix multiplies the value by 8. This allows using, for example: +’KB’, ’MiB’, ’G’ and ’B’ as number suffixes. +

+

Options which do not take arguments are boolean options, and set the +corresponding value to true. They can be set to false by prefixing +the option name with "no". For example using "-nofoo" +will set the boolean option with name "foo" to false. +

+

+

+

3.1 Stream specifiers

+

Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers +are used to precisely specify which stream(s) a given option belongs to. +

+

A stream specifier is a string generally appended to the option name and +separated from it by a colon. E.g. -codec:a:1 ac3 contains the +a:1 stream specifier, which matches the second audio stream. Therefore, it +would select the ac3 codec for the second audio stream. +

+

A stream specifier can match several streams, so that the option is applied to all +of them. E.g. the stream specifier in -b:a 128k matches all audio +streams. +

+

An empty stream specifier matches all streams. For example, -codec copy +or -codec: copy would copy all the streams without reencoding. +

+

Possible forms of stream specifiers are: +

+
stream_index
+

Matches the stream with this index. E.g. -threads:1 4 would set the +thread count for the second stream to 4. +

+
stream_type[:stream_index]
+

stream_type is one of following: ’v’ for video, ’a’ for audio, ’s’ for subtitle, +’d’ for data, and ’t’ for attachments. If stream_index is given, then it matches +stream number stream_index of this type. Otherwise, it matches all +streams of this type. +

+
p:program_id[:stream_index]
+

If stream_index is given, then it matches the stream with number stream_index +in the program with the id program_id. Otherwise, it matches all streams in the +program. +

+
#stream_id
+

Matches the stream by a format-specific ID. +

+
+ + +

3.2 Generic options

+ +

These options are shared amongst the ff* tools. +

+
+
-L
+

Show license. +

+
+
-h, -?, -help, --help [arg]
+

Show help. An optional parameter may be specified to print help about a specific +item. If no argument is specified, only basic (non advanced) tool +options are shown. +

+

Possible values of arg are: +

+
long
+

Print advanced tool options in addition to the basic tool options. +

+
+
full
+

Print complete list of options, including shared and private options +for encoders, decoders, demuxers, muxers, filters, etc. +

+
+
decoder=decoder_name
+

Print detailed information about the decoder named decoder_name. Use the +‘-decoders’ option to get a list of all decoders. +

+
+
encoder=encoder_name
+

Print detailed information about the encoder named encoder_name. Use the +‘-encoders’ option to get a list of all encoders. +

+
+
demuxer=demuxer_name
+

Print detailed information about the demuxer named demuxer_name. Use the +‘-formats’ option to get a list of all demuxers and muxers. +

+
+
muxer=muxer_name
+

Print detailed information about the muxer named muxer_name. Use the +‘-formats’ option to get a list of all muxers and demuxers. +

+
+
filter=filter_name
+

Print detailed information about the filter named filter_name. Use the ‘-filters’ option to get a list of all filters.

+
+ +
+
-version
+

Show version. +

+
+
-formats
+

Show available formats. +

+
+
-codecs
+

Show all codecs known to libavcodec. +

+

Note that the term ’codec’ is used throughout this documentation as a shortcut +for what is more correctly called a media bitstream format. +

+
+
-decoders
+

Show available decoders. +

+
+
-encoders
+

Show all available encoders. +

+
+
-bsfs
+

Show available bitstream filters. +

+
+
-protocols
+

Show available protocols. +

+
+
-filters
+

Show available libavfilter filters. +

+
+
-pix_fmts
+

Show available pixel formats. +

+
+
-sample_fmts
+

Show available sample formats. +

+
+
-layouts
+

Show channel names and standard channel layouts. +

+
+
-colors
+

Show recognized color names. +

+
+
-loglevel [repeat+]loglevel | -v [repeat+]loglevel
+

Set the logging level used by the library. +Adding "repeat+" indicates that repeated log output should not be compressed +to the first line and the "Last message repeated n times" line will be +omitted. "repeat" can also be used alone. +If "repeat" is used alone, and with no prior loglevel set, the default +loglevel will be used. If multiple loglevel parameters are given, using +’repeat’ will not change the loglevel. +loglevel is a number or a string containing one of the following values: +

+
quiet
+

Show nothing at all; be silent. +

+
panic
+

Only show fatal errors which could lead the process to crash, such as an assert failure. This is not currently used for anything.

+
fatal
+

Only show fatal errors. These are errors after which the process absolutely cannot continue.

+
error
+

Show all errors, including ones which can be recovered from. +

+
warning
+

Show all warnings and errors. Any message related to possibly +incorrect or unexpected events will be shown. +

+
info
+

Show informative messages during processing. This is in addition to +warnings and errors. This is the default value. +

+
verbose
+

Same as info, except more verbose. +

+
debug
+

Show everything, including debugging information. +

+
+ +

By default the program logs to stderr, if coloring is supported by the +terminal, colors are used to mark errors and warnings. Log coloring +can be disabled setting the environment variable +AV_LOG_FORCE_NOCOLOR or NO_COLOR, or can be forced setting +the environment variable AV_LOG_FORCE_COLOR. +The use of the environment variable NO_COLOR is deprecated and +will be dropped in a following FFmpeg version. +

+
+
-report
+

Dump full command line and console output to a file named +program-YYYYMMDD-HHMMSS.log in the current +directory. +This file can be useful for bug reports. +It also implies -loglevel verbose. +

+

Setting the environment variable FFREPORT to any value has the +same effect. If the value is a ’:’-separated key=value sequence, these +options will affect the report; options values must be escaped if they +contain special characters or the options delimiter ’:’ (see the +“Quoting and escaping” section in the ffmpeg-utils manual). The +following option is recognized: +

+
file
+

set the file name to use for the report; %p is expanded to the name +of the program, %t is expanded to a timestamp, %% is expanded +to a plain % +

+
+ +

Errors in parsing the environment variable are not fatal, and will not +appear in the report. +

+
+
-hide_banner
+

Suppress printing banner. +

+

All FFmpeg tools will normally show a copyright notice, build options +and library versions. This option can be used to suppress printing +this information. +

+
+
-cpuflags flags (global)
+

Allows setting and clearing cpu flags. This option is intended +for testing. Do not use it unless you know what you’re doing. +

 
ffmpeg -cpuflags -sse+mmx ...
+ffmpeg -cpuflags mmx ...
+ffmpeg -cpuflags 0 ...
+
+

Possible flags for this option are: +

+
x86
+
+
mmx
+
mmxext
+
sse
+
sse2
+
sse2slow
+
sse3
+
sse3slow
+
ssse3
+
atom
+
sse4.1
+
sse4.2
+
avx
+
xop
+
fma4
+
3dnow
+
3dnowext
+
cmov
+
+
+
ARM
+
+
armv5te
+
armv6
+
armv6t2
+
vfp
+
vfpv3
+
neon
+
+
+
PowerPC
+
+
altivec
+
+
+
Specific Processors
+
+
pentium2
+
pentium3
+
pentium4
+
k6
+
k62
+
athlon
+
athlonxp
+
k8
+
+
+
+ +
+
-opencl_bench
+

Benchmark all available OpenCL devices and show the results. This option +is only available when FFmpeg has been compiled with --enable-opencl. +

+
+
-opencl_options options (global)
+

Set OpenCL environment options. This option is only available when +FFmpeg has been compiled with --enable-opencl. +

+

options must be a list of key=value option pairs +separated by ’:’. See the “OpenCL Options” section in the +ffmpeg-utils manual for the list of supported options. +

+
+ + +

3.3 AVOptions

+ +

These options are provided directly by the libavformat, libavdevice and +libavcodec libraries. To see the list of available AVOptions, use the +‘-help’ option. They are separated into two categories: +

+
generic
+

These options can be set for any container, codec or device. Generic options +are listed under AVFormatContext options for containers/devices and under +AVCodecContext options for codecs. +

+
private
+

These options are specific to the given container, device or codec. Private +options are listed under their corresponding containers/devices/codecs. +

+
+ +

For example to write an ID3v2.3 header instead of a default ID3v2.4 to +an MP3 file, use the ‘id3v2_version’ private option of the MP3 +muxer: +

 
ffmpeg -i input.flac -id3v2_version 3 out.mp3
+
+ +

All codec AVOptions are per-stream, and thus a stream specifier +should be attached to them. +

+

Note: the ‘-nooption’ syntax cannot be used for boolean +AVOptions, use ‘-option 0’/‘-option 1’. +

+

Note: the old undocumented way of specifying per-stream AVOptions by +prepending v/a/s to the options name is now obsolete and will be +removed soon. +

+ +

3.4 Main options

+ +
+
-f format
+

Force format to use. +

+
+
-unit
+

Show the unit of the displayed values. +

+
+
-prefix
+

Use SI prefixes for the displayed values. +Unless the "-byte_binary_prefix" option is used all the prefixes +are decimal. +

+
+
-byte_binary_prefix
+

Force the use of binary prefixes for byte values. +

+
+
-sexagesimal
+

Use sexagesimal format HH:MM:SS.MICROSECONDS for time values. +

+
+
-pretty
+

Prettify the format of the displayed values, it corresponds to the +options "-unit -prefix -byte_binary_prefix -sexagesimal". +

+
+
-of, -print_format writer_name[=writer_options]
+

Set the output printing format. +

+

writer_name specifies the name of the writer, and +writer_options specifies the options to be passed to the writer. +

+

For example for printing the output in JSON format, specify: +

 
-print_format json
+
+ +

For more details on the available output printing formats, see the +Writers section below. +

+
+
-sections
+

Print sections structure and section information, and exit. The output +is not meant to be parsed by a machine. +

+
+
-select_streams stream_specifier
+

Select only the streams specified by stream_specifier. This +option affects only the options related to streams +(e.g. show_streams, show_packets, etc.). +

+

For example to show only audio streams, you can use the command: +

 
ffprobe -show_streams -select_streams a INPUT
+
+ +

To show only video packets belonging to the video stream with index 1: +

 
ffprobe -show_packets -select_streams v:1 INPUT
+
+ +
+
-show_data
+

Show payload data, as a hexadecimal and ASCII dump. Coupled with +‘-show_packets’, it will dump the packets’ data. Coupled with +‘-show_streams’, it will dump the codec extradata. +

+

The dump is printed as the "data" field. It may contain newlines. +

+
+
-show_error
+

Show information about the error found when trying to probe the input. +

+

The error information is printed within a section with name "ERROR". +

+
+
-show_format
+

Show information about the container format of the input multimedia +stream. +

+

All the container format information is printed within a section with +name "FORMAT". +

+
+
-show_format_entry name
+

Like ‘-show_format’, but only prints the specified entry of the +container format information, rather than all. This option may be given more +than once, then all specified entries will be shown. +

+

This option is deprecated, use show_entries instead. +

+
+
-show_entries section_entries
+

Set list of entries to show. +

+

Entries are specified according to the following +syntax. section_entries contains a list of section entries +separated by :. Each section entry is composed by a section +name (or unique name), optionally followed by a list of entries local +to that section, separated by ,. +

+

If section name is specified but is followed by no =, all +entries are printed to output, together with all the contained +sections. Otherwise only the entries specified in the local section +entries list are printed. In particular, if = is specified but +the list of local entries is empty, then no entries will be shown for +that section. +

+

Note that the order of specification of the local section entries is +not honored in the output, and the usual display order will be +retained. +

+

The formal syntax is given by: +

 
LOCAL_SECTION_ENTRIES ::= SECTION_ENTRY_NAME[,LOCAL_SECTION_ENTRIES]
+SECTION_ENTRY         ::= SECTION_NAME[=[LOCAL_SECTION_ENTRIES]]
+SECTION_ENTRIES       ::= SECTION_ENTRY[:SECTION_ENTRIES]
+
+ +

For example, to show only the index and type of each stream, and the PTS +time, duration time, and stream index of the packets, you can specify +the argument: +

 
packet=pts_time,duration_time,stream_index : stream=index,codec_type
+
+ +

To show all the entries in the section "format", but only the codec +type in the section "stream", specify the argument: +

 
format : stream=codec_type
+
+ +

To show all the tags in the stream and format sections: +

 
format_tags : format_tags
+
+ +

To show only the title tag (if available) in the stream +sections: +

 
stream_tags=title
+
+ +
+
-show_packets
+

Show information about each packet contained in the input multimedia +stream. +

+

The information for each single packet is printed within a dedicated +section with name "PACKET". +

+
+
-show_frames
+

Show information about each frame and subtitle contained in the input +multimedia stream. +

+

The information for each single frame is printed within a dedicated +section with name "FRAME" or "SUBTITLE". +

+
+
-show_streams
+

Show information about each media stream contained in the input +multimedia stream. +

+

Each media stream information is printed within a dedicated section +with name "STREAM". +

+
+
-show_programs
+

Show information about programs and their streams contained in the input +multimedia stream. +

+

Each media stream information is printed within a dedicated section +with name "PROGRAM_STREAM". +

+
+
-show_chapters
+

Show information about chapters stored in the format. +

+

Each chapter is printed within a dedicated section with name "CHAPTER". +

+
+
-count_frames
+

Count the number of frames per stream and report it in the +corresponding stream section. +

+
+
-count_packets
+

Count the number of packets per stream and report it in the +corresponding stream section. +

+
+
-read_intervals read_intervals
+
+

Read only the specified intervals. read_intervals must be a +sequence of interval specifications separated by ",". +ffprobe will seek to the interval starting point, and will +continue reading from that. +

+

Each interval is specified by two optional parts, separated by "%". +

+

The first part specifies the interval start position. It is interpreted as an absolute position, or as a relative offset from the current position if it is preceded by the "+" character. If this first part is not specified, no seeking will be performed when reading this interval.

+

The second part specifies the interval end position. It is interpreted +as an absolute position, or as a relative offset from the current +position if it is preceded by the "+" character. If the offset +specification starts with "#", it is interpreted as the number of +packets to read (not including the flushing packets) from the interval +start. If no second part is specified, the program will read until the +end of the input. +

+

Note that seeking is not accurate, thus the actual interval start +point may be different from the specified position. Also, when an +interval duration is specified, the absolute end time will be computed +by adding the duration to the interval start point found by seeking +the file, rather than to the specified start value. +

+

The formal syntax is given by: +

 
INTERVAL  ::= [START|+START_OFFSET][%[END|+END_OFFSET]]
+INTERVALS ::= INTERVAL[,INTERVALS]
+
+ +

A few examples follow. +

    +
  • +Seek to time 10, read packets until 20 seconds after the found seek +point, then seek to position 01:30 (1 minute and thirty +seconds) and read packets until position 01:45. +
     
    10%+20,01:30%01:45
    +
    + +
  • +Read only 42 packets after seeking to position 01:23: +
     
    01:23%+#42
    +
    + +
  • +Read only the first 20 seconds from the start: +
     
    %+20
    +
    + +
  • +Read from the start until position 02:30: +
     
    %02:30
    +
    +
+ +
+
-show_private_data, -private
+

Show private data, that is data depending on the format of the +particular shown element. +This option is enabled by default, but you may need to disable it +for specific uses, for example when creating XSD-compliant XML output. +

+
+
-show_program_version
+

Show information related to program version. +

+

Version information is printed within a section with name +"PROGRAM_VERSION". +

+
+
-show_library_versions
+

Show information related to library versions. +

+

Version information for each library is printed within a section with +name "LIBRARY_VERSION". +

+
+
-show_versions
+

Show information related to program and library versions. This is the +equivalent of setting both ‘-show_program_version’ and +‘-show_library_versions’ options. +

+
+
-bitexact
+

Force bitexact output, useful to produce output which is not dependent +on the specific build. +

+
+
-i input_file
+

Read input_file. +

+
+
+ + +

4. Writers

+ +

A writer defines the output format adopted by ffprobe, and will be +used for printing all the parts of the output. +

+

A writer may accept one or more arguments, which specify the options +to adopt. The options are specified as a list of key=value +pairs, separated by ":". +

+

All writers support the following options: +

+
+
string_validation, sv
+

Set string validation mode. +

+

The following values are accepted. +

+
fail
+

The writer will fail immediately in case an invalid string (UTF-8) +sequence or code point is found in the input. This is especially +useful to validate input metadata. +

+
+
ignore
+

Any validation error will be ignored. This will result in possibly +broken output, especially with the json or xml writer. +

+
+
replace
+

The writer will substitute invalid UTF-8 sequences or code points with +the string specified with the ‘string_validation_replacement’. +

+
+ +

Default value is ‘replace’. +

+
+
string_validation_replacement, svr
+

Set replacement string to use in case ‘string_validation’ is +set to ‘replace’. +

+

In case the option is not specified, the writer will assume the empty +string, that is it will remove the invalid sequences from the input +strings. +

+
+ +

A description of the currently available writers follows. +

+ +

4.1 default

+

Default format. +

+

Print each section in the form: +

 
[SECTION]
+key1=val1
+...
+keyN=valN
+[/SECTION]
+
+ +

Metadata tags are printed as a line in the corresponding FORMAT, STREAM or +PROGRAM_STREAM section, and are prefixed by the string "TAG:". +

+

A description of the accepted options follows. +

+
+
nokey, nk
+

If set to 1 specify not to print the key of each field. Default value +is 0. +

+
+
noprint_wrappers, nw
+

If set to 1 specify not to print the section header and footer. +Default value is 0. +

+
+ + +

4.2 compact, csv

+

Compact and CSV format. +

+

The csv writer is equivalent to compact, but supports +different defaults. +

+

Each section is printed on a single line. If no option is specified, the output has the form:

 
section|key1=val1| ... |keyN=valN
+
+ +

Metadata tags are printed in the corresponding "format" or "stream" +section. A metadata tag key, if printed, is prefixed by the string +"tag:". +

+

The description of the accepted options follows. +

+
+
item_sep, s
+

Specify the character to use for separating fields in the output line. +It must be a single printable character, it is "|" by default ("," for +the csv writer). +

+
+
nokey, nk
+

If set to 1 specify not to print the key of each field. Its default +value is 0 (1 for the csv writer). +

+
+
escape, e
+

Set the escape mode to use, default to "c" ("csv" for the csv +writer). +

+

It can assume one of the following values: +

+
c
+

Perform C-like escaping. Strings containing a newline (’\n’), carriage +return (’\r’), a tab (’\t’), a form feed (’\f’), the escaping +character (’\’) or the item separator character SEP are escaped using C-like fashioned +escaping, so that a newline is converted to the sequence "\n", a +carriage return to "\r", ’\’ to "\\" and the separator SEP is +converted to "\SEP". +

+
+
csv
+

Perform CSV-like escaping, as described in RFC4180. Strings +containing a newline (’\n’), a carriage return (’\r’), a double quote +(’"’), or SEP are enclosed in double-quotes. +

+
+
none
+

Perform no escaping. +

+
+ +
+
print_section, p
+

Print the section name at the beginning of each line if the value is 1, disable it with value set to 0. Default value is 1.

+
+
+ + +

4.3 flat

+

Flat format. +

+

A free-form output where each line contains an explicit key=value, such as +"streams.stream.3.tags.foo=bar". The output is shell escaped, so it can be +directly embedded in sh scripts as long as the separator character is an +alphanumeric character or an underscore (see sep_char option). +

+

The description of the accepted options follows. +

+
+
sep_char, s
+

Separator character used to separate the chapter, the section name, IDs and +potential tags in the printed field key. +

+

Default value is ’.’. +

+
+
hierarchical, h
+

Specify if the section name specification should be hierarchical. If +set to 1, and if there is more than one section in the current +chapter, the section name will be prefixed by the name of the +chapter. A value of 0 will disable this behavior. +

+

Default value is 1. +

+
+ + +

4.4 ini

+

INI format output. +

+

Print output in an INI based format. +

+

The following conventions are adopted: +

+
    +
  • +all key and values are UTF-8 +
  • +’.’ is the subgroup separator +
  • +newline, ’\t’, ’\f’, ’\b’ and the following characters are escaped +
  • +’\’ is the escape character +
  • +’#’ is the comment indicator +
  • +’=’ is the key/value separator +
  • +’:’ is not used but usually parsed as key/value separator +
+ +

This writer accepts options as a list of key=value pairs, +separated by ":". +

+

The description of the accepted options follows. +

+
+
hierarchical, h
+

Specify if the section name specification should be hierarchical. If +set to 1, and if there is more than one section in the current +chapter, the section name will be prefixed by the name of the +chapter. A value of 0 will disable this behavior. +

+

Default value is 1. +

+
+ + +

4.5 json

+

JSON based format. +

+

Each section is printed using JSON notation. +

+

The description of the accepted options follows. +

+
+
compact, c
+

If set to 1 enable compact output, that is each section will be +printed on a single line. Default value is 0. +

+
+ +

For more information about JSON, see http://www.json.org/. +

+ +

4.6 xml

+

XML based format. +

+

The XML output is described in the XML schema description file +‘ffprobe.xsd’ installed in the FFmpeg datadir. +

+

An updated version of the schema can be retrieved at the url +http://www.ffmpeg.org/schema/ffprobe.xsd, which redirects to the +latest schema committed into the FFmpeg development source code tree. +

+

Note that the output issued will be compliant to the +‘ffprobe.xsd’ schema only when no special global output options +(‘unit’, ‘prefix’, ‘byte_binary_prefix’, +‘sexagesimal’ etc.) are specified. +

+

The description of the accepted options follows. +

+
+
fully_qualified, q
+

If set to 1 specify if the output should be fully qualified. Default +value is 0. +This is required for generating an XML file which can be validated +through an XSD file. +

+
+
xsd_compliant, x
+

If set to 1 perform more checks for ensuring that the output is XSD +compliant. Default value is 0. +This option automatically sets ‘fully_qualified’ to 1. +

+
+ +

For more information about the XML format, see +http://www.w3.org/XML/. +

+ +

5. Timecode

+ +

ffprobe supports Timecode extraction: +

+
    +
  • +MPEG1/2 timecode is extracted from the GOP, and is available in the video +stream details (‘-show_streams’, see timecode). + +
  • +MOV timecode is extracted from tmcd track, so is available in the tmcd +stream metadata (‘-show_streams’, see TAG:timecode). + +
  • +DV, GXF and AVI timecodes are available in format metadata +(‘-show_format’, see TAG:timecode). + +
+ + +

6. Syntax

+ +

This section documents the syntax and formats employed by the FFmpeg +libraries and tools. +

+

+

+

6.1 Quoting and escaping

+ +

FFmpeg adopts the following quoting and escaping mechanism, unless +explicitly specified. The following rules are applied: +

+
    +
  • +' and \ are special characters (respectively used for +quoting and escaping). In addition to them, there might be other +special characters depending on the specific syntax where the escaping +and quoting are employed. + +
  • +A special character is escaped by prefixing it with a ’\’. + +
  • All characters enclosed between '' are included literally in the parsed string. The quote character ' itself cannot be quoted, so you may need to close the quote and escape it.
  • +Leading and trailing whitespaces, unless escaped or quoted, are +removed from the parsed string. +
+ +

Note that you may need to add a second level of escaping when using +the command line or a script, which depends on the syntax of the +adopted shell language. +

+

The function av_get_token defined in +‘libavutil/avstring.h’ can be used to parse a token quoted or +escaped according to the rules defined above. +

+

The tool ‘tools/ffescape’ in the FFmpeg source tree can be used +to automatically quote or escape a string in a script. +

+ +

6.1.1 Examples

+ +
    +
  • +Escape the string Crime d'Amour containing the ' special +character: +
     
    Crime d\'Amour
    +
    + +
  • +The string above contains a quote, so the ' needs to be escaped +when quoting it: +
     
    'Crime d'\''Amour'
    +
    + +
  • +Include leading or trailing whitespaces using quoting: +
     
    '  this string starts and ends with whitespaces  '
    +
    + +
  • +Escaping and quoting can be mixed together: +
     
    ' The string '\'string\'' is a string '
    +
    + +
  • +To include a literal \ you can use either escaping or quoting: +
     
    'c:\foo' can be written as c:\\foo
    +
    +
+ +

+

+

6.2 Date

+ +

The accepted syntax is: +

 
[(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH:MM:SS[.m...]]])|(HHMMSS[.m...]]]))[Z]
+now
+
+ +

If the value is "now" it takes the current time. +

+

Time is local time unless Z is appended, in which case it is +interpreted as UTC. +If the year-month-day part is not specified it takes the current +year-month-day. +

+

+

+

6.3 Time duration

+ +

There are two accepted syntaxes for expressing time duration. +

+
 
[-][HH:]MM:SS[.m...]
+
+ +

HH expresses the number of hours, MM the number of minutes +for a maximum of 2 digits, and SS the number of seconds for a +maximum of 2 digits. The m at the end expresses decimal value for +SS. +

+

or +

+
 
[-]S+[.m...]
+
+ +

S expresses the number of seconds, with the optional decimal part +m. +

+

In both expressions, the optional ‘-’ indicates negative duration. +

+ +

6.3.1 Examples

+ +

The following examples are all valid time duration: +

+
+
55
+

55 seconds +

+
+
12:03:45
+

12 hours, 03 minutes and 45 seconds +

+
+
23.189
+

23.189 seconds +

+
+ +

+

+

6.4 Video size

+

Specify the size of the sourced video, it may be a string of the form +widthxheight, or the name of a size abbreviation. +

+

The following abbreviations are recognized: +

+
ntsc
+

720x480 +

+
pal
+

720x576 +

+
qntsc
+

352x240 +

+
qpal
+

352x288 +

+
sntsc
+

640x480 +

+
spal
+

768x576 +

+
film
+

352x240 +

+
ntsc-film
+

352x240 +

+
sqcif
+

128x96 +

+
qcif
+

176x144 +

+
cif
+

352x288 +

+
4cif
+

704x576 +

+
16cif
+

1408x1152 +

+
qqvga
+

160x120 +

+
qvga
+

320x240 +

+
vga
+

640x480 +

+
svga
+

800x600 +

+
xga
+

1024x768 +

+
uxga
+

1600x1200 +

+
qxga
+

2048x1536 +

+
sxga
+

1280x1024 +

+
qsxga
+

2560x2048 +

+
hsxga
+

5120x4096 +

+
wvga
+

852x480 +

+
wxga
+

1366x768 +

+
wsxga
+

1600x1024 +

+
wuxga
+

1920x1200 +

+
woxga
+

2560x1600 +

+
wqsxga
+

3200x2048 +

+
wquxga
+

3840x2400 +

+
whsxga
+

6400x4096 +

+
whuxga
+

7680x4800 +

+
cga
+

320x200 +

+
ega
+

640x350 +

+
hd480
+

852x480 +

+
hd720
+

1280x720 +

+
hd1080
+

1920x1080 +

+
2k
+

2048x1080 +

+
2kflat
+

1998x1080 +

+
2kscope
+

2048x858 +

+
4k
+

4096x2160 +

+
4kflat
+

3996x2160 +

+
4kscope
+

4096x1716 +

+
nhd
+

640x360 +

+
hqvga
+

240x160 +

+
wqvga
+

400x240 +

+
fwqvga
+

432x240 +

+
hvga
+

480x320 +

+
qhd
+

960x540 +

+
+ +

+

+

6.5 Video rate

+ +

Specify the frame rate of a video, expressed as the number of frames +generated per second. It has to be a string in the format +frame_rate_num/frame_rate_den, an integer number, a float +number or a valid video frame rate abbreviation. +

+

The following abbreviations are recognized: +

+
ntsc
+

30000/1001 +

+
pal
+

25/1 +

+
qntsc
+

30000/1001 +

+
qpal
+

25/1 +

+
sntsc
+

30000/1001 +

+
spal
+

25/1 +

+
film
+

24/1 +

+
ntsc-film
+

24000/1001 +

+
+ +

+

+

6.6 Ratio

+ +

A ratio can be expressed as an expression, or in the form +numerator:denominator. +

+

Note that a ratio with infinite (1/0) or negative value is +considered valid, so you should check on the returned value if you +want to exclude those values. +

+

The undefined value can be expressed using the "0:0" string. +

+

+

+

6.7 Color

+ +

It can be the name of a color as defined below (case insensitive match) or a +[0x|#]RRGGBB[AA] sequence, possibly followed by @ and a string +representing the alpha component. +

+

The alpha component may be a string composed by "0x" followed by an +hexadecimal number or a decimal number between 0.0 and 1.0, which +represents the opacity value (‘0x00’ or ‘0.0’ means completely +transparent, ‘0xff’ or ‘1.0’ completely opaque). If the alpha +component is not specified then ‘0xff’ is assumed. +

+

The string ‘random’ will result in a random color. +

+

The following names of colors are recognized: +

+
AliceBlue
+

0xF0F8FF +

+
AntiqueWhite
+

0xFAEBD7 +

+
Aqua
+

0x00FFFF +

+
Aquamarine
+

0x7FFFD4 +

+
Azure
+

0xF0FFFF +

+
Beige
+

0xF5F5DC +

+
Bisque
+

0xFFE4C4 +

+
Black
+

0x000000 +

+
BlanchedAlmond
+

0xFFEBCD +

+
Blue
+

0x0000FF +

+
BlueViolet
+

0x8A2BE2 +

+
Brown
+

0xA52A2A +

+
BurlyWood
+

0xDEB887 +

+
CadetBlue
+

0x5F9EA0 +

+
Chartreuse
+

0x7FFF00 +

+
Chocolate
+

0xD2691E +

+
Coral
+

0xFF7F50 +

+
CornflowerBlue
+

0x6495ED +

+
Cornsilk
+

0xFFF8DC +

+
Crimson
+

0xDC143C +

+
Cyan
+

0x00FFFF +

+
DarkBlue
+

0x00008B +

+
DarkCyan
+

0x008B8B +

+
DarkGoldenRod
+

0xB8860B +

+
DarkGray
+

0xA9A9A9 +

+
DarkGreen
+

0x006400 +

+
DarkKhaki
+

0xBDB76B +

+
DarkMagenta
+

0x8B008B +

+
DarkOliveGreen
+

0x556B2F +

+
Darkorange
+

0xFF8C00 +

+
DarkOrchid
+

0x9932CC +

+
DarkRed
+

0x8B0000 +

+
DarkSalmon
+

0xE9967A +

+
DarkSeaGreen
+

0x8FBC8F +

+
DarkSlateBlue
+

0x483D8B +

+
DarkSlateGray
+

0x2F4F4F +

+
DarkTurquoise
+

0x00CED1 +

+
DarkViolet
+

0x9400D3 +

+
DeepPink
+

0xFF1493 +

+
DeepSkyBlue
+

0x00BFFF +

+
DimGray
+

0x696969 +

+
DodgerBlue
+

0x1E90FF +

+
FireBrick
+

0xB22222 +

+
FloralWhite
+

0xFFFAF0 +

+
ForestGreen
+

0x228B22 +

+
Fuchsia
+

0xFF00FF +

+
Gainsboro
+

0xDCDCDC +

+
GhostWhite
+

0xF8F8FF +

+
Gold
+

0xFFD700 +

+
GoldenRod
+

0xDAA520 +

+
Gray
+

0x808080 +

+
Green
+

0x008000 +

+
GreenYellow
+

0xADFF2F +

+
HoneyDew
+

0xF0FFF0 +

+
HotPink
+

0xFF69B4 +

+
IndianRed
+

0xCD5C5C +

+
Indigo
+

0x4B0082 +

+
Ivory
+

0xFFFFF0 +

+
Khaki
+

0xF0E68C +

+
Lavender
+

0xE6E6FA +

+
LavenderBlush
+

0xFFF0F5 +

+
LawnGreen
+

0x7CFC00 +

+
LemonChiffon
+

0xFFFACD +

+
LightBlue
+

0xADD8E6 +

+
LightCoral
+

0xF08080 +

+
LightCyan
+

0xE0FFFF +

+
LightGoldenRodYellow
+

0xFAFAD2 +

+
LightGreen
+

0x90EE90 +

+
LightGrey
+

0xD3D3D3 +

+
LightPink
+

0xFFB6C1 +

+
LightSalmon
+

0xFFA07A +

+
LightSeaGreen
+

0x20B2AA +

+
LightSkyBlue
+

0x87CEFA +

+
LightSlateGray
+

0x778899 +

+
LightSteelBlue
+

0xB0C4DE +

+
LightYellow
+

0xFFFFE0 +

+
Lime
+

0x00FF00 +

+
LimeGreen
+

0x32CD32 +

+
Linen
+

0xFAF0E6 +

+
Magenta
+

0xFF00FF +

+
Maroon
+

0x800000 +

+
MediumAquaMarine
+

0x66CDAA +

+
MediumBlue
+

0x0000CD +

+
MediumOrchid
+

0xBA55D3 +

+
MediumPurple
+

0x9370D8 +

+
MediumSeaGreen
+

0x3CB371 +

+
MediumSlateBlue
+

0x7B68EE +

+
MediumSpringGreen
+

0x00FA9A +

+
MediumTurquoise
+

0x48D1CC +

+
MediumVioletRed
+

0xC71585 +

+
MidnightBlue
+

0x191970 +

+
MintCream
+

0xF5FFFA +

+
MistyRose
+

0xFFE4E1 +

+
Moccasin
+

0xFFE4B5 +

+
NavajoWhite
+

0xFFDEAD +

+
Navy
+

0x000080 +

+
OldLace
+

0xFDF5E6 +

+
Olive
+

0x808000 +

+
OliveDrab
+

0x6B8E23 +

+
Orange
+

0xFFA500 +

+
OrangeRed
+

0xFF4500 +

+
Orchid
+

0xDA70D6 +

+
PaleGoldenRod
+

0xEEE8AA +

+
PaleGreen
+

0x98FB98 +

+
PaleTurquoise
+

0xAFEEEE +

+
PaleVioletRed
+

0xD87093 +

+
PapayaWhip
+

0xFFEFD5 +

+
PeachPuff
+

0xFFDAB9 +

+
Peru
+

0xCD853F +

+
Pink
+

0xFFC0CB +

+
Plum
+

0xDDA0DD +

+
PowderBlue
+

0xB0E0E6 +

+
Purple
+

0x800080 +

+
Red
+

0xFF0000 +

+
RosyBrown
+

0xBC8F8F +

+
RoyalBlue
+

0x4169E1 +

+
SaddleBrown
+

0x8B4513 +

+
Salmon
+

0xFA8072 +

+
SandyBrown
+

0xF4A460 +

+
SeaGreen
+

0x2E8B57 +

+
SeaShell
+

0xFFF5EE +

+
Sienna
+

0xA0522D +

+
Silver
+

0xC0C0C0 +

+
SkyBlue
+

0x87CEEB +

+
SlateBlue
+

0x6A5ACD +

+
SlateGray
+

0x708090 +

+
Snow
+

0xFFFAFA +

+
SpringGreen
+

0x00FF7F +

+
SteelBlue
+

0x4682B4 +

+
Tan
+

0xD2B48C +

+
Teal
+

0x008080 +

+
Thistle
+

0xD8BFD8 +

+
Tomato
+

0xFF6347 +

+
Turquoise
+

0x40E0D0 +

+
Violet
+

0xEE82EE +

+
Wheat
+

0xF5DEB3 +

+
White
+

0xFFFFFF +

+
WhiteSmoke
+

0xF5F5F5 +

+
Yellow
+

0xFFFF00 +

+
YellowGreen
+

0x9ACD32 +

+
+ +

+

+

6.8 Channel Layout

+ +

A channel layout specifies the spatial disposition of the channels in +a multi-channel audio stream. To specify a channel layout, FFmpeg +makes use of a special syntax. +

+

Individual channels are identified by an id, as given by the table +below: +

+
FL
+

front left +

+
FR
+

front right +

+
FC
+

front center +

+
LFE
+

low frequency +

+
BL
+

back left +

+
BR
+

back right +

+
FLC
+

front left-of-center +

+
FRC
+

front right-of-center +

+
BC
+

back center +

+
SL
+

side left +

+
SR
+

side right +

+
TC
+

top center +

+
TFL
+

top front left +

+
TFC
+

top front center +

+
TFR
+

top front right +

+
TBL
+

top back left +

+
TBC
+

top back center +

+
TBR
+

top back right +

+
DL
+

downmix left +

+
DR
+

downmix right +

+
WL
+

wide left +

+
WR
+

wide right +

+
SDL
+

surround direct left +

+
SDR
+

surround direct right +

+
LFE2
+

low frequency 2 +

+
+ +

Standard channel layout compositions can be specified by using the +following identifiers: +

+
mono
+

FC +

+
stereo
+

FL+FR +

+
2.1
+

FL+FR+LFE +

+
3.0
+

FL+FR+FC +

+
3.0(back)
+

FL+FR+BC +

+
4.0
+

FL+FR+FC+BC +

+
quad
+

FL+FR+BL+BR +

+
quad(side)
+

FL+FR+SL+SR +

+
3.1
+

FL+FR+FC+LFE +

+
5.0
+

FL+FR+FC+BL+BR +

+
5.0(side)
+

FL+FR+FC+SL+SR +

+
4.1
+

FL+FR+FC+LFE+BC +

+
5.1
+

FL+FR+FC+LFE+BL+BR +

+
5.1(side)
+

FL+FR+FC+LFE+SL+SR +

+
6.0
+

FL+FR+FC+BC+SL+SR +

+
6.0(front)
+

FL+FR+FLC+FRC+SL+SR +

+
hexagonal
+

FL+FR+FC+BL+BR+BC +

+
6.1
+

FL+FR+FC+LFE+BC+SL+SR +

+
6.1(back)
+

FL+FR+FC+LFE+BL+BR+BC +

+
6.1(front)
+

FL+FR+LFE+FLC+FRC+SL+SR +

+
7.0
+

FL+FR+FC+BL+BR+SL+SR +

+
7.0(front)
+

FL+FR+FC+FLC+FRC+SL+SR +

+
7.1
+

FL+FR+FC+LFE+BL+BR+SL+SR +

+
7.1(wide)
+

FL+FR+FC+LFE+BL+BR+FLC+FRC +

+
7.1(wide-side)
+

FL+FR+FC+LFE+FLC+FRC+SL+SR +

+
octagonal
+

FL+FR+FC+BL+BR+BC+SL+SR +

+
downmix
+

DL+DR +

+
+ +

A custom channel layout can be specified as a sequence of terms, separated by +’+’ or ’|’. Each term can be: +

    +
  • +the name of a standard channel layout (e.g. ‘mono’, +‘stereo’, ‘4.0’, ‘quad’, ‘5.0’, etc.) + +
  • +the name of a single channel (e.g. ‘FL’, ‘FR’, ‘FC’, ‘LFE’, etc.) + +
  • +a number of channels, in decimal, optionally followed by ’c’, yielding +the default channel layout for that number of channels (see the +function av_get_default_channel_layout) + +
  • +a channel layout mask, in hexadecimal starting with "0x" (see the +AV_CH_* macros in ‘libavutil/channel_layout.h’). +
+ +

Starting from libavutil version 53 the trailing character "c" to +specify a number of channels will be required, while a channel layout +mask could also be specified as a decimal number (if and only if not +followed by "c"). +

+

See also the function av_get_channel_layout defined in +‘libavutil/channel_layout.h’. +

+ +

7. Expression Evaluation

+ +

When evaluating an arithmetic expression, FFmpeg uses an internal +formula evaluator, implemented through the ‘libavutil/eval.h’ +interface. +

+

An expression may contain unary, binary operators, constants, and +functions. +

+

Two expressions expr1 and expr2 can be combined to form +another expression "expr1;expr2". +expr1 and expr2 are evaluated in turn, and the new +expression evaluates to the value of expr2. +

+

The following binary operators are available: +, -, +*, /, ^. +

+

The following unary operators are available: +, -. +

+

The following functions are available: +

+
abs(x)
+

Compute absolute value of x. +

+
+
acos(x)
+

Compute arccosine of x. +

+
+
asin(x)
+

Compute arcsine of x. +

+
+
atan(x)
+

Compute arctangent of x. +

+
+
between(x, min, max)
+

Return 1 if x is greater than or equal to min and lesser than or +equal to max, 0 otherwise. +

+
+
bitand(x, y)
+
bitor(x, y)
+

Compute bitwise and/or operation on x and y. +

+

The results of the evaluation of x and y are converted to +integers before executing the bitwise operation. +

+

Note that both the conversion to integer and the conversion back to +floating point can lose precision. Beware of unexpected results for +large numbers (usually 2^53 and larger). +

+
+
ceil(expr)
+

Round the value of expression expr upwards to the nearest +integer. For example, "ceil(1.5)" is "2.0". +

+
+
cos(x)
+

Compute cosine of x. +

+
+
cosh(x)
+

Compute hyperbolic cosine of x. +

+
+
eq(x, y)
+

Return 1 if x and y are equivalent, 0 otherwise. +

+
+
exp(x)
+

Compute exponential of x (with base e, Euler’s number). +

+
+
floor(expr)
+

Round the value of expression expr downwards to the nearest +integer. For example, "floor(-1.5)" is "-2.0". +

+
+
gauss(x)
+

Compute Gauss function of x, corresponding to +exp(-x*x/2) / sqrt(2*PI). +

+
+
gcd(x, y)
+

Return the greatest common divisor of x and y. If both x and +y are 0 or either or both are less than zero then behavior is undefined. +

+
+
gt(x, y)
+

Return 1 if x is greater than y, 0 otherwise. +

+
+
gte(x, y)
+

Return 1 if x is greater than or equal to y, 0 otherwise. +

+
+
hypot(x, y)
+

This function is similar to the C function with the same name; it returns +"sqrt(x*x + y*y)", the length of the hypotenuse of a +right triangle with sides of length x and y, or the distance of the +point (x, y) from the origin. +

+
+
if(x, y)
+

Evaluate x, and if the result is non-zero return the result of +the evaluation of y, return 0 otherwise. +

+
+
if(x, y, z)
+

Evaluate x, and if the result is non-zero return the evaluation +result of y, otherwise the evaluation result of z. +

+
+
ifnot(x, y)
+

Evaluate x, and if the result is zero return the result of the +evaluation of y, return 0 otherwise. +

+
+
ifnot(x, y, z)
+

Evaluate x, and if the result is zero return the evaluation +result of y, otherwise the evaluation result of z. +

+
+
isinf(x)
+

Return 1.0 if x is +/-INFINITY, 0.0 otherwise. +

+
+
isnan(x)
+

Return 1.0 if x is NAN, 0.0 otherwise. +

+
+
ld(var)
+

Load the value of the internal variable with number +var, which was previously stored with st(var, expr). +The function returns the loaded value. +

+
+
log(x)
+

Compute natural logarithm of x. +

+
+
lt(x, y)
+

Return 1 if x is lesser than y, 0 otherwise. +

+
+
lte(x, y)
+

Return 1 if x is lesser than or equal to y, 0 otherwise. +

+
+
max(x, y)
+

Return the maximum between x and y. +

+
+
min(x, y)
+

Return the minimum between x and y. +

+
+
mod(x, y)
+

Compute the remainder of division of x by y. +

+
+
not(expr)
+

Return 1.0 if expr is zero, 0.0 otherwise. +

+
+
pow(x, y)
+

Compute the power of x elevated to y, it is equivalent to +"(x)^(y)". +

+
+
print(t)
+
print(t, l)
+

Print the value of expression t with loglevel l. If +l is not specified then a default log level is used. +Returns the value of the expression printed. +

+

Prints t with loglevel l +

+
+
random(x)
+

Return a pseudo random value between 0.0 and 1.0. x is the index of the +internal variable which will be used to save the seed/state. +

+
+
root(expr, max)
+

Find an input value for which the function represented by expr +with argument ld(0) is 0 in the interval 0..max. +

+

The expression in expr must denote a continuous function or the +result is undefined. +

+

ld(0) is used to represent the function input value, which means +that the given expression will be evaluated multiple times with +various input values that the expression can access through +ld(0). When the expression evaluates to 0 then the +corresponding input value will be returned. +

+
+
sin(x)
+

Compute sine of x. +

+
+
sinh(x)
+

Compute hyperbolic sine of x. +

+
+
sqrt(expr)
+

Compute the square root of expr. This is equivalent to +"(expr)^.5". +

+
+
squish(x)
+

Compute expression 1/(1 + exp(4*x)). +

+
+
st(var, expr)
+

Store the value of the expression expr in an internal +variable. var specifies the number of the variable where to +store the value, and it is a value ranging from 0 to 9. The function +returns the value stored in the internal variable. +Note: variables are currently not shared between expressions. +

+
+
tan(x)
+

Compute tangent of x. +

+
+
tanh(x)
+

Compute hyperbolic tangent of x. +

+
+
taylor(expr, x)
+
taylor(expr, x, id)
+

Evaluate a Taylor series at x, given an expression representing +the ld(id)-th derivative of a function at 0. +

+

When the series does not converge the result is undefined. +

+

ld(id) is used to represent the derivative order in expr, +which means that the given expression will be evaluated multiple times +with various input values that the expression can access through +ld(id). If id is not specified then 0 is assumed. +

+

Note, when you have the derivatives at y instead of 0, +taylor(expr, x-y) can be used. +

+
+
time(0)
+

Return the current (wallclock) time in seconds. +

+
+
trunc(expr)
+

Round the value of expression expr towards zero to the nearest +integer. For example, "trunc(-1.5)" is "-1.0". +

+
+
while(cond, expr)
+

Evaluate expression expr while the expression cond is +non-zero, and returns the value of the last expr evaluation, or +NAN if cond was always false. +

+
+ +

The following constants are available: +

+
PI
+

area of the unit disc, approximately 3.14 +

+
E
+

exp(1) (Euler’s number), approximately 2.718 +

+
PHI
+

golden ratio (1+sqrt(5))/2, approximately 1.618 +

+
+ +

Assuming that an expression is considered "true" if it has a non-zero +value, note that: +

+

* works like AND +

+

+ works like OR +

+

For example the construct: +

 
if (A AND B) then C
+
+

is equivalent to: +

 
if(A*B, C)
+
+ +

In your C code, you can extend the list of unary and binary functions, +and define recognized constants, so that they are available for your +expressions. +

+

The evaluator also recognizes the International System unit prefixes. +If ’i’ is appended after the prefix, binary prefixes are used, which +are based on powers of 1024 instead of powers of 1000. +The ’B’ postfix multiplies the value by 8, and can be appended after a +unit prefix or used alone. This allows using for example ’KB’, ’MiB’, +’G’ and ’B’ as number postfix. +

+

The list of available International System prefixes follows, with +indication of the corresponding powers of 10 and of 2. +

+
y
+

10^-24 / 2^-80 +

+
z
+

10^-21 / 2^-70 +

+
a
+

10^-18 / 2^-60 +

+
f
+

10^-15 / 2^-50 +

+
p
+

10^-12 / 2^-40 +

+
n
+

10^-9 / 2^-30 +

+
u
+

10^-6 / 2^-20 +

+
m
+

10^-3 / 2^-10 +

+
c
+

10^-2 +

+
d
+

10^-1 +

+
h
+

10^2 +

+
k
+

10^3 / 2^10 +

+
K
+

10^3 / 2^10 +

+
M
+

10^6 / 2^20 +

+
G
+

10^9 / 2^30 +

+
T
+

10^12 / 2^40 +

+
P
+

10^15 / 2^50 +

+
E
+

10^18 / 2^60 +

+
Z
+

10^21 / 2^70 +

+
Y
+

10^24 / 2^80 +

+
+ + + +

8. OpenCL Options

+ +

When FFmpeg is configured with --enable-opencl, it is possible +to set the options for the global OpenCL context. +

+

The list of supported options follows: +

+
+
build_options
+

Set build options used to compile the registered kernels. +

+

See reference "OpenCL Specification Version: 1.2 chapter 5.6.4". +

+
+
platform_idx
+

Select the index of the platform to run OpenCL code. +

+

The specified index must be one of the indexes in the device list +which can be obtained with ffmpeg -opencl_bench or av_opencl_get_device_list(). +

+
+
device_idx
+

Select the index of the device used to run OpenCL code. +

+

The specified index must be one of the indexes in the device list which +can be obtained with ffmpeg -opencl_bench or av_opencl_get_device_list(). +

+
+
+ +

+

+

9. Codec Options

+ +

libavcodec provides some generic global options, which can be set on +all the encoders and decoders. In addition each codec may support +so-called private options, which are specific for a given codec. +

+

Sometimes, a global option may only affect a specific kind of codec, +and may be nonsensical or ignored by another, so you need to be aware +of the meaning of the specified options. Also some options are +meant only for decoding or encoding. +

+

Options may be set by specifying -option value in the +FFmpeg tools, or by setting the value explicitly in the +AVCodecContext options or using the ‘libavutil/opt.h’ API +for programmatic use. +

+

The list of supported options follow: +

+
+
b integer (encoding,audio,video)
+

Set bitrate in bits/s. Default value is 200K. +

+
+
ab integer (encoding,audio)
+

Set audio bitrate (in bits/s). Default value is 128K. +

+
+
bt integer (encoding,video)
+

Set video bitrate tolerance (in bits/s). In 1-pass mode, bitrate +tolerance specifies how far ratecontrol is willing to deviate from the +target average bitrate value. This is not related to min/max +bitrate. Lowering tolerance too much has an adverse effect on quality. +

+
+
flags flags (decoding/encoding,audio,video,subtitles)
+

Set generic flags. +

+

Possible values: +

+
mv4
+

Use four motion vector by macroblock (mpeg4). +

+
qpel
+

Use 1/4 pel motion compensation. +

+
loop
+

Use loop filter. +

+
qscale
+

Use fixed qscale. +

+
gmc
+

Use gmc. +

+
mv0
+

Always try a mb with mv=<0,0>. +

+
input_preserved
+
pass1
+

Use internal 2pass ratecontrol in first pass mode. +

+
pass2
+

Use internal 2pass ratecontrol in second pass mode. +

+
gray
+

Only decode/encode grayscale. +

+
emu_edge
+

Do not draw edges. +

+
psnr
+

Set error[?] variables during encoding. +

+
truncated
+
naq
+

Normalize adaptive quantization. +

+
ildct
+

Use interlaced DCT. +

+
low_delay
+

Force low delay. +

+
global_header
+

Place global headers in extradata instead of every keyframe. +

+
bitexact
+

Use only bitexact stuff (except (I)DCT). +

+
aic
+

Apply H263 advanced intra coding / mpeg4 ac prediction. +

+
cbp
+

Deprecated, use mpegvideo private options instead. +

+
qprd
+

Deprecated, use mpegvideo private options instead. +

+
ilme
+

Apply interlaced motion estimation. +

+
cgop
+

Use closed gop. +

+
+ +
+
me_method integer (encoding,video)
+

Set motion estimation method. +

+

Possible values: +

+
zero
+

zero motion estimation (fastest) +

+
full
+

full motion estimation (slowest) +

+
epzs
+

EPZS motion estimation (default) +

+
esa
+

esa motion estimation (alias for full) +

+
tesa
+

tesa motion estimation +

+
dia
+

dia motion estimation (alias for epzs) +

+
log
+

log motion estimation +

+
phods
+

phods motion estimation +

+
x1
+

X1 motion estimation +

+
hex
+

hex motion estimation +

+
umh
+

umh motion estimation +

+
iter
+

iter motion estimation +

+
+ +
+
extradata_size integer
+

Set extradata size. +

+
+
time_base rational number
+

Set codec time base. +

+

It is the fundamental unit of time (in seconds) in terms of which +frame timestamps are represented. For fixed-fps content, timebase +should be 1 / frame_rate and timestamp increments should be +identically 1. +

+
+
g integer (encoding,video)
+

Set the group of picture size. Default value is 12. +

+
+
ar integer (decoding/encoding,audio)
+

Set audio sampling rate (in Hz). +

+
+
ac integer (decoding/encoding,audio)
+

Set number of audio channels. +

+
+
cutoff integer (encoding,audio)
+

Set cutoff bandwidth. +

+
+
frame_size integer (encoding,audio)
+

Set audio frame size. +

+

Each submitted frame except the last must contain exactly frame_size +samples per channel. May be 0 when the codec has +CODEC_CAP_VARIABLE_FRAME_SIZE set, in that case the frame size is not +restricted. It is set by some decoders to indicate constant frame +size. +

+
+
frame_number integer
+

Set the frame number. +

+
+
delay integer
+
qcomp float (encoding,video)
+

Set video quantizer scale compression (VBR). It is used as a constant +in the ratecontrol equation. Recommended range for default rc_eq: +0.0-1.0. +

+
+
qblur float (encoding,video)
+

Set video quantizer scale blur (VBR). +

+
+
qmin integer (encoding,video)
+

Set min video quantizer scale (VBR). Must be included between -1 and +69, default value is 2. +

+
+
qmax integer (encoding,video)
+

Set max video quantizer scale (VBR). Must be included between -1 and +1024, default value is 31. +

+
+
qdiff integer (encoding,video)
+

Set max difference between the quantizer scale (VBR). +

+
+
bf integer (encoding,video)
+

Set max number of B frames between non-B-frames. +

+

Must be an integer between -1 and 16. 0 means that B-frames are +disabled. If a value of -1 is used, it will choose an automatic value +depending on the encoder. +

+

Default value is 0. +

+
+
b_qfactor float (encoding,video)
+

Set qp factor between P and B frames. +

+
+
rc_strategy integer (encoding,video)
+

Set ratecontrol method. +

+
+
b_strategy integer (encoding,video)
+

Set strategy to choose between I/P/B-frames. +

+
+
ps integer (encoding,video)
+

Set RTP payload size in bytes. +

+
+
mv_bits integer
+
header_bits integer
+
i_tex_bits integer
+
p_tex_bits integer
+
i_count integer
+
p_count integer
+
skip_count integer
+
misc_bits integer
+
frame_bits integer
+
codec_tag integer
+
bug flags (decoding,video)
+

Work around encoder bugs that are not automatically detected. +

+

Possible values: +

+
autodetect
+
old_msmpeg4
+

some old lavc generated msmpeg4v3 files (no autodetection) +

+
xvid_ilace
+

Xvid interlacing bug (autodetected if fourcc==XVIX) +

+
ump4
+

(autodetected if fourcc==UMP4) +

+
no_padding
+

padding bug (autodetected) +

+
amv
+
ac_vlc
+

illegal vlc bug (autodetected per fourcc) +

+
qpel_chroma
+
std_qpel
+

old standard qpel (autodetected per fourcc/version) +

+
qpel_chroma2
+
direct_blocksize
+

direct-qpel-blocksize bug (autodetected per fourcc/version) +

+
edge
+

edge padding bug (autodetected per fourcc/version) +

+
hpel_chroma
+
dc_clip
+
ms
+

Work around various bugs in Microsoft’s broken decoders. +

+
trunc
+

truncated frames +

+
+ +
+
lelim integer (encoding,video)
+

Set single coefficient elimination threshold for luminance (negative +values also consider DC coefficient). +

+
+
celim integer (encoding,video)
+

Set single coefficient elimination threshold for chrominance (negative +values also consider dc coefficient) +

+
+
strict integer (decoding/encoding,audio,video)
+

Specify how strictly to follow the standards. +

+

Possible values: +

+
very
+

strictly conform to an older more strict version of the spec or reference software +

+
strict
+

strictly conform to all the things in the spec no matter what consequences +

+
normal
+
unofficial
+

allow unofficial extensions +

+
experimental
+

allow non standardized experimental things, experimental +(unfinished/work in progress/not well tested) decoders and encoders. +Note: experimental decoders can pose a security risk, do not use this for +decoding untrusted input. +

+
+ +
+
b_qoffset float (encoding,video)
+

Set QP offset between P and B frames. +

+
+
err_detect flags (decoding,audio,video)
+

Set error detection flags. +

+

Possible values: +

+
crccheck
+

verify embedded CRCs +

+
bitstream
+

detect bitstream specification deviations +

+
buffer
+

detect improper bitstream length +

+
explode
+

abort decoding on minor error detection +

+
careful
+

consider things that violate the spec and have not been seen in the wild as errors +

+
compliant
+

consider all spec non compliancies as errors +

+
aggressive
+

consider things that a sane encoder should not do as an error +

+
+ +
+
has_b_frames integer
+
block_align integer
+
mpeg_quant integer (encoding,video)
+

Use MPEG quantizers instead of H.263. +

+
+
qsquish float (encoding,video)
+

How to keep quantizer between qmin and qmax (0 = clip, 1 = use +differentiable function). +

+
+
rc_qmod_amp float (encoding,video)
+

Set experimental quantizer modulation. +

+
+
rc_qmod_freq integer (encoding,video)
+

Set experimental quantizer modulation. +

+
+
rc_override_count integer
+
rc_eq string (encoding,video)
+

Set rate control equation. When computing the expression, besides the +standard functions defined in the section ’Expression Evaluation’, the +following functions are available: bits2qp(bits), qp2bits(qp). Also +the following constants are available: iTex pTex tex mv fCode iCount +mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex +avgTex. +

+
+
maxrate integer (encoding,audio,video)
+

Set max bitrate tolerance (in bits/s). Requires bufsize to be set. +

+
+
minrate integer (encoding,audio,video)
+

Set min bitrate tolerance (in bits/s). Most useful in setting up a CBR +encode. It is of little use elsewise. +

+
+
bufsize integer (encoding,audio,video)
+

Set ratecontrol buffer size (in bits). +

+
+
rc_buf_aggressivity float (encoding,video)
+

Currently useless. +

+
+
i_qfactor float (encoding,video)
+

Set QP factor between P and I frames. +

+
+
i_qoffset float (encoding,video)
+

Set QP offset between P and I frames. +

+
+
rc_init_cplx float (encoding,video)
+

Set initial complexity for 1-pass encoding. +

+
+
dct integer (encoding,video)
+

Set DCT algorithm. +

+

Possible values: +

+
auto
+

autoselect a good one (default) +

+
fastint
+

fast integer +

+
int
+

accurate integer +

+
mmx
+
altivec
+
faan
+

floating point AAN DCT +

+
+ +
+
lumi_mask float (encoding,video)
+

Compress bright areas stronger than medium ones. +

+
+
tcplx_mask float (encoding,video)
+

Set temporal complexity masking. +

+
+
scplx_mask float (encoding,video)
+

Set spatial complexity masking. +

+
+
p_mask float (encoding,video)
+

Set inter masking. +

+
+
dark_mask float (encoding,video)
+

Compress dark areas stronger than medium ones. +

+
+
idct integer (decoding/encoding,video)
+

Select IDCT implementation. +

+

Possible values: +

+
auto
+
int
+
simple
+
simplemmx
+
arm
+
altivec
+
sh4
+
simplearm
+
simplearmv5te
+
simplearmv6
+
simpleneon
+
simplealpha
+
ipp
+
xvidmmx
+
faani
+

floating point AAN IDCT +

+
+ +
+
slice_count integer
+
ec flags (decoding,video)
+

Set error concealment strategy. +

+

Possible values: +

+
guess_mvs
+

iterative motion vector (MV) search (slow) +

+
deblock
+

use strong deblock filter for damaged MBs +

+
+ +
+
bits_per_coded_sample integer
+
pred integer (encoding,video)
+

Set prediction method. +

+

Possible values: +

+
left
+
plane
+
median
+
+ +
+
aspect rational number (encoding,video)
+

Set sample aspect ratio. +

+
+
debug flags (decoding/encoding,audio,video,subtitles)
+

Print specific debug info. +

+

Possible values: +

+
pict
+

picture info +

+
rc
+

rate control +

+
bitstream
+
mb_type
+

macroblock (MB) type +

+
qp
+

per-block quantization parameter (QP) +

+
mv
+

motion vector +

+
dct_coeff
+
skip
+
startcode
+
pts
+
er
+

error recognition +

+
mmco
+

memory management control operations (H.264) +

+
bugs
+
vis_qp
+

visualize quantization parameter (QP), lower QP are tinted greener +

+
vis_mb_type
+

visualize block types +

+
buffers
+

picture buffer allocations +

+
thread_ops
+

threading operations +

+
+ +
+
vismv integer (decoding,video)
+

Visualize motion vectors (MVs). +

+

Possible values: +

+
pf
+

forward predicted MVs of P-frames +

+
bf
+

forward predicted MVs of B-frames +

+
bb
+

backward predicted MVs of B-frames +

+
+ +
+
cmp integer (encoding,video)
+

Set full pel me compare function. +

+

Possible values: +

+
sad
+

sum of absolute differences, fast (default) +

+
sse
+

sum of squared errors +

+
satd
+

sum of absolute Hadamard transformed differences +

+
dct
+

sum of absolute DCT transformed differences +

+
psnr
+

sum of squared quantization errors (avoid, low quality) +

+
bit
+

number of bits needed for the block +

+
rd
+

rate distortion optimal, slow +

+
zero
+

0 +

+
vsad
+

sum of absolute vertical differences +

+
vsse
+

sum of squared vertical differences +

+
nsse
+

noise preserving sum of squared differences +

+
w53
+

5/3 wavelet, only used in snow +

+
w97
+

9/7 wavelet, only used in snow +

+
dctmax
+
chroma
+
+ +
+
subcmp integer (encoding,video)
+

Set sub pel me compare function. +

+

Possible values: +

+
sad
+

sum of absolute differences, fast (default) +

+
sse
+

sum of squared errors +

+
satd
+

sum of absolute Hadamard transformed differences +

+
dct
+

sum of absolute DCT transformed differences +

+
psnr
+

sum of squared quantization errors (avoid, low quality) +

+
bit
+

number of bits needed for the block +

+
rd
+

rate distortion optimal, slow +

+
zero
+

0 +

+
vsad
+

sum of absolute vertical differences +

+
vsse
+

sum of squared vertical differences +

+
nsse
+

noise preserving sum of squared differences +

+
w53
+

5/3 wavelet, only used in snow +

+
w97
+

9/7 wavelet, only used in snow +

+
dctmax
+
chroma
+
+ +
+
mbcmp integer (encoding,video)
+

Set macroblock compare function. +

+

Possible values: +

+
sad
+

sum of absolute differences, fast (default) +

+
sse
+

sum of squared errors +

+
satd
+

sum of absolute Hadamard transformed differences +

+
dct
+

sum of absolute DCT transformed differences +

+
psnr
+

sum of squared quantization errors (avoid, low quality) +

+
bit
+

number of bits needed for the block +

+
rd
+

rate distortion optimal, slow +

+
zero
+

0 +

+
vsad
+

sum of absolute vertical differences +

+
vsse
+

sum of squared vertical differences +

+
nsse
+

noise preserving sum of squared differences +

+
w53
+

5/3 wavelet, only used in snow +

+
w97
+

9/7 wavelet, only used in snow +

+
dctmax
+
chroma
+
+ +
+
ildctcmp integer (encoding,video)
+

Set interlaced dct compare function. +

+

Possible values: +

+
sad
+

sum of absolute differences, fast (default) +

+
sse
+

sum of squared errors +

+
satd
+

sum of absolute Hadamard transformed differences +

+
dct
+

sum of absolute DCT transformed differences +

+
psnr
+

sum of squared quantization errors (avoid, low quality) +

+
bit
+

number of bits needed for the block +

+
rd
+

rate distortion optimal, slow +

+
zero
+

0 +

+
vsad
+

sum of absolute vertical differences +

+
vsse
+

sum of squared vertical differences +

+
nsse
+

noise preserving sum of squared differences +

+
w53
+

5/3 wavelet, only used in snow +

+
w97
+

9/7 wavelet, only used in snow +

+
dctmax
+
chroma
+
+ +
+
dia_size integer (encoding,video)
+

Set diamond type & size for motion estimation. +

+
+
last_pred integer (encoding,video)
+

Set amount of motion predictors from the previous frame. +

+
+
preme integer (encoding,video)
+

Set pre motion estimation. +

+
+
precmp integer (encoding,video)
+

Set pre motion estimation compare function. +

+

Possible values: +

+
sad
+

sum of absolute differences, fast (default) +

+
sse
+

sum of squared errors +

+
satd
+

sum of absolute Hadamard transformed differences +

+
dct
+

sum of absolute DCT transformed differences +

+
psnr
+

sum of squared quantization errors (avoid, low quality) +

+
bit
+

number of bits needed for the block +

+
rd
+

rate distortion optimal, slow +

+
zero
+

0 +

+
vsad
+

sum of absolute vertical differences +

+
vsse
+

sum of squared vertical differences +

+
nsse
+

noise preserving sum of squared differences +

+
w53
+

5/3 wavelet, only used in snow +

+
w97
+

9/7 wavelet, only used in snow +

+
dctmax
+
chroma
+
+ +
+
pre_dia_size integer (encoding,video)
+

Set diamond type & size for motion estimation pre-pass. +

+
+
subq integer (encoding,video)
+

Set sub pel motion estimation quality. +

+
+
dtg_active_format integer
+
me_range integer (encoding,video)
+

Set limit motion vectors range (1023 for DivX player). +

+
+
ibias integer (encoding,video)
+

Set intra quant bias. +

+
+
pbias integer (encoding,video)
+

Set inter quant bias. +

+
+
color_table_id integer
+
global_quality integer (encoding,audio,video)
+
coder integer (encoding,video)
+
+

Possible values: +

+
vlc
+

variable length coder / huffman coder +

+
ac
+

arithmetic coder +

+
raw
+

raw (no encoding) +

+
rle
+

run-length coder +

+
deflate
+

deflate-based coder +

+
+ +
+
context integer (encoding,video)
+

Set context model. +

+
+
slice_flags integer
+
xvmc_acceleration integer
+
mbd integer (encoding,video)
+

Set macroblock decision algorithm (high quality mode). +

+

Possible values: +

+
simple
+

use mbcmp (default) +

+
bits
+

use fewest bits +

+
rd
+

use best rate distortion +

+
+ +
+
stream_codec_tag integer
+
sc_threshold integer (encoding,video)
+

Set scene change threshold. +

+
+
lmin integer (encoding,video)
+

Set min lagrange factor (VBR). +

+
+
lmax integer (encoding,video)
+

Set max lagrange factor (VBR). +

+
+
nr integer (encoding,video)
+

Set noise reduction. +

+
+
rc_init_occupancy integer (encoding,video)
+

Set number of bits which should be loaded into the rc buffer before +decoding starts. +

+
+
flags2 flags (decoding/encoding,audio,video)
+
+

Possible values: +

+
fast
+

Allow non spec compliant speedup tricks. +

+
sgop
+

Deprecated, use mpegvideo private options instead. +

+
noout
+

Skip bitstream encoding. +

+
ignorecrop
+

Ignore cropping information from sps. +

+
local_header
+

Place global headers at every keyframe instead of in extradata. +

+
chunks
+

Frame data might be split into multiple chunks. +

+
showall
+

Show all frames before the first keyframe. +

+
skiprd
+

Deprecated, use mpegvideo private options instead. +

+
+ +
+
error integer (encoding,video)
+
qns integer (encoding,video)
+

Deprecated, use mpegvideo private options instead. +

+
+
threads integer (decoding/encoding,video)
+
+

Possible values: +

+
auto
+

detect a good number of threads +

+
+ +
+
me_threshold integer (encoding,video)
+

Set motion estimation threshold. +

+
+
mb_threshold integer (encoding,video)
+

Set macroblock threshold. +

+
+
dc integer (encoding,video)
+

Set intra_dc_precision. +

+
+
nssew integer (encoding,video)
+

Set nsse weight. +

+
+
skip_top integer (decoding,video)
+

Set number of macroblock rows at the top which are skipped. +

+
+
skip_bottom integer (decoding,video)
+

Set number of macroblock rows at the bottom which are skipped. +

+
+
profile integer (encoding,audio,video)
+
+

Possible values: +

+
unknown
+
aac_main
+
aac_low
+
aac_ssr
+
aac_ltp
+
aac_he
+
aac_he_v2
+
aac_ld
+
aac_eld
+
mpeg2_aac_low
+
mpeg2_aac_he
+
dts
+
dts_es
+
dts_96_24
+
dts_hd_hra
+
dts_hd_ma
+
+ +
+
level integer (encoding,audio,video)
+
+

Possible values: +

+
unknown
+
+ +
+
lowres integer (decoding,audio,video)
+

Decode at 1=1/2, 2=1/4, 3=1/8 resolutions. +

+
+
skip_threshold integer (encoding,video)
+

Set frame skip threshold. +

+
+
skip_factor integer (encoding,video)
+

Set frame skip factor. +

+
+
skip_exp integer (encoding,video)
+

Set frame skip exponent. +Negative values behave identically to the corresponding positive ones, except +that the score is normalized. +Positive values exist primarily for compatibility reasons and are not so useful. +

+
+
skipcmp integer (encoding,video)
+

Set frame skip compare function. +

+

Possible values: +

+
sad
+

sum of absolute differences, fast (default) +

+
sse
+

sum of squared errors +

+
satd
+

sum of absolute Hadamard transformed differences +

+
dct
+

sum of absolute DCT transformed differences +

+
psnr
+

sum of squared quantization errors (avoid, low quality) +

+
bit
+

number of bits needed for the block +

+
rd
+

rate distortion optimal, slow +

+
zero
+

0 +

+
vsad
+

sum of absolute vertical differences +

+
vsse
+

sum of squared vertical differences +

+
nsse
+

noise preserving sum of squared differences +

+
w53
+

5/3 wavelet, only used in snow +

+
w97
+

9/7 wavelet, only used in snow +

+
dctmax
+
chroma
+
+ +
+
border_mask float (encoding,video)
+

Increase the quantizer for macroblocks close to borders. +

+
+
mblmin integer (encoding,video)
+

Set min macroblock lagrange factor (VBR). +

+
+
mblmax integer (encoding,video)
+

Set max macroblock lagrange factor (VBR). +

+
+
mepc integer (encoding,video)
+

Set motion estimation bitrate penalty compensation (1.0 = 256). +

+
+
skip_loop_filter integer (decoding,video)
+
skip_idct integer (decoding,video)
+
skip_frame integer (decoding,video)
+
+

Make decoder discard processing depending on the frame type selected +by the option value. +

+

‘skip_loop_filter’ skips frame loop filtering, ‘skip_idct’ +skips frame IDCT/dequantization, ‘skip_frame’ skips decoding. +

+

Possible values: +

+
none
+

Discard no frame. +

+
+
default
+

Discard useless frames like 0-sized frames. +

+
+
noref
+

Discard all non-reference frames. +

+
+
bidir
+

Discard all bidirectional frames. +

+
+
nokey
+

Discard all frames except keyframes. +

+
+
all
+

Discard all frames. +

+
+ +

Default value is ‘default’. +

+
+
bidir_refine integer (encoding,video)
+

Refine the two motion vectors used in bidirectional macroblocks. +

+
+
brd_scale integer (encoding,video)
+

Downscale frames for dynamic B-frame decision. +

+
+
keyint_min integer (encoding,video)
+

Set minimum interval between IDR-frames. +

+
+
refs integer (encoding,video)
+

Set reference frames to consider for motion compensation. +

+
+
chromaoffset integer (encoding,video)
+

Set chroma qp offset from luma. +

+
+
trellis integer (encoding,audio,video)
+

Set rate-distortion optimal quantization. +

+
+
sc_factor integer (encoding,video)
+

Set value multiplied by qscale for each frame and added to +scene_change_score. +

+
+
mv0_threshold integer (encoding,video)
+
b_sensitivity integer (encoding,video)
+

Adjust sensitivity of b_frame_strategy 1. +

+
+
compression_level integer (encoding,audio,video)
+
min_prediction_order integer (encoding,audio)
+
max_prediction_order integer (encoding,audio)
+
timecode_frame_start integer (encoding,video)
+

Set GOP timecode frame start number, in non drop frame format. +

+
+
request_channels integer (decoding,audio)
+

Set desired number of audio channels. +

+
+
bits_per_raw_sample integer
+
channel_layout integer (decoding/encoding,audio)
+
+

Possible values: +

+
request_channel_layout integer (decoding,audio)
+
+

Possible values: +

+
rc_max_vbv_use float (encoding,video)
+
rc_min_vbv_use float (encoding,video)
+
ticks_per_frame integer (decoding/encoding,audio,video)
+
color_primaries integer (decoding/encoding,video)
+
color_trc integer (decoding/encoding,video)
+
colorspace integer (decoding/encoding,video)
+
color_range integer (decoding/encoding,video)
+
chroma_sample_location integer (decoding/encoding,video)
+
log_level_offset integer
+

Set the log level offset. +

+
+
slices integer (encoding,video)
+

Number of slices, used in parallelized encoding. +

+
+
thread_type flags (decoding/encoding,video)
+

Select multithreading type. +

+

Possible values: +

+
slice
+
frame
+
+
+
audio_service_type integer (encoding,audio)
+

Set audio service type. +

+

Possible values: +

+
ma
+

Main Audio Service +

+
ef
+

Effects +

+
vi
+

Visually Impaired +

+
hi
+

Hearing Impaired +

+
di
+

Dialogue +

+
co
+

Commentary +

+
em
+

Emergency +

+
vo
+

Voice Over +

+
ka
+

Karaoke +

+
+ +
+
request_sample_fmt sample_fmt (decoding,audio)
+

Set sample format audio decoders should prefer. Default value is +none. +

+
+
pkt_timebase rational number
+
sub_charenc encoding (decoding,subtitles)
+

Set the input subtitles character encoding. +

+
+
field_order field_order (video)
+

Set/override the field order of the video. +Possible values: +

+
progressive
+

Progressive video +

+
tt
+

Interlaced video, top field coded and displayed first +

+
bb
+

Interlaced video, bottom field coded and displayed first +

+
tb
+

Interlaced video, top coded first, bottom displayed first +

+
bt
+

Interlaced video, bottom coded first, top displayed first +

+
+ +
+
skip_alpha integer (decoding,video)
+

Set to 1 to disable processing alpha (transparency). This works like the +‘gray’ flag in the ‘flags’ option which skips chroma information +instead of alpha. Default is 0. +

+
+ + + +

10. Decoders

+ +

Decoders are configured elements in FFmpeg which allow the decoding of +multimedia streams. +

+

When you configure your FFmpeg build, all the supported native decoders +are enabled by default. Decoders requiring an external library must be enabled +manually via the corresponding --enable-lib option. You can list all +available decoders using the configure option --list-decoders. +

+

You can disable all the decoders with the configure option +--disable-decoders and selectively enable / disable single decoders +with the options --enable-decoder=DECODER / +--disable-decoder=DECODER. +

+

The option -decoders of the ff* tools will display the list of +enabled decoders. +

+ + +

11. Video Decoders

+ +

A description of some of the currently available video decoders +follows. +

+ +

11.1 rawvideo

+ +

Raw video decoder. +

+

This decoder decodes rawvideo streams. +

+ +

11.1.1 Options

+ +
+
top top_field_first
+

Specify the assumed field type of the input video. +

+
-1
+

the video is assumed to be progressive (default) +

+
0
+

bottom-field-first is assumed +

+
1
+

top-field-first is assumed +

+
+ +
+
+ + + +

12. Audio Decoders

+ +

A description of some of the currently available audio decoders +follows. +

+ +

12.1 ac3

+ +

AC-3 audio decoder. +

+

This decoder implements part of ATSC A/52:2010 and ETSI TS 102 366, as well as +the undocumented RealAudio 3 (a.k.a. dnet). +

+ +

12.1.1 AC-3 Decoder Options

+ +
+
-drc_scale value
+

Dynamic Range Scale Factor. The factor to apply to dynamic range values +from the AC-3 stream. This factor is applied exponentially. +There are 3 notable scale factor ranges: +

+
drc_scale == 0
+

DRC disabled. Produces full range audio. +

+
0 < drc_scale <= 1
+

DRC enabled. Applies a fraction of the stream DRC value. +Audio reproduction is between full range and full compression. +

+
drc_scale > 1
+

DRC enabled. Applies drc_scale asymmetrically. +Loud sounds are fully compressed. Soft sounds are enhanced. +

+
+ +
+
+ + +

12.2 ffwavesynth

+ +

Internal wave synthesizer. +

+

This decoder generates wave patterns according to predefined sequences. Its +use is purely internal and the format of the data it accepts is not publicly +documented. +

+ +

12.3 libcelt

+ +

libcelt decoder wrapper. +

+

libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec. +Requires the presence of the libcelt headers and library during configuration. +You need to explicitly configure the build with --enable-libcelt. +

+ +

12.4 libgsm

+ +

libgsm decoder wrapper. +

+

libgsm allows libavcodec to decode the GSM full rate audio codec. Requires +the presence of the libgsm headers and library during configuration. You need +to explicitly configure the build with --enable-libgsm. +

+

This decoder supports both the ordinary GSM and the Microsoft variant. +

+ +

12.5 libilbc

+ +

libilbc decoder wrapper. +

+

libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC) +audio codec. Requires the presence of the libilbc headers and library during +configuration. You need to explicitly configure the build with +--enable-libilbc. +

+ +

12.5.1 Options

+ +

The following option is supported by the libilbc wrapper. +

+
+
enhance
+
+

Enable the enhancement of the decoded audio when set to 1. The default +value is 0 (disabled). +

+
+
+ + +

12.6 libopencore-amrnb

+ +

libopencore-amrnb decoder wrapper. +

+

libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate +Narrowband audio codec. Using it requires the presence of the +libopencore-amrnb headers and library during configuration. You need to +explicitly configure the build with --enable-libopencore-amrnb. +

+

An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB +without this library. +

+ +

12.7 libopencore-amrwb

+ +

libopencore-amrwb decoder wrapper. +

+

libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate +Wideband audio codec. Using it requires the presence of the +libopencore-amrwb headers and library during configuration. You need to +explicitly configure the build with --enable-libopencore-amrwb. +

+

An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB +without this library. +

+ +

12.8 libopus

+ +

libopus decoder wrapper. +

+

libopus allows libavcodec to decode the Opus Interactive Audio Codec. +Requires the presence of the libopus headers and library during +configuration. You need to explicitly configure the build with +--enable-libopus. +

+ + +

13. Subtitles Decoders

+ + +

13.1 dvdsub

+ +

This codec decodes the bitmap subtitles used in DVDs; the same subtitles can +also be found in VobSub file pairs and in some Matroska files. +

+ +

13.1.1 Options

+ +
+
palette
+

Specify the global palette used by the bitmaps. When stored in VobSub, the +palette is normally specified in the index file; in Matroska, the palette is +stored in the codec extra-data in the same format as in VobSub. In DVDs, the +palette is stored in the IFO file, and therefore not available when reading +from dumped VOB files. +

+

The format for this option is a string containing 16 24-bit hexadecimal +numbers (without 0x prefix) separated by commas, for example 0d00ee, +ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1, +7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b. +

+
+ + +

13.2 libzvbi-teletext

+ +

Libzvbi allows libavcodec to decode DVB teletext pages and DVB teletext +subtitles. Requires the presence of the libzvbi headers and library during +configuration. You need to explicitly configure the build with +--enable-libzvbi. +

+ +

13.2.1 Options

+ +
+
txt_page
+

List of teletext page numbers to decode. You may use the special * string to +match all pages. Pages that do not match the specified list are dropped. +Default value is *. +

+
txt_chop_top
+

Discards the top teletext line. Default value is 1. +

+
txt_format
+

Specifies the format of the decoded subtitles. The teletext decoder is capable +of decoding the teletext pages to bitmaps or to simple text, you should use +"bitmap" for teletext pages, because certain graphics and colors cannot be +expressed in simple text. You might use "text" for teletext based subtitles if +your application can handle simple text based subtitles. Default value is +bitmap. +

+
txt_left
+

X offset of generated bitmaps, default is 0. +

+
txt_top
+

Y offset of generated bitmaps, default is 0. +

+
txt_chop_spaces
+

Chops leading and trailing spaces and removes empty lines from the generated +text. This option is useful for teletext based subtitles where empty spaces may +be present at the start or at the end of the lines or empty lines may be +present between the subtitle lines because of double-sized teletext characters. +Default value is 1. +

+
txt_duration
+

Sets the display duration of the decoded teletext pages or subtitles in +milliseconds. Default value is 30000 which is 30 seconds. +

+
txt_transparent
+

Force transparent background of the generated teletext bitmaps. Default value +is 0 which means an opaque (black) background. +

+
+ + +

14. Bitstream Filters

+ +

When you configure your FFmpeg build, all the supported bitstream +filters are enabled by default. You can list all available ones using +the configure option --list-bsfs. +

+

You can disable all the bitstream filters using the configure option +--disable-bsfs, and selectively enable any bitstream filter using +the option --enable-bsf=BSF, or you can disable a particular +bitstream filter using the option --disable-bsf=BSF. +

+

The option -bsfs of the ff* tools will display the list of +all the supported bitstream filters included in your build. +

+

Below is a description of the currently available bitstream filters. +

+ +

14.1 aac_adtstoasc

+ +

Convert MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration +bitstream filter. +

+

This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4 +ADTS header and removes the ADTS header. +

+

This is required for example when copying an AAC stream from a raw +ADTS AAC container to a FLV or a MOV/MP4 file. +

+ +

14.2 chomp

+ +

Remove zero padding at the end of a packet. +

+ +

14.3 dump_extra

+ +

Add extradata to the beginning of the filtered packets. +

+

The additional argument specifies which packets should be filtered. +It accepts the values: +

+
a
+

add extradata to all key packets, but only if local_header is +set in the ‘flags2’ codec context field +

+
+
k
+

add extradata to all key packets +

+
+
e
+

add extradata to all packets +

+
+ +

If not specified it is assumed ‘k’. +

+

For example the following ffmpeg command forces a global +header (thus disabling individual packet headers) in the H.264 packets +generated by the libx264 encoder, but corrects them by adding +the header stored in extradata to the key packets: +

 
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
+
+ + +

14.4 h264_mp4toannexb

+ +

Convert an H.264 bitstream from length prefixed mode to start code +prefixed mode (as defined in the Annex B of the ITU-T H.264 +specification). +

+

This is required by some streaming formats, typically the MPEG-2 +transport stream format ("mpegts"). +

+

For example to remux an MP4 file containing an H.264 stream to mpegts +format with ffmpeg, you can use the command: +

+
 
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
+
+ + +

14.5 imx_dump_header

+ + +

14.6 mjpeg2jpeg

+ +

Convert MJPEG/AVI1 packets to full JPEG/JFIF packets. +

+

MJPEG is a video codec wherein each video frame is essentially a +JPEG image. The individual frames can be extracted without loss, +e.g. by +

+
 
ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg
+
+ +

Unfortunately, these chunks are incomplete JPEG images, because +they lack the DHT segment required for decoding. Quoting from +http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml: +

+

Avery Lee, writing in the rec.video.desktop newsgroup in 2001, +commented that "MJPEG, or at least the MJPEG in AVIs having the +MJPG fourcc, is restricted JPEG with a fixed – and *omitted* – +Huffman table. The JPEG must be YCbCr colorspace, it must be 4:2:2, +and it must use basic Huffman encoding, not arithmetic or +progressive. . . . You can indeed extract the MJPEG frames and +decode them with a regular JPEG decoder, but you have to prepend +the DHT segment to them, or else the decoder won’t have any idea +how to decompress the data. The exact table necessary is given in +the OpenDML spec." +

+

This bitstream filter patches the header of frames extracted from an MJPEG +stream (carrying the AVI1 header ID and lacking a DHT segment) to +produce fully qualified JPEG images. +

+
 
ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
+exiftran -i -9 frame*.jpg
+ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
+
+ + +

14.7 mjpega_dump_header

+ + +

14.8 movsub

+ + +

14.9 mp3_header_decompress

+ + +

14.10 noise

+ + +

14.11 remove_extra

+ + +

15. Format Options

+ +

The libavformat library provides some generic global options, which +can be set on all the muxers and demuxers. In addition each muxer or +demuxer may support so-called private options, which are specific for +that component. +

+

Options may be set by specifying -option value in the +FFmpeg tools, or by setting the value explicitly in the +AVFormatContext options or using the ‘libavutil/opt.h’ API +for programmatic use. +

+

The list of supported options follows: +

+
+
avioflags flags (input/output)
+

Possible values: +

+
direct
+

Reduce buffering. +

+
+ +
+
probesize integer (input)
+

Set probing size in bytes, i.e. the size of the data to analyze to get +stream information. A higher value allows more information to be +detected in case it is dispersed into the stream, but will increase +latency. Must be an integer not less than 32. It is 5000000 by default. +

+
+
packetsize integer (output)
+

Set packet size. +

+
+
fflags flags (input/output)
+

Set format flags. +

+

Possible values: +

+
ignidx
+

Ignore index. +

+
genpts
+

Generate PTS. +

+
nofillin
+

Do not fill in missing values that can be exactly calculated. +

+
noparse
+

Disable AVParsers, this needs +nofillin too. +

+
igndts
+

Ignore DTS. +

+
discardcorrupt
+

Discard corrupted frames. +

+
sortdts
+

Try to interleave output packets by DTS. +

+
keepside
+

Do not merge side data. +

+
latm
+

Enable RTP MP4A-LATM payload. +

+
nobuffer
+

Reduce the latency introduced by optional buffering +

+
+ +
+
seek2any integer (input)
+

Allow seeking to non-keyframes on demuxer level when supported if set to 1. +Default is 0. +

+
+
analyzeduration integer (input)
+

Specify how many microseconds are analyzed to probe the input. A +higher value allows more accurate information to be detected, but will +increase latency. It defaults to 5,000,000 microseconds = 5 seconds. +

+
+
cryptokey hexadecimal string (input)
+

Set decryption key. +

+
+
indexmem integer (input)
+

Set max memory used for timestamp index (per stream). +

+
+
rtbufsize integer (input)
+

Set max memory used for buffering real-time frames. +

+
+
fdebug flags (input/output)
+

Print specific debug info. +

+

Possible values: +

+
ts
+
+ +
+
max_delay integer (input/output)
+

Set maximum muxing or demuxing delay in microseconds. +

+
+
fpsprobesize integer (input)
+

Set number of frames used to probe fps. +

+
+
audio_preload integer (output)
+

Set microseconds by which audio packets should be interleaved earlier. +

+
+
chunk_duration integer (output)
+

Set microseconds for each chunk. +

+
+
chunk_size integer (output)
+

Set size in bytes for each chunk. +

+
+
err_detect, f_err_detect flags (input)
+

Set error detection flags. f_err_detect is deprecated and +should be used only via the ffmpeg tool. +

+

Possible values: +

+
crccheck
+

Verify embedded CRCs. +

+
bitstream
+

Detect bitstream specification deviations. +

+
buffer
+

Detect improper bitstream length. +

+
explode
+

Abort decoding on minor error detection. +

+
careful
+

Consider things that violate the spec and have not been seen in the +wild as errors. +

+
compliant
+

Consider all spec non compliancies as errors. +

+
aggressive
+

Consider things that a sane encoder should not do as an error. +

+
+ +
+
use_wallclock_as_timestamps integer (input)
+

Use wallclock as timestamps. +

+
+
avoid_negative_ts integer (output)
+
+

Possible values: +

+
make_non_negative
+

Shift timestamps to make them non-negative. +Also note that this affects only leading negative timestamps, and not +non-monotonic negative timestamps. +

+
make_zero
+

Shift timestamps so that the first timestamp is 0. +

+
auto (default)
+

Enables shifting when required by the target format. +

+
disabled
+

Disables shifting of timestamp. +

+
+ +

When shifting is enabled, all output timestamps are shifted by the +same amount. Audio, video, and subtitles desynching and relative +timestamp differences are preserved compared to how they would have +been without shifting. +

+
+
skip_initial_bytes integer (input)
+

Set number of bytes to skip before reading the header and frames. +Default is 0. +

+
+
correct_ts_overflow integer (input)
+

Correct single timestamp overflows if set to 1. Default is 1. +

+
+
flush_packets integer (output)
+

Flush the underlying I/O stream after each packet. Default 1 enables it, and +has the effect of reducing the latency; 0 disables it and may slightly +increase performance in some cases. +

+
+
output_ts_offset offset (output)
+

Set the output time offset. +

+

offset must be a time duration specification, +see (ffmpeg-utils)time duration syntax. +

+

The offset is added by the muxer to the output timestamps. +

+

Specifying a positive offset means that the corresponding streams are +delayed by the time duration specified in offset. Default value +is 0 (meaning that no offset is applied). +

+
+ + +

+

+

15.1 Format stream specifiers

+ +

Format stream specifiers allow selection of one or more streams that +match specific properties. +

+

Possible forms of stream specifiers are: +

+
stream_index
+

Matches the stream with this index. +

+
+
stream_type[:stream_index]
+

stream_type is one of following: ’v’ for video, ’a’ for audio, +’s’ for subtitle, ’d’ for data, and ’t’ for attachments. If +stream_index is given, then it matches the stream number +stream_index of this type. Otherwise, it matches all streams of +this type. +

+
+
p:program_id[:stream_index]
+

If stream_index is given, then it matches the stream with number +stream_index in the program with the id +program_id. Otherwise, it matches all streams in the program. +

+
+
#stream_id
+

Matches the stream by a format-specific ID. +

+
+ +

The exact semantics of stream specifiers is defined by the +avformat_match_stream_specifier() function declared in the +‘libavformat/avformat.h’ header. +

+ +

16. Demuxers

+ +

Demuxers are configured elements in FFmpeg that can read the +multimedia streams from a particular type of file. +

+

When you configure your FFmpeg build, all the supported demuxers +are enabled by default. You can list all available ones using the +configure option --list-demuxers. +

+

You can disable all the demuxers using the configure option +--disable-demuxers, and selectively enable a single demuxer with +the option --enable-demuxer=DEMUXER, or disable it +with the option --disable-demuxer=DEMUXER. +

+

The option -formats of the ff* tools will display the list of +enabled demuxers. +

+

The description of some of the currently available demuxers follows. +

+ +

16.1 applehttp

+ +

Apple HTTP Live Streaming demuxer. +

+

This demuxer presents all AVStreams from all variant streams. +The id field is set to the bitrate variant index number. By setting +the discard flags on AVStreams (by pressing ’a’ or ’v’ in ffplay), +the caller can decide which variant streams to actually receive. +The total bitrate of the variant that the stream belongs to is +available in a metadata key named "variant_bitrate". +

+ +

16.2 asf

+ +

Advanced Systems Format demuxer. +

+

This demuxer is used to demux ASF files and MMS network streams. +

+
+
-no_resync_search bool
+

Do not try to resynchronize by looking for a certain optional start code. +

+
+ +

+

+

16.3 concat

+ +

Virtual concatenation script demuxer. +

+

This demuxer reads a list of files and other directives from a text file and +demuxes them one after the other, as if all their packet had been muxed +together. +

+

The timestamps in the files are adjusted so that the first file starts at 0 +and each next file starts where the previous one finishes. Note that it is +done globally and may cause gaps if all streams do not have exactly the same +length. +

+

All files must have the same streams (same codecs, same time base, etc.). +

+

The duration of each file is used to adjust the timestamps of the next file: +if the duration is incorrect (because it was computed using the bit-rate or +because the file is truncated, for example), it can cause artifacts. The +duration directive can be used to override the duration stored in +each file. +

+ +

16.3.1 Syntax

+ +

The script is a text file in extended-ASCII, with one directive per line. +Empty lines, leading spaces and lines starting with ’#’ are ignored. The +following directive is recognized: +

+
+
file path
+

Path to a file to read; special characters and spaces must be escaped with +backslash or single quotes. +

+

All subsequent directives apply to that file. +

+
+
ffconcat version 1.0
+

Identify the script type and version. It also sets the ‘safe’ option +to 1 if it was set to its default -1. +

+

To make FFmpeg recognize the format automatically, this directive must +appear exactly as is (no extra space or byte-order-mark) on the very first +line of the script. +

+
+
duration dur
+

Duration of the file. This information can be specified from the file; +specifying it here may be more efficient or help if the information from the +file is not available or accurate. +

+

If the duration is set for all files, then it is possible to seek in the +whole concatenated video. +

+
+
+ + +

16.3.2 Options

+ +

This demuxer accepts the following option: +

+
+
safe
+

If set to 1, reject unsafe file paths. A file path is considered safe if it +does not contain a protocol specification and is relative and all components +only contain characters from the portable character set (letters, digits, +period, underscore and hyphen) and have no period at the beginning of a +component. +

+

If set to 0, any file name is accepted. +

+

The default is -1, it is equivalent to 1 if the format was automatically +probed and 0 otherwise. +

+
+
+ + +

16.4 flv

+ +

Adobe Flash Video Format demuxer. +

+

This demuxer is used to demux FLV files and RTMP network streams. +

+
+
-flv_metadata bool
+

Allocate the streams according to the onMetaData array content. +

+
+ + +

16.5 libgme

+ +

The Game Music Emu library is a collection of video game music file emulators. +

+

See http://code.google.com/p/game-music-emu/ for more information. +

+

Some files have multiple tracks. The demuxer will pick the first track by +default. The ‘track_index’ option can be used to select a different +track. Track indexes start at 0. The demuxer exports the number of tracks as +tracks meta data entry. +

+

For very large files, the ‘max_size’ option may have to be adjusted. +

+ +

16.6 libquvi

+ +

Play media from Internet services using the quvi project. +

+

The demuxer accepts a ‘format’ option to request a specific quality. It +is by default set to best. +

+

See http://quvi.sourceforge.net/ for more information. +

+

FFmpeg needs to be built with --enable-libquvi for this demuxer to be +enabled. +

+ +

16.7 image2

+ +

Image file demuxer. +

+

This demuxer reads from a list of image files specified by a pattern. +The syntax and meaning of the pattern is specified by the +option pattern_type. +

+

The pattern may contain a suffix which is used to automatically +determine the format of the images contained in the files. +

+

The size, the pixel format, and the format of each image must be the +same for all the files in the sequence. +

+

This demuxer accepts the following options: +

+
framerate
+

Set the frame rate for the video stream. It defaults to 25. +

+
loop
+

If set to 1, loop over the input. Default value is 0. +

+
pattern_type
+

Select the pattern type used to interpret the provided filename. +

+

pattern_type accepts one of the following values. +

+
sequence
+

Select a sequence pattern type, used to specify a sequence of files +indexed by sequential numbers. +

+

A sequence pattern may contain the string "%d" or "%0Nd", which +specifies the position of the characters representing a sequential +number in each filename matched by the pattern. If the form +"%0Nd" is used, the string representing the number in each +filename is 0-padded and N is the total number of 0-padded +digits representing the number. The literal character ’%’ can be +specified in the pattern with the string "%%". +

+

If the sequence pattern contains "%d" or "%0Nd", the first filename of +the file list specified by the pattern must contain a number +inclusively contained between start_number and +start_number+start_number_range-1, and all the following +numbers must be sequential. +

+

For example the pattern "img-%03d.bmp" will match a sequence of +filenames of the form ‘img-001.bmp’, ‘img-002.bmp’, ..., +‘img-010.bmp’, etc.; the pattern "i%%m%%g-%d.jpg" will match a +sequence of filenames of the form ‘i%m%g-1.jpg’, +‘i%m%g-2.jpg’, ..., ‘i%m%g-10.jpg’, etc. +

+

Note that the pattern must not necessarily contain "%d" or +"%0Nd", for example to convert a single image file +‘img.jpeg’ you can employ the command: +

 
ffmpeg -i img.jpeg img.png
+
+ +
+
glob
+

Select a glob wildcard pattern type. +

+

The pattern is interpreted like a glob() pattern. This is only +selectable if libavformat was compiled with globbing support. +

+
+
glob_sequence (deprecated, will be removed)
+

Select a mixed glob wildcard/sequence pattern. +

+

If your version of libavformat was compiled with globbing support, and +the provided pattern contains at least one glob meta character among +%*?[]{} that is preceded by an unescaped "%", the pattern is +interpreted like a glob() pattern, otherwise it is interpreted +like a sequence pattern. +

+

All glob special characters %*?[]{} must be prefixed +with "%". To escape a literal "%" you shall use "%%". +

+

For example the pattern foo-%*.jpeg will match all the +filenames prefixed by "foo-" and terminating with ".jpeg", and +foo-%?%?%?.jpeg will match all the filenames prefixed with +"foo-", followed by a sequence of three characters, and terminating +with ".jpeg". +

+

This pattern type is deprecated in favor of glob and +sequence. +

+
+ +

Default value is glob_sequence. +

+
pixel_format
+

Set the pixel format of the images to read. If not specified the pixel +format is guessed from the first image file in the sequence. +

+
start_number
+

Set the index of the file matched by the image file pattern to start +to read from. Default value is 0. +

+
start_number_range
+

Set the index interval range to check when looking for the first image +file in the sequence, starting from start_number. Default value +is 5. +

+
ts_from_file
+

If set to 1, will set frame timestamp to modification time of image file. Note +that monotonicity of timestamps is not provided: images go in the same order as +without this option. Default value is 0. +

+
video_size
+

Set the video size of the images to read. If not specified the video +size is guessed from the first image file in the sequence. +

+
+ + +

16.7.1 Examples

+ +
    +
  • +Use ffmpeg for creating a video from the images in the file +sequence ‘img-001.jpeg’, ‘img-002.jpeg’, ..., assuming an +input frame rate of 10 frames per second: +
     
    ffmpeg -framerate 10 -i 'img-%03d.jpeg' out.mkv
    +
    + +
  • +As above, but start by reading from a file with index 100 in the sequence: +
     
    ffmpeg -framerate 10 -start_number 100 -i 'img-%03d.jpeg' out.mkv
    +
    + +
  • +Read images matching the "*.png" glob pattern, that is all the files +terminating with the ".png" suffix: +
     
    ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
    +
    +
+ + +

16.8 mpegts

+ +

MPEG-2 transport stream demuxer. +

+
+
fix_teletext_pts
+

Overrides teletext packet PTS and DTS values with the timestamps calculated +from the PCR of the first program which the teletext stream is part of and is +not discarded. Default value is 1, set this option to 0 if you want your +teletext packet PTS and DTS values untouched. +

+
+ + +

16.9 rawvideo

+ +

Raw video demuxer. +

+

This demuxer allows one to read raw video data. Since there is no header +specifying the assumed video parameters, the user must specify them +in order to be able to decode the data correctly. +

+

This demuxer accepts the following options: +

+
framerate
+

Set input video frame rate. Default value is 25. +

+
+
pixel_format
+

Set the input video pixel format. Default value is yuv420p. +

+
+
video_size
+

Set the input video size. This value must be specified explicitly. +

+
+ +

For example to read a rawvideo file ‘input.raw’ with +ffplay, assuming a pixel format of rgb24, a video +size of 320x240, and a frame rate of 10 images per second, use +the command: +

 
ffplay -f rawvideo -pixel_format rgb24 -video_size 320x240 -framerate 10 input.raw
+
+ + +

16.10 sbg

+ +

SBaGen script demuxer. +

+

This demuxer reads the script language used by SBaGen +http://uazu.net/sbagen/ to generate binaural beats sessions. A SBG +script looks like that: +

 
-SE
+a: 300-2.5/3 440+4.5/0
+b: 300-2.5/0 440+4.5/3
+off: -
+NOW      == a
++0:07:00 == b
++0:14:00 == a
++0:21:00 == b
++0:30:00    off
+
+ +

A SBG script can mix absolute and relative timestamps. If the script uses +either only absolute timestamps (including the script start time) or only +relative ones, then its layout is fixed, and the conversion is +straightforward. On the other hand, if the script mixes both kinds of +timestamps, then the NOW reference for relative timestamps will be +taken from the current time of day at the time the script is read, and the +script layout will be frozen according to that reference. That means that if +the script is directly played, the actual times will match the absolute +timestamps up to the sound controller’s clock accuracy, but if the user +somehow pauses the playback or seeks, all times will be shifted accordingly. +

+ +

16.11 tedcaptions

+ +

JSON captions used for TED Talks. +

+

TED does not provide links to the captions, but they can be guessed from the +page. The file ‘tools/bookmarklets.html’ from the FFmpeg source tree +contains a bookmarklet to expose them. +

+

This demuxer accepts the following option: +

+
start_time
+

Set the start time of the TED talk, in milliseconds. The default is 15000 +(15s). It is used to sync the captions with the downloadable videos, because +they include a 15s intro. +

+
+ +

Example: convert the captions to a format most players understand: +

 
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
+
+ + +

17. Metadata

+ +

FFmpeg is able to dump metadata from media files into a simple UTF-8-encoded +INI-like text file and then load it back using the metadata muxer/demuxer. +

+

The file format is as follows: +

    +
  1. +A file consists of a header and a number of metadata tags divided into sections, +each on its own line. + +
  2. +The header is a ’;FFMETADATA’ string, followed by a version number (now 1). + +
  3. +Metadata tags are of the form ’key=value’ + +
  4. +Immediately after header follows global metadata + +
  5. +After global metadata there may be sections with per-stream/per-chapter +metadata. + +
  6. +A section starts with the section name in uppercase (i.e. STREAM or CHAPTER) in +brackets (’[’, ’]’) and ends with next section or end of file. + +
  7. +At the beginning of a chapter section there may be an optional timebase to be +used for start/end values. It must be in form ’TIMEBASE=num/den’, where num and +den are integers. If the timebase is missing then start/end times are assumed to +be in milliseconds. +Next a chapter section must contain chapter start and end times in form +’START=num’, ’END=num’, where num is a positive integer. + +
  8. +Empty lines and lines starting with ’;’ or ’#’ are ignored. + +
  9. +Metadata keys or values containing special characters (’=’, ’;’, ’#’, ’\’ and a +newline) must be escaped with a backslash ’\’. + +
  10. +Note that whitespace in metadata (e.g. foo = bar) is considered to be a part of +the tag (in the example above key is ’foo ’, value is ’ bar’). +
+ +

A ffmetadata file might look like this: +

 
;FFMETADATA1
+title=bike\\shed
+;this is a comment
+artist=FFmpeg troll team
+
+[CHAPTER]
+TIMEBASE=1/1000
+START=0
+#chapter ends at 0:01:00
+END=60000
+title=chapter \#1
+[STREAM]
+title=multi\
+line
+
+ +

By using the ffmetadata muxer and demuxer it is possible to extract +metadata from an input file to an ffmetadata file, and then transcode +the file into an output file with the edited ffmetadata file. +

+

Extracting an ffmetadata file with ‘ffmpeg’ goes as follows: +

 
ffmpeg -i INPUT -f ffmetadata FFMETADATAFILE
+
+ +

Reinserting edited metadata information from the FFMETADATAFILE file can +be done as: +

 
ffmpeg -i INPUT -i FFMETADATAFILE -map_metadata 1 -codec copy OUTPUT
+
+ + +

18. Protocols

+ +

Protocols are configured elements in FFmpeg that enable access to +resources that require specific protocols. +

+

When you configure your FFmpeg build, all the supported protocols are +enabled by default. You can list all available ones using the +configure option "–list-protocols". +

+

You can disable all the protocols using the configure option +"–disable-protocols", and selectively enable a protocol using the +option "–enable-protocol=PROTOCOL", or you can disable a +particular protocol using the option +"–disable-protocol=PROTOCOL". +

+

The option "-protocols" of the ff* tools will display the list of +supported protocols. +

+

A description of the currently available protocols follows. +

+ +

18.1 bluray

+ +

Read BluRay playlist. +

+

The accepted options are: +

+
angle
+

BluRay angle +

+
+
chapter
+

Start chapter (1...N) +

+
+
playlist
+

Playlist to read (BDMV/PLAYLIST/?????.mpls) +

+
+
+ +

Examples: +

+

Read longest playlist from BluRay mounted to /mnt/bluray: +

 
bluray:/mnt/bluray
+
+ +

Read angle 2 of playlist 4 from BluRay mounted to /mnt/bluray, start from chapter 2: +

 
-playlist 4 -angle 2 -chapter 2 bluray:/mnt/bluray
+
+ + +

18.2 cache

+ +

Caching wrapper for input stream. +

+

Cache the input stream to temporary file. It brings seeking capability to live streams. +

+
 
cache:URL
+
+ + +

18.3 concat

+ +

Physical concatenation protocol. +

+

Allows one to read and seek from many resources in sequence as if they were +a unique resource. +

+

A URL accepted by this protocol has the syntax: +

 
concat:URL1|URL2|...|URLN
+
+ +

where URL1, URL2, ..., URLN are the urls of the +resource to be concatenated, each one possibly specifying a distinct +protocol. +

+

For example to read a sequence of files ‘split1.mpeg’, +‘split2.mpeg’, ‘split3.mpeg’ with ffplay use the +command: +

 
ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
+
+ +

Note that you may need to escape the character "|" which is special for +many shells. +

+ +

18.4 crypto

+ +

AES-encrypted stream reading protocol. +

+

The accepted options are: +

+
key
+

Set the AES decryption key binary block from given hexadecimal representation. +

+
+
iv
+

Set the AES decryption initialization vector binary block from given hexadecimal representation. +

+
+ +

Accepted URL formats: +

 
crypto:URL
+crypto+URL
+
+ + +

18.5 data

+ +

Data in-line in the URI. See http://en.wikipedia.org/wiki/Data_URI_scheme. +

+

For example, to convert a GIF file given inline with ffmpeg: +

 
ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP///////////////ywAAAAACAAIAAADF0gEDLojDgdGiJdJqUX02iB4E8Q9jUMkADs=" smiley.png
+
+ + +

18.6 file

+ +

File access protocol. +

+

Allow to read from or write to a file. +

+

A file URL can have the form: +

 
file:filename
+
+ +

where filename is the path of the file to read. +

+

A URL that does not have a protocol prefix will be assumed to be a +file URL. Depending on the build, a URL that looks like a Windows +path with the drive letter at the beginning will also be assumed to be +a file URL (usually not the case in builds for unix-like systems). +

+

For example to read from a file ‘input.mpeg’ with ffmpeg +use the command: +

 
ffmpeg -i file:input.mpeg output.mpeg
+
+ +

This protocol accepts the following options: +

+
+
truncate
+

Truncate existing files on write, if set to 1. A value of 0 prevents +truncating. Default value is 1. +

+
+
blocksize
+

Set I/O operation maximum block size, in bytes. Default value is +INT_MAX, which results in not limiting the requested block size. +Setting this value reasonably low improves user termination request reaction +time, which is valuable for files on slow medium. +

+
+ + +

18.7 ftp

+ +

FTP (File Transfer Protocol). +

+

Allow to read from or write to remote resources using FTP protocol. +

+

The following syntax is required. +

 
ftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
+
+ +

This protocol accepts the following options. +

+
+
timeout
+

Set timeout of socket I/O operations used by the underlying low level +operation. By default it is set to -1, which means that the timeout is +not specified. +

+
+
ftp-anonymous-password
+

Password used when logging in as an anonymous user. Typically an e-mail address +should be used. +

+
+
ftp-write-seekable
+

Control seekability of connection during encoding. If set to 1 the +resource is supposed to be seekable, if set to 0 it is assumed not +to be seekable. Default value is 0. +

+
+ +

NOTE: The protocol can be used as output, but it is recommended not to do +so, unless special care is taken (tests, customized server configuration +etc.). Different FTP servers behave in different ways during seek +operations. ff* tools may produce incomplete content due to server limitations. +

+ +

18.8 gopher

+ +

Gopher protocol. +

+ +

18.9 hls

+ +

Read Apple HTTP Live Streaming compliant segmented stream as +a uniform one. The M3U8 playlists describing the segments can be +remote HTTP resources or local files, accessed using the standard +file protocol. +The nested protocol is declared by specifying +"+proto" after the hls URI scheme name, where proto +is either "file" or "http". +

+
 
hls+http://host/path/to/remote/resource.m3u8
+hls+file://path/to/local/resource.m3u8
+
+ +

Using this protocol is discouraged - the hls demuxer should work +just as well (if not, please report the issues) and is more complete. +To use the hls demuxer instead, simply use the direct URLs to the +m3u8 files. +

+ +

18.10 http

+ +

HTTP (Hyper Text Transfer Protocol). +

+

This protocol accepts the following options: +

+
+
seekable
+

Control seekability of connection. If set to 1 the resource is +supposed to be seekable, if set to 0 it is assumed not to be seekable, +if set to -1 it will try to autodetect if it is seekable. Default +value is -1. +

+
+
chunked_post
+

If set to 1 use chunked Transfer-Encoding for posts, default is 1. +

+
+
content_type
+

Set a specific content type for the POST messages. +

+
+
headers
+

Set custom HTTP headers, can override built in default headers. The +value must be a string encoding the headers. +

+
+
multiple_requests
+

Use persistent connections if set to 1, default is 0. +

+
+
post_data
+

Set custom HTTP post data. +

+
+
user-agent
+
user_agent
+

Override the User-Agent header. If not specified the protocol will use a +string describing the libavformat build. ("Lavf/<version>") +

+
+
timeout
+

Set timeout of socket I/O operations used by the underlying low level +operation. By default it is set to -1, which means that the timeout is +not specified. +

+
+
mime_type
+

Export the MIME type. +

+
+
icy
+

If set to 1 request ICY (SHOUTcast) metadata from the server. If the server +supports this, the metadata has to be retrieved by the application by reading +the ‘icy_metadata_headers’ and ‘icy_metadata_packet’ options. +The default is 0. +

+
+
icy_metadata_headers
+

If the server supports ICY metadata, this contains the ICY-specific HTTP reply +headers, separated by newline characters. +

+
+
icy_metadata_packet
+

If the server supports ICY metadata, and ‘icy’ was set to 1, this +contains the last non-empty metadata packet sent by the server. It should be +polled in regular intervals by applications interested in mid-stream metadata +updates. +

+
+
cookies
+

Set the cookies to be sent in future requests. The format of each cookie is the +same as the value of a Set-Cookie HTTP response field. Multiple cookies can be +delimited by a newline character. +

+
+
offset
+

Set initial byte offset. +

+
+
end_offset
+

Try to limit the request to bytes preceding this offset. +

+
+ + +

18.10.1 HTTP Cookies

+ +

Some HTTP requests will be denied unless cookie values are passed in with the +request. The ‘cookies’ option allows these cookies to be specified. At +the very least, each cookie must specify a value along with a path and domain. +HTTP requests that match both the domain and path will automatically include the +cookie value in the HTTP Cookie header field. Multiple cookies can be delimited +by a newline. +

+

The required syntax to play a stream specifying a cookie is: +

 
ffplay -cookies "nlqptid=nltid=tsn; path=/; domain=somedomain.com;" http://somedomain.com/somestream.m3u8
+
+ + +

18.11 mmst

+ +

MMS (Microsoft Media Server) protocol over TCP. +

+ +

18.12 mmsh

+ +

MMS (Microsoft Media Server) protocol over HTTP. +

+

The required syntax is: +

 
mmsh://server[:port][/app][/playpath]
+
+ + +

18.13 md5

+ +

MD5 output protocol. +

+

Computes the MD5 hash of the data to be written, and on close writes +this to the designated output or stdout if none is specified. It can +be used to test muxers without writing an actual file. +

+

Some examples follow. +

 
# Write the MD5 hash of the encoded AVI file to the file output.avi.md5.
+ffmpeg -i input.flv -f avi -y md5:output.avi.md5
+
+# Write the MD5 hash of the encoded AVI file to stdout.
+ffmpeg -i input.flv -f avi -y md5:
+
+ +

Note that some formats (typically MOV) require the output protocol to +be seekable, so they will fail with the MD5 output protocol. +

+ +

18.14 pipe

+ +

UNIX pipe access protocol. +

+

Allow to read and write from UNIX pipes. +

+

The accepted syntax is: +

 
pipe:[number]
+
+ +

number is the number corresponding to the file descriptor of the +pipe (e.g. 0 for stdin, 1 for stdout, 2 for stderr). If number +is not specified, by default the stdout file descriptor will be used +for writing, stdin for reading. +

+

For example to read from stdin with ffmpeg: +

 
cat test.wav | ffmpeg -i pipe:0
+# ...this is the same as...
+cat test.wav | ffmpeg -i pipe:
+
+ +

For writing to stdout with ffmpeg: +

 
ffmpeg -i test.wav -f avi pipe:1 | cat > test.avi
+# ...this is the same as...
+ffmpeg -i test.wav -f avi pipe: | cat > test.avi
+
+ +

This protocol accepts the following options: +

+
+
blocksize
+

Set I/O operation maximum block size, in bytes. Default value is +INT_MAX, which results in not limiting the requested block size. +Setting this value reasonably low improves user termination request reaction +time, which is valuable if data transmission is slow. +

+
+ +

Note that some formats (typically MOV), require the output protocol to +be seekable, so they will fail with the pipe output protocol. +

+ +

18.15 rtmp

+ +

Real-Time Messaging Protocol. +

+

The Real-Time Messaging Protocol (RTMP) is used for streaming multimedia +content across a TCP/IP network. +

+

The required syntax is: +

 
rtmp://[username:password@]server[:port][/app][/instance][/playpath]
+
+ +

The accepted parameters are: +

+
username
+

An optional username (mostly for publishing). +

+
+
password
+

An optional password (mostly for publishing). +

+
+
server
+

The address of the RTMP server. +

+
+
port
+

The number of the TCP port to use (by default is 1935). +

+
+
app
+

It is the name of the application to access. It usually corresponds to +the path where the application is installed on the RTMP server +(e.g. ‘/ondemand/’, ‘/flash/live/’, etc.). You can override +the value parsed from the URI through the rtmp_app option, too. +

+
+
playpath
+

It is the path or name of the resource to play with reference to the +application specified in app, may be prefixed by "mp4:". You +can override the value parsed from the URI through the rtmp_playpath +option, too. +

+
+
listen
+

Act as a server, listening for an incoming connection. +

+
+
timeout
+

Maximum time to wait for the incoming connection. Implies listen. +

+
+ +

Additionally, the following parameters can be set via command line options +(or in code via AVOptions): +

+
rtmp_app
+

Name of application to connect on the RTMP server. This option +overrides the parameter specified in the URI. +

+
+
rtmp_buffer
+

Set the client buffer time in milliseconds. The default is 3000. +

+
+
rtmp_conn
+

Extra arbitrary AMF connection parameters, parsed from a string, +e.g. like B:1 S:authMe O:1 NN:code:1.23 NS:flag:ok O:0. +Each value is prefixed by a single character denoting the type, +B for Boolean, N for number, S for string, O for object, or Z for null, +followed by a colon. For Booleans the data must be either 0 or 1 for +FALSE or TRUE, respectively. Likewise for Objects the data must be 0 or +1 to end or begin an object, respectively. Data items in subobjects may +be named, by prefixing the type with ’N’ and specifying the name before +the value (i.e. NB:myFlag:1). This option may be used multiple +times to construct arbitrary AMF sequences. +

+
+
rtmp_flashver
+

Version of the Flash plugin used to run the SWF player. The default +is LNX 9,0,124,2. (When publishing, the default is FMLE/3.0 (compatible; +<libavformat version>).) +

+
+
rtmp_flush_interval
+

Number of packets flushed in the same request (RTMPT only). The default +is 10. +

+
+
rtmp_live
+

Specify that the media is a live stream. No resuming or seeking in +live streams is possible. The default value is any, which means the +subscriber first tries to play the live stream specified in the +playpath. If a live stream of that name is not found, it plays the +recorded stream. The other possible values are live and +recorded. +

+
+
rtmp_pageurl
+

URL of the web page in which the media was embedded. By default no +value will be sent. +

+
+
rtmp_playpath
+

Stream identifier to play or to publish. This option overrides the +parameter specified in the URI. +

+
+
rtmp_subscribe
+

Name of live stream to subscribe to. By default no value will be sent. +It is only sent if the option is specified or if rtmp_live +is set to live. +

+
+
rtmp_swfhash
+

SHA256 hash of the decompressed SWF file (32 bytes). +

+
+
rtmp_swfsize
+

Size of the decompressed SWF file, required for SWFVerification. +

+
+
rtmp_swfurl
+

URL of the SWF player for the media. By default no value will be sent. +

+
+
rtmp_swfverify
+

URL to player swf file, compute hash/size automatically. +

+
+
rtmp_tcurl
+

URL of the target stream. Defaults to proto://host[:port]/app. +

+
+
+ +

For example to read with ffplay a multimedia resource named +"sample" from the application "vod" from an RTMP server "myserver": +

 
ffplay rtmp://myserver/vod/sample
+
+ +

To publish to a password protected server, passing the playpath and +app names separately: +

 
ffmpeg -re -i <input> -f flv -rtmp_playpath some/long/path -rtmp_app long/app/name rtmp://username:password@myserver/
+
+ + +

18.16 rtmpe

+ +

Encrypted Real-Time Messaging Protocol. +

+

The Encrypted Real-Time Messaging Protocol (RTMPE) is used for +streaming multimedia content within standard cryptographic primitives, +consisting of Diffie-Hellman key exchange and HMACSHA256, generating +a pair of RC4 keys. +

+ +

18.17 rtmps

+ +

Real-Time Messaging Protocol over a secure SSL connection. +

+

The Real-Time Messaging Protocol (RTMPS) is used for streaming +multimedia content across an encrypted connection. +

+ +

18.18 rtmpt

+ +

Real-Time Messaging Protocol tunneled through HTTP. +

+

The Real-Time Messaging Protocol tunneled through HTTP (RTMPT) is used +for streaming multimedia content within HTTP requests to traverse +firewalls. +

+ +

18.19 rtmpte

+ +

Encrypted Real-Time Messaging Protocol tunneled through HTTP. +

+

The Encrypted Real-Time Messaging Protocol tunneled through HTTP (RTMPTE) +is used for streaming multimedia content within HTTP requests to traverse +firewalls. +

+ +

18.20 rtmpts

+ +

Real-Time Messaging Protocol tunneled through HTTPS. +

+

The Real-Time Messaging Protocol tunneled through HTTPS (RTMPTS) is used +for streaming multimedia content within HTTPS requests to traverse +firewalls. +

+ +

18.21 libssh

+ +

Secure File Transfer Protocol via libssh +

+

Allow to read from or write to remote resources using SFTP protocol. +

+

The following syntax is required. +

+
 
sftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
+
+ +

This protocol accepts the following options. +

+
+
timeout
+

Set timeout of socket I/O operations used by the underlying low level +operation. By default it is set to -1, which means that the timeout +is not specified. +

+
+
truncate
+

Truncate existing files on write, if set to 1. A value of 0 prevents +truncating. Default value is 1. +

+
+
private_key
+

Specify the path of the file containing private key to use during authorization. +By default libssh searches for keys in the ‘~/.ssh/’ directory. +

+
+
+ +

Example: Play a file stored on remote server. +

+
 
ffplay sftp://user:password@server_address:22/home/user/resource.mpeg
+
+ + +

18.22 librtmp rtmp, rtmpe, rtmps, rtmpt, rtmpte

+ +

Real-Time Messaging Protocol and its variants supported through +librtmp. +

+

Requires the presence of the librtmp headers and library during +configuration. You need to explicitly configure the build with +"–enable-librtmp". If enabled this will replace the native RTMP +protocol. +

+

This protocol provides most client functions and a few server +functions needed to support RTMP, RTMP tunneled in HTTP (RTMPT), +encrypted RTMP (RTMPE), RTMP over SSL/TLS (RTMPS) and tunneled +variants of these encrypted types (RTMPTE, RTMPTS). +

+

The required syntax is: +

 
rtmp_proto://server[:port][/app][/playpath] options
+
+ +

where rtmp_proto is one of the strings "rtmp", "rtmpt", "rtmpe", +"rtmps", "rtmpte", "rtmpts" corresponding to each RTMP variant, and +server, port, app and playpath have the same +meaning as specified for the RTMP native protocol. +options contains a list of space-separated options of the form +key=val. +

+

See the librtmp manual page (man 3 librtmp) for more information. +

+

For example, to stream a file in real-time to an RTMP server using +ffmpeg: +

 
ffmpeg -re -i myfile -f flv rtmp://myserver/live/mystream
+
+ +

To play the same stream using ffplay: +

 
ffplay "rtmp://myserver/live/mystream live=1"
+
+ + +

18.23 rtp

+ +

Real-time Transport Protocol. +

+

The required syntax for an RTP URL is: +rtp://hostname[:port][?option=val...] +

+

port specifies the RTP port to use. +

+

The following URL options are supported: +

+
+
ttl=n
+

Set the TTL (Time-To-Live) value (for multicast only). +

+
+
rtcpport=n
+

Set the remote RTCP port to n. +

+
+
localrtpport=n
+

Set the local RTP port to n. +

+
+
localrtcpport=n
+

Set the local RTCP port to n. +

+
+
pkt_size=n
+

Set max packet size (in bytes) to n. +

+
+
connect=0|1
+

Do a connect() on the UDP socket (if set to 1) or not (if set +to 0). +

+
+
sources=ip[,ip]
+

List allowed source IP addresses. +

+
+
block=ip[,ip]
+

List disallowed (blocked) source IP addresses. +

+
+
write_to_source=0|1
+

Send packets to the source address of the latest received packet (if +set to 1) or to a default remote address (if set to 0). +

+
+
localport=n
+

Set the local RTP port to n. +

+

This is a deprecated option. Instead, ‘localrtpport’ should be +used. +

+
+
+ +

Important notes: +

+
    +
  1. +If ‘rtcpport’ is not set the RTCP port will be set to the RTP +port value plus 1. + +
  2. +If ‘localrtpport’ (the local RTP port) is not set any available +port will be used for the local RTP and RTCP ports. + +
  3. +If ‘localrtcpport’ (the local RTCP port) is not set it will be +set to the local RTP port value plus 1. +
+ + +

18.24 rtsp

+ +

Real-Time Streaming Protocol. +

+

RTSP is not technically a protocol handler in libavformat, it is a demuxer +and muxer. The demuxer supports both normal RTSP (with data transferred +over RTP; this is used by e.g. Apple and Microsoft) and Real-RTSP (with +data transferred over RDT). +

+

The muxer can be used to send a stream using RTSP ANNOUNCE to a server +supporting it (currently Darwin Streaming Server and Mischa Spiegelmock’s +RTSP server). +

+

The required syntax for a RTSP url is: +

 
rtsp://hostname[:port]/path
+
+ +

Options can be set on the ffmpeg/ffplay command +line, or set in code via AVOptions or in +avformat_open_input. +

+

The following options are supported. +

+
+
initial_pause
+

Do not start playing the stream immediately if set to 1. Default value +is 0. +

+
+
rtsp_transport
+

Set RTSP transport protocols. +

+

It accepts the following values: +

+
udp
+

Use UDP as lower transport protocol. +

+
+
tcp
+

Use TCP (interleaving within the RTSP control channel) as lower +transport protocol. +

+
+
udp_multicast
+

Use UDP multicast as lower transport protocol. +

+
+
http
+

Use HTTP tunneling as lower transport protocol, which is useful for +passing proxies. +

+
+ +

Multiple lower transport protocols may be specified, in that case they are +tried one at a time (if the setup of one fails, the next one is tried). +For the muxer, only the ‘tcp’ and ‘udp’ options are supported. +

+
+
rtsp_flags
+

Set RTSP flags. +

+

The following values are accepted: +

+
filter_src
+

Accept packets only from negotiated peer address and port. +

+
listen
+

Act as a server, listening for an incoming connection. +

+
+ +

Default value is ‘none’. +

+
+
allowed_media_types
+

Set media types to accept from the server. +

+

The following flags are accepted: +

+
video
+
audio
+
data
+
+ +

By default it accepts all media types. +

+
+
min_port
+

Set minimum local UDP port. Default value is 5000. +

+
+
max_port
+

Set maximum local UDP port. Default value is 65000. +

+
+
timeout
+

Set maximum timeout (in seconds) to wait for incoming connections. +

+

A value of -1 mean infinite (default). This option implies the +‘rtsp_flags’ set to ‘listen’. +

+
+
reorder_queue_size
+

Set number of packets to buffer for handling of reordered packets. +

+
+
stimeout
+

Set socket TCP I/O timeout in micro seconds. +

+
+
user-agent
+

Override the User-Agent header. If not specified, it defaults to the +libavformat identifier string. +

+
+ +

When receiving data over UDP, the demuxer tries to reorder received packets +(since they may arrive out of order, or packets may get lost totally). This +can be disabled by setting the maximum demuxing delay to zero (via +the max_delay field of AVFormatContext). +

+

When watching multi-bitrate Real-RTSP streams with ffplay, the +streams to display can be chosen with -vst n and +-ast n for video and audio respectively, and can be switched +on the fly by pressing v and a. +

+ +

18.24.1 Examples

+ +

The following examples all make use of the ffplay and +ffmpeg tools. +

+
    +
  • +Watch a stream over UDP, with a max reordering delay of 0.5 seconds: +
     
    ffplay -max_delay 500000 -rtsp_transport udp rtsp://server/video.mp4
    +
    + +
  • +Watch a stream tunneled over HTTP: +
     
    ffplay -rtsp_transport http rtsp://server/video.mp4
    +
    + +
  • +Send a stream in realtime to a RTSP server, for others to watch: +
     
    ffmpeg -re -i input -f rtsp -muxdelay 0.1 rtsp://server/live.sdp
    +
    + +
  • +Receive a stream in realtime: +
     
    ffmpeg -rtsp_flags listen -i rtsp://ownaddress/live.sdp output
    +
    +
+ + +

18.25 sap

+ +

Session Announcement Protocol (RFC 2974). This is not technically a +protocol handler in libavformat, it is a muxer and demuxer. +It is used for signalling of RTP streams, by announcing the SDP for the +streams regularly on a separate port. +

+ +

18.25.1 Muxer

+ +

The syntax for a SAP url given to the muxer is: +

 
sap://destination[:port][?options]
+
+ +

The RTP packets are sent to destination on port port, +or to port 5004 if no port is specified. +options is a &-separated list. The following options +are supported: +

+
+
announce_addr=address
+

Specify the destination IP address for sending the announcements to. +If omitted, the announcements are sent to the commonly used SAP +announcement multicast address 224.2.127.254 (sap.mcast.net), or +ff0e::2:7ffe if destination is an IPv6 address. +

+
+
announce_port=port
+

Specify the port to send the announcements on, defaults to +9875 if not specified. +

+
+
ttl=ttl
+

Specify the time to live value for the announcements and RTP packets, +defaults to 255. +

+
+
same_port=0|1
+

If set to 1, send all RTP streams on the same port pair. If zero (the +default), all streams are sent on unique ports, with each stream on a +port 2 numbers higher than the previous. +VLC/Live555 requires this to be set to 1, to be able to receive the stream. +The RTP stack in libavformat for receiving requires all streams to be sent +on unique ports. +

+
+ +

Example command lines follow. +

+

To broadcast a stream on the local subnet, for watching in VLC: +

+
 
ffmpeg -re -i input -f sap sap://224.0.0.255?same_port=1
+
+ +

Similarly, for watching in ffplay: +

+
 
ffmpeg -re -i input -f sap sap://224.0.0.255
+
+ +

And for watching in ffplay, over IPv6: +

+
 
ffmpeg -re -i input -f sap sap://[ff0e::1:2:3:4]
+
+ + +

18.25.2 Demuxer

+ +

The syntax for a SAP url given to the demuxer is: +

 
sap://[address][:port]
+
+ +

address is the multicast address to listen for announcements on, +if omitted, the default 224.2.127.254 (sap.mcast.net) is used. port +is the port that is listened on, 9875 if omitted. +

+

The demuxer listens for announcements on the given address and port. +Once an announcement is received, it tries to receive that particular stream. +

+

Example command lines follow. +

+

To play back the first stream announced on the normal SAP multicast address: +

+
 
ffplay sap://
+
+ +

To play back the first stream announced on the default IPv6 SAP multicast address: +

+
 
ffplay sap://[ff0e::2:7ffe]
+
+ + +

18.26 sctp

+ +

Stream Control Transmission Protocol. +

+

The accepted URL syntax is: +

 
sctp://host:port[?options]
+
+ +

The protocol accepts the following options: +

+
listen
+

If set to any value, listen for an incoming connection. Outgoing connection is done by default. +

+
+
max_streams
+

Set the maximum number of streams. By default no limit is set. +

+
+ + +

18.27 srtp

+ +

Secure Real-time Transport Protocol. +

+

The accepted options are: +

+
srtp_in_suite
+
srtp_out_suite
+

Select input and output encoding suites. +

+

Supported values: +

+
AES_CM_128_HMAC_SHA1_80
+
SRTP_AES128_CM_HMAC_SHA1_80
+
AES_CM_128_HMAC_SHA1_32
+
SRTP_AES128_CM_HMAC_SHA1_32
+
+ +
+
srtp_in_params
+
srtp_out_params
+

Set input and output encoding parameters, which are expressed by a +base64-encoded representation of a binary block. The first 16 bytes of +this binary block are used as master key, the following 14 bytes are +used as master salt. +

+
+ + +

18.28 tcp

+ +

Transmission Control Protocol. +

+

The required syntax for a TCP url is: +

 
tcp://hostname:port[?options]
+
+ +

options contains a list of &-separated options of the form +key=val. +

+

The list of supported options follows. +

+
+
listen=1|0
+

Listen for an incoming connection. Default value is 0. +

+
+
timeout=microseconds
+

Set raise error timeout, expressed in microseconds. +

+

This option is only relevant in read mode: if no data arrived in more +than this time interval, raise error. +

+
+
listen_timeout=microseconds
+

Set listen timeout, expressed in microseconds. +

+
+ +

The following example shows how to setup a listening TCP connection +with ffmpeg, which is then accessed with ffplay: +

 
ffmpeg -i input -f format tcp://hostname:port?listen
+ffplay tcp://hostname:port
+
+ + +

18.29 tls

+ +

Transport Layer Security (TLS) / Secure Sockets Layer (SSL) +

+

The required syntax for a TLS/SSL url is: +

 
tls://hostname:port[?options]
+
+ +

The following parameters can be set via command line options +(or in code via AVOptions): +

+
+
ca_file, cafile=filename
+

A file containing certificate authority (CA) root certificates to treat +as trusted. If the linked TLS library contains a default this might not +need to be specified for verification to work, but not all libraries and +setups have defaults built in. +The file must be in OpenSSL PEM format. +

+
+
tls_verify=1|0
+

If enabled, try to verify the peer that we are communicating with. +Note, if using OpenSSL, this currently only makes sure that the +peer certificate is signed by one of the root certificates in the CA +database, but it does not validate that the certificate actually +matches the host name we are trying to connect to. (With GnuTLS, +the host name is validated as well.) +

+

This is disabled by default since it requires a CA database to be +provided by the caller in many cases. +

+
+
cert_file, cert=filename
+

A file containing a certificate to use in the handshake with the peer. +(When operating as server, in listen mode, this is more often required +by the peer, while client certificates only are mandated in certain +setups.) +

+
+
key_file, key=filename
+

A file containing the private key for the certificate. +

+
+
listen=1|0
+

If enabled, listen for connections on the provided port, and assume +the server role in the handshake instead of the client role. +

+
+
+ +

Example command lines: +

+

To create a TLS/SSL server that serves an input stream. +

+
 
ffmpeg -i input -f format tls://hostname:port?listen&cert=server.crt&key=server.key
+
+ +

To play back a stream from the TLS/SSL server using ffplay: +

+
 
ffplay tls://hostname:port
+
+ + +

18.30 udp

+ +

User Datagram Protocol. +

+

The required syntax for an UDP URL is: +

 
udp://hostname:port[?options]
+
+ +

options contains a list of &-separated options of the form key=val. +

+

In case threading is enabled on the system, a circular buffer is used +to store the incoming data, which allows one to reduce loss of data due to +UDP socket buffer overruns. The fifo_size and +overrun_nonfatal options are related to this buffer. +

+

The list of supported options follows. +

+
+
buffer_size=size
+

Set the UDP socket buffer size in bytes. This is used both for the +receiving and the sending buffer size. +

+
+
localport=port
+

Override the local UDP port to bind with. +

+
+
localaddr=addr
+

Choose the local IP address. This is useful e.g. if sending multicast +and the host has multiple interfaces, where the user can choose +which interface to send on by specifying the IP address of that interface. +

+
+
pkt_size=size
+

Set the size in bytes of UDP packets. +

+
+
reuse=1|0
+

Explicitly allow or disallow reusing UDP sockets. +

+
+
ttl=ttl
+

Set the time to live value (for multicast only). +

+
+
connect=1|0
+

Initialize the UDP socket with connect(). In this case, the +destination address can’t be changed with ff_udp_set_remote_url later. +If the destination address isn’t known at the start, this option can +be specified in ff_udp_set_remote_url, too. +This allows finding out the source address for the packets with getsockname, +and makes writes return with AVERROR(ECONNREFUSED) if "destination +unreachable" is received. +For receiving, this gives the benefit of only receiving packets from +the specified peer address/port. +

+
+
sources=address[,address]
+

Only receive packets sent to the multicast group from one of the +specified sender IP addresses. +

+
+
block=address[,address]
+

Ignore packets sent to the multicast group from the specified +sender IP addresses. +

+
+
fifo_size=units
+

Set the UDP receiving circular buffer size, expressed as a number of +packets with size of 188 bytes. If not specified defaults to 7*4096. +

+
+
overrun_nonfatal=1|0
+

Survive in case of UDP receiving circular buffer overrun. Default +value is 0. +

+
+
timeout=microseconds
+

Set raise error timeout, expressed in microseconds. +

+

This option is only relevant in read mode: if no data arrived in more +than this time interval, raise error. +

+
+ + +

18.30.1 Examples

+ +
    +
  • +Use ffmpeg to stream over UDP to a remote endpoint: +
     
    ffmpeg -i input -f format udp://hostname:port
    +
    + +
  • +Use ffmpeg to stream in mpegts format over UDP using 188 +sized UDP packets, using a large input buffer: +
     
    ffmpeg -i input -f mpegts udp://hostname:port?pkt_size=188&buffer_size=65535
    +
    + +
  • +Use ffmpeg to receive over UDP from a remote endpoint: +
     
    ffmpeg -i udp://[multicast-address]:port ...
    +
    +
+ + +

18.31 unix

+ +

Unix local socket +

+

The required syntax for a Unix socket URL is: +

+
 
unix://filepath
+
+ +

The following parameters can be set via command line options +(or in code via AVOptions): +

+
+
timeout
+

Timeout in ms. +

+
listen
+

Create the Unix socket in listening mode. +

+
+ + +

19. Device Options

+ +

The libavdevice library provides the same interface as +libavformat. Namely, an input device is considered like a demuxer, and +an output device like a muxer, and the interface and generic device +options are the same provided by libavformat (see the ffmpeg-formats +manual). +

+

In addition each input or output device may support so-called private +options, which are specific for that component. +

+

Options may be set by specifying -option value in the +FFmpeg tools, or by setting the value explicitly in the device +AVFormatContext options or using the ‘libavutil/opt.h’ API +for programmatic use. +

+ + +

20. Input Devices

+ +

Input devices are configured elements in FFmpeg which allow you to access the data coming from a multimedia device attached to your system.

+

When you configure your FFmpeg build, all the supported input devices are enabled by default. You can list all available ones using the configure option "--list-indevs".

+

You can disable all the input devices using the configure option "--disable-indevs", and selectively enable an input device using the option "--enable-indev=INDEV", or you can disable a particular input device using the option "--disable-indev=INDEV".

+

The option "-formats" of the ff* tools will display the list of +supported input devices (amongst the demuxers). +

+

A description of the currently available input devices follows. +

+ +

20.1 alsa

+ +

ALSA (Advanced Linux Sound Architecture) input device. +

+

To enable this input device during configuration you need libasound +installed on your system. +

+

This device allows capturing from an ALSA device. The name of the +device to capture has to be an ALSA card identifier. +

+

An ALSA identifier has the syntax: +

 
hw:CARD[,DEV[,SUBDEV]]
+
+ +

where the DEV and SUBDEV components are optional. +

+

The three arguments (in order: CARD,DEV,SUBDEV) +specify card number or identifier, device number and subdevice number +(-1 means any). +

+

To see the list of cards currently recognized by your system check the +files ‘/proc/asound/cards’ and ‘/proc/asound/devices’. +

+

For example to capture with ffmpeg from an ALSA device with +card id 0, you may run the command: +

 
ffmpeg -f alsa -i hw:0 alsaout.wav
+
+ +

For more information see: +http://www.alsa-project.org/alsa-doc/alsa-lib/pcm.html +

+ +

20.2 bktr

+ +

BSD video input device. +

+ +

20.3 dshow

+ +

Windows DirectShow input device. +

+

DirectShow support is enabled when FFmpeg is built with the mingw-w64 project. +Currently only audio and video devices are supported. +

+

Multiple devices may be opened as separate inputs, but they may also be +opened on the same input, which should improve synchronism between them. +

+

The input name should be in the format: +

+
 
TYPE=NAME[:TYPE=NAME]
+
+ +

where TYPE can be either audio or video, +and NAME is the device’s name. +

+ +

20.3.1 Options

+ +

If no options are specified, the device’s defaults are used. +If the device does not support the requested options, it will +fail to open. +

+
+
video_size
+

Set the video size in the captured video. +

+
+
framerate
+

Set the frame rate in the captured video. +

+
+
sample_rate
+

Set the sample rate (in Hz) of the captured audio. +

+
+
sample_size
+

Set the sample size (in bits) of the captured audio. +

+
+
channels
+

Set the number of channels in the captured audio. +

+
+
list_devices
+

If set to ‘true’, print a list of devices and exit. +

+
+
list_options
+

If set to ‘true’, print a list of selected device’s options +and exit. +

+
+
video_device_number
+

Set video device number for devices with same name (starts at 0, +defaults to 0). +

+
+
audio_device_number
+

Set audio device number for devices with same name (starts at 0, +defaults to 0). +

+
+
pixel_format
+

Select pixel format to be used by DirectShow. This may only be set when +the video codec is not set or set to rawvideo. +

+
+
audio_buffer_size
+

Set audio device buffer size in milliseconds (which can directly +impact latency, depending on the device). +Defaults to using the audio device’s +default buffer size (typically some multiple of 500ms). +Setting this value too low can degrade performance. +See also +http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx +

+
+
+ + +

20.3.2 Examples

+ +
    +
  • +Print the list of DirectShow supported devices and exit: +
     
    $ ffmpeg -list_devices true -f dshow -i dummy
    +
    + +
  • +Open video device Camera: +
     
    $ ffmpeg -f dshow -i video="Camera"
    +
    + +
  • +Open second video device with name Camera: +
     
    $ ffmpeg -f dshow -video_device_number 1 -i video="Camera"
    +
    + +
  • +Open video device Camera and audio device Microphone: +
     
    $ ffmpeg -f dshow -i video="Camera":audio="Microphone"
    +
    + +
  • +Print the list of supported options in selected device and exit: +
     
    $ ffmpeg -list_options true -f dshow -i video="Camera"
    +
    + +
+ + +

20.4 dv1394

+ +

Linux DV 1394 input device. +

+ +

20.5 fbdev

+ +

Linux framebuffer input device. +

+

The Linux framebuffer is a graphic hardware-independent abstraction +layer to show graphics on a computer monitor, typically on the +console. It is accessed through a file device node, usually +‘/dev/fb0’. +

+

For more detailed information read the file +Documentation/fb/framebuffer.txt included in the Linux source tree. +

+

To record from the framebuffer device ‘/dev/fb0’ with +ffmpeg: +

 
ffmpeg -f fbdev -r 10 -i /dev/fb0 out.avi
+
+ +

You can take a single screenshot image with the command: +

 
ffmpeg -f fbdev -frames:v 1 -r 1 -i /dev/fb0 screenshot.jpeg
+
+ +

See also http://linux-fbdev.sourceforge.net/, and fbset(1). +

+ +

20.6 iec61883

+ +

FireWire DV/HDV input device using libiec61883. +

+

To enable this input device, you need libiec61883, libraw1394 and +libavc1394 installed on your system. Use the configure option +--enable-libiec61883 to compile with the device enabled. +

+

The iec61883 capture device supports capturing from a video device +connected via IEEE1394 (FireWire), using libiec61883 and the new Linux +FireWire stack (juju). This is the default DV/HDV input method in Linux +Kernel 2.6.37 and later, since the old FireWire stack was removed. +

+

Specify the FireWire port to be used as input file, or "auto" +to choose the first port connected. +

+ +

20.6.1 Options

+ +
+
dvtype
+

Override autodetection of DV/HDV. This should only be used if auto +detection does not work, or if usage of a different device type +should be prohibited. Treating a DV device as HDV (or vice versa) will +not work and result in undefined behavior. +The values ‘auto’, ‘dv’ and ‘hdv’ are supported. +

+
+
dvbuffer
+

Set maxiumum size of buffer for incoming data, in frames. For DV, this +is an exact value. For HDV, it is not frame exact, since HDV does +not have a fixed frame size. +

+
+
dvguid
+

Select the capture device by specifying its GUID. Capturing will only be performed from the specified device and fails if no device with the given GUID is found. This is useful to select the input if multiple devices are connected at the same time. Look at /sys/bus/firewire/devices to find out the GUIDs.

+
+
+ + +

20.6.2 Examples

+ +
    +
  • +Grab and show the input of a FireWire DV/HDV device. +
     
    ffplay -f iec61883 -i auto
    +
    + +
  • +Grab and record the input of a FireWire DV/HDV device, +using a packet buffer of 100000 packets if the source is HDV. +
     
    ffmpeg -f iec61883 -i auto -hdvbuffer 100000 out.mpg
    +
    + +
+ + +

20.7 jack

+ +

JACK input device. +

+

To enable this input device during configuration you need libjack +installed on your system. +

+

A JACK input device creates one or more JACK writable clients, one for +each audio channel, with name client_name:input_N, where +client_name is the name provided by the application, and N +is a number which identifies the channel. +Each writable client will send the acquired data to the FFmpeg input +device. +

+

Once you have created one or more JACK readable clients, you need to +connect them to one or more JACK writable clients. +

+

To connect or disconnect JACK clients you can use the jack_connect +and jack_disconnect programs, or do it through a graphical interface, +for example with qjackctl. +

+

To list the JACK clients and their properties you can invoke the command +jack_lsp. +

+

Follows an example which shows how to capture a JACK readable client +with ffmpeg. +

 
# Create a JACK writable client with name "ffmpeg".
+$ ffmpeg -f jack -i ffmpeg -y out.wav
+
+# Start the sample jack_metro readable client.
+$ jack_metro -b 120 -d 0.2 -f 4000
+
+# List the current JACK clients.
+$ jack_lsp -c
+system:capture_1
+system:capture_2
+system:playback_1
+system:playback_2
+ffmpeg:input_1
+metro:120_bpm
+
+# Connect metro to the ffmpeg writable client.
+$ jack_connect metro:120_bpm ffmpeg:input_1
+
+ +

For more information read: +http://jackaudio.org/ +

+ +

20.8 lavfi

+ +

Libavfilter input virtual device. +

+

This input device reads data from the open output pads of a libavfilter +filtergraph. +

+

For each filtergraph open output, the input device will create a +corresponding stream which is mapped to the generated output. Currently +only video data is supported. The filtergraph is specified through the +option ‘graph’. +

+ +

20.8.1 Options

+ +
+
graph
+

Specify the filtergraph to use as input. Each video open output must be +labelled by a unique string of the form "outN", where N is a +number starting from 0 corresponding to the mapped input stream +generated by the device. +The first unlabelled output is automatically assigned to the "out0" +label, but all the others need to be specified explicitly. +

+

If not specified defaults to the filename specified for the input +device. +

+
+
graph_file
+

Set the filename of the filtergraph to be read and sent to the other +filters. Syntax of the filtergraph is the same as the one specified by +the option graph. +

+
+
+ + +

20.8.2 Examples

+ +
    +
  • +Create a color video stream and play it back with ffplay: +
     
    ffplay -f lavfi -graph "color=c=pink [out0]" dummy
    +
    + +
  • +As the previous example, but use filename for specifying the graph +description, and omit the "out0" label: +
     
    ffplay -f lavfi color=c=pink
    +
    + +
  • +Create three different video test filtered sources and play them: +
     
    ffplay -f lavfi -graph "testsrc [out0]; testsrc,hflip [out1]; testsrc,negate [out2]" test3
    +
    + +
  • +Read an audio stream from a file using the amovie source and play it +back with ffplay: +
     
    ffplay -f lavfi "amovie=test.wav"
    +
    + +
  • +Read an audio stream and a video stream and play it back with +ffplay: +
     
    ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
    +
    + +
+ + +

20.9 libdc1394

+ +

IIDC1394 input device, based on libdc1394 and libraw1394. +

+ +

20.10 openal

+ +

The OpenAL input device provides audio capture on all systems with a +working OpenAL 1.1 implementation. +

+

To enable this input device during configuration, you need OpenAL +headers and libraries installed on your system, and need to configure +FFmpeg with --enable-openal. +

+

OpenAL headers and libraries should be provided as part of your OpenAL +implementation, or as an additional download (an SDK). Depending on your +installation you may need to specify additional flags via the +--extra-cflags and --extra-ldflags for allowing the build +system to locate the OpenAL headers and libraries. +

+

An incomplete list of OpenAL implementations follows: +

+
+
Creative
+

The official Windows implementation, providing hardware acceleration +with supported devices and software fallback. +See http://openal.org/. +

+
OpenAL Soft
+

Portable, open source (LGPL) software implementation. Includes +backends for the most common sound APIs on the Windows, Linux, +Solaris, and BSD operating systems. +See http://kcat.strangesoft.net/openal.html. +

+
Apple
+

OpenAL is part of Core Audio, the official Mac OS X Audio interface. +See http://developer.apple.com/technologies/mac/audio-and-video.html +

+
+ +

This device allows one to capture from an audio input device handled +through OpenAL. +

+

You need to specify the name of the device to capture in the provided +filename. If the empty string is provided, the device will +automatically select the default device. You can get the list of the +supported devices by using the option list_devices. +

+ +

20.10.1 Options

+ +
+
channels
+

Set the number of channels in the captured audio. Only the values +‘1’ (monaural) and ‘2’ (stereo) are currently supported. +Defaults to ‘2’. +

+
+
sample_size
+

Set the sample size (in bits) of the captured audio. Only the values +‘8’ and ‘16’ are currently supported. Defaults to +‘16’. +

+
+
sample_rate
+

Set the sample rate (in Hz) of the captured audio. +Defaults to ‘44.1k’. +

+
+
list_devices
+

If set to ‘true’, print a list of devices and exit. +Defaults to ‘false’. +

+
+
+ + +

20.10.2 Examples

+ +

Print the list of OpenAL supported devices and exit: +

 
$ ffmpeg -list_devices true -f openal -i dummy out.ogg
+
+ +

Capture from the OpenAL device ‘DR-BT101 via PulseAudio’: +

 
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out.ogg
+
+ +

Capture from the default device (note the empty string '' as the filename):

 
$ ffmpeg -f openal -i '' out.ogg
+
+ +

Capture from two devices simultaneously, writing to two different files, +within the same ffmpeg command: +

 
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out1.ogg -f openal -i 'ALSA Default' out2.ogg
+
+

Note: not all OpenAL implementations support multiple simultaneous capture - +try the latest OpenAL Soft if the above does not work. +

+ +

20.11 oss

+ +

Open Sound System input device. +

+

The filename to provide to the input device is the device node +representing the OSS input device, and is usually set to +‘/dev/dsp’. +

+

For example to grab from ‘/dev/dsp’ using ffmpeg use the +command: +

 
ffmpeg -f oss -i /dev/dsp /tmp/oss.wav
+
+ +

For more information about OSS see: +http://manuals.opensound.com/usersguide/dsp.html +

+ +

20.12 pulse

+ +

PulseAudio input device. +

+

To enable this output device you need to configure FFmpeg with --enable-libpulse. +

+

The filename to provide to the input device is a source device or the +string "default" +

+

To list the PulseAudio source devices and their properties you can invoke +the command pactl list sources. +

+

More information about PulseAudio can be found on http://www.pulseaudio.org. +

+ +

20.12.1 Options

+
+
server
+

Connect to a specific PulseAudio server, specified by an IP address. +Default server is used when not provided. +

+
+
name
+

Specify the application name PulseAudio will use when showing active clients, +by default it is the LIBAVFORMAT_IDENT string. +

+
+
stream_name
+

Specify the stream name PulseAudio will use when showing active streams, +by default it is "record". +

+
+
sample_rate
+

Specify the samplerate in Hz, by default 48kHz is used. +

+
+
channels
+

Specify the channels in use, by default 2 (stereo) is set. +

+
+
frame_size
+

Specify the number of bytes per frame, by default it is set to 1024. +

+
+
fragment_size
+

Specify the minimal buffering fragment in PulseAudio, it will affect the +audio latency. By default it is unset. +

+
+ + +

20.12.2 Examples

+

Record a stream from default device: +

 
ffmpeg -f pulse -i default /tmp/pulse.wav
+
+ + +

20.13 sndio

+ +

sndio input device. +

+

To enable this input device during configuration you need libsndio +installed on your system. +

+

The filename to provide to the input device is the device node +representing the sndio input device, and is usually set to +‘/dev/audio0’. +

+

For example to grab from ‘/dev/audio0’ using ffmpeg use the +command: +

 
ffmpeg -f sndio -i /dev/audio0 /tmp/oss.wav
+
+ + +

20.14 video4linux2, v4l2

+ +

Video4Linux2 input video device. +

+

"v4l2" can be used as alias for "video4linux2". +

+

If FFmpeg is built with v4l-utils support (by using the +--enable-libv4l2 configure option), it is possible to use it with the +-use_libv4l2 input device option. +

+

The name of the device to grab is a file device node, usually Linux +systems tend to automatically create such nodes when the device +(e.g. an USB webcam) is plugged into the system, and has a name of the +kind ‘/dev/videoN’, where N is a number associated to +the device. +

+

Video4Linux2 devices usually support a limited set of +widthxheight sizes and frame rates. You can check which are +supported using -list_formats all for Video4Linux2 devices. +Some devices, like TV cards, support one or more standards. It is possible +to list all the supported standards using -list_standards all. +

+

The time base for the timestamps is 1 microsecond. Depending on the kernel +version and configuration, the timestamps may be derived from the real time +clock (origin at the Unix Epoch) or the monotonic clock (origin usually at +boot time, unaffected by NTP or manual changes to the clock). The +‘-timestamps abs’ or ‘-ts abs’ option can be used to force +conversion into the real time clock. +

+

Some usage examples of the video4linux2 device with ffmpeg +and ffplay: +

    +
  • +Grab and show the input of a video4linux2 device: +
     
    ffplay -f video4linux2 -framerate 30 -video_size hd720 /dev/video0
    +
    + +
  • +Grab and record the input of a video4linux2 device, leave the +frame rate and size as previously set: +
     
    ffmpeg -f video4linux2 -input_format mjpeg -i /dev/video0 out.mpeg
    +
    +
+ +

For more information about Video4Linux, check http://linuxtv.org/. +

+ +

20.14.1 Options

+ +
+
standard
+

Set the standard. Must be the name of a supported standard. To get a +list of the supported standards, use the ‘list_standards’ +option. +

+
+
channel
+

Set the input channel number. Default to -1, which means using the +previously selected channel. +

+
+
video_size
+

Set the video frame size. The argument must be a string in the form +WIDTHxHEIGHT or a valid size abbreviation. +

+
+
pixel_format
+

Select the pixel format (only valid for raw video input). +

+
+
input_format
+

Set the preferred pixel format (for raw video) or a codec name. +This option allows one to select the input format, when several are +available. +

+
+
framerate
+

Set the preferred video frame rate. +

+
+
list_formats
+

List available formats (supported pixel formats, codecs, and frame +sizes) and exit. +

+

Available values are: +

+
all
+

Show all available (compressed and non-compressed) formats. +

+
+
raw
+

Show only raw video (non-compressed) formats. +

+
+
compressed
+

Show only compressed formats. +

+
+ +
+
list_standards
+

List supported standards and exit. +

+

Available values are: +

+
all
+

Show all supported standards. +

+
+ +
+
timestamps, ts
+

Set type of timestamps for grabbed frames. +

+

Available values are: +

+
default
+

Use timestamps from the kernel. +

+
+
abs
+

Use absolute timestamps (wall clock). +

+
+
mono2abs
+

Force conversion from monotonic to absolute timestamps. +

+
+ +

Default value is default. +

+
+ + +

20.15 vfwcap

+ +

VfW (Video for Windows) capture input device. +

+

The filename passed as input is the capture driver number, ranging from +0 to 9. You may use "list" as filename to print a list of drivers. Any +other filename will be interpreted as device number 0. +

+ +

20.16 x11grab

+ +

X11 video input device. +

+

This device allows one to capture a region of an X11 display. +

+

The filename passed as input has the syntax: +

 
[hostname]:display_number.screen_number[+x_offset,y_offset]
+
+ +

hostname:display_number.screen_number specifies the +X11 display name of the screen to grab from. hostname can be +omitted, and defaults to "localhost". The environment variable +DISPLAY contains the default display name. +

+

x_offset and y_offset specify the offsets of the grabbed +area with respect to the top-left border of the X11 screen. They +default to 0. +

+

Check the X11 documentation (e.g. man X) for more detailed information. +

+

Use the dpyinfo program for getting basic information about the +properties of your X11 display (e.g. grep for "name" or "dimensions"). +

+

For example to grab from ‘:0.0’ using ffmpeg: +

 
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0 out.mpg
+
+ +

Grab at position 10,20: +

 
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
+
+ + +

20.16.1 Options

+ +
+
draw_mouse
+

Specify whether to draw the mouse pointer. A value of 0 specify +not to draw the pointer. Default value is 1. +

+
+
follow_mouse
+

Make the grabbed area follow the mouse. The argument can be +centered or a number of pixels PIXELS. +

+

When it is specified with "centered", the grabbing region follows the mouse +pointer and keeps the pointer at the center of region; otherwise, the region +follows only when the mouse pointer reaches within PIXELS (greater than +zero) to the edge of region. +

+

For example: +

 
ffmpeg -f x11grab -follow_mouse centered -framerate 25 -video_size cif -i :0.0 out.mpg
+
+ +

To follow only when the mouse pointer reaches within 100 pixels to edge: +

 
ffmpeg -f x11grab -follow_mouse 100 -framerate 25 -video_size cif -i :0.0 out.mpg
+
+ +
+
framerate
+

Set the grabbing frame rate. Default value is ntsc, +corresponding to a frame rate of 30000/1001. +

+
+
show_region
+

Show grabbed region on screen. +

+

If show_region is specified with 1, then the grabbing +region will be indicated on screen. With this option, it is easy to +know what is being grabbed if only a portion of the screen is grabbed. +

+

For example: +

 
ffmpeg -f x11grab -show_region 1 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
+
+ +

With follow_mouse: +

 
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_size cif -i :0.0 out.mpg
+
+ +
+
video_size
+

Set the video frame size. Default value is vga. +

+
+ + +

21. Resampler Options

+ +

The audio resampler supports the following named options. +

+

Options may be set by specifying -option value in the +FFmpeg tools, option=value for the aresample filter, +by setting the value explicitly in the +SwrContext options or using the ‘libavutil/opt.h’ API for +programmatic use. +

+
+
ich, in_channel_count
+

Set the number of input channels. Default value is 0. Setting this +value is not mandatory if the corresponding channel layout +‘in_channel_layout’ is set. +

+
+
och, out_channel_count
+

Set the number of output channels. Default value is 0. Setting this +value is not mandatory if the corresponding channel layout +‘out_channel_layout’ is set. +

+
+
uch, used_channel_count
+

Set the number of used input channels. Default value is 0. This option is +only used for special remapping. +

+
+
isr, in_sample_rate
+

Set the input sample rate. Default value is 0. +

+
+
osr, out_sample_rate
+

Set the output sample rate. Default value is 0. +

+
+
isf, in_sample_fmt
+

Specify the input sample format. It is set by default to none. +

+
+
osf, out_sample_fmt
+

Specify the output sample format. It is set by default to none. +

+
+
tsf, internal_sample_fmt
+

Set the internal sample format. Default value is none. +This will automatically be chosen when it is not explicitly set. +

+
+
icl, in_channel_layout
+
ocl, out_channel_layout
+

Set the input/output channel layout. +

+

See (ffmpeg-utils)channel layout syntax +for the required syntax. +

+
+
clev, center_mix_level
+

Set the center mix level. It is a value expressed in deciBel, and must be +in the interval [-32,32]. +

+
+
slev, surround_mix_level
+

Set the surround mix level. It is a value expressed in deciBel, and must +be in the interval [-32,32]. +

+
+
lfe_mix_level
+

Set LFE mix into non LFE level. It is used when there is a LFE input but no +LFE output. It is a value expressed in deciBel, and must +be in the interval [-32,32]. +

+
+
rmvol, rematrix_volume
+

Set rematrix volume. Default value is 1.0. +

+
+
rematrix_maxval
+

Set the maximum output value for rematrixing. This can be used to prevent clipping vs. preventing volume reduction. A value of 1.0 prevents clipping.

+
+
flags, swr_flags
+

Set flags used by the converter. Default value is 0. +

+

It supports the following individual flags: +

+
res
+

force resampling, this flag forces resampling to be used even when the +input and output sample rates match. +

+
+ +
+
dither_scale
+

Set the dither scale. Default value is 1. +

+
+
dither_method
+

Set dither method. Default value is 0. +

+

Supported values: +

+
rectangular
+

select rectangular dither +

+
triangular
+

select triangular dither +

+
triangular_hp
+

select triangular dither with high pass +

+
lipshitz
+

select lipshitz noise shaping dither +

+
shibata
+

select shibata noise shaping dither +

+
low_shibata
+

select low shibata noise shaping dither +

+
high_shibata
+

select high shibata noise shaping dither +

+
f_weighted
+

select f-weighted noise shaping dither +

+
modified_e_weighted
+

select modified-e-weighted noise shaping dither +

+
improved_e_weighted
+

select improved-e-weighted noise shaping dither +

+
+
+ +
+
resampler
+

Set resampling engine. Default value is swr. +

+

Supported values: +

+
swr
+

select the native SW Resampler; filter options precision and cheby are not +applicable in this case. +

+
soxr
+

select the SoX Resampler (where available); compensation, and filter options +filter_size, phase_shift, filter_type & kaiser_beta, are not applicable in this +case. +

+
+ +
+
filter_size
+

For swr only, set resampling filter size, default value is 32. +

+
+
phase_shift
+

For swr only, set resampling phase shift, default value is 10, and must be in +the interval [0,30]. +

+
+
linear_interp
+

Use Linear Interpolation if set to 1, default value is 0. +

+
+
cutoff
+

Set cutoff frequency (swr: 6dB point; soxr: 0dB point) ratio; must be a float +value between 0 and 1. Default value is 0.97 with swr, and 0.91 with soxr +(which, with a sample-rate of 44100, preserves the entire audio band to 20kHz). +

+
+
precision
+

For soxr only, the precision in bits to which the resampled signal will be +calculated. The default value of 20 (which, with suitable dithering, is +appropriate for a destination bit-depth of 16) gives SoX’s ’High Quality’; a +value of 28 gives SoX’s ’Very High Quality’. +

+
+
cheby
+

For soxr only, selects passband rolloff none (Chebyshev) & higher-precision +approximation for ’irrational’ ratios. Default value is 0. +

+
+
async
+

For swr only, simple 1 parameter audio sync to timestamps using stretching, +squeezing, filling and trimming. Setting this to 1 will enable filling and +trimming, larger values represent the maximum amount in samples that the data +may be stretched or squeezed for each second. +Default value is 0, thus no compensation is applied to make the samples match +the audio timestamps. +

+
+
first_pts
+

For swr only, assume the first pts should be this value. The time unit is 1 / sample rate. +This allows for padding/trimming at the start of stream. By default, no +assumption is made about the first frame’s expected pts, so no padding or +trimming is done. For example, this could be set to 0 to pad the beginning with +silence if an audio stream starts after the video stream or to trim any samples +with a negative pts due to encoder delay. +

+
+
min_comp
+

For swr only, set the minimum difference between timestamps and audio data (in +seconds) to trigger stretching/squeezing/filling or trimming of the +data to make it match the timestamps. The default is that +stretching/squeezing/filling and trimming is disabled +(‘min_comp’ = FLT_MAX). +

+
+
min_hard_comp
+

For swr only, set the minimum difference between timestamps and audio data (in +seconds) to trigger adding/dropping samples to make it match the +timestamps. This option effectively is a threshold to select between +hard (trim/fill) and soft (squeeze/stretch) compensation. Note that +all compensation is by default disabled through ‘min_comp’. +The default is 0.1. +

+
+
comp_duration
+

For swr only, set duration (in seconds) over which data is stretched/squeezed +to make it match the timestamps. Must be a non-negative double float value, +default value is 1.0. +

+
+
max_soft_comp
+

For swr only, set maximum factor by which data is stretched/squeezed to make it +match the timestamps. Must be a non-negative double float value, default value +is 0. +

+
+
matrix_encoding
+

Select matrixed stereo encoding. +

+

It accepts the following values: +

+
none
+

select none +

+
dolby
+

select Dolby +

+
dplii
+

select Dolby Pro Logic II +

+
+ +

Default value is none. +

+
+
filter_type
+

For swr only, select resampling filter type. This only affects resampling +operations. +

+

It accepts the following values: +

+
cubic
+

select cubic +

+
blackman_nuttall
+

select Blackman Nuttall Windowed Sinc +

+
kaiser
+

select Kaiser Windowed Sinc +

+
+ +
+
kaiser_beta
+

For swr only, set Kaiser Window Beta value. Must be an integer in the +interval [2,16], default value is 9. +

+
+
output_sample_bits
+

For swr only, set number of used output sample bits for dithering. Must be an integer in the +interval [0,64], default value is 0, which means it’s not used. +

+
+
+ +

+

+

22. Scaler Options

+ +

The video scaler supports the following named options. +

+

Options may be set by specifying -option value in the +FFmpeg tools. For programmatic use, they can be set explicitly in the +SwsContext options or through the ‘libavutil/opt.h’ API. +

+
+
+

+

+
sws_flags
+

Set the scaler flags. This is also used to set the scaling +algorithm. Only a single algorithm should be selected. +

+

It accepts the following values: +

+
fast_bilinear
+

Select fast bilinear scaling algorithm. +

+
+
bilinear
+

Select bilinear scaling algorithm. +

+
+
bicubic
+

Select bicubic scaling algorithm. +

+
+
experimental
+

Select experimental scaling algorithm. +

+
+
neighbor
+

Select nearest neighbor rescaling algorithm. +

+
+
area
+

Select averaging area rescaling algorithm. +

+
+
bicublin
+

Select bicubic scaling algorithm for the luma component, bilinear for +chroma components. +

+
+
gauss
+

Select Gaussian rescaling algorithm. +

+
+
sinc
+

Select sinc rescaling algorithm. +

+
+
lanczos
+

Select lanczos rescaling algorithm. +

+
+
spline
+

Select natural bicubic spline rescaling algorithm. +

+
+
print_info
+

Enable printing/debug logging. +

+
+
accurate_rnd
+

Enable accurate rounding. +

+
+
full_chroma_int
+

Enable full chroma interpolation. +

+
+
full_chroma_inp
+

Select full chroma input. +

+
+
bitexact
+

Enable bitexact output. +

+
+ +
+
srcw
+

Set source width. +

+
+
srch
+

Set source height. +

+
+
dstw
+

Set destination width. +

+
+
dsth
+

Set destination height. +

+
+
src_format
+

Set source pixel format (must be expressed as an integer). +

+
+
dst_format
+

Set destination pixel format (must be expressed as an integer). +

+
+
src_range
+

Select source range. +

+
+
dst_range
+

Select destination range. +

+
+
param0, param1
+

Set scaling algorithm parameters. The specified values are specific of +some scaling algorithms and ignored by others. The specified values +are floating point number values. +

+
+
sws_dither
+

Set the dithering algorithm. Accepts one of the following +values. Default value is ‘auto’. +

+
+
auto
+

automatic choice +

+
+
none
+

no dithering +

+
+
bayer
+

bayer dither +

+
+
ed
+

error diffusion dither +

+
+ +
+
+ + +

23. Filtering Introduction

+ +

Filtering in FFmpeg is enabled through the libavfilter library. +

+

In libavfilter, a filter can have multiple inputs and multiple +outputs. +To illustrate the sorts of things that are possible, we consider the +following filtergraph. +

+
 
                [main]
+input --> split ---------------------> overlay --> output
+            |                             ^
+            |[tmp]                  [flip]|
+            +-----> crop --> vflip -------+
+
+ +

This filtergraph splits the input stream in two streams, sends one +stream through the crop filter and the vflip filter before merging it +back with the other stream by overlaying it on top. You can use the +following command to achieve this: +

+
 
ffmpeg -i INPUT -vf "split [main][tmp]; [tmp] crop=iw:ih/2:0:0, vflip [flip]; [main][flip] overlay=0:H/2" OUTPUT
+
+ +

The result will be that in output the top half of the video is mirrored +onto the bottom half. +

+

Filters in the same linear chain are separated by commas, and distinct +linear chains of filters are separated by semicolons. In our example, +crop,vflip are in one linear chain, split and +overlay are separately in another. The points where the linear +chains join are labelled by names enclosed in square brackets. In the +example, the split filter generates two outputs that are associated to +the labels [main] and [tmp]. +

+

The stream sent to the second output of split, labelled as +[tmp], is processed through the crop filter, which crops +away the lower half part of the video, and then vertically flipped. The +overlay filter takes in input the first unchanged output of the +split filter (which was labelled as [main]), and overlay on its +lower half the output generated by the crop,vflip filterchain. +

+

Some filters take in input a list of parameters: they are specified +after the filter name and an equal sign, and are separated from each other +by a colon. +

+

There exist so-called source filters that do not have an +audio/video input, and sink filters that will not have audio/video +output. +

+ + +

24. graph2dot

+ +

The ‘graph2dot’ program included in the FFmpeg ‘tools’ +directory can be used to parse a filtergraph description and issue a +corresponding textual representation in the dot language. +

+

Invoke the command: +

 
graph2dot -h
+
+ +

to see how to use ‘graph2dot’. +

+

You can then pass the dot description to the ‘dot’ program (from +the graphviz suite of programs) and obtain a graphical representation +of the filtergraph. +

+

For example the sequence of commands: +

 
echo GRAPH_DESCRIPTION | \
+tools/graph2dot -o graph.tmp && \
+dot -Tpng graph.tmp -o graph.png && \
+display graph.png
+
+ +

can be used to create and display an image representing the graph +described by the GRAPH_DESCRIPTION string. Note that this string must be +a complete self-contained graph, with its inputs and outputs explicitly defined. +For example if your command line is of the form: +

 
ffmpeg -i infile -vf scale=640:360 outfile
+
+

your GRAPH_DESCRIPTION string will need to be of the form: +

 
nullsrc,scale=640:360,nullsink
+
+

you may also need to set the nullsrc parameters and add a format +filter in order to simulate a specific input file. +

+ + +

25. Filtergraph description

+ +

A filtergraph is a directed graph of connected filters. It can contain +cycles, and there can be multiple links between a pair of +filters. Each link has one input pad on one side connecting it to one +filter from which it takes its input, and one output pad on the other +side connecting it to the one filter accepting its output. +

+

Each filter in a filtergraph is an instance of a filter class +registered in the application, which defines the features and the +number of input and output pads of the filter. +

+

A filter with no input pads is called a "source", a filter with no +output pads is called a "sink". +

+

+

+

25.1 Filtergraph syntax

+ +

A filtergraph can be represented using a textual representation, which is +recognized by the ‘-filter’/‘-vf’ and ‘-filter_complex’ +options in ffmpeg and ‘-vf’ in ffplay, and by the +avfilter_graph_parse()/avfilter_graph_parse2() function defined in +‘libavfilter/avfilter.h’. +

+

A filterchain consists of a sequence of connected filters, each one +connected to the previous one in the sequence. A filterchain is +represented by a list of ","-separated filter descriptions. +

+

A filtergraph consists of a sequence of filterchains. A sequence of +filterchains is represented by a list of ";"-separated filterchain +descriptions. +

+

A filter is represented by a string of the form: +[in_link_1]...[in_link_N]filter_name=arguments[out_link_1]...[out_link_M] +

+

filter_name is the name of the filter class of which the +described filter is an instance of, and has to be the name of one of +the filter classes registered in the program. +The name of the filter class is optionally followed by a string +"=arguments". +

+

arguments is a string which contains the parameters used to +initialize the filter instance. It may have one of the following forms: +

    +
  • +A ’:’-separated list of key=value pairs. + +
  • +A ’:’-separated list of value. In this case, the keys are assumed to be +the option names in the order they are declared. E.g. the fade filter +declares three options in this order – ‘type’, ‘start_frame’ and +‘nb_frames’. Then the parameter list in:0:30 means that the value +in is assigned to the option ‘type’, 0 to +‘start_frame’ and 30 to ‘nb_frames’. + +
  • +A ’:’-separated list of mixed direct value and long key=value +pairs. The direct value must precede the key=value pairs, and +follow the same constraints order of the previous point. The following +key=value pairs can be set in any preferred order. + +
+ +

If the option value itself is a list of items (e.g. the format filter +takes a list of pixel formats), the items in the list are usually separated by +’|’. +

+

The list of arguments can be quoted using the character "’" as initial +and ending mark, and the character ’\’ for escaping the characters +within the quoted text; otherwise the argument string is considered +terminated when the next special character (belonging to the set +"[]=;,") is encountered. +

+

The name and arguments of the filter are optionally preceded and +followed by a list of link labels. +A link label allows one to name a link and associate it to a filter output +or input pad. The preceding labels in_link_1 +... in_link_N, are associated to the filter input pads, +the following labels out_link_1 ... out_link_M, are +associated to the output pads. +

+

When two link labels with the same name are found in the +filtergraph, a link between the corresponding input and output pad is +created. +

+

If an output pad is not labelled, it is linked by default to the first +unlabelled input pad of the next filter in the filterchain. +For example in the filterchain: +

 
nullsrc, split[L1], [L2]overlay, nullsink
+
+

the split filter instance has two output pads, and the overlay filter +instance two input pads. The first output pad of split is labelled +"L1", the first input pad of overlay is labelled "L2", and the second +output pad of split is linked to the second input pad of overlay, +which are both unlabelled. +

+

In a complete filterchain all the unlabelled filter input and output +pads must be connected. A filtergraph is considered valid if all the +filter input and output pads of all the filterchains are connected. +

+

Libavfilter will automatically insert scale filters where format +conversion is required. It is possible to specify swscale flags +for those automatically inserted scalers by prepending +sws_flags=flags; +to the filtergraph description. +

+

Follows a BNF description for the filtergraph syntax: +

 
NAME             ::= sequence of alphanumeric characters and '_'
+LINKLABEL        ::= "[" NAME "]"
+LINKLABELS       ::= LINKLABEL [LINKLABELS]
+FILTER_ARGUMENTS ::= sequence of chars (eventually quoted)
+FILTER           ::= [LINKLABELS] NAME ["=" FILTER_ARGUMENTS] [LINKLABELS]
+FILTERCHAIN      ::= FILTER [,FILTERCHAIN]
+FILTERGRAPH      ::= [sws_flags=flags;] FILTERCHAIN [;FILTERGRAPH]
+
+ + +

25.2 Notes on filtergraph escaping

+ +

Filtergraph description composition entails several levels of +escaping. See (ffmpeg-utils)quoting_and_escaping for more +information about the employed escaping procedure. +

+

A first level escaping affects the content of each filter option +value, which may contain the special character : used to +separate values, or one of the escaping characters \'. +

+

A second level escaping affects the whole filter description, which +may contain the escaping characters \' or the special +characters [],; used by the filtergraph description. +

+

Finally, when you specify a filtergraph on a shell commandline, you +need to perform a third level escaping for the shell special +characters contained within it. +

+

For example, consider the following string to be embedded in +the drawtext filter description ‘text’ value: +

 
this is a 'string': may contain one, or more, special characters
+
+ +

This string contains the ' special escaping character, and the +: special character, so it needs to be escaped in this way: +

 
text=this is a \'string\'\: may contain one, or more, special characters
+
+ +

A second level of escaping is required when embedding the filter +description in a filtergraph description, in order to escape all the +filtergraph special characters. Thus the example above becomes: +

 
drawtext=text=this is a \\\'string\\\'\\: may contain one\, or more\, special characters
+
+

(note that in addition to the \' escaping special characters, +also , needs to be escaped). +

+

Finally an additional level of escaping is needed when writing the +filtergraph description in a shell command, which depends on the +escaping rules of the adopted shell. For example, assuming that +\ is special and needs to be escaped with another \, the +previous string will finally result in: +

 
-vf "drawtext=text=this is a \\\\\\'string\\\\\\'\\\\: may contain one\\, or more\\, special characters"
+
+ + +

26. Timeline editing

+ +

Some filters support a generic ‘enable’ option. For the filters +supporting timeline editing, this option can be set to an expression which is +evaluated before sending a frame to the filter. If the evaluation is non-zero, +the filter will be enabled, otherwise the frame will be sent unchanged to the +next filter in the filtergraph. +

+

The expression accepts the following values: +

+
t
+

timestamp expressed in seconds, NAN if the input timestamp is unknown +

+
+
n
+

sequential number of the input frame, starting from 0 +

+
+
pos
+

the position in the file of the input frame, NAN if unknown +

+
+ +

Additionally, these filters support an ‘enable’ command that can be used +to re-define the expression. +

+

Like any other filtering option, the ‘enable’ option follows the same +rules. +

+

For example, to enable a blur filter (smartblur) from 10 seconds to 3 +minutes, and a curves filter starting at 3 seconds: +

 
smartblur = enable='between(t,10,3*60)',
+curves    = enable='gte(t,3)' : preset=cross_process
+
+ + + +

27. Audio Filters

+ +

When you configure your FFmpeg build, you can disable any of the +existing filters using --disable-filters. +The configure output will show the audio filters included in your +build. +

+

Below is a description of the currently available audio filters. +

+ +

27.1 aconvert

+ +

Convert the input audio format to the specified formats. +

+

This filter is deprecated. Use aformat instead. +

+

The filter accepts a string of the form: +"sample_format:channel_layout". +

+

sample_format specifies the sample format, and can be a string or the +corresponding numeric value defined in ‘libavutil/samplefmt.h’. Use ’p’ +suffix for a planar sample format. +

+

channel_layout specifies the channel layout, and can be a string +or the corresponding number value defined in ‘libavutil/channel_layout.h’. +

+

The special parameter "auto", signifies that the filter will +automatically select the output format depending on the output filter. +

+ +

27.1.1 Examples

+ +
    +
  • +Convert input to float, planar, stereo: +
     
    aconvert=fltp:stereo
    +
    + +
  • +Convert input to unsigned 8-bit, automatically select out channel layout: +
     
    aconvert=u8:auto
    +
    +
+ + +

27.2 adelay

+ +

Delay one or more audio channels. +

+

Samples in delayed channel are filled with silence. +

+

The filter accepts the following option: +

+
+
delays
+

Set list of delays in milliseconds for each channel separated by ’|’. +At least one delay greater than 0 should be provided. +Unused delays will be silently ignored. If the number of given delays is +smaller than the number of channels, all remaining channels will not be delayed. +

+
+ + +

27.2.1 Examples

+ +
    +
  • +Delay first channel by 1.5 seconds, the third channel by 0.5 seconds and leave +the second channel (and any other channels that may be present) unchanged. +
     
    adelay=1500|0|500
    +
    +
+ + +

27.3 aecho

+ +

Apply echoing to the input audio. +

+

Echoes are reflected sound and can occur naturally amongst mountains +(and sometimes large buildings) when talking or shouting; digital echo +effects emulate this behaviour and are often used to help fill out the +sound of a single instrument or vocal. The time difference between the +original signal and the reflection is the delay, and the +loudness of the reflected signal is the decay. +Multiple echoes can have different delays and decays. +

+

A description of the accepted parameters follows. +

+
+
in_gain
+

Set input gain of reflected signal. Default is 0.6. +

+
+
out_gain
+

Set output gain of reflected signal. Default is 0.3. +

+
+
delays
+

Set list of time intervals in milliseconds between original signal and reflections +separated by ’|’. Allowed range for each delay is (0 - 90000.0]. +Default is 1000. +

+
+
decays
+

Set list of loudnesses of reflected signals separated by ’|’. +Allowed range for each decay is (0 - 1.0]. +Default is 0.5. +

+
+ + +

27.3.1 Examples

+ +
    +
  • +Make it sound as if there are twice as many instruments as are actually playing: +
     
    aecho=0.8:0.88:60:0.4
    +
    + +
  • +If delay is very short, then it sounds like a (metallic) robot playing music: +
     
    aecho=0.8:0.88:6:0.4
    +
    + +
  • +A longer delay will sound like an open air concert in the mountains: +
     
    aecho=0.8:0.9:1000:0.3
    +
    + +
  • +Same as above but with one more mountain: +
     
    aecho=0.8:0.9:1000|1800:0.3|0.25
    +
    +
+ + +

27.4 aeval

+ +

Modify an audio signal according to the specified expressions. +

+

This filter accepts one or more expressions (one for each channel), +which are evaluated and used to modify a corresponding audio signal. +

+

This filter accepts the following options: +

+
+
exprs
+

Set the ’|’-separated expressions list for each separate channel. If +the number of input channels is greater than the number of +expressions, the last specified expression is used for the remaining +output channels. +

+
+
channel_layout, c
+

Set output channel layout. If not specified, the channel layout is +specified by the number of expressions. If set to ‘same’, it will +use by default the same input channel layout. +

+
+ +

Each expression in exprs can contain the following constants and functions: +

+
+
ch
+

channel number of the current expression +

+
+
n
+

number of the evaluated sample, starting from 0 +

+
+
s
+

sample rate +

+
+
t
+

time of the evaluated sample expressed in seconds +

+
+
nb_in_channels
+
nb_out_channels
+

input and output number of channels +

+
+
val(CH)
+

the value of input channel with number CH +

+
+ +

Note: this filter is slow. For faster processing you should use a +dedicated filter. +

+ +

27.4.1 Examples

+ +
    +
  • +Half volume: +
     
    aeval=val(ch)/2:c=same
    +
    + +
  • +Invert phase of the second channel: +
     
    aeval=val(0)|-val(1)
    +
    +
+ + +

27.5 afade

+ +

Apply fade-in/out effect to input audio. +

+

A description of the accepted parameters follows. +

+
+
type, t
+

Specify the effect type, can be either in for fade-in, or +out for a fade-out effect. Default is in. +

+
+
start_sample, ss
+

Specify the number of the start sample for starting to apply the fade +effect. Default is 0. +

+
+
nb_samples, ns
+

Specify the number of samples for which the fade effect has to last. At +the end of the fade-in effect the output audio will have the same +volume as the input audio, at the end of the fade-out transition +the output audio will be silence. Default is 44100. +

+
+
start_time, st
+

Specify time for starting to apply the fade effect. Default is 0. +The accepted syntax is: +

 
[-]HH[:MM[:SS[.m...]]]
+[-]S+[.m...]
+
+

See also the function av_parse_time(). +If set this option is used instead of start_sample one. +

+
+
duration, d
+

Specify the duration for which the fade effect has to last. Default is 0. +The accepted syntax is: +

 
[-]HH[:MM[:SS[.m...]]]
+[-]S+[.m...]
+
+

See also the function av_parse_time(). +At the end of the fade-in effect the output audio will have the same +volume as the input audio, at the end of the fade-out transition +the output audio will be silence. +If set this option is used instead of nb_samples one. +

+
+
curve
+

Set curve for fade transition. +

+

It accepts the following values: +

+
tri
+

select triangular, linear slope (default) +

+
qsin
+

select quarter of sine wave +

+
hsin
+

select half of sine wave +

+
esin
+

select exponential sine wave +

+
log
+

select logarithmic +

+
par
+

select inverted parabola +

+
qua
+

select quadratic +

+
cub
+

select cubic +

+
squ
+

select square root +

+
cbr
+

select cubic root +

+
+
+
+ + +

27.5.1 Examples

+ +
    +
  • +Fade in first 15 seconds of audio: +
     
    afade=t=in:ss=0:d=15
    +
    + +
  • +Fade out last 25 seconds of a 900 seconds audio: +
     
    afade=t=out:st=875:d=25
    +
    +
+ +

+

+

27.6 aformat

+ +

Set output format constraints for the input audio. The framework will +negotiate the most appropriate format to minimize conversions. +

+

The filter accepts the following named parameters: +

+
sample_fmts
+

A ’|’-separated list of requested sample formats. +

+
+
sample_rates
+

A ’|’-separated list of requested sample rates. +

+
+
channel_layouts
+

A ’|’-separated list of requested channel layouts. +

+

See (ffmpeg-utils)channel layout syntax +for the required syntax. +

+
+ +

If a parameter is omitted, all values are allowed. +

+

For example to force the output to either unsigned 8-bit or signed 16-bit stereo: +

 
aformat=sample_fmts=u8|s16:channel_layouts=stereo
+
+ + +

27.7 allpass

+ +

Apply a two-pole all-pass filter with central frequency (in Hz) +frequency, and filter-width width. +An all-pass filter changes the audio’s frequency to phase relationship +without changing its frequency to amplitude relationship. +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set frequency in Hz. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Specify the band-width of a filter in width_type units. +

+
+ + +

27.8 amerge

+ +

Merge two or more audio streams into a single multi-channel stream. +

+

The filter accepts the following options: +

+
+
inputs
+

Set the number of inputs. Default is 2. +

+
+
+ +

If the channel layouts of the inputs are disjoint, and therefore compatible, +the channel layout of the output will be set accordingly and the channels +will be reordered as necessary. If the channel layouts of the inputs are not +disjoint, the output will have all the channels of the first input then all +the channels of the second input, in that order, and the channel layout of +the output will be the default value corresponding to the total number of +channels. +

+

For example, if the first input is in 2.1 (FL+FR+LF) and the second input +is FC+BL+BR, then the output will be in 5.1, with the channels in the +following order: a1, a2, b1, a3, b2, b3 (a1 is the first channel of the +first input, b1 is the first channel of the second input). +

+

On the other hand, if both inputs are in stereo, the output channels will be +in the default order: a1, a2, b1, b2, and the channel layout will be +arbitrarily set to 4.0, which may or may not be the expected value. +

+

All inputs must have the same sample rate, and format. +

+

If inputs do not have the same duration, the output will stop with the +shortest. +

+ +

27.8.1 Examples

+ +
    +
  • +Merge two mono files into a stereo stream: +
     
    amovie=left.wav [l] ; amovie=right.mp3 [r] ; [l] [r] amerge
    +
    + +
  • +Multiple merges assuming 1 video stream and 6 audio streams in ‘input.mkv’: +
     
    ffmpeg -i input.mkv -filter_complex "[0:1][0:2][0:3][0:4][0:5][0:6] amerge=inputs=6" -c:a pcm_s16le output.mkv
    +
    +
+ + +

27.9 amix

+ +

Mixes multiple audio inputs into a single output. +

+

For example +

 
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex amix=inputs=3:duration=first:dropout_transition=3 OUTPUT
+
+

will mix 3 input audio streams to a single output with the same duration as the +first input and a dropout transition time of 3 seconds. +

+

The filter accepts the following named parameters: +

+
inputs
+

Number of inputs. If unspecified, it defaults to 2. +

+
+
duration
+

How to determine the end-of-stream. +

+
longest
+

Duration of longest input. (default) +

+
+
shortest
+

Duration of shortest input. +

+
+
first
+

Duration of first input. +

+
+
+ +
+
dropout_transition
+

Transition time, in seconds, for volume renormalization when an input +stream ends. The default value is 2 seconds. +

+
+
+ + +

27.10 anull

+ +

Pass the audio source unchanged to the output. +

+ +

27.11 apad

+ +

Pad the end of an audio stream with silence; this can be used together with +-shortest to extend audio streams to the same length as the video stream. +

+ +

27.12 aphaser

+

Add a phasing effect to the input audio. +

+

A phaser filter creates series of peaks and troughs in the frequency spectrum. +The position of the peaks and troughs are modulated so that they vary over time, creating a sweeping effect. +

+

A description of the accepted parameters follows. +

+
+
in_gain
+

Set input gain. Default is 0.4. +

+
+
out_gain
+

Set output gain. Default is 0.74 +

+
+
delay
+

Set delay in milliseconds. Default is 3.0. +

+
+
decay
+

Set decay. Default is 0.4. +

+
+
speed
+

Set modulation speed in Hz. Default is 0.5. +

+
+
type
+

Set modulation type. Default is triangular. +

+

It accepts the following values: +

+
triangular, t
+
sinusoidal, s
+
+
+
+ +

+

+

27.13 aresample

+ +

Resample the input audio to the specified parameters, using the +libswresample library. If none are specified then the filter will +automatically convert between its input and output. +

+

This filter is also able to stretch/squeeze the audio data to make it match +the timestamps or to inject silence / cut out audio to make it match the +timestamps, do a combination of both or do neither. +

+

The filter accepts the syntax +[sample_rate:]resampler_options, where sample_rate +expresses a sample rate and resampler_options is a list of +key=value pairs, separated by ":". See the +ffmpeg-resampler manual for the complete list of supported options. +

+ +

27.13.1 Examples

+ +
    +
  • +Resample the input audio to 44100Hz: +
     
    aresample=44100
    +
    + +
  • +Stretch/squeeze samples to the given timestamps, with a maximum of 1000 +samples per second compensation: +
     
    aresample=async=1000
    +
    +
+ + +

27.14 asetnsamples

+ +

Set the number of samples per each output audio frame. +

+

The last output packet may contain a different number of samples, as +the filter will flush all the remaining samples when the input audio +signals its end. +

+

The filter accepts the following options: +

+
+
nb_out_samples, n
+

Set the number of frames per each output audio frame. The number is +intended as the number of samples per each channel. +Default value is 1024. +

+
+
pad, p
+

If set to 1, the filter will pad the last audio frame with zeroes, so +that the last frame will contain the same number of samples as the +previous ones. Default value is 1. +

+
+ +

For example, to set the number of per-frame samples to 1234 and +disable padding for the last frame, use: +

 
asetnsamples=n=1234:p=0
+
+ + +

27.15 asetrate

+ +

Set the sample rate without altering the PCM data. +This will result in a change of speed and pitch. +

+

The filter accepts the following options: +

+
+
sample_rate, r
+

Set the output sample rate. Default is 44100 Hz. +

+
+ + +

27.16 ashowinfo

+ +

Show a line containing various information for each input audio frame. +The input audio is not modified. +

+

The shown line contains a sequence of key/value pairs of the form +key:value. +

+

A description of each shown parameter follows: +

+
+
n
+

sequential number of the input frame, starting from 0 +

+
+
pts
+

Presentation timestamp of the input frame, in time base units; the time base +depends on the filter input pad, and is usually 1/sample_rate. +

+
+
pts_time
+

presentation timestamp of the input frame in seconds +

+
+
pos
+

position of the frame in the input stream, -1 if this information is +unavailable and/or meaningless (for example in case of synthetic audio) +

+
+
fmt
+

sample format +

+
+
chlayout
+

channel layout +

+
+
rate
+

sample rate for the audio frame +

+
+
nb_samples
+

number of samples (per channel) in the frame +

+
+
checksum
+

Adler-32 checksum (printed in hexadecimal) of the audio data. For planar audio +the data is treated as if all the planes were concatenated. +

+
+
plane_checksums
+

A list of Adler-32 checksums for each data plane. +

+
+ + +

27.17 astats

+ +

Display time domain statistical information about the audio channels. +Statistics are calculated and displayed for each audio channel and, +where applicable, an overall figure is also given. +

+

The filter accepts the following option: +

+
length
+

Short window length in seconds, used for peak and trough RMS measurement. +Default is 0.05 (50 milliseconds). Allowed range is [0.1 - 10]. +

+
+ +

A description of each shown parameter follows: +

+
+
DC offset
+

Mean amplitude displacement from zero. +

+
+
Min level
+

Minimal sample level. +

+
+
Max level
+

Maximal sample level. +

+
+
Peak level dB
+
RMS level dB
+

Standard peak and RMS level measured in dBFS. +

+
+
RMS peak dB
+
RMS trough dB
+

Peak and trough values for RMS level measured over a short window. +

+
+
Crest factor
+

Standard ratio of peak to RMS level (note: not in dB). +

+
+
Flat factor
+

Flatness (i.e. consecutive samples with the same value) of the signal at its peak levels +(i.e. either Min level or Max level). +

+
+
Peak count
+

Number of occasions (not the number of samples) that the signal attained either +Min level or Max level. +

+
+ + +

27.18 astreamsync

+ +

Forward two audio streams and control the order the buffers are forwarded. +

+

The filter accepts the following options: +

+
+
expr, e
+

Set the expression deciding which stream should be +forwarded next: if the result is negative, the first stream is forwarded; if +the result is positive or zero, the second stream is forwarded. It can use +the following variables: +

+
+
b1 b2
+

number of buffers forwarded so far on each stream +

+
s1 s2
+

number of samples forwarded so far on each stream +

+
t1 t2
+

current timestamp of each stream +

+
+ +

The default value is t1-t2, which means to always forward the stream +that has a smaller timestamp. +

+
+ + +

27.18.1 Examples

+ +

Stress-test amerge by randomly sending buffers on the wrong +input, while avoiding too much of a desynchronization: +

 
amovie=file.ogg [a] ; amovie=file.mp3 [b] ;
+[a] [b] astreamsync=(2*random(1))-1+tanh(5*(t1-t2)) [a2] [b2] ;
+[a2] [b2] amerge
+
+ + +

27.19 asyncts

+ +

Synchronize audio data with timestamps by squeezing/stretching it and/or +dropping samples/adding silence when needed. +

+

This filter is not built by default, please use aresample to do squeezing/stretching. +

+

The filter accepts the following named parameters: +

+
compensate
+

Enable stretching/squeezing the data to make it match the timestamps. Disabled +by default. When disabled, time gaps are covered with silence. +

+
+
min_delta
+

Minimum difference between timestamps and audio data (in seconds) to trigger +adding/dropping samples. Default value is 0.1. If you get non-perfect sync with +this filter, try setting this parameter to 0. +

+
+
max_comp
+

Maximum compensation in samples per second. Relevant only with compensate=1. +Default value 500. +

+
+
first_pts
+

Assume the first pts should be this value. The time base is 1 / sample rate. +This allows for padding/trimming at the start of stream. By default, no +assumption is made about the first frame’s expected pts, so no padding or +trimming is done. For example, this could be set to 0 to pad the beginning with +silence if an audio stream starts after the video stream or to trim any samples +with a negative pts due to encoder delay. +

+
+
+ + +

27.20 atempo

+ +

Adjust audio tempo. +

+

The filter accepts exactly one parameter, the audio tempo. If not +specified then the filter will assume nominal 1.0 tempo. Tempo must +be in the [0.5, 2.0] range. +

+ +

27.20.1 Examples

+ +
    +
  • +Slow down audio to 80% tempo: +
     
    atempo=0.8
    +
    + +
  • +To speed up audio to 125% tempo: +
     
    atempo=1.25
    +
    +
+ + +

27.21 atrim

+ +

Trim the input so that the output contains one continuous subpart of the input. +

+

This filter accepts the following options: +

+
start
+

Specify time of the start of the kept section, i.e. the audio sample +with the timestamp start will be the first sample in the output. +

+
+
end
+

Specify time of the first audio sample that will be dropped, i.e. the +audio sample immediately preceding the one with the timestamp end will be +the last sample in the output. +

+
+
start_pts
+

Same as start, except this option sets the start timestamp in samples +instead of seconds. +

+
+
end_pts
+

Same as end, except this option sets the end timestamp in samples instead +of seconds. +

+
+
duration
+

Specify maximum duration of the output. +

+
+
start_sample
+

Number of the first sample that should be passed to output. +

+
+
end_sample
+

Number of the first sample that should be dropped. +

+
+ +

‘start’, ‘end’, ‘duration’ are expressed as time +duration specifications, check the "Time duration" section in the +ffmpeg-utils manual. +

+

Note that the first two sets of the start/end options and the ‘duration’ +option look at the frame timestamp, while the _sample options simply count the +samples that pass through the filter. So start/end_pts and start/end_sample will +give different results when the timestamps are wrong, inexact or do not start at +zero. Also note that this filter does not modify the timestamps. If you wish +that the output timestamps start at zero, insert the asetpts filter after the +atrim filter. +

+

If multiple start or end options are set, this filter tries to be greedy and +keep all samples that match at least one of the specified constraints. To keep +only the part that matches all the constraints at once, chain multiple atrim +filters. +

+

The defaults are such that all the input is kept. So it is possible to set e.g. +just the end values to keep everything before the specified time. +

+

Examples: +

    +
  • +drop everything except the second minute of input +
     
    ffmpeg -i INPUT -af atrim=60:120
    +
    + +
  • +keep only the first 1000 samples +
     
    ffmpeg -i INPUT -af atrim=end_sample=1000
    +
    + +
+ + +

27.22 bandpass

+ +

Apply a two-pole Butterworth band-pass filter with central +frequency frequency, and (3dB-point) band-width width. +The csg option selects a constant skirt gain (peak gain = Q) +instead of the default: constant 0dB peak gain. +The filter roll off at 6dB per octave (20dB per decade). +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set the filter’s central frequency. Default is 3000. +

+
+
csg
+

Constant skirt gain if set to 1. Defaults to 0. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Specify the band-width of a filter in width_type units. +

+
+ + +

27.23 bandreject

+ +

Apply a two-pole Butterworth band-reject filter with central +frequency frequency, and (3dB-point) band-width width. +The filter roll off at 6dB per octave (20dB per decade). +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set the filter’s central frequency. Default is 3000. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Specify the band-width of a filter in width_type units. +

+
+ + +

27.24 bass

+ +

Boost or cut the bass (lower) frequencies of the audio using a two-pole +shelving filter with a response similar to that of a standard +hi-fi’s tone-controls. This is also known as shelving equalisation (EQ). +

+

The filter accepts the following options: +

+
+
gain, g
+

Give the gain at 0 Hz. Its useful range is about -20 +(for a large cut) to +20 (for a large boost). +Beware of clipping when using a positive gain. +

+
+
frequency, f
+

Set the filter’s central frequency and so can be used +to extend or reduce the frequency range to be boosted or cut. +The default value is 100 Hz. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Determine how steep is the filter’s shelf transition. +

+
+ + +

27.25 biquad

+ +

Apply a biquad IIR filter with the given coefficients. +Where b0, b1, b2 and a0, a1, a2 +are the numerator and denominator coefficients respectively. +

+ +

27.26 channelmap

+ +

Remap input channels to new locations. +

+

This filter accepts the following named parameters: +

+
channel_layout
+

Channel layout of the output stream. +

+
+
map
+

Map channels from input to output. The argument is a ’|’-separated list of +mappings, each in the in_channel-out_channel or +in_channel form. in_channel can be either the name of the input +channel (e.g. FL for front left) or its index in the input channel layout. +out_channel is the name of the output channel or its index in the output +channel layout. If out_channel is not given then it is implicitly an +index, starting with zero and increasing by one for each mapping. +

+
+ +

If no mapping is present, the filter will implicitly map input channels to +output channels preserving index. +

+

For example, assuming a 5.1+downmix input MOV file +

 
ffmpeg -i in.mov -filter 'channelmap=map=DL-FL|DR-FR' out.wav
+
+

will create an output WAV file tagged as stereo from the downmix channels of +the input. +

+

To fix a 5.1 WAV improperly encoded in AAC’s native channel order +

 
ffmpeg -i in.wav -filter 'channelmap=1|2|0|5|3|4:channel_layout=5.1' out.wav
+
+ + +

27.27 channelsplit

+ +

Split each channel in input audio stream into a separate output stream. +

+

This filter accepts the following named parameters: +

+
channel_layout
+

Channel layout of the input stream. Default is "stereo". +

+
+ +

For example, assuming a stereo input MP3 file +

 
ffmpeg -i in.mp3 -filter_complex channelsplit out.mkv
+
+

will create an output Matroska file with two audio streams, one containing only +the left channel and the other the right channel. +

+

To split a 5.1 WAV file into per-channel files +

 
ffmpeg -i in.wav -filter_complex
+'channelsplit=channel_layout=5.1[FL][FR][FC][LFE][SL][SR]'
+-map '[FL]' front_left.wav -map '[FR]' front_right.wav -map '[FC]'
+front_center.wav -map '[LFE]' lfe.wav -map '[SL]' side_left.wav -map '[SR]'
+side_right.wav
+
+ + +

27.28 compand

+

Compress or expand audio dynamic range. +

+

A description of the accepted options follows. +

+
+
attacks
+
decays
+

Set list of times in seconds for each channel over which the instantaneous level +of the input signal is averaged to determine its volume. attacks refers to +increase of volume and decays refers to decrease of volume. For most +situations, the attack time (response to the audio getting louder) should be +shorter than the decay time because the human ear is more sensitive to sudden +loud audio than sudden soft audio. A typical value for attack is 0.3 seconds and +a typical value for decay is 0.8 seconds. +

+
+
points
+

Set list of points for the transfer function, specified in dB relative to the +maximum possible signal amplitude. Each key points list must be defined using +the following syntax: x0/y0|x1/y1|x2/y2|.... or +x0/y0 x1/y1 x2/y2 .... +

+

The input values must be in strictly increasing order but the transfer function +does not have to be monotonically rising. The point 0/0 is assumed but +may be overridden (by 0/out-dBn). Typical values for the transfer +function are -70/-70|-60/-20. +

+
+
soft-knee
+

Set the curve radius in dB for all joints. Defaults to 0.01. +

+
+
gain
+

Set additional gain in dB to be applied at all points on the transfer function. +This allows easy adjustment of the overall gain. Defaults to 0. +

+
+
volume
+

Set initial volume in dB to be assumed for each channel when filtering starts. +This permits the user to supply a nominal level initially, so that, for +example, a very large gain is not applied to initial signal levels before the +companding has begun to operate. A typical value for audio which is initially +quiet is -90 dB. Defaults to 0. +

+
+
delay
+

Set delay in seconds. The input audio is analyzed immediately, but audio is +delayed before being fed to the volume adjuster. Specifying a delay +approximately equal to the attack/decay times allows the filter to effectively +operate in predictive rather than reactive mode. Defaults to 0. +

+
+
+ + +

27.28.1 Examples

+ +
    +
  • +Make music with both quiet and loud passages suitable for listening in a noisy +environment: +
     
    compand=.3|.3:1|1:-90/-60|-60/-40|-40/-30|-20/-20:6:0:-90:0.2
    +
    + +
  • +Noise gate for when the noise is at a lower level than the signal: +
     
    compand=.1|.1:.2|.2:-900/-900|-50.1/-900|-50/-50:.01:0:-90:.1
    +
    + +
  • +Here is another noise gate, this time for when the noise is at a higher level +than the signal (making it, in some ways, similar to squelch): +
     
    compand=.1|.1:.1|.1:-45.1/-45.1|-45/-900|0/-900:.01:45:-90:.1
    +
    +
+ + +

27.29 earwax

+ +

Make audio easier to listen to on headphones. +

+

This filter adds ‘cues’ to 44.1kHz stereo (i.e. audio CD format) audio +so that when listened to on headphones the stereo image is moved from +inside your head (standard for headphones) to outside and in front of +the listener (standard for speakers). +

+

Ported from SoX. +

+ +

27.30 equalizer

+ +

Apply a two-pole peaking equalisation (EQ) filter. With this +filter, the signal-level at and around a selected frequency can +be increased or decreased, whilst (unlike bandpass and bandreject +filters) that at all other frequencies is unchanged. +

+

In order to produce complex equalisation curves, this filter can +be given several times, each with a different central frequency. +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set the filter’s central frequency in Hz. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Specify the band-width of a filter in width_type units. +

+
+
gain, g
+

Set the required gain or attenuation in dB. +Beware of clipping when using a positive gain. +

+
+ + +

27.30.1 Examples

+
    +
  • +Attenuate 10 dB at 1000 Hz, with a bandwidth of 200 Hz: +
     
    equalizer=f=1000:width_type=h:width=200:g=-10
    +
    + +
  • +Apply 2 dB gain at 1000 Hz with Q 1 and attenuate 5 dB at 100 Hz with Q 2: +
     
    equalizer=f=1000:width_type=q:width=1:g=2,equalizer=f=100:width_type=q:width=2:g=-5
    +
    +
+ + +

27.31 highpass

+ +

Apply a high-pass filter with 3dB point frequency. +The filter can be either single-pole, or double-pole (the default). +The filter roll off at 6dB per pole per octave (20dB per pole per decade). +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set frequency in Hz. Default is 3000. +

+
+
poles, p
+

Set number of poles. Default is 2. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Specify the band-width of a filter in width_type units. +Applies only to double-pole filter. +The default is 0.707q and gives a Butterworth response. +

+
+ + +

27.32 join

+ +

Join multiple input streams into one multi-channel stream. +

+

The filter accepts the following named parameters: +

+
inputs
+

Number of input streams. Defaults to 2. +

+
+
channel_layout
+

Desired output channel layout. Defaults to stereo. +

+
+
map
+

Map channels from inputs to output. The argument is a ’|’-separated list of +mappings, each in the input_idx.in_channel-out_channel +form. input_idx is the 0-based index of the input stream. in_channel +can be either the name of the input channel (e.g. FL for front left) or its +index in the specified input stream. out_channel is the name of the output +channel. +

+
+ +

The filter will attempt to guess the mappings when those are not specified +explicitly. It does so by first trying to find an unused matching input channel +and if that fails it picks the first unused input channel. +

+

E.g. to join 3 inputs (with properly set channel layouts) +

 
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex join=inputs=3 OUTPUT
+
+ +

To build a 5.1 output from 6 single-channel streams: +

 
ffmpeg -i fl -i fr -i fc -i sl -i sr -i lfe -filter_complex
+'join=inputs=6:channel_layout=5.1:map=0.0-FL|1.0-FR|2.0-FC|3.0-SL|4.0-SR|5.0-LFE'
+out
+
+ + +

27.33 ladspa

+ +

Load a LADSPA (Linux Audio Developer’s Simple Plugin API) plugin. +

+

To enable compilation of this filter you need to configure FFmpeg with +--enable-ladspa. +

+
+
file, f
+

Specifies the name of LADSPA plugin library to load. If the environment +variable LADSPA_PATH is defined, the LADSPA plugin is searched in +each one of the directories specified by the colon separated list in +LADSPA_PATH, otherwise in the standard LADSPA paths, which are in +this order: ‘HOME/.ladspa/lib/’, ‘/usr/local/lib/ladspa/’, +‘/usr/lib/ladspa/’. +

+
+
plugin, p
+

Specifies the plugin within the library. Some libraries contain only +one plugin, but others contain many of them. If this is not set, the filter +will list all available plugins within the specified library. +

+
+
controls, c
+

Set the ’|’ separated list of controls which are zero or more floating point +values that determine the behavior of the loaded plugin (for example delay, +threshold or gain). +Controls need to be defined using the following syntax: +c0=value0|c1=value1|c2=value2|..., where +valuei is the value set on the i-th control. +If ‘controls’ is set to help, all available controls and +their valid ranges are printed. +

+
+
sample_rate, s
+

Specify the sample rate, default to 44100. Only used if the plugin has +zero inputs. +

+
+
nb_samples, n
+

Set the number of samples per channel per each output frame, default +is 1024. Only used if the plugin has zero inputs. +

+
+
duration, d
+

Set the minimum duration of the sourced audio. See the function +av_parse_time() for the accepted format, also check the "Time duration" +section in the ffmpeg-utils manual. +Note that the resulting duration may be greater than the specified duration, +as the generated audio is always cut at the end of a complete frame. +If not specified, or the expressed duration is negative, the audio is +supposed to be generated forever. +Only used if the plugin has zero inputs. +

+
+
+ + +

27.33.1 Examples

+ +
    +
  • +List all available plugins within amp (LADSPA example plugin) library: +
     
    ladspa=file=amp
    +
    + +
  • +List all available controls and their valid ranges for vcf_notch +plugin from VCF library: +
     
    ladspa=f=vcf:p=vcf_notch:c=help
    +
    + +
  • +Simulate low quality audio equipment using Computer Music Toolkit (CMT) +plugin library: +
     
    ladspa=file=cmt:plugin=lofi:controls=c0=22|c1=12|c2=12
    +
    + +
  • +Add reverberation to the audio using TAP-plugins +(Tom’s Audio Processing plugins): +
     
    ladspa=file=tap_reverb:tap_reverb
    +
    + +
  • +Generate white noise, with 0.2 amplitude: +
     
    ladspa=file=cmt:noise_source_white:c=c0=.2
    +
    + +
  • +Generate 20 bpm clicks using plugin C* Click - Metronome from the +C* Audio Plugin Suite (CAPS) library: +
     
    ladspa=file=caps:Click:c=c1=20
    +
    + +
  • +Apply C* Eq10X2 - Stereo 10-band equaliser effect: +
     
    ladspa=caps:Eq10X2:c=c0=-48|c9=-24|c3=12|c4=2
    +
    +
+ + +

27.33.2 Commands

+ +

This filter supports the following commands: +

+
cN
+

Modify the N-th control value. +

+

If the specified value is not valid, it is ignored and prior one is kept. +

+
+ + +

27.34 lowpass

+ +

Apply a low-pass filter with 3dB point frequency. +The filter can be either single-pole or double-pole (the default). +The filter roll off at 6dB per pole per octave (20dB per pole per decade). +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set frequency in Hz. Default is 500. +

+
+
poles, p
+

Set number of poles. Default is 2. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Specify the band-width of a filter in width_type units. +Applies only to double-pole filter. +The default is 0.707q and gives a Butterworth response. +

+
+ + +

27.35 pan

+ +

Mix channels with specific gain levels. The filter accepts the output +channel layout followed by a set of channels definitions. +

+

This filter is also designed to remap efficiently the channels of an audio +stream. +

+

The filter accepts parameters of the form: +"l:outdef:outdef:..." +

+
+
l
+

output channel layout or number of channels +

+
+
outdef
+

output channel specification, of the form: +"out_name=[gain*]in_name[+[gain*]in_name...]" +

+
+
out_name
+

output channel to define, either a channel name (FL, FR, etc.) or a channel +number (c0, c1, etc.) +

+
+
gain
+

multiplicative coefficient for the channel, 1 leaving the volume unchanged +

+
+
in_name
+

input channel to use, see out_name for details; it is not possible to mix +named and numbered input channels +

+
+ +

If the ‘=’ in a channel specification is replaced by ‘<’, then the gains for +that specification will be renormalized so that the total is 1, thus +avoiding clipping noise. +

+ +

27.35.1 Mixing examples

+ +

For example, if you want to down-mix from stereo to mono, but with a bigger +factor for the left channel: +

 
pan=1:c0=0.9*c0+0.1*c1
+
+ +

A customized down-mix to stereo that works automatically for 3-, 4-, 5- and +7-channels surround: +

 
pan=stereo: FL < FL + 0.5*FC + 0.6*BL + 0.6*SL : FR < FR + 0.5*FC + 0.6*BR + 0.6*SR
+
+ +

Note that ffmpeg integrates a default down-mix (and up-mix) system +that should be preferred (see "-ac" option) unless you have very specific +needs. +

+ +

27.35.2 Remapping examples

+ +

The channel remapping will be effective if, and only if: +

+
    +
  • gain coefficients are zeroes or ones, +
  • only one input per channel output, +
+ +

If all these conditions are satisfied, the filter will notify the user ("Pure +channel mapping detected"), and use an optimized and lossless method to do the +remapping. +

+

For example, if you have a 5.1 source and want a stereo audio stream by +dropping the extra channels: +

 
pan="stereo: c0=FL : c1=FR"
+
+ +

Given the same source, you can also switch front left and front right channels +and keep the input channel layout: +

 
pan="5.1: c0=c1 : c1=c0 : c2=c2 : c3=c3 : c4=c4 : c5=c5"
+
+ +

If the input is a stereo audio stream, you can mute the front left channel (and +still keep the stereo channel layout) with: +

 
pan="stereo:c1=c1"
+
+ +

Still with a stereo audio stream input, you can copy the right channel in both +front left and right: +

 
pan="stereo: c0=FR : c1=FR"
+
+ + +

27.36 replaygain

+ +

ReplayGain scanner filter. This filter takes an audio stream as an input and +outputs it unchanged. +At end of filtering it displays track_gain and track_peak. +

+ +

27.37 resample

+ +

Convert the audio sample format, sample rate and channel layout. This filter is +not meant to be used directly. +

+ +

27.38 silencedetect

+ +

Detect silence in an audio stream. +

+

This filter logs a message when it detects that the input audio volume is less +or equal to a noise tolerance value for a duration greater or equal to the +minimum detected noise duration. +

+

The printed times and duration are expressed in seconds. +

+

The filter accepts the following options: +

+
+
duration, d
+

Set silence duration until notification (default is 2 seconds). +

+
+
noise, n
+

Set noise tolerance. Can be specified in dB (in case "dB" is appended to the +specified value) or amplitude ratio. Default is -60dB, or 0.001. +

+
+ + +

27.38.1 Examples

+ +
    +
  • +Detect 5 seconds of silence with -50dB noise tolerance: +
     
    silencedetect=n=-50dB:d=5
    +
    + +
  • +Complete example with ffmpeg to detect silence with 0.0001 noise +tolerance in ‘silence.mp3’: +
     
    ffmpeg -i silence.mp3 -af silencedetect=noise=0.0001 -f null -
    +
    +
+ + +

27.39 treble

+ +

Boost or cut treble (upper) frequencies of the audio using a two-pole +shelving filter with a response similar to that of a standard +hi-fi’s tone-controls. This is also known as shelving equalisation (EQ). +

+

The filter accepts the following options: +

+
+
gain, g
+

Give the gain at whichever is the lower of ~22 kHz and the +Nyquist frequency. Its useful range is about -20 (for a large cut) +to +20 (for a large boost). Beware of clipping when using a positive gain. +

+
+
frequency, f
+

Set the filter’s central frequency and so can be used +to extend or reduce the frequency range to be boosted or cut. +The default value is 3000 Hz. +

+
+
width_type
+

Set method to specify band-width of filter. +

+
h
+

Hz +

+
q
+

Q-Factor +

+
o
+

octave +

+
s
+

slope +

+
+ +
+
width, w
+

Determine how steep is the filter’s shelf transition. +

+
+ + +

27.40 volume

+ +

Adjust the input audio volume. +

+

The filter accepts the following options: +

+
+
volume
+

Set audio volume expression. +

+

Output values are clipped to the maximum value. +

+

The output audio volume is given by the relation: +

 
output_volume = volume * input_volume
+
+ +

Default value for volume is "1.0". +

+
+
precision
+

Set the mathematical precision. +

+

This determines which input sample formats will be allowed, which affects the +precision of the volume scaling. +

+
+
fixed
+

8-bit fixed-point; limits input sample format to U8, S16, and S32. +

+
float
+

32-bit floating-point; limits input sample format to FLT. (default) +

+
double
+

64-bit floating-point; limits input sample format to DBL. +

+
+ +
+
eval
+

Set when the volume expression is evaluated. +

+

It accepts the following values: +

+
once
+

only evaluate expression once during the filter initialization, or +when the ‘volume’ command is sent +

+
+
frame
+

evaluate expression for each incoming frame +

+
+ +

Default value is ‘once’. +

+
+ +

The volume expression can contain the following parameters. +

+
+
n
+

frame number (starting at zero) +

+
nb_channels
+

number of channels +

+
nb_consumed_samples
+

number of samples consumed by the filter +

+
nb_samples
+

number of samples in the current frame +

+
pos
+

original frame position in the file +

+
pts
+

frame PTS +

+
sample_rate
+

sample rate +

+
startpts
+

PTS at start of stream +

+
startt
+

time at start of stream +

+
t
+

frame time +

+
tb
+

timestamp timebase +

+
volume
+

last set volume value +

+
+ +

Note that when ‘eval’ is set to ‘once’ only the +sample_rate and tb variables are available, all other +variables will evaluate to NAN. +

+ +

27.40.1 Commands

+ +

This filter supports the following commands: +

+
volume
+

Modify the volume expression. +The command accepts the same syntax of the corresponding option. +

+

If the specified expression is not valid, it is kept at its current +value. +

+
+ + +

27.40.2 Examples

+ +
    +
  • +Halve the input audio volume: +
     
    volume=volume=0.5
    +volume=volume=1/2
    +volume=volume=-6.0206dB
    +
    + +

    In all the above example the named key for ‘volume’ can be +omitted, for example like in: +

     
    volume=0.5
    +
    + +
  • +Increase input audio power by 6 decibels using fixed-point precision: +
     
    volume=volume=6dB:precision=fixed
    +
    + +
  • +Fade volume after time 10 with an annihilation period of 5 seconds: +
     
    volume='if(lt(t,10),1,max(1-(t-10)/5,0))':eval=frame
    +
    +
+ + +

27.41 volumedetect

+ +

Detect the volume of the input audio. +

+

The filter has no parameters. The input is not modified. Statistics about +the volume will be printed in the log when the input stream end is reached. +

+

In particular it will show the mean volume (root mean square), maximum +volume (on a per-sample basis), and the beginning of a histogram of the +registered volume values (from the maximum value to a cumulated 1/1000 of +the samples). +

+

All volumes are in decibels relative to the maximum PCM value. +

+ +

27.41.1 Examples

+ +

Here is an excerpt of the output: +

 
[Parsed_volumedetect_0  0xa23120] mean_volume: -27 dB
+[Parsed_volumedetect_0  0xa23120] max_volume: -4 dB
+[Parsed_volumedetect_0  0xa23120] histogram_4db: 6
+[Parsed_volumedetect_0  0xa23120] histogram_5db: 62
+[Parsed_volumedetect_0  0xa23120] histogram_6db: 286
+[Parsed_volumedetect_0  0xa23120] histogram_7db: 1042
+[Parsed_volumedetect_0  0xa23120] histogram_8db: 2551
+[Parsed_volumedetect_0  0xa23120] histogram_9db: 4609
+[Parsed_volumedetect_0  0xa23120] histogram_10db: 8409
+
+ +

It means that: +

    +
  • +The mean square energy is approximately -27 dB, or 10^-2.7. +
  • +The largest sample is at -4 dB, or more precisely between -4 dB and -5 dB. +
  • +There are 6 samples at -4 dB, 62 at -5 dB, 286 at -6 dB, etc. +
+ +

In other words, raising the volume by +4 dB does not cause any clipping, +raising it by +5 dB causes clipping for 6 samples, etc. +

+ + +

28. Audio Sources

+ +

Below is a description of the currently available audio sources. +

+ +

28.1 abuffer

+ +

Buffer audio frames, and make them available to the filter chain. +

+

This source is mainly intended for a programmatic use, in particular +through the interface defined in ‘libavfilter/asrc_abuffer.h’. +

+

It accepts the following named parameters: +

+
+
time_base
+

Timebase which will be used for timestamps of submitted frames. It must be +either a floating-point number or in numerator/denominator form. +

+
+
sample_rate
+

The sample rate of the incoming audio buffers. +

+
+
sample_fmt
+

The sample format of the incoming audio buffers. +Either a sample format name or its corresponding integer representation from +the enum AVSampleFormat in ‘libavutil/samplefmt.h’ +

+
+
channel_layout
+

The channel layout of the incoming audio buffers. +Either a channel layout name from channel_layout_map in +‘libavutil/channel_layout.c’ or its corresponding integer representation +from the AV_CH_LAYOUT_* macros in ‘libavutil/channel_layout.h’ +

+
+
channels
+

The number of channels of the incoming audio buffers. +If both channels and channel_layout are specified, then they +must be consistent. +

+
+
+ + +

28.1.1 Examples

+ +
 
abuffer=sample_rate=44100:sample_fmt=s16p:channel_layout=stereo
+
+ +

will instruct the source to accept planar 16bit signed stereo at 44100Hz. +Since the sample format with name "s16p" corresponds to the number +6 and the "stereo" channel layout corresponds to the value 0x3, this is +equivalent to: +

 
abuffer=sample_rate=44100:sample_fmt=6:channel_layout=0x3
+
+ + +

28.2 aevalsrc

+ +

Generate an audio signal specified by an expression. +

+

This source accepts in input one or more expressions (one for each +channel), which are evaluated and used to generate a corresponding +audio signal. +

+

This source accepts the following options: +

+
+
exprs
+

Set the ’|’-separated expressions list for each separate channel. In case the +‘channel_layout’ option is not specified, the selected channel layout +depends on the number of provided expressions. Otherwise the last +specified expression is applied to the remaining output channels. +

+
+
channel_layout, c
+

Set the channel layout. The number of channels in the specified layout +must be equal to the number of specified expressions. +

+
+
duration, d
+

Set the minimum duration of the sourced audio. See the function +av_parse_time() for the accepted format. +Note that the resulting duration may be greater than the specified +duration, as the generated audio is always cut at the end of a +complete frame. +

+

If not specified, or the expressed duration is negative, the audio is +supposed to be generated forever. +

+
+
nb_samples, n
+

Set the number of samples per channel per each output frame, +default to 1024. +

+
+
sample_rate, s
+

Specify the sample rate, default to 44100. +

+
+ +

Each expression in exprs can contain the following constants: +

+
+
n
+

number of the evaluated sample, starting from 0 +

+
+
t
+

time of the evaluated sample expressed in seconds, starting from 0 +

+
+
s
+

sample rate +

+
+
+ + +

28.2.1 Examples

+ +
    +
  • +Generate silence: +
     
    aevalsrc=0
    +
    + +
  • +Generate a sin signal with frequency of 440 Hz, set sample rate to +8000 Hz: +
     
    aevalsrc="sin(440*2*PI*t):s=8000"
    +
    + +
  • +Generate a two channels signal, specify the channel layout (Front +Center + Back Center) explicitly: +
     
    aevalsrc="sin(420*2*PI*t)|cos(430*2*PI*t):c=FC|BC"
    +
    + +
  • +Generate white noise: +
     
    aevalsrc="-2+random(0)"
    +
    + +
  • +Generate an amplitude modulated signal: +
     
    aevalsrc="sin(10*2*PI*t)*sin(880*2*PI*t)"
    +
    + +
  • +Generate 2.5 Hz binaural beats on a 360 Hz carrier: +
     
    aevalsrc="0.1*sin(2*PI*(360-2.5/2)*t) | 0.1*sin(2*PI*(360+2.5/2)*t)"
    +
    + +
+ + +

28.3 anullsrc

+ +

Null audio source, return unprocessed audio frames. It is mainly useful +as a template and to be employed in analysis / debugging tools, or as +the source for filters which ignore the input data (for example the sox +synth filter). +

+

This source accepts the following options: +

+
+
channel_layout, cl
+
+

Specify the channel layout, and can be either an integer or a string +representing a channel layout. The default value of channel_layout +is "stereo". +

+

Check the channel_layout_map definition in +‘libavutil/channel_layout.c’ for the mapping between strings and +channel layout values. +

+
+
sample_rate, r
+

Specify the sample rate, and defaults to 44100. +

+
+
nb_samples, n
+

Set the number of samples per requested frames. +

+
+
+ + +

28.3.1 Examples

+ +
    +
  • +Set the sample rate to 48000 Hz and the channel layout to AV_CH_LAYOUT_MONO. +
     
    anullsrc=r=48000:cl=4
    +
    + +
  • +Do the same operation with a more obvious syntax: +
     
    anullsrc=r=48000:cl=mono
    +
    +
+ +

All the parameters need to be explicitly defined. +

+ +

28.4 flite

+ +

Synthesize a voice utterance using the libflite library. +

+

To enable compilation of this filter you need to configure FFmpeg with +--enable-libflite. +

+

Note that the flite library is not thread-safe. +

+

The filter accepts the following options: +

+
+
list_voices
+

If set to 1, list the names of the available voices and exit +immediately. Default value is 0. +

+
+
nb_samples, n
+

Set the maximum number of samples per frame. Default value is 512. +

+
+
textfile
+

Set the filename containing the text to speak. +

+
+
text
+

Set the text to speak. +

+
+
voice, v
+

Set the voice to use for the speech synthesis. Default value is +kal. See also the list_voices option. +

+
+ + +

28.4.1 Examples

+ +
    +
  • +Read from file ‘speech.txt’, and synthesize the text using the +standard flite voice: +
     
    flite=textfile=speech.txt
    +
    + +
  • +Read the specified text selecting the slt voice: +
     
    flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
    +
    + +
  • +Input text to ffmpeg: +
     
    ffmpeg -f lavfi -i flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
    +
    + +
  • +Make ‘ffplay’ speak the specified text, using flite and +the lavfi device: +
     
    ffplay -f lavfi flite=text='No more be grieved for which that thou hast done.'
    +
    +
+ +

For more information about libflite, check: +http://www.speech.cs.cmu.edu/flite/ +

+ +

28.5 sine

+ +

Generate an audio signal made of a sine wave with amplitude 1/8. +

+

The audio signal is bit-exact. +

+

The filter accepts the following options: +

+
+
frequency, f
+

Set the carrier frequency. Default is 440 Hz. +

+
+
beep_factor, b
+

Enable a periodic beep every second with frequency beep_factor times +the carrier frequency. Default is 0, meaning the beep is disabled. +

+
+
sample_rate, r
+

Specify the sample rate, default is 44100. +

+
+
duration, d
+

Specify the duration of the generated audio stream. +

+
+
samples_per_frame
+

Set the number of samples per output frame, default is 1024. +

+
+ + +

28.5.1 Examples

+ +
    +
  • +Generate a simple 440 Hz sine wave: +
     
    sine
    +
    + +
  • +Generate a 220 Hz sine wave with a 880 Hz beep each second, for 5 seconds: +
     
    sine=220:4:d=5
    +sine=f=220:b=4:d=5
    +sine=frequency=220:beep_factor=4:duration=5
    +
    + +
+ + + +

29. Audio Sinks

+ +

Below is a description of the currently available audio sinks. +

+ +

29.1 abuffersink

+ +

Buffer audio frames, and make them available to the end of filter chain. +

+

This sink is mainly intended for programmatic use, in particular +through the interface defined in ‘libavfilter/buffersink.h’ +or the options system. +

+

It accepts a pointer to an AVABufferSinkContext structure, which +defines the incoming buffers’ formats, to be passed as the opaque +parameter to avfilter_init_filter for initialization. +

+ +

29.2 anullsink

+ +

Null audio sink, do absolutely nothing with the input audio. It is +mainly useful as a template and to be employed in analysis / debugging +tools. +

+ + +

30. Video Filters

+ +

When you configure your FFmpeg build, you can disable any of the +existing filters using --disable-filters. +The configure output will show the video filters included in your +build. +

+

Below is a description of the currently available video filters. +

+ +

30.1 alphaextract

+ +

Extract the alpha component from the input as a grayscale video. This +is especially useful with the alphamerge filter. +

+ +

30.2 alphamerge

+ +

Add or replace the alpha component of the primary input with the +grayscale value of a second input. This is intended for use with +alphaextract to allow the transmission or storage of frame +sequences that have alpha in a format that doesn’t support an alpha +channel. +

+

For example, to reconstruct full frames from a normal YUV-encoded video +and a separate video created with alphaextract, you might use: +

 
movie=in_alpha.mkv [alpha]; [in][alpha] alphamerge [out]
+
+ +

Since this filter is designed for reconstruction, it operates on frame +sequences without considering timestamps, and terminates when either +input reaches end of stream. This will cause problems if your encoding +pipeline drops frames. If you’re trying to apply an image as an +overlay to a video stream, consider the overlay filter instead. +

+ +

30.3 ass

+ +

Same as the subtitles filter, except that it doesn’t require libavcodec +and libavformat to work. On the other hand, it is limited to ASS (Advanced +Substation Alpha) subtitles files. +

+ +

30.4 bbox

+ +

Compute the bounding box for the non-black pixels in the input frame +luminance plane. +

+

This filter computes the bounding box containing all the pixels with a +luminance value greater than the minimum allowed value. +The parameters describing the bounding box are printed on the filter +log. +

+

The filter accepts the following option: +

+
+
min_val
+

Set the minimal luminance value. Default is 16. +

+
+ + +

30.5 blackdetect

+ +

Detect video intervals that are (almost) completely black. Can be +useful to detect chapter transitions, commercials, or invalid +recordings. Output lines contains the time for the start, end and +duration of the detected black interval expressed in seconds. +

+

In order to display the output lines, you need to set the loglevel at +least to the AV_LOG_INFO value. +

+

The filter accepts the following options: +

+
+
black_min_duration, d
+

Set the minimum detected black duration expressed in seconds. It must +be a non-negative floating point number. +

+

Default value is 2.0. +

+
+
picture_black_ratio_th, pic_th
+

Set the threshold for considering a picture "black". +Express the minimum value for the ratio: +

 
nb_black_pixels / nb_pixels
+
+ +

for which a picture is considered black. +Default value is 0.98. +

+
+
pixel_black_th, pix_th
+

Set the threshold for considering a pixel "black". +

+

The threshold expresses the maximum pixel luminance value for which a +pixel is considered "black". The provided value is scaled according to +the following equation: +

 
absolute_threshold = luminance_minimum_value + pixel_black_th * luminance_range_size
+
+ +

luminance_range_size and luminance_minimum_value depend on +the input video format, the range is [0-255] for YUV full-range +formats and [16-235] for YUV non full-range formats. +

+

Default value is 0.10. +

+
+ +

The following example sets the maximum pixel threshold to the minimum +value, and detects only black intervals of 2 or more seconds: +

 
blackdetect=d=2:pix_th=0.00
+
+ + +

30.6 blackframe

+ +

Detect frames that are (almost) completely black. Can be useful to +detect chapter transitions or commercials. Output lines consist of +the frame number of the detected frame, the percentage of blackness, +the position in the file if known or -1 and the timestamp in seconds. +

+

In order to display the output lines, you need to set the loglevel at +least to the AV_LOG_INFO value. +

+

The filter accepts the following options: +

+
+
amount
+

Set the percentage of the pixels that have to be below the threshold, defaults +to 98. +

+
+
threshold, thresh
+

Set the threshold below which a pixel value is considered black, defaults to +32. +

+
+
+ + +

30.7 blend

+ +

Blend two video frames into each other. +

+

It takes two input streams and outputs one stream, the first input is the +"top" layer and second input is "bottom" layer. +Output terminates when shortest input terminates. +

+

A description of the accepted options follows. +

+
+
c0_mode
+
c1_mode
+
c2_mode
+
c3_mode
+
all_mode
+

Set blend mode for specific pixel component or all pixel components in case +of all_mode. Default value is normal. +

+

Available values for component modes are: +

+
addition
+
and
+
average
+
burn
+
darken
+
difference
+
divide
+
dodge
+
exclusion
+
hardlight
+
lighten
+
multiply
+
negation
+
normal
+
or
+
overlay
+
phoenix
+
pinlight
+
reflect
+
screen
+
softlight
+
subtract
+
vividlight
+
xor
+
+ +
+
c0_opacity
+
c1_opacity
+
c2_opacity
+
c3_opacity
+
all_opacity
+

Set blend opacity for specific pixel component or all pixel components in case +of all_opacity. Only used in combination with pixel component blend modes. +

+
+
c0_expr
+
c1_expr
+
c2_expr
+
c3_expr
+
all_expr
+

Set blend expression for specific pixel component or all pixel components in case +of all_expr. Note that related mode options will be ignored if those are set. +

+

The expressions can use the following variables: +

+
+
N
+

The sequential number of the filtered frame, starting from 0. +

+
+
X
+
Y
+

the coordinates of the current sample +

+
+
W
+
H
+

the width and height of currently filtered plane +

+
+
SW
+
SH
+

Width and height scale depending on the currently filtered plane. It is the +ratio between the corresponding luma plane number of pixels and the current +plane ones. E.g. for YUV4:2:0 the values are 1,1 for the luma plane, and +0.5,0.5 for chroma planes. +

+
+
T
+

Time of the current frame, expressed in seconds. +

+
+
TOP, A
+

Value of pixel component at current location for first video frame (top layer). +

+
+
BOTTOM, B
+

Value of pixel component at current location for second video frame (bottom layer). +

+
+ +
+
shortest
+

Force termination when the shortest input terminates. Default is 0. +

+
repeatlast
+

Continue applying the last bottom frame after the end of the stream. A value of +0 disables the filter after the last frame of the bottom layer is reached. +Default is 1. +

+
+ + +

30.7.1 Examples

+ +
    +
  • +Apply transition from bottom layer to top layer in first 10 seconds: +
     
    blend=all_expr='A*(if(gte(T,10),1,T/10))+B*(1-(if(gte(T,10),1,T/10)))'
    +
    + +
  • +Apply 1x1 checkerboard effect: +
     
    blend=all_expr='if(eq(mod(X,2),mod(Y,2)),A,B)'
    +
    + +
  • +Apply uncover left effect: +
     
    blend=all_expr='if(gte(N*SW+X,W),A,B)'
    +
    + +
  • +Apply uncover down effect: +
     
    blend=all_expr='if(gte(Y-N*SH,0),A,B)'
    +
    + +
  • +Apply uncover up-left effect: +
     
    blend=all_expr='if(gte(T*SH*40+Y,H)*gte((T*40*SW+X)*W/H,W),A,B)'
    +
    +
+ + +

30.8 boxblur

+ +

Apply boxblur algorithm to the input video. +

+

The filter accepts the following options: +

+
+
luma_radius, lr
+
luma_power, lp
+
chroma_radius, cr
+
chroma_power, cp
+
alpha_radius, ar
+
alpha_power, ap
+
+ +

A description of the accepted options follows. +

+
+
luma_radius, lr
+
chroma_radius, cr
+
alpha_radius, ar
+

Set an expression for the box radius in pixels used for blurring the +corresponding input plane. +

+

The radius value must be a non-negative number, and must not be +greater than the value of the expression min(w,h)/2 for the +luma and alpha planes, and of min(cw,ch)/2 for the chroma +planes. +

+

Default value for ‘luma_radius’ is "2". If not specified, +‘chroma_radius’ and ‘alpha_radius’ default to the +corresponding value set for ‘luma_radius’. +

+

The expressions can contain the following constants: +

+
w
+
h
+

the input width and height in pixels +

+
+
cw
+
ch
+

the input chroma image width and height in pixels +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+ +
+
luma_power, lp
+
chroma_power, cp
+
alpha_power, ap
+

Specify how many times the boxblur filter is applied to the +corresponding plane. +

+

Default value for ‘luma_power’ is 2. If not specified, +‘chroma_power’ and ‘alpha_power’ default to the +corresponding value set for ‘luma_power’. +

+

A value of 0 will disable the effect. +

+
+ + +

30.8.1 Examples

+ +
    +
  • +Apply a boxblur filter with luma, chroma, and alpha radius +set to 2: +
     
    boxblur=luma_radius=2:luma_power=1
    +boxblur=2:1
    +
    + +
  • +Set luma radius to 2, alpha and chroma radius to 0: +
     
    boxblur=2:1:cr=0:ar=0
    +
    + +
  • +Set luma and chroma radius to a fraction of the video dimension: +
     
    boxblur=luma_radius=min(h\,w)/10:luma_power=1:chroma_radius=min(cw\,ch)/10:chroma_power=1
    +
    +
+ + +

30.9 colorbalance

+

Modify intensity of primary colors (red, green and blue) of input frames. +

+

The filter allows an input frame to be adjusted in the shadows, midtones or highlights +regions for the red-cyan, green-magenta or blue-yellow balance. +

+

A positive adjustment value shifts the balance towards the primary color, a negative +value towards the complementary color. +

+

The filter accepts the following options: +

+
+
rs
+
gs
+
bs
+

Adjust red, green and blue shadows (darkest pixels). +

+
+
rm
+
gm
+
bm
+

Adjust red, green and blue midtones (medium pixels). +

+
+
rh
+
gh
+
bh
+

Adjust red, green and blue highlights (brightest pixels). +

+

Allowed ranges for options are [-1.0, 1.0]. Defaults are 0. +

+
+ + +

30.9.1 Examples

+ +
    +
  • +Add red color cast to shadows: +
     
    colorbalance=rs=.3
    +
    +
+ + +

30.10 colorchannelmixer

+ +

Adjust video input frames by re-mixing color channels. +

+

This filter modifies a color channel by adding the values associated to +the other channels of the same pixels. For example if the value to +modify is red, the output value will be: +

 
red=red*rr + blue*rb + green*rg + alpha*ra
+
+ +

The filter accepts the following options: +

+
+
rr
+
rg
+
rb
+
ra
+

Adjust contribution of input red, green, blue and alpha channels for output red channel. +Default is 1 for rr, and 0 for rg, rb and ra. +

+
+
gr
+
gg
+
gb
+
ga
+

Adjust contribution of input red, green, blue and alpha channels for output green channel. +Default is 1 for gg, and 0 for gr, gb and ga. +

+
+
br
+
bg
+
bb
+
ba
+

Adjust contribution of input red, green, blue and alpha channels for output blue channel. +Default is 1 for bb, and 0 for br, bg and ba. +

+
+
ar
+
ag
+
ab
+
aa
+

Adjust contribution of input red, green, blue and alpha channels for output alpha channel. +Default is 1 for aa, and 0 for ar, ag and ab. +

+

Allowed ranges for options are [-2.0, 2.0]. +

+
+ + +

30.10.1 Examples

+ +
    +
  • +Convert source to grayscale: +
     
    colorchannelmixer=.3:.4:.3:0:.3:.4:.3:0:.3:.4:.3
    +
    +
  • +Simulate sepia tones: +
     
    colorchannelmixer=.393:.769:.189:0:.349:.686:.168:0:.272:.534:.131
    +
    +
+ + +

30.11 colormatrix

+ +

Convert color matrix. +

+

The filter accepts the following options: +

+
+
src
+
dst
+

Specify the source and destination color matrix. Both values must be +specified. +

+

The accepted values are: +

+
bt709
+

BT.709 +

+
+
bt601
+

BT.601 +

+
+
smpte240m
+

SMPTE-240M +

+
+
fcc
+

FCC +

+
+
+
+ +

For example to convert from BT.601 to SMPTE-240M, use the command: +

 
colormatrix=bt601:smpte240m
+
+ + +

30.12 copy

+ +

Copy the input source unchanged to the output. Mainly useful for +testing purposes. +

+ +

30.13 crop

+ +

Crop the input video to given dimensions. +

+

The filter accepts the following options: +

+
+
w, out_w
+

Width of the output video. It defaults to iw. +This expression is evaluated only once during the filter +configuration. +

+
+
h, out_h
+

Height of the output video. It defaults to ih. +This expression is evaluated only once during the filter +configuration. +

+
+
x
+

Horizontal position, in the input video, of the left edge of the output video. +It defaults to (in_w-out_w)/2. +This expression is evaluated per-frame. +

+
+
y
+

Vertical position, in the input video, of the top edge of the output video. +It defaults to (in_h-out_h)/2. +This expression is evaluated per-frame. +

+
+
keep_aspect
+

If set to 1 will force the output display aspect ratio +to be the same of the input, by changing the output sample aspect +ratio. It defaults to 0. +

+
+ +

The out_w, out_h, x, y parameters are +expressions containing the following constants: +

+
+
x
+
y
+

the computed values for x and y. They are evaluated for +each new frame. +

+
+
in_w
+
in_h
+

the input width and height +

+
+
iw
+
ih
+

same as in_w and in_h +

+
+
out_w
+
out_h
+

the output (cropped) width and height +

+
+
ow
+
oh
+

same as out_w and out_h +

+
+
a
+

same as iw / ih +

+
+
sar
+

input sample aspect ratio +

+
+
dar
+

input display aspect ratio, it is the same as (iw / ih) * sar +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+
n
+

the number of input frame, starting from 0 +

+
+
pos
+

the position in the file of the input frame, NAN if unknown +

+
+
t
+

timestamp expressed in seconds, NAN if the input timestamp is unknown +

+
+
+ +

The expression for out_w may depend on the value of out_h, +and the expression for out_h may depend on out_w, but they +cannot depend on x and y, as x and y are +evaluated after out_w and out_h. +

+

The x and y parameters specify the expressions for the +position of the top-left corner of the output (non-cropped) area. They +are evaluated for each frame. If the evaluated value is not valid, it +is approximated to the nearest valid value. +

+

The expression for x may depend on y, and the expression +for y may depend on x. +

+ +

30.13.1 Examples

+ +
    +
  • +Crop area with size 100x100 at position (12,34). +
     
    crop=100:100:12:34
    +
    + +

    Using named options, the example above becomes: +

     
    crop=w=100:h=100:x=12:y=34
    +
    + +
  • +Crop the central input area with size 100x100: +
     
    crop=100:100
    +
    + +
  • +Crop the central input area with size 2/3 of the input video: +
     
    crop=2/3*in_w:2/3*in_h
    +
    + +
  • +Crop the input video central square: +
     
    crop=out_w=in_h
    +crop=in_h
    +
    + +
  • +Delimit the rectangle with the top-left corner placed at position +100:100 and the right-bottom corner corresponding to the right-bottom +corner of the input image: +
     
    crop=in_w-100:in_h-100:100:100
    +
    + +
  • +Crop 10 pixels from the left and right borders, and 20 pixels from +the top and bottom borders +
     
    crop=in_w-2*10:in_h-2*20
    +
    + +
  • +Keep only the bottom right quarter of the input image: +
     
    crop=in_w/2:in_h/2:in_w/2:in_h/2
    +
    + +
  • +Crop height for getting Greek harmony: +
     
    crop=in_w:1/PHI*in_w
    +
    + +
  • +Apply trembling effect: +
     
    crop=in_w/2:in_h/2:(in_w-out_w)/2+((in_w-out_w)/2)*sin(n/10):(in_h-out_h)/2 +((in_h-out_h)/2)*sin(n/7)
    +
    + +
  • +Apply erratic camera effect depending on timestamp: +
     
    crop=in_w/2:in_h/2:(in_w-out_w)/2+((in_w-out_w)/2)*sin(t*10):(in_h-out_h)/2 +((in_h-out_h)/2)*sin(t*13)"
    +
    + +
  • +Set x depending on the value of y: +
     
    crop=in_w/2:in_h/2:y:10+10*sin(n/10)
    +
    +
+ + +

30.14 cropdetect

+ +

Auto-detect crop size. +

+

Calculate necessary cropping parameters and prints the recommended +parameters through the logging system. The detected dimensions +correspond to the non-black area of the input video. +

+

The filter accepts the following options: +

+
+
limit
+

Set higher black value threshold, which can be optionally specified +from nothing (0) to everything (255). An intensity value greater +to the set value is considered non-black. Default value is 24. +

+
+
round
+

Set the value for which the width/height should be divisible by. The +offset is automatically adjusted to center the video. Use 2 to get +only even dimensions (needed for 4:2:2 video). 16 is best when +encoding to most video codecs. Default value is 16. +

+
+
reset_count, reset
+

Set the counter that determines after how many frames cropdetect will +reset the previously detected largest video area and start over to +detect the current optimal crop area. Default value is 0. +

+

This can be useful when channel logos distort the video area. 0 +indicates never reset and return the largest area encountered during +playback. +

+
+ +

+

+

30.15 curves

+ +

Apply color adjustments using curves. +

+

This filter is similar to the Adobe Photoshop and GIMP curves tools. Each +component (red, green and blue) has its values defined by N key points +tied from each other using a smooth curve. The x-axis represents the pixel +values from the input frame, and the y-axis the new pixel values to be set for +the output frame. +

+

By default, a component curve is defined by the two points (0;0) and +(1;1). This creates a straight line where each original pixel value is +"adjusted" to its own value, which means no change to the image. +

+

The filter allows you to redefine these two points and add some more. A new +curve (using a natural cubic spline interpolation) will be defined to pass +smoothly through all these new coordinates. The newly defined points need to be +strictly increasing over the x-axis, and their x and y values must +be in the [0;1] interval. If the computed curves happen to go outside +the vector spaces, the values will be clipped accordingly. +

+

If there is no key point defined in x=0, the filter will automatically +insert a (0;0) point. In the same way, if there is no key point defined +in x=1, the filter will automatically insert a (1;1) point. +

+

The filter accepts the following options: +

+
+
preset
+

Select one of the available color presets. This option can be used in addition +to the ‘r’, ‘g’, ‘b’ parameters; in this case, the later +options takes priority on the preset values. +Available presets are: +

+
none
+
color_negative
+
cross_process
+
darker
+
increase_contrast
+
lighter
+
linear_contrast
+
medium_contrast
+
negative
+
strong_contrast
+
vintage
+
+

Default is none. +

+
master, m
+

Set the master key points. These points will define a second pass mapping. It +is sometimes called a "luminance" or "value" mapping. It can be used with +‘r’, ‘g’, ‘b’ or ‘all’ since it acts like a +post-processing LUT. +

+
red, r
+

Set the key points for the red component. +

+
green, g
+

Set the key points for the green component. +

+
blue, b
+

Set the key points for the blue component. +

+
all
+

Set the key points for all components (not including master). +Can be used in addition to the other key points component +options. In this case, the unset component(s) will fallback on this +‘all’ setting. +

+
psfile
+

Specify a Photoshop curves file (.asv) to import the settings from. +

+
+ +

To avoid some filtergraph syntax conflicts, each key points list need to be +defined using the following syntax: x0/y0 x1/y1 x2/y2 .... +

+ +

30.15.1 Examples

+ +
    +
  • +Increase slightly the middle level of blue: +
     
    curves=blue='0.5/0.58'
    +
    + +
  • +Vintage effect: +
     
    curves=r='0/0.11 .42/.51 1/0.95':g='0.50/0.48':b='0/0.22 .49/.44 1/0.8'
    +
    +

    Here we obtain the following coordinates for each components: +

    +
    red
    +

    (0;0.11) (0.42;0.51) (1;0.95) +

    +
    green
    +

    (0;0) (0.50;0.48) (1;1) +

    +
    blue
    +

    (0;0.22) (0.49;0.44) (1;0.80) +

    +
    + +
  • +The previous example can also be achieved with the associated built-in preset: +
     
    curves=preset=vintage
    +
    + +
  • +Or simply: +
     
    curves=vintage
    +
    + +
  • +Use a Photoshop preset and redefine the points of the green component: +
     
    curves=psfile='MyCurvesPresets/purple.asv':green='0.45/0.53'
    +
    +
+ + +

30.16 dctdnoiz

+ +

Denoise frames using 2D DCT (frequency domain filtering). +

+

This filter is not designed for real time and can be extremely slow. +

+

The filter accepts the following options: +

+
+
sigma, s
+

Set the noise sigma constant. +

+

This sigma defines a hard threshold of 3 * sigma; every DCT +coefficient (absolute value) below this threshold will be dropped. +

+

If you need a more advanced filtering, see ‘expr’. +

+

Default is 0. +

+
+
overlap
+

Set number overlapping pixels for each block. Each block is of size +16x16. Since the filter can be slow, you may want to reduce this value, +at the cost of a less effective filter and the risk of various artefacts. +

+

If the overlapping value doesn’t allow processing the whole input width or +height, a warning will be displayed and the corresponding borders won’t be denoised. +

+

Default value is 15. +

+
+
expr, e
+

Set the coefficient factor expression. +

+

For each coefficient of a DCT block, this expression will be evaluated as a +multiplier value for the coefficient. +

+

If this option is set, the ‘sigma’ option will be ignored. +

+

The absolute value of the coefficient can be accessed through the c +variable. +

+
+ + +

30.16.1 Examples

+ +

Apply a denoise with a ‘sigma’ of 4.5: +

 
dctdnoiz=4.5
+
+ +

The same operation can be achieved using the expression system: +

 
dctdnoiz=e='gte(c, 4.5*3)'
+
+ +

+

+

30.17 decimate

+ +

Drop duplicated frames at regular intervals. +

+

The filter accepts the following options: +

+
+
cycle
+

Set the number of frames from which one will be dropped. Setting this to +N means one frame in every batch of N frames will be dropped. +Default is 5. +

+
+
dupthresh
+

Set the threshold for duplicate detection. If the difference metric for a frame +is less than or equal to this value, then it is declared as duplicate. Default +is 1.1 +

+
+
scthresh
+

Set scene change threshold. Default is 15. +

+
+
blockx
+
blocky
+

Set the size of the x and y-axis blocks used during metric calculations. +Larger blocks give better noise suppression, but also give worse detection of +small movements. Must be a power of two. Default is 32. +

+
+
ppsrc
+

Mark main input as a pre-processed input and activate clean source input +stream. This allows the input to be pre-processed with various filters to help +the metrics calculation while keeping the frame selection lossless. When set to +1, the first stream is for the pre-processed input, and the second +stream is the clean source from where the kept frames are chosen. Default is +0. +

+
+
chroma
+

Set whether or not chroma is considered in the metric calculations. Default is +1. +

+
+ + +

30.18 dejudder

+ +

Remove judder produced by partially interlaced telecined content. +

+

Judder can be introduced, for instance, by pullup filter. If the original +source was partially telecined content then the output of pullup,dejudder +will have a variable frame rate. May change the recorded frame rate of the +container. Aside from that change, this filter will not affect constant frame +rate video. +

+

The option available in this filter is: +

+
cycle
+

Specify the length of the window over which the judder repeats. +

+

Accepts any integer greater than 1. Useful values are: +

+
4
+

If the original was telecined from 24 to 30 fps (Film to NTSC). +

+
+
5
+

If the original was telecined from 25 to 30 fps (PAL to NTSC). +

+
+
20
+

If a mixture of the two. +

+
+ +

The default is ‘4’. +

+
+ + +

30.19 delogo

+ +

Suppress a TV station logo by a simple interpolation of the surrounding +pixels. Just set a rectangle covering the logo and watch it disappear +(and sometimes something even uglier appear - your mileage may vary). +

+

This filter accepts the following options: +

+
x
+
y
+

Specify the top left corner coordinates of the logo. They must be +specified. +

+
+
w
+
h
+

Specify the width and height of the logo to clear. They must be +specified. +

+
+
band, t
+

Specify the thickness of the fuzzy edge of the rectangle (added to +w and h). The default value is 4. +

+
+
show
+

When set to 1, a green rectangle is drawn on the screen to simplify +finding the right x, y, w, and h parameters. +The default value is 0. +

+

The rectangle is drawn on the outermost pixels which will be (partly) +replaced with interpolated values. The values of the next pixels +immediately outside this rectangle in each direction will be used to +compute the interpolated pixel values inside the rectangle. +

+
+
+ + +

30.19.1 Examples

+ +
    +
  • +Set a rectangle covering the area with top left corner coordinates 0,0 +and size 100x77, setting a band of size 10: +
     
    delogo=x=0:y=0:w=100:h=77:band=10
    +
    + +
+ + +

30.20 deshake

+ +

Attempt to fix small changes in horizontal and/or vertical shift. This +filter helps remove camera shake from hand-holding a camera, bumping a +tripod, moving on a vehicle, etc. +

+

The filter accepts the following options: +

+
+
x
+
y
+
w
+
h
+

Specify a rectangular area where to limit the search for motion +vectors. +If desired the search for motion vectors can be limited to a +rectangular area of the frame defined by its top left corner, width +and height. These parameters have the same meaning as the drawbox +filter which can be used to visualise the position of the bounding +box. +

+

This is useful when simultaneous movement of subjects within the frame +might be confused for camera motion by the motion vector search. +

+

If any or all of x, y, w and h are set to -1 +then the full frame is used. This allows later options to be set +without specifying the bounding box for the motion vector search. +

+

Default - search the whole frame. +

+
+
rx
+
ry
+

Specify the maximum extent of movement in x and y directions in the +range 0-64 pixels. Default 16. +

+
+
edge
+

Specify how to generate pixels to fill blanks at the edge of the +frame. Available values are: +

+
blank, 0
+

Fill zeroes at blank locations +

+
original, 1
+

Original image at blank locations +

+
clamp, 2
+

Extruded edge value at blank locations +

+
mirror, 3
+

Mirrored edge at blank locations +

+
+

Default value is ‘mirror’. +

+
+
blocksize
+

Specify the blocksize to use for motion search. Range 4-128 pixels, +default 8. +

+
+
contrast
+

Specify the contrast threshold for blocks. Only blocks with more than +the specified contrast (difference between darkest and lightest +pixels) will be considered. Range 1-255, default 125. +

+
+
search
+

Specify the search strategy. Available values are: +

+
exhaustive, 0
+

Set exhaustive search +

+
less, 1
+

Set less exhaustive search. +

+
+

Default value is ‘exhaustive’. +

+
+
filename
+

If set then a detailed log of the motion search is written to the +specified file. +

+
+
opencl
+

If set to 1, specify using OpenCL capabilities, only available if +FFmpeg was configured with --enable-opencl. Default value is 0. +

+
+
+ + +

30.21 drawbox

+ +

Draw a colored box on the input image. +

+

This filter accepts the following options: +

+
+
x
+
y
+

The expressions which specify the top left corner coordinates of the box. Default to 0. +

+
+
width, w
+
height, h
+

The expressions which specify the width and height of the box, if 0 they are interpreted as +the input width and height. Default to 0. +

+
+
color, c
+

Specify the color of the box to write. For the general syntax of this option, +check the "Color" section in the ffmpeg-utils manual. If the special +value invert is used, the box edge color is the same as the +video with inverted luma. +

+
+
thickness, t
+

The expression which sets the thickness of the box edge. Default value is 3. +

+

See below for the list of accepted constants. +

+
+ +

The parameters for x, y, w and h and t are expressions containing the +following constants: +

+
+
dar
+

The input display aspect ratio, it is the same as (w / h) * sar. +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+
in_h, ih
+
in_w, iw
+

The input width and height. +

+
+
sar
+

The input sample aspect ratio. +

+
+
x
+
y
+

The x and y offset coordinates where the box is drawn. +

+
+
w
+
h
+

The width and height of the drawn box. +

+
+
t
+

The thickness of the drawn box. +

+

These constants allow the x, y, w, h and t expressions to refer to +each other, so you may for example specify y=x/dar or h=w/dar. +

+
+
+ + +

30.21.1 Examples

+ +
    +
  • +Draw a black box around the edge of the input image: +
     
    drawbox
    +
    + +
  • +Draw a box with color red and an opacity of 50%: +
     
    drawbox=10:20:200:60:red@0.5
    +
    + +

    The previous example can be specified as: +

     
    drawbox=x=10:y=20:w=200:h=60:color=red@0.5
    +
    + +
  • +Fill the box with pink color: +
     
    drawbox=x=10:y=10:w=100:h=100:color=pink@0.5:t=max
    +
    + +
  • +Draw a 2-pixel red 2.40:1 mask: +
     
    drawbox=x=-t:y=0.5*(ih-iw/2.4)-t:w=iw+t*2:h=iw/2.4+t*2:t=2:c=red
    +
    +
+ + +

30.22 drawgrid

+ +

Draw a grid on the input image. +

+

This filter accepts the following options: +

+
+
x
+
y
+

The expressions which specify the coordinates of some point of grid intersection (meant to configure offset). Both default to 0. +

+
+
width, w
+
height, h
+

The expressions which specify the width and height of the grid cell, if 0 they are interpreted as the +input width and height, respectively, minus thickness, so image gets +framed. Default to 0. +

+
+
color, c
+

Specify the color of the grid. For the general syntax of this option, +check the "Color" section in the ffmpeg-utils manual. If the special +value invert is used, the grid color is the same as the +video with inverted luma. +

+
+
thickness, t
+

The expression which sets the thickness of the grid line. Default value is 1. +

+

See below for the list of accepted constants. +

+
+ +

The parameters for x, y, w, h and t are expressions containing the following constants:

+
+
dar
+

The input display aspect ratio, it is the same as (w / h) * sar. +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+
in_h, ih
+
in_w, iw
+

The input grid cell width and height. +

+
+
sar
+

The input sample aspect ratio. +

+
+
x
+
y
+

The x and y coordinates of some point of grid intersection (meant to configure offset). +

+
+
w
+
h
+

The width and height of the drawn cell. +

+
+
t
+

The thickness of the drawn cell. +

+

These constants allow the x, y, w, h and t expressions to refer to +each other, so you may for example specify y=x/dar or h=w/dar. +

+
+
+ + +

30.22.1 Examples

+ +
    +
  • +Draw a grid with cell 100x100 pixels, thickness 2 pixels, with color red and an opacity of 50%: +
     
    drawgrid=width=100:height=100:thickness=2:color=red@0.5
    +
    + +
  • +Draw a white 3x3 grid with an opacity of 50%: +
     
    drawgrid=w=iw/3:h=ih/3:t=2:c=white@0.5
    +
    +
+ +

+

+

30.23 drawtext

+ +

Draw text string or text from specified file on top of video using the +libfreetype library. +

+

To enable compilation of this filter you need to configure FFmpeg with +--enable-libfreetype. +

+ +

30.23.1 Syntax

+ +

The description of the accepted parameters follows. +

+
+
box
+

Used to draw a box around text using background color. +Value should be either 1 (enable) or 0 (disable). +The default value of box is 0. +

+
+
boxcolor
+

The color to be used for drawing box around text. For the syntax of this +option, check the "Color" section in the ffmpeg-utils manual. +

+

The default value of boxcolor is "white". +

+
+
borderw
+

Set the width of the border to be drawn around the text using bordercolor. +The default value of borderw is 0. +

+
+
bordercolor
+

Set the color to be used for drawing border around text. For the syntax of this +option, check the "Color" section in the ffmpeg-utils manual. +

+

The default value of bordercolor is "black". +

+
+
expansion
+

Select how the text is expanded. Can be either none, +strftime (deprecated) or +normal (default). See the Text expansion section +below for details. +

+
+
fix_bounds
+

If true, check and fix text coords to avoid clipping. +

+
+
fontcolor
+

The color to be used for drawing fonts. For the syntax of this option, check +the "Color" section in the ffmpeg-utils manual. +

+

The default value of fontcolor is "black". +

+
+
fontfile
+

The font file to be used for drawing text. Path must be included. +This parameter is mandatory. +

+
+
fontsize
+

The font size to be used for drawing text. +The default value of fontsize is 16. +

+
+
ft_load_flags
+

Flags to be used for loading the fonts. +

+

The flags map the corresponding flags supported by libfreetype, and are +a combination of the following values: +

+
default
+
no_scale
+
no_hinting
+
render
+
no_bitmap
+
vertical_layout
+
force_autohint
+
crop_bitmap
+
pedantic
+
ignore_global_advance_width
+
no_recurse
+
ignore_transform
+
monochrome
+
linear_design
+
no_autohint
+
+ +

Default value is "default". +

+

For more information consult the documentation for the FT_LOAD_* +libfreetype flags. +

+
+
shadowcolor
+

The color to be used for drawing a shadow behind the drawn text. For the +syntax of this option, check the "Color" section in the ffmpeg-utils manual. +

+

The default value of shadowcolor is "black". +

+
+
shadowx
+
shadowy
+

The x and y offsets for the text shadow position with respect to the +position of the text. They can be either positive or negative +values. Default value for both is "0". +

+
+
start_number
+

The starting frame number for the n/frame_num variable. The default value +is "0". +

+
+
tabsize
+

The size in number of spaces to use for rendering the tab. +Default value is 4. +

+
+
timecode
+

Set the initial timecode representation in "hh:mm:ss[:;.]ff" +format. It can be used with or without text parameter. timecode_rate +option must be specified. +

+
+
timecode_rate, rate, r
+

Set the timecode frame rate (timecode only). +

+
+
text
+

The text string to be drawn. The text must be a sequence of UTF-8 +encoded characters. +This parameter is mandatory if no file is specified with the parameter +textfile. +

+
+
textfile
+

A text file containing text to be drawn. The text must be a sequence +of UTF-8 encoded characters. +

+

This parameter is mandatory if no text string is specified with the +parameter text. +

+

If both text and textfile are specified, an error is thrown. +

+
+
reload
+

If set to 1, the textfile will be reloaded before each frame. +Be sure to update it atomically, or it may be read partially, or even fail. +

+
+
x
+
y
+

The expressions which specify the offsets where text will be drawn +within the video frame. They are relative to the top/left border of the +output image. +

+

The default value of x and y is "0". +

+

See below for the list of accepted constants and functions. +

+
+ +

The parameters for x and y are expressions containing the +following constants and functions: +

+
+
dar
+

input display aspect ratio, it is the same as (w / h) * sar +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+
line_h, lh
+

the height of each text line +

+
+
main_h, h, H
+

the input height +

+
+
main_w, w, W
+

the input width +

+
+
max_glyph_a, ascent
+

the maximum distance from the baseline to the highest/upper grid +coordinate used to place a glyph outline point, for all the rendered +glyphs. +It is a positive value, due to the grid’s orientation with the Y axis +upwards. +

+
+
max_glyph_d, descent
+

the maximum distance from the baseline to the lowest grid coordinate +used to place a glyph outline point, for all the rendered glyphs. +This is a negative value, due to the grid’s orientation, with the Y axis +upwards. +

+
+
max_glyph_h
+

maximum glyph height, that is the maximum height for all the glyphs +contained in the rendered text, it is equivalent to ascent - +descent. +

+
+
max_glyph_w
+

maximum glyph width, that is the maximum width for all the glyphs +contained in the rendered text +

+
+
n
+

the number of input frame, starting from 0 +

+
+
rand(min, max)
+

return a random number included between min and max +

+
+
sar
+

input sample aspect ratio +

+
+
t
+

timestamp expressed in seconds, NAN if the input timestamp is unknown +

+
+
text_h, th
+

the height of the rendered text +

+
+
text_w, tw
+

the width of the rendered text +

+
+
x
+
y
+

the x and y offset coordinates where the text is drawn. +

+

These parameters allow the x and y expressions to refer +each other, so you can for example specify y=x/dar. +

+
+ +

If libavfilter was built with --enable-fontconfig, then +‘fontfile’ can be a fontconfig pattern or omitted. +

+

+

+

30.23.2 Text expansion

+ +

If ‘expansion’ is set to strftime, +the filter recognizes strftime() sequences in the provided text and +expands them accordingly. Check the documentation of strftime(). This +feature is deprecated. +

+

If ‘expansion’ is set to none, the text is printed verbatim. +

+

If ‘expansion’ is set to normal (which is the default), +the following expansion mechanism is used. +

+

The backslash character ’\’, followed by any character, always expands to +the second character. +

+

Sequence of the form %{...} are expanded. The text between the +braces is a function name, possibly followed by arguments separated by ’:’. +If the arguments contain special characters or delimiters (’:’ or ’}’), +they should be escaped. +

+

Note that they probably must also be escaped as the value for the +‘text’ option in the filter argument string and as the filter +argument in the filtergraph description, and possibly also for the shell, +that makes up to four levels of escaping; using a text file avoids these +problems. +

+

The following functions are available: +

+
+
expr, e
+

The expression evaluation result. +

+

It must take one argument specifying the expression to be evaluated, +which accepts the same constants and functions as the x and +y values. Note that not all constants should be used, for +example the text size is not known when evaluating the expression, so +the constants text_w and text_h will have an undefined +value. +

+
+
gmtime
+

The time at which the filter is running, expressed in UTC. +It can accept an argument: a strftime() format string. +

+
+
localtime
+

The time at which the filter is running, expressed in the local time zone. +It can accept an argument: a strftime() format string. +

+
+
metadata
+

Frame metadata. It must take one argument specifying metadata key. +

+
+
n, frame_num
+

The frame number, starting from 0. +

+
+
pict_type
+

A 1 character description of the current picture type. +

+
+
pts
+

The timestamp of the current frame, in seconds, with microsecond accuracy. +

+
+
+ + +

30.23.3 Examples

+ +
    +
  • +Draw "Test Text" with font FreeSerif, using the default values for the +optional parameters. + +
     
    drawtext="fontfile=/usr/share/fonts/truetype/freefont/FreeSerif.ttf: text='Test Text'"
    +
    + +
  • +Draw ’Test Text’ with font FreeSerif of size 24 at position x=100 +and y=50 (counting from the top-left corner of the screen), text is +yellow with a red box around it. Both the text and the box have an +opacity of 20%. + +
     
    drawtext="fontfile=/usr/share/fonts/truetype/freefont/FreeSerif.ttf: text='Test Text':\
    +          x=100: y=50: fontsize=24: fontcolor=yellow@0.2: box=1: boxcolor=red@0.2"
    +
    + +

    Note that the double quotes are not necessary if spaces are not used +within the parameter list. +

    +
  • +Show the text at the center of the video frame: +
     
    drawtext="fontsize=30:fontfile=FreeSerif.ttf:text='hello world':x=(w-text_w)/2:y=(h-text_h-line_h)/2"
    +
    + +
  • +Show a text line sliding from right to left in the last row of the video +frame. The file ‘LONG_LINE’ is assumed to contain a single line +with no newlines. +
     
    drawtext="fontsize=15:fontfile=FreeSerif.ttf:text=LONG_LINE:y=h-line_h:x=-50*t"
    +
    + +
  • +Show the content of file ‘CREDITS’ off the bottom of the frame and scroll up. +
     
    drawtext="fontsize=20:fontfile=FreeSerif.ttf:textfile=CREDITS:y=h-20*t"
    +
    + +
  • +Draw a single green letter "g", at the center of the input video. +The glyph baseline is placed at half screen height. +
     
    drawtext="fontsize=60:fontfile=FreeSerif.ttf:fontcolor=green:text=g:x=(w-max_glyph_w)/2:y=h/2-ascent"
    +
    + +
  • +Show text for 1 second every 3 seconds: +
     
    drawtext="fontfile=FreeSerif.ttf:fontcolor=white:x=100:y=x/dar:enable=lt(mod(t\,3)\,1):text='blink'"
    +
    + +
  • +Use fontconfig to set the font. Note that the colons need to be escaped. +
     
    drawtext='fontfile=Linux Libertine O-40\:style=Semibold:text=FFmpeg'
    +
    + +
  • +Print the date of a real-time encoding (see strftime(3)): +
     
    drawtext='fontfile=FreeSans.ttf:text=%{localtime:%a %b %d %Y}'
    +
    + +
+ +

For more information about libfreetype, check: +http://www.freetype.org/. +

+

For more information about fontconfig, check: +http://freedesktop.org/software/fontconfig/fontconfig-user.html. +

+ +

30.24 edgedetect

+ +

Detect and draw edges. The filter uses the Canny Edge Detection algorithm. +

+

The filter accepts the following options: +

+
+
low
+
high
+

Set low and high threshold values used by the Canny thresholding +algorithm. +

+

The high threshold selects the "strong" edge pixels, which are then +connected through 8-connectivity with the "weak" edge pixels selected +by the low threshold. +

+

low and high threshold values must be chosen in the range [0,1], and low should be less than or equal to high.

+

Default value for low is 20/255, and default value for high +is 50/255. +

+
+ +

Example: +

 
edgedetect=low=0.1:high=0.4
+
+ + +

30.25 extractplanes

+ +

Extract color channel components from input video stream into +separate grayscale video streams. +

+

The filter accepts the following option: +

+
+
planes
+

Set plane(s) to extract. +

+

Available values for planes are: +

+
y
+
u
+
v
+
a
+
r
+
g
+
b
+
+ +

Choosing planes not available in the input will result in an error. +That means you cannot select r, g, b planes +with y, u, v planes at same time. +

+
+ + +

30.25.1 Examples

+ +
    +
  • +Extract luma, u and v color channel component from input video frame +into 3 grayscale outputs: +
     
    ffmpeg -i video.avi -filter_complex 'extractplanes=y+u+v[y][u][v]' -map '[y]' y.avi -map '[u]' u.avi -map '[v]' v.avi
    +
    +
+ + +

30.26 elbg

+ +

Apply a posterize effect using the ELBG (Enhanced LBG) algorithm. +

+

For each input image, the filter will compute the optimal mapping from +the input to the output given the codebook length, that is the number +of distinct output colors. +

+

This filter accepts the following options. +

+
+
codebook_length, l
+

Set codebook length. The value must be a positive integer, and +represents the number of distinct output colors. Default value is 256. +

+
+
nb_steps, n
+

Set the maximum number of iterations to apply for computing the optimal +mapping. The higher the value the better the result and the higher the +computation time. Default value is 1. +

+
+
seed, s
+

Set a random seed, must be an integer included between 0 and +UINT32_MAX. If not specified, or if explicitly set to -1, the filter +will try to use a good random seed on a best effort basis. +

+
+ + +

30.27 fade

+ +

Apply fade-in/out effect to input video. +

+

This filter accepts the following options: +

+
+
type, t
+

The effect type – can be either "in" for fade-in, or "out" for a fade-out +effect. +Default is in. +

+
+
start_frame, s
+

Specify the number of the start frame for starting to apply the fade +effect. Default is 0. +

+
+
nb_frames, n
+

The number of frames for which the fade effect has to last. At the end of the +fade-in effect the output video will have the same intensity as the input video, +at the end of the fade-out transition the output video will be filled with the +selected ‘color’. +Default is 25. +

+
+
alpha
+

If set to 1, fade only alpha channel, if one exists on the input. +Default value is 0. +

+
+
start_time, st
+

Specify the timestamp (in seconds) of the frame to start to apply the fade +effect. If both start_frame and start_time are specified, the fade will start at +whichever comes last. Default is 0. +

+
+
duration, d
+

The number of seconds for which the fade effect has to last. At the end of the +fade-in effect the output video will have the same intensity as the input video, +at the end of the fade-out transition the output video will be filled with the +selected ‘color’. +If both duration and nb_frames are specified, duration is used. Default is 0. +

+
+
color, c
+

Specify the color of the fade. Default is "black". +

+
+ + +

30.27.1 Examples

+ +
    +
  • +Fade in first 30 frames of video: +
     
    fade=in:0:30
    +
    + +

    The command above is equivalent to: +

     
    fade=t=in:s=0:n=30
    +
    + +
  • +Fade out last 45 frames of a 200-frame video: +
     
    fade=out:155:45
    +fade=type=out:start_frame=155:nb_frames=45
    +
    + +
  • +Fade in first 25 frames and fade out last 25 frames of a 1000-frame video: +
     
    fade=in:0:25, fade=out:975:25
    +
    + +
  • +Make first 5 frames yellow, then fade in from frame 5-24: +
     
    fade=in:5:20:color=yellow
    +
    + +
  • +Fade in alpha over first 25 frames of video: +
     
    fade=in:0:25:alpha=1
    +
    + +
  • +Make first 5.5 seconds black, then fade in for 0.5 seconds: +
     
    fade=t=in:st=5.5:d=0.5
    +
    + +
+ + +

30.28 field

+ +

Extract a single field from an interlaced image using stride +arithmetic to avoid wasting CPU time. The output frames are marked as +non-interlaced. +

+

The filter accepts the following options: +

+
+
type
+

Specify whether to extract the top (if the value is 0 or +top) or the bottom field (if the value is 1 or +bottom). +

+
+ + +

30.29 fieldmatch

+ +

Field matching filter for inverse telecine. It is meant to reconstruct the +progressive frames from a telecined stream. The filter does not drop duplicated +frames, so to achieve a complete inverse telecine fieldmatch needs to be +followed by a decimation filter such as decimate in the filtergraph. +

+

The separation of the field matching and the decimation is notably motivated by +the possibility of inserting a de-interlacing filter fallback between the two. +If the source has mixed telecined and real interlaced content, +fieldmatch will not be able to match fields for the interlaced parts. +But these remaining combed frames will be marked as interlaced, and thus can be +de-interlaced by a later filter such as yadif before decimation. +

+

In addition to the various configuration options, fieldmatch can take an +optional second stream, activated through the ‘ppsrc’ option. If +enabled, the frames reconstruction will be based on the fields and frames from +this second stream. This allows the first input to be pre-processed in order to +help the various algorithms of the filter, while keeping the output lossless +(assuming the fields are matched properly). Typically, a field-aware denoiser, +or brightness/contrast adjustments can help. +

+

Note that this filter uses the same algorithms as TIVTC/TFM (AviSynth project) and VIVTC/VFM (VapourSynth project). The latter is a lightweight clone of TFM, on which fieldmatch is based. While the semantics and usage are very close, some behaviour and option names can differ.

+

The filter accepts the following options: +

+
+
order
+

Specify the assumed field order of the input stream. Available values are: +

+
+
auto
+

Auto detect parity (use FFmpeg’s internal parity value). +

+
bff
+

Assume bottom field first. +

+
tff
+

Assume top field first. +

+
+ +

Note that it is sometimes recommended not to trust the parity announced by the +stream. +

+

Default value is auto. +

+
+
mode
+

Set the matching mode or strategy to use. ‘pc’ mode is the safest in the +sense that it won’t risk creating jerkiness due to duplicate frames when +possible, but if there are bad edits or blended fields it will end up +outputting combed frames when a good match might actually exist. On the other +hand, ‘pcn_ub’ mode is the most risky in terms of creating jerkiness, +but will almost always find a good frame if there is one. The other values are +all somewhere in between ‘pc’ and ‘pcn_ub’ in terms of risking +jerkiness and creating duplicate frames versus finding good matches in sections +with bad edits, orphaned fields, blended fields, etc. +

+

More details about p/c/n/u/b are available in p/c/n/u/b meaning section. +

+

Available values are: +

+
+
pc
+

2-way matching (p/c) +

+
pc_n
+

2-way matching, and trying 3rd match if still combed (p/c + n) +

+
pc_u
+

2-way matching, and trying 3rd match (same order) if still combed (p/c + u) +

+
pc_n_ub
+

2-way matching, trying 3rd match if still combed, and trying 4th/5th matches if +still combed (p/c + n + u/b) +

+
pcn
+

3-way matching (p/c/n) +

+
pcn_ub
+

3-way matching, and trying 4th/5th matches if all 3 of the original matches are +detected as combed (p/c/n + u/b) +

+
+ +

The parenthesis at the end indicate the matches that would be used for that +mode assuming ‘order’=tff (and ‘field’ on auto or +top). +

+

In terms of speed ‘pc’ mode is by far the fastest and ‘pcn_ub’ is +the slowest. +

+

Default value is pc_n. +

+
+
ppsrc
+

Mark the main input stream as a pre-processed input, and enable the secondary +input stream as the clean source to pick the fields from. See the filter +introduction for more details. It is similar to the ‘clip2’ feature from +VFM/TFM. +

+

Default value is 0 (disabled). +

+
+
field
+

Set the field to match from. It is recommended to set this to the same value as +‘order’ unless you experience matching failures with that setting. In +certain circumstances changing the field that is used to match from can have a +large impact on matching performance. Available values are: +

+
+
auto
+

Automatic (same value as ‘order’). +

+
bottom
+

Match from the bottom field. +

+
top
+

Match from the top field. +

+
+ +

Default value is auto. +

+
+
mchroma
+

Set whether or not chroma is included during the match comparisons. In most +cases it is recommended to leave this enabled. You should set this to 0 +only if your clip has bad chroma problems such as heavy rainbowing or other +artifacts. Setting this to 0 could also be used to speed things up at +the cost of some accuracy. +

+

Default value is 1. +

+
+
y0
+
y1
+

These define an exclusion band which excludes the lines between ‘y0’ and +‘y1’ from being included in the field matching decision. An exclusion +band can be used to ignore subtitles, a logo, or other things that may +interfere with the matching. ‘y0’ sets the starting scan line and +‘y1’ sets the ending line; all lines in between ‘y0’ and +‘y1’ (including ‘y0’ and ‘y1’) will be ignored. Setting +‘y0’ and ‘y1’ to the same value will disable the feature. +‘y0’ and ‘y1’ defaults to 0. +

+
+
scthresh
+

Set the scene change detection threshold as a percentage of maximum change on +the luma plane. Good values are in the [8.0, 14.0] range. Scene change +detection is only relevant in case ‘combmatch’=sc. The range for +‘scthresh’ is [0.0, 100.0]. +

+

Default value is 12.0. +

+
+
combmatch
+

When ‘combmatch’ is not none, fieldmatch will take into account the combed scores of matches when deciding what match to use as the final match. Available values are:

+
+
none
+

No final matching based on combed scores. +

+
sc
+

Combed scores are only used when a scene change is detected. +

+
full
+

Use combed scores all the time. +

+
+ +

Default is sc. +

+
+
combdbg
+

Force fieldmatch to calculate the combed metrics for certain matches and +print them. This setting is known as ‘micout’ in TFM/VFM vocabulary. +Available values are: +

+
+
none
+

No forced calculation. +

+
pcn
+

Force p/c/n calculations. +

+
pcnub
+

Force p/c/n/u/b calculations. +

+
+ +

Default value is none. +

+
+
cthresh
+

This is the area combing threshold used for combed frame detection. This +essentially controls how "strong" or "visible" combing must be to be detected. +Larger values mean combing must be more visible and smaller values mean combing +can be less visible or strong and still be detected. Valid settings are from +-1 (every pixel will be detected as combed) to 255 (no pixel will +be detected as combed). This is basically a pixel difference value. A good +range is [8, 12]. +

+

Default value is 9. +

+
+
chroma
+

Sets whether or not chroma is considered in the combed frame decision. Only +disable this if your source has chroma problems (rainbowing, etc.) that are +causing problems for the combed frame detection with chroma enabled. Actually, +using ‘chroma’=0 is usually more reliable, except for the case +where there is chroma only combing in the source. +

+

Default value is 0. +

+
+
blockx
+
blocky
+

Respectively set the x-axis and y-axis size of the window used during combed +frame detection. This has to do with the size of the area in which +‘combpel’ pixels are required to be detected as combed for a frame to be +declared combed. See the ‘combpel’ parameter description for more info. +Possible values are any number that is a power of 2 starting at 4 and going up +to 512. +

+

Default value is 16. +

+
+
combpel
+

The number of combed pixels inside any of the ‘blocky’ by +‘blockx’ size blocks on the frame for the frame to be detected as +combed. While ‘cthresh’ controls how "visible" the combing must be, this +setting controls "how much" combing there must be in any localized area (a +window defined by the ‘blockx’ and ‘blocky’ settings) on the +frame. Minimum value is 0 and maximum is blocky x blockx (at +which point no frames will ever be detected as combed). This setting is known +as ‘MI’ in TFM/VFM vocabulary. +

+

Default value is 80. +

+
+ +

+

+

30.29.1 p/c/n/u/b meaning

+ + +

30.29.1.1 p/c/n

+ +

We assume the following telecined stream: +

+
 
Top fields:     1 2 2 3 4
+Bottom fields:  1 2 3 4 4
+
+ +

The numbers correspond to the progressive frame the fields relate to. Here, the +first two frames are progressive, the 3rd and 4th are combed, and so on. +

+

When fieldmatch is configured to run a matching from bottom (‘field’=bottom) this is how this input stream gets transformed:

+
 
Input stream:
+                T     1 2 2 3 4
+                B     1 2 3 4 4   <-- matching reference
+
+Matches:              c c n n c
+
+Output stream:
+                T     1 2 3 4 4
+                B     1 2 3 4 4
+
+ +

As a result of the field matching, we can see that some frames get duplicated. +To perform a complete inverse telecine, you need to rely on a decimation filter +after this operation. See for instance the decimate filter. +

+

The same operation now matching from top fields (‘field’=top) +looks like this: +

+
 
Input stream:
+                T     1 2 2 3 4   <-- matching reference
+                B     1 2 3 4 4
+
+Matches:              c c p p c
+
+Output stream:
+                T     1 2 2 3 4
+                B     1 2 2 3 4
+
+ +

In these examples, we can see what p, c and n mean; +basically, they refer to the frame and field of the opposite parity: +

+
    +
  • p matches the field of the opposite parity in the previous frame +
  • c matches the field of the opposite parity in the current frame +
  • n matches the field of the opposite parity in the next frame +
+ + +

30.29.1.2 u/b

+ +

The u and b matching are a bit special in the sense that they match from the opposite parity flag. In the following examples, we assume that we are currently matching the 2nd frame (Top:2, bottom:2). According to the match, an ’x’ is placed above and below each matched field.

+

With bottom matching (‘field’=bottom): +

 
Match:           c         p           n          b          u
+
+                 x       x               x        x          x
+  Top          1 2 2     1 2 2       1 2 2      1 2 2      1 2 2
+  Bottom       1 2 3     1 2 3       1 2 3      1 2 3      1 2 3
+                 x         x           x        x              x
+
+Output frames:
+                 2          1          2          2          2
+                 2          2          2          1          3
+
+ +

With top matching (‘field’=top): +

 
Match:           c         p           n          b          u
+
+                 x         x           x        x              x
+  Top          1 2 2     1 2 2       1 2 2      1 2 2      1 2 2
+  Bottom       1 2 3     1 2 3       1 2 3      1 2 3      1 2 3
+                 x       x               x        x          x
+
+Output frames:
+                 2          2          2          1          2
+                 2          1          3          2          2
+
+ + +

30.29.2 Examples

+ +

Simple IVTC of a top field first telecined stream: +

 
fieldmatch=order=tff:combmatch=none, decimate
+
+ +

Advanced IVTC, with fallback on yadif for still combed frames: +

 
fieldmatch=order=tff:combmatch=full, yadif=deint=interlaced, decimate
+
+ + +

30.30 fieldorder

+ +

Transform the field order of the input video. +

+

This filter accepts the following options: +

+
+
order
+

Output field order. Valid values are tff for top field first or bff +for bottom field first. +

+
+ +

Default value is ‘tff’. +

+

Transformation is achieved by shifting the picture content up or down +by one line, and filling the remaining line with appropriate picture content. +This method is consistent with most broadcast field order converters. +

+

If the input video is not flagged as being interlaced, or it is already +flagged as being of the required output field order then this filter does +not alter the incoming video. +

+

This filter is very useful when converting to or from PAL DV material, +which is bottom field first. +

+

For example: +

 
ffmpeg -i in.vob -vf "fieldorder=bff" out.dv
+
+ + +

30.31 fifo

+ +

Buffer input images and send them when they are requested. +

+

This filter is mainly useful when auto-inserted by the libavfilter +framework. +

+

The filter does not take parameters. +

+

+

+

30.32 format

+ +

Convert the input video to one of the specified pixel formats. +Libavfilter will try to pick one that is supported for the input to +the next filter. +

+

This filter accepts the following parameters: +

+
pix_fmts
+

A ’|’-separated list of pixel format names, for example +"pix_fmts=yuv420p|monow|rgb24". +

+
+
+ + +

30.32.1 Examples

+ +
    +
  • +Convert the input video to the format yuv420p +
     
    format=pix_fmts=yuv420p
    +
    + +

    Convert the input video to any of the formats in the list +

     
    format=pix_fmts=yuv420p|yuv444p|yuv410p
    +
    +
+ +

+

+

30.33 fps

+ +

Convert the video to specified constant frame rate by duplicating or dropping +frames as necessary. +

+

This filter accepts the following named parameters: +

+
fps
+

Desired output frame rate. The default is 25. +

+
+
round
+

Rounding method. +

+

Possible values are: +

+
zero
+

zero round towards 0 +

+
inf
+

round away from 0 +

+
down
+

round towards -infinity +

+
up
+

round towards +infinity +

+
near
+

round to nearest +

+
+

The default is near. +

+
+
start_time
+

Assume the first PTS should be the given value, in seconds. This allows for +padding/trimming at the start of stream. By default, no assumption is made +about the first frame’s expected PTS, so no padding or trimming is done. +For example, this could be set to 0 to pad the beginning with duplicates of +the first frame if a video stream starts after the audio stream or to trim any +frames with a negative PTS. +

+
+
+ +

Alternatively, the options can be specified as a flat string: +fps[:round]. +

+

See also the setpts filter. +

+ +

30.33.1 Examples

+ +
    +
  • +A typical usage in order to set the fps to 25: +
     
    fps=fps=25
    +
    + +
  • +Sets the fps to 24, using abbreviation and rounding method to round to nearest: +
     
    fps=fps=film:round=near
    +
    +
+ + +

30.34 framepack

+ +

Pack two different video streams into a stereoscopic video, setting proper +metadata on supported codecs. The two views should have the same size and +framerate and processing will stop when the shorter video ends. Please note +that you may conveniently adjust view properties with the scale and +fps filters. +

+

This filter accepts the following named parameters: +

+
format
+

Desired packing format. Supported values are: +

+
+
sbs
+

Views are next to each other (default). +

+
+
tab
+

Views are on top of each other. +

+
+
lines
+

Views are packed by line. +

+
+
columns
+

Views are packed by column. +

+
+
frameseq
+

Views are temporally interleaved. +

+
+
+ +
+
+ +

Some examples follow: +

+
 
# Convert left and right views into a frame sequential video.
+ffmpeg -i LEFT -i RIGHT -filter_complex framepack=frameseq OUTPUT
+
+# Convert views into a side-by-side video with the same output resolution as the input.
+ffmpeg -i LEFT -i RIGHT -filter_complex [0:v]scale=w=iw/2[left],[1:v]scale=w=iw/2[right],[left][right]framepack=sbs OUTPUT
+
+ + +

30.35 framestep

+ +

Select one frame every N-th frame. +

+

This filter accepts the following option: +

+
step
+

Select frame after every step frames. +Allowed values are positive integers higher than 0. Default value is 1. +

+
+ +

+

+

30.36 frei0r

+ +

Apply a frei0r effect to the input video. +

+

To enable compilation of this filter you need to install the frei0r +header and configure FFmpeg with --enable-frei0r. +

+

This filter accepts the following options: +

+
+
filter_name
+

The name of the frei0r effect to load. If the environment variable +FREI0R_PATH is defined, the frei0r effect is searched in each one of the +directories specified by the colon-separated list in FREI0R_PATH, +otherwise in the standard frei0r paths, which are in this order: +‘HOME/.frei0r-1/lib/’, ‘/usr/local/lib/frei0r-1/’, +‘/usr/lib/frei0r-1/’. +

+
+
filter_params
+

A ’|’-separated list of parameters to pass to the frei0r effect. +

+
+
+ +

A frei0r effect parameter can be a boolean (whose values are specified +with "y" and "n"), a double, a color (specified by the syntax +R/G/B, (R, G, and B being float +numbers from 0.0 to 1.0) or by a color description specified in the "Color" +section in the ffmpeg-utils manual), a position (specified by the syntax X/Y, +X and Y being float numbers) and a string. +

+

The number and kind of parameters depend on the loaded effect. If an +effect parameter is not specified the default value is set. +

+ +

30.36.1 Examples

+ +
    +
  • +Apply the distort0r effect, set the first two double parameters: +
     
    frei0r=filter_name=distort0r:filter_params=0.5|0.01
    +
    + +
  • +Apply the colordistance effect, take a color as first parameter: +
     
    frei0r=colordistance:0.2/0.3/0.4
    +frei0r=colordistance:violet
    +frei0r=colordistance:0x112233
    +
    + +
  • +Apply the perspective effect, specify the top left and top right image +positions: +
     
    frei0r=perspective:0.2/0.2|0.8/0.2
    +
    +
+ +

For more information see: +http://frei0r.dyne.org +

+ +

30.37 geq

+ +

The filter accepts the following options: +

+
+
lum_expr, lum
+

Set the luminance expression. +

+
cb_expr, cb
+

Set the chrominance blue expression. +

+
cr_expr, cr
+

Set the chrominance red expression. +

+
alpha_expr, a
+

Set the alpha expression. +

+
red_expr, r
+

Set the red expression. +

+
green_expr, g
+

Set the green expression. +

+
blue_expr, b
+

Set the blue expression. +

+
+ +

The colorspace is selected according to the specified options. If one +of the ‘lum_expr’, ‘cb_expr’, or ‘cr_expr’ +options is specified, the filter will automatically select a YCbCr +colorspace. If one of the ‘red_expr’, ‘green_expr’, or +‘blue_expr’ options is specified, it will select an RGB +colorspace. +

+

If one of the chrominance expression is not defined, it falls back on the other +one. If no alpha expression is specified it will evaluate to opaque value. +If none of chrominance expressions are specified, they will evaluate +to the luminance expression. +

+

The expressions can use the following variables and functions: +

+
+
N
+

The sequential number of the filtered frame, starting from 0. +

+
+
X
+
Y
+

The coordinates of the current sample. +

+
+
W
+
H
+

The width and height of the image. +

+
+
SW
+
SH
+

Width and height scale depending on the currently filtered plane. It is the +ratio between the corresponding luma plane number of pixels and the current +plane ones. E.g. for YUV4:2:0 the values are 1,1 for the luma plane, and +0.5,0.5 for chroma planes. +

+
+
T
+

Time of the current frame, expressed in seconds. +

+
+
p(x, y)
+

Return the value of the pixel at location (x,y) of the current +plane. +

+
+
lum(x, y)
+

Return the value of the pixel at location (x,y) of the luminance +plane. +

+
+
cb(x, y)
+

Return the value of the pixel at location (x,y) of the +blue-difference chroma plane. Return 0 if there is no such plane. +

+
+
cr(x, y)
+

Return the value of the pixel at location (x,y) of the +red-difference chroma plane. Return 0 if there is no such plane. +

+
+
r(x, y)
+
g(x, y)
+
b(x, y)
+

Return the value of the pixel at location (x,y) of the +red/green/blue component. Return 0 if there is no such component. +

+
+
alpha(x, y)
+

Return the value of the pixel at location (x,y) of the alpha +plane. Return 0 if there is no such plane. +

+
+ +

For functions, if x and y are outside the area, the value will be +automatically clipped to the closer edge. +

+ +

30.37.1 Examples

+ +
    +
  • +Flip the image horizontally: +
     
    geq=p(W-X\,Y)
    +
    + +
  • +Generate a bidimensional sine wave, with angle PI/3 and a +wavelength of 100 pixels: +
     
    geq=128 + 100*sin(2*(PI/100)*(cos(PI/3)*(X-50*T) + sin(PI/3)*Y)):128:128
    +
    + +
  • +Generate a fancy enigmatic moving light: +
     
    nullsrc=s=256x256,geq=random(1)/hypot(X-cos(N*0.07)*W/2-W/2\,Y-sin(N*0.09)*H/2-H/2)^2*1000000*sin(N*0.02):128:128
    +
    + +
  • +Generate a quick emboss effect: +
     
    format=gray,geq=lum_expr='(p(X,Y)+(256-p(X-4,Y-4)))/2'
    +
    + +
  • +Modify RGB components depending on pixel position: +
     
    geq=r='X/W*r(X,Y)':g='(1-X/W)*g(X,Y)':b='(H-Y)/H*b(X,Y)'
    +
    +
+ + +

30.38 gradfun

+ +

Fix the banding artifacts that are sometimes introduced into nearly flat +regions by truncation to 8bit color depth. +Interpolate the gradients that should go where the bands are, and +dither them. +

+

This filter is designed for playback only. Do not use it prior to +lossy compression, because compression tends to lose the dither and +bring back the bands. +

+

This filter accepts the following options: +

+
+
strength
+

The maximum amount by which the filter will change any one pixel. Also the +threshold for detecting nearly flat regions. Acceptable values range from .51 to +64, default value is 1.2, out-of-range values will be clipped to the valid +range. +

+
+
radius
+

The neighborhood to fit the gradient to. A larger radius makes for smoother +gradients, but also prevents the filter from modifying the pixels near detailed +regions. Acceptable values are 8-32, default value is 16, out-of-range values +will be clipped to the valid range. +

+
+
+ +

Alternatively, the options can be specified as a flat string: +strength[:radius] +

+ +

30.38.1 Examples

+ +
    +
  • +Apply the filter with a 3.5 strength and radius of 8: +
     
    gradfun=3.5:8
    +
    + +
  • +Specify radius, omitting the strength (which will fall-back to the default +value): +
     
    gradfun=radius=8
    +
    + +
+ +

+

+

30.39 haldclut

+ +

Apply a Hald CLUT to a video stream. +

+

First input is the video stream to process, and second one is the Hald CLUT. +The Hald CLUT input can be a simple picture or a complete video stream. +

+

The filter accepts the following options: +

+
+
shortest
+

Force termination when the shortest input terminates. Default is 0. +

+
repeatlast
+

Continue applying the last CLUT after the end of the stream. A value of +0 disables the filter after the last frame of the CLUT is reached. +Default is 1. +

+
+ +

haldclut also has the same interpolation options as lut3d (both +filters share the same internals). +

+

More information about the Hald CLUT can be found on Eskil Steenberg’s website +(Hald CLUT author) at http://www.quelsolaar.com/technology/clut.html. +

+ +

30.39.1 Workflow examples

+ + +

30.39.1.1 Hald CLUT video stream

+ +

Generate an identity Hald CLUT stream altered with various effects: +

 
ffmpeg -f lavfi -i haldclutsrc=8 -vf "hue=H=2*PI*t:s=sin(2*PI*t)+1, curves=cross_process" -t 10 -c:v ffv1 clut.nut
+
+ +

Note: make sure you use a lossless codec. +

+

Then use it with haldclut to apply it on some random stream: +

 
ffmpeg -f lavfi -i mandelbrot -i clut.nut -filter_complex '[0][1] haldclut' -t 20 mandelclut.mkv
+
+ +

The Hald CLUT will be applied to the 10 first seconds (duration of +‘clut.nut’), then the latest picture of that CLUT stream will be applied +to the remaining frames of the mandelbrot stream. +

+ +

30.39.1.2 Hald CLUT with preview

+ +

A Hald CLUT is supposed to be a squared image of Level*Level*Level by +Level*Level*Level pixels. For a given Hald CLUT, FFmpeg will select the +biggest possible square starting at the top left of the picture. The remaining +padding pixels (bottom or right) will be ignored. This area can be used to add +a preview of the Hald CLUT. +

+

Typically, the following generated Hald CLUT will be supported by the +haldclut filter: +

+
 
ffmpeg -f lavfi -i haldclutsrc=8 -vf "
+   pad=iw+320 [padded_clut];
+   smptebars=s=320x256, split [a][b];
+   [padded_clut][a] overlay=W-320:h, curves=color_negative [main];
+   [main][b] overlay=W-320" -frames:v 1 clut.png
+
+ +

It contains the original and a preview of the effect of the CLUT: SMPTE color +bars are displayed on the right-top, and below the same color bars processed by +the color changes. +

+

Then, the effect of this Hald CLUT can be visualized with: +

 
ffplay input.mkv -vf "movie=clut.png, [in] haldclut"
+
+ + +

30.40 hflip

+ +

Flip the input video horizontally. +

+

For example to horizontally flip the input video with ffmpeg: +

 
ffmpeg -i in.avi -vf "hflip" out.avi
+
+ + +

30.41 histeq

+

This filter applies a global color histogram equalization on a +per-frame basis. +

+

It can be used to correct video that has a compressed range of pixel +intensities. The filter redistributes the pixel intensities to +equalize their distribution across the intensity range. It may be +viewed as an "automatically adjusting contrast filter". This filter is +useful only for correcting degraded or poorly captured source +video. +

+

The filter accepts the following options: +

+
+
strength
+

Determine the amount of equalization to be applied. As the strength +is reduced, the distribution of pixel intensities more-and-more +approaches that of the input frame. The value must be a float number +in the range [0,1] and defaults to 0.200. +

+
+
intensity
+

Set the maximum intensity that can be generated and scale the output +values appropriately. The strength should be set as desired and then +the intensity can be limited if needed to avoid washing-out. The value +must be a float number in the range [0,1] and defaults to 0.210. +

+
+
antibanding
+

Set the antibanding level. If enabled the filter will randomly vary +the luminance of output pixels by a small amount to avoid banding of +the histogram. Possible values are none, weak or +strong. It defaults to none. +

+
+ + +

30.42 histogram

+ +

Compute and draw a color distribution histogram for the input video. +

+

The computed histogram is a representation of the color component +distribution in an image. +

+

The filter accepts the following options: +

+
+
mode
+

Set histogram mode. +

+

It accepts the following values: +

+
levels
+

Standard histogram that displays the color components distribution in an +image. Displays color graph for each color component. Shows distribution of +the Y, U, V, A or R, G, B components, depending on input format, in the +current frame. Below each graph a color component scale meter is shown. +

+
+
color
+

Displays chroma values (U/V color placement) in a two dimensional +graph (which is called a vectorscope). The brighter a pixel in the +vectorscope, the more pixels of the input frame correspond to that pixel +(i.e., more pixels have this chroma value). The V component is displayed on +the horizontal (X) axis, with the leftmost side being V = 0 and the rightmost +side being V = 255. The U component is displayed on the vertical (Y) axis, +with the top representing U = 0 and the bottom representing U = 255. +

+

The position of a white pixel in the graph corresponds to the chroma value of +a pixel of the input clip. The graph can therefore be used to read the hue +(color flavor) and the saturation (the dominance of the hue in the color). As +the hue of a color changes, it moves around the square. At the center of the +square the saturation is zero, which means that the corresponding pixel has no +color. If the amount of a specific color is increased (while leaving the other +colors unchanged) the saturation increases, and the indicator moves towards +the edge of the square. +

+
+
color2
+

Chroma values in vectorscope, similar as color but actual chroma values +are displayed. +

+
+
waveform
+

Per row/column color component graph. In row mode, the graph on the left side +represents color component value 0 and the right side represents value = 255. +In column mode, the top side represents color component value = 0 and bottom +side represents value = 255. +

+
+

Default value is levels. +

+
+
level_height
+

Set height of level in levels. Default value is 200. +Allowed range is [50, 2048]. +

+
+
scale_height
+

Set height of color scale in levels. Default value is 12. +Allowed range is [0, 40]. +

+
+
step
+

Set step for waveform mode. Smaller values are useful to find out how +many values of the same luminance are distributed across input rows/columns. +Default value is 10. Allowed range is [1, 255]. +

+
+
waveform_mode
+

Set mode for waveform. Can be either row, or column. +Default is row. +

+
+
waveform_mirror
+

Set mirroring mode for waveform. 0 means unmirrored, 1 +means mirrored. In mirrored mode, higher values will be represented on the left +side for row mode and at the top for column mode. Default is +0 (unmirrored). +

+
+
display_mode
+

Set display mode for waveform and levels. +It accepts the following values: +

+
parade
+

Display separate graph for the color components side by side in +row waveform mode or one below the other in column waveform mode +for waveform histogram mode. For levels histogram mode, +per color component graphs are placed below each other. +

+

Using this display mode in waveform histogram mode makes it easy to +spot color casts in the highlights and shadows of an image, by comparing the +contours of the top and the bottom graphs of each waveform. Since whites, +grays, and blacks are characterized by exactly equal amounts of red, green, +and blue, neutral areas of the picture should display three waveforms of +roughly equal width/height. If not, the correction is easy to perform by +making level adjustments to the three waveforms. +

+
+
overlay
+

Presents information identical to that in the parade, except +that the graphs representing color components are superimposed directly +over one another. +

+

This display mode in waveform histogram mode makes it easier to spot +relative differences or similarities in overlapping areas of the color +components that are supposed to be identical, such as neutral whites, grays, +or blacks. +

+
+

Default is parade. +

+
+
levels_mode
+

Set mode for levels. Can be either linear, or logarithmic. +Default is linear. +

+
+ + +

30.42.1 Examples

+ +
    +
  • +Calculate and draw histogram: +
     
    ffplay -i input -vf histogram
    +
    + +
+ +

+

+

30.43 hqdn3d

+ +

High precision/quality 3d denoise filter. This filter aims to reduce +image noise producing smooth images and making still images really +still. It should enhance compressibility. +

+

It accepts the following optional parameters: +

+
+
luma_spatial
+

a non-negative float number which specifies spatial luma strength, +defaults to 4.0 +

+
+
chroma_spatial
+

a non-negative float number which specifies spatial chroma strength, +defaults to 3.0*luma_spatial/4.0 +

+
+
luma_tmp
+

a float number which specifies luma temporal strength, defaults to +6.0*luma_spatial/4.0 +

+
+
chroma_tmp
+

a float number which specifies chroma temporal strength, defaults to +luma_tmp*chroma_spatial/luma_spatial +

+
+ + +

30.44 hue

+ +

Modify the hue and/or the saturation of the input. +

+

This filter accepts the following options: +

+
+
h
+

Specify the hue angle as a number of degrees. It accepts an expression, +and defaults to "0". +

+
+
s
+

Specify the saturation in the [-10,10] range. It accepts an expression and +defaults to "1". +

+
+
H
+

Specify the hue angle as a number of radians. It accepts an +expression, and defaults to "0". +

+
+
b
+

Specify the brightness in the [-10,10] range. It accepts an expression and +defaults to "0". +

+
+ +

h’ and ‘H’ are mutually exclusive, and can’t be +specified at the same time. +

+

The ‘b’, ‘h’, ‘H’ and ‘s’ option values are +expressions containing the following constants: +

+
+
n
+

frame count of the input frame starting from 0 +

+
+
pts
+

presentation timestamp of the input frame expressed in time base units +

+
+
r
+

frame rate of the input video, NAN if the input frame rate is unknown +

+
+
t
+

timestamp expressed in seconds, NAN if the input timestamp is unknown +

+
+
tb
+

time base of the input video +

+
+ + +

30.44.1 Examples

+ +
    +
  • +Set the hue to 90 degrees and the saturation to 1.0: +
     
    hue=h=90:s=1
    +
    + +
  • +Same command but expressing the hue in radians: +
     
    hue=H=PI/2:s=1
    +
    + +
  • +Rotate hue and make the saturation swing between 0 +and 2 over a period of 1 second: +
     
    hue="H=2*PI*t: s=sin(2*PI*t)+1"
    +
    + +
  • +Apply a 3 seconds saturation fade-in effect starting at 0: +
     
    hue="s=min(t/3\,1)"
    +
    + +

    The general fade-in expression can be written as: +

     
hue="s=max(0\, min(1\, (t-START)/DURATION))"
    +
    + +
  • +Apply a 3 seconds saturation fade-out effect starting at 5 seconds: +
     
    hue="s=max(0\, min(1\, (8-t)/3))"
    +
    + +

    The general fade-out expression can be written as: +

     
    hue="s=max(0\, min(1\, (START+DURATION-t)/DURATION))"
    +
    + +
+ + +

30.44.2 Commands

+ +

This filter supports the following commands: +

+
b
+
s
+
h
+
H
+

Modify the hue and/or the saturation and/or brightness of the input video. +The command accepts the same syntax of the corresponding option. +

+

If the specified expression is not valid, it is kept at its current +value. +

+
+ + +

30.45 idet

+ +

Detect video interlacing type. +

+

This filter tries to detect if the input is interlaced or progressive, +top or bottom field first. +

+

The filter accepts the following options: +

+
+
intl_thres
+

Set interlacing threshold. +

+
prog_thres
+

Set progressive threshold. +

+
+ + +

30.46 il

+ +

Deinterleave or interleave fields. +

+

This filter allows one to process interlaced images fields without +deinterlacing them. Deinterleaving splits the input frame into 2 +fields (so called half pictures). Odd lines are moved to the top +half of the output image, even lines to the bottom half. +You can process (filter) them independently and then re-interleave them. +

+

The filter accepts the following options: +

+
+
luma_mode, l
+
chroma_mode, c
+
alpha_mode, a
+

Available values for luma_mode, chroma_mode and +alpha_mode are: +

+
+
none
+

Do nothing. +

+
+
deinterleave, d
+

Deinterleave fields, placing one above the other. +

+
+
interleave, i
+

Interleave fields. Reverse the effect of deinterleaving. +

+
+

Default value is none. +

+
+
luma_swap, ls
+
chroma_swap, cs
+
alpha_swap, as
+

Swap luma/chroma/alpha fields. Exchange even & odd lines. Default value is 0. +

+
+ + +

30.47 interlace

+ +

Simple interlacing filter from progressive contents. This interleaves upper (or +lower) lines from odd frames with lower (or upper) lines from even frames, +halving the frame rate and preserving image height. A vertical lowpass filter +is always applied in order to avoid twitter effects and reduce moiré patterns. +

+
 
   Original        Original             New Frame
+   Frame 'j'      Frame 'j+1'             (tff)
+  ==========      ===========       ==================
+    Line 0  -------------------->    Frame 'j' Line 0
+    Line 1          Line 1  ---->   Frame 'j+1' Line 1
+    Line 2 --------------------->    Frame 'j' Line 2
+    Line 3          Line 3  ---->   Frame 'j+1' Line 3
+     ...             ...                   ...
+New Frame + 1 will be generated by Frame 'j+2' and Frame 'j+3' and so on
+
+ +

It accepts the following optional parameters: +

+
+
scan
+

determines whether the interlaced frame is taken from the even (tff - default) +or odd (bff) lines of the progressive frame. +

+
+ + +

30.48 kerndeint

+ +

Deinterlace input video by applying Donald Graft’s adaptive kernel +deinterlacing. It works on interlaced parts of a video to produce +progressive frames. +

+

The description of the accepted parameters follows. +

+
+
thresh
+

Set the threshold which affects the filter’s tolerance when +determining if a pixel line must be processed. It must be an integer +in the range [0,255] and defaults to 10. A value of 0 will result in +applying the process on every pixel. +

+
+
map
+

Paint pixels exceeding the threshold value to white if set to 1. +Default is 0. +

+
+
order
+

Set the fields order. Swap fields if set to 1, leave fields alone if +0. Default is 0. +

+
+
sharp
+

Enable additional sharpening if set to 1. Default is 0. +

+
+
twoway
+

Enable twoway sharpening if set to 1. Default is 0. +

+
+ + +

30.48.1 Examples

+ +
    +
  • +Apply default values: +
     
    kerndeint=thresh=10:map=0:order=0:sharp=0:twoway=0
    +
    + +
  • +Enable additional sharpening: +
     
    kerndeint=sharp=1
    +
    + +
  • +Paint processed pixels in white: +
     
    kerndeint=map=1
    +
    +
+ +

+

+

30.49 lut3d

+ +

Apply a 3D LUT to an input video. +

+

The filter accepts the following options: +

+
+
file
+

Set the 3D LUT file name. +

+

Currently supported formats: +

+
3dl
+

AfterEffects +

+
cube
+

Iridas +

+
dat
+

DaVinci +

+
m3d
+

Pandora +

+
+
+
interp
+

Select interpolation mode. +

+

Available values are: +

+
+
nearest
+

Use values from the nearest defined point. +

+
trilinear
+

Interpolate values using the 8 points defining a cube. +

+
tetrahedral
+

Interpolate values using a tetrahedron. +

+
+
+
+ + +

30.50 lut, lutrgb, lutyuv

+ +

Compute a look-up table for binding each pixel component input value +to an output value, and apply it to input video. +

+

lutyuv applies a lookup table to a YUV input video, lutrgb +to an RGB input video. +

+

These filters accept the following options: +

+
c0
+

set first pixel component expression +

+
c1
+

set second pixel component expression +

+
c2
+

set third pixel component expression +

+
c3
+

set fourth pixel component expression, corresponds to the alpha component +

+
+
r
+

set red component expression +

+
g
+

set green component expression +

+
b
+

set blue component expression +

+
a
+

alpha component expression +

+
+
y
+

set Y/luminance component expression +

+
u
+

set U/Cb component expression +

+
v
+

set V/Cr component expression +

+
+ +

Each of them specifies the expression to use for computing the lookup table for +the corresponding pixel component values. +

+

The exact component associated to each of the c* options depends on the +format in input. +

+

The lut filter requires either YUV or RGB pixel formats in input, +lutrgb requires RGB pixel formats in input, and lutyuv requires YUV. +

+

The expressions can contain the following constants and functions: +

+
+
w
+
h
+

the input width and height +

+
+
val
+

input value for the pixel component +

+
+
clipval
+

the input value clipped in the minval-maxval range +

+
+
maxval
+

maximum value for the pixel component +

+
+
minval
+

minimum value for the pixel component +

+
+
negval
+

the negated value for the pixel component value clipped in the +minval-maxval range , it corresponds to the expression +"maxval-clipval+minval" +

+
+
clip(val)
+

the computed value in val clipped in the +minval-maxval range +

+
+
gammaval(gamma)
+

the computed gamma correction value of the pixel component value +clipped in the minval-maxval range, corresponds to the +expression +"pow((clipval-minval)/(maxval-minval)\,gamma)*(maxval-minval)+minval" +

+
+
+ +

All expressions default to "val". +

+ +

30.50.1 Examples

+ +
    +
  • +Negate input video: +
     
    lutrgb="r=maxval+minval-val:g=maxval+minval-val:b=maxval+minval-val"
    +lutyuv="y=maxval+minval-val:u=maxval+minval-val:v=maxval+minval-val"
    +
    + +

    The above is the same as: +

     
    lutrgb="r=negval:g=negval:b=negval"
    +lutyuv="y=negval:u=negval:v=negval"
    +
    + +
  • +Negate luminance: +
     
    lutyuv=y=negval
    +
    + +
  • +Remove chroma components, turns the video into a graytone image: +
     
    lutyuv="u=128:v=128"
    +
    + +
  • +Apply a luma burning effect: +
     
    lutyuv="y=2*val"
    +
    + +
  • +Remove green and blue components: +
     
    lutrgb="g=0:b=0"
    +
    + +
  • +Set a constant alpha channel value on input: +
     
    format=rgba,lutrgb=a="maxval-minval/2"
    +
    + +
  • +Correct luminance gamma by a 0.5 factor: +
     
    lutyuv=y=gammaval(0.5)
    +
    + +
  • +Discard least significant bits of luma: +
     
    lutyuv=y='bitand(val, 128+64+32)'
    +
    +
+ + +

30.51 mergeplanes

+ +

Merge color channel components from several video streams. +

+

The filter accepts up to 4 input streams, and merges the selected input +planes to the output video. +

+

This filter accepts the following options: +

+
mapping
+

Set input to output plane mapping. Default is 0. +

+

The mapping is specified as a bitmap. It should be specified as a +hexadecimal number in the form 0xAa[Bb[Cc[Dd]]]. ’Aa’ describes the +mapping for the first plane of the output stream. ’A’ sets the number of +the input stream to use (from 0 to 3), and ’a’ the plane number of the +corresponding input to use (from 0 to 3). The rest of the mapping is +similar, ’Bb’ describes the mapping for the output stream second +plane, ’Cc’ describes the mapping for the output stream third plane and +’Dd’ describes the mapping for the output stream fourth plane. +

+
+
format
+

Set output pixel format. Default is yuva444p. +

+
+ + +

30.51.1 Examples

+ +
    +
  • +Merge three gray video streams of same width and height into single video stream: +
     
    [a0][a1][a2]mergeplanes=0x001020:yuv444p
    +
    + +
  • +Merge 1st yuv444p stream and 2nd gray video stream into yuva444p video stream: +
     
    [a0][a1]mergeplanes=0x00010210:yuva444p
    +
    + +
  • +Swap Y and A plane in yuva444p stream: +
     
    format=yuva444p,mergeplanes=0x03010200:yuva444p
    +
    + +
  • +Swap U and V plane in yuv420p stream: +
     
    format=yuv420p,mergeplanes=0x000201:yuv420p
    +
    + +
  • +Cast a rgb24 clip to yuv444p: +
     
    format=rgb24,mergeplanes=0x000102:yuv444p
    +
    +
+ + +

30.52 mcdeint

+ +

Apply motion-compensation deinterlacing. +

+

It needs one field per frame as input and must thus be used together +with yadif=1/3 or equivalent. +

+

This filter accepts the following options: +

+
mode
+

Set the deinterlacing mode. +

+

It accepts one of the following values: +

+
fast
+
medium
+
slow
+

use iterative motion estimation +

+
extra_slow
+

like ‘slow’, but use multiple reference frames. +

+
+

Default value is ‘fast’. +

+
+
parity
+

Set the picture field parity assumed for the input video. It must be +one of the following values: +

+
+
0, tff
+

assume top field first +

+
1, bff
+

assume bottom field first +

+
+ +

Default value is ‘bff’. +

+
+
qp
+

Set per-block quantization parameter (QP) used by the internal +encoder. +

+

Higher values should result in a smoother motion vector field but less +optimal individual vectors. Default value is 1. +

+
+ + +

30.53 mp

+ +

Apply an MPlayer filter to the input video. +

+

This filter provides a wrapper around some of the filters of +MPlayer/MEncoder. +

+

This wrapper is considered experimental. Some of the wrapped filters +may not work properly and we may drop support for them, as they will +be implemented natively into FFmpeg. Thus you should avoid +depending on them when writing portable scripts. +

+

The filter accepts the parameters: +filter_name[:=]filter_params +

+

filter_name is the name of a supported MPlayer filter, +filter_params is a string containing the parameters accepted by +the named filter. +

+

The list of the currently supported filters follows: +

+
eq2
+
eq
+
fspp
+
ilpack
+
pp7
+
softpulldown
+
uspp
+
+ +

The parameter syntax and behavior for the listed filters are the same +of the corresponding MPlayer filters. For detailed instructions check +the "VIDEO FILTERS" section in the MPlayer manual. +

+ +

30.53.1 Examples

+ +
    +
  • +Adjust gamma, brightness, contrast: +
     
    mp=eq2=1.0:2:0.5
    +
    +
+ +

See also mplayer(1), http://www.mplayerhq.hu/. +

+ +

30.54 mpdecimate

+ +

Drop frames that do not differ greatly from the previous frame in +order to reduce frame rate. +

+

The main use of this filter is for very-low-bitrate encoding +(e.g. streaming over dialup modem), but it could in theory be used for +fixing movies that were inverse-telecined incorrectly. +

+

A description of the accepted options follows. +

+
+
max
+

Set the maximum number of consecutive frames which can be dropped (if +positive), or the minimum interval between dropped frames (if +negative). If the value is 0, the frame is dropped regardless of the +number of previous sequentially dropped frames. +

+

Default value is 0. +

+
+
hi
+
lo
+
frac
+

Set the dropping threshold values. +

+

Values for ‘hi’ and ‘lo’ are for 8x8 pixel blocks and +represent actual pixel value differences, so a threshold of 64 +corresponds to 1 unit of difference for each pixel, or the same spread +out differently over the block. +

+

A frame is a candidate for dropping if no 8x8 blocks differ by more +than a threshold of ‘hi’, and if no more than ‘frac’ blocks (1 +meaning the whole image) differ by more than a threshold of ‘lo’. +

+

Default value for ‘hi’ is 64*12, default value for ‘lo’ is +64*5, and default value for ‘frac’ is 0.33. +

+
+ + + +

30.55 negate

+ +

Negate input video. +

+

This filter accepts an integer in input, if non-zero it negates the +alpha component (if available). The default value in input is 0. +

+ +

30.56 noformat

+ +

Force libavfilter not to use any of the specified pixel formats for the +input to the next filter. +

+

This filter accepts the following parameters: +

+
pix_fmts
+

A ’|’-separated list of pixel format names, for example +"pix_fmts=yuv420p|monow|rgb24". +

+
+
+ + +

30.56.1 Examples

+ +
    +
  • +Force libavfilter to use a format different from yuv420p for the +input to the vflip filter: +
     
    noformat=pix_fmts=yuv420p,vflip
    +
    + +
  • +Convert the input video to any of the formats not contained in the list: +
     
    noformat=yuv420p|yuv444p|yuv410p
    +
    +
+ + +

30.57 noise

+ +

Add noise on video input frame. +

+

The filter accepts the following options: +

+
+
all_seed
+
c0_seed
+
c1_seed
+
c2_seed
+
c3_seed
+

Set noise seed for specific pixel component or all pixel components in case +of all_seed. Default value is 123457. +

+
+
all_strength, alls
+
c0_strength, c0s
+
c1_strength, c1s
+
c2_strength, c2s
+
c3_strength, c3s
+

Set noise strength for specific pixel component or all pixel components in case +all_strength. Default value is 0. Allowed range is [0, 100]. +

+
+
all_flags, allf
+
c0_flags, c0f
+
c1_flags, c1f
+
c2_flags, c2f
+
c3_flags, c3f
+

Set pixel component flags or set flags for all components if all_flags. +Available values for component flags are: +

+
a
+

averaged temporal noise (smoother) +

+
p
+

mix random noise with a (semi)regular pattern +

+
t
+

temporal noise (noise pattern changes between frames) +

+
u
+

uniform noise (gaussian otherwise) +

+
+
+
+ + +

30.57.1 Examples

+ +

Add temporal and uniform noise to input video: +

 
noise=alls=20:allf=t+u
+
+ + +

30.58 null

+ +

Pass the video source unchanged to the output. +

+ +

30.59 ocv

+ +

Apply video transform using libopencv. +

+

To enable this filter install libopencv library and headers and +configure FFmpeg with --enable-libopencv. +

+

This filter accepts the following parameters: +

+
+
filter_name
+

The name of the libopencv filter to apply. +

+
+
filter_params
+

The parameters to pass to the libopencv filter. If not specified the default +values are assumed. +

+
+
+ +

Refer to the official libopencv documentation for more precise +information: +http://opencv.willowgarage.com/documentation/c/image_filtering.html +

+

Follows the list of supported libopencv filters. +

+

+

+

30.59.1 dilate

+ +

Dilate an image by using a specific structuring element. +This filter corresponds to the libopencv function cvDilate. +

+

It accepts the parameters: struct_el|nb_iterations. +

+

struct_el represents a structuring element, and has the syntax: +colsxrows+anchor_xxanchor_y/shape +

+

cols and rows represent the number of columns and rows of +the structuring element, anchor_x and anchor_y the anchor +point, and shape the shape for the structuring element, and +can be one of the values "rect", "cross", "ellipse", "custom". +

+

If the value for shape is "custom", it must be followed by a +string of the form "=filename". The file with name +filename is assumed to represent a binary image, with each +printable character corresponding to a bright pixel. When a custom +shape is used, cols and rows are ignored, the number +of columns and rows of the read file are assumed instead. +

+

The default value for struct_el is "3x3+0x0/rect". +

+

nb_iterations specifies the number of times the transform is +applied to the image, and defaults to 1. +

+

Some examples follow: +

 
# use the default values
+ocv=dilate
+
+# dilate using a structuring element with a 5x5 cross, iterate two times
+ocv=filter_name=dilate:filter_params=5x5+2x2/cross|2
+
+# read the shape from the file diamond.shape, iterate two times
+# the file diamond.shape may contain a pattern of characters like this:
+#   *
+#  ***
+# *****
+#  ***
+#   *
+# the specified cols and rows are ignored (but not the anchor point coordinates)
+ocv=dilate:0x0+2x2/custom=diamond.shape|2
+
+ + +

30.59.2 erode

+ +

Erode an image by using a specific structuring element. +This filter corresponds to the libopencv function cvErode. +

+

The filter accepts the parameters: struct_el:nb_iterations, +with the same syntax and semantics as the dilate filter. +

+ +

30.59.3 smooth

+ +

Smooth the input video. +

+

The filter takes the following parameters: +type|param1|param2|param3|param4. +

+

type is the type of smooth filter to apply, and can be one of +the following values: "blur", "blur_no_scale", "median", "gaussian", +"bilateral". The default value is "gaussian". +

+

param1, param2, param3, and param4 are +parameters whose meanings depend on smooth type. param1 and +param2 accept integer positive values or 0, param3 and +param4 accept float values. +

+

The default value for param1 is 3, the default value for the +other parameters is 0. +

+

These parameters correspond to the parameters assigned to the +libopencv function cvSmooth. +

+

+

+

30.60 overlay

+ +

Overlay one video on top of another. +

+

It takes two inputs and one output, the first input is the "main" +video on which the second input is overlayed. +

+

This filter accepts the following parameters: +

+

A description of the accepted options follows. +

+
+
x
+
y
+

Set the expression for the x and y coordinates of the overlayed video +on the main video. Default value is "0" for both expressions. In case +the expression is invalid, it is set to a huge value (meaning that the +overlay will not be displayed within the output visible area). +

+
+
eof_action
+

The action to take when EOF is encountered on the secondary input, accepts one +of the following values: +

+
+
repeat
+

repeat the last frame (the default) +

+
endall
+

end both streams +

+
pass
+

pass through the main input +

+
+ +
+
eval
+

Set when the expressions for ‘x’, and ‘y’ are evaluated. +

+

It accepts the following values: +

+
init
+

only evaluate expressions once during the filter initialization or +when a command is processed +

+
+
frame
+

evaluate expressions for each incoming frame +

+
+ +

Default value is ‘frame’. +

+
+
shortest
+

If set to 1, force the output to terminate when the shortest input +terminates. Default value is 0. +

+
+
format
+

Set the format for the output video. +

+

It accepts the following values: +

+
yuv420
+

force YUV420 output +

+
+
yuv422
+

force YUV422 output +

+
+
yuv444
+

force YUV444 output +

+
+
rgb
+

force RGB output +

+
+ +

Default value is ‘yuv420’. +

+
+
rgb (deprecated)
+

If set to 1, force the filter to accept inputs in the RGB +color space. Default value is 0. This option is deprecated, use +‘format’ instead. +

+
+
repeatlast
+

If set to 1, force the filter to draw the last overlay frame over the +main input until the end of the stream. A value of 0 disables this +behavior. Default value is 1. +

+
+ +

The ‘x’, and ‘y’ expressions can contain the following +parameters. +

+
+
main_w, W
+
main_h, H
+

main input width and height +

+
+
overlay_w, w
+
overlay_h, h
+

overlay input width and height +

+
+
x
+
y
+

the computed values for x and y. They are evaluated for +each new frame. +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values of the output +format. For example for the pixel format "yuv422p" hsub is 2 and +vsub is 1. +

+
+
n
+

the number of input frame, starting from 0 +

+
+
pos
+

the position in the file of the input frame, NAN if unknown +

+
+
t
+

timestamp expressed in seconds, NAN if the input timestamp is unknown +

+
+
+ +

Note that the n, pos, t variables are available only +when evaluation is done per frame, and will evaluate to NAN +when ‘eval’ is set to ‘init’. +

+

Be aware that frames are taken from each input video in timestamp +order, hence, if their initial timestamps differ, it is a good idea +to pass the two inputs through a setpts=PTS-STARTPTS filter to +have them begin in the same zero timestamp, as the example for +the movie filter does. +

+

You can chain together more overlays but you should test the +efficiency of such approach. +

+ +

30.60.1 Commands

+ +

This filter supports the following commands: +

+
x
+
y
+

Modify the x and y of the overlay input. +The command accepts the same syntax of the corresponding option. +

+

If the specified expression is not valid, it is kept at its current +value. +

+
+ + +

30.60.2 Examples

+ +
    +
  • +Draw the overlay at 10 pixels from the bottom right corner of the main +video: +
     
    overlay=main_w-overlay_w-10:main_h-overlay_h-10
    +
    + +

    Using named options the example above becomes: +

     
    overlay=x=main_w-overlay_w-10:y=main_h-overlay_h-10
    +
    + +
  • +Insert a transparent PNG logo in the bottom left corner of the input, +using the ffmpeg tool with the -filter_complex option: +
     
    ffmpeg -i input -i logo -filter_complex 'overlay=10:main_h-overlay_h-10' output
    +
    + +
  • +Insert 2 different transparent PNG logos (second logo on bottom +right corner) using the ffmpeg tool: +
     
    ffmpeg -i input -i logo1 -i logo2 -filter_complex 'overlay=x=10:y=H-h-10,overlay=x=W-w-10:y=H-h-10' output
    +
    + +
  • +Add a transparent color layer on top of the main video, WxH +must specify the size of the main input to the overlay filter: +
     
    color=color=red@.3:size=WxH [over]; [in][over] overlay [out]
    +
    + +
  • +Play an original video and a filtered version (here with the deshake +filter) side by side using the ffplay tool: +
     
    ffplay input.avi -vf 'split[a][b]; [a]pad=iw*2:ih[src]; [b]deshake[filt]; [src][filt]overlay=w'
    +
    + +

    The above command is the same as: +

     
    ffplay input.avi -vf 'split[b], pad=iw*2[src], [b]deshake, [src]overlay=w'
    +
    + +
  • +Make a sliding overlay appearing from the left to the right top part of the +screen starting since time 2: +
     
    overlay=x='if(gte(t,2), -w+(t-2)*20, NAN)':y=0
    +
    + +
  • +Compose output by putting two input videos side to side: +
     
    ffmpeg -i left.avi -i right.avi -filter_complex "
    +nullsrc=size=200x100 [background];
    +[0:v] setpts=PTS-STARTPTS, scale=100x100 [left];
    +[1:v] setpts=PTS-STARTPTS, scale=100x100 [right];
    +[background][left]       overlay=shortest=1       [background+left];
    +[background+left][right] overlay=shortest=1:x=100 [left+right]
    +"
    +
    + +
  • +mask 10-20 seconds of a video by applying the delogo filter to a section +
     
    ffmpeg -i test.avi -codec:v:0 wmv2 -ar 11025 -b:v 9000k
    +-vf '[in]split[split_main][split_delogo];[split_delogo]trim=start=360:end=371,delogo=0:0:640:480[delogoed];[split_main][delogoed]overlay=eof_action=pass[out]'
    +masked.avi
    +
    + +
  • +Chain several overlays in cascade: +
     
    nullsrc=s=200x200 [bg];
    +testsrc=s=100x100, split=4 [in0][in1][in2][in3];
    +[in0] lutrgb=r=0, [bg]   overlay=0:0     [mid0];
    +[in1] lutrgb=g=0, [mid0] overlay=100:0   [mid1];
    +[in2] lutrgb=b=0, [mid1] overlay=0:100   [mid2];
    +[in3] null,       [mid2] overlay=100:100 [out0]
    +
    + +
+ + +

30.61 owdenoise

+ +

Apply Overcomplete Wavelet denoiser. +

+

The filter accepts the following options: +

+
+
depth
+

Set depth. +

+

Larger depth values will denoise lower frequency components more, but +slow down filtering. +

+

Must be an int in the range 8-16, default is 8. +

+
+
luma_strength, ls
+

Set luma strength. +

+

Must be a double value in the range 0-1000, default is 1.0. +

+
+
chroma_strength, cs
+

Set chroma strength. +

+

Must be a double value in the range 0-1000, default is 1.0. +

+
+ + +

30.62 pad

+ +

Add paddings to the input image, and place the original input at the +given coordinates x, y. +

+

This filter accepts the following parameters: +

+
+
width, w
+
height, h
+

Specify an expression for the size of the output image with the +paddings added. If the value for width or height is 0, the +corresponding input size is used for the output. +

+

The width expression can reference the value set by the +height expression, and vice versa. +

+

The default value of width and height is 0. +

+
+
x
+
y
+

Specify an expression for the offsets where to place the input image +in the padded area with respect to the top/left border of the output +image. +

+

The x expression can reference the value set by the y +expression, and vice versa. +

+

The default value of x and y is 0. +

+
+
color
+

Specify the color of the padded area. For the syntax of this option, +check the "Color" section in the ffmpeg-utils manual. +

+

The default value of color is "black". +

+
+ +

The value for the width, height, x, and y +options are expressions containing the following constants: +

+
+
in_w
+
in_h
+

the input video width and height +

+
+
iw
+
ih
+

same as in_w and in_h +

+
+
out_w
+
out_h
+

the output width and height, that is the size of the padded area as +specified by the width and height expressions +

+
+
ow
+
oh
+

same as out_w and out_h +

+
+
x
+
y
+

x and y offsets as specified by the x and y +expressions, or NAN if not yet specified +

+
+
a
+

same as iw / ih +

+
+
sar
+

input sample aspect ratio +

+
+
dar
+

input display aspect ratio, it is the same as (iw / ih) * sar +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+ + +

30.62.1 Examples

+ +
    +
  • +Add paddings with color "violet" to the input video. Output video +size is 640x480, the top-left corner of the input video is placed at +column 0, row 40: +
     
    pad=640:480:0:40:violet
    +
    + +

    The example above is equivalent to the following command: +

     
    pad=width=640:height=480:x=0:y=40:color=violet
    +
    + +
  • +Pad the input to get an output with dimensions increased by 3/2, +and put the input video at the center of the padded area: +
     
    pad="3/2*iw:3/2*ih:(ow-iw)/2:(oh-ih)/2"
    +
    + +
  • +Pad the input to get a squared output with size equal to the maximum +value between the input width and height, and put the input video at +the center of the padded area: +
     
    pad="max(iw\,ih):ow:(ow-iw)/2:(oh-ih)/2"
    +
    + +
  • +Pad the input to get a final w/h ratio of 16:9: +
     
    pad="ih*16/9:ih:(ow-iw)/2:(oh-ih)/2"
    +
    + +
  • +In case of anamorphic video, in order to set the output display aspect +correctly, it is necessary to use sar in the expression, +according to the relation: +
     
    (ih * X / ih) * sar = output_dar
    +X = output_dar / sar
    +
    + +

    Thus the previous example needs to be modified to: +

     
    pad="ih*16/9/sar:ih:(ow-iw)/2:(oh-ih)/2"
    +
    + +
  • +Double output size and put the input video in the bottom-right +corner of the output padded area: +
     
    pad="2*iw:2*ih:ow-iw:oh-ih"
    +
    +
+ + +

30.63 perspective

+ +

Correct perspective of video not recorded perpendicular to the screen. +

+

A description of the accepted parameters follows. +

+
+
x0
+
y0
+
x1
+
y1
+
x2
+
y2
+
x3
+
y3
+

Set coordinates expression for top left, top right, bottom left and bottom right corners. +Default values are 0:0:W:0:0:H:W:H with which perspective will remain unchanged. +

+

The expressions can use the following variables: +

+
+
W
+
H
+

the width and height of video frame. +

+
+ +
+
interpolation
+

Set interpolation for perspective correction. +

+

It accepts the following values: +

+
linear
+
cubic
+
+ +

Default value is ‘linear’. +

+
+ + +

30.64 phase

+ +

Delay interlaced video by one field time so that the field order changes. +

+

The intended use is to fix PAL movies that have been captured with the +opposite field order to the film-to-video transfer. +

+

A description of the accepted parameters follows. +

+
+
mode
+

Set phase mode. +

+

It accepts the following values: +

+
t
+

Capture field order top-first, transfer bottom-first. +Filter will delay the bottom field. +

+
+
b
+

Capture field order bottom-first, transfer top-first. +Filter will delay the top field. +

+
+
p
+

Capture and transfer with the same field order. This mode only exists +for the documentation of the other options to refer to, but if you +actually select it, the filter will faithfully do nothing. +

+
+
a
+

Capture field order determined automatically by field flags, transfer +opposite. +Filter selects among ‘t’ and ‘b’ modes on a frame by frame +basis using field flags. If no field information is available, +then this works just like ‘u’. +

+
+
u
+

Capture unknown or varying, transfer opposite. +Filter selects among ‘t’ and ‘b’ on a frame by frame basis by +analyzing the images and selecting the alternative that produces best +match between the fields. +

+
+
T
+

Capture top-first, transfer unknown or varying. +Filter selects among ‘t’ and ‘p’ using image analysis. +

+
+
B
+

Capture bottom-first, transfer unknown or varying. +Filter selects among ‘b’ and ‘p’ using image analysis. +

+
+
A
+

Capture determined by field flags, transfer unknown or varying. +Filter selects among ‘t’, ‘b’ and ‘p’ using field flags and +image analysis. If no field information is available, then this works just +like ‘U’. This is the default mode. +

+
+
U
+

Both capture and transfer unknown or varying. +Filter selects among ‘t’, ‘b’ and ‘p’ using image analysis only. +

+
+
+
+ + +

30.65 pixdesctest

+ +

Pixel format descriptor test filter, mainly useful for internal +testing. The output video should be equal to the input video. +

+

For example: +

 
format=monow, pixdesctest
+
+ +

can be used to test the monowhite pixel format descriptor definition. +

+ +

30.66 pp

+ +

Enable the specified chain of postprocessing subfilters using libpostproc. This +library should be automatically selected with a GPL build (--enable-gpl). +Subfilters must be separated by ’/’ and can be disabled by prepending a ’-’. +Each subfilter and some options have a short and a long name that can be used +interchangeably, i.e. dr/dering are the same. +

+

The filters accept the following options: +

+
+
subfilters
+

Set postprocessing subfilters string. +

+
+ +

All subfilters share common options to determine their scope: +

+
+
a/autoq
+

Honor the quality commands for this subfilter. +

+
+
c/chrom
+

Do chrominance filtering, too (default). +

+
+
y/nochrom
+

Do luminance filtering only (no chrominance). +

+
+
n/noluma
+

Do chrominance filtering only (no luminance). +

+
+ +

These options can be appended after the subfilter name, separated by a ’|’. +

+

Available subfilters are: +

+
+
hb/hdeblock[|difference[|flatness]]
+

Horizontal deblocking filter +

+
difference
+

Difference factor where higher values mean more deblocking (default: 32). +

+
flatness
+

Flatness threshold where lower values mean more deblocking (default: 39). +

+
+ +
+
vb/vdeblock[|difference[|flatness]]
+

Vertical deblocking filter +

+
difference
+

Difference factor where higher values mean more deblocking (default: 32). +

+
flatness
+

Flatness threshold where lower values mean more deblocking (default: 39). +

+
+ +
+
ha/hadeblock[|difference[|flatness]]
+

Accurate horizontal deblocking filter +

+
difference
+

Difference factor where higher values mean more deblocking (default: 32). +

+
flatness
+

Flatness threshold where lower values mean more deblocking (default: 39). +

+
+ +
+
va/vadeblock[|difference[|flatness]]
+

Accurate vertical deblocking filter +

+
difference
+

Difference factor where higher values mean more deblocking (default: 32). +

+
flatness
+

Flatness threshold where lower values mean more deblocking (default: 39). +

+
+
+
+ +

The horizontal and vertical deblocking filters share the difference and +flatness values so you cannot set different horizontal and vertical +thresholds. +

+
+
h1/x1hdeblock
+

Experimental horizontal deblocking filter +

+
+
v1/x1vdeblock
+

Experimental vertical deblocking filter +

+
+
dr/dering
+

Deringing filter +

+
+
tn/tmpnoise[|threshold1[|threshold2[|threshold3]]], temporal noise reducer
+
+
threshold1
+

larger -> stronger filtering +

+
threshold2
+

larger -> stronger filtering +

+
threshold3
+

larger -> stronger filtering +

+
+ +
+
al/autolevels[:f/fullyrange], automatic brightness / contrast correction
+
+
f/fullyrange
+

Stretch luminance to 0-255. +

+
+ +
+
lb/linblenddeint
+

Linear blend deinterlacing filter that deinterlaces the given block by +filtering all lines with a (1 2 1) filter. +

+
+
li/linipoldeint
+

Linear interpolating deinterlacing filter that deinterlaces the given block by +linearly interpolating every second line. +

+
+
ci/cubicipoldeint
+

Cubic interpolating deinterlacing filter deinterlaces the given block by +cubically interpolating every second line. +

+
+
md/mediandeint
+

Median deinterlacing filter that deinterlaces the given block by applying a +median filter to every second line. +

+
+
fd/ffmpegdeint
+

FFmpeg deinterlacing filter that deinterlaces the given block by filtering every +second line with a (-1 4 2 4 -1) filter. +

+
+
l5/lowpass5
+

Vertically applied FIR lowpass deinterlacing filter that deinterlaces the given +block by filtering all lines with a (-1 2 6 2 -1) filter. +

+
+
fq/forceQuant[|quantizer]
+

Overrides the quantizer table from the input with the constant quantizer you +specify. +

+
quantizer
+

Quantizer to use +

+
+ +
+
de/default
+

Default pp filter combination (hb|a,vb|a,dr|a) +

+
+
fa/fast
+

Fast pp filter combination (h1|a,v1|a,dr|a) +

+
+
ac
+

High quality pp filter combination (ha|a|128|7,va|a,dr|a) +

+
+ + +

30.66.1 Examples

+ +
    +
  • +Apply horizontal and vertical deblocking, deringing and automatic +brightness/contrast: +
     
    pp=hb/vb/dr/al
    +
    + +
  • +Apply default filters without brightness/contrast correction: +
     
    pp=de/-al
    +
    + +
  • +Apply default filters and temporal denoiser: +
     
    pp=default/tmpnoise|1|2|3
    +
    + +
  • +Apply deblocking on luminance only, and switch vertical deblocking on or off +automatically depending on available CPU time: +
     
    pp=hb|y/vb|a
    +
    +
+ + +

30.67 psnr

+ +

Obtain the average, maximum and minimum PSNR (Peak Signal to Noise +Ratio) between two input videos. +

+

This filter takes in input two input videos, the first input is +considered the "main" source and is passed unchanged to the +output. The second input is used as a "reference" video for computing +the PSNR. +

+

Both video inputs must have the same resolution and pixel format for +this filter to work correctly. Also it assumes that both inputs +have the same number of frames, which are compared one by one. +

+

The obtained average PSNR is printed through the logging system. +

+

The filter stores the accumulated MSE (mean squared error) of each +frame, and at the end of the processing it is averaged across all frames +equally, and the following formula is applied to obtain the PSNR: +

+
 
PSNR = 10*log10(MAX^2/MSE)
+
+ +

Where MAX is the average of the maximum values of each component of the +image. +

+

The description of the accepted parameters follows. +

+
+
stats_file, f
+

If specified the filter will use the named file to save the PSNR of +each individual frame. +

+
+ +

The file printed if stats_file is selected contains a sequence of +key/value pairs of the form key:value for each compared +pair of frames. +

+

A description of each shown parameter follows: +

+
+
n
+

sequential number of the input frame, starting from 1 +

+
+
mse_avg
+

Mean Square Error pixel-by-pixel average difference of the compared +frames, averaged over all the image components. +

+
+
mse_y, mse_u, mse_v, mse_r, mse_g, mse_b, mse_a
+

Mean Square Error pixel-by-pixel average difference of the compared +frames for the component specified by the suffix. +

+
+
psnr_y, psnr_u, psnr_v, psnr_r, psnr_g, psnr_b, psnr_a
+

Peak Signal to Noise ratio of the compared frames for the component +specified by the suffix. +

+
+ +

For example: +

 
movie=ref_movie.mpg, setpts=PTS-STARTPTS [main];
+[main][ref] psnr="stats_file=stats.log" [out]
+
+ +

In this example the input file being processed is compared with the +reference file ‘ref_movie.mpg’. The PSNR of each individual frame +is stored in ‘stats.log’. +

+

+

+

30.68 pullup

+ +

Pulldown reversal (inverse telecine) filter, capable of handling mixed +hard-telecine, 24000/1001 fps progressive, and 30000/1001 fps progressive +content. +

+

The pullup filter is designed to take advantage of future context in making +its decisions. This filter is stateless in the sense that it does not lock +onto a pattern to follow, but it instead looks forward to the following +fields in order to identify matches and rebuild progressive frames. +

+

To produce content with an even framerate, insert the fps filter after +pullup, use fps=24000/1001 if the input frame rate is 29.97fps, +fps=24 for 30fps and the (rare) telecined 25fps input. +

+

The filter accepts the following options: +

+
+
jl
+
jr
+
jt
+
jb
+

These options set the amount of "junk" to ignore at the left, right, top, and +bottom of the image, respectively. Left and right are in units of 8 pixels, +while top and bottom are in units of 2 lines. +The default is 8 pixels on each side. +

+
+
sb
+

Set the strict breaks. Setting this option to 1 will reduce the chances of +filter generating an occasional mismatched frame, but it may also cause an +excessive number of frames to be dropped during high motion sequences. +Conversely, setting it to -1 will make filter match fields more easily. +This may help processing of video where there is slight blurring between +the fields, but may also cause there to be interlaced frames in the output. +Default value is 0. +

+
+
mp
+

Set the metric plane to use. It accepts the following values: +

+
l
+

Use luma plane. +

+
+
u
+

Use chroma blue plane. +

+
+
v
+

Use chroma red plane. +

+
+ +

This option may be set to use chroma plane instead of the default luma plane +for doing filter’s computations. This may improve accuracy on very clean +source material, but more likely will decrease accuracy, especially if there +is chroma noise (rainbow effect) or any grayscale video. +The main purpose of setting ‘mp’ to a chroma plane is to reduce CPU +load and make pullup usable in realtime on slow machines. +

+
+ +

For best results (without duplicated frames in the output file) it is +necessary to change the output frame rate. For example, to inverse +telecine NTSC input: +

 
ffmpeg -i input -vf pullup -r 24000/1001 ...
+
+ + +

30.69 removelogo

+ +

Suppress a TV station logo, using an image file to determine which +pixels comprise the logo. It works by filling in the pixels that +comprise the logo with neighboring pixels. +

+

The filter accepts the following options: +

+
+
filename, f
+

Set the filter bitmap file, which can be any image format supported by +libavformat. The width and height of the image file must match those of the +video stream being processed. +

+
+ +

Pixels in the provided bitmap image with a value of zero are not +considered part of the logo, non-zero pixels are considered part of +the logo. If you use white (255) for the logo and black (0) for the +rest, you will be safe. For making the filter bitmap, it is +recommended to take a screen capture of a black frame with the logo +visible, and then using a threshold filter followed by the erode +filter once or twice. +

+

If needed, little splotches can be fixed manually. Remember that if +logo pixels are not covered, the filter quality will be much +reduced. Marking too many pixels as part of the logo does not hurt as +much, but it will increase the amount of blurring needed to cover over +the image and will destroy more information than necessary, and extra +pixels will slow things down on a large logo. +

+ +

30.70 rotate

+ +

Rotate video by an arbitrary angle expressed in radians. +

+

The filter accepts the following options: +

+

A description of the optional parameters follows. +

+
angle, a
+

Set an expression for the angle by which to rotate the input video +clockwise, expressed as a number of radians. A negative value will +result in a counter-clockwise rotation. By default it is set to "0". +

+

This expression is evaluated for each frame. +

+
+
out_w, ow
+

Set the output width expression, default value is "iw". +This expression is evaluated just once during configuration. +

+
+
out_h, oh
+

Set the output height expression, default value is "ih". +This expression is evaluated just once during configuration. +

+
+
bilinear
+

Enable bilinear interpolation if set to 1, a value of 0 disables +it. Default value is 1. +

+
+
fillcolor, c
+

Set the color used to fill the output area not covered by the rotated +image. For the general syntax of this option, check the "Color" section in the +ffmpeg-utils manual. If the special value "none" is selected then no +background is printed (useful for example if the background is never shown). +

+

Default value is "black". +

+
+ +

The expressions for the angle and the output size can contain the +following constants and functions: +

+
+
n
+

sequential number of the input frame, starting from 0. It is always NAN +before the first frame is filtered. +

+
+
t
+

time in seconds of the input frame, it is set to 0 when the filter is +configured. It is always NAN before the first frame is filtered. +

+
+
hsub
+
vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+
in_w, iw
+
in_h, ih
+

the input video width and height +

+
+
out_w, ow
+
out_h, oh
+

the output width and height, that is the size of the padded area as +specified by the width and height expressions +

+
+
rotw(a)
+
roth(a)
+

the minimal width/height required for completely containing the input +video rotated by a radians. +

+

These are only available when computing the ‘out_w’ and +‘out_h’ expressions. +

+
+ + +

30.70.1 Examples

+ +
    +
  • +Rotate the input by PI/6 radians clockwise: +
     
    rotate=PI/6
    +
    + +
  • +Rotate the input by PI/6 radians counter-clockwise: +
     
    rotate=-PI/6
    +
    + +
  • +Rotate the input by 45 degrees clockwise: +
     
    rotate=45*PI/180
    +
    + +
  • +Apply a constant rotation with period T, starting from an angle of PI/3: +
     
    rotate=PI/3+2*PI*t/T
    +
    + +
  • +Make the input video rotation oscillating with a period of T +seconds and an amplitude of A radians: +
     
    rotate=A*sin(2*PI/T*t)
    +
    + +
  • +Rotate the video, output size is chosen so that the whole rotating +input video is always completely contained in the output: +
     
    rotate='2*PI*t:ow=hypot(iw,ih):oh=ow'
    +
    + +
  • +Rotate the video, reduce the output size so that no background is ever +shown: +
     
    rotate=2*PI*t:ow='min(iw,ih)/sqrt(2)':oh=ow:c=none
    +
    +
+ + +

30.70.2 Commands

+ +

The filter supports the following commands: +

+
+
a, angle
+

Set the angle expression. +The command accepts the same syntax of the corresponding option. +

+

If the specified expression is not valid, it is kept at its current +value. +

+
+ + +

30.71 sab

+ +

Apply Shape Adaptive Blur. +

+

The filter accepts the following options: +

+
+
luma_radius, lr
+

Set luma blur filter strength, must be a value in range 0.1-4.0, default +value is 1.0. A greater value will result in a more blurred image, and +in slower processing. +

+
+
luma_pre_filter_radius, lpfr
+

Set luma pre-filter radius, must be a value in the 0.1-2.0 range, default +value is 1.0. +

+
+
luma_strength, ls
+

Set luma maximum difference between pixels to still be considered, must +be a value in the 0.1-100.0 range, default value is 1.0. +

+
+
chroma_radius, cr
+

Set chroma blur filter strength, must be a value in range 0.1-4.0. A +greater value will result in a more blurred image, and in slower +processing. +

+
+
chroma_pre_filter_radius, cpfr
+

Set chroma pre-filter radius, must be a value in the 0.1-2.0 range. +

+
+
chroma_strength, cs
+

Set chroma maximum difference between pixels to still be considered, +must be a value in the 0.1-100.0 range. +

+
+ +

Each chroma option value, if not explicitly specified, is set to the +corresponding luma option value. +

+

+

+

30.72 scale

+ +

Scale (resize) the input video, using the libswscale library. +

+

The scale filter forces the output display aspect ratio to be the same +of the input, by changing the output sample aspect ratio. +

+

If the input image format is different from the format requested by +the next filter, the scale filter will convert the input to the +requested format. +

+ +

30.72.1 Options

+

The filter accepts the following options, or any of the options +supported by the libswscale scaler. +

+

See (ffmpeg-scaler)scaler_options for +the complete list of scaler options. +

+
+
width, w
+
height, h
+

Set the output video dimension expression. Default value is the input +dimension. +

+

If the value is 0, the input width is used for the output. +

+

If one of the values is -1, the scale filter will use a value that +maintains the aspect ratio of the input image, calculated from the +other specified dimension. If both of them are -1, the input size is +used +

+

If one of the values is -n with n > 1, the scale filter will also use a value +that maintains the aspect ratio of the input image, calculated from the other +specified dimension. After that it will, however, make sure that the calculated +dimension is divisible by n and adjust the value if necessary. +

+

See below for the list of accepted constants for use in the dimension +expression. +

+
+
interl
+

Set the interlacing mode. It accepts the following values: +

+
+
1
+

Force interlaced aware scaling. +

+
+
0
+

Do not apply interlaced scaling. +

+
+
-1
+

Select interlaced aware scaling depending on whether the source frames +are flagged as interlaced or not. +

+
+ +

Default value is ‘0’. +

+
+
flags
+

Set libswscale scaling flags. See +(ffmpeg-scaler)sws_flags for the +complete list of values. If not explicitly specified the filter applies +the default flags. +

+
+
size, s
+

Set the video size. For the syntax of this option, check the "Video size" +section in the ffmpeg-utils manual. +

+
+
in_color_matrix
+
out_color_matrix
+

Set in/output YCbCr color space type. +

+

This allows the autodetected value to be overridden as well as allows forcing +a specific value used for the output and encoder. +

+

If not specified, the color space type depends on the pixel format. +

+

Possible values: +

+
+
auto
+

Choose automatically. +

+
+
bt709
+

Format conforming to International Telecommunication Union (ITU) +Recommendation BT.709. +

+
+
fcc
+

Set color space conforming to the United States Federal Communications +Commission (FCC) Code of Federal Regulations (CFR) Title 47 (2003) 73.682 (a). +

+
+
bt601
+

Set color space conforming to: +

+
    +
  • +ITU Radiocommunication Sector (ITU-R) Recommendation BT.601 + +
  • +ITU-R Rec. BT.470-6 (1998) Systems B, B1, and G + +
  • +Society of Motion Picture and Television Engineers (SMPTE) ST 170:2004 + +
+ +
+
smpte240m
+

Set color space conforming to SMPTE ST 240:1999. +

+
+ +
+
in_range
+
out_range
+

Set in/output YCbCr sample range. +

+

This allows the autodetected value to be overridden as well as allows forcing +a specific value used for the output and encoder. If not specified, the +range depends on the pixel format. Possible values: +

+
+
auto
+

Choose automatically. +

+
+
jpeg/full/pc
+

Set full range (0-255 in case of 8-bit luma). +

+
+
mpeg/tv
+

Set "MPEG" range (16-235 in case of 8-bit luma). +

+
+ +
+
force_original_aspect_ratio
+

Enable decreasing or increasing output video width or height if necessary to +keep the original aspect ratio. Possible values: +

+
+
disable
+

Scale the video as specified and disable this feature. +

+
+
decrease
+

The output video dimensions will automatically be decreased if needed. +

+
+
increase
+

The output video dimensions will automatically be increased if needed. +

+
+
+ +

One useful instance of this option is that when you know a specific device’s +maximum allowed resolution, you can use this to limit the output video to +that, while retaining the aspect ratio. For example, device A allows +1280x720 playback, and your video is 1920x800. Using this option (set it to +decrease) and specifying 1280x720 to the command line makes the output +1280x533. +

+

Please note that this is a different thing than specifying -1 for ‘w’ +or ‘h’, you still need to specify the output resolution for this option +to work. +

+
+
+ +

The values of the ‘w’ and ‘h’ options are expressions +containing the following constants: +

+
+
in_w
+
in_h
+

the input width and height +

+
+
iw
+
ih
+

same as in_w and in_h +

+
+
out_w
+
out_h
+

the output (scaled) width and height +

+
+
ow
+
oh
+

same as out_w and out_h +

+
+
a
+

same as iw / ih +

+
+
sar
+

input sample aspect ratio +

+
+
dar
+

input display aspect ratio. Calculated from (iw / ih) * sar. +

+
+
hsub
+
vsub
+

horizontal and vertical input chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+
ohsub
+
ovsub
+


 horizontal and vertical output chroma subsample values. For example for the +pixel format "yuv422p" ohsub is 2 and ovsub is 1. +

+
+ + +

30.72.2 Examples

+ +
    +
  • +Scale the input video to a size of 200x100: +
     
    scale=w=200:h=100
    +
    + +

    This is equivalent to: +

     
    scale=200:100
    +
    + +

    or: +

     
    scale=200x100
    +
    + +
  • +Specify a size abbreviation for the output size: +
     
    scale=qcif
    +
    + +

    which can also be written as: +

     
    scale=size=qcif
    +
    + +
  • +Scale the input to 2x: +
     
    scale=w=2*iw:h=2*ih
    +
    + +
  • +The above is the same as: +
     
    scale=2*in_w:2*in_h
    +
    + +
  • +Scale the input to 2x with forced interlaced scaling: +
     
    scale=2*iw:2*ih:interl=1
    +
    + +
  • +Scale the input to half size: +
     
    scale=w=iw/2:h=ih/2
    +
    + +
  • +Increase the width, and set the height to the same size: +
     
    scale=3/2*iw:ow
    +
    + +
  • +Seek for Greek harmony: +
     
    scale=iw:1/PHI*iw
    +scale=ih*PHI:ih
    +
    + +
  • +Increase the height, and set the width to 3/2 of the height: +
     
    scale=w=3/2*oh:h=3/5*ih
    +
    + +
  • +Increase the size, but make the size a multiple of the chroma +subsample values: +
     
    scale="trunc(3/2*iw/hsub)*hsub:trunc(3/2*ih/vsub)*vsub"
    +
    + +
  • +Increase the width to a maximum of 500 pixels, keep the same input +aspect ratio: +
     
    scale=w='min(500\, iw*3/2):h=-1'
    +
    +
+ + +

30.73 separatefields

+ +


 The separatefields takes a frame-based video input and splits +each frame into its component fields, producing a new half height clip +with twice the frame rate and twice the frame count. +

+


 This filter uses field-dominance information in the frame to decide which +of each pair of fields to place first in the output. +If it gets it wrong use the setfield filter before the separatefields filter. +

+ +

30.74 setdar, setsar

+ +

The setdar filter sets the Display Aspect Ratio for the filter +output video. +

+

This is done by changing the specified Sample (aka Pixel) Aspect +Ratio, according to the following equation: +

 
DAR = HORIZONTAL_RESOLUTION / VERTICAL_RESOLUTION * SAR
+
+ +

Keep in mind that the setdar filter does not modify the pixel +dimensions of the video frame. Also the display aspect ratio set by +this filter may be changed by later filters in the filterchain, +e.g. in case of scaling or if another "setdar" or a "setsar" filter is +applied. +

+

The setsar filter sets the Sample (aka Pixel) Aspect Ratio for +the filter output video. +

+

Note that as a consequence of the application of this filter, the +output display aspect ratio will change according to the equation +above. +

+

Keep in mind that the sample aspect ratio set by the setsar +filter may be changed by later filters in the filterchain, e.g. if +another "setsar" or a "setdar" filter is applied. +

+

The filters accept the following options: +

+
+
r, ratio, dar (setdar only), sar (setsar only)
+

Set the aspect ratio used by the filter. +

+

The parameter can be a floating point number string, an expression, or +a string of the form num:den, where num and +den are the numerator and denominator of the aspect ratio. If +the parameter is not specified, it is assumed the value "0". +In case the form "num:den" is used, the : character +should be escaped. +

+
+
max
+

Set the maximum integer value to use for expressing numerator and +denominator when reducing the expressed aspect ratio to a rational. +Default value is 100. +

+
+
+ +

The parameter sar is an expression containing +the following constants: +

+
+
E, PI, PHI
+

the corresponding mathematical approximated values for e +(euler number), pi (greek PI), phi (golden ratio) +

+
+
w, h
+

the input width and height +

+
+
a
+

same as w / h +

+
+
sar
+

input sample aspect ratio +

+
+
dar
+

input display aspect ratio, it is the same as (w / h) * sar +

+
+
hsub, vsub
+

horizontal and vertical chroma subsample values. For example for the +pixel format "yuv422p" hsub is 2 and vsub is 1. +

+
+ + +

30.74.1 Examples

+ +
    +
  • +To change the display aspect ratio to 16:9, specify one of the following: +
     
    setdar=dar=1.77777
    +setdar=dar=16/9
    +
    + +
  • +To change the sample aspect ratio to 10:11, specify: +
     
    setsar=sar=10/11
    +
    + +
  • +To set a display aspect ratio of 16:9, and specify a maximum integer value of +1000 in the aspect ratio reduction, use the command: +
     
    setdar=ratio=16/9:max=1000
    +
    + +
+ +

+

+

30.75 setfield

+ +

Force field for the output video frame. +

+

The setfield filter marks the interlace type field for the +output frames. It does not change the input frame, but only sets the +corresponding property, which affects how the frame is treated by +following filters (e.g. fieldorder or yadif). +

+

The filter accepts the following options: +

+
+
mode
+

Available values are: +

+
+
auto
+

Keep the same field property. +

+
+
bff
+

Mark the frame as bottom-field-first. +

+
+
tff
+

Mark the frame as top-field-first. +

+
+
prog
+

Mark the frame as progressive. +

+
+
+
+ + +

30.76 showinfo

+ +

Show a line containing various information for each input video frame. +The input video is not modified. +

+

The shown line contains a sequence of key/value pairs of the form +key:value. +

+

A description of each shown parameter follows: +

+
+
n
+

sequential number of the input frame, starting from 0 +

+
+
pts
+

Presentation TimeStamp of the input frame, expressed as a number of +time base units. The time base unit depends on the filter input pad. +

+
+
pts_time
+

Presentation TimeStamp of the input frame, expressed as a number of +seconds +

+
+
pos
+


 position of the frame in the input stream, -1 if this information is +unavailable and/or meaningless (for example in case of synthetic video) +

+
+
fmt
+

pixel format name +

+
+
sar
+

sample aspect ratio of the input frame, expressed in the form +num/den +

+
+
s
+

size of the input frame. For the syntax of this option, check the "Video size" +section in the ffmpeg-utils manual. +

+
+
i
+

interlaced mode ("P" for "progressive", "T" for top field first, "B" +for bottom field first) +

+
+
iskey
+

1 if the frame is a key frame, 0 otherwise +

+
+
type
+

picture type of the input frame ("I" for an I-frame, "P" for a +P-frame, "B" for a B-frame, "?" for unknown type). +Check also the documentation of the AVPictureType enum and of +the av_get_picture_type_char function defined in +‘libavutil/avutil.h’. +

+
+
checksum
+

Adler-32 checksum (printed in hexadecimal) of all the planes of the input frame +

+
+
plane_checksum
+

Adler-32 checksum (printed in hexadecimal) of each plane of the input frame, +expressed in the form "[c0 c1 c2 c3]" +

+
+ +

+

+

30.77 smartblur

+ +

Blur the input video without impacting the outlines. +

+

The filter accepts the following options: +

+
+
luma_radius, lr
+

Set the luma radius. The option value must be a float number in +the range [0.1,5.0] that specifies the variance of the gaussian filter +used to blur the image (slower if larger). Default value is 1.0. +

+
+
luma_strength, ls
+

Set the luma strength. The option value must be a float number +in the range [-1.0,1.0] that configures the blurring. A value included +in [0.0,1.0] will blur the image whereas a value included in +[-1.0,0.0] will sharpen the image. Default value is 1.0. +

+
+
luma_threshold, lt
+

Set the luma threshold used as a coefficient to determine +whether a pixel should be blurred or not. The option value must be an +integer in the range [-30,30]. A value of 0 will filter all the image, +a value included in [0,30] will filter flat areas and a value included +in [-30,0] will filter edges. Default value is 0. +

+
+
chroma_radius, cr
+

Set the chroma radius. The option value must be a float number in +the range [0.1,5.0] that specifies the variance of the gaussian filter +used to blur the image (slower if larger). Default value is 1.0. +

+
+
chroma_strength, cs
+

Set the chroma strength. The option value must be a float number +in the range [-1.0,1.0] that configures the blurring. A value included +in [0.0,1.0] will blur the image whereas a value included in +[-1.0,0.0] will sharpen the image. Default value is 1.0. +

+
+
chroma_threshold, ct
+

Set the chroma threshold used as a coefficient to determine +whether a pixel should be blurred or not. The option value must be an +integer in the range [-30,30]. A value of 0 will filter all the image, +a value included in [0,30] will filter flat areas and a value included +in [-30,0] will filter edges. Default value is 0. +

+
+ +

If a chroma option is not explicitly set, the corresponding luma value +is set. +

+ +

30.78 stereo3d

+ +

Convert between different stereoscopic image formats. +

+

The filters accept the following options: +

+
+
in
+

Set stereoscopic image format of input. +

+

Available values for input image formats are: +

+
sbsl
+

side by side parallel (left eye left, right eye right) +

+
+
sbsr
+

side by side crosseye (right eye left, left eye right) +

+
+
sbs2l
+

side by side parallel with half width resolution +(left eye left, right eye right) +

+
+
sbs2r
+

side by side crosseye with half width resolution +(right eye left, left eye right) +

+
+
abl
+

above-below (left eye above, right eye below) +

+
+
abr
+

above-below (right eye above, left eye below) +

+
+
ab2l
+

above-below with half height resolution +(left eye above, right eye below) +

+
+
ab2r
+

above-below with half height resolution +(right eye above, left eye below) +

+
+
al
+

alternating frames (left eye first, right eye second) +

+
+
ar
+

alternating frames (right eye first, left eye second) +

+

Default value is ‘sbsl’. +

+
+ +
+
out
+

Set stereoscopic image format of output. +

+

Available values for output image formats are all the input formats as well as: +

+
arbg
+

anaglyph red/blue gray +(red filter on left eye, blue filter on right eye) +

+
+
argg
+

anaglyph red/green gray +(red filter on left eye, green filter on right eye) +

+
+
arcg
+

anaglyph red/cyan gray +(red filter on left eye, cyan filter on right eye) +

+
+
arch
+

anaglyph red/cyan half colored +(red filter on left eye, cyan filter on right eye) +

+
+
arcc
+

anaglyph red/cyan color +(red filter on left eye, cyan filter on right eye) +

+
+
arcd
+

anaglyph red/cyan color optimized with the least squares projection of dubois +(red filter on left eye, cyan filter on right eye) +

+
+
agmg
+

anaglyph green/magenta gray +(green filter on left eye, magenta filter on right eye) +

+
+
agmh
+

anaglyph green/magenta half colored +(green filter on left eye, magenta filter on right eye) +

+
+
agmc
+

anaglyph green/magenta colored +(green filter on left eye, magenta filter on right eye) +

+
+
agmd
+

anaglyph green/magenta color optimized with the least squares projection of dubois +(green filter on left eye, magenta filter on right eye) +

+
+
aybg
+

anaglyph yellow/blue gray +(yellow filter on left eye, blue filter on right eye) +

+
+
aybh
+

anaglyph yellow/blue half colored +(yellow filter on left eye, blue filter on right eye) +

+
+
aybc
+

anaglyph yellow/blue colored +(yellow filter on left eye, blue filter on right eye) +

+
+
aybd
+

anaglyph yellow/blue color optimized with the least squares projection of dubois +(yellow filter on left eye, blue filter on right eye) +

+
+
irl
+

interleaved rows (left eye has top row, right eye starts on next row) +

+
+
irr
+

interleaved rows (right eye has top row, left eye starts on next row) +

+
+
ml
+

mono output (left eye only) +

+
+
mr
+

mono output (right eye only) +

+
+ +

Default value is ‘arcd’. +

+
+ + +

30.78.1 Examples

+ +
    +
  • +Convert input video from side by side parallel to anaglyph yellow/blue dubois: +
     
    stereo3d=sbsl:aybd
    +
    + +
  • +Convert input video from above-below (left eye above, right eye below) to side by side crosseye. +
     
    stereo3d=abl:sbsr
    +
    +
+ + +

30.79 spp

+ +

Apply a simple postprocessing filter that compresses and decompresses the image +at several (or - in the case of ‘quality’ level 6 - all) shifts +and average the results. +

+

The filter accepts the following options: +

+
+
quality
+


 Set quality. This option defines the number of levels for averaging. It accepts +an integer in the range 0-6. If set to 0, the filter will have no +effect. A value of 6 means the highest quality. For each increment of +that value the speed drops by a factor of approximately 2. Default value is +3. +

+
+
qp
+

Force a constant quantization parameter. If not set, the filter will use the QP +from the video stream (if available). +

+
+
mode
+

Set thresholding mode. Available modes are: +

+
+
hard
+

Set hard thresholding (default). +

+
soft
+

Set soft thresholding (better de-ringing effect, but likely blurrier). +

+
+ +
+
use_bframe_qp
+

Enable the use of the QP from the B-Frames if set to 1. Using this +option may cause flicker since the B-Frames have often larger QP. Default is +0 (not enabled). +

+
+ +

+

+

30.80 subtitles

+ +

Draw subtitles on top of input video using the libass library. +

+

To enable compilation of this filter you need to configure FFmpeg with +--enable-libass. This filter also requires a build with libavcodec and +libavformat to convert the passed subtitles file to ASS (Advanced Substation +Alpha) subtitles format. +

+

The filter accepts the following options: +

+
+
filename, f
+

Set the filename of the subtitle file to read. It must be specified. +

+
+
original_size
+

Specify the size of the original video, the video for which the ASS file +was composed. For the syntax of this option, check the "Video size" section in +the ffmpeg-utils manual. Due to a misdesign in ASS aspect ratio arithmetic, +this is necessary to correctly scale the fonts if the aspect ratio has been +changed. +

+
+
charenc
+

Set subtitles input character encoding. subtitles filter only. Only +useful if not UTF-8. +

+
+ +

If the first key is not specified, it is assumed that the first value +specifies the ‘filename’. +

+

For example, to render the file ‘sub.srt’ on top of the input +video, use the command: +

 
subtitles=sub.srt
+
+ +

which is equivalent to: +

 
subtitles=filename=sub.srt
+
+ + +

30.81 super2xsai

+ +

Scale the input by 2x and smooth using the Super2xSaI (Scale and +Interpolate) pixel art scaling algorithm. +

+

Useful for enlarging pixel art images without reducing sharpness. +

+ +

30.82 swapuv

+

Swap U & V plane. +

+ +

30.83 telecine

+ +

Apply telecine process to the video. +

+

This filter accepts the following options: +

+
+
first_field
+
+
top, t
+

top field first +

+
bottom, b
+

bottom field first +The default value is top. +

+
+ +
+
pattern
+

A string of numbers representing the pulldown pattern you wish to apply. +The default value is 23. +

+
+ +
 
Some typical patterns:
+
+NTSC output (30i):
+27.5p: 32222
+24p: 23 (classic)
+24p: 2332 (preferred)
+20p: 33
+18p: 334
+16p: 3444
+
+PAL output (25i):
+27.5p: 12222
+24p: 222222222223 ("Euro pulldown")
+16.67p: 33
+16p: 33333334
+
+ + +

30.84 thumbnail

+

Select the most representative frame in a given sequence of consecutive frames. +

+

The filter accepts the following options: +

+
+
n
+

Set the frames batch size to analyze; in a set of n frames, the filter +will pick one of them, and then handle the next batch of n frames until +the end. Default is 100. +

+
+ +

Since the filter keeps track of the whole frames sequence, a bigger n +value will result in a higher memory usage, so a high value is not recommended. +

+ +

30.84.1 Examples

+ +
    +
  • +Extract one picture each 50 frames: +
     
    thumbnail=50
    +
    + +
  • +Complete example of a thumbnail creation with ffmpeg: +
     
    ffmpeg -i in.avi -vf thumbnail,scale=300:200 -frames:v 1 out.png
    +
    +
+ + +

30.85 tile

+ +

Tile several successive frames together. +

+

The filter accepts the following options: +

+
+
layout
+

Set the grid size (i.e. the number of lines and columns). For the syntax of +this option, check the "Video size" section in the ffmpeg-utils manual. +

+
+
nb_frames
+

Set the maximum number of frames to render in the given area. It must be less +than or equal to wxh. The default value is 0, meaning all +the area will be used. +

+
+
margin
+

Set the outer border margin in pixels. +

+
+
padding
+

Set the inner border thickness (i.e. the number of pixels between frames). For +more advanced padding options (such as having different values for the edges), +refer to the pad video filter. +

+
+
color
+


 Specify the color of the unused area. For the syntax of this option, check the +"Color" section in the ffmpeg-utils manual. The default value of color +is "black". +

+
+ + +

30.85.1 Examples

+ +
    +
  • +Produce 8x8 PNG tiles of all keyframes (‘-skip_frame nokey’) in a movie: +
     
    ffmpeg -skip_frame nokey -i file.avi -vf 'scale=128:72,tile=8x8' -an -vsync 0 keyframes%03d.png
    +
    +

    The ‘-vsync 0’ is necessary to prevent ffmpeg from +duplicating each output frame to accommodate the originally detected frame +rate. +

    +
  • +Display 5 pictures in an area of 3x2 frames, +with 7 pixels between them, and 2 pixels of initial margin, using +mixed flat and named options: +
     
    tile=3x2:nb_frames=5:padding=7:margin=2
    +
    +
+ + +

30.86 tinterlace

+ +

Perform various types of temporal field interlacing. +

+

Frames are counted starting from 1, so the first input frame is +considered odd. +

+

The filter accepts the following options: +

+
+
mode
+

Specify the mode of the interlacing. This option can also be specified +as a value alone. See below for a list of values for this option. +

+

Available values are: +

+
+
merge, 0
+

Move odd frames into the upper field, even into the lower field, +generating a double height frame at half frame rate. +

+
+
drop_odd, 1
+

Only output even frames, odd frames are dropped, generating a frame with +unchanged height at half frame rate. +

+
+
drop_even, 2
+

Only output odd frames, even frames are dropped, generating a frame with +unchanged height at half frame rate. +

+
+
pad, 3
+

Expand each frame to full height, but pad alternate lines with black, +generating a frame with double height at the same input frame rate. +

+
+
interleave_top, 4
+

Interleave the upper field from odd frames with the lower field from +even frames, generating a frame with unchanged height at half frame rate. +

+
+
interleave_bottom, 5
+

Interleave the lower field from odd frames with the upper field from +even frames, generating a frame with unchanged height at half frame rate. +

+
+
interlacex2, 6
+

Double frame rate with unchanged height. Frames are inserted each +containing the second temporal field from the previous input frame and +the first temporal field from the next input frame. This mode relies on +the top_field_first flag. Useful for interlaced video displays with no +field synchronisation. +

+
+ +

Numeric values are deprecated but are accepted for backward +compatibility reasons. +

+

Default mode is merge. +

+
+
flags
+

Specify flags influencing the filter process. +

+

Available value for flags is: +

+
+
low_pass_filter, vlfp
+

Enable vertical low-pass filtering in the filter. +Vertical low-pass filtering is required when creating an interlaced +destination from a progressive source which contains high-frequency +vertical detail. Filtering will reduce interlace ’twitter’ and Moire +patterning. +

+

Vertical low-pass filtering can only be enabled for ‘mode’ +interleave_top and interleave_bottom. +

+
+
+
+
+ + +

30.87 transpose

+ +

Transpose rows with columns in the input video and optionally flip it. +

+

This filter accepts the following options: +

+
+
dir
+

Specify the transposition direction. +

+

Can assume the following values: +

+
0, 4, cclock_flip
+

Rotate by 90 degrees counterclockwise and vertically flip (default), that is: +

 
L.R     L.l
+. . ->  . .
+l.r     R.r
+
+ +
+
1, 5, clock
+

Rotate by 90 degrees clockwise, that is: +

 
L.R     l.L
+. . ->  . .
+l.r     r.R
+
+ +
+
2, 6, cclock
+

Rotate by 90 degrees counterclockwise, that is: +

 
L.R     R.r
+. . ->  . .
+l.r     L.l
+
+ +
+
3, 7, clock_flip
+

Rotate by 90 degrees clockwise and vertically flip, that is: +

 
L.R     r.R
+. . ->  . .
+l.r     l.L
+
+
+
+ +

For values between 4-7, the transposition is only done if the input +video geometry is portrait and not landscape. These values are +deprecated, the passthrough option should be used instead. +

+

Numerical values are deprecated, and should be dropped in favor of +symbolic constants. +

+
+
passthrough
+

Do not apply the transposition if the input geometry matches the one +specified by the specified value. It accepts the following values: +

+
none
+

Always apply transposition. +

+
portrait
+

Preserve portrait geometry (when height >= width). +

+
landscape
+

Preserve landscape geometry (when width >= height). +

+
+ +

Default value is none. +

+
+ +

For example to rotate by 90 degrees clockwise and preserve portrait +layout: +

 
transpose=dir=1:passthrough=portrait
+
+ +

The command above can also be specified as: +

 
transpose=1:portrait
+
+ + +

30.88 trim

+

Trim the input so that the output contains one continuous subpart of the input. +

+

This filter accepts the following options: +

+
start
+

Specify time of the start of the kept section, i.e. the frame with the +timestamp start will be the first frame in the output. +

+
+
end
+

Specify time of the first frame that will be dropped, i.e. the frame +immediately preceding the one with the timestamp end will be the last +frame in the output. +

+
+
start_pts
+

Same as start, except this option sets the start timestamp in timebase +units instead of seconds. +

+
+
end_pts
+

Same as end, except this option sets the end timestamp in timebase units +instead of seconds. +

+
+
duration
+

Specify maximum duration of the output. +

+
+
start_frame
+

Number of the first frame that should be passed to output. +

+
+
end_frame
+

Number of the first frame that should be dropped. +

+
+ +


 ‘start’, ‘end’, ‘duration’ are expressed as time +duration specifications, check the "Time duration" section in the +ffmpeg-utils manual. +

+

Note that the first two sets of the start/end options and the ‘duration’ +option look at the frame timestamp, while the _frame variants simply count the +frames that pass through the filter. Also note that this filter does not modify +the timestamps. If you wish that the output timestamps start at zero, insert a +setpts filter after the trim filter. +

+

If multiple start or end options are set, this filter tries to be greedy and +keep all the frames that match at least one of the specified constraints. To keep +only the part that matches all the constraints at once, chain multiple trim +filters. +

+

The defaults are such that all the input is kept. So it is possible to set e.g. +just the end values to keep everything before the specified time. +

+

Examples: +

    +
  • +drop everything except the second minute of input +
     
    ffmpeg -i INPUT -vf trim=60:120
    +
    + +
  • +keep only the first second +
     
    ffmpeg -i INPUT -vf trim=duration=1
    +
    + +
+ + + +

30.89 unsharp

+ +

Sharpen or blur the input video. +

+

It accepts the following parameters: +

+
+
luma_msize_x, lx
+

Set the luma matrix horizontal size. It must be an odd integer between +3 and 63, default value is 5. +

+
+
luma_msize_y, ly
+

Set the luma matrix vertical size. It must be an odd integer between 3 +and 63, default value is 5. +

+
+
luma_amount, la
+

Set the luma effect strength. It can be a float number, reasonable +values lay between -1.5 and 1.5. +

+

Negative values will blur the input video, while positive values will +sharpen it, a value of zero will disable the effect. +

+

Default value is 1.0. +

+
+
chroma_msize_x, cx
+

Set the chroma matrix horizontal size. It must be an odd integer +between 3 and 63, default value is 5. +

+
+
chroma_msize_y, cy
+

Set the chroma matrix vertical size. It must be an odd integer +between 3 and 63, default value is 5. +

+
+
chroma_amount, ca
+

Set the chroma effect strength. It can be a float number, reasonable +values lay between -1.5 and 1.5. +

+

Negative values will blur the input video, while positive values will +sharpen it, a value of zero will disable the effect. +

+

Default value is 0.0. +

+
+
opencl
+

If set to 1, specify using OpenCL capabilities, only available if +FFmpeg was configured with --enable-opencl. Default value is 0. +

+
+
+ +

All parameters are optional and default to the equivalent of the +string ’5:5:1.0:5:5:0.0’. +

+ +

30.89.1 Examples

+ +
    +
  • +Apply strong luma sharpen effect: +
     
    unsharp=luma_msize_x=7:luma_msize_y=7:luma_amount=2.5
    +
    + +
  • +Apply strong blur of both luma and chroma parameters: +
     
    unsharp=7:7:-2:7:7:-2
    +
    +
+ +

+

+

30.90 vidstabdetect

+ +

Analyze video stabilization/deshaking. Perform pass 1 of 2, see +vidstabtransform for pass 2. +

+

This filter generates a file with relative translation and rotation +transform information about subsequent frames, which is then used by +the vidstabtransform filter. +

+

To enable compilation of this filter you need to configure FFmpeg with +--enable-libvidstab. +

+

This filter accepts the following options: +

+
+
result
+

Set the path to the file used to write the transforms information. +Default value is ‘transforms.trf’. +

+
+
shakiness
+

Set how shaky the video is and how quick the camera is. It accepts an +integer in the range 1-10, a value of 1 means little shakiness, a +value of 10 means strong shakiness. Default value is 5. +

+
+
accuracy
+

Set the accuracy of the detection process. It must be a value in the +range 1-15. A value of 1 means low accuracy, a value of 15 means high +accuracy. Default value is 15. +

+
+
stepsize
+

Set stepsize of the search process. The region around minimum is +scanned with 1 pixel resolution. Default value is 6. +

+
+
mincontrast
+

Set minimum contrast. Below this value a local measurement field is +discarded. Must be a floating point value in the range 0-1. Default +value is 0.3. +

+
+
tripod
+

Set reference frame number for tripod mode. +

+

If enabled, the motion of the frames is compared to a reference frame +in the filtered stream, identified by the specified number. The idea +is to compensate all movements in a more-or-less static scene and keep +the camera view absolutely still. +

+

If set to 0, it is disabled. The frames are counted starting from 1. +

+
+
show
+

Show fields and transforms in the resulting frames. It accepts an +integer in the range 0-2. Default value is 0, which disables any +visualization. +

+
+ + +

30.90.1 Examples

+ +
    +
  • +Use default values: +
     
    vidstabdetect
    +
    + +
  • +Analyze strongly shaky movie and put the results in file +‘mytransforms.trf’: +
     
    vidstabdetect=shakiness=10:accuracy=15:result="mytransforms.trf"
    +
    + +
  • +Visualize the result of internal transformations in the resulting +video: +
     
    vidstabdetect=show=1
    +
    + +
  • +Analyze a video with medium shakiness using ffmpeg: +
     
    ffmpeg -i input -vf vidstabdetect=shakiness=5:show=1 dummy.avi
    +
    +
+ +

+

+

30.91 vidstabtransform

+ +

Video stabilization/deshaking: pass 2 of 2, +see vidstabdetect for pass 1. +

+

Read a file with transform information for each frame and +apply/compensate them. Together with the vidstabdetect +filter this can be used to deshake videos. See also +http://public.hronopik.de/vid.stab. It is important to also use +the unsharp filter, see below. +

+

To enable compilation of this filter you need to configure FFmpeg with +--enable-libvidstab. +

+ +

30.91.1 Options

+ +
+
input
+


 Set path to the file used to read the transforms. Default value is +‘transforms.trf’. +

+
+
smoothing
+

Set the number of frames (value*2 + 1) used for lowpass filtering the +camera movements. Default value is 10. +

+


 For example a number of 10 means that 21 frames are used (10 in the +past and 10 in the future) to smoothen the motion in the video. A +larger value leads to a smoother video, but limits the acceleration +of the camera (pan/tilt movements). 0 is a special case where a +static camera is simulated. +

+
+
optalgo
+

Set the camera path optimization algorithm. +

+

Accepted values are: +

+
gauss
+

gaussian kernel low-pass filter on camera motion (default) +

+
avg
+

averaging on transformations +

+
+ +
+
maxshift
+

Set maximal number of pixels to translate frames. Default value is -1, +meaning no limit. +

+
+
maxangle
+

Set maximal angle in radians (degree*PI/180) to rotate frames. Default +value is -1, meaning no limit. +

+
+
crop
+

Specify how to deal with borders that may be visible due to movement +compensation. +

+

Available values are: +

+
keep
+

keep image information from previous frame (default) +

+
black
+

fill the border black +

+
+ +
+
invert
+

Invert transforms if set to 1. Default value is 0. +

+
+
relative
+


 Consider transforms as relative to previous frame if set to 1, +absolute if set to 0. Default value is 0. +

+
+
zoom
+

Set percentage to zoom. A positive value will result in a zoom-in +effect, a negative value in a zoom-out effect. Default value is 0 (no +zoom). +

+
+
optzoom
+

Set optimal zooming to avoid borders. +

+

Accepted values are: +

+
0
+

disabled +

+
1
+

optimal static zoom value is determined (only very strong movements +will lead to visible borders) (default) +

+
2
+

optimal adaptive zoom value is determined (no borders will be +visible), see ‘zoomspeed’ +

+
+ +

Note that the value given at zoom is added to the one calculated here. +

+
+
zoomspeed
+

Set percent to zoom maximally each frame (enabled when +‘optzoom’ is set to 2). Range is from 0 to 5, default value is +0.25. +

+
+
interpol
+

Specify type of interpolation. +

+

Available values are: +

+
no
+

no interpolation +

+
linear
+

linear only horizontal +

+
bilinear
+

linear in both directions (default) +

+
bicubic
+

cubic in both directions (slow) +

+
+ +
+
tripod
+

Enable virtual tripod mode if set to 1, which is equivalent to +relative=0:smoothing=0. Default value is 0. +

+

Use also tripod option of vidstabdetect. +

+
+
debug
+

Increase log verbosity if set to 1. Also the detected global motions +are written to the temporary file ‘global_motions.trf’. Default +value is 0. +

+
+ + +

30.91.2 Examples

+ +
    +
  • +Use ffmpeg for a typical stabilization with default values: +
     
    ffmpeg -i inp.mpeg -vf vidstabtransform,unsharp=5:5:0.8:3:3:0.4 inp_stabilized.mpeg
    +
    + +

    Note the use of the unsharp filter which is always recommended. +

    +
  • +Zoom in a bit more and load transform data from a given file: +
     
    vidstabtransform=zoom=5:input="mytransforms.trf"
    +
    + +
  • +Smoothen the video even more: +
     
    vidstabtransform=smoothing=30
    +
    +
+ + +

30.92 vflip

+ +

Flip the input video vertically. +

+

For example, to vertically flip a video with ffmpeg: +

 
ffmpeg -i in.avi -vf "vflip" out.avi
+
+ + +

30.93 vignette

+ +

Make or reverse a natural vignetting effect. +

+

The filter accepts the following options: +

+
+
angle, a
+

Set lens angle expression as a number of radians. +

+

The value is clipped in the [0,PI/2] range. +

+

Default value: "PI/5" +

+
+
x0
+
y0
+

Set center coordinates expressions. Respectively "w/2" and "h/2" +by default. +

+
+
mode
+

Set forward/backward mode. +

+

Available modes are: +

+
forward
+

The larger the distance from the central point, the darker the image becomes. +

+
+
backward
+

The larger the distance from the central point, the brighter the image becomes. +This can be used to reverse a vignette effect, though there is no automatic +detection to extract the lens ‘angle’ and other settings (yet). It can +also be used to create a burning effect. +

+
+ +

Default value is ‘forward’. +

+
+
eval
+

Set evaluation mode for the expressions (‘angle’, ‘x0’, ‘y0’). +

+

It accepts the following values: +

+
init
+

Evaluate expressions only once during the filter initialization. +

+
+
frame
+

Evaluate expressions for each incoming frame. This is way slower than the +‘init’ mode since it requires all the scalers to be re-computed, but it +allows advanced dynamic expressions. +

+
+ +

Default value is ‘init’. +

+
+
dither
+

Set dithering to reduce the circular banding effects. Default is 1 +(enabled). +

+
+
aspect
+

Set vignette aspect. This setting allows one to adjust the shape of the vignette. +Setting this value to the SAR of the input will make a rectangular vignetting +following the dimensions of the video. +

+

Default is 1/1. +

+
+ + +

30.93.1 Expressions

+ +

The ‘angle’, ‘x0’ and ‘y0’ expressions can contain the +following parameters. +

+
+
w
+
h
+

input width and height +

+
+
n
+

the number of input frame, starting from 0 +

+
+
pts
+

the PTS (Presentation TimeStamp) time of the filtered video frame, expressed in +TB units, NAN if undefined +

+
+
r
+

frame rate of the input video, NAN if the input frame rate is unknown +

+
+
t
+

the PTS (Presentation TimeStamp) of the filtered video frame, +expressed in seconds, NAN if undefined +

+
+
tb
+

time base of the input video +

+
+ + + +

30.93.2 Examples

+ +
    +
  • +Apply simple strong vignetting effect: +
     
    vignette=PI/4
    +
    + +
  • +Make a flickering vignetting: +
     
    vignette='PI/4+random(1)*PI/50':eval=frame
    +
    + +
+ + +

30.94 w3fdif

+ +

Deinterlace the input video ("w3fdif" stands for "Weston 3 Field +Deinterlacing Filter"). +

+

Based on the process described by Martin Weston for BBC R&D, and +implemented based on the de-interlace algorithm written by Jim +Easterbrook for BBC R&D, the Weston 3 field deinterlacing filter +uses filter coefficients calculated by BBC R&D. +

+

There are two sets of filter coefficients, so called "simple" +and "complex". Which set of filter coefficients is used can +be set by passing an optional parameter: +

+
+
filter
+

Set the interlacing filter coefficients. Accepts one of the following values: +

+
+
simple
+

Simple filter coefficient set. +

+
complex
+

More-complex filter coefficient set. +

+
+

Default value is ‘complex’. +

+
+
deint
+

Specify which frames to deinterlace. Accept one of the following values: +

+
+
all
+

Deinterlace all frames. +

+
interlaced
+

Only deinterlace frames marked as interlaced. +

+
+ +

Default value is ‘all’. +

+
+ +

+

+

30.95 yadif

+ +

Deinterlace the input video ("yadif" means "yet another deinterlacing +filter"). +

+

This filter accepts the following options: +

+ +
+
mode
+

The interlacing mode to adopt, accepts one of the following values: +

+
+
0, send_frame
+

output 1 frame for each frame +

+
1, send_field
+

output 1 frame for each field +

+
2, send_frame_nospatial
+

like send_frame but skip spatial interlacing check +

+
3, send_field_nospatial
+

like send_field but skip spatial interlacing check +

+
+ +

Default value is send_frame. +

+
+
parity
+

The picture field parity assumed for the input interlaced video, accepts one of +the following values: +

+
+
0, tff
+

assume top field first +

+
1, bff
+

assume bottom field first +

+
-1, auto
+

enable automatic detection +

+
+ +

Default value is auto. +If interlacing is unknown or decoder does not export this information, +top field first will be assumed. +

+
+
deint
+

Specify which frames to deinterlace. Accept one of the following +values: +

+
+
0, all
+

deinterlace all frames +

+
1, interlaced
+

only deinterlace frames marked as interlaced +

+
+ +

Default value is all. +

+
+ + + +

31. Video Sources

+ +

Below is a description of the currently available video sources. +

+ +

31.1 buffer

+ +

Buffer video frames, and make them available to the filter chain. +

+

This source is mainly intended for a programmatic use, in particular +through the interface defined in ‘libavfilter/vsrc_buffer.h’. +

+

This source accepts the following options: +

+
+
video_size
+

Specify the size (width and height) of the buffered video frames. For the +syntax of this option, check the "Video size" section in the ffmpeg-utils +manual. +

+
+
width
+

Input video width. +

+
+
height
+

Input video height. +

+
+
pix_fmt
+

A string representing the pixel format of the buffered video frames. +It may be a number corresponding to a pixel format, or a pixel format +name. +

+
+
time_base
+

Specify the timebase assumed by the timestamps of the buffered frames. +

+
+
frame_rate
+

Specify the frame rate expected for the video stream. +

+
+
pixel_aspect, sar
+

Specify the sample aspect ratio assumed by the video frames. +

+
+
sws_param
+

Specify the optional parameters to be used for the scale filter which +is automatically inserted when an input change is detected in the +input size or format. +

+
+ +

For example: +

 
buffer=width=320:height=240:pix_fmt=yuv410p:time_base=1/24:sar=1
+
+ +

will instruct the source to accept video frames with size 320x240 and +with format "yuv410p", assuming 1/24 as the timestamps timebase and +square pixels (1:1 sample aspect ratio). +Since the pixel format with name "yuv410p" corresponds to the number 6 +(check the enum AVPixelFormat definition in ‘libavutil/pixfmt.h’), +this example corresponds to: +

 
buffer=size=320x240:pixfmt=6:time_base=1/24:pixel_aspect=1/1
+
+ +

Alternatively, the options can be specified as a flat string, but this +syntax is deprecated: +

+

width:height:pix_fmt:time_base.num:time_base.den:pixel_aspect.num:pixel_aspect.den[:sws_param] +

+ +

31.2 cellauto

+ +

Create a pattern generated by an elementary cellular automaton. +

+

The initial state of the cellular automaton can be defined through the +‘filename’, and ‘pattern’ options. If such options are +not specified an initial state is created randomly. +

+

At each new frame a new row in the video is filled with the result of +the cellular automaton next generation. The behavior when the whole +frame is filled is defined by the ‘scroll’ option. +

+

This source accepts the following options: +

+
+
filename, f
+

Read the initial cellular automaton state, i.e. the starting row, from +the specified file. +In the file, each non-whitespace character is considered an alive +cell, a newline will terminate the row, and further characters in the +file will be ignored. +

+
+
pattern, p
+

Read the initial cellular automaton state, i.e. the starting row, from +the specified string. +

+

Each non-whitespace character in the string is considered an alive +cell, a newline will terminate the row, and further characters in the +string will be ignored. +

+
+
rate, r
+

Set the video rate, that is the number of frames generated per second. +Default is 25. +

+
+
random_fill_ratio, ratio
+

Set the random fill ratio for the initial cellular automaton row. It +is a floating point number value ranging from 0 to 1, defaults to +1/PHI. +

+

This option is ignored when a file or a pattern is specified. +

+
+
random_seed, seed
+

Set the seed for filling randomly the initial row, must be an integer +included between 0 and UINT32_MAX. If not specified, or if explicitly +set to -1, the filter will try to use a good random seed on a best +effort basis. +

+
+
rule
+

Set the cellular automaton rule, it is a number ranging from 0 to 255. +Default value is 110. +

+
+
size, s
+

Set the size of the output video. For the syntax of this option, check +the "Video size" section in the ffmpeg-utils manual. +

+

If ‘filename’ or ‘pattern’ is specified, the size is set +by default to the width of the specified initial state row, and the +height is set to width * PHI. +

+

If ‘size’ is set, it must contain the width of the specified +pattern string, and the specified pattern will be centered in the +larger row. +

+

If a filename or a pattern string is not specified, the size value +defaults to "320x518" (used for a randomly generated initial state). +

+
+
scroll
+

If set to 1, scroll the output upward when all the rows in the output +have been already filled. If set to 0, the new generated row will be +written over the top row just after the bottom row is filled. +Defaults to 1. +

+
+
start_full, full
+

If set to 1, completely fill the output with generated rows before +outputting the first frame. +This is the default behavior, for disabling set the value to 0. +

+
+
stitch
+

If set to 1, stitch the left and right row edges together. +This is the default behavior, for disabling set the value to 0. +

+
+ + +

31.2.1 Examples

+ +
    +
  • +Read the initial state from ‘pattern’, and specify an output of +size 200x400. +
     
    cellauto=f=pattern:s=200x400
    +
    + +
  • +Generate a random initial row with a width of 200 cells, with a fill +ratio of 2/3: +
     
    cellauto=ratio=2/3:s=200x200
    +
    + +
  • +Create a pattern generated by rule 18 starting by a single alive cell +centered on an initial row with width 100: +
     
    cellauto=p=@:s=100x400:full=0:rule=18
    +
    + +
  • +Specify a more elaborated initial pattern: +
     
    cellauto=p='@@ @ @@':s=100x400:full=0:rule=18
    +
    + +
+ + +

31.3 mandelbrot

+ +

Generate a Mandelbrot set fractal, and progressively zoom towards the +point specified with start_x and start_y. +

+

This source accepts the following options: +

+
+
end_pts
+

Set the terminal pts value. Default value is 400. +

+
+
end_scale
+

Set the terminal scale value. +Must be a floating point value. Default value is 0.3. +

+
+
inner
+

Set the inner coloring mode, that is the algorithm used to draw the +Mandelbrot fractal internal region. +

+

It shall assume one of the following values: +

+
black
+

Set black mode. +

+
convergence
+

Show time until convergence. +

+
mincol
+

Set color based on point closest to the origin of the iterations. +

+
period
+

Set period mode. +

+
+ +

Default value is mincol. +

+
+
bailout
+

Set the bailout value. Default value is 10.0. +

+
+
maxiter
+

Set the maximum of iterations performed by the rendering +algorithm. Default value is 7189. +

+
+
outer
+

Set outer coloring mode. +It shall assume one of following values: +

+
iteration_count
+

Set iteration count mode. +

+
normalized_iteration_count
+

set normalized iteration count mode. +

+
+

Default value is normalized_iteration_count. +

+
+
rate, r
+

Set frame rate, expressed as number of frames per second. Default +value is "25". +

+
+
size, s
+

Set frame size. For the syntax of this option, check the "Video +size" section in the ffmpeg-utils manual. Default value is "640x480". +

+
+
start_scale
+

Set the initial scale value. Default value is 3.0. +

+
+
start_x
+

Set the initial x position. Must be a floating point value between +-100 and 100. Default value is -0.743643887037158704752191506114774. +

+
+
start_y
+

Set the initial y position. Must be a floating point value between +-100 and 100. Default value is -0.131825904205311970493132056385139. +

+
+ + +

31.4 mptestsrc

+ +

Generate various test patterns, as generated by the MPlayer test filter. +

+

The size of the generated video is fixed, and is 256x256. +This source is useful in particular for testing encoding features. +

+

This source accepts the following options: +

+
+
rate, r
+

Specify the frame rate of the sourced video, as the number of frames +generated per second. It has to be a string in the format +frame_rate_num/frame_rate_den, an integer number, a float +number or a valid video frame rate abbreviation. The default value is +"25". +

+
+
duration, d
+

Set the video duration of the sourced video. The accepted syntax is: +

 
[-]HH:MM:SS[.m...]
+[-]S+[.m...]
+
+

See also the function av_parse_time(). +

+

If not specified, or the expressed duration is negative, the video is +supposed to be generated forever. +

+
+
test, t
+
+

Set the number or the name of the test to perform. Supported tests are: +

+
dc_luma
+
dc_chroma
+
freq_luma
+
freq_chroma
+
amp_luma
+
amp_chroma
+
cbp
+
mv
+
ring1
+
ring2
+
all
+
+ +

Default value is "all", which will cycle through the list of all tests. +

+
+ +

For example the following: +

 
testsrc=t=dc_luma
+
+ +

will generate a "dc_luma" test pattern. +

+ +

31.5 frei0r_src

+ +

Provide a frei0r source. +

+

To enable compilation of this filter you need to install the frei0r +header and configure FFmpeg with --enable-frei0r. +

+

This source accepts the following options: +

+
+
size
+

The size of the video to generate. For the syntax of this option, check the +"Video size" section in the ffmpeg-utils manual. +

+
+
framerate
+

Framerate of the generated video, may be a string of the form +num/den or a frame rate abbreviation. +

+
+
filter_name
+

The name to the frei0r source to load. For more information regarding frei0r and +how to set the parameters read the section frei0r in the description of +the video filters. +

+
+
filter_params
+

A ’|’-separated list of parameters to pass to the frei0r source. +

+
+
+ +

For example, to generate a frei0r partik0l source with size 200x200 +and frame rate 10 which is overlayed on the overlay filter main input: +

 
frei0r_src=size=200x200:framerate=10:filter_name=partik0l:filter_params=1234 [overlay]; [in][overlay] overlay
+
+ + +

31.6 life

+ +

Generate a life pattern. +

+

This source is based on a generalization of John Conway’s life game. +

+

The sourced input represents a life grid, each pixel represents a cell +which can be in one of two possible states, alive or dead. Every cell +interacts with its eight neighbours, which are the cells that are +horizontally, vertically, or diagonally adjacent. +

+

At each interaction the grid evolves according to the adopted rule, +which specifies the number of neighbor alive cells which will make a +cell stay alive or born. The ‘rule’ option allows one to specify +the rule to adopt. +

+

This source accepts the following options: +

+
+
filename, f
+

Set the file from which to read the initial grid state. In the file, +each non-whitespace character is considered an alive cell, and newline +is used to delimit the end of each row. +

+

If this option is not specified, the initial grid is generated +randomly. +

+
+
rate, r
+

Set the video rate, that is the number of frames generated per second. +Default is 25. +

+
+
random_fill_ratio, ratio
+

Set the random fill ratio for the initial random grid. It is a +floating point number value ranging from 0 to 1, defaults to 1/PHI. +It is ignored when a file is specified. +

+
+
random_seed, seed
+

Set the seed for filling the initial random grid, must be an integer +included between 0 and UINT32_MAX. If not specified, or if explicitly +set to -1, the filter will try to use a good random seed on a best +effort basis. +

+
+
rule
+

Set the life rule. +

+

A rule can be specified with a code of the kind "SNS/BNB", +where NS and NB are sequences of numbers in the range 0-8, +NS specifies the number of alive neighbor cells which make a +live cell stay alive, and NB the number of alive neighbor cells +which make a dead cell to become alive (i.e. to "born"). +"s" and "b" can be used in place of "S" and "B", respectively. +

+

Alternatively a rule can be specified by an 18-bits integer. The 9 +high order bits are used to encode the next cell state if it is alive +for each number of neighbor alive cells, the low order bits specify +the rule for "borning" new cells. Higher order bits encode for an +higher number of neighbor cells. +For example the number 6153 = (12<<9)+9 specifies a stay alive +rule of 12 and a born rule of 9, which corresponds to "S23/B03". +

+

Default value is "S23/B3", which is the original Conway’s game of life +rule, and will keep a cell alive if it has 2 or 3 neighbor alive +cells, and will born a new cell if there are three alive cells around +a dead cell. +

+
+
size, s
+

Set the size of the output video. For the syntax of this option, check the +"Video size" section in the ffmpeg-utils manual. +

+

If ‘filename’ is specified, the size is set by default to the +same size of the input file. If ‘size’ is set, it must contain +the size specified in the input file, and the initial grid defined in +that file is centered in the larger resulting area. +

+

If a filename is not specified, the size value defaults to "320x240" +(used for a randomly generated initial grid). +

+
+
stitch
+

If set to 1, stitch the left and right grid edges together, and the +top and bottom edges also. Defaults to 1. +

+
+
mold
+

Set cell mold speed. If set, a dead cell will go from ‘death_color’ to +‘mold_color’ with a step of ‘mold’. ‘mold’ can have a +value from 0 to 255. +

+
+
life_color
+

Set the color of living (or new born) cells. +

+
+
death_color
+

Set the color of dead cells. If ‘mold’ is set, this is the first color +used to represent a dead cell. +

+
+
mold_color
+

Set mold color, for definitely dead and moldy cells. +

+

For the syntax of these 3 color options, check the "Color" section in the +ffmpeg-utils manual. +

+
+ + +

31.6.1 Examples

+ +
    +
  • +Read a grid from ‘pattern’, and center it on a grid of size +300x300 pixels: +
     
    life=f=pattern:s=300x300
    +
    + +
  • +Generate a random grid of size 200x200, with a fill ratio of 2/3: +
     
    life=ratio=2/3:s=200x200
    +
    + +
  • +Specify a custom rule for evolving a randomly generated grid: +
     
    life=rule=S14/B34
    +
    + +
  • +Full example with slow death effect (mold) using ffplay: +
     
    ffplay -f lavfi life=s=300x200:mold=10:r=60:ratio=0.1:death_color=#C83232:life_color=#00ff00,scale=1200:800:flags=16
    +
    +
+ +

+ + + + + + +

+

31.7 color, haldclutsrc, nullsrc, rgbtestsrc, smptebars, smptehdbars, testsrc

+ +

The color source provides a uniformly colored input. +

+

The haldclutsrc source provides an identity Hald CLUT. See also +haldclut filter. +

+

The nullsrc source returns unprocessed video frames. It is +mainly useful to be employed in analysis / debugging tools, or as the +source for filters which ignore the input data. +

+

The rgbtestsrc source generates an RGB test pattern useful for +detecting RGB vs BGR issues. You should see a red, green and blue +stripe from top to bottom. +

+

The smptebars source generates a color bars pattern, based on +the SMPTE Engineering Guideline EG 1-1990. +

+

The smptehdbars source generates a color bars pattern, based on +the SMPTE RP 219-2002. +

+

The testsrc source generates a test video pattern, showing a +color pattern, a scrolling gradient and a timestamp. This is mainly +intended for testing purposes. +

+

The sources accept the following options: +

+
+
color, c
+

Specify the color of the source, only available in the color +source. For the syntax of this option, check the "Color" section in the +ffmpeg-utils manual. +

+
+
level
+

Specify the level of the Hald CLUT, only available in the haldclutsrc +source. A level of N generates a picture of N*N*N by N*N*N +pixels to be used as identity matrix for 3D lookup tables. Each component is +coded on a 1/(N*N) scale. +

+
+
size, s
+

Specify the size of the sourced video. For the syntax of this option, check the +"Video size" section in the ffmpeg-utils manual. The default value is +"320x240". +

+

This option is not available with the haldclutsrc filter. +

+
+
rate, r
+

Specify the frame rate of the sourced video, as the number of frames +generated per second. It has to be a string in the format +frame_rate_num/frame_rate_den, an integer number, a float +number or a valid video frame rate abbreviation. The default value is +"25". +

+
+
sar
+

Set the sample aspect ratio of the sourced video. +

+
+
duration, d
+

Set the video duration of the sourced video. The accepted syntax is: +

 
[-]HH[:MM[:SS[.m...]]]
+[-]S+[.m...]
+
+

See also the function av_parse_time(). +

+

If not specified, or the expressed duration is negative, the video is +supposed to be generated forever. +

+
+
decimals, n
+

Set the number of decimals to show in the timestamp, only available in the +testsrc source. +

+

The displayed timestamp value will correspond to the original +timestamp value multiplied by the power of 10 of the specified +value. Default value is 0. +

+
+ +

For example the following: +

 
testsrc=duration=5.3:size=qcif:rate=10
+
+ +

will generate a video with a duration of 5.3 seconds, with size +176x144 and a frame rate of 10 frames per second. +

+

The following graph description will generate a red source +with an opacity of 0.2, with size "qcif" and a frame rate of 10 +frames per second. +

 
color=c=red@0.2:s=qcif:r=10
+
+ +

If the input content is to be ignored, nullsrc can be used. The +following command generates noise in the luminance plane by employing +the geq filter: +

 
nullsrc=s=256x256, geq=random(1)*255:128:128
+
+ + +

31.7.1 Commands

+ +

The color source supports the following commands: +

+
+
c, color
+

Set the color of the created image. Accepts the same syntax of the +corresponding ‘color’ option. +

+
+ + + +

32. Video Sinks

+ +

Below is a description of the currently available video sinks. +

+ +

32.1 buffersink

+ +

Buffer video frames, and make them available to the end of the filter +graph. +

+

This sink is mainly intended for a programmatic use, in particular +through the interface defined in ‘libavfilter/buffersink.h’ +or the options system. +

+

It accepts a pointer to an AVBufferSinkContext structure, which +defines the incoming buffers’ formats, to be passed as the opaque +parameter to avfilter_init_filter for initialization. +

+ +

32.2 nullsink

+ +

Null video sink, do absolutely nothing with the input video. It is +mainly useful as a template and to be employed in analysis / debugging +tools. +

+ + +

33. Multimedia Filters

+ +

Below is a description of the currently available multimedia filters. +

+ +

33.1 avectorscope

+ +

Convert input audio to a video output, representing the audio vector +scope. +

+

The filter is used to measure the difference between channels of a stereo +audio stream. A monaural signal, consisting of identical left and right +signals, results in a straight vertical line. Any stereo separation is visible +as a deviation from this line, creating a Lissajous figure. +If a straight (or nearly straight) horizontal line appears, this +indicates that the left and right channels are out of phase. +

+

The filter accepts the following options: +

+
+
mode, m
+

Set the vectorscope mode. +

+

Available values are: +

+
lissajous
+

Lissajous rotated by 45 degrees. +

+
+
lissajous_xy
+

Same as above but not rotated. +

+
+ +

Default value is ‘lissajous’. +

+
+
size, s
+

Set the video size for the output. For the syntax of this option, check the "Video size" +section in the ffmpeg-utils manual. Default value is 400x400. +

+
+
rate, r
+

Set the output frame rate. Default value is 25. +

+
+
rc
+
gc
+
bc
+

Specify the red, green and blue contrast. Default values are 40, 160 and 80. +Allowed range is [0, 255]. +

+
+
rf
+
gf
+
bf
+

Specify the red, green and blue fade. Default values are 15, 10 and 5. +Allowed range is [0, 255]. +

+
+
zoom
+

Set the zoom factor. Default value is 1. Allowed range is [1, 10]. +

+
+ + +

33.1.1 Examples

+ +
    +
  • +Complete example using ffplay: +
     
    ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
    +             [a] avectorscope=zoom=1.3:rc=2:gc=200:bc=10:rf=1:gf=8:bf=7 [out0]'
    +
    +
+ + +

33.2 concat

+ +

Concatenate audio and video streams, joining them together one after the +other. +

+

The filter works on segments of synchronized video and audio streams. All +segments must have the same number of streams of each type, and that will +also be the number of streams at output. +

+

The filter accepts the following options: +

+
+
n
+

Set the number of segments. Default is 2. +

+
+
v
+

Set the number of output video streams, that is also the number of video +streams in each segment. Default is 1. +

+
+
a
+

Set the number of output audio streams, that is also the number of audio +streams in each segment. Default is 0. +

+
+
unsafe
+

Activate unsafe mode: do not fail if segments have a different format. +

+
+
+ +

The filter has v+a outputs: first v video outputs, then +a audio outputs. +

+

There are nx(v+a) inputs: first the inputs for the first +segment, in the same order as the outputs, then the inputs for the second +segment, etc. +

+

Related streams do not always have exactly the same duration, for various +reasons including codec frame size or sloppy authoring. For that reason, +related synchronized streams (e.g. a video and its audio track) should be +concatenated at once. The concat filter will use the duration of the longest +stream in each segment (except the last one), and if necessary pad shorter +audio streams with silence. +

+

For this filter to work correctly, all segments must start at timestamp 0. +

+

All corresponding streams must have the same parameters in all segments; the +filtering system will automatically select a common pixel format for video +streams, and a common sample format, sample rate and channel layout for +audio streams, but other settings, such as resolution, must be converted +explicitly by the user. +

+

Different frame rates are acceptable but will result in variable frame rate +at output; be sure to configure the output file to handle it. +

+ +

33.2.1 Examples

+ +
    +
  • +Concatenate an opening, an episode and an ending, all in bilingual version +(video in stream 0, audio in streams 1 and 2): +
     
    ffmpeg -i opening.mkv -i episode.mkv -i ending.mkv -filter_complex \
    +  '[0:0] [0:1] [0:2] [1:0] [1:1] [1:2] [2:0] [2:1] [2:2]
    +   concat=n=3:v=1:a=2 [v] [a1] [a2]' \
    +  -map '[v]' -map '[a1]' -map '[a2]' output.mkv
    +
    + +
  • +Concatenate two parts, handling audio and video separately, using the +(a)movie sources, and adjusting the resolution: +
     
    movie=part1.mp4, scale=512:288 [v1] ; amovie=part1.mp4 [a1] ;
    +movie=part2.mp4, scale=512:288 [v2] ; amovie=part2.mp4 [a2] ;
    +[v1] [v2] concat [outv] ; [a1] [a2] concat=v=0:a=1 [outa]
    +
    +

    Note that a desync will happen at the stitch if the audio and video streams +do not have exactly the same duration in the first file. +

    +
+ + +

33.3 ebur128

+ +

EBU R128 scanner filter. This filter takes an audio stream as input and outputs +it unchanged. By default, it logs a message at a frequency of 10Hz with the +Momentary loudness (identified by M), Short-term loudness (S), +Integrated loudness (I) and Loudness Range (LRA). +

+

The filter also has a video output (see the video option) with a real +time graph to observe the loudness evolution. The graphic contains the logged +message mentioned above, so it is not printed anymore when this option is set, +unless the verbose logging is set. The main graphing area contains the +short-term loudness (3 seconds of analysis), and the gauge on the right is for +the momentary loudness (400 milliseconds). +

+

More information about the Loudness Recommendation EBU R128 on +http://tech.ebu.ch/loudness. +

+

The filter accepts the following options: +

+
+
video
+

Activate the video output. The audio stream is passed unchanged whether this +option is set or not. The video stream will be the first output stream if +activated. Default is 0. +

+
+
size
+

Set the video size. This option is for video only. For the syntax of this +option, check the "Video size" section in the ffmpeg-utils manual. Default +and minimum resolution is 640x480. +

+
+
meter
+

Set the EBU scale meter. Default is 9. Common values are 9 and +18, respectively for EBU scale meter +9 and EBU scale meter +18. Any +other integer value between this range is allowed. +

+
+
metadata
+

Set metadata injection. If set to 1, the audio input will be segmented +into 100ms output frames, each of them containing various loudness information +in metadata. All the metadata keys are prefixed with lavfi.r128.. +

+

Default is 0. +

+
+
framelog
+

Force the frame logging level. +

+

Available values are: +

+
info
+

information logging level +

+
verbose
+

verbose logging level +

+
+ +

By default, the logging level is set to info. If the ‘video’ or +the ‘metadata’ options are set, it switches to verbose. +

+
+
peak
+

Set peak mode(s). +

+

Available modes can be cumulated (the option is a flag type). Possible +values are: +

+
none
+

Disable any peak mode (default). +

+
sample
+

Enable sample-peak mode. +

+

Simple peak mode looking for the higher sample value. It logs a message +for sample-peak (identified by SPK). +

+
true
+

Enable true-peak mode. +

+

If enabled, the peak lookup is done on an over-sampled version of the input +stream for better peak accuracy. It logs a message for true-peak. +(identified by TPK) and true-peak per frame (identified by FTPK). +This mode requires a build with libswresample. +

+
+ +
+
+ + +

33.3.1 Examples

+ +
    +
  • +Real-time graph using ffplay, with a EBU scale meter +18: +
     
    ffplay -f lavfi -i "amovie=input.mp3,ebur128=video=1:meter=18 [out0][out1]"
    +
    + +
  • +Run an analysis with ffmpeg: +
     
    ffmpeg -nostats -i input.mp3 -filter_complex ebur128 -f null -
    +
    +
+ + +

33.4 interleave, ainterleave

+ +

Temporally interleave frames from several inputs. +

+

interleave works with video inputs, ainterleave with audio. +

+

These filters read frames from several inputs and send the oldest +queued frame to the output. +

+

Input streams must have well defined, monotonically increasing frame +timestamp values. +

+

In order to submit one frame to output, these filters need to enqueue +at least one frame for each input, so they cannot work in case one +input is not yet terminated and will not receive incoming frames. +

+

For example consider the case when one input is a select filter +which always drop input frames. The interleave filter will keep +reading from that input, but it will never be able to send new frames +to output until the input will send an end-of-stream signal. +

+

Also, depending on inputs synchronization, the filters will drop +frames in case one input receives more frames than the other ones, and +the queue is already filled. +

+

These filters accept the following options: +

+
+
nb_inputs, n
+

Set the number of different inputs, it is 2 by default. +

+
+ + +

33.4.1 Examples

+ +
    +
  • +Interleave frames belonging to different streams using ffmpeg: +
     
    ffmpeg -i bambi.avi -i pr0n.mkv -filter_complex "[0:v][1:v] interleave" out.avi
    +
    + +
  • +Add flickering blur effect: +
     
    select='if(gt(random(0), 0.2), 1, 2)':n=2 [tmp], boxblur=2:2, [tmp] interleave
    +
    +
+ + +

33.5 perms, aperms

+ +

Set read/write permissions for the output frames. +

+

These filters are mainly aimed at developers to test direct path in the +following filter in the filtergraph. +

+

The filters accept the following options: +

+
+
mode
+

Select the permissions mode. +

+

It accepts the following values: +

+
none
+

Do nothing. This is the default. +

+
ro
+

Set all the output frames read-only. +

+
rw
+

Set all the output frames directly writable. +

+
toggle
+

Make the frame read-only if writable, and writable if read-only. +

+
random
+

Set each output frame read-only or writable randomly. +

+
+ +
+
seed
+

Set the seed for the random mode, must be an integer included between +0 and UINT32_MAX. If not specified, or if explicitly set to +-1, the filter will try to use a good random seed on a best effort +basis. +

+
+ +

Note: in case of auto-inserted filter between the permission filter and the +following one, the permission might not be received as expected in that +following filter. Inserting a format or aformat filter before the +perms/aperms filter can avoid this problem. +

+ +

33.6 select, aselect

+ +

Select frames to pass in output. +

+

This filter accepts the following options: +

+
+
expr, e
+

Set expression, which is evaluated for each input frame. +

+

If the expression is evaluated to zero, the frame is discarded. +

+

If the evaluation result is negative or NaN, the frame is sent to the +first output; otherwise it is sent to the output with index +ceil(val)-1, assuming that the input index starts from 0. +

+

For example a value of 1.2 corresponds to the output with index +ceil(1.2)-1 = 2-1 = 1, that is the second output. +

+
+
outputs, n
+

Set the number of outputs. The output to which to send the selected +frame is based on the result of the evaluation. Default value is 1. +

+
+ +

The expression can contain the following constants: +

+
+
n
+

the sequential number of the filtered frame, starting from 0 +

+
+
selected_n
+

the sequential number of the selected frame, starting from 0 +

+
+
prev_selected_n
+

the sequential number of the last selected frame, NAN if undefined +

+
+
TB
+

timebase of the input timestamps +

+
+
pts
+

the PTS (Presentation TimeStamp) of the filtered video frame, +expressed in TB units, NAN if undefined +

+
+
t
+

the PTS (Presentation TimeStamp) of the filtered video frame, +expressed in seconds, NAN if undefined +

+
+
prev_pts
+

the PTS of the previously filtered video frame, NAN if undefined +

+
+
prev_selected_pts
+

the PTS of the last previously filtered video frame, NAN if undefined +

+
+
prev_selected_t
+

the PTS of the last previously selected video frame, NAN if undefined +

+
+
start_pts
+

the PTS of the first video frame in the video, NAN if undefined +

+
+
start_t
+

the time of the first video frame in the video, NAN if undefined +

+
+
pict_type (video only)
+

the type of the filtered frame, can assume one of the following +values: +

+
I
+
P
+
B
+
S
+
SI
+
SP
+
BI
+
+ +
+
interlace_type (video only)
+

the frame interlace type, can assume one of the following values: +

+
PROGRESSIVE
+

the frame is progressive (not interlaced) +

+
TOPFIRST
+

the frame is top-field-first +

+
BOTTOMFIRST
+

the frame is bottom-field-first +

+
+ +
+
consumed_sample_n (audio only)
+

the number of selected samples before the current frame +

+
+
samples_n (audio only)
+

the number of samples in the current frame +

+
+
sample_rate (audio only)
+

the input sample rate +

+
+
key
+

1 if the filtered frame is a key-frame, 0 otherwise +

+
+
pos
+

the position in the file of the filtered frame, -1 if the information +is not available (e.g. for synthetic video) +

+
+
scene (video only)
+

value between 0 and 1 to indicate a new scene; a low value reflects a low +probability for the current frame to introduce a new scene, while a higher +value means the current frame is more likely to be one (see the example below) +

+
+
+ +

The default value of the select expression is "1". +

+ +

33.6.1 Examples

+ +
    +
  • +Select all frames in input: +
     
    select
    +
    + +

    The example above is the same as: +

     
    select=1
    +
    + +
  • +Skip all frames: +
     
    select=0
    +
    + +
  • +Select only I-frames: +
     
    select='eq(pict_type\,I)'
    +
    + +
  • +Select one frame every 100: +
     
    select='not(mod(n\,100))'
    +
    + +
  • +Select only frames contained in the 10-20 time interval: +
     
    select=between(t\,10\,20)
    +
    + +
  • +Select only I frames contained in the 10-20 time interval: +
     
    select=between(t\,10\,20)*eq(pict_type\,I)
    +
    + +
  • +Select frames with a minimum distance of 10 seconds: +
     
    select='isnan(prev_selected_t)+gte(t-prev_selected_t\,10)'
    +
    + +
  • +Use aselect to select only audio frames with samples number > 100: +
     
    aselect='gt(samples_n\,100)'
    +
    + +
  • +Create a mosaic of the first scenes: +
     
    ffmpeg -i video.avi -vf select='gt(scene\,0.4)',scale=160:120,tile -frames:v 1 preview.png
    +
    + +

    Comparing scene against a value between 0.3 and 0.5 is generally a sane +choice. +

    +
  • +Send even and odd frames to separate outputs, and compose them: +
     
    select=n=2:e='mod(n, 2)+1' [odd][even]; [odd] pad=h=2*ih [tmp]; [tmp][even] overlay=y=h
    +
    +
+ + +

33.7 sendcmd, asendcmd

+ +

Send commands to filters in the filtergraph. +

+

These filters read commands to be sent to other filters in the +filtergraph. +

+

sendcmd must be inserted between two video filters, +asendcmd must be inserted between two audio filters, but apart +from that they act the same way. +

+

The specification of commands can be provided in the filter arguments +with the commands option, or in a file specified by the +filename option. +

+

These filters accept the following options: +

+
commands, c
+

Set the commands to be read and sent to the other filters. +

+
filename, f
+

Set the filename of the commands to be read and sent to the other +filters. +

+
+ + +

33.7.1 Commands syntax

+ +

A commands description consists of a sequence of interval +specifications, comprising a list of commands to be executed when a +particular event related to that interval occurs. The occurring event +is typically the current frame time entering or leaving a given time +interval. +

+

An interval is specified by the following syntax: +

 
START[-END] COMMANDS;
+
+ +

The time interval is specified by the START and END times. +END is optional and defaults to the maximum time. +

+

The current frame time is considered within the specified interval if +it is included in the interval [START, END), that is when +the time is greater or equal to START and is lesser than +END. +

+

COMMANDS consists of a sequence of one or more command +specifications, separated by ",", relating to that interval. The +syntax of a command specification is given by: +

 
[FLAGS] TARGET COMMAND ARG
+
+ +

FLAGS is optional and specifies the type of events relating to +the time interval which enable sending the specified command, and must +be a non-null sequence of identifier flags separated by "+" or "|" and +enclosed between "[" and "]". +

+

The following flags are recognized: +

+
enter
+

The command is sent when the current frame timestamp enters the +specified interval. In other words, the command is sent when the +previous frame timestamp was not in the given interval, and the +current is. +

+
+
leave
+

The command is sent when the current frame timestamp leaves the +specified interval. In other words, the command is sent when the +previous frame timestamp was in the given interval, and the +current is not. +

+
+ +

If FLAGS is not specified, a default value of [enter] is +assumed. +

+

TARGET specifies the target of the command, usually the name of +the filter class or a specific filter instance name. +

+

COMMAND specifies the name of the command for the target filter. +

+

ARG is optional and specifies the optional list of argument for +the given COMMAND. +

+

Between one interval specification and another, whitespaces, or +sequences of characters starting with # until the end of line, +are ignored and can be used to annotate comments. +

+

A simplified BNF description of the commands specification syntax +follows: +

 
COMMAND_FLAG  ::= "enter" | "leave"
+COMMAND_FLAGS ::= COMMAND_FLAG [(+|"|")COMMAND_FLAG]
+COMMAND       ::= ["[" COMMAND_FLAGS "]"] TARGET COMMAND [ARG]
+COMMANDS      ::= COMMAND [,COMMANDS]
+INTERVAL      ::= START[-END] COMMANDS
+INTERVALS     ::= INTERVAL[;INTERVALS]
+
+ + +

33.7.2 Examples

+ +
    +
  • +Specify audio tempo change at second 4: +
     
    asendcmd=c='4.0 atempo tempo 1.5',atempo
    +
    + +
  • +Specify a list of drawtext and hue commands in a file. +
     
    # show text in the interval 5-10
    +5.0-10.0 [enter] drawtext reinit 'fontfile=FreeSerif.ttf:text=hello world',
    +         [leave] drawtext reinit 'fontfile=FreeSerif.ttf:text=';
    +
    +# desaturate the image in the interval 15-20
    +15.0-20.0 [enter] hue s 0,
    +          [enter] drawtext reinit 'fontfile=FreeSerif.ttf:text=nocolor',
    +          [leave] hue s 1,
    +          [leave] drawtext reinit 'fontfile=FreeSerif.ttf:text=color';
    +
    +# apply an exponential saturation fade-out effect, starting from time 25
    +25 [enter] hue s exp(25-t)
    +
    + +

    A filtergraph allowing to read and process the above command list +stored in a file ‘test.cmd’, can be specified with: +

     
    sendcmd=f=test.cmd,drawtext=fontfile=FreeSerif.ttf:text='',hue
    +
    +
+ +

+

+

33.8 setpts, asetpts

+ +

Change the PTS (presentation timestamp) of the input frames. +

+

setpts works on video frames, asetpts on audio frames. +

+

This filter accepts the following options: +

+
+
expr
+

The expression which is evaluated for each frame to construct its timestamp. +

+
+
+ +

The expression is evaluated through the eval API and can contain the following +constants: +

+
+
FRAME_RATE
+

frame rate, only defined for constant frame-rate video +

+
+
PTS
+

the presentation timestamp in input +

+
+
N
+

the count of the input frame for video or the number of consumed samples, +not including the current frame for audio, starting from 0. +

+
+
NB_CONSUMED_SAMPLES
+

the number of consumed samples, not including the current frame (only +audio) +

+
+
NB_SAMPLES, S
+

the number of samples in the current frame (only audio) +

+
+
SAMPLE_RATE, SR
+

audio sample rate +

+
+
STARTPTS
+

the PTS of the first frame +

+
+
STARTT
+

the time in seconds of the first frame +

+
+
INTERLACED
+

tell if the current frame is interlaced +

+
+
T
+

the time in seconds of the current frame +

+
+
POS
+

original position in the file of the frame, or undefined if undefined +for the current frame +

+
+
PREV_INPTS
+

previous input PTS +

+
+
PREV_INT
+

previous input time in seconds +

+
+
PREV_OUTPTS
+

previous output PTS +

+
+
PREV_OUTT
+

previous output time in seconds +

+
+
RTCTIME
+

wallclock (RTC) time in microseconds. This is deprecated, use time(0) +instead. +

+
+
RTCSTART
+

wallclock (RTC) time at the start of the movie in microseconds +

+
+
TB
+

timebase of the input timestamps +

+
+
+ + +

33.8.1 Examples

+ +
    +
  • +Start counting PTS from zero +
     
    setpts=PTS-STARTPTS
    +
    + +
  • +Apply fast motion effect: +
     
    setpts=0.5*PTS
    +
    + +
  • +Apply slow motion effect: +
     
    setpts=2.0*PTS
    +
    + +
  • +Set fixed rate of 25 frames per second: +
     
    setpts=N/(25*TB)
    +
    + +
  • +Set fixed rate 25 fps with some jitter: +
     
    setpts='1/(25*TB) * (N + 0.05 * sin(N*2*PI/25))'
    +
    + +
  • +Apply an offset of 10 seconds to the input PTS: +
     
    setpts=PTS+10/TB
    +
    + +
  • +Generate timestamps from a "live source" and rebase onto the current timebase: +
     
    setpts='(RTCTIME - RTCSTART) / (TB * 1000000)'
    +
    + +
  • +Generate timestamps by counting samples: +
     
    asetpts=N/SR/TB
    +
    + +
+ + +

33.9 settb, asettb

+ +

Set the timebase to use for the output frames timestamps. +It is mainly useful for testing timebase configuration. +

+

This filter accepts the following options: +

+
+
expr, tb
+

The expression which is evaluated into the output timebase. +

+
+
+ +

The value for ‘tb’ is an arithmetic expression representing a +rational. The expression can contain the constants "AVTB" (the default +timebase), "intb" (the input timebase) and "sr" (the sample rate, +audio only). Default value is "intb". +

+ +

33.9.1 Examples

+ +
    +
  • +Set the timebase to 1/25: +
     
    settb=expr=1/25
    +
    + +
  • +Set the timebase to 1/10: +
     
    settb=expr=0.1
    +
    + +
  • +Set the timebase to 1001/1000: +
     
    settb=1+0.001
    +
    + +
  • +Set the timebase to 2*intb: +
     
    settb=2*intb
    +
    + +
  • +Set the default timebase value: +
     
    settb=AVTB
    +
    +
+ + +

33.10 showspectrum

+ +

Convert input audio to a video output, representing the audio frequency +spectrum. +

+

The filter accepts the following options: +

+
+
size, s
+

Specify the video size for the output. For the syntax of this option, check +the "Video size" section in the ffmpeg-utils manual. Default value is +640x512. +

+
+
slide
+

Specify if the spectrum should slide along the window. Default value is +0. +

+
+
mode
+

Specify display mode. +

+

It accepts the following values: +

+
combined
+

all channels are displayed in the same row +

+
separate
+

all channels are displayed in separate rows +

+
+ +

Default value is ‘combined’. +

+
+
color
+

Specify display color mode. +

+

It accepts the following values: +

+
channel
+

each channel is displayed in a separate color +

+
intensity
+


each channel is displayed using the same color scheme +

+
+ +

Default value is ‘channel’. +

+
+
scale
+

Specify scale used for calculating intensity color values. +

+

It accepts the following values: +

+
lin
+

linear +

+
sqrt
+

square root, default +

+
cbrt
+

cubic root +

+
log
+

logarithmic +

+
+ +

Default value is ‘sqrt’. +

+
+
saturation
+

Set saturation modifier for displayed colors. Negative values provide +alternative color scheme. 0 is no saturation at all. +Saturation must be in [-10.0, 10.0] range. +Default value is 1. +

+
+
win_func
+

Set window function. +

+

It accepts the following values: +

+
none
+

No samples pre-processing (do not expect this to be faster) +

+
hann
+

Hann window +

+
hamming
+

Hamming window +

+
blackman
+

Blackman window +

+
+ +

Default value is hann. +

+
+ +

The usage is very similar to the showwaves filter; see the examples in that +section. +

+ +

33.10.1 Examples

+ +
    +
  • +Large window with logarithmic color scaling: +
     
    showspectrum=s=1280x480:scale=log
    +
    + +
  • +Complete example for a colored and sliding spectrum per channel using ffplay: +
     
    ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
    +             [a] showspectrum=mode=separate:color=intensity:slide=1:scale=cbrt [out0]'
    +
    +
+ + +

33.11 showwaves

+ +

Convert input audio to a video output, representing the samples waves. +

+

The filter accepts the following options: +

+
+
size, s
+

Specify the video size for the output. For the syntax of this option, check +the "Video size" section in the ffmpeg-utils manual. Default value +is "600x240". +

+
+
mode
+

Set display mode. +

+

Available values are: +

+
point
+

Draw a point for each sample. +

+
+
line
+

Draw a vertical line for each sample. +

+
+ +

Default value is point. +

+
+
n
+

Set the number of samples which are printed on the same column. A +larger value will decrease the frame rate. Must be a positive +integer. This option can be set only if the value for rate +is not explicitly specified. +

+
+
rate, r
+

Set the (approximate) output frame rate. This is done by setting the +option n. Default value is "25". +

+
+
+ + +

33.11.1 Examples

+ +
    +
  • +Output the input file audio and the corresponding video representation +at the same time: +
     
    amovie=a.mp3,asplit[out0],showwaves[out1]
    +
    + +
  • +Create a synthetic signal and show it with showwaves, forcing a +frame rate of 30 frames per second: +
     
    aevalsrc=sin(1*2*PI*t)*sin(880*2*PI*t):cos(2*PI*200*t),asplit[out0],showwaves=r=30[out1]
    +
    +
+ + +

33.12 split, asplit

+ +

Split input into several identical outputs. +

+

asplit works with audio input, split with video. +

+

The filter accepts a single parameter which specifies the number of outputs. If +unspecified, it defaults to 2. +

+ +

33.12.1 Examples

+ +
    +
  • +Create two separate outputs from the same input: +
     
    [in] split [out0][out1]
    +
    + +
  • +To create 3 or more outputs, you need to specify the number of +outputs, like in: +
     
    [in] asplit=3 [out0][out1][out2]
    +
    + +
  • +Create two separate outputs from the same input, one cropped and +one padded: +
     
    [in] split [splitout1][splitout2];
    +[splitout1] crop=100:100:0:0    [cropout];
    +[splitout2] pad=200:200:100:100 [padout];
    +
    + +
  • +Create 5 copies of the input audio with ffmpeg: +
     
    ffmpeg -i INPUT -filter_complex asplit=5 OUTPUT
    +
    +
+ + +

33.13 zmq, azmq

+ +

Receive commands sent through a libzmq client, and forward them to +filters in the filtergraph. +

+

zmq and azmq work as a pass-through filters. zmq +must be inserted between two video filters, azmq between two +audio filters. +

+

To enable these filters you need to install the libzmq library and +headers and configure FFmpeg with --enable-libzmq. +

+

For more information about libzmq see: +http://www.zeromq.org/ +

+

The zmq and azmq filters work as a libzmq server, which +receives messages sent through a network interface defined by the +‘bind_address’ option. +

+

The received message must be in the form: +

 
TARGET COMMAND [ARG]
+
+ +

TARGET specifies the target of the command, usually the name of +the filter class or a specific filter instance name. +

+

COMMAND specifies the name of the command for the target filter. +

+

ARG is optional and specifies the optional argument list for the +given COMMAND. +

+

Upon reception, the message is processed and the corresponding command +is injected into the filtergraph. Depending on the result, the filter +will send a reply to the client, adopting the format: +

 
ERROR_CODE ERROR_REASON
+MESSAGE
+
+ +

MESSAGE is optional. +

+ +

33.13.1 Examples

+ +

Look at ‘tools/zmqsend’ for an example of a zmq client which can +be used to send commands processed by these filters. +

+

Consider the following filtergraph generated by ffplay +

 
ffplay -dumpgraph 1 -f lavfi "
+color=s=100x100:c=red  [l];
+color=s=100x100:c=blue [r];
+nullsrc=s=200x100, zmq [bg];
+[bg][l]   overlay      [bg+l];
+[bg+l][r] overlay=x=100 "
+
+ +

To change the color of the left side of the video, the following +command can be used: +

 
echo Parsed_color_0 c yellow | tools/zmqsend
+
+ +

To change the right side: +

 
echo Parsed_color_1 c pink | tools/zmqsend
+
+ + + +

34. Multimedia Sources

+ +

Below is a description of the currently available multimedia sources. +

+ +

34.1 amovie

+ +

This is the same as movie source, except it selects an audio +stream by default. +

+

+

+

34.2 movie

+ +

Read audio and/or video stream(s) from a movie container. +

+

This filter accepts the following options: +

+
+
filename
+

The name of the resource to read (not necessarily a file but also a device or a +stream accessed through some protocol). +

+
+
format_name, f
+

Specifies the format assumed for the movie to read, and can be either +the name of a container or an input device. If not specified the +format is guessed from movie_name or by probing. +

+
+
seek_point, sp
+

Specifies the seek point in seconds, the frames will be output +starting from this seek point, the parameter is evaluated with +av_strtod so the numerical value may be suffixed by an IS +postfix. Default value is "0". +

+
+
streams, s
+

Specifies the streams to read. Several streams can be specified, +separated by "+". The source will then have as many outputs, in the +same order. The syntax is explained in the “Stream specifiers” +section in the ffmpeg manual. Two special names, "dv" and "da" specify +respectively the default (best suited) video and audio stream. Default +is "dv", or "da" if the filter is called as "amovie". +

+
+
stream_index, si
+

Specifies the index of the video stream to read. If the value is -1, +the best suited video stream will be automatically selected. Default +value is "-1". Deprecated. If the filter is called "amovie", it will select +audio instead of video. +

+
+
loop
+

Specifies how many times to read the stream in sequence. +If the value is less than 1, the stream will be read again and again. +Default value is "1". +

+

Note that when the movie is looped the source timestamps are not +changed, so it will generate non monotonically increasing timestamps. +

+
+ +

This filter allows one to overlay a second video on top of main input of +a filtergraph as shown in this graph: +

 
input -----------> deltapts0 --> overlay --> output
+                                    ^
+                                    |
+movie --> scale--> deltapts1 -------+
+
+ + +

34.2.1 Examples

+ +
    +
  • +Skip 3.2 seconds from the start of the avi file in.avi, and overlay it +on top of the input labelled as "in": +
     
    movie=in.avi:seek_point=3.2, scale=180:-1, setpts=PTS-STARTPTS [over];
    +[in] setpts=PTS-STARTPTS [main];
    +[main][over] overlay=16:16 [out]
    +
    + +
  • +Read from a video4linux2 device, and overlay it on top of the input +labelled as "in": +
     
    movie=/dev/video0:f=video4linux2, scale=180:-1, setpts=PTS-STARTPTS [over];
    +[in] setpts=PTS-STARTPTS [main];
    +[main][over] overlay=16:16 [out]
    +
    + +
  • +Read the first video stream and the audio stream with id 0x81 from +dvd.vob; the video is connected to the pad named "video" and the audio is +connected to the pad named "audio": +
     
    movie=dvd.vob:s=v:0+#0x81 [video] [audio]
    +
    +
+ + + +

35. See Also

+ +

ffprobe, +ffmpeg, ffplay, ffserver, +ffmpeg-utils, +ffmpeg-scaler, +ffmpeg-resampler, +ffmpeg-codecs, +ffmpeg-bitstream-filters, +ffmpeg-formats, +ffmpeg-devices, +ffmpeg-protocols, +ffmpeg-filters +

+ + +

36. Authors

+ +

The FFmpeg developers. +

+

For details about the authorship, see the Git history of the project +(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command +git log in the FFmpeg source directory, or browsing the +online repository at http://source.ffmpeg.org. +

+

Maintainers for the specific components are listed in the file +‘MAINTAINERS’ in the source code tree. +

+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/ffprobe.html b/dependencies64/ffmpeg/doc/ffprobe.html new file mode 100644 index 000000000..3a3b8fcd8 --- /dev/null +++ b/dependencies64/ffmpeg/doc/ffprobe.html @@ -0,0 +1,1060 @@ + + + + + +FFmpeg documentation : ffprobe + + + + + + + + + + +
+
+ + +

ffprobe Documentation

+ + +

Table of Contents

+ + + +

1. Synopsis

+ +

ffprobe [options] [‘input_file’] +

+ +

2. Description

+ +

ffprobe gathers information from multimedia streams and prints it in +human- and machine-readable fashion. +

+

For example it can be used to check the format of the container used +by a multimedia stream and the format and type of each media stream +contained in it. +

+

If a filename is specified in input, ffprobe will try to open and +probe the file content. If the file cannot be opened or recognized as +a multimedia file, a positive exit code is returned. +

+

ffprobe may be employed both as a standalone application or in +combination with a textual filter, which may perform more +sophisticated processing, e.g. statistical processing or plotting. +

+

Options are used to list some of the formats supported by ffprobe or +for specifying which information to display, and for setting how +ffprobe will show it. +

+

ffprobe output is designed to be easily parsable by a textual filter, +and consists of one or more sections of a form defined by the selected +writer, which is specified by the ‘print_format’ option. +

+

Sections may contain other nested sections, and are identified by a +name (which may be shared by other sections), and an unique +name. See the output of ‘sections’. +

+

Metadata tags stored in the container or in the streams are recognized +and printed in the corresponding "FORMAT", "STREAM" or "PROGRAM_STREAM" +section. +

+ + +

3. Options

+ +

All the numerical options, if not specified otherwise, accept a string +representing a number as input, which may be followed by one of the SI +unit prefixes, for example: ’K’, ’M’, or ’G’. +

+

If ’i’ is appended to the SI unit prefix, the complete prefix will be +interpreted as a unit prefix for binary multiplies, which are based on +powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit +prefix multiplies the value by 8. This allows using, for example: +’KB’, ’MiB’, ’G’ and ’B’ as number suffixes. +

+

Options which do not take arguments are boolean options, and set the +corresponding value to true. They can be set to false by prefixing +the option name with "no". For example using "-nofoo" +will set the boolean option with name "foo" to false. +

+

+

+

3.1 Stream specifiers

+

Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers +are used to precisely specify which stream(s) a given option belongs to. +

+

A stream specifier is a string generally appended to the option name and +separated from it by a colon. E.g. -codec:a:1 ac3 contains the +a:1 stream specifier, which matches the second audio stream. Therefore, it +would select the ac3 codec for the second audio stream. +

+

A stream specifier can match several streams, so that the option is applied to all +of them. E.g. the stream specifier in -b:a 128k matches all audio +streams. +

+

An empty stream specifier matches all streams. For example, -codec copy +or -codec: copy would copy all the streams without reencoding. +

+

Possible forms of stream specifiers are: +

+
stream_index
+

Matches the stream with this index. E.g. -threads:1 4 would set the +thread count for the second stream to 4. +

+
stream_type[:stream_index]
+

stream_type is one of following: ’v’ for video, ’a’ for audio, ’s’ for subtitle, +’d’ for data, and ’t’ for attachments. If stream_index is given, then it matches +stream number stream_index of this type. Otherwise, it matches all +streams of this type. +

+
p:program_id[:stream_index]
+

If stream_index is given, then it matches the stream with number stream_index +in the program with the id program_id. Otherwise, it matches all streams in the +program. +

+
#stream_id
+

Matches the stream by a format-specific ID. +

+
+ + +

3.2 Generic options

+ +

These options are shared amongst the ff* tools. +

+
+
-L
+

Show license. +

+
+
-h, -?, -help, --help [arg]
+

Show help. An optional parameter may be specified to print help about a specific +item. If no argument is specified, only basic (non advanced) tool +options are shown. +

+

Possible values of arg are: +

+
long
+

Print advanced tool options in addition to the basic tool options. +

+
+
full
+

Print complete list of options, including shared and private options +for encoders, decoders, demuxers, muxers, filters, etc. +

+
+
decoder=decoder_name
+

Print detailed information about the decoder named decoder_name. Use the +‘-decoders’ option to get a list of all decoders. +

+
+
encoder=encoder_name
+

Print detailed information about the encoder named encoder_name. Use the +‘-encoders’ option to get a list of all encoders. +

+
+
demuxer=demuxer_name
+

Print detailed information about the demuxer named demuxer_name. Use the +‘-formats’ option to get a list of all demuxers and muxers. +

+
+
muxer=muxer_name
+

Print detailed information about the muxer named muxer_name. Use the +‘-formats’ option to get a list of all muxers and demuxers. +

+
+
filter=filter_name
+

Print detailed information about the filter named filter_name. Use the +‘-filters’ option to get a list of all filters. +

+
+ +
+
-version
+

Show version. +

+
+
-formats
+

Show available formats. +

+
+
-codecs
+

Show all codecs known to libavcodec. +

+

Note that the term ’codec’ is used throughout this documentation as a shortcut +for what is more correctly called a media bitstream format. +

+
+
-decoders
+

Show available decoders. +

+
+
-encoders
+

Show all available encoders. +

+
+
-bsfs
+

Show available bitstream filters. +

+
+
-protocols
+

Show available protocols. +

+
+
-filters
+

Show available libavfilter filters. +

+
+
-pix_fmts
+

Show available pixel formats. +

+
+
-sample_fmts
+

Show available sample formats. +

+
+
-layouts
+

Show channel names and standard channel layouts. +

+
+
-colors
+

Show recognized color names. +

+
+
-loglevel [repeat+]loglevel | -v [repeat+]loglevel
+

Set the logging level used by the library. +Adding "repeat+" indicates that repeated log output should not be compressed +to the first line and the "Last message repeated n times" line will be +omitted. "repeat" can also be used alone. +If "repeat" is used alone, and with no prior loglevel set, the default +loglevel will be used. If multiple loglevel parameters are given, using +’repeat’ will not change the loglevel. +loglevel is a number or a string containing one of the following values: +

+
quiet
+

Show nothing at all; be silent. +

+
panic
+

Only show fatal errors which could lead the process to crash, such as +an assert failure. This is not currently used for anything. +

+
fatal
+

Only show fatal errors. These are errors after which the process absolutely +cannot continue. +

+
error
+

Show all errors, including ones which can be recovered from. +

+
warning
+

Show all warnings and errors. Any message related to possibly +incorrect or unexpected events will be shown. +

+
info
+

Show informative messages during processing. This is in addition to +warnings and errors. This is the default value. +

+
verbose
+

Same as info, except more verbose. +

+
debug
+

Show everything, including debugging information. +

+
+ +

By default the program logs to stderr, if coloring is supported by the +terminal, colors are used to mark errors and warnings. Log coloring +can be disabled setting the environment variable +AV_LOG_FORCE_NOCOLOR or NO_COLOR, or can be forced setting +the environment variable AV_LOG_FORCE_COLOR. +The use of the environment variable NO_COLOR is deprecated and +will be dropped in a following FFmpeg version. +

+
+
-report
+

Dump full command line and console output to a file named +program-YYYYMMDD-HHMMSS.log in the current +directory. +This file can be useful for bug reports. +It also implies -loglevel verbose. +

+

Setting the environment variable FFREPORT to any value has the +same effect. If the value is a ’:’-separated key=value sequence, these +options will affect the report; options values must be escaped if they +contain special characters or the options delimiter ’:’ (see the +“Quoting and escaping” section in the ffmpeg-utils manual). The +following option is recognized: +

+
file
+

set the file name to use for the report; %p is expanded to the name +of the program, %t is expanded to a timestamp, %% is expanded +to a plain % +

+
+ +

Errors in parsing the environment variable are not fatal, and will not +appear in the report. +

+
+
-hide_banner
+

Suppress printing banner. +

+

All FFmpeg tools will normally show a copyright notice, build options +and library versions. This option can be used to suppress printing +this information. +

+
+
-cpuflags flags (global)
+

Allows setting and clearing cpu flags. This option is intended +for testing. Do not use it unless you know what you’re doing. +

 
ffmpeg -cpuflags -sse+mmx ...
+ffmpeg -cpuflags mmx ...
+ffmpeg -cpuflags 0 ...
+
+

Possible flags for this option are: +

+
x86
+
+
mmx
+
mmxext
+
sse
+
sse2
+
sse2slow
+
sse3
+
sse3slow
+
ssse3
+
atom
+
sse4.1
+
sse4.2
+
avx
+
xop
+
fma4
+
3dnow
+
3dnowext
+
cmov
+
+
+
ARM
+
+
armv5te
+
armv6
+
armv6t2
+
vfp
+
vfpv3
+
neon
+
+
+
PowerPC
+
+
altivec
+
+
+
Specific Processors
+
+
pentium2
+
pentium3
+
pentium4
+
k6
+
k62
+
athlon
+
athlonxp
+
k8
+
+
+
+ +
+
-opencl_bench
+

Benchmark all available OpenCL devices and show the results. This option +is only available when FFmpeg has been compiled with --enable-opencl. +

+
+
-opencl_options options (global)
+

Set OpenCL environment options. This option is only available when +FFmpeg has been compiled with --enable-opencl. +

+

options must be a list of key=value option pairs +separated by ’:’. See the “OpenCL Options” section in the +ffmpeg-utils manual for the list of supported options. +

+
+ + +

3.3 AVOptions

+ +

These options are provided directly by the libavformat, libavdevice and +libavcodec libraries. To see the list of available AVOptions, use the +‘-help’ option. They are separated into two categories: +

+
generic
+

These options can be set for any container, codec or device. Generic options +are listed under AVFormatContext options for containers/devices and under +AVCodecContext options for codecs. +

+
private
+

These options are specific to the given container, device or codec. Private +options are listed under their corresponding containers/devices/codecs. +

+
+ +

For example to write an ID3v2.3 header instead of a default ID3v2.4 to +an MP3 file, use the ‘id3v2_version’ private option of the MP3 +muxer: +

 
ffmpeg -i input.flac -id3v2_version 3 out.mp3
+
+ +

All codec AVOptions are per-stream, and thus a stream specifier +should be attached to them. +

+

Note: the ‘-nooption’ syntax cannot be used for boolean +AVOptions, use ‘-option 0’/‘-option 1’. +

+

Note: the old undocumented way of specifying per-stream AVOptions by +prepending v/a/s to the options name is now obsolete and will be +removed soon. +

+ +

3.4 Main options

+ +
+
-f format
+

Force format to use. +

+
+
-unit
+

Show the unit of the displayed values. +

+
+
-prefix
+

Use SI prefixes for the displayed values. +Unless the "-byte_binary_prefix" option is used all the prefixes +are decimal. +

+
+
-byte_binary_prefix
+

Force the use of binary prefixes for byte values. +

+
+
-sexagesimal
+

Use sexagesimal format HH:MM:SS.MICROSECONDS for time values. +

+
+
-pretty
+

Prettify the format of the displayed values, it corresponds to the +options "-unit -prefix -byte_binary_prefix -sexagesimal". +

+
+
-of, -print_format writer_name[=writer_options]
+

Set the output printing format. +

+

writer_name specifies the name of the writer, and +writer_options specifies the options to be passed to the writer. +

+

For example for printing the output in JSON format, specify: +

 
-print_format json
+
+ +

For more details on the available output printing formats, see the +Writers section below. +

+
+
-sections
+

Print sections structure and section information, and exit. The output +is not meant to be parsed by a machine. +

+
+
-select_streams stream_specifier
+

Select only the streams specified by stream_specifier. This +option affects only the options related to streams +(e.g. show_streams, show_packets, etc.). +

+

For example to show only audio streams, you can use the command: +

 
ffprobe -show_streams -select_streams a INPUT
+
+ +

To show only video packets belonging to the video stream with index 1: +

 
ffprobe -show_packets -select_streams v:1 INPUT
+
+ +
+
-show_data
+

Show payload data, as a hexadecimal and ASCII dump. Coupled with +‘-show_packets’, it will dump the packets’ data. Coupled with +‘-show_streams’, it will dump the codec extradata. +

+

The dump is printed as the "data" field. It may contain newlines. +

+
+
-show_error
+

Show information about the error found when trying to probe the input. +

+

The error information is printed within a section with name "ERROR". +

+
+
-show_format
+

Show information about the container format of the input multimedia +stream. +

+

All the container format information is printed within a section with +name "FORMAT". +

+
+
-show_format_entry name
+

Like ‘-show_format’, but only prints the specified entry of the +container format information, rather than all. This option may be given more +than once, then all specified entries will be shown. +

+

This option is deprecated, use show_entries instead. +

+
+
-show_entries section_entries
+

Set list of entries to show. +

+

Entries are specified according to the following +syntax. section_entries contains a list of section entries +separated by :. Each section entry is composed by a section +name (or unique name), optionally followed by a list of entries local +to that section, separated by ,. +

+

If section name is specified but is followed by no =, all +entries are printed to output, together with all the contained +sections. Otherwise only the entries specified in the local section +entries list are printed. In particular, if = is specified but +the list of local entries is empty, then no entries will be shown for +that section. +

+

Note that the order of specification of the local section entries is +not honored in the output, and the usual display order will be +retained. +

+

The formal syntax is given by: +

 
LOCAL_SECTION_ENTRIES ::= SECTION_ENTRY_NAME[,LOCAL_SECTION_ENTRIES]
+SECTION_ENTRY         ::= SECTION_NAME[=[LOCAL_SECTION_ENTRIES]]
+SECTION_ENTRIES       ::= SECTION_ENTRY[:SECTION_ENTRIES]
+
+ +

For example, to show only the index and type of each stream, and the PTS +time, duration time, and stream index of the packets, you can specify +the argument: +

 
packet=pts_time,duration_time,stream_index : stream=index,codec_type
+
+ +

To show all the entries in the section "format", but only the codec +type in the section "stream", specify the argument: +

 
format : stream=codec_type
+
+ +

To show all the tags in the stream and format sections: +

 
format_tags : format_tags
+
+ +

To show only the title tag (if available) in the stream +sections: +

 
stream_tags=title
+
+ +
+
-show_packets
+

Show information about each packet contained in the input multimedia +stream. +

+

The information for each single packet is printed within a dedicated +section with name "PACKET". +

+
+
-show_frames
+

Show information about each frame and subtitle contained in the input +multimedia stream. +

+

The information for each single frame is printed within a dedicated +section with name "FRAME" or "SUBTITLE". +

+
+
-show_streams
+

Show information about each media stream contained in the input +multimedia stream. +

+

Each media stream information is printed within a dedicated section +with name "STREAM". +

+
+
-show_programs
+

Show information about programs and their streams contained in the input +multimedia stream. +

+

Each media stream information is printed within a dedicated section +with name "PROGRAM_STREAM". +

+
+
-show_chapters
+

Show information about chapters stored in the format. +

+

Each chapter is printed within a dedicated section with name "CHAPTER". +

+
+
-count_frames
+

Count the number of frames per stream and report it in the +corresponding stream section. +

+
+
-count_packets
+

Count the number of packets per stream and report it in the +corresponding stream section. +

+
+
-read_intervals read_intervals
+
+

Read only the specified intervals. read_intervals must be a +sequence of interval specifications separated by ",". +ffprobe will seek to the interval starting point, and will +continue reading from that. +

+

Each interval is specified by two optional parts, separated by "%". +

+

The first part specifies the interval start position. It is +interpreted as an absolute position, or as a relative offset from the +current position if it is preceded by the "+" character. If this first +part is not specified, no seeking will be performed when reading this +interval. +

+

The second part specifies the interval end position. It is interpreted +as an absolute position, or as a relative offset from the current +position if it is preceded by the "+" character. If the offset +specification starts with "#", it is interpreted as the number of +packets to read (not including the flushing packets) from the interval +start. If no second part is specified, the program will read until the +end of the input. +

+

Note that seeking is not accurate, thus the actual interval start +point may be different from the specified position. Also, when an +interval duration is specified, the absolute end time will be computed +by adding the duration to the interval start point found by seeking +the file, rather than to the specified start value. +

+

The formal syntax is given by: +

 
INTERVAL  ::= [START|+START_OFFSET][%[END|+END_OFFSET]]
+INTERVALS ::= INTERVAL[,INTERVALS]
+
+ +

A few examples follow. +

    +
  • +Seek to time 10, read packets until 20 seconds after the found seek +point, then seek to position 01:30 (1 minute and thirty +seconds) and read packets until position 01:45. +
     
    10%+20,01:30%01:45
    +
    + +
  • +Read only 42 packets after seeking to position 01:23: +
     
    01:23%+#42
    +
    + +
  • +Read only the first 20 seconds from the start: +
     
    %+20
    +
    + +
  • +Read from the start until position 02:30: +
     
    %02:30
    +
    +
+ +
+
-show_private_data, -private
+

Show private data, that is data depending on the format of the +particular shown element. +This option is enabled by default, but you may need to disable it +for specific uses, for example when creating XSD-compliant XML output. +

+
+
-show_program_version
+

Show information related to program version. +

+

Version information is printed within a section with name +"PROGRAM_VERSION". +

+
+
-show_library_versions
+

Show information related to library versions. +

+

Version information for each library is printed within a section with +name "LIBRARY_VERSION". +

+
+
-show_versions
+

Show information related to program and library versions. This is the +equivalent of setting both ‘-show_program_version’ and +‘-show_library_versions’ options. +

+
+
-bitexact
+

Force bitexact output, useful to produce output which is not dependent +on the specific build. +

+
+
-i input_file
+

Read input_file. +

+
+
+ + +

4. Writers

+ +

A writer defines the output format adopted by ffprobe, and will be +used for printing all the parts of the output. +

+

A writer may accept one or more arguments, which specify the options +to adopt. The options are specified as a list of key=value +pairs, separated by ":". +

+

All writers support the following options: +

+
+
string_validation, sv
+

Set string validation mode. +

+

The following values are accepted. +

+
fail
+

The writer will fail immediately in case an invalid string (UTF-8) +sequence or code point is found in the input. This is especially +useful to validate input metadata. +

+
+
ignore
+

Any validation error will be ignored. This will result in possibly +broken output, especially with the json or xml writer. +

+
+
replace
+

The writer will substitute invalid UTF-8 sequences or code points with +the string specified with the ‘string_validation_replacement’. +

+
+ +

Default value is ‘replace’. +

+
+
string_validation_replacement, svr
+

Set replacement string to use in case ‘string_validation’ is +set to ‘replace’. +

+

In case the option is not specified, the writer will assume the empty +string, that is it will remove the invalid sequences from the input +strings. +

+
+ +

A description of the currently available writers follows. +

+ +

4.1 default

+

Default format. +

+

Print each section in the form: +

 
[SECTION]
+key1=val1
+...
+keyN=valN
+[/SECTION]
+
+ +

Metadata tags are printed as a line in the corresponding FORMAT, STREAM or +PROGRAM_STREAM section, and are prefixed by the string "TAG:". +

+

A description of the accepted options follows. +

+
+
nokey, nk
+

If set to 1 specify not to print the key of each field. Default value +is 0. +

+
+
noprint_wrappers, nw
+

If set to 1 specify not to print the section header and footer. +Default value is 0. +

+
+ + +

4.2 compact, csv

+

Compact and CSV format. +

+

The csv writer is equivalent to compact, but supports +different defaults. +

+

Each section is printed on a single line. +If no option is specified, the output has the form: +

 
section|key1=val1| ... |keyN=valN
+
+ +

Metadata tags are printed in the corresponding "format" or "stream" +section. A metadata tag key, if printed, is prefixed by the string +"tag:". +

+

The description of the accepted options follows. +

+
+
item_sep, s
+

Specify the character to use for separating fields in the output line. +It must be a single printable character, it is "|" by default ("," for +the csv writer). +

+
+
nokey, nk
+

If set to 1 specify not to print the key of each field. Its default +value is 0 (1 for the csv writer). +

+
+
escape, e
+

Set the escape mode to use, default to "c" ("csv" for the csv +writer). +

+

It can assume one of the following values: +

+
c
+

Perform C-like escaping. Strings containing a newline (’\n’), carriage +return (’\r’), a tab (’\t’), a form feed (’\f’), the escaping +character (’\’) or the item separator character SEP are escaped using C-like fashioned +escaping, so that a newline is converted to the sequence "\n", a +carriage return to "\r", ’\’ to "\\" and the separator SEP is +converted to "\SEP". +

+
+
csv
+

Perform CSV-like escaping, as described in RFC4180. Strings +containing a newline (’\n’), a carriage return (’\r’), a double quote +(’"’), or SEP are enclosed in double-quotes. +

+
+
none
+

Perform no escaping. +

+
+ +
+
print_section, p
+

Print the section name at the beginning of each line if the value is +1, disable it with value set to 0. Default value is +1. +

+
+
+ + +

4.3 flat

+

Flat format. +

+

A free-form output where each line contains an explicit key=value, such as +"streams.stream.3.tags.foo=bar". The output is shell escaped, so it can be +directly embedded in sh scripts as long as the separator character is an +alphanumeric character or an underscore (see sep_char option). +

+

The description of the accepted options follows. +

+
+
sep_char, s
+

Separator character used to separate the chapter, the section name, IDs and +potential tags in the printed field key. +

+

Default value is ’.’. +

+
+
hierarchical, h
+

Specify if the section name specification should be hierarchical. If +set to 1, and if there is more than one section in the current +chapter, the section name will be prefixed by the name of the +chapter. A value of 0 will disable this behavior. +

+

Default value is 1. +

+
+ + +

4.4 ini

+

INI format output. +

+

Print output in an INI based format. +

+

The following conventions are adopted: +

+
    +
  • +all key and values are UTF-8 +
  • +’.’ is the subgroup separator +
  • +newline, ’\t’, ’\f’, ’\b’ and the following characters are escaped +
  • +’\’ is the escape character +
  • +’#’ is the comment indicator +
  • +’=’ is the key/value separator +
  • +’:’ is not used but usually parsed as key/value separator +
+ +

This writer accepts options as a list of key=value pairs, +separated by ":". +

+

The description of the accepted options follows. +

+
+
hierarchical, h
+

Specify if the section name specification should be hierarchical. If +set to 1, and if there is more than one section in the current +chapter, the section name will be prefixed by the name of the +chapter. A value of 0 will disable this behavior. +

+

Default value is 1. +

+
+ + +

4.5 json

+

JSON based format. +

+

Each section is printed using JSON notation. +

+

The description of the accepted options follows. +

+
+
compact, c
+

If set to 1 enable compact output, that is each section will be +printed on a single line. Default value is 0. +

+
+ +

For more information about JSON, see http://www.json.org/. +

+ +

4.6 xml

+

XML based format. +

+

The XML output is described in the XML schema description file +‘ffprobe.xsd’ installed in the FFmpeg datadir. +

+

An updated version of the schema can be retrieved at the url +http://www.ffmpeg.org/schema/ffprobe.xsd, which redirects to the +latest schema committed into the FFmpeg development source code tree. +

+

Note that the output issued will be compliant to the +‘ffprobe.xsd’ schema only when no special global output options +(‘unit’, ‘prefix’, ‘byte_binary_prefix’, +‘sexagesimal’ etc.) are specified. +

+

The description of the accepted options follows. +

+
+
fully_qualified, q
+

If set to 1 specify if the output should be fully qualified. Default +value is 0. +This is required for generating an XML file which can be validated +through an XSD file. +

+
+
xsd_compliant, x
+

If set to 1 perform more checks for ensuring that the output is XSD +compliant. Default value is 0. +This option automatically sets ‘fully_qualified’ to 1. +

+
+ +

For more information about the XML format, see +http://www.w3.org/XML/. +

+ +

5. Timecode

+ +

ffprobe supports Timecode extraction: +

+
    +
  • +MPEG1/2 timecode is extracted from the GOP, and is available in the video +stream details (‘-show_streams’, see timecode). + +
  • +MOV timecode is extracted from tmcd track, so is available in the tmcd +stream metadata (‘-show_streams’, see TAG:timecode). + +
  • +DV, GXF and AVI timecodes are available in format metadata +(‘-show_format’, see TAG:timecode). + +
+ + + +

6. See Also

+ +

ffprobe-all, +ffmpeg, ffplay, ffserver, +ffmpeg-utils, +ffmpeg-scaler, +ffmpeg-resampler, +ffmpeg-codecs, +ffmpeg-bitstream-filters, +ffmpeg-formats, +ffmpeg-devices, +ffmpeg-protocols, +ffmpeg-filters +

+ + +

7. Authors

+ +

The FFmpeg developers. +

+

For details about the authorship, see the Git history of the project +(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command +git log in the FFmpeg source directory, or browsing the +online repository at http://source.ffmpeg.org. +

+

Maintainers for the specific components are listed in the file +‘MAINTAINERS’ in the source code tree. +

+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/general.html b/dependencies64/ffmpeg/doc/general.html new file mode 100644 index 000000000..95e098521 --- /dev/null +++ b/dependencies64/ffmpeg/doc/general.html @@ -0,0 +1,962 @@ + + + + + +FFmpeg documentation : General + + + + + + + + + + +
+
+ + +

General Documentation

+ + +

Table of Contents

+ + + +

1. External libraries

+ +

FFmpeg can be hooked up with a number of external libraries to add support +for more formats. None of them are used by default, their use has to be +explicitly requested by passing the appropriate flags to +./configure. +

+ +

1.1 OpenJPEG

+ +

FFmpeg can use the OpenJPEG libraries for encoding/decoding J2K videos. Go to +http://www.openjpeg.org/ to get the libraries and follow the installation +instructions. To enable using OpenJPEG in FFmpeg, pass --enable-libopenjpeg to +‘./configure’. +

+ + +

1.2 OpenCORE, VisualOn, and Fraunhofer libraries

+ +

Spun off Google Android sources, OpenCore, VisualOn and Fraunhofer +libraries provide encoders for a number of audio codecs. +

+
+

OpenCORE and VisualOn libraries are under the Apache License 2.0 +(see http://www.apache.org/licenses/LICENSE-2.0 for details), which is +incompatible to the LGPL version 2.1 and GPL version 2. You have to +upgrade FFmpeg’s license to LGPL version 3 (or if you have enabled +GPL components, GPL version 3) by passing --enable-version3 to configure in +order to use it. +

+

The Fraunhofer AAC library is licensed under a license incompatible to the GPL +and is not known to be compatible to the LGPL. Therefore, you have to pass +--enable-nonfree to configure to use it. +

+ +

1.2.1 OpenCORE AMR

+ +

FFmpeg can make use of the OpenCORE libraries for AMR-NB +decoding/encoding and AMR-WB decoding. +

+

Go to http://sourceforge.net/projects/opencore-amr/ and follow the +instructions for installing the libraries. +Then pass --enable-libopencore-amrnb and/or +--enable-libopencore-amrwb to configure to enable them. +

+ +

1.2.2 VisualOn AAC encoder library

+ +

FFmpeg can make use of the VisualOn AACenc library for AAC encoding. +

+

Go to http://sourceforge.net/projects/opencore-amr/ and follow the +instructions for installing the library. +Then pass --enable-libvo-aacenc to configure to enable it. +

+ +

1.2.3 VisualOn AMR-WB encoder library

+ +

FFmpeg can make use of the VisualOn AMR-WBenc library for AMR-WB encoding. +

+

Go to http://sourceforge.net/projects/opencore-amr/ and follow the +instructions for installing the library. +Then pass --enable-libvo-amrwbenc to configure to enable it. +

+ +

1.2.4 Fraunhofer AAC library

+ +

FFmpeg can make use of the Fraunhofer AAC library for AAC encoding. +

+

Go to http://sourceforge.net/projects/opencore-amr/ and follow the +instructions for installing the library. +Then pass --enable-libfdk-aac to configure to enable it. +

+ +

1.3 LAME

+ +

FFmpeg can make use of the LAME library for MP3 encoding. +

+

Go to http://lame.sourceforge.net/ and follow the +instructions for installing the library. +Then pass --enable-libmp3lame to configure to enable it. +

+ +

1.4 TwoLAME

+ +

FFmpeg can make use of the TwoLAME library for MP2 encoding. +

+

Go to http://www.twolame.org/ and follow the +instructions for installing the library. +Then pass --enable-libtwolame to configure to enable it. +

+ +

1.5 libvpx

+ +

FFmpeg can make use of the libvpx library for VP8/VP9 encoding. +

+

Go to http://www.webmproject.org/ and follow the instructions for +installing the library. Then pass --enable-libvpx to configure to +enable it. +

+ +

1.6 libwavpack

+ +

FFmpeg can make use of the libwavpack library for WavPack encoding. +

+

Go to http://www.wavpack.com/ and follow the instructions for +installing the library. Then pass --enable-libwavpack to configure to +enable it. +

+ +

1.7 x264

+ +

FFmpeg can make use of the x264 library for H.264 encoding. +

+

Go to http://www.videolan.org/developers/x264.html and follow the +instructions for installing the library. Then pass --enable-libx264 to +configure to enable it. +

+
+

x264 is under the GNU Public License Version 2 or later +(see http://www.gnu.org/licenses/old-licenses/gpl-2.0.html for +details), you must upgrade FFmpeg’s license to GPL in order to use it. +

+ +

1.8 x265

+ +

FFmpeg can make use of the x265 library for HEVC encoding. +

+

Go to http://x265.org/developers.html and follow the instructions +for installing the library. Then pass --enable-libx265 to configure +to enable it. +

+
+

x265 is under the GNU Public License Version 2 or later +(see http://www.gnu.org/licenses/old-licenses/gpl-2.0.html for +details), you must upgrade FFmpeg’s license to GPL in order to use it. +

+ +

1.9 libilbc

+ +

iLBC is a narrowband speech codec that has been made freely available +by Google as part of the WebRTC project. libilbc is a packaging friendly +copy of the iLBC codec. FFmpeg can make use of the libilbc library for +iLBC encoding and decoding. +

+

Go to https://github.com/dekkers/libilbc and follow the instructions for +installing the library. Then pass --enable-libilbc to configure to +enable it. +

+ +

1.10 libzvbi

+ +

libzvbi is a VBI decoding library which can be used by FFmpeg to decode DVB +teletext pages and DVB teletext subtitles. +

+

Go to http://sourceforge.net/projects/zapping/ and follow the instructions for +installing the library. Then pass --enable-libzvbi to configure to +enable it. +

+
+

libzvbi is licensed under the GNU General Public License Version 2 or later +(see http://www.gnu.org/licenses/old-licenses/gpl-2.0.html for details), +you must upgrade FFmpeg’s license to GPL in order to use it. +

+ +

1.11 AviSynth

+ +

FFmpeg can read AviSynth scripts as input. To enable support, pass +--enable-avisynth to configure. The correct headers are +included in compat/avisynth/, which allows the user to enable support +without needing to search for these headers themselves. +

+

For Windows, supported AviSynth variants are +AviSynth 2.5 or 2.6 for 32-bit builds and +AviSynth+ 0.1 for 32-bit and 64-bit builds. +

+

For Linux and OS X, the supported AviSynth variant is +AvxSynth. +

+
+

AviSynth and AvxSynth are loaded dynamically. Distributors can build FFmpeg +with --enable-avisynth, and the binaries will work regardless of the +end user having AviSynth or AvxSynth installed - they’ll only need to be +installed to use AviSynth scripts (obviously). +

+ + +

2. Supported File Formats, Codecs or Features

+ +

You can use the -formats and -codecs options to have an exhaustive list. +

+ +

2.1 File Formats

+ +

FFmpeg supports the following file formats through the libavformat +library: +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameEncodingDecodingComments
4xmX4X Technologies format, used in some games.
8088flex TMVX
ACT VoiceXcontains G.729 audio
Adobe FilmstripXX
Audio IFF (AIFF)XX
American Laser Games MMXMultimedia format used in games like Mad Dog McCree.
3GPP AMRXX
Amazing Studio Packed Animation FileXMultimedia format used in game Heart Of Darkness.
Apple HTTP Live StreamingX
Artworx Data FormatX
ADPXAudio format used on the Nintendo Gamecube.
AFCXAudio format used on the Nintendo Gamecube.
ASFXX
ASTXXAudio format used on the Nintendo Wii.
AVIXX
AviSynthX
AVRXAudio format used on Mac.
AVSXMultimedia format used by the Creature Shock game.
Beam Software SIFFXAudio and video format used in some games by Beam Software.
Bethesda Softworks VIDXUsed in some games from Bethesda Softworks.
Binary textX
BinkXMultimedia format used by many games.
Bitmap Brothers JVXUsed in Z and Z95 games.
Brute Force & IgnoranceXUsed in the game Flash Traffic: City of Angels.
BRSTMXAudio format used on the Nintendo Wii.
BWFXX
CRI ADXXXAudio-only format used in console video games.
Discworld II BMVX
Interplay C93XUsed in the game Cyberia from Interplay.
Delphine Software International CINXMultimedia format used by Delphine Software games.
CD+GXVideo format used by CD+G karaoke disks
Commodore CDXLXAmiga CD video format
Core Audio FormatXXApple Core Audio Format
CRC testing formatX
Creative VoiceXXCreated for the Sound Blaster Pro.
CRYO APCXAudio format used in some games by CRYO Interactive Entertainment.
D-Cinema audioXX
Deluxe Paint AnimationX
DFAXThis format is used in Chronomaster game
DV videoXX
DXAXThis format is used in the non-Windows version of the Feeble Files + game and different game cutscenes repacked for use with ScummVM.
Electronic Arts cdataX
Electronic Arts MultimediaXUsed in various EA games; files have extensions like WVE and UV2.
Ensoniq Paris Audio FileX
FFM (FFserver live feed)XX
Flash (SWF)XX
Flash 9 (AVM2)XXOnly embedded audio is decoded.
FLI/FLC/FLX animationX.fli/.flc files
Flash Video (FLV)XXMacromedia Flash video files
framecrc testing formatX
FunCom ISSXAudio format used in various games from FunCom like The Longest Journey.
G.723.1XX
G.729 BITXX
G.729 rawX
GIF AnimationXX
GXFXXGeneral eXchange Format SMPTE 360M, used by Thomson Grass Valley + playout servers.
HNMXOnly version 4 supported, used in some games from Cryo Interactive
iCEDraw FileX
ICOXXMicrosoft Windows ICO
id Quake II CIN videoX
id RoQXXUsed in Quake III, Jedi Knight 2 and other computer games.
IEC61937 encapsulationXX
IFFXInterchange File Format
iLBCXX
Interplay MVEXFormat used in various Interplay computer games.
IV8XA format generated by IndigoVision 8000 video server.
IVF (On2)XXA format used by libvpx
IRCAMXX
LATMXX
LMLM4XUsed by Linux Media Labs MPEG-4 PCI boards
LOASXcontains LATM multiplexed AAC audio
LVFX
LXFXVR native stream format, used by Leitch/Harris’ video servers.
MatroskaXX
Matroska audioX
FFmpeg metadataXXMetadata in text format.
MAXIS XAXUsed in Sim City 3000; file extension .xa.
MD StudioX
Metal Gear Solid: The Twin SnakesX
Megalux FrameXUsed by Megalux Ultimate Paint
Mobotix .mxgX
Monkey’s AudioX
Motion Pixels MVIX
MOV/QuickTime/MP4XX3GP, 3GP2, PSP, iPod variants supported
MP2XX
MP3XX
MPEG-1 SystemXXmuxed audio and video, VCD format supported
MPEG-PS (program stream)XXalso known as VOB file, SVCD and DVD format supported
MPEG-TS (transport stream)XXalso known as DVB Transport Stream
MPEG-4XXMPEG-4 is a variant of QuickTime.
Mirillis FIC videoXNo cursor rendering.
MIME multipart JPEGX
MSN TCP webcamXUsed by MSN Messenger webcam streams.
MTVX
MusepackX
Musepack SV8X
Material eXchange Format (MXF)XXSMPTE 377M, used by D-Cinema, broadcast industry.
Material eXchange Format (MXF), D-10 MappingXXSMPTE 386M, D-10/IMX Mapping.
NC camera feedXNC (AVIP NC4600) camera streams
NIST SPeech HEader REsourcesX
NTT TwinVQ (VQF)XNippon Telegraph and Telephone Corporation TwinVQ.
Nullsoft Streaming VideoX
NuppelVideoX
NUTXXNUT Open Container Format
OggXX
Playstation Portable PMPX
Portable Voice FormatX
TechnoTrend PVAXUsed by TechnoTrend DVB PCI boards.
QCPX
raw ADTS (AAC)XX
raw AC-3XX
raw Chinese AVS videoXX
raw CRI ADXXX
raw DiracXX
raw DNxHDXX
raw DTSXX
raw DTS-HDX
raw E-AC-3XX
raw FLACXX
raw GSMX
raw H.261XX
raw H.263XX
raw H.264XX
raw HEVCXX
raw Ingenient MJPEGX
raw MJPEGXX
raw MLPX
raw MPEGX
raw MPEG-1X
raw MPEG-2X
raw MPEG-4XX
raw NULLX
raw videoXX
raw id RoQX
raw ShortenX
raw TAKX
raw TrueHDXX
raw VC-1XX
raw PCM A-lawXX
raw PCM mu-lawXX
raw PCM signed 8 bitXX
raw PCM signed 16 bit big-endianXX
raw PCM signed 16 bit little-endianXX
raw PCM signed 24 bit big-endianXX
raw PCM signed 24 bit little-endianXX
raw PCM signed 32 bit big-endianXX
raw PCM signed 32 bit little-endianXX
raw PCM unsigned 8 bitXX
raw PCM unsigned 16 bit big-endianXX
raw PCM unsigned 16 bit little-endianXX
raw PCM unsigned 24 bit big-endianXX
raw PCM unsigned 24 bit little-endianXX
raw PCM unsigned 32 bit big-endianXX
raw PCM unsigned 32 bit little-endianXX
raw PCM floating-point 32 bit big-endianXX
raw PCM floating-point 32 bit little-endianXX
raw PCM floating-point 64 bit big-endianXX
raw PCM floating-point 64 bit little-endianXX
RDTX
REDCODE R3DXFile format used by RED Digital cameras, contains JPEG 2000 frames and PCM audio.
RealMediaXX
RedirectorX
RedSparkX
Renderware TeXture DictionaryX
RL2XAudio and video format used in some games by Entertainment Software Partners.
RPL/ARMovieX
Lego Mindstorms RSOXX
RSDX
RTMPXXOutput is performed by publishing stream to RTMP server
RTPXX
RTSPXX
SAPXX
SBGX
SDPX
Sega FILM/CPKXUsed in many Sega Saturn console games.
Silicon Graphics MovieX
Sierra SOLX.sol files used in Sierra Online games.
Sierra VMDXUsed in Sierra CD-ROM games.
SmackerXMultimedia format used by many games.
SMJPEGXXUsed in certain Loki game ports.
SmushXMultimedia format used in some LucasArts games.
Sony OpenMG (OMA)XXAudio format used in Sony Sonic Stage and Sony Vegas.
Sony PlayStation STRX
Sony Wave64 (W64)XX
SoX native formatXX
SUN AU formatXX
Text filesX
THPXUsed on the Nintendo GameCube.
Tiertex Limited SEQXTiertex .seq files used in the DOS CD-ROM version of the game Flashback.
True AudioX
VC-1 test bitstreamXX
VivoX
WAVXX
WavPackXX
WebMXX
Windows Television (WTV)XX
Wing Commander III movieXMultimedia format used in Origin’s Wing Commander III computer game.
Westwood Studios audioXMultimedia format used in Westwood Studios games.
Westwood Studios VQAXMultimedia format used in Westwood Studios games.
XMVXMicrosoft video container used in Xbox games.
xWMAXMicrosoft audio container used by XAudio 2.
eXtended BINary text (XBIN)X
YUV4MPEG pipeXX
Psygnosis YOPX
+ +

X means that encoding (resp. decoding) is supported. +

+ +

2.2 Image Formats

+ +

FFmpeg can read and write images for each frame of a video sequence. The +following image formats are supported: +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameEncodingDecodingComments
.Y.U.VXXone raw file per component
animated GIFXX
BMPXXMicrosoft BMP image
PIXXPIX is an image format used in the Argonaut BRender engine.
DPXXXDigital Picture Exchange
EXRXOpenEXR
JPEGXXProgressive JPEG is not supported.
JPEG 2000XX
JPEG-LSXX
LJPEGXLossless JPEG
PAMXXPAM is a PNM extension with alpha support.
PBMXXPortable BitMap image
PCXXXPC Paintbrush
PGMXXPortable GrayMap image
PGMYUVXXPGM with U and V components in YUV 4:2:0
PICXPictor/PC Paint
PNGXX
PPMXXPortable PixelMap image
PTXXV.Flash PTX format
SGIXXSGI RGB image format
Sun RasterfileXXSun RAS image format
TIFFXXYUV, JPEG and some extension is not supported yet.
Truevision TargaXXTarga (.TGA) image format
WebPEXWebP image format, encoding supported through external library libwebp
XBMXXX BitMap image format
XFaceXXX-Face image format
XWDXXX Window Dump image format
+ +

X means that encoding (resp. decoding) is supported. +

+

E means that support is provided through an external library. +

+ +

2.3 Video Codecs

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameEncodingDecodingComments
4X MovieXUsed in certain computer games.
8088flex TMVX
A64 multicolorXCreates video suitable to be played on a commodore 64 (multicolor mode).
Amazing Studio PAF VideoX
American Laser Games MMXUsed in games like Mad Dog McCree.
AMV VideoXXUsed in Chinese MP3 players.
ANSI/ASCII artX
Apple Intermediate CodecX
Apple MJPEG-BX
Apple ProResXX
Apple QuickDrawXfourcc: qdrw
Asus v1XXfourcc: ASV1
Asus v2XXfourcc: ASV2
ATI VCR1Xfourcc: VCR1
ATI VCR2Xfourcc: VCR2
Auravision AuraX
Auravision Aura 2X
Autodesk Animator Flic videoX
Autodesk RLEXfourcc: AASC
Avid 1:1 10-bit RGB PackerXXfourcc: AVrp
AVS (Audio Video Standard) videoXVideo encoding used by the Creature Shock game.
AYUVXXMicrosoft uncompressed packed 4:4:4:4
Beam Software VBX
Bethesda VID videoXUsed in some games from Bethesda Softworks.
Bink VideoX
Bitmap Brothers JV videoX
y41p Brooktree uncompressed 4:1:1 12-bitXX
Brute Force & IgnoranceXUsed in the game Flash Traffic: City of Angels.
C93 videoXCodec used in Cyberia game.
CamStudioXfourcc: CSCD
CD+GXVideo codec for CD+G karaoke disks
CDXLXAmiga CD video codec
Chinese AVS videoEXAVS1-P2, JiZhun profile, encoding through external library libxavs
Delphine Software International CIN videoXCodec used in Delphine Software International games.
Discworld II BMV VideoX
Canopus Lossless CodecX
CinepakX
Cirrus Logic AccuPakXXfourcc: CLJR
CPiA Video FormatX
Creative YUV (CYUV)X
DFAXCodec used in Chronomaster game.
DiracEXsupported through external library libschroedinger
Deluxe Paint AnimationX
DNxHDXXaka SMPTE VC3
Duck TrueMotion 1.0Xfourcc: DUCK
Duck TrueMotion 2.0Xfourcc: TM20
DV (Digital Video)XX
Dxtory capture formatX
Feeble Files/ScummVM DXAXCodec originally used in Feeble Files game.
Electronic Arts CMV videoXUsed in NHL 95 game.
Electronic Arts Madcow videoX
Electronic Arts TGV videoX
Electronic Arts TGQ videoX
Electronic Arts TQI videoX
Escape 124X
Escape 130X
FFmpeg video codec #1XXlossless codec (fourcc: FFV1)
Flash Screen Video v1XXfourcc: FSV1
Flash Screen Video v2XX
Flash Video (FLV)XXSorenson H.263 used in Flash
Forward UncompressedX
FrapsX
Go2WebinarXfourcc: G2M4
H.261XX
H.263 / H.263-1996XX
H.263+ / H.263-1998 / H.263 version 2XX
H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10EXencoding supported through external library libx264
HEVCXXencoding supported through the external library libx265
HNM version 4X
HuffYUVXX
HuffYUV FFmpeg variantXX
IBM UltimotionXfourcc: ULTI
id Cinematic videoXUsed in Quake II.
id RoQ videoXXUsed in Quake III, Jedi Knight 2, other computer games.
IFF ILBMXIFF interleaved bitmap
IFF ByteRun1XIFF run length encoded bitmap
Intel H.263X
Intel Indeo 2X
Intel Indeo 3X
Intel Indeo 4X
Intel Indeo 5X
Interplay C93XUsed in the game Cyberia from Interplay.
Interplay MVE videoXUsed in Interplay .MVE files.
J2KXX
Karl Morton’s video codecXCodec used in Worms games.
Kega Game Video (KGV1)XKega emulator screen capture codec.
LagarithX
LCL (LossLess Codec Library) MSZHX
LCL (LossLess Codec Library) ZLIBEE
LOCOX
LucasArts SmushXUsed in LucasArts games.
lossless MJPEGXX
Microsoft ATC ScreenXAlso known as Microsoft Screen 3.
Microsoft Expression Encoder ScreenXAlso known as Microsoft Titanium Screen 2.
Microsoft RLEX
Microsoft Screen 1XAlso known as Windows Media Video V7 Screen.
Microsoft Screen 2XAlso known as Windows Media Video V9 Screen.
Microsoft Video 1X
MimicXUsed in MSN Messenger Webcam streams.
Miro VideoXLXfourcc: VIXL
MJPEG (Motion JPEG)XX
Mobotix MxPEG videoX
Motion Pixels videoX
MPEG-1 videoXX
MPEG-2 videoXX
MPEG-4 part 2XXlibxvidcore can be used alternatively for encoding.
MPEG-4 part 2 Microsoft variant version 1X
MPEG-4 part 2 Microsoft variant version 2XX
MPEG-4 part 2 Microsoft variant version 3XX
Nintendo Gamecube THP videoX
NuppelVideo/RTjpegXVideo encoding used in NuppelVideo files.
On2 VP3Xstill experimental
On2 VP5Xfourcc: VP50
On2 VP6Xfourcc: VP60,VP61,VP62
VP8EXfourcc: VP80, encoding supported through external library libvpx
VP9EXencoding supported through external library libvpx
Pinnacle TARGA CineWave YUV16Xfourcc: Y216
ProresXfourcc: apch,apcn,apcs,apco
Q-team QPEGXfourccs: QPEG, Q1.0, Q1.1
QuickTime 8BPS videoX
QuickTime Animation (RLE) videoXXfourcc: ’rle ’
QuickTime Graphics (SMC)Xfourcc: ’smc ’
QuickTime video (RPZA)Xfourcc: rpza
R10K AJA Kona 10-bit RGB CodecXX
R210 Quicktime Uncompressed RGB 10-bitXX
Raw VideoXX
RealVideo 1.0XX
RealVideo 2.0XX
RealVideo 3.0Xstill far from ideal
RealVideo 4.0X
Renderware TXD (TeXture Dictionary)XTexture dictionaries used by the Renderware Engine.
RL2 videoXused in some games by Entertainment Software Partners
SGI RLE 8-bitX
Sierra VMD videoXUsed in Sierra VMD files.
Silicon Graphics Motion Video Compressor 1 (MVC1)X
Silicon Graphics Motion Video Compressor 2 (MVC2)X
Smacker videoXVideo encoding used in Smacker.
SMPTE VC-1X
SnowXXexperimental wavelet codec (fourcc: SNOW)
Sony PlayStation MDEC (Motion DECoder)X
Sorenson Vector Quantizer 1XXfourcc: SVQ1
Sorenson Vector Quantizer 3Xfourcc: SVQ3
Sunplus JPEG (SP5X)Xfourcc: SP5X
TechSmith Screen Capture CodecXfourcc: TSCC
TechSmith Screen Capture Codec 2Xfourcc: TSC2
TheoraEXencoding supported through external library libtheora
Tiertex Limited SEQ videoXCodec used in DOS CD-ROM FlashBack game.
Ut VideoXX
v210 QuickTime uncompressed 4:2:2 10-bitXX
v308 QuickTime uncompressed 4:4:4XX
v408 QuickTime uncompressed 4:4:4:4XX
v410 QuickTime uncompressed 4:4:4 10-bitXX
VBLE Lossless CodecX
VMware Screen Codec / VMware VideoXCodec used in videos captured by VMware.
Westwood Studios VQA (Vector Quantized Animation) videoX
Windows Media ImageX
Windows Media Video 7XX
Windows Media Video 8XX
Windows Media Video 9Xnot completely working
Wing Commander III / XanXUsed in Wing Commander III .MVE files.
Wing Commander IV / XanXUsed in Wing Commander IV.
Winnov WNV1X
WMV7XX
YAMAHA SMAFXX
Psygnosis YOP VideoX
yuv4XXlibquicktime uncompressed packed 4:2:0
ZeroCodec Lossless VideoX
ZLIBXXpart of LCL, encoder experimental
Zip Motion Blocks VideoXXEncoder works only in PAL8.
+ +

X means that encoding (resp. decoding) is supported. +

+

E means that support is provided through an external library. +

+ +

2.4 Audio Codecs

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameEncodingDecodingComments
8SVX exponentialX
8SVX fibonacciX
AAC+EXencoding supported through external library libaacplus
AACEXencoding supported through external library libfaac and libvo-aacenc
AC-3IXX
ADPCM 4X MovieX
ADPCM CDROM XAX
ADPCM Creative TechnologyX16 -> 4, 8 -> 4, 8 -> 3, 8 -> 2
ADPCM Electronic ArtsXUsed in various EA titles.
ADPCM Electronic Arts Maxis CDROM XSXUsed in Sim City 3000.
ADPCM Electronic Arts R1X
ADPCM Electronic Arts R2X
ADPCM Electronic Arts R3X
ADPCM Electronic Arts XASX
ADPCM G.722XX
ADPCM G.726XX
ADPCM IMA AMVXUsed in AMV files
ADPCM IMA Electronic Arts EACSX
ADPCM IMA Electronic Arts SEADX
ADPCM IMA FuncomX
ADPCM IMA QuickTimeXX
ADPCM IMA Loki SDL MJPEGX
ADPCM IMA WAVXX
ADPCM IMA WestwoodX
ADPCM ISS IMAXUsed in FunCom games.
ADPCM IMA DialogicX
ADPCM IMA Duck DK3XUsed in some Sega Saturn console games.
ADPCM IMA Duck DK4XUsed in some Sega Saturn console games.
ADPCM IMA RadicalX
ADPCM MicrosoftXX
ADPCM MS IMAXX
ADPCM Nintendo Gamecube AFCX
ADPCM Nintendo Gamecube DTKX
ADPCM Nintendo Gamecube THPX
ADPCM QT IMAXX
ADPCM SEGA CRI ADXXXUsed in Sega Dreamcast games.
ADPCM Shockwave FlashXX
ADPCM Sound Blaster Pro 2-bitX
ADPCM Sound Blaster Pro 2.6-bitX
ADPCM Sound Blaster Pro 4-bitX
ADPCM Westwood Studios IMAXUsed in Westwood Studios games like Command and Conquer.
ADPCM YamahaXX
AMR-NBEXencoding supported through external library libopencore-amrnb
AMR-WBEXencoding supported through external library libvo-amrwbenc
Amazing Studio PAF AudioX
Apple lossless audioXXQuickTime fourcc ’alac’
ATRAC1X
ATRAC3X
ATRAC3+X
Bink AudioXUsed in Bink and Smacker files in many games.
CELTEdecoding supported through external library libcelt
Delphine Software International CIN audioXCodec used in Delphine Software International games.
Discworld II BMV AudioX
COOKXAll versions except 5.1 are supported.
DCA (DTS Coherent Acoustics)XX
DPCM id RoQXXUsed in Quake III, Jedi Knight 2 and other computer games.
DPCM InterplayXUsed in various Interplay computer games.
DPCM Sierra OnlineXUsed in Sierra Online game audio files.
DPCM SolX
DPCM XanXUsed in Origin’s Wing Commander IV AVI files.
DSP Group TrueSpeechX
DV audioX
Enhanced AC-3XX
EVRC (Enhanced Variable Rate Codec)X
FLAC (Free Lossless Audio Codec)XIX
G.723.1XX
G.729X
GSMEXencoding supported through external library libgsm
GSM Microsoft variantEXencoding supported through external library libgsm
IAC (Indeo Audio Coder)X
iLBC (Internet Low Bitrate Codec)EEencoding and decoding supported through external library libilbc
IMC (Intel Music Coder)X
MACE (Macintosh Audio Compression/Expansion) 3:1X
MACE (Macintosh Audio Compression/Expansion) 6:1X
MLP (Meridian Lossless Packing)XUsed in DVD-Audio discs.
Monkey’s AudioX
MP1 (MPEG audio layer 1)IX
MP2 (MPEG audio layer 2)IXIXlibtwolame can be used alternatively for encoding.
MP3 (MPEG audio layer 3)EIXencoding supported through external library LAME, ADU MP3 and MP3onMP4 also supported
MPEG-4 Audio Lossless Coding (ALS)X
Musepack SV7X
Musepack SV8X
Nellymoser AsaoXX
OpusEEsupported through external library libopus
PCM A-lawXX
PCM mu-lawXX
PCM signed 8-bit planarXX
PCM signed 16-bit big-endian planarXX
PCM signed 16-bit little-endian planarXX
PCM signed 24-bit little-endian planarXX
PCM signed 32-bit little-endian planarXX
PCM 32-bit floating point big-endianXX
PCM 32-bit floating point little-endianXX
PCM 64-bit floating point big-endianXX
PCM 64-bit floating point little-endianXX
PCM D-Cinema audio signed 24-bitXX
PCM signed 8-bitXX
PCM signed 16-bit big-endianXX
PCM signed 16-bit little-endianXX
PCM signed 24-bit big-endianXX
PCM signed 24-bit little-endianXX
PCM signed 32-bit big-endianXX
PCM signed 32-bit little-endianXX
PCM signed 16/20/24-bit big-endian in MPEG-TSX
PCM unsigned 8-bitXX
PCM unsigned 16-bit big-endianXX
PCM unsigned 16-bit little-endianXX
PCM unsigned 24-bit big-endianXX
PCM unsigned 24-bit little-endianXX
PCM unsigned 32-bit big-endianXX
PCM unsigned 32-bit little-endianXX
PCM ZorkX
QCELP / PureVoiceX
QDesign Music Codec 2XThere are still some distortions.
RealAudio 1.0 (14.4K)XXReal 14400 bit/s codec
RealAudio 2.0 (28.8K)XReal 28800 bit/s codec
RealAudio 3.0 (dnet)IXXReal low bitrate AC-3 codec
RealAudio LosslessX
RealAudio SIPR / ACELP.NETX
ShortenX
Sierra VMD audioXUsed in Sierra VMD files.
Smacker audioX
SMPTE 302M AES3 audioXX
SonicXXexperimental codec
Sonic losslessXXexperimental codec
SpeexEEsupported through external library libspeex
TAK (Tom’s lossless Audio Kompressor)X
True Audio (TTA)XX
TrueHDXUsed in HD-DVD and Blu-Ray discs.
TwinVQ (VQF flavor)X
VIMAXUsed in LucasArts SMUSH animations.
VorbisEXA native but very primitive encoder exists.
Voxware MetaSoundX
WavPackXX
Westwood Audio (SND1)X
Windows Media Audio 1XX
Windows Media Audio 2XX
Windows Media Audio LosslessX
Windows Media Audio ProX
Windows Media Audio VoiceX
+ +

X means that encoding (resp. decoding) is supported. +

+

E means that support is provided through an external library. +

+

I means that an integer-only version is available, too (ensures high +performance on systems without hardware floating point support). +

+ +

2.5 Subtitle Formats

+ + + + + + + + + + + + + + + + + + + + + + + + + +
NameMuxingDemuxingEncodingDecoding
3GPP Timed TextXX
AQTitleXX
DVBXXXX
DVB teletextXE
DVDXXXX
JACOsubXXX
MicroDVDXXX
MPL2XX
MPsub (MPlayer)XX
PGSX
PJS (Phoenix)XX
RealTextXX
SAMIXX
SSA/ASSXXXX
SubRip (SRT)XXXX
SubViewer v1XX
SubViewerXX
TED Talks captionsXX
VobSub (IDX+SUB)XX
VPlayerXX
WebVTTXXX
XSUBXX
+ +

X means that the feature is supported. +

+

E means that support is provided through an external library. +

+ +

2.6 Network Protocols

+ + + + + + + + + + + + + + + + + + + + + + + + +
NameSupport
fileX
FTPX
GopherX
HLSX
HTTPX
HTTPSX
MMSHX
MMSTX
pipeX
RTMPX
RTMPEX
RTMPSX
RTMPTX
RTMPTEX
RTMPTSX
RTPX
SCTPX
SFTPE
TCPX
TLSX
UDPX
+ +

X means that the protocol is supported. +

+

E means that support is provided through an external library. +

+ + +

2.7 Input/Output Devices

+ + + + + + + + + + + + + + + + + + + + +
NameInputOutput
ALSAXX
BKTRX
cacaX
DV1394X
Lavfi virtual deviceX
Linux framebufferXX
JACKX
LIBCDIOX
LIBDC1394X
OpenALX
OpenGLX
OSSXX
PulseAudioXX
SDLX
Video4Linux2XX
VfW captureX
X11 grabbingX
+ +

X means that input/output is supported. +

+ +

2.8 Timecode

+ + + + + + + + + +
Codec/formatReadWrite
AVIXX
DVXX
GXFXX
MOVXX
MPEG1/2XX
MXFXX
+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/git-howto.html b/dependencies64/ffmpeg/doc/git-howto.html new file mode 100644 index 000000000..22414bbbc --- /dev/null +++ b/dependencies64/ffmpeg/doc/git-howto.html @@ -0,0 +1,459 @@ + + + + + +FFmpeg documentation : Using git to develop FFmpeg: + + + + + + + + + + +
+
+ + +

Using git to develop FFmpeg

+ + +

Table of Contents

+ + + +

1. Introduction

+ +

This document aims at giving some quick references on a set of useful git +commands. You should always use the extensive and detailed documentation +provided directly by git: +

+
 
git --help
+man git
+
+ +

shows you the available subcommands, +

+
 
git <command> --help
+man git-<command>
+
+ +

shows information about the subcommand <command>. +

+

Additional information could be found on the +Git Reference website +

+

For more information about the Git project, visit the +

+

Git website +

+

Consult these resources whenever you have problems, they are quite exhaustive. +

+

What follows now is a basic introduction to Git and some FFmpeg-specific +guidelines to ease the contribution to the project +

+ +

2. Basic Usage

+ + +

2.1 Get GIT

+ +

You can get git from http://git-scm.com/ +Most distributions and operating systems provide a package for it. +

+ + +

2.2 Cloning the source tree

+ +
 
git clone git://source.ffmpeg.org/ffmpeg <target>
+
+ +

This will put the FFmpeg sources into the directory <target>. +

+
 
git clone git@source.ffmpeg.org:ffmpeg <target>
+
+ +

This will put the FFmpeg sources into the directory <target> and let +you push back your changes to the remote repository. +

+

Make sure that you do not have Windows line endings in your checkouts, +otherwise you may experience spurious compilation failures. One way to +achieve this is to run +

+
 
git config --global core.autocrlf false
+
+ + + +

2.3 Updating the source tree to the latest revision

+ +
 
git pull (--rebase)
+
+ +

pulls in the latest changes from the tracked branch. The tracked branch +can be remote. By default the master branch tracks the branch master in +the remote origin. +

+
+

--rebase (see below) is recommended. +

+ +

2.4 Rebasing your local branches

+ +
 
git pull --rebase
+
+ +

fetches the changes from the main repository and replays your local commits +over it. This is required to keep all your local changes at the top of +FFmpeg’s master tree. The master tree will reject pushes with merge commits. +

+ + +

2.5 Adding/removing files/directories

+ +
 
git add [-A] <filename/dirname>
+git rm [-r] <filename/dirname>
+
+ +

GIT needs to get notified of all changes you make to your working +directory that makes files appear or disappear. +Line moves across files are automatically tracked. +

+ + +

2.6 Showing modifications

+ +
 
git diff <filename(s)>
+
+ +

will show all local modifications in your working directory as unified diff. +

+ + +

2.7 Inspecting the changelog

+ +
 
git log <filename(s)>
+
+ +

You may also use the graphical tools like gitview or gitk or the web +interface available at http://source.ffmpeg.org/ +

+ +

2.8 Checking source tree status

+ +
 
git status
+
+ +

detects all the changes you made and lists what actions will be taken in case +of a commit (additions, modifications, deletions, etc.). +

+ + +

2.9 Committing

+ +
 
git diff --check
+
+ +

to double check your changes before committing them to avoid trouble later +on. All experienced developers do this on each and every commit, no matter +how small. +Every one of them has been saved from looking like a fool by this many times. +It’s very easy for stray debug output or cosmetic modifications to slip in, +please avoid problems through this extra level of scrutiny. +

+

For cosmetics-only commits you should get (almost) empty output from +

+
 
git diff -w -b <filename(s)>
+
+ +

Also check the output of +

+
 
git status
+
+ +

to make sure you don’t have untracked files or deletions. +

+
 
git add [-i|-p|-A] <filenames/dirnames>
+
+ +

Make sure you have told git your name and email address +

+
 
git config --global user.name "My Name"
+git config --global user.email my@email.invalid
+
+ +

Use –global to set the global configuration for all your git checkouts. +

+

Git will select the changes to the files for commit. Optionally you can use +the interactive or the patch mode to select hunk by hunk what should be +added to the commit. +

+ +
 
git commit
+
+ +

Git will commit the selected changes to your current local branch. +

+

You will be prompted for a log message in an editor, which is either +set in your personal configuration file through +

+
 
git config --global core.editor
+
+ +

or set by one of the following environment variables: +GIT_EDITOR, VISUAL or EDITOR. +

+

Log messages should be concise but descriptive. Explain why you made a change, +what you did will be obvious from the changes themselves most of the time. +Saying just "bug fix" or "10l" is bad. Remember that people of varying skill +levels look at and educate themselves while reading through your code. Don’t +include filenames in log messages, Git provides that information. +

+

Possibly make the commit message have a terse, descriptive first line, an +empty line and then a full description. The first line will be used to name +the patch by git format-patch. +

+ +

2.10 Preparing a patchset

+ +
 
git format-patch <commit> [-o directory]
+
+ +

will generate a set of patches for each commit between <commit> and +current HEAD. E.g. +

+
 
git format-patch origin/master
+
+ +

will generate patches for all commits on current branch which are not +present in upstream. +A useful shortcut is also +

+
 
git format-patch -n
+
+ +

which will generate patches from last n commits. +By default the patches are created in the current directory. +

+ +

2.11 Sending patches for review

+ +
 
git send-email <commit list|directory>
+
+ +

will send the patches created by git format-patch or directly +generate them. All the email fields can be configured in the global/local +configuration or overridden by command line. +Note that this tool must often be installed separately (e.g. git-email +package on Debian-based distros). +

+ + +

2.12 Renaming/moving/copying files or contents of files

+ +

Git automatically tracks such changes, making those normal commits. +

+
 
mv/cp path/file otherpath/otherfile
+git add [-A] .
+git commit
+
+ + + +

3. Git configuration

+ +

In order to simplify a few workflows, it is advisable to configure both +your personal Git installation and your local FFmpeg repository. +

+ +

3.1 Personal Git installation

+ +

Add the following to your ‘~/.gitconfig’ to help git send-email +and git format-patch detect renames: +

+
 
[diff]
+        renames = copy
+
+ + +

3.2 Repository configuration

+ +

In order to have git send-email automatically send patches +to the ffmpeg-devel mailing list, add the following stanza +to ‘/path/to/ffmpeg/repository/.git/config’: +

+
 
[sendemail]
+        to = ffmpeg-devel@ffmpeg.org
+
+ + +

4. FFmpeg specific

+ + +

4.1 Reverting broken commits

+ +
 
git reset <commit>
+
+ +

git reset will uncommit the changes up to <commit>, rewriting +the current branch history. +

+
 
git commit --amend
+
+ +

allows one to amend the last commit details quickly. +

+
 
git rebase -i origin/master
+
+ +

will replay local commits over the main repository, allowing you to edit, merge +or remove some of them in the process. +

+
+

git reset, git commit --amend and git rebase +rewrite history, so you should use them ONLY on your local or topic branches. +The main repository will reject those changes. +

+
 
git revert <commit>
+
+ +

git revert will generate a revert commit. This will not make the +faulty commit disappear from the history. +

+ +

4.2 Pushing changes to remote trees

+ +
 
git push
+
+ +

Will push the changes to the default remote (origin). +Git will prevent you from pushing changes if the local and remote trees are +out of sync. Refer to 2.3 Updating the source tree to the latest revision and 2.4 Rebasing your local branches to sync the local tree. +

+
 
git remote add <name> <url>
+
+ +

Will add additional remote with a name reference, it is useful if you want +to push your local branch for review on a remote host. +

+
 
git push <remote> <refspec>
+
+ +

Will push the changes to the <remote> repository. +Omitting <refspec> makes git push update all the remote +branches matching the local ones. +

+ +

4.3 Finding a specific svn revision

+ +

Since version 1.7.1 git supports :/foo syntax for specifying commits +based on a regular expression. see man gitrevisions +

+
 
git show :/'as revision 23456'
+
+ +

will show the svn changeset r23456. With older git versions searching in +the git log output is the easiest option (especially if a pager with +search capabilities is used). +This commit can be checked out with +

+
 
git checkout -b svn_23456 :/'as revision 23456'
+
+ +

or for git < 1.7.1 with +

+
 
git checkout -b svn_23456 $SHA1
+
+ +

where $SHA1 is the commit hash from the git log output. +

+ + +

5. pre-push checklist

+ +

Once you have a set of commits that you feel are ready for pushing, +work through the following checklist to doublecheck everything is in +proper order. This list tries to be exhaustive. In case you are just +pushing a typo in a comment, some of the steps may be unnecessary. +Apply your common sense, but if in doubt, err on the side of caution. +

+

First, make sure that the commits and branches you are going to push +match what you want pushed and that nothing is missing, extraneous or +wrong. You can see what will be pushed by running the git push command +with –dry-run first. And then inspecting the commits listed with +git log -p 1234567..987654. The git status command +may help in finding local changes that have been forgotten to be added. +

+

Next let the code pass through a full run of our testsuite. +

+
    +
  • make distclean +
  • /path/to/ffmpeg/configure +
  • make check +
  • if fate fails due to missing samples run make fate-rsync and retry +
+ +

Make sure all your changes have been checked before pushing them, the +testsuite only checks against regressions and that only to some extent. It does +obviously not check newly added features/code to be working unless you have +added a test for that (which is recommended). +

+

Also note that every single commit should pass the test suite, not just +the result of a series of patches. +

+

Once everything passed, push the changes to your public ffmpeg clone and post a +merge request to ffmpeg-devel. You can also push them directly but this is not +recommended. +

+ +

6. Server Issues

+ +

Contact the project admins root@ffmpeg.org if you have technical +problems with the GIT server. +

+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/libavcodec.html b/dependencies64/ffmpeg/doc/libavcodec.html new file mode 100644 index 000000000..4aa672707 --- /dev/null +++ b/dependencies64/ffmpeg/doc/libavcodec.html @@ -0,0 +1,78 @@ + + + + + +FFmpeg documentation : Libavcodec + + + + + + + + + + +
+
+ + +

Libavcodec Documentation

+ + +

Table of Contents

+ + + +

1. Description

+ +

The libavcodec library provides a generic encoding/decoding framework +and contains multiple decoders and encoders for audio, video and +subtitle streams, and several bitstream filters. +

+

The shared architecture provides various services ranging from bit +stream I/O to DSP optimizations, and makes it suitable for +implementing robust and fast codecs as well as for experimentation. +

+ + +

2. See Also

+ +

ffmpeg, ffplay, ffprobe, ffserver, +ffmpeg-codecs, bitstream-filters, +libavutil +

+ + +

3. Authors

+ +

The FFmpeg developers. +

+

For details about the authorship, see the Git history of the project +(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command +git log in the FFmpeg source directory, or browsing the +online repository at http://source.ffmpeg.org. +

+

Maintainers for the specific components are listed in the file +‘MAINTAINERS’ in the source code tree. +

+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/libavdevice.html b/dependencies64/ffmpeg/doc/libavdevice.html new file mode 100644 index 000000000..38e27c302 --- /dev/null +++ b/dependencies64/ffmpeg/doc/libavdevice.html @@ -0,0 +1,75 @@ + + + + + +FFmpeg documentation : Libavdevice + + + + + + + + + + +
+
+ + +

Libavdevice Documentation

+ + +

Table of Contents

+ + + +

1. Description

+ +

The libavdevice library provides a generic framework for grabbing from +and rendering to many common multimedia input/output devices, and +supports several input and output devices, including Video4Linux2, +VfW, DShow, and ALSA. +

+ + +

2. See Also

+ +

ffmpeg, ffplay, ffprobe, ffserver, +ffmpeg-devices, +libavutil, libavcodec, libavformat +

+ + +

3. Authors

+ +

The FFmpeg developers. +

+

For details about the authorship, see the Git history of the project +(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command +git log in the FFmpeg source directory, or browsing the +online repository at http://source.ffmpeg.org. +

+

Maintainers for the specific components are listed in the file +‘MAINTAINERS’ in the source code tree. +

+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/libavfilter.html b/dependencies64/ffmpeg/doc/libavfilter.html new file mode 100644 index 000000000..58a5de349 --- /dev/null +++ b/dependencies64/ffmpeg/doc/libavfilter.html @@ -0,0 +1,74 @@ + + + + + +FFmpeg documentation : Libavfilter + + + + + + + + + + +
+
+ + +

Libavfilter Documentation

+ + +

Table of Contents

+ + + +

1. Description

+ +

The libavfilter library provides a generic audio/video filtering +framework containing several filters, sources and sinks. +

+ + +

2. See Also

+ +

ffmpeg, ffplay, ffprobe, ffserver, +ffmpeg-filters, +libavutil, libswscale, libswresample, +libavcodec, libavformat, libavdevice +

+ + +

3. Authors

+ +

The FFmpeg developers. +

+

For details about the authorship, see the Git history of the project +(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command +git log in the FFmpeg source directory, or browsing the +online repository at http://source.ffmpeg.org. +

+

Maintainers for the specific components are listed in the file +‘MAINTAINERS’ in the source code tree. +

+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/libavformat.html b/dependencies64/ffmpeg/doc/libavformat.html new file mode 100644 index 000000000..170f29f89 --- /dev/null +++ b/dependencies64/ffmpeg/doc/libavformat.html @@ -0,0 +1,78 @@ + + + + + +FFmpeg documentation : Libavformat + + + + + + + + + + +
+
+ + +

Libavformat Documentation

+ + +

Table of Contents

+ + + +

1. Description

+ +

The libavformat library provides a generic framework for multiplexing +and demultiplexing (muxing and demuxing) audio, video and subtitle +streams. It encompasses multiple muxers and demuxers for multimedia +container formats. +

+

It also supports several input and output protocols to access a media +resource. +

+ + +

2. See Also

+ +

ffmpeg, ffplay, ffprobe, ffserver, +ffmpeg-formats, ffmpeg-protocols, +libavutil, libavcodec +

+ + +

3. Authors

+ +

The FFmpeg developers. +

+

For details about the authorship, see the Git history of the project +(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command +git log in the FFmpeg source directory, or browsing the +online repository at http://source.ffmpeg.org. +

+

Maintainers for the specific components are listed in the file +‘MAINTAINERS’ in the source code tree. +

+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/libavutil.html b/dependencies64/ffmpeg/doc/libavutil.html new file mode 100644 index 000000000..4eb38645b --- /dev/null +++ b/dependencies64/ffmpeg/doc/libavutil.html @@ -0,0 +1,97 @@ + + + + + +FFmpeg documentation : Libavutil + + + + + + + + + + +
+
+ + +

Libavutil Documentation

+ + +

Table of Contents

+ + + +

1. Description

+ +

The libavutil library is a utility library to aid portable +multimedia programming. It contains safe portable string functions, +random number generators, data structures, additional mathematics +functions, cryptography and multimedia related functionality (like +enumerations for pixel and sample formats). It is not a library for +code needed by both libavcodec and libavformat. +

+

The goals for this library is to be: +

+
+
Modular
+

It should have few interdependencies and the possibility of disabling individual +parts during ./configure. +

+
+
Small
+

Both sources and objects should be small. +

+
+
Efficient
+

It should have low CPU and memory usage. +

+
+
Useful
+

It should avoid useless features that almost no one needs. +

+
+ + + +

2. See Also

+ +

ffmpeg, ffplay, ffprobe, ffserver, +ffmpeg-utils +

+ + +

3. Authors

+ +

The FFmpeg developers. +

+

For details about the authorship, see the Git history of the project +(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command +git log in the FFmpeg source directory, or browsing the +online repository at http://source.ffmpeg.org. +

+

Maintainers for the specific components are listed in the file +‘MAINTAINERS’ in the source code tree. +

+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/libswresample.html b/dependencies64/ffmpeg/doc/libswresample.html new file mode 100644 index 000000000..2ef5a2e20 --- /dev/null +++ b/dependencies64/ffmpeg/doc/libswresample.html @@ -0,0 +1,100 @@ + + + + + +FFmpeg documentation : Libswresample + + + + + + + + + + +
+
+ + +

Libswresample Documentation

+ + +

Table of Contents

+ + + +

1. Description

+ +

The libswresample library performs highly optimized audio resampling, +rematrixing and sample format conversion operations. +

+

Specifically, this library performs the following conversions: +

+
    +
  • +Resampling: is the process of changing the audio rate, for +example from a high sample rate of 44100Hz to 8000Hz. Audio +conversion from high to low sample rate is a lossy process. Several +resampling options and algorithms are available. + +
  • +Format conversion: is the process of converting the type of +samples, for example from 16-bit signed samples to unsigned 8-bit or +float samples. It also handles packing conversion, when passing from +packed layout (all samples belonging to distinct channels interleaved +in the same buffer), to planar layout (all samples belonging to the +same channel stored in a dedicated buffer or "plane"). + +
  • +Rematrixing: is the process of changing the channel layout, for +example from stereo to mono. When the input channels cannot be mapped +to the output streams, the process is lossy, since it involves +different gain factors and mixing. +
+ +

Various other audio conversions (e.g. stretching and padding) are +enabled through dedicated options. +

+ + +

2. See Also

+ +

ffmpeg, ffplay, ffprobe, ffserver, +ffmpeg-resampler, +libavutil +

+ + +

3. Authors

+ +

The FFmpeg developers. +

+

For details about the authorship, see the Git history of the project +(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command +git log in the FFmpeg source directory, or browsing the +online repository at http://source.ffmpeg.org. +

+

Maintainers for the specific components are listed in the file +‘MAINTAINERS’ in the source code tree. +

+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/libswscale.html b/dependencies64/ffmpeg/doc/libswscale.html new file mode 100644 index 000000000..8e7684c16 --- /dev/null +++ b/dependencies64/ffmpeg/doc/libswscale.html @@ -0,0 +1,93 @@ + + + + + +FFmpeg documentation : Libswscale + + + + + + + + + + +
+
+ + +

Libswscale Documentation

+ + +

Table of Contents

+ + + +

1. Description

+ +

The libswscale library performs highly optimized image scaling and +colorspace and pixel format conversion operations. +

+

Specifically, this library performs the following conversions: +

+
    +
  • +Rescaling: is the process of changing the video size. Several +rescaling options and algorithms are available. This is usually a +lossy process. + +
  • +Pixel format conversion: is the process of converting the image +format and colorspace of the image, for example from planar YUV420P to +RGB24 packed. It also handles packing conversion, that is converts +from packed layout (all pixels belonging to distinct planes +interleaved in the same buffer), to planar layout (all samples +belonging to the same plane stored in a dedicated buffer or "plane"). + +

    This is usually a lossy process in case the source and destination +colorspaces differ. +

+ + + +

2. See Also

+ +

ffmpeg, ffplay, ffprobe, ffserver, +ffmpeg-scaler, +libavutil +

+ + +

3. Authors

+ +

The FFmpeg developers. +

+

For details about the authorship, see the Git history of the project +(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command +git log in the FFmpeg source directory, or browsing the +online repository at http://source.ffmpeg.org. +

+

Maintainers for the specific components are listed in the file +‘MAINTAINERS’ in the source code tree. +

+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/nut.html b/dependencies64/ffmpeg/doc/nut.html new file mode 100644 index 000000000..bf71d28c1 --- /dev/null +++ b/dependencies64/ffmpeg/doc/nut.html @@ -0,0 +1,183 @@ + + + + + +FFmpeg documentation : NUT: + + + + + + + + + + +
+
+ + +

NUT

+ + +

Table of Contents

+ + + +

1. Description

+

NUT is a low overhead generic container format. It stores audio, video, +subtitle and user-defined streams in a simple, yet efficient, way. +

+

It was created by a group of FFmpeg and MPlayer developers in 2003 +and was finalized in 2008. +

+

The official nut specification is at svn://svn.mplayerhq.hu/nut +In case of any differences between this text and the official specification, +the official specification shall prevail. +

+ +

2. Container-specific codec tags

+ + +

2.1 Generic raw YUVA formats

+ +

Since many exotic planar YUVA pixel formats are not considered by +the AVI/QuickTime FourCC lists, the following scheme is adopted for +representing them. +

+

The first two bytes can contain the values: +Y1 = only Y +Y2 = Y+A +Y3 = YUV +Y4 = YUVA +

+

The third byte represents the width and height chroma subsampling +values for the UV planes, that is the amount to shift the luma +width/height right to find the chroma width/height. +

+

The fourth byte is the number of bits used (8, 16, ...). +

+

If the order of bytes is inverted, that means that each component has +to be read big-endian. +

+ +

2.2 Raw Audio

+ + + + + + +
ALAWA-LAW
ULAWMU-LAW
P<type><interleaving><bits>little-endian PCM
<bits><interleaving><type>Pbig-endian PCM
+ +

<type> is S for signed integer, U for unsigned integer, F for IEEE float +<interleaving> is D for default, P is for planar. +<bits> is 8/16/24/32 +

+
 
PFD[32]   would for example be signed 32 bit little-endian IEEE float
+
+ + +

2.3 Subtitles

+ + + + + + +
UTF8Raw UTF-8
SSA[0]SubStation Alpha
DVDSDVD subtitles
DVBSDVB subtitles
+ + +

2.4 Raw Data

+ + + +
UTF8Raw UTF-8
+ + +

2.5 Codecs

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
3IV1non-compliant MPEG-4 generated by old 3ivx
ASV1Asus Video
ASV2Asus Video 2
CVIDCinepak
CYUVCreative YUV
DIVXnon-compliant MPEG-4 generated by old DivX
DUCKTruemotion 1
FFV1FFmpeg video 1
FFVHFFmpeg Huffyuv
H261ITU H.261
H262ITU H.262
H263ITU H.263
H264ITU H.264
HFYUHuffyuv
I263Intel H.263
IV31Indeo 3.1
IV32Indeo 3.2
IV50Indeo 5.0
LJPGITU JPEG (lossless)
MJLSITU JPEG-LS
MJPGITU JPEG
MPG4MS MPEG-4v1 (not ISO MPEG-4)
MP42MS MPEG-4v2
MP43MS MPEG-4v3
MP4VISO MPEG-4 Part 2 Video (from old encoders)
mpg1ISO MPEG-1 Video
mpg2ISO MPEG-2 Video
MRLEMS RLE
MSVCMS Video 1
RT21Indeo 2.1
RV10RealVideo 1.0
RV20RealVideo 2.0
RV30RealVideo 3.0
RV40RealVideo 4.0
SNOWFFmpeg Snow
SVQ1Sorenson Video 1
SVQ3Sorenson Video 3
theoXiph Theora
TM20Truemotion 2.0
UMP4non-compliant MPEG-4 generated by UB Video MPEG-4
VCR1ATI VCR1
VP30VP 3.0
VP31VP 3.1
VP50VP 5.0
VP60VP 6.0
VP61VP 6.1
VP62VP 6.2
VP70VP 7.0
WMV1MS WMV7
WMV2MS WMV8
WMV3MS WMV9
WV1Fnon-compliant MPEG-4 generated by ?
WVC1VC-1
XVIDnon-compliant MPEG-4 generated by old Xvid
XVIXnon-compliant MPEG-4 generated by old Xvid with interlacing bug
+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/doc/platform.html b/dependencies64/ffmpeg/doc/platform.html new file mode 100644 index 000000000..ba7c5116e --- /dev/null +++ b/dependencies64/ffmpeg/doc/platform.html @@ -0,0 +1,422 @@ + + + + + +FFmpeg documentation : Platform Specific Information: + + + + + + + + + + +
+
+ + +

Platform Specific Information

+ + +

Table of Contents

+ + + +

1. Unix-like

+ +

Some parts of FFmpeg cannot be built with version 2.15 of the GNU +assembler which is still provided by a few AMD64 distributions. To +make sure your compiler really uses the required version of gas +after a binutils upgrade, run: +

+
 
$(gcc -print-prog-name=as) --version
+
+ +

If not, then you should install a different compiler that has no +hard-coded path to gas. In the worst case pass --disable-asm +to configure. +

+ +

1.1 BSD

+ +

BSD make will not build FFmpeg, you need to install and use GNU Make +(gmake). +

+ +

1.2 (Open)Solaris

+ +

GNU Make is required to build FFmpeg, so you have to invoke (gmake), +standard Solaris Make will not work. When building with a non-c99 front-end +(gcc, generic suncc) add either --extra-libs=/usr/lib/values-xpg6.o +or --extra-libs=/usr/lib/64/values-xpg6.o to the configure options +since the libc is not c99-compliant by default. The probes performed by +configure may raise an exception leading to the death of configure itself +due to a bug in the system shell. Simply invoke a different shell such as +bash directly to work around this: +

+
 
bash ./configure
+
+ +

+

+

1.3 Darwin (Mac OS X, iPhone)

+ +

The toolchain provided with Xcode is sufficient to build the basic +unacelerated code. +

+

Mac OS X on PowerPC or ARM (iPhone) requires a preprocessor from +https://github.com/FFmpeg/gas-preprocessor or +http://github.com/yuvi/gas-preprocessor to build the optimized +assembler functions. Put the Perl script somewhere +in your PATH, FFmpeg’s configure will pick it up automatically. +

+

Mac OS X on amd64 and x86 requires yasm to build most of the +optimized assembler functions. Fink, +Gentoo Prefix, +Homebrew +or MacPorts can easily provide it. +

+ + +

2. DOS

+ +

Using a cross-compiler is preferred for various reasons. +http://www.delorie.com/howto/djgpp/linux-x-djgpp.html +

+ + +

3. OS/2

+ +

For information about compiling FFmpeg on OS/2 see +http://www.edm2.com/index.php/FFmpeg. +

+ + +

4. Windows

+ +

To get help and instructions for building FFmpeg under Windows, check out +the FFmpeg Windows Help Forum at http://ffmpeg.zeranoe.com/forum/. +

+ +

4.1 Native Windows compilation using MinGW or MinGW-w64

+ +

FFmpeg can be built to run natively on Windows using the MinGW or MinGW-w64 +toolchains. Install the latest versions of MSYS and MinGW or MinGW-w64 from +http://www.mingw.org/ or http://mingw-w64.sourceforge.net/. +You can find detailed installation instructions in the download section and +the FAQ. +

+

Notes: +

+
    +
  • Building natively using MSYS can be sped up by disabling implicit rules +in the Makefile by calling make -r instead of plain make. This +speed up is close to non-existent for normal one-off builds and is only +noticeable when running make for a second time (for example during +make install). + +
  • In order to compile FFplay, you must have the MinGW development library +of SDL and pkg-config installed. + +
  • By using ./configure --enable-shared when configuring FFmpeg, +you can build the FFmpeg libraries (e.g. libavutil, libavcodec, +libavformat) as DLLs. + +
+ + +

4.2 Microsoft Visual C++ or Intel C++ Compiler for Windows

+ +

FFmpeg can be built with MSVC 2012 or earlier using a C99-to-C89 conversion utility +and wrapper, or with MSVC 2013 and ICL natively. +

+

You will need the following prerequisites: +

+ + +

To set up a proper environment in MSYS, you need to run msys.bat from +the Visual Studio or Intel Compiler command prompt. +

+

Place yasm.exe somewhere in your PATH. If using MSVC 2012 or +earlier, place c99wrap.exe and c99conv.exe somewhere in your +PATH as well. +

+

Next, make sure any other headers and libs you want to use, such as zlib, are +located in a spot that the compiler can see. Do so by modifying the LIB +and INCLUDE environment variables to include the Windows-style +paths to these directories. Alternatively, you can try and use the +--extra-cflags/--extra-ldflags configure options. If using MSVC +2012 or earlier, place inttypes.h somewhere the compiler can see too. +

+

Finally, run: +

+
 
For MSVC:
+./configure --toolchain=msvc
+
+For ICL:
+./configure --toolchain=icl
+
+make
+make install
+
+ +

If you wish to compile shared libraries, add --enable-shared to your +configure options. Note that due to the way MSVC and ICL handle DLL imports and +exports, you cannot compile static and shared libraries at the same time, and +enabling shared libraries will automatically disable the static ones. +

+

Notes: +

+
    +
  • It is possible that coreutils’ link.exe conflicts with MSVC’s linker. +You can find out by running which link to see which link.exe you +are using. If it is located at /bin/link.exe, then you have the wrong one +in your PATH. Either move or remove that copy, or make sure MSVC’s +link.exe takes precedence in your PATH over coreutils’. + +
  • If you wish to build with zlib support, you will have to grab a compatible +zlib binary from somewhere, with an MSVC import lib, or if you wish to link +statically, you can follow the instructions below to build a compatible +zlib.lib with MSVC. Regardless of which method you use, you must still +follow step 3, or compilation will fail. +
      +
    1. Grab the zlib sources. +
    2. Edit win32/Makefile.msc so that it uses -MT instead of -MD, since +this is how FFmpeg is built as well. +
    3. Edit zconf.h and remove its inclusion of unistd.h. This gets +erroneously included when building FFmpeg. +
    4. Run nmake -f win32/Makefile.msc. +
    5. Move zlib.lib, zconf.h, and zlib.h to somewhere MSVC +can see. +
    + +
  • FFmpeg has been tested with the following on i686 and x86_64: +
      +
    • Visual Studio 2010 Pro and Express +
    • Visual Studio 2012 Pro and Express +
    • Visual Studio 2013 Pro and Express +
    • Intel Composer XE 2013 +
    • Intel Composer XE 2013 SP1 +
    +

    Anything else is not officially supported. +

    +
+ + +

4.2.1 Linking to FFmpeg with Microsoft Visual C++

+ +

If you plan to link with MSVC-built static libraries, you will need +to make sure you have Runtime Library set to +Multi-threaded (/MT) in your project’s settings. +

+

You will need to define inline to something MSVC understands: +

 
#define inline __inline
+
+ +

Also note, that as stated in Microsoft Visual C++, you will need +an MSVC-compatible inttypes.h. +

+

If you plan on using import libraries created by dlltool, you must +set References to No (/OPT:NOREF) under the linker optimization +settings, otherwise the resulting binaries will fail during runtime. +This is not required when using import libraries generated by lib.exe. +This issue is reported upstream at +http://sourceware.org/bugzilla/show_bug.cgi?id=12633. +

+

To create import libraries that work with the /OPT:REF option +(which is enabled by default in Release mode), follow these steps: +

+
    +
  1. Open the Visual Studio Command Prompt. + +

    Alternatively, in a normal command line prompt, call ‘vcvars32.bat’ +which sets up the environment variables for the Visual C++ tools +(the standard location for this file is something like +‘C:\Program Files (x86_\Microsoft Visual Studio 10.0\VC\bin\vcvars32.bat’). +

    +
  2. Enter the ‘bin’ directory where the created LIB and DLL files +are stored. + +
  3. Generate new import libraries with lib.exe: + +
     
    lib /machine:i386 /def:..\lib\foo-version.def  /out:foo.lib
    +
    + +

    Replace foo-version and foo with the respective library names. +

    +
+ +

+

+

4.3 Cross compilation for Windows with Linux

+ +

You must use the MinGW cross compilation tools available at +http://www.mingw.org/. +

+

Then configure FFmpeg with the following options: +

 
./configure --target-os=mingw32 --cross-prefix=i386-mingw32msvc-
+
+

(you can change the cross-prefix according to the prefix chosen for the +MinGW tools). +

+

Then you can easily test FFmpeg with Wine. +

+ +

4.4 Compilation under Cygwin

+ +

Please use Cygwin 1.7.x as the obsolete 1.5.x Cygwin versions lack +llrint() in its C library. +

+

Install your Cygwin with all the "Base" packages, plus the +following "Devel" ones: +

 
binutils, gcc4-core, make, git, mingw-runtime, texi2html
+
+ +

In order to run FATE you will also need the following "Utils" packages: +

 
bc, diffutils
+
+ +

If you want to build FFmpeg with additional libraries, download Cygwin +"Devel" packages for Ogg and Vorbis from any Cygwin packages repository: +

 
libogg-devel, libvorbis-devel
+
+ +

These library packages are only available from +Cygwin Ports: +

+
 
yasm, libSDL-devel, libfaac-devel, libaacplus-devel, libgsm-devel, libmp3lame-devel,
+libschroedinger1.0-devel, speex-devel, libtheora-devel, libxvidcore-devel
+
+ +

The recommendation for x264 is to build it from source, as it evolves too +quickly for Cygwin Ports to be up to date. +

+ +

4.5 Crosscompilation for Windows under Cygwin

+ +

With Cygwin you can create Windows binaries that do not need the cygwin1.dll. +

+

Just install your Cygwin as explained before, plus these additional +"Devel" packages: +

 
gcc-mingw-core, mingw-runtime, mingw-zlib
+
+ +

and add some special flags to your configure invocation. +

+

For a static build run +

 
./configure --target-os=mingw32 --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin
+
+ +

and for a build with shared libraries +

 
./configure --target-os=mingw32 --enable-shared --disable-static --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin
+
+ + +

5. Plan 9

+ +

The native Plan 9 compiler +does not implement all the C99 features needed by FFmpeg so the gcc +port must be used. Furthermore, a few items missing from the C +library and shell environment need to be fixed. +

+
    +
  • GNU awk, grep, make, and sed + +

    Working packages of these tools can be found at +ports2plan9. +They can be installed with 9front’s pkg +utility by setting pkgpath to +http://ports2plan9.googlecode.com/files/. +

    +
  • Missing/broken head and printf commands + +

    Replacements adequate for building FFmpeg can be found in the +compat/plan9 directory. Place these somewhere they will be +found by the shell. These are not full implementations of the +commands and are not suitable for general use. +

    +
  • Missing C99 stdint.h and inttypes.h + +

    Replacement headers are available from +http://code.google.com/p/plan9front/issues/detail?id=152. +

    +
  • Missing or non-standard library functions + +

    Some functions in the C library are missing or incomplete. The +gcc-apelibs-1207 package from +ports2plan9 +includes an updated C library, but installing the full package gives +unusable executables. Instead, keep the files from gccbin.tgz +under /386/lib/gnu. From the libc.a archive in the +gcc-apelibs-1207 package, extract the following object files and +turn them into a library: +

    +
      +
    • strerror.o +
    • strtoll.o +
    • snprintf.o +
    • vsnprintf.o +
    • vfprintf.o +
    • _IO_getc.o +
    • _IO_putc.o +
    + +

    Use the --extra-libs option of configure to inform the +build system of this library. +

    +
  • FPU exceptions enabled by default + +

    Unlike most other systems, Plan 9 enables FPU exceptions by default. +These must be disabled before calling any FFmpeg functions. While the +included tools will do this automatically, other users of the +libraries must do it themselves. +

    +
+ +
+This document was generated by Kyle Schwarz on June 19, 2014 using texi2html 1.82.
diff --git a/dependencies64/ffmpeg/include/libavcodec/avcodec.h b/dependencies64/ffmpeg/include/libavcodec/avcodec.h index 17c48ddde..5df717cae 100644 --- a/dependencies64/ffmpeg/include/libavcodec/avcodec.h +++ b/dependencies64/ffmpeg/include/libavcodec/avcodec.h @@ -40,7 +40,13 @@ #include "libavutil/pixfmt.h" #include "libavutil/rational.h" -#include "libavcodec/version.h" +#include "version.h" + +#if FF_API_FAST_MALLOC +// to provide fast_*alloc +#include "libavutil/mem.h" +#endif + /** * @defgroup libavc Encoding/Decoding Library * @{ @@ -104,7 +110,9 @@ enum AVCodecID { /* video codecs */ AV_CODEC_ID_MPEG1VIDEO, AV_CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding +#if FF_API_XVMC AV_CODEC_ID_MPEG2VIDEO_XVMC, +#endif /* FF_API_XVMC */ AV_CODEC_ID_H261, AV_CODEC_ID_H263, AV_CODEC_ID_RV10, @@ -274,6 +282,10 @@ enum AVCodecID { AV_CODEC_ID_AIC, AV_CODEC_ID_ESCAPE130_DEPRECATED, AV_CODEC_ID_G2M_DEPRECATED, + AV_CODEC_ID_WEBP_DEPRECATED, + AV_CODEC_ID_HNM4_VIDEO, + AV_CODEC_ID_HEVC_DEPRECATED, + AV_CODEC_ID_FIC, AV_CODEC_ID_BRENDER_PIX= MKBETAG('B','P','I','X'), AV_CODEC_ID_Y41P = MKBETAG('Y','4','1','P'), @@ -300,6 +312,8 @@ enum AVCodecID { AV_CODEC_ID_SNOW = MKBETAG('S','N','O','W'), AV_CODEC_ID_WEBP = MKBETAG('W','E','B','P'), AV_CODEC_ID_SMVJPEG = MKBETAG('S','M','V','J'), + AV_CODEC_ID_HEVC = MKBETAG('H','2','6','5'), +#define AV_CODEC_ID_H265 AV_CODEC_ID_HEVC /* various PCM "codecs" */ AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs @@ -331,6 +345,8 @@ enum AVCodecID { AV_CODEC_ID_PCM_LXF, AV_CODEC_ID_S302M, AV_CODEC_ID_PCM_S8_PLANAR, + AV_CODEC_ID_PCM_S24LE_PLANAR_DEPRECATED, + AV_CODEC_ID_PCM_S32LE_PLANAR_DEPRECATED, AV_CODEC_ID_PCM_S24LE_PLANAR = MKBETAG(24,'P','S','P'), AV_CODEC_ID_PCM_S32LE_PLANAR = MKBETAG(32,'P','S','P'), AV_CODEC_ID_PCM_S16BE_PLANAR = MKBETAG('P','S','P',16), @@ -371,6 +387,7 @@ enum AVCodecID { AV_CODEC_ID_ADPCM_IMA_OKI = MKBETAG('O','K','I',' '), AV_CODEC_ID_ADPCM_DTK = 
MKBETAG('D','T','K',' '), AV_CODEC_ID_ADPCM_IMA_RAD = MKBETAG('R','A','D',' '), + AV_CODEC_ID_ADPCM_G726LE = MKBETAG('6','2','7','G'), /* AMR */ AV_CODEC_ID_AMR_NB = 0x12000, @@ -419,7 +436,9 @@ enum AVCodecID { AV_CODEC_ID_MLP, AV_CODEC_ID_GSM_MS, /* as found in WAV */ AV_CODEC_ID_ATRAC3, +#if FF_API_VOXWARE AV_CODEC_ID_VOXWARE, +#endif AV_CODEC_ID_APE, AV_CODEC_ID_NELLYMOSER, AV_CODEC_ID_MUSEPACK8, @@ -451,6 +470,7 @@ enum AVCodecID { AV_CODEC_ID_OPUS_DEPRECATED, AV_CODEC_ID_COMFORT_NOISE, AV_CODEC_ID_TAK_DEPRECATED, + AV_CODEC_ID_METASOUND, AV_CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'), AV_CODEC_ID_SONIC = MKBETAG('S','O','N','C'), AV_CODEC_ID_SONIC_LS = MKBETAG('S','O','N','L'), @@ -494,6 +514,7 @@ enum AVCodecID { AV_CODEC_ID_OTF = MKBETAG( 0 ,'O','T','F'), AV_CODEC_ID_SMPTE_KLV = MKBETAG('K','L','V','A'), AV_CODEC_ID_DVD_NAV = MKBETAG('D','N','A','V'), + AV_CODEC_ID_TIMED_ID3 = MKBETAG('T','I','D','3'), AV_CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like AV_CODEC_ID_NONE) but lavf should attempt to identify it @@ -616,16 +637,26 @@ enum AVColorPrimaries{ AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC AVCOL_PRI_SMPTE240M = 7, ///< functionally identical to above AVCOL_PRI_FILM = 8, + AVCOL_PRI_BT2020 = 9, ///< ITU-R BT2020 AVCOL_PRI_NB , ///< Not part of ABI }; enum AVColorTransferCharacteristic{ - AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361 - AVCOL_TRC_UNSPECIFIED = 2, - AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM - AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG - AVCOL_TRC_SMPTE240M = 7, - AVCOL_TRC_NB , ///< Not part of ABI + AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361 + AVCOL_TRC_UNSPECIFIED = 2, + AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM + AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG + AVCOL_TRC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC + AVCOL_TRC_SMPTE240M = 7, + 
AVCOL_TRC_LINEAR = 8, ///< "Linear transfer characteristics" + AVCOL_TRC_LOG = 9, ///< "Logarithmic transfer characteristic (100:1 range)" + AVCOL_TRC_LOG_SQRT = 10, ///< "Logarithmic transfer characteristic (100 * Sqrt( 10 ) : 1 range)" + AVCOL_TRC_IEC61966_2_4 = 11, ///< IEC 61966-2-4 + AVCOL_TRC_BT1361_ECG = 12, ///< ITU-R BT1361 Extended Colour Gamut + AVCOL_TRC_IEC61966_2_1 = 13, ///< IEC 61966-2-1 (sRGB or sYCC) + AVCOL_TRC_BT2020_10 = 14, ///< ITU-R BT2020 for 10 bit system + AVCOL_TRC_BT2020_12 = 15, ///< ITU-R BT2020 for 12 bit system + AVCOL_TRC_NB , ///< Not part of ABI }; /** @@ -667,7 +698,12 @@ typedef struct RcOverride{ float quality_factor; } RcOverride; +#if FF_API_MAX_BFRAMES +/** + * @deprecated there is no libavcodec-wide limit on the number of B-frames + */ #define FF_MAX_B_FRAMES 16 +#endif /* encoding support These flags can be passed in AVCodecContext.flags before initialization. @@ -681,6 +717,7 @@ typedef struct RcOverride{ #define CODEC_FLAG_UNALIGNED 0x0001 #define CODEC_FLAG_QSCALE 0x0002 ///< Use fixed qscale. #define CODEC_FLAG_4MV 0x0004 ///< 4 MV per MB allowed / advanced prediction for H.263. +#define CODEC_FLAG_OUTPUT_CORRUPT 0x0008 ///< Output even those frames that might be corrupted #define CODEC_FLAG_QPEL 0x0010 ///< Use qpel MC. #define CODEC_FLAG_GMC 0x0020 ///< Use GMC. #define CODEC_FLAG_MV0 0x0040 ///< Always try a MB with MV=<0,0>. @@ -693,7 +730,13 @@ typedef struct RcOverride{ #define CODEC_FLAG_PASS1 0x0200 ///< Use internal 2pass ratecontrol in first pass mode. #define CODEC_FLAG_PASS2 0x0400 ///< Use internal 2pass ratecontrol in second pass mode. #define CODEC_FLAG_GRAY 0x2000 ///< Only decode/encode grayscale. -#define CODEC_FLAG_EMU_EDGE 0x4000 ///< Don't draw edges. +#if FF_API_EMU_EDGE +/** + * @deprecated edges are not used/required anymore. I.e. this flag is now always + * set. + */ +#define CODEC_FLAG_EMU_EDGE 0x4000 +#endif #define CODEC_FLAG_PSNR 0x8000 ///< error[?] variables will be set during encoding. 
#define CODEC_FLAG_TRUNCATED 0x00010000 /** Input bitstream might be truncated at a random location instead of only at frame boundaries. */ @@ -731,8 +774,16 @@ typedef struct RcOverride{ */ #define CODEC_CAP_DR1 0x0002 #define CODEC_CAP_TRUNCATED 0x0008 -/* Codec can export data for HW decoding (XvMC). */ +#if FF_API_XVMC +/* Codec can export data for HW decoding. This flag indicates that + * the codec would call get_format() with list that might contain HW accelerated + * pixel formats (XvMC, VDPAU, VAAPI, etc). The application can pick any of them + * including raw image format. + * The application can use the passed context to determine bitstream version, + * chroma format, resolution etc. + */ #define CODEC_CAP_HWACCEL 0x0010 +#endif /* FF_API_XVMC */ /** * Encoder or decoder requires flushing with NULL input at the end in order to * give the complete and correct output. @@ -762,10 +813,12 @@ typedef struct RcOverride{ * This can be used to prevent truncation of the last audio samples. */ #define CODEC_CAP_SMALL_LAST_FRAME 0x0040 +#if FF_API_CAP_VDPAU /** * Codec can export data for HW decoding (VDPAU). */ #define CODEC_CAP_HWACCEL_VDPAU 0x0080 +#endif /** * Codec can output multiple frames per AVPacket * Normally demuxers return one frame at a time, demuxers which do not do @@ -787,12 +840,12 @@ typedef struct RcOverride{ * Codec should fill in channel configuration and samplerate instead of container */ #define CODEC_CAP_CHANNEL_CONF 0x0400 - +#if FF_API_NEG_LINESIZES /** - * Codec is able to deal with negative linesizes + * @deprecated no codecs use this capability */ #define CODEC_CAP_NEG_LINESIZES 0x0800 - +#endif /** * Codec supports frame-level multithreading. */ @@ -822,6 +875,7 @@ typedef struct RcOverride{ */ #define CODEC_CAP_LOSSLESS 0x80000000 +#if FF_API_MB_TYPE //The following defines may change, don't expect compatibility if you use them. 
#define MB_TYPE_INTRA4x4 0x0001 #define MB_TYPE_INTRA16x16 0x0002 //FIXME H.264-specific @@ -845,6 +899,7 @@ typedef struct RcOverride{ #define MB_TYPE_QUANT 0x00010000 #define MB_TYPE_CBP 0x00020000 //Note bits 24-31 are reserved for codec specific use (h264 ref0, mpeg1 0mv, ...) +#endif /** * Pan Scan area. @@ -875,10 +930,12 @@ typedef struct AVPanScan{ int16_t position[3][2]; }AVPanScan; +#if FF_API_QSCALE_TYPE #define FF_QSCALE_TYPE_MPEG1 0 #define FF_QSCALE_TYPE_MPEG2 1 #define FF_QSCALE_TYPE_H264 2 #define FF_QSCALE_TYPE_VP56 3 +#endif #if FF_API_GET_BUFFER #define FF_BUFFER_TYPE_INTERNAL 1 @@ -1001,6 +1058,13 @@ enum AVPacketSideDataType { * follow the timestamp specifier of a WebVTT cue. */ AV_PKT_DATA_WEBVTT_SETTINGS, + + /** + * A list of zero terminated key/value strings. There is no end marker for + * the list, so it is required to rely on the side data size to stop. This + * side data includes updated metadata which appeared in the stream. + */ + AV_PKT_DATA_METADATA_UPDATE, }; /** @@ -1311,7 +1375,9 @@ typedef struct AVCodecContext { */ int coded_width, coded_height; +#if FF_API_ASPECT_EXTENDED #define FF_ASPECT_EXTENDED 15 +#endif /** * the number of pictures in a group of pictures, or 0 for intra_only @@ -1638,12 +1704,15 @@ typedef struct AVCodecContext { #define SLICE_FLAG_ALLOW_FIELD 0x0002 ///< allow draw_horiz_band() with field slices (MPEG2 field pics) #define SLICE_FLAG_ALLOW_PLANE 0x0004 ///< allow draw_horiz_band() with 1 component at a time (SVQ1) +#if FF_API_XVMC /** * XVideo Motion Acceleration * - encoding: forbidden * - decoding: set by decoder + * @deprecated XvMC doesn't need it anymore. */ - int xvmc_acceleration; + attribute_deprecated int xvmc_acceleration; +#endif /* FF_API_XVMC */ /** * macroblock decision mode @@ -2090,12 +2159,11 @@ typedef struct AVCodecContext { * If AV_GET_BUFFER_FLAG_REF is set in flags then the frame may be reused * (read and/or written to if it is writable) later by libavcodec. 
* - * If CODEC_FLAG_EMU_EDGE is not set in s->flags, the buffer must contain an - * edge of the size returned by avcodec_get_edge_width() on all sides. - * * avcodec_align_dimensions2() should be used to find the required width and * height, as they normally need to be rounded up to the next multiple of 16. * + * Some decoders do not support linesizes changing between frames. + * * If frame multithreading is used and thread_safe_callbacks is set, * this callback may be called from a different thread, but not from more * than one at once. Does not need to be reentrant. @@ -2378,12 +2446,16 @@ typedef struct AVCodecContext { */ int workaround_bugs; #define FF_BUG_AUTODETECT 1 ///< autodetection +#if FF_API_OLD_MSMPEG4 #define FF_BUG_OLD_MSMPEG4 2 +#endif #define FF_BUG_XVID_ILACE 4 #define FF_BUG_UMP4 8 #define FF_BUG_NO_PADDING 16 #define FF_BUG_AMV 32 +#if FF_API_AC_VLC #define FF_BUG_AC_VLC 0 ///< Will be removed, libavcodec can now handle these non-compliant files by default. +#endif #define FF_BUG_QPEL_CHROMA 64 #define FF_BUG_STD_QPEL 128 #define FF_BUG_QPEL_CHROMA2 256 @@ -2433,7 +2505,12 @@ typedef struct AVCodecContext { #define FF_DEBUG_BITSTREAM 4 #define FF_DEBUG_MB_TYPE 8 #define FF_DEBUG_QP 16 +#if FF_API_DEBUG_MV +/** + * @deprecated this option does nothing + */ #define FF_DEBUG_MV 32 +#endif #define FF_DEBUG_DCT_COEFF 0x00000040 #define FF_DEBUG_SKIP 0x00000080 #define FF_DEBUG_STARTCODE 0x00000100 @@ -2441,13 +2518,17 @@ typedef struct AVCodecContext { #define FF_DEBUG_ER 0x00000400 #define FF_DEBUG_MMCO 0x00000800 #define FF_DEBUG_BUGS 0x00001000 -#define FF_DEBUG_VIS_QP 0x00002000 -#define FF_DEBUG_VIS_MB_TYPE 0x00004000 +#if FF_API_DEBUG_MV +#define FF_DEBUG_VIS_QP 0x00002000 ///< only access through AVOptions from outside libavcodec +#define FF_DEBUG_VIS_MB_TYPE 0x00004000 ///< only access through AVOptions from outside libavcodec +#endif #define FF_DEBUG_BUFFERS 0x00008000 #define FF_DEBUG_THREADS 0x00010000 +#if FF_API_DEBUG_MV /** * debug + * 
Code outside libavcodec should access this field using AVOptions * - encoding: Set by user. * - decoding: Set by user. */ @@ -2455,6 +2536,7 @@ typedef struct AVCodecContext { #define FF_DEBUG_VIS_MV_P_FOR 0x00000001 //visualize forward predicted MVs of P frames #define FF_DEBUG_VIS_MV_B_FOR 0x00000002 //visualize forward predicted MVs of B frames #define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames +#endif /** * Error recognition; may misdetect some more or less valid parts as errors. @@ -2462,14 +2544,21 @@ typedef struct AVCodecContext { * - decoding: Set by user. */ int err_recognition; + +/** + * Verify checksums embedded in the bitstream (could be of either encoded or + * decoded data, depending on the codec) and print an error message on mismatch. + * If AV_EF_EXPLODE is also set, a mismatching checksum will result in the + * decoder returning an error. + */ #define AV_EF_CRCCHECK (1<<0) -#define AV_EF_BITSTREAM (1<<1) -#define AV_EF_BUFFER (1<<2) -#define AV_EF_EXPLODE (1<<3) +#define AV_EF_BITSTREAM (1<<1) ///< detect bitstream specification deviations +#define AV_EF_BUFFER (1<<2) ///< detect improper bitstream length +#define AV_EF_EXPLODE (1<<3) ///< abort decoding on minor error detection -#define AV_EF_CAREFUL (1<<16) -#define AV_EF_COMPLIANT (1<<17) -#define AV_EF_AGGRESSIVE (1<<18) +#define AV_EF_CAREFUL (1<<16) ///< consider things that violate the spec, are fast to calculate and have not been seen in the wild as errors +#define AV_EF_COMPLIANT (1<<17) ///< consider all spec non compliancies as errors +#define AV_EF_AGGRESSIVE (1<<18) ///< consider things that a sane encoder should not do as an error /** @@ -2541,7 +2630,9 @@ typedef struct AVCodecContext { #define FF_IDCT_SIMPLEVIS 18 #define FF_IDCT_FAAN 20 #define FF_IDCT_SIMPLENEON 22 +#if FF_API_ARCH_ALPHA #define FF_IDCT_SIMPLEALPHA 23 +#endif /** * bits per sample/pixel from the demuxer (needed for huffyuv). 
@@ -2571,7 +2662,7 @@ typedef struct AVCodecContext { /** * the picture in the bitstream * - encoding: Set by libavcodec. - * - decoding: Set by libavcodec. + * - decoding: unused */ AVFrame *coded_frame; @@ -2643,13 +2734,13 @@ typedef struct AVCodecContext { */ int (*execute2)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count); +#if FF_API_THREAD_OPAQUE /** - * thread opaque - * Can be used by execute() to store some per AVCodecContext stuff. - * - encoding: set by execute() - * - decoding: set by execute() + * @deprecated this field should not be used from outside of lavc */ + attribute_deprecated void *thread_opaque; +#endif /** * noise vs. sse weight for the nsse comparsion function @@ -2736,6 +2827,11 @@ typedef struct AVCodecContext { #define FF_PROFILE_JPEG2000_DCINEMA_2K 3 #define FF_PROFILE_JPEG2000_DCINEMA_4K 4 + +#define FF_PROFILE_HEVC_MAIN 1 +#define FF_PROFILE_HEVC_MAIN_10 2 +#define FF_PROFILE_HEVC_MAIN_STILL_PICTURE 3 + /** * level * - encoding: Set by user. @@ -2776,21 +2872,22 @@ typedef struct AVCodecContext { uint8_t *subtitle_header; int subtitle_header_size; +#if FF_API_ERROR_RATE /** - * Simulates errors in the bitstream to test error concealment. - * - encoding: Set by user. - * - decoding: unused + * @deprecated use the 'error_rate' private AVOption of the mpegvideo + * encoders */ + attribute_deprecated int error_rate; +#endif +#if FF_API_CODEC_PKT /** - * Current packet as passed into the decoder, to avoid having - * to pass the packet into every function. Currently only valid - * inside lavc and get/release_buffer callbacks. - * - decoding: set by avcodec_decode_*, read by get_buffer() for setting pkt_pts - * - encoding: unused + * @deprecated this field is not supposed to be accessed from outside lavc */ + attribute_deprecated AVPacket *pkt; +#endif /** * VBV delay coded in the last frame (in periods of a 27 MHz clock). 
@@ -2857,6 +2954,48 @@ typedef struct AVCodecContext { #define FF_SUB_CHARENC_MODE_AUTOMATIC 0 ///< libavcodec will select the mode itself #define FF_SUB_CHARENC_MODE_PRE_DECODER 1 ///< the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv + /** + * Skip processing alpha if supported by codec. + * Note that if the format uses pre-multiplied alpha (common with VP6, + * and recommended due to better video quality/compression) + * the image will look as if alpha-blended onto a black background. + * However for formats that do not use pre-multiplied alpha + * there might be serious artefacts (though e.g. libswscale currently + * assumes pre-multiplied alpha anyway). + * Code outside libavcodec should access this field using AVOptions + * + * - decoding: set by user + * - encoding: unused + */ + int skip_alpha; + + /** + * Number of samples to skip after a discontinuity + * - decoding: unused + * - encoding: set by libavcodec + */ + int seek_preroll; + +#if !FF_API_DEBUG_MV + /** + * debug motion vectors + * Code outside libavcodec should access this field using AVOptions + * - encoding: Set by user. + * - decoding: Set by user. + */ + int debug_mv; +#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 //visualize forward predicted MVs of P frames +#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 //visualize forward predicted MVs of B frames +#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames +#endif + + /** + * custom intra quantization matrix + * Code outside libavcodec should access this field using av_codec_g/set_chroma_intra_matrix() + * - encoding: Set by user, can be NULL. + * - decoding: unused. 
+ */ + uint16_t *chroma_intra_matrix; } AVCodecContext; AVRational av_codec_get_pkt_timebase (const AVCodecContext *avctx); @@ -2868,6 +3007,12 @@ void av_codec_set_codec_descriptor(AVCodecContext *avctx, co int av_codec_get_lowres(const AVCodecContext *avctx); void av_codec_set_lowres(AVCodecContext *avctx, int val); +int av_codec_get_seek_preroll(const AVCodecContext *avctx); +void av_codec_set_seek_preroll(AVCodecContext *avctx, int val); + +uint16_t *av_codec_get_chroma_intra_matrix(const AVCodecContext *avctx); +void av_codec_set_chroma_intra_matrix(AVCodecContext *avctx, uint16_t *val); + /** * AVProfile. */ @@ -2908,7 +3053,9 @@ typedef struct AVCodec { const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0 const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1 const uint64_t *channel_layouts; ///< array of support channel layouts, or NULL if unknown. array is terminated by 0 - uint8_t max_lowres; ///< maximum value for lowres supported by the decoder +#if FF_API_LOWRES + uint8_t max_lowres; ///< maximum value for lowres supported by the decoder, no direct access, use av_codec_get_max_lowres() +#endif const AVClass *priv_class; ///< AVClass for the private context const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN} @@ -2975,6 +3122,10 @@ typedef struct AVCodec { void (*flush)(AVCodecContext *); } AVCodec; +int av_codec_get_max_lowres(const AVCodec *codec); + +struct MpegEncContext; + /** * AVHWAccel. */ @@ -3036,6 +3187,7 @@ typedef struct AVHWAccel { * * Meaningful slice information (codec specific) is guaranteed to * be parsed at this point. This function is mandatory. + * The only exception is XvMC, that works on MB level. 
* * @param avctx the codec context * @param buf the slice data buffer base @@ -3063,6 +3215,17 @@ typedef struct AVHWAccel { * AVCodecContext.release_buffer(). */ int priv_data_size; + + /** + * Called for every Macroblock in a slice. + * + * XvMC uses it to replace the ff_MPV_decode_mb(). + * Instead of decoding to raw picture, MB parameters are + * stored in an array provided by the video driver. + * + * @param s the mpeg context + */ + void (*decode_mb)(struct MpegEncContext *s); } AVHWAccel; /** @@ -3186,40 +3349,6 @@ void avcodec_register(AVCodec *codec); */ void avcodec_register_all(void); - -#if FF_API_ALLOC_CONTEXT -/** - * Allocate an AVCodecContext and set its fields to default values. The - * resulting struct can be deallocated by simply calling av_free(). - * - * @return An AVCodecContext filled with default values or NULL on failure. - * @see avcodec_get_context_defaults - * - * @deprecated use avcodec_alloc_context3() - */ -attribute_deprecated -AVCodecContext *avcodec_alloc_context(void); - -/** THIS FUNCTION IS NOT YET PART OF THE PUBLIC API! - * we WILL change its arguments and name a few times! */ -attribute_deprecated -AVCodecContext *avcodec_alloc_context2(enum AVMediaType); - -/** - * Set the fields of the given AVCodecContext to default values. - * - * @param s The AVCodecContext of which the fields should be set to default values. - * @deprecated use avcodec_get_context_defaults3 - */ -attribute_deprecated -void avcodec_get_context_defaults(AVCodecContext *s); - -/** THIS FUNCTION IS NOT YET PART OF THE PUBLIC API! - * we WILL change its arguments and name a few times! */ -attribute_deprecated -void avcodec_get_context_defaults2(AVCodecContext *s, enum AVMediaType); -#endif - /** * Allocate an AVCodecContext and set its fields to default values. 
The * resulting struct can be deallocated by calling avcodec_close() on it followed @@ -3279,26 +3408,27 @@ const AVClass *avcodec_get_subtitle_rect_class(void); * can use this AVCodecContext to decode/encode video/audio data. * * @param dest target codec context, should be initialized with - * avcodec_alloc_context3(), but otherwise uninitialized + * avcodec_alloc_context3(NULL), but otherwise uninitialized * @param src source codec context * @return AVERROR() on error (e.g. memory allocation error), 0 on success */ int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src); +#if FF_API_AVFRAME_LAVC /** - * Allocate an AVFrame and set its fields to default values. The resulting - * struct must be freed using avcodec_free_frame(). - * - * @return An AVFrame filled with default values or NULL on failure. - * @see avcodec_get_frame_defaults + * @deprecated use av_frame_alloc() */ +attribute_deprecated AVFrame *avcodec_alloc_frame(void); /** * Set the fields of the given AVFrame to default values. * * @param frame The AVFrame of which the fields should be set to default values. + * + * @deprecated use av_frame_unref() */ +attribute_deprecated void avcodec_get_frame_defaults(AVFrame *frame); /** @@ -3310,41 +3440,11 @@ void avcodec_get_frame_defaults(AVFrame *frame); * @warning this function does NOT free the data buffers themselves * (it does not know how, since they might have been allocated with * a custom get_buffer()). - */ -void avcodec_free_frame(AVFrame **frame); - -#if FF_API_AVCODEC_OPEN -/** - * Initialize the AVCodecContext to use the given AVCodec. Prior to using this - * function the context has to be allocated. - * - * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(), - * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for - * retrieving a codec. - * - * @warning This function is not thread safe! 
- * - * @code - * avcodec_register_all(); - * codec = avcodec_find_decoder(AV_CODEC_ID_H264); - * if (!codec) - * exit(1); * - * context = avcodec_alloc_context3(codec); - * - * if (avcodec_open(context, codec) < 0) - * exit(1); - * @endcode - * - * @param avctx The context which will be set up to use the given codec. - * @param codec The codec to use within the context. - * @return zero on success, a negative value on error - * @see avcodec_alloc_context3, avcodec_find_decoder, avcodec_find_encoder, avcodec_close - * - * @deprecated use avcodec_open2 + * @deprecated use av_frame_free() */ attribute_deprecated -int avcodec_open(AVCodecContext *avctx, AVCodec *codec); +void avcodec_free_frame(AVFrame **frame); #endif /** @@ -3536,6 +3636,84 @@ int av_packet_merge_side_data(AVPacket *pkt); int av_packet_split_side_data(AVPacket *pkt); +/** + * Pack a dictionary for use in side_data. + * + * @param dict The dictionary to pack. + * @param size pointer to store the size of the returned data + * @return pointer to data if successful, NULL otherwise + */ +uint8_t *av_packet_pack_dictionary(AVDictionary *dict, int *size); +/** + * Unpack a dictionary from side_data. + * + * @param data data from side_data + * @param size size of the data + * @param dict the metadata storage dictionary + * @return 0 on success, < 0 on failure + */ +int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict); + + +/** + * Convenience function to free all the side data stored. + * All the other fields stay untouched. + * + * @param pkt packet + */ +void av_packet_free_side_data(AVPacket *pkt); + +/** + * Setup a new reference to the data described by a given packet + * + * If src is reference-counted, setup dst as a new reference to the + * buffer in src. Otherwise allocate a new buffer in dst and copy the + * data from src into it. + * + * All the other fields are copied from src. 
+ * + * @see av_packet_unref + * + * @param dst Destination packet + * @param src Source packet + * + * @return 0 on success, a negative AVERROR on error. + */ +int av_packet_ref(AVPacket *dst, AVPacket *src); + +/** + * Wipe the packet. + * + * Unreference the buffer referenced by the packet and reset the + * remaining packet fields to their default values. + * + * @param pkt The packet to be unreferenced. + */ +void av_packet_unref(AVPacket *pkt); + +/** + * Move every field in src to dst and reset src. + * + * @see av_packet_unref + * + * @param src Source packet, will be reset + * @param dst Destination packet + */ +void av_packet_move_ref(AVPacket *dst, AVPacket *src); + +/** + * Copy only "properties" fields from src to dst. + * + * Properties for the purpose of this function are all the fields + * beside those related to the packet data (buf, data, size) + * + * @param dst Destination packet + * @param src Source packet + * + * @return 0 on success AVERROR on failure. + * + */ +int av_packet_copy_props(AVPacket *dst, const AVPacket *src); /** * @} @@ -3575,14 +3753,20 @@ attribute_deprecated int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame */ int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags); +#if FF_API_EMU_EDGE /** * Return the amount of padding in pixels which the get_buffer callback must * provide around the edge of the image for codecs which do not have the * CODEC_FLAG_EMU_EDGE flag. * * @return Required padding in pixels. + * + * @deprecated CODEC_FLAG_EMU_EDGE is deprecated, so this function is no longer + * needed */ +attribute_deprecated unsigned avcodec_get_edge_width(void); +#endif /** * Modify width and height values so that they will result in a memory @@ -3590,8 +3774,6 @@ unsigned avcodec_get_edge_width(void); * padding. * * May only be used if a codec with CODEC_CAP_DR1 has been opened. 
- * If CODEC_FLAG_EMU_EDGE is not set, the dimensions must have been increased - * according to avcodec_get_edge_width() before. */ void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height); @@ -3601,8 +3783,6 @@ void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height); * line sizes are a multiple of the respective linesize_align[i]. * * May only be used if a codec with CODEC_CAP_DR1 has been opened. - * If CODEC_FLAG_EMU_EDGE is not set, the dimensions must have been increased - * according to avcodec_get_edge_width() before. */ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[AV_NUM_DATA_POINTERS]); @@ -3693,19 +3873,25 @@ attribute_deprecated int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *s * Decode the audio frame of size avpkt->size from avpkt->data into frame. * * Some decoders may support multiple frames in a single AVPacket. Such - * decoders would then just decode the first frame. In this case, - * avcodec_decode_audio4 has to be called again with an AVPacket containing - * the remaining data in order to decode the second frame, etc... - * Even if no frames are returned, the packet needs to be fed to the decoder - * with remaining data until it is completely consumed or an error occurs. + * decoders would then just decode the first frame and the return value would be + * less than the packet size. In this case, avcodec_decode_audio4 has to be + * called again with an AVPacket containing the remaining data in order to + * decode the second frame, etc... Even if no frames are returned, the packet + * needs to be fed to the decoder with remaining data until it is completely + * consumed or an error occurs. + * + * Some decoders (those marked with CODEC_CAP_DELAY) have a delay between input + * and output. This means that for some packets they will not immediately + * produce decoded output and need to be flushed at the end of decoding to get + * all the decoded data. 
Flushing is done by calling this function with packets + * with avpkt->data set to NULL and avpkt->size set to 0 until it stops + * returning samples. It is safe to flush even those decoders that are not + * marked with CODEC_CAP_DELAY, then no samples will be returned. * * @warning The input buffer, avpkt->data must be FF_INPUT_BUFFER_PADDING_SIZE * larger than the actual read bytes because some optimized bitstream * readers read 32 or 64 bits at once and could read over the end. * - * @note You might have to align the input buffer. The alignment requirements - * depend on the CPU and the decoder. - * * @param avctx the codec context * @param[out] frame The AVFrame in which to store decoded audio samples. * The decoder will allocate a buffer for the decoded frame by @@ -3717,10 +3903,13 @@ attribute_deprecated int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *s * to the frame if av_frame_is_writable() returns 1. * When AVCodecContext.refcounted_frames is set to 0, the returned * reference belongs to the decoder and is valid only until the - * next call to this function or until closing the decoder. - * The caller may not write to it. + * next call to this function or until closing or flushing the + * decoder. The caller may not write to it. * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is - * non-zero. + * non-zero. Note that this field being set to zero + * does not mean that an error has occurred. For + * decoders with CODEC_CAP_DELAY set, no given decode + * call is guaranteed to produce a frame. * @param[in] avpkt The input AVPacket containing the input buffer. * At least avpkt->data and avpkt->size should be set. Some * decoders might also require additional fields to be set. @@ -3743,13 +3932,6 @@ int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, * @warning The end of the input buffer buf should be set to 0 to ensure that * no overreading happens for damaged MPEG streams. 
* - * @note You might have to align the input buffer avpkt->data. - * The alignment requirements depend on the CPU: on some CPUs it isn't - * necessary at all, on others it won't work at all if not aligned and on others - * it will work but it will have an impact on performance. - * - * In practice, avpkt->data should have 4 byte alignment at minimum. - * * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay * between input and output, these need to be fed with avpkt->data=NULL, * avpkt->size=0 at the end to return the remaining frames. @@ -3766,10 +3948,10 @@ int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, * to the frame if av_frame_is_writable() returns 1. * When AVCodecContext.refcounted_frames is set to 0, the returned * reference belongs to the decoder and is valid only until the - * next call to this function or until closing the decoder. The - * caller may not write to it. + * next call to this function or until closing or flushing the + * decoder. The caller may not write to it. * - * @param[in] avpkt The input AVpacket containing the input buffer. + * @param[in] avpkt The input AVPacket containing the input buffer. * You can create such packet with av_init_packet() and by then setting * data and size, some decoders might in addition need other fields like * flags&AV_PKT_FLAG_KEY. All decoders are designed to use the least @@ -3792,6 +3974,14 @@ int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, * and reusing a get_buffer written for video codecs would probably perform badly * due to a potentially very different allocation pattern. * + * Some decoders (those marked with CODEC_CAP_DELAY) have a delay between input + * and output. This means that for some packets they will not immediately + * produce decoded output and need to be flushed at the end of decoding to get + * all the decoded data. 
Flushing is done by calling this function with packets + * with avpkt->data set to NULL and avpkt->size set to 0 until it stops + * returning subtitles. It is safe to flush even those decoders that are not + * marked with CODEC_CAP_DELAY, then no subtitles will be returned. + * * @param avctx the codec context * @param[out] sub The AVSubtitle in which the decoded subtitle will be stored, must be freed with avsubtitle_free if *got_sub_ptr is set. @@ -3960,6 +4150,14 @@ typedef struct AVCodecParserContext { * AV_PICTURE_STRUCTURE_TOP_FIELD. */ enum AVPictureStructure picture_structure; + + /** + * Picture number incremented in presentation or output order. + * This field may be reinitialized at the first picture of a new sequence. + * + * For example, this corresponds to H.264 PicOrderCnt. + */ + int output_picture_number; } AVCodecParserContext; typedef struct AVCodecParser { @@ -4494,7 +4692,7 @@ int avcodec_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt, enum AVPixelFormat * @param[out] loss_ptr Combination of flags informing you what kind of losses will occur. * @return The best pixel format to convert to or -1 if none was found. 
*/ -enum AVPixelFormat avcodec_find_best_pix_fmt_of_list(enum AVPixelFormat *pix_fmt_list, +enum AVPixelFormat avcodec_find_best_pix_fmt_of_list(const enum AVPixelFormat *pix_fmt_list, enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); @@ -4533,7 +4731,7 @@ enum AVPixelFormat avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt attribute_deprecated #if AV_HAVE_INCOMPATIBLE_LIBAV_ABI -enum AVPixelFormat avcodec_find_best_pix_fmt2(enum AVPixelFormat *pix_fmt_list, +enum AVPixelFormat avcodec_find_best_pix_fmt2(const enum AVPixelFormat *pix_fmt_list, enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); #else @@ -4548,12 +4746,20 @@ enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const en * @} */ +#if FF_API_SET_DIMENSIONS +/** + * @deprecated this function is not supposed to be used from outside of lavc + */ +attribute_deprecated void avcodec_set_dimensions(AVCodecContext *s, int width, int height); +#endif /** * Put a string representing the codec tag codec_tag in buf. * + * @param buf buffer to place codec tag in * @param buf_size size in bytes of buf + * @param codec_tag codec tag to assign * @return the length of the string that would have been generated if * enough space had been available, excluding the trailing null */ @@ -4602,7 +4808,13 @@ int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels, int buf_size, int align); /** - * Flush buffers, should be called when seeking or when switching to a different stream. + * Reset the internal decoder state / flush internal buffers. Should be called + * e.g. when seeking or when switching to a different stream. + * + * @note when refcounted frames are not used (i.e. avctx->refcounted_frames is 0), + * this invalidates the frames previously returned from the decoder. When + * refcounted frames are used, the decoder just releases any references it might + * keep internally, but the caller's reference remains valid. 
*/ void avcodec_flush_buffers(AVCodecContext *avctx); @@ -4738,30 +4950,9 @@ AVBitStreamFilter *av_bitstream_filter_next(AVBitStreamFilter *f); /* memory */ -/** - * Reallocate the given block if it is not large enough, otherwise do nothing. - * - * @see av_realloc - */ -void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size); - -/** - * Allocate a buffer, reusing the given one if large enough. - * - * Contrary to av_fast_realloc the current buffer contents might not be - * preserved and on error the old buffer is freed, thus no special - * handling to avoid memleaks is necessary. - * - * @param ptr pointer to pointer to already allocated buffer, overwritten with pointer to new buffer - * @param size size of the buffer *ptr points to - * @param min_size minimum size of *ptr buffer after returning, *ptr will be NULL and - * *size 0 if an error occurred. - */ -void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size); - /** * Same behaviour av_fast_malloc but the buffer has additional - * FF_INPUT_BUFFER_PADDING_SIZE at the end which will will always be 0. + * FF_INPUT_BUFFER_PADDING_SIZE at the end which will always be 0. * * In addition the whole buffer will initially and after resizes * be 0-initialized so that no uninitialized data will ever appear. diff --git a/dependencies64/ffmpeg/include/libavcodec/avfft.h b/dependencies64/ffmpeg/include/libavcodec/avfft.h index 2d20a45f8..0c0f9b8d8 100644 --- a/dependencies64/ffmpeg/include/libavcodec/avfft.h +++ b/dependencies64/ffmpeg/include/libavcodec/avfft.h @@ -99,9 +99,11 @@ enum DCTTransformType { /** * Set up DCT. 
+ * * @param nbits size of the input array: * (1 << nbits) for DCT-II, DCT-III and DST-I * (1 << nbits) + 1 for DCT-I + * @param type the type of transform * * @note the first element of the input of DST-I is ignored */ diff --git a/dependencies64/ffmpeg/include/libavcodec/old_codec_ids.h b/dependencies64/ffmpeg/include/libavcodec/old_codec_ids.h index d8a8f746d..b956264f3 100644 --- a/dependencies64/ffmpeg/include/libavcodec/old_codec_ids.h +++ b/dependencies64/ffmpeg/include/libavcodec/old_codec_ids.h @@ -34,7 +34,9 @@ /* video codecs */ CODEC_ID_MPEG1VIDEO, CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding +#if FF_API_XVMC CODEC_ID_MPEG2VIDEO_XVMC, +#endif CODEC_ID_H261, CODEC_ID_H263, CODEC_ID_RV10, diff --git a/dependencies64/ffmpeg/include/libavcodec/vda.h b/dependencies64/ffmpeg/include/libavcodec/vda.h index b3d6399a6..2f68188c9 100644 --- a/dependencies64/ffmpeg/include/libavcodec/vda.h +++ b/dependencies64/ffmpeg/include/libavcodec/vda.h @@ -41,6 +41,12 @@ #include "libavcodec/version.h" +// extra flags not defined in VDADecoder.h +enum { + kVDADecodeInfo_Asynchronous = 1UL << 0, + kVDADecodeInfo_FrameDropped = 1UL << 1 +}; + /** * @defgroup lavc_codec_hwaccel_vda VDA * @ingroup lavc_codec_hwaccel diff --git a/dependencies64/ffmpeg/include/libavcodec/vdpau.h b/dependencies64/ffmpeg/include/libavcodec/vdpau.h index 37d212cd8..e25cc42d7 100644 --- a/dependencies64/ffmpeg/include/libavcodec/vdpau.h +++ b/dependencies64/ffmpeg/include/libavcodec/vdpau.h @@ -52,22 +52,40 @@ #include #include #include "libavutil/avconfig.h" +#include "libavutil/attributes.h" +#include "avcodec.h" +#include "version.h" + +#if FF_API_BUFS_VDPAU union AVVDPAUPictureInfo { VdpPictureInfoH264 h264; VdpPictureInfoMPEG1Or2 mpeg; VdpPictureInfoVC1 vc1; VdpPictureInfoMPEG4Part2 mpeg4; }; +#endif + +struct AVCodecContext; +struct AVFrame; + +typedef int (*AVVDPAU_Render2)(struct AVCodecContext *, struct AVFrame *, + const VdpPictureInfo *, uint32_t, + const 
VdpBitstreamBuffer *); /** * This structure is used to share data between the libavcodec library and * the client video application. - * The user shall zero-allocate the structure and make it available as + * The user shall allocate the structure via the av_alloc_vdpau_hwaccel + * function and make it available as * AVCodecContext.hwaccel_context. Members can be set by the user once * during initialization or through each AVCodecContext.get_buffer() * function call. In any case, they must be valid prior to calling * decoding functions. + * + * The size of this structure is not a part of the public ABI and must not + * be used outside of libavcodec. Use av_vdpau_alloc_context() to allocate an + * AVVDPAUContext. */ typedef struct AVVDPAUContext { /** @@ -84,11 +102,13 @@ typedef struct AVVDPAUContext { */ VdpDecoderRender *render; +#if FF_API_BUFS_VDPAU /** * VDPAU picture information * * Set by libavcodec. */ + attribute_deprecated union AVVDPAUPictureInfo info; /** @@ -96,6 +116,7 @@ typedef struct AVVDPAUContext { * * Set by libavcodec. */ + attribute_deprecated int bitstream_buffers_allocated; /** @@ -103,6 +124,7 @@ typedef struct AVVDPAUContext { * * Set by libavcodec. */ + attribute_deprecated int bitstream_buffers_used; /** @@ -111,10 +133,43 @@ typedef struct AVVDPAUContext { * * Set by libavcodec. */ + attribute_deprecated VdpBitstreamBuffer *bitstream_buffers; +#endif + AVVDPAU_Render2 render2; } AVVDPAUContext; +/** + * @brief allocation function for AVVDPAUContext + * + * Allows extending the struct without breaking API/ABI + */ +AVVDPAUContext *av_alloc_vdpaucontext(void); + +AVVDPAU_Render2 av_vdpau_hwaccel_get_render2(const AVVDPAUContext *); +void av_vdpau_hwaccel_set_render2(AVVDPAUContext *, AVVDPAU_Render2); +/** + * Allocate an AVVDPAUContext. + * + * @return Newly-allocated AVVDPAUContext or NULL on failure. + */ +AVVDPAUContext *av_vdpau_alloc_context(void); + +/** + * Get a decoder profile that should be used for initializing a VDPAU decoder. 
+ * Should be called from the AVCodecContext.get_format() callback. + * + * @param avctx the codec context being used for decoding the stream + * @param profile a pointer into which the result will be written on success. + * The contents of profile are undefined if this function returns + * an error. + * + * @return 0 on success (non-negative), a negative AVERROR on failure. + */ +int av_vdpau_get_profile(AVCodecContext *avctx, VdpDecoderProfile *profile); + +#if FF_API_CAP_VDPAU /** @brief The videoSurface is used for rendering. */ #define FF_VDPAU_STATE_USED_FOR_RENDER 1 @@ -153,6 +208,7 @@ struct vdpau_render_state { union AVVDPAUPictureInfo info; #endif }; +#endif /* @}*/ diff --git a/dependencies64/ffmpeg/include/libavcodec/version.h b/dependencies64/ffmpeg/include/libavcodec/version.h index 06777bf3a..61a796852 100644 --- a/dependencies64/ffmpeg/include/libavcodec/version.h +++ b/dependencies64/ffmpeg/include/libavcodec/version.h @@ -26,11 +26,11 @@ * Libavcodec version macros. */ -#include "libavutil/avutil.h" +#include "libavutil/version.h" #define LIBAVCODEC_VERSION_MAJOR 55 -#define LIBAVCODEC_VERSION_MINOR 19 -#define LIBAVCODEC_VERSION_MICRO 100 +#define LIBAVCODEC_VERSION_MINOR 52 +#define LIBAVCODEC_VERSION_MICRO 102 #define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ LIBAVCODEC_VERSION_MINOR, \ @@ -51,19 +51,9 @@ #ifndef FF_API_REQUEST_CHANNELS #define FF_API_REQUEST_CHANNELS (LIBAVCODEC_VERSION_MAJOR < 56) #endif -#ifndef FF_API_ALLOC_CONTEXT -#define FF_API_ALLOC_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 55) -#endif -#ifndef FF_API_AVCODEC_OPEN -#define FF_API_AVCODEC_OPEN (LIBAVCODEC_VERSION_MAJOR < 55) -#endif #ifndef FF_API_OLD_DECODE_AUDIO #define FF_API_OLD_DECODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 56) #endif -#ifndef FF_API_OLD_TIMECODE -#define FF_API_OLD_TIMECODE (LIBAVCODEC_VERSION_MAJOR < 55) -#endif - #ifndef FF_API_OLD_ENCODE_AUDIO #define FF_API_OLD_ENCODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 56) #endif @@ -73,8 +63,11 @@ 
#ifndef FF_API_CODEC_ID #define FF_API_CODEC_ID (LIBAVCODEC_VERSION_MAJOR < 56) #endif +#ifndef FF_API_AUDIO_CONVERT +#define FF_API_AUDIO_CONVERT (LIBAVCODEC_VERSION_MAJOR < 56) +#endif #ifndef FF_API_AVCODEC_RESAMPLE -#define FF_API_AVCODEC_RESAMPLE (LIBAVCODEC_VERSION_MAJOR < 56) +#define FF_API_AVCODEC_RESAMPLE FF_API_AUDIO_CONVERT #endif #ifndef FF_API_DEINTERLACE #define FF_API_DEINTERLACE (LIBAVCODEC_VERSION_MAJOR < 56) @@ -91,5 +84,62 @@ #ifndef FF_API_LOWRES #define FF_API_LOWRES (LIBAVCODEC_VERSION_MAJOR < 56) #endif +#ifndef FF_API_CAP_VDPAU +#define FF_API_CAP_VDPAU (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_BUFS_VDPAU +#define FF_API_BUFS_VDPAU (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_VOXWARE +#define FF_API_VOXWARE (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_SET_DIMENSIONS +#define FF_API_SET_DIMENSIONS (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_DEBUG_MV +#define FF_API_DEBUG_MV (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_AC_VLC +#define FF_API_AC_VLC (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_OLD_MSMPEG4 +#define FF_API_OLD_MSMPEG4 (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_ASPECT_EXTENDED +#define FF_API_ASPECT_EXTENDED (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_THREAD_OPAQUE +#define FF_API_THREAD_OPAQUE (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_CODEC_PKT +#define FF_API_CODEC_PKT (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_ARCH_ALPHA +#define FF_API_ARCH_ALPHA (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_XVMC +#define FF_API_XVMC (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_ERROR_RATE +#define FF_API_ERROR_RATE (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_QSCALE_TYPE +#define FF_API_QSCALE_TYPE (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_MB_TYPE +#define FF_API_MB_TYPE (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_MAX_BFRAMES +#define FF_API_MAX_BFRAMES 
(LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_FAST_MALLOC +#define FF_API_FAST_MALLOC (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_NEG_LINESIZES +#define FF_API_NEG_LINESIZES (LIBAVCODEC_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_EMU_EDGE +#define FF_API_EMU_EDGE (LIBAVCODEC_VERSION_MAJOR < 56) +#endif #endif /* AVCODEC_VERSION_H */ diff --git a/dependencies64/ffmpeg/include/libavcodec/xvmc.h b/dependencies64/ffmpeg/include/libavcodec/xvmc.h index b2bf518d0..c2e187cc1 100644 --- a/dependencies64/ffmpeg/include/libavcodec/xvmc.h +++ b/dependencies64/ffmpeg/include/libavcodec/xvmc.h @@ -29,6 +29,8 @@ #include +#include "libavutil/attributes.h" +#include "version.h" #include "avcodec.h" /** @@ -41,7 +43,7 @@ #define AV_XVMC_ID 0x1DC711C0 /**< special value to ensure that regular pixel routines haven't corrupted the struct the number is 1337 speak for the letters IDCT MCo (motion compensation) */ -struct xvmc_pix_fmt { +attribute_deprecated struct xvmc_pix_fmt { /** The field contains the special constant value AV_XVMC_ID. It is used as a test that the application correctly uses the API, and that there is no corruption caused by pixel routines. diff --git a/dependencies64/ffmpeg/include/libavdevice/avdevice.h b/dependencies64/ffmpeg/include/libavdevice/avdevice.h index 93a044f27..28344ca16 100644 --- a/dependencies64/ffmpeg/include/libavdevice/avdevice.h +++ b/dependencies64/ffmpeg/include/libavdevice/avdevice.h @@ -66,4 +66,168 @@ const char *avdevice_license(void); */ void avdevice_register_all(void); +typedef struct AVDeviceRect { + int x; /**< x coordinate of top left corner */ + int y; /**< y coordinate of top left corner */ + int width; /**< width */ + int height; /**< height */ +} AVDeviceRect; + +/** + * Message types used by avdevice_app_to_dev_control_message(). + */ +enum AVAppToDevMessageType { + /** + * Dummy message. + */ + AV_APP_TO_DEV_NONE = MKBETAG('N','O','N','E'), + + /** + * Window size change message. 
+ * + * Message is sent to the device every time the application changes the size + * of the window device renders to. + * Message should also be sent right after window is created. + * + * data: AVDeviceRect: new window size. + */ + AV_APP_TO_DEV_WINDOW_SIZE = MKBETAG('G','E','O','M'), + + /** + * Repaint request message. + * + * Message is sent to the device when the window has to be repainted. + * + * data: AVDeviceRect: area required to be repainted. + * NULL: whole area is required to be repainted. + */ + AV_APP_TO_DEV_WINDOW_REPAINT = MKBETAG('R','E','P','A') +}; + +/** + * Message types used by avdevice_dev_to_app_control_message(). + */ +enum AVDevToAppMessageType { + /** + * Dummy message. + */ + AV_DEV_TO_APP_NONE = MKBETAG('N','O','N','E'), + + /** + * Create window buffer message. + * + * Device requests to create a window buffer. Exact meaning is device- + * and application-dependent. Message is sent before rendering first + * frame and all one-shot initializations should be done here. + * Application is allowed to ignore preferred window buffer size. + * + * @note: Application is obligated to inform about window buffer size + * with AV_APP_TO_DEV_WINDOW_SIZE message. + * + * data: AVDeviceRect: preferred size of the window buffer. + * NULL: no preferred size of the window buffer. + */ + AV_DEV_TO_APP_CREATE_WINDOW_BUFFER = MKBETAG('B','C','R','E'), + + /** + * Prepare window buffer message. + * + * Device requests to prepare a window buffer for rendering. + * Exact meaning is device- and application-dependent. + * Message is sent before rendering of each frame. + * + * data: NULL. + */ + AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER = MKBETAG('B','P','R','E'), + + /** + * Display window buffer message. + * + * Device requests to display a window buffer. + * Message is sent when new frame is ready to be displayed. + * Usually buffers need to be swapped in handler of this message. + * + * data: NULL.
+ */ + AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER = MKBETAG('B','D','I','S'), + + /** + * Destroy window buffer message. + * + * Device requests to destroy a window buffer. + * Message is sent when device is about to be destroyed and window + * buffer is not required anymore. + * + * data: NULL. + */ + AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER = MKBETAG('B','D','E','S') +}; + +/** + * Send control message from application to device. + * + * @param s device context. + * @param type message type. + * @param data message data. Exact type depends on message type. + * @param data_size size of message data. + * @return >= 0 on success, negative on error. + * AVERROR(ENOSYS) when device doesn't implement handler of the message. + */ +int avdevice_app_to_dev_control_message(struct AVFormatContext *s, + enum AVAppToDevMessageType type, + void *data, size_t data_size); + +/** + * Send control message from device to application. + * + * @param s device context. + * @param type message type. + * @param data message data. Can be NULL. + * @param data_size size of message data. + * @return >= 0 on success, negative on error. + * AVERROR(ENOSYS) when application doesn't implement handler of the message. + */ +int avdevice_dev_to_app_control_message(struct AVFormatContext *s, + enum AVDevToAppMessageType type, + void *data, size_t data_size); + +/** + * Structure describes basic parameters of the device. + */ +typedef struct AVDeviceInfo { + char *device_name; /**< device name, format depends on device */ + char *device_description; /**< human friendly name */ +} AVDeviceInfo; + +/** + * List of devices. + */ +typedef struct AVDeviceInfoList { + AVDeviceInfo **devices; /**< list of autodetected devices */ + int nb_devices; /**< number of autodetected devices */ + int default_device; /**< index of default device or -1 if no default */ +} AVDeviceInfoList; + +/** + * List devices. + * + * Returns available device names and their parameters. 
+ * + * @note: Some devices may accept system-dependent device names that cannot be + * autodetected. The list returned by this function cannot be assumed to + * be always complete. + * + * @param s device context. + * @param[out] device_list list of autodetected devices. + * @return count of autodetected devices, negative on error. + */ +int avdevice_list_devices(struct AVFormatContext *s, AVDeviceInfoList **device_list); + +/** + * Convenient function to free result of avdevice_list_devices(). + * + * @param device_list device list to be freed. + */ +void avdevice_free_list_devices(AVDeviceInfoList **device_list); + #endif /* AVDEVICE_AVDEVICE_H */ diff --git a/dependencies64/ffmpeg/include/libavdevice/version.h b/dependencies64/ffmpeg/include/libavdevice/version.h index 1e18f51d4..85b3b3766 100644 --- a/dependencies64/ffmpeg/include/libavdevice/version.h +++ b/dependencies64/ffmpeg/include/libavdevice/version.h @@ -25,10 +25,10 @@ * Libavdevice version macros */ -#include "libavutil/avutil.h" +#include "libavutil/version.h" #define LIBAVDEVICE_VERSION_MAJOR 55 -#define LIBAVDEVICE_VERSION_MINOR 3 +#define LIBAVDEVICE_VERSION_MINOR 10 #define LIBAVDEVICE_VERSION_MICRO 100 #define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \ diff --git a/dependencies64/ffmpeg/include/libavfilter/avcodec.h b/dependencies64/ffmpeg/include/libavfilter/avcodec.h index ae55df788..8bbdad267 100644 --- a/dependencies64/ffmpeg/include/libavfilter/avcodec.h +++ b/dependencies64/ffmpeg/include/libavfilter/avcodec.h @@ -72,7 +72,7 @@ AVFilterBufferRef *avfilter_get_buffer_ref_from_frame(enum AVMediaType type, * * @param frame an already allocated AVFrame * @param samplesref an audio buffer reference - * @return 0 in case of success, a negative AVERROR code in case of + * @return >= 0 in case of success, a negative AVERROR code in case of * failure * @deprecated Use avfilter_copy_buf_props() instead.
*/ @@ -85,7 +85,7 @@ int avfilter_fill_frame_from_audio_buffer_ref(AVFrame *frame, * * @param frame an already allocated AVFrame * @param picref a video buffer reference - * @return 0 in case of success, a negative AVERROR code in case of + * @return >= 0 in case of success, a negative AVERROR code in case of * failure * @deprecated Use avfilter_copy_buf_props() instead. */ @@ -98,7 +98,7 @@ int avfilter_fill_frame_from_video_buffer_ref(AVFrame *frame, * * @param frame an already allocated AVFrame * @param ref a video or audio buffer reference - * @return 0 in case of success, a negative AVERROR code in case of + * @return >= 0 in case of success, a negative AVERROR code in case of * failure * @deprecated Use avfilter_copy_buf_props() instead. */ diff --git a/dependencies64/ffmpeg/include/libavfilter/avfilter.h b/dependencies64/ffmpeg/include/libavfilter/avfilter.h index 6de0f66ae..ef7e8cadc 100644 --- a/dependencies64/ffmpeg/include/libavfilter/avfilter.h +++ b/dependencies64/ffmpeg/include/libavfilter/avfilter.h @@ -385,6 +385,12 @@ struct AVFilterPad { */ int needs_fifo; + /** + * The filter expects writable frames from its input link, + * duplicating data buffers if needed. + * + * input pads only. + */ int needs_writable; }; #endif @@ -991,6 +997,9 @@ int avfilter_register(AVFilter *filter); * @return the filter definition, if any matching one is registered. * NULL if none found. */ +#if !FF_API_NOCONST_GET_NAME +const +#endif AVFilter *avfilter_get_by_name(const char *name); /** @@ -1126,6 +1135,35 @@ const AVClass *avfilter_get_class(void); typedef struct AVFilterGraphInternal AVFilterGraphInternal; +/** + * A function pointer passed to the @ref AVFilterGraph.execute callback to be + * executed multiple times, possibly in parallel. 
+ * + * @param ctx the filter context the job belongs to + * @param arg an opaque parameter passed through from @ref + * AVFilterGraph.execute + * @param jobnr the index of the job being executed + * @param nb_jobs the total number of jobs + * + * @return 0 on success, a negative AVERROR on error + */ +typedef int (avfilter_action_func)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs); + +/** + * A function executing multiple jobs, possibly in parallel. + * + * @param ctx the filter context to which the jobs belong + * @param func the function to be called multiple times + * @param arg the argument to be passed to func + * @param ret a nb_jobs-sized array to be filled with return values from each + * invocation of func + * @param nb_jobs the number of jobs to execute + * + * @return 0 on success, a negative AVERROR on error + */ +typedef int (avfilter_execute_func)(AVFilterContext *ctx, avfilter_action_func *func, + void *arg, int *ret, int nb_jobs); + typedef struct AVFilterGraph { const AVClass *av_class; #if FF_API_FOO_COUNT @@ -1169,6 +1207,27 @@ typedef struct AVFilterGraph { */ AVFilterGraphInternal *internal; + /** + * Opaque user data. May be set by the caller to an arbitrary value, e.g. to + * be used from callbacks like @ref AVFilterGraph.execute. + * Libavfilter will not touch this field in any way. + */ + void *opaque; + + /** + * This callback may be set by the caller immediately after allocating the + * graph and before adding any filters to it, to provide a custom + * multithreading implementation. + * + * If set, filters with slice threading capability will call this callback + * to execute multiple jobs in parallel. + * + * If this field is left unset, libavfilter will use its internal + * implementation, which may or may not be multithreaded depending on the + * platform and build options. 
+ */ + avfilter_execute_func *execute; + char *aresample_swr_opts; ///< swr options to use for the auto-inserted aresample filters, Access ONLY through AVOptions /** @@ -1242,7 +1301,7 @@ int avfilter_graph_add_filter(AVFilterGraph *graphctx, AVFilterContext *filter); * @return a negative AVERROR error code in case of failure, a non * negative value otherwise */ -int avfilter_graph_create_filter(AVFilterContext **filt_ctx, AVFilter *filt, +int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx); @@ -1266,7 +1325,7 @@ enum { * * @param graphctx the filter graph * @param log_ctx context used for logging - * @return 0 in case of success, a negative AVERROR code otherwise + * @return >= 0 in case of success, a negative AVERROR code otherwise */ int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx); diff --git a/dependencies64/ffmpeg/include/libavfilter/buffersink.h b/dependencies64/ffmpeg/include/libavfilter/buffersink.h index ce96d08b3..24cd2feac 100644 --- a/dependencies64/ffmpeg/include/libavfilter/buffersink.h +++ b/dependencies64/ffmpeg/include/libavfilter/buffersink.h @@ -21,11 +21,18 @@ /** * @file + * @ingroup lavfi_buffersink * memory buffer sink API for audio and video */ #include "avfilter.h" +/** + * @defgroup lavfi_buffersink Buffer sink API + * @ingroup lavfi + * @{ + */ + #if FF_API_AVFILTERBUFFER /** * Get an audio/video buffer data from buffer_sink and put it in bufref. @@ -162,8 +169,12 @@ AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx); * @param frame pointer to an allocated frame that will be filled with data. * The data must be freed using av_frame_unref() / av_frame_free() * - * @return >= 0 in case of success, a negative AVERROR code in case of - * failure. + * @return + * - >= 0 if a frame was successfully returned. 
+ * - AVERROR(EAGAIN) if no frames are available at this point; more + * input frames must be added to the filtergraph to get more output. + * - AVERROR_EOF if there will be no more output frames on this sink. + * - A different negative AVERROR code in other failure cases. */ int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame); @@ -178,9 +189,16 @@ int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame); * frame will contain exactly nb_samples audio samples, except at * the end of stream, when it can contain less than nb_samples. * + * @return The return codes have the same meaning as for + * av_buffersink_get_samples(). + * * @warning do not mix this function with av_buffersink_get_frame(). Use only one or * the other with a single sink, not both. */ int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples); +/** + * @} + */ + #endif /* AVFILTER_BUFFERSINK_H */ diff --git a/dependencies64/ffmpeg/include/libavfilter/buffersrc.h b/dependencies64/ffmpeg/include/libavfilter/buffersrc.h index 66361b3da..5d124337b 100644 --- a/dependencies64/ffmpeg/include/libavfilter/buffersrc.h +++ b/dependencies64/ffmpeg/include/libavfilter/buffersrc.h @@ -1,19 +1,19 @@ /* * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -22,12 +22,19 @@ /** * @file + * @ingroup lavfi_buffersrc * Memory buffer source API. */ #include "libavcodec/avcodec.h" #include "avfilter.h" +/** + * @defgroup lavfi_buffersrc Buffer source API + * @ingroup lavfi + * @{ + */ + enum { /** @@ -79,8 +86,9 @@ unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src); #if FF_API_AVFILTERBUFFER /** - * Add a buffer to the filtergraph s. + * Add a buffer to a filtergraph. * + * @param ctx an instance of the buffersrc filter * @param buf buffer containing frame data to be passed down the filtergraph. * This function will take ownership of buf, the user must not free it. * A NULL buf signals EOF -- i.e. no more frames will be sent to this filter. @@ -88,13 +96,13 @@ unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src); * @deprecated use av_buffersrc_write_frame() or av_buffersrc_add_frame() */ attribute_deprecated -int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf); +int av_buffersrc_buffer(AVFilterContext *ctx, AVFilterBufferRef *buf); #endif /** * Add a frame to the buffer source. * - * @param s an instance of the buffersrc filter. + * @param ctx an instance of the buffersrc filter * @param frame frame to be added. If the frame is reference counted, this * function will make a new reference to it. Otherwise the frame data will be * copied. @@ -104,12 +112,12 @@ int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf); * This function is equivalent to av_buffersrc_add_frame_flags() with the * AV_BUFFERSRC_FLAG_KEEP_REF flag. 
*/ -int av_buffersrc_write_frame(AVFilterContext *s, const AVFrame *frame); +int av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame); /** * Add a frame to the buffer source. * - * @param s an instance of the buffersrc filter. + * @param ctx an instance of the buffersrc filter * @param frame frame to be added. If the frame is reference counted, this * function will take ownership of the reference(s) and reset the frame. * Otherwise the frame data will be copied. If this function returns an error, @@ -145,4 +153,8 @@ int av_buffersrc_add_frame_flags(AVFilterContext *buffer_src, AVFrame *frame, int flags); +/** + * @} + */ + #endif /* AVFILTER_BUFFERSRC_H */ diff --git a/dependencies64/ffmpeg/include/libavfilter/version.h b/dependencies64/ffmpeg/include/libavfilter/version.h index 190ea2fdb..a33ab490d 100644 --- a/dependencies64/ffmpeg/include/libavfilter/version.h +++ b/dependencies64/ffmpeg/include/libavfilter/version.h @@ -27,10 +27,10 @@ * Libavfilter version macros */ -#include "libavutil/avutil.h" +#include "libavutil/version.h" -#define LIBAVFILTER_VERSION_MAJOR 3 -#define LIBAVFILTER_VERSION_MINOR 82 +#define LIBAVFILTER_VERSION_MAJOR 4 +#define LIBAVFILTER_VERSION_MINOR 2 #define LIBAVFILTER_VERSION_MICRO 100 #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ @@ -50,37 +50,46 @@ */ #ifndef FF_API_AVFILTERPAD_PUBLIC -#define FF_API_AVFILTERPAD_PUBLIC (LIBAVFILTER_VERSION_MAJOR < 4) +#define FF_API_AVFILTERPAD_PUBLIC (LIBAVFILTER_VERSION_MAJOR < 5) #endif #ifndef FF_API_FOO_COUNT -#define FF_API_FOO_COUNT (LIBAVFILTER_VERSION_MAJOR < 4) +#define FF_API_FOO_COUNT (LIBAVFILTER_VERSION_MAJOR < 5) #endif #ifndef FF_API_FILL_FRAME -#define FF_API_FILL_FRAME (LIBAVFILTER_VERSION_MAJOR < 4) +#define FF_API_FILL_FRAME (LIBAVFILTER_VERSION_MAJOR < 5) #endif #ifndef FF_API_BUFFERSRC_BUFFER -#define FF_API_BUFFERSRC_BUFFER (LIBAVFILTER_VERSION_MAJOR < 4) +#define FF_API_BUFFERSRC_BUFFER (LIBAVFILTER_VERSION_MAJOR < 5) #endif 
#ifndef FF_API_AVFILTERBUFFER -#define FF_API_AVFILTERBUFFER (LIBAVFILTER_VERSION_MAJOR < 4) +#define FF_API_AVFILTERBUFFER (LIBAVFILTER_VERSION_MAJOR < 5) #endif #ifndef FF_API_OLD_FILTER_OPTS -#define FF_API_OLD_FILTER_OPTS (LIBAVFILTER_VERSION_MAJOR < 4) +#define FF_API_OLD_FILTER_OPTS (LIBAVFILTER_VERSION_MAJOR < 5) #endif #ifndef FF_API_ACONVERT_FILTER -#define FF_API_ACONVERT_FILTER (LIBAVFILTER_VERSION_MAJOR < 4) +#define FF_API_ACONVERT_FILTER (LIBAVFILTER_VERSION_MAJOR < 5) #endif #ifndef FF_API_AVFILTER_OPEN -#define FF_API_AVFILTER_OPEN (LIBAVFILTER_VERSION_MAJOR < 4) +#define FF_API_AVFILTER_OPEN (LIBAVFILTER_VERSION_MAJOR < 5) #endif #ifndef FF_API_AVFILTER_INIT_FILTER -#define FF_API_AVFILTER_INIT_FILTER (LIBAVFILTER_VERSION_MAJOR < 4) +#define FF_API_AVFILTER_INIT_FILTER (LIBAVFILTER_VERSION_MAJOR < 5) #endif #ifndef FF_API_OLD_FILTER_REGISTER -#define FF_API_OLD_FILTER_REGISTER (LIBAVFILTER_VERSION_MAJOR < 4) +#define FF_API_OLD_FILTER_REGISTER (LIBAVFILTER_VERSION_MAJOR < 5) #endif #ifndef FF_API_OLD_GRAPH_PARSE -#define FF_API_OLD_GRAPH_PARSE (LIBAVFILTER_VERSION_MAJOR < 4) +#define FF_API_OLD_GRAPH_PARSE (LIBAVFILTER_VERSION_MAJOR < 5) +#endif +#ifndef FF_API_DRAWTEXT_OLD_TIMELINE +#define FF_API_DRAWTEXT_OLD_TIMELINE (LIBAVFILTER_VERSION_MAJOR < 5) +#endif +#ifndef FF_API_NOCONST_GET_NAME +#define FF_API_NOCONST_GET_NAME (LIBAVFILTER_VERSION_MAJOR < 5) +#endif +#ifndef FF_API_INTERLACE_LOWPASS_SET +#define FF_API_INTERLACE_LOWPASS_SET (LIBAVFILTER_VERSION_MAJOR < 5) #endif #endif /* AVFILTER_VERSION_H */ diff --git a/dependencies64/ffmpeg/include/libavformat/avformat.h b/dependencies64/ffmpeg/include/libavformat/avformat.h index d5f8a2986..7839c0adc 100644 --- a/dependencies64/ffmpeg/include/libavformat/avformat.h +++ b/dependencies64/ffmpeg/include/libavformat/avformat.h @@ -173,6 +173,58 @@ * * @defgroup lavf_encoding Muxing * @{ + * Muxers take encoded data in the form of @ref AVPacket "AVPackets" and write + * it into files or other output 
bytestreams in the specified container format. + * + * The main API functions for muxing are avformat_write_header() for writing the + * file header, av_write_frame() / av_interleaved_write_frame() for writing the + * packets and av_write_trailer() for finalizing the file. + * + * At the beginning of the muxing process, the caller must first call + * avformat_alloc_context() to create a muxing context. The caller then sets up + * the muxer by filling the various fields in this context: + * + * - The @ref AVFormatContext.oformat "oformat" field must be set to select the + * muxer that will be used. + * - Unless the format is of the AVFMT_NOFILE type, the @ref AVFormatContext.pb + * "pb" field must be set to an opened IO context, either returned from + * avio_open2() or a custom one. + * - Unless the format is of the AVFMT_NOSTREAMS type, at least one stream must + * be created with the avformat_new_stream() function. The caller should fill + * the @ref AVStream.codec "stream codec context" information, such as the + * codec @ref AVCodecContext.codec_type "type", @ref AVCodecContext.codec_id + * "id" and other parameters (e.g. width / height, the pixel or sample format, + * etc.) as known. The @ref AVCodecContext.time_base "codec timebase" should + * be set to the timebase that the caller desires to use for this stream (note + * that the timebase actually used by the muxer can be different, as will be + * described later). + * - The caller may fill in additional information, such as @ref + * AVFormatContext.metadata "global" or @ref AVStream.metadata "per-stream" + * metadata, @ref AVFormatContext.chapters "chapters", @ref + * AVFormatContext.programs "programs", etc. as described in the + * AVFormatContext documentation. Whether such information will actually be + * stored in the output depends on what the container format and the muxer + * support. 
+ * + * When the muxing context is fully set up, the caller must call + * avformat_write_header() to initialize the muxer internals and write the file + * header. Whether anything actually is written to the IO context at this step + * depends on the muxer, but this function must always be called. Any muxer + * private options must be passed in the options parameter to this function. + * + * The data is then sent to the muxer by repeatedly calling av_write_frame() or + * av_interleaved_write_frame() (consult those functions' documentation for + * discussion on the difference between them; only one of them may be used with + * a single muxing context, they should not be mixed). Do note that the timing + * information on the packets sent to the muxer must be in the corresponding + * AVStream's timebase. That timebase is set by the muxer (in the + * avformat_write_header() step) and may be different from the timebase the + * caller set on the codec context. + * + * Once all the data has been written, the caller must call av_write_trailer() + * to flush any buffered packets and finalize the output file, then close the IO + * context (if any) and finally free the muxing context with + * avformat_free_context(). * @} * * @defgroup lavf_io I/O Read/Write @@ -209,6 +261,7 @@ struct AVFormatContext; +struct AVDeviceInfoList; /** * @defgroup metadata_api Public Metadata API @@ -290,6 +343,7 @@ struct AVFormatContext; * Allocate and read the payload of a packet and initialize its * fields with default values. * + * @param s associated IO context * @param pkt packet * @param size desired payload size * @return >0 (read size) if OK, AVERROR_xxx otherwise @@ -305,6 +359,7 @@ int av_get_packet(AVIOContext *s, AVPacket *pkt, int size); * when there is no reasonable way to know (an upper bound of) * the final size. 
* + * @param s associated IO context * @param pkt packet * @param size amount of data to read * @return >0 (read size) if OK, AVERROR_xxx otherwise, previous data @@ -338,6 +393,8 @@ typedef struct AVProbeData { } AVProbeData; #define AVPROBE_SCORE_RETRY (AVPROBE_SCORE_MAX/4) +#define AVPROBE_SCORE_STREAM_RETRY (AVPROBE_SCORE_MAX/4-1) + #define AVPROBE_SCORE_EXTENSION 50 ///< score for file extension #define AVPROBE_SCORE_MAX 100 ///< maximum score @@ -453,6 +510,27 @@ typedef struct AVOutputFormat { void (*get_output_timestamp)(struct AVFormatContext *s, int stream, int64_t *dts, int64_t *wall); + /** + * Allows sending messages from application to device. + */ + int (*control_message)(struct AVFormatContext *s, int type, + void *data, size_t data_size); + + /** + * Write an uncoded AVFrame. + * + * See av_write_uncoded_frame() for details. + * + * The library will free *frame afterwards, but the muxer can prevent it + * by setting the pointer to NULL. + */ + int (*write_uncoded_frame)(struct AVFormatContext *, int stream_index, + AVFrame **frame, unsigned flags); + /** + * Returns device list with it properties. + * @see avdevice_list_devices() for more details. + */ + int (*get_device_list)(struct AVFormatContext *s, struct AVDeviceInfoList *device_list); } AVOutputFormat; /** * @} @@ -581,6 +659,12 @@ typedef struct AVInputFormat { * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL. */ int (*read_seek2)(struct AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags); + + /** + * Returns device list with it properties. + * @see avdevice_list_devices() for more details. 
+ */ + int (*get_device_list)(struct AVFormatContext *s, struct AVDeviceInfoList *device_list); } AVInputFormat; /** * @} @@ -756,9 +840,16 @@ typedef struct AVStream { int64_t last_dts; int64_t duration_gcd; int duration_count; + int64_t rfps_duration_sum; double (*duration_error)[2][MAX_STD_TIMEBASES]; int64_t codec_info_duration; int64_t codec_info_duration_fields; + + /** + * 0 -> decoder has not been searched for yet. + * >0 -> decoder found + * <0 -> decoder with codec_id == -found_decoder has not been found + */ int found_decoder; int64_t last_duration; @@ -775,6 +866,11 @@ typedef struct AVStream { int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */ +#if FF_API_REFERENCE_DTS + /* a hack to keep ABI compatibility for ffmpeg and other applications, which accesses parser even + * though it should not */ + int64_t do_not_use; +#endif // Timestamp generation support: /** * Timestamp corresponding to the last dts sync point. @@ -783,7 +879,6 @@ typedef struct AVStream { * a DTS is received from the underlying container. Otherwise set to * AV_NOPTS_VALUE by default. */ - int64_t reference_dts; int64_t first_dts; int64_t cur_dts; int64_t last_IP_pts; @@ -888,6 +983,24 @@ typedef struct AVStream { */ int pts_wrap_behavior; + /** + * Internal data to prevent doing update_initial_durations() twice + */ + int update_initial_durations_done; + + /** + * Internal data to generate dts from pts + */ + int64_t pts_reorder_error[MAX_REORDER_DELAY+1]; + uint8_t pts_reorder_error_count[MAX_REORDER_DELAY+1]; + + /** + * Internal data to analyze DTS and detect faulty mpeg streams + */ + int64_t last_dts_for_order_check; + uint8_t dts_ordered; + uint8_t dts_misordered; + } AVStream; AVRational av_stream_get_r_frame_rate(const AVStream *s); @@ -938,6 +1051,13 @@ typedef struct AVChapter { } AVChapter; +/** + * Callback used by devices to communicate with application. 
+ */ +typedef int (*av_format_control_message)(struct AVFormatContext *s, int type, + void *data, size_t data_size); + + /** * The duration of a video can be estimated through various ways, and this enum can be used * to know how the duration was estimated. @@ -948,6 +1068,8 @@ enum AVDurationEstimationMethod { AVFMT_DURATION_FROM_BITRATE ///< Duration estimated from bitrate (less accurate) }; +typedef struct AVFormatInternal AVFormatInternal; + /** * Format I/O context. * New fields can be added to the end with minor version bumps. @@ -958,32 +1080,41 @@ enum AVDurationEstimationMethod { */ typedef struct AVFormatContext { /** - * A class for logging and AVOptions. Set by avformat_alloc_context(). + * A class for logging and @ref avoptions. Set by avformat_alloc_context(). * Exports (de)muxer private options if they exist. */ const AVClass *av_class; /** - * Can only be iformat or oformat, not both at the same time. + * The input container format. * - * decoding: set by avformat_open_input(). - * encoding: set by the user. + * Demuxing only, set by avformat_open_input(). */ struct AVInputFormat *iformat; + + /** + * The output container format. + * + * Muxing only, must be set by the caller before avformat_write_header(). + */ struct AVOutputFormat *oformat; /** * Format private data. This is an AVOptions-enabled struct * if and only if iformat/oformat.priv_class is not NULL. + * + * - muxing: set by avformat_write_header() + * - demuxing: set by avformat_open_input() */ void *priv_data; /** * I/O context. * - * decoding: either set by the user before avformat_open_input() (then - * the user must close it manually) or set by avformat_open_input(). - * encoding: set by the user. + * - demuxing: either set by the user before avformat_open_input() (then + * the user must close it manually) or set by avformat_open_input(). + * - muxing: set by the user before avformat_write_header(). The caller must + * take care of closing / freeing the IO context. 
* * Do NOT set this field if AVFMT_NOFILE flag is set in * iformat/oformat.flags. In such a case, the (de)muxer will handle @@ -994,37 +1125,54 @@ typedef struct AVFormatContext { /* stream info */ int ctx_flags; /**< Format-specific flags, see AVFMTCTX_xx */ + /** + * Number of elements in AVFormatContext.streams. + * + * Set by avformat_new_stream(), must not be modified by any other code. + */ + unsigned int nb_streams; /** * A list of all streams in the file. New streams are created with * avformat_new_stream(). * - * decoding: streams are created by libavformat in avformat_open_input(). - * If AVFMTCTX_NOHEADER is set in ctx_flags, then new streams may also - * appear in av_read_frame(). - * encoding: streams are created by the user before avformat_write_header(). + * - demuxing: streams are created by libavformat in avformat_open_input(). + * If AVFMTCTX_NOHEADER is set in ctx_flags, then new streams may also + * appear in av_read_frame(). + * - muxing: streams are created by the user before avformat_write_header(). + * + * Freed by libavformat in avformat_free_context(). */ - unsigned int nb_streams; AVStream **streams; - char filename[1024]; /**< input or output filename */ + /** + * input or output filename + * + * - demuxing: set by avformat_open_input() + * - muxing: may be set by the caller before avformat_write_header() + */ + char filename[1024]; /** - * Decoding: position of the first frame of the component, in + * Position of the first frame of the component, in * AV_TIME_BASE fractional seconds. NEVER set this value directly: * It is deduced from the AVStream values. + * + * Demuxing only, set by libavformat. */ int64_t start_time; /** - * Decoding: duration of the stream, in AV_TIME_BASE fractional + * Duration of the stream, in AV_TIME_BASE fractional * seconds. Only set this value if you know none of the individual stream * durations and also do not set any of them. This is deduced from the * AVStream values if not set. 
+ * + * Demuxing only, set by libavformat. */ int64_t duration; /** - * Decoding: total stream bitrate in bit/s, 0 if not + * Total stream bitrate in bit/s, 0 if not * available. Never set it directly if the file_size and the * duration are known as FFmpeg can compute it automatically. */ @@ -1043,19 +1191,23 @@ typedef struct AVFormatContext { #define AVFMT_FLAG_NOBUFFER 0x0040 ///< Do not buffer frames when possible #define AVFMT_FLAG_CUSTOM_IO 0x0080 ///< The caller has supplied a custom AVIOContext, don't avio_close() it. #define AVFMT_FLAG_DISCARD_CORRUPT 0x0100 ///< Discard frames marked corrupted +#define AVFMT_FLAG_FLUSH_PACKETS 0x0200 ///< Flush the AVIOContext every packet. #define AVFMT_FLAG_MP4A_LATM 0x8000 ///< Enable RTP MP4A-LATM payload #define AVFMT_FLAG_SORT_DTS 0x10000 ///< try to interleave outputted packets by dts (using this flag can slow demuxing down) #define AVFMT_FLAG_PRIV_OPT 0x20000 ///< Enable use of private options by delaying codec open (this could be made default once all code is converted) #define AVFMT_FLAG_KEEP_SIDE_DATA 0x40000 ///< Don't merge side data but keep it separate. /** - * decoding: size of data to probe; encoding: unused. + * Maximum size of the data read from input for determining + * the input container format. + * Demuxing only, set by the caller before avformat_open_input(). */ unsigned int probesize; /** - * decoding: maximum time (in AV_TIME_BASE units) during which the input should - * be analyzed in avformat_find_stream_info(). + * Maximum duration (in AV_TIME_BASE units) of the data read + * from input in avformat_find_stream_info(). + * Demuxing only, set by the caller before avformat_find_stream_info(). */ int max_analyze_duration; @@ -1090,8 +1242,8 @@ typedef struct AVFormatContext { * accurate seeking (depends on demuxer). * Demuxers for which a full in-memory index is mandatory will ignore * this. 
- * muxing : unused - * demuxing: set by user + * - muxing: unused + * - demuxing: set by user */ unsigned int max_index_size; @@ -1101,38 +1253,57 @@ typedef struct AVFormatContext { */ unsigned int max_picture_buffer; + /** + * Number of chapters in AVChapter array. + * When muxing, chapters are normally written in the file header, + * so nb_chapters should normally be initialized before write_header + * is called. Some muxers (e.g. mov and mkv) can also write chapters + * in the trailer. To write chapters in the trailer, nb_chapters + * must be zero when write_header is called and non-zero when + * write_trailer is called. + * - muxing: set by user + * - demuxing: set by libavformat + */ unsigned int nb_chapters; AVChapter **chapters; + /** + * Metadata that applies to the whole file. + * + * - demuxing: set by libavformat in avformat_open_input() + * - muxing: may be set by the caller before avformat_write_header() + * + * Freed by libavformat in avformat_free_context(). + */ AVDictionary *metadata; /** * Start time of the stream in real world time, in microseconds - * since the unix epoch (00:00 1st January 1970). That is, pts=0 - * in the stream was captured at this real world time. - * - encoding: Set by user. - * - decoding: Unused. + * since the Unix epoch (00:00 1st January 1970). That is, pts=0 in the + * stream was captured at this real world time. + * Muxing only, set by the caller before avformat_write_header(). */ int64_t start_time_realtime; /** - * decoding: number of frames used to probe fps + * The number of frames used for determining the framerate in + * avformat_find_stream_info(). + * Demuxing only, set by the caller before avformat_find_stream_info(). */ int fps_probe_size; /** * Error recognition; higher values will detect more errors but may * misdetect some more or less valid parts as errors. - * - encoding: unused - * - decoding: Set by user. + * Demuxing only, set by the caller before avformat_open_input(). 
*/ int error_recognition; /** * Custom interrupt callbacks for the I/O layer. * - * decoding: set by the user before avformat_open_input(). - * encoding: set by the user before avformat_write_header() + * demuxing: set by the user before avformat_open_input(). + * muxing: set by the user before avformat_write_header() * (mainly useful for AVFMT_NOFILE formats). The callback * should also be passed to avio_open2() if it's used to * open the file. @@ -1145,6 +1316,24 @@ typedef struct AVFormatContext { int debug; #define FF_FDEBUG_TS 0x0001 + /** + * Maximum buffering duration for interleaving. + * + * To ensure all the streams are interleaved correctly, + * av_interleaved_write_frame() will wait until it has at least one packet + * for each stream before actually writing any packets to the output file. + * When some streams are "sparse" (i.e. there are large gaps between + * successive packets), this can result in excessive buffering. + * + * This field specifies the maximum difference between the timestamps of the + * first and the last packet in the muxing queue, above which libavformat + * will output a packet regardless of whether it has queued a packet for all + * the streams. + * + * Muxing only, set by the caller before avformat_write_header(). + */ + int64_t max_interleave_delta; + /** * Transport stream id. * This will be moved into demuxer private options. Thus no API/ABI compatibility @@ -1219,14 +1408,14 @@ typedef struct AVFormatContext { /** * Correct single timestamp overflows * - encoding: unused - * - decoding: Set by user via AVOPtions (NO direct access) + * - decoding: Set by user via AVOptions (NO direct access) */ unsigned int correct_ts_overflow; /** * Force seeking to any (also non key) frames. 
* - encoding: unused - * - decoding: Set by user via AVOPtions (NO direct access) + * - decoding: Set by user via AVOptions (NO direct access) */ int seek2any; @@ -1237,6 +1426,15 @@ typedef struct AVFormatContext { */ int flush_packets; + /** + * format probing score. + * The maximal score is AVPROBE_SCORE_MAX, its set when the demuxer probes + * the format. + * - encoding: unused + * - decoding: set by avformat, read by user via av_format_get_probe_score() (NO direct access) + */ + int probe_score; + /***************************************************************** * All fields below this line are not part of the public API. They * may not be used outside of libavformat and can be changed and @@ -1287,6 +1485,12 @@ typedef struct AVFormatContext { */ AVRational offset_timebase; + /** + * An opaque field for libavformat internal usage. + * Must not be accessed in any way by callers. + */ + AVFormatInternal *internal; + /** * IO repositioned flag. * This is set by avformat when the underlaying IO context read pointer @@ -1294,8 +1498,71 @@ typedef struct AVFormatContext { * Demuxers can use the flag to detect such changes. */ int io_repositioned; + + /** + * Forced video codec. + * This allows forcing a specific decoder, even when there are multiple with + * the same codec_id. + * Demuxing: Set by user via av_format_set_video_codec (NO direct access). + */ + AVCodec *video_codec; + + /** + * Forced audio codec. + * This allows forcing a specific decoder, even when there are multiple with + * the same codec_id. + * Demuxing: Set by user via av_format_set_audio_codec (NO direct access). + */ + AVCodec *audio_codec; + + /** + * Forced subtitle codec. + * This allows forcing a specific decoder, even when there are multiple with + * the same codec_id. + * Demuxing: Set by user via av_format_set_subtitle_codec (NO direct access). + */ + AVCodec *subtitle_codec; + + /** + * Number of bytes to be written as padding in a metadata header. + * Demuxing: Unused. 
+ * Muxing: Set by user via av_format_set_metadata_header_padding. + */ + int metadata_header_padding; + + /** + * User data. + * This is a place for some private data of the user. + * Mostly usable with control_message_cb or any future callbacks in device's context. + */ + void *opaque; + + /** + * Callback used by devices to communicate with application. + */ + av_format_control_message control_message_cb; + + /** + * Output timestamp offset, in microseconds. + * Muxing: set by user via AVOptions (NO direct access) + */ + int64_t output_ts_offset; } AVFormatContext; +int av_format_get_probe_score(const AVFormatContext *s); +AVCodec * av_format_get_video_codec(const AVFormatContext *s); +void av_format_set_video_codec(AVFormatContext *s, AVCodec *c); +AVCodec * av_format_get_audio_codec(const AVFormatContext *s); +void av_format_set_audio_codec(AVFormatContext *s, AVCodec *c); +AVCodec * av_format_get_subtitle_codec(const AVFormatContext *s); +void av_format_set_subtitle_codec(AVFormatContext *s, AVCodec *c); +int av_format_get_metadata_header_padding(const AVFormatContext *s); +void av_format_set_metadata_header_padding(AVFormatContext *s, int c); +void * av_format_get_opaque(const AVFormatContext *s); +void av_format_set_opaque(AVFormatContext *s, void *opaque); +av_format_control_message av_format_get_control_message_cb(const AVFormatContext *s); +void av_format_set_control_message_cb(AVFormatContext *s, av_format_control_message callback); + /** * Returns the method used to set ctx->duration. * @@ -1408,6 +1675,7 @@ const AVClass *avformat_get_class(void); * User is required to call avcodec_close() and avformat_free_context() to * clean up the allocation by avformat_new_stream(). * + * @param s media file handle * @param c If non-NULL, the AVCodecContext corresponding to the new stream * will be initialized to use this codec. This is needed for e.g. codec-specific * defaults to be set, so codec should be provided if it is known. 
@@ -1423,12 +1691,6 @@ AVProgram *av_new_program(AVFormatContext *s, int id); */ -#if FF_API_PKT_DUMP -attribute_deprecated void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload); -attribute_deprecated void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, - int dump_payload); -#endif - #if FF_API_ALLOC_OUTPUT_CONTEXT /** * @deprecated deprecated in favor of avformat_alloc_output_context2() @@ -1471,6 +1733,7 @@ AVInputFormat *av_find_input_format(const char *short_name); /** * Guess the file format. * + * @param pd data to be probed * @param is_opened Whether the file is already opened; determines whether * demuxers with or without AVFMT_NOFILE are probed. */ @@ -1479,6 +1742,7 @@ AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened); /** * Guess the file format. * + * @param pd data to be probed * @param is_opened Whether the file is already opened; determines whether * demuxers with or without AVFMT_NOFILE are probed. * @param score_max A probe score larger that this is required to accept a @@ -1510,16 +1774,24 @@ AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score * @param logctx the log context * @param offset the offset within the bytestream to probe from * @param max_probe_size the maximum probe buffer size (zero for default) - * @return 0 in case of success, a negative value corresponding to an + * @return the score in case of success, a negative value corresponding to an + * the maximal score is AVPROBE_SCORE_MAX * AVERROR code otherwise */ +int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt, + const char *filename, void *logctx, + unsigned int offset, unsigned int max_probe_size); + +/** + * Like av_probe_input_buffer2() but returns 0 on success + */ int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt, const char *filename, void *logctx, unsigned int offset, unsigned int max_probe_size); /** * Open an input stream and read the header. The codecs are not opened. 
- * The stream must be closed with av_close_input_file(). + * The stream must be closed with avformat_close_input(). * * @param ps Pointer to user-supplied AVFormatContext (allocated by avformat_alloc_context). * May be a pointer to NULL, in which case an AVFormatContext is allocated by this @@ -1654,8 +1926,8 @@ int av_read_packet(AVFormatContext *s, AVPacket *pkt); * information possible for decoding. * * If pkt->buf is NULL, then the packet is valid until the next - * av_read_frame() or until av_close_input_file(). Otherwise the packet is valid - * indefinitely. In both cases the packet must be freed with + * av_read_frame() or until avformat_close_input(). Otherwise the packet + * is valid indefinitely. In both cases the packet must be freed with * av_free_packet when it is no longer needed. For video, the packet contains * exactly one frame. For audio, it contains an integer number of frames if each * frame has a known fixed size (e.g. PCM or ADPCM data). If the audio frames @@ -1674,6 +1946,8 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt); /** * Seek to the keyframe at timestamp. * 'timestamp' in 'stream_index'. + * + * @param s media file handle * @param stream_index If stream_index is (-1), a default * stream is selected, and timestamp is automatically converted * from AV_TIME_BASE units to the stream specific time_base. @@ -1701,6 +1975,7 @@ int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, * keyframes (this may not be supported by all demuxers). * If flags contain AVSEEK_FLAG_BACKWARD, it is ignored. * + * @param s media file handle * @param stream_index index of the stream which is used as time base reference * @param min_ts smallest acceptable timestamp * @param ts target timestamp @@ -1800,49 +2075,108 @@ int avformat_write_header(AVFormatContext *s, AVDictionary **options); /** * Write a packet to an output media file. * - * The packet shall contain one audio or video frame. 
- * The packet must be correctly interleaved according to the container - * specification, if not then av_interleaved_write_frame must be used. + * This function passes the packet directly to the muxer, without any buffering + * or reordering. The caller is responsible for correctly interleaving the + * packets if the format requires it. Callers that want libavformat to handle + * the interleaving should call av_interleaved_write_frame() instead of this + * function. * * @param s media file handle - * @param pkt The packet, which contains the stream_index, buf/buf_size, - * dts/pts, ... - * This can be NULL (at any time, not just at the end), in - * order to immediately flush data buffered within the muxer, - * for muxers that buffer up data internally before writing it - * to the output. + * @param pkt The packet containing the data to be written. Note that unlike + * av_interleaved_write_frame(), this function does not take + * ownership of the packet passed to it (though some muxers may make + * an internal reference to the input packet). + *
+ * This parameter can be NULL (at any time, not just at the end), in + * order to immediately flush data buffered within the muxer, for + * muxers that buffer up data internally before writing it to the + * output. + *
+ * Packet's @ref AVPacket.stream_index "stream_index" field must be + * set to the index of the corresponding stream in @ref + * AVFormatContext.streams "s->streams". It is very strongly + * recommended that timing information (@ref AVPacket.pts "pts", @ref + * AVPacket.dts "dts", @ref AVPacket.duration "duration") is set to + * correct values. * @return < 0 on error, = 0 if OK, 1 if flushed and there is no more data to flush + * + * @see av_interleaved_write_frame() */ int av_write_frame(AVFormatContext *s, AVPacket *pkt); /** * Write a packet to an output media file ensuring correct interleaving. * - * The packet must contain one audio or video frame. - * If the packets are already correctly interleaved, the application should - * call av_write_frame() instead as it is slightly faster. It is also important - * to keep in mind that completely non-interleaved input will need huge amounts - * of memory to interleave with this, so it is preferable to interleave at the - * demuxer level. + * This function will buffer the packets internally as needed to make sure the + * packets in the output file are properly interleaved in the order of + * increasing dts. Callers doing their own interleaving should call + * av_write_frame() instead of this function. * * @param s media file handle - * @param pkt The packet containing the data to be written. pkt->buf must be set - * to a valid AVBufferRef describing the packet data. Libavformat takes - * ownership of this reference and will unref it when it sees fit. The caller - * must not access the data through this reference after this function returns. - * This can be NULL (at any time, not just at the end), to flush the - * interleaving queues. - * Packet's @ref AVPacket.stream_index "stream_index" field must be set to the - * index of the corresponding stream in @ref AVFormatContext.streams - * "s.streams". 
- * It is very strongly recommended that timing information (@ref AVPacket.pts - * "pts", @ref AVPacket.dts "dts" @ref AVPacket.duration "duration") is set to - * correct values. - * - * @return 0 on success, a negative AVERROR on error. + * @param pkt The packet containing the data to be written. + *
+ * If the packet is reference-counted, this function will take + * ownership of this reference and unreference it later when it sees + * fit. + * The caller must not access the data through this reference after + * this function returns. If the packet is not reference-counted, + * libavformat will make a copy. + *
+ * This parameter can be NULL (at any time, not just at the end), to + * flush the interleaving queues. + *
+ * Packet's @ref AVPacket.stream_index "stream_index" field must be + * set to the index of the corresponding stream in @ref + * AVFormatContext.streams "s->streams". It is very strongly + * recommended that timing information (@ref AVPacket.pts "pts", @ref + * AVPacket.dts "dts", @ref AVPacket.duration "duration") is set to + * correct values. + * + * @return 0 on success, a negative AVERROR on error. Libavformat will always + * take care of freeing the packet, even if this function fails. + * + * @see av_write_frame(), AVFormatContext.max_interleave_delta */ int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt); +/** + * Write a uncoded frame to an output media file. + * + * The frame must be correctly interleaved according to the container + * specification; if not, then av_interleaved_write_frame() must be used. + * + * See av_interleaved_write_frame() for details. + */ +int av_write_uncoded_frame(AVFormatContext *s, int stream_index, + AVFrame *frame); + +/** + * Write a uncoded frame to an output media file. + * + * If the muxer supports it, this function allows to write an AVFrame + * structure directly, without encoding it into a packet. + * It is mostly useful for devices and similar special muxers that use raw + * video or PCM data and will not serialize it into a byte stream. + * + * To test whether it is possible to use it with a given muxer and stream, + * use av_write_uncoded_frame_query(). + * + * The caller gives up ownership of the frame and must not access it + * afterwards. + * + * @return >=0 for success, a negative code on error + */ +int av_interleaved_write_uncoded_frame(AVFormatContext *s, int stream_index, + AVFrame *frame); + +/** + * Test whether a muxer supports uncoded frame. 
+ * + * @return >=0 if an uncoded frame can be written to that muxer and stream, + * <0 if not + */ +int av_write_uncoded_frame_query(AVFormatContext *s, int stream_index); + /** * Write the stream trailer to an output media file and free the * file private data. @@ -1966,6 +2300,7 @@ void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload, * * @param tags list of supported codec_id-codec_tag pairs, as stored * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + * @param tag codec tag to match to a codec ID */ enum AVCodecID av_codec_get_id(const struct AVCodecTag * const *tags, unsigned int tag); @@ -1975,6 +2310,7 @@ enum AVCodecID av_codec_get_id(const struct AVCodecTag * const *tags, unsigned i * * @param tags list of supported codec_id-codec_tag pairs, as stored * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + * @param id codec ID to match to a codec tag */ unsigned int av_codec_get_tag(const struct AVCodecTag * const *tags, enum AVCodecID id); @@ -1994,6 +2330,9 @@ int av_find_default_stream_index(AVFormatContext *s); /** * Get the index for a specific timestamp. + * + * @param st stream that the timestamp belongs to + * @param timestamp timestamp to retrieve the index for * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond * to the timestamp which is <= the requested one, if backward * is 0, then it will be >= @@ -2090,6 +2429,7 @@ int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size); * Return a positive value if the given filename has one of the given * extensions, 0 otherwise. * + * @param filename file name to check against the given extensions * @param extensions a comma-separated list of filename extensions */ int av_match_ext(const char *filename, const char *extensions); @@ -2097,6 +2437,8 @@ int av_match_ext(const char *filename, const char *extensions); /** * Test if the given container can store a codec. 
* + * @param ofmt container to check for compatibility + * @param codec_id codec to potentially store in container * @param std_compliance standards compliance level, one of FF_COMPLIANCE_* * * @return 1 if codec with ID codec_id can be stored in ofmt, 0 if it cannot. @@ -2124,6 +2466,14 @@ const struct AVCodecTag *avformat_get_riff_video_tags(void); * @return the table mapping RIFF FourCCs for audio to AVCodecID. */ const struct AVCodecTag *avformat_get_riff_audio_tags(void); +/** + * @return the table mapping MOV FourCCs for video to libavcodec AVCodecID. + */ +const struct AVCodecTag *avformat_get_mov_video_tags(void); +/** + * @return the table mapping MOV FourCCs for audio to AVCodecID. + */ +const struct AVCodecTag *avformat_get_mov_audio_tags(void); /** * @} diff --git a/dependencies64/ffmpeg/include/libavformat/avio.h b/dependencies64/ffmpeg/include/libavformat/avio.h index 5bdbc6283..defd6172b 100644 --- a/dependencies64/ffmpeg/include/libavformat/avio.h +++ b/dependencies64/ffmpeg/include/libavformat/avio.h @@ -150,6 +150,15 @@ typedef struct AVIOContext { /* unbuffered I/O */ +/** + * Return the name of the protocol that will handle the passed URL. + * + * NULL is returned if no protocol could be found for the given URL. + * + * @return Name of the protocol or NULL. + */ +const char *avio_find_protocol_name(const char *url); + /** * Return AVIO_FLAG_* access flags corresponding to the access permissions * of the resource in url, or a negative value corresponding to an @@ -366,9 +375,10 @@ int avio_get_str16be(AVIOContext *pb, int maxlen, char *buf, int buflen); * * @param s Used to return the pointer to the created AVIOContext. * In case of failure the pointed to value is set to NULL. 
+ * @param url resource to access * @param flags flags which control how the resource indicated by url * is to be opened - * @return 0 in case of success, a negative value corresponding to an + * @return >= 0 in case of success, a negative value corresponding to an * AVERROR code in case of failure */ int avio_open(AVIOContext **s, const char *url, int flags); @@ -381,13 +391,14 @@ int avio_open(AVIOContext **s, const char *url, int flags); * * @param s Used to return the pointer to the created AVIOContext. * In case of failure the pointed to value is set to NULL. + * @param url resource to access * @param flags flags which control how the resource indicated by url * is to be opened * @param int_cb an interrupt callback to be used at the protocols level * @param options A dictionary filled with protocol-private options. On return * this parameter will be destroyed and replaced with a dict containing options * that were not found. May be NULL. - * @return 0 in case of success, a negative value corresponding to an + * @return >= 0 in case of success, a negative value corresponding to an * AVERROR code in case of failure */ int avio_open2(AVIOContext **s, const char *url, int flags, @@ -454,6 +465,8 @@ const char *avio_enum_protocols(void **opaque, int output); /** * Pause and resume playing - only meaningful if using a network streaming * protocol (e.g. MMS). + * + * @param h IO context from which to call the read_pause function pointer * @param pause 1 for pause, 0 for resume */ int avio_pause(AVIOContext *h, int pause); @@ -461,6 +474,8 @@ int avio_pause(AVIOContext *h, int pause); /** * Seek to a given timestamp relative to some component stream. * Only meaningful if using a network streaming protocol (e.g. MMS.). + * + * @param h IO context from which to call the seek function pointers * @param stream_index The stream index that the timestamp is relative to. 
* If stream_index is (-1) the timestamp should be in AV_TIME_BASE * units from the beginning of the presentation. diff --git a/dependencies64/ffmpeg/include/libavformat/version.h b/dependencies64/ffmpeg/include/libavformat/version.h index ed1c1966e..9f0695c86 100644 --- a/dependencies64/ffmpeg/include/libavformat/version.h +++ b/dependencies64/ffmpeg/include/libavformat/version.h @@ -27,11 +27,11 @@ * Libavformat version macros */ -#include "libavutil/avutil.h" +#include "libavutil/version.h" #define LIBAVFORMAT_VERSION_MAJOR 55 -#define LIBAVFORMAT_VERSION_MINOR 12 -#define LIBAVFORMAT_VERSION_MICRO 102 +#define LIBAVFORMAT_VERSION_MINOR 33 +#define LIBAVFORMAT_VERSION_MICRO 100 #define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \ LIBAVFORMAT_VERSION_MINOR, \ @@ -48,13 +48,10 @@ * dropped at a future version bump. The defines themselves are not part of * the public API and may change, break or disappear at any time. */ - -#ifndef FF_API_OLD_AVIO -#define FF_API_OLD_AVIO (LIBAVFORMAT_VERSION_MAJOR < 55) -#endif -#ifndef FF_API_PKT_DUMP -#define FF_API_PKT_DUMP (LIBAVFORMAT_VERSION_MAJOR < 54) +#ifndef FF_API_REFERENCE_DTS +#define FF_API_REFERENCE_DTS (LIBAVFORMAT_VERSION_MAJOR < 56) #endif + #ifndef FF_API_ALLOC_OUTPUT_CONTEXT #define FF_API_ALLOC_OUTPUT_CONTEXT (LIBAVFORMAT_VERSION_MAJOR < 56) #endif diff --git a/dependencies64/ffmpeg/include/libavutil/adler32.h b/dependencies64/ffmpeg/include/libavutil/adler32.h index 8c08d2b88..0dc69ec0a 100644 --- a/dependencies64/ffmpeg/include/libavutil/adler32.h +++ b/dependencies64/ffmpeg/include/libavutil/adler32.h @@ -25,6 +25,9 @@ #include "attributes.h" /** + * @file + * Public header for libavutil Adler32 hasher + * * @defgroup lavu_adler32 Adler32 * @ingroup lavu_crypto * @{ diff --git a/dependencies64/ffmpeg/include/libavutil/attributes.h b/dependencies64/ffmpeg/include/libavutil/attributes.h index 64b46f68f..7d3f4a91c 100644 --- a/dependencies64/ffmpeg/include/libavutil/attributes.h +++ 
b/dependencies64/ffmpeg/include/libavutil/attributes.h @@ -52,6 +52,8 @@ #if AV_GCC_VERSION_AT_LEAST(3,1) # define av_noinline __attribute__((noinline)) +#elif defined(_MSC_VER) +# define av_noinline __declspec(noinline) #else # define av_noinline #endif @@ -62,10 +64,6 @@ # define av_pure #endif -#ifndef av_restrict -#define av_restrict restrict -#endif - #if AV_GCC_VERSION_AT_LEAST(2,6) # define av_const __attribute__((const)) #else @@ -78,7 +76,7 @@ # define av_cold #endif -#if AV_GCC_VERSION_AT_LEAST(4,1) +#if AV_GCC_VERSION_AT_LEAST(4,1) && !defined(__llvm__) # define av_flatten __attribute__((flatten)) #else # define av_flatten @@ -86,6 +84,8 @@ #if AV_GCC_VERSION_AT_LEAST(3,1) # define attribute_deprecated __attribute__((deprecated)) +#elif defined(_MSC_VER) +# define attribute_deprecated __declspec(deprecated) #else # define attribute_deprecated #endif @@ -102,6 +102,12 @@ _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") \ code \ _Pragma("GCC diagnostic pop") +#elif defined(_MSC_VER) +# define AV_NOWARN_DEPRECATED(code) \ + __pragma(warning(push)) \ + __pragma(warning(disable : 4996)) \ + code; \ + __pragma(warning(pop)) #else # define AV_NOWARN_DEPRECATED(code) code #endif diff --git a/dependencies64/ffmpeg/include/libavutil/audio_fifo.h b/dependencies64/ffmpeg/include/libavutil/audio_fifo.h index 55a538e78..903b8f1cd 100644 --- a/dependencies64/ffmpeg/include/libavutil/audio_fifo.h +++ b/dependencies64/ffmpeg/include/libavutil/audio_fifo.h @@ -2,20 +2,20 @@ * Audio FIFO * Copyright (c) 2012 Justin Ruggles * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. 
* - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ diff --git a/dependencies64/ffmpeg/include/libavutil/avstring.h b/dependencies64/ffmpeg/include/libavutil/avstring.h index 438ef799e..de2f71d12 100644 --- a/dependencies64/ffmpeg/include/libavutil/avstring.h +++ b/dependencies64/ffmpeg/include/libavutil/avstring.h @@ -22,6 +22,7 @@ #define AVUTIL_AVSTRING_H #include +#include #include "attributes.h" /** @@ -130,6 +131,20 @@ size_t av_strlcat(char *dst, const char *src, size_t size); */ size_t av_strlcatf(char *dst, size_t size, const char *fmt, ...) av_printf_format(3, 4); +/** + * Get the count of continuous non zero chars starting from the beginning. + * + * @param len maximum number of characters to check in the string, that + * is the maximum value which is returned by the function + */ +static inline size_t av_strnlen(const char *s, size_t len) +{ + size_t i; + for (i = 0; i < len && s[i]; i++) + ; + return i; +} + /** * Print arguments following specified format into a large enough auto * allocated buffer. It is similar to GNU asprintf(). 
@@ -295,6 +310,45 @@ enum AVEscapeMode { int av_escape(char **dst, const char *src, const char *special_chars, enum AVEscapeMode mode, int flags); +#define AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES 1 ///< accept codepoints over 0x10FFFF +#define AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS 2 ///< accept non-characters - 0xFFFE and 0xFFFF +#define AV_UTF8_FLAG_ACCEPT_SURROGATES 4 ///< accept UTF-16 surrogates codes +#define AV_UTF8_FLAG_EXCLUDE_XML_INVALID_CONTROL_CODES 8 ///< exclude control codes not accepted by XML + +#define AV_UTF8_FLAG_ACCEPT_ALL \ + AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES|AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS|AV_UTF8_FLAG_ACCEPT_SURROGATES + +/** + * Read and decode a single UTF-8 code point (character) from the + * buffer in *buf, and update *buf to point to the next byte to + * decode. + * + * In case of an invalid byte sequence, the pointer will be updated to + * the next byte after the invalid sequence and the function will + * return an error code. + * + * Depending on the specified flags, the function will also fail in + * case the decoded code point does not belong to a valid range. + * + * @note For speed-relevant code a carefully implemented use of + * GET_UTF8() may be preferred. + * + * @param codep pointer used to return the parsed code in case of success. + * The value in *codep is set even in case the range check fails. + * @param bufp pointer to the address the first byte of the sequence + * to decode, updated by the function to point to the + * byte next after the decoded sequence + * @param buf_end pointer to the end of the buffer, points to the next + * byte past the last in the buffer. This is used to + * avoid buffer overreads (in case of an unfinished + * UTF-8 sequence towards the end of the buffer). 
+ * @param flags a collection of AV_UTF8_FLAG_* flags + * @return >= 0 in case a sequence was successfully read, a negative + * value in case of invalid sequence + */ +int av_utf8_decode(int32_t *codep, const uint8_t **bufp, const uint8_t *buf_end, + unsigned int flags); + /** * @} */ diff --git a/dependencies64/ffmpeg/include/libavutil/avutil.h b/dependencies64/ffmpeg/include/libavutil/avutil.h index 4986f4f9e..d6566c116 100644 --- a/dependencies64/ffmpeg/include/libavutil/avutil.h +++ b/dependencies64/ffmpeg/include/libavutil/avutil.h @@ -41,7 +41,7 @@ * @li @ref lavu "libavutil" common utility library * @li @ref lswr "libswresample" audio resampling, format conversion and mixing * @li @ref lpp "libpostproc" post processing library - * @li @ref lsws "libswscale" color conversion and scaling library + * @li @ref libsws "libswscale" color conversion and scaling library * * @section ffmpeg_versioning Versioning and compatibility * @@ -128,6 +128,12 @@ * * @} * + * @defgroup lavu_log Logging Facility + * + * @{ + * + * @} + * * @defgroup lavu_misc Other * * @{ @@ -139,6 +145,12 @@ * @{ * * @} + * + * @defgroup preproc_misc Preprocessor String Macros + * + * @{ + * + * @} */ @@ -271,9 +283,9 @@ char av_get_picture_type_char(enum AVPictureType pict_type); #include "common.h" #include "error.h" #include "version.h" +#include "macros.h" #include "mathematics.h" #include "rational.h" -#include "intfloat_readwrite.h" #include "log.h" #include "pixfmt.h" @@ -306,6 +318,13 @@ unsigned av_int_list_length_for_size(unsigned elsize, #define av_int_list_length(list, term) \ av_int_list_length_for_size(sizeof(*(list)), list, term) +/** + * Open a file using a UTF-8 filename. + * The API of this function matches POSIX fopen(), errors are returned through + * errno. 
+ */ +FILE *av_fopen_utf8(const char *path, const char *mode); + /** * @} * @} diff --git a/dependencies64/ffmpeg/include/libavutil/bprint.h b/dependencies64/ffmpeg/include/libavutil/bprint.h index dc86f1241..839ec1ec0 100644 --- a/dependencies64/ffmpeg/include/libavutil/bprint.h +++ b/dependencies64/ffmpeg/include/libavutil/bprint.h @@ -21,6 +21,8 @@ #ifndef AVUTIL_BPRINT_H #define AVUTIL_BPRINT_H +#include + #include "attributes.h" #include "avstring.h" @@ -121,11 +123,25 @@ void av_bprint_init_for_buffer(AVBPrint *buf, char *buffer, unsigned size); */ void av_bprintf(AVBPrint *buf, const char *fmt, ...) av_printf_format(2, 3); +/** + * Append a formatted string to a print buffer. + */ +void av_vbprintf(AVBPrint *buf, const char *fmt, va_list vl_arg); + /** * Append char c n times to a print buffer. */ void av_bprint_chars(AVBPrint *buf, char c, unsigned n); +/** + * Append data to a print buffer. + * + * param buf bprint buffer to use + * param data pointer to data + * param size size of data + */ +void av_bprint_append_data(AVBPrint *buf, const char *data, unsigned size); + struct tm; /** * Append a formatted date and time to a print buffer. 
diff --git a/dependencies64/ffmpeg/include/libavutil/bswap.h b/dependencies64/ffmpeg/include/libavutil/bswap.h index 06f654816..f38e1dec8 100644 --- a/dependencies64/ffmpeg/include/libavutil/bswap.h +++ b/dependencies64/ffmpeg/include/libavutil/bswap.h @@ -34,7 +34,9 @@ #include "config.h" -#if ARCH_ARM +#if ARCH_AARCH64 +# include "aarch64/bswap.h" +#elif ARCH_ARM # include "arm/bswap.h" #elif ARCH_AVR32 # include "avr32/bswap.h" diff --git a/dependencies64/ffmpeg/include/libavutil/channel_layout.h b/dependencies64/ffmpeg/include/libavutil/channel_layout.h index 290609831..bc6befd8e 100644 --- a/dependencies64/ffmpeg/include/libavutil/channel_layout.h +++ b/dependencies64/ffmpeg/include/libavutil/channel_layout.h @@ -114,6 +114,10 @@ enum AVMatrixEncoding { AV_MATRIX_ENCODING_NONE, AV_MATRIX_ENCODING_DOLBY, AV_MATRIX_ENCODING_DPLII, + AV_MATRIX_ENCODING_DPLIIX, + AV_MATRIX_ENCODING_DPLIIZ, + AV_MATRIX_ENCODING_DOLBYEX, + AV_MATRIX_ENCODING_DOLBYHEADPHONE, AV_MATRIX_ENCODING_NB }; @@ -136,7 +140,12 @@ enum AVMatrixEncoding { * - a channel layout mask, in hexadecimal starting with "0x" (see the * AV_CH_* macros). * - * Example: "stereo+FC" = "2+FC" = "2c+1c" = "0x7" + * @warning Starting from the next major bump the trailing character + * 'c' to specify a number of channels will be required, while a + * channel layout mask could also be specified as a decimal number + * (if and only if not followed by "c"). 
+ * + * Example: "stereo+FC" = "2c+FC" = "2c+1c" = "0x7" */ uint64_t av_get_channel_layout(const char *name); diff --git a/dependencies64/ffmpeg/include/libavutil/common.h b/dependencies64/ffmpeg/include/libavutil/common.h index c7c32fd36..c82a3a624 100644 --- a/dependencies64/ffmpeg/include/libavutil/common.h +++ b/dependencies64/ffmpeg/include/libavutil/common.h @@ -26,10 +26,15 @@ #ifndef AVUTIL_COMMON_H #define AVUTIL_COMMON_H +#if defined(__cplusplus) && !defined(__STDC_CONSTANT_MACROS) && !defined(UINT64_C) +#error missing -D__STDC_CONSTANT_MACROS / #define __STDC_CONSTANT_MACROS +#endif + #include #include #include #include +#include #include #include #include @@ -175,7 +180,7 @@ static av_always_inline av_const int16_t av_clip_int16_c(int a) */ static av_always_inline av_const int32_t av_clipl_int32_c(int64_t a) { - if ((a+0x80000000u) & ~UINT64_C(0xFFFFFFFF)) return (a>>63) ^ 0x7FFFFFFF; + if ((a+0x80000000u) & ~UINT64_C(0xFFFFFFFF)) return (int32_t)((a>>63) ^ 0x7FFFFFFF); else return (int32_t)a; } @@ -279,7 +284,7 @@ static av_always_inline av_const int av_popcount_c(uint32_t x) */ static av_always_inline av_const int av_popcount64_c(uint64_t x) { - return av_popcount((uint32_t)x) + av_popcount(x >> 32); + return av_popcount((uint32_t)x) + av_popcount((uint32_t)(x >> 32)); } #define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24)) @@ -295,6 +300,11 @@ static av_always_inline av_const int av_popcount64_c(uint64_t x) * input, this could be *ptr++. * @param ERROR Expression to be evaluated on invalid input, * typically a goto statement. + * + * @warning ERROR should not contain a loop control statement which + * could interact with the internal while loop, and should force an + * exit from the macro code (e.g. through a goto or a return) in order + * to prevent undefined results. 
*/ #define GET_UTF8(val, GET_BYTE, ERROR)\ val= GET_BYTE;\ diff --git a/dependencies64/ffmpeg/include/libavutil/cpu.h b/dependencies64/ffmpeg/include/libavutil/cpu.h index df8ef8728..0ad400fef 100644 --- a/dependencies64/ffmpeg/include/libavutil/cpu.h +++ b/dependencies64/ffmpeg/include/libavutil/cpu.h @@ -33,9 +33,11 @@ #define AV_CPU_FLAG_SSE 0x0008 ///< SSE functions #define AV_CPU_FLAG_SSE2 0x0010 ///< PIV SSE2 functions #define AV_CPU_FLAG_SSE2SLOW 0x40000000 ///< SSE2 supported, but usually not faster + ///< than regular MMX/SSE (e.g. Core1) #define AV_CPU_FLAG_3DNOWEXT 0x0020 ///< AMD 3DNowExt #define AV_CPU_FLAG_SSE3 0x0040 ///< Prescott SSE3 functions #define AV_CPU_FLAG_SSE3SLOW 0x20000000 ///< SSE3 supported, but usually not faster + ///< than regular MMX/SSE (e.g. Core1) #define AV_CPU_FLAG_SSSE3 0x0080 ///< Conroe SSSE3 functions #define AV_CPU_FLAG_ATOM 0x10000000 ///< Atom processor, some SSSE3 instructions are slower #define AV_CPU_FLAG_SSE4 0x0100 ///< Penryn SSE4.1 functions @@ -48,6 +50,10 @@ // #else // #define AV_CPU_FLAG_CMOV 0x1000 ///< supports cmov instruction // #endif +#define AV_CPU_FLAG_AVX2 0x8000 ///< AVX2 functions: requires OS support even if YMM registers aren't used +#define AV_CPU_FLAG_FMA3 0x10000 ///< Haswell FMA3 functions +#define AV_CPU_FLAG_BMI1 0x20000 ///< Bit Manipulation Instruction Set 1 +#define AV_CPU_FLAG_BMI2 0x40000 ///< Bit Manipulation Instruction Set 2 #define AV_CPU_FLAG_ALTIVEC 0x0001 ///< standard @@ -105,9 +111,4 @@ int av_parse_cpu_caps(unsigned *flags, const char *s); */ int av_cpu_count(void); -/* The following CPU-specific functions shall not be called directly. 
*/ -int ff_get_cpu_flags_arm(void); -int ff_get_cpu_flags_ppc(void); -int ff_get_cpu_flags_x86(void); - #endif /* AVUTIL_CPU_H */ diff --git a/dependencies64/ffmpeg/include/libavutil/dict.h b/dependencies64/ffmpeg/include/libavutil/dict.h index 38f03a407..c23a2d12c 100644 --- a/dependencies64/ffmpeg/include/libavutil/dict.h +++ b/dependencies64/ffmpeg/include/libavutil/dict.h @@ -46,30 +46,31 @@ * entries and finally av_dict_free() to free the dictionary * and all its contents. * - * @code - * AVDictionary *d = NULL; // "create" an empty dictionary - * av_dict_set(&d, "foo", "bar", 0); // add an entry - * - * char *k = av_strdup("key"); // if your strings are already allocated, - * char *v = av_strdup("value"); // you can avoid copying them like this - * av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL); - * - * AVDictionaryEntry *t = NULL; - * while (t = av_dict_get(d, "", t, AV_DICT_IGNORE_SUFFIX)) { - * <....> // iterate over all entries in d - * } - * - * av_dict_free(&d); - * @endcode + @code + AVDictionary *d = NULL; // "create" an empty dictionary + AVDictionaryEntry *t = NULL; + + av_dict_set(&d, "foo", "bar", 0); // add an entry + + char *k = av_strdup("key"); // if your strings are already allocated, + char *v = av_strdup("value"); // you can avoid copying them like this + av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL); + + while (t = av_dict_get(d, "", t, AV_DICT_IGNORE_SUFFIX)) { + <....> // iterate over all entries in d + } + av_dict_free(&d); + @endcode * */ -#define AV_DICT_MATCH_CASE 1 -#define AV_DICT_IGNORE_SUFFIX 2 +#define AV_DICT_MATCH_CASE 1 /**< Only get an entry with exact-case key match. Only relevant in av_dict_get(). */ +#define AV_DICT_IGNORE_SUFFIX 2 /**< Return first entry in a dictionary whose first part corresponds to the search key, + ignoring the suffix of the found key string. Only relevant in av_dict_get(). 
*/ #define AV_DICT_DONT_STRDUP_KEY 4 /**< Take ownership of a key that's been - allocated with av_malloc() and children. */ + allocated with av_malloc() or another memory allocation function. */ #define AV_DICT_DONT_STRDUP_VAL 8 /**< Take ownership of a value that's been - allocated with av_malloc() and chilren. */ + allocated with av_malloc() or another memory allocation function. */ #define AV_DICT_DONT_OVERWRITE 16 ///< Don't overwrite existing entries. #define AV_DICT_APPEND 32 /**< If the entry already exists, append to it. Note that no delimiter is added, the strings are simply concatenated. */ @@ -84,10 +85,17 @@ typedef struct AVDictionary AVDictionary; /** * Get a dictionary entry with matching key. * + * The returned entry key or value must not be changed, or it will + * cause undefined behavior. + * + * To iterate through all the dictionary entries, you can set the matching key + * to the null string "" and set the AV_DICT_IGNORE_SUFFIX flag. + * * @param prev Set to the previous matching element to find the next. * If set to NULL the first matching element is returned. - * @param flags Allows case as well as suffix-insensitive comparisons. - * @return Found entry or NULL, changing key or value leads to undefined behavior. + * @param key matching key + * @param flags a collection of AV_DICT_* flags controlling how the entry is retrieved + * @return found entry or NULL in case no matching entry was found in the dictionary */ AVDictionaryEntry * av_dict_get(AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags); @@ -113,7 +121,10 @@ int av_dict_count(const AVDictionary *m); int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags); /** - * Parse the key/value pairs list and add to a dictionary. + * Parse the key/value pairs list and add the parsed entries to a dictionary. + * + * In case of failure, all the successfully set entries are stored in + * *pm. You may need to manually free the created dictionary. 
* * @param key_val_sep a 0-terminated list of characters used to separate * key from value diff --git a/dependencies64/ffmpeg/include/libavutil/downmix_info.h b/dependencies64/ffmpeg/include/libavutil/downmix_info.h new file mode 100644 index 000000000..c369891c0 --- /dev/null +++ b/dependencies64/ffmpeg/include/libavutil/downmix_info.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2014 Tim Walker + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_DOWNMIX_INFO_H +#define AVUTIL_DOWNMIX_INFO_H + +#include "frame.h" + +/** + * @file + * audio downmix medatata + */ + +/** + * @addtogroup lavu_audio + * @{ + */ + +/** + * @defgroup downmix_info Audio downmix metadata + * @{ + */ + +/** + * Possible downmix types. + */ +enum AVDownmixType { + AV_DOWNMIX_TYPE_UNKNOWN, /**< Not indicated. */ + AV_DOWNMIX_TYPE_LORO, /**< Lo/Ro 2-channel downmix (Stereo). */ + AV_DOWNMIX_TYPE_LTRT, /**< Lt/Rt 2-channel downmix, Dolby Surround compatible. */ + AV_DOWNMIX_TYPE_DPLII, /**< Lt/Rt 2-channel downmix, Dolby Pro Logic II compatible. */ + AV_DOWNMIX_TYPE_NB /**< Number of downmix types. Not part of ABI. */ +}; + +/** + * This structure describes optional metadata relevant to a downmix procedure. 
+ * + * All fields are set by the decoder to the value indicated in the audio + * bitstream (if present), or to a "sane" default otherwise. + */ +typedef struct AVDownmixInfo { + /** + * Type of downmix preferred by the mastering engineer. + */ + enum AVDownmixType preferred_downmix_type; + + /** + * Absolute scale factor representing the nominal level of the center + * channel during a regular downmix. + */ + double center_mix_level; + + /** + * Absolute scale factor representing the nominal level of the center + * channel during an Lt/Rt compatible downmix. + */ + double center_mix_level_ltrt; + + /** + * Absolute scale factor representing the nominal level of the surround + * channels during a regular downmix. + */ + double surround_mix_level; + + /** + * Absolute scale factor representing the nominal level of the surround + * channels during an Lt/Rt compatible downmix. + */ + double surround_mix_level_ltrt; + + /** + * Absolute scale factor representing the level at which the LFE data is + * mixed into L/R channels during downmixing. + */ + double lfe_mix_level; +} AVDownmixInfo; + +/** + * Get a frame's AV_FRAME_DATA_DOWNMIX_INFO side data for editing. + * + * The side data is created and added to the frame if it's absent. + * + * @param frame the frame for which the side data is to be obtained. + * + * @return the AVDownmixInfo structure to be edited by the caller. 
+ */ +AVDownmixInfo *av_downmix_info_update_side_data(AVFrame *frame); + +/** + * @} + */ + +/** + * @} + */ + +#endif /* AVUTIL_DOWNMIX_INFO_H */ diff --git a/dependencies64/ffmpeg/include/libavutil/eval.h b/dependencies64/ffmpeg/include/libavutil/eval.h index a1d1fe345..6159b0fe5 100644 --- a/dependencies64/ffmpeg/include/libavutil/eval.h +++ b/dependencies64/ffmpeg/include/libavutil/eval.h @@ -45,7 +45,7 @@ typedef struct AVExpr AVExpr; * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2 * @param log_ctx parent logging context - * @return 0 in case of success, a negative value corresponding to an + * @return >= 0 in case of success, a negative value corresponding to an * AVERROR code otherwise */ int av_expr_parse_and_eval(double *res, const char *s, @@ -68,7 +68,7 @@ int av_expr_parse_and_eval(double *res, const char *s, * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments * @param log_ctx parent logging context - * @return 0 in case of success, a negative value corresponding to an + * @return >= 0 in case of success, a negative value corresponding to an * AVERROR code otherwise */ int av_expr_parse(AVExpr **expr, const char *s, diff --git a/dependencies64/ffmpeg/include/libavutil/ffversion.h b/dependencies64/ffmpeg/include/libavutil/ffversion.h new file mode 100644 index 000000000..58a021936 --- /dev/null +++ b/dependencies64/ffmpeg/include/libavutil/ffversion.h @@ -0,0 +1,4 @@ +#ifndef AVUTIL_FFVERSION_H +#define AVUTIL_FFVERSION_H +#define FFMPEG_VERSION "2.2.3" +#endif /* AVUTIL_FFVERSION_H */ diff --git a/dependencies64/ffmpeg/include/libavutil/frame.h b/dependencies64/ffmpeg/include/libavutil/frame.h index 8cb3000aa..9fedba863 100644 --- a/dependencies64/ffmpeg/include/libavutil/frame.h +++ 
b/dependencies64/ffmpeg/include/libavutil/frame.h @@ -17,29 +17,37 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * @ingroup lavu_frame + * reference-counted frame API + */ + #ifndef AVUTIL_FRAME_H #define AVUTIL_FRAME_H #include -#include "libavcodec/version.h" - #include "avutil.h" #include "buffer.h" #include "dict.h" #include "rational.h" #include "samplefmt.h" +#include "version.h" + enum AVColorSpace{ - AVCOL_SPC_RGB = 0, - AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B - AVCOL_SPC_UNSPECIFIED = 2, - AVCOL_SPC_FCC = 4, - AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 - AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above - AVCOL_SPC_SMPTE240M = 7, - AVCOL_SPC_YCOCG = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16 - AVCOL_SPC_NB , ///< Not part of ABI + AVCOL_SPC_RGB = 0, + AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B + AVCOL_SPC_UNSPECIFIED = 2, + AVCOL_SPC_FCC = 4, + AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 + AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above + AVCOL_SPC_SMPTE240M = 7, + AVCOL_SPC_YCOCG = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16 + AVCOL_SPC_BT2020_NCL = 9, ///< ITU-R BT2020 non-constant luminance system + AVCOL_SPC_BT2020_CL = 10, ///< ITU-R BT2020 constant luminance system + AVCOL_SPC_NB , ///< Not part of ABI }; #define AVCOL_SPC_YCGCO AVCOL_SPC_YCOCG @@ -50,11 +58,40 @@ enum AVColorRange{ AVCOL_RANGE_NB , ///< Not part of ABI }; + +/** + * @defgroup lavu_frame AVFrame + * @ingroup lavu_data + * + * @{ + * AVFrame is an abstraction for reference-counted raw 
multimedia data. + */ + enum AVFrameSideDataType { /** * The data is the AVPanScan struct defined in libavcodec. */ AV_FRAME_DATA_PANSCAN, + /** + * ATSC A53 Part 4 Closed Captions. + * A53 CC bitstream is stored as uint8_t in AVFrameSideData.data. + * The number of bytes of CC data is AVFrameSideData.size. + */ + AV_FRAME_DATA_A53_CC, + /** + * Stereoscopic 3d metadata. + * The data is the AVStereo3D struct defined in libavutil/stereo3d.h. + */ + AV_FRAME_DATA_STEREO3D, + /** + * The data is the AVMatrixEncoding enum defined in libavutil/channel_layout.h. + */ + AV_FRAME_DATA_MATRIXENCODING, + /** + * Metadata relevant to a downmix procedure. + * The data is the AVDownmixInfo struct defined in libavutil/downmix_info.h. + */ + AV_FRAME_DATA_DOWNMIX_INFO, }; typedef struct AVFrameSideData { @@ -117,6 +154,9 @@ typedef struct AVFrame { * preference, this is 16 or 32 for modern desktop CPUs. * Some code requires such alignment other code can be slower without * correct alignment, for yet other it makes no difference. + * + * @note The linesize may be larger than the size of usable data -- there + * may be extra padding present for performance reasons. */ int linesize[AV_NUM_DATA_POINTERS]; @@ -383,6 +423,26 @@ typedef struct AVFrame { AVFrameSideData **side_data; int nb_side_data; +/** + * @defgroup lavu_frame_flags AV_FRAME_FLAGS + * Flags describing additional frame properties. + * + * @{ + */ + +/** + * The frame data may be corrupted, e.g. due to decoding errors. 
+ */ +#define AV_FRAME_FLAG_CORRUPT (1 << 0) +/** + * @} + */ + + /** + * Frame flags, a combination of @ref lavu_frame_flags + */ + int flags; + /** * frame timestamp estimated using various heuristics, in stream time base * Code outside libavcodec should access this field using: @@ -508,6 +568,12 @@ void av_frame_set_colorspace(AVFrame *frame, enum AVColorSpace val); enum AVColorRange av_frame_get_color_range(const AVFrame *frame); void av_frame_set_color_range(AVFrame *frame, enum AVColorRange val); +/** + * Get the name of a colorspace. + * @return a static string identifying the colorspace; can be NULL. + */ +const char *av_get_colorspace_name(enum AVColorSpace val); + /** * Allocate an AVFrame and set its fields to default values. The resulting * struct must be freed using av_frame_free(). @@ -530,7 +596,7 @@ AVFrame *av_frame_alloc(void); void av_frame_free(AVFrame **frame); /** - * Setup a new reference to the data described by an given frame. + * Set up a new reference to the data described by the source frame. * * Copy frame properties from src to dst and create a new reference for each * AVBufferRef from src. @@ -540,7 +606,7 @@ void av_frame_free(AVFrame **frame); * * @return 0 on success, a negative AVERROR on error */ -int av_frame_ref(AVFrame *dst, AVFrame *src); +int av_frame_ref(AVFrame *dst, const AVFrame *src); /** * Create a new frame that references the same data as src. @@ -549,7 +615,7 @@ int av_frame_ref(AVFrame *dst, AVFrame *src); * * @return newly created AVFrame on success, NULL on error. */ -AVFrame *av_frame_clone(AVFrame *src); +AVFrame *av_frame_clone(const AVFrame *src); /** * Unreference all the buffers referenced by frame and reset the frame fields. @@ -607,6 +673,19 @@ int av_frame_is_writable(AVFrame *frame); */ int av_frame_make_writable(AVFrame *frame); +/** + * Copy the frame data from src to dst. 
+ * + * This function does not allocate anything, dst must be already initialized and + * allocated with the same parameters as src. + * + * This function only copies the frame data (i.e. the contents of the data / + * extended data arrays), not any other properties. + * + * @return >= 0 on success, a negative AVERROR on error. + */ +int av_frame_copy(AVFrame *dst, const AVFrame *src); + /** * Copy only "metadata" fields from src to dst. * @@ -644,7 +723,11 @@ AVFrameSideData *av_frame_new_side_data(AVFrame *frame, * @return a pointer to the side data of a given type on success, NULL if there * is no side data with such type in this frame. */ -AVFrameSideData *av_frame_get_side_data(AVFrame *frame, +AVFrameSideData *av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type); +/** + * @} + */ + #endif /* AVUTIL_FRAME_H */ diff --git a/dependencies64/ffmpeg/include/libavutil/intfloat.h b/dependencies64/ffmpeg/include/libavutil/intfloat.h index 38d26ad87..fe3d7ec4a 100644 --- a/dependencies64/ffmpeg/include/libavutil/intfloat.h +++ b/dependencies64/ffmpeg/include/libavutil/intfloat.h @@ -1,20 +1,20 @@ /* * Copyright (c) 2011 Mans Rullgard * - * This file is part of Libav. + * This file is part of FFmpeg. * - * Libav is free software; you can redistribute it and/or + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * Libav is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ diff --git a/dependencies64/ffmpeg/include/libavutil/intfloat_readwrite.h b/dependencies64/ffmpeg/include/libavutil/intfloat_readwrite.h index 9709f4dae..1d79e3e8a 100644 --- a/dependencies64/ffmpeg/include/libavutil/intfloat_readwrite.h +++ b/dependencies64/ffmpeg/include/libavutil/intfloat_readwrite.h @@ -22,8 +22,11 @@ #define AVUTIL_INTFLOAT_READWRITE_H #include + #include "attributes.h" +#include "version.h" +#if FF_API_INTFLOAT /* IEEE 80 bits extended float */ typedef struct AVExtFloat { uint8_t exponent[2]; @@ -36,5 +39,6 @@ attribute_deprecated double av_ext2dbl(const AVExtFloat ext) av_const; attribute_deprecated int64_t av_dbl2int(double d) av_const; attribute_deprecated int32_t av_flt2int(float d) av_const; attribute_deprecated AVExtFloat av_dbl2ext(double d) av_const; +#endif /* FF_API_INTFLOAT */ #endif /* AVUTIL_INTFLOAT_READWRITE_H */ diff --git a/dependencies64/ffmpeg/include/libavutil/log.h b/dependencies64/ffmpeg/include/libavutil/log.h index 7ea95fa50..5fb476f94 100644 --- a/dependencies64/ffmpeg/include/libavutil/log.h +++ b/dependencies64/ffmpeg/include/libavutil/log.h @@ -125,8 +125,19 @@ typedef struct AVClass { int (*query_ranges)(struct AVOptionRanges **, void *obj, const char *key, int flags); } AVClass; -/* av_log API */ +/** + * @addtogroup lavu_log + * + * @{ + * + * @defgroup lavu_log_constants Logging Constants + * + * @{ + */ +/** + * Print no output. + */ #define AV_LOG_QUIET -8 /** @@ -153,7 +164,14 @@ typedef struct AVClass { */ #define AV_LOG_WARNING 24 +/** + * Standard information. + */ #define AV_LOG_INFO 32 + +/** + * Detailed information. 
+ */ #define AV_LOG_VERBOSE 40 /** @@ -163,27 +181,97 @@ typedef struct AVClass { #define AV_LOG_MAX_OFFSET (AV_LOG_DEBUG - AV_LOG_QUIET) +/** + * @} + */ + /** * Send the specified message to the log if the level is less than or equal * to the current av_log_level. By default, all logging messages are sent to - * stderr. This behavior can be altered by setting a different av_vlog callback + * stderr. This behavior can be altered by setting a different logging callback * function. + * @see av_log_set_callback * * @param avcl A pointer to an arbitrary struct of which the first field is a - * pointer to an AVClass struct. - * @param level The importance level of the message, lower values signifying - * higher importance. + * pointer to an AVClass struct. + * @param level The importance level of the message expressed using a @ref + * lavu_log_constants "Logging Constant". * @param fmt The format string (printf-compatible) that specifies how - * subsequent arguments are converted to output. - * @see av_vlog + * subsequent arguments are converted to output. */ void av_log(void *avcl, int level, const char *fmt, ...) av_printf_format(3, 4); -void av_vlog(void *avcl, int level, const char *fmt, va_list); + +/** + * Send the specified message to the log if the level is less than or equal + * to the current av_log_level. By default, all logging messages are sent to + * stderr. This behavior can be altered by setting a different logging callback + * function. + * @see av_log_set_callback + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message expressed using a @ref + * lavu_log_constants "Logging Constant". + * @param fmt The format string (printf-compatible) that specifies how + * subsequent arguments are converted to output. + * @param vl The arguments referenced by the format string. 
+ */ +void av_vlog(void *avcl, int level, const char *fmt, va_list vl); + +/** + * Get the current log level + * + * @see lavu_log_constants + * + * @return Current log level + */ int av_log_get_level(void); -void av_log_set_level(int); -void av_log_set_callback(void (*)(void*, int, const char*, va_list)); -void av_log_default_callback(void* ptr, int level, const char* fmt, va_list vl); + +/** + * Set the log level + * + * @see lavu_log_constants + * + * @param level Logging level + */ +void av_log_set_level(int level); + +/** + * Set the logging callback + * + * @note The callback must be thread safe, even if the application does not use + * threads itself as some codecs are multithreaded. + * + * @see av_log_default_callback + * + * @param callback A logging function with a compatible signature. + */ +void av_log_set_callback(void (*callback)(void*, int, const char*, va_list)); + +/** + * Default logging callback + * + * It prints the message to stderr, optionally colorizing it. + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message expressed using a @ref + * lavu_log_constants "Logging Constant". + * @param fmt The format string (printf-compatible) that specifies how + * subsequent arguments are converted to output. + * @param vl The arguments referenced by the format string. 
+ */ +void av_log_default_callback(void *avcl, int level, const char *fmt, + va_list vl); + +/** + * Return the context name + * + * @param ctx The AVClass context + * + * @return The AVClass class_name + */ const char* av_default_item_name(void* ctx); AVClassCategory av_default_get_category(void *ptr); @@ -219,4 +307,8 @@ void av_log_format_line(void *ptr, int level, const char *fmt, va_list vl, #define AV_LOG_SKIP_REPEATED 1 void av_log_set_flags(int arg); +/** + * @} + */ + #endif /* AVUTIL_LOG_H */ diff --git a/dependencies64/ffmpeg/include/libavutil/macros.h b/dependencies64/ffmpeg/include/libavutil/macros.h new file mode 100644 index 000000000..446532377 --- /dev/null +++ b/dependencies64/ffmpeg/include/libavutil/macros.h @@ -0,0 +1,48 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu + * Utility Preprocessor macros + */ + +#ifndef AVUTIL_MACROS_H +#define AVUTIL_MACROS_H + +/** + * @addtogroup preproc_misc Preprocessor String Macros + * + * String manipulation macros + * + * @{ + */ + +#define AV_STRINGIFY(s) AV_TOSTRING(s) +#define AV_TOSTRING(s) #s + +#define AV_GLUE(a, b) a ## b +#define AV_JOIN(a, b) AV_GLUE(a, b) + +/** + * @} + */ + +#define AV_PRAGMA(s) _Pragma(#s) + +#endif /* AVUTIL_MACROS_H */ diff --git a/dependencies64/ffmpeg/include/libavutil/mathematics.h b/dependencies64/ffmpeg/include/libavutil/mathematics.h index 71f039221..88739e80b 100644 --- a/dependencies64/ffmpeg/include/libavutil/mathematics.h +++ b/dependencies64/ffmpeg/include/libavutil/mathematics.h @@ -45,6 +45,9 @@ #ifndef M_PI #define M_PI 3.14159265358979323846 /* pi */ #endif +#ifndef M_PI_2 +#define M_PI_2 1.57079632679489661923 /* pi/2 */ +#endif #ifndef M_SQRT1_2 #define M_SQRT1_2 0.70710678118654752440 /* 1/sqrt(2) */ #endif @@ -133,14 +136,28 @@ int64_t av_compare_mod(uint64_t a, uint64_t b, uint64_t mod); * Rescale a timestamp while preserving known durations. * * @param in_ts Input timestamp - * @param in_tb Input timesbase + * @param in_tb Input timebase * @param fs_tb Duration and *last timebase * @param duration duration till the next call - * @param out_tb Output timesbase + * @param out_tb Output timebase */ int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb); /** + * Add a value to a timestamp. + * + * This function gurantees that when the same value is repeatly added that + * no accumulation of rounding errors occurs. 
+ * + * @param ts Input timestamp + * @param ts_tb Input timestamp timebase + * @param inc value to add to ts + * @param inc_tb inc timebase + */ +int64_t av_add_stable(AVRational ts_tb, int64_t ts, AVRational inc_tb, int64_t inc); + + + /** * @} */ diff --git a/dependencies64/ffmpeg/include/libavutil/mem.h b/dependencies64/ffmpeg/include/libavutil/mem.h index fb23a6909..703ce8193 100644 --- a/dependencies64/ffmpeg/include/libavutil/mem.h +++ b/dependencies64/ffmpeg/include/libavutil/mem.h @@ -83,8 +83,7 @@ void *av_malloc(size_t size) av_malloc_attrib av_alloc_size(1); /** - * Helper function to allocate a block of size * nmemb bytes with - * using av_malloc() + * Allocate a block of size * nmemb bytes with av_malloc(). * @param nmemb Number of elements * @param size Size of the single element * @return Pointer to the allocated block, NULL if the block cannot @@ -93,7 +92,7 @@ void *av_malloc(size_t size) av_malloc_attrib av_alloc_size(1); */ av_alloc_size(1, 2) static inline void *av_malloc_array(size_t nmemb, size_t size) { - if (size <= 0 || nmemb >= INT_MAX / size) + if (!size || nmemb >= INT_MAX / size) return NULL; return av_malloc(nmemb * size); } @@ -103,11 +102,17 @@ av_alloc_size(1, 2) static inline void *av_malloc_array(size_t nmemb, size_t siz * If ptr is NULL and size > 0, allocate a new block. If * size is zero, free the memory block pointed to by ptr. * @param ptr Pointer to a memory block already allocated with - * av_malloc(z)() or av_realloc() or NULL. - * @param size Size in bytes for the memory block to be allocated or + * av_realloc() or NULL. + * @param size Size in bytes of the memory block to be allocated or * reallocated. - * @return Pointer to a newly reallocated block or NULL if the block + * @return Pointer to a newly-reallocated block or NULL if the block * cannot be reallocated or the function is used to free the memory block. 
+ * @warning Pointers originating from the av_malloc() family of functions must + * not be passed to av_realloc(). The former can be implemented using + * memalign() (or other functions), and there is no guarantee that + * pointers from such functions can be passed to realloc() at all. + * The situation is undefined according to POSIX and may crash with + * some libc implementations. * @see av_fast_realloc() */ void *av_realloc(void *ptr, size_t size) av_alloc_size(2); @@ -122,29 +127,60 @@ void *av_realloc(void *ptr, size_t size) av_alloc_size(2); */ void *av_realloc_f(void *ptr, size_t nelem, size_t elsize); +/** + * Allocate or reallocate a block of memory. + * If *ptr is NULL and size > 0, allocate a new block. If + * size is zero, free the memory block pointed to by ptr. + * @param ptr Pointer to a pointer to a memory block already allocated + * with av_realloc(), or pointer to a pointer to NULL. + * The pointer is updated on success, or freed on failure. + * @param size Size in bytes for the memory block to be allocated or + * reallocated + * @return Zero on success, an AVERROR error code on failure. + * @warning Pointers originating from the av_malloc() family of functions must + * not be passed to av_reallocp(). The former can be implemented using + * memalign() (or other functions), and there is no guarantee that + * pointers from such functions can be passed to realloc() at all. + * The situation is undefined according to POSIX and may crash with + * some libc implementations. + */ +int av_reallocp(void *ptr, size_t size); + /** * Allocate or reallocate an array. * If ptr is NULL and nmemb > 0, allocate a new block. If * nmemb is zero, free the memory block pointed to by ptr. * @param ptr Pointer to a memory block already allocated with - * av_malloc(z)() or av_realloc() or NULL. + * av_realloc() or NULL. 
* @param nmemb Number of elements * @param size Size of the single element - * @return Pointer to a newly reallocated block or NULL if the block + * @return Pointer to a newly-reallocated block or NULL if the block * cannot be reallocated or the function is used to free the memory block. + * @warning Pointers originating from the av_malloc() family of functions must + * not be passed to av_realloc(). The former can be implemented using + * memalign() (or other functions), and there is no guarantee that + * pointers from such functions can be passed to realloc() at all. + * The situation is undefined according to POSIX and may crash with + * some libc implementations. */ av_alloc_size(2, 3) void *av_realloc_array(void *ptr, size_t nmemb, size_t size); /** - * Allocate or reallocate an array. + * Allocate or reallocate an array through a pointer to a pointer. * If *ptr is NULL and nmemb > 0, allocate a new block. If * nmemb is zero, free the memory block pointed to by ptr. * @param ptr Pointer to a pointer to a memory block already allocated - * with av_malloc(z)() or av_realloc(), or pointer to a pointer to NULL. + * with av_realloc(), or pointer to a pointer to NULL. * The pointer is updated on success, or freed on failure. * @param nmemb Number of elements * @param size Size of the single element * @return Zero on success, an AVERROR error code on failure. + * @warning Pointers originating from the av_malloc() family of functions must + * not be passed to av_realloc(). The former can be implemented using + * memalign() (or other functions), and there is no guarantee that + * pointers from such functions can be passed to realloc() at all. + * The situation is undefined according to POSIX and may crash with + * some libc implementations. 
*/ av_alloc_size(2, 3) int av_reallocp_array(void *ptr, size_t nmemb, size_t size); @@ -181,8 +217,7 @@ void *av_mallocz(size_t size) av_malloc_attrib av_alloc_size(1); void *av_calloc(size_t nmemb, size_t size) av_malloc_attrib; /** - * Helper function to allocate a block of size * nmemb bytes with - * using av_mallocz() + * Allocate a block of size * nmemb bytes with av_mallocz(). * @param nmemb Number of elements * @param size Size of the single element * @return Pointer to the allocated block, NULL if the block cannot @@ -192,7 +227,7 @@ void *av_calloc(size_t nmemb, size_t size) av_malloc_attrib; */ av_alloc_size(1, 2) static inline void *av_mallocz_array(size_t nmemb, size_t size) { - if (size <= 0 || nmemb >= INT_MAX / size) + if (!size || nmemb >= INT_MAX / size) return NULL; return av_mallocz(nmemb * size); } @@ -200,7 +235,7 @@ av_alloc_size(1, 2) static inline void *av_mallocz_array(size_t nmemb, size_t si /** * Duplicate the string s. * @param s string to be duplicated - * @return Pointer to a newly allocated string containing a + * @return Pointer to a newly-allocated string containing a * copy of s or NULL if the string cannot be allocated. */ char *av_strdup(const char *s) av_malloc_attrib; @@ -290,7 +325,7 @@ static inline int av_size_mult(size_t a, size_t b, size_t *r) void av_max_alloc(size_t max); /** - * @brief deliberately overlapping memcpy implementation + * deliberately overlapping memcpy implementation * @param dst destination buffer * @param back how many bytes back we start (the initial size of the overlapping window), must be > 0 * @param cnt number of bytes to copy, must be >= 0 @@ -300,6 +335,27 @@ void av_max_alloc(size_t max); */ void av_memcpy_backptr(uint8_t *dst, int back, int cnt); +/** + * Reallocate the given block if it is not large enough, otherwise do nothing. 
+ * + * @see av_realloc + */ +void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size); + +/** + * Allocate a buffer, reusing the given one if large enough. + * + * Contrary to av_fast_realloc the current buffer contents might not be + * preserved and on error the old buffer is freed, thus no special + * handling to avoid memleaks is necessary. + * + * @param ptr pointer to pointer to already allocated buffer, overwritten with pointer to new buffer + * @param size size of the buffer *ptr points to + * @param min_size minimum size of *ptr buffer after returning, *ptr will be NULL and + * *size 0 if an error occurred. + */ +void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size); + /** * @} */ diff --git a/dependencies64/ffmpeg/include/libavutil/old_pix_fmts.h b/dependencies64/ffmpeg/include/libavutil/old_pix_fmts.h index 57b699220..cd1ed7c19 100644 --- a/dependencies64/ffmpeg/include/libavutil/old_pix_fmts.h +++ b/dependencies64/ffmpeg/include/libavutil/old_pix_fmts.h @@ -44,8 +44,10 @@ PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range +#if FF_API_XVMC PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing PIX_FMT_XVMC_MPEG2_IDCT, +#endif /* FF_API_XVMC */ PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1 PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3 PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb) @@ -67,11 +69,13 @@ PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples) PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range PIX_FMT_YUVA420P, ///< planar 
YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples) +#if FF_API_VDPAU PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers +#endif PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian @@ -95,7 +99,9 @@ PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian +#if FF_API_VDPAU PIX_FMT_VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers +#endif PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, 
(msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0 diff --git a/dependencies64/ffmpeg/include/libavutil/opt.h b/dependencies64/ffmpeg/include/libavutil/opt.h index 2344aa7b4..cd1b18e4c 100644 --- a/dependencies64/ffmpeg/include/libavutil/opt.h +++ b/dependencies64/ffmpeg/include/libavutil/opt.h @@ -64,7 +64,7 @@ * int bin_len; * } test_struct; * - * static const AVOption options[] = { + * static const AVOption test_options[] = { * { "test_int", "This is a test option of int type.", offsetof(test_struct, int_opt), * AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX }, * { "test_str", "This is a test option of string type.", offsetof(test_struct, str_opt), @@ -77,7 +77,7 @@ * static const AVClass test_class = { * .class_name = "test class", * .item_name = av_default_item_name, - * .option = options, + * .option = test_options, * .version = LIBAVUTIL_VERSION_INT, * }; * @endcode @@ -163,7 +163,7 @@ * * @subsection avoptions_implement_named_constants Named constants * It is possible to create named constants for options. Simply set the unit - * field of the option the constants should apply to to a string and + * field of the option the constants should apply to a string and * create the constants themselves as options of type AV_OPT_TYPE_CONST * with their unit field set to the same string. 
* Their default_val field should contain the value of the named @@ -233,6 +233,7 @@ enum AVOptionType{ AV_OPT_TYPE_VIDEO_RATE = MKBETAG('V','R','A','T'), ///< offset must point to AVRational AV_OPT_TYPE_DURATION = MKBETAG('D','U','R',' '), AV_OPT_TYPE_COLOR = MKBETAG('C','O','L','R'), + AV_OPT_TYPE_CHANNEL_LAYOUT = MKBETAG('C','H','L','A'), #if FF_API_OLD_AVOPTIONS FF_OPT_TYPE_FLAGS = 0, FF_OPT_TYPE_INT, @@ -281,10 +282,21 @@ typedef struct AVOption { int flags; #define AV_OPT_FLAG_ENCODING_PARAM 1 ///< a generic parameter which can be set by the user for muxing or encoding #define AV_OPT_FLAG_DECODING_PARAM 2 ///< a generic parameter which can be set by the user for demuxing or decoding +#if FF_API_OPT_TYPE_METADATA #define AV_OPT_FLAG_METADATA 4 ///< some data extracted or inserted into the file like title, comment, ... +#endif #define AV_OPT_FLAG_AUDIO_PARAM 8 #define AV_OPT_FLAG_VIDEO_PARAM 16 #define AV_OPT_FLAG_SUBTITLE_PARAM 32 +/** + * The option is inteded for exporting values to the caller. + */ +#define AV_OPT_FLAG_EXPORT 64 +/** + * The option may not be set through the AVOptions API, only read. + * This flag only makes sense when AV_OPT_FLAG_EXPORT is also set. + */ +#define AV_OPT_FLAG_READONLY 128 #define AV_OPT_FLAG_FILTERING_PARAM (1<<16) ///< a generic parameter which can be set by the user for filtering //FIXME think about enc-audio, ... style flags @@ -319,7 +331,7 @@ typedef struct AVOptionRanges { /** * Look for an option in obj. Look only for the options which * have the flags set as specified in mask and flags (that is, - * for which it is the case that opt->flags & mask == flags). + * for which it is the case that (opt->flags & mask) == flags). 
* * @param[in] obj a pointer to a struct whose first element is a * pointer to an AVClass @@ -657,6 +669,7 @@ int av_opt_set_image_size(void *obj, const char *name, int w, int h, int search_ int av_opt_set_pixel_fmt (void *obj, const char *name, enum AVPixelFormat fmt, int search_flags); int av_opt_set_sample_fmt(void *obj, const char *name, enum AVSampleFormat fmt, int search_flags); int av_opt_set_video_rate(void *obj, const char *name, AVRational val, int search_flags); +int av_opt_set_channel_layout(void *obj, const char *name, int64_t ch_layout, int search_flags); /** * Set a binary option to an integer list. @@ -687,10 +700,10 @@ int av_opt_set_video_rate(void *obj, const char *name, AVRational val, int searc * @param[in] search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN * is passed here, then the option may be found in a child of obj. * @param[out] out_val value of the option will be written here - * @return 0 on success, a negative error code otherwise + * @return >=0 on success, a negative error code otherwise */ /** - * @note the returned string will av_malloc()ed and must be av_free()ed by the caller + * @note the returned string will be av_malloc()ed and must be av_free()ed by the caller */ int av_opt_get (void *obj, const char *name, int search_flags, uint8_t **out_val); int av_opt_get_int (void *obj, const char *name, int search_flags, int64_t *out_val); @@ -700,6 +713,7 @@ int av_opt_get_image_size(void *obj, const char *name, int search_flags, int *w_ int av_opt_get_pixel_fmt (void *obj, const char *name, int search_flags, enum AVPixelFormat *out_fmt); int av_opt_get_sample_fmt(void *obj, const char *name, int search_flags, enum AVSampleFormat *out_fmt); int av_opt_get_video_rate(void *obj, const char *name, int search_flags, AVRational *out_val); +int av_opt_get_channel_layout(void *obj, const char *name, int search_flags, int64_t *ch_layout); /** * @} */ diff --git a/dependencies64/ffmpeg/include/libavutil/parseutils.h 
b/dependencies64/ffmpeg/include/libavutil/parseutils.h index 3eb35fc05..c80f0de3d 100644 --- a/dependencies64/ffmpeg/include/libavutil/parseutils.h +++ b/dependencies64/ffmpeg/include/libavutil/parseutils.h @@ -98,6 +98,19 @@ int av_parse_video_rate(AVRational *rate, const char *str); int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, void *log_ctx); +/** + * Get the name of a color from the internal table of hard-coded named + * colors. + * + * This function is meant to enumerate the color names recognized by + * av_parse_color(). + * + * @param color_idx index of the requested color, starting from 0 + * @param rgbp if not NULL, will point to a 3-elements array with the color value in RGB + * @return the color name string or NULL if color_idx is not in the array + */ +const char *av_get_known_color_name(int color_idx, const uint8_t **rgb); + /** * Parse timestr and return in *time a corresponding number of * microseconds. @@ -127,7 +140,7 @@ int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, * @endcode * @param duration flag which tells how to interpret timestr, if not * zero timestr is interpreted as a duration, otherwise as a date - * @return 0 in case of success, a negative value corresponding to an + * @return >= 0 in case of success, a negative value corresponding to an * AVERROR code otherwise */ int av_parse_time(int64_t *timeval, const char *timestr, int duration); diff --git a/dependencies64/ffmpeg/include/libavutil/pixdesc.h b/dependencies64/ffmpeg/include/libavutil/pixdesc.h index b7b96b7ce..e88bf9b92 100644 --- a/dependencies64/ffmpeg/include/libavutil/pixdesc.h +++ b/dependencies64/ffmpeg/include/libavutil/pixdesc.h @@ -125,7 +125,7 @@ typedef struct AVPixFmtDescriptor{ #if FF_API_PIX_FMT /** - * @deprecate use the AV_PIX_FMT_FLAG_* flags + * @deprecated use the AV_PIX_FMT_FLAG_* flags */ #define PIX_FMT_BE AV_PIX_FMT_FLAG_BE #define PIX_FMT_PAL AV_PIX_FMT_FLAG_PAL @@ -203,7 +203,7 @@ const char 
*av_get_pix_fmt_name(enum AVPixelFormat pix_fmt); /** * Print in buf the string corresponding to the pixel format with - * number pix_fmt, or an header if pix_fmt is negative. + * number pix_fmt, or a header if pix_fmt is negative. * * @param buf the buffer where to write the string * @param buf_size the size of buf diff --git a/dependencies64/ffmpeg/include/libavutil/pixfmt.h b/dependencies64/ffmpeg/include/libavutil/pixfmt.h index 6d1045e3b..9418c4a2b 100644 --- a/dependencies64/ffmpeg/include/libavutil/pixfmt.h +++ b/dependencies64/ffmpeg/include/libavutil/pixfmt.h @@ -28,7 +28,7 @@ */ #include "libavutil/avconfig.h" -#include "libavutil/version.h" +#include "version.h" #define AVPALETTE_SIZE 1024 #define AVPALETTE_COUNT 256 @@ -59,8 +59,8 @@ * allocating the picture. * * @note - * Make sure that all newly added big-endian formats have pix_fmt & 1 == 1 - * and that all newly added little-endian formats have pix_fmt & 1 == 0. + * Make sure that all newly added big-endian formats have (pix_fmt & 1) == 1 + * and that all newly added little-endian formats have (pix_fmt & 1) == 0. * This allows simpler detection of big vs little-endian. 
*/ enum AVPixelFormat { @@ -80,8 +80,11 @@ enum AVPixelFormat { AV_PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range AV_PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range AV_PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range +#if FF_API_XVMC AV_PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing AV_PIX_FMT_XVMC_MPEG2_IDCT, +#define AV_PIX_FMT_XVMC AV_PIX_FMT_XVMC_MPEG2_IDCT +#endif /* FF_API_XVMC */ AV_PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1 AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3 AV_PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb) @@ -103,11 +106,13 @@ enum AVPixelFormat { AV_PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples) AV_PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range AV_PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples) +#if FF_API_VDPAU AV_PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers AV_PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers AV_PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers AV_PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from 
headers AV_PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers +#endif AV_PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian AV_PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian @@ -131,7 +136,9 @@ enum AVPixelFormat { AV_PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian AV_PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian AV_PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian +#if FF_API_VDPAU AV_PIX_FMT_VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers +#endif AV_PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer AV_PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0 @@ -142,9 +149,11 @@ enum AVPixelFormat { AV_PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian AV_PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian - //the following 10 formats have the disadvantage of needing 1 format for each bit depth, thus - //If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16* with the bpp stored separately - //is better + /** + * The following 12 formats have the disadvantage of needing 1 format for each bit depth. + * Notice that each 9/10 bits sample is stored in 16 bits with extra padding. 
+ * If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16* with the bpp stored separately is better. + */ AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian AV_PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian @@ -204,6 +213,9 @@ enum AVPixelFormat { AV_PIX_FMT_XYZ12LE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0 AV_PIX_FMT_XYZ12BE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0 + AV_PIX_FMT_NV16, ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples) + AV_PIX_FMT_NV20LE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_NV20BE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian #ifndef AV_PIX_FMT_ABI_GIT_MASTER AV_PIX_FMT_RGBA64BE=0x123, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian @@ -238,6 +250,23 @@ enum AVPixelFormat { AV_PIX_FMT_GBRAP16BE, ///< planar GBRA 4:4:4:4 64bpp, big-endian AV_PIX_FMT_GBRAP16LE, ///< planar GBRA 4:4:4:4 64bpp, little-endian AV_PIX_FMT_YUVJ411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of PIX_FMT_YUV411P and setting color_range + + AV_PIX_FMT_BAYER_BGGR8, ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_RGGB8, ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_GBRG8, ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_GRBG8, ///< bayer, GRGR..(odd 
line), BGBG..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian */ +#if !FF_API_XVMC + AV_PIX_FMT_XVMC,///< XVideo Motion Acceleration via common packet passing +#endif /* !FF_API_XVMC */ + AV_PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions #if FF_API_PIX_FMT @@ -300,6 +329,13 @@ enum AVPixelFormat { #define AV_PIX_FMT_GBRP12 AV_PIX_FMT_NE(GBRP12BE, GBRP12LE) #define AV_PIX_FMT_GBRP14 AV_PIX_FMT_NE(GBRP14BE, GBRP14LE) #define AV_PIX_FMT_GBRP16 AV_PIX_FMT_NE(GBRP16BE, GBRP16LE) +#define AV_PIX_FMT_GBRAP16 AV_PIX_FMT_NE(GBRAP16BE, GBRAP16LE) + +#define AV_PIX_FMT_BAYER_BGGR16 AV_PIX_FMT_NE(BAYER_BGGR16BE, BAYER_BGGR16LE) +#define AV_PIX_FMT_BAYER_RGGB16 AV_PIX_FMT_NE(BAYER_RGGB16BE, BAYER_RGGB16LE) +#define AV_PIX_FMT_BAYER_GBRG16 AV_PIX_FMT_NE(BAYER_GBRG16BE, BAYER_GBRG16LE) +#define AV_PIX_FMT_BAYER_GRBG16 AV_PIX_FMT_NE(BAYER_GRBG16BE, BAYER_GRBG16LE) + #define AV_PIX_FMT_YUVA420P9 AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE) #define AV_PIX_FMT_YUVA422P9 AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE) @@ -312,6 +348,7 @@ enum 
AVPixelFormat { #define AV_PIX_FMT_YUVA444P16 AV_PIX_FMT_NE(YUVA444P16BE, YUVA444P16LE) #define AV_PIX_FMT_XYZ12 AV_PIX_FMT_NE(XYZ12BE, XYZ12LE) +#define AV_PIX_FMT_NV20 AV_PIX_FMT_NE(NV20BE, NV20LE) #if FF_API_PIX_FMT #define PixelFormat AVPixelFormat diff --git a/dependencies64/ffmpeg/include/libavutil/rational.h b/dependencies64/ffmpeg/include/libavutil/rational.h index 417e29e57..7439701db 100644 --- a/dependencies64/ffmpeg/include/libavutil/rational.h +++ b/dependencies64/ffmpeg/include/libavutil/rational.h @@ -45,6 +45,17 @@ typedef struct AVRational{ int den; ///< denominator } AVRational; +/** + * Create a rational. + * Useful for compilers that do not support compound literals. + * @note The return value is not reduced. + */ +static inline AVRational av_make_q(int num, int den) +{ + AVRational r = { num, den }; + return r; +} + /** * Compare two rationals. * @param a first rational @@ -55,7 +66,7 @@ typedef struct AVRational{ static inline int av_cmp_q(AVRational a, AVRational b){ const int64_t tmp= a.num * (int64_t)b.den - b.num * (int64_t)a.den; - if(tmp) return ((tmp ^ a.den ^ b.den)>>63)|1; + if(tmp) return (int)((tmp ^ a.den ^ b.den)>>63)|1; else if(b.den && a.den) return 0; else if(a.num && b.num) return (a.num>>31) - (b.num>>31); else return INT_MIN; diff --git a/dependencies64/ffmpeg/include/libavutil/stereo3d.h b/dependencies64/ffmpeg/include/libavutil/stereo3d.h new file mode 100644 index 000000000..8829da9ff --- /dev/null +++ b/dependencies64/ffmpeg/include/libavutil/stereo3d.h @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2013 Vittorio Giovara + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +#include "frame.h" + +/** + * List of possible 3D Types + */ +enum AVStereo3DType { + /** + * Video is not stereoscopic (and metadata has to be there). + */ + AV_STEREO3D_2D, + + /** + * Views are next to each other. + * + * LLLLRRRR + * LLLLRRRR + * LLLLRRRR + * ... + */ + AV_STEREO3D_SIDEBYSIDE, + + /** + * Views are on top of each other. + * + * LLLLLLLL + * LLLLLLLL + * RRRRRRRR + * RRRRRRRR + */ + AV_STEREO3D_TOPBOTTOM, + + /** + * Views are alternated temporally. + * + * frame0 frame1 frame2 ... + * LLLLLLLL RRRRRRRR LLLLLLLL + * LLLLLLLL RRRRRRRR LLLLLLLL + * LLLLLLLL RRRRRRRR LLLLLLLL + * ... ... ... + */ + AV_STEREO3D_FRAMESEQUENCE, + + /** + * Views are packed in a checkerboard-like structure per pixel. + * + * LRLRLRLR + * RLRLRLRL + * LRLRLRLR + * ... + */ + AV_STEREO3D_CHECKERBOARD, + + /** + * Views are next to each other, but when upscaling + * apply a checkerboard pattern. + * + * LLLLRRRR L L L L R R R R + * LLLLRRRR => L L L L R R R R + * LLLLRRRR L L L L R R R R + * LLLLRRRR L L L L R R R R + */ + AV_STEREO3D_SIDEBYSIDE_QUINCUNX, + + /** + * Views are packed per line, as if interlaced. + * + * LLLLLLLL + * RRRRRRRR + * LLLLLLLL + * ... + */ + AV_STEREO3D_LINES, + + /** + * Views are packed per column. + * + * LRLRLRLR + * LRLRLRLR + * LRLRLRLR + * ... + */ + AV_STEREO3D_COLUMNS, +}; + + +/** + * Inverted views, Right/Bottom represents the left view. 
+ */ +#define AV_STEREO3D_FLAG_INVERT (1 << 0) + +/** + * Stereo 3D type: this structure describes how two videos are packed + * within a single video surface, with additional information as needed. + * + * @note The struct must be allocated with av_stereo3d_alloc() and + * its size is not a part of the public ABI. + */ +typedef struct AVStereo3D { + /** + * How views are packed within the video. + */ + enum AVStereo3DType type; + + /** + * Additional information about the frame packing. + */ + int flags; +} AVStereo3D; + +/** + * Allocate an AVStereo3D structure and set its fields to default values. + * The resulting struct can be freed using av_freep(). + * + * @return An AVStereo3D filled with default values or NULL on failure. + */ +AVStereo3D *av_stereo3d_alloc(void); + +/** + * Allocate a complete AVFrameSideData and add it to the frame. + * + * @param frame The frame which side data is added to. + * + * @return The AVStereo3D structure to be filled by caller. + */ +AVStereo3D *av_stereo3d_create_side_data(AVFrame *frame); diff --git a/dependencies64/ffmpeg/include/libavutil/timestamp.h b/dependencies64/ffmpeg/include/libavutil/timestamp.h index f63a08c57..f010a7ee3 100644 --- a/dependencies64/ffmpeg/include/libavutil/timestamp.h +++ b/dependencies64/ffmpeg/include/libavutil/timestamp.h @@ -26,6 +26,10 @@ #include "common.h" +#if defined(__cplusplus) && !defined(__STDC_FORMAT_MACROS) && !defined(PRId64) +#error missing -D__STDC_FORMAT_MACROS / #define __STDC_FORMAT_MACROS +#endif + #define AV_TS_MAX_STRING_SIZE 32 /** diff --git a/dependencies64/ffmpeg/include/libavutil/version.h b/dependencies64/ffmpeg/include/libavutil/version.h index dafbeffca..7f093cd58 100644 --- a/dependencies64/ffmpeg/include/libavutil/version.h +++ b/dependencies64/ffmpeg/include/libavutil/version.h @@ -21,25 +21,7 @@ #ifndef AVUTIL_VERSION_H #define AVUTIL_VERSION_H -/** - * @defgroup preproc_misc Preprocessor String Macros - * - * String manipulation macros - * - * @{ - */ - 
-#define AV_STRINGIFY(s) AV_TOSTRING(s) -#define AV_TOSTRING(s) #s - -#define AV_GLUE(a, b) a ## b -#define AV_JOIN(a, b) AV_GLUE(a, b) - -#define AV_PRAGMA(s) _Pragma(#s) - -/** - * @} - */ +#include "macros.h" /** * @defgroup version_utils Library Version Macros @@ -58,7 +40,6 @@ * @} */ - /** * @file * @ingroup lavu @@ -75,7 +56,7 @@ */ #define LIBAVUTIL_VERSION_MAJOR 52 -#define LIBAVUTIL_VERSION_MINOR 40 +#define LIBAVUTIL_VERSION_MINOR 66 #define LIBAVUTIL_VERSION_MICRO 100 #define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \ @@ -100,40 +81,61 @@ */ #ifndef FF_API_GET_BITS_PER_SAMPLE_FMT -#define FF_API_GET_BITS_PER_SAMPLE_FMT (LIBAVUTIL_VERSION_MAJOR < 53) +#define FF_API_GET_BITS_PER_SAMPLE_FMT (LIBAVUTIL_VERSION_MAJOR < 54) #endif #ifndef FF_API_FIND_OPT -#define FF_API_FIND_OPT (LIBAVUTIL_VERSION_MAJOR < 53) +#define FF_API_FIND_OPT (LIBAVUTIL_VERSION_MAJOR < 54) #endif #ifndef FF_API_OLD_AVOPTIONS -#define FF_API_OLD_AVOPTIONS (LIBAVUTIL_VERSION_MAJOR < 53) +#define FF_API_OLD_AVOPTIONS (LIBAVUTIL_VERSION_MAJOR < 54) #endif #ifndef FF_API_PIX_FMT -#define FF_API_PIX_FMT (LIBAVUTIL_VERSION_MAJOR < 53) +#define FF_API_PIX_FMT (LIBAVUTIL_VERSION_MAJOR < 54) #endif #ifndef FF_API_CONTEXT_SIZE -#define FF_API_CONTEXT_SIZE (LIBAVUTIL_VERSION_MAJOR < 53) +#define FF_API_CONTEXT_SIZE (LIBAVUTIL_VERSION_MAJOR < 54) #endif #ifndef FF_API_PIX_FMT_DESC -#define FF_API_PIX_FMT_DESC (LIBAVUTIL_VERSION_MAJOR < 53) +#define FF_API_PIX_FMT_DESC (LIBAVUTIL_VERSION_MAJOR < 54) #endif #ifndef FF_API_AV_REVERSE -#define FF_API_AV_REVERSE (LIBAVUTIL_VERSION_MAJOR < 53) +#define FF_API_AV_REVERSE (LIBAVUTIL_VERSION_MAJOR < 54) #endif #ifndef FF_API_AUDIOCONVERT -#define FF_API_AUDIOCONVERT (LIBAVUTIL_VERSION_MAJOR < 53) +#define FF_API_AUDIOCONVERT (LIBAVUTIL_VERSION_MAJOR < 54) #endif #ifndef FF_API_CPU_FLAG_MMX2 -#define FF_API_CPU_FLAG_MMX2 (LIBAVUTIL_VERSION_MAJOR < 53) +#define FF_API_CPU_FLAG_MMX2 (LIBAVUTIL_VERSION_MAJOR < 54) #endif #ifndef 
FF_API_SAMPLES_UTILS_RETURN_ZERO -#define FF_API_SAMPLES_UTILS_RETURN_ZERO (LIBAVUTIL_VERSION_MAJOR < 53) +#define FF_API_SAMPLES_UTILS_RETURN_ZERO (LIBAVUTIL_VERSION_MAJOR < 54) #endif #ifndef FF_API_LLS_PRIVATE -#define FF_API_LLS_PRIVATE (LIBAVUTIL_VERSION_MAJOR < 53) +#define FF_API_LLS_PRIVATE (LIBAVUTIL_VERSION_MAJOR < 54) +#endif +#ifndef FF_API_LLS1 +#define FF_API_LLS1 (LIBAVUTIL_VERSION_MAJOR < 54) #endif #ifndef FF_API_AVFRAME_LAVC -#define FF_API_AVFRAME_LAVC (LIBAVUTIL_VERSION_MAJOR < 53) +#define FF_API_AVFRAME_LAVC (LIBAVUTIL_VERSION_MAJOR < 54) +#endif +#ifndef FF_API_VDPAU +#define FF_API_VDPAU (LIBAVUTIL_VERSION_MAJOR < 54) +#endif +#ifndef FF_API_GET_CHANNEL_LAYOUT_COMPAT +#define FF_API_GET_CHANNEL_LAYOUT_COMPAT (LIBAVUTIL_VERSION_MAJOR < 54) +#endif +#ifndef FF_API_OLD_OPENCL +#define FF_API_OLD_OPENCL (LIBAVUTIL_VERSION_MAJOR < 54) +#endif +#ifndef FF_API_XVMC +#define FF_API_XVMC (LIBAVUTIL_VERSION_MAJOR < 54) +#endif +#ifndef FF_API_INTFLOAT +#define FF_API_INTFLOAT (LIBAVUTIL_VERSION_MAJOR < 54) +#endif +#ifndef FF_API_OPT_TYPE_METADATA +#define FF_API_OPT_TYPE_METADATA (LIBAVUTIL_VERSION_MAJOR < 54) #endif /** diff --git a/dependencies64/ffmpeg/include/libavutil/xtea.h b/dependencies64/ffmpeg/include/libavutil/xtea.h index 0899c92bc..6f1e71e34 100644 --- a/dependencies64/ffmpeg/include/libavutil/xtea.h +++ b/dependencies64/ffmpeg/include/libavutil/xtea.h @@ -25,6 +25,8 @@ #include /** + * @file + * @brief Public header for libavutil XTEA algorithm * @defgroup lavu_xtea XTEA * @ingroup lavu_crypto * @{ diff --git a/dependencies64/ffmpeg/include/libswresample/swresample.h b/dependencies64/ffmpeg/include/libswresample/swresample.h index 95e8a5a09..4ba008e72 100644 --- a/dependencies64/ffmpeg/include/libswresample/swresample.h +++ b/dependencies64/ffmpeg/include/libswresample/swresample.h @@ -44,8 +44,8 @@ * matrix): * @code * SwrContext *swr = swr_alloc(); - * av_opt_set_int(swr, "in_channel_layout", AV_CH_LAYOUT_5POINT1, 0); - * 
av_opt_set_int(swr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0); + * av_opt_set_channel_layout(swr, "in_channel_layout", AV_CH_LAYOUT_5POINT1, 0); + * av_opt_set_channel_layout(swr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0); * av_opt_set_int(swr, "in_sample_rate", 48000, 0); * av_opt_set_int(swr, "out_sample_rate", 44100, 0); * av_opt_set_sample_fmt(swr, "in_sample_fmt", AV_SAMPLE_FMT_FLTP, 0); @@ -84,8 +84,8 @@ * input, in_samples); * handle_output(output, out_samples); * av_freep(&output); - * } - * @endcode + * } + * @endcode * * When the conversion is finished, the conversion * context and everything associated with it must be freed with swr_free(). @@ -165,6 +165,13 @@ struct SwrContext *swr_alloc(void); */ int swr_init(struct SwrContext *s); +/** + * Check whether an swr context has been initialized or not. + * + * @return positive if it has been initialized, 0 if not initialized + */ +int swr_is_initialized(struct SwrContext *s); + /** * Allocate SwrContext if needed and set/reset common parameters. 
* diff --git a/dependencies64/ffmpeg/include/libswresample/version.h b/dependencies64/ffmpeg/include/libswresample/version.h index 8272b763b..3a9287519 100644 --- a/dependencies64/ffmpeg/include/libswresample/version.h +++ b/dependencies64/ffmpeg/include/libswresample/version.h @@ -29,8 +29,8 @@ #include "libavutil/avutil.h" #define LIBSWRESAMPLE_VERSION_MAJOR 0 -#define LIBSWRESAMPLE_VERSION_MINOR 17 -#define LIBSWRESAMPLE_VERSION_MICRO 103 +#define LIBSWRESAMPLE_VERSION_MINOR 18 +#define LIBSWRESAMPLE_VERSION_MICRO 100 #define LIBSWRESAMPLE_VERSION_INT AV_VERSION_INT(LIBSWRESAMPLE_VERSION_MAJOR, \ LIBSWRESAMPLE_VERSION_MINOR, \ diff --git a/dependencies64/ffmpeg/include/libswscale/swscale.h b/dependencies64/ffmpeg/include/libswscale/swscale.h index 42702b7aa..25c8b5e98 100644 --- a/dependencies64/ffmpeg/include/libswscale/swscale.h +++ b/dependencies64/ffmpeg/include/libswscale/swscale.h @@ -23,15 +23,10 @@ /** * @file - * @ingroup lsws + * @ingroup libsws * external API header */ -/** - * @defgroup lsws Libswscale - * @{ - */ - #include #include "libavutil/avutil.h" @@ -40,6 +35,9 @@ #include "version.h" /** + * @defgroup libsws Color conversion and scaling + * @{ + * * Return the LIBSWSCALE_VERSION_INT constant. 
*/ unsigned swscale_version(void); diff --git a/dependencies64/ffmpeg/include/libswscale/version.h b/dependencies64/ffmpeg/include/libswscale/version.h index 1c4520926..6f82d3d05 100644 --- a/dependencies64/ffmpeg/include/libswscale/version.h +++ b/dependencies64/ffmpeg/include/libswscale/version.h @@ -24,11 +24,11 @@ * swscale version macros */ -#include "libavutil/avutil.h" +#include "libavutil/version.h" #define LIBSWSCALE_VERSION_MAJOR 2 -#define LIBSWSCALE_VERSION_MINOR 4 -#define LIBSWSCALE_VERSION_MICRO 100 +#define LIBSWSCALE_VERSION_MINOR 5 +#define LIBSWSCALE_VERSION_MICRO 102 #define LIBSWSCALE_VERSION_INT AV_VERSION_INT(LIBSWSCALE_VERSION_MAJOR, \ LIBSWSCALE_VERSION_MINOR, \ diff --git a/dependencies64/ffmpeg/lib/avcodec-55.def b/dependencies64/ffmpeg/lib/avcodec-55.def new file mode 100644 index 000000000..4faa82a02 --- /dev/null +++ b/dependencies64/ffmpeg/lib/avcodec-55.def @@ -0,0 +1,290 @@ +EXPORTS + audio_resample + audio_resample_close + av_audio_convert + av_audio_convert_alloc + av_audio_convert_free + av_audio_resample_init + av_bitstream_filter_close + av_bitstream_filter_filter + av_bitstream_filter_init + av_bitstream_filter_next + av_codec_get_chroma_intra_matrix + av_codec_get_codec_descriptor + av_codec_get_lowres + av_codec_get_max_lowres + av_codec_get_pkt_timebase + av_codec_get_seek_preroll + av_codec_is_decoder + av_codec_is_encoder + av_codec_next + av_codec_set_chroma_intra_matrix + av_codec_set_codec_descriptor + av_codec_set_lowres + av_codec_set_pkt_timebase + av_codec_set_seek_preroll + av_copy_packet + av_copy_packet_side_data + av_dct_calc + av_dct_end + av_dct_init + av_destruct_packet + av_dup_packet + av_fast_malloc@LIBAVCODEC_55 + av_fast_padded_malloc + av_fast_padded_mallocz + av_fast_realloc@LIBAVCODEC_55 + av_fft_calc + av_fft_end + av_fft_init + av_fft_permute + av_free_packet + av_get_audio_frame_duration + av_get_bits_per_sample + av_get_codec_tag_string + av_get_exact_bits_per_sample + av_get_pcm_codec + 
av_get_profile_name + av_grow_packet + av_hwaccel_next + av_imdct_calc + av_imdct_half + av_init_packet + av_lockmgr_register + av_log_ask_for_sample + av_log_missing_feature + av_mdct_calc + av_mdct_end + av_mdct_init + av_new_packet + av_packet_copy_props + av_packet_free_side_data + av_packet_from_data + av_packet_get_side_data + av_packet_merge_side_data + av_packet_move_ref + av_packet_new_side_data + av_packet_pack_dictionary + av_packet_ref + av_packet_shrink_side_data + av_packet_split_side_data + av_packet_unpack_dictionary + av_packet_unref + av_parser_change + av_parser_close + av_parser_init + av_parser_next + av_parser_parse2 + av_picture_copy + av_picture_crop + av_picture_pad + av_rdft_calc + av_rdft_end + av_rdft_init + av_register_bitstream_filter + av_register_codec_parser + av_register_hwaccel + av_resample + av_resample_close + av_resample_compensate + av_resample_init + av_shrink_packet + av_xiphlacing + available_bits + avcodec_align_dimensions + avcodec_align_dimensions2 + avcodec_alloc_context3 + avcodec_alloc_frame + avcodec_chroma_pos_to_enum + avcodec_close + avcodec_configuration + avcodec_copy_context + avcodec_decode_audio3 + avcodec_decode_audio4 + avcodec_decode_subtitle2 + avcodec_decode_video2 + avcodec_default_execute + avcodec_default_execute2 + avcodec_default_get_buffer + avcodec_default_get_buffer2 + avcodec_default_get_format + avcodec_default_reget_buffer + avcodec_default_release_buffer + avcodec_descriptor_get + avcodec_descriptor_get_by_name + avcodec_descriptor_next + avcodec_encode_audio + avcodec_encode_audio2 + avcodec_encode_subtitle + avcodec_encode_video + avcodec_encode_video2 + avcodec_enum_to_chroma_pos + avcodec_fill_audio_frame + avcodec_find_best_pix_fmt2 + avcodec_find_best_pix_fmt_of_2 + avcodec_find_best_pix_fmt_of_list + avcodec_find_decoder + avcodec_find_decoder_by_name + avcodec_find_encoder + avcodec_find_encoder_by_name + avcodec_flush_buffers + avcodec_free_frame + avcodec_get_chroma_sub_sample + 
avcodec_get_class + avcodec_get_context_defaults3 + avcodec_get_edge_width + avcodec_get_frame_class + avcodec_get_frame_defaults + avcodec_get_name + avcodec_get_pix_fmt_loss + avcodec_get_subtitle_rect_class + avcodec_get_type + avcodec_is_open + avcodec_license + avcodec_open2 + avcodec_pix_fmt_to_codec_tag + avcodec_register + avcodec_register_all + avcodec_set_dimensions + avcodec_string + avcodec_version + aver_isf_history + avpicture_alloc + avpicture_deinterlace + avpicture_fill + avpicture_free + avpicture_get_size + avpicture_layout + avpriv_aac_parse_header + avpriv_ac3_channel_layout_tab DATA + avpriv_ac3_parse_header + avpriv_ac3_parse_header2 + avpriv_adx_decode_header + avpriv_align_put_bits + avpriv_bprint_to_extradata + avpriv_color_frame + avpriv_copy_bits + avpriv_copy_pce_data + avpriv_dca_sample_rates DATA + avpriv_dirac_parse_sequence_header + avpriv_dnxhd_get_frame_size + avpriv_do_elbg + avpriv_dsputil_init + avpriv_dv_codec_profile + avpriv_dv_frame_profile + avpriv_dv_frame_profile2 + avpriv_find_pix_fmt + avpriv_find_start_code + avpriv_flac_is_extradata_valid + avpriv_flac_parse_block_header + avpriv_flac_parse_streaminfo + avpriv_h264_has_num_reorder_frames + avpriv_init_elbg + avpriv_lock_avformat + avpriv_mjpeg_bits_ac_chrominance DATA + avpriv_mjpeg_bits_ac_luminance DATA + avpriv_mjpeg_bits_dc_chrominance DATA + avpriv_mjpeg_bits_dc_luminance DATA + avpriv_mjpeg_val_ac_chrominance DATA + avpriv_mjpeg_val_ac_luminance DATA + avpriv_mjpeg_val_dc DATA + avpriv_mpa_bitrate_tab DATA + avpriv_mpa_decode_header + avpriv_mpa_decode_header2 + avpriv_mpa_freq_tab DATA + avpriv_mpeg4audio_get_config + avpriv_mpeg4audio_sample_rates DATA + avpriv_mpegaudio_decode_header + avpriv_put_string + avpriv_split_xiph_headers + avpriv_tak_parse_streaminfo + avpriv_toupper4 + avpriv_unlock_avformat + avpriv_vorbis_parse_extradata + avpriv_vorbis_parse_frame + avpriv_vorbis_parse_frame_flags + avpriv_vorbis_parse_reset + avsubtitle_free + dsputil_init + 
ff_aanscales DATA + ff_dct32_fixed + ff_dct32_float + ff_dct32_float_avx DATA + ff_dct32_float_sse DATA + ff_dct32_float_sse2 DATA + ff_dct_common_init + ff_dct_encode_init + ff_dct_encode_init_x86 + ff_dct_end + ff_dct_init + ff_dct_init_x86 + ff_dct_quantize_c + ff_dnxhd_cid_table DATA + ff_dnxhd_get_cid_table + ff_dsputil_init + ff_faandct + ff_faandct248 + ff_faanidct + ff_faanidct_add + ff_faanidct_put + ff_fdct248_islow_10 + ff_fdct248_islow_8 + ff_fdct_ifast + ff_fdct_ifast248 + ff_fdct_mmx + ff_fdct_mmxext + ff_fdct_sse2 + ff_fft_calc_avx DATA + ff_fft_calc_sse DATA + ff_fft_end + ff_fft_end_fixed + ff_fft_end_fixed_32 + ff_fft_init + ff_fft_init_fixed + ff_fft_init_fixed_32 + ff_fft_init_x86 + ff_fft_lut_init + ff_fft_permute_sse DATA + ff_idct_xvid_mmx + ff_idct_xvid_mmx_add + ff_idct_xvid_mmx_put + ff_idct_xvid_mmxext + ff_idct_xvid_mmxext_add + ff_idct_xvid_mmxext_put + ff_idct_xvid_sse2 + ff_idct_xvid_sse2_add + ff_idct_xvid_sse2_put + ff_jpeg_fdct_islow_10 + ff_jpeg_fdct_islow_8 + ff_mdct_calc_c + ff_mdct_calc_c_fixed + ff_mdct_calc_c_fixed_32 + ff_mdct_calcw_c + ff_mdct_end + ff_mdct_end_fixed + ff_mdct_end_fixed_32 + ff_mdct_init + ff_mdct_init_fixed + ff_mdct_init_fixed_32 + ff_mdct_win_fixed DATA + ff_mdct_win_float DATA + ff_prores_idct_put_10_sse2 DATA + ff_raw_pix_fmt_tags DATA + ff_rdft_end + ff_rdft_init + ff_simple_idct248_put + ff_simple_idct44_add + ff_simple_idct48_add + ff_simple_idct84_add + ff_simple_idct_10 + ff_simple_idct_12 + ff_simple_idct_8 + ff_simple_idct_add_10 + ff_simple_idct_add_12 + ff_simple_idct_add_8 + ff_simple_idct_add_mmx + ff_simple_idct_mmx + ff_simple_idct_put_10 + ff_simple_idct_put_12 + ff_simple_idct_put_8 + ff_simple_idct_put_mmx diff --git a/dependencies64/ffmpeg/lib/avcodec.lib b/dependencies64/ffmpeg/lib/avcodec.lib index 7087f20f4..7aa08b85a 100644 Binary files a/dependencies64/ffmpeg/lib/avcodec.lib and b/dependencies64/ffmpeg/lib/avcodec.lib differ diff --git a/dependencies64/ffmpeg/lib/avdevice-55.def 
b/dependencies64/ffmpeg/lib/avdevice-55.def new file mode 100644 index 000000000..1a0e8e42b --- /dev/null +++ b/dependencies64/ffmpeg/lib/avdevice-55.def @@ -0,0 +1,9 @@ +EXPORTS + avdevice_app_to_dev_control_message + avdevice_configuration + avdevice_dev_to_app_control_message + avdevice_free_list_devices + avdevice_license + avdevice_list_devices + avdevice_register_all + avdevice_version diff --git a/dependencies64/ffmpeg/lib/avdevice.lib b/dependencies64/ffmpeg/lib/avdevice.lib index 4c67a0f77..d326e82f0 100644 Binary files a/dependencies64/ffmpeg/lib/avdevice.lib and b/dependencies64/ffmpeg/lib/avdevice.lib differ diff --git a/dependencies64/ffmpeg/lib/avfilter-4.def b/dependencies64/ffmpeg/lib/avfilter-4.def new file mode 100644 index 000000000..50bade1cc --- /dev/null +++ b/dependencies64/ffmpeg/lib/avfilter-4.def @@ -0,0 +1,85 @@ +EXPORTS + av_2_vs_pixel_format + av_abuffersink_params_alloc + av_buffersink_get_buffer_ref + av_buffersink_get_frame + av_buffersink_get_frame_flags + av_buffersink_get_frame_rate + av_buffersink_get_samples + av_buffersink_params_alloc + av_buffersink_poll_frame + av_buffersink_read + av_buffersink_read_samples + av_buffersink_set_frame_size + av_buffersrc_add_frame + av_buffersrc_add_frame_flags + av_buffersrc_add_ref + av_buffersrc_buffer + av_buffersrc_get_nb_failed_requests + av_buffersrc_write_frame + av_filter_next + avfilter_add_matrix + avfilter_all_channel_layouts DATA + avfilter_config_links + avfilter_configuration + avfilter_copy_buf_props + avfilter_copy_buffer_ref_props + avfilter_copy_frame_props + avfilter_fill_frame_from_audio_buffer_ref + avfilter_fill_frame_from_buffer_ref + avfilter_fill_frame_from_video_buffer_ref + avfilter_free + avfilter_get_audio_buffer_ref_from_arrays + avfilter_get_audio_buffer_ref_from_arrays_channels + avfilter_get_audio_buffer_ref_from_frame + avfilter_get_buffer_ref_from_frame + avfilter_get_by_name + avfilter_get_class + avfilter_get_matrix + 
avfilter_get_video_buffer_ref_from_arrays + avfilter_get_video_buffer_ref_from_frame + avfilter_graph_add_filter + avfilter_graph_alloc + avfilter_graph_alloc_filter + avfilter_graph_config + avfilter_graph_create_filter + avfilter_graph_dump + avfilter_graph_free + avfilter_graph_get_filter + avfilter_graph_parse + avfilter_graph_parse2 + avfilter_graph_parse_ptr + avfilter_graph_queue_command + avfilter_graph_request_oldest + avfilter_graph_send_command + avfilter_graph_set_auto_convert + avfilter_init_dict + avfilter_init_filter + avfilter_init_str + avfilter_inout_alloc + avfilter_inout_free + avfilter_insert_filter + avfilter_license + avfilter_link + avfilter_link_free + avfilter_link_get_channels + avfilter_link_set_closed + avfilter_make_format64_list + avfilter_mul_matrix + avfilter_next + avfilter_open + avfilter_pad_count + avfilter_pad_get_name + avfilter_pad_get_type + avfilter_process_command + avfilter_ref_buffer + avfilter_ref_get_channels + avfilter_register + avfilter_register_all + avfilter_sub_matrix + avfilter_transform + avfilter_uninit + avfilter_unref_buffer + avfilter_unref_bufferp + avfilter_version + ff_default_query_formats diff --git a/dependencies64/ffmpeg/lib/avfilter.lib b/dependencies64/ffmpeg/lib/avfilter.lib index 6bd9ec4e5..4bbf8f898 100644 Binary files a/dependencies64/ffmpeg/lib/avfilter.lib and b/dependencies64/ffmpeg/lib/avfilter.lib differ diff --git a/dependencies64/ffmpeg/lib/avformat-55.def b/dependencies64/ffmpeg/lib/avformat-55.def new file mode 100644 index 000000000..6cc2bece7 --- /dev/null +++ b/dependencies64/ffmpeg/lib/avformat-55.def @@ -0,0 +1,163 @@ +EXPORTS + av_add_index_entry + av_append_packet + av_close_input_file + av_codec_get_id + av_codec_get_tag + av_codec_get_tag2 + av_convert_lang_to + av_demuxer_open + av_dump_format + av_filename_number_test + av_find_best_stream + av_find_default_stream_index + av_find_input_format + av_find_program_from_stream + av_find_stream_info + 
av_fmt_ctx_get_duration_estimation_method + av_format_get_audio_codec + av_format_get_control_message_cb + av_format_get_metadata_header_padding + av_format_get_opaque + av_format_get_probe_score + av_format_get_subtitle_codec + av_format_get_video_codec + av_format_set_audio_codec + av_format_set_control_message_cb + av_format_set_metadata_header_padding + av_format_set_opaque + av_format_set_subtitle_codec + av_format_set_video_codec + av_get_frame_filename + av_get_output_timestamp + av_get_packet + av_guess_codec + av_guess_format + av_guess_frame_rate + av_guess_sample_aspect_ratio + av_hex_dump + av_hex_dump_log + av_iformat_next + av_index_search_timestamp + av_interleaved_write_frame + av_interleaved_write_uncoded_frame + av_match_ext + av_new_program + av_new_stream + av_oformat_next + av_pkt_dump2 + av_pkt_dump_log2 + av_probe_input_buffer + av_probe_input_buffer2 + av_probe_input_format + av_probe_input_format2 + av_probe_input_format3 + av_read_frame + av_read_packet + av_read_pause + av_read_play + av_register_all + av_register_input_format + av_register_output_format + av_sdp_create + av_seek_frame + av_set_pts_info + av_stream_get_r_frame_rate + av_stream_set_r_frame_rate + av_url_split + av_write_frame + av_write_trailer + av_write_uncoded_frame + av_write_uncoded_frame_query + avformat_alloc_context + avformat_alloc_output_context + avformat_alloc_output_context2 + avformat_close_input + avformat_configuration + avformat_find_stream_info + avformat_free_context + avformat_get_class + avformat_get_mov_audio_tags + avformat_get_mov_video_tags + avformat_get_riff_audio_tags + avformat_get_riff_video_tags + avformat_license + avformat_match_stream_specifier + avformat_network_deinit + avformat_network_init + avformat_new_stream + avformat_open_input + avformat_query_codec + avformat_queue_attached_pictures + avformat_seek_file + avformat_version + avformat_write_header + avio_alloc_context + avio_check + avio_close + avio_close_dyn_buf + avio_closep + 
avio_enum_protocols + avio_find_protocol_name + avio_flush + avio_get_str + avio_get_str16be + avio_get_str16le + avio_open + avio_open2 + avio_open_dyn_buf + avio_pause + avio_printf + avio_put_str + avio_put_str16le + avio_r8 + avio_rb16 + avio_rb24 + avio_rb32 + avio_rb64 + avio_read + avio_rl16 + avio_rl24 + avio_rl32 + avio_rl64 + avio_seek + avio_seek_time + avio_size + avio_skip + avio_w8 + avio_wb16 + avio_wb24 + avio_wb32 + avio_wb64 + avio_wl16 + avio_wl24 + avio_wl32 + avio_wl64 + avio_write + avpriv_dv_get_packet + avpriv_dv_init_demux + avpriv_dv_produce_packet + avpriv_new_chapter + avpriv_set_pts_info + ff_codec_get_id + ff_inet_aton + ff_mpegts_parse_close + ff_mpegts_parse_open + ff_mpegts_parse_packet + ff_rtp_get_local_rtcp_port + ff_rtp_get_local_rtp_port + ff_rtsp_parse_line + ff_socket_nonblock + ffio_open_dyn_packet_buf + ffio_set_buf_size + ffurl_close + ffurl_open + ffurl_protocol_next + ffurl_read_complete + ffurl_seek + ffurl_size + ffurl_write + get_codec_guid + get_crc_table + get_extension + url_feof diff --git a/dependencies64/ffmpeg/lib/avformat.lib b/dependencies64/ffmpeg/lib/avformat.lib index 66e43149a..0c47516aa 100644 Binary files a/dependencies64/ffmpeg/lib/avformat.lib and b/dependencies64/ffmpeg/lib/avformat.lib differ diff --git a/dependencies64/ffmpeg/lib/avutil-52.def b/dependencies64/ffmpeg/lib/avutil-52.def new file mode 100644 index 000000000..8c917a173 --- /dev/null +++ b/dependencies64/ffmpeg/lib/avutil-52.def @@ -0,0 +1,427 @@ +EXPORTS + av_add_q + av_add_stable + av_adler32_update + av_aes_alloc + av_aes_crypt + av_aes_init + av_aes_size DATA + av_asprintf + av_audio_fifo_alloc + av_audio_fifo_drain + av_audio_fifo_free + av_audio_fifo_read + av_audio_fifo_realloc + av_audio_fifo_reset + av_audio_fifo_size + av_audio_fifo_space + av_audio_fifo_write + av_base64_decode + av_base64_encode + av_basename + av_blowfish_crypt + av_blowfish_crypt_ecb + av_blowfish_init + av_bmg_get + av_bprint_append_data + 
av_bprint_channel_layout + av_bprint_chars + av_bprint_clear + av_bprint_escape + av_bprint_finalize + av_bprint_get_buffer + av_bprint_init + av_bprint_init_for_buffer + av_bprint_strftime + av_bprintf + av_buffer_alloc + av_buffer_allocz + av_buffer_create + av_buffer_default_free + av_buffer_get_opaque + av_buffer_get_ref_count + av_buffer_is_writable + av_buffer_make_writable + av_buffer_pool_get + av_buffer_pool_init + av_buffer_pool_uninit + av_buffer_realloc + av_buffer_ref + av_buffer_unref + av_calloc + av_channel_layout_extract_channel + av_compare_mod + av_compare_ts + av_cpu_count + av_crc + av_crc_get_table + av_crc_init + av_ctz + av_d2q + av_d2str + av_dbl2ext + av_dbl2int + av_default_get_category + av_default_item_name + av_des_crypt + av_des_init + av_des_mac + av_dict_copy + av_dict_count + av_dict_free + av_dict_get + av_dict_parse_string + av_dict_set + av_dirname + av_div_q + av_downmix_info_update_side_data + av_dynarray2_add + av_dynarray_add + av_escape + av_evaluate_lls + av_expr_eval + av_expr_free + av_expr_parse + av_expr_parse_and_eval + av_ext2dbl + av_fast_malloc + av_fast_realloc + av_fifo_alloc + av_fifo_drain + av_fifo_free + av_fifo_generic_read + av_fifo_generic_write + av_fifo_grow + av_fifo_realloc2 + av_fifo_reset + av_fifo_size + av_fifo_space + av_file_map + av_file_unmap + av_find_info_tag + av_find_nearest_q_idx + av_find_opt + av_flt2int + av_fopen_utf8 + av_force_cpu_flags + av_frame_alloc + av_frame_clone + av_frame_copy + av_frame_copy_props + av_frame_free + av_frame_get_best_effort_timestamp + av_frame_get_buffer + av_frame_get_channel_layout + av_frame_get_channels + av_frame_get_color_range + av_frame_get_colorspace + av_frame_get_decode_error_flags + av_frame_get_metadata + av_frame_get_pkt_duration + av_frame_get_pkt_pos + av_frame_get_pkt_size + av_frame_get_plane_buffer + av_frame_get_qp_table + av_frame_get_sample_rate + av_frame_get_side_data + av_frame_is_writable + av_frame_make_writable + 
av_frame_move_ref + av_frame_new_side_data + av_frame_ref + av_frame_set_best_effort_timestamp + av_frame_set_channel_layout + av_frame_set_channels + av_frame_set_color_range + av_frame_set_colorspace + av_frame_set_decode_error_flags + av_frame_set_metadata + av_frame_set_pkt_duration + av_frame_set_pkt_pos + av_frame_set_pkt_size + av_frame_set_qp_table + av_frame_set_sample_rate + av_frame_unref + av_free + av_freep + av_gcd + av_get_alt_sample_fmt + av_get_bits_per_pixel + av_get_bits_per_sample_fmt + av_get_bytes_per_sample + av_get_channel_description + av_get_channel_layout + av_get_channel_layout_channel_index + av_get_channel_layout_nb_channels + av_get_channel_layout_string + av_get_channel_name + av_get_colorspace_name + av_get_cpu_flags + av_get_default_channel_layout + av_get_double + av_get_int + av_get_known_color_name + av_get_media_type_string + av_get_packed_sample_fmt + av_get_padded_bits_per_pixel + av_get_picture_type_char + av_get_pix_fmt + av_get_pix_fmt_name + av_get_pix_fmt_string + av_get_planar_sample_fmt + av_get_q + av_get_random_seed + av_get_sample_fmt + av_get_sample_fmt_name + av_get_sample_fmt_string + av_get_standard_channel_layout + av_get_string + av_get_token + av_gettime + av_hash_alloc + av_hash_final + av_hash_freep + av_hash_get_name + av_hash_get_size + av_hash_init + av_hash_names + av_hash_update + av_hmac_alloc + av_hmac_calc + av_hmac_final + av_hmac_free + av_hmac_init + av_hmac_update + av_image_alloc + av_image_check_size + av_image_copy + av_image_copy_plane + av_image_copy_to_buffer + av_image_fill_arrays + av_image_fill_linesizes + av_image_fill_max_pixsteps + av_image_fill_pointers + av_image_get_buffer_size + av_image_get_linesize + av_init_lls + av_int2dbl + av_int2flt + av_int_list_length_for_size + av_isdigit + av_isgraph + av_isspace + av_isxdigit + av_lfg_init + av_log + av_log2 + av_log2_16bit + av_log_default_callback + av_log_format_line + av_log_get_level + av_log_set_callback + av_log_set_flags + 
av_log_set_level + av_lzo1x_decode + av_malloc + av_mallocz + av_max_alloc + av_md5_alloc + av_md5_final + av_md5_init + av_md5_size DATA + av_md5_sum + av_md5_update + av_memcpy_backptr + av_memdup + av_mul_q + av_murmur3_alloc + av_murmur3_final + av_murmur3_init + av_murmur3_init_seeded + av_murmur3_update + av_nearer_q + av_next_option + av_opt_child_class_next + av_opt_child_next + av_opt_eval_double + av_opt_eval_flags + av_opt_eval_float + av_opt_eval_int + av_opt_eval_int64 + av_opt_eval_q + av_opt_find + av_opt_find2 + av_opt_flag_is_set + av_opt_free + av_opt_freep_ranges + av_opt_get + av_opt_get_channel_layout + av_opt_get_double + av_opt_get_image_size + av_opt_get_int + av_opt_get_key_value + av_opt_get_pixel_fmt + av_opt_get_q + av_opt_get_sample_fmt + av_opt_get_video_rate + av_opt_next + av_opt_ptr + av_opt_query_ranges + av_opt_query_ranges_default + av_opt_set + av_opt_set_bin + av_opt_set_channel_layout + av_opt_set_defaults + av_opt_set_defaults2 + av_opt_set_dict + av_opt_set_double + av_opt_set_from_string + av_opt_set_image_size + av_opt_set_int + av_opt_set_pixel_fmt + av_opt_set_q + av_opt_set_sample_fmt + av_opt_set_video_rate + av_opt_show2 + av_parse_color + av_parse_cpu_caps + av_parse_cpu_flags + av_parse_ratio + av_parse_time + av_parse_video_rate + av_parse_video_size + av_pix_fmt_count_planes + av_pix_fmt_desc_get + av_pix_fmt_desc_get_id + av_pix_fmt_desc_next + av_pix_fmt_descriptors DATA + av_pix_fmt_get_chroma_sub_sample + av_pix_fmt_swap_endianness + av_rc4_crypt + av_rc4_init + av_read_image_line + av_realloc + av_realloc_array + av_realloc_f + av_reallocp + av_reallocp_array + av_reduce + av_rescale + av_rescale_delta + av_rescale_q + av_rescale_q_rnd + av_rescale_rnd + av_reverse DATA + av_ripemd_alloc + av_ripemd_final + av_ripemd_init + av_ripemd_size DATA + av_ripemd_update + av_sample_fmt_is_planar + av_samples_alloc + av_samples_alloc_array_and_samples + av_samples_copy + av_samples_fill_arrays + 
av_samples_get_buffer_size + av_samples_set_silence + av_set_cpu_flags_mask + av_set_double + av_set_int + av_set_options_string + av_set_q + av_set_string3 + av_sha512_alloc + av_sha512_final + av_sha512_init + av_sha512_size DATA + av_sha512_update + av_sha_alloc + av_sha_final + av_sha_init + av_sha_size DATA + av_sha_update + av_small_strptime + av_solve_lls + av_stereo3d_alloc + av_stereo3d_create_side_data + av_strcasecmp + av_strdup + av_strerror + av_stristart + av_stristr + av_strlcat + av_strlcatf + av_strlcpy + av_strncasecmp + av_strnstr + av_strstart + av_strtod + av_strtok + av_sub_q + av_tempfile + av_timecode_adjust_ntsc_framenum2 + av_timecode_check_frame_rate + av_timecode_get_smpte_from_framenum + av_timecode_init + av_timecode_init_from_string + av_timecode_make_mpeg_tc_string + av_timecode_make_smpte_tc_string + av_timecode_make_string + av_timegm + av_tree_destroy + av_tree_enumerate + av_tree_find + av_tree_insert + av_tree_node_alloc + av_tree_node_size DATA + av_update_lls + av_usleep + av_utf8_decode + av_vbprintf + av_vlog + av_write_image_line + av_xtea_crypt + av_xtea_init + avpriv_cga_font DATA + avpriv_emms_yasm DATA + avpriv_evaluate_lls + avpriv_float_dsp_init + avpriv_frame_get_metadatap + avpriv_init_lls + avpriv_init_lls2 + avpriv_open + avpriv_report_missing_feature + avpriv_request_sample + avpriv_scalarproduct_float_c + avpriv_set_systematic_pal2 + avpriv_solve_lls + avpriv_solve_lls2 + avpriv_update_lls + avpriv_vga16_font DATA + avutil_configuration + avutil_license + avutil_version + ff_butterflies_float_sse DATA + ff_check_pixfmt_descriptors + ff_cpu_cpuid DATA + ff_cpu_xgetbv DATA + ff_evaluate_lls_sse2 DATA + ff_float_dsp_init_x86 + ff_get_channel_layout + ff_get_cpu_flags_x86 + ff_init_lls_x86 + ff_log2_tab DATA + ff_scalarproduct_float_sse DATA + ff_update_lls_avx DATA + ff_update_lls_sse2 DATA + ff_vector_dmul_scalar_avx DATA + ff_vector_dmul_scalar_sse2 DATA + ff_vector_fmac_scalar_avx DATA + 
ff_vector_fmac_scalar_sse DATA + ff_vector_fmul_add_avx DATA + ff_vector_fmul_add_sse DATA + ff_vector_fmul_avx DATA + ff_vector_fmul_reverse_avx DATA + ff_vector_fmul_reverse_sse DATA + ff_vector_fmul_scalar_sse DATA + ff_vector_fmul_sse DATA diff --git a/dependencies64/ffmpeg/lib/avutil.lib b/dependencies64/ffmpeg/lib/avutil.lib index ce000d44b..d0840d65b 100644 Binary files a/dependencies64/ffmpeg/lib/avutil.lib and b/dependencies64/ffmpeg/lib/avutil.lib differ diff --git a/dependencies64/ffmpeg/lib/libavcodec.dll.a b/dependencies64/ffmpeg/lib/libavcodec.dll.a new file mode 100644 index 000000000..e895e371d Binary files /dev/null and b/dependencies64/ffmpeg/lib/libavcodec.dll.a differ diff --git a/dependencies64/ffmpeg/lib/libavdevice.dll.a b/dependencies64/ffmpeg/lib/libavdevice.dll.a new file mode 100644 index 000000000..0cc541d8c Binary files /dev/null and b/dependencies64/ffmpeg/lib/libavdevice.dll.a differ diff --git a/dependencies64/ffmpeg/lib/libavfilter.dll.a b/dependencies64/ffmpeg/lib/libavfilter.dll.a new file mode 100644 index 000000000..85ecdf40a Binary files /dev/null and b/dependencies64/ffmpeg/lib/libavfilter.dll.a differ diff --git a/dependencies64/ffmpeg/lib/libavformat.dll.a b/dependencies64/ffmpeg/lib/libavformat.dll.a new file mode 100644 index 000000000..5f8417a6e Binary files /dev/null and b/dependencies64/ffmpeg/lib/libavformat.dll.a differ diff --git a/dependencies64/ffmpeg/lib/libavutil.dll.a b/dependencies64/ffmpeg/lib/libavutil.dll.a new file mode 100644 index 000000000..93e49529f Binary files /dev/null and b/dependencies64/ffmpeg/lib/libavutil.dll.a differ diff --git a/dependencies64/ffmpeg/lib/libpostproc.dll.a b/dependencies64/ffmpeg/lib/libpostproc.dll.a new file mode 100644 index 000000000..a013c5d72 Binary files /dev/null and b/dependencies64/ffmpeg/lib/libpostproc.dll.a differ diff --git a/dependencies64/ffmpeg/lib/libswresample.dll.a b/dependencies64/ffmpeg/lib/libswresample.dll.a new file mode 100644 index 
000000000..3c34dfdc8 Binary files /dev/null and b/dependencies64/ffmpeg/lib/libswresample.dll.a differ diff --git a/dependencies64/ffmpeg/lib/libswscale.dll.a b/dependencies64/ffmpeg/lib/libswscale.dll.a new file mode 100644 index 000000000..982350fd6 Binary files /dev/null and b/dependencies64/ffmpeg/lib/libswscale.dll.a differ diff --git a/dependencies64/ffmpeg/lib/postproc-52.def b/dependencies64/ffmpeg/lib/postproc-52.def new file mode 100644 index 000000000..4d8bbb4b3 --- /dev/null +++ b/dependencies64/ffmpeg/lib/postproc-52.def @@ -0,0 +1,10 @@ +EXPORTS + postproc_configuration + postproc_license + postproc_version + pp_free_context + pp_free_mode + pp_get_context + pp_get_mode_by_name_and_quality + pp_help DATA + pp_postprocess diff --git a/dependencies64/ffmpeg/lib/postproc.lib b/dependencies64/ffmpeg/lib/postproc.lib index 63a5417ae..c76189000 100644 Binary files a/dependencies64/ffmpeg/lib/postproc.lib and b/dependencies64/ffmpeg/lib/postproc.lib differ diff --git a/dependencies64/ffmpeg/lib/swresample-0.def b/dependencies64/ffmpeg/lib/swresample-0.def new file mode 100644 index 000000000..6c337fc9b --- /dev/null +++ b/dependencies64/ffmpeg/lib/swresample-0.def @@ -0,0 +1,106 @@ +EXPORTS + ff_float_to_int16_a_sse2 DATA + ff_float_to_int16_u_sse2 DATA + ff_float_to_int32_a_sse2 DATA + ff_float_to_int32_u_sse2 DATA + ff_int16_to_float_a_sse2 DATA + ff_int16_to_float_u_sse2 DATA + ff_int16_to_int32_a_mmx DATA + ff_int16_to_int32_a_sse2 DATA + ff_int16_to_int32_u_mmx DATA + ff_int16_to_int32_u_sse2 DATA + ff_int32_to_float_a_avx DATA + ff_int32_to_float_a_sse2 DATA + ff_int32_to_float_u_avx DATA + ff_int32_to_float_u_sse2 DATA + ff_int32_to_int16_a_mmx DATA + ff_int32_to_int16_a_sse2 DATA + ff_int32_to_int16_u_mmx DATA + ff_int32_to_int16_u_sse2 DATA + ff_log2_tab DATA + ff_mix_1_1_a_float_avx DATA + ff_mix_1_1_a_float_sse DATA + ff_mix_1_1_a_int16_mmx DATA + ff_mix_1_1_a_int16_sse2 DATA + ff_mix_1_1_u_float_avx DATA + ff_mix_1_1_u_float_sse DATA + 
ff_mix_1_1_u_int16_mmx DATA + ff_mix_1_1_u_int16_sse2 DATA + ff_mix_2_1_a_float_avx DATA + ff_mix_2_1_a_float_sse DATA + ff_mix_2_1_a_int16_mmx DATA + ff_mix_2_1_a_int16_sse2 DATA + ff_mix_2_1_u_float_avx DATA + ff_mix_2_1_u_float_sse DATA + ff_mix_2_1_u_int16_mmx DATA + ff_mix_2_1_u_int16_sse2 DATA + ff_pack_2ch_float_to_int16_a_sse2 DATA + ff_pack_2ch_float_to_int16_u_sse2 DATA + ff_pack_2ch_float_to_int32_a_sse2 DATA + ff_pack_2ch_float_to_int32_u_sse2 DATA + ff_pack_2ch_int16_to_float_a_sse2 DATA + ff_pack_2ch_int16_to_float_u_sse2 DATA + ff_pack_2ch_int16_to_int16_a_sse2 DATA + ff_pack_2ch_int16_to_int16_u_sse2 DATA + ff_pack_2ch_int16_to_int32_a_sse2 DATA + ff_pack_2ch_int16_to_int32_u_sse2 DATA + ff_pack_2ch_int32_to_float_a_sse2 DATA + ff_pack_2ch_int32_to_float_u_sse2 DATA + ff_pack_2ch_int32_to_int16_a_sse2 DATA + ff_pack_2ch_int32_to_int16_u_sse2 DATA + ff_pack_2ch_int32_to_int32_a_sse2 DATA + ff_pack_2ch_int32_to_int32_u_sse2 DATA + ff_pack_6ch_float_to_float_a_avx DATA + ff_pack_6ch_float_to_float_a_mmx DATA + ff_pack_6ch_float_to_float_a_sse4 DATA + ff_pack_6ch_float_to_float_u_avx DATA + ff_pack_6ch_float_to_float_u_mmx DATA + ff_pack_6ch_float_to_float_u_sse4 DATA + ff_pack_6ch_float_to_int32_a_avx DATA + ff_pack_6ch_float_to_int32_a_sse4 DATA + ff_pack_6ch_float_to_int32_u_avx DATA + ff_pack_6ch_float_to_int32_u_sse4 DATA + ff_pack_6ch_int32_to_float_a_avx DATA + ff_pack_6ch_int32_to_float_a_sse4 DATA + ff_pack_6ch_int32_to_float_u_avx DATA + ff_pack_6ch_int32_to_float_u_sse4 DATA + ff_resample_int16_rounder DATA + ff_unpack_2ch_float_to_int16_a_sse2 DATA + ff_unpack_2ch_float_to_int16_u_sse2 DATA + ff_unpack_2ch_float_to_int32_a_sse2 DATA + ff_unpack_2ch_float_to_int32_u_sse2 DATA + ff_unpack_2ch_int16_to_float_a_sse2 DATA + ff_unpack_2ch_int16_to_float_a_ssse3 DATA + ff_unpack_2ch_int16_to_float_u_sse2 DATA + ff_unpack_2ch_int16_to_float_u_ssse3 DATA + ff_unpack_2ch_int16_to_int16_a_sse2 DATA + ff_unpack_2ch_int16_to_int16_a_ssse3 DATA + 
ff_unpack_2ch_int16_to_int16_u_sse2 DATA + ff_unpack_2ch_int16_to_int16_u_ssse3 DATA + ff_unpack_2ch_int16_to_int32_a_sse2 DATA + ff_unpack_2ch_int16_to_int32_a_ssse3 DATA + ff_unpack_2ch_int16_to_int32_u_sse2 DATA + ff_unpack_2ch_int16_to_int32_u_ssse3 DATA + ff_unpack_2ch_int32_to_float_a_sse2 DATA + ff_unpack_2ch_int32_to_float_u_sse2 DATA + ff_unpack_2ch_int32_to_int16_a_sse2 DATA + ff_unpack_2ch_int32_to_int16_u_sse2 DATA + ff_unpack_2ch_int32_to_int32_a_sse2 DATA + ff_unpack_2ch_int32_to_int32_u_sse2 DATA + swr_alloc + swr_alloc_set_opts + swr_convert + swr_drop_output + swr_free + swr_get_class + swr_get_delay + swr_init + swr_inject_silence + swr_is_initialized + swr_next_pts + swr_set_channel_mapping + swr_set_compensation + swr_set_matrix + swresample_configuration + swresample_license + swresample_version diff --git a/dependencies64/ffmpeg/lib/swresample.lib b/dependencies64/ffmpeg/lib/swresample.lib index 42f4f2e27..b4a6f0b61 100644 Binary files a/dependencies64/ffmpeg/lib/swresample.lib and b/dependencies64/ffmpeg/lib/swresample.lib differ diff --git a/dependencies64/ffmpeg/lib/swscale-2.def b/dependencies64/ffmpeg/lib/swscale-2.def new file mode 100644 index 000000000..d828ee38e --- /dev/null +++ b/dependencies64/ffmpeg/lib/swscale-2.def @@ -0,0 +1,37 @@ +EXPORTS + sws_addVec + sws_allocVec + sws_alloc_context + sws_cloneVec + sws_context_class DATA + sws_convVec + sws_convertPalette8ToPacked24 + sws_convertPalette8ToPacked32 + sws_format_name + sws_freeContext + sws_freeFilter + sws_freeVec + sws_getCachedContext + sws_getCoefficients + sws_getColorspaceDetails + sws_getConstVec + sws_getContext + sws_getDefaultFilter + sws_getGaussianVec + sws_getIdentityVec + sws_get_class + sws_init_context + sws_isSupportedEndiannessConversion + sws_isSupportedInput + sws_isSupportedOutput + sws_normalizeVec + sws_printVec2 + sws_rgb2rgb_init + sws_scale + sws_scaleVec + sws_setColorspaceDetails + sws_shiftVec + sws_subVec + swscale_configuration + 
swscale_license + swscale_version diff --git a/dependencies64/ffmpeg/lib/swscale.lib b/dependencies64/ffmpeg/lib/swscale.lib index 0bf1751bb..97eadb070 100644 Binary files a/dependencies64/ffmpeg/lib/swscale.lib and b/dependencies64/ffmpeg/lib/swscale.lib differ diff --git a/dependencies64/ffmpeg/licenses/bzip2.txt b/dependencies64/ffmpeg/licenses/bzip2.txt new file mode 100644 index 000000000..cc614178c --- /dev/null +++ b/dependencies64/ffmpeg/licenses/bzip2.txt @@ -0,0 +1,42 @@ + +-------------------------------------------------------------------------- + +This program, "bzip2", the associated library "libbzip2", and all +documentation, are copyright (C) 1996-2010 Julian R Seward. All +rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. The origin of this software must not be misrepresented; you must + not claim that you wrote the original software. If you use this + software in a product, an acknowledgment in the product + documentation would be appreciated but is not required. + +3. Altered source versions must be plainly marked as such, and must + not be misrepresented as being the original software. + +4. The name of the author may not be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Julian Seward, jseward@bzip.org +bzip2/libbzip2 version 1.0.6 of 6 September 2010 + +-------------------------------------------------------------------------- diff --git a/dependencies64/ffmpeg/licenses/fontconfig.txt b/dependencies64/ffmpeg/licenses/fontconfig.txt new file mode 100644 index 000000000..2a5d777ff --- /dev/null +++ b/dependencies64/ffmpeg/licenses/fontconfig.txt @@ -0,0 +1,27 @@ +fontconfig/COPYING + +Copyright © 2000,2001,2002,2003,2004,2006,2007 Keith Packard +Copyright © 2005 Patrick Lam +Copyright © 2009 Roozbeh Pournader +Copyright © 2008,2009 Red Hat, Inc. +Copyright © 2008 Danilo Å egan + + +Permission to use, copy, modify, distribute, and sell this software and its +documentation for any purpose is hereby granted without fee, provided that +the above copyright notice appear in all copies and that both that +copyright notice and this permission notice appear in supporting +documentation, and that the name of the author(s) not be used in +advertising or publicity pertaining to distribution of the software without +specific, written prior permission. The authors make no +representations about the suitability of this software for any purpose. It +is provided "as is" without express or implied warranty. 
+ +THE AUTHOR(S) DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO +EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY SPECIAL, INDIRECT OR +CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, +DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. + diff --git a/dependencies64/ffmpeg/licenses/freetype.txt b/dependencies64/ffmpeg/licenses/freetype.txt new file mode 100644 index 000000000..bbaba33f4 --- /dev/null +++ b/dependencies64/ffmpeg/licenses/freetype.txt @@ -0,0 +1,169 @@ + The FreeType Project LICENSE + ---------------------------- + + 2006-Jan-27 + + Copyright 1996-2002, 2006 by + David Turner, Robert Wilhelm, and Werner Lemberg + + + +Introduction +============ + + The FreeType Project is distributed in several archive packages; + some of them may contain, in addition to the FreeType font engine, + various tools and contributions which rely on, or relate to, the + FreeType Project. + + This license applies to all files found in such packages, and + which do not fall under their own explicit license. The license + affects thus the FreeType font engine, the test programs, + documentation and makefiles, at the very least. + + This license was inspired by the BSD, Artistic, and IJG + (Independent JPEG Group) licenses, which all encourage inclusion + and use of free software in commercial and freeware products + alike. As a consequence, its main points are that: + + o We don't promise that this software works. However, we will be + interested in any kind of bug reports. (`as is' distribution) + + o You can use this software for whatever you want, in parts or + full form, without having to pay us. (`royalty-free' usage) + + o You may not pretend that you wrote this software. 
If you use + it, or only parts of it, in a program, you must acknowledge + somewhere in your documentation that you have used the + FreeType code. (`credits') + + We specifically permit and encourage the inclusion of this + software, with or without modifications, in commercial products. + We disclaim all warranties covering The FreeType Project and + assume no liability related to The FreeType Project. + + + Finally, many people asked us for a preferred form for a + credit/disclaimer to use in compliance with this license. We thus + encourage you to use the following text: + + """ + Portions of this software are copyright © The FreeType + Project (www.freetype.org). All rights reserved. + """ + + Please replace with the value from the FreeType version you + actually use. + + +Legal Terms +=========== + +0. Definitions +-------------- + + Throughout this license, the terms `package', `FreeType Project', + and `FreeType archive' refer to the set of files originally + distributed by the authors (David Turner, Robert Wilhelm, and + Werner Lemberg) as the `FreeType Project', be they named as alpha, + beta or final release. + + `You' refers to the licensee, or person using the project, where + `using' is a generic term including compiling the project's source + code as well as linking it to form a `program' or `executable'. + This program is referred to as `a program using the FreeType + engine'. + + This license applies to all files distributed in the original + FreeType Project, including all source code, binaries and + documentation, unless otherwise stated in the file in its + original, unmodified form as distributed in the original archive. + If you are unsure whether or not a particular file is covered by + this license, you must contact us to verify this. + + The FreeType Project is copyright (C) 1996-2000 by David Turner, + Robert Wilhelm, and Werner Lemberg. All rights reserved except as + specified below. + +1. 
No Warranty +-------------- + + THE FREETYPE PROJECT IS PROVIDED `AS IS' WITHOUT WARRANTY OF ANY + KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + PURPOSE. IN NO EVENT WILL ANY OF THE AUTHORS OR COPYRIGHT HOLDERS + BE LIABLE FOR ANY DAMAGES CAUSED BY THE USE OR THE INABILITY TO + USE, OF THE FREETYPE PROJECT. + +2. Redistribution +----------------- + + This license grants a worldwide, royalty-free, perpetual and + irrevocable right and license to use, execute, perform, compile, + display, copy, create derivative works of, distribute and + sublicense the FreeType Project (in both source and object code + forms) and derivative works thereof for any purpose; and to + authorize others to exercise some or all of the rights granted + herein, subject to the following conditions: + + o Redistribution of source code must retain this license file + (`FTL.TXT') unaltered; any additions, deletions or changes to + the original files must be clearly indicated in accompanying + documentation. The copyright notices of the unaltered, + original files must be preserved in all copies of source + files. + + o Redistribution in binary form must provide a disclaimer that + states that the software is based in part of the work of the + FreeType Team, in the distribution documentation. We also + encourage you to put an URL to the FreeType web page in your + documentation, though this isn't mandatory. + + These conditions apply to any software derived from or based on + the FreeType Project, not just the unmodified files. If you use + our work, you must acknowledge us. However, no fee need be paid + to us. + +3. Advertising +-------------- + + Neither the FreeType authors and contributors nor you shall use + the name of the other for commercial, advertising, or promotional + purposes without specific prior written permission. 
+ + We suggest, but do not require, that you use one or more of the + following phrases to refer to this software in your documentation + or advertising materials: `FreeType Project', `FreeType Engine', + `FreeType library', or `FreeType Distribution'. + + As you have not signed this license, you are not required to + accept it. However, as the FreeType Project is copyrighted + material, only this license, or another one contracted with the + authors, grants you the right to use, distribute, and modify it. + Therefore, by using, distributing, or modifying the FreeType + Project, you indicate that you understand and accept all the terms + of this license. + +4. Contacts +----------- + + There are two mailing lists related to FreeType: + + o freetype@nongnu.org + + Discusses general use and applications of FreeType, as well as + future and wanted additions to the library and distribution. + If you are looking for support, start in this list if you + haven't found anything to help you in the documentation. + + o freetype-devel@nongnu.org + + Discusses bugs, as well as engine internals, design issues, + specific licenses, porting, etc. + + Our home page can be found at + + http://www.freetype.org + + +--- end of FTL.TXT --- diff --git a/dependencies64/ffmpeg/licenses/frei0r.txt b/dependencies64/ffmpeg/licenses/frei0r.txt new file mode 100644 index 000000000..623b6258a --- /dev/null +++ b/dependencies64/ffmpeg/licenses/frei0r.txt @@ -0,0 +1,340 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. 
By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. 
If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. 
You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. 
If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. 
(This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. 
Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. 
+ +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. 
If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. 
Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. diff --git a/dependencies64/ffmpeg/licenses/gme.txt b/dependencies64/ffmpeg/licenses/gme.txt new file mode 100644 index 000000000..5ab7695ab --- /dev/null +++ b/dependencies64/ffmpeg/licenses/gme.txt @@ -0,0 +1,504 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. 
+ + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. 
Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. 
+ + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". 
+ + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. 
+ + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. 
But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. 
You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. 
+ + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. 
(It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. 
You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. 
Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. 
Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. 
Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. 
It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! + + diff --git a/dependencies64/ffmpeg/licenses/gnutls.txt b/dependencies64/ffmpeg/licenses/gnutls.txt new file mode 100644 index 000000000..94a9ed024 --- /dev/null +++ b/dependencies64/ffmpeg/licenses/gnutls.txt @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. 
+ + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. 
For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/dependencies64/ffmpeg/licenses/lame.txt b/dependencies64/ffmpeg/licenses/lame.txt new file mode 100644 index 000000000..f5030495b --- /dev/null +++ b/dependencies64/ffmpeg/licenses/lame.txt @@ -0,0 +1,481 @@ + GNU LIBRARY GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the library GPL. It is + numbered 2 because it goes with version 2 of the ordinary GPL.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Library General Public License, applies to some +specially designated Free Software Foundation software, and to any +other libraries whose authors decide to use it. You can use it for +your libraries, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if +you distribute copies of the library, or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. 
You must make sure that they, too, receive or can get the source +code. If you link a program with the library, you must provide +complete object files to the recipients so that they can relink them +with the library, after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + Our method of protecting your rights has two steps: (1) copyright +the library, and (2) offer you this license which gives you legal +permission to copy, distribute and/or modify the library. + + Also, for each distributor's protection, we want to make certain +that everyone understands that there is no warranty for this free +library. If the library is modified by someone else and passed on, we +want its recipients to know that what they have is not the original +version, so that any problems introduced by others will not reflect on +the original authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that companies distributing free +software will individually obtain patent licenses, thus in effect +transforming the program into proprietary software. To prevent this, +we have made it clear that any patent must be licensed for everyone's +free use or not licensed at all. + + Most GNU software, including some libraries, is covered by the ordinary +GNU General Public License, which was designed for utility programs. This +license, the GNU Library General Public License, applies to certain +designated libraries. This license is quite different from the ordinary +one; be sure to read it in full, and don't assume that anything in it is +the same as in the ordinary license. + + The reason we have a separate public license for some libraries is that +they blur the distinction we usually make between modifying or adding to a +program and simply using it. 
Linking a program with a library, without +changing the library, is in some sense simply using the library, and is +analogous to running a utility program or application program. However, in +a textual and legal sense, the linked executable is a combined work, a +derivative of the original library, and the ordinary General Public License +treats it as such. + + Because of this blurred distinction, using the ordinary General +Public License for libraries did not effectively promote software +sharing, because most developers did not use the libraries. We +concluded that weaker conditions might promote sharing better. + + However, unrestricted linking of non-free programs would deprive the +users of those programs of all benefit from the free status of the +libraries themselves. This Library General Public License is intended to +permit developers of non-free programs to use free libraries, while +preserving your freedom as a user of such programs to change the free +libraries that are incorporated in them. (We have not seen how to achieve +this as regards changes in header files, but we have achieved it as regards +changes in the actual functions of the Library.) The hope is that this +will lead to faster development of free libraries. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, while the latter only +works together with the library. + + Note that it is possible for a library to be covered by the ordinary +General Public License rather than by this special one. + + GNU LIBRARY GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. 
This License Agreement applies to any software library which +contains a notice placed by the copyright holder or other authorized +party saying it may be distributed under the terms of this Library +General Public License (also called "this License"). Each licensee is +addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. 
You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) 
+ +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. 
You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. 
+ + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also compile or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. 
(It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + c) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + d) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the source code distributed need not include anything that is normally +distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. 
You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. 
Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. 
Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Library General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. 
Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. 
It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Library General Public + License as published by the Free Software Foundation; either + version 2 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Library General Public License for more details. + + You should have received a copy of the GNU Library General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! diff --git a/dependencies64/ffmpeg/licenses/libass.txt b/dependencies64/ffmpeg/licenses/libass.txt new file mode 100644 index 000000000..8351a30e3 --- /dev/null +++ b/dependencies64/ffmpeg/licenses/libass.txt @@ -0,0 +1,11 @@ +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. 
+ +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/dependencies64/ffmpeg/licenses/libbluray.txt b/dependencies64/ffmpeg/licenses/libbluray.txt new file mode 100644 index 000000000..20fb9c7da --- /dev/null +++ b/dependencies64/ffmpeg/licenses/libbluray.txt @@ -0,0 +1,458 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. 
+ + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. 
We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. 
In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. 
A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. 
You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. 
But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. 
You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. 
+ + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. 
(It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. 
You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. 
Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. 
Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. 
Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. 
+ + END OF TERMS AND CONDITIONS diff --git a/dependencies64/ffmpeg/licenses/libcaca.txt b/dependencies64/ffmpeg/licenses/libcaca.txt new file mode 100644 index 000000000..2978491d0 --- /dev/null +++ b/dependencies64/ffmpeg/licenses/libcaca.txt @@ -0,0 +1,14 @@ + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + Version 2, December 2004 + + Copyright (C) 2004 Sam Hocevar + 14 rue de Plaisance, 75014 Paris, France + Everyone is permitted to copy and distribute verbatim or modified + copies of this license document, and changing it is allowed as long + as the name is changed. + + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. You just DO WHAT THE FUCK YOU WANT TO. + diff --git a/dependencies64/ffmpeg/licenses/libgsm.txt b/dependencies64/ffmpeg/licenses/libgsm.txt new file mode 100644 index 000000000..28fbb3ce1 --- /dev/null +++ b/dependencies64/ffmpeg/licenses/libgsm.txt @@ -0,0 +1,35 @@ +Copyright 1992, 1993, 1994 by Jutta Degener and Carsten Bormann, +Technische Universitaet Berlin + +Any use of this software is permitted provided that this notice is not +removed and that neither the authors nor the Technische Universitaet Berlin +are deemed to have made any representations as to the suitability of this +software for any purpose nor are held responsible for any defects of +this software. THERE IS ABSOLUTELY NO WARRANTY FOR THIS SOFTWARE. + +As a matter of courtesy, the authors request to be informed about uses +this software has found, about bugs in this software, and about any +improvements that may be of general interest. 
+ +Berlin, 28.11.1994 +Jutta Degener +Carsten Bormann + + oOo + +Since the original terms of 15 years ago maybe do not make our +intentions completely clear given today's refined usage of the legal +terms, we append this additional permission: + + Permission to use, copy, modify, and distribute this software + for any purpose with or without fee is hereby granted, + provided that this notice is not removed and that neither + the authors nor the Technische Universitaet Berlin are + deemed to have made any representations as to the suitability + of this software for any purpose nor are held responsible + for any defects of this software. THERE IS ABSOLUTELY NO + WARRANTY FOR THIS SOFTWARE. + +Berkeley/Bremen, 05.04.2009 +Jutta Degener +Carsten Bormann diff --git a/dependencies64/ffmpeg/licenses/libiconv.txt b/dependencies64/ffmpeg/licenses/libiconv.txt new file mode 100644 index 000000000..94a9ed024 --- /dev/null +++ b/dependencies64/ffmpeg/licenses/libiconv.txt @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/dependencies64/ffmpeg/licenses/libilbc.txt b/dependencies64/ffmpeg/licenses/libilbc.txt new file mode 100644 index 000000000..4c41b7b25 --- /dev/null +++ b/dependencies64/ffmpeg/licenses/libilbc.txt @@ -0,0 +1,29 @@ +Copyright (c) 2011, The WebRTC project authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + * Neither the name of Google nor the names of its contributors may + be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/dependencies64/ffmpeg/licenses/libmodplug.txt b/dependencies64/ffmpeg/licenses/libmodplug.txt new file mode 100644 index 000000000..59fbf826c --- /dev/null +++ b/dependencies64/ffmpeg/licenses/libmodplug.txt @@ -0,0 +1 @@ +ModPlug-XMMS and libmodplug are now in the public domain. diff --git a/dependencies64/ffmpeg/licenses/libtheora.txt b/dependencies64/ffmpeg/licenses/libtheora.txt new file mode 100644 index 000000000..c8ccce4ff --- /dev/null +++ b/dependencies64/ffmpeg/licenses/libtheora.txt @@ -0,0 +1,28 @@ +Copyright (C) 2002-2009 Xiph.org Foundation + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +- Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +- Neither the name of the Xiph.org Foundation nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/dependencies64/ffmpeg/licenses/libvorbis.txt b/dependencies64/ffmpeg/licenses/libvorbis.txt new file mode 100644 index 000000000..28de72a97 --- /dev/null +++ b/dependencies64/ffmpeg/licenses/libvorbis.txt @@ -0,0 +1,28 @@ +Copyright (c) 2002-2008 Xiph.org Foundation + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +- Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +- Neither the name of the Xiph.org Foundation nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/dependencies64/ffmpeg/licenses/libvpx.txt b/dependencies64/ffmpeg/licenses/libvpx.txt new file mode 100644 index 000000000..1ce44343c --- /dev/null +++ b/dependencies64/ffmpeg/licenses/libvpx.txt @@ -0,0 +1,31 @@ +Copyright (c) 2010, The WebM Project authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + * Neither the name of Google, nor the WebM Project, nor the names + of its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/dependencies64/ffmpeg/licenses/libwebp.txt b/dependencies64/ffmpeg/licenses/libwebp.txt new file mode 100644 index 000000000..7a6f99547 --- /dev/null +++ b/dependencies64/ffmpeg/licenses/libwebp.txt @@ -0,0 +1,30 @@ +Copyright (c) 2010, Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + * Neither the name of Google nor the names of its contributors may + be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/dependencies64/ffmpeg/licenses/opencore-amr.txt b/dependencies64/ffmpeg/licenses/opencore-amr.txt new file mode 100644 index 000000000..5ec4bf01e --- /dev/null +++ b/dependencies64/ffmpeg/licenses/opencore-amr.txt @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the +copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other +entities that control, are controlled by, or are under common control with +that entity. For the purposes of this definition, "control" means (i) the +power, direct or indirect, to cause the direction or management of such +entity, whether by contract or otherwise, or (ii) ownership of fifty +percent (50%) or more of the outstanding shares, or (iii) beneficial +ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, +including but not limited to software source code, documentation source, +and configuration files. 
+ +"Object" form shall mean any form resulting from mechanical transformation +or translation of a Source form, including but not limited to compiled +object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, +made available under the License, as indicated by a copyright notice that +is included in or attached to the work (an example is provided in the +Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, +that is based on (or derived from) the Work and for which the editorial +revisions, annotations, elaborations, or other modifications represent, as +a whole, an original work of authorship. For the purposes of this License, +Derivative Works shall not include works that remain separable from, or +merely link (or bind by name) to the interfaces of, the Work and Derivative +Works thereof. + +"Contribution" shall mean any work of authorship, including the original +version of the Work and any modifications or additions to that Work or +Derivative Works thereof, that is intentionally submitted to Licensor for +inclusion in the Work by the copyright owner or by an individual or Legal +Entity authorized to submit on behalf of the copyright owner. For the +purposes of this definition, "submitted" means any form of electronic, +verbal, or written communication sent to the Licensor or its +representatives, including but not limited to communication on electronic +mailing lists, source code control systems, and issue tracking systems that +are managed by, or on behalf of, the Licensor for the purpose of discussing +and improving the Work, but excluding communication that is conspicuously +marked or otherwise designated in writing by the copyright owner as "Not a +Contribution." 
+ +"Contributor" shall mean Licensor and any individual or Legal Entity on +behalf of whom a Contribution has been received by Licensor and +subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this +License, each Contributor hereby grants to You a perpetual, worldwide, +non-exclusive, no-charge, royalty-free, irrevocable copyright license to +reproduce, prepare Derivative Works of, publicly display, publicly perform, +sublicense, and distribute the Work and such Derivative Works in Source or +Object form. + +3. Grant of Patent License. Subject to the terms and conditions of this +License, each Contributor hereby grants to You a perpetual, worldwide, +non-exclusive, no-charge, royalty-free, irrevocable (except as stated in +this section) patent license to make, have made, use, offer to sell, sell, +import, and otherwise transfer the Work, where such license applies only to +those patent claims licensable by such Contributor that are necessarily +infringed by their Contribution(s) alone or by combination of their +Contribution(s) with the Work to which such Contribution(s) was submitted. +If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or +contributory patent infringement, then any patent licenses granted to You +under this License for that Work shall terminate as of the date such +litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or +Derivative Works thereof in any medium, with or without modifications, and +in Source or Object form, provided that You meet the following conditions: + + 1. You must give any other recipients of the Work or Derivative Works a +copy of this License; and + + 2. You must cause any modified files to carry prominent notices stating +that You changed the files; and + + 3. 
You must retain, in the Source form of any Derivative Works that You +distribute, all copyright, patent, trademark, and attribution notices from +the Source form of the Work, excluding those notices that do not pertain to +any part of the Derivative Works; and + + 4. If the Work includes a "NOTICE" text file as part of its +distribution, then any Derivative Works that You distribute must include a +readable copy of the attribution notices contained within such NOTICE file, +excluding those notices that do not pertain to any part of the Derivative +Works, in at least one of the following places: within a NOTICE text file +distributed as part of the Derivative Works; within the Source form or +documentation, if provided along with the Derivative Works; or, within a +display generated by the Derivative Works, if and wherever such third-party +notices normally appear. The contents of the NOTICE file are for +informational purposes only and do not modify the License. You may add Your +own attribution notices within Derivative Works that You distribute, +alongside or as an addendum to the NOTICE text from the Work, provided that +such additional attribution notices cannot be construed as modifying the +License. + +You may add Your own copyright statement to Your modifications and may +provide additional or different license terms and conditions for use, +reproduction, or distribution of Your modifications, or for any such +Derivative Works as a whole, provided Your use, reproduction, and +distribution of the Work otherwise complies with the conditions stated in +this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any +Contribution intentionally submitted for inclusion in the Work by You to +the Licensor shall be under the terms and conditions of this License, +without any additional terms or conditions. 
Notwithstanding the above, +nothing herein shall supersede or modify the terms of any separate license +agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade +names, trademarks, service marks, or product names of the Licensor, except +as required for reasonable and customary use in describing the origin of +the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to +in writing, Licensor provides the Work (and each Contributor provides its +Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied, including, without limitation, any +warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or +FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for +determining the appropriateness of using or redistributing the Work and +assume any risks associated with Your exercise of permissions under this +License. + +8. Limitation of Liability. In no event and under no legal theory, whether +in tort (including negligence), contract, or otherwise, unless required by +applicable law (such as deliberate and grossly negligent acts) or agreed to +in writing, shall any Contributor be liable to You for damages, including +any direct, indirect, special, incidental, or consequential damages of any +character arising as a result of this License or out of the use or +inability to use the Work (including but not limited to damages for loss of +goodwill, work stoppage, computer failure or malfunction, or any and all +other commercial damages or losses), even if such Contributor has been +advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. 
While redistributing the +Work or Derivative Works thereof, You may choose to offer, and charge a fee +for, acceptance of support, warranty, indemnity, or other liability +obligations and/or rights consistent with this License. However, in +accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if +You agree to indemnify, defend, and hold each Contributor harmless for any +liability incurred by, or claims asserted against, such Contributor by +reason of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included +on the same "printed page" as the copyright notice for easier +identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain a + copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable + law or agreed to in writing, software distributed under the License is + distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the specific language + governing permissions and limitations under the License. 
diff --git a/dependencies64/ffmpeg/licenses/openjpeg.txt b/dependencies64/ffmpeg/licenses/openjpeg.txt new file mode 100644 index 000000000..f578e33a3 --- /dev/null +++ b/dependencies64/ffmpeg/licenses/openjpeg.txt @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2002-2012, Communications and Remote Sensing Laboratory, Universite catholique de Louvain (UCL), Belgium + * Copyright (c) 2002-2012, Professor Benoit Macq + * Copyright (c) 2003-2012, Antonin Descampe + * Copyright (c) 2003-2009, Francois-Olivier Devaux + * Copyright (c) 2005, Herve Drolon, FreeImage Team + * Copyright (c) 2002-2003, Yannick Verschueren + * Copyright (c) 2001-2003, David Janssens + * Copyright (c) 2011-2012, Centre National d'Etudes Spatiales (CNES), France + * Copyright (c) 2012, CS Systemes d'Information, France + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/dependencies64/ffmpeg/licenses/opus.txt b/dependencies64/ffmpeg/licenses/opus.txt new file mode 100644 index 000000000..f4159e675 --- /dev/null +++ b/dependencies64/ffmpeg/licenses/opus.txt @@ -0,0 +1,44 @@ +Copyright 2001-2011 Xiph.Org, Skype Limited, Octasic, + Jean-Marc Valin, Timothy B. Terriberry, + CSIRO, Gregory Maxwell, Mark Borgerding, + Erik de Castro Lopo + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +- Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +- Neither the name of Internet Society, IETF or IETF Trust, nor the +names of specific contributors, may be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Opus is subject to the royalty-free patent licenses which are +specified at: + +Xiph.Org Foundation: +https://datatracker.ietf.org/ipr/1524/ + +Microsoft Corporation: +https://datatracker.ietf.org/ipr/1914/ + +Broadcom Corporation: +https://datatracker.ietf.org/ipr/1526/ diff --git a/dependencies64/ffmpeg/licenses/rtmpdump.txt b/dependencies64/ffmpeg/licenses/rtmpdump.txt new file mode 100644 index 000000000..d511905c1 --- /dev/null +++ b/dependencies64/ffmpeg/licenses/rtmpdump.txt @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. 
You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. 
+ +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. 
However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. 
+You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. 
diff --git a/dependencies64/ffmpeg/licenses/schroedinger.txt b/dependencies64/ffmpeg/licenses/schroedinger.txt new file mode 100644 index 000000000..8a68a0d95 --- /dev/null +++ b/dependencies64/ffmpeg/licenses/schroedinger.txt @@ -0,0 +1,467 @@ + GNU LIBRARY GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1991 Free Software Foundation, Inc. + 675 Mass Ave, Cambridge, MA 02139, USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the library GPL. It is + numbered 2 because it goes with version 2 of the ordinary GPL.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Library General Public License, applies to some +specially designated Free Software Foundation software, and to any +other libraries whose authors decide to use it. You can use it for +your libraries, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if +you distribute copies of the library, or if you modify it. 
+ + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link a program with the library, you must provide +complete object files to the recipients so that they can relink them +with the library, after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + Our method of protecting your rights has two steps: (1) copyright +the library, and (2) offer you this license which gives you legal +permission to copy, distribute and/or modify the library. + + Also, for each distributor's protection, we want to make certain +that everyone understands that there is no warranty for this free +library. If the library is modified by someone else and passed on, we +want its recipients to know that what they have is not the original +version, so that any problems introduced by others will not reflect on +the original authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that companies distributing free +software will individually obtain patent licenses, thus in effect +transforming the program into proprietary software. To prevent this, +we have made it clear that any patent must be licensed for everyone's +free use or not licensed at all. + + Most GNU software, including some libraries, is covered by the ordinary +GNU General Public License, which was designed for utility programs. This +license, the GNU Library General Public License, applies to certain +designated libraries. This license is quite different from the ordinary +one; be sure to read it in full, and don't assume that anything in it is +the same as in the ordinary license. 
+ + The reason we have a separate public license for some libraries is that +they blur the distinction we usually make between modifying or adding to a +program and simply using it. Linking a program with a library, without +changing the library, is in some sense simply using the library, and is +analogous to running a utility program or application program. However, in +a textual and legal sense, the linked executable is a combined work, a +derivative of the original library, and the ordinary General Public License +treats it as such. + + Because of this blurred distinction, using the ordinary General +Public License for libraries did not effectively promote software +sharing, because most developers did not use the libraries. We +concluded that weaker conditions might promote sharing better. + + However, unrestricted linking of non-free programs would deprive the +users of those programs of all benefit from the free status of the +libraries themselves. This Library General Public License is intended to +permit developers of non-free programs to use free libraries, while +preserving your freedom as a user of such programs to change the free +libraries that are incorporated in them. (We have not seen how to achieve +this as regards changes in header files, but we have achieved it as regards +changes in the actual functions of the Library.) The hope is that this +will lead to faster development of free libraries. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, while the latter only +works together with the library. + + Note that it is possible for a library to be covered by the ordinary +General Public License rather than by this special one. + + GNU LIBRARY GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. 
This License Agreement applies to any software library which +contains a notice placed by the copyright holder or other authorized +party saying it may be distributed under the terms of this Library +General Public License (also called "this License"). Each licensee is +addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. 
You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) 
+ +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. 
You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. 
+ + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also compile or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. 
(It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + c) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + d) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the source code distributed need not include anything that is normally +distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. 
You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. 
Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. 
Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Library General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. 
Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + Appendix: How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. 
It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Library General Public + License as published by the Free Software Foundation; either + version 2 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Library General Public License for more details. + + You should have received a copy of the GNU Library General Public + License along with this library; if not, write to the Free + Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. diff --git a/dependencies64/ffmpeg/licenses/soxr.txt b/dependencies64/ffmpeg/licenses/soxr.txt new file mode 100644 index 000000000..1c618785e --- /dev/null +++ b/dependencies64/ffmpeg/licenses/soxr.txt @@ -0,0 +1,24 @@ +SoX Resampler Library Copyright (c) 2007-13 robs@users.sourceforge.net + +This library is free software; you can redistribute it and/or modify it +under the terms of the GNU Lesser General Public License as published by +the Free Software Foundation; either version 2.1 of the License, or (at +your option) any later version. + +This library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser +General Public License for more details. + +You should have received a copy of the GNU Lesser General Public License +along with this library; if not, write to the Free Software Foundation, +Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + + +Notes + +1. 
Re software in the `examples' directory: works that are not resampling +examples but are based on the given examples -- for example, applications using +the library -- shall not be considered to be derivative works of the examples. + +2. If building with pffft.c, see the licence embedded in that file. diff --git a/dependencies64/ffmpeg/licenses/speex.txt b/dependencies64/ffmpeg/licenses/speex.txt new file mode 100644 index 000000000..de6fbe2c9 --- /dev/null +++ b/dependencies64/ffmpeg/licenses/speex.txt @@ -0,0 +1,35 @@ +Copyright 2002-2008 Xiph.org Foundation +Copyright 2002-2008 Jean-Marc Valin +Copyright 2005-2007 Analog Devices Inc. +Copyright 2005-2008 Commonwealth Scientific and Industrial Research + Organisation (CSIRO) +Copyright 1993, 2002, 2006 David Rowe +Copyright 2003 EpicGames +Copyright 1992-1994 Jutta Degener, Carsten Bormann + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +- Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +- Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +- Neither the name of the Xiph.org Foundation nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/dependencies64/ffmpeg/licenses/twolame.txt b/dependencies64/ffmpeg/licenses/twolame.txt new file mode 100644 index 000000000..b1e3f5a26 --- /dev/null +++ b/dependencies64/ffmpeg/licenses/twolame.txt @@ -0,0 +1,504 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. 
+ + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. 
We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. 
In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. 
A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. 
You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. 
But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. 
You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. 
+ + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. 
(It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. 
You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. 
Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. 
Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. 
Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. 
It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! + + diff --git a/dependencies64/ffmpeg/licenses/vid.stab.txt b/dependencies64/ffmpeg/licenses/vid.stab.txt new file mode 100644 index 000000000..a09e1dc74 --- /dev/null +++ b/dependencies64/ffmpeg/licenses/vid.stab.txt @@ -0,0 +1,16 @@ +In this project is open source in the sense of the GPL. 
+ + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation; either version 2 of the License, or * + * (at your option) any later version. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program; if not, write to the * + * Free Software Foundation, Inc., * + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * diff --git a/dependencies64/ffmpeg/licenses/vo-aacenc.txt b/dependencies64/ffmpeg/licenses/vo-aacenc.txt new file mode 100644 index 000000000..5ec4bf01e --- /dev/null +++ b/dependencies64/ffmpeg/licenses/vo-aacenc.txt @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the +copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other +entities that control, are controlled by, or are under common control with +that entity. For the purposes of this definition, "control" means (i) the +power, direct or indirect, to cause the direction or management of such +entity, whether by contract or otherwise, or (ii) ownership of fifty +percent (50%) or more of the outstanding shares, or (iii) beneficial +ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. 
+ +"Source" form shall mean the preferred form for making modifications, +including but not limited to software source code, documentation source, +and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation +or translation of a Source form, including but not limited to compiled +object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, +made available under the License, as indicated by a copyright notice that +is included in or attached to the work (an example is provided in the +Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, +that is based on (or derived from) the Work and for which the editorial +revisions, annotations, elaborations, or other modifications represent, as +a whole, an original work of authorship. For the purposes of this License, +Derivative Works shall not include works that remain separable from, or +merely link (or bind by name) to the interfaces of, the Work and Derivative +Works thereof. + +"Contribution" shall mean any work of authorship, including the original +version of the Work and any modifications or additions to that Work or +Derivative Works thereof, that is intentionally submitted to Licensor for +inclusion in the Work by the copyright owner or by an individual or Legal +Entity authorized to submit on behalf of the copyright owner. 
For the +purposes of this definition, "submitted" means any form of electronic, +verbal, or written communication sent to the Licensor or its +representatives, including but not limited to communication on electronic +mailing lists, source code control systems, and issue tracking systems that +are managed by, or on behalf of, the Licensor for the purpose of discussing +and improving the Work, but excluding communication that is conspicuously +marked or otherwise designated in writing by the copyright owner as "Not a +Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on +behalf of whom a Contribution has been received by Licensor and +subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this +License, each Contributor hereby grants to You a perpetual, worldwide, +non-exclusive, no-charge, royalty-free, irrevocable copyright license to +reproduce, prepare Derivative Works of, publicly display, publicly perform, +sublicense, and distribute the Work and such Derivative Works in Source or +Object form. + +3. Grant of Patent License. Subject to the terms and conditions of this +License, each Contributor hereby grants to You a perpetual, worldwide, +non-exclusive, no-charge, royalty-free, irrevocable (except as stated in +this section) patent license to make, have made, use, offer to sell, sell, +import, and otherwise transfer the Work, where such license applies only to +those patent claims licensable by such Contributor that are necessarily +infringed by their Contribution(s) alone or by combination of their +Contribution(s) with the Work to which such Contribution(s) was submitted. 
+If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or +contributory patent infringement, then any patent licenses granted to You +under this License for that Work shall terminate as of the date such +litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or +Derivative Works thereof in any medium, with or without modifications, and +in Source or Object form, provided that You meet the following conditions: + + 1. You must give any other recipients of the Work or Derivative Works a +copy of this License; and + + 2. You must cause any modified files to carry prominent notices stating +that You changed the files; and + + 3. You must retain, in the Source form of any Derivative Works that You +distribute, all copyright, patent, trademark, and attribution notices from +the Source form of the Work, excluding those notices that do not pertain to +any part of the Derivative Works; and + + 4. If the Work includes a "NOTICE" text file as part of its +distribution, then any Derivative Works that You distribute must include a +readable copy of the attribution notices contained within such NOTICE file, +excluding those notices that do not pertain to any part of the Derivative +Works, in at least one of the following places: within a NOTICE text file +distributed as part of the Derivative Works; within the Source form or +documentation, if provided along with the Derivative Works; or, within a +display generated by the Derivative Works, if and wherever such third-party +notices normally appear. The contents of the NOTICE file are for +informational purposes only and do not modify the License. 
You may add Your +own attribution notices within Derivative Works that You distribute, +alongside or as an addendum to the NOTICE text from the Work, provided that +such additional attribution notices cannot be construed as modifying the +License. + +You may add Your own copyright statement to Your modifications and may +provide additional or different license terms and conditions for use, +reproduction, or distribution of Your modifications, or for any such +Derivative Works as a whole, provided Your use, reproduction, and +distribution of the Work otherwise complies with the conditions stated in +this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any +Contribution intentionally submitted for inclusion in the Work by You to +the Licensor shall be under the terms and conditions of this License, +without any additional terms or conditions. Notwithstanding the above, +nothing herein shall supersede or modify the terms of any separate license +agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade +names, trademarks, service marks, or product names of the Licensor, except +as required for reasonable and customary use in describing the origin of +the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to +in writing, Licensor provides the Work (and each Contributor provides its +Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied, including, without limitation, any +warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or +FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for +determining the appropriateness of using or redistributing the Work and +assume any risks associated with Your exercise of permissions under this +License. + +8. Limitation of Liability. 
In no event and under no legal theory, whether +in tort (including negligence), contract, or otherwise, unless required by +applicable law (such as deliberate and grossly negligent acts) or agreed to +in writing, shall any Contributor be liable to You for damages, including +any direct, indirect, special, incidental, or consequential damages of any +character arising as a result of this License or out of the use or +inability to use the Work (including but not limited to damages for loss of +goodwill, work stoppage, computer failure or malfunction, or any and all +other commercial damages or losses), even if such Contributor has been +advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the +Work or Derivative Works thereof, You may choose to offer, and charge a fee +for, acceptance of support, warranty, indemnity, or other liability +obligations and/or rights consistent with this License. However, in +accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if +You agree to indemnify, defend, and hold each Contributor harmless for any +liability incurred by, or claims asserted against, such Contributor by +reason of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included +on the same "printed page" as the copyright notice for easier +identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain a + copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable + law or agreed to in writing, software distributed under the License is + distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the specific language + governing permissions and limitations under the License. diff --git a/dependencies64/ffmpeg/licenses/vo-amrwbenc.txt b/dependencies64/ffmpeg/licenses/vo-amrwbenc.txt new file mode 100644 index 000000000..5ec4bf01e --- /dev/null +++ b/dependencies64/ffmpeg/licenses/vo-amrwbenc.txt @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the +copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other +entities that control, are controlled by, or are under common control with +that entity. For the purposes of this definition, "control" means (i) the +power, direct or indirect, to cause the direction or management of such +entity, whether by contract or otherwise, or (ii) ownership of fifty +percent (50%) or more of the outstanding shares, or (iii) beneficial +ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. 
+ +"Source" form shall mean the preferred form for making modifications, +including but not limited to software source code, documentation source, +and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation +or translation of a Source form, including but not limited to compiled +object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, +made available under the License, as indicated by a copyright notice that +is included in or attached to the work (an example is provided in the +Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, +that is based on (or derived from) the Work and for which the editorial +revisions, annotations, elaborations, or other modifications represent, as +a whole, an original work of authorship. For the purposes of this License, +Derivative Works shall not include works that remain separable from, or +merely link (or bind by name) to the interfaces of, the Work and Derivative +Works thereof. + +"Contribution" shall mean any work of authorship, including the original +version of the Work and any modifications or additions to that Work or +Derivative Works thereof, that is intentionally submitted to Licensor for +inclusion in the Work by the copyright owner or by an individual or Legal +Entity authorized to submit on behalf of the copyright owner. 
For the +purposes of this definition, "submitted" means any form of electronic, +verbal, or written communication sent to the Licensor or its +representatives, including but not limited to communication on electronic +mailing lists, source code control systems, and issue tracking systems that +are managed by, or on behalf of, the Licensor for the purpose of discussing +and improving the Work, but excluding communication that is conspicuously +marked or otherwise designated in writing by the copyright owner as "Not a +Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on +behalf of whom a Contribution has been received by Licensor and +subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this +License, each Contributor hereby grants to You a perpetual, worldwide, +non-exclusive, no-charge, royalty-free, irrevocable copyright license to +reproduce, prepare Derivative Works of, publicly display, publicly perform, +sublicense, and distribute the Work and such Derivative Works in Source or +Object form. + +3. Grant of Patent License. Subject to the terms and conditions of this +License, each Contributor hereby grants to You a perpetual, worldwide, +non-exclusive, no-charge, royalty-free, irrevocable (except as stated in +this section) patent license to make, have made, use, offer to sell, sell, +import, and otherwise transfer the Work, where such license applies only to +those patent claims licensable by such Contributor that are necessarily +infringed by their Contribution(s) alone or by combination of their +Contribution(s) with the Work to which such Contribution(s) was submitted. 
+If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or +contributory patent infringement, then any patent licenses granted to You +under this License for that Work shall terminate as of the date such +litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or +Derivative Works thereof in any medium, with or without modifications, and +in Source or Object form, provided that You meet the following conditions: + + 1. You must give any other recipients of the Work or Derivative Works a +copy of this License; and + + 2. You must cause any modified files to carry prominent notices stating +that You changed the files; and + + 3. You must retain, in the Source form of any Derivative Works that You +distribute, all copyright, patent, trademark, and attribution notices from +the Source form of the Work, excluding those notices that do not pertain to +any part of the Derivative Works; and + + 4. If the Work includes a "NOTICE" text file as part of its +distribution, then any Derivative Works that You distribute must include a +readable copy of the attribution notices contained within such NOTICE file, +excluding those notices that do not pertain to any part of the Derivative +Works, in at least one of the following places: within a NOTICE text file +distributed as part of the Derivative Works; within the Source form or +documentation, if provided along with the Derivative Works; or, within a +display generated by the Derivative Works, if and wherever such third-party +notices normally appear. The contents of the NOTICE file are for +informational purposes only and do not modify the License. 
You may add Your +own attribution notices within Derivative Works that You distribute, +alongside or as an addendum to the NOTICE text from the Work, provided that +such additional attribution notices cannot be construed as modifying the +License. + +You may add Your own copyright statement to Your modifications and may +provide additional or different license terms and conditions for use, +reproduction, or distribution of Your modifications, or for any such +Derivative Works as a whole, provided Your use, reproduction, and +distribution of the Work otherwise complies with the conditions stated in +this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any +Contribution intentionally submitted for inclusion in the Work by You to +the Licensor shall be under the terms and conditions of this License, +without any additional terms or conditions. Notwithstanding the above, +nothing herein shall supersede or modify the terms of any separate license +agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade +names, trademarks, service marks, or product names of the Licensor, except +as required for reasonable and customary use in describing the origin of +the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to +in writing, Licensor provides the Work (and each Contributor provides its +Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied, including, without limitation, any +warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or +FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for +determining the appropriateness of using or redistributing the Work and +assume any risks associated with Your exercise of permissions under this +License. + +8. Limitation of Liability. 
In no event and under no legal theory, whether +in tort (including negligence), contract, or otherwise, unless required by +applicable law (such as deliberate and grossly negligent acts) or agreed to +in writing, shall any Contributor be liable to You for damages, including +any direct, indirect, special, incidental, or consequential damages of any +character arising as a result of this License or out of the use or +inability to use the Work (including but not limited to damages for loss of +goodwill, work stoppage, computer failure or malfunction, or any and all +other commercial damages or losses), even if such Contributor has been +advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the +Work or Derivative Works thereof, You may choose to offer, and charge a fee +for, acceptance of support, warranty, indemnity, or other liability +obligations and/or rights consistent with this License. However, in +accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if +You agree to indemnify, defend, and hold each Contributor harmless for any +liability incurred by, or claims asserted against, such Contributor by +reason of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included +on the same "printed page" as the copyright notice for easier +identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain a + copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable + law or agreed to in writing, software distributed under the License is + distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the specific language + governing permissions and limitations under the License. diff --git a/dependencies64/ffmpeg/licenses/wavpack.txt b/dependencies64/ffmpeg/licenses/wavpack.txt new file mode 100644 index 000000000..6ffc23b93 --- /dev/null +++ b/dependencies64/ffmpeg/licenses/wavpack.txt @@ -0,0 +1,25 @@ + Copyright (c) 1998 - 2009 Conifer Software + All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Conifer Software nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/dependencies64/ffmpeg/licenses/x264.txt b/dependencies64/ffmpeg/licenses/x264.txt new file mode 100644 index 000000000..d60c31a97 --- /dev/null +++ b/dependencies64/ffmpeg/licenses/x264.txt @@ -0,0 +1,340 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. 
You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. 
+ +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. 
However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. 
+You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. 
diff --git a/dependencies64/ffmpeg/licenses/x265.txt b/dependencies64/ffmpeg/licenses/x265.txt new file mode 100644 index 000000000..18c946f70 --- /dev/null +++ b/dependencies64/ffmpeg/licenses/x265.txt @@ -0,0 +1,343 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. 
You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. 
The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. 
(Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. 
You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. 
+ +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. 
If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. + +This program is also available under a commercial proprietary license. 
+For more information, contact us at licensing@multicorewareinc.com. diff --git a/dependencies64/ffmpeg/licenses/xavs.txt b/dependencies64/ffmpeg/licenses/xavs.txt new file mode 100644 index 000000000..94a9ed024 --- /dev/null +++ b/dependencies64/ffmpeg/licenses/xavs.txt @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. 
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. 
+ + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/dependencies64/ffmpeg/licenses/xvid.txt b/dependencies64/ffmpeg/licenses/xvid.txt new file mode 100644 index 000000000..14db8fc79 --- /dev/null +++ b/dependencies64/ffmpeg/licenses/xvid.txt @@ -0,0 +1,340 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. 
You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. 
The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. 
(Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. 
You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. 
+ +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. 
If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. 
diff --git a/dependencies64/ffmpeg/licenses/zlib.txt b/dependencies64/ffmpeg/licenses/zlib.txt new file mode 100644 index 000000000..efa9848d0 --- /dev/null +++ b/dependencies64/ffmpeg/licenses/zlib.txt @@ -0,0 +1,26 @@ +/* zlib.h -- interface of the 'zlib' general purpose compression library + version 1.2.7, May 2nd, 2012 + + Copyright (C) 1995-2012 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. 
+ + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu + +*/ + diff --git a/dependencies64/ffmpeg/presets/ffprobe.xsd b/dependencies64/ffmpeg/presets/ffprobe.xsd new file mode 100644 index 000000000..1bc1fb5bb --- /dev/null +++ b/dependencies64/ffmpeg/presets/ffprobe.xsd @@ -0,0 +1,260 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/dependencies64/ffmpeg/presets/libvpx-1080p.ffpreset b/dependencies64/ffmpeg/presets/libvpx-1080p.ffpreset new file mode 100644 index 000000000..cf2593210 --- /dev/null +++ b/dependencies64/ffmpeg/presets/libvpx-1080p.ffpreset @@ -0,0 +1,19 @@ +vcodec=libvpx + +g=120 +lag-in-frames=16 +deadline=good +cpu-used=0 +vprofile=1 +qmax=51 +qmin=11 +slices=4 +b=2M + +#ignored unless using -pass 2 +maxrate=24M +minrate=100k +auto-alt-ref=1 +arnr-maxframes=7 +arnr-strength=5 +arnr-type=centered diff --git a/dependencies64/ffmpeg/presets/libvpx-1080p50_60.ffpreset b/dependencies64/ffmpeg/presets/libvpx-1080p50_60.ffpreset new file mode 100644 index 000000000..4a88040d3 --- /dev/null +++ b/dependencies64/ffmpeg/presets/libvpx-1080p50_60.ffpreset @@ -0,0 +1,19 @@ +vcodec=libvpx + +g=120 +lag-in-frames=25 +deadline=good +cpu-used=0 +vprofile=1 +qmax=51 +qmin=11 +slices=4 +b=2M + +#ignored unless using -pass 2 +maxrate=24M +minrate=100k +auto-alt-ref=1 +arnr-maxframes=7 +arnr-strength=5 +arnr-type=centered diff --git a/dependencies64/ffmpeg/presets/libvpx-360p.ffpreset b/dependencies64/ffmpeg/presets/libvpx-360p.ffpreset new file mode 100644 index 
000000000..f9729ba2b --- /dev/null +++ b/dependencies64/ffmpeg/presets/libvpx-360p.ffpreset @@ -0,0 +1,18 @@ +vcodec=libvpx + +g=120 +lag-in-frames=16 +deadline=good +cpu-used=0 +vprofile=0 +qmax=63 +qmin=0 +b=768k + +#ignored unless using -pass 2 +maxrate=1.5M +minrate=40k +auto-alt-ref=1 +arnr-maxframes=7 +arnr-strength=5 +arnr-type=centered diff --git a/dependencies64/ffmpeg/presets/libvpx-720p.ffpreset b/dependencies64/ffmpeg/presets/libvpx-720p.ffpreset new file mode 100644 index 000000000..e84cc150c --- /dev/null +++ b/dependencies64/ffmpeg/presets/libvpx-720p.ffpreset @@ -0,0 +1,19 @@ +vcodec=libvpx + +g=120 +lag-in-frames=16 +deadline=good +cpu-used=0 +vprofile=0 +qmax=51 +qmin=11 +slices=4 +b=2M + +#ignored unless using -pass 2 +maxrate=24M +minrate=100k +auto-alt-ref=1 +arnr-maxframes=7 +arnr-strength=5 +arnr-type=centered diff --git a/dependencies64/ffmpeg/presets/libvpx-720p50_60.ffpreset b/dependencies64/ffmpeg/presets/libvpx-720p50_60.ffpreset new file mode 100644 index 000000000..8fce2bfb5 --- /dev/null +++ b/dependencies64/ffmpeg/presets/libvpx-720p50_60.ffpreset @@ -0,0 +1,19 @@ +vcodec=libvpx + +g=120 +lag-in-frames=25 +deadline=good +cpu-used=0 +vprofile=0 +qmax=51 +qmin=11 +slices=4 +b=2M + +#ignored unless using -pass 2 +maxrate=24M +minrate=100k +auto-alt-ref=1 +arnr-maxframes=7 +arnr-strength=5 +arnr-type=centered diff --git a/dependencies64/ffmpeg/presets/libx264-ipod320.ffpreset b/dependencies64/ffmpeg/presets/libx264-ipod320.ffpreset new file mode 100644 index 000000000..76722bd6d --- /dev/null +++ b/dependencies64/ffmpeg/presets/libx264-ipod320.ffpreset @@ -0,0 +1,6 @@ +vcodec=libx264 + +vprofile=baseline +level=13 +maxrate=768000 +bufsize=3000000 diff --git a/dependencies64/ffmpeg/presets/libx264-ipod640.ffpreset b/dependencies64/ffmpeg/presets/libx264-ipod640.ffpreset new file mode 100644 index 000000000..51f756427 --- /dev/null +++ b/dependencies64/ffmpeg/presets/libx264-ipod640.ffpreset @@ -0,0 +1,6 @@ +vcodec=libx264 + 
+vprofile=baseline +level=30 +maxrate=10000000 +bufsize=10000000 diff --git a/shell/shell.vcxproj b/shell/shell.vcxproj index 78088c4d1..d38bb3f61 100644 --- a/shell/shell.vcxproj +++ b/shell/shell.vcxproj @@ -209,7 +209,7 @@ copy "$(ProjectDir)casparcg_auto_restart.bat" "$(OutDir)" common/compiler/vs/disable_silly_warnings.h - true + false alibcof64.lib;freetype250.lib;sfml-system.lib;sfml-window.lib;sfml-graphics.lib;Winmm.lib;Ws2_32.lib;avformat.lib;avcodec.lib;avutil.lib;avfilter.lib;swscale.lib;swresample.lib;tbb.lib;OpenGL32.lib;glew32.lib;openal32.lib;freeimage.lib %(AdditionalLibraryDirectories) true