git.sesse.net Git - casparcg/commitdiff
Merge branch '2.1.0' of https://github.com/aruanoc/Server into 2.1.0
author Antonio Ruano Cuesta <antonio@fightmetric.com>
Wed, 11 Jan 2017 17:20:36 +0000 (12:20 -0500)
committer Antonio Ruano Cuesta <antonio@fightmetric.com>
Wed, 11 Jan 2017 17:20:36 +0000 (12:20 -0500)
116 files changed:
CHANGELOG
CMakeLists.txt
README
accelerator/cpu/image/image_mixer.cpp
accelerator/cpu/image/image_mixer.h
accelerator/ogl/image/blending_glsl.h
accelerator/ogl/image/image_mixer.cpp
accelerator/ogl/image/image_mixer.h
build-scripts/build-linux.sh
build-scripts/build-windows.bat
common/assert.h
common/env.cpp
common/env.h
common/except.h
common/executor.h
common/filesystem.cpp
common/future.h
common/gl/gl_check.cpp
common/gl/gl_check.h
common/log.cpp
common/log.h
common/memory.h
common/os/linux/signal_handlers.cpp
common/os/linux/stack_trace.cpp
common/os/stack_trace.h
common/os/windows/stack_trace.cpp
common/param.h
common/polling_filesystem_monitor.cpp
common/semaphore.h
core/CMakeLists.txt
core/consumer/frame_consumer.cpp
core/consumer/frame_consumer.h
core/consumer/output.cpp
core/consumer/output.h
core/consumer/port.cpp
core/consumer/port.h
core/consumer/syncto/syncto_consumer.cpp [new file with mode: 0644]
core/consumer/syncto/syncto_consumer.h [moved from modules/ffmpeg/ffmpeg_pipeline_backend_internal.h with 77% similarity]
core/frame/frame.cpp
core/frame/frame.h
core/frame/frame_factory.h
core/frame/frame_transform.h
core/producer/frame_producer.cpp
core/producer/framerate/framerate_producer.cpp
core/producer/framerate/framerate_producer.h
core/producer/layer.cpp
core/producer/transition/transition_producer.cpp
core/video_channel.cpp
core/video_channel.h
core/video_format.cpp
core/video_format.h
dependencies64/ffmpeg/bin/linux/libavcodec.so.56.41.100
dependencies64/ffmpeg/bin/linux/libavdevice.so.56.4.100
dependencies64/ffmpeg/bin/linux/libavfilter.so.5.16.101
dependencies64/ffmpeg/bin/linux/libavformat.so.56.36.100
dependencies64/ffmpeg/bin/linux/libavutil.so.54.27.100
dependencies64/ffmpeg/bin/linux/libpostproc.so.53.3.100
dependencies64/ffmpeg/bin/linux/libswresample.so.1.2.100
dependencies64/ffmpeg/bin/linux/libswscale.so.3.1.101
modules/CMakeLists.txt
modules/bluefish/consumer/bluefish_consumer.cpp
modules/bluefish/consumer/bluefish_consumer.h
modules/decklink/consumer/decklink_consumer.cpp
modules/decklink/consumer/decklink_consumer.h
modules/decklink/producer/decklink_producer.cpp
modules/ffmpeg/CMakeLists.txt
modules/ffmpeg/audio_channel_remapper.cpp
modules/ffmpeg/consumer/ffmpeg_consumer.cpp
modules/ffmpeg/consumer/ffmpeg_consumer.h
modules/ffmpeg/consumer/streaming_consumer.cpp [deleted file]
modules/ffmpeg/consumer/streaming_consumer.h [deleted file]
modules/ffmpeg/ffmpeg.cpp
modules/ffmpeg/ffmpeg.h
modules/ffmpeg/ffmpeg_error.cpp
modules/ffmpeg/ffmpeg_error.h
modules/ffmpeg/ffmpeg_pipeline.cpp [deleted file]
modules/ffmpeg/ffmpeg_pipeline.h [deleted file]
modules/ffmpeg/ffmpeg_pipeline_backend.h [deleted file]
modules/ffmpeg/ffmpeg_pipeline_backend_internal.cpp [deleted file]
modules/ffmpeg/producer/audio/audio_decoder.cpp
modules/ffmpeg/producer/audio/audio_decoder.h
modules/ffmpeg/producer/ffmpeg_producer.cpp
modules/ffmpeg/producer/filter/audio_filter.cpp
modules/ffmpeg/producer/filter/audio_filter.h
modules/ffmpeg/producer/filter/filter.cpp
modules/ffmpeg/producer/filter/filter.h
modules/ffmpeg/producer/input/input.cpp
modules/ffmpeg/producer/input/input.h
modules/ffmpeg/producer/muxer/display_mode.h
modules/ffmpeg/producer/muxer/frame_muxer.cpp
modules/ffmpeg/producer/muxer/frame_muxer.h
modules/ffmpeg/producer/util/util.cpp
modules/ffmpeg/producer/util/util.h
modules/ffmpeg/producer/video/video_decoder.cpp
modules/ffmpeg/producer/video/video_decoder.h
modules/flash/flash.cpp
modules/flash/producer/flash_producer.cpp
modules/html/html.cpp
modules/image/consumer/image_consumer.cpp
modules/image/consumer/image_consumer.h
modules/image/producer/image_producer.cpp
modules/image/producer/image_scroll_producer.cpp
modules/newtek/consumer/newtek_ivga_consumer.cpp
modules/newtek/consumer/newtek_ivga_consumer.h
modules/oal/consumer/oal_consumer.cpp
modules/oal/consumer/oal_consumer.h
modules/psd/layer.cpp
modules/screen/consumer/screen_consumer.cpp
modules/screen/consumer/screen_consumer.h
protocol/amcp/AMCPCommandsImpl.cpp
protocol/util/AsyncEventServer.cpp
shell/casparcg.config
shell/main.cpp
shell/server.cpp
shell/shell.rc
version.tmpl

index 65e6626261f7238777c346953f43e2b5e3d90bdd..24901f20673a9d1070c1707d507b6c21ec8fce37 100644 (file)
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,14 +1,81 @@
-CasparCG 2.1.0 (w.r.t 2.0 Stable)\r
-=================================\r
+C H A N G E S\r
+\r
+CasparCG 2.1.0 Next (w.r.t 2.1.0 Beta 1)\r
+========================================\r
+\r
+General\r
+-------\r
+\r
+  o Fail early with clear error message if configured paths are not\r
+    creatable/writable.\r
+  o Added backwards compatibility (with deprecation warning) for using\r
+    thumbnails-path instead of thumbnail-path in casparcg.config.\r
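+    For example (sketch; only the element names come from this entry, the\r
+    enclosing <paths> element and the value are assumptions):\r
+\r
+    <paths>\r
+      <thumbnail-path>thumbnail/</thumbnail-path>\r
+      <!-- <thumbnails-path/> still accepted, but logs a deprecation warning -->\r
+    </paths>\r
+\r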
+  o Suppress the logging of full path names in stack traces so that only the\r
+    relative path within the source tree is visible.\r
+  o General stability improvements.\r
+  o Native thread ids are now logged in Linux as well. Finally they are mappable\r
+    against INFO THREADS, ps and top.\r
+  o Created an automatically generated build number, so that it is easier to\r
+    see whether a build is newer or older than another.\r
+  o Changed configuration element mipmapping_default_on to mipmapping-default-on\r
+    for consistency with the rest of the configuration (Jesper Stærkær).\r
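+    For example (sketch; the element name comes from this entry, placement\r
+    under <mixer> is an assumption):\r
+\r
+    <mixer>\r
+      <mipmapping-default-on>false</mipmapping-default-on>\r
+    </mixer>\r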
+\r
+Consumers\r
+---------\r
+\r
+  o FFmpeg consumer:\r
+    + Fixed long overdue bug where HD material was always recorded using the\r
+      BT.601 color matrix instead of the BT.709 color matrix. RGB codecs like\r
+      qtrle were never affected, but all the YCbCr based codecs were.\r
+\r
+Producers\r
+---------\r
+\r
+  o FFmpeg producer:\r
+    + Increased the max number of frames that audio/video can be badly\r
+      interleaved with (Dimitry Ishenko).\r
+\r
+Mixer\r
+-----\r
+\r
+  o Fixed bug in the contrast/saturation/brightness code where the wrong luma\r
+    coefficients were used.\r
+\r
+AMCP\r
+----\r
+\r
+  o INFO PATHS now adds all the path elements even if they are using the default\r
+    values.\r
+\r
+CasparCG 2.1.0 Beta 1 (w.r.t 2.0.7 Stable)\r
+==========================================\r
 \r
 General\r
 -------\r
 \r
   o 64 bit!\r
+  o Linux support!\r
+    + Moved to CMake build system for better platform independence.\r
+      + Contributions before build system switch (not w.r.t 2.0.7 Stable):\r
+        + gitrev.bat adaptions for 2.1 (Thomas Kaltz III).\r
+    + Thanks to our already heavy use of the pimpl idiom, abstracting platform\r
+      specifics was easily done by having different versions of the .cpp files\r
+      included in the build depending on target platform. No #ifdef necessary,\r
+      except in header-only platform specific code.\r
+    + Flash, Bluefish and NewTek modules are not ported to the Linux build.\r
+    + Contributions during development (not w.r.t 2.0.7 Stable):\r
+      + Fixed compilation problems in Linux build (Dimitry Ishenko).\r
+      + Fixed compilation problem in GCC 5 (Krzysztof Pyrkosz).\r
+      + Fixed thumbnail image saving on Linux (Krzysztof Pyrkosz).\r
+      + Fixed compilation problem in PSD module (Krzysztof Pyrkosz).\r
   o Major code refactoring:\r
     + Mixer abstraction so different implementations can be created. Currently\r
-      CPU mixer and GPU mixer (previously the usage of the GPU was ) exists.\r
+      CPU mixer and GPU mixer (previously the usage of the GPU was mandatory)\r
+      exists.\r
     + Flattened folder structure for easier inclusion of header files.\r
+    + Many classes renamed to better describe the abstractions they provide.\r
+    + Sink parameters usually taken by value and moved into place instead of\r
+      taken by const reference as previously done.\r
     + Old Windows specific AsyncEventServer class has been replaced by platform\r
       independent implementation based on Boost.Asio.\r
     + Pimpl classes are now stack allocated with internal shared_ptr to\r
@@ -17,29 +84,657 @@ General
       of via safe_ptr/shared_ptr, because they are internally reference counted.\r
     + Protocol strategies are now easier to implement correctly, because of\r
       separation of state between different client connections.\r
-    + Much more...\r
-  o Data files are now stored in UTF-8 with BOM. Latin1 files are still\r
-    supported for backwards compatibility\r
-  o Commands written in UTF-8 to log file but only ascii characters to console.\r
-  o Added supported video formats:\r
-    + 720p2398 (not supported by decklink)\r
-    + 720p2400 (not supported by decklink)\r
-    + 1080p5994\r
-    + 1080p6000\r
-    + 720p30 (not supported by decklink)\r
-    + 720p29.976 (not supported by decklink)\r
+    + Complete AMCP command refactoring.\r
+    + On-line help system that forces the developer to document AMCP commands,\r
+      producer syntaxes and consumer syntaxes making the documentation coupled\r
+      to the code, which is great.\r
+      + Added missing help for VERSION command (Jesper Stærkær).\r
+    + Upgraded Windows build to target Visual Studio 2015 making it possible to\r
+      use the C++11 features also supported by GCC 4.8 which is targeted on\r
+      Linux.\r
+      + Fixed compilation problems in Visual Studio 2015 Update 1\r
+        (Roman Tarasov)\r
+    + Created abstraction of the different forms of templates (flash, html, psd\r
+      and scene). Each module registers itself as a CG producer provider. All\r
+      CG commands transparently work with all of them.\r
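+      Example (standard CG ADD syntax; the template name is illustrative): the\r
+      same command, e.g. CG 1-10 ADD 1 "lowerthird" 1, works whether\r
+      "lowerthird" is a Flash, HTML, PSD or scene template.\r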
+    + Audio mixer now uses double samples instead of float samples to fully\r
+      accommodate all int32 samples.\r
+    + Reduced coupling between core and modules (and modules and modules):\r
+      + Modules can register system info providers to contribute to INFO SYSTEM.\r
+      + XML configuration factories for adding support for new consumer elements\r
+        in casparcg.config.\r
+      + Server startup hooks can be registered (used by HTML producer to fork\r
+        its sub process).\r
+      + Version providers can contribute content to the VERSION command.\r
+  o Refactored multichannel audio support to use FFmpeg's PAN filter and\r
+    simplified the configuration a lot.\r
+  o Upgraded most third party libraries we depend on.\r
+  o Some unit tests have been created.\r
+  o Renamed README.txt to README, CHANGES.txt to CHANGELOG and LICENSE.txt to\r
+    LICENSE\r
+  o Created README.md for github front page in addition to README which is\r
+    distributed with builds.\r
+  o README file updates (Jonas Hummelstrand).\r
+  o Created BUILDING file describing how to build the server on Windows and\r
+    Linux.\r
+  o Diagnostics:\r
+    + Now also sent over OSC.\r
+    + Diag window is now scrollable and without squeezing of graphs.\r
+    + Contextual information such as video channel and video layer now included\r
+      in graphs.\r
+  o Logging:\r
+    + Implemented a TCP server, simply sending every log line to each connected\r
+      client. Default port is 3250.\r
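+      Example (netcat shown as one possible client; any plain TCP client\r
+      works): nc 127.0.0.1 3250\r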
+    + Changed default log level to info and moved debug statements that are\r
+      interesting in a production system to info.\r
+    + Try to not log full stack traces when user error is the cause. Stacktraces\r
+      should ideally only be logged when a system error or a programming error\r
+      has occurred.\r
+    + More contextual information about an error added to exceptions. An example\r
+      of this is that XML configuration errors now cause the XPath of the error\r
+      to be logged.\r
+    + Improved the readability of the log format.\r
+    + Added optional calltrace.log for logging method calls. Allows for trace\r
+      logging to be enabled while calltracing is disabled etc.\r
 \r
 OSC\r
 ---\r
 \r
-  o Technical information about the current state of the server is published via\r
-    the OSC protocol via different subjects.\r
-  o Examples of information published:\r
-    + Different performance metrics.\r
-    + Producer names\r
-    + Producer specific information\r
-    + Consumer names.\r
-    + Consumer specific information.\r
+  o Improved message formatting performance.\r
+  o Added possibility to disable sending OSC to connected AMCP clients.\r
+  o Fixed inconsistent element name predefined_client to predefined-client in\r
+    casparcg.config (Krzysztof Pyrkosz).\r
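+    Example (sketch; only the predefined-client element name comes from this\r
+    entry, the surrounding nesting, address and port values are assumptions):\r
+\r
+    <osc>\r
+      <predefined-clients>\r
+        <predefined-client>\r
+          <address>127.0.0.1</address>\r
+          <port>5253</port>\r
+        </predefined-client>\r
+      </predefined-clients>\r
+    </osc>\r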
+\r
+Consumers\r
+---------\r
+\r
+  o System audio consumer:\r
+    + Pushes data to openal instead of being callbacked by SFML when data is\r
+      needed.\r
+    + Added possibility to specify the expected delay in the sound card. Might\r
+      help get better consumer synchronization.\r
+  o Screen consumer:\r
+    + Added mouse interaction support, usable by the producers running on the\r
+      video channel.\r
+  o FFmpeg consumer:\r
+    + Replaced by Streaming Consumer after it was adapted to support everything\r
+      that FFmpeg Consumer did.\r
+    + Added support for recording all audio channels into separate mono audio\r
+      streams.\r
+    + Now sends recording progress via OSC.\r
+  o SyncTo consumer:\r
+    + New in 2.1.0.\r
+    + Allows the pace of a channel to follow another channel. This is useful for\r
+      virtual "precomp" channels without a DeckLink consumer to pace it.\r
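+      Example (syntax sketched from this description, not verified against the\r
+      on-line help): ADD 2 SYNCTO 1 would pace channel 2 from channel 1.\r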
+  o DeckLink consumer:\r
+    + Added workaround for timescale bug found in Decklink SDK 10.7.\r
+    + Now ScheduledFrameCompleted is no longer only used for video scheduling\r
+      but for audio as well, simplifying the code a lot.\r
+  o iVGA consumer:\r
+    + No longer provides sync to the video channel.\r
+    + Supports NewTek NDI out of the box just by upgrading the\r
+      Processing.AirSend library.\r
+  \r
+Producers\r
+---------\r
+\r
+  o Scene producer:\r
+    + New in 2.1.0.\r
+    + Utilizes CasparCG concepts such as producers, mixer transforms and uses\r
+      them in a nested way to form an infinite number of sub layers. Think\r
+      movie clip in Flash.\r
+    + A scene consists of variables, layers, timelines and marks (intro and\r
+      outro for example).\r
+    + Mostly for use by other producers but comes with an XML based producer\r
+      that is a registered CG producer and shows up in TLS.\r
+    + Enables frame accurate compositions and animations.\r
+    + Has a powerful variable binding system (think expressions in After Effects\r
+      or JavaFX Bindings).\r
+  o PSD producer:\r
+    + New in 2.1.0.\r
+    + Parses PSD files and sets up a scene for the Scene producer to display.\r
+    + Text layers based on CG parameters.\r
+    + Supports Photoshop timeline.\r
+    + Uses Photoshop comment key-frames to describe where intro and outro (CG\r
+      PLAY and CG STOP) should be in the timeline.\r
+    + Shows up as regular templates in TLS.\r
+  o Text producer:\r
+    + New in 2.1.0.\r
+    + Renders text using FreeType library.\r
+    + Is used by the PSD producer for dynamic text layers.\r
+  o Image scroll producer:\r
+    + Speed can be changed while running using a CALL. The speed change can be\r
+      tweened.\r
+    + Added support for an absolute end time so that the duration is calculated\r
+      based on when PLAY is called, for shows where an exact end time is\r
+      important.\r
+  o Image producer:\r
+    + Fixed bug where too large (OpenGL limit) images were accepted, causing\r
+      problems during thumbnail generation.\r
+  o Framerate producer:\r
+    + New in 2.1.0.\r
+    + Wraps a producer with one framerate and converts it to another. It is not\r
+      usable on its own but is utilized in the FFmpeg producer and the DeckLink\r
+      consumer.\r
+    + Supports different interpolation algorithms. Currently a no-op\r
+      drop-and-repeat mode and two different frame blending modes.\r
+    + It also supports changing the speed on demand with tweening support.\r
+  o FFmpeg producer:\r
+    + Supports decoding all audio streams from a clip. Useful with .mxf files\r
+      which usually have separate mono streams for every audio channel.\r
+    + No longer does framerate conversion (half or double), but delegates that\r
+      task to the Framerate producer.\r
+    + Added support for v4l2 devices.\r
+    + Added relative and "from end" seeking (Dimitry Ishenko).\r
+    + Contributions during development (not w.r.t 2.0.7 Stable):\r
+      + Fixed 100% CPU problem on clip EOF (Peter Keuter, Robert Nagy).\r
+      + Constrained SEEK within the length of a clip (Dimitry Ishenko).\r
+      + Fixed a regular expression (Dimitry Ishenko).\r
+  o DeckLink producer:\r
+    + No longer does framerate conversion (half or double), but delegates that\r
+      task to the Framerate producer.\r
+  o Route producer:\r
+    + Added possibility to delay frames routed from a layer or a channel.\r
+  o HTML Producer:\r
+    + Disabled web security in HTML Producer (Robert Nagy).\r
+    + Reimplemented requestAnimationFrame handling in Javascript instead of C++.\r
+    + Implemented cancelAnimationFrame.\r
+    + Increased animation smoothness in HTML Producer with interlaced video\r
+      modes.\r
+    + Added remote debugging support.\r
+    + Added mouse interaction support by utilizing the Screen consumer's new\r
+      interaction support.\r
+  o Flash Producer:\r
+    + Contributions during development (not w.r.t 2.0.7 Stable):\r
+      + Workaround for flickering with high CPU usage and CPU accelerator\r
+        (Robert Nagy)\r
+\r
+AMCP\r
+----\r
+\r
+  o TLS has a new column for "template type" for clients that want to\r
+    differentiate between html and flash for example.\r
+  o SET CHANNEL_LAYOUT added to be able to change the audio channel layout of a\r
+    video channel at runtime.\r
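+    Example (syntax and layout name assumed from the description above):\r
+    SET 1 CHANNEL_LAYOUT 5.1\r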
+  o HELP command added for accessing the new on-line help system.\r
+  o FLS added to list the fonts usable by the Text producer.\r
+  o LOCK command added for controlling/gaining exclusive access to a video\r
+    channel.\r
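+    Example (the ACQUIRE/RELEASE actions and the lock phrase are assumptions;\r
+    check HELP LOCK): LOCK 1 ACQUIRE secret ... LOCK 1 RELEASE\r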
+  o LOG CATEGORY command added to enable/disable the new log categories.\r
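+    Example (category name and the 0/1 flag are assumptions based on the\r
+    calltrace entry above; check HELP LOG CATEGORY): LOG CATEGORY calltrace 0\r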
+  o SWAP command now optionally supports swapping the transforms as well as the\r
+    layers.\r
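+    Example (the TRANSFORMS keyword is an assumption; check HELP SWAP):\r
+    SWAP 1-1 2-5 TRANSFORMS\r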
+  o VERSION command can now provide CEF version.\r
+\r
+\r
+\r
+CasparCG Server 2.0.7 Stable (as compared to CasparCG Server 2.0.7 Beta 2)\r
+==========================================================================\r
+\r
+General\r
+-------\r
+\r
+  o Added support for using a different configuration file at startup than the\r
+    default casparcg.config by simply adding the name of the file to use as the\r
+    first command line argument to casparcg.exe.\r
+  o Upgraded FFmpeg to latest stable.\r
+  o Created build script.\r
+  o Fixed bug where both layer_producer and channel_producer displayed an\r
+    empty/late first frame when the producer is called before the consumer in\r
+    the other end has received the first frame.\r
+  o Added rudimentary support for audio for layer_producer and channel_producer.\r
+  o Upgraded DeckLink SDK to 10.1.4, bringing new 2K and 4K DCI video modes. New\r
+    template hosts also available for those modes.\r
+  o General bug fixes (mostly memory and resource leaks, some serious).\r
+  o Updated Boost to version 1.57\r
+  o Frontend no longer maintained and therefore not included in the release.\r
+\r
+Mixer\r
+-----\r
+\r
+  o Added support for rotation.\r
+  o Added support for changing the anchor point around which fill_translation,\r
+    fill_scale and rotation will be done.\r
+  o Added support for perspective correct corner pinning.\r
+  o Added support for mipmapped textures with anisotropic filtering for\r
+    increased downscaling quality. Whether to enable by default can be\r
+    configured in casparcg.config.\r
+  o Added support for cropping a layer. Not the same as clipping.\r
+\r
+AMCP\r
+----\r
+\r
+  o Added RESUME command to complement PAUSE. (Peter Keuter)\r
+  o To support the new mixer features the following commands have been added:\r
+\r
+    + MIXER ANCHOR -- will return or modify the anchor point for a layer\r
+      (default is 0 0 for backwards compatibility). Example:\r
+      MIXER 1-10 ANCHOR 0.5 0.5\r
+      ...for changing the anchor to the middle of the layer\r
+      (a MIXER 1-10 FILL 0.5 0.5 1 1 will be necessary to place the layer at the\r
+      same place on screen as it was before).\r
+\r
+    + MIXER ROTATION -- will return or modify the angle by which a layer is\r
+      rotated (clockwise degrees) around the point specified by ANCHOR.\r
+\r
+    + MIXER PERSPECTIVE -- will return or modify the corners of the perspective\r
+      transformation of a layer. One X Y pair for each corner (order upper left,\r
+      upper right, lower right and lower left). Example:\r
+      MIXER 1-10 PERSPECTIVE 0.4 0.4 0.6 0.4 1 1 0 1\r
+\r
+    + MIXER MIPMAP -- will return or modify whether to enable mipmapping of\r
+      textures produced on a layer. Only frames produced after a change will be\r
+      affected. So for example image_producer will not be affected while the\r
+      image is displayed.\r
+\r
+    + MIXER CROP -- will return or modify how textures on a layer will be\r
+      cropped. One X Y pair each for the upper left corner and for the lower\r
+      right corner.\r
+\r
+  o Added INFO QUEUES command for debugging AMCP command queues. Useful for\r
+    debugging command queue overflows, where a command is deadlocked. Hopefully\r
+    always accessible via console, even though the TCP command queue may be\r
+    full.\r
+  o Added GL command:\r
+    - GL INFO prints information about device buffers and host buffers.\r
+    - GL GC garbage collects pooled but unused GL resources.\r
+  o Added INFO THREADS command listing the known threads and their descriptive\r
+    names. Can be matched against the thread id column of log entries.\r
+\r
+Consumers\r
+---------\r
+\r
+  o Removed blocking_decklink_consumer. It was more like an experiment at best\r
+    and its usefulness was questionable.\r
+  o Added a 10 second time-out for consumer sends, to detect/recover from\r
+    blocked consumers.\r
+  o Some consumers which are usually added and removed during playout (for\r
+    example ffmpeg_consumer, streaming_consumer and channel_consumer) no longer\r
+    affect the presentation time on other consumers. Previously a lag on the SDI\r
+    output could be seen when adding such consumers.\r
+\r
+HTML producer\r
+-------------\r
+\r
+  o No longer tries to play all files with a . in their name.\r
+    (Georgi Chorbadzhiyski)\r
+  o Reimplemented using CEF3 instead of Berkelium, which enables use of WebGL\r
+    and more. CEF3 is actively maintained, which Berkelium is not. (Robert Nagy)\r
+  o Implements a custom version of window.requestAnimationFrame which will\r
+    follow the pace of the channel, for perfectly smooth animations.\r
+  o No longer manually interlaces frames, to allow for mixer fill transforms\r
+    without artifacts.\r
+  o Now uses CEF3 event loop to avoid 100% CPU core usage.\r
+\r
+\r
+\r
+CasparCG Server 2.0.7 Beta 2 (as compared to CasparCG Server 2.0.7 Beta 1)\r
+==========================================================================\r
+\r
+General\r
+-------\r
+\r
+  o Added sending of OSC messages for channel_grid channel in addition to\r
+    regular channels.\r
+\r
+Producers\r
+---------\r
+\r
+  o FFmpeg: Reports correct nb_frames() when using SEEK (Thomas Kaltz III)\r
+  o Flash: Fixed bug where CG PLAY, CG INVOKE did not work.\r
+\r
+Consumers\r
+---------\r
+\r
+  o channel_consumer: Added support for more than one channel_consumer per\r
+    channel.\r
+  o decklink_consumer: Added support for a single instance of the consumer to\r
+    manage a separate key output for use with DeckLink Duo/Quad cards:\r
+\r
+    <decklink>\r
+      <device>1</device>\r
+      <key-device>2</key-device>\r
+      <keyer>external_separate_device</keyer>\r
+    </decklink>\r
+\r
+    ...in the configuration will enable the feature. The value of <key-device />\r
+    defaults to the value of <device /> + 1.\r
+  o synchronizing_consumer: Removed in favour of a single decklink_consumer\r
+    managing both fill and key device.\r
+  o streaming_consumer: A new implementation of ffmpeg_consumer with added\r
+    support for streaming and other PTS dependent protocols. Examples:\r
+\r
+    <stream>\r
+      <path>udp://localhost:5004</path>\r
+      <args>-vcodec libx264 -tune zerolatency -preset ultrafast -crf 25 -format mpegts -vf scale=240:180</args>\r
+    </stream>\r
+\r
+    ...in configuration or:\r
+\r
+    ADD 1 STREAM udp://localhost:5004 -vcodec libx264 -tune zerolatency -preset ultrafast -crf 25 -format mpegts -vf scale=240:180\r
+\r
+    ...via AMCP. (Robert Nagy sponsored by Ericsson Broadcasting Services)\r
+  o newtek_ivga_consumer: Added support for iVGA consumer to not provide channel\r
+    sync even though connected. Useful for iVGA clients that download as fast\r
+    as possible instead of in frame-rate pace, like Wirecast. To enable:\r
+\r
+    <newtek-ivga>\r
+      <provide-sync>false</provide-sync>\r
+    </newtek-ivga>\r
+\r
+    ...in config to not provide channel sync when connected. The default is\r
+    true.\r
+\r
+AMCP\r
+----\r
+\r
+  o Added support in ADD and REMOVE for a placeholder <CLIENT_IP_ADDRESS> which\r
+    will resolve to the connected AMCP client's IPv4 address.\r
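+    Example (stream arguments reused from the streaming_consumer example\r
+    above): ADD 1 STREAM udp://<CLIENT_IP_ADDRESS>:5004 -vcodec libx264\r
+    -tune zerolatency -preset ultrafast -crf 25 -format mpegts\r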
+  o Fixed bug where AMCP commands split into multiple TCP packets were not\r
+    correctly parsed (http://casparcg.com/forum/viewtopic.php?f=3&t=2480)\r
+\r
+\r
+\r
+CasparCG Server 2.0.7 Beta 1 (as compared to 2.0.6 Stable)\r
+==========================================================\r
+\r
+General\r
+-------\r
+  o FFmpeg: Upgraded to master and adapted CasparCG to FFmpeg API changes\r
+    (Robert Nagy sponsored by SVT)\r
+  o FFmpeg: Fixed problem with frame count calculation (Thomas Kaltz III)\r
+  o Fixed broken CG UPDATE.\r
+\r
+Producers\r
+---------\r
+\r
+  o New HTML producer has been created (Robert Nagy sponsored by Flemish Radio\r
+    and Television Broadcasting Organization, VRT)\r
+\r
+\r
+\r
+CasparCG Server 2.0.6 Stable (as compared to 2.0.4 Stable)\r
+==========================================================\r
+\r
+General\r
+-------\r
+  o iVGA: Allow for the server to work without Processing.AirSend.x86.dll to\r
+    prevent a possible GPL violation. It is available as a separate optional\r
+    download.\r
+  o iVGA: Only provide sync to channel while connected, to prevent channel\r
+    ticking too fast.\r
+  o FFmpeg: Fixed bug during deinterlace-bob-reinterlace where output fields\r
+    were offset by one field in relation to input fields.\r
+  o FFmpeg: Fixed bug in ffmpeg_consumer where an access violation occurred\r
+    during destruction.\r
+  o FFmpeg: Improved seeking. (Robert Nagy and Thomas Kaltz III)\r
+  o Frontend: Only writes elements to casparcg.config which override a default\r
+    value to keep the file as compact as possible.\r
+  o System audio: Patched sfml-audio to work better with oal-consumer and\r
+    therefore removed PortAudio as the system audio implementation and went back\r
+    to oal.\r
+  o Flash: Changed so that the initial buffer fill of frames is rendered at a\r
+    frame-duration pace instead of as fast as possible. Otherwise time based\r
+    animations render incorrectly. During buffer recovery, a higher paced\r
+    rendering takes place, but still not as fast as possible, which can cause\r
+    animations to be somewhat incorrectly rendered. This is the only way though\r
+    if we want the buffer to be able to recover after depletion.\r
+  o Fixed race condition during server shutdown.\r
+  o OSC: outgoing audio levels from the audio mixer for each audio channel are\r
+    now transmitted (pFS and dBFS). (Thomas Kaltz III)\r
+  o Stage: Fixed bug where tweened transforms were only ticked when a\r
+    corresponding layer existed.\r
+  o Screen consumer: Added borderless option and correct handling of name\r
+    option. (Thomas Kaltz III)\r
+  o AMCP: CLS now reports duration and framerate for MOVIE files where\r
+    information is possible to extract. (Robert Nagy)\r
+  o Version bump to keep up with CasparCG Client version.\r
+\r
+\r
+\r
+CasparCG Server 2.0.4 Stable (as compared to 2.0.4 Beta 1)\r
+==========================================================\r
+\r
+General\r
+-------\r
+  o Can now open media with file names that only consist of digits.\r
+    (Cambell Prince)\r
+  o Miscellaneous stability and performance improvements.\r
+\r
+Video mixer\r
+-----------\r
+  o Conditional compilation of chroma key support and straight alpha output\r
+    support in shader (just like with blend-modes) because of performance impact\r
+    even when not in use on a layer or on a channel. New <mixer /> element added\r
+    to configuration for turning on mixer features that not everybody would want\r
+    to pay for (performance-wise). blend-modes also moved into this element.\r
+  o Fixed bug where MIXER LEVELS interpreted arguments in the wrong order, so \r
+    that gamma was interpreted as max_input and vice versa.\r
+\r
+Consumers\r
+---------\r
+  o Added support for NewTek iVGA, which enables the use of CasparCG Server \r
+    fill+key output(s) as input source(s) to a NewTek TriCaster without \r
+    requiring video card(s) in the CasparCG Server machine, or taking up inputs\r
+    in the TriCaster. <newtek-ivga /> element in config enables iVGA on a\r
+    channel. (Robert Nagy sponsored by NewTek)\r
+  o DeckLink: Created custom decklink allocator to reduce the memory footprint.\r
+  o Replaced usage of SFML for <system-audio /> with PortAudio, because of\r
+    problems with SFML since change to static linkage. Also PortAudio seems to\r
+    give lower latency.\r
+\r
+Producers\r
+---------\r
+  o FFmpeg: Added support for arbitrary FFmpeg options/parameters\r
+    in ffmpeg_producer. (Cambell Prince)\r
+  o Flash: Flash Player 11.8 now tested and fully supported.\r
+  o Flash: No longer starts a Flash Player to service CG commands that mean\r
+    nothing without an already running Flash Player.\r
+  o Flash: globally serialize initialization and destruction of Flash Players,\r
+    to avoid race conditions in Flash.\r
+  o Flash: changed so that the Flash buffer is filled with Flash Player\r
+    generated content at initialization instead of empty frames.\r
+\r
+OSC\r
+---\r
+  o Performance improvements. (Robert Nagy sponsored by Boffins Technologies)\r
+  o Never sends old values to OSC receivers. Collects the latest value of each\r
+    path logged since last UDP send, and sends the new UDP packet (to each\r
+    subscribing OSC receiver) with the values collected. (Robert Nagy sponsored\r
+    by Boffins Technologies)\r
+  o Batches as many OSC messages as possible in an OSC bundle to reduce the \r
+    number of UDP packets sent. Breaks up into separate packets if necessary to\r
+    avoid fragmentation. (Robert Nagy sponsored by Boffins Technologies)\r
+  o Removed usage of Microsoft Agents library (Server ran out of memory after a\r
+    while) in favour of direct synchronous invocations.\r
+\r
+\r
+\r
+CasparCG Server 2.0.4 Beta 1 (as compared to 2.0.3 Stable)\r
+==========================================================\r
+\r
+General\r
+-------\r
+  o Front-end GUI for simplified configuration and easy access to common tasks.\r
+    (Thomas Kaltz III and Jeff Lafforgue)\r
+  o Added support for video and images file thumbnail generation. By default the\r
+    media directory is scanned every 5 seconds for new/modified/removed files\r
+    and thumbnails are generated/regenerated/removed accordingly.\r
+  o Support for new video modes: 1556p2398, 1556p2400, 1556p2500, 2160p2398,\r
+    2160p2400, 2160p2500, 2160p2997 and 2160p3000.\r
+  o Experimental ATI graphics card support by using static linking against SFML\r
+    instead of dynamic. Should improve ATI GPU support, but needs testing.\r
+  o Added support for playback and pass-through of up to 16 audio channels. See\r
+    http://casparcg.com/forum/viewtopic.php?f=3&t=1453 for more information.\r
+  o Optimizations in AMCP protocol implementations for large incoming messages,\r
+    for example base64 encoded PNG images.\r
+  o Logging output now includes milliseconds and has modified format:\r
+    YYYY-MM-DD hh:mm:ss.zzz\r
+  o Improved audio playback with 720p5994 and 720p6000 channels.\r
+  o An attempt to improve output synchronization of consumers has been made. Use\r
+    for example:\r
+\r
+    <consumers>\r
+      <synchronizing>\r
+        <decklink>\r
+          <device>1</device>\r
+          <embedded-audio>true</embedded-audio>\r
+        </decklink>\r
+        <decklink>\r
+          <device>2</device>\r
+          <key-only>true</key-only>\r
+        </decklink>\r
+      </synchronizing>\r
+    </consumers>\r
+\r
+    ...to instruct the server to keep both DeckLink consumers in sync with each\r
+    other. Consider this experimental, so don't wrap everything in\r
+    <synchronizing /> unless synchronization of consumer outputs is needed. For\r
+    synchronization to be effective all synchronized cards must have genlock\r
+    reference signal connected.\r
+  o Transfer of source code and issue tracker to github. (Thomas Kaltz III)\r
+\r
+Layer\r
+-----\r
+  o Fixed a problem where the first frame was not always shown on LOAD.\r
+    (Robert Nagy)\r
+\r
+Stage\r
+-----\r
+\r
+  o Support for layer consumers for listening to frames coming out of producers.\r
+    (Cambell Prince)\r
+\r
+Audio mixer\r
+-----------\r
+  o Added support for a master volume mixer setting for each channel.\r
+\r
+Video mixer\r
+-----------\r
+  o Added support for chroma keying. (Cambell Prince)\r
+  o Fixed bug where MIXER CONTRAST set to < 1 can cause transparency issues.\r
+  o Experimental support for straight alpha output.\r
+\r
+Consumers\r
+---------\r
+  o Avoid having the FFmpeg consumer block the channel output when it can't\r
+    keep up with the frame rate (drops frames instead).\r
+  o Added support for creating a separate key and fill file when recording with\r
+    the FFmpeg consumer. Add the SEPARATE_KEY parameter to the FFmpeg consumer\r
+    parameter list. The key file will get the _A file name suffix to be picked\r
+    up by the separated_producer when doing playback.\r
+  o The Image consumer now writes to the media folder instead of the data\r
+    folder.\r
+  o Fixed bug in DeckLink consumer where we submit too few audio samples to the\r
+    driver when the video format has a frame rate > 50.\r
+  o Added another experimental DeckLink consumer implementation where scheduled\r
+    playback is not used, but a similar approach as in the bluefish consumer\r
+    where we wait for a frame to be displayed and then display the next frame.\r
+    It is configured via a <blocking-decklink> consumer element. The benefits of\r
+    this consumer are lower latency and more deterministic synchronization\r
+    between multiple instances (should not need to be wrapped in a\r
+    <synchronizing> element when separated key/fill is used).\r
+\r
+Producers\r
+---------\r
+  o Added support for playing .swf files using the Flash producer. (Robert Nagy)\r
+  o Image producer premultiplies PNG images with their alpha.\r
+  o Image producer can load a PNG image encoded as base64 via:\r
+    PLAY 1-0 [PNG_BASE64] <base64 string>\r
+  o FFmpeg producer can now use DirectShow input filters:\r
+    PLAY 1-10 "dshow://video=Some Camera"\r
+    (Cambell Prince, Julian Waller and Robert Nagy)\r
+  o New layer producer which directs the output of a layer to another layer via\r
+    a layer consumer. (Cambell Prince)\r
+\r
+AMCP\r
+----\r
+  o The master volume feature is controlled via the MASTERVOLUME MIXER\r
+    parameter. Example: MIXER 1 MASTERVOLUME 0.5\r
+  o THUMBNAIL LIST/RETRIEVE/GENERATE/GENERATE_ALL command was added to support\r
+    the thumbnail feature.\r
+  o ADD 1 FILE output.mov SEPARATE_KEY activates the separate key feature of the\r
+    FFmpeg consumer creating an additional output_a.mov containing only the key.\r
+  o Added KILL command for shutting down the server without console access.\r
+  o Added RESTART command for shutting down the server in the same way as KILL\r
+    except that the return code from CasparCG Server is 5 instead of 0, which\r
+    can be used by parent process to take other actions. The\r
+    'casparcg_auto_restart.bat' script restarts the server if the return code is\r
+    5.\r
+  o DATA RETRIEVE now returns linefeeds encoded as an actual linefeed (the\r
+    single character 0x0a) instead of the previous two characters:\r
+    \ followed by n.\r
+  o MIXER CHROMA command added to control the chroma keying. Example:\r
+    MIXER 1-1 CHROMA GREEN|BLUE 0.10 0.04\r
+    (Cambell Prince)\r
+  o Fixed bug where MIXER FILL overrides any previous MIXER CLIP on the same\r
+    layer. The bug-fix also has the side effect of supporting negative scale on\r
+    MIXER FILL, causing the image to be flipped.\r
+  o MIXER <ch> STRAIGHT_ALPHA_OUTPUT added to control whether to output straight\r
+    alpha or not.\r
+  o Added INFO <ch> DELAY and INFO <ch>-<layer> DELAY commands for showing some\r
+    delay measurements.\r
+  o PLAY 1-1 2-10 creates a layer producer on 1-1 redirecting the output of\r
+    2-10. (Cambell Prince)\r
+\r
+OSC\r
+---\r
+  o Support for sending OSC messages over UDP to either a predefined set of\r
+    clients (servers in the OSC sense) or dynamically to the ip addresses of the\r
+    currently connected AMCP clients.\r
+    (Robert Nagy sponsored by Boffins Technologies)\r
+  o /channel/[1-9]/stage/layer/[0-9]\r
+    + always             /paused           [paused or not]\r
+    + color producer     /color            [color string]\r
+    + ffmpeg producer    /profiler/time    [render time]     [frame duration]\r
+    + ffmpeg producer    /file/time        [elapsed seconds] [total seconds]\r
+    + ffmpeg producer    /file/frame       [frame]           [total frames]\r
+    + ffmpeg producer    /file/fps         [fps]\r
+    + ffmpeg producer    /file/path        [file path]\r
+    + ffmpeg producer    /loop             [looping or not]\r
+    + during transitions /transition/frame [current frame]   [total frames]\r
+    + during transitions /transition/type  [transition type]\r
+    + flash producer     /host/path        [filename]\r
+    + flash producer     /host/width       [width]\r
+    + flash producer     /host/height      [height]\r
+    + flash producer     /host/fps         [fps]\r
+    + flash producer     /buffer           [buffered]        [buffer size]\r
+    + image producer     /file/path        [file path]\r
+\r
+\r
+\r
+CasparCG Server 2.0.3 Stable (as compared to 2.0.3 Alpha)\r
+=========================================================\r
+\r
+Stage\r
+-----\r
+\r
+  o Fixed dead-lock that can occur with multiple mixer tweens. (Robert Nagy)\r
+\r
+AMCP\r
+----\r
+\r
+  o DATA STORE now supports creating the folders of the specified path if they\r
+    do not exist. (Jeff Lafforgue)\r
+  o DATA REMOVE command was added. (Jeff Lafforgue)\r
+\r
+\r
+\r
+CasparCG Server 2.0.3 Alpha (as compared to 2.0 Stable)\r
+=======================================================\r
+\r
+General\r
+-------\r
+\r
+  o Data files are now stored in UTF-8 with BOM. Latin1 files are still\r
+    supported for backwards compatibility.\r
+  o Commands written in UTF-8 to log file but only ASCII characters to console.\r
+  o Added supported video formats:\r
+    + 720p2398 (not supported by DeckLink)\r
+    + 720p2400 (not supported by DeckLink)\r
+    + 1080p5994\r
+    + 1080p6000\r
+    + 720p30 (not supported by DeckLink)\r
+    + 720p29.976 (not supported by DeckLink)\r
 \r
 CLK\r
 ---\r
@@ -55,15 +750,14 @@ Consumers
   o Consumers on same channel now invoked asynchronously to allow for proper\r
     sync of multiple consumers.\r
   o System audio consumer:\r
-    + No longer provides sync to caspar.\r
-    + Pushes data to openal instead of being callbacked by SFML when data is\r
-      needed.\r
+    + No longer provides sync to the video channel.\r
   o Screen consumer:\r
     + Support for multiple screen consumers on the same channel\r
-    + No longer spin-waits for vsync\r
+    + No longer spin-waits for vsync.\r
     + Now deinterlaces to two separate frames so for example 50i will no longer\r
       be converted to 25p but instead to 50p for smooth playback of interlaced\r
       content.\r
+  o DeckLink consumer now logs whether a reference signal is detected or not.\r
 \r
 Producers\r
 ---------\r
@@ -73,21 +767,17 @@ Producers
       formats. This can be overridden by giving the PROGRESSIVE parameter.\r
     + SPEED parameter now defines pixels per frame/field instead of half pixels\r
       per frame. The scrolling direction is also reversed so SPEED 0.5 is the\r
-      previous equivalent of SPEED -1. Movements are done with subpixel accuracy\r
+      previous equivalent of SPEED -1. Movements are done with sub-pixel\r
+      accuracy.\r
     + Fixed incorrect starting position of image.\r
     + Rounding error fixes to allow for more exact scrolling.\r
     + Added support for motion blur via a new BLUR parameter\r
     + Added PREMULTIPLY parameter to support images stored with straight alpha.\r
 \r
-AMCP\r
-----\r
 \r
-  o DATA STORE now supports creating folders of path specified if they does not\r
-    exist.\r
-  o DATA REMOVE command was added.\r
 \r
-CasparCG 2.0 Stable (w.r.t Beta 3)\r
-==================================\r
+CasparCG Server 2.0 Stable (as compared to Beta 3)\r
+==================================================\r
 \r
 General\r
 -------\r
@@ -98,13 +788,15 @@ Consumers
 ---------\r
 \r
   o File Consumer\r
-    + Changed semantics to more closely follow ffmpeg (see forums).\r
+    + Changed semantics to more closely follow FFmpeg (see forums).\r
     + Added options, -r, -acodec, -s, -pix_fmt, -f and more.\r
   o Screen Consumer\r
     + Added vsync support.\r
 \r
-CasparCG 2.0 Beta 3 (w.r.t Beta 1)\r
-==================================\r
+\r
+\r
+CasparCG Server 2.0 Beta 3 (as compared to Beta 1)\r
+==================================================\r
 \r
 Formats\r
 -------\r
@@ -121,13 +813,13 @@ Consumers
 \r
   o File Consumer added\r
     + See updated wiki or ask in forum for more information.\r
-    + Should support anything ffmpeg supports. However, we will work mainly with\r
-      DNXHD, PRORES and H264.\r
+    + Should support anything FFmpeg supports. However, we will work mainly with\r
+      DNxHD, PRORES and H264.\r
     - Key-only is not supported.\r
   o Bluefish Consumer\r
     + 24 bit audio support.\r
     - Embedded-audio does not work with Epoch cards.\r
-  o Decklink Consumer\r
+  o DeckLink Consumer\r
     + Low latency enabled by default.\r
     + Added graphs for driver buffers.\r
   o Screen Consumer\r
@@ -139,8 +831,8 @@ Consumers
 Producers\r
 ---------\r
 \r
-  o Decklink Producer\r
-    + Improved color quality be avoiding uneccessary conversion to BGRA.\r
+  o DeckLink Producer\r
+    + Improved color quality by avoiding unnecessary conversion to BGRA.\r
   o FFMPEG Producer\r
     + Fixed missing alpha for (RGB)A formats when deinterlacing.\r
     + Updated buffering to work better with files with long audio/video\r
@@ -152,14 +844,14 @@ Producers
     + Improved auto-transcode accuracy.\r
     + Improved seeking accuracy.\r
     + Fixed bug with looping and LENGTH.\r
-    + Updated to newer ffmpeg version.\r
+    + Updated to newer FFmpeg version.\r
     + Fixed incorrect scaling of NTSC DV files.\r
     + Optimized color conversion when using YADIF filters.\r
   o Flash Producer\r
-    + Release flash-player when empty.\r
-    + Use native resolution template-host.\r
-    + Template-hosts are now choosen automatically if not configured. The\r
-      template-host with the corresponding video-mode name is now chosen.\r
+    + Release Flash Player when empty.\r
+    + Use native resolution TemplateHost.\r
+    + TemplateHosts are now chosen automatically if not configured. The\r
+      TemplateHost with the corresponding video-mode name is now chosen.\r
     + Use square pixel dimensions.\r
 \r
 AMCP\r
@@ -203,13 +895,10 @@ Diagnostics
     previously.\r
   o Diagnostics window is now closable.\r
 \r
-Configuration\r
--------------\r
 \r
-  o Simplified.\r
 \r
-CasparCG 2.0 Beta 1 (w.r.t Alpha)\r
-=================================\r
+CasparCG Server 2.0 Beta 1 (as compared to Alpha)\r
+=================================================\r
 \r
   o Blending Modes (needs to be explicitly enabled)\r
     + overlay\r
@@ -217,7 +906,7 @@ CasparCG 2.0 Beta 1 (w.r.t Alpha)
     + multiply\r
     + and many more.\r
   o Added additive keyer in addition to linear keyer.\r
-  o Image adjustements\r
+  o Image adjustments\r
     + saturation\r
     + brightness\r
     + contrast\r
@@ -226,26 +915,26 @@ CasparCG 2.0 Beta 1 (w.r.t Alpha)
     + min output-level\r
     + max output-level\r
     + gamma\r
-  o Support for ffmpeg-filters such as (ee http://ffmpeg.org/libavfilter.html)\r
+  o Support for FFmpeg-filters such as (see http://ffmpeg.org/libavfilter.html)\r
     + yadif deinterlacer (optimized in CasparCG for full multi-core support)\r
     + de-noising\r
     + dithering\r
     + box blur\r
     + and many more\r
   o 32-bit SSE optimized audio pipeline.\r
-  o Decklink-Consumer uses external-key by default.\r
-  o Decklink-Consumer has 32 bit embedded-audio support.\r
-  o Decklink-Producer has 32 bit embedded-audio support.\r
-  o LOADBG with AUTO feature which automatically playes queued clip when\r
+  o DeckLink-Consumer uses external-key by default.\r
+  o DeckLink-Consumer has 24 bit embedded-audio support.\r
+  o DeckLink-Producer has 24 bit embedded-audio support.\r
+  o LOADBG with AUTO feature which automatically plays queued clip when\r
     foreground clip has ended.\r
   o STATUS command for layers.\r
   o LOG LEVEL command for log filtering.\r
-  o MIX transitation work with transparent clips.\r
+  o MIX transition works with transparent clips.\r
   o Freeze on last frame.\r
   o Producer buffering is now configurable.\r
   o Consumer buffering is now configurable.\r
   o Now possible to configure template-hosts for different video-modes.\r
-  o Added auto transcoder for ffmpeg-producer which automatically transcodes\r
+  o Added auto transcoder for FFmpeg producer which automatically transcodes\r
     input video into compatible video format for the channel.\r
     + interlacing (50p -> 50i)\r
     + deinterlacing (50i -> 25p)\r
@@ -254,41 +943,43 @@ CasparCG 2.0 Beta 1 (w.r.t Alpha)
     + doubling (25p -> 50p)\r
     + halfing (50p -> 25p)\r
     + field-order swap (upper <-> lower)\r
-  o Screen consumer now automatically deinterlaces when receiveing interlaced\r
+  o Screen consumer now automatically deinterlaces when receiving interlaced\r
     content.\r
   o Optimized renderer.\r
   o Renderer can now be run asynchronously with producer by using a\r
     producer-buffer size greater than 0.\r
   o Improved error and crash recovery.\r
   o Improved logging.\r
-  o Added Image-Scroll-Producer\r
-  o Key-only has now near zery performance overhead.\r
-  o Reduced memory requirements\r
-  o Removed "warm up lag" which occured when playing the first media clip after\r
+  o Added Image-Scroll-Producer.\r
+  o Key-only has now near zero performance overhead.\r
+  o Reduced memory requirements.\r
+  o Removed "warm up lag" which occurred when playing the first media clip after\r
     the server has started.\r
   o Added read-back fence for OpenGL device for improved multi-channel\r
     performance.\r
   o Memory support increased from standard 2 GB to 4 GB on 64 bit Win 7 OS.\r
-  o Added support for 2* Decklink cards in FullHD.\r
+  o Added support for 2* DeckLink cards in Full HD.\r
   o Misc bugs fixes and performance improvements.\r
  o Color producer now supports some color names in addition to color codes,\r
    e.g. EMPTY, BLACK, RED etc...\r
   o Alpha value in color codes is now optional.\r
-  o More than 2 Decklink cards might be possible but have not yet been tested.\r
+  o More than 2 DeckLink cards might be possible but have not yet been tested.\r
+\r
 \r
-CasparCG 2.0 Alpha (w.r.t 1.8)\r
-==============================\r
+\r
+CasparCG Server 2.0 Alpha (as compared to 1.8)\r
+==============================================\r
 \r
 General\r
 -------\r
 \r
-  o Mayor refactoring for improved readability and mainainability.\r
-  o Some work towards platorm-independence. Currently the greatest challenge for\r
-    full platform-independence is flash-producer.\r
+  o Major refactoring for improved readability and maintainability.\r
+  o Some work towards platform-independence. Currently the greatest challenge\r
+    for full platform-independence is flash-producer.\r
   o Misc improved scalability.\r
   o XML-configuration.\r
-  o Decklink\r
-    + Support for multiple decklink-cards.\r
+  o DeckLink\r
+    + Support for multiple DeckLink cards.\r
 \r
 Core\r
 ----\r
@@ -312,71 +1003,71 @@ Mixer
 \r
   o Animated tween transforms.\r
   o Image-Mixer\r
-    + Fully GPU accelerated (all features listed below are done on the gpu),\r
+    + Fully GPU accelerated (all features listed below are done on the GPU),\r
     + Layer composition.\r
-    + Colorspaces (rgba, bgra, argb, yuv, yuva, yuv-hd, yuva-hd).\r
+    + Color spaces (rgba, bgra, argb, yuv, yuva, yuv-hd, yuva-hd).\r
     + Interlacing.\r
-    + Per-layer image-transforms:\r
+    + Per-layer image transforms:\r
       + Opacity\r
       + Gain\r
       + Scaling\r
       + Clipping\r
       + Translation\r
-  o Audio-Mixer\r
-    + Per-layer and per-sample audio-transforms:\r
+  o Audio Mixer\r
+    + Per-layer and per-sample audio transforms:\r
         + Gain\r
-    + Fully internal audio-mixing. Single output-video_channel.\r
+    + Fully internal audio mixing. Single output video_channel.\r
 \r
 Consumers\r
 ---------\r
 \r
-  o Decklink-Consumer\r
+  o DeckLink Consumer\r
     + Embedded audio.\r
     + HD support.\r
     + Hardware clock.\r
-  o Bluefish-Consumer\r
+  o Bluefish Consumer\r
     + Drivers are loaded on-demand (server now runs on computers without\r
-      installed bluefish-drivers).\r
-    + Embedded-audio.\r
+      installed Bluefish drivers).\r
+    + Embedded audio.\r
     + Allocated frames are no longer leaked.\r
 \r
 Producers\r
 ---------\r
 \r
-  o Decklink-Producer\r
+  o DeckLink Producer\r
     + Embedded audio.\r
     + HD support.\r
-  o Color-Producer\r
+  o Color Producer\r
     + GPU accelerated.\r
-  o FFMPEG-Producer\r
+  o FFMPEG Producer\r
     + Asynchronous file IO.\r
     + Parallel decoding of audio and video.\r
-    + Colorspace transform are moved to gpu.\r
-  o Transition-Producer\r
-    + Fully interlaced transition (previsously only progressive, even when\r
+    + Color space transforms are moved to GPU.\r
+  o Transition Producer\r
+    + Fully interlaced transition (previously only progressive, even when\r
       running in interlaced mode).\r
     + Per-sample mixing between source and destination clips.\r
     + Tween transitions.\r
-  o Flash-Producer\r
-    + DirectDraw access (sligthly improved performance).\r
+  o Flash Producer\r
+    + DirectDraw access (slightly improved performance).\r
     + Improved time-sync. Smoother animations and proper interlacing.\r
-  o Image-Producer\r
-    + Support for various imageformats through FreeImage library.\r
+  o Image Producer\r
+    + Support for various image formats through FreeImage library.\r
 \r
 Diagnostics\r
 -----------\r
 \r
   o Graphs for monitoring performance and events.\r
   o Misc logging improvements.\r
-  o Seperate log-file for every run of the server.\r
+  o Separate log file for every run of the server.\r
   o Error logging provides full exception details, instead of only printing that\r
-    an error has occured.\r
+    an error has occurred.\r
   o Console with real-time logging output.\r
   o Console with AMCP input.\r
 \r
 Removed\r
 -------\r
 \r
-  o Registry-configuration (replaced by XML-Configuration).\r
-  o TGA-Producer (replaced by Image-Producer).\r
-  o TGA-Scroll-Producer\r
+  o Registry configuration (replaced by XML Configuration).\r
+  o TGA Producer (replaced by Image Producer).\r
+  o TGA Scroll Producer\r
index e4276b470034b6f35d3ee466f67d8c76977e11d0..1c08ef1882d8092607b59497faa60fddcf5138ce 100644 (file)
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -3,12 +3,16 @@ project ("CasparCG Server")
 
 find_package(Git)
 
-set(GIT_REV "N/A")
+set(GIT_REV "0")
+set(GIT_HASH "N/A")
 
 if (GIT_FOUND)
        exec_program("${GIT_EXECUTABLE}" "${PROJECT_SOURCE_DIR}"
-                       ARGS rev-parse --verify --short HEAD
+                       ARGS rev-list --all --count
                        OUTPUT_VARIABLE GIT_REV)
+       exec_program("${GIT_EXECUTABLE}" "${PROJECT_SOURCE_DIR}"
+                       ARGS rev-parse --verify --short HEAD
+                       OUTPUT_VARIABLE GIT_HASH)
 endif ()
 
 configure_file("${PROJECT_SOURCE_DIR}/version.tmpl" "${PROJECT_SOURCE_DIR}/version.h")
@@ -75,6 +79,7 @@ add_definitions( -D_UNICODE )
 add_definitions( -DGLEW_NO_GLU )
 add_definitions( "-DBOOST_ASIO_ERROR_CATEGORY_NOEXCEPT=noexcept(true)" ) # Workaround macro redefinition in boost
 add_definitions( -D_GLIBCXX_USE_CXX11_ABI=0 ) # Allow compilation in GCC 5 while keeping old dependencies
+add_definitions( -DCASPAR_SOURCE_PREFIX="${CMAKE_CURRENT_SOURCE_DIR}" )
 
 if (MSVC)
        set(CMAKE_CXX_FLAGS                                     "${CMAKE_CXX_FLAGS}                                     /EHa /Zi /W4 /WX /MP /fp:fast /Zm192 /FIcommon/compiler/vs/disable_silly_warnings.h")
diff --git a/README b/README
index d3415af62a9c9618d2a0c24548fdbc9cff0678ee..dec7ad15ebff3eebed97ea86061380792fcafc32 100644 (file)
--- a/README
+++ b/README
@@ -1,13 +1,13 @@
-CasparCG Server 2.1.0 Unstable\r
-==============================\r
+CasparCG Server 2.1.0 Beta 1\r
+============================\r
 \r
 Thank you for your interest in CasparCG Server, a professional software used to\r
 play out and record professional graphics, audio and video to multiple outputs.\r
 CasparCG Server has been in 24/7 broadcast production since 2006.\r
 \r
-This release is considered untested and unstable, and is NOT intended for use in\r
-professional production. Stable and production-proven versions can be downloaded\r
-at http://casparcg.com/download/\r
+This release is considered beta, and is NOT intended for use in professional\r
+production. Stable and production-proven versions can be downloaded at\r
+http://casparcg.com/download/\r
 \r
 Alpha and beta builds are available at http://builds.casparcg.com/\r
 \r
index 07d669deb382a90f91dc93700f219bc9dacc864c..bf69a0558ec8b98332f4adb374b0e38c9edce5e8 100644 (file)
--- a/accelerator/cpu/image/image_mixer.cpp
+++ b/accelerator/cpu/image/image_mixer.cpp
@@ -371,6 +371,7 @@ image_mixer::~image_mixer(){}
 void image_mixer::push(const core::frame_transform& transform){impl_->push(transform);}
 void image_mixer::visit(const core::const_frame& frame){impl_->visit(frame);}
 void image_mixer::pop(){impl_->pop();}
+int image_mixer::get_max_frame_size() { return std::numeric_limits<int>::max(); }
 std::future<array<const std::uint8_t>> image_mixer::operator()(const core::video_format_desc& format_desc, bool /* straighten_alpha */){return impl_->render(format_desc);}
 core::mutable_frame image_mixer::create_frame(const void* tag, const core::pixel_format_desc& desc, const core::audio_channel_layout& channel_layout) {return impl_->create_frame(tag, desc, channel_layout);}
 
index 55f2b416bd08c514975acd93f024eabfa2ae8464..ba4d0a3915d20c5f4bf73a4ecb029b6ede7f2b85 100644 (file)
--- a/accelerator/cpu/image/image_mixer.h
+++ b/accelerator/cpu/image/image_mixer.h
@@ -37,7 +37,7 @@ public:
        core::mutable_frame create_frame(const void* tag, const core::pixel_format_desc& desc, const core::audio_channel_layout& channel_layout) override;
 
        // Properties
-
+       int get_max_frame_size() override;
 private:
        struct impl;
        spl::unique_ptr<impl> impl_;
index 42e95f37f83a9775a76369955d587ce49dc3c459..2dc65f4ccf69b1ecff6704b48019d7084245dbd5 100644 (file)
--- a/accelerator/ogl/image/blending_glsl.h
+++ b/accelerator/ogl/image/blending_glsl.h
@@ -36,7 +36,9 @@ static std::string get_adjustement_glsl()
                                const float AvgLumG = 0.5;
                                const float AvgLumB = 0.5;
 
-                               const vec3 LumCoeff = vec3(0.2125, 0.7154, 0.0721);
+                               vec3 LumCoeff = is_hd
+                                               ? vec3(0.0722, 0.7152, 0.2126)
+                                               : vec3(0.114, 0.587, 0.299);
 
                                vec3 AvgLumin = vec3(AvgLumR, AvgLumG, AvgLumB);
                                vec3 brtColor = color * brt;
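
The hunk above switches the grading shader from fixed luma weights to a choice between Rec. 601 and Rec. 709 coefficients based on is_hd; the vectors appear to be written in BGR component order. A minimal C++ sketch of the same selection, outside the shader, with an illustrative helper name:

    // Illustrative only: pick Rec.709 weights for HD material, Rec.601 for SD,
    // and compute relative luminance from a red/green/blue triple.
    float luma(float r, float g, float b, bool is_hd)
    {
            const float kr = is_hd ? 0.2126f : 0.299f;
            const float kg = is_hd ? 0.7152f : 0.587f;
            const float kb = is_hd ? 0.0722f : 0.114f;

            return kr * r + kg * g + kb * b;
    }
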
index 1c983cee20e4f8db8e5a0d7904fabc57c5bc387f..19d0191e5fef9a23f5a70e542d24a02806f2d776 100644 (file)
@@ -369,6 +369,16 @@ public:
 
                return core::mutable_frame(std::move(buffers), core::mutable_audio_buffer(), tag, desc, channel_layout);
        }
+
+       int get_max_frame_size() override
+       {
+               return ogl_->invoke([]
+               {
+                       GLint64 params[1];
+                       glGetInteger64v(GL_MAX_TEXTURE_SIZE, params);
+                       return static_cast<int>(params[0]);
+               });
+       }
 };
 
 image_mixer::image_mixer(const spl::shared_ptr<device>& ogl, bool blend_modes_wanted, bool straight_alpha_wanted, int channel_id) : impl_(new impl(ogl, blend_modes_wanted, straight_alpha_wanted, channel_id)){}
@@ -376,6 +386,7 @@ image_mixer::~image_mixer(){}
 void image_mixer::push(const core::frame_transform& transform){impl_->push(transform);}
 void image_mixer::visit(const core::const_frame& frame){impl_->visit(frame);}
 void image_mixer::pop(){impl_->pop();}
+int image_mixer::get_max_frame_size() { return impl_->get_max_frame_size(); }
 std::future<array<const std::uint8_t>> image_mixer::operator()(const core::video_format_desc& format_desc, bool straighten_alpha){return impl_->render(format_desc, straighten_alpha);}
 core::mutable_frame image_mixer::create_frame(const void* tag, const core::pixel_format_desc& desc, const core::audio_channel_layout& channel_layout) {return impl_->create_frame(tag, desc, channel_layout);}
 
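
The new get_max_frame_size() accessor, presumably declared on the shared core::image_mixer interface, lets callers ask the accelerator how large a single frame may be: the OpenGL mixer queries GL_MAX_TEXTURE_SIZE on its device thread, while the CPU mixer earlier in this diff reports std::numeric_limits<int>::max(). A hedged sketch of a caller; the function name and dimensions are illustrative:

    // Illustrative only: check whether a requested frame fits within the
    // accelerator's single-texture limit before allocating it.
    bool frame_fits(caspar::core::image_mixer& mixer, int width, int height)
    {
            const int max_size = mixer.get_max_frame_size();
            return width <= max_size && height <= max_size;
    }
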
index 3b203b5119425e799206631d8578aa4664a394f8..2ce6e4d69643c0e8bd2c0f9475cd141251069076 100644 (file)
@@ -60,6 +60,8 @@ public:
                        
        // Properties
 
+       int get_max_frame_size() override;
+
 private:
        struct impl;
        spl::unique_ptr<impl> impl_;
index bc75a81a63bfbb71e446dc4ac30d1f81c2c608bc..338d9780a0504fb2f8c29063460a95ec2521c51a 100755 (executable)
@@ -47,6 +47,7 @@ cp -f  shell/*.ttf "$SERVER_FOLDER/" || fail "Could not copy font(s)"
 cp -f  shell/casparcg "$SERVER_FOLDER/bin/" || fail "Could not copy server executable"
 cp -f  shell/casparcg.config "$SERVER_FOLDER/" || fail "Could not copy server config"
 cp -Rf shell/locales "$SERVER_FOLDER/bin/" || fail "Could not copy server CEF locales"
+cp -f  shell/*.pak "$SERVER_FOLDER/" || fail "Could not copy CEF resources"
 
 # Copy binary dependencies
 echo Copying binary dependencies...
index 0ab6e95b8f8caad2a7a69e9eff61dc22c5ad16a8..8830d34a9ebfcac38a4be0951bf6f91c10323d80 100644 (file)
@@ -45,9 +45,9 @@ echo Copying binaries...
 copy shell\*.dll "%SERVER_FOLDER%\Server" || goto :error
 copy shell\RelWithDebInfo\casparcg.exe "%SERVER_FOLDER%\Server" || goto :error
 copy shell\RelWithDebInfo\casparcg.pdb "%SERVER_FOLDER%\Server" || goto :error
-copy shell\RelWithDebInfo\libcef.dll.pdb "%SERVER_FOLDER%\Server" || goto :error
 copy shell\casparcg.config "%SERVER_FOLDER%\Server" || goto :error
 copy shell\*.ttf "%SERVER_FOLDER%\Server" || goto :error
+copy shell\*.pak "%SERVER_FOLDER%\Server" || goto :error
 xcopy shell\locales "%SERVER_FOLDER%\Server\locales" /E /I /Y || goto :error
 
 :: Copy documentation
@@ -58,7 +58,7 @@ copy ..\README "%SERVER_FOLDER%" || goto :error
 
 :: Create zip file
 echo Creating zip...
-"%BUILD_7ZIP%" a "%BUILD_ARCHIVE_NAME%.7z" "%SERVER_FOLDER%" || goto :error
+"%BUILD_7ZIP%" a "%BUILD_ARCHIVE_NAME%.zip" "%SERVER_FOLDER%" || goto :error
 
 :: Skip exiting with failure
 goto :EOF
index 79dd587cf78e21ba09c6d45cd0892e51d66d526a..b8819264887212caeebdc26b30a7ad4379e30b46 100644 (file)
@@ -34,7 +34,7 @@
 
 #define CASPAR_VERIFY(expr) do{if(!(expr)){ CASPAR_LOG(warning) << "Assertion Failed: " << \
        CASPAR_VERIFY_EXPR_STR(expr) << " " \
-       << "file:" << __FILE__ << " " \
+       << "file:" << log::remove_source_prefix(__FILE__) << " " \
        << "line:" << __LINE__ << " "; \
        _CASPAR_DBG_BREAK;\
        }}while(0);
@@ -46,4 +46,4 @@
 #define CASPAR_ASSERT(expr) CASPAR_VERIFY(expr)
 #else
 #define CASPAR_ASSERT(expr)
-#endif
\ No newline at end of file
+#endif
index 1ce341d46f349cf33805e96592640d3c41cb6f38..3c27669689dfc96906079abe85df61d032434c7f 100644 (file)
 #include <functional>
 #include <iostream>
 #include <fstream>
+#include <boost/algorithm/string/replace.hpp>
 
 namespace caspar { namespace env {
-       
+
+std::wstring initial;
 std::wstring media;
 std::wstring log;
 std::wstring ftemplate;
 std::wstring data;
 std::wstring font;
-std::wstring thumbnails;
+std::wstring thumbnail;
 boost::property_tree::wptree pt;
 
 void check_is_configured()
@@ -56,112 +58,100 @@ void check_is_configured()
                CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(L"Enviroment properties has not been configured"));
 }
 
+std::wstring clean_path(std::wstring path)
+{
+       boost::replace_all(path, L"\\\\", L"/");
+       boost::replace_all(path, L"\\", L"/");
+
+       return path;
+}
+
+std::wstring ensure_trailing_slash(std::wstring folder)
+{
+       if (folder.at(folder.length() - 1) != L'/')
+               folder.append(L"/");
+
+       return folder;
+}
+
+std::wstring resolve_or_create(const std::wstring& folder)
+{
+       auto found_path = find_case_insensitive(folder);
+
+       if (found_path)
+               return *found_path;
+       else
+       {
+               boost::system::error_code ec;
+               boost::filesystem::create_directories(folder, ec);
+
+               if (ec)
+                       CASPAR_THROW_EXCEPTION(user_error() << msg_info("Failed to create directory " + u8(folder) + " (" + ec.message() + ")"));
+
+               return folder;
+       }
+}
+
+void ensure_writable(const std::wstring& folder)
+{
+       static const std::wstring CREATE_FILE_TEST = L"casparcg_test_writable.empty";
+
+       boost::system::error_code       ec;
+       boost::filesystem::path         test_file(folder + L"/" + CREATE_FILE_TEST);
+       boost::filesystem::ofstream     out(folder + L"/" + CREATE_FILE_TEST);
+
+       if (out.fail())
+       {
+               boost::filesystem::remove(test_file, ec);
+               CASPAR_THROW_EXCEPTION(user_error() << msg_info(L"Directory " + folder + L" is not writable."));
+       }
+
+       out.close();
+       boost::filesystem::remove(test_file, ec);
+}
+
 void configure(const std::wstring& filename)
 {
        try
        {
-               auto initialPath = boost::filesystem::initial_path().wstring();
-       
-               boost::filesystem::wifstream file(initialPath + L"/" + filename);
+               initial = clean_path(boost::filesystem::initial_path().wstring());
+
+               boost::filesystem::wifstream file(initial + L"/" + filename);
                boost::property_tree::read_xml(file, pt, boost::property_tree::xml_parser::trim_whitespace | boost::property_tree::xml_parser::no_comments);
 
                auto paths      = pt.get_child(L"configuration.paths");
-               media           = paths.get(L"media-path", initialPath + L"/media/");
-               log                     = paths.get(L"log-path", initialPath + L"/log/");
-               ftemplate       = boost::filesystem::complete(paths.get(L"template-path", initialPath + L"/template/")).wstring();
-               data            = paths.get(L"data-path", initialPath + L"/data/");
-               font            = paths.get(L"font-path", initialPath + L"/font/");
-               thumbnails      = paths.get(L"thumbnail-path", initialPath + L"/thumbnail/");
+               media           = clean_path(paths.get(L"media-path", initial + L"/media/"));
+               log                     = clean_path(paths.get(L"log-path", initial + L"/log/"));
+               ftemplate       = clean_path(boost::filesystem::complete(paths.get(L"template-path", initial + L"/template/")).wstring());
+               data            = clean_path(paths.get(L"data-path", initial + L"/data/"));
+               font            = clean_path(paths.get(L"font-path", initial + L"/font/"));
+               thumbnail       = clean_path(paths.get(L"thumbnail-path", paths.get(L"thumbnails-path", initial + L"/thumbnail/")));
        }
-       catch(...)
+       catch (...)
        {
                CASPAR_LOG(error) << L" ### Invalid configuration file. ###";
                throw;
        }
 
-       try
-       {
-               auto found_media_path = find_case_insensitive(media);
-               if (found_media_path)
-                       media = *found_media_path;
-               else
-                       boost::filesystem::create_directories(media);
-
-               auto found_template_path = find_case_insensitive(ftemplate);
-               if (found_template_path)
-                       ftemplate = *found_template_path;
-               else
-                       boost::filesystem::create_directories(ftemplate);
-
-               auto found_data_path = find_case_insensitive(data);
-               if (found_data_path)
-                       data = *found_data_path;
-               else
-                       boost::filesystem::create_directories(data);
-
-               auto found_font_path = find_case_insensitive(font);
-               if (found_font_path)
-                       font = *found_font_path;
-               else
-                       boost::filesystem::create_directories(font);
-
-               auto found_thumbnails_path = find_case_insensitive(thumbnails);
-               if (found_thumbnails_path)
-                       thumbnails = *found_thumbnails_path;
-               else
-                       boost::filesystem::create_directories(thumbnails);
-
-               auto found_log_path = find_case_insensitive(log);
-               if (found_log_path)
-                       log = *found_log_path;
-               else if (!boost::filesystem::create_directories(log))
-                       log = L"./";
-
-               //Make sure that all paths have a trailing slash
-               if(media.at(media.length()-1) != L'/')
-                       media.append(L"/");
-               if(log.at(log.length()-1) != L'/')
-                       log.append(L"/");
-               if(ftemplate.at(ftemplate.length()-1) != L'/')
-                       ftemplate.append(L"/");
-               if(data.at(data.length()-1) != L'/')
-                       data.append(L"/");
-               if(font.at(font.length()-1) != L'/')
-                       font.append(L"/");
-               if(thumbnails.at(thumbnails.length()-1) != L'/')
-                       thumbnails.append(L"/");
-
-               try
-               {
-                       auto initialPath = boost::filesystem::initial_path().wstring();
-
-                       for(auto it = boost::filesystem::directory_iterator(initialPath); it != boost::filesystem::directory_iterator(); ++it)
-                       {
-                               if(it->path().wstring().find(L".fth") != std::wstring::npos)
-                               {
-                                       auto from_path = *it;
-                                       auto to_path = boost::filesystem::path(ftemplate + L"/" + it->path().wstring());
-
-                                       if(boost::filesystem::exists(to_path))
-                                               boost::filesystem::remove(to_path);
-
-                                       boost::filesystem::copy_file(from_path, to_path);
-                               }
-                       }
-               }
-               catch(...)
-               {
-                       CASPAR_LOG_CURRENT_EXCEPTION();
-                       CASPAR_LOG(error) << L"Failed to copy template-hosts from initial-path to template-path.";
-               }
-       }
-       catch(...)
-       {
-               CASPAR_LOG_CURRENT_EXCEPTION();
-               CASPAR_LOG(error) << L"Failed to create configured directories.";
-       }
+       media           = ensure_trailing_slash(resolve_or_create(media));
+       log                     = ensure_trailing_slash(resolve_or_create(log));
+       ftemplate       = ensure_trailing_slash(resolve_or_create(ftemplate));
+       data            = ensure_trailing_slash(resolve_or_create(data));
+       font            = ensure_trailing_slash(resolve_or_create(font));
+       thumbnail       = ensure_trailing_slash(resolve_or_create(thumbnail));
+
+       ensure_writable(log);
+       ensure_writable(ftemplate);
+       ensure_writable(data);
+       ensure_writable(thumbnail);
+}
+
+const std::wstring& initial_folder()
+{
+       check_is_configured();
+       return initial;
 }
-       
+
 const std::wstring& media_folder()
 {
        check_is_configured();
@@ -192,10 +182,10 @@ const std::wstring& font_folder()
        return font;
 }
 
-const std::wstring& thumbnails_folder()
+const std::wstring& thumbnail_folder()
 {
        check_is_configured();
-       return thumbnails;
+       return thumbnail;
 }
 
 #define QUOTE(str) #str
@@ -204,10 +194,11 @@ const std::wstring& thumbnails_folder()
 const std::wstring& version()
 {
        static std::wstring ver = u16(
-                       EXPAND_AND_QUOTE(CASPAR_GEN)    "." 
-                       EXPAND_AND_QUOTE(CASPAR_MAYOR)  "." 
-                       EXPAND_AND_QUOTE(CASPAR_MINOR)  "." 
-                       CASPAR_REV      " " 
+                       EXPAND_AND_QUOTE(CASPAR_GEN)    "."
+                       EXPAND_AND_QUOTE(CASPAR_MAYOR)  "."
+                       EXPAND_AND_QUOTE(CASPAR_MINOR)  "."
+                       EXPAND_AND_QUOTE(CASPAR_REV)    " "
+                       CASPAR_HASH                                             " "
                        CASPAR_TAG);
        return ver;
 }
@@ -218,4 +209,13 @@ const boost::property_tree::wptree& properties()
        return pt;
 }
 
+void log_configuration_warnings()
+{
+       if (pt.empty())
+               return;
+
+       if (pt.get_optional<std::wstring>(L"configuration.paths.thumbnails-path"))
+               CASPAR_LOG(warning) << L"Element thumbnails-path in casparcg.config has been deprecated. Use thumbnail-path instead.";
+}
+
 }}
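
The rewritten configure() now pushes every configured folder through the same pipeline: clean_path() normalises backslashes to forward slashes, resolve_or_create() reuses an existing directory (matched case-insensitively) or creates it, ensure_trailing_slash() appends the final slash, and ensure_writable() verifies the log, template, data and thumbnail folders by creating and removing a test file. A short sketch of that pipeline for one hypothetical path (the folder value is made up):

    // Sketch of the per-folder normalisation performed in configure():
    std::wstring data = clean_path(L"C:\\casparcg\\data");      // -> L"C:/casparcg/data"
    data = ensure_trailing_slash(resolve_or_create(data));      // existing (any case) or newly created, with trailing '/'
    ensure_writable(data);                                      // throws user_error if a test file cannot be created
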
index c7c78ff06872e75b903c8e02c70f68b460384f47..9b06fcf3165aea52de92e865ae876238294cad39 100644 (file)
@@ -29,14 +29,17 @@ namespace caspar { namespace env {
 
 void configure(const std::wstring& filename);
 
+const std::wstring& initial_folder();
 const std::wstring& media_folder();
 const std::wstring& log_folder();
 const std::wstring& template_folder();
 const std::wstring& data_folder();
-const std::wstring& thumbnails_folder();
+const std::wstring& thumbnail_folder();
 const std::wstring& font_folder();
 const std::wstring& version();
 
 const boost::property_tree::wptree& properties();
 
+void log_configuration_warnings();
+
 } }
index f4319f6d3dd5108584d8f01218c3b5bfa6fef89a..a830a9300020ce84397c8ab92f0e35e3378d6fb2 100644 (file)
@@ -24,6 +24,7 @@
 #include "utf.h"
 
 #include "os/stack_trace.h"
+#include "log.h"
 
 #include <exception>
 #include <list>
@@ -64,7 +65,7 @@ inline context_info_t         context_info(const T& str)              {return context_info_t(u8(str
 typedef boost::error_info<struct tag_line_info, size_t>                                                line_info;
 typedef boost::error_info<struct tag_nested_exception_, std::exception_ptr> nested_exception;
 
-struct caspar_exception                        : virtual boost::exception, virtual std::exception 
+struct caspar_exception                        : virtual boost::exception, virtual std::exception
 {
        caspar_exception(){}
        const char* what() const throw() override
@@ -125,9 +126,8 @@ private:
 #define _CASPAR_GENERATE_UNIQUE_IDENTIFIER(name, line) _CASPAR_GENERATE_UNIQUE_IDENTIFIER_CAT(name, line)
 #define CASPAR_SCOPED_CONTEXT_MSG(ctx_msg) ::caspar::scoped_context _CASPAR_GENERATE_UNIQUE_IDENTIFIER(SCOPED_CONTEXT, __LINE__)(u8(ctx_msg));
 
-#define CASPAR_THROW_EXCEPTION(e) BOOST_THROW_EXCEPTION(e << call_stack_info(caspar::get_call_stack()) << context_info(get_context()))
+#define CASPAR_THROW_EXCEPTION(e) ::boost::exception_detail::throw_exception_((e << call_stack_info(caspar::get_call_stack()) << context_info(get_context())), BOOST_THROW_EXCEPTION_CURRENT_FUNCTION, ::caspar::log::remove_source_prefix(__FILE__), __LINE__)
 
 std::string get_message_and_context(const caspar_exception& e);
 
 }
-
index 805c1bc9dbe6d080fbd7a418d0cd29dd34fb65a6..d6949615ac360c8b94671568a46eed750178f3db 100644 (file)
@@ -38,7 +38,6 @@
 #include <future>
 
 namespace caspar {
-               
 enum class task_priority
 {
        lowest_priority = 0,
@@ -50,35 +49,35 @@ enum class task_priority
 };
 
 class executor final
-{      
+{
        executor(const executor&);
        executor& operator=(const executor&);
-       
+
        typedef blocking_priority_queue<std::function<void()>, task_priority>   function_queue_t;
-       
-       const std::wstring                                                                                      name_;
-       tbb::atomic<bool>                                                                                       is_running_;
-       boost::thread                                                                                           thread_;        
-       function_queue_t                                                                                        execution_queue_;
-       tbb::atomic<bool>                                                                                       currently_in_task_;
-
-public:                
+
+       const std::wstring      name_;
+       tbb::atomic<bool>       is_running_;
+       boost::thread           thread_;
+       function_queue_t        execution_queue_;
+       tbb::atomic<bool>       currently_in_task_;
+
+public:
        executor(const std::wstring& name)
                : name_(name)
-               , execution_queue_(512, std::vector<task_priority> {
+               , execution_queue_(std::numeric_limits<int>::max(), std::vector<task_priority> {
                        task_priority::lowest_priority,
                        task_priority::lower_priority,
                        task_priority::low_priority,
                        task_priority::normal_priority,
                        task_priority::high_priority,
-                       task_priority::higher_priority 
+                       task_priority::higher_priority
                })
        {
                is_running_ = true;
                currently_in_task_ = false;
                thread_ = boost::thread([this]{run();});
        }
-       
+
        ~executor()
        {
                CASPAR_LOG(debug) << L"Shutting down " << name_;
@@ -95,7 +94,7 @@ public:
                {
                        CASPAR_LOG_CURRENT_EXCEPTION();
                }
-               
+
                join();
        }
 
@@ -106,19 +105,19 @@ public:
 
        template<typename Func>
        auto begin_invoke(Func&& func, task_priority priority = task_priority::normal_priority) -> std::future<decltype(func())> // noexcept
-       {       
+       {
                if(!is_running_)
                        CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info("executor not running.") << source_info(name_));
-                               
-               return internal_begin_invoke(std::forward<Func>(func), priority);       
+
+               return internal_begin_invoke(std::forward<Func>(func), priority);
        }
-       
+
        template<typename Func>
        auto invoke(Func&& func, task_priority prioriy = task_priority::normal_priority) -> decltype(func()) // noexcept
        {
                if(is_current())  // Avoids potential deadlock.
                        return func();
-               
+
                return begin_invoke(std::forward<Func>(func), prioriy).get();
        }
 
@@ -147,13 +146,13 @@ public:
        {
                return execution_queue_.space_available() == 0;
        }
-       
+
        void clear()
-       {               
+       {
                std::function<void ()> func;
                while(execution_queue_.try_pop(func));
        }
-                               
+
        void stop()
        {
                invoke([this]
@@ -166,16 +165,16 @@ public:
        {
                invoke([]{}, task_priority::lowest_priority);
        }
-               
-       function_queue_t::size_type size() const 
+
+       function_queue_t::size_type size() const
        {
-               return execution_queue_.size(); 
+               return execution_queue_.size();
        }
 
        bool is_running() const
        {
-               return is_running_; 
-       }       
+               return is_running_;
+       }
 
        bool is_current() const
        {
@@ -191,23 +190,23 @@ public:
        {
                return name_;
        }
-               
-private:       
+
+private:
 
        std::wstring print() const
        {
                return L"executor[" + name_ + L"]";
        }
-       
+
        template<typename Func>
        auto internal_begin_invoke(
                Func&& func,
                task_priority priority = task_priority::normal_priority) -> std::future<decltype(func())> // noexcept
-       {                                       
+       {
                typedef typename std::remove_reference<Func>::type      function_type;
                typedef decltype(func())                                                        result_type;
                typedef std::packaged_task<result_type()>                       task_type;
-                                                               
+
                std::shared_ptr<task_type> task;
 
                // Use pointers since the boost thread library doesn't fully support move semantics.
@@ -226,7 +225,7 @@ private:
                        delete raw_func2;
                        throw;
                }
-                               
+
                auto future = task->get_future().share();
                auto function = [task]
                {
@@ -239,6 +238,9 @@ private:
 
                if (!execution_queue_.try_push(priority, function))
                {
+                       if (is_current())
+                               CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" Overflow. Avoiding deadlock."));
+
                        CASPAR_LOG(warning) << print() << L" Overflow. Blocking caller.";
                        execution_queue_.push(priority, function);
                }
@@ -257,7 +259,14 @@ private:
                        catch (const caspar_exception& e)
                        {
                                if (!is_current()) // Add context information from this thread before rethrowing.
-                                       e << context_info(get_context() + *boost::get_error_info<context_info_t>(e));
+                               {
+                                       auto ctx_info = boost::get_error_info<context_info_t>(e);
+
+                                       if (ctx_info)
+                                               e << context_info(get_context() + *ctx_info);
+                                       else
+                                               e << context_info(get_context());
+                               }
 
                                throw;
                        }
@@ -267,7 +276,7 @@ private:
        void run() // noexcept
        {
                ensure_gpf_handler_installed_for_thread(u8(name_).c_str());
-               while(is_running_)
+               while (is_running_)
                {
                        try
                        {
@@ -276,14 +285,28 @@ private:
                                currently_in_task_ = true;
                                func();
                        }
-                       catch(...)
+                       catch (...)
                        {
                                CASPAR_LOG_CURRENT_EXCEPTION();
                        }
 
                        currently_in_task_ = false;
                }
-       }       
-};
 
+               // Execute rest
+               try
+               {
+                       std::function<void()> func;
+
+                       while (execution_queue_.try_pop(func))
+                       {
+                               func();
+                       }
+               }
+               catch (...)
+               {
+                       CASPAR_LOG_CURRENT_EXCEPTION();
+               }
+       }
+};
 }
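
Beyond the whitespace clean-up, the executor changes are behavioural: the queue bound is effectively removed (std::numeric_limits<int>::max()), an overflow caused from the executor's own thread now throws instead of risking a deadlock, a rethrown caspar_exception without context_info no longer dereferences a null boost::get_error_info result, and tasks still queued when the run loop stops are drained before the thread exits. A hedged sketch of the overflow rule, assuming an executor named ex:

    caspar::executor ex(L"example");

    // From a foreign thread, a full queue blocks the caller until space frees up.
    ex.begin_invoke([] { /* work */ });

    // From within one of the executor's own tasks, blocking on a full queue would
    // deadlock, so after this change begin_invoke throws invalid_operation instead.
    ex.invoke([&ex]
    {
            ex.begin_invoke([] { /* more work */ });
    });
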
index 41869ad0427a48c9268a387824bf60be23001aa3..d93eb1327f21a691158296c48e6b3028185b70d5 100644 (file)
@@ -22,6 +22,7 @@
 #include "stdafx.h"
 
 #include "filesystem.h"
+#include "except.h"
 
 #include <boost/filesystem/operations.hpp>
 #include <boost/filesystem/path.hpp>
@@ -44,7 +45,7 @@ boost::filesystem::path get_relative(
                        break;
 
                if (current_path.empty())
-                       throw std::runtime_error("File not relative to folder");
+                       CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("File " + file.string() + " not relative to folder " + relative_to.string()));
 
                result = current_path.filename() / result;
        }
index 12c0bfc603dacd78d2810fad32721b4b6622a6ef..65bea20b56b42e347a2fe89a841fa2b7d4adb5f1 100644 (file)
@@ -25,126 +25,6 @@ bool is_ready(const F& future)
        return future.wait_for(std::chrono::seconds(0)) == std::future_status::ready;
 }
 
-/**
- * A utility that helps the producer side of a future when the task is not
- * able to complete immediately but there are known retry points in the code.
- */
-template<class R>
-class retry_task
-{
-public:
-       typedef boost::function<boost::optional<R> ()> func_type;
-       
-       retry_task() : done_(false) {}
-
-       /**
-        * Reset the state with a new task. If the previous task has not completed
-        * the old one will be discarded.
-        *
-        * @param func The function that tries to calculate future result. If the
-        *             optional return value is set the future is marked as ready.
-        */
-       void set_task(const func_type& func)
-       {
-               boost::unique_lock<boost::mutex> lock(mutex_);
-
-               func_ = func;
-               done_ = false;
-               promise_ = std::promise<R>();
-       }
-
-       /**
-        * Take ownership of the future for the current task. Cannot only be called
-        * once for each task.
-        *
-        * @return the future.
-        */
-       std::future<R> get_future()
-       {
-               boost::unique_lock<boost::mutex> lock(mutex_);
-
-               return promise_.get_future();
-       }
-
-       /**
-        * Call this when it is guaranteed or probable that the task will be able
-        * to complete.
-        *
-        * @return true if the task completed (the future will have a result).
-        */
-       bool try_completion()
-       {
-               boost::unique_lock<boost::mutex> lock(mutex_);
-
-               return try_completion_internal();
-       }
-
-       /**
-        * Call this when it is certain that the result should be ready, and if not
-        * it should be regarded as an unrecoverable error (retrying again would
-        * be useless), so the future will be marked as failed.
-        *
-        * @param exception The exception to mark the future with *if* the task
-        *                  completion fails.
-        */
-       template <class E>
-       void try_or_fail(const E& exception)
-       {
-               boost::unique_lock<boost::mutex> lock(mutex_);
-
-               if (!try_completion_internal())
-               {
-                       try
-                       {
-                               throw exception;
-                       }
-                       catch (...)
-                       {
-                               CASPAR_LOG_CURRENT_EXCEPTION();
-                               promise_.set_exception(std::current_exception());
-                               done_ = true;
-                       }
-               }
-       }
-private:
-       bool try_completion_internal()
-       {
-               if (!func_)
-                       return false;
-
-               if (done_)
-                       return true;
-
-               boost::optional<R> result;
-
-               try
-               {
-                       result = func_();
-               }
-               catch (...)
-               {
-                       CASPAR_LOG_CURRENT_EXCEPTION();
-                       promise_.set_exception(std::current_exception());
-                       done_ = true;
-
-                       return true;
-               }
-
-               if (result)
-               {
-                       promise_.set_value(*result);
-                       done_ = true;
-               }
-
-               return done_;
-       }
-private:
-       boost::mutex mutex_;
-       func_type func_;
-       std::promise<R> promise_;
-       bool done_;
-};
-
 /**
  * Wrap a value in a future with an already known result.
  * <p>
index 2ff4642d86299c306b5b1bad843fd5d76d268b1f..72b12306108ce709447236ace93f47959fcadfbb 100644 (file)
@@ -30,9 +30,9 @@
 
 #include <GL/glew.h>
 
-namespace caspar { namespace gl {      
+namespace caspar { namespace gl {
 
-void SMFL_GLCheckError(const std::string&, const std::string& file, unsigned int line)
+void SMFL_GLCheckError(const std::string&, const char* func, const char* file, unsigned int line)
 {
        // Get the last error
        GLenum LastErrorCode = GL_NO_ERROR;
@@ -49,53 +49,67 @@ void SMFL_GLCheckError(const std::string&, const std::string& file, unsigned int
                switch (LastErrorCode)
                {
                        case GL_INVALID_ENUM :
-                               CASPAR_THROW_EXCEPTION(ogl_invalid_enum()
-                                       << msg_info("an unacceptable value has been specified for an enumerated argument")
-                                       << error_info("GL_INVALID_ENUM")
-                                       << line_info(line)
-                                       << source_info(file));
+                               ::boost::exception_detail::throw_exception_(
+                                       ogl_invalid_enum()
+                                               << msg_info("an unacceptable value has been specified for an enumerated argument")
+                                               << error_info("GL_INVALID_ENUM")
+                                               << call_stack_info(caspar::get_call_stack())
+                                               << context_info(get_context()),
+                                       func, log::remove_source_prefix(file), line);
 
                        case GL_INVALID_VALUE :
-                               CASPAR_THROW_EXCEPTION(ogl_invalid_value()
-                                       << msg_info("a numeric argument is out of range")
-                                       << error_info("GL_INVALID_VALUE")
-                                       << line_info(line)
-                                       << source_info(file));
+                               ::boost::exception_detail::throw_exception_(
+                                       ogl_invalid_value()
+                                               << msg_info("a numeric argument is out of range")
+                                               << error_info("GL_INVALID_VALUE")
+                                               << call_stack_info(caspar::get_call_stack())
+                                               << context_info(get_context()),
+                                       func, log::remove_source_prefix(file), line);
 
                        case GL_INVALID_OPERATION :
-                               CASPAR_THROW_EXCEPTION(ogl_invalid_operation()
-                                       << msg_info("the specified operation is not allowed in the current state")
-                                       << error_info("GL_INVALID_OPERATION")
-                                       << line_info(line)
-                                       << source_info(file));
+                               ::boost::exception_detail::throw_exception_(
+                                       ogl_invalid_operation()
+                                               << msg_info("the specified operation is not allowed in the current state")
+                                               << error_info("GL_INVALID_OPERATION")
+                                               << call_stack_info(caspar::get_call_stack())
+                                               << context_info(get_context()),
+                                       func, log::remove_source_prefix(file), line);
 
                        case GL_STACK_OVERFLOW :
-                               CASPAR_THROW_EXCEPTION(ogl_stack_overflow()
-                                       << msg_info("this command would cause a stack overflow")
-                                       << error_info("GL_STACK_OVERFLOW")
-                                       << line_info(line)
-                                       << source_info(file));
+                               ::boost::exception_detail::throw_exception_(
+                                       ogl_stack_overflow()
+                                               << msg_info("this command would cause a stack overflow")
+                                               << error_info("GL_STACK_OVERFLOW")
+                                               << call_stack_info(caspar::get_call_stack())
+                                               << context_info(get_context()),
+                                       func, log::remove_source_prefix(file), line);
 
                        case GL_STACK_UNDERFLOW :
-                               CASPAR_THROW_EXCEPTION(ogl_stack_underflow()
-                                       << msg_info("this command would cause a stack underflow")
-                                       << error_info("GL_STACK_UNDERFLOW")
-                                       << line_info(line)
-                                       << source_info(file));
+                               ::boost::exception_detail::throw_exception_(
+                                       ogl_stack_underflow()
+                                               << msg_info("this command would cause a stack underflow")
+                                               << error_info("GL_STACK_UNDERFLOW")
+                                               << call_stack_info(caspar::get_call_stack())
+                                               << context_info(get_context()),
+                                       func, log::remove_source_prefix(file), line);
 
                        case GL_OUT_OF_MEMORY :
-                               CASPAR_THROW_EXCEPTION(ogl_out_of_memory()
-                                       << msg_info("there is not enough memory left to execute the command")
-                                       << error_info("GL_OUT_OF_MEMORY")
-                                       << line_info(line)
-                                       << source_info(file));
+                               ::boost::exception_detail::throw_exception_(
+                                       ogl_out_of_memory()
+                                               << msg_info("there is not enough memory left to execute the command")
+                                               << error_info("GL_OUT_OF_MEMORY")
+                                               << call_stack_info(caspar::get_call_stack())
+                                               << context_info(get_context()),
+                                       func, log::remove_source_prefix(file), line);
 
                        case GL_INVALID_FRAMEBUFFER_OPERATION_EXT :
-                               CASPAR_THROW_EXCEPTION(ogl_stack_underflow()
-                                       << msg_info("the object bound to FRAMEBUFFER_BINDING_EXT is not \"framebuffer complete\"")
-                                       << error_info("GL_INVALID_FRAMEBUFFER_OPERATION_EXT")
-                                       << line_info(line)
-                                       << source_info(file));
+                               ::boost::exception_detail::throw_exception_(
+                                       ogl_invalid_framebuffer_operation_ext()
+                                               << msg_info("the object bound to FRAMEBUFFER_BINDING_EXT is not \"framebuffer complete\"")
+                                               << error_info("GL_INVALID_FRAMEBUFFER_OPERATION_EXT")
+                                               << call_stack_info(caspar::get_call_stack())
+                                               << context_info(get_context()),
+                                       func, log::remove_source_prefix(file), line);
                }
        }
 }
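
Each GL error now throws through boost::exception_detail::throw_exception_ with the function, prefix-stripped file and line forwarded from the macro expansion, plus a call stack and context, so the exception points at the failing GL(...) call site rather than at SMFL_GLCheckError itself. A minimal usage sketch (the wrapped GL call is arbitrary):

    // GL(...) evaluates the expression and then checks glGetError(); on failure it
    // throws the matching ogl_* exception tagged with this call site's
    // __FUNCTION__, __FILE__ (source prefix removed) and __LINE__.
    GL(glViewport(0, 0, width, height));
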
index 2ba7fa0039f9f1d212f2203d1ddcf27c7f94a335..f6ceb852571c2fe5c923e68b2b0765500895bc12 100644 (file)
@@ -33,21 +33,21 @@ struct ogl_invalid_enum                                                     : virtual ogl_exception{};
 struct ogl_invalid_value                                               : virtual ogl_exception{};
 struct ogl_invalid_operation                                   : virtual ogl_exception{};
 struct ogl_stack_overflow                                              : virtual ogl_exception{};
-struct ogl_stack_underflow                                             : virtual ogl_exception{};                      
+struct ogl_stack_underflow                                             : virtual ogl_exception{};
 struct ogl_out_of_memory                                               : virtual ogl_exception{};
 struct ogl_invalid_framebuffer_operation_ext   : virtual ogl_exception{};
 
-void SMFL_GLCheckError(const std::string& expr, const std::string& File, unsigned int Line);
+void SMFL_GLCheckError(const std::string& expr, const char* func, const char* file, unsigned int line);
 
 //#ifdef _DEBUG
-       
+
 #define CASPAR_GL_EXPR_STR(expr) #expr
 
 #define GL(expr) \
        if(false){}else \
        { \
                (expr);  \
-               caspar::gl::SMFL_GLCheckError(CASPAR_GL_EXPR_STR(expr), __FILE__, __LINE__);\
+               caspar::gl::SMFL_GLCheckError(CASPAR_GL_EXPR_STR(expr), __FUNCTION__, __FILE__, __LINE__);\
        }
 
 // TODO: decltype version does not play well with gcc
@@ -55,7 +55,7 @@ void SMFL_GLCheckError(const std::string& expr, const std::string& File, unsigne
        [&]()\
        {\
                auto ret = (expr);\
-               caspar::gl::SMFL_GLCheckError(CASPAR_GL_EXPR_STR(expr), __FILE__, __LINE__);\
+               caspar::gl::SMFL_GLCheckError(CASPAR_GL_EXPR_STR(expr), __FUNCTION__, __FILE__, __LINE__);\
                return ret;\
        }()
 /*#define GL2(expr) \
index 31a34577617694555741f1e021b5d342bb45bca8..b6488739dffac300eb56ea85a2b9d12e445f312d 100644 (file)
@@ -23,6 +23,7 @@
 
 #include "log.h"
 
+#include "os/threading.h"
 #include "except.h"
 #include "utf.h"
 
@@ -47,7 +48,7 @@
 #include <boost/log/sinks/async_frontend.hpp>
 #include <boost/log/core/record.hpp>
 #include <boost/log/attributes/attribute_value.hpp>
-#include <boost/log/attributes/current_thread_id.hpp>
+#include <boost/log/attributes/function.hpp>
 #include <boost/log/utility/setup/common_attributes.hpp>
 
 #include <boost/core/null_deleter.hpp>
@@ -121,7 +122,7 @@ void my_formatter(bool print_all_characters, const boost::log::record_view& rec,
 
        std::wstringstream pre_message_stream;
        append_timestamp(pre_message_stream);
-       thread_id_column.write(pre_message_stream, boost::log::extract<boost::log::attributes::current_thread_id::value_type>("ThreadID", rec).get().native_id());
+       thread_id_column.write(pre_message_stream, boost::log::extract<std::int64_t>("NativeThreadId", rec));
        severity_column.write(pre_message_stream, boost::log::extract<boost::log::trivial::severity_level>("Severity", rec));
 
        auto pre_message = pre_message_stream.str();
@@ -141,10 +142,11 @@ void my_formatter(bool print_all_characters, const boost::log::record_view& rec,
 }
 
 namespace internal{
-       
+
 void init()
-{      
+{
        boost::log::add_common_attributes();
+       boost::log::core::get()->add_global_attribute("NativeThreadId", boost::log::attributes::make_function(&get_current_thread_id));
        typedef boost::log::sinks::asynchronous_sink<boost::log::sinks::wtext_ostream_backend> stream_sink_type;
 
        auto stream_backend = boost::make_shared<boost::log::sinks::wtext_ostream_backend>();
@@ -323,4 +325,14 @@ void print_child(
                print_child(level, indent + (elem.empty() ? L"" : elem + L"."), child.first, child.second);
 }
 
+const char* remove_source_prefix(const char* file)
+{
+       auto found = boost::ifind_first(file, get_source_prefix().c_str());
+
+       if (found)
+               return found.end();
+       else
+               return file;
+}
+
 }}
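
remove_source_prefix() strips the build-time CASPAR_SOURCE_PREFIX from a __FILE__ string so that log lines and exceptions show repository-relative paths. A small example of the intended effect, with a made-up checkout location:

    // Assuming the server was built with CASPAR_SOURCE_PREFIX="/home/user/casparcg",
    // a path inside the tree loses the prefix; anything else is returned unchanged.
    const char* shortened = caspar::log::remove_source_prefix(
            "/home/user/casparcg/core/video_channel.cpp");      // -> "core/video_channel.cpp"
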
index 4cc983e4556f0b787a1c10df62f7bc28a17f352c..860c1a6466f024852cf96974d8109b37c8b26304 100644 (file)
 #include <memory>
 
 namespace caspar { namespace log {
-       
+
 namespace internal{
 void init();
 std::wstring get_call_stack();
 }
 
+const char* remove_source_prefix(const char* file);
+
 template<typename T>
 inline void replace_nonprintable(std::basic_string<T, std::char_traits<T>, std::allocator<T>>& str, T with)
 {
        std::locale loc;
        std::replace_if(str.begin(), str.end(), [&](T c)->bool {
-               return 
-                       (!std::isprint(c, loc) 
-                       && c != '\r' 
+               return
+                       (!std::isprint(c, loc)
+                       && c != '\r'
                        && c != '\n')
                        || c > static_cast<T>(127);
        }, with);
@@ -119,4 +121,3 @@ void print_child(
                const boost::property_tree::wptree& tree);
 
 }}
-
index 35127f674a5845afb653a1581f28fe6367d12303..810db56f024ca2e491c5a8385ab5853c7dd0d5b0 100644 (file)
@@ -727,6 +727,18 @@ shared_ptr<T> make_shared(P0&& p0, P1&& p1, P2&& p2, P3&& p3, P4&& p4, P5&& p5,
        return shared_ptr<T>(std::make_shared<T>(std::forward<P0>(p0), std::forward<P1>(p1), std::forward<P2>(p2), std::forward<P3>(p3), std::forward<P4>(p4), std::forward<P5>(p5), std::forward<P6>(p6), std::forward<P7>(p7), std::forward<P8>(p8), std::forward<P9>(p9)));
 }
 
+template<typename T, typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9, typename P10>
+shared_ptr<T> make_shared(P0&& p0, P1&& p1, P2&& p2, P3&& p3, P4&& p4, P5&& p5, P6&& p6, P7&& p7, P8&& p8, P9&& p9, P10&& p10)
+{
+       return shared_ptr<T>(std::make_shared<T>(std::forward<P0>(p0), std::forward<P1>(p1), std::forward<P2>(p2), std::forward<P3>(p3), std::forward<P4>(p4), std::forward<P5>(p5), std::forward<P6>(p6), std::forward<P7>(p7), std::forward<P8>(p8), std::forward<P9>(p9), std::forward<P10>(p10)));
+}
+
+template<typename T, typename P0, typename P1, typename P2, typename P3, typename P4, typename P5, typename P6, typename P7, typename P8, typename P9, typename P10, typename P11>
+shared_ptr<T> make_shared(P0&& p0, P1&& p1, P2&& p2, P3&& p3, P4&& p4, P5&& p5, P6&& p6, P7&& p7, P8&& p8, P9&& p9, P10&& p10, P11&& p11)
+{
+       return shared_ptr<T>(std::make_shared<T>(std::forward<P0>(p0), std::forward<P1>(p1), std::forward<P2>(p2), std::forward<P3>(p3), std::forward<P4>(p4), std::forward<P5>(p5), std::forward<P6>(p6), std::forward<P7>(p7), std::forward<P8>(p8), std::forward<P9>(p9), std::forward<P10>(p10), std::forward<P11>(p11)));
+}
+
 template<typename T>
 shared_ptr<T>::shared_ptr() 
     : p_(make_shared<T>())
index 3ab4a7332266fa792ea095041ec9e45f193b779a..579558a0037c167ca84503ffd8117662bae1b851 100644 (file)
@@ -54,20 +54,19 @@ void ensure_gpf_handler_installed_for_thread(
 {
        static const int MAX_LINUX_THREAD_NAME_LEN = 15;
        static auto install = []() { do_install_handlers(); return 0; } ();
+
+       auto& for_thread = get_thread_info();
        
-       if (thread_description)
+       if (thread_description && for_thread.name.empty())
        {
-               get_thread_info().name = thread_description;
+               for_thread.name = thread_description;
+
+               std::string kernel_thread_name = for_thread.name;
+
+               if (kernel_thread_name.length() > MAX_LINUX_THREAD_NAME_LEN)
+                       kernel_thread_name.resize(MAX_LINUX_THREAD_NAME_LEN);
 
-               if (std::strlen(thread_description) > MAX_LINUX_THREAD_NAME_LEN)
-               {
-                       char truncated[MAX_LINUX_THREAD_NAME_LEN + 1];
-                       std::memcpy(truncated, thread_description, MAX_LINUX_THREAD_NAME_LEN);
-                       truncated[MAX_LINUX_THREAD_NAME_LEN] = 0;
-                       pthread_setname_np(pthread_self(), truncated);
-               }
-               else
-                       pthread_setname_np(pthread_self(), thread_description);
+               pthread_setname_np(pthread_self(), kernel_thread_name.c_str());
        }
 }
 
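
The Linux handler now records the thread description only once per thread and truncates the kernel-visible name to 15 characters, the limit pthread_setname_np accepts on Linux. A self-contained sketch of just the truncation rule, independent of the thread-info bookkeeping:

    #include <pthread.h>
    #include <string>

    // Linux allows 15 name characters plus the terminating NUL, so longer
    // descriptions are cut down before being handed to the kernel.
    void set_kernel_thread_name(std::string name)
    {
            static const std::size_t MAX_LINUX_THREAD_NAME_LEN = 15;

            if (name.length() > MAX_LINUX_THREAD_NAME_LEN)
                    name.resize(MAX_LINUX_THREAD_NAME_LEN);

            pthread_setname_np(pthread_self(), name.c_str());
    }
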
index c2369e2c86f239882e868f5a6db03706ab2a97b4..53a9c3567e841ff6213490cb8e734bba34a4f6e0 100644 (file)
@@ -102,4 +102,12 @@ std::wstring get_call_stack()
        }
 }
 
+const std::string& get_source_prefix()
+{
+       static const auto SOURCE_PREFIX = std::string(CASPAR_SOURCE_PREFIX) + "/";
+
+       return SOURCE_PREFIX;
 }
+
+}
+
index 2062372bc0af82e43db56cb46ff41c2e729825ec..efc1a6d3539eb46689202c6f6ae9ad75296c97b8 100644 (file)
@@ -26,5 +26,6 @@
 namespace caspar {
 
 std::wstring get_call_stack();
+const std::string& get_source_prefix();
 
 }
index ff5dc28bf518c48a6e68debe43d3b45955c9d6aa..0b361e7324d832483652f4b68e22af7e2cba984d 100644 (file)
 
 #include "../../compiler/vs/StackWalker.h"
 
+#include <boost/algorithm/string/replace.hpp>
+
 #include <utility>
 
 #include <tbb/enumerable_thread_specific.h>
+#include <boost/algorithm/string/find.hpp>
 
 namespace caspar {
 
@@ -38,11 +41,18 @@ std::wstring get_call_stack()
        {
                std::string str_ = "\n";
        public:
-               log_call_stack_walker() : StackWalker() {}
+               log_call_stack_walker()
+                       : StackWalker()
+               {
+               }
 
                std::string flush()
                {
-                       return std::move(str_);
+                       auto result = std::move(str_);
+
+                       str_ = "\n";
+
+                       return result;
                }
        protected:
                virtual void OnSymInit(LPCSTR szSearchPath, DWORD symOptions, LPCSTR szUserName) override
@@ -58,8 +68,20 @@ std::wstring get_call_stack()
                {
                        std::string str = szText;
 
-                       if(str.find("caspar::get_call_stack") == std::string::npos && str.find("StackWalker::ShowCallstack") == std::string::npos)
+                       auto include_path = boost::find_first(str, "\\include\\");
+
+                       if (include_path)
+                       {
+                               str.erase(str.begin(), include_path.end());
+                       }
+
+                       boost::ireplace_all(str, get_source_prefix(), "");
+
+                       if (str.find("caspar::get_call_stack") == std::string::npos &&
+                               str.find("StackWalker::ShowCallstack") == std::string::npos)
+                       {
                                str_ += "    " + std::move(str);
+                       }
                }
        };
 
@@ -72,8 +94,23 @@ std::wstring get_call_stack()
        }
        catch(...)
        {
-               return L"!!!";
+               return L"Bug in stacktrace code!!!";
        }
 }
 
+const std::string& get_source_prefix()
+{
+       static const std::string SOURCE_PREFIX = []
+       {
+               std::string result = CASPAR_SOURCE_PREFIX;
+
+               boost::replace_all(result, "/", "\\");
+               result += "\\";
+
+               return result;
+       }();
+
+       return SOURCE_PREFIX;
+}
+
 }
index 78d23c2071ed13b5423720dccdbd8523bccbe0eb..91d9fa4d873c75c4610c3387938f1a41e601992c 100644 (file)
@@ -24,6 +24,21 @@ bool contains_param(const std::wstring& name, C&& params)
        return std::find_if(params.begin(), params.end(), param_comparer(name)) != params.end();
 }
 
+template<typename C>
+bool get_and_consume_flag(const std::wstring& flag_param, C& params)
+{
+       auto flag_it = std::find_if(params.begin(), params.end(), param_comparer(flag_param));
+       bool flag = false;
+
+       if (flag_it != params.end())
+       {
+               flag = true;
+               params.erase(flag_it);
+       }
+
+       return flag;
+}
+
 template<typename C>
 void replace_placeholders(const std::wstring& placeholder, const std::wstring& replacement, C&& params)
 {
@@ -31,13 +46,32 @@ void replace_placeholders(const std::wstring& placeholder, const std::wstring& r
                boost::ireplace_all(param, placeholder, replacement);
 }
 
+static std::vector<std::wstring> protocol_split(const std::wstring& s)
+{
+       std::vector<std::wstring> result;
+       size_t pos;
+
+       if ((pos = s.find(L"://")) != std::wstring::npos)
+       {
+               result.push_back(s.substr(0, pos));
+               result.push_back(s.substr(pos + 3));
+       }
+       else
+       {
+               result.push_back(L"");
+               result.push_back(s);
+       }
+
+       return result;
+}
+
 template<typename T, typename C>
 typename std::enable_if<!std::is_convertible<T, std::wstring>::value, typename std::decay<T>::type>::type get_param(const std::wstring& name, C&& params, T fail_value = T())
-{      
+{
        auto it = std::find_if(std::begin(params), std::end(params), param_comparer(name));
-       if(it == params.end())  
+       if(it == params.end())
                return fail_value;
-       
+
        try
        {
                if(++it == params.end())
@@ -46,27 +80,27 @@ typename std::enable_if<!std::is_convertible<T, std::wstring>::value, typename s
                return boost::lexical_cast<typename std::decay<T>::type>(*it);
        }
        catch(...)
-       {               
+       {
                CASPAR_THROW_EXCEPTION(user_error() << msg_info(L"Failed to parse param " + name) << nested_exception(std::current_exception()));
        }
 }
 
 template<typename C>
 std::wstring get_param(const std::wstring& name, C&& params, const std::wstring& fail_value = L"")
-{      
+{
        auto it = std::find_if(std::begin(params), std::end(params), param_comparer(name));
-       if(it == params.end())  
+       if(it == params.end())
                return fail_value;
-       
+
        try
        {
                if(++it == params.end())
                        throw std::out_of_range("");
 
-               return *it;     
+               return *it;
        }
        catch(...)
-       {               
+       {
                CASPAR_THROW_EXCEPTION(user_error() << msg_info(L"Failed to parse param " + name) << nested_exception(std::current_exception()));
        }
 }
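
The new helpers extend the parameter utilities with a consuming flag lookup and a simple protocol splitter. A hedged usage sketch with made-up AMCP-style parameters (namespace qualification assumed):

    std::vector<std::wstring> params = { L"rtmp://example.com/live", L"LOOP" };

    // Removes L"LOOP" from params and reports that it was present.
    bool loop = caspar::get_and_consume_flag(L"LOOP", params);

    // parts[0] == L"rtmp", parts[1] == L"example.com/live";
    // without a "://" separator the protocol part is left empty.
    auto parts = caspar::protocol_split(params.at(0));
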
index c189218ea31ab0d450ef545009477131c33e241f..74962590b1884174f189dbb0275be81fcd7841de 100644 (file)
@@ -203,17 +203,19 @@ private:
        }
 };
 
-class polling_filesystem_monitor : public filesystem_monitor
+class polling_filesystem_monitor
+               : public filesystem_monitor
+               , public spl::enable_shared_from_this<polling_filesystem_monitor>
 {
-       tbb::atomic<bool> running_;
-       std::shared_ptr<boost::asio::io_service> scheduler_;
-       directory_monitor root_monitor_;
-       boost::asio::deadline_timer timer_;
-       int scan_interval_millis_;
-       std::promise<void> initial_scan_completion_;
-       tbb::concurrent_queue<boost::filesystem::path> to_reemmit_;
-       tbb::atomic<bool> reemmit_all_;
-       executor executor_;
+       tbb::atomic<bool>                                                               running_;
+       std::shared_ptr<boost::asio::io_service>                scheduler_;
+       directory_monitor                                                               root_monitor_;
+       boost::asio::deadline_timer                                             timer_;
+       int                                                                                             scan_interval_millis_;
+       std::promise<void>                                                              initial_scan_completion_;
+       tbb::concurrent_queue<boost::filesystem::path>  to_reemmit_;
+       tbb::atomic<bool>                                                               reemmit_all_;
+       executor                                                                                executor_;
 public:
        polling_filesystem_monitor(
                        const boost::filesystem::path& folder_to_watch,
@@ -231,6 +233,10 @@ public:
        {
                running_ = true;
                reemmit_all_ = false;
+       }
+
+       void start()
+       {
                executor_.begin_invoke([this]
                {
                        scan();
@@ -243,7 +249,7 @@ public:
        {
                running_ = false;
                boost::system::error_code e;
-               timer_.cancel(e);
+		timer_.cancel(e); // Can still be queued for execution by asio, therefore the task has a weak_ptr to this
        }
 
        std::future<void> initial_files_processed() override
@@ -266,11 +272,15 @@ private:
                if (!running_)
                        return;
 
-               timer_.expires_from_now(
-                       boost::posix_time::milliseconds(scan_interval_millis_));
-               timer_.async_wait([this](const boost::system::error_code& e)
+               std::weak_ptr<polling_filesystem_monitor> weak_self = shared_from_this();
+
+               timer_.expires_from_now(boost::posix_time::milliseconds(scan_interval_millis_));
+               timer_.async_wait([weak_self](const boost::system::error_code& e)
                {
-                       begin_scan();
+                       auto strong_self = weak_self.lock();
+
+                       if (strong_self)
+                               strong_self->begin_scan();
                });
        }
 
@@ -344,7 +354,7 @@ filesystem_monitor::ptr polling_filesystem_monitor_factory::create(
                const filesystem_monitor_handler& handler,
                const initial_files_handler& initial_files_handler)
 {
-       return spl::make_shared<polling_filesystem_monitor>(
+       auto monitor = spl::make_shared<polling_filesystem_monitor>(
                        folder_to_watch,
                        events_of_interest_mask,
                        report_already_existing,
@@ -352,6 +362,10 @@ filesystem_monitor::ptr polling_filesystem_monitor_factory::create(
                        impl_->scheduler_,
                        handler,
                        initial_files_handler);
+
+       monitor->start();
+
+       return monitor;
 }
 
 }
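
Construction and start-up of the monitor are now separate steps: the asio timer handler captures only a weak_ptr, so a handler that fires after the monitor has been destroyed simply does nothing, and the factory calls start() after make_shared so that shared_from_this() is valid. A compact sketch of the same pattern for an arbitrary class (all names illustrative):

    #include <boost/asio.hpp>
    #include <memory>

    // Illustrative pattern: never capture a raw `this` in an asio handler that may
    // outlive the object; capture a weak_ptr and lock it when the handler runs.
    class ticker : public std::enable_shared_from_this<ticker>
    {
            boost::asio::deadline_timer timer_;
    public:
            explicit ticker(boost::asio::io_service& service)
                    : timer_(service)
            {
            }

            void start() // call on a shared_ptr-managed instance, never from the constructor
            {
                    std::weak_ptr<ticker> weak_self = shared_from_this();

                    timer_.expires_from_now(boost::posix_time::seconds(1));
                    timer_.async_wait([weak_self](const boost::system::error_code&)
                    {
                            if (auto self = weak_self.lock())
                                    self->start(); // reschedule only while the object is alive
                    });
            }
    };
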
index 04e36426045c48c9603807a9c7f0a90d1806c9f8..7baccda0e52df188f8e782e135406b8166a8488d 100644 (file)
 #include <boost/thread/mutex.hpp>
 #include <boost/thread/condition_variable.hpp>
 
+#include <map>
+#include <queue>
+#include <functional>
+
 namespace caspar {
 
 template <class N, class Func>
@@ -43,9 +47,10 @@ void repeat_n(N times_to_repeat_block, const Func& func)
  */
 class semaphore : boost::noncopyable
 {
-       mutable boost::mutex mutex_;
-       unsigned int permits_;
-       boost::condition_variable_any permits_available_;
+       mutable boost::mutex                                                                            mutex_;
+       unsigned int                                                                                            permits_;
+       boost::condition_variable_any                                                           permits_available_;
+       std::map<unsigned int, std::queue<std::function<void()>>>       callbacks_per_requested_permits_;
 public:
        /**
         * Constructor.
@@ -66,6 +71,7 @@ public:
 
                ++permits_;
 
+               perform_callback_based_acquire();
                permits_available_.notify_one();
        }
 
@@ -80,6 +86,7 @@ public:
 
                permits_ += permits;
 
+               perform_callback_based_acquire();
                repeat_n(permits, [this] { permits_available_.notify_one(); });
        }
 
@@ -112,11 +119,11 @@ public:
 
                while (true)
                {
-                       auto num_wanted = permits - num_acquired;
-                       auto to_drain = std::min(num_wanted, permits_);
+                       auto num_wanted = permits - num_acquired;
+                       auto to_drain   = std::min(num_wanted, permits_);
 
-                       permits_ -= to_drain;
-                       num_acquired += to_drain;
+                       permits_                -= to_drain;
+                       num_acquired    += to_drain;
 
                        if (num_acquired == permits)
                                break;
@@ -125,6 +132,20 @@ public:
                }
        }
 
+       /**
+       * Acquire a number of permits. Will not block, but will instead invoke a callback
+       * when the specified number of permits is available and has been acquired.
+       *
+       * @param permits           The number of permits to acquire.
+       * @param acquired_callback The callback to invoke when acquired.
+       */
+       void acquire(unsigned int permits, std::function<void()> acquired_callback)
+       {
+               boost::unique_lock<boost::mutex> lock(mutex_);
+
+               callbacks_per_requested_permits_[permits].push(std::move(acquired_callback));
+       }
+
        /**
         * Acquire a number of permits. Will block until the given number of
         * permits has been acquired if not enough permits are currently available
@@ -143,11 +164,11 @@ public:
 
                while (true)
                {
-                       auto num_wanted = permits - num_acquired;
-                       auto to_drain = std::min(num_wanted, permits_);
+                       auto num_wanted = permits - num_acquired;
+                       auto to_drain   = std::min(num_wanted, permits_);
 
-                       permits_ -= to_drain;
-                       num_acquired += to_drain;
+                       permits_                -= to_drain;
+                       num_acquired    += to_drain;
 
                        if (num_acquired == permits)
                                break;
@@ -194,6 +215,47 @@ public:
 
                return permits_;
        }
+
+private:
+       void perform_callback_based_acquire()
+       {
+               if (callbacks_per_requested_permits_.empty())
+                       return;
+
+               while (
+                       !callbacks_per_requested_permits_.empty() &&
+                       callbacks_per_requested_permits_.begin()->first <= permits_)
+               {
+                       auto requested_permits_and_callbacks    = callbacks_per_requested_permits_.begin();
+                       auto requested_permits                                  = requested_permits_and_callbacks->first;
+                       auto& callbacks                                                 = requested_permits_and_callbacks->second;
+
+                       if (callbacks.empty())
+                       {
+                               callbacks_per_requested_permits_.erase(requested_permits_and_callbacks);
+                               continue;
+                       }
+
+                       auto& callback                                                  = callbacks.front();
+
+                       permits_ -= requested_permits;
+                       mutex_.unlock();
+
+                       try
+                       {
+                               callback();
+                       }
+                       catch (...)
+                       {
+                       }
+
+                       mutex_.lock();
+                       callbacks.pop();
+
+                       if (callbacks.empty())
+                               callbacks_per_requested_permits_.erase(requested_permits_and_callbacks);
+               }
+       }
 };
 
 /**
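
The callbacks_per_requested_permits_ map and perform_callback_based_acquire() above make the new acquire(permits, callback) overload non-blocking: the request is queued and the callback is later invoked from release(), outside the lock, once enough permits have accumulated. A hedged usage sketch, assuming <common/semaphore.h> as shown above:

#include <common/semaphore.h>

#include <iostream>

void callback_acquire_example()
{
    caspar::semaphore tokens(0);

    // Ask for 2 permits; returns immediately, the callback is queued.
    tokens.acquire(2, []
    {
        std::cout << "2 permits acquired via callback" << std::endl;
    });

    tokens.release();   // only 1 permit so far: the callback stays queued
    tokens.release();   // the requested count is reached and the callback runs here
}
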
index 562df91cf55f17d675deb765b7d3b22e3ef1675a..21ef8a9bdef15a137c68de1968d2efff4c524801 100644 (file)
@@ -2,6 +2,8 @@ cmake_minimum_required (VERSION 2.6)
 project (core)
 
 set(SOURCES
+               consumer/syncto/syncto_consumer.cpp
+
                consumer/frame_consumer.cpp
                consumer/output.cpp
                consumer/port.cpp
@@ -58,6 +60,8 @@ set(SOURCES
                video_format.cpp
 )
 set(HEADERS
+               consumer/syncto/syncto_consumer.h
+
                consumer/frame_consumer.h
                consumer/output.h
                consumer/port.h
@@ -86,6 +90,10 @@ set(HEADERS
                interaction/interaction_sink.h
                interaction/util.h
 
+               mixer/audio/audio_mixer.h
+
+               mixer/image/blend_modes.h
+
                mixer/mixer.h
 
                monitor/monitor.h
@@ -145,6 +153,7 @@ include_directories(${GLEW_INCLUDE_PATH})
 
 source_group(sources ./*)
 source_group(sources\\consumer consumer/*)
+source_group(sources\\consumer\\syncto consumer/syncto/*)
 source_group(sources\\diagnostics diagnostics/*)
 source_group(sources\\producer producer/*)
 source_group(sources\\producer\\framerate producer/framerate/*)
@@ -152,6 +161,8 @@ source_group(sources\\frame frame/*)
 source_group(sources\\help help/*)
 source_group(sources\\interaction interaction/*)
 source_group(sources\\mixer mixer/*)
+source_group(sources\\mixer\\audio mixer/audio/*)
+source_group(sources\\mixer\\image mixer/image/*)
 source_group(sources\\producer\\color producer/color/*)
 source_group(sources\\producer\\media_info producer/media_info/*)
 source_group(sources\\producer\\scene producer/scene/*)
index 10fccad3e157f4ee1eae8766eae0359dd31b3366..477998ace7cc2c9a2cb20d80e835e12c41642925 100644 (file)
@@ -82,27 +82,27 @@ void destroy_consumers_synchronously()
 }
 
 class destroy_consumer_proxy : public frame_consumer
-{      
+{
        std::shared_ptr<frame_consumer> consumer_;
 public:
-       destroy_consumer_proxy(spl::shared_ptr<frame_consumer>&& consumer) 
+       destroy_consumer_proxy(spl::shared_ptr<frame_consumer>&& consumer)
                : consumer_(std::move(consumer))
        {
                destroy_consumers_in_separate_thread() = true;
        }
 
        ~destroy_consumer_proxy()
-       {               
+       {
                static tbb::atomic<int> counter;
                static std::once_flag counter_init_once;
                std::call_once(counter_init_once, []{ counter = 0; });
 
                if (!destroy_consumers_in_separate_thread())
                        return;
-                       
+
                ++counter;
                CASPAR_VERIFY(counter < 8);
-               
+
                auto consumer = new std::shared_ptr<frame_consumer>(std::move(consumer_));
                boost::thread([=]
                {
@@ -122,38 +122,39 @@ public:
 
                        pointer_guard.reset();
 
-               }).detach(); 
+               }).detach();
        }
-       
+
        std::future<bool> send(const_frame frame) override                                                                                                                                                              {return consumer_->send(std::move(frame));}
        void initialize(const video_format_desc& format_desc, const audio_channel_layout& channel_layout, int channel_index) override   {return consumer_->initialize(format_desc, channel_layout, channel_index);}
-       std::wstring print() const override                                                                                                                                                                                             {return consumer_->print();}    
+       std::wstring print() const override                                                                                                                                                                                             {return consumer_->print();}
        std::wstring name() const override                                                                                                                                                                                              {return consumer_->name();}
        boost::property_tree::wptree info() const override                                                                                                                                                              {return consumer_->info();}
        bool has_synchronization_clock() const override                                                                                                                                                                 {return consumer_->has_synchronization_clock();}
        int buffer_depth() const override                                                                                                                                                                                               {return consumer_->buffer_depth();}
        int index() const override                                                                                                                                                                                                              {return consumer_->index();}
        int64_t presentation_frame_age_millis() const override                                                                                                                                                  {return consumer_->presentation_frame_age_millis();}
-       monitor::subject& monitor_output() override                                                                                                                                                                             {return consumer_->monitor_output();}                                                                           
+       monitor::subject& monitor_output() override                                                                                                                                                                             {return consumer_->monitor_output();}
+       const frame_consumer* unwrapped() const override                                                                                                                                                                {return consumer_->unwrapped();}
 };
 
 class print_consumer_proxy : public frame_consumer
-{      
+{
        std::shared_ptr<frame_consumer> consumer_;
 public:
-       print_consumer_proxy(spl::shared_ptr<frame_consumer>&& consumer) 
+       print_consumer_proxy(spl::shared_ptr<frame_consumer>&& consumer)
                : consumer_(std::move(consumer))
        {
        }
 
        ~print_consumer_proxy()
-       {               
+       {
                auto str = consumer_->print();
                CASPAR_LOG(debug) << str << L" Uninitializing.";
                consumer_.reset();
                CASPAR_LOG(info) << str << L" Uninitialized.";
        }
-       
+
        std::future<bool> send(const_frame frame) override                                                                                                                                                              {return consumer_->send(std::move(frame));}
        void initialize(const video_format_desc& format_desc, const audio_channel_layout& channel_layout, int channel_index) override
        {
@@ -167,22 +168,23 @@ public:
        int buffer_depth() const override                                                                                                                                                                                               {return consumer_->buffer_depth();}
        int index() const override                                                                                                                                                                                                              {return consumer_->index();}
        int64_t presentation_frame_age_millis() const override                                                                                                                                                  {return consumer_->presentation_frame_age_millis();}
-       monitor::subject& monitor_output() override                                                                                                                                                                             {return consumer_->monitor_output();}                                                                           
+       monitor::subject& monitor_output() override                                                                                                                                                                             {return consumer_->monitor_output();}
+       const frame_consumer* unwrapped() const override                                                                                                                                                                {return consumer_->unwrapped();}
 };
 
 class recover_consumer_proxy : public frame_consumer
-{      
+{
        std::shared_ptr<frame_consumer> consumer_;
        int                                                             channel_index_  = -1;
        video_format_desc                               format_desc_;
        audio_channel_layout                    channel_layout_ = audio_channel_layout::invalid();
 public:
-       recover_consumer_proxy(spl::shared_ptr<frame_consumer>&& consumer) 
+       recover_consumer_proxy(spl::shared_ptr<frame_consumer>&& consumer)
                : consumer_(std::move(consumer))
        {
        }
-       
-       std::future<bool> send(const_frame frame) override                              
+
+       std::future<bool> send(const_frame frame) override
        {
                try
                {
@@ -220,7 +222,8 @@ public:
        int buffer_depth() const override                                                                               {return consumer_->buffer_depth();}
        int index() const override                                                                                              {return consumer_->index();}
        int64_t presentation_frame_age_millis() const override                                  {return consumer_->presentation_frame_age_millis();}
-       monitor::subject& monitor_output() override                                                             {return consumer_->monitor_output();}                                                                           
+       monitor::subject& monitor_output() override                                                             {return consumer_->monitor_output();}
+       const frame_consumer* unwrapped() const override                                                {return consumer_->unwrapped();}
 };
 
 // This class is used to guarantee that audio cadence is correct. This is important for NTSC audio.
@@ -236,7 +239,7 @@ public:
                : consumer_(consumer)
        {
        }
-       
+
        void initialize(const video_format_desc& format_desc, const audio_channel_layout& channel_layout, int channel_index) override
        {
                audio_cadence_  = format_desc.audio_cadence;
@@ -247,14 +250,14 @@ public:
        }
 
        std::future<bool> send(const_frame frame) override
-       {               
+       {
                if(audio_cadence_.size() == 1)
                        return consumer_->send(frame);
 
                std::future<bool> result = make_ready_future(true);
-               
+
                if(boost::range::equal(sync_buffer_, audio_cadence_) && audio_cadence_.front() * channel_layout_.num_channels == static_cast<int>(frame.audio_data().size()))
-               {       
+               {
                        // Audio sent so far is in sync, now we can send the next chunk.
                        result = consumer_->send(frame);
                        boost::range::rotate(audio_cadence_, std::begin(audio_cadence_)+1);
@@ -263,10 +266,10 @@ public:
                        CASPAR_LOG(trace) << print() << L" Syncing audio.";
 
                sync_buffer_.push_back(static_cast<int>(frame.audio_data().size() / channel_layout_.num_channels));
-               
+
                return std::move(result);
        }
-       
+
        std::wstring print() const override                                                                             {return consumer_->print();}
        std::wstring name() const override                                                                              {return consumer_->name();}
        boost::property_tree::wptree info() const override                                              {return consumer_->info();}
@@ -274,22 +277,23 @@ public:
        int buffer_depth() const override                                                                               {return consumer_->buffer_depth();}
        int index() const override                                                                                              {return consumer_->index();}
        int64_t presentation_frame_age_millis() const override                                  {return consumer_->presentation_frame_age_millis();}
-       monitor::subject& monitor_output() override                                                             {return consumer_->monitor_output();}                                                                           
+       monitor::subject& monitor_output() override                                                             {return consumer_->monitor_output();}
+       const frame_consumer* unwrapped() const override                                                {return consumer_->unwrapped();}
 };
 
 spl::shared_ptr<core::frame_consumer> frame_consumer_registry::create_consumer(
-               const std::vector<std::wstring>& params, interaction_sink* sink) const
+               const std::vector<std::wstring>& params, interaction_sink* sink, std::vector<spl::shared_ptr<video_channel>> channels) const
 {
        if(params.empty())
                CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info("params cannot be empty"));
-       
+
        auto consumer = frame_consumer::empty();
        auto& consumer_factories = impl_->consumer_factories;
        std::any_of(consumer_factories.begin(), consumer_factories.end(), [&](const consumer_factory_t& factory) -> bool
                {
                        try
                        {
-                               consumer = factory(params, sink);
+                               consumer = factory(params, sink, channels);
                        }
                        catch(...)
                        {
@@ -311,7 +315,8 @@ spl::shared_ptr<core::frame_consumer> frame_consumer_registry::create_consumer(
 spl::shared_ptr<frame_consumer> frame_consumer_registry::create_consumer(
                const std::wstring& element_name,
                const boost::property_tree::wptree& element,
-               interaction_sink* sink) const
+               interaction_sink* sink,
+               std::vector<spl::shared_ptr<video_channel>> channels) const
 {
        auto& preconfigured_consumer_factories = impl_->preconfigured_consumer_factories;
        auto found = preconfigured_consumer_factories.find(element_name);
@@ -324,7 +329,7 @@ spl::shared_ptr<frame_consumer> frame_consumer_registry::create_consumer(
                        spl::make_shared<print_consumer_proxy>(
                                        spl::make_shared<recover_consumer_proxy>(
                                                        spl::make_shared<cadence_guard>(
-                                                                       found->second(element, sink)))));
+                                                                       found->second(element, sink, channels)))));
 }
 
 const spl::shared_ptr<frame_consumer>& frame_consumer::empty()
index e09e4edcfe263f8a36c5a22359b831fe3fab445b..58ffeb61b62168581d4f2d91c4f2ea5d068c9897 100644 (file)
@@ -44,19 +44,19 @@ class frame_consumer
 public:
 
        // Static Members
-       
+
        static const spl::shared_ptr<frame_consumer>& empty();
 
        // Constructors
 
        frame_consumer(){}
        virtual ~frame_consumer() {}
-       
+
        // Methods
 
        virtual std::future<bool>                               send(const_frame frame) = 0;
        virtual void                                                    initialize(const video_format_desc& format_desc, const audio_channel_layout& channel_layout, int channel_index) = 0;
-       
+
        // monitor::observable
 
        virtual monitor::subject& monitor_output() = 0;
@@ -70,14 +70,17 @@ public:
        virtual int                                                             buffer_depth() const = 0; // -1 to not participate in frame presentation synchronization
        virtual int                                                             index() const = 0;
        virtual int64_t                                                 presentation_frame_age_millis() const = 0;
+       virtual const frame_consumer*                   unwrapped() const { return this; }
 };
 
 typedef std::function<spl::shared_ptr<frame_consumer>(
                const std::vector<std::wstring>&,
-               interaction_sink* sink)> consumer_factory_t;
+               interaction_sink* sink,
+               std::vector<spl::shared_ptr<video_channel>> channels)> consumer_factory_t;
 typedef std::function<spl::shared_ptr<frame_consumer>(
                const boost::property_tree::wptree& element,
-               interaction_sink* sink)> preconfigured_consumer_factory_t;
+               interaction_sink* sink,
+               std::vector<spl::shared_ptr<video_channel>> channels)> preconfigured_consumer_factory_t;
 
 class frame_consumer_registry : boost::noncopyable
 {
@@ -89,11 +92,13 @@ public:
                        const preconfigured_consumer_factory_t& factory);
        spl::shared_ptr<frame_consumer> create_consumer(
                        const std::vector<std::wstring>& params,
-                       interaction_sink* sink) const;
+                       interaction_sink* sink,
+                       std::vector<spl::shared_ptr<video_channel>> channels) const;
        spl::shared_ptr<frame_consumer> create_consumer(
                        const std::wstring& element_name,
                        const boost::property_tree::wptree& element,
-                       interaction_sink* sink) const;
+                       interaction_sink* sink,
+                       std::vector<spl::shared_ptr<video_channel>> channels) const;
 private:
        struct impl;
        spl::shared_ptr<impl> impl_;
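
The new unwrapped() hook defaults to returning this, and every proxy in frame_consumer.cpp forwards it to the consumer it wraps, so a dynamic_cast on unwrapped() sees through an arbitrary stack of proxies to the concrete consumer (the syncto consumer below relies on this for cycle detection). A self-contained toy of the idea, deliberately not the real frame_consumer interface:

#include <iostream>
#include <memory>

struct consumer
{
    virtual ~consumer() {}
    virtual const consumer* unwrapped() const { return this; }  // leaf: identity
};

struct real_consumer : consumer {};

struct proxy : consumer
{
    std::shared_ptr<consumer> inner_;

    explicit proxy(std::shared_ptr<consumer> inner) : inner_(std::move(inner)) {}

    const consumer* unwrapped() const override { return inner_->unwrapped(); }  // forward
};

int main()
{
    std::shared_ptr<consumer> wrapped =
            std::make_shared<proxy>(std::make_shared<proxy>(std::make_shared<real_consumer>()));

    // dynamic_cast on the outer proxy would fail, but unwrapped() reaches the real consumer.
    std::cout << (dynamic_cast<const real_consumer*>(wrapped->unwrapped()) != nullptr) << std::endl;
}
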
index 33cf01d7e5e98cb2800cc3a37c3c1e38b7e97d5c..d3c0580eb01bc624b18b396db5a8d9ec42a39206 100644 (file)
@@ -53,7 +53,7 @@
 namespace caspar { namespace core {
 
 struct output::impl
-{              
+{
        spl::shared_ptr<diagnostics::graph>     graph_;
        spl::shared_ptr<monitor::subject>       monitor_subject_                        = spl::make_shared<monitor::subject>("/output");
        const int                                                       channel_index_;
@@ -65,23 +65,23 @@ struct output::impl
        std::map<int, int64_t>                          send_to_consumers_delays_;
        executor                                                        executor_                                       { L"output " + boost::lexical_cast<std::wstring>(channel_index_) };
 public:
-       impl(spl::shared_ptr<diagnostics::graph> graph, const video_format_desc& format_desc, const audio_channel_layout& channel_layout, int channel_index) 
+       impl(spl::shared_ptr<diagnostics::graph> graph, const video_format_desc& format_desc, const audio_channel_layout& channel_layout, int channel_index)
                : graph_(std::move(graph))
                , channel_index_(channel_index)
                , format_desc_(format_desc)
                , channel_layout_(channel_layout)
        {
                graph_->set_color("consume-time", diagnostics::color(1.0f, 0.4f, 0.0f, 0.8f));
-       }       
-       
+       }
+
        void add(int index, spl::shared_ptr<frame_consumer> consumer)
-       {               
+       {
                remove(index);
 
                consumer->initialize(format_desc_, channel_layout_, channel_index_);
-               
+
                executor_.begin_invoke([this, index, consumer]
-               {                       
+               {
                        port p(index, channel_index_, std::move(consumer));
                        p.monitor_output().attach_parent(monitor_subject_);
                        ports_.insert(std::make_pair(index, std::move(p)));
@@ -94,7 +94,7 @@ public:
        }
 
        void remove(int index)
-       {               
+       {
                executor_.begin_invoke([=]
                {
                        auto it = ports_.find(index);
@@ -110,7 +110,7 @@ public:
        {
                remove(consumer->index());
        }
-       
+
        void change_channel_format(const core::video_format_desc& format_desc, const core::audio_channel_layout& channel_layout)
        {
                executor_.invoke([&]
@@ -120,7 +120,7 @@ public:
 
                        auto it = ports_.begin();
                        while(it != ports_.end())
-                       {                                               
+                       {
                                try
                                {
                                        it->second.change_channel_format(format_desc, channel_layout);
@@ -133,7 +133,7 @@ public:
                                        ports_.erase(it++);
                                }
                        }
-                       
+
                        format_desc_ = format_desc;
                        channel_layout_ = channel_layout;
                        frames_.clear();
@@ -141,7 +141,7 @@ public:
        }
 
        std::pair<int, int> minmax_buffer_depth() const
-       {               
+       {
                if(ports_.empty())
                        return std::make_pair(0, 0);
 
@@ -159,7 +159,7 @@ public:
                        .where(std::mem_fn(&port::has_synchronization_clock))
                        .any();
        }
-               
+
        std::future<void> operator()(const_frame input_frame, const core::video_format_desc& format_desc, const core::audio_channel_layout& channel_layout)
        {
                spl::shared_ptr<caspar::timer> frame_timer;
@@ -262,12 +262,12 @@ public:
        std::future<boost::property_tree::wptree> info()
        {
                return std::move(executor_.begin_invoke([&]() -> boost::property_tree::wptree
-               {                       
+               {
                        boost::property_tree::wptree info;
                        for (auto& port : ports_)
                        {
                                info.add_child(L"consumers.consumer", port.second.info())
-                                       .add(L"index", port.first); 
+                                       .add(L"index", port.first);
                        }
                        return info;
                }, task_priority::high_priority));
@@ -297,6 +297,19 @@ public:
                        return info;
                }, task_priority::high_priority));
        }
+
+       std::vector<spl::shared_ptr<const frame_consumer>> get_consumers()
+       {
+               return executor_.invoke([=]
+               {
+                       std::vector<spl::shared_ptr<const frame_consumer>> consumers;
+
+                       for (auto& port : ports_)
+                               consumers.push_back(port.second.consumer());
+
+                       return consumers;
+               });
+       }
 };
 
 output::output(spl::shared_ptr<diagnostics::graph> graph, const video_format_desc& format_desc, const core::audio_channel_layout& channel_layout, int channel_index) : impl_(new impl(std::move(graph), format_desc, channel_layout, channel_index)){}
@@ -306,6 +319,7 @@ void output::remove(int index){impl_->remove(index);}
 void output::remove(const spl::shared_ptr<frame_consumer>& consumer){impl_->remove(consumer);}
 std::future<boost::property_tree::wptree> output::info() const{return impl_->info();}
 std::future<boost::property_tree::wptree> output::delay_info() const{ return impl_->delay_info(); }
+std::vector<spl::shared_ptr<const frame_consumer>> output::get_consumers() const { return impl_->get_consumers(); }
 std::future<void> output::operator()(const_frame frame, const video_format_desc& format_desc, const core::audio_channel_layout& channel_layout){ return (*impl_)(std::move(frame), format_desc, channel_layout); }
 monitor::subject& output::monitor_output() {return *impl_->monitor_subject_;}
 }}
index 85a925632ad778ca4d0c4becdb1ffd45a70ed103..bd75385c715a7119e2be6315e76f785b2f3c8351 100644 (file)
@@ -35,7 +35,7 @@
 FORWARD2(caspar, diagnostics, class graph);
 
 namespace caspar { namespace core {
-       
+
 class output final
 {
        output(const output&);
@@ -47,27 +47,28 @@ public:
        // Constructors
 
        explicit output(spl::shared_ptr<caspar::diagnostics::graph> graph, const video_format_desc& format_desc, const core::audio_channel_layout& channel_layout, int channel_index);
-       
+
        // Methods
 
        // Returns when submitted to consumers, but the future indicates when the consumers are ready for a new frame.
        std::future<void> operator()(const_frame frame, const video_format_desc& format_desc, const core::audio_channel_layout& channel_layout);
-       
+
        void add(const spl::shared_ptr<frame_consumer>& consumer);
        void add(int index, const spl::shared_ptr<frame_consumer>& consumer);
        void remove(const spl::shared_ptr<frame_consumer>& consumer);
        void remove(int index);
-       
+
        monitor::subject& monitor_output();
 
        // Properties
 
        std::future<boost::property_tree::wptree> info() const;
        std::future<boost::property_tree::wptree> delay_info() const;
+       std::vector<spl::shared_ptr<const frame_consumer>> get_consumers() const;
 
 private:
        struct impl;
        spl::shared_ptr<impl> impl_;
 };
 
-}}
\ No newline at end of file
+}}
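
get_consumers() exposes a read-only snapshot of the consumers attached to the output (each port now hands out its consumer, see port.cpp below). A hedged fragment showing the intended use; channel is assumed to be a spl::shared_ptr<core::video_channel>:

// List the consumers currently attached to a channel's output.
for (auto& consumer : channel->output().get_consumers())
    CASPAR_LOG(info) << consumer->print();
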
index db3c68fa890b9ee28303eb68c4b6565ec73abf4b..ecc19836d07c7fb92e3e9052b2155eed2efc4794 100644 (file)
@@ -14,7 +14,7 @@ struct port::impl
 {
        int                                                                     index_;
        spl::shared_ptr<monitor::subject>       monitor_subject_ = spl::make_shared<monitor::subject>("/port/" + boost::lexical_cast<std::string>(index_));
-       std::shared_ptr<frame_consumer>         consumer_;
+       spl::shared_ptr<frame_consumer>         consumer_;
        int                                                                     channel_index_;
 public:
        impl(int index, int channel_index, spl::shared_ptr<frame_consumer> consumer)
@@ -24,12 +24,12 @@ public:
        {
                consumer_->monitor_output().attach_parent(monitor_subject_);
        }
-       
+
        void change_channel_format(const core::video_format_desc& format_desc, const audio_channel_layout& channel_layout)
        {
                consumer_->initialize(format_desc, channel_layout, channel_index_);
        }
-               
+
        std::future<bool> send(const_frame frame)
        {
                *monitor_subject_ << monitor::message("/type") % consumer_->name();
@@ -64,13 +64,18 @@ public:
        {
                return consumer_->presentation_frame_age_millis();
        }
+
+       spl::shared_ptr<const frame_consumer> consumer() const
+       {
+               return consumer_;
+       }
 };
 
 port::port(int index, int channel_index, spl::shared_ptr<frame_consumer> consumer) : impl_(new impl(index, channel_index, std::move(consumer))){}
 port::port(port&& other) : impl_(std::move(other.impl_)){}
 port::~port(){}
 port& port::operator=(port&& other){impl_ = std::move(other.impl_); return *this;}
-std::future<bool> port::send(const_frame frame){return impl_->send(std::move(frame));} 
+std::future<bool> port::send(const_frame frame){return impl_->send(std::move(frame));}
 monitor::subject& port::monitor_output() {return *impl_->monitor_subject_;}
 void port::change_channel_format(const core::video_format_desc& format_desc, const audio_channel_layout& channel_layout){impl_->change_channel_format(format_desc, channel_layout);}
 int port::buffer_depth() const{return impl_->buffer_depth();}
@@ -78,4 +83,5 @@ std::wstring port::print() const{ return impl_->print();}
 bool port::has_synchronization_clock() const{return impl_->has_synchronization_clock();}
 boost::property_tree::wptree port::info() const{return impl_->info();}
 int64_t port::presentation_frame_age_millis() const{ return impl_->presentation_frame_age_millis(); }
-}}
\ No newline at end of file
+spl::shared_ptr<const frame_consumer> port::consumer() const { return impl_->consumer(); }
+}}
index 46db42c2da504b0f22349c055a445728ac2b17f8..48e639c9225f266eef71d665ea50007a017f29d7 100644 (file)
@@ -28,7 +28,7 @@ public:
 
        port& operator=(port&& other);
 
-       std::future<bool> send(const_frame frame);      
+       std::future<bool> send(const_frame frame);
 
        monitor::subject& monitor_output();
 
@@ -40,9 +40,10 @@ public:
        bool has_synchronization_clock() const;
        boost::property_tree::wptree info() const;
        int64_t presentation_frame_age_millis() const;
+       spl::shared_ptr<const frame_consumer> consumer() const;
 private:
        struct impl;
        std::unique_ptr<impl> impl_;
 };
 
-}}
\ No newline at end of file
+}}
diff --git a/core/consumer/syncto/syncto_consumer.cpp b/core/consumer/syncto/syncto_consumer.cpp
new file mode 100644 (file)
index 0000000..632bdb0
--- /dev/null
@@ -0,0 +1,188 @@
+/*
+* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
+*
+* This file is part of CasparCG (www.casparcg.com).
+*
+* CasparCG is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* CasparCG is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
+*
+* Author: Helge Norberg, helge.norberg@svt.se
+*/
+
+#include "../../StdAfx.h"
+
+#include "syncto_consumer.h"
+
+#include "../frame_consumer.h"
+#include "../../frame/frame.h"
+#include "../../help/help_sink.h"
+#include "../../module_dependencies.h"
+#include "../../monitor/monitor.h"
+#include "../../video_channel.h"
+#include "../output.h"
+
+#include <common/semaphore.h>
+
+#include <boost/lexical_cast.hpp>
+#include <boost/property_tree/ptree.hpp>
+
+#include <future>
+
+namespace caspar { namespace core { namespace syncto {
+
+void verify_cyclic_reference(int self_channel_index, const spl::shared_ptr<video_channel>& other_channel);
+
+class syncto_consumer : public frame_consumer
+{
+       monitor::subject                                monitor_subject_;
+       spl::shared_ptr<video_channel>  other_channel_;
+       semaphore                                               frames_to_render_       { 0 };
+       std::shared_ptr<void>                   tick_subscription_;
+       int                                                             self_channel_index_     = -1;
+public:
+       syncto_consumer(spl::shared_ptr<video_channel> other_channel)
+               : other_channel_(std::move(other_channel))
+       {
+       }
+
+       void initialize(const video_format_desc& format_desc, const audio_channel_layout& channel_layout, int channel_index) override
+       {
+               verify_cyclic_reference(channel_index, other_channel_);
+
+               self_channel_index_     = channel_index;
+               tick_subscription_      = other_channel_->add_tick_listener([=]
+               {
+                       frames_to_render_.release();
+               });
+       }
+
+       std::future<bool> send(const_frame frame) override
+       {
+               auto task = spl::make_shared<std::packaged_task<bool ()>>([=] { return true; });
+
+               frames_to_render_.acquire(1, [task]
+               {
+                       (*task)();
+               });
+
+               return task->get_future();
+       }
+
+       monitor::subject& monitor_output() override
+       {
+               return monitor_subject_;
+       }
+
+       std::wstring print() const override
+       {
+               if (self_channel_index_ != -1)
+                       return L"sync[" + boost::lexical_cast<std::wstring>(self_channel_index_) + L"]to[" + boost::lexical_cast<std::wstring>(other_channel_->index()) + L"]";
+               else
+                       return L"syncto[" + boost::lexical_cast<std::wstring>(other_channel_->index()) + L"]";
+       }
+
+       std::wstring name() const override
+       {
+               return L"syncto";
+       }
+
+       boost::property_tree::wptree info() const override
+       {
+               boost::property_tree::wptree info;
+               info.add(L"type", L"syncto-consumer");
+               info.add(L"channel-to-sync-to", other_channel_->index());
+               return info;
+       }
+
+       bool has_synchronization_clock() const override
+       {
+               return true;
+       }
+
+       int buffer_depth() const override
+       {
+               return -1;
+       }
+
+       int index() const override
+       {
+               return 70000;
+       }
+
+       int64_t presentation_frame_age_millis() const override
+       {
+               return 0;
+       }
+
+       spl::shared_ptr<video_channel> other_channel() const
+       {
+               return other_channel_;
+       }
+};
+
+void verify_cyclic_reference(int self_channel_index, const spl::shared_ptr<video_channel>& other_channel)
+{
+       if (self_channel_index == other_channel->index())
+               CASPAR_THROW_EXCEPTION(user_error() << msg_info(
+                               L"Cannot create syncto consumer where the source channel and the destination channel are the same or indirectly related"));
+
+       for (auto& consumer : other_channel->output().get_consumers())
+       {
+               auto raw_consumer       = consumer->unwrapped();
+               auto syncto                     = dynamic_cast<const syncto_consumer*>(raw_consumer);
+
+               if (syncto)
+                       verify_cyclic_reference(self_channel_index, syncto->other_channel());
+       }
+}
+
+void describe_consumer(core::help_sink& sink, const core::help_repository& repo)
+{
+       sink.short_description(L"Lets a channel provide sync to another.");
+       sink.syntax(L"SYNCTO [other_channel:int]");
+       sink.para()->text(L"Provides sync to its own channel based on the rendering pace of the specified channel.");
+       sink.para()->text(L"Examples:");
+       sink.example(L">> ADD 1 SYNCTO 2");
+}
+
+spl::shared_ptr<core::frame_consumer> create_consumer(
+               const std::vector<std::wstring>& params,
+               core::interaction_sink*,
+               std::vector<spl::shared_ptr<video_channel>> channels)
+{
+       if (params.size() < 1 || !boost::iequals(params.at(0), L"SYNCTO"))
+               return core::frame_consumer::empty();
+
+       auto channel_id = boost::lexical_cast<int>(params.at(1));
+       auto channel    = channels.at(channel_id - 1);
+
+       return spl::make_shared<syncto_consumer>(channel);
+}
+
+spl::shared_ptr<core::frame_consumer> create_preconfigured_consumer(
+               const boost::property_tree::wptree& ptree,
+               core::interaction_sink*,
+               std::vector<spl::shared_ptr<video_channel>> channels)
+{
+       auto channel_id = ptree.get<int>(L"channel-id");
+
+       return spl::make_shared<syncto_consumer>(channels.at(channel_id - 1));
+}
+
+void init(module_dependencies dependencies)
+{
+       dependencies.consumer_registry->register_consumer_factory(L"syncto", &create_consumer, &describe_consumer);
+       dependencies.consumer_registry->register_preconfigured_consumer_factory(L"syncto", &create_preconfigured_consumer);
+}
+
+}}}
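
A hedged sketch of how the factory registered in init() above is reached programmatically; registry, sink and channels are assumed to come from the server bootstrap, and only the call shapes (create_consumer() taking the channels vector, output().add()) are taken from the diffs above. The AMCP equivalent is the documented "ADD 1 SYNCTO 2":

#include <common/memory.h>

#include <core/consumer/frame_consumer.h>
#include <core/consumer/output.h>
#include <core/video_channel.h>

#include <vector>

caspar::spl::shared_ptr<caspar::core::frame_consumer> add_syncto_consumer(
        caspar::core::frame_consumer_registry& registry,
        std::vector<caspar::spl::shared_ptr<caspar::core::video_channel>> channels,
        caspar::core::interaction_sink* sink)
{
    // Equivalent of "ADD 1 SYNCTO 2": channel 1 renders a frame only when channel 2 ticks.
    auto consumer = registry.create_consumer({ L"SYNCTO", L"2" }, sink, channels);

    channels.at(0)->output().add(consumer);

    return consumer;
}
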
similarity index 77%
rename from modules/ffmpeg/ffmpeg_pipeline_backend_internal.h
rename to core/consumer/syncto/syncto_consumer.h
index d83768f496a2f194417c03e2ad0ecc00b3ca2ad2..9eac2eae7f148194f5c6bdc1666fb7ae3554dc31 100644 (file)
 
 #pragma once
 
-#include <common/memory.h>
+#include "../../fwd.h"
 
-#include <boost/rational.hpp>
+namespace caspar { namespace core { namespace syncto {
 
-#include <string>
-#include <functional>
-#include <cstdint>
+void init(caspar::core::module_dependencies dependencies);
 
-namespace caspar { namespace ffmpeg {
-
-spl::shared_ptr<struct ffmpeg_pipeline_backend> create_internal_pipeline();
-
-}}
+}}}
index 19a83d286e1671a9d4e4d8b9775debe3122e6a1c..f2c5fa01a4b0108794895f0a02ec7de1ebae1ed4 100644 (file)
@@ -27,6 +27,7 @@
 #include <common/array.h>
 #include <common/future.h>
 #include <common/timer.h>
+#include <common/memshfl.h>
 
 #include <core/frame/frame_visitor.h>
 #include <core/frame/pixel_format.h>
@@ -40,9 +41,9 @@
 #include <boost/thread/future.hpp>
 
 namespace caspar { namespace core {
-               
+
 struct mutable_frame::impl : boost::noncopyable
-{                      
+{
        std::vector<array<std::uint8_t>>                        buffers_;
        core::mutable_audio_buffer                                      audio_data_;
        const core::pixel_format_desc                           desc_;
@@ -50,7 +51,7 @@ struct mutable_frame::impl : boost::noncopyable
        const void*                                                                     tag_;
        core::frame_geometry                                            geometry_                               = frame_geometry::get_default();
        caspar::timer                                                           since_created_timer_;
-       
+
        impl(
                        std::vector<array<std::uint8_t>> buffers,
                        mutable_audio_buffer audio_data,
@@ -68,7 +69,7 @@ struct mutable_frame::impl : boost::noncopyable
                                CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info("mutable_frame: null argument"));
        }
 };
-       
+
 mutable_frame::mutable_frame(
                std::vector<array<std::uint8_t>> image_buffers,
                mutable_audio_buffer audio_data,
@@ -91,8 +92,8 @@ const core::mutable_audio_buffer& mutable_frame::audio_data() const{return impl_
 array<std::uint8_t>& mutable_frame::image_data(std::size_t index){return impl_->buffers_.at(index);}
 core::mutable_audio_buffer& mutable_frame::audio_data(){return impl_->audio_data_;}
 std::size_t mutable_frame::width() const{return impl_->desc_.planes.at(0).width;}
-std::size_t mutable_frame::height() const{return impl_->desc_.planes.at(0).height;}                                            
-const void* mutable_frame::stream_tag()const{return impl_->tag_;}                              
+std::size_t mutable_frame::height() const{return impl_->desc_.planes.at(0).height;}
+const void* mutable_frame::stream_tag()const{return impl_->tag_;}
 const frame_geometry& mutable_frame::geometry() const { return impl_->geometry_; }
 void mutable_frame::set_geometry(const frame_geometry& g) { impl_->geometry_ = g; }
 caspar::timer mutable_frame::since_created() const { return impl_->since_created_timer_; }
@@ -105,7 +106,7 @@ const const_frame& const_frame::empty()
 }
 
 struct const_frame::impl : boost::noncopyable
-{                      
+{
        mutable std::vector<std::shared_future<array<const std::uint8_t>>>      future_buffers_;
        mutable core::audio_buffer                                                                                      audio_data_;
        const core::pixel_format_desc                                                                           desc_;
@@ -115,35 +116,48 @@ struct const_frame::impl : boost::noncopyable
        caspar::timer                                                                                                           since_created_timer_;
        bool                                                                                                                            should_record_age_;
        mutable tbb::atomic<int64_t>                                                                            recorded_age_;
+       std::shared_future<array<const std::uint8_t>>                                           key_only_on_demand_;
 
        impl(const void* tag)
                : audio_data_(0, 0, true, 0)
                , desc_(core::pixel_format::invalid)
                , channel_layout_(audio_channel_layout::invalid())
-               , tag_(tag)     
+               , tag_(tag)
                , geometry_(frame_geometry::get_default())
                , should_record_age_(true)
        {
                recorded_age_ = 0;
        }
-       
+
        impl(
                        std::shared_future<array<const std::uint8_t>> image,
                        audio_buffer audio_data,
                        const void* tag,
                        const core::pixel_format_desc& desc,
-                       const core::audio_channel_layout& channel_layout)
+                       const core::audio_channel_layout& channel_layout,
+                       caspar::timer since_created_timer = caspar::timer())
                : audio_data_(std::move(audio_data))
                , desc_(desc)
                , channel_layout_(channel_layout)
                , tag_(tag)
                , geometry_(frame_geometry::get_default())
+               , since_created_timer_(std::move(since_created_timer))
                , should_record_age_(false)
        {
                if (desc.format != core::pixel_format::bgra)
                        CASPAR_THROW_EXCEPTION(not_implemented());
-               
-               future_buffers_.push_back(std::move(image));
+
+               future_buffers_.push_back(image);
+
+               key_only_on_demand_ = std::async(std::launch::deferred, [image]
+               {
+                       auto fill       = image.get();
+                       auto key        = cache_aligned_vector<std::uint8_t>(fill.size());
+
+                       aligned_memshfl(key.data(), fill.data(), fill.size(), 0x0F0F0F0F, 0x0B0B0B0B, 0x07070707, 0x03030303);
+
+                       return array<const std::uint8_t>(key.data(), key.size(), false, std::move(key));
+               }).share();
        }
 
        impl(mutable_frame&& other)
@@ -172,6 +186,11 @@ struct const_frame::impl : boost::noncopyable
                return tag_ != empty().stream_tag() ? future_buffers_.at(index).get() : array<const std::uint8_t>(nullptr, 0, true, 0);
        }
 
+       spl::shared_ptr<impl> key_only() const
+       {
+               return spl::make_shared<impl>(key_only_on_demand_, audio_data_, tag_, desc_, channel_layout_, since_created_timer_);
+       }
+
        std::size_t width() const
        {
                return tag_ != empty().stream_tag() ? desc_.planes.at(0).width : 0;
@@ -200,7 +219,7 @@ struct const_frame::impl : boost::noncopyable
                        return static_cast<int64_t>(since_created_timer_.elapsed() * 1000.0);
        }
 };
-       
+
 const_frame::const_frame(const void* tag) : impl_(new impl(tag)){}
 const_frame::const_frame(
                std::shared_future<array<const std::uint8_t>> image,
@@ -232,11 +251,18 @@ const core::audio_channel_layout& const_frame::audio_channel_layout()const { ret
 array<const std::uint8_t> const_frame::image_data(int index)const{return impl_->image_data(index);}
 const core::audio_buffer& const_frame::audio_data()const{return impl_->audio_data_;}
 std::size_t const_frame::width()const{return impl_->width();}
-std::size_t const_frame::height()const{return impl_->height();}        
-std::size_t const_frame::size()const{return impl_->size();}                                            
-const void* const_frame::stream_tag()const{return impl_->tag_;}                                
+std::size_t const_frame::height()const{return impl_->height();}
+std::size_t const_frame::size()const{return impl_->size();}
+const void* const_frame::stream_tag()const{return impl_->tag_;}
 const frame_geometry& const_frame::geometry() const { return impl_->geometry_; }
 void const_frame::set_geometry(const frame_geometry& g) { impl_->geometry_ = g; }
 int64_t const_frame::get_age_millis() const { return impl_->get_age_millis(); }
+const_frame const_frame::key_only() const
+{
+       auto result             = const_frame();
+       result.impl_    = impl_->key_only();
+
+       return result;
+}
 
 }}
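
key_only() above defers the shuffle with std::launch::deferred and shares the result, so the key plane is produced at most once and only if somebody asks for it. A hedged scalar sketch of the transform that the aligned_memshfl call (common/memshfl.h, masks 0x0F0F0F0F / 0x0B0B0B0B / 0x07070707 / 0x03030303) is understood to perform on BGRA data, namely replacing every byte of a pixel with that pixel's alpha so that the result is a monochrome key frame:

#include <cstddef>
#include <cstdint>
#include <vector>

// Scalar stand-in for aligned_memshfl as used by key_only_on_demand_ above.
std::vector<std::uint8_t> key_from_bgra(const std::uint8_t* fill, std::size_t size)
{
    std::vector<std::uint8_t> key(size);

    for (std::size_t i = 0; i + 3 < size; i += 4)
    {
        auto alpha = fill[i + 3];   // alpha byte of this BGRA pixel

        key[i + 0] = alpha;
        key[i + 1] = alpha;
        key[i + 2] = alpha;
        key[i + 3] = alpha;
    }

    return key;
}
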
index 6ae3d0a044c6490af1d3f7a035b6bee9a6ee8a2f..fff5382c845a715e34cecb93380e2dd107852be5 100644 (file)
@@ -18,7 +18,7 @@
 FORWARD1(boost, template<typename> class shared_future);
 
 namespace caspar { namespace core {
-       
+
 typedef caspar::array<const int32_t> audio_buffer;
 typedef cache_aligned_vector<int32_t> mutable_audio_buffer;
 class frame_geometry;
@@ -27,15 +27,15 @@ class mutable_frame final
 {
        mutable_frame(const mutable_frame&);
        mutable_frame& operator=(const mutable_frame&);
-public:        
+public:
 
        // Static Members
 
        // Constructors
 
-       explicit mutable_frame(std::vector<array<std::uint8_t>> image_buffers, 
+       explicit mutable_frame(std::vector<array<std::uint8_t>> image_buffers,
                                                mutable_audio_buffer audio_data,
-                                               const void* tag, 
+                                               const void* tag,
                                                const pixel_format_desc& desc,
                                                const audio_channel_layout& channel_layout);
        ~mutable_frame();
@@ -46,9 +46,9 @@ public:
        mutable_frame& operator=(mutable_frame&& other);
 
        void swap(mutable_frame& other);
-                       
+
        // Properties
-                       
+
        const core::pixel_format_desc& pixel_format_desc() const;
        const core::audio_channel_layout& audio_channel_layout() const;
 
@@ -57,17 +57,17 @@ public:
 
        array<std::uint8_t>& image_data(std::size_t index = 0);
        core::mutable_audio_buffer& audio_data();
-       
+
        std::size_t width() const;
        std::size_t height() const;
-                                                               
+
        const void* stream_tag() const;
 
        const core::frame_geometry& geometry() const;
        void set_geometry(const frame_geometry& g);
 
        caspar::timer since_created() const;
-                       
+
 private:
        struct impl;
        spl::unique_ptr<impl> impl_;
@@ -75,7 +75,7 @@ private:
 
 class const_frame final
 {
-public:        
+public:
 
        // Static Members
 
@@ -84,9 +84,9 @@ public:
        // Constructors
 
        explicit const_frame(const void* tag = nullptr);
-       explicit const_frame(std::shared_future<array<const std::uint8_t>> image, 
-                                               audio_buffer audio_data, 
-                                               const void* tag, 
+       explicit const_frame(std::shared_future<array<const std::uint8_t>> image,
+                                               audio_buffer audio_data,
+                                               const void* tag,
                                                const pixel_format_desc& desc,
                                                const audio_channel_layout& channel_layout);
        const_frame(mutable_frame&& other);
@@ -98,19 +98,21 @@ public:
        const_frame& operator=(const_frame&& other);
        const_frame(const const_frame&);
        const_frame& operator=(const const_frame& other);
-                               
+
+       const_frame key_only() const;
+
        // Properties
-                               
+
        const core::pixel_format_desc& pixel_format_desc() const;
        const core::audio_channel_layout& audio_channel_layout() const;
 
        array<const std::uint8_t> image_data(int index = 0) const;
        const core::audio_buffer& audio_data() const;
-               
+
        std::size_t width() const;
        std::size_t height() const;
        std::size_t size() const;
-                                                               
+
        const void* stream_tag() const;
 
        const core::frame_geometry& geometry() const;
@@ -121,7 +123,7 @@ public:
        bool operator!=(const const_frame& other);
        bool operator<(const const_frame& other);
        bool operator>(const const_frame& other);
-                       
+
 private:
        struct impl;
        spl::shared_ptr<impl> impl_;
index ac2cda11375294e061ec6f2e0ad252d3177762f4..4f9d1c8d80e250efc85b2c6d5c07d87c7921628d 100644 (file)
@@ -48,6 +48,8 @@ public:
                        const core::audio_channel_layout& channel_layout) = 0;
 
        // Properties
+
+       virtual int get_max_frame_size() = 0;
 };
 
 }}
\ No newline at end of file
index bf2cdb35f550a12e299a90b6c05f70446482d751..e21e17fb2bd60d276b32ef534381b62d61455ab8 100644 (file)
@@ -154,7 +154,7 @@ public:
                : duration_(0)
                , time_(0)
        {
-               dest_.image_transform.use_mipmap = env::properties().get(L"configuration.mixer.mipmapping_default_on", false);
+               dest_.image_transform.use_mipmap = env::properties().get(L"configuration.mixer.mipmapping-default-on", false);
        }
 
        tweened_transform(const frame_transform& source, const frame_transform& dest, int duration, const tweener& tween)
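
The property key now uses dashes, so the matching casparcg.config element is mipmapping-default-on. A hedged fragment implied by the configuration.mixer.mipmapping-default-on path above (everything except the mixer element is omitted):

<configuration>
  <mixer>
    <mipmapping-default-on>false</mipmapping-default-on>
  </mixer>
</configuration>
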
index 2b78c61778998ba10c57bf77552893eb2a830224..46ca3e4162f7a8cc682e4f568ff3eaa2e263dbda 100644 (file)
@@ -39,7 +39,6 @@
 #include <boost/thread.hpp>
 
 namespace caspar { namespace core {
-
 struct frame_producer_registry::impl
 {
        std::vector<producer_factory_t>         producer_factories;
@@ -103,7 +102,7 @@ struct frame_producer_base::impl
                frame_number_ = 0;
                paused_ = false;
        }
-       
+
        draw_frame receive()
        {
                if(paused_)
@@ -148,7 +147,7 @@ draw_frame frame_producer_base::last_frame()
        return impl_->last_frame();
 }
 
-std::future<std::wstring> frame_producer_base::call(const std::vector<std::wstring>&) 
+std::future<std::wstring> frame_producer_base::call(const std::vector<std::wstring>&)
 {
        CASPAR_THROW_EXCEPTION(not_supported());
 }
@@ -176,7 +175,7 @@ const std::vector<std::wstring>& frame_producer_base::get_variables() const
        return empty;
 }
 
-const spl::shared_ptr<frame_producer>& frame_producer::empty() 
+const spl::shared_ptr<frame_producer>& frame_producer::empty()
 {
        class empty_frame_producer : public frame_producer
        {
@@ -186,7 +185,7 @@ const spl::shared_ptr<frame_producer>& frame_producer::empty()
                void paused(bool value) override{}
                uint32_t nb_frames() const override {return 0;}
                std::wstring print() const override { return L"empty";}
-               monitor::subject& monitor_output() override {static monitor::subject monitor_subject(""); return monitor_subject;}                                                                              
+               monitor::subject& monitor_output() override {static monitor::subject monitor_subject(""); return monitor_subject;}
                std::wstring name() const override {return L"empty";}
                uint32_t frame_number() const override {return 0;}
                std::future<std::wstring> call(const std::vector<std::wstring>& params) override{CASPAR_THROW_EXCEPTION(not_implemented());}
@@ -194,7 +193,7 @@ const spl::shared_ptr<frame_producer>& frame_producer::empty()
                const std::vector<std::wstring>& get_variables() const override { static std::vector<std::wstring> empty; return empty; }
                draw_frame last_frame() {return draw_frame::empty();}
                constraints& pixel_constraints() override { static constraints c; return c; }
-       
+
                boost::property_tree::wptree info() const override
                {
                        boost::property_tree::wptree info;
@@ -220,10 +219,10 @@ void destroy_producers_synchronously()
 }
 
 class destroy_producer_proxy : public frame_producer
-{      
+{
        std::shared_ptr<frame_producer> producer_;
 public:
-       destroy_producer_proxy(spl::shared_ptr<frame_producer>&& producer) 
+       destroy_producer_proxy(spl::shared_ptr<frame_producer>&& producer)
                : producer_(std::move(producer))
        {
                destroy_producers_in_separate_thread() = true;
@@ -234,13 +233,13 @@ public:
                static tbb::atomic<int> counter;
                static std::once_flag counter_init_once;
                std::call_once(counter_init_once, []{ counter = 0; });
-               
+
                if(producer_ == core::frame_producer::empty() || !destroy_producers_in_separate_thread())
                        return;
 
                ++counter;
                CASPAR_VERIFY(counter < 8);
-               
+
                auto producer = new spl::shared_ptr<frame_producer>(std::move(producer_));
                boost::thread([=]
                {
@@ -256,7 +255,7 @@ public:
                                        CASPAR_LOG(debug) << str << L" Destroying on asynchronous destruction thread.";
                        }
                        catch(...){}
-                       
+
                        try
                        {
                                pointer_guard.reset();
@@ -268,9 +267,9 @@ public:
                        }
 
                        --counter;
-               }).detach(); 
+               }).detach();
        }
-       
+
        draw_frame                                                                                      receive() override                                                                                                                                                                                                              {return producer_->receive();}
        std::wstring                                                                            print() const override                                                                                                                  {return producer_->print();}
        void                                                                                            paused(bool value) override                                                                                                             {producer_->paused(value);}
@@ -283,7 +282,7 @@ public:
        void                                                                                            leading_producer(const spl::shared_ptr<frame_producer>& producer) override              {return producer_->leading_producer(producer);}
        uint32_t                                                                                        nb_frames() const override                                                                                                              {return producer_->nb_frames();}
        draw_frame                                                                                      last_frame()                                                                                                                                    {return producer_->last_frame();}
-       monitor::subject&                                                                       monitor_output() override                                                                                                               {return producer_->monitor_output();}                                                                           
+       monitor::subject&                                                                       monitor_output() override                                                                                                               {return producer_->monitor_output();}
        bool                                                                                            collides(double x, double y) const override                                                                             {return producer_->collides(x, y);}
        void                                                                                            on_interaction(const interaction_event::ptr& event)     override                                        {return producer_->on_interaction(event);}
        constraints&                                                                            pixel_constraints() override                                                                                                    {return producer_->pixel_constraints();}
@@ -298,7 +297,7 @@ spl::shared_ptr<core::frame_producer> do_create_producer(const frame_producer_de
 {
        if(params.empty())
                CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info("params cannot be empty"));
-       
+
        auto producer = frame_producer::empty();
        std::any_of(factories.begin(), factories.end(), [&](const producer_factory_t& factory) -> bool
                {
@@ -325,7 +324,7 @@ spl::shared_ptr<core::frame_producer> do_create_producer(const frame_producer_de
 
        if(producer == frame_producer::empty())
                return producer;
-               
+
        return producer;
 }
 
@@ -356,38 +355,41 @@ draw_frame frame_producer_registry::create_thumbnail(const frame_producer_depend
 
        if (key_frame == draw_frame::empty())
                key_frame = do_create_thumbnail_frame(dependencies, media_file + L"_ALPHA", thumbnail_producers);
-  
+
        if (fill_frame != draw_frame::empty() && key_frame != draw_frame::empty())
                return draw_frame::mask(fill_frame, key_frame);
-  
+
        return fill_frame;
 }
 
 spl::shared_ptr<core::frame_producer> frame_producer_registry::create_producer(const frame_producer_dependencies& dependencies, const std::vector<std::wstring>& params) const
-{      
+{
        auto& producer_factories = impl_->producer_factories;
        auto producer = do_create_producer(dependencies, params, producer_factories);
        auto key_producer = frame_producer::empty();
-       
-       try // to find a key file.
+
+       if (!params.empty() && !boost::contains(params.at(0), L"://"))
        {
-               auto params_copy = params;
-               if(params_copy.size() > 0)
+               try // to find a key file.
                {
-                       params_copy[0] += L"_A";
-                       key_producer = do_create_producer(dependencies, params_copy, producer_factories);
-                       if(key_producer == frame_producer::empty())
+                       auto params_copy = params;
+                       if (params_copy.size() > 0)
                        {
-                               params_copy[0] += L"LPHA";
+                               params_copy[0] += L"_A";
                                key_producer = do_create_producer(dependencies, params_copy, producer_factories);
+                               if (key_producer == frame_producer::empty())
+                               {
+                                       params_copy[0] += L"LPHA";
+                                       key_producer = do_create_producer(dependencies, params_copy, producer_factories);
+                               }
                        }
                }
+               catch (...) {}
        }
-       catch(...){}
 
        if(producer != frame_producer::empty() && key_producer != frame_producer::empty())
                return create_separated_producer(producer, key_producer);
-       
+
        if(producer == frame_producer::empty())
        {
                std::wstring str;
@@ -408,5 +410,4 @@ spl::shared_ptr<core::frame_producer> frame_producer_registry::create_producer(c
        std::copy(iterator(iss),  iterator(), std::back_inserter(tokens));
        return create_producer(dependencies, tokens);
 }
-
 }}
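
The restructured block above now skips the key-file probe entirely when the first parameter looks like a URL, and otherwise tries "<name>_A" followed by "<name>_ALPHA" (the second attempt appends "LPHA" to the already-suffixed name). A small sketch of just that naming rule; key_file_candidates is a hypothetical helper, not part of the registry code:

#include <iostream>
#include <string>
#include <vector>

// Hypothetical helper mirroring the probe order above: "<name>_A", then "<name>_ALPHA".
// Anything that looks like a URL (contains "://") is never probed for a key file.
std::vector<std::wstring> key_file_candidates(const std::wstring& media)
{
    if (media.find(L"://") != std::wstring::npos)
        return {};
    return { media + L"_A", media + L"_A" + L"LPHA" };
}

int main()
{
    for (const auto& name : key_file_candidates(L"AMB"))
        std::wcout << name << L"\n";                                   // AMB_A, AMB_ALPHA
    std::wcout << key_file_candidates(L"udp://239.0.0.1:1234").size()  // 0
               << std::endl;
}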
index 031e99524fba50945a99ff7190e45407c213ca6b..b6141cc0b1762fd87ca1967b63fb6b1a67fb0004 100644 (file)
 
 namespace caspar { namespace core {
 
-draw_frame drop_and_skip(const draw_frame& source, const draw_frame&, const boost::rational<int64_t>&)
+draw_frame drop_or_repeat(const draw_frame& source, const draw_frame&, const boost::rational<int64_t>&)
 {
        return source;
 }
 
 // Blends next frame with current frame when the distance is not 0.
 // Completely sharp when distance is 0 but blurry when in between.
-draw_frame blend(const draw_frame& source, const draw_frame& destination, const boost::rational<int64_t>& distance)
+draw_frame blend2(const draw_frame& source, const draw_frame& destination, const boost::rational<int64_t>& distance)
 {
        if (destination == draw_frame::empty())
                return source;
@@ -70,8 +70,8 @@ draw_frame blend(const draw_frame& source, const draw_frame& destination, const
 // * A distance of 0.0 gives 50% previous, 50% current and 0% next.
 // * A distance of 0.5 gives 25% previous, 50% current and 25% next.
 // * A distance of 0.75 gives 12.5% previous, 50% current and 37.5% next.
-// This is blurrier than blend, but gives a more even bluriness, instead of sharp, blurry, sharp, blurry.
-struct blend_all
+// This is blurrier than blend2, but gives a more even blurriness, instead of sharp, blurry, sharp, blurry.
+struct blend3
 {
        draw_frame previous_frame       = draw_frame::empty();
        draw_frame last_source          = draw_frame::empty();
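
Written out, the weighting the comment above describes is: previous gets (1 - d) / 2, current gets 1/2 and next gets d / 2 for a distance d in [0, 1]. A standalone arithmetic check (blend3_weights is an illustrative function; the producer itself mixes actual draw_frames rather than scalar weights):

#include <cstdio>
#include <initializer_list>

// Conceptual weights used by the 3-frame interpolation for a distance d in [0, 1].
static void blend3_weights(double d, double& previous, double& current, double& next)
{
    previous = (1.0 - d) / 2.0;
    current  = 0.5;
    next     = d / 2.0;
}

int main()
{
    for (double d : { 0.0, 0.5, 0.75 })
    {
        double p, c, n;
        blend3_weights(d, p, c, n);
        std::printf("d=%.2f -> %.1f%% previous, %.1f%% current, %.1f%% next\n",
                    d, p * 100.0, c * 100.0, n * 100.0);
    }
    // Matches the comment: 50/50/0, 25/50/25 and 12.5/50/37.5.
}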
@@ -93,7 +93,7 @@ struct blend_all
                bool has_previous = previous_frame != draw_frame::empty();
 
                if (!has_previous)
-                       return blend(source, destination, distance);
+                       return blend2(source, destination, distance);
 
                auto middle                                                                                     = last_source;
                auto next_frame                                                                         = destination;
@@ -176,7 +176,7 @@ public:
                double source   = boost::rational_cast<double>(source_);
                double delta    = boost::rational_cast<double>(dest_) - source;
                double result   = tweener_(time_, source, delta, duration_);
-               
+
                return boost::rational<int64_t>(static_cast<int64_t>(result * 1000000.0), 1000000);
        }
 
@@ -190,83 +190,41 @@ public:
 class framerate_producer : public frame_producer_base
 {
        spl::shared_ptr<frame_producer>                                         source_;
-       boost::rational<int>                                                            source_framerate_;
-       audio_channel_layout                                                            source_channel_layout_          = audio_channel_layout::invalid();
-       boost::rational<int>                                                            destination_framerate_;
-       field_mode                                                                                      destination_fieldmode_;
+       std::function<boost::rational<int>()>                           get_source_framerate_;
+       boost::rational<int>                                                            source_framerate_                               = -1;
+       audio_channel_layout                                                            source_channel_layout_                  = audio_channel_layout::invalid();
+       const boost::rational<int>                                                      original_destination_framerate_;
+       const field_mode                                                                        original_destination_fieldmode_;
+       field_mode                                                                                      destination_fieldmode_                  = field_mode::empty;
        std::vector<int>                                                                        destination_audio_cadence_;
        boost::rational<std::int64_t>                                           speed_;
        speed_tweener                                                                           user_speed_;
        std::function<draw_frame (
                        const draw_frame& source,
                        const draw_frame& destination,
-                       const boost::rational<int64_t>& distance)>      interpolator_                           = drop_and_skip;
-       
-       boost::rational<std::int64_t>                                           current_frame_number_           = 0;
-       draw_frame                                                                                      previous_frame_                         = draw_frame::empty();
-       draw_frame                                                                                      next_frame_                                     = draw_frame::empty();
+                       const boost::rational<int64_t>& distance)>      interpolator_                                   = drop_or_repeat;
+
+       boost::rational<std::int64_t>                                           current_frame_number_                   = 0;
+       draw_frame                                                                                      previous_frame_                                 = draw_frame::empty();
+       draw_frame                                                                                      next_frame_                                             = draw_frame::empty();
        mutable_audio_buffer                                                            audio_samples_;
 
-       unsigned int                                                                            output_repeat_                          = 0;
-       unsigned int                                                                            output_frame_                           = 0;
+       unsigned int                                                                            output_repeat_                                  = 0;
+       unsigned int                                                                            output_frame_                                   = 0;
+       draw_frame                                                                                      last_frame_                                             = draw_frame::empty();
 public:
        framerate_producer(
                        spl::shared_ptr<frame_producer> source,
-                       boost::rational<int> source_framerate,
+                       std::function<boost::rational<int> ()> get_source_framerate,
                        boost::rational<int> destination_framerate,
                        field_mode destination_fieldmode,
                        std::vector<int> destination_audio_cadence)
                : source_(std::move(source))
-               , source_framerate_(std::move(source_framerate))
-               , destination_framerate_(std::move(destination_framerate))
-               , destination_fieldmode_(destination_fieldmode)
+               , get_source_framerate_(std::move(get_source_framerate))
+               , original_destination_framerate_(std::move(destination_framerate))
+               , original_destination_fieldmode_(destination_fieldmode)
                , destination_audio_cadence_(std::move(destination_audio_cadence))
        {
-               // Coarse adjustment to correct fps family (23.98 - 30 vs 47.95 - 60)
-               if (destination_fieldmode_ != field_mode::progressive)  // Interlaced output
-               {
-                       auto diff_double        = boost::abs(source_framerate_ - destination_framerate_ * 2);
-                       auto diff_keep          = boost::abs(source_framerate_ - destination_framerate_);
-
-                       if (diff_double < diff_keep)                                            // Double rate interlaced
-                       {
-                               destination_framerate_ *= 2;
-                       }
-                       else                                                                                            // Progressive non interlaced
-                       {
-                               destination_fieldmode_ = field_mode::progressive;
-                       }
-               }
-               else                                                                                                    // Progressive
-               {
-                       auto diff_halve = boost::abs(source_framerate_ * 2      - destination_framerate_);
-                       auto diff_keep  = boost::abs(source_framerate_          - destination_framerate_);
-
-                       if (diff_halve < diff_keep)                                                     // Repeat every frame two times
-                       {
-                               destination_framerate_  /= 2;
-                               output_repeat_                  = 2;
-                       }
-               }
-
-               speed_ = boost::rational<int64_t>(source_framerate_ / destination_framerate_);
-
-               // drop_and_skip will only be used by default for exact framerate multiples (half, same and double)
-               // for all other framerates a frame interpolator will be chosen.
-               if (speed_ != 1 && speed_ * 2 != 1 && speed_ != 2)
-               {
-                       auto high_source_framerate              = source_framerate_ > 47;
-                       auto high_destination_framerate = destination_framerate_ > 47
-                                       || destination_fieldmode_ != field_mode::progressive;
-
-                       if (high_source_framerate && high_destination_framerate)        // The bluriness of blend_all is acceptable on high framerates.
-                               interpolator_ = blend_all();
-                       else                                                                                                            // blend_all is mostly too blurry on low framerates. blend provides a compromise.
-                               interpolator_ = &blend;
-
-                       CASPAR_LOG(warning) << source_->print() << L" Frame blending frame rate conversion required to conform to channel frame rate.";
-               }
-
                // Note: Uses 1 step rotated cadence for 1001 modes (1602, 1602, 1601, 1602, 1601)
                // This cadence fills the audio mixer most optimally.
                boost::range::rotate(destination_audio_cadence_, std::end(destination_audio_cadence_) - 1);
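
The cadence rotation kept as context above moves the last element of the NTSC-family cadence to the front, turning 1602, 1601, 1602, 1601, 1602 into the 1602, 1602, 1601, 1602, 1601 pattern mentioned in the note. The same one-step rotation shown with std::rotate (boost::range::rotate over the equivalent iterator range behaves identically):

#include <algorithm>
#include <iostream>
#include <vector>

int main()
{
    // 48 kHz audio at 30000/1001 fps: five frames carry exactly 8008 samples.
    std::vector<int> cadence = { 1602, 1601, 1602, 1601, 1602 };

    // One-step rotation: the former last element becomes the first.
    std::rotate(cadence.begin(), cadence.end() - 1, cadence.end());

    for (int samples : cadence)
        std::cout << samples << ' ';      // 1602 1602 1601 1602 1601
    std::cout << '\n';
}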
@@ -274,13 +232,15 @@ public:
 
        draw_frame receive_impl() override
        {
+		// The destination field mode is initially unknown; it becomes known after the first update_source_framerate().
+               auto field1 = do_render_progressive_frame(true);
+
                if (destination_fieldmode_ == field_mode::progressive)
                {
-                       return do_render_progressive_frame(true);
+                       return field1;
                }
                else
                {
-                       auto field1 = do_render_progressive_frame(true);
                        auto field2 = do_render_progressive_frame(false);
 
                        return draw_frame::interlace(field1, field2, destination_fieldmode_);
@@ -304,12 +264,14 @@ public:
                }
                else if (boost::iequals(params.at(1), L"interpolation"))
                {
-                       if (boost::iequals(params.at(2), L"blend"))
-                               interpolator_ = &blend;
-                       else if (boost::iequals(params.at(2), L"blend_all"))
-                               interpolator_ = blend_all();
+                       if (boost::iequals(params.at(2), L"blend2"))
+                               interpolator_ = &blend2;
+                       else if (boost::iequals(params.at(2), L"blend3"))
+                               interpolator_ = blend3();
+                       else if (boost::iequals(params.at(2), L"drop_or_repeat"))
+                               interpolator_ = &drop_or_repeat;
                        else
-                               interpolator_ = &drop_and_skip;
+                               CASPAR_THROW_EXCEPTION(user_error() << msg_info("Valid interpolations are DROP_OR_REPEAT, BLEND2 and BLEND3"));
                }
                else if (boost::iequals(params.at(1), L"output_repeat")) // Only for debugging purposes
                {
@@ -351,12 +313,18 @@ public:
 
        uint32_t nb_frames() const override
        {
-               return static_cast<uint32_t>(source_->nb_frames() * boost::rational_cast<double>(1 / get_speed() / (output_repeat_ != 0 ? 2 : 1)));
+               auto source_nb_frames = source_->nb_frames();
+               auto multiple = boost::rational_cast<double>(1 / get_speed() * (output_repeat_ != 0 ? 2 : 1));
+
+               return static_cast<uint32_t>(source_nb_frames * multiple);
        }
 
        uint32_t frame_number() const override
        {
-               return static_cast<uint32_t>(source_->frame_number() * boost::rational_cast<double>(1 / get_speed() / (output_repeat_ != 0 ? 2 : 1)));
+               auto source_frame_number = source_->frame_number() - 1; // next frame already received
+               auto multiple = boost::rational_cast<double>(1 / get_speed() * (output_repeat_ != 0 ? 2 : 1));
+
+               return static_cast<uint32_t>(source_frame_number * multiple);
        }
 
        constraints& pixel_constraints() override
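
A quick sanity check on the corrected scaling: conforming a 25 fps clip to a 50p channel halves the internal destination rate, so speed is 1 and output_repeat_ is 2, and every source frame becomes two channel frames. Multiplying (rather than dividing) by the repeat factor is what makes nb_frames() report 200 for a 100-frame clip instead of 50. A sketch of just that arithmetic, with the values hard-coded for the example:

#include <boost/rational.hpp>
#include <cstdint>
#include <iostream>

int main()
{
    boost::rational<std::int64_t> speed(25, 25);   // source 25 fps over internal destination 25 fps
    int output_repeat = 2;                         // each conformed frame is shown twice on the 50p channel
    std::uint32_t source_nb_frames = 100;

    double multiple = boost::rational_cast<double>(boost::rational<std::int64_t>(1) / speed)
                    * (output_repeat != 0 ? 2 : 1);

    std::cout << static_cast<std::uint32_t>(source_nb_frames * multiple) << '\n';   // 200
}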
@@ -368,9 +336,9 @@ private:
        {
                user_speed_.fetch_and_tick();
 
-               if (output_repeat_ && ++output_frame_ % output_repeat_)
+               if (output_repeat_ && output_frame_++ % output_repeat_)
                {
-                       auto frame = draw_frame::still(last_frame());
+                       auto frame = last_frame_;
 
                        frame.transform().audio_transform.volume = 0.0;
 
@@ -395,6 +363,8 @@ private:
 
                fast_forward_integer_frames(integer_next_frame - integer_current_frame);
 
+               last_frame_ = result;
+
                if (sound)
                        return attach_sound(result);
                else
@@ -427,6 +397,7 @@ private:
        draw_frame pop_frame_from_source()
        {
                auto frame = source_->receive();
+               update_source_framerate();
 
                if (user_speed_.fetch() == 1)
                {
@@ -506,14 +477,75 @@ private:
                                || user_speed_.fetch() != 1
                                || audio_samples_.size() / source_channel_layout_.num_channels >= destination_audio_cadence_.at(0);
        }
+
+       void update_source_framerate()
+       {
+               auto source_framerate = get_source_framerate_();
+
+               if (source_framerate_ == source_framerate)
+                       return;
+
+               output_repeat_                          = 0;
+               output_frame_                           = 0;
+               source_framerate_                       = source_framerate;
+               auto destination_framerate      = original_destination_framerate_;
+               destination_fieldmode_          = original_destination_fieldmode_;
+
+               // Coarse adjustment to correct fps family (23.98 - 30 vs 47.95 - 60)
+               if (destination_fieldmode_ != field_mode::progressive)  // Interlaced output
+               {
+                       auto diff_double        = boost::abs(source_framerate_ - destination_framerate * 2);
+                       auto diff_keep          = boost::abs(source_framerate_ - destination_framerate);
+
+                       if (diff_double < diff_keep)                                            // Double rate interlaced
+                       {
+                               destination_framerate *= 2;
+                       }
+                       else                                                                                            // Progressive non interlaced
+                       {
+                               destination_fieldmode_ = field_mode::progressive;
+                       }
+               }
+               else                                                                                                    // Progressive
+               {
+                       auto diff_halve = boost::abs(source_framerate_ * 2      - destination_framerate);
+                       auto diff_keep  = boost::abs(source_framerate_          - destination_framerate);
+
+                       if (diff_halve < diff_keep)                                                     // Repeat every frame two times
+                       {
+                               destination_framerate   /= 2;
+                               output_repeat_                  = 2;
+                       }
+               }
+
+               speed_ = boost::rational<int64_t>(source_framerate_ / destination_framerate);
+
+               // drop_or_repeat will only be used by default for exact framerate multiples (half, same and double)
+               // for all other framerates a frame interpolator will be chosen.
+               if (speed_ != 1 && speed_ * 2 != 1 && speed_ != 2)
+               {
+                       auto high_source_framerate              = source_framerate_ > 47;
+                       auto high_destination_framerate = destination_framerate > 47
+                                       || destination_fieldmode_ != field_mode::progressive;
+
+			if (high_source_framerate && high_destination_framerate)	// The blurriness of blend3 is acceptable on high framerates.
+                               interpolator_   = blend3();
+                       else                                                                                                            // blend3 is mostly too blurry on low framerates. blend2 provides a compromise.
+                               interpolator_   = &blend2;
+
+                       CASPAR_LOG(warning) << source_->print() << L" Frame blending frame rate conversion required to conform to channel frame rate.";
+               }
+               else
+                       interpolator_           = &drop_or_repeat;
+       }
 };
 
 void describe_framerate_producer(help_sink& sink)
 {
        sink.para()->text(L"Framerate conversion control / Slow motion examples:");
-       sink.example(L">> CALL 1-10 FRAMERATE INTERPOLATION BLEND", L"enables 2 frame blend interpolation.");
-       sink.example(L">> CALL 1-10 FRAMERATE INTERPOLATION BLEND_ALL", L"enables 3 frame blend interpolation.");
-       sink.example(L">> CALL 1-10 FRAMERATE INTERPOLATION DROP_AND_SKIP", L"disables frame interpolation.");
+       sink.example(L">> CALL 1-10 FRAMERATE INTERPOLATION BLEND2", L"enables 2 frame blend interpolation.");
+       sink.example(L">> CALL 1-10 FRAMERATE INTERPOLATION BLEND3", L"enables 3 frame blend interpolation.");
+       sink.example(L">> CALL 1-10 FRAMERATE INTERPOLATION DROP_OR_REPEAT", L"disables frame interpolation.");
        sink.example(L">> CALL 1-10 FRAMERATE SPEED 0.25", L"immediately changes the speed to 25%. Sound will be disabled.");
        sink.example(L">> CALL 1-10 FRAMERATE SPEED 0.25 50", L"changes the speed to 25% linearly over 50 frames. Sound will be disabled.");
        sink.example(L">> CALL 1-10 FRAMERATE SPEED 0.25 50 easeinoutsine", L"changes the speed to 25% over 50 frames using specified easing curve. Sound will be disabled.");
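
To make the coarse fps-family branch in update_source_framerate() above concrete: a 50p clip played on an i50 channel (25 interlaced frames, i.e. 50 fields per second) is closer to double the channel rate than to the channel rate itself, so the destination is doubled, speed becomes 1 and drop_or_repeat suffices. A simplified sketch of that decision with plain boost::rational values (names shortened, field-mode bookkeeping omitted):

#include <boost/rational.hpp>
#include <cstdint>
#include <iostream>

int main()
{
    using rational = boost::rational<int>;

    rational source(50, 1);            // e.g. a 50p clip
    rational destination(25, 1);       // i50 channel: 25 frames (50 fields) per second
    bool interlaced_output = true;

    if (interlaced_output)
    {
        rational diff_double = boost::abs(source - destination * 2);
        rational diff_keep   = boost::abs(source - destination);

        if (diff_double < diff_keep)
            destination *= 2;          // double-rate interlaced: render 50 progressive frames, interlace in pairs
        // else: fall back to progressive output at the channel rate
    }

    boost::rational<std::int64_t> speed(source.numerator() * destination.denominator(),
                                        source.denominator() * destination.numerator());
    std::cout << "speed = " << speed << '\n';   // 1/1, so drop_or_repeat is enough
}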
@@ -522,18 +554,17 @@ void describe_framerate_producer(help_sink& sink)
 
 spl::shared_ptr<frame_producer> create_framerate_producer(
                spl::shared_ptr<frame_producer> source,
-               boost::rational<int> source_framerate,
+               std::function<boost::rational<int> ()> get_source_framerate,
                boost::rational<int> destination_framerate,
                field_mode destination_fieldmode,
                std::vector<int> destination_audio_cadence)
 {
        return spl::make_shared<framerate_producer>(
                        std::move(source),
-                       std::move(source_framerate),
+                       std::move(get_source_framerate),
                        std::move(destination_framerate),
                        destination_fieldmode,
                        std::move(destination_audio_cadence));
 }
 
 }}
-
index f0995310d1fa97afd2da306b724144927faee59d..f742e484cccad27054c79c97d58043719775e006 100644 (file)
@@ -29,6 +29,7 @@
 
 #include <string>
 #include <vector>
+#include <functional>
 
 #include <boost/rational.hpp>
 
@@ -38,7 +39,7 @@ void describe_framerate_producer(help_sink& sink);
 
 spl::shared_ptr<frame_producer> create_framerate_producer(
                spl::shared_ptr<frame_producer> source,
-               boost::rational<int> source_framerate,
+               std::function<boost::rational<int> ()> get_source_framerate, // Will be called after first receive() on the source
                boost::rational<int> destination_framerate,
                field_mode destination_fieldmode,
                std::vector<int> destination_audio_cadence);
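
The factory now takes a callable instead of a fixed rate because, as the comment notes, the source framerate is only trusted after the first receive() (an ffmpeg input, for instance, may not know its true rate until a frame has been decoded). A generic illustration of that deferred-parameter pattern; conform() and the probing lambda are hypothetical, not the CasparCG call site:

#include <boost/rational.hpp>
#include <functional>
#include <iostream>

// Consumer of the rate that must not ask for it up front.
void conform(std::function<boost::rational<int> ()> get_source_framerate)
{
    // ...the first frame would be pulled from the source here...
    auto rate = get_source_framerate();        // safe now: the source has produced a frame
    std::cout << "source framerate: " << rate << '\n';
}

int main()
{
    boost::rational<int> detected(0);          // unknown until decoding has started

    conform([&]() -> boost::rational<int>
    {
        if (detected == 0)
            detected = boost::rational<int>(30000, 1001);   // pretend probing just finished
        return detected;
    });
}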
index 39867694a51ca6babc9b7e99ac8cd87f89072a08..76473cc4eada0f3010e69cbb4d4ad0f7b4a6397a 100644 (file)
@@ -84,6 +84,7 @@ public:
                if(preview)
                {
                        play();
+                       receive(video_format::invalid);
                        foreground_->paused(true);
                        is_paused_ = true;
                }
index 34a5ef1bc945ec28face70b4b04d5dab29b860ad..590aca44d248fc02d3c27c9e4179ffb1ca86512c 100644 (file)
@@ -127,6 +127,11 @@ public:
                return dest_producer_->nb_frames();
        }
 
+       uint32_t frame_number() const override
+       {
+               return dest_producer_->frame_number();
+       }
+
        std::wstring print() const override
        {
                return L"transition[" + source_producer_->print() + L"=>" + dest_producer_->print() + L"]";
index 445d612c9eb836f225af9960ef3deedbd5c5d9fe..a0ac2c2012fd249ac76d0fcd85a8483826ee7e86 100644 (file)
@@ -49,6 +49,7 @@
 #include <boost/lexical_cast.hpp>
 
 #include <string>
+#include <unordered_map>
 
 namespace caspar { namespace core {
 
@@ -74,7 +75,11 @@ struct video_channel::impl final
        std::future<void>                                                                       output_ready_for_frame_ = make_ready_future();
        spl::shared_ptr<image_mixer>                                            image_mixer_;
        caspar::core::mixer                                                                     mixer_;
-       caspar::core::stage                                                                     stage_; 
+       caspar::core::stage                                                                     stage_;
+
+       mutable tbb::spin_mutex                                                         tick_listeners_mutex_;
+       int64_t                                                                                         last_tick_listener_id   = 0;
+       std::unordered_map<int64_t, std::function<void ()>>     tick_listeners_;
 
        executor                                                                                        executor_                               { L"video_channel " + boost::lexical_cast<std::wstring>(index_) };
 public:
@@ -145,33 +150,51 @@ public:
                });
        }
 
+       void invoke_tick_listeners()
+       {
+               auto listeners = lock(tick_listeners_mutex_, [=] { return tick_listeners_; });
+
+               for (auto listener : listeners)
+               {
+                       try
+                       {
+                               listener.second();
+                       }
+                       catch (...)
+                       {
+                               CASPAR_LOG_CURRENT_EXCEPTION();
+                       }
+               }
+       }
+
        void tick()
        {
                try
                {
+                       invoke_tick_listeners();
 
                        auto format_desc        = video_format_desc();
                        auto channel_layout = audio_channel_layout();
-                       
+
                        caspar::timer frame_timer;
 
                        // Produce
-                       
+
                        auto stage_frames = stage_(format_desc);
-                       
+
                        // Mix
-                       
+
                        auto mixed_frame  = mixer_(std::move(stage_frames), format_desc, channel_layout);
-                       
+
                        // Consume
 
-                       output_ready_for_frame_.get();
                        output_ready_for_frame_ = output_(std::move(mixed_frame), format_desc, channel_layout);
-               
+                       output_ready_for_frame_.get();
+
                        auto frame_time = frame_timer.elapsed()*format_desc.fps*0.5;
                        graph_->set_value("tick-time", frame_time);
 
-                       *monitor_subject_       << monitor::message("/profiler/time")   % frame_timer.elapsed() % (1.0/format_desc_.fps)
+                       *monitor_subject_       << monitor::message("/profiler/time")   % frame_timer.elapsed() % (1.0/ video_format_desc().fps)
                                                                << monitor::message("/format")                  % format_desc.name;
                }
                catch(...)
@@ -182,7 +205,7 @@ public:
                if (executor_.is_running())
                        executor_.begin_invoke([=]{tick();});
        }
-                       
+
        std::wstring print() const
        {
                return L"video_channel[" + boost::lexical_cast<std::wstring>(index_) + L"|" +  video_format_desc().name + L"]";
@@ -206,8 +229,8 @@ public:
                info.add_child(L"stage", stage_info.get());
                info.add_child(L"mixer", mixer_info.get());
                info.add_child(L"output", output_info.get());
-   
-               return info;                       
+
+               return info;
        }
 
        boost::property_tree::wptree delay_info() const
@@ -225,6 +248,23 @@ public:
 
                return info;
        }
+
+       std::shared_ptr<void> add_tick_listener(std::function<void()> listener)
+       {
+               return lock(tick_listeners_mutex_, [&]
+               {
+                       auto tick_listener_id = last_tick_listener_id++;
+                       tick_listeners_.insert(std::make_pair(tick_listener_id, listener));
+
+                       return std::shared_ptr<void>(nullptr, [=](void*)
+                       {
+                               lock(tick_listeners_mutex_, [&]
+                               {
+                                       tick_listeners_.erase(tick_listener_id);
+                               });
+                       });
+               });
+       }
 };
 
 video_channel::video_channel(
@@ -248,5 +288,6 @@ boost::property_tree::wptree video_channel::info() const{return impl_->info();}
 boost::property_tree::wptree video_channel::delay_info() const { return impl_->delay_info(); }
 int video_channel::index() const { return impl_->index(); }
 monitor::subject& video_channel::monitor_output(){ return *impl_->monitor_subject_; }
+std::shared_ptr<void> video_channel::add_tick_listener(std::function<void()> listener) { return impl_->add_tick_listener(std::move(listener)); }
 
 }}
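
The registration added above returns a std::shared_ptr<void> whose custom deleter erases the map entry, so a consumer unsubscribes simply by dropping the token. A self-contained sketch of that token pattern using only the standard library; tick_source is a stand-in class, and the real channel serialises access with tbb::spin_mutex plus its lock() helper and logs listener exceptions instead of letting them escape:

#include <cstdint>
#include <functional>
#include <iostream>
#include <memory>
#include <mutex>
#include <unordered_map>

class tick_source   // stand-in for the channel impl
{
    std::mutex                                                mutex_;
    std::int64_t                                              next_id_ = 0;
    std::unordered_map<std::int64_t, std::function<void ()>>  listeners_;
public:
    // Returns an opaque token; destroying the token unregisters the listener.
    std::shared_ptr<void> add_tick_listener(std::function<void ()> listener)
    {
        std::lock_guard<std::mutex> guard(mutex_);
        auto id = next_id_++;
        listeners_.emplace(id, std::move(listener));

        return std::shared_ptr<void>(nullptr, [this, id](void*)
        {
            std::lock_guard<std::mutex> guard(mutex_);
            listeners_.erase(id);
        });
    }

    void tick()
    {
        auto copy = [this] { std::lock_guard<std::mutex> g(mutex_); return listeners_; }();
        for (auto& entry : copy)
            entry.second();          // the real channel catches and logs exceptions here
    }
};

int main()
{
    tick_source channel;
    auto token = channel.add_tick_listener([] { std::cout << "tick\n"; });
    channel.tick();                  // prints "tick"
    token.reset();                   // listener removed
    channel.tick();                  // prints nothing
}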
index b3ac548a7ff56b3107962e816ec4c4c6917d1b87..4c256b0dca24861fd28e9c6b147be78d0d3bc4df 100644 (file)
 
 #include <boost/property_tree/ptree_fwd.hpp>
 
+#include <functional>
+
 namespace caspar { namespace core {
-       
+
 class video_channel final
 {
        video_channel(const video_channel&);
@@ -50,7 +52,7 @@ public:
        ~video_channel();
 
        // Methods
-                       
+
        monitor::subject&                                               monitor_output();
 
        // Properties
@@ -67,6 +69,8 @@ public:
        core::audio_channel_layout                              audio_channel_layout() const;
        void                                                                    audio_channel_layout(const core::audio_channel_layout& channel_layout);
 
+       std::shared_ptr<void>                                   add_tick_listener(std::function<void()> listener);
+
        spl::shared_ptr<core::frame_factory>    frame_factory();
 
        boost::property_tree::wptree                    info() const;
@@ -77,4 +81,4 @@ private:
        spl::unique_ptr<impl> impl_;
 };
 
-}}
\ No newline at end of file
+}}
index 6d21ba2d63f26501887ea2e14e0db59147d587f1..120b1ee44d3a3a339039044e569d3363b9c4b505 100644 (file)
 namespace caspar { namespace core {
        
        const std::vector<video_format_desc> format_descs = {
-               { video_format::pal,          720,  576,  1024, 576,  field_mode::upper,       25,    1,    L"PAL",          { 1920                         } },
+               { video_format::pal,          720,  576,  1024, 576,  field_mode::upper,       25000, 1000, L"PAL",          { 1920                         } },
                { video_format::ntsc,         720,  486,  720,  540,  field_mode::lower,       30000, 1001, L"NTSC",         { 1602, 1601, 1602, 1601, 1602 } },
-               { video_format::x576p2500,    720,  576,  1024, 576,  field_mode::progressive, 25,    1,    L"576p2500",     { 1920                         } },
+               { video_format::x576p2500,    720,  576,  1024, 576,  field_mode::progressive, 25000, 1000, L"576p2500",     { 1920                         } },
                { video_format::x720p2398,    1280, 720,  1280, 720,  field_mode::progressive, 24000, 1001, L"720p2398",     { 2002                         } },
-               { video_format::x720p2400,    1280, 720,  1280, 720,  field_mode::progressive, 24,    1,    L"720p2400",     { 2000                         } },
-               { video_format::x720p2500,    1280, 720,  1280, 720,  field_mode::progressive, 25,    1,    L"720p2500",     { 1920                         } },
-               { video_format::x720p5000,    1280, 720,  1280, 720,  field_mode::progressive, 50,    1,    L"720p5000",     { 960                          } },
+               { video_format::x720p2400,    1280, 720,  1280, 720,  field_mode::progressive, 24000, 1000, L"720p2400",     { 2000                         } },
+               { video_format::x720p2500,    1280, 720,  1280, 720,  field_mode::progressive, 25000, 1000, L"720p2500",     { 1920                         } },
+               { video_format::x720p5000,    1280, 720,  1280, 720,  field_mode::progressive, 50000, 1000, L"720p5000",     { 960                          } },
                { video_format::x720p2997,    1280, 720,  1280, 720,  field_mode::progressive, 30000, 1001, L"720p2997",     { 1602, 1601, 1602, 1601, 1602 } },
                { video_format::x720p5994,    1280, 720,  1280, 720,  field_mode::progressive, 60000, 1001, L"720p5994",     { 801,  800,  801,  801,  801  } },
-               { video_format::x720p3000,    1280, 720,  1280, 720,  field_mode::progressive, 30,    1,    L"720p3000",     { 1600                         } },
-               { video_format::x720p6000,    1280, 720,  1280, 720,  field_mode::progressive, 60,    1,    L"720p6000",     { 800                          } },
+               { video_format::x720p3000,    1280, 720,  1280, 720,  field_mode::progressive, 30000, 1000, L"720p3000",     { 1600                         } },
+               { video_format::x720p6000,    1280, 720,  1280, 720,  field_mode::progressive, 60000, 1000, L"720p6000",     { 800                          } },
                { video_format::x1080p2398,   1920, 1080, 1920, 1080, field_mode::progressive, 24000, 1001, L"1080p2398",    { 2002                         } },
-               { video_format::x1080p2400,   1920, 1080, 1920, 1080, field_mode::progressive, 24,    1,    L"1080p2400",    { 2000                         } },
-               { video_format::x1080i5000,   1920, 1080, 1920, 1080, field_mode::upper,       25,    1,    L"1080i5000",    { 1920                         } },
+               { video_format::x1080p2400,   1920, 1080, 1920, 1080, field_mode::progressive, 24000, 1000, L"1080p2400",    { 2000                         } },
+               { video_format::x1080i5000,   1920, 1080, 1920, 1080, field_mode::upper,       25000, 1000, L"1080i5000",    { 1920                         } },
                { video_format::x1080i5994,   1920, 1080, 1920, 1080, field_mode::upper,       30000, 1001, L"1080i5994",    { 1602, 1601, 1602, 1601, 1602 } },
-               { video_format::x1080i6000,   1920, 1080, 1920, 1080, field_mode::upper,       30,    1,    L"1080i6000",    { 1600                         } },
-               { video_format::x1080p2500,   1920, 1080, 1920, 1080, field_mode::progressive, 25,    1,    L"1080p2500",    { 1920                         } },
+               { video_format::x1080i6000,   1920, 1080, 1920, 1080, field_mode::upper,       30000, 1000, L"1080i6000",    { 1600                         } },
+               { video_format::x1080p2500,   1920, 1080, 1920, 1080, field_mode::progressive, 25000, 1000, L"1080p2500",    { 1920                         } },
                { video_format::x1080p2997,   1920, 1080, 1920, 1080, field_mode::progressive, 30000, 1001, L"1080p2997",    { 1602, 1601, 1602, 1601, 1602 } },
-               { video_format::x1080p3000,   1920, 1080, 1920, 1080, field_mode::progressive, 30,    1,    L"1080p3000",    { 1600                         } },
-               { video_format::x1080p5000,   1920, 1080, 1920, 1080, field_mode::progressive, 50,    1,    L"1080p5000",    { 960                          } },
+               { video_format::x1080p3000,   1920, 1080, 1920, 1080, field_mode::progressive, 30000, 1000, L"1080p3000",    { 1600                         } },
+               { video_format::x1080p5000,   1920, 1080, 1920, 1080, field_mode::progressive, 50000, 1000, L"1080p5000",    { 960                          } },
                { video_format::x1080p5994,   1920, 1080, 1920, 1080, field_mode::progressive, 60000, 1001, L"1080p5994",    { 801,  800,  801,  801,  801  } },
-               { video_format::x1080p6000,   1920, 1080, 1920, 1080, field_mode::progressive, 60,    1,    L"1080p6000",    { 800                          } },
+               { video_format::x1080p6000,   1920, 1080, 1920, 1080, field_mode::progressive, 60000, 1000, L"1080p6000",    { 800                          } },
                { video_format::x1556p2398,   2048, 1556, 2048, 1556, field_mode::progressive, 24000, 1001, L"1556p2398",    { 2002                         } },
-               { video_format::x1556p2400,   2048, 1556, 2048, 1556, field_mode::progressive, 24,    1,    L"1556p2400",    { 2000                         } },
-               { video_format::x1556p2500,   2048, 1556, 2048, 1556, field_mode::progressive, 25,    1,    L"1556p2500",    { 1920                         } },
+               { video_format::x1556p2400,   2048, 1556, 2048, 1556, field_mode::progressive, 24000, 1000, L"1556p2400",    { 2000                         } },
+               { video_format::x1556p2500,   2048, 1556, 2048, 1556, field_mode::progressive, 25000, 1000, L"1556p2500",    { 1920                         } },
                { video_format::dci1080p2398, 2048, 1080, 2048, 1080, field_mode::progressive, 24000, 1001, L"dci1080p2398", { 2002                         } },
-               { video_format::dci1080p2400, 2048, 1080, 2048, 1080, field_mode::progressive, 24,    1,    L"dci1080p2400", { 2000                         } },
-               { video_format::dci1080p2500, 2048, 1080, 2048, 1080, field_mode::progressive, 25,    1,    L"dci1080p2500", { 1920                         } },
+               { video_format::dci1080p2400, 2048, 1080, 2048, 1080, field_mode::progressive, 24000, 1000, L"dci1080p2400", { 2000                         } },
+               { video_format::dci1080p2500, 2048, 1080, 2048, 1080, field_mode::progressive, 25000, 1000, L"dci1080p2500", { 1920                         } },
                { video_format::x2160p2398,   3840, 2160, 3840, 2160, field_mode::progressive, 24000, 1001, L"2160p2398",    { 2002                         } },
-               { video_format::x2160p2400,   3840, 2160, 3840, 2160, field_mode::progressive, 24,    1,    L"2160p2400",    { 2000                         } },
-               { video_format::x2160p2500,   3840, 2160, 3840, 2160, field_mode::progressive, 25,    1,    L"2160p2500",    { 1920                         } },
+               { video_format::x2160p2400,   3840, 2160, 3840, 2160, field_mode::progressive, 24000, 1000, L"2160p2400",    { 2000                         } },
+               { video_format::x2160p2500,   3840, 2160, 3840, 2160, field_mode::progressive, 25000, 1000, L"2160p2500",    { 1920                         } },
                { video_format::x2160p2997,   3840, 2160, 3840, 2160, field_mode::progressive, 30000, 1001, L"2160p2997",    { 1602, 1601, 1602, 1601, 1602 } },
-               { video_format::x2160p3000,   3840, 2160, 3840, 2160, field_mode::progressive, 30,    1,    L"2160p3000",    { 1600                         } },
+               { video_format::x2160p3000,   3840, 2160, 3840, 2160, field_mode::progressive, 30000, 1000, L"2160p3000",    { 1600                         } },
                { video_format::x2160p5000,   3840, 2160, 3840, 2160, field_mode::progressive, 50,    1,    L"2160p5000",    { 960                          } },
                { video_format::x2160p5994,   3840, 2160, 3840, 2160, field_mode::progressive, 60000, 1001, L"2160p5994",    { 801,  800,  801,  801,  801  } },
                { video_format::x2160p6000,   3840, 2160, 3840, 2160, field_mode::progressive, 60,    1,    L"2160p6000",    { 800                          } },
                { video_format::dci2160p2398, 4096, 2160, 4096, 2160, field_mode::progressive, 24000, 1001, L"dci2160p2398", { 2002                         } },
-               { video_format::dci2160p2400, 4096, 2160, 4096, 2160, field_mode::progressive, 24,    1,    L"dci2160p2400", { 2000                         } },
-               { video_format::dci2160p2500, 4096, 2160, 4096, 2160, field_mode::progressive, 25,    1,    L"dci2160p2500", { 1920                         } },
+               { video_format::dci2160p2400, 4096, 2160, 4096, 2160, field_mode::progressive, 24000, 1000, L"dci2160p2400", { 2000                         } },
+               { video_format::dci2160p2500, 4096, 2160, 4096, 2160, field_mode::progressive, 25000, 1000, L"dci2160p2500", { 1920                         } },
                { video_format::invalid,      0,    0,    0,    0,    field_mode::progressive, 1,     1,    L"invalid",      { 1                            } }
        };
 
@@ -88,6 +88,7 @@ video_format_desc::video_format_desc(
        , square_height(square_height)
        , field_mode(field_mode)
        , fps(static_cast<double>(time_scale) / static_cast<double>(duration))
+       , framerate(time_scale, duration)
        , time_scale(time_scale)
        , duration(duration)
        , field_count(field_mode == core::field_mode::progressive ? 1 : 2)
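
Normalising the table to time_scale/duration pairs such as 25000/1000 and exposing them as boost::rational<int> framerate gives downstream code (for example the framerate producer above) an exact rate to compare against, where the double fps can only approximate 30000/1001. A standalone comparison:

#include <boost/rational.hpp>
#include <iostream>

int main()
{
    int time_scale = 30000, duration = 1001;               // NTSC-family rate

    boost::rational<int> framerate(time_scale, duration);  // exact
    double fps = static_cast<double>(time_scale) / static_cast<double>(duration);

    std::cout << framerate << '\n';                        // 30000/1001
    std::cout << fps << '\n';                              // ~29.97, already rounded

    // Exact arithmetic: five frames of 48 kHz audio hold precisely 8008 samples,
    // which is where the 1602/1601 cadence in the table above comes from.
    std::cout << boost::rational<int>(48000) / framerate * 5 << '\n';   // 8008/1
}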
index 1acb1dbbd14d7fcb52c12fad4c012366b6eaebf0..42777242697bb07d528fceedabcf183acc939da1 100644 (file)
@@ -27,6 +27,8 @@
 
 #include <common/enum_class.h>
 
+#include <boost/rational.hpp>
+
 namespace caspar { namespace core {
        
 enum class video_format
@@ -86,22 +88,23 @@ ENUM_ENABLE_BITWISE(field_mode);
 
 struct video_format_desc final
 {
-       video_format            format;         
+       video_format                    format;         
 
-       int                                     width;          
-       int                                     height;         
-       int                                     square_width;
-       int                                     square_height;
-       core::field_mode        field_mode;     // progressive, interlaced upper field first, interlaced lower field first
-       double                          fps;            // actual framerate = duration/time_scale, e.g. i50 = 25 fps, p50 = 50 fps
-       int                                     time_scale;
-       int                                     duration;
-       int                                     field_count;
-       std::size_t                     size;           // frame size in bytes 
-       std::wstring            name;           // name of output format
+       int                                             width;          
+       int                                             height;         
+       int                                             square_width;
+       int                                             square_height;
+       core::field_mode                field_mode;     // progressive, interlaced upper field first, interlaced lower field first
+       double                                  fps;            // actual framerate = duration/time_scale, e.g. i50 = 25 fps, p50 = 50 fps
+       boost::rational<int>    framerate;
+       int                                             time_scale;
+       int                                             duration;
+       int                                             field_count;
+       std::size_t                             size;           // frame size in bytes 
+       std::wstring                    name;           // name of output format
 
-       int                                     audio_sample_rate;
-       std::vector<int>        audio_cadence;  // rotating optimal number of samples per frame
+       int                                             audio_sample_rate;
+       std::vector<int>                audio_cadence;  // rotating optimal number of samples per frame
 
        video_format_desc(video_format format,
                                          int width,
index 958a7d5bc2d928883773ab5aff35697ffe68047c..7deae83ecd97c33f09bcead32e6beae7593f5abb 100755 (executable)
Binary files a/dependencies64/ffmpeg/bin/linux/libavcodec.so.56.41.100 and b/dependencies64/ffmpeg/bin/linux/libavcodec.so.56.41.100 differ
index ce0f4f3c9aef6d0ced3e468557b747eec3ee462b..e6d2edabd725bc51b75a64a45bdd9013a9ea23fb 100755 (executable)
Binary files a/dependencies64/ffmpeg/bin/linux/libavdevice.so.56.4.100 and b/dependencies64/ffmpeg/bin/linux/libavdevice.so.56.4.100 differ
index 68e0d543780ea8c89ff55077646701e91d3ac9ab..2ff28ca7cefb53627d348c3abc4aa3ab77c8c949 100755 (executable)
Binary files a/dependencies64/ffmpeg/bin/linux/libavfilter.so.5.16.101 and b/dependencies64/ffmpeg/bin/linux/libavfilter.so.5.16.101 differ
index 9c6eca11085acd107aef417cfbcd8365632a1992..058ff926a7c76b62cd5175006d9a21de58eb0f12 100755 (executable)
Binary files a/dependencies64/ffmpeg/bin/linux/libavformat.so.56.36.100 and b/dependencies64/ffmpeg/bin/linux/libavformat.so.56.36.100 differ
index 6b63bb149d0c4f0dbc3c79c1b374e8d1eaaf0de8..fad830991b9c24d76b66fc85440d6950eac320d1 100755 (executable)
Binary files a/dependencies64/ffmpeg/bin/linux/libavutil.so.54.27.100 and b/dependencies64/ffmpeg/bin/linux/libavutil.so.54.27.100 differ
index 2a183634c28d0fe72525a91c34c15216b0007651..47ab16f0325a94a361204343b8c598d2d3753f57 100755 (executable)
Binary files a/dependencies64/ffmpeg/bin/linux/libpostproc.so.53.3.100 and b/dependencies64/ffmpeg/bin/linux/libpostproc.so.53.3.100 differ
index 5aa68b334f03f7120c1a1d1db2e50afd0b1849a5..14e55c7bac2ba7d50e90e4c6823e3c89ac969386 100755 (executable)
Binary files a/dependencies64/ffmpeg/bin/linux/libswresample.so.1.2.100 and b/dependencies64/ffmpeg/bin/linux/libswresample.so.1.2.100 differ
index d094ab7719049a5f7902b652c60f1c5daa6186e6..8fc4a85b37726a19678f9682354ac892f97808bb 100755 (executable)
Binary files a/dependencies64/ffmpeg/bin/linux/libswscale.so.3.1.101 and b/dependencies64/ffmpeg/bin/linux/libswscale.so.3.1.101 differ
index 823188659137ab15842a7ccf92e68dbd48ef0f4c..8dd4a84209b59da78955e588df63cb5f3c7294b2 100644 (file)
@@ -1,6 +1,7 @@
 cmake_minimum_required (VERSION 2.6)
 project ("modules")
 
+add_subdirectory(reroute)
 add_subdirectory(ffmpeg)
 add_subdirectory(oal)
 
@@ -19,5 +20,4 @@ if (MSVC)
 endif ()
 
 add_subdirectory(image)
-add_subdirectory(reroute)
 
index 50529d9f10360d15ad3e52d0e352795424d59134..a51b9dc80bcd5f39b49fb472fa523bdda3633069 100644 (file)
@@ -460,7 +460,7 @@ void describe_consumer(core::help_sink& sink, const core::help_repository& repo)
 }
 
 spl::shared_ptr<core::frame_consumer> create_consumer(
-               const std::vector<std::wstring>& params, core::interaction_sink*)
+               const std::vector<std::wstring>& params, core::interaction_sink*, std::vector<spl::shared_ptr<core::video_channel>> channels)
 {
        if(params.size() < 1 || !boost::iequals(params.at(0), L"BLUEFISH"))
                return core::frame_consumer::empty();
@@ -487,8 +487,8 @@ spl::shared_ptr<core::frame_consumer> create_consumer(
 }
 
 spl::shared_ptr<core::frame_consumer> create_preconfigured_consumer(
-               const boost::property_tree::wptree& ptree, core::interaction_sink*)
-{      
+               const boost::property_tree::wptree& ptree, core::interaction_sink*, std::vector<spl::shared_ptr<core::video_channel>> channels)
+{
        const auto device_index         = ptree.get(                                            L"device",                      1);
        const auto embedded_audio       = ptree.get(                                            L"embedded-audio",      false);
        const auto key_only                     = ptree.get(                                            L"key-only",            false);
index 3c4ec6dbb9b61353f00e8da73190ed109bdb20fd..bcdc48bdd93f7809a872a1e351c27313a6c710b1 100644 (file)
@@ -33,8 +33,10 @@ namespace caspar { namespace bluefish {
 
 void describe_consumer(core::help_sink& sink, const core::help_repository& repo);
 spl::shared_ptr<core::frame_consumer> create_consumer(
-               const std::vector<std::wstring>& params, core::interaction_sink*);
+               const std::vector<std::wstring>& params, core::interaction_sink*,
+               std::vector<spl::shared_ptr<core::video_channel>> channels);
 spl::shared_ptr<core::frame_consumer> create_preconfigured_consumer(
-               const boost::property_tree::wptree& ptree, core::interaction_sink*);
+               const boost::property_tree::wptree& ptree, core::interaction_sink*,
+               std::vector<spl::shared_ptr<core::video_channel>> channels);
 
-}}
\ No newline at end of file
+}}
index c16e6ee2f924a0d9b859380f3bc98ef43ba70755..ca847727eb8b6849834fdb989763757482749a1f 100644 (file)
@@ -20,7 +20,7 @@
 */
 
 #include "../StdAfx.h"
+
 #include "decklink_consumer.h"
 
 #include "../util/util.h"
 #include <boost/lexical_cast.hpp>
 #include <boost/circular_buffer.hpp>
 #include <boost/property_tree/ptree.hpp>
+#include <boost/thread/mutex.hpp>
+
+#include <future>
+
+namespace caspar { namespace decklink {
 
-namespace caspar { namespace decklink { 
-       
 struct configuration
 {
        enum class keyer_t
@@ -78,16 +81,16 @@ struct configuration
 
        int                                                     device_index            = 1;
        int                                                     key_device_idx          = 0;
-       bool                                            embedded_audio          = true;
+       bool                                            embedded_audio          = false;
        keyer_t                                         keyer                           = keyer_t::default_keyer;
        latency_t                                       latency                         = latency_t::default_latency;
        bool                                            key_only                        = false;
        int                                                     base_buffer_depth       = 3;
        core::audio_channel_layout      out_channel_layout      = core::audio_channel_layout::invalid();
-       
+
        int buffer_depth() const
        {
-               return base_buffer_depth + (latency == latency_t::low_latency ? 0 : 1) + (embedded_audio ? 1 : 0);
+               return base_buffer_depth + (latency == latency_t::low_latency ? 0 : 1);
        }
 
        int key_device_index() const
@@ -149,25 +152,25 @@ void set_keyer(
        {
                BOOL value = true;
                if (SUCCEEDED(attributes->GetFlag(BMDDeckLinkSupportsInternalKeying, &value)) && !value)
-                       CASPAR_LOG(error) << print << L" Failed to enable internal keyer.";     
+                       CASPAR_LOG(error) << print << L" Failed to enable internal keyer.";
                else if (FAILED(decklink_keyer->Enable(FALSE)))
-                       CASPAR_LOG(error) << print << L" Failed to enable internal keyer.";                     
+                       CASPAR_LOG(error) << print << L" Failed to enable internal keyer.";
                else if (FAILED(decklink_keyer->SetLevel(255)))
                        CASPAR_LOG(error) << print << L" Failed to set key-level to max.";
                else
-                       CASPAR_LOG(info) << print << L" Enabled internal keyer.";               
+                       CASPAR_LOG(info) << print << L" Enabled internal keyer.";
        }
        else if (keyer == configuration::keyer_t::external_keyer)
        {
                BOOL value = true;
                if (SUCCEEDED(attributes->GetFlag(BMDDeckLinkSupportsExternalKeying, &value)) && !value)
-                       CASPAR_LOG(error) << print << L" Failed to enable external keyer.";     
-               else if (FAILED(decklink_keyer->Enable(TRUE)))                  
-                       CASPAR_LOG(error) << print << L" Failed to enable external keyer.";     
+                       CASPAR_LOG(error) << print << L" Failed to enable external keyer.";
+               else if (FAILED(decklink_keyer->Enable(TRUE)))
+                       CASPAR_LOG(error) << print << L" Failed to enable external keyer.";
                else if (FAILED(decklink_keyer->SetLevel(255)))
                        CASPAR_LOG(error) << print << L" Failed to set key-level to max.";
                else
-                       CASPAR_LOG(info) << print << L" Enabled external keyer.";                       
+                       CASPAR_LOG(info) << print << L" Enabled external keyer.";
        }
 }
 
@@ -200,14 +203,14 @@ public:
 
                needs_to_copy_ = will_attempt_dma && dma_transfer_from_gl_buffer_impossible;
        }
-       
+
        // IUnknown
 
        virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID, LPVOID*)
        {
                return E_NOINTERFACE;
        }
-       
+
        virtual ULONG STDMETHODCALLTYPE AddRef()
        {
                return ++ref_count_;
@@ -216,7 +219,12 @@ public:
        virtual ULONG STDMETHODCALLTYPE Release()
        {
                if(--ref_count_ == 0)
+               {
                        delete this;
+
+                       return 0;
+               }
+
                return ref_count_;
        }
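
The previous Release() returned ref_count_ after delete this, i.e. it read a member of an already destroyed object whenever the count reached zero; the new version returns 0 from inside the branch instead. An equivalent way to write the same pattern, as a sketch (assuming ref_count_ is an atomic counter):

	virtual ULONG STDMETHODCALLTYPE Release()
	{
		auto count = --ref_count_;	// capture the decremented value first

		if (count == 0)
			delete this;			// the object is gone; touch no members after this

		return count;
	}
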
 
@@ -227,7 +235,7 @@ public:
        virtual long STDMETHODCALLTYPE GetRowBytes()                {return static_cast<long>(format_desc_.width*4);}
        virtual BMDPixelFormat STDMETHODCALLTYPE GetPixelFormat()   {return bmdFormat8BitBGRA;}
        virtual BMDFrameFlags STDMETHODCALLTYPE GetFlags()                      {return bmdFrameFlagDefault;}
-               
+
        virtual HRESULT STDMETHODCALLTYPE GetBytes(void** buffer)
        {
                try
@@ -266,11 +274,11 @@ public:
 
                return S_OK;
        }
-               
+
        virtual HRESULT STDMETHODCALLTYPE GetTimecode(BMDTimecodeFormat format, IDeckLinkTimecode** timecode) {return S_FALSE;}
        virtual HRESULT STDMETHODCALLTYPE GetAncillaryData(IDeckLinkVideoFrameAncillary** ancillary)              {return S_FALSE;}
 
-       // decklink_frame       
+       // decklink_frame
 
        const core::audio_buffer& audio_data()
        {
@@ -355,8 +363,8 @@ struct key_video_context : public IDeckLinkVideoOutputCallback, boost::noncopyab
 };
 
 template <typename Configuration>
-struct decklink_consumer : public IDeckLinkVideoOutputCallback, public IDeckLinkAudioOutputCallback, boost::noncopyable
-{              
+struct decklink_consumer : public IDeckLinkVideoOutputCallback, boost::noncopyable
+{
        const int                                                                                       channel_index_;
        const configuration                                                                     config_;
 
@@ -370,7 +378,7 @@ struct decklink_consumer : public IDeckLinkVideoOutputCallback, public IDeckLink
        std::exception_ptr                                                                      exception_;
 
        tbb::atomic<bool>                                                                       is_running_;
-               
+
        const std::wstring                                                                      model_name_                             = get_model_name(decklink_);
        bool                                                                                            will_attempt_dma_;
        const core::video_format_desc                                           format_desc_;
@@ -383,15 +391,15 @@ struct decklink_consumer : public IDeckLinkVideoOutputCallback, public IDeckLink
        long long                                                                                       audio_scheduled_                = 0;
 
        int                                                                                                     preroll_count_                  = 0;
-               
+
        boost::circular_buffer<std::vector<int32_t>>            audio_container_                { buffer_size_ + 1 };
 
-       tbb::concurrent_bounded_queue<core::const_frame>        video_frame_buffer_;
-       tbb::concurrent_bounded_queue<core::const_frame>        audio_frame_buffer_;
-       
+       tbb::concurrent_bounded_queue<core::const_frame>        frame_buffer_;
+
        spl::shared_ptr<diagnostics::graph>                                     graph_;
        caspar::timer                                                                           tick_timer_;
-       retry_task<bool>                                                                        send_completion_;
+       boost::mutex                                                                            send_completion_mutex_;
+       std::packaged_task<bool ()>                                                     send_completion_;
        reference_signal_detector                                                       reference_signal_detector_      { output_ };
        tbb::atomic<int64_t>                                                            current_presentation_delay_;
        tbb::atomic<int64_t>                                                            scheduled_frames_completed_;
@@ -402,7 +410,7 @@ public:
                        const configuration& config,
                        const core::video_format_desc& format_desc,
                        const core::audio_channel_layout& in_channel_layout,
-                       int channel_index) 
+                       int channel_index)
                : channel_index_(channel_index)
                , config_(config)
                , format_desc_(format_desc)
@@ -411,18 +419,13 @@ public:
                is_running_ = true;
                current_presentation_delay_ = 0;
                scheduled_frames_completed_ = 0;
-                               
-               video_frame_buffer_.set_capacity(1);
 
-               // Blackmagic calls RenderAudioSamples() 50 times per second
-               // regardless of video mode so we sometimes need to give them
-               // samples from 2 frames in order to keep up
-               audio_frame_buffer_.set_capacity((format_desc.fps > 50.0) ? 2 : 1);
+               frame_buffer_.set_capacity(1);
 
                if (config.keyer == configuration::keyer_t::external_separate_device_keyer)
                        key_context_.reset(new key_video_context<Configuration>(config, print()));
 
-               graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f));   
+               graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f));
                graph_->set_color("late-frame", diagnostics::color(0.6f, 0.3f, 0.3f));
                graph_->set_color("dropped-frame", diagnostics::color(0.3f, 0.6f, 0.3f));
                graph_->set_color("flushed-frame", diagnostics::color(0.4f, 0.3f, 0.8f));
@@ -436,32 +439,42 @@ public:
 
                graph_->set_text(print());
                diagnostics::register_graph(graph_);
-               
+
                enable_video(get_display_mode(output_, format_desc_.format, bmdFormat8BitBGRA, bmdVideoOutputFlagDefault, will_attempt_dma_));
-                               
+
                if(config.embedded_audio)
                        enable_audio();
-               
-               set_latency(configuration_, config.latency, print());                           
+
+               set_latency(configuration_, config.latency, print());
                set_keyer(attributes_, keyer_, config.keyer, print());
 
-               if(config.embedded_audio)               
-                       output_->BeginAudioPreroll();           
-               
-               for(int n = 0; n < buffer_size_; ++n)
+               if(config.embedded_audio)
+                       output_->BeginAudioPreroll();
+
+               for (int n = 0; n < buffer_size_; ++n)
+               {
+                       if (config.embedded_audio)
+                               schedule_next_audio(core::mutable_audio_buffer(format_desc_.audio_cadence[n % format_desc_.audio_cadence.size()] * out_channel_layout_.num_channels, 0));
+
                        schedule_next_video(core::const_frame::empty());
+               }
+
+               if (config.embedded_audio)
+               {
+			// Preroll one extra frame's worth of audio
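+			// Note: duplicate of the comment above kept minimal; the extra audio frame is what keeps audio one frame ahead of video during preroll.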
+                       schedule_next_audio(core::mutable_audio_buffer(format_desc_.audio_cadence[buffer_size_ % format_desc_.audio_cadence.size()] * out_channel_layout_.num_channels, 0));
+                       output_->EndAudioPreroll();
+               }
 
-               if(!config.embedded_audio)
-                       start_playback();
+               start_playback();
        }
 
        ~decklink_consumer()
-       {               
+       {
                is_running_ = false;
-               video_frame_buffer_.try_push(core::const_frame::empty());
-               audio_frame_buffer_.try_push(core::const_frame::empty());
+               frame_buffer_.try_push(core::const_frame::empty());
 
-               if(output_ != nullptr) 
+               if(output_ != nullptr)
                {
                        output_->StopScheduledPlayback(0, nullptr, 0);
                        if(config_.embedded_audio)
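
The reworked preroll schedules buffer_size_ empty video frames together with a matching amount of silent audio per frame (sized via audio_cadence[n % cadence.size()]), then one extra frame's worth of audio before EndAudioPreroll(), and finally calls start_playback() unconditionally instead of waiting for RenderAudioSamples(). This lines up with the Blackmagic guidance quoted at the end of this file: at least three video frames but four audio frames must be prerolled, i.e. audio stays one frame ahead of video when scheduled playback starts.
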
@@ -469,25 +482,22 @@ public:
                        output_->DisableVideoOutput();
                }
        }
-       
+
        void enable_audio()
        {
                if(FAILED(output_->EnableAudioOutput(bmdAudioSampleRate48kHz, bmdAudioSampleType32bitInteger, out_channel_layout_.num_channels, bmdAudioOutputStreamTimestamped)))
                                CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Could not enable audio output."));
-                               
-               if(FAILED(output_->SetAudioCallback(this)))
-                       CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Could not set audio callback."));
 
                CASPAR_LOG(info) << print() << L" Enabled embedded-audio.";
        }
 
        void enable_video(BMDDisplayMode display_mode)
        {
-               if(FAILED(output_->EnableVideoOutput(display_mode, bmdVideoOutputFlagDefault))) 
+               if(FAILED(output_->EnableVideoOutput(display_mode, bmdVideoOutputFlagDefault)))
                        CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Could not enable fill video output."));
-               
+
                if(FAILED(output_->SetScheduledFrameCompletionCallback(this)))
-                       CASPAR_THROW_EXCEPTION(caspar_exception() 
+                       CASPAR_THROW_EXCEPTION(caspar_exception()
                                                                        << msg_info(print() + L" Failed to set fill playback completion callback.")
                                                                        << boost::errinfo_api_function("SetScheduledFrameCompletionCallback"));
 
@@ -497,17 +507,17 @@ public:
 
        void start_playback()
        {
-               if(FAILED(output_->StartScheduledPlayback(0, format_desc_.time_scale, 1.0))) 
+               if(FAILED(output_->StartScheduledPlayback(0, format_desc_.time_scale, 1.0)))
                        CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Failed to schedule fill playback."));
 
                if (key_context_ && FAILED(key_context_->output_->StartScheduledPlayback(0, format_desc_.time_scale, 1.0)))
                        CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Failed to schedule key playback."));
        }
-       
+
        virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID, LPVOID*)       {return E_NOINTERFACE;}
        virtual ULONG STDMETHODCALLTYPE AddRef()                                        {return 1;}
        virtual ULONG STDMETHODCALLTYPE Release()                               {return 1;}
-       
+
        virtual HRESULT STDMETHODCALLTYPE ScheduledPlaybackHasStopped()
        {
                is_running_ = false;
@@ -519,9 +529,15 @@ public:
        {
                if(!is_running_)
                        return E_FAIL;
-               
+
                try
                {
+                       auto tick_time = tick_timer_.elapsed()*format_desc_.fps * 0.5;
+                       graph_->set_value("tick-time", tick_time);
+                       tick_timer_.restart();
+
+                       reference_signal_detector_.detect_change([this]() { return print(); });
+
                        auto dframe = reinterpret_cast<decklink_frame*>(completed_frame);
                        current_presentation_delay_ = dframe->get_age_millis();
                        ++scheduled_frames_completed_;
@@ -538,10 +554,7 @@ public:
                        {
                                graph_->set_tag(diagnostics::tag_severity::WARNING, "late-frame");
                                video_scheduled_ += format_desc_.duration;
-                               audio_scheduled_ += dframe->audio_data().size() / out_channel_layout_.num_channels;
-                               //++video_scheduled_;
-                               //audio_scheduled_ += format_desc_.audio_cadence[0];
-                               //++audio_scheduled_;
+                               audio_scheduled_ += dframe->audio_data().size() / in_channel_layout_.num_channels;
                        }
                        else if(result == bmdOutputFrameDropped)
                                graph_->set_tag(diagnostics::tag_severity::WARNING, "dropped-frame");
@@ -552,61 +565,40 @@ public:
                        output_->GetBufferedVideoFrameCount(&buffered);
                        graph_->set_value("buffered-video", static_cast<double>(buffered) / (config_.buffer_depth()));
 
-                       auto frame = core::const_frame::empty();
-                       video_frame_buffer_.pop(frame);
-                       send_completion_.try_completion();
-                       schedule_next_video(frame);     
-               }
-               catch(...)
-               {
-                       lock(exception_mutex_, [&]
+                       if (config_.embedded_audio)
                        {
-                               exception_ = std::current_exception();
-                       });
-                       return E_FAIL;
-               }
+                               output_->GetBufferedAudioSampleFrameCount(&buffered);
+                               graph_->set_value("buffered-audio", static_cast<double>(buffered) / (format_desc_.audio_cadence[0] * config_.buffer_depth()));
+                       }
+
+                       auto frame = core::const_frame::empty();
+
+                       frame_buffer_.pop(frame);
 
-               return S_OK;
-       }
-               
-       virtual HRESULT STDMETHODCALLTYPE RenderAudioSamples(BOOL preroll)
-       {
-               if(!is_running_)
-                       return E_FAIL;
-               
-               try
-               {       
-                       if(preroll)
                        {
-                               if(++preroll_count_ >= buffer_size_)
-                               {
-                                       output_->EndAudioPreroll();
-                                       start_playback();                               
-                               }
-                               else
+                               boost::lock_guard<boost::mutex> lock(send_completion_mutex_);
+
+                               if (send_completion_.valid())
                                {
-                                       schedule_next_audio(core::mutable_audio_buffer(format_desc_.audio_cadence[preroll % format_desc_.audio_cadence.size()] * out_channel_layout_.num_channels, 0));
+                                       send_completion_();
+                                       send_completion_ = std::packaged_task<bool()>();
                                }
                        }
-                       else
-                       {
-                               auto frame = core::const_frame::empty();
 
-                               while(audio_frame_buffer_.try_pop(frame))
-                               {
-                                       UINT32 buffered;
-                                       output_->GetBufferedAudioSampleFrameCount(&buffered);
-                                       graph_->set_value("buffered-audio", static_cast<double>(buffered) / (format_desc_.audio_cadence[0] * config_.buffer_depth()));
+                       if (!is_running_)
+                               return E_FAIL;
 
-                                       send_completion_.try_completion();
-                                       schedule_next_audio(channel_remapper_.mix_and_rearrange(frame.audio_data()));
-                               }
-                       }
+                       if (config_.embedded_audio)
+                               schedule_next_audio(channel_remapper_.mix_and_rearrange(frame.audio_data()));
+
+                       schedule_next_video(frame);
                }
                catch(...)
                {
-                       tbb::spin_mutex::scoped_lock lock(exception_mutex_);
-                       exception_ = std::current_exception();
+                       lock(exception_mutex_, [&]
+                       {
+                               exception_ = std::current_exception();
+                       });
                        return E_FAIL;
                }
 
@@ -625,7 +617,7 @@ public:
 
                audio_scheduled_ += sample_frame_count;
        }
-                       
+
        void schedule_next_video(core::const_frame frame)
        {
                if (key_context_)
@@ -640,11 +632,6 @@ public:
                        CASPAR_LOG(error) << print() << L" Failed to schedule fill video.";
 
                video_scheduled_ += format_desc_.duration;
-
-               graph_->set_value("tick-time", tick_timer_.elapsed()*format_desc_.fps*0.5);
-               tick_timer_.restart();
-
-               reference_signal_detector_.detect_change([this]() { return print(); });
        }
 
        std::future<bool> send(core::const_frame frame)
@@ -655,36 +642,26 @@ public:
                });
 
                if(exception != nullptr)
-                       std::rethrow_exception(exception);              
+                       std::rethrow_exception(exception);
 
                if(!is_running_)
                        CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Is not running."));
-               
-               bool audio_ready = !config_.embedded_audio;
-               bool video_ready = false;
 
-               auto enqueue_task = [audio_ready, video_ready, frame, this]() mutable -> boost::optional<bool>
-               {
-                       if (!audio_ready)
-                               audio_ready = audio_frame_buffer_.try_push(frame);
+               if (frame_buffer_.try_push(frame))
+                       return make_ready_future(true);
 
-                       if (!video_ready)
-                               video_ready = video_frame_buffer_.try_push(frame);
+               boost::lock_guard<boost::mutex> lock(send_completion_mutex_);
 
-                       if (audio_ready && video_ready)
-                               return true;
-                       else
-                               return boost::optional<bool>();
-               };
-               
-               if (enqueue_task())
-                       return make_ready_future(true);
+               send_completion_ = std::packaged_task<bool ()>([frame, this] () mutable -> bool
+               {
+                       frame_buffer_.push(frame);
 
-               send_completion_.set_task(enqueue_task);
+                       return true;
+               });
 
                return send_completion_.get_future();
        }
-       
+
        std::wstring print() const
        {
                if (config_.keyer == configuration::keyer_t::external_separate_device_keyer)
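
The new send() path replaces retry_task with a bounded queue plus a parked std::packaged_task: the fast path is a plain try_push, and only when the queue is full is the push deferred until ScheduledFrameCompleted() drains a slot and runs the task under send_completion_mutex_. A stripped-down, standalone sketch of the same pattern (names hypothetical; only one caller may be waiting at a time, as in the consumer above):

	#include <boost/thread/mutex.hpp>
	#include <tbb/concurrent_queue.h>
	#include <future>

	template <typename T>
	struct bounded_sender
	{
		tbb::concurrent_bounded_queue<T>	queue_;
		boost::mutex						mutex_;
		std::packaged_task<bool ()>			completion_;

		explicit bounded_sender(int capacity)	{ queue_.set_capacity(capacity); }

		std::future<bool> send(T value)
		{
			if (queue_.try_push(value))			// fast path: a slot was free
			{
				std::promise<bool> ready;
				ready.set_value(true);
				return ready.get_future();
			}

			boost::lock_guard<boost::mutex> lock(mutex_);

			completion_ = std::packaged_task<bool ()>([value, this]
			{
				queue_.push(value);				// guaranteed to complete once a slot frees up
				return true;
			});

			return completion_.get_future();
		}

		void on_slot_drained()					// called from the device callback
		{
			boost::lock_guard<boost::mutex> lock(mutex_);

			if (completion_.valid())
			{
				completion_();
				completion_ = std::packaged_task<bool ()>();
			}
		}
	};
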
@@ -732,26 +709,26 @@ public:
        }
 
        // frame_consumer
-       
+
        void initialize(const core::video_format_desc& format_desc, const core::audio_channel_layout& channel_layout, int channel_index) override
        {
                format_desc_ = format_desc;
                executor_.invoke([=]
                {
                        consumer_.reset();
-                       consumer_.reset(new decklink_consumer<Configuration>(config_, format_desc, channel_layout, channel_index));                     
+                       consumer_.reset(new decklink_consumer<Configuration>(config_, format_desc, channel_layout, channel_index));
                });
        }
-       
+
        std::future<bool> send(core::const_frame frame) override
        {
                return consumer_->send(frame);
        }
-       
+
        std::wstring print() const override
        {
                return consumer_ ? consumer_->print() : L"[decklink_consumer]";
-       }               
+       }
 
        std::wstring name() const override
        {
@@ -848,16 +825,16 @@ void describe_consumer(core::help_sink& sink, const core::help_repository& repo)
 }
 
 spl::shared_ptr<core::frame_consumer> create_consumer(
-               const std::vector<std::wstring>& params, core::interaction_sink*)
+               const std::vector<std::wstring>& params, core::interaction_sink*, std::vector<spl::shared_ptr<core::video_channel>> channels)
 {
        if (params.size() < 1 || !boost::iequals(params.at(0), L"DECKLINK"))
                return core::frame_consumer::empty();
-       
+
        configuration config;
-               
+
        if (params.size() > 1)
                config.device_index = boost::lexical_cast<int>(params.at(1));
-       
+
        if (contains_param(L"INTERNAL_KEY", params))
                config.keyer = configuration::keyer_t::internal_keyer;
        else if (contains_param(L"EXTERNAL_KEY", params))
@@ -894,7 +871,7 @@ spl::shared_ptr<core::frame_consumer> create_consumer(
 }
 
 spl::shared_ptr<core::frame_consumer> create_preconfigured_consumer(
-               const boost::property_tree::wptree& ptree, core::interaction_sink*)
+               const boost::property_tree::wptree& ptree, core::interaction_sink*, std::vector<spl::shared_ptr<core::video_channel>> channels)
 {
        configuration config;
 
@@ -954,18 +931,18 @@ developer@blackmagic-design.com
 
 -----------------------------------------------------------------------------
 
-Thanks for your inquiry. The minimum number of frames that you can preroll 
-for scheduled playback is three frames for video and four frames for audio. 
+Thanks for your inquiry. The minimum number of frames that you can preroll
+for scheduled playback is three frames for video and four frames for audio.
 As you mentioned if you preroll less frames then playback will not start or
-playback will be very sporadic. From our experience with Media Express, we 
-recommended that at least seven frames are prerolled for smooth playback. 
+playback will be very sporadic. From our experience with Media Express, we
+recommended that at least seven frames are prerolled for smooth playback.
 
 Regarding the bmdDeckLinkConfigLowLatencyVideoOutput flag:
 There can be around 3 frames worth of latency on scheduled output.
 When the bmdDeckLinkConfigLowLatencyVideoOutput flag is used this latency is
-reduced  or removed for scheduled playback. If the DisplayVideoFrameSync() 
-method is used, the bmdDeckLinkConfigLowLatencyVideoOutput setting will 
-guarantee that the provided frame will be output as soon the previous 
+reduced  or removed for scheduled playback. If the DisplayVideoFrameSync()
+method is used, the bmdDeckLinkConfigLowLatencyVideoOutput setting will
+guarantee that the provided frame will be output as soon the previous
 frame output has been completed.
 ################################################################################
 */
@@ -982,10 +959,10 @@ developer@blackmagic-design.com
 
 -----------------------------------------------------------------------------
 
-Thanks for your inquiry. You could try subclassing IDeckLinkMutableVideoFrame 
-and providing a pointer to your video buffer when GetBytes() is called. 
-This may help to keep copying to a minimum. Please ensure that the pixel 
-format is in bmdFormat10BitYUV, otherwise the DeckLink API / driver will 
+Thanks for your inquiry. You could try subclassing IDeckLinkMutableVideoFrame
+and providing a pointer to your video buffer when GetBytes() is called.
+This may help to keep copying to a minimum. Please ensure that the pixel
+format is in bmdFormat10BitYUV, otherwise the DeckLink API / driver will
 have to colourspace convert which may result in additional copying.
 ################################################################################
 */
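
The second note above is essentially what decklink_frame in this file already does: instead of copying pixels into a driver-allocated frame, a thin IDeckLinkVideoFrame implementation keeps a reference to the mixer's frame and hands its buffer straight to the driver from GetBytes(). A reduced sketch of the idea (IUnknown, GetTimecode() and GetAncillaryData() omitted; 8-bit BGRA assumed, as in this consumer; exact accessor names may differ):

	struct borrowed_video_frame : IDeckLinkVideoFrame
	{
		core::const_frame		frame_;			// keeps the pixel data alive while scheduled
		core::video_format_desc	format_desc_;

		borrowed_video_frame(core::const_frame frame, core::video_format_desc format_desc)
			: frame_(std::move(frame)), format_desc_(std::move(format_desc)) {}

		virtual long STDMETHODCALLTYPE GetWidth()					{ return static_cast<long>(format_desc_.width); }
		virtual long STDMETHODCALLTYPE GetHeight()					{ return static_cast<long>(format_desc_.height); }
		virtual long STDMETHODCALLTYPE GetRowBytes()				{ return static_cast<long>(format_desc_.width * 4); }
		virtual BMDPixelFormat STDMETHODCALLTYPE GetPixelFormat()	{ return bmdFormat8BitBGRA; }
		virtual BMDFrameFlags STDMETHODCALLTYPE GetFlags()			{ return bmdFrameFlagDefault; }

		virtual HRESULT STDMETHODCALLTYPE GetBytes(void** buffer)
		{
			// No copy: expose the frame's image data directly to the driver.
			*buffer = const_cast<uint8_t*>(frame_.image_data().begin());
			return S_OK;
		}
	};
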
index 9c0de882f05e50bc7f911f0cfa6e20d81833163a..b06fe7f58577608b47cfa2a4142d02cd58ed249a 100644 (file)
@@ -34,8 +34,10 @@ namespace caspar { namespace decklink {
 
 void describe_consumer(core::help_sink& sink, const core::help_repository& repo);
 spl::shared_ptr<core::frame_consumer> create_consumer(
-               const std::vector<std::wstring>& params, core::interaction_sink*);
+               const std::vector<std::wstring>& params, core::interaction_sink*,
+               std::vector<spl::shared_ptr<core::video_channel>> channels);
 spl::shared_ptr<core::frame_consumer> create_preconfigured_consumer(
-               const boost::property_tree::wptree& ptree, core::interaction_sink*);
+               const boost::property_tree::wptree& ptree, core::interaction_sink*,
+               std::vector<spl::shared_ptr<core::video_channel>> channels);
 
-}}
\ No newline at end of file
+}}
index b64c976568fb0966d1e5e7b4926bba00433d299f..0349f5bc15052610f7e14fd8516068aa48e73258 100644 (file)
@@ -43,7 +43,9 @@
 #include <core/frame/frame_transform.h>
 #include <core/frame/frame_factory.h>
 #include <core/producer/frame_producer.h>
+#include <core/producer/framerate/framerate_producer.h>
 #include <core/monitor/monitor.h>
+#include <core/diagnostics/call_context.h>
 #include <core/mixer/audio/audio_mixer.h>
 #include <core/help/help_repository.h>
 #include <core/help/help_sink.h>
@@ -58,7 +60,7 @@
 #pragma warning (push)
 #pragma warning (disable : 4244)
 #endif
-extern "C" 
+extern "C"
 {
        #define __STDC_CONSTANT_MACROS
        #define __STDC_LIMIT_MACROS
@@ -73,7 +75,6 @@ extern "C"
 #include <functional>
 
 namespace caspar { namespace decklink {
-
 core::audio_channel_layout get_adjusted_channel_layout(core::audio_channel_layout layout)
 {
        if (layout.num_channels <= 2)
@@ -91,9 +92,18 @@ std::wstring to_string(const T& cadence)
 {
        return boost::join(cadence | boost::adaptors::transformed([](size_t i) { return boost::lexical_cast<std::wstring>(i); }), L", ");
 }
-               
+
+ffmpeg::audio_input_pad create_input_pad(const core::video_format_desc& in_format, int num_channels)
+{
+       return ffmpeg::audio_input_pad(
+                       boost::rational<int>(1, in_format.audio_sample_rate),
+                       in_format.audio_sample_rate,
+                       AVSampleFormat::AV_SAMPLE_FMT_S32,
+                       av_get_default_channel_layout(num_channels));
+}
+
 class decklink_producer : boost::noncopyable, public IDeckLinkInputCallback
-{      
+{
        const int                                                                               device_index_;
        core::monitor::subject                                                  monitor_subject_;
        spl::shared_ptr<diagnostics::graph>                             graph_;
@@ -102,29 +112,38 @@ class decklink_producer : boost::noncopyable, public IDeckLinkInputCallback
        com_ptr<IDeckLink>                                                              decklink_                       = get_device(device_index_);
        com_iface_ptr<IDeckLinkInput>                                   input_                          = iface_cast<IDeckLinkInput>(decklink_);
        com_iface_ptr<IDeckLinkAttributes>                              attributes_                     = iface_cast<IDeckLinkAttributes>(decklink_);
-       
+
        const std::wstring                                                              model_name_                     = get_model_name(decklink_);
        const std::wstring                                                              filter_;
-       
+
        core::video_format_desc                                                 in_format_desc_;
        core::video_format_desc                                                 out_format_desc_;
-       std::vector<int>                                                                audio_cadence_          = out_format_desc_.audio_cadence;
+       std::vector<int>                                                                audio_cadence_          = in_format_desc_.audio_cadence;
        boost::circular_buffer<size_t>                                  sync_buffer_            { audio_cadence_.size() };
        spl::shared_ptr<core::frame_factory>                    frame_factory_;
        core::audio_channel_layout                                              channel_layout_;
-       ffmpeg::frame_muxer                                                             muxer_                          { in_format_desc_.fps, frame_factory_, out_format_desc_, channel_layout_, filter_ };
-                       
+       ffmpeg::frame_muxer                                                             muxer_                          {
+                                                                                                                                                       in_format_desc_.framerate,
+                                                                                                                                                       { create_input_pad(in_format_desc_, channel_layout_.num_channels) },
+                                                                                                                                                       frame_factory_,
+                                                                                                                                                       out_format_desc_,
+                                                                                                                                                       channel_layout_,
+                                                                                                                                                       filter_,
+                                                                                                                                                       ffmpeg::filter::is_deinterlacing(filter_)
+                                                                                                                                               };
+
        core::constraints                                                               constraints_            { in_format_desc_.width, in_format_desc_.height };
 
        tbb::concurrent_bounded_queue<core::draw_frame> frame_buffer_;
+       core::draw_frame                                                                last_frame_                     = core::draw_frame::empty();
 
        std::exception_ptr                                                              exception_;
 
 public:
        decklink_producer(
-                       const core::video_format_desc& in_format_desc, 
-                       int device_index, 
-                       const spl::shared_ptr<core::frame_factory>& frame_factory, 
+                       const core::video_format_desc& in_format_desc,
+                       int device_index,
+                       const spl::shared_ptr<core::frame_factory>& frame_factory,
                        const core::video_format_desc& out_format_desc,
                        const core::audio_channel_layout& channel_layout,
                        const std::wstring& filter)
@@ -135,8 +154,8 @@ public:
                , frame_factory_(frame_factory)
                , channel_layout_(get_adjusted_channel_layout(channel_layout))
        {
-               frame_buffer_.set_capacity(2);
-               
+               frame_buffer_.set_capacity(4);
+
                graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f));
                graph_->set_color("late-frame", diagnostics::color(0.6f, 0.3f, 0.3f));
                graph_->set_color("frame-time", diagnostics::color(1.0f, 0.0f, 0.0f));
@@ -144,13 +163,13 @@ public:
                graph_->set_color("output-buffer", diagnostics::color(0.0f, 1.0f, 0.0f));
                graph_->set_text(print());
                diagnostics::register_graph(graph_);
-               
+
                bool will_attempt_dma;
                auto display_mode = get_display_mode(input_, in_format_desc.format, bmdFormat8BitYUV, bmdVideoInputFlagDefault, will_attempt_dma);
-               
+
                // NOTE: bmdFormat8BitARGB is currently not supported by any decklink card. (2011-05-08)
-               if(FAILED(input_->EnableVideoInput(display_mode, bmdFormat8BitYUV, 0))) 
-                       CASPAR_THROW_EXCEPTION(caspar_exception() 
+               if(FAILED(input_->EnableVideoInput(display_mode, bmdFormat8BitYUV, 0)))
+                       CASPAR_THROW_EXCEPTION(caspar_exception()
                                                                        << msg_info(print() + L" Could not enable video input.")
                                                                        << boost::errinfo_api_function("EnableVideoInput"));
 
@@ -158,23 +177,29 @@ public:
                        CASPAR_THROW_EXCEPTION(caspar_exception()
                                                                        << msg_info(print() + L" Could not enable audio input.")
                                                                        << boost::errinfo_api_function("EnableAudioInput"));
-                       
+
                if (FAILED(input_->SetCallback(this)) != S_OK)
-                       CASPAR_THROW_EXCEPTION(caspar_exception() 
+                       CASPAR_THROW_EXCEPTION(caspar_exception()
                                                                        << msg_info(print() + L" Failed to set input callback.")
                                                                        << boost::errinfo_api_function("SetCallback"));
-                       
+
                if(FAILED(input_->StartStreams()))
-                       CASPAR_THROW_EXCEPTION(caspar_exception() 
+                       CASPAR_THROW_EXCEPTION(caspar_exception()
                                                                        << msg_info(print() + L" Failed to start input stream.")
                                                                        << boost::errinfo_api_function("StartStreams"));
 
+		// Wait for the first frame before returning, or give up after 2 seconds.
+               caspar::timer timeout_timer;
+
+               while (frame_buffer_.size() < 1 && timeout_timer.elapsed() < 2.0)
+                       boost::this_thread::sleep_for(boost::chrono::milliseconds(1));
+
                CASPAR_LOG(info) << print() << L" Initialized";
        }
 
        ~decklink_producer()
        {
-               if(input_ != nullptr) 
+               if(input_ != nullptr)
                {
                        input_->StopStreams();
                        input_->DisableVideoInput();
@@ -189,14 +214,15 @@ public:
        virtual HRESULT STDMETHODCALLTYPE       QueryInterface (REFIID, LPVOID*)        {return E_NOINTERFACE;}
        virtual ULONG STDMETHODCALLTYPE         AddRef ()                                                       {return 1;}
        virtual ULONG STDMETHODCALLTYPE         Release ()                                                      {return 1;}
-               
+
        virtual HRESULT STDMETHODCALLTYPE VideoInputFormatChanged(BMDVideoInputFormatChangedEvents /*notificationEvents*/, IDeckLinkDisplayMode* newDisplayMode, BMDDetectedVideoInputFormatFlags /*detectedSignalFlags*/)
        {
                return S_OK;
        }
 
        virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(IDeckLinkVideoInputFrame* video, IDeckLinkAudioInputPacket* audio)
-       {       
+       {
+               ensure_gpf_handler_installed_for_thread("decklink-VideoInputFrameArrived");
                if(!video)
                        return S_OK;
 
@@ -206,23 +232,24 @@ public:
                        tick_timer_.restart();
 
                        caspar::timer frame_timer;
-                       
+
                        // Video
 
                        void* video_bytes = nullptr;
                        if(FAILED(video->GetBytes(&video_bytes)) || !video_bytes)
                                return S_OK;
-                       
+
                        auto video_frame = ffmpeg::create_frame();
-                                               
+
                        video_frame->data[0]                    = reinterpret_cast<uint8_t*>(video_bytes);
-                       video_frame->linesize[0]                = video->GetRowBytes();                 
+                       video_frame->linesize[0]                = video->GetRowBytes();
                        video_frame->format                             = PIX_FMT_UYVY422;
                        video_frame->width                              = video->GetWidth();
                        video_frame->height                             = video->GetHeight();
                        video_frame->interlaced_frame   = in_format_desc_.field_mode != core::field_mode::progressive;
                        video_frame->top_field_first    = in_format_desc_.field_mode == core::field_mode::upper ? 1 : 0;
-                               
+                       video_frame->key_frame                  = 1;
+
                        monitor_subject_
                                        << core::monitor::message("/file/name")                                 % model_name_
                                        << core::monitor::message("/file/path")                                 % device_index_
@@ -236,67 +263,57 @@ public:
 
                        // Audio
 
-                       auto audio_frame = ffmpeg::create_frame();
-                       audio_frame->format = AV_SAMPLE_FMT_S32;
-                       core::mutable_audio_buffer audio_buf;
+                       std::shared_ptr<core::mutable_audio_buffer>     audio_buffer;
+                       void*                                                                           audio_bytes             = nullptr;
 
-                       if (audio)
+			// It is assumed that audio is always equal to or ahead of video.
+                       if (audio && SUCCEEDED(audio->GetBytes(&audio_bytes)) && audio_bytes)
                        {
-                               void* audio_bytes = nullptr;
-                               if (FAILED(audio->GetBytes(&audio_bytes)) || !audio_bytes)
-                                       return S_OK;
-
+                               auto sample_frame_count = audio->GetSampleFrameCount();
+                               auto audio_data = reinterpret_cast<int32_t*>(audio_bytes);
 
-                               audio_frame->data[0] = reinterpret_cast<uint8_t*>(audio_bytes);
-                               audio_frame->linesize[0] = audio->GetSampleFrameCount() * channel_layout_.num_channels * sizeof(int32_t);
-                               audio_frame->nb_samples = audio->GetSampleFrameCount();
+                               audio_buffer = std::make_shared<core::mutable_audio_buffer>(
+                                       audio_data,
+                                       audio_data + sample_frame_count * channel_layout_.num_channels);
                        }
                        else
-                       {
-                               audio_buf.resize(audio_cadence_.front() * channel_layout_.num_channels, 0);
-                               audio_frame->data[0] = reinterpret_cast<uint8_t*>(audio_buf.data());
-                               audio_frame->linesize[0] = audio_cadence_.front() * channel_layout_.num_channels * sizeof(int32_t);
-                               audio_frame->nb_samples = audio_cadence_.front();
-                       }
-                                               
+                               audio_buffer = std::make_shared<core::mutable_audio_buffer>(audio_cadence_.front() * channel_layout_.num_channels, 0);
+
                        // Note: Uses 1 step rotated cadence for 1001 modes (1602, 1602, 1601, 1602, 1601)
                        // This cadence fills the audio mixer most optimally.
 
-                       sync_buffer_.push_back(audio_frame->nb_samples);
+                       sync_buffer_.push_back(audio_buffer->size() / channel_layout_.num_channels);
                        if(!boost::range::equal(sync_buffer_, audio_cadence_))
                        {
                                CASPAR_LOG(trace) << print() << L" Syncing audio. Expected cadence: " << to_string(audio_cadence_) << L" Got cadence: " << to_string(sync_buffer_);
                                return S_OK;
                        }
                        boost::range::rotate(audio_cadence_, std::begin(audio_cadence_)+1);
-                       
+
                        // PUSH
 
-                       muxer_.push_video(video_frame);
-                       muxer_.push_audio(audio_frame);
-                       
+                       muxer_.push({ audio_buffer });
+                       muxer_.push(static_cast<std::shared_ptr<AVFrame>>(video_frame));
+
                        // POLL
 
-                       auto frame = core::draw_frame::late();
-                       if(!muxer_.empty())
+                       for (auto frame = muxer_.poll(); frame != core::draw_frame::empty(); frame = muxer_.poll())
                        {
-                               frame = std::move(muxer_.front());
-                               muxer_.pop();
-
-                               if(!frame_buffer_.try_push(frame))
+                               if (!frame_buffer_.try_push(frame))
                                {
                                        auto dummy = core::draw_frame::empty();
                                        frame_buffer_.try_pop(dummy);
+
                                        frame_buffer_.try_push(frame);
-                                               
+
                                        graph_->set_tag(diagnostics::tag_severity::WARNING, "dropped-frame");
                                }
                        }
-                       
-                       graph_->set_value("frame-time", frame_timer.elapsed()*out_format_desc_.fps*0.5);        
+
+                       graph_->set_value("frame-time", frame_timer.elapsed()*out_format_desc_.fps*0.5);
                        monitor_subject_ << core::monitor::message("/profiler/time") % frame_timer.elapsed() % out_format_desc_.fps;
 
-                       graph_->set_value("output-buffer", static_cast<float>(frame_buffer_.size())/static_cast<float>(frame_buffer_.capacity()));      
+                       graph_->set_value("output-buffer", static_cast<float>(frame_buffer_.size())/static_cast<float>(frame_buffer_.capacity()));
                        monitor_subject_ << core::monitor::message("/buffer") % frame_buffer_.size() % frame_buffer_.capacity();
                }
                catch(...)
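
The cadence handling above exists because 48 kHz audio does not divide evenly into frames in the 1001-based modes: at 30000/1001 fps a frame carries 48000 * 1001 / 30000 = 1601.6 samples, so capture delivers the repeating cadence 1602, 1601, 1602, 1601, 1602 (8008 samples per 5 frames). The sync_buffer_ comparison keeps the producer locked to that pattern before pushing into the muxer, and the one-step-rotated variant in the comment (1602, 1602, 1601, ...) is used because, as noted there, it fills the audio mixer most optimally.
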
@@ -307,32 +324,42 @@ public:
 
                return S_OK;
        }
-       
+
        core::draw_frame get_frame()
        {
                if(exception_ != nullptr)
                        std::rethrow_exception(exception_);
-               
-               core::draw_frame frame = core::draw_frame::late();
-               if(!frame_buffer_.try_pop(frame))
+
+               core::draw_frame frame = last_frame_;
+
+               if (!frame_buffer_.try_pop(frame))
                        graph_->set_tag(diagnostics::tag_severity::WARNING, "late-frame");
-               graph_->set_value("output-buffer", static_cast<float>(frame_buffer_.size())/static_cast<float>(frame_buffer_.capacity()));      
+               else
+                       last_frame_ = frame;
+
+               graph_->set_value("output-buffer", static_cast<float>(frame_buffer_.size()) / static_cast<float>(frame_buffer_.capacity()));
+
                return frame;
        }
-       
+
        std::wstring print() const
        {
                return model_name_ + L" [" + boost::lexical_cast<std::wstring>(device_index_) + L"|" + in_format_desc_.name + L"]";
        }
 
+       boost::rational<int> get_out_framerate() const
+       {
+               return muxer_.out_framerate();
+       }
+
        core::monitor::subject& monitor_output()
        {
                return monitor_subject_;
        }
 };
-       
+
 class decklink_producer_proxy : public core::frame_producer_base
-{              
+{
        std::unique_ptr<decklink_producer>      producer_;
        const uint32_t                                          length_;
        executor                                                        executor_;
@@ -348,15 +375,17 @@ public:
                : executor_(L"decklink_producer[" + boost::lexical_cast<std::wstring>(device_index) + L"]")
                , length_(length)
        {
+               auto ctx = core::diagnostics::call_context::for_thread();
                executor_.invoke([=]
                {
+                       core::diagnostics::call_context::for_thread() = ctx;
                        com_initialize();
                        producer_.reset(new decklink_producer(in_format_desc, device_index, frame_factory, out_format_desc, channel_layout, filter_str));
                });
        }
 
        ~decklink_producer_proxy()
-       {               
+       {
                executor_.invoke([=]
                {
                        producer_.reset();
@@ -368,11 +397,11 @@ public:
        {
                return producer_->monitor_output();
        }
-       
+
        // frame_producer
-                               
+
        core::draw_frame receive_impl() override
-       {               
+       {
                return producer_->get_frame();
        }
 
@@ -380,17 +409,17 @@ public:
        {
                return producer_->pixel_constraints();
        }
-                       
+
        uint32_t nb_frames() const override
        {
                return length_;
        }
-       
+
        std::wstring print() const override
        {
                return producer_->print();
        }
-       
+
        std::wstring name() const override
        {
                return L"decklink";
@@ -402,6 +431,11 @@ public:
                info.add(L"type", L"decklink");
                return info;
        }
+
+       boost::rational<int> get_out_framerate() const
+       {
+               return producer_->get_out_framerate();
+       }
 };
 
 void describe_producer(core::help_sink& sink, const core::help_repository& repo)
@@ -430,11 +464,11 @@ spl::shared_ptr<core::frame_producer> create_producer(const core::frame_producer
        auto device_index       = get_param(L"DEVICE", params, -1);
        if(device_index == -1)
                device_index = boost::lexical_cast<int>(params.at(1));
-       
-       auto filter_str         = get_param(L"FILTER", params);         
-       auto length                     = get_param(L"LENGTH", params, std::numeric_limits<uint32_t>::max());   
+
+       auto filter_str         = get_param(L"FILTER", params);
+       auto length                     = get_param(L"LENGTH", params, std::numeric_limits<uint32_t>::max());
        auto in_format_desc = core::video_format_desc(get_param(L"FORMAT", params, L"INVALID"));
-               
+
        if(in_format_desc.format == core::video_format::invalid)
                in_format_desc = dependencies.format_desc;
 
@@ -450,15 +484,28 @@ spl::shared_ptr<core::frame_producer> create_producer(const core::frame_producer
 
                channel_layout = *found_layout;
        }
-                       
-       return create_destroy_proxy(spl::make_shared<decklink_producer_proxy>(
+
+       boost::ireplace_all(filter_str, L"DEINTERLACE_BOB",     L"YADIF=1:-1");
+       boost::ireplace_all(filter_str, L"DEINTERLACE_LQ",      L"SEPARATEFIELDS");
+       boost::ireplace_all(filter_str, L"DEINTERLACE",         L"YADIF=0:-1");
+
+       auto producer = spl::make_shared<decklink_producer_proxy>(
                        in_format_desc,
                        dependencies.frame_factory,
                        dependencies.format_desc,
                        channel_layout,
                        device_index,
                        filter_str,
-                       length));
-}
+                       length);
 
+       auto get_source_framerate       = [=] { return producer->get_out_framerate(); };
+       auto target_framerate           = dependencies.format_desc.framerate;
+
+       return core::create_destroy_proxy(core::create_framerate_producer(
+                       producer,
+                       get_source_framerate,
+                       target_framerate,
+                       dependencies.format_desc.field_mode,
+                       dependencies.format_desc.audio_cadence));
+}
 }}
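
With this change the DeckLink producer no longer has to match the channel frame rate itself: the raw decklink_producer_proxy is wrapped in core::create_framerate_producer, which converts from the producer's output rate (as reported by the frame muxer) to the channel's rate, field mode and audio cadence, and the DEINTERLACE_BOB / DEINTERLACE_LQ / DEINTERLACE tokens are rewritten to the corresponding ffmpeg filters (yadif / separatefields) before construction. As a usage illustration (AMCP syntax and device number are assumptions, not part of this commit): PLAY 1-10 DECKLINK DEVICE 2 FORMAT 1080i5000 FILTER DEINTERLACE_BOB would capture 1080i50 from device 2, bob-deinterlace it to 50p via yadif, and let the framerate wrapper adapt the result to whatever format the channel runs in.
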
index e60910bf1bdde96c58cd2bcde6bf6a7e5f873c21..7c34f72f4378c768136447683919fd2673b50615 100644 (file)
@@ -3,7 +3,6 @@ project (ffmpeg)
 
 set(SOURCES
                consumer/ffmpeg_consumer.cpp
-               consumer/streaming_consumer.cpp
 
                producer/audio/audio_decoder.cpp
 
@@ -25,13 +24,10 @@ set(SOURCES
                audio_channel_remapper.cpp
                ffmpeg.cpp
                ffmpeg_error.cpp
-               ffmpeg_pipeline.cpp
-               ffmpeg_pipeline_backend_internal.cpp
                StdAfx.cpp
 )
 set(HEADERS
                consumer/ffmpeg_consumer.h
-               consumer/streaming_consumer.h
 
                producer/audio/audio_decoder.h
 
@@ -53,9 +49,6 @@ set(HEADERS
 
                ffmpeg.h
                ffmpeg_error.h
-               ffmpeg_pipeline.h
-               ffmpeg_pipeline_backend.h
-               ffmpeg_pipeline_backend_internal.h
                StdAfx.h
 )
 
@@ -90,6 +83,7 @@ if (MSVC)
                        avcodec.lib
                        avutil.lib
                        avfilter.lib
+                       avdevice.lib
                        swscale.lib
                        swresample.lib
        )
@@ -103,6 +97,7 @@ else()
                        avcodec.so
                        avutil.so
                        avfilter.so
+                       avdevice.so
                        swscale.so
                        swresample.so
                        postproc.so
index a49395a870cbeab8f4d89f15eff7482cdc78762a..5b632690e5729543de7975f5a3df293794c9b789 100644 (file)
@@ -166,29 +166,8 @@ struct audio_channel_remapper::impl
 
                auto num_samples                        =       input.size() / input_layout_.num_channels;
                auto expected_output_size       =       num_samples * output_layout_.num_channels;
-               auto input_frame                        =       std::shared_ptr<AVFrame>(av_frame_alloc(), [](AVFrame* p)
-                                                                               {
-                                                                                       if (p)
-                                                                                               av_frame_free(&p);
-                                                                               });
-
-               input_frame->channels           =       input_layout_.num_channels;
-               input_frame->channel_layout     =       ffmpeg::create_channel_layout_bitmask(input_layout_.num_channels);
-               input_frame->sample_rate        =       48000;
-               input_frame->nb_samples         =       static_cast<int>(num_samples);
-               input_frame->format                     =       AV_SAMPLE_FMT_S32;
-               input_frame->pts                        =       0;
-
-               av_samples_fill_arrays(
-                               input_frame->extended_data,
-                               input_frame->linesize,
-                               reinterpret_cast<const std::uint8_t*>(input.data()),
-                               input_frame->channels,
-                               input_frame->nb_samples,
-                               static_cast<AVSampleFormat>(input_frame->format),
-                               16);
-
-               filter_->push(0, input_frame);
+
+               filter_->push(0, boost::make_iterator_range(input));
 
                auto frames = filter_->poll_all(0);
 
index fb2045b6f5cd65f3334e648b53dfae171c96a1f1..25f2b87b01dd695f08b5a9043e21ba7098f2b48e 100644 (file)
-/*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
-*
-* This file is part of CasparCG (www.casparcg.com).
-*
-* CasparCG is free software: you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation, either version 3 of the License, or
-* (at your option) any later version.
-*
-* CasparCG is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-* GNU General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
-*
-* Author: Robert Nagy, ronag89@gmail.com
-*/
 #include "../StdAfx.h"
 
-#include "../ffmpeg_error.h"
-
 #include "ffmpeg_consumer.h"
 
-#include "../producer/tbb_avcodec.h"
-
-#include <core/frame/frame.h>
-#include <core/frame/audio_channel_layout.h>
-#include <core/mixer/audio/audio_util.h>
-#include <core/consumer/frame_consumer.h>
-#include <core/video_format.h>
-#include <core/help/help_repository.h>
-#include <core/help/help_sink.h>
+#include "../ffmpeg_error.h"
+#include "../producer/util/util.h"
+#include "../producer/filter/filter.h"
+#include "../producer/filter/audio_filter.h"
 
-#include <common/array.h>
-#include <common/env.h>
 #include <common/except.h>
 #include <common/executor.h>
+#include <common/assert.h>
+#include <common/utf.h>
 #include <common/future.h>
 #include <common/diagnostics/graph.h>
-#include <common/lock.h>
-#include <common/memory.h>
-#include <common/param.h>
-#include <common/utf.h>
-#include <common/assert.h>
-#include <common/memshfl.h>
-#include <common/timer.h>
+#include <common/env.h>
+#include <common/scope_exit.h>
 #include <common/ptree.h>
+#include <common/param.h>
+#include <common/semaphore.h>
 
-#include <boost/algorithm/string.hpp>
+#include <core/consumer/frame_consumer.h>
+#include <core/frame/frame.h>
+#include <core/frame/audio_channel_layout.h>
+#include <core/video_format.h>
+#include <core/monitor/monitor.h>
+#include <core/help/help_repository.h>
+#include <core/help/help_sink.h>
+
+#include <boost/noncopyable.hpp>
+#include <boost/rational.hpp>
+#include <boost/format.hpp>
+#include <boost/algorithm/string/predicate.hpp>
 #include <boost/property_tree/ptree.hpp>
-#include <boost/filesystem.hpp>
-#include <boost/range/algorithm.hpp>
-#include <boost/range/algorithm_ext.hpp>
-#include <boost/lexical_cast.hpp>
 
-#include <tbb/spin_mutex.h>
+#pragma warning(push)
+#pragma warning(disable: 4244)
+#pragma warning(disable: 4245)
+#include <boost/crc.hpp>
+#pragma warning(pop)
+
+#include <tbb/atomic.h>
+#include <tbb/concurrent_queue.h>
+#include <tbb/parallel_invoke.h>
+#include <tbb/parallel_for.h>
 
 #include <numeric>
-#include <cstring>
 
-#if defined(_MSC_VER)
-#pragma warning (push)
-#pragma warning (disable : 4244)
-#endif
-extern "C" 
+#pragma warning(push)
+#pragma warning(disable: 4244)
+
+extern "C"
 {
        #define __STDC_CONSTANT_MACROS
        #define __STDC_LIMIT_MACROS
        #include <libavformat/avformat.h>
-       #include <libswscale/swscale.h>
+       #include <libavcodec/avcodec.h>
+       #include <libavutil/avutil.h>
+       #include <libavutil/frame.h>
        #include <libavutil/opt.h>
-       #include <libavutil/pixdesc.h>
+       #include <libavutil/imgutils.h>
        #include <libavutil/parseutils.h>
-       #include <libavutil/samplefmt.h>
-       #include <libswresample/swresample.h>
+       #include <libavfilter/avfilter.h>
+       #include <libavfilter/buffersink.h>
+       #include <libavfilter/buffersrc.h>
 }
-#if defined(_MSC_VER)
-#pragma warning (pop)
-#endif
+
+#pragma warning(pop)
 
 namespace caspar { namespace ffmpeg {
-       
-int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
+
+void set_pixel_format(AVFilterContext* sink, AVPixelFormat pix_fmt)
 {
-       AVClass* av_class = *(AVClass**)obj;
+#pragma warning (push)
+#pragma warning (disable : 4245)
 
-       if((strcmp(name, "pix_fmt") == 0 || strcmp(name, "pixel_format") == 0) && strcmp(av_class->class_name, "AVCodecContext") == 0)
-       {
-               AVCodecContext* c = (AVCodecContext*)obj;               
-               auto pix_fmt = av_get_pix_fmt(val);
-               if(pix_fmt == PIX_FMT_NONE)
-                       return -1;              
-               c->pix_fmt = pix_fmt;
-               return 0;
-       }
-       //if((strcmp(name, "r") == 0 || strcmp(name, "frame_rate") == 0) && strcmp(av_class->class_name, "AVCodecContext") == 0)
-       //{
-       //      AVCodecContext* c = (AVCodecContext*)obj;       
+       FF(av_opt_set_int_list(
+               sink,
+               "pix_fmts",
+               std::vector<AVPixelFormat>({ pix_fmt, AVPixelFormat::AV_PIX_FMT_NONE }).data(),
+               -1,
+               AV_OPT_SEARCH_CHILDREN));
 
-       //      if(c->codec_type != AVMEDIA_TYPE_VIDEO)
-       //              return -1;
+#pragma warning (pop)
+}
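Note that av_opt_set_int_list() expects a terminator-ended array: the temporary vector ends in AV_PIX_FMT_NONE, which equals the -1 terminator passed as the fourth argument, so the buffersink is offered exactly one pixel format.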
 
-       //      AVRational rate;
-       //      int ret = av_parse_video_rate(&rate, val);
-       //      if(ret < 0)
-       //              return ret;
+void adjust_video_filter(const AVCodec& codec, const core::video_format_desc& in_format, AVFilterContext* sink, std::string& filter)
+{
+       switch (codec.id)
+       {
+       case AV_CODEC_ID_DVVIDEO:
+               // Crop
+               if (in_format.format == core::video_format::ntsc)
+                       filter = u8(append_filter(u16(filter), L"crop=720:480:0:2"));
+
+               // Pixel format selection
+               if (in_format.format == core::video_format::ntsc)
+                       set_pixel_format(sink, AVPixelFormat::AV_PIX_FMT_YUV411P);
+               else if (in_format.format == core::video_format::pal)
+                       set_pixel_format(sink, AVPixelFormat::AV_PIX_FMT_YUV420P);
+               else
+                       set_pixel_format(sink, AVPixelFormat::AV_PIX_FMT_YUV422P);
 
-       //      c->time_base.num = rate.den;
-       //      c->time_base.den = rate.num;
-       //      return 0;
-       //}
+               // Scale
+               if (in_format.height == 1080)
+                       filter = u8(append_filter(u16(filter), in_format.duration == 1001
+                               ? L"scale=1280:1080"
+                               : L"scale=1440:1080"));
+               else if (in_format.height == 720)
+                       filter = u8(append_filter(u16(filter), L"scale=960:720"));
 
-       return ::av_opt_set(obj, name, val, search_flags);
+               break;
+       }
 }
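For a DV encoder this means: an NTSC input gets crop=720:480:0:2 appended to the filtergraph and the sink pinned to yuv411p, PAL gets yuv420p and any other DV flavour yuv422p; 1080-line inputs are additionally scaled to 1280x1080 (when the frame duration is 1001, i.e. the 59.94 family) or 1440x1080, and 720-line inputs to 960x720.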
 
-struct option
+void setup_codec_defaults(AVCodecContext& encoder)
 {
-       std::string name;
-       std::string value;
+       static const int MEGABIT = 1000000;
 
-       option(std::string name, std::string value)
-               : name(std::move(name))
-               , value(std::move(value))
+       switch (encoder.codec_id)
        {
+       case AV_CODEC_ID_DNXHD:
+               encoder.bit_rate = 220 * MEGABIT;
+
+               break;
+       case AV_CODEC_ID_PRORES:
+               encoder.bit_rate = encoder.width < 1280
+                               ?  63 * MEGABIT
+                               : 220 * MEGABIT;
+
+               break;
+       case AV_CODEC_ID_H264:
+               av_opt_set(encoder.priv_data,   "preset",       "ultrafast",    0);
+               av_opt_set(encoder.priv_data,   "tune",         "fastdecode",   0);
+               av_opt_set(encoder.priv_data,   "crf",          "5",                    0);
+
+               break;
        }
-};
-       
-struct output_format
-{
-       AVOutputFormat* format;
-       int                             width;
-       int                             height;
-       AVCodecID               vcodec;
-       AVCodecID               acodec;
-       int                             croptop;
-       int                             cropbot;
-
-       output_format(const core::video_format_desc& format_desc, const std::string& filename, std::vector<option>& options)
-               : format(av_guess_format(nullptr, filename.c_str(), nullptr))
-               , width(format_desc.width)
-               , height(format_desc.height)
-               , vcodec(CODEC_ID_NONE)
-               , acodec(CODEC_ID_NONE)
-               , croptop(0)
-               , cropbot(0)
-       {
-               if(boost::iequals(boost::filesystem::path(filename).extension().string(), ".dv"))
-                       set_opt("f", "dv");
-
-               boost::range::remove_erase_if(options, [&](const option& o)
-               {
-                       return set_opt(o.name, o.value);
-               });
-               
-               if(vcodec == CODEC_ID_NONE && format)
-                       vcodec = format->video_codec;
-
-               if(acodec == CODEC_ID_NONE && format)
-                       acodec = format->audio_codec;
-               
-               if(vcodec == CODEC_ID_NONE)
-                       vcodec = CODEC_ID_H264;
-               
-               if(acodec == CODEC_ID_NONE)
-                       acodec = CODEC_ID_PCM_S16LE;
-       }
-       
-       bool set_opt(const std::string& name, const std::string& value)
-       {
-               //if(name == "target")
-               //{ 
-               //      enum { PAL, NTSC, FILM, UNKNOWN } norm = UNKNOWN;
-               //      
-               //      if(name.find("pal-") != std::string::npos)
-               //              norm = PAL;
-               //      else if(name.find("ntsc-") != std::string::npos)
-               //              norm = NTSC;
-
-               //      if(norm == UNKNOWN)
-               //              CASPAR_THROW_EXCEPTION(invalid_argument() << arg_name_info("target"));
-               //      
-               //      if (name.find("-dv") != std::string::npos) 
-               //      {
-               //              set_opt("f", "dv");
-               //              if(norm == PAL)
-               //              {
-               //                      set_opt("s", "720x576");
-               //              }
-               //              else
-               //              {
-               //                      set_opt("s", "720x480");
-               //                      if(height == 486)
-               //                      {
-               //                              set_opt("croptop", "2");
-               //                              set_opt("cropbot", "4");
-               //                      }
-               //              }
-               //              set_opt("s", norm == PAL ? "720x576" : "720x480");
-               //      } 
-
-               //      return true;
-               //}
-               //else 
-               if(name == "f")
-               {
-                       format = av_guess_format(value.c_str(), nullptr, nullptr);
-
-                       if(format == nullptr)
-                               CASPAR_THROW_EXCEPTION(user_error() << msg_info("Unknown format " + value));
+}
 
-                       return true;
-               }
-               else if(name == "vcodec" || name == "v:codec")
-               {
-                       auto c = avcodec_find_encoder_by_name(value.c_str());
-                       if(c == nullptr)
-                               CASPAR_THROW_EXCEPTION(user_error() << msg_info("Unknown video codec " + value));
+bool is_pcm_s24le_not_supported(const AVFormatContext& container)
+{
+       auto name = std::string(container.oformat->name);
 
-                       vcodec = avcodec_find_encoder_by_name(value.c_str())->id;
-                       return true;
+       if (name == "mp4" || name == "dv")
+               return true;
 
-               }
-               else if(name == "acodec" || name == "a:codec")
-               {
-                       auto c = avcodec_find_encoder_by_name(value.c_str());
-                       if(c == nullptr)
-                               CASPAR_THROW_EXCEPTION(user_error() << msg_info("Unknown audio codec " + value));
+       return false;
+}
 
-                       acodec = avcodec_find_encoder_by_name(value.c_str())->id;
+template<typename Out, typename In>
+std::vector<Out> from_terminated_array(const In* array, In terminator)
+{
+       std::vector<Out> result;
 
-                       return true;
-               }
-               else if(name == "s")
-               {
-                       if(av_parse_video_size(&width, &height, value.c_str()) < 0)
-                               CASPAR_THROW_EXCEPTION(user_error() << msg_info("Unknown video size " + value));
-                       
-                       return true;
-               }
-               else if(name == "croptop")
-               {
-                       croptop = boost::lexical_cast<int>(value);
+       while (array != nullptr && *array != terminator)
+       {
+               In val          = *array;
+               Out casted      = static_cast<Out>(val);
 
-                       return true;
-               }
-               else if(name == "cropbot")
-               {
-                       cropbot = boost::lexical_cast<int>(value);
+               result.push_back(casted);
 
-                       return true;
-               }
-               
-               return false;
+               ++array;
        }
-};
 
-typedef cache_aligned_vector<uint8_t> byte_vector;
+       return result;
+}
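A brief usage sketch of the helper above, on a hypothetical encoder (the same pattern appears in configure_audio_filters() further down; the libavcodec header included at the top of this file is assumed):

    // FFmpeg describes an encoder's capabilities as terminator-ended C arrays.
    const AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_PCM_S24LE);

    auto sample_rates = from_terminated_array<int>(codec->supported_samplerates, 0);
    auto sample_fmts  = from_terminated_array<AVSampleFormat>(codec->sample_fmts,
                                                              AVSampleFormat::AV_SAMPLE_FMT_NONE);
    auto layouts      = from_terminated_array<uint64_t>(codec->channel_layouts,
                                                        static_cast<uint64_t>(0));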
 
-struct ffmpeg_consumer : boost::noncopyable
-{              
+class ffmpeg_consumer
+{
+private:
        const spl::shared_ptr<diagnostics::graph>       graph_;
-       const std::string                                                       filename_;
-       const std::string                                                       full_filename_          = u8(env::media_folder()) + filename_;
-       const std::shared_ptr<AVFormatContext>          oc_                                     { avformat_alloc_context(), avformat_free_context };
-       const core::video_format_desc                           format_desc_;
-       const core::audio_channel_layout                        channel_layout_;
-
-       core::monitor::subject                                          monitor_subject_;
-       
-       tbb::spin_mutex                                                         exception_mutex_;
-       std::exception_ptr                                                      exception_;
-       
-       std::shared_ptr<AVStream>                                       audio_st_;
+       core::monitor::subject                                          subject_;
+       std::string                                                                     path_;
+       boost::filesystem::path                                         full_path_;
+
+       std::map<std::string, std::string>                      options_;
+       bool                                                                            mono_streams_;
+
+       core::video_format_desc                                         in_video_format_;
+       core::audio_channel_layout                                      in_channel_layout_                      = core::audio_channel_layout::invalid();
+
+       std::shared_ptr<AVFormatContext>                        oc_;
+       tbb::atomic<bool>                                                       abort_request_;
+
        std::shared_ptr<AVStream>                                       video_st_;
-       
-       byte_vector                                                                     picture_buffer_;
-       byte_vector                                                                     key_picture_buf_;
-       byte_vector                                                                     audio_buffer_;
-       std::shared_ptr<SwrContext>                                     swr_;
-       std::shared_ptr<SwsContext>                                     sws_;
+       std::vector<std::shared_ptr<AVStream>>          audio_sts_;
+
+       std::int64_t                                                            video_pts_                                      = 0;
+       std::int64_t                                                            audio_pts_                                      = 0;
+
+       std::unique_ptr<audio_filter>                           audio_filter_;
+
+       // TODO: make use of already existent avfilter abstraction for video also
+    AVFilterContext*                                                   video_graph_in_;
+    AVFilterContext*                                                   video_graph_out_;
+    std::shared_ptr<AVFilterGraph>                             video_graph_;
+
+       executor                                                                        video_encoder_executor_;
+       executor                                                                        audio_encoder_executor_;
 
-       int64_t                                                                         frame_number_           = 0;
+       semaphore                                                                       tokens_                                         { 0 };
 
-       output_format                                                           output_format_;
-       bool                                                                            key_only_;
        tbb::atomic<int64_t>                                            current_encoding_delay_;
 
-       executor                                                                        executor_;
+       executor                                                                        write_executor_;
+
 public:
+
        ffmpeg_consumer(
-                       const std::string& filename,
-                       const core::video_format_desc& format_desc,
-                       const core::audio_channel_layout& channel_layout,
-                       std::vector<option> options,
-                       bool key_only)
-               : filename_(filename)
-               , format_desc_(format_desc)
-               , channel_layout_(channel_layout)
-               , output_format_(format_desc, full_filename_, options)
-               , key_only_(key_only)
-               , executor_(print())
+                       std::string path,
+                       std::string options,
+                       bool mono_streams)
+               : path_(path)
+               , full_path_(path)
+               , mono_streams_(mono_streams)
+               , audio_encoder_executor_(print() + L" audio_encoder")
+               , video_encoder_executor_(print() + L" video_encoder")
+               , write_executor_(print() + L" io")
        {
+               abort_request_ = false;
                current_encoding_delay_ = 0;
-               check_space();
-
-               // TODO: Ask stakeholders about case where file already exists.
-               boost::filesystem::remove(boost::filesystem::path(full_filename_)); // Delete the file if it exists
-
-               graph_->set_color("frame-time", diagnostics::color(0.1f, 1.0f, 0.1f));
-               graph_->set_color("dropped-frame", diagnostics::color(0.3f, 0.6f, 0.3f));
-               graph_->set_text(print());
-               diagnostics::register_graph(graph_);
-
-               executor_.set_capacity(8);
-
-               oc_->oformat = output_format_.format;
-                               
-               std::strcpy(oc_->filename, full_filename_.c_str());
-               
-               //  Add the audio and video streams using the default format codecs     and initialize the codecs.
-               video_st_ = add_video_stream(options);
-
-               if (!key_only)
-                       audio_st_ = add_audio_stream(options);
-                               
-               av_dump_format(oc_.get(), 0, full_filename_.c_str(), 1);
-                
-               // Open the output ffmpeg, if needed.
-               if (!(oc_->oformat->flags & AVFMT_NOFILE)) 
-                       THROW_ON_ERROR2(avio_open(&oc_->pb, full_filename_.c_str(), AVIO_FLAG_WRITE), "[ffmpeg_consumer]");
-                               
-               THROW_ON_ERROR2(avformat_write_header(oc_.get(), nullptr), "[ffmpeg_consumer]");
-
-               if(options.size() > 0)
+
+               for(auto it =
+                               boost::sregex_iterator(
+                                       options.begin(),
+                                       options.end(),
+                                       boost::regex("-(?<NAME>[^-\\s]+)(\\s+(?<VALUE>[^\\s]+))?"));
+                       it != boost::sregex_iterator();
+                       ++it)
                {
-                       for (auto& option : options)
-                               CASPAR_LOG(warning) << L"Invalid option: -" << u16(option.name) << L" " << u16(option.value);
+                       options_[(*it)["NAME"].str()] = (*it)["VALUE"].matched ? (*it)["VALUE"].str() : "";
                }
+
+        if (options_.find("threads") == options_.end())
+            options_["threads"] = "auto";
+
+               tokens_.release(
+                       std::max(
+                               1,
+                               try_remove_arg<int>(
+                                       options_,
+                                       boost::regex("tokens")).get_value_or(2)));
        }
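An options string such as "-f mp4 -codec:v libx264 -crf 25" therefore parses into { f=mp4, codec:v=libx264, crf=25 }; a switch without a value maps to an empty string. "threads" defaults to "auto" when not given, and the "tokens" switch (removed from the map here) controls how many frames may be queued ahead of the encoders, defaulting to 2 with a floor of 1.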
 
        ~ffmpeg_consumer()
-       {    
+       {
+               if(oc_)
+               {
+                       video_encoder_executor_.begin_invoke([&] { encode_video(core::const_frame::empty(), nullptr); });
+                       audio_encoder_executor_.begin_invoke([&] { encode_audio(core::const_frame::empty(), nullptr); });
+
+                       video_encoder_executor_.stop();
+                       audio_encoder_executor_.stop();
+                       video_encoder_executor_.join();
+                       audio_encoder_executor_.join();
+
+                       video_graph_.reset();
+                       audio_filter_.reset();
+                       video_st_.reset();
+                       audio_sts_.clear();
+
+                       write_packet(nullptr, nullptr);
+
+                       write_executor_.stop();
+                       write_executor_.join();
+
+                       FF(av_write_trailer(oc_.get()));
+
+                       if (!(oc_->oformat->flags & AVFMT_NOFILE) && oc_->pb)
+                               avio_close(oc_->pb);
+
+                       oc_.reset();
+               }
+       }
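The teardown order mirrors the encode path: an empty frame is queued to flush each encoder, the encoder executors are stopped and joined, the filter graphs and streams are released, write_packet(nullptr, nullptr) flushes the interleaving queue, the writer executor is drained, and only then are av_write_trailer() and avio_close() called, since the trailer must be written after the last packet.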
+
+       void initialize(
+                       const core::video_format_desc& format_desc,
+                       const core::audio_channel_layout& channel_layout)
+       {
                try
                {
-                       executor_.wait();
+                       static boost::regex prot_exp("^.+:.*" );
+
+                       if(!boost::regex_match(
+                                       path_,
+                                       prot_exp))
+                       {
+                               if(!full_path_.is_complete())
+                               {
+                                       full_path_ =
+                                               u8(
+                                                       env::media_folder()) +
+                                                       path_;
+                               }
+
+                               if(boost::filesystem::exists(full_path_))
+                                       boost::filesystem::remove(full_path_);
+
+                               boost::filesystem::create_directories(full_path_.parent_path());
+                       }
+
+                       graph_->set_color("frame-time", diagnostics::color(0.1f, 1.0f, 0.1f));
+                       graph_->set_color("dropped-frame", diagnostics::color(0.3f, 0.6f, 0.3f));
+                       graph_->set_text(print());
+                       diagnostics::register_graph(graph_);
+
+                       const auto oformat_name =
+                               try_remove_arg<std::string>(
+                                       options_,
+                                       boost::regex("^f|format$"));
+
+                       AVFormatContext* oc;
+
+                       FF(avformat_alloc_output_context2(
+                               &oc,
+                               nullptr,
+                               oformat_name && !oformat_name->empty() ? oformat_name->c_str() : nullptr,
+                               full_path_.string().c_str()));
+
+                       oc_.reset(
+                               oc,
+                               avformat_free_context);
+
+                       CASPAR_VERIFY(oc_->oformat);
+
+                       oc_->interrupt_callback.callback = ffmpeg_consumer::interrupt_cb;
+                       oc_->interrupt_callback.opaque   = this;
+
+                       CASPAR_VERIFY(format_desc.format != core::video_format::invalid);
+
+                       in_video_format_ = format_desc;
+                       in_channel_layout_ = channel_layout;
+
+                       CASPAR_VERIFY(oc_->oformat);
+
+                       const auto video_codec_name =
+                               try_remove_arg<std::string>(
+                                       options_,
+                                       boost::regex("^c:v|codec:v|vcodec$"));
+
+                       const auto video_codec =
+                               video_codec_name
+                                       ? avcodec_find_encoder_by_name(video_codec_name->c_str())
+                                       : avcodec_find_encoder(oc_->oformat->video_codec);
+
+                       const auto audio_codec_name =
+                               try_remove_arg<std::string>(
+                                       options_,
+                                        boost::regex("^c:a|codec:a|acodec$"));
+
+                       const auto audio_codec =
+                               audio_codec_name
+                                       ? avcodec_find_encoder_by_name(audio_codec_name->c_str())
+                                       : (is_pcm_s24le_not_supported(*oc_)
+                                               ? avcodec_find_encoder(oc_->oformat->audio_codec)
+                                               : avcodec_find_encoder_by_name("pcm_s24le"));
+
+                       if (!video_codec)
+                               CASPAR_THROW_EXCEPTION(user_error() << msg_info(
+                                               "Failed to find video codec " + (video_codec_name
+                                                               ? *video_codec_name
+                                                               : "with id " + boost::lexical_cast<std::string>(
+                                                                               oc_->oformat->video_codec))));
+                       if (!audio_codec)
+                               CASPAR_THROW_EXCEPTION(user_error() << msg_info(
+                                               "Failed to find audio codec " + (audio_codec_name
+                                                               ? *audio_codec_name
+                                                               : "with id " + boost::lexical_cast<std::string>(
+                                                                               oc_->oformat->audio_codec))));
+
+                       // Filters
+
+                       {
+                               configure_video_filters(
+                                       *video_codec,
+                                       try_remove_arg<std::string>(options_,
+                                       boost::regex("vf|f:v|filter:v")).get_value_or(""));
+
+                               configure_audio_filters(
+                                       *audio_codec,
+                                       try_remove_arg<std::string>(options_,
+                                       boost::regex("af|f:a|filter:a")).get_value_or(""));
+                       }
+
+                       // Encoders
+
+                       {
+                               auto video_options = options_;
+                               auto audio_options = options_;
+
+                               video_st_ = open_encoder(
+                                       *video_codec,
+                                       video_options,
+                                       0);
+
+                               for (int i = 0; i < audio_filter_->get_num_output_pads(); ++i)
+                                       audio_sts_.push_back(open_encoder(
+                                                       *audio_codec,
+                                                       audio_options,
+                                                       i));
+
+                               auto it = options_.begin();
+                               while(it != options_.end())
+                               {
+                                       if(video_options.find(it->first) == video_options.end() || audio_options.find(it->first) == audio_options.end())
+                                               it = options_.erase(it);
+                                       else
+                                               ++it;
+                               }
+                       }
+
+                       // Output
+                       {
+                               AVDictionary* av_opts = nullptr;
+
+                               to_dict(
+                                       &av_opts,
+                                       std::move(options_));
+
+                               CASPAR_SCOPE_EXIT
+                               {
+                                       av_dict_free(&av_opts);
+                               };
+
+                               if (!(oc_->oformat->flags & AVFMT_NOFILE))
+                               {
+                                       FF(avio_open2(
+                                               &oc_->pb,
+                                               full_path_.string().c_str(),
+                                               AVIO_FLAG_WRITE,
+                                               &oc_->interrupt_callback,
+                                               &av_opts));
+                               }
+
+                               FF(avformat_write_header(
+                                       oc_.get(),
+                                       &av_opts));
+
+                               options_ = to_map(av_opts);
+                       }
+
+                       // Dump Info
+
+                       av_dump_format(
+                               oc_.get(),
+                               0,
+                               oc_->filename,
+                               1);
+
+                       for (const auto& option : options_)
+                       {
+                               CASPAR_LOG(warning)
+                                       << L"Invalid option: -"
+                                       << u16(option.first)
+                                       << L" "
+                                       << u16(option.second);
+                       }
                }
                catch(...)
                {
-                       CASPAR_LOG_CURRENT_EXCEPTION();
+                       video_st_.reset();
+                       audio_sts_.clear();
+                       oc_.reset();
+                       throw;
                }
+       }
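Stripped of the option plumbing, the FF() error macro and the interrupt callback, initialize() together with the destructor follows the standard libavformat muxing sequence. A minimal sketch under those assumptions, with a hypothetical output path and a single video stream:

    extern "C" {
    #include <libavformat/avformat.h>
    }

    void mux_sketch()
    {
        AVFormatContext* oc = nullptr;
        avformat_alloc_output_context2(&oc, nullptr, nullptr, "output.mov"); // format guessed from the filename

        auto codec = avcodec_find_encoder(oc->oformat->video_codec);
        auto st    = avformat_new_stream(oc, codec);
        // ... fill st->codec (dimensions, time_base, pix_fmt) from the filter graph, then:
        avcodec_open2(st->codec, codec, nullptr);

        if (!(oc->oformat->flags & AVFMT_NOFILE))
            avio_open2(&oc->pb, "output.mov", AVIO_FLAG_WRITE, nullptr, nullptr);

        avformat_write_header(oc, nullptr);   // packets may be interleaved from here on
        // ... av_interleaved_write_frame(oc, &pkt) per encoded packet ...
        av_write_trailer(oc);                 // must come after the last packet

        if (!(oc->oformat->flags & AVFMT_NOFILE) && oc->pb)
            avio_close(oc->pb);
        avformat_free_context(oc);
    }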
 
-               LOG_ON_ERROR2(av_write_trailer(oc_.get()), "[ffmpeg_consumer]");
-               
-               if (!key_only_)
-                       audio_st_.reset();
-
-               video_st_.reset();
-                         
-               if (!(oc_->oformat->flags & AVFMT_NOFILE)) 
-                       LOG_ON_ERROR2(avio_close(oc_->pb), "[ffmpeg_consumer]");
+       core::monitor::subject& monitor_output()
+       {
+               return subject_;
        }
-       
-       // frame_consumer
 
-       void send(core::const_frame& frame)
+       void send(core::const_frame frame)
        {
-               auto exception = lock(exception_mutex_, [&]
+               CASPAR_VERIFY(in_video_format_.format != core::video_format::invalid);
+
+               auto frame_timer = spl::make_shared<caspar::timer>();
+
+               std::shared_ptr<void> token(
+                       nullptr,
+                       [this, frame, frame_timer](void*)
+                       {
+                               tokens_.release();
+                               current_encoding_delay_ = frame.get_age_millis();
+                               graph_->set_value("frame-time", frame_timer->elapsed() * in_video_format_.fps * 0.5);
+                       });
+               tokens_.acquire();
+
+               video_encoder_executor_.begin_invoke([=]() mutable
                {
-                       return exception_;
+                       encode_video(
+                               frame,
+                               token);
                });
 
-               if(exception != nullptr)
-                       std::rethrow_exception(exception);
-
-               executor_.begin_invoke([=]
-               {               
-                       encode(frame);
-                       current_encoding_delay_ = frame.get_age_millis();
+               audio_encoder_executor_.begin_invoke([=]() mutable
+               {
+                       encode_audio(
+                               frame,
+                               token);
                });
        }
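send() throttles the caller with a token scheme rather than a bounded queue. A standalone sketch of the idiom, with std::counting_semaphore standing in for caspar's semaphore and the executors reduced to plain calls:

    #include <memory>
    #include <semaphore>

    std::counting_semaphore<> tokens{2};      // at most two frames in flight

    void send_sketch()
    {
        tokens.acquire();                     // blocks the producer when the pipeline is full

        // Null-pointer shared_ptr: only the deleter matters. It runs when the
        // LAST copy is destroyed, i.e. after both encoders dropped theirs.
        std::shared_ptr<void> token(nullptr, [](void*) { tokens.release(); });

        auto video_job = [token] { /* encode_video(frame, token) */ };
        auto audio_job = [token] { /* encode_audio(frame, token) */ };

        video_job();                          // begin_invoke()'d on separate executors in the real code
        audio_job();
    }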
 
        bool ready_for_frame() const
        {
-               return !executor_.is_full();
+               return tokens_.permits() > 0;
        }
 
        void mark_dropped()
@@ -395,437 +514,719 @@ public:
 
        std::wstring print() const
        {
-               return L"ffmpeg[" + u16(filename_) + L"]";
+               return L"ffmpeg_consumer[" + u16(path_) + L"]";
        }
-       
-       core::monitor::subject& monitor_output()
+
+       int64_t presentation_frame_age_millis() const
        {
-               return monitor_subject_;
+               return current_encoding_delay_;
        }
 
 private:
-       std::shared_ptr<AVStream> add_video_stream(std::vector<option>& options)
-       { 
-               if(output_format_.vcodec == CODEC_ID_NONE)
-                       return nullptr;
-
-               auto st = avformat_new_stream(oc_.get(), 0);
-               if (!st)                
-                       CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Could not allocate video-stream.") << boost::errinfo_api_function("av_new_stream"));             
-
-               auto encoder = avcodec_find_encoder(output_format_.vcodec);
-               if (!encoder)
-                       CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Codec not found."));
-
-               auto c = st->codec;
-
-               avcodec_get_context_defaults3(c, encoder);
-                               
-               c->codec_id                     = output_format_.vcodec;
-               c->codec_type           = AVMEDIA_TYPE_VIDEO;
-               c->width                        = output_format_.width;
-               c->height                       = output_format_.height - output_format_.croptop - output_format_.cropbot;
-               c->time_base.den        = format_desc_.time_scale;
-               c->time_base.num        = format_desc_.duration;
-               c->gop_size                     = 25;
-               c->flags                   |= format_desc_.field_mode == core::field_mode::progressive ? 0 : (CODEC_FLAG_INTERLACED_ME | CODEC_FLAG_INTERLACED_DCT);
-               c->pix_fmt                      = c->pix_fmt != PIX_FMT_NONE ? c->pix_fmt : PIX_FMT_YUV420P;
-
-               if(c->codec_id == CODEC_ID_PRORES)
-               {                       
-                       c->bit_rate     = output_format_.width < 1280 ? 63*1000000 : 220*1000000;
-                       c->pix_fmt      = PIX_FMT_YUV422P10;
-               }
-               else if(c->codec_id == CODEC_ID_DNXHD)
+
+       static int interrupt_cb(void* ctx)
+       {
+               CASPAR_ASSERT(ctx);
+               return reinterpret_cast<ffmpeg_consumer*>(ctx)->abort_request_;
+       }
+
+       std::shared_ptr<AVStream> open_encoder(
+                       const AVCodec& codec,
+                       std::map<std::string,
+                       std::string>& options,
+                       int stream_number_for_media_type)
+       {
+               auto st =
+                       avformat_new_stream(
+                               oc_.get(),
+                               &codec);
+
+               if (!st)
+                       CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Could not allocate video-stream.") << boost::errinfo_api_function("avformat_new_stream"));
+
+               auto enc = st->codec;
+
+               CASPAR_VERIFY(enc);
+
+               switch(enc->codec_type)
                {
-                       if(c->width < 1280 || c->height < 720)
-                               CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Unsupported video dimensions."));
+                       case AVMEDIA_TYPE_VIDEO:
+                       {
+                               enc->time_base                          = video_graph_out_->inputs[0]->time_base;
+                               enc->pix_fmt                                    = static_cast<AVPixelFormat>(video_graph_out_->inputs[0]->format);
+                               enc->sample_aspect_ratio                = st->sample_aspect_ratio = video_graph_out_->inputs[0]->sample_aspect_ratio;
+                               enc->width                                      = video_graph_out_->inputs[0]->w;
+                               enc->height                                     = video_graph_out_->inputs[0]->h;
+                               enc->bit_rate_tolerance         = 400 * 1000000;
+
+                               break;
+                       }
+                       case AVMEDIA_TYPE_AUDIO:
+                       {
+                               enc->time_base                          = audio_filter_->get_output_pad_info(stream_number_for_media_type).time_base;
+                               enc->sample_fmt                         = static_cast<AVSampleFormat>(audio_filter_->get_output_pad_info(stream_number_for_media_type).format);
+                               enc->sample_rate                                = audio_filter_->get_output_pad_info(stream_number_for_media_type).sample_rate;
+                               enc->channel_layout                     = audio_filter_->get_output_pad_info(stream_number_for_media_type).channel_layout;
+                               enc->channels                           = audio_filter_->get_output_pad_info(stream_number_for_media_type).channels;
 
-                       c->bit_rate     = 220*1000000;
-                       c->pix_fmt      = PIX_FMT_YUV422P;
+                               break;
+                       }
                }
-               else if(c->codec_id == CODEC_ID_DVVIDEO)
+
+               setup_codec_defaults(*enc);
+
+               if(oc_->oformat->flags & AVFMT_GLOBALHEADER)
+                       enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
+
+               static const std::array<std::string, 4> char_id_map = {{"v", "a", "d", "s"}};
+
+               const auto char_id = char_id_map.at(enc->codec_type);
+
+               const auto codec_opts =
+                       remove_options(
+                               options,
+                               boost::regex("^(" + char_id + "?[^:]+):" + char_id + "$"));
+
+               AVDictionary* av_codec_opts = nullptr;
+
+               to_dict(
+                       &av_codec_opts,
+                       options);
+
+               to_dict(
+                       &av_codec_opts,
+                       codec_opts);
+
+               options.clear();
+
+               FF(avcodec_open2(
+                       enc,
+                       &codec,
+                       av_codec_opts ? &av_codec_opts : nullptr));
+
+               if(av_codec_opts)
                {
-                       c->width = c->height == 1280 ? 960  : c->width;
-                       
-                       if(format_desc_.format == core::video_format::ntsc)
+                       auto t =
+                               av_dict_get(
+                                       av_codec_opts,
+                                       "",
+                                        nullptr,
+                                       AV_DICT_IGNORE_SUFFIX);
+
+                       while(t)
                        {
-                               c->pix_fmt = PIX_FMT_YUV411P;
-                               output_format_.croptop = 2;
-                               output_format_.cropbot = 4;
-                               c->height                          = output_format_.height - output_format_.croptop - output_format_.cropbot;
+                               options[t->key + (codec_opts.find(t->key) != codec_opts.end() ? ":" + char_id : "")] = t->value;
+
+                               t = av_dict_get(
+                                               av_codec_opts,
+                                               "",
+                                               t,
+                                               AV_DICT_IGNORE_SUFFIX);
                        }
-                       else if(format_desc_.format == core::video_format::pal)
-                               c->pix_fmt = PIX_FMT_YUV420P;
-                       else // dv50
-                               c->pix_fmt = PIX_FMT_YUV422P;
-                       
-                       if(format_desc_.duration == 1001)                       
-                               c->width = c->height == 1080 ? 1280 : c->width;                 
-                       else
-                               c->width = c->height == 1080 ? 1440 : c->width;                 
-               }
-               else if(c->codec_id == CODEC_ID_H264)
-               {                          
-                       c->pix_fmt = PIX_FMT_YUV420P;    
-                       av_opt_set(c->priv_data, "preset", "ultrafast", 0);
-                       av_opt_set(c->priv_data, "tune",   "fastdecode",   0);
-                       av_opt_set(c->priv_data, "crf",    "5",     0);
+
+                       av_dict_free(&av_codec_opts);
                }
-               else if(c->codec_id == CODEC_ID_QTRLE)
+
+               if(enc->codec_type == AVMEDIA_TYPE_AUDIO && !(codec.capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
                {
-                       c->pix_fmt = PIX_FMT_ARGB;
+                       CASPAR_ASSERT(enc->frame_size > 0);
+                       audio_filter_->set_guaranteed_output_num_samples_per_frame(
+                                       stream_number_for_media_type,
+                                       enc->frame_size);
                }
-                                                               
-               boost::range::remove_erase_if(options, [&](const option& o)
-               {
-                       return o.name.at(0) != 'a' && ffmpeg::av_opt_set(c, o.name.c_str(), o.value.c_str(), AV_OPT_SEARCH_CHILDREN) > -1;
-               });
-                               
-               if(output_format_.format->flags & AVFMT_GLOBALHEADER)
-                       c->flags |= CODEC_FLAG_GLOBAL_HEADER;
-               
-               THROW_ON_ERROR2(tbb_avcodec_open(c, encoder, false), "[ffmpeg_consumer]");
 
-               return std::shared_ptr<AVStream>(st, [](AVStream* st)
+               return std::shared_ptr<AVStream>(st, [this](AVStream* st)
                {
-                       LOG_ON_ERROR2(tbb_avcodec_close(st->codec), "[ffmpeg_consumer]");
+                       avcodec_close(st->codec);
                });
        }
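The effect of the option routing: a stream-qualified switch such as -b:v reaches only the video encoder (char_id "v", regex ^(v?[^:]+):v$), unqualified switches are offered to every encoder, and whatever avcodec_open2() leaves unconsumed is written back into the option map (re-suffixed where it came from a qualified key), so anything accepted by no encoder and not by the muxer is eventually logged as an invalid option.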
-               
-       std::shared_ptr<AVStream> add_audio_stream(std::vector<option>& options)
-       {
-               if(output_format_.acodec == CODEC_ID_NONE)
-                       return nullptr;
-
-               auto st = avformat_new_stream(oc_.get(), nullptr);
-               if(!st)
-                       CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Could not allocate audio-stream") << boost::errinfo_api_function("av_new_stream"));              
-               
-               auto encoder = avcodec_find_encoder(output_format_.acodec);
-               if (!encoder)
-                       CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("codec not found"));
-               
-               auto c = st->codec;
-
-               avcodec_get_context_defaults3(c, encoder);
-
-               c->codec_id                     = output_format_.acodec;
-               c->codec_type           = AVMEDIA_TYPE_AUDIO;
-               c->sample_rate          = 48000;
-               c->channels                     = 2;
-               c->sample_fmt           = AV_SAMPLE_FMT_S16;
-               c->time_base.num        = 1;
-               c->time_base.den        = c->sample_rate;
-
-               if(output_format_.vcodec == CODEC_ID_FLV1)              
-                       c->sample_rate  = 44100;                
-
-               if(output_format_.format->flags & AVFMT_GLOBALHEADER)
-                       c->flags |= CODEC_FLAG_GLOBAL_HEADER;
-                               
-               boost::range::remove_erase_if(options, [&](const option& o)
-               {
-                       return ffmpeg::av_opt_set(c, o.name.c_str(), o.value.c_str(), AV_OPT_SEARCH_CHILDREN) > -1;
-               });
 
-               THROW_ON_ERROR2(avcodec_open2(c, encoder, nullptr), "[ffmpeg_consumer]");
+       void configure_video_filters(
+                       const AVCodec& codec,
+                       std::string filtergraph)
+       {
+               video_graph_.reset(
+                               avfilter_graph_alloc(),
+                               [](AVFilterGraph* p)
+                               {
+                                       avfilter_graph_free(&p);
+                               });
+
+               video_graph_->nb_threads  = boost::thread::hardware_concurrency()/2;
+               video_graph_->thread_type = AVFILTER_THREAD_SLICE;
+
+               const auto sample_aspect_ratio =
+                       boost::rational<int>(
+                                       in_video_format_.square_width,
+                                       in_video_format_.square_height) /
+                       boost::rational<int>(
+                                       in_video_format_.width,
+                                       in_video_format_.height);
+
+               const auto vsrc_options = (boost::format("video_size=%1%x%2%:pix_fmt=%3%:time_base=%4%/%5%:pixel_aspect=%6%/%7%:frame_rate=%8%/%9%")
+                       % in_video_format_.width % in_video_format_.height
+                       % AVPixelFormat::AV_PIX_FMT_BGRA
+                       % in_video_format_.duration     % in_video_format_.time_scale
+                       % sample_aspect_ratio.numerator() % sample_aspect_ratio.denominator()
+                       % in_video_format_.time_scale % in_video_format_.duration).str();
+
+               AVFilterContext* filt_vsrc = nullptr;
+               FF(avfilter_graph_create_filter(
+                               &filt_vsrc,
+                               avfilter_get_by_name("buffer"),
+                               "ffmpeg_consumer_buffer",
+                               vsrc_options.c_str(),
+                               nullptr,
+                               video_graph_.get()));
+
+               AVFilterContext* filt_vsink = nullptr;
+               FF(avfilter_graph_create_filter(
+                               &filt_vsink,
+                               avfilter_get_by_name("buffersink"),
+                               "ffmpeg_consumer_buffersink",
+                               nullptr,
+                               nullptr,
+                               video_graph_.get()));
 
-               return std::shared_ptr<AVStream>(st, [](AVStream* st)
-               {
-                       LOG_ON_ERROR2(avcodec_close(st->codec), "[ffmpeg_consumer]");
-               });
+#pragma warning (push)
+#pragma warning (disable : 4245)
+
+               FF(av_opt_set_int_list(
+                               filt_vsink,
+                               "pix_fmts",
+                               codec.pix_fmts,
+                               -1,
+                               AV_OPT_SEARCH_CHILDREN));
+
+#pragma warning (pop)
+
+               adjust_video_filter(codec, in_video_format_, filt_vsink, filtergraph);
+
+               if (in_video_format_.width < 1280)
+                       video_graph_->scale_sws_opts = "out_color_matrix=bt601";
+               else
+                       video_graph_->scale_sws_opts = "out_color_matrix=bt709";
+
+               configure_filtergraph(
+                               *video_graph_,
+                               filtergraph,
+                               *filt_vsrc,
+                               *filt_vsink);
+
+               video_graph_in_  = filt_vsrc;
+               video_graph_out_ = filt_vsink;
+
+               CASPAR_LOG(info)
+                       <<      u16(std::string("\n")
+                               + avfilter_graph_dump(
+                                               video_graph_.get(),
+                                               nullptr));
        }
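As an illustration, a 1080p format descriptor yields buffer-source arguments of the form video_size=1920x1080:pix_fmt=<numeric AV_PIX_FMT_BGRA>:time_base=<duration>/<time_scale>:pixel_aspect=1/1:frame_rate=<time_scale>/<duration>, and the out_color_matrix sws option selects bt601 for inputs narrower than 1280 pixels and bt709 otherwise.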
-  
-       void encode_video_frame(core::const_frame frame)
-       { 
-               if(!video_st_)
-                       return;
-               
-               auto enc = video_st_->codec;
-        
-               auto av_frame                           = convert_video(frame, enc);
-               av_frame->interlaced_frame      = format_desc_.field_mode != core::field_mode::progressive;
-               av_frame->top_field_first       = format_desc_.field_mode == core::field_mode::upper;
-               av_frame->pts = frame_number_++;
-
-               monitor_subject_
-                       << core::monitor::message("/frame") % static_cast<int64_t>(frame_number_)
-                       << core::monitor::message("/path") % filename_
-                       << core::monitor::message("/fps") % format_desc_.fps;
-
-               AVPacket pkt;
-               av_init_packet(&pkt);
-               pkt.data = nullptr;
-               pkt.size = 0;
 
-               int got_packet = 0;
-               THROW_ON_ERROR2(avcodec_encode_video2(enc, &pkt, av_frame.get(), &got_packet), "[ffmpeg_consumer]");
-               std::shared_ptr<AVPacket> guard(&pkt, av_free_packet);
+       void configure_audio_filters(
+                       const AVCodec& codec,
+                       std::string filtergraph)
+       {
+               int num_output_pads = 1;
 
-               if(!got_packet)
-                       return;
-                
-               if (pkt.pts != AV_NOPTS_VALUE)
-                       pkt.pts = av_rescale_q(pkt.pts, enc->time_base, video_st_->time_base);
-               if (pkt.dts != AV_NOPTS_VALUE)
-                       pkt.dts = av_rescale_q(pkt.dts, enc->time_base, video_st_->time_base);
-                
-               pkt.stream_index = video_st_->index;
-                       
-               THROW_ON_ERROR2(av_interleaved_write_frame(oc_.get(), &pkt), "[ffmpeg_consumer]");
-       }
-               
-       uint64_t get_channel_layout(AVCodecContext* dec)
-       {
-               auto layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);
-               return layout;
-       }
-               
-       void encode_audio_frame(core::const_frame frame)
-       {               
-               if(!audio_st_)
-                       return;
-               
-               auto enc = audio_st_->codec;
-
-               boost::push_back(audio_buffer_, convert_audio(frame, enc));
-                       
-               auto frame_size = enc->frame_size != 0 ? enc->frame_size * enc->channels * av_get_bytes_per_sample(enc->sample_fmt) : static_cast<int>(audio_buffer_.size());
-                       
-               while(audio_buffer_.size() >= frame_size)
-               {                       
-                       std::shared_ptr<AVFrame> av_frame(av_frame_alloc(), [=](AVFrame* p) { av_frame_free(&p); });
-                       avcodec_get_frame_defaults(av_frame.get());             
-                       av_frame->nb_samples = frame_size / (enc->channels * av_get_bytes_per_sample(enc->sample_fmt));
-
-                       AVPacket pkt;
-                       av_init_packet(&pkt);
-                       pkt.data = nullptr;
-                       pkt.size = 0;                           
-                       
-                       THROW_ON_ERROR2(avcodec_fill_audio_frame(av_frame.get(), enc->channels, enc->sample_fmt, audio_buffer_.data(), frame_size, 1), "[ffmpeg_consumer]");
-
-                       int got_packet = 0;
-                       THROW_ON_ERROR2(avcodec_encode_audio2(enc, &pkt, av_frame.get(), &got_packet), "[ffmpeg_consumer]");
-                       std::shared_ptr<AVPacket> guard(&pkt, av_free_packet);
-                               
-                       audio_buffer_.erase(audio_buffer_.begin(), audio_buffer_.begin() + frame_size);
-
-                       if(!got_packet)
-                               return;
-               
-                       if (pkt.pts != AV_NOPTS_VALUE)
-                               pkt.pts      = av_rescale_q(pkt.pts, enc->time_base, audio_st_->time_base);
-                       if (pkt.dts != AV_NOPTS_VALUE)
-                               pkt.dts      = av_rescale_q(pkt.dts, enc->time_base, audio_st_->time_base);
-                       if (pkt.duration > 0)
-                               pkt.duration = static_cast<int>(av_rescale_q(pkt.duration, enc->time_base, audio_st_->time_base));
-               
-                       pkt.stream_index = audio_st_->index;
-                                               
-                       THROW_ON_ERROR2(av_interleaved_write_frame(oc_.get(), &pkt), "[ffmpeg_consumer]");
+               if (mono_streams_)
+               {
+                       num_output_pads = in_channel_layout_.num_channels;
                }
-       }                
-       
-       std::shared_ptr<AVFrame> convert_video(core::const_frame frame, AVCodecContext* c)
-       {
-               if(!sws_) 
+
+               if (num_output_pads > 1)
                {
-                       sws_.reset(sws_getContext(format_desc_.width, 
-                                                                         format_desc_.height - output_format_.croptop  - output_format_.cropbot, 
-                                                                         PIX_FMT_BGRA,
-                                                                         c->width,
-                                                                         c->height, 
-                                                                         c->pix_fmt, 
-                                                                         SWS_BICUBIC, nullptr, nullptr, nullptr), 
-                                               sws_freeContext);
-                       if (sws_ == nullptr) 
-                               CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Cannot initialize the conversion context"));
+                       std::string splitfilter = "[a:0]channelsplit=channel_layout=";
+
+                       splitfilter += (boost::format("0x%|1$x|") % create_channel_layout_bitmask(in_channel_layout_.num_channels)).str();
+
+                       for (int i = 0; i < num_output_pads; ++i)
+                               splitfilter += "[aout:" + boost::lexical_cast<std::string>(i) + "]";
+
+                       filtergraph = u8(append_filter(u16(filtergraph), u16(splitfilter)));
                }
 
-               // #in_frame
+               std::vector<audio_output_pad> output_pads(
+                               num_output_pads,
+                               audio_output_pad(
+                                               from_terminated_array<int>(                             codec.supported_samplerates,    0),
+                                               from_terminated_array<AVSampleFormat>(  codec.sample_fmts,                              AVSampleFormat::AV_SAMPLE_FMT_NONE),
+                                               from_terminated_array<uint64_t>(                codec.channel_layouts,                  static_cast<uint64_t>(0))));
+
+               audio_filter_.reset(new audio_filter(
+                               { audio_input_pad(
+                                               boost::rational<int>(1, in_video_format_.audio_sample_rate),
+                                               in_video_format_.audio_sample_rate,
+                                               AVSampleFormat::AV_SAMPLE_FMT_S32,
+                                               create_channel_layout_bitmask(in_channel_layout_.num_channels)) },
+                                               output_pads,
+                                               filtergraph));
+       }
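For example, with mono_streams enabled and a stereo input, the generated prefix is "[a:0]channelsplit=channel_layout=0x3[aout:0][aout:1]" (assuming create_channel_layout_bitmask(2) yields the stereo mask 0x3), so the audio filter exposes two output pads and open_encoder() is called once per resulting mono stream.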
 
-               std::shared_ptr<AVFrame> in_frame(avcodec_alloc_frame(), av_free);
+       void configure_filtergraph(
+                       AVFilterGraph& graph,
+                       const std::string& filtergraph,
+                       AVFilterContext& source_ctx,
+                       AVFilterContext& sink_ctx)
+       {
+               AVFilterInOut* outputs = nullptr;
+               AVFilterInOut* inputs = nullptr;
 
-               auto in_picture = reinterpret_cast<AVPicture*>(in_frame.get());
-               
-               if (key_only_)
+               if(!filtergraph.empty())
                {
-                       key_picture_buf_.resize(frame.image_data().size());
-                       in_picture->linesize[0] = format_desc_.width * 4;
-                       in_picture->data[0] = key_picture_buf_.data();
+                       outputs = avfilter_inout_alloc();
+                       inputs  = avfilter_inout_alloc();
 
-                       aligned_memshfl(in_picture->data[0], frame.image_data().begin(), frame.image_data().size(), 0x0F0F0F0F, 0x0B0B0B0B, 0x07070707, 0x03030303);
+                       try
+                       {
+                               CASPAR_VERIFY(outputs && inputs);
+
+                               outputs->name           = av_strdup("in");
+                               outputs->filter_ctx     = &source_ctx;
+                               outputs->pad_idx                = 0;
+                               outputs->next           = nullptr;
+
+                               inputs->name                    = av_strdup("out");
+                               inputs->filter_ctx      = &sink_ctx;
+                               inputs->pad_idx         = 0;
+                               inputs->next                    = nullptr;
+                       }
+                       catch (...)
+                       {
+                               avfilter_inout_free(&outputs);
+                               avfilter_inout_free(&inputs);
+                               throw;
+                       }
+
+                       FF(avfilter_graph_parse(
+                                       &graph,
+                                       filtergraph.c_str(),
+                                       inputs,
+                                       outputs,
+                                       nullptr));
                }
                else
                {
-                       avpicture_fill(
-                                       in_picture,
-                                       const_cast<uint8_t*>(frame.image_data().begin()),
-                                       PIX_FMT_BGRA,
-                                       format_desc_.width,
-                                       format_desc_.height - output_format_.croptop  - output_format_.cropbot);
+                       FF(avfilter_link(
+                                       &source_ctx,
+                                       0,
+                                       &sink_ctx,
+                                       0));
                }
 
-               // crop-top
+               FF(avfilter_graph_config(
+                               &graph,
+                               nullptr));
+       }
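
configure_filtergraph() follows the usual libavfilter convention: the AVFilterInOut labelled "in" describes the buffer source's output pad, the one labelled "out" describes the buffersink's input pad, and both lists are handed off to avfilter_graph_parse(), which is why they are only freed explicitly on the early-throw path. A sketch of how the two endpoints might be created before calling it; the "buffer" arguments below are illustrative assumptions, not values from this commit:

    extern "C"
    {
        #include <libavfilter/avfilter.h>
        #include <libavfilter/buffersink.h>
        #include <libavfilter/buffersrc.h>
    }

    // Illustrative only (error checks omitted): allocate a graph plus the video
    // source/sink endpoints that configure_filtergraph() expects.
    static void make_video_endpoints(AVFilterGraph** graph,
                                     AVFilterContext** source_ctx,
                                     AVFilterContext** sink_ctx)
    {
        *graph = avfilter_graph_alloc();

        // Assumed input description: 1080p25 BGRA with square pixels.
        const char* src_args = "video_size=1920x1080:pix_fmt=bgra:time_base=1/25:pixel_aspect=1/1";

        avfilter_graph_create_filter(source_ctx, avfilter_get_by_name("buffer"),     "src",  src_args, nullptr, *graph);
        avfilter_graph_create_filter(sink_ctx,   avfilter_get_by_name("buffersink"), "sink", nullptr,  nullptr, *graph);

        // With an empty filtergraph string, configure_filtergraph() then simply
        // links source to sink and calls avfilter_graph_config().
    }
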
 
-               for(int n = 0; n < 4; ++n)              
-                       in_frame->data[n] += in_frame->linesize[n] * output_format_.croptop;            
-               
-               // #out_frame
+       void encode_video(core::const_frame frame_ptr, std::shared_ptr<void> token)
+       {
+               if(!video_st_)
+                       return;
 
-               std::shared_ptr<AVFrame> out_frame(avcodec_alloc_frame(), av_free);
-               
-               av_image_fill_linesizes(out_frame->linesize, c->pix_fmt, c->width);
-               for(int n = 0; n < 4; ++n)
-                       out_frame->linesize[n] += 32 - (out_frame->linesize[n] % 32); // align
+               auto enc = video_st_->codec;
 
-               picture_buffer_.resize(av_image_fill_pointers(out_frame->data, c->pix_fmt, c->height, nullptr, out_frame->linesize));
-               av_image_fill_pointers(out_frame->data, c->pix_fmt, c->height, picture_buffer_.data(), out_frame->linesize);
-               
-               // #scale
+               if(frame_ptr != core::const_frame::empty())
+               {
+                       auto src_av_frame = create_frame();
+
+                       const auto sample_aspect_ratio =
+                               boost::rational<int>(
+                                       in_video_format_.square_width,
+                                       in_video_format_.square_height) /
+                               boost::rational<int>(
+                                       in_video_format_.width,
+                                       in_video_format_.height);
+
+                       src_av_frame->format                                            = AVPixelFormat::AV_PIX_FMT_BGRA;
+                       src_av_frame->width                                             = in_video_format_.width;
+                       src_av_frame->height                                            = in_video_format_.height;
+                       src_av_frame->sample_aspect_ratio.num   = sample_aspect_ratio.numerator();
+                       src_av_frame->sample_aspect_ratio.den   = sample_aspect_ratio.denominator();
+                       src_av_frame->pts                                               = video_pts_;
+
+                       video_pts_ += 1;
+
+                       subject_
+                                       << core::monitor::message("/frame")     % video_pts_
+                                       << core::monitor::message("/path")      % path_
+                                       << core::monitor::message("/fps")       % in_video_format_.fps;
+
+                       FF(av_image_fill_arrays(
+                               src_av_frame->data,
+                               src_av_frame->linesize,
+                               frame_ptr.image_data().begin(),
+                               static_cast<AVPixelFormat>(src_av_frame->format),
+                               in_video_format_.width,
+                               in_video_format_.height,
+                               1));
+
+                       FF(av_buffersrc_add_frame(
+                               video_graph_in_,
+                               src_av_frame.get()));
+               }
+
+               int ret = 0;
 
-               sws_scale(sws_.get(), 
-                                 in_frame->data, 
-                                 in_frame->linesize,
-                                 0, 
-                                 format_desc_.height - output_format_.cropbot - output_format_.croptop, 
-                                 out_frame->data, 
-                                 out_frame->linesize);
+               while(ret >= 0)
+               {
+                       auto filt_frame = create_frame();
 
-               out_frame->format       = c->pix_fmt;
-               out_frame->width        = c->width;
-               out_frame->height       = c->height;
+                       ret = av_buffersink_get_frame(
+                               video_graph_out_,
+                               filt_frame.get());
 
-               return out_frame;
+                       video_encoder_executor_.begin_invoke([=]
+                       {
+                               if(ret == AVERROR_EOF)
+                               {
+                                       if(enc->codec->capabilities & CODEC_CAP_DELAY)
+                                       {
+                                               while(encode_av_frame(
+                                                               *video_st_,
+                                                               avcodec_encode_video2,
+                                                               nullptr, token))
+                                               {
+                                                       boost::this_thread::yield(); // TODO:
+                                               }
+                                       }
+                               }
+                               else if(ret != AVERROR(EAGAIN))
+                               {
+                                       FF_RET(ret, "av_buffersink_get_frame");
+
+                                       if (filt_frame->interlaced_frame)
+                                       {
+                                               if (enc->codec->id == AV_CODEC_ID_MJPEG)
+                                                       enc->field_order = filt_frame->top_field_first ? AV_FIELD_TT : AV_FIELD_BB;
+                                               else
+                                                       enc->field_order = filt_frame->top_field_first ? AV_FIELD_TB : AV_FIELD_BT;
+                                       }
+                                       else
+                                               enc->field_order = AV_FIELD_PROGRESSIVE;
+
+                                       filt_frame->quality = enc->global_quality;
+
+                                       if (!enc->me_threshold)
+                                               filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
+
+                                       encode_av_frame(
+                                               *video_st_,
+                                               avcodec_encode_video2,
+                                               filt_frame,
+                                               token);
+
+                                       boost::this_thread::yield(); // TODO:
+                               }
+                       });
+               }
        }
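
The loop above is the standard buffersink drain pattern: AVERROR(EAGAIN) means the graph needs more input, AVERROR_EOF means the graph is flushed (at which point a CODEC_CAP_DELAY encoder is drained with null frames), and any other non-negative return yields one filtered frame. A single-threaded sketch of that return-value handling, with hypothetical on_frame/on_eof callbacks standing in for the encoder executor:

    extern "C"
    {
        #include <libavfilter/buffersink.h>
        #include <libavutil/frame.h>
    }
    #include <functional>
    #include <memory>

    void drain_buffersink(AVFilterContext* sink_ctx,
                          const std::function<void(const std::shared_ptr<AVFrame>&)>& on_frame,
                          const std::function<void()>& on_eof)
    {
        while (true)
        {
            std::shared_ptr<AVFrame> filt_frame(
                av_frame_alloc(), [](AVFrame* f) { av_frame_free(&f); });

            const int ret = av_buffersink_get_frame(sink_ctx, filt_frame.get());

            if (ret == AVERROR(EAGAIN))
                return;              // feed more input frames first
            if (ret == AVERROR_EOF)
            {
                on_eof();            // e.g. flush an encoder with CODEC_CAP_DELAY
                return;
            }
            if (ret < 0)
                return;              // real code surfaces the error (see FF_RET above)

            on_frame(filt_frame);    // hand one filtered frame to the encoder
        }
    }
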
-       
-       byte_vector convert_audio(core::const_frame& frame, AVCodecContext* c)
+
+       void encode_audio(core::const_frame frame_ptr, std::shared_ptr<void> token)
        {
-               if(!swr_) 
+               if(audio_sts_.empty())
+                       return;
+
+               if(frame_ptr != core::const_frame::empty())
                {
-                       swr_ = std::shared_ptr<SwrContext>(swr_alloc_set_opts(nullptr,
-                                                                               get_channel_layout(c), c->sample_fmt, c->sample_rate,
-                                                                               av_get_default_channel_layout(channel_layout_.num_channels), AV_SAMPLE_FMT_S32, format_desc_.audio_sample_rate,
-                                                                               0, nullptr), [](SwrContext* p){swr_free(&p);});
+                       auto src_av_frame = create_frame();
+
+                       src_av_frame->channels                  = in_channel_layout_.num_channels;
+                       src_av_frame->channel_layout            = create_channel_layout_bitmask(in_channel_layout_.num_channels);
+                       src_av_frame->sample_rate               = in_video_format_.audio_sample_rate;
+                       src_av_frame->nb_samples                        = static_cast<int>(frame_ptr.audio_data().size()) / src_av_frame->channels;
+                       src_av_frame->format                            = AV_SAMPLE_FMT_S32;
+                       src_av_frame->pts                               = audio_pts_;
+
+                       audio_pts_ += src_av_frame->nb_samples;
+
+                       FF(av_samples_fill_arrays(
+                                       src_av_frame->extended_data,
+                                       src_av_frame->linesize,
+                                       reinterpret_cast<const std::uint8_t*>(&*frame_ptr.audio_data().begin()),
+                                       src_av_frame->channels,
+                                       src_av_frame->nb_samples,
+                                       static_cast<AVSampleFormat>(src_av_frame->format),
+                                       16));
+
+                       audio_filter_->push(0, src_av_frame);
+               }
 
-                       if(!swr_)
-                               CASPAR_THROW_EXCEPTION(bad_alloc());
+               for (int pad_id = 0; pad_id < audio_filter_->get_num_output_pads(); ++pad_id)
+               {
+                       for (auto filt_frame : audio_filter_->poll_all(pad_id))
+                       {
+                               audio_encoder_executor_.begin_invoke([=]
+                               {
+                                       encode_av_frame(
+                                                       *audio_sts_.at(pad_id),
+                                                       avcodec_encode_audio2,
+                                                       filt_frame,
+                                                       token);
+
+                                       boost::this_thread::yield(); // TODO:
+                               });
+                       }
+               }
+
+               bool eof = frame_ptr == core::const_frame::empty();
 
-                       THROW_ON_ERROR2(swr_init(swr_.get()), "[audio_decoder]");
+               if (eof)
+               {
+                       audio_encoder_executor_.begin_invoke([=]
+                       {
+                               for (int pad_id = 0; pad_id < audio_filter_->get_num_output_pads(); ++pad_id)
+                               {
+                                       auto enc = audio_sts_.at(pad_id)->codec;
+
+                                       if (enc->codec->capabilities & CODEC_CAP_DELAY)
+                                       {
+                                               while (encode_av_frame(
+                                                               *audio_sts_.at(pad_id),
+                                                               avcodec_encode_audio2,
+                                                               nullptr,
+                                                               token))
+                                               {
+                                                       boost::this_thread::yield(); // TODO:
+                                               }
+                                       }
+                               }
+                       });
                }
-                               
-               byte_vector buffer(48000);
+       }
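
encode_audio() wraps CasparCG's interleaved 32-bit samples in an AVFrame whose pts is counted in samples (i.e. a 1/sample_rate time base) and pushes it into the audio filter, which then fans the result out to one encoder per output pad. A self-contained sketch of the wrapping step, using av_get_default_channel_layout() as a stand-in for create_channel_layout_bitmask():

    extern "C"
    {
        #include <libavutil/channel_layout.h>
        #include <libavutil/frame.h>
        #include <libavutil/samplefmt.h>
    }
    #include <cstdint>
    #include <memory>
    #include <vector>

    std::shared_ptr<AVFrame> wrap_interleaved_s32(const std::vector<std::int32_t>& samples,
                                                  int num_channels,
                                                  int sample_rate,
                                                  std::int64_t pts)
    {
        std::shared_ptr<AVFrame> frame(av_frame_alloc(), [](AVFrame* f) { av_frame_free(&f); });

        frame->channels       = num_channels;
        frame->channel_layout = av_get_default_channel_layout(num_channels);
        frame->sample_rate    = sample_rate;
        frame->nb_samples     = static_cast<int>(samples.size()) / num_channels;
        frame->format         = AV_SAMPLE_FMT_S32;
        frame->pts            = pts; // counted in samples, so the next pts is pts + nb_samples

        av_samples_fill_arrays(
            frame->extended_data,
            frame->linesize,
            reinterpret_cast<const std::uint8_t*>(samples.data()),
            num_channels,
            frame->nb_samples,
            AV_SAMPLE_FMT_S32,
            16 /* alignment, matching the consumer above */);

        return frame;
    }
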
 
-               const uint8_t* in[]  = {reinterpret_cast<const uint8_t*>(frame.audio_data().data())};
-               uint8_t*       out[] = {buffer.data()};
+       template<typename F>
+       bool encode_av_frame(
+                       AVStream& st,
+                       const F& func,
+                       const std::shared_ptr<AVFrame>& src_av_frame,
+                       std::shared_ptr<void> token)
+       {
+               AVPacket pkt = {};
+               av_init_packet(&pkt);
 
-               auto channel_samples = swr_convert(swr_.get(), 
-                                                                                  out, static_cast<int>(buffer.size()) / c->channels / av_get_bytes_per_sample(c->sample_fmt), 
-                                                                                  in, static_cast<int>(frame.audio_data().size()/channel_layout_.num_channels));
+               int got_packet = 0;
 
-               buffer.resize(channel_samples * c->channels * av_get_bytes_per_sample(c->sample_fmt));  
+               FF(func(
+                       st.codec,
+                       &pkt,
+                       src_av_frame.get(),
+                       &got_packet));
 
-               return buffer;
+               if(!got_packet || pkt.size <= 0)
+                       return false;
+
+               pkt.stream_index = st.index;
+
+               if (pkt.pts != AV_NOPTS_VALUE)
+               {
+                       pkt.pts =
+                               av_rescale_q(
+                                       pkt.pts,
+                                       st.codec->time_base,
+                                       st.time_base);
+               }
+
+               if (pkt.dts != AV_NOPTS_VALUE)
+               {
+                       pkt.dts =
+                               av_rescale_q(
+                                       pkt.dts,
+                                       st.codec->time_base,
+                                       st.time_base);
+               }
+
+               pkt.duration =
+                       static_cast<int>(
+                               av_rescale_q(
+                                       pkt.duration,
+                                       st.codec->time_base, st.time_base));
+
+               write_packet(
+                       std::shared_ptr<AVPacket>(
+                               new AVPacket(pkt),
+                               [](AVPacket* p)
+                               {
+                                       av_free_packet(p);
+                                       delete p;
+                               }), token);
+
+               return true;
        }
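
encode_av_frame() rescales the packet's pts, dts and duration from the codec time base into the stream time base before handing the packet to the muxer. A small worked example of that conversion (the time bases are illustrative):

    extern "C"
    {
        #include <libavutil/mathematics.h>
        #include <libavutil/rational.h>
    }
    #include <cstdint>

    int main()
    {
        const AVRational codec_tb  = { 1, 25 };    // e.g. a 25 fps video encoder
        const AVRational stream_tb = { 1, 90000 }; // e.g. an MPEG-TS style stream time base

        const std::int64_t pts_in_codec_tb  = 10;
        const std::int64_t pts_in_stream_tb = av_rescale_q(pts_in_codec_tb, codec_tb, stream_tb);

        // 10 * (1/25) / (1/90000) == 36000 ticks
        return pts_in_stream_tb == 36000 ? 0 : 1;
    }
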
 
-       void check_space()
+       void write_packet(
+                       const std::shared_ptr<AVPacket>& pkt_ptr,
+                       std::shared_ptr<void> token)
        {
-               auto space = boost::filesystem::space(boost::filesystem::path(full_filename_).parent_path());
-               if(space.available < 512*1000000)
-                       CASPAR_THROW_EXCEPTION(file_write_error() << msg_info("out of space"));
+               write_executor_.begin_invoke([this, pkt_ptr, token]() mutable
+               {
+                       FF(av_interleaved_write_frame(
+                               oc_.get(),
+                               pkt_ptr.get()));
+               });
        }
 
-       void encode(const core::const_frame& frame)
+       template<typename T>
+       static boost::optional<T> try_remove_arg(
+                       std::map<std::string, std::string>& options,
+                       const boost::regex& expr)
        {
-               try
+               for(auto it = options.begin(); it != options.end(); ++it)
                {
-                       if(frame_number_ % 25 == 0)
-                               check_space();
+                       if(boost::regex_search(it->first, expr))
+                       {
+                               auto arg = it->second;
+                               options.erase(it);
+                               return boost::lexical_cast<T>(arg);
+                       }
+               }
 
-                       caspar::timer frame_timer;
+               return boost::optional<T>();
+       }
 
-                       encode_video_frame(frame);
-                       encode_audio_frame(frame);
+       static std::map<std::string, std::string> remove_options(
+                       std::map<std::string, std::string>& options,
+                       const boost::regex& expr)
+       {
+               std::map<std::string, std::string> result;
 
-                       graph_->set_value("frame-time", frame_timer.elapsed()*format_desc_.fps*0.5);
-               }
-               catch(...)
-               {                       
-                       lock(exception_mutex_, [&]
+               auto it = options.begin();
+               while(it != options.end())
+               {
+                       boost::smatch what;
+                       if(boost::regex_search(it->first, what, expr))
                        {
-                               exception_ = std::current_exception();
-                       });
+                               result[
+                                       what.size() > 0 && what[1].matched
+                                               ? what[1].str()
+                                               : it->first] = it->second;
+                               it = options.erase(it);
+                       }
+                       else
+                               ++it;
+               }
+
+               return result;
+       }
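
try_remove_arg() and remove_options() are how named options (codec choice, per-stream ":v"/":a" overrides, and so on) are peeled out of the flat "-name value" map before the leftovers are passed on to FFmpeg as dictionaries. A hypothetical usage sketch, treating the two private helpers as free functions with the behaviour shown above:

    #include <boost/optional.hpp>
    #include <boost/regex.hpp>
    #include <map>
    #include <string>

    int main()
    {
        std::map<std::string, std::string> options{
            { "vcodec", "libx264" },
            { "crf",    "25" },
            { "b:v",    "4M" } };

        const auto vcodec = try_remove_arg<std::string>(options, boost::regex("^c:v$|^codec:v$|^vcodec$"));
        // vcodec == "libx264"; options now holds only "crf" and "b:v".

        const auto video_codec_opts = remove_options(options, boost::regex("^(.+):v$"));
        // video_codec_opts == { "b" -> "4M" }, the ":v" suffix stripped by the capture group.

        return vcodec && *vcodec == "libx264" ? 0 : 1;
    }
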
+
+       static void to_dict(AVDictionary** dest, const std::map<std::string, std::string>& c)
+       {
+               for (const auto& entry : c)
+               {
+                       av_dict_set(
+                               dest,
+                               entry.first.c_str(),
+                               entry.second.c_str(), 0);
                }
        }
+
+       static std::map<std::string, std::string> to_map(AVDictionary* dict)
+       {
+               std::map<std::string, std::string> result;
+
+               for(auto t = dict
+                               ? av_dict_get(
+                                       dict,
+                                       "",
+                                       nullptr,
+                                       AV_DICT_IGNORE_SUFFIX)
+                               : nullptr;
+                       t;
+                       t = av_dict_get(
+                               dict,
+                               "",
+                               t,
+                               AV_DICT_IGNORE_SUFFIX))
+               {
+                       result[t->key] = t->value;
+               }
+
+               return result;
+       }
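
to_dict()/to_map() support a round trip through AVDictionary: avformat_write_header() and avcodec_open2() remove the entries they consume, so whatever survives the round trip is an option FFmpeg did not recognise (the deleted streaming_consumer below logs exactly those as warnings). A minimal sketch of that pattern:

    extern "C"
    {
        #include <libavutil/dict.h>
    }
    #include <cstdio>

    int main()
    {
        AVDictionary* opts = nullptr;
        av_dict_set(&opts, "movflags", "faststart", 0);
        av_dict_set(&opts, "not_a_real_option", "42", 0);

        // ... pass &opts to avformat_write_header()/avcodec_open2() here;
        //     entries that are consumed get removed from the dictionary ...

        for (AVDictionaryEntry* t = av_dict_get(opts, "", nullptr, AV_DICT_IGNORE_SUFFIX);
             t;
             t = av_dict_get(opts, "", t, AV_DICT_IGNORE_SUFFIX))
        {
            std::printf("unrecognised option: -%s %s\n", t->key, t->value);
        }

        av_dict_free(&opts);
        return 0;
    }
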
 };
 
+int crc16(const std::string& str)
+{
+       boost::crc_16_type result;
+
+       result.process_bytes(str.data(), str.length());
+
+       return result.checksum();
+}
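
crc16() gives the proxy a deterministic per-path offset: unless compatibility mode pins the index to the legacy value 200, index() below reports 100000 + crc16(path), so every target path maps to a stable consumer index well clear of the built-in consumers. For illustration:

    #include <boost/crc.hpp>
    #include <string>

    // Illustrative only: the index a non-compatibility-mode proxy reports for a path.
    int consumer_index_for(const std::string& path)
    {
        boost::crc_16_type result;
        result.process_bytes(path.data(), path.length());
        return 100000 + result.checksum();
    }
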
+
 struct ffmpeg_consumer_proxy : public core::frame_consumer
 {
-       const std::wstring                      filename_;
-       const std::vector<option>       options_;
-       const bool                                      separate_key_;
+       const std::string                                       path_;
+       const std::string                                       options_;
+       const bool                                                      separate_key_;
+       const bool                                                      mono_streams_;
+       const bool                                                      compatibility_mode_;
+       int                                                                     consumer_index_offset_;
 
-       std::unique_ptr<ffmpeg_consumer> consumer_;
-       std::unique_ptr<ffmpeg_consumer> key_only_consumer_;
+       std::unique_ptr<ffmpeg_consumer>        consumer_;
+       std::unique_ptr<ffmpeg_consumer>        key_only_consumer_;
 
 public:
 
-       ffmpeg_consumer_proxy(const std::wstring& filename, const std::vector<option>& options, bool separate_key)
-               : filename_(filename)
+       ffmpeg_consumer_proxy(const std::string& path, const std::string& options, bool separate_key, bool mono_streams, bool compatibility_mode)
+               : path_(path)
                , options_(options)
                , separate_key_(separate_key)
+               , mono_streams_(mono_streams)
+               , compatibility_mode_(compatibility_mode)
+               , consumer_index_offset_(crc16(path))
        {
        }
-       
+
        void initialize(const core::video_format_desc& format_desc, const core::audio_channel_layout& channel_layout, int) override
        {
-               if(consumer_)
+               if (consumer_)
                        CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info("Cannot reinitialize ffmpeg-consumer."));
 
-               consumer_.reset(new ffmpeg_consumer(u8(filename_), format_desc, channel_layout, options_, false));
+               consumer_.reset(new ffmpeg_consumer(path_, options_, mono_streams_));
+               consumer_->initialize(format_desc, channel_layout);
 
                if (separate_key_)
                {
-                       boost::filesystem::path fill_file(filename_);
-                       auto without_extension = u16(fill_file.stem().string());
+                       boost::filesystem::path fill_file(path_);
+                       auto without_extension = u16(fill_file.parent_path().string() + "/" + fill_file.stem().string());
                        auto key_file = without_extension + L"_A" + u16(fill_file.extension().string());
 
-                       key_only_consumer_.reset(new ffmpeg_consumer(u8(key_file), format_desc, channel_layout, options_, true));
+                       key_only_consumer_.reset(new ffmpeg_consumer(u8(key_file), options_, mono_streams_));
+                       key_only_consumer_->initialize(format_desc, channel_layout);
                }
        }
 
        int64_t presentation_frame_age_millis() const override
        {
-               return consumer_ ? static_cast<int64_t>(consumer_->current_encoding_delay_) : 0;
+               return consumer_ ? static_cast<int64_t>(consumer_->presentation_frame_age_millis()) : 0;
        }
 
        std::future<bool> send(core::const_frame frame) override
        {
                bool ready_for_frame = consumer_->ready_for_frame();
-               
+
                if (ready_for_frame && separate_key_)
                        ready_for_frame = ready_for_frame && key_only_consumer_->ready_for_frame();
 
                if (ready_for_frame)
                {
                        consumer_->send(frame);
-                       
+
                        if (separate_key_)
-                               key_only_consumer_->send(frame);
+                               key_only_consumer_->send(frame.key_only());
                }
                else
                {
                        consumer_->mark_dropped();
-                       
+
                        if (separate_key_)
                                key_only_consumer_->mark_dropped();
                }
-               
+
                return make_ready_future(true);
        }
-       
+
        std::wstring print() const override
        {
                return consumer_ ? consumer_->print() : L"[ffmpeg_consumer]";
@@ -833,18 +1234,21 @@ public:
 
        std::wstring name() const override
        {
-               return L"file";
+               return L"ffmpeg";
        }
 
        boost::property_tree::wptree info() const override
        {
                boost::property_tree::wptree info;
-               info.add(L"type", L"file");
-               info.add(L"filename", filename_);
-               info.add(L"separate_key", separate_key_);
+
+               info.add(L"type",                       L"ffmpeg");
+               info.add(L"path",                       u16(path_));
+               info.add(L"separate_key",       separate_key_);
+               info.add(L"mono_streams",       mono_streams_);
+
                return info;
        }
-               
+
        bool has_synchronization_clock() const override
        {
                return false;
@@ -857,84 +1261,62 @@ public:
 
        int index() const override
        {
-               return 200;
+               return compatibility_mode_ ? 200 : 100000 + consumer_index_offset_;
        }
 
-       core::monitor::subject& monitor_output()
+       core::monitor::subject& monitor_output() override
        {
                return consumer_->monitor_output();
        }
 };
 
-void describe_consumer(core::help_sink& sink, const core::help_repository& repo)
+void describe_ffmpeg_consumer(core::help_sink& sink, const core::help_repository& repo)
 {
-       sink.short_description(L"Can record a channel to a file supported by FFmpeg.");
-       sink.syntax(L"FILE [filename:string] {-[ffmpeg_param1:string] [value1:string] {-[ffmpeg_param2:string] [value2:string] {...}}} {[separate_key:SEPARATE_KEY]}");
-       sink.para()->text(L"Can record a channel to a file supported by FFmpeg.");
+       sink.short_description(L"For streaming/recording the contents of a channel using FFmpeg.");
+       sink.syntax(L"FILE,STREAM [filename:string],[url:string] {-[ffmpeg_param1:string] [value1:string] {-[ffmpeg_param2:string] [value2:string] {...}}} {[separate_key:SEPARATE_KEY]} {[mono_streams:MONO_STREAMS]}");
+       sink.para()->text(L"For recording or streaming the contents of a channel using FFmpeg");
        sink.definitions()
-               ->item(L"filename", L"The filename under the media folder including the extension (decides which kind of container format that will be used).")
-               ->item(L"ffmpeg_paramX", L"A parameter supported by FFmpeg. For example vcodec or acodec etc.")
-               ->item(L"separate_key", L"If defined will create two files simultaneously -- One for fill and one for key (_A will be appended).")
-               ;
+               ->item(L"filename",                     L"The filename under the media folder including the extension (decides which kind of container format that will be used).")
+               ->item(L"url",                          L"If the filename is given in the form of an URL a network stream will be created instead of a file on disk.")
+               ->item(L"ffmpeg_paramX",                L"A parameter supported by FFmpeg. For example vcodec or acodec etc.")
+               ->item(L"separate_key",         L"If defined will create two files simultaneously -- One for fill and one for key (_A will be appended).")
+               ->item(L"mono_streams",         L"If defined every audio channel will be written to its own audio stream.");
        sink.para()->text(L"Examples:");
        sink.example(L">> ADD 1 FILE output.mov -vcodec dnxhd");
        sink.example(L">> ADD 1 FILE output.mov -vcodec prores");
        sink.example(L">> ADD 1 FILE output.mov -vcodec dvvideo");
        sink.example(L">> ADD 1 FILE output.mov -vcodec libx264 -preset ultrafast -tune fastdecode -crf 25");
        sink.example(L">> ADD 1 FILE output.mov -vcodec dnxhd SEPARATE_KEY", L"for creating output.mov with fill and output_A.mov with key/alpha");
+       sink.example(L">> ADD 1 FILE output.mxf -vcodec dnxhd MONO_STREAMS", L"for creating output.mxf with every audio channel encoded in its own mono stream.");
+       sink.example(L">> ADD 1 STREAM udp://<client_ip_address>:9250 -format mpegts -vcodec libx264 -crf 25 -tune zerolatency -preset ultrafast",
+               L"for streaming over UDP instead of creating a local file.");
 }
 
-spl::shared_ptr<core::frame_consumer> create_consumer(
-               const std::vector<std::wstring>& params, core::interaction_sink*)
+spl::shared_ptr<core::frame_consumer> create_ffmpeg_consumer(
+               const std::vector<std::wstring>& params, core::interaction_sink*, std::vector<spl::shared_ptr<core::video_channel>> channels)
 {
-       auto params2 = params;
-       auto separate_key_it = std::find_if(params2.begin(), params2.end(), param_comparer(L"SEPARATE_KEY"));
-       bool separate_key = false;
-
-       if (separate_key_it != params2.end())
-       {
-               separate_key = true;
-               params2.erase(separate_key_it);
-       }
+       if (params.size() < 1 || (!boost::iequals(params.at(0), L"STREAM") && !boost::iequals(params.at(0), L"FILE")))
+               return core::frame_consumer::empty();
 
-       auto str = std::accumulate(params2.begin(), params2.end(), std::wstring(), [](const std::wstring& lhs, const std::wstring& rhs) {return lhs + L" " + rhs;});
-       
-       boost::wregex path_exp(LR"(\s*FILE(\s(?<PATH>.+\.[^\s]+))?.*)", boost::regex::icase);
+       auto params2                    = params;
+       bool separate_key               = get_and_consume_flag(L"SEPARATE_KEY", params2);
+       bool mono_streams               = get_and_consume_flag(L"MONO_STREAMS", params2);
+       auto compatibility_mode = boost::iequals(params.at(0), L"FILE");
+       auto path                               = u8(params2.size() > 1 ? params2.at(1) : L"");
+       auto args                               = u8(boost::join(params2, L" "));
 
-       boost::wsmatch path;
-       if(!boost::regex_match(str, path, path_exp))
-               return core::frame_consumer::empty();
-       
-       boost::wregex opt_exp(LR"(-((?<NAME>[^\s]+)\s+(?<VALUE>[^\s]+)))");     
-       
-       std::vector<option> options;
-       for(boost::wsregex_iterator it(str.begin(), str.end(), opt_exp); it != boost::wsregex_iterator(); ++it)
-       {
-               auto name  = u8(boost::trim_copy(boost::to_lower_copy((*it)["NAME"].str())));
-               auto value = u8(boost::trim_copy(boost::to_lower_copy((*it)["VALUE"].str())));
-               
-               if(value == "h264")
-                       value = "libx264";
-               else if(value == "dvcpro")
-                       value = "dvvideo";
-
-               options.push_back(option(name, value));
-       }
-                               
-       return spl::make_shared<ffmpeg_consumer_proxy>(path["PATH"].str(), options, separate_key);
+       return spl::make_shared<ffmpeg_consumer_proxy>(path, args, separate_key, mono_streams, compatibility_mode);
 }
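
For reference, this is how an AMCP command is decomposed into the proxy's constructor arguments: the SEPARATE_KEY and MONO_STREAMS flags are consumed, the second token becomes the path, and whatever remains in params2 is re-joined into the options string that the consumer later scans for "-name value" pairs. A worked example using the placeholder address from the help text above:

    // >> ADD 1 STREAM udp://<client_ip_address>:9250 -format mpegts -vcodec libx264 MONO_STREAMS
    //
    // params             = { "STREAM", "udp://<client_ip_address>:9250",
    //                        "-format", "mpegts", "-vcodec", "libx264", "MONO_STREAMS" }
    // separate_key       = false
    // mono_streams       = true     (flag removed from params2)
    // compatibility_mode = false    (first token is "STREAM", not "FILE")
    // path               = "udp://<client_ip_address>:9250"
    // args               = "STREAM udp://<client_ip_address>:9250 -format mpegts -vcodec libx264"
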
 
-spl::shared_ptr<core::frame_consumer> create_preconfigured_consumer(
-               const boost::property_tree::wptree& ptree, core::interaction_sink*)
+spl::shared_ptr<core::frame_consumer> create_preconfigured_ffmpeg_consumer(
+               const boost::property_tree::wptree& ptree, core::interaction_sink*, std::vector<spl::shared_ptr<core::video_channel>> channels)
 {
-       auto filename           = ptree_get<std::wstring>(ptree, L"path");
-       auto codec                      = ptree.get(L"vcodec", L"libx264");
-       auto separate_key       = ptree.get(L"separate-key", false);
-
-       std::vector<option> options;
-       options.push_back(option("vcodec", u8(codec)));
-       
-       return spl::make_shared<ffmpeg_consumer_proxy>(filename, options, separate_key);
+       return spl::make_shared<ffmpeg_consumer_proxy>(
+                       u8(ptree_get<std::wstring>(ptree, L"path")),
+                       u8(ptree.get<std::wstring>(L"args", L"")),
+                       ptree.get<bool>(L"separate-key", false),
+                       ptree.get<bool>(L"mono-streams", false),
+                       false);
 }
 
 }}
index ac27b94e915b1d846d431693e043e71098844aaa..053e40439f46449c81ad1c135b92d836fd936bc4 100644 (file)
@@ -1,24 +1,3 @@
-/*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
-*
-* This file is part of CasparCG (www.casparcg.com).
-*
-* CasparCG is free software: you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation, either version 3 of the License, or
-* (at your option) any later version.
-*
-* CasparCG is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-* GNU General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
-*
-* Author: Robert Nagy, ronag89@gmail.com
-*/
-
 #pragma once
 
 #include <common/memory.h>
 
 namespace caspar { namespace ffmpeg {
 
-void describe_consumer(core::help_sink& sink, const core::help_repository& repo);
-spl::shared_ptr<core::frame_consumer> create_consumer(const std::vector<std::wstring>& params, core::interaction_sink*);
-spl::shared_ptr<core::frame_consumer> create_preconfigured_consumer(const boost::property_tree::wptree& ptree, core::interaction_sink*);
+void describe_ffmpeg_consumer(core::help_sink& sink, const core::help_repository& repo);
+spl::shared_ptr<core::frame_consumer> create_ffmpeg_consumer(
+               const std::vector<std::wstring>& params, core::interaction_sink*, std::vector<spl::shared_ptr<core::video_channel>> channels);
+spl::shared_ptr<core::frame_consumer> create_preconfigured_ffmpeg_consumer(
+               const boost::property_tree::wptree& ptree, core::interaction_sink*, std::vector<spl::shared_ptr<core::video_channel>> channels);
 
-}}
\ No newline at end of file
+}}
diff --git a/modules/ffmpeg/consumer/streaming_consumer.cpp b/modules/ffmpeg/consumer/streaming_consumer.cpp
deleted file mode 100644 (file)
index 39904a0..0000000
+++ /dev/null
@@ -1,1308 +0,0 @@
-#include "../StdAfx.h"
-
-#include "ffmpeg_consumer.h"
-
-#include "../ffmpeg_error.h"
-
-#include <common/except.h>
-#include <common/executor.h>
-#include <common/assert.h>
-#include <common/utf.h>
-#include <common/future.h>
-#include <common/env.h>
-#include <common/scope_exit.h>
-#include <common/ptree.h>
-
-#include <core/consumer/frame_consumer.h>
-#include <core/frame/frame.h>
-#include <core/frame/audio_channel_layout.h>
-#include <core/video_format.h>
-#include <core/monitor/monitor.h>
-#include <core/help/help_repository.h>
-#include <core/help/help_sink.h>
-
-#include <boost/noncopyable.hpp>
-#include <boost/rational.hpp>
-#include <boost/format.hpp>
-#include <boost/algorithm/string/predicate.hpp>
-#include <boost/property_tree/ptree.hpp>
-
-#pragma warning(push)
-#pragma warning(disable: 4244)
-#pragma warning(disable: 4245)
-#include <boost/crc.hpp>
-#pragma warning(pop)
-
-#include <tbb/atomic.h>
-#include <tbb/concurrent_queue.h>
-#include <tbb/parallel_invoke.h>
-#include <tbb/parallel_for.h>
-
-#include <numeric>
-
-#pragma warning(push)
-#pragma warning(disable: 4244)
-
-extern "C" 
-{
-       #define __STDC_CONSTANT_MACROS
-       #define __STDC_LIMIT_MACROS
-       #include <libavformat/avformat.h>
-       #include <libavcodec/avcodec.h>
-       #include <libavutil/avutil.h>
-       #include <libavutil/frame.h>
-       #include <libavutil/opt.h>
-       #include <libavutil/imgutils.h>
-       #include <libavutil/parseutils.h>
-       #include <libavfilter/avfilter.h>
-       #include <libavfilter/buffersink.h>
-       #include <libavfilter/buffersrc.h>
-}
-
-#pragma warning(pop)
-
-namespace caspar { namespace ffmpeg {
-
-int crc16(const std::string& str)
-{
-       boost::crc_16_type result;
-
-       result.process_bytes(str.data(), str.length());
-
-       return result.checksum();
-}
-
-class streaming_consumer final : public core::frame_consumer
-{
-public:
-       // Static Members
-               
-private:
-       core::monitor::subject                                          subject_;
-       boost::filesystem::path                                         path_;
-       int                                                                                     consumer_index_offset_;
-
-       std::map<std::string, std::string>                      options_;
-       bool                                                                            compatibility_mode_;
-                                                                                               
-       core::video_format_desc                                         in_video_format_;
-       core::audio_channel_layout                                      in_channel_layout_                      = core::audio_channel_layout::invalid();
-
-       std::shared_ptr<AVFormatContext>                        oc_;
-       tbb::atomic<bool>                                                       abort_request_;
-                                                                                               
-       std::shared_ptr<AVStream>                                       video_st_;
-       std::shared_ptr<AVStream>                                       audio_st_;
-
-       std::int64_t                                                            video_pts_;
-       std::int64_t                                                            audio_pts_;
-                                                                                                                                                                       
-    AVFilterContext*                                                   audio_graph_in_;  
-    AVFilterContext*                                                   audio_graph_out_; 
-    std::shared_ptr<AVFilterGraph>                             audio_graph_;    
-       std::shared_ptr<AVBitStreamFilterContext>       audio_bitstream_filter_;       
-
-    AVFilterContext*                                                   video_graph_in_;  
-    AVFilterContext*                                                   video_graph_out_; 
-    std::shared_ptr<AVFilterGraph>                             video_graph_;  
-       std::shared_ptr<AVBitStreamFilterContext>       video_bitstream_filter_;
-       
-       executor                                                                        executor_;
-
-       executor                                                                        video_encoder_executor_;
-       executor                                                                        audio_encoder_executor_;
-
-       tbb::atomic<int>                                                        tokens_;
-       boost::mutex                                                            tokens_mutex_;
-       boost::condition_variable                                       tokens_cond_;
-       tbb::atomic<int64_t>                                            current_encoding_delay_;
-
-       executor                                                                        write_executor_;
-       
-public:
-
-       streaming_consumer(
-                       std::string path, 
-                       std::string options,
-                       bool compatibility_mode)
-               : path_(path)
-               , consumer_index_offset_(crc16(path))
-               , compatibility_mode_(compatibility_mode)
-               , video_pts_(0)
-               , audio_pts_(0)
-               , executor_(print())
-               , audio_encoder_executor_(print() + L" audio_encoder")
-               , video_encoder_executor_(print() + L" video_encoder")
-               , write_executor_(print() + L" io")
-       {               
-               abort_request_ = false;
-               current_encoding_delay_ = 0;
-
-               for(auto it = 
-                               boost::sregex_iterator(
-                                       options.begin(), 
-                                       options.end(), 
-                                       boost::regex("-(?<NAME>[^-\\s]+)(\\s+(?<VALUE>[^\\s]+))?")); 
-                       it != boost::sregex_iterator(); 
-                       ++it)
-               {                               
-                       options_[(*it)["NAME"].str()] = (*it)["VALUE"].matched ? (*it)["VALUE"].str() : "";
-               }
-                                                                               
-        if (options_.find("threads") == options_.end())
-            options_["threads"] = "auto";
-
-               tokens_ = 
-                       std::max(
-                               1, 
-                               try_remove_arg<int>(
-                                       options_, 
-                                       boost::regex("tokens")).get_value_or(2));               
-       }
-               
-       ~streaming_consumer()
-       {
-               if(oc_)
-               {
-                       video_encoder_executor_.begin_invoke([&] { encode_video(core::const_frame::empty(), nullptr); });
-                       audio_encoder_executor_.begin_invoke([&] { encode_audio(core::const_frame::empty(), nullptr); });
-
-                       video_encoder_executor_.stop();
-                       audio_encoder_executor_.stop();
-                       video_encoder_executor_.join();
-                       audio_encoder_executor_.join();
-
-                       video_graph_.reset();
-                       audio_graph_.reset();
-                       video_st_.reset();
-                       audio_st_.reset();
-
-                       write_packet(nullptr, nullptr);
-
-                       write_executor_.stop();
-                       write_executor_.join();
-
-                       FF(av_write_trailer(oc_.get()));
-
-                       if (!(oc_->oformat->flags & AVFMT_NOFILE) && oc_->pb)
-                               avio_close(oc_->pb);
-
-                       oc_.reset();
-               }
-       }
-
-       void initialize(
-                       const core::video_format_desc& format_desc,
-                       const core::audio_channel_layout& channel_layout,
-                       int channel_index) override
-       {
-               try
-               {                               
-                       static boost::regex prot_exp("^.+:.*" );
-                       
-                       const auto overwrite = 
-                               try_remove_arg<std::string>(
-                                       options_,
-                                       boost::regex("y")) != boost::none;
-
-                       if(!boost::regex_match(
-                                       path_.string(), 
-                                       prot_exp))
-                       {
-                               if(!path_.is_complete())
-                               {
-                                       path_ = 
-                                               u8(
-                                                       env::media_folder()) + 
-                                                       path_.string();
-                               }
-                       
-                               if(boost::filesystem::exists(path_))
-                               {
-                                       if(!overwrite && !compatibility_mode_)
-                                               BOOST_THROW_EXCEPTION(invalid_argument() << msg_info("File exists"));
-                                               
-                                       boost::filesystem::remove(path_);
-                               }
-                       }
-                                                       
-                       const auto oformat_name = 
-                               try_remove_arg<std::string>(
-                                       options_, 
-                                       boost::regex("^f|format$"));
-                       
-                       AVFormatContext* oc;
-
-                       FF(avformat_alloc_output_context2(
-                               &oc, 
-                               nullptr, 
-                               oformat_name && !oformat_name->empty() ? oformat_name->c_str() : nullptr, 
-                               path_.string().c_str()));
-
-                       oc_.reset(
-                               oc, 
-                               avformat_free_context);
-                                       
-                       CASPAR_VERIFY(oc_->oformat);
-
-                       oc_->interrupt_callback.callback = streaming_consumer::interrupt_cb;
-                       oc_->interrupt_callback.opaque   = this;        
-
-                       CASPAR_VERIFY(format_desc.format != core::video_format::invalid);
-
-                       in_video_format_ = format_desc;
-                       in_channel_layout_ = channel_layout;
-                                                       
-                       CASPAR_VERIFY(oc_->oformat);
-                       
-                       const auto video_codec_name = 
-                               try_remove_arg<std::string>(
-                                       options_, 
-                                       boost::regex("^c:v|codec:v|vcodec$"));
-
-                       const auto video_codec = 
-                               video_codec_name 
-                                       ? avcodec_find_encoder_by_name(video_codec_name->c_str())
-                                       : avcodec_find_encoder(oc_->oformat->video_codec);
-                                               
-                       const auto audio_codec_name = 
-                               try_remove_arg<std::string>(
-                                       options_, 
-                                        boost::regex("^c:a|codec:a|acodec$"));
-                       
-                       const auto audio_codec = 
-                               audio_codec_name 
-                                       ? avcodec_find_encoder_by_name(audio_codec_name->c_str())
-                                       : avcodec_find_encoder(oc_->oformat->audio_codec);
-                       
-                       if (!video_codec)
-                               CASPAR_THROW_EXCEPTION(user_error() << msg_info(
-                                               "Failed to find video codec " + (video_codec_name
-                                                               ? *video_codec_name
-                                                               : "with id " + boost::lexical_cast<std::string>(
-                                                                               oc_->oformat->video_codec))));
-                       if (!audio_codec)
-                               CASPAR_THROW_EXCEPTION(user_error() << msg_info(
-                                               "Failed to find audio codec " + (audio_codec_name
-                                                               ? *audio_codec_name
-                                                               : "with id " + boost::lexical_cast<std::string>(
-                                                                               oc_->oformat->audio_codec))));
-                       
-                       // Filters
-
-                       {
-                               configure_video_filters(
-                                       *video_codec, 
-                                       try_remove_arg<std::string>(options_, 
-                                       boost::regex("vf|f:v|filter:v")).get_value_or(""));
-
-                               configure_audio_filters(
-                                       *audio_codec, 
-                                       try_remove_arg<std::string>(options_,
-                                       boost::regex("af|f:a|filter:a")).get_value_or(""));
-                       }
-
-                       // Bistream Filters
-                       {
-                               configue_audio_bistream_filters(options_);
-                               configue_video_bistream_filters(options_);
-                       }
-
-                       // Encoders
-
-                       {
-                               auto video_options = options_;
-                               auto audio_options = options_;
-
-                               video_st_ = open_encoder(
-                                       *video_codec, 
-                                       video_options);
-
-                               audio_st_ = open_encoder(
-                                       *audio_codec, 
-                                       audio_options);
-
-                               auto it = options_.begin();
-                               while(it != options_.end())
-                               {
-                                       if(video_options.find(it->first) == video_options.end() || audio_options.find(it->first) == audio_options.end())
-                                               it = options_.erase(it);
-                                       else
-                                               ++it;
-                               }
-                       }
-
-                       // Output
-                       {
-                               AVDictionary* av_opts = nullptr;
-
-                               to_dict(
-                                       &av_opts, 
-                                       std::move(options_));
-
-                               CASPAR_SCOPE_EXIT
-                               {
-                                       av_dict_free(&av_opts);
-                               };
-
-                               if (!(oc_->oformat->flags & AVFMT_NOFILE)) 
-                               {
-                                       FF(avio_open2(
-                                               &oc_->pb, 
-                                               path_.string().c_str(), 
-                                               AVIO_FLAG_WRITE, 
-                                               &oc_->interrupt_callback, 
-                                               &av_opts));
-                               }
-                               
-                               FF(avformat_write_header(
-                                       oc_.get(), 
-                                       &av_opts));
-                               
-                               options_ = to_map(av_opts);
-                       }
-
-                       // Dump Info
-                       
-                       av_dump_format(
-                               oc_.get(), 
-                               0, 
-                               oc_->filename, 
-                               1);             
-
-                       for (const auto& option : options_)
-                       {
-                               CASPAR_LOG(warning) 
-                                       << L"Invalid option: -" 
-                                       << u16(option.first) 
-                                       << L" " 
-                                       << u16(option.second);
-                       }
-               }
-               catch(...)
-               {
-                       video_st_.reset();
-                       audio_st_.reset();
-                       oc_.reset();
-                       throw;
-               }
-       }
-
-       core::monitor::subject& monitor_output() override
-       {
-               return subject_;
-       }
-
-       std::wstring name() const override
-       {
-               return L"streaming";
-       }
-
-       std::future<bool> send(core::const_frame frame) override
-       {               
-               CASPAR_VERIFY(in_video_format_.format != core::video_format::invalid);
-               
-               --tokens_;
-               std::shared_ptr<void> token(
-                       nullptr, 
-                       [this, frame](void*)
-                       {
-                               ++tokens_;
-                               tokens_cond_.notify_one();
-                               current_encoding_delay_ = frame.get_age_millis();
-                       });
-
-               return executor_.begin_invoke([=]() -> bool
-               {
-                       boost::unique_lock<boost::mutex> tokens_lock(tokens_mutex_);
-
-                       while(tokens_ < 0)
-                               tokens_cond_.wait(tokens_lock);
-
-                       video_encoder_executor_.begin_invoke([=]() mutable
-                       {
-                               encode_video(
-                                       frame, 
-                                       token);
-                       });
-               
-                       audio_encoder_executor_.begin_invoke([=]() mutable
-                       {
-                               encode_audio(
-                                       frame, 
-                                       token);
-                       });
-                               
-                       return true;
-               });
-       }
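
send() in the deleted streaming_consumer implements token-based backpressure: each frame borrows a token up front, the shared_ptr deleter returns it once both encoder executors have released the frame, and the dispatch task waits while the pool is exhausted (the new ffmpeg_consumer's encode paths pass the same kind of token). A condensed sketch of the mechanism:

    #include <boost/thread/condition_variable.hpp>
    #include <boost/thread/mutex.hpp>
    #include <memory>
    #include <tbb/atomic.h>

    struct token_pool
    {
        tbb::atomic<int>          tokens; // set to the pool size (e.g. 2) before use
        boost::mutex              mutex;
        boost::condition_variable cond;

        std::shared_ptr<void> acquire()
        {
            --tokens;
            return std::shared_ptr<void>(nullptr, [this](void*)
            {
                ++tokens;          // token returned when the last holder drops the frame
                cond.notify_one();
            });
        }

        void wait_for_capacity()
        {
            boost::mutex::scoped_lock lock(mutex);
            while (tokens < 0)
                cond.wait(lock);
        }
    };
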
-
-       std::wstring print() const override
-       {
-               return L"streaming_consumer[" + u16(path_.string()) + L"]";
-       }
-       
-       virtual boost::property_tree::wptree info() const override
-       {
-               boost::property_tree::wptree info;
-               info.add(L"type", L"stream");
-               info.add(L"path", path_.wstring());
-               return info;
-       }
-
-       bool has_synchronization_clock() const override
-       {
-               return false;
-       }
-
-       int buffer_depth() const override
-       {
-               return -1;
-       }
-
-       int index() const override
-       {
-               return compatibility_mode_ ? 200 : 100000 + consumer_index_offset_;
-       }
-
-       int64_t presentation_frame_age_millis() const override
-       {
-               return current_encoding_delay_;
-       }
-
-private:
-
-       static int interrupt_cb(void* ctx)
-       {
-               CASPAR_ASSERT(ctx);
-               return reinterpret_cast<streaming_consumer*>(ctx)->abort_request_;              
-       }
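
interrupt_cb() is the AVIOInterruptCB hook installed on the format context in initialize(): FFmpeg polls it during blocking I/O and gives up (typically with AVERROR_EXIT) as soon as it returns non-zero, which is what lets abort_request_ unblock a stalled file or network write. A sketch of the wiring, with the static member replaced by a hypothetical free function:

    extern "C"
    {
        #include <libavformat/avformat.h>
    }
    #include <tbb/atomic.h>

    // Hypothetical free-function equivalent of streaming_consumer::interrupt_cb.
    static int interrupt_cb_sketch(void* opaque)
    {
        auto* abort_requested = static_cast<tbb::atomic<bool>*>(opaque);
        return *abort_requested ? 1 : 0;
    }

    // Wiring, as done in initialize():
    //   oc->interrupt_callback.callback = interrupt_cb_sketch;
    //   oc->interrupt_callback.opaque   = &abort_request_;
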
-               
-       std::shared_ptr<AVStream> open_encoder(
-                       const AVCodec& codec,
-                       std::map<std::string,
-                       std::string>& options)
-       {                       
-               auto st = 
-                       avformat_new_stream(
-                               oc_.get(), 
-                               &codec);
-
-               if (!st)                
-                       CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Could not allocate stream.") << boost::errinfo_api_function("avformat_new_stream"));
-
-               auto enc = st->codec;
-                               
-               CASPAR_VERIFY(enc);
-                                               
-               switch(enc->codec_type)
-               {
-                       case AVMEDIA_TYPE_VIDEO:
-                       {
-                               enc->time_base           = video_graph_out_->inputs[0]->time_base;
-                               enc->pix_fmt             = static_cast<AVPixelFormat>(video_graph_out_->inputs[0]->format);
-                               enc->sample_aspect_ratio = st->sample_aspect_ratio = video_graph_out_->inputs[0]->sample_aspect_ratio;
-                               enc->width               = video_graph_out_->inputs[0]->w;
-                               enc->height              = video_graph_out_->inputs[0]->h;
-                               enc->bit_rate_tolerance  = 400 * 1000000;
-                       
-                               break;
-                       }
-                       case AVMEDIA_TYPE_AUDIO:
-                       {
-                               enc->time_base      = audio_graph_out_->inputs[0]->time_base;
-                               enc->sample_fmt     = static_cast<AVSampleFormat>(audio_graph_out_->inputs[0]->format);
-                               enc->sample_rate    = audio_graph_out_->inputs[0]->sample_rate;
-                               enc->channel_layout = audio_graph_out_->inputs[0]->channel_layout;
-                               enc->channels       = audio_graph_out_->inputs[0]->channels;
-                       
-                               break;
-                       }
-               }
-                                                                               
-               if(oc_->oformat->flags & AVFMT_GLOBALHEADER)
-                       enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
-               
-               static const std::array<std::string, 4> char_id_map = {{"v", "a", "d", "s"}};
-
-               const auto char_id = char_id_map.at(enc->codec_type);
-                                                               
-               const auto codec_opts = 
-                       remove_options(
-                               options, 
-                               boost::regex("^(" + char_id + "?[^:]+):" + char_id + "$"));
-               
-               AVDictionary* av_codec_opts = nullptr;
-
-               to_dict(
-                       &av_codec_opts, 
-                       options);
-
-               to_dict(
-                       &av_codec_opts,
-                       codec_opts);
-
-               options.clear();
-               
-               FF(avcodec_open2(
-                       enc,            
-                       &codec, 
-                       av_codec_opts ? &av_codec_opts : nullptr));             
-
-               if(av_codec_opts)
-               {
-                       auto t = 
-                               av_dict_get(
-                                       av_codec_opts, 
-                                       "", 
-                                        nullptr, 
-                                       AV_DICT_IGNORE_SUFFIX);
-
-                       while(t)
-                       {
-                               options[t->key + (codec_opts.find(t->key) != codec_opts.end() ? ":" + char_id : "")] = t->value;
-
-                               t = av_dict_get(
-                                               av_codec_opts, 
-                                               "", 
-                                               t, 
-                                               AV_DICT_IGNORE_SUFFIX);
-                       }
-
-                       av_dict_free(&av_codec_opts);
-               }
-                               
-               if(enc->codec_type == AVMEDIA_TYPE_AUDIO && !(codec.capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
-               {
-                       CASPAR_ASSERT(enc->frame_size > 0);
-                       av_buffersink_set_frame_size(audio_graph_out_, 
-                                                                                enc->frame_size);
-               }
-               
-               return std::shared_ptr<AVStream>(st, [this](AVStream* st)
-               {
-                       avcodec_close(st->codec);
-               });
-       }
-
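-       // Extracts the audio bitstream-filter option (bsf:a or absf) from the option
-       // map and initializes the corresponding FFmpeg bitstream filter, if any.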
-       void configue_audio_bistream_filters(
-                       std::map<std::string, std::string>& options)
-       {
-               const auto audio_bitstream_filter_str = 
-                       try_remove_arg<std::string>(
-                               options, 
-                               boost::regex("^bsf:a|absf$"));
-
-               const auto audio_bitstream_filter = 
-                       audio_bitstream_filter_str 
-                               ? av_bitstream_filter_init(audio_bitstream_filter_str->c_str()) 
-                               : nullptr;
-
-               CASPAR_VERIFY(!audio_bitstream_filter_str || audio_bitstream_filter);
-
-               if(audio_bitstream_filter)
-               {
-                       audio_bitstream_filter_.reset(
-                               audio_bitstream_filter, 
-                               av_bitstream_filter_close);
-               }
-               
-               if(audio_bitstream_filter_str && !audio_bitstream_filter_)
-                       options["bsf:a"] = *audio_bitstream_filter_str;
-       }
-       
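-       // Extracts the video bitstream-filter option (bsf:v or vbsf) from the option
-       // map and initializes the corresponding FFmpeg bitstream filter, if any.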
-       void configue_video_bistream_filters(
-                       std::map<std::string, std::string>& options)
-       {
-               const auto video_bitstream_filter_str = 
-                               try_remove_arg<std::string>(
-                                       options, 
-                                       boost::regex("^bsf:v|vbsf$"));
-
-               const auto video_bitstream_filter = 
-                       video_bitstream_filter_str 
-                               ? av_bitstream_filter_init(video_bitstream_filter_str->c_str()) 
-                               : nullptr;
-
-               CASPAR_VERIFY(!video_bitstream_filter_str || video_bitstream_filter);
-
-               if(video_bitstream_filter)
-               {
-                       video_bitstream_filter_.reset(
-                               video_bitstream_filter, 
-                               av_bitstream_filter_close);
-               }
-               
-               if(video_bitstream_filter_str && !video_bitstream_filter_)
-                       options["bsf:v"] = *video_bitstream_filter_str;
-       }
-       
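-       // Builds the video filter graph: a BGRA buffer source fed with channel
-       // frames, the optional user filtergraph, and a buffersink constrained to the
-       // encoder's supported pixel formats.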
-       void configure_video_filters(
-                       const AVCodec& codec,
-                       const std::string& filtergraph)
-       {
-               video_graph_.reset(
-                               avfilter_graph_alloc(), 
-                               [](AVFilterGraph* p)
-                               {
-                                       avfilter_graph_free(&p);
-                               });
-               
-               video_graph_->nb_threads  = boost::thread::hardware_concurrency()/2;
-               video_graph_->thread_type = AVFILTER_THREAD_SLICE;
-
-               const auto sample_aspect_ratio =
-                       boost::rational<int>(
-                                       in_video_format_.square_width,
-                                       in_video_format_.square_height) /
-                       boost::rational<int>(
-                                       in_video_format_.width,
-                                       in_video_format_.height);
-               
-               const auto vsrc_options = (boost::format("video_size=%1%x%2%:pix_fmt=%3%:time_base=%4%/%5%:pixel_aspect=%6%/%7%:frame_rate=%8%/%9%")
-                       % in_video_format_.width % in_video_format_.height
-                       % AV_PIX_FMT_BGRA
-                       % in_video_format_.duration     % in_video_format_.time_scale
-                       % sample_aspect_ratio.numerator() % sample_aspect_ratio.denominator()
-                       % in_video_format_.time_scale % in_video_format_.duration).str();
-                                       
-               AVFilterContext* filt_vsrc = nullptr;                   
-               FF(avfilter_graph_create_filter(
-                               &filt_vsrc,
-                               avfilter_get_by_name("buffer"), 
-                               "ffmpeg_consumer_buffer",
-                               vsrc_options.c_str(), 
-                               nullptr, 
-                               video_graph_.get()));
-                               
-               AVFilterContext* filt_vsink = nullptr;
-               FF(avfilter_graph_create_filter(
-                               &filt_vsink,
-                               avfilter_get_by_name("buffersink"), 
-                               "ffmpeg_consumer_buffersink",
-                               nullptr, 
-                               nullptr, 
-                               video_graph_.get()));
-               
-#pragma warning (push)
-#pragma warning (disable : 4245)
-
-               FF(av_opt_set_int_list(
-                               filt_vsink, 
-                               "pix_fmts", 
-                               codec.pix_fmts, 
-                               -1,
-                               AV_OPT_SEARCH_CHILDREN));
-
-#pragma warning (pop)
-                       
-               configure_filtergraph(
-                               *video_graph_, 
-                               filtergraph,
-                               *filt_vsrc,
-                               *filt_vsink);
-
-               video_graph_in_  = filt_vsrc;
-               video_graph_out_ = filt_vsink;
-               
-               CASPAR_LOG(info)
-                       <<      u16(std::string("\n") 
-                               + avfilter_graph_dump(
-                                               video_graph_.get(), 
-                                               nullptr));
-       }
-
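-       // Builds the audio filter graph: an S32 abuffer source matching the channel
-       // layout, the optional user filtergraph, and an abuffersink constrained to
-       // the encoder's supported sample formats, layouts and rates.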
-       void configure_audio_filters(
-                       const AVCodec& codec,
-                       const std::string& filtergraph)
-       {
-               audio_graph_.reset(
-                       avfilter_graph_alloc(), 
-                       [](AVFilterGraph* p)
-                       {
-                               avfilter_graph_free(&p);
-                       });
-               
-               audio_graph_->nb_threads  = boost::thread::hardware_concurrency()/2;
-               audio_graph_->thread_type = AVFILTER_THREAD_SLICE;
-               
-               const auto asrc_options = (boost::format("sample_rate=%1%:sample_fmt=%2%:channels=%3%:time_base=%4%/%5%:channel_layout=%6%")
-                       % in_video_format_.audio_sample_rate
-                       % av_get_sample_fmt_name(AV_SAMPLE_FMT_S32)
-                       % in_channel_layout_.num_channels
-                       % 1     % in_video_format_.audio_sample_rate
-                       % boost::io::group(
-                               std::hex, 
-                               std::showbase, 
-                               av_get_default_channel_layout(in_channel_layout_.num_channels))).str();
-
-               AVFilterContext* filt_asrc = nullptr;
-               FF(avfilter_graph_create_filter(
-                       &filt_asrc,
-                       avfilter_get_by_name("abuffer"), 
-                       "ffmpeg_consumer_abuffer",
-                       asrc_options.c_str(), 
-                       nullptr, 
-                       audio_graph_.get()));
-                               
-               AVFilterContext* filt_asink = nullptr;
-               FF(avfilter_graph_create_filter(
-                       &filt_asink,
-                       avfilter_get_by_name("abuffersink"), 
-                       "ffmpeg_consumer_abuffersink",
-                       nullptr, 
-                       nullptr, 
-                       audio_graph_.get()));
-               
-#pragma warning (push)
-#pragma warning (disable : 4245)
-
-               FF(av_opt_set_int(
-                       filt_asink,        
-                       "all_channel_counts",
-                       1,      
-                       AV_OPT_SEARCH_CHILDREN));
-
-               FF(av_opt_set_int_list(
-                       filt_asink, 
-                       "sample_fmts",           
-                       codec.sample_fmts,                              
-                       -1, 
-                       AV_OPT_SEARCH_CHILDREN));
-
-               FF(av_opt_set_int_list(
-                       filt_asink,
-                       "channel_layouts",       
-                       codec.channel_layouts,                  
-                       -1, 
-                       AV_OPT_SEARCH_CHILDREN));
-
-               FF(av_opt_set_int_list(
-                       filt_asink, 
-                       "sample_rates" ,         
-                       codec.supported_samplerates,    
-                       -1, 
-                       AV_OPT_SEARCH_CHILDREN));
-
-#pragma warning (pop)
-                       
-               configure_filtergraph(
-                       *audio_graph_, 
-                       filtergraph, 
-                       *filt_asrc, 
-                       *filt_asink);
-
-               audio_graph_in_  = filt_asrc;
-               audio_graph_out_ = filt_asink;
-
-               CASPAR_LOG(info) 
-                       <<      u16(std::string("\n") 
-                               + avfilter_graph_dump(
-                                       audio_graph_.get(), 
-                                       nullptr));
-       }
-
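-       // Parses the user supplied filtergraph between source and sink, or links the
-       // two directly when no filtergraph is given, then validates the result.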
-       void configure_filtergraph(
-                       AVFilterGraph& graph,
-                       const std::string& filtergraph,
-                       AVFilterContext& source_ctx,
-                       AVFilterContext& sink_ctx)
-       {
-               AVFilterInOut* outputs = nullptr;
-               AVFilterInOut* inputs = nullptr;
-
-               try
-               {
-                       if(!filtergraph.empty())
-                       {
-                               outputs = avfilter_inout_alloc();
-                               inputs  = avfilter_inout_alloc();
-
-                               CASPAR_VERIFY(outputs && inputs);
-
-                               outputs->name       = av_strdup("in");
-                               outputs->filter_ctx = &source_ctx;
-                               outputs->pad_idx    = 0;
-                               outputs->next       = nullptr;
-
-                               inputs->name        = av_strdup("out");
-                               inputs->filter_ctx  = &sink_ctx;
-                               inputs->pad_idx     = 0;
-                               inputs->next        = nullptr;
-
-                               FF(avfilter_graph_parse(
-                                       &graph, 
-                                       filtergraph.c_str(), 
-                                       inputs,
-                                       outputs,
-                                       nullptr));
-                       } 
-                       else 
-                       {
-                               FF(avfilter_link(
-                                       &source_ctx, 
-                                       0, 
-                                       &sink_ctx, 
-                                       0));
-                       }
-
-                       FF(avfilter_graph_config(
-                               &graph, 
-                               nullptr));
-               }
-               catch(...)
-               {
-                       avfilter_inout_free(&outputs);
-                       avfilter_inout_free(&inputs);
-                       throw;
-               }
-       }
-       
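-       // Wraps the frame's BGRA image data in an AVFrame, pushes it into the video
-       // filter graph and drains filtered frames to the video encoder; on EOF the
-       // encoder is flushed if the codec buffers frames (CODEC_CAP_DELAY).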
-       void encode_video(core::const_frame frame_ptr, std::shared_ptr<void> token)
-       {               
-               if(!video_st_)
-                       return;
-
-               auto enc = video_st_->codec;
-                       
-               std::shared_ptr<AVFrame> src_av_frame;
-
-               if(frame_ptr != core::const_frame::empty())
-               {
-                       src_av_frame.reset(
-                               av_frame_alloc(),
-                               [frame_ptr](AVFrame* frame)
-                               {
-                                       av_frame_free(&frame);
-                               });
-
-                       avcodec_get_frame_defaults(src_av_frame.get());         
-                       
-                       const auto sample_aspect_ratio = 
-                               boost::rational<int>(
-                                       in_video_format_.square_width, 
-                                       in_video_format_.square_height) /
-                               boost::rational<int>(
-                                       in_video_format_.width, 
-                                       in_video_format_.height);
-
-                       src_av_frame->format                  = AV_PIX_FMT_BGRA;
-                       src_av_frame->width                   = in_video_format_.width;
-                       src_av_frame->height                  = in_video_format_.height;
-                       src_av_frame->sample_aspect_ratio.num = sample_aspect_ratio.numerator();
-                       src_av_frame->sample_aspect_ratio.den = sample_aspect_ratio.denominator();
-                       src_av_frame->pts                     = video_pts_;
-
-                       video_pts_ += 1;
-
-                       FF(av_image_fill_arrays(
-                               src_av_frame->data,
-                               src_av_frame->linesize,
-                               frame_ptr.image_data().begin(),
-                               static_cast<AVPixelFormat>(src_av_frame->format), 
-                               in_video_format_.width, 
-                               in_video_format_.height, 
-                               1));
-
-                       FF(av_buffersrc_add_frame(
-                               video_graph_in_, 
-                               src_av_frame.get()));
-               }               
-
-               int ret = 0;
-
-               while(ret >= 0)
-               {
-                       std::shared_ptr<AVFrame> filt_frame(
-                               av_frame_alloc(), 
-                               [](AVFrame* p)
-                               {
-                                       av_frame_free(&p);
-                               });
-
-                       ret = av_buffersink_get_frame(
-                               video_graph_out_, 
-                               filt_frame.get());
-                                               
-                       video_encoder_executor_.begin_invoke([=]
-                       {
-                               if(ret == AVERROR_EOF)
-                               {
-                                       if(enc->codec->capabilities & CODEC_CAP_DELAY)
-                                       {
-                                               while(encode_av_frame(
-                                                               *video_st_, 
-                                                               video_bitstream_filter_.get(),
-                                                               avcodec_encode_video2, 
-                                                               nullptr, token))
-                                               {
-                                                       boost::this_thread::yield(); // TODO:
-                                               }
-                                       }               
-                               }
-                               else if(ret != AVERROR(EAGAIN))
-                               {
-                                       FF_RET(ret, "av_buffersink_get_frame");
-                                       
-                                       if (filt_frame->interlaced_frame) 
-                                       {
-                                               if (enc->codec->id == AV_CODEC_ID_MJPEG)
-                                                       enc->field_order = filt_frame->top_field_first ? AV_FIELD_TT : AV_FIELD_BB;
-                                               else
-                                                       enc->field_order = filt_frame->top_field_first ? AV_FIELD_TB : AV_FIELD_BT;
-                                       } 
-                                       else
-                                               enc->field_order = AV_FIELD_PROGRESSIVE;
-
-                                       filt_frame->quality = enc->global_quality;
-
-                                       if (!enc->me_threshold)
-                                               filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
-                       
-                                       encode_av_frame(
-                                               *video_st_,
-                                               video_bitstream_filter_.get(),
-                                               avcodec_encode_video2,
-                                               filt_frame, 
-                                               token);
-
-                                       boost::this_thread::yield(); // TODO:
-                               }
-                       });
-               }
-       }
-                                       
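-       // Wraps the frame's interleaved S32 samples in an AVFrame, pushes it into the
-       // audio filter graph and drains filtered frames to the audio encoder; on EOF
-       // the encoder is flushed if the codec buffers frames (CODEC_CAP_DELAY).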
-       void encode_audio(core::const_frame frame_ptr, std::shared_ptr<void> token)
-       {               
-               if(!audio_st_)
-                       return;
-               
-               auto enc = audio_st_->codec;
-                       
-               std::shared_ptr<AVFrame> src_av_frame;
-
-               if(frame_ptr != core::const_frame::empty())
-               {
-                       src_av_frame.reset(
-                               av_frame_alloc(), 
-                               [](AVFrame* p)
-                               {
-                                       av_frame_free(&p);
-                               });
-               
-                       src_av_frame->channels       = in_channel_layout_.num_channels;
-                       src_av_frame->channel_layout = av_get_default_channel_layout(in_channel_layout_.num_channels);
-                       src_av_frame->sample_rate    = in_video_format_.audio_sample_rate;
-                       src_av_frame->nb_samples     = static_cast<int>(frame_ptr.audio_data().size()) / src_av_frame->channels;
-                       src_av_frame->format         = AV_SAMPLE_FMT_S32;
-                       src_av_frame->pts            = audio_pts_;
-
-                       audio_pts_ += src_av_frame->nb_samples;
-
-                       FF(av_samples_fill_arrays(
-                                       src_av_frame->extended_data,
-                                       src_av_frame->linesize,
-                                       reinterpret_cast<const std::uint8_t*>(&*frame_ptr.audio_data().begin()),
-                                       src_av_frame->channels,
-                                       src_av_frame->nb_samples,
-                                       static_cast<AVSampleFormat>(src_av_frame->format),
-                                       16));
-               
-                       FF(av_buffersrc_add_frame(
-                                       audio_graph_in_, 
-                                       src_av_frame.get()));
-               }
-
-               int ret = 0;
-
-               while(ret >= 0)
-               {
-                       std::shared_ptr<AVFrame> filt_frame(
-                               av_frame_alloc(), 
-                               [](AVFrame* p)
-                               {
-                                       av_frame_free(&p);
-                               });
-
-                       ret = av_buffersink_get_frame(
-                               audio_graph_out_, 
-                               filt_frame.get());
-                                       
-                       audio_encoder_executor_.begin_invoke([=]
-                       {       
-                               if(ret == AVERROR_EOF)
-                               {
-                                       if(enc->codec->capabilities & CODEC_CAP_DELAY)
-                                       {
-                                               while(encode_av_frame(
-                                                               *audio_st_, 
-                                                               audio_bitstream_filter_.get(), 
-                                                               avcodec_encode_audio2, 
-                                                               nullptr, 
-                                                               token))
-                                               {
-                                                       boost::this_thread::yield(); // TODO:
-                                               }
-                                       }
-                               }
-                               else if(ret != AVERROR(EAGAIN))
-                               {
-                                       FF_RET(
-                                               ret, 
-                                               "av_buffersink_get_frame");
-
-                                       encode_av_frame(
-                                               *audio_st_, 
-                                               audio_bitstream_filter_.get(), 
-                                               avcodec_encode_audio2, 
-                                               filt_frame, 
-                                               token);
-
-                                       boost::this_thread::yield(); // TODO:
-                               }
-                       });
-               }
-       }
-       
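-       // Encodes one filtered frame into a packet, optionally runs it through a
-       // bitstream filter, rescales timestamps from codec to stream time base and
-       // queues it for interleaved writing. Returns false if no packet was produced.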
-       template<typename F>
-       bool encode_av_frame(
-                       AVStream& st,
-                       AVBitStreamFilterContext* bsfc, 
-                       const F& func, 
-                       const std::shared_ptr<AVFrame>& src_av_frame, 
-                       std::shared_ptr<void> token)
-       {
-               AVPacket pkt = {};
-               av_init_packet(&pkt);
-
-               int got_packet = 0;
-
-               FF(func(
-                       st.codec, 
-                       &pkt, 
-                       src_av_frame.get(), 
-                       &got_packet));
-                                       
-               if(!got_packet || pkt.size <= 0)
-                       return false;
-
-               pkt.stream_index = st.index;
-               
-               if(bsfc)
-               {
-                       auto new_pkt = pkt;
-
-                       auto a = av_bitstream_filter_filter(
-                                       bsfc,
-                                       st.codec,
-                                       nullptr,
-                                       &new_pkt.data,
-                                       &new_pkt.size,
-                                       pkt.data,
-                                       pkt.size,
-                                       pkt.flags & AV_PKT_FLAG_KEY);
-
-                       if(a == 0 && new_pkt.data != pkt.data && new_pkt.destruct) 
-                       {
-                               auto t = reinterpret_cast<std::uint8_t*>(av_malloc(new_pkt.size + FF_INPUT_BUFFER_PADDING_SIZE));
-
-                               if(t) 
-                               {
-                                       memcpy(
-                                               t, 
-                                               new_pkt.data,
-                                               new_pkt.size);
-
-                                       memset(
-                                               t + new_pkt.size, 
-                                               0, 
-                                               FF_INPUT_BUFFER_PADDING_SIZE);
-
-                                       new_pkt.data = t;
-                                       new_pkt.buf  = nullptr;
-                               } 
-                               else
-                                       a = AVERROR(ENOMEM);
-                       }
-
-                       av_free_packet(&pkt);
-
-                       FF_RET(
-                               a, 
-                               "av_bitstream_filter_filter");
-
-                       new_pkt.buf =
-                               av_buffer_create(
-                                       new_pkt.data, 
-                                       new_pkt.size,
-                                       av_buffer_default_free, 
-                                       nullptr, 
-                                       0);
-
-                       CASPAR_VERIFY(new_pkt.buf);
-
-                       pkt = new_pkt;
-               }
-               
-               if (pkt.pts != AV_NOPTS_VALUE)
-               {
-                       pkt.pts = 
-                               av_rescale_q(
-                                       pkt.pts,
-                                       st.codec->time_base, 
-                                       st.time_base);
-               }
-
-               if (pkt.dts != AV_NOPTS_VALUE)
-               {
-                       pkt.dts = 
-                               av_rescale_q(
-                                       pkt.dts, 
-                                       st.codec->time_base, 
-                                       st.time_base);
-               }
-                               
-               pkt.duration = 
-                       static_cast<int>(
-                               av_rescale_q(
-                                       pkt.duration, 
-                                       st.codec->time_base, st.time_base));
-
-               write_packet(
-                       std::shared_ptr<AVPacket>(
-                               new AVPacket(pkt), 
-                               [](AVPacket* p)
-                               {
-                                       av_free_packet(p); 
-                                       delete p;
-                               }), token);
-
-               return true;
-       }
-
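-       // Writing is serialized on a dedicated executor so that muxer I/O does not
-       // block the encoder threads.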
-       void write_packet(
-                       const std::shared_ptr<AVPacket>& pkt_ptr,
-                       std::shared_ptr<void> token)
-       {               
-               write_executor_.begin_invoke([this, pkt_ptr, token]() mutable
-               {
-                       FF(av_interleaved_write_frame(
-                               oc_.get(), 
-                               pkt_ptr.get()));
-               });     
-       }       
-       
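-       // Removes the first option whose key matches the expression and returns its
-       // value lexically cast to T, or an empty optional if no key matches.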
-       template<typename T>
-       static boost::optional<T> try_remove_arg(
-                       std::map<std::string, std::string>& options, 
-                       const boost::regex& expr)
-       {
-               for(auto it = options.begin(); it != options.end(); ++it)
-               {                       
-                       if(boost::regex_search(it->first, expr))
-                       {
-                               auto arg = it->second;
-                               options.erase(it);
-                               return boost::lexical_cast<T>(arg);
-                       }
-               }
-
-               return boost::optional<T>();
-       }
-               
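-       // Moves every option whose key matches the expression into a separate map,
-       // keyed by the first capture group when one is matched, otherwise by the
-       // original key.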
-       static std::map<std::string, std::string> remove_options(
-                       std::map<std::string, std::string>& options, 
-                       const boost::regex& expr)
-       {
-               std::map<std::string, std::string> result;
-                       
-               auto it = options.begin();
-               while(it != options.end())
-               {                       
-                       boost::smatch what;
-                       if(boost::regex_search(it->first, what, expr))
-                       {
-                               result[
-                                       what.size() > 0 && what[1].matched 
-                                               ? what[1].str() 
-                                               : it->first] = it->second;
-                               it = options.erase(it);
-                       }
-                       else
-                               ++it;
-               }
-
-               return result;
-       }
-               
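-       // Helpers for converting between std::map<std::string, std::string> options
-       // and AVDictionary.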
-       static void to_dict(AVDictionary** dest, const std::map<std::string, std::string>& c)
-       {               
-               for (const auto& entry : c)
-               {
-                       av_dict_set(
-                               dest, 
-                               entry.first.c_str(), 
-                               entry.second.c_str(), 0);
-               }
-       }
-
-       static std::map<std::string, std::string> to_map(AVDictionary* dict)
-       {
-               std::map<std::string, std::string> result;
-               
-               for(auto t = dict 
-                               ? av_dict_get(
-                                       dict, 
-                                       "", 
-                                       nullptr, 
-                                       AV_DICT_IGNORE_SUFFIX) 
-                               : nullptr;
-                       t; 
-                       t = av_dict_get(
-                               dict, 
-                               "", 
-                               t,
-                               AV_DICT_IGNORE_SUFFIX))
-               {
-                       result[t->key] = t->value;
-               }
-
-               return result;
-       }
-};
-
-void describe_streaming_consumer(core::help_sink& sink, const core::help_repository& repo)
-{
-       sink.short_description(L"For streaming the contents of a channel using FFmpeg.");
-       sink.syntax(L"STREAM [url:string] {-[ffmpeg_param1:string] [value1:string] {-[ffmpeg_param2:string] [value2:string] {...}}}");
-       sink.para()->text(L"For streaming the contents of a channel using FFmpeg");
-       sink.definitions()
-               ->item(L"url", L"The stream URL to create/stream to.")
-               ->item(L"ffmpeg_paramX", L"A parameter supported by FFmpeg. For example vcodec or acodec etc.");
-       sink.para()->text(L"Examples:");
-       sink.example(L">> ADD 1 STREAM udp://<client_ip_address>:9250 -format mpegts -vcodec libx264 -crf 25 -tune zerolatency -preset ultrafast");
-}
-
-spl::shared_ptr<core::frame_consumer> create_streaming_consumer(
-               const std::vector<std::wstring>& params, core::interaction_sink*)
-{       
-       if (params.size() < 1 || (!boost::iequals(params.at(0), L"STREAM") && !boost::iequals(params.at(0), L"FILE")))
-               return core::frame_consumer::empty();
-
-       auto compatibility_mode = boost::iequals(params.at(0), L"FILE");
-       auto path = u8(params.size() > 1 ? params.at(1) : L"");
-       auto args = u8(boost::join(params, L" "));
-
-       return spl::make_shared<streaming_consumer>(path, args, compatibility_mode);
-}
-
-spl::shared_ptr<core::frame_consumer> create_preconfigured_streaming_consumer(
-               const boost::property_tree::wptree& ptree, core::interaction_sink*)
-{                      
-       return spl::make_shared<streaming_consumer>(
-                       u8(ptree_get<std::wstring>(ptree, L"path")), 
-                       u8(ptree.get<std::wstring>(L"args", L"")),
-                       false);
-}
-
-}}
diff --git a/modules/ffmpeg/consumer/streaming_consumer.h b/modules/ffmpeg/consumer/streaming_consumer.h
deleted file mode 100644 (file)
index 2663cf4..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-#pragma once
-
-#include <common/memory.h>
-
-#include <core/fwd.h>
-
-#include <boost/property_tree/ptree_fwd.hpp>
-
-#include <string>
-#include <vector>
-
-namespace caspar { namespace ffmpeg {
-
-void describe_streaming_consumer(core::help_sink& sink, const core::help_repository& repo);
-spl::shared_ptr<core::frame_consumer> create_streaming_consumer(
-               const std::vector<std::wstring>& params, core::interaction_sink*);
-spl::shared_ptr<core::frame_consumer> create_preconfigured_streaming_consumer(
-               const boost::property_tree::wptree& ptree, core::interaction_sink*);
-
-}}
\ No newline at end of file
index 7d8159238c8cc0ed9f386f4c231f41ef64c556ca..9f9dc549fcc8e57c86ae87793579a901b6588734 100644 (file)
@@ -24,7 +24,6 @@
 #include "ffmpeg.h"
 
 #include "consumer/ffmpeg_consumer.h"
-#include "consumer/streaming_consumer.h"
 #include "producer/ffmpeg_producer.h"
 #include "producer/util/util.h"
 
@@ -50,7 +49,7 @@
 #pragma warning (disable : 4996)
 #endif
 
-extern "C" 
+extern "C"
 {
        #define __STDC_CONSTANT_MACROS
        #define __STDC_LIMIT_MACROS
@@ -58,155 +57,108 @@ extern "C"
        #include <libswscale/swscale.h>
        #include <libavutil/avutil.h>
        #include <libavfilter/avfilter.h>
+       #include <libavdevice/avdevice.h>
 }
 
 namespace caspar { namespace ffmpeg {
-       
-int ffmpeg_lock_callback(void **mutex, enum AVLockOp op) 
-{ 
+int ffmpeg_lock_callback(void **mutex, enum AVLockOp op)
+{
        if(!mutex)
                return 0;
 
        auto my_mutex = reinterpret_cast<tbb::recursive_mutex*>(*mutex);
-       
-       switch(op) 
-       { 
-               case AV_LOCK_CREATE: 
-               { 
-                       *mutex = new tbb::recursive_mutex(); 
-                       break; 
-               } 
-               case AV_LOCK_OBTAIN: 
-               { 
+
+       switch(op)
+       {
+               case AV_LOCK_CREATE:
+               {
+                       *mutex = new tbb::recursive_mutex();
+                       break;
+               }
+               case AV_LOCK_OBTAIN:
+               {
                        if(my_mutex)
-                               my_mutex->lock(); 
-                       break; 
-               } 
-               case AV_LOCK_RELEASE: 
-               { 
+                               my_mutex->lock();
+                       break;
+               }
+               case AV_LOCK_RELEASE:
+               {
                        if(my_mutex)
-                               my_mutex->unlock(); 
-                       break; 
-               } 
-               case AV_LOCK_DESTROY: 
-               { 
+                               my_mutex->unlock();
+                       break;
+               }
+               case AV_LOCK_DESTROY:
+               {
                        delete my_mutex;
                        *mutex = nullptr;
-                       break; 
-               } 
-       } 
-       return 0; 
-} 
+                       break;
+               }
+       }
+       return 0;
+}
 
 static void sanitize(uint8_t *line)
 {
-    while(*line)
+       while(*line)
        {
-        if(*line < 0x08 || (*line > 0x0D && *line < 0x20))
-            *line='?';
-        line++;
-    }
+               if(*line < 0x08 || (*line > 0x0D && *line < 0x20))
+                       *line='?';
+               line++;
+       }
 }
 
 void log_callback(void* ptr, int level, const char* fmt, va_list vl)
 {
-    static int print_prefix=1;
-    //static int count;
-    static char prev[1024];
-    char line[8192];
-    //static int is_atty;
-    AVClass* avc= ptr ? *(AVClass**)ptr : NULL;
-    if(level > av_log_get_level())
-        return;
-    line[0]=0;
-       
+       static int print_prefix=1;
+       static char prev[1024];
+       char line[8192];
+       AVClass* avc= ptr ? *(AVClass**)ptr : NULL;
+       if (level > AV_LOG_DEBUG)
+               return;
+       line[0]=0;
+
 #undef fprintf
-    if(print_prefix && avc) 
+       if(print_prefix && avc)
        {
-        if (avc->parent_log_context_offset) 
+               if (avc->parent_log_context_offset)
                {
-            AVClass** parent= *(AVClass***)(((uint8_t*)ptr) + avc->parent_log_context_offset);
-            if(parent && *parent)
-                std::sprintf(line, "[%s @ %p] ", (*parent)->item_name(parent), parent);            
-        }
-        std::sprintf(line + strlen(line), "[%s @ %p] ", avc->item_name(ptr), ptr);
-    }
-
-    std::vsprintf(line + strlen(line), fmt, vl);
-
-    print_prefix = strlen(line) && line[strlen(line)-1] == '\n';
-       
-    //if(print_prefix && !strcmp(line, prev)){
-    //    count++;
-    //    if(is_atty==1)
-    //        fprintf(stderr, "    Last message repeated %d times\r", count);
-    //    return;
-    //}
-    //if(count>0){
-    //    fprintf(stderr, "    Last message repeated %d times\n", count);
-    //    count=0;
-    //}
-    strcpy(prev, line);
-    sanitize((uint8_t*)line);
+                       AVClass** parent= *(AVClass***)(((uint8_t*)ptr) + avc->parent_log_context_offset);
+                       if(parent && *parent)
+                               std::sprintf(line, "[%s @ %p] ", (*parent)->item_name(parent), parent);
+               }
+               std::sprintf(line + strlen(line), "[%s @ %p] ", avc->item_name(ptr), ptr);
+       }
+
+       std::vsprintf(line + strlen(line), fmt, vl);
+
+       print_prefix = strlen(line) && line[strlen(line)-1] == '\n';
+
+       strcpy(prev, line);
+       sanitize((uint8_t*)line);
 
        auto len = strlen(line);
        if(len > 0)
                line[len-1] = 0;
-       
-       if(level == AV_LOG_DEBUG)
-               CASPAR_LOG(debug) << L"[ffmpeg] " << line;
-       else if(level == AV_LOG_INFO)
-               CASPAR_LOG(info) << L"[ffmpeg] " << line;
-       else if(level == AV_LOG_WARNING)
-               CASPAR_LOG(warning) << L"[ffmpeg] " << line;
-       else if(level == AV_LOG_ERROR)
-               CASPAR_LOG(error) << L"[ffmpeg] " << line;
-       else if(level == AV_LOG_FATAL)
-               CASPAR_LOG(fatal) << L"[ffmpeg] " << line;
-       else
-               CASPAR_LOG(trace) << L"[ffmpeg] " << line;
-
-    //colored_fputs(av_clip(level>>3, 0, 6), line);
-}
 
-//static int query_yadif_formats(AVFilterContext *ctx)
-//{
-//    static const int pix_fmts[] = {
-//        PIX_FMT_YUV444P,
-//        PIX_FMT_YUV422P,
-//        PIX_FMT_YUV420P,
-//        PIX_FMT_YUV410P,
-//        PIX_FMT_YUV411P,
-//        PIX_FMT_GRAY8,
-//        PIX_FMT_YUVJ444P,
-//        PIX_FMT_YUVJ422P,
-//        PIX_FMT_YUVJ420P,
-//        AV_NE( PIX_FMT_GRAY16BE, PIX_FMT_GRAY16LE ),
-//        PIX_FMT_YUV440P,
-//        PIX_FMT_YUVJ440P,
-//        AV_NE( PIX_FMT_YUV444P16BE, PIX_FMT_YUV444P16LE ),
-//        AV_NE( PIX_FMT_YUV422P16BE, PIX_FMT_YUV422P16LE ),
-//        AV_NE( PIX_FMT_YUV420P16BE, PIX_FMT_YUV420P16LE ),
-//        PIX_FMT_YUVA420P,
-//        PIX_FMT_NONE
-//    };
-//    avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
-//
-//    return 0;
-//}
-//
-//#pragma warning (push)
-//#pragma warning (disable : 4706)
-//void fix_yadif_filter_format_query()
-//{
-//     AVFilter** filter = nullptr;
-//    while((filter = av_filter_next(filter)) && *filter)
-//     {
-//             if(strstr((*filter)->name, "yadif") != 0)
-//                     (*filter)->query_formats = query_yadif_formats;
-//     }
-//}
-//#pragma warning (pop)
+       try
+       {
+               if (level == AV_LOG_VERBOSE)
+                       CASPAR_LOG(debug) << L"[ffmpeg] " << line;
+               else if (level == AV_LOG_INFO)
+                       CASPAR_LOG(info) << L"[ffmpeg] " << line;
+               else if (level == AV_LOG_WARNING)
+                       CASPAR_LOG(warning) << L"[ffmpeg] " << line;
+               else if (level == AV_LOG_ERROR)
+                       CASPAR_LOG(error) << L"[ffmpeg] " << line;
+               else if (level == AV_LOG_FATAL)
+                       CASPAR_LOG(fatal) << L"[ffmpeg] " << line;
+               else
+                       CASPAR_LOG(trace) << L"[ffmpeg] " << line;
+       }
+       catch (...)
+       {
+       }
+}
 
 std::wstring make_version(unsigned int ver)
 {
@@ -254,6 +206,11 @@ bool& get_quiet_logging_for_thread()
        return *local;
 }
 
+void enable_quiet_logging_for_thread()
+{
+       get_quiet_logging_for_thread() = true;
+}
+
 bool is_logging_quiet_for_thread()
 {
        return get_quiet_logging_for_thread();
@@ -286,18 +243,17 @@ void init(core::module_dependencies dependencies)
        av_lockmgr_register(ffmpeg_lock_callback);
        av_log_set_callback(log_for_thread);
 
-    avfilter_register_all();
+       avfilter_register_all();
        //fix_yadif_filter_format_query();
        av_register_all();
-    avformat_network_init();
-    avcodec_register_all();
+       avformat_network_init();
+       avcodec_register_all();
+       avdevice_register_all();
 
        auto info_repo = dependencies.media_info_repo;
-       
-       dependencies.consumer_registry->register_consumer_factory(L"FFmpeg Consumer", create_consumer, describe_consumer);
-       dependencies.consumer_registry->register_consumer_factory(L"Streaming Consumer",  create_streaming_consumer, describe_streaming_consumer);
-       dependencies.consumer_registry->register_preconfigured_consumer_factory(L"file", create_preconfigured_consumer);
-       dependencies.consumer_registry->register_preconfigured_consumer_factory(L"stream", create_preconfigured_streaming_consumer);
+
+       dependencies.consumer_registry->register_consumer_factory(L"FFmpeg Consumer", create_ffmpeg_consumer, describe_ffmpeg_consumer);
+       dependencies.consumer_registry->register_preconfigured_consumer_factory(L"ffmpeg", create_preconfigured_ffmpeg_consumer);
        dependencies.producer_registry->register_producer_factory(L"FFmpeg Producer", boost::bind(&create_producer, _1, _2, info_repo), describe_producer);
        dependencies.producer_registry->register_thumbnail_producer(boost::bind(&create_thumbnail_frame, _1, _2, info_repo));
 
@@ -331,8 +287,7 @@ void init(core::module_dependencies dependencies)
 void uninit()
 {
        avfilter_uninit();
-    avformat_network_deinit();
+       avformat_network_deinit();
        av_lockmgr_register(nullptr);
 }
-
 }}
index 08a098da45bdc8f2f0271e968379c90e42c7e5cd..f9569dc6baefb2b6671840e373a2a119d1b9b367 100644 (file)
@@ -29,6 +29,7 @@ namespace caspar { namespace ffmpeg {
 void init(core::module_dependencies dependencies);
 void uninit();
 std::shared_ptr<void> temporary_enable_quiet_logging_for_thread(bool enable);
+void enable_quiet_logging_for_thread();
 bool is_logging_quiet_for_thread();
 
 }}
index de51f14f65a718c3bfe932e1cd24019016459e6f..89b7dc70b3f86a90af2ac4ecff28d928420241d0 100644 (file)
 #include "ffmpeg_error.h"
 
 #include <common/utf.h>
+#include <common/log.h>
 
 #pragma warning(disable: 4146)
 
-extern "C" 
+extern "C"
 {
 #include <libavutil/error.h>
 }
 
 namespace caspar { namespace ffmpeg {
-       
+
 std::string av_error_str(int errn)
 {
        char buf[256];
@@ -50,83 +51,145 @@ void throw_on_ffmpeg_error(int ret, const char* source, const char* func, const
        switch(ret)
        {
        case AVERROR_BSF_NOT_FOUND:
-               ::boost::exception_detail::throw_exception_(averror_bsf_not_found()<<                                                                           
-                       msg_info(av_error_str(ret)) <<                                                  
-                       source_info(source) <<                                          
-                       boost::errinfo_api_function(func) <<                                    
-                       boost::errinfo_errno(AVUNERROR(ret)), local_func, file, line);  
+               ::boost::exception_detail::throw_exception_(
+                       averror_bsf_not_found()
+                               << msg_info(av_error_str(ret))
+                               << source_info(source)
+                               << boost::errinfo_api_function(func)
+                               << boost::errinfo_errno(AVUNERROR(ret))
+                               << call_stack_info(caspar::get_call_stack())
+                               << context_info(get_context()),
+                       local_func, log::remove_source_prefix(file), line);
        case AVERROR_DECODER_NOT_FOUND:
-               ::boost::exception_detail::throw_exception_(averror_decoder_not_found()<<                                                                               
-                       msg_info(av_error_str(ret)) <<                                                  
-                       source_info(source) <<                                          
-                       boost::errinfo_api_function(func) <<                                    
-                       boost::errinfo_errno(AVUNERROR(ret)), local_func, file, line);
+               ::boost::exception_detail::throw_exception_(
+                       averror_decoder_not_found()
+                               << msg_info(av_error_str(ret))
+                               << source_info(source)
+                               << boost::errinfo_api_function(func)
+                               << boost::errinfo_errno(AVUNERROR(ret))
+                               << call_stack_info(caspar::get_call_stack())
+                               << context_info(get_context()),
+                       local_func, log::remove_source_prefix(file), line);
        case AVERROR_DEMUXER_NOT_FOUND:
-               ::boost::exception_detail::throw_exception_(averror_demuxer_not_found()<<                                                                               
-                       msg_info(av_error_str(ret)) <<                                                  
-                       source_info(source) <<                                          
-                       boost::errinfo_api_function(func) <<                                    
-                       boost::errinfo_errno(AVUNERROR(ret)), local_func, file, line);
+               ::boost::exception_detail::throw_exception_(
+                       averror_demuxer_not_found()
+                               << msg_info(av_error_str(ret))
+                               << source_info(source)
+                               << boost::errinfo_api_function(func)
+                               << boost::errinfo_errno(AVUNERROR(ret))
+                               << call_stack_info(caspar::get_call_stack())
+                               << context_info(get_context()),
+                       local_func, log::remove_source_prefix(file), line);
        case AVERROR_ENCODER_NOT_FOUND:
-               ::boost::exception_detail::throw_exception_(averror_encoder_not_found()<<                                                                               
-                       msg_info(av_error_str(ret)) <<                                                  
-                       source_info(source) <<                                          
-                       boost::errinfo_api_function(func) <<                                    
-                       boost::errinfo_errno(AVUNERROR(ret)), local_func, file, line);  
-       case AVERROR_EOF:       
-               ::boost::exception_detail::throw_exception_(averror_eof()<<                                                                             
-                       msg_info(av_error_str(ret)) <<                                                  
-                       source_info(source) <<                                          
-                       boost::errinfo_api_function(func) <<                                    
-                       boost::errinfo_errno(AVUNERROR(ret)), local_func, file, line);
-       case AVERROR_EXIT:                              
-               ::boost::exception_detail::throw_exception_(averror_exit()<<                                                                            
-                       msg_info(av_error_str(ret)) <<                                                  
-                       source_info(source) <<                                          
-                       boost::errinfo_api_function(func) <<                                    
-                       boost::errinfo_errno(AVUNERROR(ret)), local_func, file, line);
-       case AVERROR_FILTER_NOT_FOUND:                          
-               ::boost::exception_detail::throw_exception_(averror_filter_not_found()<<                                                                                
-                       msg_info(av_error_str(ret)) <<                                                  
-                       source_info(source) <<                                          
-                       boost::errinfo_api_function(func) <<                                    
-                       boost::errinfo_errno(AVUNERROR(ret)), local_func, file, line);
-       case AVERROR_MUXER_NOT_FOUND:   
-               ::boost::exception_detail::throw_exception_(averror_muxer_not_found()<<                                                                         
-                       msg_info(av_error_str(ret)) <<                                                  
-                       source_info(source) <<                                          
-                       boost::errinfo_api_function(func) <<                                    
-                       boost::errinfo_errno(AVUNERROR(ret)), local_func, file, line);
-       case AVERROR_OPTION_NOT_FOUND:  
-               ::boost::exception_detail::throw_exception_(averror_option_not_found()<<                                                                                
-                       msg_info(av_error_str(ret)) <<                                                  
-                       source_info(source) <<                                          
-                       boost::errinfo_api_function(func) <<                                    
-                       boost::errinfo_errno(AVUNERROR(ret)), local_func, file, line);
-       case AVERROR_PATCHWELCOME:      
-               ::boost::exception_detail::throw_exception_(averror_patchwelcome()<<                                                                            
-                       msg_info(av_error_str(ret)) <<                                                  
-                       source_info(source) <<                                          
-                       boost::errinfo_api_function(func) <<                                    
-                       boost::errinfo_errno(AVUNERROR(ret)), local_func, file, line);
-       case AVERROR_PROTOCOL_NOT_FOUND:        
-               ::boost::exception_detail::throw_exception_(averror_protocol_not_found()<<                                                                              
-                       msg_info(av_error_str(ret)) <<                                                  
-                       source_info(source) <<                                          
-                       boost::errinfo_api_function(func) <<                                    
-                       boost::errinfo_errno(AVUNERROR(ret)), local_func, file, line);
+               ::boost::exception_detail::throw_exception_(
+                       averror_encoder_not_found()
+                               << msg_info(av_error_str(ret))
+                               << source_info(source)
+                               << boost::errinfo_api_function(func)
+                               << boost::errinfo_errno(AVUNERROR(ret))
+                               << call_stack_info(caspar::get_call_stack())
+                               << context_info(get_context()),
+                       local_func, log::remove_source_prefix(file), line);
+       case AVERROR_EOF:
+               ::boost::exception_detail::throw_exception_(
+                       averror_eof()
+                               << msg_info(av_error_str(ret))
+                               << source_info(source)
+                               << boost::errinfo_api_function(func)
+                               << boost::errinfo_errno(AVUNERROR(ret))
+                               << call_stack_info(caspar::get_call_stack())
+                               << context_info(get_context()),
+                       local_func, log::remove_source_prefix(file), line);
+       case AVERROR_EXIT:
+               ::boost::exception_detail::throw_exception_(
+                       averror_exit()
+                               << msg_info(av_error_str(ret))
+                               << source_info(source)
+                               << boost::errinfo_api_function(func)
+                               << boost::errinfo_errno(AVUNERROR(ret))
+                               << call_stack_info(caspar::get_call_stack())
+                               << context_info(get_context()),
+                       local_func, log::remove_source_prefix(file), line);
+       case AVERROR_FILTER_NOT_FOUND:
+               ::boost::exception_detail::throw_exception_(
+                       averror_filter_not_found()
+                               << msg_info(av_error_str(ret))
+                               << source_info(source)
+                               << boost::errinfo_api_function(func)
+                               << boost::errinfo_errno(AVUNERROR(ret))
+                               << call_stack_info(caspar::get_call_stack())
+                               << context_info(get_context()),
+                       local_func, log::remove_source_prefix(file), line);
+       case AVERROR_MUXER_NOT_FOUND:
+               ::boost::exception_detail::throw_exception_(
+                       averror_muxer_not_found()
+                               << msg_info(av_error_str(ret))
+                               << source_info(source)
+                               << boost::errinfo_api_function(func)
+                               << boost::errinfo_errno(AVUNERROR(ret))
+                               << call_stack_info(caspar::get_call_stack())
+                               << context_info(get_context()),
+                       local_func, log::remove_source_prefix(file), line);
+       case AVERROR_OPTION_NOT_FOUND:
+               ::boost::exception_detail::throw_exception_(
+                       averror_option_not_found()
+                               << msg_info(av_error_str(ret))
+                               << source_info(source)
+                               << boost::errinfo_api_function(func)
+                               << boost::errinfo_errno(AVUNERROR(ret))
+                               << call_stack_info(caspar::get_call_stack())
+                               << context_info(get_context()),
+                       local_func, log::remove_source_prefix(file), line);
+       case AVERROR_PATCHWELCOME:
+               ::boost::exception_detail::throw_exception_(
+                       averror_patchwelcome()
+                               << msg_info(av_error_str(ret))
+                               << source_info(source)
+                               << boost::errinfo_api_function(func)
+                               << boost::errinfo_errno(AVUNERROR(ret))
+                               << call_stack_info(caspar::get_call_stack())
+                               << context_info(get_context()),
+                       local_func, log::remove_source_prefix(file), line);
+       case AVERROR_PROTOCOL_NOT_FOUND:
+               ::boost::exception_detail::throw_exception_(
+                       averror_protocol_not_found()
+                               << msg_info(av_error_str(ret))
+                               << source_info(source)
+                               << boost::errinfo_api_function(func)
+                               << boost::errinfo_errno(AVUNERROR(ret))
+                               << call_stack_info(caspar::get_call_stack())
+                               << context_info(get_context()),
+                       local_func, log::remove_source_prefix(file), line);
        case AVERROR_STREAM_NOT_FOUND:
-               ::boost::exception_detail::throw_exception_(averror_stream_not_found()<<                                                                                
-                       msg_info(av_error_str(ret)) <<                                                  
-                       source_info(source) <<                                          
-                       boost::errinfo_api_function(func) <<                                    
-                       boost::errinfo_errno(AVUNERROR(ret)), local_func, file, line);
+               ::boost::exception_detail::throw_exception_(
+                       averror_stream_not_found()
+                               << msg_info(av_error_str(ret))
+                               << source_info(source)
+                               << boost::errinfo_api_function(func)
+                               << boost::errinfo_errno(AVUNERROR(ret))
+                               << call_stack_info(caspar::get_call_stack())
+                               << context_info(get_context()),
+                       local_func, log::remove_source_prefix(file), line);
+       case AVUNERROR(EINVAL):
+               ::boost::exception_detail::throw_exception_(
+                       averror_invalid_argument()
+                               << msg_info("Invalid FFmpeg argument given")
+                               << source_info(source)
+                               << boost::errinfo_api_function(func)
+                               << boost::errinfo_errno(AVUNERROR(ret))
+                               << call_stack_info(caspar::get_call_stack())
+                               << context_info(get_context()),
+                       local_func, log::remove_source_prefix(file), line);
        default:
-               ::boost::exception_detail::throw_exception_(ffmpeg_error()<<                                                                            
-                       msg_info(av_error_str(ret)) <<                                                  
-                       source_info(source) <<                                          
-                       boost::errinfo_api_function(func) <<                                    
-                       boost::errinfo_errno(AVUNERROR(ret)), local_func, file, line);
+               ::boost::exception_detail::throw_exception_(
+                       ffmpeg_error()
+                               << msg_info(av_error_str(ret))
+                               << source_info(source)
+                               << boost::errinfo_api_function(func)
+                               << boost::errinfo_errno(AVUNERROR(ret))
+                               << call_stack_info(caspar::get_call_stack())
+                               << context_info(get_context()),
+                       local_func, log::remove_source_prefix(file), line);
        }
 }
 
@@ -135,4 +198,4 @@ void throw_on_ffmpeg_error(int ret, const std::wstring& source, const char* func
        throw_on_ffmpeg_error(ret, u8(source).c_str(), func, local_func, file, line);
 }
 
-}}
\ No newline at end of file
+}}
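
Each rewritten throw site above now attaches call_stack_info(caspar::get_call_stack()) and context_info(get_context()) in addition to the original msg_info/source_info/errinfo decorations. Below is a minimal sketch of how a caller could read those decorations back, assuming (as is usual for boost::error_info tags) that caspar_exception ultimately derives from boost::exception; the report() helper and its include set are illustrative and not part of the commit:

	#include <boost/exception/all.hpp>  // boost::get_error_info, boost::diagnostic_information
	#include <iostream>

	void report(const caspar::ffmpeg::ffmpeg_error& e)
	{
		// errinfo_api_function was attached via boost::errinfo_api_function(func) above.
		if (auto api = boost::get_error_info<boost::errinfo_api_function>(e))
			std::cerr << "FFmpeg call failed in: " << *api << "\n";

		// diagnostic_information() renders every attached error_info,
		// including the new call_stack_info and context_info fields.
		std::cerr << boost::diagnostic_information(e);
	}

In the server itself the same information reaches the log through CASPAR_LOG_CURRENT_EXCEPTION (seen further down in the deleted producer code); the sketch only shows the underlying Boost.Exception mechanics.
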
index b615631cbfe6ded6809961451bb88c1198c788f1..0c70667b1f7cb75ca53bb1ec0f0000dd9d7cd2de 100644 (file)
 namespace caspar { namespace ffmpeg {
 
 struct ffmpeg_error : virtual caspar_exception{};
+struct ffmpeg_user_error : virtual user_error {};
 struct averror_bsf_not_found : virtual ffmpeg_error{};
-struct averror_decoder_not_found : virtual ffmpeg_error{};
-struct averror_demuxer_not_found : virtual ffmpeg_error{};
-struct averror_encoder_not_found : virtual ffmpeg_error{};
+struct averror_decoder_not_found : virtual ffmpeg_user_error {};
+struct averror_demuxer_not_found : virtual ffmpeg_user_error {};
+struct averror_encoder_not_found : virtual ffmpeg_user_error {};
 struct averror_eof : virtual ffmpeg_error{};
 struct averror_exit : virtual ffmpeg_error{};
-struct averror_filter_not_found : virtual ffmpeg_error{};
-struct averror_muxer_not_found : virtual ffmpeg_error{};
-struct averror_option_not_found : virtual ffmpeg_error{};
+struct averror_filter_not_found : virtual ffmpeg_user_error {};
+struct averror_muxer_not_found : virtual ffmpeg_user_error {};
+struct averror_option_not_found : virtual ffmpeg_user_error {};
 struct averror_patchwelcome : virtual ffmpeg_error{};
-struct averror_protocol_not_found : virtual ffmpeg_error{};
+struct averror_protocol_not_found : virtual ffmpeg_user_error {};
 struct averror_stream_not_found : virtual ffmpeg_error{};
+struct averror_invalid_argument : virtual ffmpeg_user_error {};
 
 std::string av_error_str(int errn);
 
@@ -53,7 +55,7 @@ void throw_on_ffmpeg_error(int ret, const std::wstring& source, const char* func
 #define THROW_ON_ERROR_STR(call) THROW_ON_ERROR_STR_(call)
 
 #define THROW_ON_ERROR(ret, func, source) \
-               throw_on_ffmpeg_error(ret, source, func, __FUNCTION__, __FILE__, __LINE__);             
+               throw_on_ffmpeg_error(ret, source, func, __FUNCTION__, __FILE__, __LINE__);
 
 #define THROW_ON_ERROR2(call, source)                                                                          \
        [&]() -> int                                                                                                                    \
@@ -76,7 +78,7 @@ void throw_on_ffmpeg_error(int ret, const std::wstring& source, const char* func
        }()
 
 #define FF_RET(ret, func) \
-               caspar::ffmpeg::throw_on_ffmpeg_error(ret, L"", func, __FUNCTION__, __FILE__, __LINE__);                
+               caspar::ffmpeg::throw_on_ffmpeg_error(ret, L"", func, __FUNCTION__, __FILE__, __LINE__);
 
 #define FF(call)                                                                               \
        [&]() -> int                                                                                                            \
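
The header now splits the hierarchy: codec/muxer/option/protocol lookups and EINVAL map to ffmpeg_user_error (bad input), while EOF, exit and the remaining codes stay plain ffmpeg_error, and the THROW_ON_ERROR2 / FF_RET / FF wrappers keep feeding return codes into throw_on_ffmpeg_error. A hedged usage sketch follows, not taken from the commit; avformat_open_input and avformat_close_input are real FFmpeg calls, while the surrounding function and catch policy are illustrative:

	#include "ffmpeg_error.h"  // the header shown above
	#include <string>

	extern "C" {
	#include <libavformat/avformat.h>
	}

	void open_clip(const std::wstring& source, const std::string& path)
	{
		try
		{
			AVFormatContext* ctx = nullptr;
			// A negative return code is routed through throw_on_ffmpeg_error(),
			// which throws one of the typed exceptions declared above.
			THROW_ON_ERROR2(avformat_open_input(&ctx, path.c_str(), nullptr, nullptr), source);
			avformat_close_input(&ctx);
		}
		catch (const caspar::ffmpeg::ffmpeg_user_error&)
		{
			// averror_invalid_argument, averror_*_not_found: report as a user mistake.
		}
		catch (const caspar::ffmpeg::ffmpeg_error&)
		{
			// averror_eof, averror_exit, ...: treat as a runtime failure with full diagnostics.
		}
	}
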
diff --git a/modules/ffmpeg/ffmpeg_pipeline.cpp b/modules/ffmpeg/ffmpeg_pipeline.cpp
deleted file mode 100644 (file)
index 0d46908..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
-*
-* This file is part of CasparCG (www.casparcg.com).
-*
-* CasparCG is free software: you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation, either version 3 of the License, or
-* (at your option) any later version.
-*
-* CasparCG is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-* GNU General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
-*
-* Author: Helge Norberg, helge.norberg@svt.se
-*/
-
-#include "StdAfx.h"
-
-#include "ffmpeg_pipeline.h"
-#include "ffmpeg_pipeline_backend.h"
-#include "ffmpeg_pipeline_backend_internal.h"
-
-#include <core/frame/draw_frame.h>
-#include <core/video_format.h>
-
-namespace caspar { namespace ffmpeg {
-
-ffmpeg_pipeline::ffmpeg_pipeline()
-       : impl_(create_internal_pipeline())
-{
-}
-
-ffmpeg_pipeline                        ffmpeg_pipeline::graph(spl::shared_ptr<caspar::diagnostics::graph> g)                                                                                                   { impl_->graph(std::move(g)); return *this; }
-ffmpeg_pipeline                        ffmpeg_pipeline::from_file(std::string filename)                                                                                                                                                { impl_->from_file(std::move(filename)); return *this; }
-ffmpeg_pipeline                        ffmpeg_pipeline::from_memory_only_audio(int num_channels, int samplerate)                                                                                               { impl_->from_memory_only_audio(num_channels, samplerate); return *this; }
-ffmpeg_pipeline                        ffmpeg_pipeline::from_memory_only_video(int width, int height, boost::rational<int> framerate)                                                  { impl_->from_memory_only_video(width, height, std::move(framerate)); return *this; }
-ffmpeg_pipeline                        ffmpeg_pipeline::from_memory(int num_channels, int samplerate, int width, int height, boost::rational<int> framerate)   { impl_->from_memory(num_channels, samplerate, width, height, std::move(framerate)); return *this; }
-ffmpeg_pipeline                        ffmpeg_pipeline::start_frame(std::uint32_t frame)                                                                                                                                               { impl_->start_frame(frame); return *this; }
-std::uint32_t                  ffmpeg_pipeline::start_frame() const                                                                                                                                                                    { return impl_->start_frame(); }
-ffmpeg_pipeline                        ffmpeg_pipeline::length(std::uint32_t frames)                                                                                                                                                   { impl_->length(frames); return *this; }
-std::uint32_t                  ffmpeg_pipeline::length() const                                                                                                                                                                                 { return impl_->length(); }
-ffmpeg_pipeline                        ffmpeg_pipeline::seek(std::uint32_t frame)                                                                                                                                                              { impl_->seek(frame); return *this; }
-ffmpeg_pipeline                        ffmpeg_pipeline::loop(bool value)                                                                                                                                                                               { impl_->loop(value); return *this; }
-bool                                   ffmpeg_pipeline::loop() const                                                                                                                                                                                   { return impl_->loop(); }
-std::string                            ffmpeg_pipeline::source_filename() const                                                                                                                                                                { return impl_->source_filename(); }
-ffmpeg_pipeline                        ffmpeg_pipeline::vfilter(std::string filter)                                                                                                                                                    { impl_->vfilter(std::move(filter)); return *this; }
-ffmpeg_pipeline                        ffmpeg_pipeline::afilter(std::string filter)                                                                                                                                                    { impl_->afilter(std::move(filter)); return *this; }
-int                                            ffmpeg_pipeline::width() const                                                                                                                                                                                  { return impl_->width(); }
-int                                            ffmpeg_pipeline::height() const                                                                                                                                                                                 { return impl_->height(); }
-boost::rational<int>   ffmpeg_pipeline::framerate() const                                                                                                                                                                              { return impl_->framerate(); }
-bool                                   ffmpeg_pipeline::progressive() const                                                                                                                                                                    { return impl_->progressive(); }
-ffmpeg_pipeline                        ffmpeg_pipeline::to_memory(spl::shared_ptr<core::frame_factory> factory, core::video_format_desc format)                                { impl_->to_memory(std::move(factory), std::move(format)); return *this; }
-ffmpeg_pipeline                        ffmpeg_pipeline::to_file(std::string filename)                                                                                                                                                  { impl_->to_file(std::move(filename)); return *this; }
-ffmpeg_pipeline                        ffmpeg_pipeline::vcodec(std::string codec)                                                                                                                                                              { impl_->vcodec(std::move(codec)); return *this; }
-ffmpeg_pipeline                        ffmpeg_pipeline::acodec(std::string codec)                                                                                                                                                              { impl_->acodec(std::move(codec)); return *this; }
-ffmpeg_pipeline                        ffmpeg_pipeline::format(std::string fmt)                                                                                                                                                                { impl_->format(std::move(fmt)); return *this; }
-ffmpeg_pipeline                        ffmpeg_pipeline::start()                                                                                                                                                                                                { impl_->start(); return *this; }
-bool                                   ffmpeg_pipeline::try_push_audio(caspar::array<const std::int32_t> data)                                                                                                 { return impl_->try_push_audio(std::move(data)); }
-bool                                   ffmpeg_pipeline::try_push_video(caspar::array<const std::uint8_t> data)                                                                                                 { return impl_->try_push_video(std::move(data)); }
-core::draw_frame               ffmpeg_pipeline::try_pop_frame()                                                                                                                                                                                { return impl_->try_pop_frame(); }
-std::uint32_t                  ffmpeg_pipeline::last_frame() const                                                                                                                                                                             { return impl_->last_frame(); }
-bool                                   ffmpeg_pipeline::started() const                                                                                                                                                                                { return impl_->started(); }
-void                                   ffmpeg_pipeline::stop()                                                                                                                                                                                                 { impl_->stop(); }
-
-}}
diff --git a/modules/ffmpeg/ffmpeg_pipeline.h b/modules/ffmpeg/ffmpeg_pipeline.h
deleted file mode 100644 (file)
index 2508228..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
-*
-* This file is part of CasparCG (www.casparcg.com).
-*
-* CasparCG is free software: you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation, either version 3 of the License, or
-* (at your option) any later version.
-*
-* CasparCG is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-* GNU General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
-*
-* Author: Helge Norberg, helge.norberg@svt.se
-*/
-
-#pragma once
-
-#include <common/memory.h>
-#include <common/array.h>
-
-#include <core/fwd.h>
-
-#include <boost/rational.hpp>
-
-#include <string>
-#include <functional>
-#include <cstdint>
-
-FORWARD2(caspar, diagnostics, class graph);
-
-namespace caspar { namespace ffmpeg {
-
-struct ffmpeg_pipeline_backend;
-
-class ffmpeg_pipeline
-{
-public:
-       ffmpeg_pipeline();
-
-       ffmpeg_pipeline                 graph(spl::shared_ptr<caspar::diagnostics::graph> g);
-
-       ffmpeg_pipeline                 from_file(std::string filename);
-       ffmpeg_pipeline                 from_memory_only_audio(int num_channels, int samplerate);
-       ffmpeg_pipeline                 from_memory_only_video(int width, int height, boost::rational<int> framerate);
-       ffmpeg_pipeline                 from_memory(int num_channels, int samplerate, int width, int height, boost::rational<int> framerate);
-
-       ffmpeg_pipeline                 start_frame(std::uint32_t frame);
-       std::uint32_t                   start_frame() const;
-       ffmpeg_pipeline                 length(std::uint32_t frames);
-       std::uint32_t                   length() const;
-       ffmpeg_pipeline                 seek(std::uint32_t frame);
-       ffmpeg_pipeline                 loop(bool value);
-       bool                                    loop() const;
-       std::string                             source_filename() const;
-
-       ffmpeg_pipeline                 vfilter(std::string filter);
-       ffmpeg_pipeline                 afilter(std::string filter);
-       int                                             width() const;
-       int                                             height() const;
-       boost::rational<int>    framerate() const;
-       bool                                    progressive() const;
-
-       ffmpeg_pipeline                 to_memory(spl::shared_ptr<core::frame_factory> factory, core::video_format_desc format);
-       ffmpeg_pipeline                 to_file(std::string filename);
-       ffmpeg_pipeline                 vcodec(std::string codec);
-       ffmpeg_pipeline                 acodec(std::string codec);
-       ffmpeg_pipeline                 format(std::string fmt);
-
-       ffmpeg_pipeline                 start();
-       bool                                    try_push_audio(caspar::array<const std::int32_t> data);
-       bool                                    try_push_video(caspar::array<const std::uint8_t> data);
-       core::draw_frame                try_pop_frame();
-       std::uint32_t                   last_frame() const;
-       bool                                    started() const;
-       void                                    stop();
-
-private:
-       std::shared_ptr<ffmpeg_pipeline_backend> impl_;
-};
-
-}}
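
For reference, a hedged sketch of how the fluent API declared in the deleted header above was driven; the clip name, filter string and the frame_factory/format_desc parameters are hypothetical, and the real call sites lived in the ffmpeg producer/consumer code rather than in a helper like this:

	#include "ffmpeg_pipeline.h"          // the (now removed) header shown above
	#include <core/frame/draw_frame.h>

	namespace caspar {

	void play_clip(spl::shared_ptr<core::frame_factory> frame_factory, core::video_format_desc format_desc)
	{
		// Every setter returns a copy that shares the same impl_, so the calls chain.
		auto pipeline = ffmpeg::ffmpeg_pipeline()
				.from_file("clip.mov")                  // hypothetical input file
				.loop(true)
				.vfilter("yadif=0:-1")                  // hypothetical filter string
				.to_memory(std::move(frame_factory), std::move(format_desc))
				.start();

		while (pipeline.started())                      // stop() from another thread ends the loop
		{
			core::draw_frame frame = pipeline.try_pop_frame();
			// Consume or drop the frame; the real producer code throttled this loop per tick.
			static_cast<void>(frame);
		}
	}

	}
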
diff --git a/modules/ffmpeg/ffmpeg_pipeline_backend.h b/modules/ffmpeg/ffmpeg_pipeline_backend.h
deleted file mode 100644 (file)
index e56e04b..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
-*
-* This file is part of CasparCG (www.casparcg.com).
-*
-* CasparCG is free software: you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation, either version 3 of the License, or
-* (at your option) any later version.
-*
-* CasparCG is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-* GNU General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
-*
-* Author: Helge Norberg, helge.norberg@svt.se
-*/
-
-#include "StdAfx.h"
-
-#include <common/diagnostics/graph.h>
-#include <common/array.h>
-
-#include <core/frame/draw_frame.h>
-
-#include <boost/rational.hpp>
-
-namespace caspar { namespace ffmpeg {
-
-struct ffmpeg_pipeline_backend
-{
-       virtual ~ffmpeg_pipeline_backend() { }
-
-       virtual void                                    graph(spl::shared_ptr<caspar::diagnostics::graph> g) = 0;
-
-       virtual void                                    from_file(std::string filename) = 0;
-       virtual void                                    from_memory_only_audio(int num_channels, int samplerate) = 0;
-       virtual void                                    from_memory_only_video(int width, int height, boost::rational<int> framerate) = 0;
-       virtual void                                    from_memory(int num_channels, int samplerate, int width, int height, boost::rational<int> framerate) = 0;
-
-       virtual void                                    start_frame(std::uint32_t frame) = 0;
-       virtual std::uint32_t                   start_frame() const = 0;
-       virtual void                                    length(std::uint32_t frames) = 0;
-       virtual std::uint32_t                   length() const = 0;
-       virtual void                                    seek(std::uint32_t frame) = 0;
-       virtual void                                    loop(bool value) = 0;
-       virtual bool                                    loop() const = 0;
-       virtual std::string                             source_filename() const = 0;
-
-       virtual void                                    vfilter(std::string filter) = 0;
-       virtual void                                    afilter(std::string filter) = 0;
-       virtual int                                             width() const = 0;
-       virtual int                                             height() const = 0;
-       virtual boost::rational<int>    framerate() const = 0;
-       virtual bool                                    progressive() const = 0;
-       virtual std::uint32_t                   last_frame() const = 0;
-
-       virtual void                                    to_memory(spl::shared_ptr<core::frame_factory> factory, core::video_format_desc format) = 0;
-       virtual void                                    to_file(std::string filename) = 0;
-       virtual void                                    vcodec(std::string codec) = 0;
-       virtual void                                    acodec(std::string codec) = 0;
-       virtual void                                    format(std::string fmt) = 0;
-
-       virtual void                                    start() = 0;
-       virtual bool                                    try_push_audio(caspar::array<const std::int32_t> data) = 0;
-       virtual bool                                    try_push_video(caspar::array<const std::uint8_t> data) = 0;
-       virtual core::draw_frame                try_pop_frame() = 0;
-       virtual bool                                    started() const = 0;
-       virtual void                                    stop() = 0;
-};
-
-}}
diff --git a/modules/ffmpeg/ffmpeg_pipeline_backend_internal.cpp b/modules/ffmpeg/ffmpeg_pipeline_backend_internal.cpp
deleted file mode 100644 (file)
index f512ebf..0000000
+++ /dev/null
@@ -1,1303 +0,0 @@
-/*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
-*
-* This file is part of CasparCG (www.casparcg.com).
-*
-* CasparCG is free software: you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation, either version 3 of the License, or
-* (at your option) any later version.
-*
-* CasparCG is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-* GNU General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
-*
-* Author: Helge Norberg, helge.norberg@svt.se
-*/
-
-#include "StdAfx.h"
-
-#include "ffmpeg_pipeline_backend.h"
-#include "ffmpeg_pipeline_backend_internal.h"
-#include "producer/input/input.h"
-#include "producer/video/video_decoder.h"
-#include "producer/audio/audio_decoder.h"
-#include "producer/filter/audio_filter.h"
-#include "producer/filter/filter.h"
-#include "producer/util/util.h"
-#include "ffmpeg_error.h"
-#include "ffmpeg.h"
-
-#include <common/diagnostics/graph.h>
-#include <common/os/general_protection_fault.h>
-#include <common/enum_class.h>
-
-#include <core/frame/audio_channel_layout.h>
-#include <core/frame/frame.h>
-#include <core/frame/frame_factory.h>
-#include <core/video_format.h>
-
-#include <functional>
-#include <limits>
-#include <queue>
-#include <map>
-
-#include <tbb/atomic.h>
-#include <tbb/concurrent_queue.h>
-#include <tbb/spin_mutex.h>
-
-#include <boost/thread.hpp>
-#include <boost/optional.hpp>
-#include <boost/exception_ptr.hpp>
-
-namespace caspar { namespace ffmpeg {
-
-std::string to_string(const boost::rational<int>& framerate)
-{
-       return boost::lexical_cast<std::string>(framerate.numerator())
-               + "/" + boost::lexical_cast<std::string>(framerate.denominator()) + " (" + boost::lexical_cast<std::string>(static_cast<double>(framerate.numerator()) / static_cast<double>(framerate.denominator())) + ") fps";
-}
-
-std::vector<int> find_audio_cadence(const boost::rational<int>& framerate)
-{
-       static std::map<boost::rational<int>, std::vector<int>> CADENCES_BY_FRAMERATE = []
-       {
-               std::map<boost::rational<int>, std::vector<int>> result;
-
-               for (core::video_format format : enum_constants<core::video_format>())
-               {
-                       core::video_format_desc desc(format);
-                       boost::rational<int> format_rate(desc.time_scale, desc.duration);
-
-                       result.insert(std::make_pair(format_rate, desc.audio_cadence));
-               }
-
-               return result;
-       }();
-
-       auto exact_match = CADENCES_BY_FRAMERATE.find(framerate);
-
-       if (exact_match != CADENCES_BY_FRAMERATE.end())
-               return exact_match->second;
-
-       boost::rational<int> closest_framerate_diff     = std::numeric_limits<int>::max();
-       boost::rational<int> closest_framerate          = 0;
-
-       for (auto format_framerate : CADENCES_BY_FRAMERATE | boost::adaptors::map_keys)
-       {
-               auto diff = boost::abs(framerate - format_framerate);
-
-               if (diff < closest_framerate_diff)
-               {
-                       closest_framerate_diff  = diff;
-                       closest_framerate               = format_framerate;
-               }
-       }
-
-       if (is_logging_quiet_for_thread())
-               CASPAR_LOG(debug) << "No exact audio cadence match found for framerate " << to_string(framerate)
-                       << "\nClosest match is " << to_string(closest_framerate)
-                       << "\nwhich is a " << to_string(closest_framerate_diff) << " difference.";
-       else
-               CASPAR_LOG(warning) << "No exact audio cadence match found for framerate " << to_string(framerate)
-                       << "\nClosest match is " << to_string(closest_framerate)
-                       << "\nwhich is a " << to_string(closest_framerate_diff) << " difference.";
-
-       return CADENCES_BY_FRAMERATE[closest_framerate];
-}
-
-struct source
-{
-       virtual ~source() { }
-
-       virtual std::wstring                                                    print() const                                                                                   = 0;
-       virtual void                                                                    start()                                                                                                 { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual void                                                                    graph(spl::shared_ptr<caspar::diagnostics::graph> g)    { }
-       virtual void                                                                    stop()                                                                                                  { }
-       virtual void                                                                    start_frame(std::uint32_t frame)                                                { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not seekable.")); }
-       virtual std::uint32_t                                                   start_frame() const                                                                             { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not seekable.")); }
-       virtual void                                                                    loop(bool value)                                                                                { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not seekable.")); }
-       virtual bool                                                                    loop() const                                                                                    { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not seekable.")); }
-       virtual void                                                                    length(std::uint32_t frames)                                                    { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not seekable.")); }
-       virtual std::uint32_t                                                   length() const                                                                                  { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not seekable.")); }
-       virtual std::string                                                             filename() const                                                                                { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print())); }
-       virtual void                                                                    seek(std::uint32_t frame)                                                               { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not seekable.")); }
-       virtual bool                                                                    has_audio() const                                                                               { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual int                                                                             samplerate() const                                                                              { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual bool                                                                    has_video() const                                                                               { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual bool                                                                    eof() const                                                                                             { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual boost::rational<int>                                    framerate() const                                                                               { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual std::uint32_t                                                   frame_number() const                                                                    { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual std::vector<std::shared_ptr<AVFrame>>   get_input_frames_for_streams(AVMediaType type)                  { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-};
-
-struct no_source_selected : public source
-{
-       std::wstring print() const override
-       {
-               return L"[no_source_selected]";
-       }
-};
-
-class file_source : public source
-{
-       std::wstring                                                            filename_;
-       spl::shared_ptr<diagnostics::graph>                     graph_;
-       std::uint32_t                                                           start_frame_    = 0;
-       std::uint32_t                                                           length_                 = std::numeric_limits<std::uint32_t>::max();
-       bool                                                                            loop_                   = false;
-       mutable boost::mutex                                            pointer_mutex_;
-       std::shared_ptr<input>                                          input_;
-       std::vector<spl::shared_ptr<audio_decoder>>     audio_decoders_;
-       std::shared_ptr<video_decoder>                          video_decoder_;
-       bool                                                                            started_                = false;
-public:
-       file_source(std::string filename)
-               : filename_(u16(filename))
-       {
-       }
-
-       std::wstring print() const override
-       {
-               return L"[file_source " + filename_ + L"]";
-       }
-
-       void graph(spl::shared_ptr<caspar::diagnostics::graph> g) override
-       {
-               graph_ = std::move(g);
-       }
-
-       void start() override
-       {
-               boost::lock_guard<boost::mutex> lock(pointer_mutex_);
-               bool thumbnail_mode = is_logging_quiet_for_thread();
-               input_.reset(new input(graph_, filename_, loop_, start_frame_, length_, thumbnail_mode));
-
-               for (int i = 0; i < input_->num_audio_streams(); ++i)
-               {
-                       try
-                       {
-                               audio_decoders_.push_back(spl::make_shared<audio_decoder>(*input_, core::video_format_desc(), i));
-                       }
-                       catch (...)
-                       {
-                               if (is_logging_quiet_for_thread())
-                               {
-                                       CASPAR_LOG_CURRENT_EXCEPTION_AT_LEVEL(debug);
-                                       CASPAR_LOG(info) << print() << " Failed to open audio-stream. Turn on log level debug to see more information.";
-                               }
-                               else
-                               {
-                                       CASPAR_LOG_CURRENT_EXCEPTION();
-                                       CASPAR_LOG(warning) << print() << " Failed to open audio-stream.";
-                               }
-                       }
-               }
-
-               if (audio_decoders_.empty())
-                       CASPAR_LOG(debug) << print() << " No audio-stream found. Running without audio.";
-
-               try
-               {
-                       video_decoder_.reset(new video_decoder(*input_, false));
-               }
-               catch (averror_stream_not_found&)
-               {
-                       CASPAR_LOG(debug) << print() << " No video-stream found. Running without video.";
-               }
-               catch (...)
-               {
-                       if (is_logging_quiet_for_thread())
-                       {
-                               CASPAR_LOG_CURRENT_EXCEPTION_AT_LEVEL(debug);
-                               CASPAR_LOG(info) << print() << " Failed to open video-stream. Running without video. Turn on log level debug to see more information.";
-                       }
-                       else
-                       {
-                               CASPAR_LOG_CURRENT_EXCEPTION();
-                               CASPAR_LOG(warning) << print() << " Failed to open video-stream. Running without video.";
-                       }
-               }
-
-               started_ = true;
-       }
-
-       void stop() override
-       {
-               started_ = false;
-       }
-
-       void start_frame(std::uint32_t frame) override 
-       {
-               start_frame_ = frame;
-
-               auto i = get_input();
-               if (i)
-                       i->start(frame);
-       }
-
-       std::uint32_t start_frame() const override
-       {
-               return start_frame_;
-       }
-
-       void loop(bool value) override
-       {
-               loop_ = value;
-
-               auto i = get_input();
-               if (i)
-                       i->loop(value);
-       }
-
-       bool loop() const override
-       {
-               return loop_;
-       }
-
-       void length(std::uint32_t frames) override
-       {
-               length_ = frames;
-
-               auto i = get_input();
-               if (i)
-                       i->length(frames);
-       }
-
-       std::uint32_t length() const override
-       {
-               auto v = get_video_decoder();
-
-               if (v)
-                       return v->nb_frames();
-
-               auto a = get_audio_decoders();
-
-               if (!a.empty())
-                       return a.at(0)->nb_frames(); // Should be ok.
-
-               return length_;
-       }
-
-       std::string filename() const override
-       {
-               return u8(filename_);
-       }
-
-       void seek(std::uint32_t frame) override
-       {
-               expect_started();
-               get_input()->seek(frame);
-       }
-
-       bool eof() const override
-       {
-               auto i = get_input();
-               return !i || i->eof();
-       }
-
-       bool has_audio() const override
-       {
-               return !get_audio_decoders().empty();
-       }
-
-       int samplerate() const override
-       {
-               if (get_audio_decoders().empty())
-                       return -1;
-
-               return 48000;
-       }
-
-       bool has_video() const override
-       {
-               return static_cast<bool>(get_video_decoder());
-       }
-
-       boost::rational<int> framerate() const override
-       {
-               auto decoder = get_video_decoder();
-
-               if (!decoder)
-                       return -1;
-
-               return decoder->framerate();
-       }
-
-       std::uint32_t frame_number() const override
-       {
-               auto decoder = get_video_decoder();
-
-               if (!decoder)
-                       return 0;
-
-               return decoder->file_frame_number();
-       }
-
-       std::vector<std::shared_ptr<AVFrame>> get_input_frames_for_streams(AVMediaType type) override
-       {
-               auto a_decoders = get_audio_decoders();
-               auto v_decoder  = get_video_decoder();
-               expect_started();
-
-               if (type == AVMediaType::AVMEDIA_TYPE_AUDIO && !a_decoders.empty())
-               {
-                       std::vector<std::shared_ptr<AVFrame>> frames;
-
-                       for (auto& a_decoder : a_decoders)
-                       {
-                               std::shared_ptr<AVFrame> frame;
-
-                               for (int i = 0; i < 64; ++i)
-                               {
-                                       frame = (*a_decoder)();
-
-                                       if (frame && frame->data[0])
-                                               break;
-                                       else
-                                               frame.reset();
-                               }
-
-                               frames.push_back(std::move(frame));
-                       }
-
-                       return frames;
-               }
-               else if (type == AVMediaType::AVMEDIA_TYPE_VIDEO && v_decoder)
-               {
-                       std::shared_ptr<AVFrame> frame;
-
-                       for (int i = 0; i < 128; ++i)
-                       {
-                               frame = (*v_decoder)();
-
-                               if (frame && frame->data[0])
-                                       return { frame };
-                       }
-               }
-               else
-                       CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(
-                               print() + L" Unhandled media type " + boost::lexical_cast<std::wstring>(type)));
-
-               return { };
-       }
-private:
-       void expect_started() const
-       {
-               if (!started_)
-                       CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" Not started."));
-       }
-
-       std::shared_ptr<input> get_input() const
-       {
-               boost::lock_guard<boost::mutex> lock(pointer_mutex_);
-               return input_;
-       }
-
-       std::vector<spl::shared_ptr<audio_decoder>> get_audio_decoders() const
-       {
-               boost::lock_guard<boost::mutex> lock(pointer_mutex_);
-               return audio_decoders_;
-       }
-
-       std::shared_ptr<video_decoder> get_video_decoder() const
-       {
-               boost::lock_guard<boost::mutex> lock(pointer_mutex_);
-               return video_decoder_;
-       }
-};
-
-class memory_source : public source
-{
-       int                                                                                                                     samplerate_             = -1;
-       int                                                                                                                     num_channels_   = -1;
-       int                                                                                                                     width_                  = -1;
-       int                                                                                                                     height_                 = -1;
-       boost::rational<int>                                                                            framerate_              = -1;
-
-       tbb::atomic<bool>                                                                                       running_;
-       tbb::concurrent_bounded_queue<caspar::array<const int32_t>>     audio_frames_;
-       tbb::concurrent_bounded_queue<caspar::array<const uint8_t>>     video_frames_;
-       int64_t                                                                                                         audio_pts_              = 0;
-       int64_t                                                                                                         video_pts_              = 0;
-public:
-       memory_source()
-       {
-               running_ = false;
-               video_frames_.set_capacity(1);
-               audio_frames_.set_capacity(1);
-       }
-
-       ~memory_source()
-       {
-               stop();
-       }
-
-       void graph(spl::shared_ptr<caspar::diagnostics::graph> g) override
-       {
-       }
-
-       std::wstring print() const override
-       {
-               return L"[memory_source]";
-       }
-
-       void enable_audio(int samplerate, int num_channels)
-       {
-               samplerate_ = samplerate;
-               num_channels_ = num_channels;
-       }
-
-       void enable_video(int width, int height, boost::rational<int> framerate)
-       {
-               width_ = width;
-               height_ = height;
-       }
-
-       void start() override
-       {
-               running_ = true;
-       }
-
-       void stop() override
-       {
-               running_ = false;
-               video_frames_.try_push(caspar::array<const uint8_t>());
-               audio_frames_.try_push(caspar::array<const int32_t>());
-       }
-
-       bool has_audio() const override
-       {
-               return samplerate_ != -1;
-       }
-
-       int samplerate() const override
-       {
-               return samplerate_;
-       }
-
-       bool has_video() const override
-       {
-               return width_ != -1;
-       }
-
-       bool eof() const override
-       {
-               return !running_;
-       }
-
-       boost::rational<int> framerate() const override
-       {
-               return framerate_;
-       }
-       
-       bool try_push_audio(caspar::array<const std::int32_t> data)
-       {
-               if (!has_audio())
-                       CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" audio not enabled."));
-
-               if (data.empty() || data.size() % num_channels_ != 0)
-                       CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info(print() + L" audio with incorrect number of channels submitted."));
-
-               return audio_frames_.try_push(std::move(data));
-       }
-
-       bool try_push_video(caspar::array<const std::uint8_t> data)
-       {
-               if (!has_video())
-                       CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" video not enabled."));
-
-               if (data.size() != width_ * height_ * 4)
-                       CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info(print() + L" video with incorrect size submitted."));
-
-               return video_frames_.try_push(std::move(data));
-       }
-
-       std::vector<std::shared_ptr<AVFrame>> get_input_frames_for_streams(AVMediaType type) override
-       {
-               if (!running_)
-                       CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not running."));
-
-               if (type == AVMediaType::AVMEDIA_TYPE_AUDIO && has_audio())
-               {
-                       caspar::array<const std::int32_t> samples;
-                       audio_frames_.pop(samples);
-
-                       if (samples.empty())
-                               return { };
-                       
-                       spl::shared_ptr<AVFrame> av_frame(av_frame_alloc(), [samples](AVFrame* p) { av_frame_free(&p); });
-
-                       av_frame->channels                      = num_channels_;
-                       av_frame->channel_layout        = av_get_default_channel_layout(num_channels_);
-                       av_frame->sample_rate           = samplerate_;
-                       av_frame->nb_samples            = static_cast<int>(samples.size()) / num_channels_;
-                       av_frame->format                        = AV_SAMPLE_FMT_S32;
-                       av_frame->pts                           = audio_pts_;
-
-                       audio_pts_ += av_frame->nb_samples;
-
-                       FF(av_samples_fill_arrays(
-                                       av_frame->extended_data,
-                                       av_frame->linesize,
-                                       reinterpret_cast<const std::uint8_t*>(&*samples.begin()),
-                                       av_frame->channels,
-                                       av_frame->nb_samples,
-                                       static_cast<AVSampleFormat>(av_frame->format),
-                                       16));
-
-                       return { av_frame };
-               }
-               else if (type == AVMediaType::AVMEDIA_TYPE_VIDEO && has_video())
-               {
-                       caspar::array<const std::uint8_t> data;
-                       video_frames_.pop(data);
-
-                       if (data.empty())
-                               return {};
-
-                       spl::shared_ptr<AVFrame> av_frame(av_frame_alloc(), [data](AVFrame* p) { av_frame_free(&p); });
-                       avcodec_get_frame_defaults(av_frame.get());             
-                       
-                       const auto sample_aspect_ratio = boost::rational<int>(width_, height_);
-
-                       av_frame->format                                  = AV_PIX_FMT_BGRA;
-                       av_frame->width                                   = width_;
-                       av_frame->height                                  = height_;
-                       av_frame->sample_aspect_ratio.num = sample_aspect_ratio.numerator();
-                       av_frame->sample_aspect_ratio.den = sample_aspect_ratio.denominator();
-                       av_frame->pts                                     = video_pts_;
-
-                       video_pts_ += 1;
-
-                       FF(av_image_fill_arrays(
-                                       av_frame->data,
-                                       av_frame->linesize,
-                                       data.begin(),
-                                       static_cast<AVPixelFormat>(av_frame->format),
-                                       width_,
-                                       height_,
-                                       1));
-
-                       return { av_frame };
-               }
-               else
-                       CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(
-                               print() + L" Unhandled media type " + boost::lexical_cast<std::wstring>(type)));
-       }
-};
-
-struct sink
-{
-       virtual ~sink() { }
-
-       virtual std::wstring                                    print() const                                                                                                                                   = 0;
-       virtual void                                                    graph(spl::shared_ptr<caspar::diagnostics::graph> g)                                                    { }
-       virtual void                                                    acodec(std::string codec)                                                                                                               { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not an encoder.")); }
-       virtual void                                                    vcodec(std::string codec)                                                                                                               { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not an encoder.")); }
-       virtual void                                                    format(std::string fmt)                                                                                                                 { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not an encoder.")); }
-       virtual void                                                    framerate(boost::rational<int> framerate)                                                                               { CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info(print() + L" not an encoder.")); }
-       virtual void                                                    start(bool has_audio, bool has_video)                                                                                   { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual void                                                    stop()                                                                                                                                                  { }
-       virtual std::vector<AVSampleFormat>             supported_sample_formats() const                                                                                                { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual std::vector<int>                                supported_samplerates() const                                                                                                   { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual std::vector<AVPixelFormat>              supported_pixel_formats() const                                                                                                 { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual int                                                             wanted_num_audio_streams() const                                                                                                { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual boost::optional<int>                    wanted_num_channels_per_stream() const                                                                          { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual boost::optional<AVMediaType>    try_push(AVMediaType type, int stream_index, spl::shared_ptr<AVFrame> frame)    { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-       virtual void                                                    eof()                                                                                                                                                   { CASPAR_THROW_EXCEPTION(not_implemented() << msg_info(print())); }
-};
-
-struct no_sink_selected : public sink
-{
-       std::wstring print() const override
-       {
-               return L"[no_sink_selected]";
-       }
-};
-
-class file_sink : public sink
-{
-       std::wstring                                            filename_;
-       spl::shared_ptr<diagnostics::graph>     graph_;
-public:
-       file_sink(std::string filename)
-               : filename_(u16(std::move(filename)))
-       {
-       }
-
-       std::wstring print() const override
-       {
-               return L"[file_sink " + filename_ + L"]";
-       }
-
-       void graph(spl::shared_ptr<caspar::diagnostics::graph> g) override
-       {
-               graph_ = std::move(g);
-       }
-};
-
-class memory_sink : public sink
-{
-       spl::shared_ptr<core::frame_factory>                    factory_;
-
-       bool                                                                                    has_audio_                      = false;
-       bool                                                                                    has_video_                      = false;
-       std::vector<int>                                                                audio_cadence_;
-       core::audio_channel_layout                                              channel_layout_         = core::audio_channel_layout::invalid();
-       core::mutable_audio_buffer                                              audio_samples_;
-
-       std::queue<std::shared_ptr<AVFrame>>                    video_frames_;
-
-       tbb::concurrent_bounded_queue<core::draw_frame> output_frames_;
-       tbb::atomic<bool>                                                               running_;
-public:
-       memory_sink(spl::shared_ptr<core::frame_factory> factory, core::video_format_desc format)
-               : factory_(std::move(factory))
-               , audio_cadence_(format.audio_cadence)
-       {
-               output_frames_.set_capacity(2);
-               running_ = false;
-               // Note: Uses 1 step rotated cadence for 1001 modes (1602, 1602, 1601, 1602, 1601)
-               // This cadence fills the audio mixer most optimally.
-               boost::range::rotate(audio_cadence_, std::end(audio_cadence_) - 1);
-       }
-
-       ~memory_sink()
-       {
-               stop();
-       }
-
-       std::wstring print() const override
-       {
-               return L"[memory_sink]";
-       }
-
-       void graph(spl::shared_ptr<caspar::diagnostics::graph> g) override
-       {
-       }
-
-       void framerate(boost::rational<int> framerate) override
-       {
-               audio_cadence_ = find_audio_cadence(framerate);
-               // Note: Uses 1 step rotated cadence for 1001 modes (1602, 1602, 1601, 1602, 1601)
-               // This cadence fills the audio mixer most optimally.
-               boost::range::rotate(audio_cadence_, std::end(audio_cadence_) - 1);
-       }
-
-       void start(bool has_audio, bool has_video) override
-       {
-               has_audio_      = has_audio;
-               has_video_      = has_video;
-               running_        = true;
-       }
-
-       void stop() override
-       {
-               running_ = false;
-               try_pop_frame();
-               try_pop_frame();
-       }
-
-       std::vector<AVSampleFormat> supported_sample_formats() const override
-       {
-               return { AVSampleFormat::AV_SAMPLE_FMT_S32 };
-       }
-
-       std::vector<int> supported_samplerates() const override {
-               return { 48000 };
-       }
-
-       std::vector<AVPixelFormat> supported_pixel_formats() const override
-       {
-               return {
-                       AVPixelFormat::AV_PIX_FMT_YUVA420P,
-                       AVPixelFormat::AV_PIX_FMT_YUV444P,
-                       AVPixelFormat::AV_PIX_FMT_YUV422P,
-                       AVPixelFormat::AV_PIX_FMT_YUV420P,
-                       AVPixelFormat::AV_PIX_FMT_YUV411P,
-                       AVPixelFormat::AV_PIX_FMT_BGRA,
-                       AVPixelFormat::AV_PIX_FMT_ARGB,
-                       AVPixelFormat::AV_PIX_FMT_RGBA,
-                       AVPixelFormat::AV_PIX_FMT_ABGR,
-                       AVPixelFormat::AV_PIX_FMT_GRAY8
-               };
-       }
-
-       int wanted_num_audio_streams() const override
-       {
-               return 1;
-       }
-
-       boost::optional<int> wanted_num_channels_per_stream() const
-       {
-               return boost::none;
-       }
-
-       boost::optional<AVMediaType> try_push(AVMediaType type, int stream_index, spl::shared_ptr<AVFrame> av_frame) override
-       {
-               if (!has_audio_ && !has_video_)
-                       CASPAR_THROW_EXCEPTION(invalid_operation());
-
-               if (type == AVMediaType::AVMEDIA_TYPE_AUDIO && av_frame->data[0])
-               {
-                       if (channel_layout_ == core::audio_channel_layout::invalid()) // First audio
-                       {
-                               channel_layout_ = get_audio_channel_layout(av_frame->channels, av_frame->channel_layout, L"");
-
-                               // Insert silence samples so that the audio mixer is guaranteed to be filled.
-                               auto min_num_samples_per_frame  = *boost::min_element(audio_cadence_);
-                               auto max_num_samples_per_frame  = *boost::max_element(audio_cadence_);
-                               auto cadence_safety_samples             = max_num_samples_per_frame - min_num_samples_per_frame;
-                               audio_samples_.resize(channel_layout_.num_channels * cadence_safety_samples, 0);
-                       }
-
-                       auto ptr = reinterpret_cast<int32_t*>(av_frame->data[0]);
-
-                       audio_samples_.insert(audio_samples_.end(), ptr, ptr + av_frame->linesize[0] / sizeof(int32_t));
-               }
-               else if (type == AVMediaType::AVMEDIA_TYPE_VIDEO)
-               {
-                       video_frames_.push(std::move(av_frame));
-               }
-
-               while (true)
-               {
-                       bool enough_audio =
-                               !has_audio_ ||
-                               (channel_layout_ != core::audio_channel_layout::invalid() && audio_samples_.size() >= audio_cadence_.front() * channel_layout_.num_channels);
-                       bool enough_video =
-                               !has_video_ ||
-                               !video_frames_.empty();
-
-                       if (!enough_audio)
-                               return AVMediaType::AVMEDIA_TYPE_AUDIO;
-
-                       if (!enough_video)
-                               return AVMediaType::AVMEDIA_TYPE_VIDEO;
-
-                       core::mutable_audio_buffer audio_data;
-
-                       if (has_audio_)
-                       {
-                               auto begin = audio_samples_.begin();
-                               auto end = begin + audio_cadence_.front() * channel_layout_.num_channels;
-
-                               audio_data.insert(audio_data.begin(), begin, end);
-                               audio_samples_.erase(begin, end);
-                               boost::range::rotate(audio_cadence_, std::begin(audio_cadence_) + 1);
-                       }
-
-                       if (!has_video_) // Audio only
-                       {
-                               core::mutable_frame audio_only_frame(
-                                               { },
-                                               std::move(audio_data),
-                                               this,
-                                               core::pixel_format_desc(core::pixel_format::invalid),
-                                               channel_layout_);
-
-                               output_frames_.push(core::draw_frame(std::move(audio_only_frame)));
-
-                               return AVMediaType::AVMEDIA_TYPE_AUDIO;
-                       }
-
-                       auto output_frame = make_frame(this, spl::make_shared_ptr(video_frames_.front()), *factory_, channel_layout_);
-                       video_frames_.pop();
-                       output_frame.audio_data() = std::move(audio_data);
-
-                       output_frames_.push(core::draw_frame(std::move(output_frame)));
-               }
-       }
-
-       void eof() override
-       {
-               // Drain rest, regardless of it being enough or not.
-               while (!video_frames_.empty() || !audio_samples_.empty())
-               {
-                       core::mutable_audio_buffer audio_data;
-
-                       audio_data.swap(audio_samples_);
-
-                       if (!video_frames_.empty())
-                       {
-                               auto output_frame = make_frame(this, spl::make_shared_ptr(video_frames_.front()), *factory_, channel_layout_);
-                               video_frames_.pop();
-                               output_frame.audio_data() = std::move(audio_data);
-
-                               output_frames_.push(core::draw_frame(std::move(output_frame)));
-                       }
-                       else
-                       {
-                               core::mutable_frame audio_only_frame(
-                                               {},
-                                               std::move(audio_data),
-                                               this,
-                                               core::pixel_format_desc(core::pixel_format::invalid),
-                                               channel_layout_);
-
-                               output_frames_.push(core::draw_frame(std::move(audio_only_frame)));
-                               output_frames_.push(core::draw_frame::empty());
-                       }
-               }
-       }
-
-       core::draw_frame try_pop_frame()
-       {
-               core::draw_frame frame = core::draw_frame::late();
-
-               if (!output_frames_.try_pop(frame) && !running_)
-                       return core::draw_frame::empty();
-
-               return frame;
-       }
-};
-
-struct audio_stream_info
-{
-       int                             num_channels = 0;
-       AVSampleFormat  sampleformat = AVSampleFormat::AV_SAMPLE_FMT_NONE;
-};
-
-struct video_stream_info
-{
-       int                                     width           = 0;
-       int                                     height          = 0;
-       AVPixelFormat           pixelformat     = AVPixelFormat::AV_PIX_FMT_NONE;
-       core::field_mode        fieldmode       = core::field_mode::progressive;
-};
-
-class ffmpeg_pipeline_backend_internal : public ffmpeg_pipeline_backend
-{
-       spl::shared_ptr<diagnostics::graph>                                                             graph_;
-
-       spl::unique_ptr<source>                                                                                 source_                                 = spl::make_unique<no_source_selected>();
-       std::function<bool (caspar::array<const std::int32_t> data)>    try_push_audio_;
-       std::function<bool (caspar::array<const std::uint8_t> data)>    try_push_video_;
-
-       std::vector<audio_stream_info>                                                                  source_audio_streams_;
-       video_stream_info                                                                                               source_video_stream_;
-
-       std::string                                                                                                             afilter_;
-       std::unique_ptr<audio_filter>                                                                   audio_filter_;
-       std::string                                                                                                             vfilter_;
-       std::unique_ptr<filter>                                                                                 video_filter_;
-
-       spl::unique_ptr<sink>                                                                                   sink_                                   = spl::make_unique<no_sink_selected>();
-       std::function<core::draw_frame ()>                                                              try_pop_frame_;
-
-       tbb::atomic<bool>                                                                                               started_;
-       tbb::spin_mutex                                                                                                 exception_mutex_;
-       boost::exception_ptr                                                                                    exception_;
-       boost::thread                                                                                                   thread_;
-public:
-       ffmpeg_pipeline_backend_internal()
-       {
-               started_ = false;
-               diagnostics::register_graph(graph_);
-       }
-
-       ~ffmpeg_pipeline_backend_internal()
-       {
-               stop();
-       }
-
-       void throw_if_error()
-       {
-               boost::lock_guard<tbb::spin_mutex> lock(exception_mutex_);
-
-               if (exception_ != nullptr)
-                       boost::rethrow_exception(exception_);
-       }
-
-       void graph(spl::shared_ptr<caspar::diagnostics::graph> g) override
-       {
-               graph_ = std::move(g);
-               source_->graph(graph_);
-               sink_->graph(graph_);
-       }
-
-       // Source setup
-
-       void from_file(std::string filename) override
-       {
-               source_                 = spl::make_unique<file_source>(std::move(filename));
-               try_push_audio_ = std::function<bool (caspar::array<const std::int32_t>)>();
-               try_push_video_ = std::function<bool (caspar::array<const std::uint8_t>)>();
-               source_->graph(graph_);
-       }
-
-       void from_memory_only_audio(int num_channels, int samplerate) override
-       {
-               auto source             = spl::make_unique<memory_source>();
-               auto source_ptr = source.get();
-               try_push_audio_ = [this, source_ptr](caspar::array<const std::int32_t> data) { return source_ptr->try_push_audio(std::move(data)); };
-               source->enable_audio(samplerate, num_channels);
-
-               source_ = std::move(source);
-               source_->graph(graph_);
-       }
-
-       void from_memory_only_video(int width, int height, boost::rational<int> framerate) override
-       {
-               auto source             = spl::make_unique<memory_source>();
-               auto source_ptr = source.get();
-               try_push_video_ = [this, source_ptr](caspar::array<const std::uint8_t> data) { return source_ptr->try_push_video(std::move(data)); };
-               source->enable_video(width, height, std::move(framerate));
-
-               source_ = std::move(source);
-               source_->graph(graph_);
-       }
-
-       void from_memory(int num_channels, int samplerate, int width, int height, boost::rational<int> framerate) override
-       {
-               auto source             = spl::make_unique<memory_source>();
-               auto source_ptr = source.get();
-               try_push_audio_ = [this, source_ptr](caspar::array<const std::int32_t> data) { return source_ptr->try_push_audio(std::move(data)); };
-               try_push_video_ = [this, source_ptr](caspar::array<const std::uint8_t> data) { return source_ptr->try_push_video(std::move(data)); };
-               source->enable_audio(samplerate, num_channels);
-               source->enable_video(width, height, std::move(framerate));
-
-               source_ = std::move(source);
-               source_->graph(graph_);
-       }
-
-       void                    start_frame(std::uint32_t frame) override       { source_->start_frame(frame);          }
-       std::uint32_t   start_frame() const override                            { return source_->start_frame();        }
-       void                    length(std::uint32_t frames) override           { source_->length(frames);                      }
-       std::uint32_t   length() const override                                         { return source_->length();                     }
-       void                    seek(std::uint32_t frame) override                      { source_->seek(frame);                         }
-       void                    loop(bool value) override                                       { source_->loop(value);                         }
-       bool                    loop() const override                                           { return source_->loop();                       }
-       std::string             source_filename() const override                        { return source_->filename();           }
-
-       // Filter setup
-
-       void vfilter(std::string filter) override
-       {
-               vfilter_ = std::move(filter);
-       }
-
-       void afilter(std::string filter) override
-       {
-               afilter_ = std::move(filter);
-       }
-
-       int width() const override
-       {
-               return source_video_stream_.width;
-       }
-
-       int height() const override
-       {
-               return source_video_stream_.height;
-       }
-
-       boost::rational<int> framerate() const override
-       {
-               bool double_rate = filter::is_double_rate(u16(vfilter_));
-
-               return double_rate ? source_->framerate() * 2 : source_->framerate();
-       }
-
-       bool progressive() const override
-       {
-               return true;//TODO
-       }
-
-       // Sink setup
-
-       void to_memory(spl::shared_ptr<core::frame_factory> factory, core::video_format_desc format) override
-       {
-               auto sink               = spl::make_unique<memory_sink>(std::move(factory), std::move(format));
-               auto sink_ptr   = sink.get();
-               try_pop_frame_  = [sink_ptr] { return sink_ptr->try_pop_frame(); };
-
-               sink_ = std::move(sink);
-               sink_->graph(graph_);
-       }
-
-       void to_file(std::string filename) override
-       {
-               sink_                   = spl::make_unique<file_sink>(std::move(filename));
-               try_pop_frame_  = std::function<core::draw_frame ()>();
-               sink_->graph(graph_);
-       }
-
-       void acodec(std::string codec) override { sink_->acodec(std::move(codec)); }
-       void vcodec(std::string codec) override { sink_->vcodec(std::move(codec)); }
-       void format(std::string fmt) override   { sink_->format(std::move(fmt)); }
-
-       // Runtime control
-
-       void start() override
-       {
-               source_->start();
-               sink_->start(source_->has_audio(), source_->has_video());
-               started_ = true;
-               bool quiet = is_logging_quiet_for_thread();
-
-               thread_ = boost::thread([=] { run(quiet); });
-       }
-
-       bool try_push_audio(caspar::array<const std::int32_t> data) override
-       {
-               throw_if_error();
-
-               if (try_push_audio_)
-                       return try_push_audio_(std::move(data));
-               else
-                       return false;
-       }
-
-       bool try_push_video(caspar::array<const std::uint8_t> data) override
-       {
-               throw_if_error();
-
-               if (try_push_video_)
-                       return try_push_video_(std::move(data));
-               else
-                       return false;
-       }
-
-       core::draw_frame try_pop_frame() override
-       {
-               throw_if_error();
-
-               if (!try_pop_frame_)
-                       CASPAR_THROW_EXCEPTION(invalid_operation());
-
-               return try_pop_frame_();
-       }
-
-       std::uint32_t last_frame() const override
-       {
-               return source_->frame_number();
-       }
-
-       bool started() const override
-       {
-               return started_;
-       }
-
-       void stop() override
-       {
-               started_ = false;
-
-               sink_->stop();
-               source_->stop();
-
-               if (thread_.joinable())
-                       thread_.join();
-       }
-
-private:
-       void run(bool quiet)
-       {
-               ensure_gpf_handler_installed_for_thread(u8(L"ffmpeg-pipeline: " + source_->print() + L" -> " + sink_->print()).c_str());
-               auto quiet_logging = temporary_enable_quiet_logging_for_thread(quiet);
-
-               try
-               {
-                       boost::optional<AVMediaType> result = source_->has_audio() ? AVMediaType::AVMEDIA_TYPE_AUDIO : AVMediaType::AVMEDIA_TYPE_VIDEO;
-
-                       while (started_ && (source_->has_audio() || source_->has_video()))
-                       {
-                               auto needed                                             = *result;
-                               auto input_frames_for_streams   = source_->get_input_frames_for_streams(needed);
-
-                               if (!input_frames_for_streams.empty() && input_frames_for_streams.at(0))
-                               {
-                                       for (int input_stream_index = 0; input_stream_index < input_frames_for_streams.size(); ++input_stream_index)
-                                       {
-                                               if (needed == AVMediaType::AVMEDIA_TYPE_AUDIO)
-                                               {
-                                                       initialize_audio_filter_if_needed(input_frames_for_streams);
-                                                       audio_filter_->push(input_stream_index, std::move(input_frames_for_streams.at(input_stream_index)));
-
-                                                       for (int output_stream_index = 0; output_stream_index < sink_->wanted_num_audio_streams(); ++output_stream_index)
-                                                               for (auto filtered_frame : audio_filter_->poll_all(output_stream_index))
-                                                                       result = sink_->try_push(AVMediaType::AVMEDIA_TYPE_AUDIO, output_stream_index, std::move(filtered_frame));
-                                               }
-                                               else if (needed == AVMediaType::AVMEDIA_TYPE_VIDEO)
-                                               {
-                                                       initialize_video_filter_if_needed(*input_frames_for_streams.at(input_stream_index));
-                                                       video_filter_->push(std::move(input_frames_for_streams.at(input_stream_index)));
-
-                                                       for (auto filtered_frame : video_filter_->poll_all())
-                                                               result = sink_->try_push(AVMediaType::AVMEDIA_TYPE_VIDEO, 0, std::move(filtered_frame));
-                                               }
-                                               else
-                                                       CASPAR_THROW_EXCEPTION(not_supported());
-                                       }
-                               }
-                               else if (source_->eof())
-                               {
-                                       started_ = false;
-                                       sink_->eof();
-                                       break;
-                               }
-                               else
-                                       result = boost::none;
-
-                               if (!result)
-                               {
-                                       graph_->set_tag(caspar::diagnostics::tag_severity::WARNING, "dropped-frame");
-                                       result = needed; // Repeat same media type
-                               }
-                       }
-               }
-               catch (...)
-               {
-                       if (is_logging_quiet_for_thread())
-                       {
-                               CASPAR_LOG_CURRENT_EXCEPTION_AT_LEVEL(debug);
-                       }
-                       else
-                       {
-                               CASPAR_LOG_CURRENT_EXCEPTION();
-                       }
-
-                       boost::lock_guard<tbb::spin_mutex> lock(exception_mutex_);
-                       exception_ = boost::current_exception();
-               }
-
-               video_filter_.reset();
-               audio_filter_.reset();
-               source_->stop();
-               sink_->stop();
-               started_ = false;
-       }
-
-       template<typename T>
-       void set_if_changed(bool& changed, T& old_value, T new_value)
-       {
-               if (old_value != new_value)
-               {
-                       changed = true;
-                       old_value = new_value;
-               }
-       }
-
-       void initialize_audio_filter_if_needed(const std::vector<std::shared_ptr<AVFrame>>& av_frames_per_stream)
-       {
-               bool changed = av_frames_per_stream.size() != source_audio_streams_.size();
-               source_audio_streams_.resize(av_frames_per_stream.size());
-
-               for (int i = 0; i < av_frames_per_stream.size(); ++i)
-               {
-                       auto& av_frame  = *av_frames_per_stream.at(i);
-                       auto& stream    = source_audio_streams_.at(i);
-
-                       set_if_changed(changed, stream.sampleformat, static_cast<AVSampleFormat>(av_frame.format));
-                       set_if_changed(changed, stream.num_channels, av_frame.channels);
-               }
-
-               if (changed)
-                       initialize_audio_filter();
-       }
-
-       void initialize_audio_filter()
-       {
-               std::vector<audio_input_pad> input_pads;
-               std::vector<audio_output_pad> output_pads;
-
-               for (auto& source_audio_stream : source_audio_streams_)
-               {
-                       input_pads.emplace_back(
-                                       boost::rational<int>(1, source_->samplerate()),
-                                       source_->samplerate(),
-                                       source_audio_stream.sampleformat,
-                                       av_get_default_channel_layout(source_audio_stream.num_channels));
-               }
-
-               auto total_num_channels = cpplinq::from(source_audio_streams_)
-                               .select([](const audio_stream_info& info) { return info.num_channels; })
-                               .aggregate(0, std::plus<int>());
-
-               if (total_num_channels > 1 && sink_->wanted_num_audio_streams() > 1)
-                       CASPAR_THROW_EXCEPTION(invalid_operation()
-                                       << msg_info("only one-to-many or many-to-one audio stream conversion supported."));
-
-               std::wstring amerge;
-
-               if (sink_->wanted_num_audio_streams() == 1 && !sink_->wanted_num_channels_per_stream())
-               {
-                       output_pads.emplace_back(
-                                       sink_->supported_samplerates(),
-                                       sink_->supported_sample_formats(),
-                                       std::vector<int64_t>({ av_get_default_channel_layout(total_num_channels) }));
-
-                       if (source_audio_streams_.size() > 1)
-                       {
-                               for (int i = 0; i < source_audio_streams_.size(); ++i)
-                                       amerge += L"[a:" + boost::lexical_cast<std::wstring>(i) + L"]";
-
-                               amerge += L"amerge=inputs=" + boost::lexical_cast<std::wstring>(source_audio_streams_.size());
-                       }
-               }
-
-               std::wstring afilter = u16(afilter_);
-
-               if (!amerge.empty())
-               {
-                       afilter = prepend_filter(u16(afilter), amerge);
-                       afilter += L"[aout:0]";
-               }
-
-               audio_filter_.reset(new audio_filter(input_pads, output_pads, u8(afilter)));
-       }
-
-       void initialize_video_filter_if_needed(const AVFrame& av_frame)
-       {
-               bool changed = false;
-
-               set_if_changed(changed, source_video_stream_.width, av_frame.width);
-               set_if_changed(changed, source_video_stream_.height, av_frame.height);
-               set_if_changed(changed, source_video_stream_.pixelformat, static_cast<AVPixelFormat>(av_frame.format));
-
-               core::field_mode field_mode = core::field_mode::progressive;
-
-               if (av_frame.interlaced_frame)
-                       field_mode = av_frame.top_field_first ? core::field_mode::upper : core::field_mode::lower;
-
-               set_if_changed(changed, source_video_stream_.fieldmode, field_mode);
-
-               if (changed)
-                       initialize_video_filter();
-       }
-
-       void initialize_video_filter()
-       {
-               if (source_video_stream_.fieldmode != core::field_mode::progressive && !filter::is_deinterlacing(u16(vfilter_)))
-                       vfilter_ = u8(append_filter(u16(vfilter_), L"YADIF=1:-1"));
-
-               if (source_video_stream_.height == 480) // NTSC DV
-               {
-                       auto pad_str = L"PAD=" + boost::lexical_cast<std::wstring>(source_video_stream_.width) + L":486:0:2:black";
-                       vfilter_ = u8(append_filter(u16(vfilter_), pad_str));
-               }
-
-               video_filter_.reset(new filter(
-                               source_video_stream_.width,
-                               source_video_stream_.height,
-                               1 / source_->framerate(),
-                               source_->framerate(),
-                               boost::rational<int>(1, 1), // TODO
-                               source_video_stream_.pixelformat,
-                               sink_->supported_pixel_formats(),
-                               vfilter_));
-               sink_->framerate(framerate());
-       }
-};
-
-spl::shared_ptr<struct ffmpeg_pipeline_backend> create_internal_pipeline()
-{
-       return spl::make_shared<ffmpeg_pipeline_backend_internal>();
-}
-
-}}
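The memory_sink removed above keeps the audio mixer filled by rotating a per-frame sample cadence, as its comments note for 1001-based modes. A minimal standalone sketch of that cadence handling, not CasparCG code (std::rotate stands in for boost::range::rotate; the values are the usual 48 kHz cadence for 30000/1001 fps):

    #include <algorithm>
    #include <iostream>
    #include <vector>

    int main()
    {
        // 48000 Hz / (30000/1001) fps = 1601.6 samples per frame, so audio is
        // delivered in a repeating 5-frame cadence totalling 8008 samples.
        std::vector<int> cadence { 1602, 1601, 1602, 1601, 1602 };

        // One-step rotation (last element to the front) gives
        // 1602, 1602, 1601, 1602, 1601 -- the order the removed sink uses so
        // the mixer is never starved on the first frames.
        std::rotate(cadence.begin(), cadence.end() - 1, cadence.end());

        for (int frame = 0; frame < 10; ++frame)
        {
            std::cout << "frame " << frame << ": " << cadence.front() << " samples\n";
            // Consume the front entry, then advance the cadence by one step,
            // mirroring the rotate-by-one after each emitted frame above.
            std::rotate(cadence.begin(), cadence.begin() + 1, cadence.end());
        }
    }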
diff --git a/modules/ffmpeg/producer/audio/audio_decoder.cpp b/modules/ffmpeg/producer/audio/audio_decoder.cpp
index 2366c10d8032260be15a9ca11197455926c528ce..0a1008876583afed71cbc24252315e4d5a3e6a48 100644 (file)
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
+* Copyright 2013 Sveriges Television AB http://casparcg.com/
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 #include "audio_decoder.h"
 
 #include "../util/util.h"
-#include "../input/input.h"
 #include "../../ffmpeg_error.h"
 
 #include <core/video_format.h>
-#include <core/frame/audio_channel_layout.h>
+#include <core/mixer/audio/audio_util.h>
 
-#include <common/log.h>
 #include <common/cache_aligned_vector.h>
 
 #include <queue>
@@ -39,7 +37,7 @@
 #pragma warning (push)
 #pragma warning (disable : 4244)
 #endif
-extern "C" 
+extern "C"
 {
        #include <libavformat/avformat.h>
        #include <libavcodec/avcodec.h>
@@ -50,143 +48,143 @@ extern "C"
 #endif
 
 namespace caspar { namespace ffmpeg {
-       
-uint64_t get_ffmpeg_channel_layout(AVCodecContext* dec)
+
+struct audio_decoder::implementation : boost::noncopyable
 {
-       auto layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);
-       return layout;
-}
+       int                                                                             index_;
+       const spl::shared_ptr<AVCodecContext>   codec_context_;
+       const int                                                               out_samplerate_;
+
+       cache_aligned_vector<int32_t>                   buffer_;
+
+       std::queue<spl::shared_ptr<AVPacket>>   packets_;
+
+       std::shared_ptr<SwrContext>                             swr_                            {
+                                                                                                                                       swr_alloc_set_opts(
+                                                                                                                                                       nullptr,
+                                                                                                                                                       codec_context_->channel_layout
+                                                                                                                                                                       ? codec_context_->channel_layout
+                                                                                                                                                                       : av_get_default_channel_layout(codec_context_->channels),
+                                                                                                                                                       AV_SAMPLE_FMT_S32,
+                                                                                                                                                       out_samplerate_,
+                                                                                                                                                       codec_context_->channel_layout
+                                                                                                                                                                       ? codec_context_->channel_layout
+                                                                                                                                                                       : av_get_default_channel_layout(codec_context_->channels),
+                                                                                                                                                       codec_context_->sample_fmt,
+                                                                                                                                                       codec_context_->sample_rate,
+                                                                                                                                                       0,
+                                                                                                                                                       nullptr),
+                                                                                                                                       [](SwrContext* p)
+                                                                                                                                       {
+                                                                                                                                               swr_free(&p);
+                                                                                                                                       }
+                                                                                                                               };
 
-struct audio_decoder::impl : boost::noncopyable
-{      
-       core::monitor::subject                                                                          monitor_subject_;
-       input&                                                                                                          input_;
-       int                                                                                                                     index_;
-       int                                                                                                                     actual_index_;
-       const core::video_format_desc                                                           format_desc_;
-       const spl::shared_ptr<AVCodecContext>                                           codec_context_          = open_codec(input_.context(), AVMEDIA_TYPE_AUDIO, actual_index_, false);
-
-       std::shared_ptr<SwrContext>                                                                     swr_                            {
-                                                                                                                                                                               swr_alloc_set_opts(
-                                                                                                                                                                                               nullptr,
-                                                                                                                                                                                               create_channel_layout_bitmask(codec_context_->channels),//get_ffmpeg_channel_layout(codec_context_.get()),
-                                                                                                                                                                                               AV_SAMPLE_FMT_S32,
-                                                                                                                                                                                               format_desc_.audio_sample_rate,
-                                                                                                                                                                                               create_channel_layout_bitmask(codec_context_->channels),//get_ffmpeg_channel_layout(codec_context_.get()),
-                                                                                                                                                                                               codec_context_->sample_fmt,
-                                                                                                                                                                                               codec_context_->sample_rate,
-                                                                                                                                                                                               0,
-                                                                                                                                                                                               nullptr),
-                                                                                                                                                                               [](SwrContext* p){swr_free(&p); }
-                                                                                                                                                                       };
-
-       cache_aligned_vector<uint8_t>                                                           buffer_;
-
-       std::shared_ptr<AVPacket>                                                                       current_packet_;
-       
 public:
-       explicit impl(
-                       input& in,
-                       const core::video_format_desc& format_desc,
-                       int audio_stream_index)
-               : input_(in)
-               , index_(audio_stream_index)
-               , actual_index_(input_.get_actual_audio_stream_index(index_))
-               , format_desc_(format_desc)
-               , buffer_(480000 * 4)
+       explicit implementation(int stream_index, const spl::shared_ptr<AVFormatContext>& context, int out_samplerate)
+               : index_(stream_index)
+               , codec_context_(open_codec(*context, AVMEDIA_TYPE_AUDIO, index_, false))
+               , out_samplerate_(out_samplerate)
+               , buffer_(10 * out_samplerate_ * codec_context_->channels) // 10 seconds of audio
        {
                if(!swr_)
                        CASPAR_THROW_EXCEPTION(bad_alloc());
 
                THROW_ON_ERROR2(swr_init(swr_.get()), "[audio_decoder]");
+
+               codec_context_->refcounted_frames = 1;
+       }
+
+       void push(const std::shared_ptr<AVPacket>& packet)
+       {
+               if(!packet)
+                       return;
+
+               if(packet->stream_index == index_ || packet->data == nullptr)
+                       packets_.push(spl::make_shared_ptr(packet));
        }
-               
-       std::shared_ptr<AVFrame> poll()
-       {               
-               if(!current_packet_ && !input_.try_pop_audio(current_packet_, index_))
+
+       std::shared_ptr<core::mutable_audio_buffer> poll()
+       {
+               if(packets_.empty())
                        return nullptr;
-               
-               std::shared_ptr<AVFrame> audio;
 
-               if(!current_packet_)    
-               {
-                       avcodec_flush_buffers(codec_context_.get());    
-               }
-               else if(!current_packet_->data)
-               {
-                       if(codec_context_->codec->capabilities & CODEC_CAP_DELAY)                       
-                               audio = decode(*current_packet_);
-                       
-                       if(!audio)
-                               current_packet_.reset();
-               }
-               else
+               auto packet = packets_.front();
+
+               if(packet->data == nullptr)
                {
-                       audio = decode(*current_packet_);
-                       
-                       if(current_packet_->size == 0)
-                               current_packet_.reset();
+                       packets_.pop();
+                       avcodec_flush_buffers(codec_context_.get());
+                       return flush_audio();
                }
-       
+
+               auto audio = decode(*packet);
+
+               if(packet->size == 0)
+                       packets_.pop();
+
                return audio;
        }
 
-       std::shared_ptr<AVFrame> decode(AVPacket& pkt)
-       {               
-               auto frame = create_frame();
-               
+       std::shared_ptr<core::mutable_audio_buffer> decode(AVPacket& pkt)
+       {
+               auto decoded_frame = create_frame();
+
                int got_frame = 0;
-               auto len = THROW_ON_ERROR2(avcodec_decode_audio4(codec_context_.get(), frame.get(), &got_frame, &pkt), "[audio_decoder]");
-                                       
-               if(len == 0)
+               auto len = THROW_ON_ERROR2(avcodec_decode_audio4(codec_context_.get(), decoded_frame.get(), &got_frame, &pkt), "[audio_decoder]");
+
+               if (len == 0)
                {
                        pkt.size = 0;
                        return nullptr;
                }
 
-        pkt.data += len;
-        pkt.size -= len;
+               pkt.data += len;
+               pkt.size -= len;
 
-               if(!got_frame)
+               if (!got_frame)
                        return nullptr;
-                                                       
-               const uint8_t **in      = const_cast<const uint8_t**>(frame->extended_data);
-               uint8_t* out[]          = {buffer_.data()};
 
-               auto channel_samples = swr_convert(swr_.get(), 
-                                                                                       out, static_cast<int>(buffer_.size()) / codec_context_->channels / av_get_bytes_per_sample(AV_SAMPLE_FMT_S32),
-                                                                                       in, frame->nb_samples);
+               const uint8_t **in = const_cast<const uint8_t**>(decoded_frame->extended_data);
+               uint8_t* out[] = { reinterpret_cast<uint8_t*>(buffer_.data()) };
 
-               frame->data[0]          = buffer_.data();
-               frame->linesize[0]      = channel_samples * codec_context_->channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S32);
-               frame->nb_samples       = channel_samples;
-               frame->format           = AV_SAMPLE_FMT_S32;
+               const auto channel_samples = swr_convert(
+                               swr_.get(),
+                               out,
+                               static_cast<int>(buffer_.size()) / codec_context_->channels,
+                               in,
+                               decoded_frame->nb_samples);
 
-               monitor_subject_  << core::monitor::message("/file/audio/sample-rate")  % codec_context_->sample_rate
-                                               << core::monitor::message("/file/audio/channels")       % codec_context_->channels
-                                               << core::monitor::message("/file/audio/format")         % u8(av_get_sample_fmt_name(codec_context_->sample_fmt))
-                                               << core::monitor::message("/file/audio/codec")          % u8(codec_context_->codec->long_name);                 
-
-               return frame;
+               return std::make_shared<core::mutable_audio_buffer>(
+                               buffer_.begin(),
+                               buffer_.begin() + channel_samples * decoded_frame->channels);
        }
-       
-       uint32_t nb_frames() const
+
+       bool ready() const
        {
-               return 0;
+               return packets_.size() > 10;
        }
 
        std::wstring print() const
-       {               
+       {
                return L"[audio-decoder] " + u16(codec_context_->codec->long_name);
        }
+
+       uint64_t ffmpeg_channel_layout() const
+       {
+               if (codec_context_->channel_layout == 0)
+                       return av_get_default_channel_layout(codec_context_->channels);
+               else
+                       return codec_context_->channel_layout;
+       }
 };
 
-audio_decoder::audio_decoder(input& input, const core::video_format_desc& format_desc, int audio_stream_index) : impl_(new impl(input, format_desc, audio_stream_index)){}
-audio_decoder::audio_decoder(audio_decoder&& other) : impl_(std::move(other.impl_)){}
-audio_decoder& audio_decoder::operator=(audio_decoder&& other){impl_ = std::move(other.impl_); return *this;}
-std::shared_ptr<AVFrame> audio_decoder::operator()(){return impl_->poll();}
-uint32_t audio_decoder::nb_frames() const{return impl_->nb_frames();}
+audio_decoder::audio_decoder(int stream_index, const spl::shared_ptr<AVFormatContext>& context, int out_samplerate) : impl_(new implementation(stream_index, context, out_samplerate)){}
+void audio_decoder::push(const std::shared_ptr<AVPacket>& packet){impl_->push(packet);}
+bool audio_decoder::ready() const{return impl_->ready();}
+std::shared_ptr<core::mutable_audio_buffer> audio_decoder::poll() { return impl_->poll(); }
+int    audio_decoder::num_channels() const { return impl_->codec_context_->channels; }
+uint64_t audio_decoder::ffmpeg_channel_layout() const { return impl_->ffmpeg_channel_layout(); }
 std::wstring audio_decoder::print() const{return impl_->print();}
-core::monitor::subject& audio_decoder::monitor_output() { return impl_->monitor_subject_;}
 
 }}
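The rewritten decoder above hands every decoded frame to libswresample and returns interleaved signed 32-bit samples at the caller's sample rate. A hedged sketch of that conversion in isolation — the function name and the generous fixed-size output buffer are assumptions, and it uses the same swr_alloc_set_opts()/int64_t channel-layout API the decoder above uses:

    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    extern "C"
    {
        #include <libavutil/channel_layout.h>
        #include <libavutil/frame.h>
        #include <libavutil/samplefmt.h>
        #include <libswresample/swresample.h>
    }

    // Illustrative only: convert one decoded AVFrame to interleaved S32.
    // in_samplerate comes from the codec context, as in the decoder above.
    std::vector<int32_t> to_interleaved_s32(const AVFrame& in, int in_samplerate, int out_samplerate)
    {
        const int64_t layout = in.channel_layout
                ? static_cast<int64_t>(in.channel_layout)
                : av_get_default_channel_layout(in.channels);

        SwrContext* swr = swr_alloc_set_opts(
                nullptr,
                layout, AV_SAMPLE_FMT_S32, out_samplerate,                      // output
                layout, static_cast<AVSampleFormat>(in.format), in_samplerate,  // input
                0, nullptr);

        if (!swr || swr_init(swr) < 0)
        {
            swr_free(&swr);
            throw std::runtime_error("swr_init failed");
        }

        // Generous capacity; a real implementation would size this with
        // av_rescale_rnd() against the resampling ratio and the swr delay.
        std::vector<int32_t> out(in.nb_samples * in.channels * 4);
        uint8_t* out_planes[] = { reinterpret_cast<uint8_t*>(out.data()) };

        const int samples_per_channel = swr_convert(
                swr,
                out_planes, static_cast<int>(out.size() / in.channels),
                const_cast<const uint8_t**>(in.extended_data), in.nb_samples);

        swr_free(&swr);

        if (samples_per_channel < 0)
            throw std::runtime_error("swr_convert failed");

        out.resize(samples_per_channel * in.channels);
        return out;
    }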
diff --git a/modules/ffmpeg/producer/audio/audio_decoder.h b/modules/ffmpeg/producer/audio/audio_decoder.h
index 99f6e398be358d3342bb7c44b94b1761b28314fd..6c06ea941cedf5b03ef910a90fabd447aa75e5fa 100644 (file)
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
+* Copyright 2013 Sveriges Television AB http://casparcg.com/
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 
 #pragma once
 
-#include <core/mixer/audio/audio_mixer.h>
-#include <core/monitor/monitor.h>
-
 #include <common/memory.h>
 
-#include <core/fwd.h>
+#include <core/frame/frame.h>
 
 #include <boost/noncopyable.hpp>
 
 struct AVPacket;
+struct AVFrame;
 struct AVFormatContext;
 
 namespace caspar { namespace ffmpeg {
-       
-class audio_decoder : public boost::noncopyable
+
+class audio_decoder : boost::noncopyable
 {
 public:
-       explicit audio_decoder(class input& input, const core::video_format_desc& format_desc, int audio_stream_index);
-       
-       audio_decoder(audio_decoder&& other);
-       audio_decoder& operator=(audio_decoder&& other);
+       explicit audio_decoder(int stream_index, const spl::shared_ptr<AVFormatContext>& context, int out_samplerate);
 
-       std::shared_ptr<AVFrame> operator()();
+       bool ready() const;
+       void push(const std::shared_ptr<AVPacket>& packet);
+       std::shared_ptr<core::mutable_audio_buffer> poll();
 
-       uint32_t nb_frames() const;
-       
-       std::wstring print() const;
-       
-       core::monitor::subject& monitor_output();
+       int     num_channels() const;
+       uint64_t ffmpeg_channel_layout() const;
 
+       std::wstring print() const;
 private:
-       struct impl;
-       spl::shared_ptr<impl> impl_;
+       struct implementation;
+       spl::shared_ptr<implementation> impl_;
 };
 
-}}
\ No newline at end of file
+}}
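The new header replaces the old pull-only operator() with a push/poll interface: packets are pushed in, decoded and resampled buffers are polled out. A hypothetical driver loop, to show how the pieces are meant to fit together — demux_next_packet() and mix_audio() are placeholders, not CasparCG or FFmpeg APIs:

    #include "audio_decoder.h"

    #include <memory>

    // Placeholder declarations (assumptions for illustration only).
    std::shared_ptr<AVPacket> demux_next_packet();
    void mix_audio(const core::mutable_audio_buffer& samples, int num_channels);

    void drain_audio(const spl::shared_ptr<AVFormatContext>& format_context, int audio_stream_index)
    {
        caspar::ffmpeg::audio_decoder decoder(audio_stream_index, format_context, 48000);

        while (auto packet = demux_next_packet())         // placeholder demuxer
        {
            decoder.push(packet);                         // packets for other streams are ignored

            // poll() yields interleaved S32 buffers at the requested sample
            // rate, or nullptr when the decoder needs more input.
            while (auto samples = decoder.poll())
                mix_audio(*samples, decoder.num_channels());
        }
    }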
diff --git a/modules/ffmpeg/producer/ffmpeg_producer.cpp b/modules/ffmpeg/producer/ffmpeg_producer.cpp
index b2d4b0e8b7ceed1db9b443f6a4859f56ff3a38ad..ac4ae09e0233eef8e42e455f959c3efb07c3bee9 100644 (file)
 
 #include "ffmpeg_producer.h"
 
-#include "../ffmpeg_pipeline.h"
 #include "../ffmpeg.h"
+#include "../ffmpeg_error.h"
 #include "util/util.h"
+#include "input/input.h"
+#include "audio/audio_decoder.h"
+#include "video/video_decoder.h"
+#include "muxer/frame_muxer.h"
+#include "filter/audio_filter.h"
 
 #include <common/param.h>
 #include <common/diagnostics/graph.h>
 #include <core/help/help_sink.h>
 #include <core/producer/media_info/media_info.h>
 #include <core/producer/framerate/framerate_producer.h>
+#include <core/frame/frame_factory.h>
 
 #include <future>
+#include <queue>
 
 namespace caspar { namespace ffmpeg {
-
 struct seek_out_of_range : virtual user_error {};
 
 std::wstring get_relative_or_original(
@@ -70,153 +76,412 @@ std::wstring get_relative_or_original(
 
 struct ffmpeg_producer : public core::frame_producer_base
 {
-       spl::shared_ptr<core::monitor::subject>                 monitor_subject_;
-       ffmpeg_pipeline                                                                 pipeline_;
-       const std::wstring                                                              filename_;
-       const std::wstring                                                              path_relative_to_media_ = get_relative_or_original(filename_, env::media_folder());
-       
-       const spl::shared_ptr<diagnostics::graph>               graph_;
-                                       
-       const core::video_format_desc                                   format_desc_;
-
-       core::constraints                                                               constraints_;
-       
-       core::draw_frame                                                                first_frame_                    = core::draw_frame::empty();
-       core::draw_frame                                                                last_frame_                             = core::draw_frame::empty();
-
-       boost::optional<uint32_t>                                               seek_target_;
-       
+       spl::shared_ptr<core::monitor::subject>                         monitor_subject_;
+       const std::wstring                                                                      filename_;
+       const std::wstring                                                                      path_relative_to_media_         = get_relative_or_original(filename_, env::media_folder());
+
+       const spl::shared_ptr<diagnostics::graph>                       graph_;
+       timer                                                                                           frame_timer_;
+
+       const spl::shared_ptr<core::frame_factory>                      frame_factory_;
+
+       std::shared_ptr<void>                                                           initial_logger_disabler_;
+
+       core::constraints                                                                       constraints_;
+
+       input                                                                                           input_;
+       std::unique_ptr<video_decoder>                                          video_decoder_;
+       std::vector<std::unique_ptr<audio_decoder>>                     audio_decoders_;
+       std::unique_ptr<frame_muxer>                                            muxer_;
+
+       const boost::rational<int>                                                      framerate_;
+       const uint32_t                                                                          start_;
+       const uint32_t                                                                          length_;
+       const bool                                                                                      thumbnail_mode_;
+
+       core::draw_frame                                                                        last_frame_;
+
+       std::queue<std::pair<core::draw_frame, uint32_t>>       frame_buffer_;
+
+       int64_t                                                                                         frame_number_                           = 0;
+       uint32_t                                                                                        file_frame_number_                      = 0;
 public:
        explicit ffmpeg_producer(
-                       ffmpeg_pipeline pipeline, 
-                       const core::video_format_desc& format_desc)
-               : pipeline_(std::move(pipeline))
-               , filename_(u16(pipeline_.source_filename()))
-               , format_desc_(format_desc)
+                       const spl::shared_ptr<core::frame_factory>& frame_factory,
+                       const core::video_format_desc& format_desc,
+                       const std::wstring& url_or_file,
+                       const std::wstring& filter,
+                       bool loop,
+                       uint32_t start,
+                       uint32_t length,
+                       bool thumbnail_mode,
+                       const std::wstring& custom_channel_order,
+                       const ffmpeg_options& vid_params)
+               : filename_(url_or_file)
+               , frame_factory_(frame_factory)
+               , initial_logger_disabler_(temporary_enable_quiet_logging_for_thread(thumbnail_mode))
+               , input_(graph_, url_or_file, loop, start, length, thumbnail_mode, vid_params)
+               , framerate_(read_framerate(*input_.context(), format_desc.framerate))
+               , start_(start)
+               , length_(length)
+               , thumbnail_mode_(thumbnail_mode)
+               , last_frame_(core::draw_frame::empty())
+               , frame_number_(0)
        {
                graph_->set_color("frame-time", diagnostics::color(0.1f, 1.0f, 0.1f));
-               graph_->set_color("underflow", diagnostics::color(0.6f, 0.3f, 0.9f));   
+               graph_->set_color("underflow", diagnostics::color(0.6f, 0.3f, 0.9f));
                diagnostics::register_graph(graph_);
 
-               pipeline_.graph(graph_);
-               pipeline_.start();
+               try
+               {
+                       video_decoder_.reset(new video_decoder(input_.context()));
+                       if (!thumbnail_mode_)
+                               CASPAR_LOG(info) << print() << L" " << video_decoder_->print();
 
-               while ((first_frame_ = pipeline_.try_pop_frame()) == core::draw_frame::late())
-                       boost::this_thread::sleep_for(boost::chrono::milliseconds(1));
+                       constraints_.width.set(video_decoder_->width());
+                       constraints_.height.set(video_decoder_->height());
+               }
+               catch (averror_stream_not_found&)
+               {
+                       //CASPAR_LOG(warning) << print() << " No video-stream found. Running without video.";
+               }
+               catch (...)
+               {
+                       if (!thumbnail_mode_)
+                       {
+                               CASPAR_LOG_CURRENT_EXCEPTION();
+				CASPAR_LOG(warning) << print() << " Failed to open video-stream. Running without video.";
+                       }
+               }
 
-               constraints_.width.set(pipeline_.width());
-               constraints_.height.set(pipeline_.height());
+               auto channel_layout = core::audio_channel_layout::invalid();
+               std::vector<audio_input_pad> audio_input_pads;
 
-               if (is_logging_quiet_for_thread())
-                       CASPAR_LOG(debug) << print() << L" Initialized";
-               else
-                       CASPAR_LOG(info) << print() << L" Initialized";
+               if (!thumbnail_mode_)
+               {
+                       for (unsigned stream_index = 0; stream_index < input_.context()->nb_streams; ++stream_index)
+                       {
+                               auto stream = input_.context()->streams[stream_index];
+
+                               if (stream->codec->codec_type != AVMediaType::AVMEDIA_TYPE_AUDIO)
+                                       continue;
+
+                               try
+                               {
+                                       audio_decoders_.push_back(std::unique_ptr<audio_decoder>(new audio_decoder(stream_index, input_.context(), format_desc.audio_sample_rate)));
+                                       audio_input_pads.emplace_back(
+                                                       boost::rational<int>(1, format_desc.audio_sample_rate),
+                                                       format_desc.audio_sample_rate,
+                                                       AVSampleFormat::AV_SAMPLE_FMT_S32,
+                                                       audio_decoders_.back()->ffmpeg_channel_layout());
+                                       CASPAR_LOG(info) << print() << L" " << audio_decoders_.back()->print();
+                               }
+                               catch (averror_stream_not_found&)
+                               {
+                                       //CASPAR_LOG(warning) << print() << " No audio-stream found. Running without audio.";
+                               }
+                               catch (...)
+                               {
+                                       CASPAR_LOG_CURRENT_EXCEPTION();
+                                       CASPAR_LOG(warning) << print() << " Failed to open audio-stream. Running without audio.";
+                               }
+                       }
+
+                       if (audio_decoders_.size() == 1)
+                       {
+                               channel_layout = get_audio_channel_layout(
+                                               audio_decoders_.at(0)->num_channels(),
+                                               audio_decoders_.at(0)->ffmpeg_channel_layout(),
+                                               custom_channel_order);
+                       }
+                       else if (audio_decoders_.size() > 1)
+                       {
+                               auto num_channels = cpplinq::from(audio_decoders_)
+                                       .select(std::mem_fn(&audio_decoder::num_channels))
+                                       .aggregate(0, std::plus<int>());
+                               auto ffmpeg_channel_layout = av_get_default_channel_layout(num_channels);
+
+                               channel_layout = get_audio_channel_layout(
+                                               num_channels,
+                                               ffmpeg_channel_layout,
+                                               custom_channel_order);
+                       }
+               }
+
+               if (!video_decoder_ && audio_decoders_.empty())
+                       CASPAR_THROW_EXCEPTION(averror_stream_not_found() << msg_info("No streams found"));
+
+               muxer_.reset(new frame_muxer(framerate_, std::move(audio_input_pads), frame_factory, format_desc, channel_layout, filter, true));
        }
 
        // frame_producer
-       
+
        core::draw_frame receive_impl() override
-       {                               
-               auto frame = core::draw_frame::late();
-               
-               caspar::timer frame_timer;
-               
-               auto decoded_frame = first_frame_;
-
-               if (decoded_frame == core::draw_frame::empty())
-                       decoded_frame = pipeline_.try_pop_frame();
-               else
-                       first_frame_ = core::draw_frame::empty();
+       {
+               return render_frame().first;
+       }
+
+       core::draw_frame last_frame() override
+       {
+               return core::draw_frame::still(last_frame_);
+       }
+
+       core::constraints& pixel_constraints() override
+       {
+               return constraints_;
+       }
+
+       double out_fps() const
+       {
+               auto out_framerate      = muxer_->out_framerate();
+               auto fps                        = static_cast<double>(out_framerate.numerator()) / static_cast<double>(out_framerate.denominator());
+
+               return fps;
+       }
+
+       std::pair<core::draw_frame, uint32_t> render_frame()
+       {
+               frame_timer_.restart();
+               auto disable_logging = temporary_enable_quiet_logging_for_thread(thumbnail_mode_);
 
-               if (decoded_frame == core::draw_frame::empty())
-                       frame = core::draw_frame::still(last_frame_);
-               else if (decoded_frame != core::draw_frame::late())
-                       last_frame_ = frame = core::draw_frame(std::move(decoded_frame));
-               else if (pipeline_.started())
-                       graph_->set_tag(diagnostics::tag_severity::WARNING, "underflow");
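+		// Decode ahead: up to 16 attempts per call, stopping as soon as two frames are buffered.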
+               for (int n = 0; n < 16 && frame_buffer_.size() < 2; ++n)
+                       try_decode_frame();
+
+		graph_->set_value("frame-time", frame_timer_.elapsed() * out_fps() * 0.5);
+
+               if (frame_buffer_.empty())
+               {
+                       if (input_.eof())
+                       {
+                               send_osc();
+                               return std::make_pair(last_frame(), -1);
+                       }
+                       else if (!is_url())
+                       {
+                               graph_->set_tag(diagnostics::tag_severity::WARNING, "underflow");
+                               send_osc();
+                               return std::make_pair(last_frame_, -1);
+                       }
+                       else
+                       {
+                               send_osc();
+                               return std::make_pair(last_frame_, -1);
+                       }
+               }
+
+               auto frame = frame_buffer_.front();
+               frame_buffer_.pop();
+
+               ++frame_number_;
+               file_frame_number_ = frame.second;
 
                graph_->set_text(print());
 
-               graph_->set_value("frame-time", frame_timer.elapsed()*format_desc_.fps*0.5);
-               *monitor_subject_
-                               << core::monitor::message("/profiler/time")     % frame_timer.elapsed() % (1.0/format_desc_.fps);                       
-               *monitor_subject_
-                               << core::monitor::message("/file/frame")        % static_cast<int32_t>(pipeline_.last_frame())
-                                                                                                                       % static_cast<int32_t>(pipeline_.length())
-                               << core::monitor::message("/file/fps")          % boost::rational_cast<double>(pipeline_.framerate())
-                               << core::monitor::message("/file/path")         % path_relative_to_media_
-                               << core::monitor::message("/loop")                      % pipeline_.loop();
+               last_frame_ = frame.first;
+
+               send_osc();
 
                return frame;
        }
 
-       core::draw_frame last_frame() override
+       bool is_url() const
        {
-               return core::draw_frame::still(last_frame_);
+               return boost::contains(filename_, L"://");
        }
 
-       core::constraints& pixel_constraints() override
+       void send_osc()
        {
-               return constraints_;
+               double fps = static_cast<double>(framerate_.numerator()) / static_cast<double>(framerate_.denominator());
+
+               *monitor_subject_       << core::monitor::message("/profiler/time")             % frame_timer_.elapsed() % (1.0/out_fps());
+
+               *monitor_subject_       << core::monitor::message("/file/time")                 % (file_frame_number()/fps)
+                                                                                                                                                       % (file_nb_frames()/fps)
+                                                       << core::monitor::message("/file/frame")                        % static_cast<int32_t>(file_frame_number())
+                                                                                                                                                       % static_cast<int32_t>(file_nb_frames())
+                                                       << core::monitor::message("/file/fps")                  % fps
+                                                       << core::monitor::message("/file/path")                 % path_relative_to_media_
+                                                       << core::monitor::message("/loop")                              % input_.loop();
+       }
+
+       core::draw_frame render_specific_frame(uint32_t file_position)
+       {
+		// Seeking here involves some trial and error and is nondeterministic.
+               static const int NUM_RETRIES = 32;
+
+               if (file_position > 0) // Assume frames are requested in sequential order,
+                                          // therefore no seeking should be necessary for the first frame.
+               {
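+			// Seek just before the requested position; the retry loop below decodes forward until the wanted frame appears.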
+			input_.seek(file_position > 1 ? file_position - 2 : file_position).get();
+			boost::this_thread::sleep_for(boost::chrono::milliseconds(40));
+               }
+
+               for (int i = 0; i < NUM_RETRIES; ++i)
+               {
+			boost::this_thread::sleep_for(boost::chrono::milliseconds(40));
+
+                       auto frame = render_frame();
+
+                       if (frame.second == std::numeric_limits<uint32_t>::max())
+                       {
+                               // Retry
+                               continue;
+                       }
+                       else if (frame.second == file_position + 1 || frame.second == file_position)
+                               return frame.first;
+                       else if (frame.second > file_position + 1)
+                       {
+                               CASPAR_LOG(trace) << print() << L" " << frame.second << L" received, wanted " << file_position + 1;
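+				// Overshot the requested frame; back the seek position off by the amount of overshoot and retry.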
+                               int64_t adjusted_seek = file_position - (frame.second - file_position + 1);
+
+                               if (adjusted_seek > 1 && file_position > 0)
+                               {
+                                       CASPAR_LOG(trace) << print() << L" adjusting to " << adjusted_seek;
+                                       input_.seek(static_cast<uint32_t>(adjusted_seek) - 1).get();
+					boost::this_thread::sleep_for(boost::chrono::milliseconds(40));
+                               }
+                               else
+                                       return frame.first;
+                       }
+               }
+
+               CASPAR_LOG(trace) << print() << " Giving up finding frame at " << file_position;
+               return core::draw_frame::empty();
+       }
+
+       core::draw_frame create_thumbnail_frame()
+       {
+               auto total_frames = nb_frames();
+               auto grid = env::properties().get(L"configuration.thumbnails.video-grid", 2);
+
+               if (grid < 1)
+               {
+                       CASPAR_LOG(error) << L"configuration/thumbnails/video-grid cannot be less than 1";
+                       CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("configuration/thumbnails/video-grid cannot be less than 1"));
+               }
+
+               if (grid == 1)
+               {
+                       return render_specific_frame(total_frames / 2);
+               }
+
+               auto num_snapshots = grid * grid;
+
+               std::vector<core::draw_frame> frames;
+
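+		// Lay the snapshots out as a grid x grid mosaic: scale each frame to 1/grid and translate it into its cell.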
+               for (int i = 0; i < num_snapshots; ++i)
+               {
+                       int x = i % grid;
+                       int y = i / grid;
+                       int desired_frame;
+
+                       if (i == 0)
+                               desired_frame = 0; // first
+                       else if (i == num_snapshots - 1)
+                               desired_frame = total_frames - 1; // last
+                       else
+                               // evenly distributed across the file.
+                               desired_frame = total_frames * i / (num_snapshots - 1);
+
+                       auto frame = render_specific_frame(desired_frame);
+                       frame.transform().image_transform.fill_scale[0] = 1.0 / static_cast<double>(grid);
+                       frame.transform().image_transform.fill_scale[1] = 1.0 / static_cast<double>(grid);
+                       frame.transform().image_transform.fill_translation[0] = 1.0 / static_cast<double>(grid) * x;
+                       frame.transform().image_transform.fill_translation[1] = 1.0 / static_cast<double>(grid) * y;
+
+                       frames.push_back(frame);
+               }
+
+               return core::draw_frame(frames);
+       }
+
+       uint32_t file_frame_number() const
+       {
+               return video_decoder_ ? video_decoder_->file_frame_number() : 0;
        }
 
        uint32_t nb_frames() const override
        {
-               if (pipeline_.loop())
+               if (is_url() || input_.loop())
                        return std::numeric_limits<uint32_t>::max();
 
-               return pipeline_.length();
+               uint32_t nb_frames = file_nb_frames();
+
+               nb_frames = std::min(length_, nb_frames - start_);
+               nb_frames = muxer_->calc_nb_frames(nb_frames);
+
+               return nb_frames;
+       }
+
+       uint32_t file_nb_frames() const
+       {
+               uint32_t file_nb_frames = 0;
+               file_nb_frames = std::max(file_nb_frames, video_decoder_ ? video_decoder_->nb_frames() : 0);
+               return file_nb_frames;
        }
-               
+
        std::future<std::wstring> call(const std::vector<std::wstring>& params) override
        {
                static const boost::wregex loop_exp(LR"(LOOP\s*(?<VALUE>\d?)?)", boost::regex::icase);
-               static const boost::wregex seek_exp(LR"(SEEK\s+(?<VALUE>\d+))", boost::regex::icase);
+               static const boost::wregex seek_exp(LR"(SEEK\s+(?<VALUE>(\+|-)?\d+)(\s+(?<WHENCE>REL|END))?)", boost::regex::icase);
                static const boost::wregex length_exp(LR"(LENGTH\s+(?<VALUE>\d+)?)", boost::regex::icase);
-               static const boost::wregex start_exp(LR"(START\\s+(?<VALUE>\\d+)?)", boost::regex::icase);
+               static const boost::wregex start_exp(LR"(START\s+(?<VALUE>\d+)?)", boost::regex::icase);
 
                auto param = boost::algorithm::join(params, L" ");
-               
+
                std::wstring result;
-                       
+
                boost::wsmatch what;
                if(boost::regex_match(param, what, loop_exp))
                {
                        auto value = what["VALUE"].str();
-                       if(!value.empty())
-                               pipeline_.loop(boost::lexical_cast<bool>(value));
-                       result = boost::lexical_cast<std::wstring>(pipeline_.loop());
+                       if (!value.empty())
+                               input_.loop(boost::lexical_cast<bool>(value));
+                       result = boost::lexical_cast<std::wstring>(input_.loop());
                }
                else if(boost::regex_match(param, what, seek_exp))
                {
-                       auto value = what["VALUE"].str();
-                       pipeline_.seek(boost::lexical_cast<uint32_t>(value));
+                       auto value = boost::lexical_cast<int64_t>(what["VALUE"].str());
+                       auto whence = what["WHENCE"].str();
+                       auto total = file_nb_frames();
+
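+			// REL seeks relative to the current file frame; END seeks backwards from the end of the file.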
+                       if(boost::iequals(whence, L"REL"))
+                               value = file_frame_number() + value;
+                       else if(boost::iequals(whence, L"END"))
+                               value = total - value;
+
+                       if(value < 0)
+                               value = 0;
+                       else if(value >= total)
+                               value = total - 1;
+
+                       input_.seek(static_cast<uint32_t>(value));
                }
                else if(boost::regex_match(param, what, length_exp))
                {
                        auto value = what["VALUE"].str();
                        if(!value.empty())
-                               pipeline_.length(boost::lexical_cast<uint32_t>(value));                 
-                       result = boost::lexical_cast<std::wstring>(pipeline_.length());
+                               input_.length(boost::lexical_cast<uint32_t>(value));
+                       result = boost::lexical_cast<std::wstring>(input_.length());
                }
                else if(boost::regex_match(param, what, start_exp))
                {
                        auto value = what["VALUE"].str();
                        if(!value.empty())
-                               pipeline_.start_frame(boost::lexical_cast<uint32_t>(value));
-                       result = boost::lexical_cast<std::wstring>(pipeline_.start_frame());
+                               input_.start(boost::lexical_cast<uint32_t>(value));
+                       result = boost::lexical_cast<std::wstring>(input_.start());
                }
                else
                        CASPAR_THROW_EXCEPTION(invalid_argument());
 
                return make_ready_future(std::move(result));
        }
-                               
+
        std::wstring print() const override
        {
-               return L"ffmpeg[" + boost::filesystem::path(filename_).filename().wstring() + L"|" 
-                                                 + print_mode() + L"|" 
-                                                 + boost::lexical_cast<std::wstring>(pipeline_.last_frame()) + L"/" + boost::lexical_cast<std::wstring>(pipeline_.length()) + L"]";
+               return L"ffmpeg[" + (is_url() ? filename_ : boost::filesystem::path(filename_).filename().wstring()) + L"|"
+                                                 + print_mode() + L"|"
+                                                 + boost::lexical_cast<std::wstring>(file_frame_number_) + L"/" + boost::lexical_cast<std::wstring>(file_nb_frames()) + L"]";
        }
 
        std::wstring name() const override
@@ -227,20 +492,21 @@ public:
        boost::property_tree::wptree info() const override
        {
                boost::property_tree::wptree info;
-               info.add(L"type",                               L"ffmpeg");
+               info.add(L"type",                               L"ffmpeg-producer");
                info.add(L"filename",                   filename_);
-               info.add(L"width",                              pipeline_.width());
-               info.add(L"height",                             pipeline_.height());
-               info.add(L"progressive",                pipeline_.progressive());
-               info.add(L"fps",                                boost::rational_cast<double>(pipeline_.framerate()));
-               info.add(L"loop",                               pipeline_.loop());
-               info.add(L"frame-number",               frame_number());
-               info.add(L"nb-frames",                  nb_frames());
-               info.add(L"file-frame-number",  pipeline_.last_frame());
-               info.add(L"file-nb-frames",             pipeline_.length());
+               info.add(L"width",                              video_decoder_ ? video_decoder_->width() : 0);
+               info.add(L"height",                             video_decoder_ ? video_decoder_->height() : 0);
+               info.add(L"progressive",                video_decoder_ ? video_decoder_->is_progressive() : false);
+               info.add(L"fps",                                static_cast<double>(framerate_.numerator()) / static_cast<double>(framerate_.denominator()));
+               info.add(L"loop",                               input_.loop());
+               info.add(L"frame-number",               frame_number_);
+               auto nb_frames2 = nb_frames();
+		info.add(L"nb-frames",			nb_frames2 == std::numeric_limits<uint32_t>::max() ? -1 : static_cast<int64_t>(nb_frames2));
+               info.add(L"file-frame-number",  file_frame_number_);
+               info.add(L"file-nb-frames",             file_nb_frames());
                return info;
        }
-       
+
        core::monitor::subject& monitor_output()
        {
                return *monitor_subject_;
@@ -250,24 +516,106 @@ public:
 
        std::wstring print_mode() const
        {
-               return ffmpeg::print_mode(
-                               pipeline_.width(),
-                               pipeline_.height(),
-                               boost::rational_cast<double>(pipeline_.framerate()), 
-                               !pipeline_.progressive());
+               return video_decoder_ ? ffmpeg::print_mode(
+                               video_decoder_->width(),
+                               video_decoder_->height(),
+                               static_cast<double>(framerate_.numerator()) / static_cast<double>(framerate_.denominator()),
+                               !video_decoder_->is_progressive()) : L"";
+       }
+
+       bool all_audio_decoders_ready() const
+       {
+               for (auto& audio_decoder : audio_decoders_)
+                       if (!audio_decoder->ready())
+                               return false;
+
+               return true;
+       }
+
+       void try_decode_frame()
+       {
+               std::shared_ptr<AVPacket> pkt;
+
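+		// Feed packets to the video decoder and every audio decoder until all decoders are ready, the input runs dry, or 32 packets have been pushed.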
+               for (int n = 0; n < 32 && ((video_decoder_ && !video_decoder_->ready()) || !all_audio_decoders_ready()) && input_.try_pop(pkt); ++n)
+               {
+                       if (video_decoder_)
+                               video_decoder_->push(pkt);
+
+                       for (auto& audio_decoder : audio_decoders_)
+                               audio_decoder->push(pkt);
+               }
+
+               std::shared_ptr<AVFrame>                                                                        video;
+               std::vector<std::shared_ptr<core::mutable_audio_buffer>>        audio;
+
+               tbb::parallel_invoke(
+               [&]
+               {
+                       if (!muxer_->video_ready() && video_decoder_)
+                               video = video_decoder_->poll();
+               },
+               [&]
+               {
+                       if (!muxer_->audio_ready())
+                       {
+                               for (auto& audio_decoder : audio_decoders_)
+                               {
+                                       auto audio_for_stream = audio_decoder->poll();
+
+                                       if (audio_for_stream)
+                                               audio.push_back(audio_for_stream);
+                               }
+                       }
+               });
+
+               muxer_->push(video);
+               muxer_->push(audio);
+
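+		// With no audio streams, feed the muxer flush/empty audio so it can still complete frames; the block below does the same with video when there is no video stream.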
+               if (audio_decoders_.empty())
+               {
+                       if (video == flush_video())
+                               muxer_->push({ flush_audio() });
+                       else if (!muxer_->audio_ready())
+                               muxer_->push({ empty_audio() });
+               }
+
+               if (!video_decoder_)
+               {
+                       if (boost::count_if(audio, [](std::shared_ptr<core::mutable_audio_buffer> a) { return a == flush_audio(); }) > 0)
+                               muxer_->push(flush_video());
+                       else if (!muxer_->video_ready())
+                               muxer_->push(empty_video());
+               }
+
+               uint32_t file_frame_number = 0;
+               file_frame_number = std::max(file_frame_number, video_decoder_ ? video_decoder_->file_frame_number() : 0);
+
+               for (auto frame = muxer_->poll(); frame != core::draw_frame::empty(); frame = muxer_->poll())
+                       frame_buffer_.push(std::make_pair(frame, file_frame_number));
+       }
+
+       bool audio_only() const
+       {
+               return !video_decoder_;
+       }
+
+       boost::rational<int> get_out_framerate() const
+       {
+               return muxer_->out_framerate();
        }
 };
 
 void describe_producer(core::help_sink& sink, const core::help_repository& repo)
 {
        sink.short_description(L"A producer for playing media files supported by FFmpeg.");
-       sink.syntax(L"[clip:string] {[loop:LOOP]} {START,SEEK [start:int]} {LENGTH [start:int]} {FILTER [filter:string]} {CHANNEL_LAYOUT [channel_layout:string]}");
+       sink.syntax(L"[clip,url:string] {[loop:LOOP]} {SEEK [start:int]} {LENGTH [start:int]} {FILTER [filter:string]} {CHANNEL_LAYOUT [channel_layout:string]}");
        sink.para()
                ->text(L"The FFmpeg Producer can play all media that FFmpeg can play, which includes many ")
                ->text(L"QuickTime video codec such as Animation, PNG, PhotoJPEG, MotionJPEG, as well as ")
                ->text(L"H.264, FLV, WMV and several audio codecs as well as uncompressed audio.");
        sink.definitions()
                ->item(L"clip", L"The file without the file extension to play. It should reside under the media folder.")
+		->item(L"url", L"If clip contains :// it is treated as a URL instead of a file. The URL can be any streaming protocol supported by FFmpeg, dshow://video={webcam_name} (Windows) or v4l2://{video device} (Linux).")
                ->item(L"loop", L"Will cause the media file to loop between start and start + length")
                ->item(L"start", L"Optionally sets the start frame. 0 by default. If loop is specified this will be the frame where it starts over again.")
                ->item(L"length", L"Optionally sets the length of the clip. If not specified the clip will be played to the end. If loop is specified the file will jump to start position once this number of frames has been played.")
@@ -278,16 +626,20 @@ void describe_producer(core::help_sink& sink, const core::help_repository& repo)
        sink.para()->text(L"Examples:");
        sink.example(L">> PLAY 1-10 folder/clip", L"to play all frames in a clip and stop at the last frame.");
        sink.example(L">> PLAY 1-10 folder/clip LOOP", L"to loop a clip between the first frame and the last frame.");
-       sink.example(L">> PLAY 1-10 folder/clip LOOP START 10", L"to loop a clip between frame 10 and the last frame.");
-       sink.example(L">> PLAY 1-10 folder/clip LOOP START 10 LENGTH 50", L"to loop a clip between frame 10 and frame 60.");
-       sink.example(L">> PLAY 1-10 folder/clip START 10 LENGTH 50", L"to play frames 10-60 in a clip and stop.");
+       sink.example(L">> PLAY 1-10 folder/clip LOOP SEEK 10", L"to loop a clip between frame 10 and the last frame.");
+       sink.example(L">> PLAY 1-10 folder/clip LOOP SEEK 10 LENGTH 50", L"to loop a clip between frame 10 and frame 60.");
+       sink.example(L">> PLAY 1-10 folder/clip SEEK 10 LENGTH 50", L"to play frames 10-60 in a clip and stop.");
        sink.example(L">> PLAY 1-10 folder/clip FILTER yadif=1,-1", L"to deinterlace the video.");
        sink.example(L">> PLAY 1-10 folder/clip CHANNEL_LAYOUT film", L"given the defaults in casparcg.config this will specifies that the clip has 6 audio channels of the type 5.1 and that they are in the order FL FC FR BL BR LFE regardless of what ffmpeg says.");
        sink.example(L">> PLAY 1-10 folder/clip CHANNEL_LAYOUT \"5.1:LFE FL FC FR BL BR\"", L"specifies that the clip has 6 audio channels of the type 5.1 and that they are in the specified order regardless of what ffmpeg says.");
+       sink.example(L">> PLAY 1-10 rtmp://example.com/live/stream", L"to play an RTMP stream.");
+       sink.example(L">> PLAY 1-10 \"dshow://video=Live! Cam Chat HD VF0790\"", L"to use a web camera as video input on Windows.");
+       sink.example(L">> PLAY 1-10 v4l2:///dev/video0", L"to use a web camera as video input on Linux.");
        sink.para()->text(L"The FFmpeg producer also supports changing some of the settings via ")->code(L"CALL")->text(L":");
        sink.example(L">> CALL 1-10 LOOP 1");
        sink.example(L">> CALL 1-10 START 10");
        sink.example(L">> CALL 1-10 LENGTH 50");
+       sink.example(L">> CALL 1-10 SEEK 30");
        core::describe_framerate_producer(sink);
 }
 
@@ -296,37 +648,68 @@ spl::shared_ptr<core::frame_producer> create_producer(
                const std::vector<std::wstring>& params,
                const spl::shared_ptr<core::media_info_repository>& info_repo)
 {
-       auto filename = probe_stem(env::media_folder() + L"/" + params.at(0), false);
+       auto file_or_url        = params.at(0);
 
-       if(filename.empty())
+       if (!boost::contains(file_or_url, L"://"))
+       {
+               // File
+               file_or_url = probe_stem(env::media_folder() + L"/" + file_or_url, false);
+       }
+
+       if (file_or_url.empty())
                return core::frame_producer::empty();
-       
-       auto pipeline = ffmpeg_pipeline()
-                       .from_file(u8(filename))
-                       .loop(contains_param(L"LOOP", params))
-                       .start_frame(get_param(L"START", params, get_param(L"SEEK", params, static_cast<uint32_t>(0))))
-                       .length(get_param(L"LENGTH", params, std::numeric_limits<uint32_t>::max()))
-                       .vfilter(u8(get_param(L"FILTER", params, L"")))
-                       .to_memory(dependencies.frame_factory, dependencies.format_desc);
-
-       auto producer = create_destroy_proxy(spl::make_shared_ptr(std::make_shared<ffmpeg_producer>(
-                       pipeline,
-                       dependencies.format_desc)));
-
-       if (pipeline.framerate() == -1) // Audio only.
-               return producer;
-
-       auto source_framerate = pipeline.framerate();
-       auto target_framerate = boost::rational<int>(
-                       dependencies.format_desc.time_scale,
-                       dependencies.format_desc.duration);
-
-       return core::create_framerate_producer(
+
+       auto loop                                       = contains_param(L"LOOP",               params);
+       auto start                                      = get_param(L"SEEK",                    params, static_cast<uint32_t>(0));
+       auto length                                     = get_param(L"LENGTH",                  params, std::numeric_limits<uint32_t>::max());
+       auto filter_str                         = get_param(L"FILTER",                  params, L"");
+       auto custom_channel_order       = get_param(L"CHANNEL_LAYOUT",  params, L"");
+
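+	// Map the DEINTERLACE convenience aliases onto the corresponding FFmpeg filter strings.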
+       boost::ireplace_all(filter_str, L"DEINTERLACE_BOB",     L"YADIF=1:-1");
+       boost::ireplace_all(filter_str, L"DEINTERLACE_LQ",      L"SEPARATEFIELDS");
+       boost::ireplace_all(filter_str, L"DEINTERLACE",         L"YADIF=0:-1");
+
+       ffmpeg_options vid_params;
+       bool haveFFMPEGStartIndicator = false;
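+	// Parameters after a bare "--" are forwarded to FFmpeg: each "-name value" pair is collected into vid_params.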
+       for (size_t i = 0; i < params.size() - 1; ++i)
+       {
+               if (!haveFFMPEGStartIndicator && params[i] == L"--")
+               {
+                       haveFFMPEGStartIndicator = true;
+                       continue;
+               }
+               if (haveFFMPEGStartIndicator)
+               {
+                       auto name = u8(params.at(i++)).substr(1);
+                       auto value = u8(params.at(i));
+                       vid_params.push_back(std::make_pair(name, value));
+               }
+       }
+
+       auto producer = spl::make_shared<ffmpeg_producer>(
+                       dependencies.frame_factory,
+                       dependencies.format_desc,
+                       file_or_url,
+                       filter_str,
+                       loop,
+                       start,
+                       length,
+                       false,
+                       custom_channel_order,
+                       vid_params);
+
+       if (producer->audio_only())
+               return core::create_destroy_proxy(producer);
+
+       auto get_source_framerate       = [=] { return producer->get_out_framerate(); };
+       auto target_framerate           = dependencies.format_desc.framerate;
+
+       return core::create_destroy_proxy(core::create_framerate_producer(
                        producer,
-                       source_framerate,
+                       get_source_framerate,
                        target_framerate,
                        dependencies.format_desc.field_mode,
-                       dependencies.format_desc.audio_cadence);
+                       dependencies.format_desc.audio_cadence));
 }
 
 core::draw_frame create_thumbnail_frame(
@@ -340,67 +723,24 @@ core::draw_frame create_thumbnail_frame(
        if (filename.empty())
                return core::draw_frame::empty();
 
-       auto render_specific_frame = [&](std::int64_t frame_num)
-       {
-               auto pipeline = ffmpeg_pipeline()
-                       .from_file(u8(filename))
-                       .start_frame(static_cast<uint32_t>(frame_num))
-                       .to_memory(dependencies.frame_factory, dependencies.format_desc);
-               pipeline.start();
-
-               auto frame = core::draw_frame::empty();
-               while ((frame = pipeline.try_pop_frame()) == core::draw_frame::late())
-                       boost::this_thread::sleep_for(boost::chrono::milliseconds(1));
-               return frame;
-       };
-
-       auto info = info_repo->get(filename);
-
-       if (!info)
-               return core::draw_frame::empty();
-
-       auto total_frames = info->duration;
-       auto grid = env::properties().get(L"configuration.thumbnails.video-grid", 2);
-
-       if (grid < 1)
-       {
-               CASPAR_LOG(error) << L"configuration/thumbnails/video-grid cannot be less than 1";
-               BOOST_THROW_EXCEPTION(caspar_exception() << msg_info("configuration/thumbnails/video-grid cannot be less than 1"));
-       }
-
-       if (grid == 1)
-       {
-               return render_specific_frame(total_frames / 2);
-       }
-
-       auto num_snapshots = grid * grid;
-
-       std::vector<core::draw_frame> frames;
-
-       for (int i = 0; i < num_snapshots; ++i)
-       {
-               int x = i % grid;
-               int y = i / grid;
-               std::int64_t desired_frame;
-
-               if (i == 0)
-                       desired_frame = 0; // first
-               else if (i == num_snapshots - 1)
-                       desired_frame = total_frames - 2; // last
-               else
-                       // evenly distributed across the file.
-                       desired_frame = total_frames * i / (num_snapshots - 1);
-
-               auto frame = render_specific_frame(desired_frame);
-               frame.transform().image_transform.fill_scale[0] = 1.0 / static_cast<double>(grid);
-               frame.transform().image_transform.fill_scale[1] = 1.0 / static_cast<double>(grid);
-               frame.transform().image_transform.fill_translation[0] = 1.0 / static_cast<double>(grid) * x;
-               frame.transform().image_transform.fill_translation[1] = 1.0 / static_cast<double>(grid) * y;
-
-               frames.push_back(frame);
-       }
-
-       return core::draw_frame(frames);
+       auto loop               = false;
+       auto start              = 0;
+       auto length             = std::numeric_limits<uint32_t>::max();
+       auto filter_str = L"";
+
+       ffmpeg_options vid_params;
+       auto producer = spl::make_shared<ffmpeg_producer>(
+                       dependencies.frame_factory,
+                       dependencies.format_desc,
+                       filename,
+                       filter_str,
+                       loop,
+                       start,
+                       length,
+                       true,
+                       L"",
+                       vid_params);
+
+       return producer->create_thumbnail_frame();
 }
-
 }}
index e562fa377ba5fc2318a698cee0a9bfd3bc82c8c8..05159e4194deb18aa952c0888e55919239824001 100644 (file)
@@ -25,6 +25,7 @@
 
 #include "../../ffmpeg_error.h"
 #include "../../ffmpeg.h"
+#include "../util/util.h"
 
 #include <common/assert.h>
 #include <common/except.h>
@@ -43,7 +44,7 @@
 #pragma warning (push)
 #pragma warning (disable : 4244)
 #endif
-extern "C" 
+extern "C"
 {
        #include <libavutil/avutil.h>
        #include <libavutil/imgutils.h>
@@ -71,30 +72,9 @@ std::string create_sourcefilter_str(const audio_input_pad& input_pad, std::strin
        return asrc_options;
 }
 
-std::string create_filter_list(const std::vector<std::string>& items)
-{
-       return boost::join(items, "|");
-}
-
-std::string channel_layout_to_string(int64_t channel_layout)
-{
-       return (boost::format("0x%|1$x|") % channel_layout).str();
-}
-
 std::string create_sinkfilter_str(const audio_output_pad& output_pad, std::string name)
 {
-       const auto asink_options = (boost::format("[%4%] abuffersink")//=sample_fmts=%1%:channel_layouts=%2%:sample_rates=%3%")
-               % create_filter_list(cpplinq::from(output_pad.sample_fmts)
-                               .select(&av_get_sample_fmt_name)
-                               .select([](const char* str) { return std::string(str); })
-                               .to_vector())
-               % create_filter_list(cpplinq::from(output_pad.sample_fmts)
-                               .select(&channel_layout_to_string)
-                               .to_vector())
-               % create_filter_list(cpplinq::from(output_pad.sample_rates)
-                               .select([](int samplerate) { return boost::lexical_cast<std::string>(samplerate); })
-                               .to_vector())
-               % name).str();
+       const auto asink_options = (boost::format("[%1%] abuffersink") % name).str();
 
        return asink_options;
 }
@@ -107,13 +87,16 @@ struct audio_filter::implementation
        std::vector<AVFilterContext*>   audio_graph_inputs_;
        std::vector<AVFilterContext*>   audio_graph_outputs_;
 
+       std::vector<audio_input_pad>    input_pads_;
+
        implementation(
-               std::vector<audio_input_pad> input_pads,
-               std::vector<audio_output_pad> output_pads,
-               const std::string& filtergraph)
+                       std::vector<audio_input_pad> input_pads,
+                       std::vector<audio_output_pad> output_pads,
+                       const std::string& filtergraph)
                : filtergraph_(boost::to_lower_copy(filtergraph))
+               , input_pads_(std::move(input_pads))
        {
-               if (input_pads.empty())
+               if (input_pads_.empty())
                        CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info("input_pads cannot be empty"));
 
                if (output_pads.empty())
@@ -130,7 +113,7 @@ struct audio_filter::implementation
 
                {
                        int i = 0;
-                       for (auto& input_pad : input_pads)
+                       for (auto& input_pad : input_pads_)
                                complete_filter_graph.push_back(create_sourcefilter_str(input_pad, "a:" + boost::lexical_cast<std::string>(i++)));
                }
 
@@ -142,20 +125,27 @@ struct audio_filter::implementation
                {
                        int i = 0;
                        for (auto& output_pad : output_pads)
+                       {
                                complete_filter_graph.push_back(create_sinkfilter_str(output_pad, "aout:" + boost::lexical_cast<std::string>(i++)));
+
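+				// Terminate each constraint list with the sentinel value expected by av_opt_set_int_list (AV_SAMPLE_FMT_NONE, 0 and -1 respectively).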
+                               output_pad.sample_fmts.push_back(AVSampleFormat::AV_SAMPLE_FMT_NONE);
+                               output_pad.audio_channel_layouts.push_back(0);
+                               output_pad.sample_rates.push_back(-1);
+                       }
                }
 
                configure_filtergraph(
                                *audio_graph_,
                                boost::join(complete_filter_graph, ";"),
                                audio_graph_inputs_,
-                               audio_graph_outputs_);
-               
+                               audio_graph_outputs_,
+                               output_pads);
+
                if (is_logging_quiet_for_thread())
                        CASPAR_LOG(trace)
-                               <<      u16(std::string("\n") 
+                               <<      u16(std::string("\n")
                                        + avfilter_graph_dump(
-                                                       audio_graph_.get(), 
+                                                       audio_graph_.get(),
                                                        nullptr));
                else
                        CASPAR_LOG(debug)
@@ -164,81 +154,133 @@ struct audio_filter::implementation
                                                audio_graph_.get(),
                                                nullptr));
        }
-       
+
        void configure_filtergraph(
                        AVFilterGraph& graph,
                        const std::string& filtergraph,
                        std::vector<AVFilterContext*>& source_contexts,
-                       std::vector<AVFilterContext*>& sink_contexts)
+                       std::vector<AVFilterContext*>& sink_contexts,
+                       const std::vector<audio_output_pad>& output_pads)
        {
-               try
-               {
-                       AVFilterInOut* outputs  = nullptr;
-                       AVFilterInOut* inputs   = nullptr;
+               AVFilterInOut* outputs  = nullptr;
+               AVFilterInOut* inputs   = nullptr;
 
-                       FF(avfilter_graph_parse2(
-                                       &graph,
-                                       filtergraph.c_str(),
-                                       &inputs,
-                                       &outputs));
+               FF(avfilter_graph_parse2(
+                               &graph,
+                               filtergraph.c_str(),
+                               &inputs,
+                               &outputs));
 
-                       // Workaround because outputs and inputs are not filled in for some reason
-                       for (unsigned i = 0; i < graph.nb_filters; ++i)
-                       {
-                               auto filter = graph.filters[i];
+               // Workaround because outputs and inputs are not filled in for some reason
+               for (unsigned i = 0; i < graph.nb_filters; ++i)
+               {
+                       auto filter = graph.filters[i];
 
-                               if (std::string(filter->filter->name) == "abuffer")
-                                       source_contexts.push_back(filter);
+                       if (std::string(filter->filter->name) == "abuffer")
+                               source_contexts.push_back(filter);
 
-                               if (std::string(filter->filter->name) == "abuffersink")
-                                       sink_contexts.push_back(filter);
-                       }
+                       if (std::string(filter->filter->name) == "abuffersink")
+                               sink_contexts.push_back(filter);
+               }
 
-                       for (AVFilterInOut* iter = inputs; iter; iter = iter->next)
-                               source_contexts.push_back(iter->filter_ctx);
+               for (AVFilterInOut* iter = inputs; iter; iter = iter->next)
+                       source_contexts.push_back(iter->filter_ctx);
 
-                       for (AVFilterInOut* iter = outputs; iter; iter = iter->next)
-                               sink_contexts.push_back(iter->filter_ctx);
+               for (AVFilterInOut* iter = outputs; iter; iter = iter->next)
+                       sink_contexts.push_back(iter->filter_ctx);
 
-                       FF(avfilter_graph_config(
-                               &graph, 
-                               nullptr));
-               }
-               catch(...)
+               for (int i = 0; i < sink_contexts.size(); ++i)
                {
-                       //avfilter_inout_free(&outputs);
-                       //avfilter_inout_free(&inputs);
-                       throw;
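+			// Apply the output pad's sample format, channel layout and sample rate constraints directly on the abuffersink context.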
+                       auto sink_context = sink_contexts.at(i);
+                       auto& output_pad = output_pads.at(i);
+
+#pragma warning (push)
+#pragma warning (disable : 4245)
+                       FF(av_opt_set_int_list(
+                               sink_context,
+                               "sample_fmts",
+                               output_pad.sample_fmts.data(),
+                               -1,
+                               AV_OPT_SEARCH_CHILDREN));
+
+                       FF(av_opt_set_int_list(
+                               sink_context,
+                               "channel_layouts",
+                               output_pad.audio_channel_layouts.data(),
+                               0,
+                               AV_OPT_SEARCH_CHILDREN));
+
+                       FF(av_opt_set_int_list(
+                               sink_context,
+                               "sample_rates",
+                               output_pad.sample_rates.data(),
+                               -1,
+                               AV_OPT_SEARCH_CHILDREN));
+#pragma warning (pop)
                }
+
+               FF(avfilter_graph_config(
+                       &graph,
+                       nullptr));
+       }
+
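+	// Makes the abuffersink emit frames with exactly num_samples samples each (av_buffersink_set_frame_size).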
+       void set_guaranteed_output_num_samples_per_frame(int output_pad_id, int num_samples)
+       {
+               av_buffersink_set_frame_size(audio_graph_outputs_.at(output_pad_id), num_samples);
        }
 
        void push(int input_pad_id, const std::shared_ptr<AVFrame>& src_av_frame)
-       {               
+       {
                FF(av_buffersrc_add_frame(
                        audio_graph_inputs_.at(input_pad_id),
                        src_av_frame.get()));
        }
 
+       void push(int input_pad_id, const boost::iterator_range<const int32_t*>& frame_samples)
+       {
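+		// Wrap the interleaved int32 samples in an AVFrame via av_samples_fill_arrays and push it to this pad's abuffer source.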
+               auto& input_pad                         = input_pads_.at(input_pad_id);
+               auto num_samples                        = frame_samples.size() / av_get_channel_layout_nb_channels(input_pad.audio_channel_layout);
+               auto input_frame                        = ffmpeg::create_frame();
+
+               input_frame->channels           = av_get_channel_layout_nb_channels(input_pad.audio_channel_layout);
+               input_frame->channel_layout     = input_pad.audio_channel_layout;
+               input_frame->sample_rate                = input_pad.sample_rate;
+               input_frame->nb_samples         = static_cast<int>(num_samples);
+               input_frame->format                     = input_pad.sample_fmt;
+               input_frame->pts                                = 0;
+
+               av_samples_fill_arrays(
+                               input_frame->extended_data,
+                               input_frame->linesize,
+                               reinterpret_cast<const std::uint8_t*>(frame_samples.begin()),
+                               input_frame->channels,
+                               input_frame->nb_samples,
+                               static_cast<AVSampleFormat>(input_frame->format),
+                               16);
+
+               push(input_pad_id, input_frame);
+       }
+
        std::shared_ptr<AVFrame> poll(int output_pad_id)
        {
-               std::shared_ptr<AVFrame> filt_frame(
-                       av_frame_alloc(), 
-                       [](AVFrame* p)
-                       {
-                               av_frame_free(&p);
-                       });
-               
+               auto filt_frame = create_frame();
+
                const auto ret = av_buffersink_get_frame(
                        audio_graph_outputs_.at(output_pad_id),
                        filt_frame.get());
-                               
+
                if(ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
                        return nullptr;
-                                       
+
                FF_RET(ret, "poll");
 
                return filt_frame;
        }
+
+       const AVFilterLink& get_output_pad_info(int output_pad_id) const
+       {
+               return *audio_graph_outputs_.at(output_pad_id)->inputs[0];
+       }
 };
 
 audio_filter::audio_filter(
@@ -250,11 +292,15 @@ audio_filter::audio_filter(
 }
 audio_filter::audio_filter(audio_filter&& other) : impl_(std::move(other.impl_)){}
 audio_filter& audio_filter::operator=(audio_filter&& other){impl_ = std::move(other.impl_); return *this;}
+void audio_filter::set_guaranteed_output_num_samples_per_frame(int output_pad_id, int num_samples) { impl_->set_guaranteed_output_num_samples_per_frame(output_pad_id, num_samples); }
 void audio_filter::push(int input_pad_id, const std::shared_ptr<AVFrame>& frame){impl_->push(input_pad_id, frame);}
+void audio_filter::push(int input_pad_id, const boost::iterator_range<const int32_t*>& frame_samples) { impl_->push(input_pad_id, frame_samples); }
 std::shared_ptr<AVFrame> audio_filter::poll(int output_pad_id){return impl_->poll(output_pad_id);}
 std::wstring audio_filter::filter_str() const{return u16(impl_->filtergraph_);}
+int audio_filter::get_num_output_pads() const { return static_cast<int>(impl_->audio_graph_outputs_.size()); }
+const AVFilterLink& audio_filter::get_output_pad_info(int output_pad_id) const { return impl_->get_output_pad_info(output_pad_id); }
 std::vector<spl::shared_ptr<AVFrame>> audio_filter::poll_all(int output_pad_id)
-{      
+{
        std::vector<spl::shared_ptr<AVFrame>> frames;
        for(auto frame = poll(output_pad_id); frame; frame = poll(output_pad_id))
                frames.push_back(spl::make_shared_ptr(frame));
index 370bed04530a868571cfccfc3671a7104f30e91e..86da3a43caca61c0fb4e4b232981a7ab04c929ec 100644 (file)
@@ -25,6 +25,7 @@
 
 #include <boost/rational.hpp>
 #include <boost/noncopyable.hpp>
+#include <boost/range/iterator_range.hpp>
 
 #include <string>
 #include <vector>
@@ -42,6 +43,7 @@ extern "C"
 #endif
 
 struct AVFrame;
+struct AVFilterLink;
 
 namespace caspar { namespace ffmpeg {
 
@@ -50,13 +52,13 @@ struct audio_input_pad
        boost::rational<int>    time_base;
        int                                             sample_rate;
        AVSampleFormat                  sample_fmt;
-       std::int64_t                    audio_channel_layout;
+       std::uint64_t                   audio_channel_layout;
 
        audio_input_pad(
                        boost::rational<int> time_base,
                        int sample_rate,
                        AVSampleFormat sample_fmt,
-                       std::int64_t audio_channel_layout)
+                       std::uint64_t audio_channel_layout)
                : time_base(std::move(time_base))
                , sample_rate(sample_rate)
                , sample_fmt(sample_fmt)
@@ -69,12 +71,12 @@ struct audio_output_pad
 {
        std::vector<int>                        sample_rates;
        std::vector<AVSampleFormat>     sample_fmts;
-       std::vector<std::int64_t>       audio_channel_layouts;
+       std::vector<std::uint64_t>      audio_channel_layouts;
 
        audio_output_pad(
                        std::vector<int> sample_rates,
                        std::vector<AVSampleFormat> sample_fmts,
-                       std::vector<std::int64_t> audio_channel_layouts)
+                       std::vector<std::uint64_t> audio_channel_layouts)
                : sample_rates(std::move(sample_rates))
                , sample_fmts(std::move(sample_fmts))
                , audio_channel_layouts(std::move(audio_channel_layouts))
@@ -92,11 +94,15 @@ public:
        audio_filter(audio_filter&& other);
        audio_filter& operator=(audio_filter&& other);
 
+       void set_guaranteed_output_num_samples_per_frame(int output_pad_id, int num_samples);
        void push(int input_pad_id, const std::shared_ptr<AVFrame>& frame);
+       void push(int input_pad_id, const boost::iterator_range<const int32_t*>& frame_samples);
        std::shared_ptr<AVFrame> poll(int output_pad_id);
        std::vector<spl::shared_ptr<AVFrame>> poll_all(int output_pad_id);
 
        std::wstring filter_str() const;
+       int get_num_output_pads() const;
+       const AVFilterLink& get_output_pad_info(int output_pad_id) const;
 private:
        struct implementation;
        spl::shared_ptr<implementation> impl_;
index 537f2fb59b334cb7660230f549b853c267383a07..aa83cf36662584a583fd758a55b3850c03d0a53b 100644 (file)
@@ -25,6 +25,7 @@
 
 #include "../../ffmpeg_error.h"
 #include "../../ffmpeg.h"
+#include "../util/util.h"
 
 #include <common/assert.h>
 #include <common/except.h>
@@ -43,7 +44,7 @@
 #pragma warning (push)
 #pragma warning (disable : 4244)
 #endif
-extern "C" 
+extern "C"
 {
        #include <libavutil/avutil.h>
        #include <libavutil/imgutils.h>
@@ -58,7 +59,6 @@ extern "C"
 #endif
 
 namespace caspar { namespace ffmpeg {
-       
 struct filter::implementation
 {
        std::string                                                             filtergraph_;
@@ -68,7 +68,7 @@ struct filter::implementation
     AVFilterContext*                                           video_graph_out_;
 
        std::queue<std::shared_ptr<AVFrame>>    fast_path_;
-               
+
        implementation(
                        int in_width,
                        int in_height,
@@ -77,7 +77,8 @@ struct filter::implementation
                        boost::rational<int> in_sample_aspect_ratio,
                        AVPixelFormat in_pix_fmt,
                        std::vector<AVPixelFormat> out_pix_fmts,
-                       const std::string& filtergraph) 
+                       const std::string& filtergraph,
+                       bool multithreaded)
                : filtergraph_(boost::to_lower_copy(filtergraph))
        {
                if(out_pix_fmts.empty())
@@ -99,66 +100,73 @@ struct filter::implementation
                out_pix_fmts.push_back(AV_PIX_FMT_NONE);
 
                video_graph_.reset(
-                       avfilter_graph_alloc(), 
+                       avfilter_graph_alloc(),
                        [](AVFilterGraph* p)
                        {
                                avfilter_graph_free(&p);
                        });
-               
-               video_graph_->nb_threads  = 0;
-               video_graph_->thread_type = AVFILTER_THREAD_SLICE;
-                               
+
+               if (multithreaded)
+               {
+                       video_graph_->nb_threads        = 0;
+                       video_graph_->thread_type       = AVFILTER_THREAD_SLICE;
+               }
+               else
+               {
+                       video_graph_->nb_threads        = 1;
+               }
+
                const auto vsrc_options = (boost::format("video_size=%1%x%2%:pix_fmt=%3%:time_base=%4%/%5%:pixel_aspect=%6%/%7%:frame_rate=%8%/%9%")
                        % in_width % in_height
                        % in_pix_fmt
                        % in_time_base.numerator() % in_time_base.denominator()
                        % in_sample_aspect_ratio.numerator() % in_sample_aspect_ratio.denominator()
                        % in_frame_rate.numerator() % in_frame_rate.denominator()).str();
-                                       
-               AVFilterContext* filt_vsrc = nullptr;                   
+
+               AVFilterContext* filt_vsrc = nullptr;
                FF(avfilter_graph_create_filter(
                        &filt_vsrc,
-                       avfilter_get_by_name("buffer"), 
+                       avfilter_get_by_name("buffer"),
                        "filter_buffer",
-                       vsrc_options.c_str(), 
-                       nullptr, 
+                       vsrc_options.c_str(),
+                       nullptr,
                        video_graph_.get()));
-                               
+
                AVFilterContext* filt_vsink = nullptr;
                FF(avfilter_graph_create_filter(
                        &filt_vsink,
-                       avfilter_get_by_name("buffersink"), 
+                       avfilter_get_by_name("buffersink"),
                        "filter_buffersink",
-                       nullptr, 
-                       nullptr, 
+                       nullptr,
+                       nullptr,
                        video_graph_.get()));
-               
+
 #pragma warning (push)
 #pragma warning (disable : 4245)
 
                FF(av_opt_set_int_list(
-                       filt_vsink, 
-                       "pix_fmts", 
-                       out_pix_fmts.data(), 
+                       filt_vsink,
+                       "pix_fmts",
+                       out_pix_fmts.data(),
                        -1,
                        AV_OPT_SEARCH_CHILDREN));
 
 #pragma warning (pop)
-                       
+
                configure_filtergraph(
-                       *video_graph_, 
+                       *video_graph_,
                        filtergraph_,
                        *filt_vsrc,
                        *filt_vsink);
 
                video_graph_in_  = filt_vsrc;
                video_graph_out_ = filt_vsink;
-               
+
                if (is_logging_quiet_for_thread())
                        CASPAR_LOG(trace)
-                               <<      u16(std::string("\n") 
+                               <<      u16(std::string("\n")
                                        + avfilter_graph_dump(
-                                                       video_graph_.get(), 
+                                                       video_graph_.get(),
                                                        nullptr));
                else
                        CASPAR_LOG(debug)
@@ -167,61 +175,47 @@ struct filter::implementation
                                                        video_graph_.get(),
                                                        nullptr));
        }
-       
+
        void configure_filtergraph(
-               AVFilterGraph& graph, 
-               const std::string& filtergraph, 
-               AVFilterContext& source_ctx, 
+               AVFilterGraph& graph,
+               const std::string& filtergraph,
+               AVFilterContext& source_ctx,
                AVFilterContext& sink_ctx)
        {
-               AVFilterInOut* outputs = nullptr;
-               AVFilterInOut* inputs = nullptr;
-
-               try
+               if (!filtergraph.empty())
                {
-                       if(!filtergraph.empty()) 
-                       {
-                               outputs = avfilter_inout_alloc();
-                               inputs  = avfilter_inout_alloc();
+                       auto outputs = avfilter_inout_alloc();
+                       auto inputs  = avfilter_inout_alloc();
 
-                               CASPAR_VERIFY(outputs && inputs);
+                       CASPAR_VERIFY(outputs && inputs);
 
-                               outputs->name       = av_strdup("in");
-                               outputs->filter_ctx = &source_ctx;
-                               outputs->pad_idx    = 0;
-                               outputs->next       = nullptr;
+                       outputs->name       = av_strdup("in");
+                       outputs->filter_ctx = &source_ctx;
+                       outputs->pad_idx    = 0;
+                       outputs->next       = nullptr;
 
-                               inputs->name        = av_strdup("out");
-                               inputs->filter_ctx  = &sink_ctx;
-                               inputs->pad_idx     = 0;
-                               inputs->next        = nullptr;
+                       inputs->name        = av_strdup("out");
+                       inputs->filter_ctx  = &sink_ctx;
+                       inputs->pad_idx     = 0;
+                       inputs->next        = nullptr;
 
-                               FF(avfilter_graph_parse(
-                                       &graph, 
-                                       filtergraph.c_str(), 
+                       FF(avfilter_graph_parse(
+                                       &graph,
+                                       filtergraph.c_str(),
                                        inputs,
                                        outputs,
                                        nullptr));
-                       } 
-                       else 
-                       {
-                               FF(avfilter_link(
-                                       &source_ctx, 
-                                       0, 
-                                       &sink_ctx, 
-                                       0));
-                       }
-
-                       FF(avfilter_graph_config(
-                               &graph, 
-                               nullptr));
                }
-               catch(...)
+               else
                {
-                       //avfilter_inout_free(&outputs);
-                       //avfilter_inout_free(&inputs);
-                       throw;
+                       FF(avfilter_link(
+                                       &source_ctx,
+                                       0,
+                                       &sink_ctx,
+                                       0));
                }
+
+               FF(avfilter_graph_config(&graph, nullptr));
        }
 
        bool fast_path() const
@@ -235,7 +229,7 @@ struct filter::implementation
                        fast_path_.push(src_av_frame);
                else
                        FF(av_buffersrc_add_frame(
-                               video_graph_in_, 
+                               video_graph_in_,
                                src_av_frame.get()));
        }
 
@@ -251,20 +245,15 @@ struct filter::implementation
                        return result;
                }
 
-               std::shared_ptr<AVFrame> filt_frame(
-                       av_frame_alloc(), 
-                       [](AVFrame* p)
-                       {
-                               av_frame_free(&p);
-                       });
-               
+               auto filt_frame = create_frame();
+
                const auto ret = av_buffersink_get_frame(
-                       video_graph_out_, 
+                       video_graph_out_,
                        filt_frame.get());
-                               
+
                if(ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
                        return nullptr;
-                                       
+
                FF_RET(ret, "poll");
 
                return filt_frame;
@@ -279,8 +268,9 @@ filter::filter(
                boost::rational<int> in_sample_aspect_ratio,
                AVPixelFormat in_pix_fmt,
                std::vector<AVPixelFormat> out_pix_fmts,
-               const std::string& filtergraph) 
-               : impl_(new implementation(
+               const std::string& filtergraph,
+               bool multithreaded)
+       : impl_(new implementation(
                        in_width,
                        in_height,
                        in_time_base,
@@ -288,18 +278,18 @@ filter::filter(
                        in_sample_aspect_ratio,
                        in_pix_fmt,
                        out_pix_fmts,
-                       filtergraph)){}
+                       filtergraph,
+                       multithreaded)){}
 filter::filter(filter&& other) : impl_(std::move(other.impl_)){}
 filter& filter::operator=(filter&& other){impl_ = std::move(other.impl_); return *this;}
 void filter::push(const std::shared_ptr<AVFrame>& frame){impl_->push(frame);}
 std::shared_ptr<AVFrame> filter::poll(){return impl_->poll();}
 std::wstring filter::filter_str() const{return u16(impl_->filtergraph_);}
 std::vector<spl::shared_ptr<AVFrame>> filter::poll_all()
-{      
+{
        std::vector<spl::shared_ptr<AVFrame>> frames;
        for(auto frame = poll(); frame; frame = poll())
                frames.push_back(spl::make_shared_ptr(frame));
        return frames;
 }
-
 }}
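
The new multithreaded flag only touches the graph's two threading fields. The standalone sketch below (assumed include, not taken from the commit) shows the same toggle on a bare AVFilterGraph: nb_threads = 0 lets libavfilter choose a slice-threading worker count, while nb_threads = 1 keeps all filtering on the calling thread.

    extern "C"
    {
        #include <libavfilter/avfilter.h>
    }

    void configure_threading(AVFilterGraph* graph, bool multithreaded)
    {
        if (multithreaded)
        {
            graph->nb_threads  = 0;                      // 0 = auto-detect worker count
            graph->thread_type = AVFILTER_THREAD_SLICE;  // allow slice threading
        }
        else
        {
            graph->nb_threads  = 1;                      // run filters on a single thread
        }
    }
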
index e8d62314168eb0eb2a814430cd6b68fab24770a2..86cdba98f5c0ee3b535d02fea75fd570983d78db 100644 (file)
@@ -67,7 +67,8 @@ public:
                boost::rational<int> in_sample_aspect_ratio,
                AVPixelFormat in_pix_fmt,
                std::vector<AVPixelFormat> out_pix_fmts,
-               const std::string& filtergraph);
+               const std::string& filtergraph,
+               bool multithreaded = true);
        filter(filter&& other);
        filter& operator=(filter&& other);
 
@@ -79,10 +80,13 @@ public:
                        
        static bool is_double_rate(const std::wstring& filters)
        {
-               if(boost::to_upper_copy(filters).find(L"YADIF=1") != std::string::npos)
+               if (boost::to_upper_copy(filters).find(L"YADIF=1") != std::string::npos)
                        return true;
-       
-               if(boost::to_upper_copy(filters).find(L"YADIF=3") != std::string::npos)
+
+               if (boost::to_upper_copy(filters).find(L"YADIF=3") != std::string::npos)
+                       return true;
+
+               if (boost::to_upper_copy(filters).find(L"SEPARATEFIELDS") != std::string::npos)
                        return true;
 
                return false;
@@ -90,8 +94,12 @@ public:
 
        static bool is_deinterlacing(const std::wstring& filters)
        {
-               if(boost::to_upper_copy(filters).find(L"YADIF") != std::string::npos)
-                       return true;    
+               if (boost::to_upper_copy(filters).find(L"YADIF") != std::string::npos)
+                       return true;
+
+               if (boost::to_upper_copy(filters).find(L"SEPARATEFIELDS") != std::string::npos)
+                       return true;
+
                return false;
        }       
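
With SEPARATEFIELDS added to both predicates, a separatefields-based graph is now treated like a bob deinterlacer. A few example inputs for the static helpers declared above (a sketch assuming this header is included; both helpers upper-case their argument, so matching is case-insensitive):

    #include "filter.h"   // the header shown above

    void filter_predicate_examples()
    {
        using caspar::ffmpeg::filter;

        filter::is_double_rate(L"yadif=1:-1");        // true  - field-rate (bob) yadif
        filter::is_double_rate(L"yadif=0:-1");        // false - frame-rate yadif
        filter::is_double_rate(L"separatefields");    // true  - new in this change

        filter::is_deinterlacing(L"YADIF=0:-1");      // true
        filter::is_deinterlacing(L"separatefields");  // true  - new in this change
    }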
 
index 9d4acbd235e9fd72056646356e7c96fb1c4d5e6b..ac772b8a9cdb50077234ea59b03c9ef25ae735b0 100644 (file)
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
+* Copyright 2013 Sveriges Television AB http://casparcg.com/
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 #include "input.h"
 
 #include "../util/util.h"
+#include "../util/flv.h"
 #include "../../ffmpeg_error.h"
 #include "../../ffmpeg.h"
 
+#include <core/video_format.h>
+
 #include <common/diagnostics/graph.h>
 #include <common/executor.h>
-#include <common/lock.h>
-//#include <common/except.h>
+#include <common/except.h>
 #include <common/os/general_protection_fault.h>
-#include <common/log.h>
-
-#include <core/video_format.h>
+#include <common/param.h>
+#include <common/scope_exit.h>
 
 #include <tbb/concurrent_queue.h>
 #include <tbb/atomic.h>
 #include <tbb/recursive_mutex.h>
 
+#include <boost/range/algorithm.hpp>
 #include <boost/thread/condition_variable.hpp>
 #include <boost/thread/mutex.hpp>
 #include <boost/thread/thread.hpp>
@@ -48,7 +50,7 @@
 #pragma warning (push)
 #pragma warning (disable : 4244)
 #endif
-extern "C" 
+extern "C"
 {
        #define __STDC_CONSTANT_MACROS
        #define __STDC_LIMIT_MACROS
@@ -58,212 +60,283 @@ extern "C"
 #pragma warning (pop)
 #endif
 
+static const size_t MAX_BUFFER_COUNT    = 100;
+static const size_t MAX_BUFFER_COUNT_RT = 3;
+static const size_t MIN_BUFFER_COUNT    = 50;
+static const size_t MAX_BUFFER_SIZE     = 64 * 1000000;
+
 namespace caspar { namespace ffmpeg {
+struct input::implementation : boost::noncopyable
+{
+       const spl::shared_ptr<diagnostics::graph>                                       graph_;
 
-static const int MAX_PUSH_WITHOUT_POP = 200;
-static const int MIN_FRAMES = 25;
+       const spl::shared_ptr<AVFormatContext>                                          format_context_; // Destroy this last
+       const int                                                                                                       default_stream_index_   = av_find_default_stream_index(format_context_.get());
 
-class stream
-{
-       stream(const stream&);
-       stream& operator=(const stream&);
+       const std::wstring                                                                                      filename_;
+       tbb::atomic<uint32_t>                                                                           start_;
+       tbb::atomic<uint32_t>                                                                           length_;
+       const bool                                                                                                      thumbnail_mode_;
+       tbb::atomic<bool>                                                                                       loop_;
+       uint32_t                                                                                                        frame_number_                   = 0;
 
-       typedef tbb::concurrent_bounded_queue<std::shared_ptr<AVPacket>>::size_type size_type;
+       tbb::concurrent_bounded_queue<std::shared_ptr<AVPacket>>        buffer_;
+       tbb::atomic<size_t>                                                                                     buffer_size_;
 
-       int                                                                                                                     index_;
-       tbb::concurrent_bounded_queue<std::shared_ptr<AVPacket>>        packets_;
-       tbb::atomic<int>                                                                                        push_since_pop_;
-public:
+       executor                                                                                                        executor_;
 
-       stream(int index) 
-               : index_(index)
+       explicit implementation(const spl::shared_ptr<diagnostics::graph> graph, const std::wstring& url_or_file, bool loop, uint32_t start, uint32_t length, bool thumbnail_mode, const ffmpeg_options& vid_params)
+               : graph_(graph)
+               , format_context_(open_input(url_or_file, vid_params))
+               , filename_(url_or_file)
+               , thumbnail_mode_(thumbnail_mode)
+               , executor_(print())
        {
-               push_since_pop_ = 0;
-       }
+               if (thumbnail_mode_)
+                       executor_.invoke([]
+                       {
+                               enable_quiet_logging_for_thread();
+                       });
 
-       stream(stream&&) = default;
+               start_                  = start;
+               length_                 = length;
+               loop_                   = loop;
+               buffer_size_    = 0;
 
-       bool is_available() const
-       {
-               return index_ >= 0;
-       }
+               if(start_ > 0)
+                       queued_seek(start_);
 
-       int index() const
-       {
-               return index_;
+               graph_->set_color("seek", diagnostics::color(1.0f, 0.5f, 0.0f));
+               graph_->set_color("buffer-count", diagnostics::color(0.7f, 0.4f, 0.4f));
+               graph_->set_color("buffer-size", diagnostics::color(1.0f, 1.0f, 0.0f));
+
+               tick();
        }
-       
-       void push(const std::shared_ptr<AVPacket>& packet)
+
+       bool try_pop(std::shared_ptr<AVPacket>& packet)
        {
-               if(packet && packet->data && packet->stream_index != index_)
-                       return;
+               auto result = buffer_.try_pop(packet);
 
-               if (++push_since_pop_ > MAX_PUSH_WITHOUT_POP) // Out of memory protection for streams never being used.
+               if(result)
                {
-                       return;
+                       if(packet)
+                               buffer_size_ -= packet->size;
+                       tick();
                }
 
-               packets_.push(packet);
-       }
-
-       bool try_pop(std::shared_ptr<AVPacket>& packet)
-       {
-               push_since_pop_ = 0;
+               graph_->set_value("buffer-size", (static_cast<double>(buffer_size_)+0.001)/MAX_BUFFER_SIZE);
+               graph_->set_value("buffer-count", (static_cast<double>(buffer_.size()+0.001)/MAX_BUFFER_COUNT));
 
-               return packets_.try_pop(packet);
+               return result;
        }
 
-       void clear()
+       std::ptrdiff_t get_max_buffer_count() const
        {
-               std::shared_ptr<AVPacket> packet;
-               push_since_pop_ = 0;
-               while(packets_.try_pop(packet));
+               return thumbnail_mode_ ? 1 : MAX_BUFFER_COUNT;
        }
-               
-       size_type size() const
+
+       std::ptrdiff_t get_min_buffer_count() const
        {
-               return is_available() ? packets_.size() : std::numeric_limits<size_type>::max();
+               return thumbnail_mode_ ? 0 : MIN_BUFFER_COUNT;
        }
-};
-               
-struct input::impl : boost::noncopyable
-{              
-       const spl::shared_ptr<diagnostics::graph>       graph_;
-
-       const std::wstring                                                      filename_;
-       const spl::shared_ptr<AVFormatContext>          format_context_                 = open_input(filename_); // Destroy this last
-       const int                                                                       default_stream_index_   = av_find_default_stream_index(format_context_.get());
-
-       tbb::atomic<uint32_t>                                           start_;         
-       tbb::atomic<uint32_t>                                           length_;
-       tbb::atomic<bool>                                                       loop_;
-       tbb::atomic<bool>                                                       eof_;
-       double                                                                          fps_                                    = read_fps(*format_context_, 0.0);
-       uint32_t                                                                        frame_number_                   = 0;
-
-       stream                                                                          video_stream_                   {                                                       av_find_best_stream(format_context_.get(), AVMEDIA_TYPE_VIDEO, -1, -1, 0, 0) };
-       std::vector<stream>                                                     audio_streams_;
-
-       boost::optional<uint32_t>                                       seek_target_;
-
-       tbb::atomic<bool>                                                       is_running_;
-       boost::mutex                                                            mutex_;
-       boost::condition_variable                                       cond_;
-       boost::thread                                                           thread_;
-       
-       impl(
-                       const spl::shared_ptr<diagnostics::graph> graph,
-                       const std::wstring& filename,
-                       const bool loop,
-                       const uint32_t start,
-                       const uint32_t length,
-                       bool thumbnail_mode)
-               : graph_(graph)
-               , filename_(filename)
-       {
-               start_                  = start;
-               length_                 = length;
-               loop_                   = loop;
-               eof_                    = false;
-               is_running_             = true;
 
-               if(start_ != 0)
-                       seek_target_ = start_;
-                                                                                                               
-               graph_->set_color("seek", diagnostics::color(1.0f, 0.5f, 0.0f));
+       std::future<bool> seek(uint32_t target)
+       {
+               if (!executor_.is_running())
+                       return make_ready_future(false);
 
-               if (!thumbnail_mode)
-                       for (unsigned i = 0; i < format_context_->nb_streams; ++i)
-                               if (format_context_->streams[i]->codec->codec_type == AVMediaType::AVMEDIA_TYPE_AUDIO)
-                                       audio_streams_.emplace_back(i);
+               return executor_.begin_invoke([=]() -> bool
+               {
+                       std::shared_ptr<AVPacket> packet;
+                       while(buffer_.try_pop(packet) && packet)
+                               buffer_size_ -= packet->size;
 
-               for (int i = 0; i < audio_streams_.size(); ++i)
-                       graph_->set_color("audio-buffer" + boost::lexical_cast<std::string>(i + 1), diagnostics::color(0.7f, 0.4f, 0.4f));
+                       queued_seek(target);
 
-               if (video_stream_.is_available())
-                       graph_->set_color("video-buffer", diagnostics::color(1.0f, 1.0f, 0.0f));
-               
-               for(int n = 0; n < 8; ++n)
                        tick();
 
-               thread_ = boost::thread([this, thumbnail_mode]{run(thumbnail_mode);});
+                       return true;
+               }, task_priority::high_priority);
        }
 
-       ~impl()
+       std::wstring print() const
        {
-               is_running_ = false;
-               cond_.notify_one();
-               thread_.join();
+               return L"ffmpeg_input[" + filename_ + L"]";
        }
-       
-       bool try_pop_video(std::shared_ptr<AVPacket>& packet)
-       {
-               if (!video_stream_.is_available())
-                       return false;
-
-               bool result = video_stream_.try_pop(packet);
 
-               if(result)
-                       cond_.notify_one();
-               
-               graph_->set_value("video-buffer", std::min(1.0, static_cast<double>(video_stream_.size())/MIN_FRAMES));
-                               
-               return result;
+       bool full() const
+       {
+               return (buffer_size_ > MAX_BUFFER_SIZE || buffer_.size() > get_max_buffer_count()) && buffer_.size() > get_min_buffer_count();
        }
-       
-       bool try_pop_audio(std::shared_ptr<AVPacket>& packet, int audio_stream_index)
+
+       void tick()
        {
-               if (audio_streams_.size() < audio_stream_index + 1)
-                       return false;
+               if(!executor_.is_running())
+                       return;
 
-               auto& audio_stream = audio_streams_.at(audio_stream_index);
-               bool result = audio_stream.try_pop(packet);
-               if(result)
-                       cond_.notify_one();
+               executor_.begin_invoke([this]
+               {
+                       if(full())
+                               return;
+
+                       try
+                       {
+                               auto packet = create_packet();
 
-               auto buffer_nr = boost::lexical_cast<std::string>(audio_stream_index + 1);
-               graph_->set_value("audio-buffer" + buffer_nr, std::min(1.0, static_cast<double>(audio_stream.size())/MIN_FRAMES));
+                               auto ret = av_read_frame(format_context_.get(), packet.get()); // packet is only valid until next call of av_read_frame. Use av_dup_packet to extend its life.
 
-               return result;
+                               if(is_eof(ret))
+                               {
+                                       frame_number_   = 0;
+
+                                       if(loop_)
+                                       {
+                                               queued_seek(start_);
+                                               graph_->set_tag(diagnostics::tag_severity::INFO, "seek");
+                                               CASPAR_LOG(trace) << print() << " Looping.";
+                                       }
+                                       else
+                                       {
+                                               // Needed by some decoders to decode remaining frames based on last packet.
+                                               auto flush_packet = create_packet();
+                                               flush_packet->data = nullptr;
+                                               flush_packet->size = 0;
+                                               flush_packet->pos = -1;
+
+                                               buffer_.push(flush_packet);
+
+                                               executor_.stop();
+                                       }
+                               }
+                               else
+                               {
+                                       THROW_ON_ERROR(ret, "av_read_frame", print());
+
+                                       if(packet->stream_index == default_stream_index_)
+                                               ++frame_number_;
+
+                                       THROW_ON_ERROR2(av_dup_packet(packet.get()), print());
+
+                                       // Make sure that the packet is correctly deallocated even if size and data are modified during decoding.
+                                       auto size = packet->size;
+                                       auto data = packet->data;
+
+                                       packet = spl::shared_ptr<AVPacket>(packet.get(), [packet, size, data](AVPacket*)
+                                       {
+                                               packet->size = size;
+                                               packet->data = data;
+                                       });
+
+                                       buffer_.try_push(packet);
+                                       buffer_size_ += packet->size;
+
+                                       graph_->set_value("buffer-size", (static_cast<double>(buffer_size_)+0.001)/MAX_BUFFER_SIZE);
+                                       graph_->set_value("buffer-count", (static_cast<double>(buffer_.size()+0.001)/MAX_BUFFER_COUNT));
+                               }
+
+                               tick();
+                       }
+                       catch(...)
+                       {
+                               if (!thumbnail_mode_)
+                                       CASPAR_LOG_CURRENT_EXCEPTION();
+                               executor_.stop();
+                       }
+               });
        }
 
-       void seek(uint32_t target)
+       spl::shared_ptr<AVFormatContext> open_input(const std::wstring& url_or_file, const ffmpeg_options& vid_params)
        {
+               AVDictionary* format_options = nullptr;
+
+               CASPAR_SCOPE_EXIT
                {
-                       boost::lock_guard<boost::mutex> lock(mutex_);
+                       if (format_options)
+                               av_dict_free(&format_options);
+               };
 
-                       seek_target_ = target;
-                       video_stream_.clear();
+               for (auto& option : vid_params)
+                       av_dict_set(&format_options, option.first.c_str(), option.second.c_str(), 0);
 
-                       for (auto& audio_stream : audio_streams_)
-                               audio_stream.clear();
+               auto resource_name                      = std::wstring();
+               auto parts                                      = caspar::protocol_split(url_or_file);
+               auto protocol                           = parts.at(0);
+               auto path                                       = parts.at(1);
+               AVInputFormat* input_format     = nullptr;
+
+               static const std::set<std::wstring> PROTOCOLS_TREATED_AS_FORMATS = { L"dshow", L"v4l2" };
+
+               if (protocol.empty())
+                       resource_name = path;
+               else if (PROTOCOLS_TREATED_AS_FORMATS.find(protocol) != PROTOCOLS_TREATED_AS_FORMATS.end())
+               {
+                       input_format = av_find_input_format(u8(protocol).c_str());
+                       resource_name = path;
                }
+               else
+                       resource_name = protocol + L"://" + path;
 
-               cond_.notify_one();
-       }
+               AVFormatContext* weak_context = nullptr;
+               THROW_ON_ERROR2(avformat_open_input(&weak_context, u8(resource_name).c_str(), input_format, &format_options), resource_name);
 
-       int get_actual_audio_stream_index(int audio_stream_index) const
-       {
-               if (audio_stream_index + 1 > audio_streams_.size())
-                       CASPAR_THROW_EXCEPTION(averror_stream_not_found());
+               spl::shared_ptr<AVFormatContext> context(weak_context, [](AVFormatContext* ptr)
+               {
+                       avformat_close_input(&ptr);
+               });
 
-               return audio_streams_.at(audio_stream_index).index();
+               if (format_options)
+               {
+                       std::string unsupported_tokens = "";
+                       AVDictionaryEntry *t = NULL;
+                       while ((t = av_dict_get(format_options, "", t, AV_DICT_IGNORE_SUFFIX)) != nullptr)
+                       {
+                               if (!unsupported_tokens.empty())
+                                       unsupported_tokens += ", ";
+                               unsupported_tokens += t->key;
+                       }
+                       CASPAR_THROW_EXCEPTION(user_error() << msg_info(unsupported_tokens));
+               }
+
+               THROW_ON_ERROR2(avformat_find_stream_info(context.get(), nullptr), resource_name);
+               fix_meta_data(*context);
+               return context;
        }
-               
-       std::wstring print() const
+
+       void fix_meta_data(AVFormatContext& context)
        {
-               return L"ffmpeg_input[" + filename_ + L")]";
+               auto video_index = av_find_best_stream(&context, AVMEDIA_TYPE_VIDEO, -1, -1, 0, 0);
+
+               if (video_index > -1)
+               {
+                       auto video_stream = context.streams[video_index];
+                       auto video_context = context.streams[video_index]->codec;
+
+                       if (boost::filesystem::path(context.filename).extension().string() == ".flv")
+                       {
+                               try
+                               {
+                                       auto meta = read_flv_meta_info(context.filename);
+                                       double fps = boost::lexical_cast<double>(meta["framerate"]);
+                                       video_stream->nb_frames = static_cast<int64_t>(boost::lexical_cast<double>(meta["duration"])*fps);
+                               }
+                               catch (...) {}
+                       }
+                       else
+                       {
+                               auto stream_time = video_stream->time_base;
+                               auto duration = video_stream->duration;
+                               auto codec_time = video_context->time_base;
+                               auto ticks = video_context->ticks_per_frame;
+
+                               if (video_stream->nb_frames == 0)
+                                       video_stream->nb_frames = (duration*stream_time.num*codec_time.den) / (stream_time.den*codec_time.num*ticks);
+                       }
+               }
        }
 
-private:
-       void internal_seek(uint32_t target)
+       void queued_seek(const uint32_t target)
        {
-               eof_ = false;
-               graph_->set_tag(diagnostics::tag_severity::INFO, "seek");
-
-               if (is_logging_quiet_for_thread())
-                       CASPAR_LOG(trace) << print() << " Seeking: " << target;
-               else
+               if (!thumbnail_mode_)
                        CASPAR_LOG(debug) << print() << " Seeking: " << target;
 
                int flags = AVSEEK_FLAG_FRAME;
@@ -278,147 +351,55 @@ private:
                                        flags = AVSEEK_FLAG_BYTE;
                        }
                }
-               
-               auto stream                             = format_context_->streams[default_stream_index_];
-               auto fps                                = read_fps(*format_context_, 0.0);
-               auto target_timestamp = static_cast<int64_t>((target / fps * stream->time_base.den) / stream->time_base.num);
-               
-               THROW_ON_ERROR2(avformat_seek_file(
-                               format_context_.get(),
-                               default_stream_index_,
-                               std::numeric_limits<int64_t>::min(),
-                               target_timestamp,
-                               std::numeric_limits<int64_t>::max(),
-                               0), print());
-               
-               video_stream_.push(nullptr);
-
-               for (auto& audio_stream : audio_streams_)
-                       audio_stream.push(nullptr);
-       }
 
-       void tick()
-       {
-               if(seek_target_)                                
-               {
-                       internal_seek(*seek_target_);
-                       seek_target_.reset();
-               }
+               auto stream = format_context_->streams[default_stream_index_];
 
-               auto packet = create_packet();
-               
-               auto ret = av_read_frame(format_context_.get(), packet.get()); // packet is only valid until next call of av_read_frame. Use av_dup_packet to extend its life.  
-               
-               if(is_eof(ret))                                                                                                              
-               {
-                       if (loop_)
-                               internal_seek(start_);
-                       else
-                       {
-                               eof_ = true;
-                       }
-               }
-               else
-               {               
-                       THROW_ON_ERROR(ret, "av_read_frame", print());
-                                       
-                       THROW_ON_ERROR2(av_dup_packet(packet.get()), print());
-                               
-                       // Make sure that the packet is correctly deallocated even if size and data is modified during decoding.
-                       const auto size = packet->size;
-                       const auto data = packet->data;
-                       
-                       packet = spl::shared_ptr<AVPacket>(packet.get(), [packet, size, data](AVPacket*)
-                       {
-                               packet->size = size;
-                               packet->data = data;                            
-                       });
-                                       
-                       const auto stream_time_base = format_context_->streams[packet->stream_index]->time_base;
-                       const auto packet_frame_number = static_cast<uint32_t>((static_cast<double>(packet->pts * stream_time_base.num)/stream_time_base.den)*fps_);
-
-                       if(packet->stream_index == default_stream_index_)
-                               frame_number_ = packet_frame_number;
-                                       
-                       if(packet_frame_number >= start_ && packet_frame_number < length_)
-                       {
-                               video_stream_.push(packet);
-
-                               for (auto& audio_stream : audio_streams_)
-                                       audio_stream.push(packet);
-                       }
-               }       
 
-               if (video_stream_.is_available())
-                       graph_->set_value("video-buffer", std::min(1.0, static_cast<double>(video_stream_.size())/MIN_FRAMES));
+               auto fps = read_fps(*format_context_, 0.0);
 
-               for (int i = 0; i < audio_streams_.size(); ++i)
-                       graph_->set_value(
-                                       "audio-buffer" + boost::lexical_cast<std::string>(i + 1),
-                                       std::min(1.0, static_cast<double>(audio_streams_[i].size())/MIN_FRAMES));
-       }
-                       
-       bool full() const
-       {
-               bool video_full = video_stream_.size() >= MIN_FRAMES;
-
-               if (!video_full)
-                       return false;
-
-               for (auto& audio_stream : audio_streams_)
-                       if (audio_stream.size() < MIN_FRAMES)
-                               return false;
-
-               return true;
+               THROW_ON_ERROR2(avformat_seek_file(
+                       format_context_.get(),
+                       default_stream_index_,
+                       std::numeric_limits<int64_t>::min(),
+                       static_cast<int64_t>((target / fps * stream->time_base.den) / stream->time_base.num),
+                       std::numeric_limits<int64_t>::max(),
+                       0), print());
+
+               auto flush_packet       = create_packet();
+               flush_packet->data      = nullptr;
+               flush_packet->size      = 0;
+               flush_packet->pos       = target;
+
+               buffer_.push(flush_packet);
        }
 
-       void run(bool thumbnail_mode)
+       bool is_eof(int ret)
        {
-               ensure_gpf_handler_installed_for_thread(u8(print()).c_str());
-               auto quiet_logging = temporary_enable_quiet_logging_for_thread(thumbnail_mode);
-
-               while(is_running_)
-               {
-                       try
-                       {
-                               
-                               {
-                                       boost::unique_lock<boost::mutex> lock(mutex_);
+               if(ret == AVERROR(EIO))
+                       CASPAR_LOG(trace) << print() << " Received EIO, assuming EOF. ";
+               if(ret == AVERROR_EOF)
+                       CASPAR_LOG(trace) << print() << " Received EOF. ";
 
-                                       while(full() && !seek_target_ && is_running_)
-                                               cond_.wait(lock);
-                                       
-                                       tick();
-                               }
-                       }
-                       catch(...)
-                       {
-                               CASPAR_LOG_CURRENT_EXCEPTION();
-                               is_running_ = false;
-                       }
-               }
+               return ret == AVERROR_EOF || ret == AVERROR(EIO) || frame_number_ >= length_; // av_read_frame doesn't always correctly return AVERROR_EOF;
        }
-                       
-       bool is_eof(int ret)
+
+       int num_audio_streams() const
        {
-               #pragma warning (disable : 4146)
-               return ret == AVERROR_EOF || ret == AVERROR(EIO) || frame_number_ >= length_; // av_read_frame doesn't always correctly return AVERROR_EOF;
+               return 0; // TODO
        }
 };
 
-input::input(const spl::shared_ptr<diagnostics::graph>& graph, const std::wstring& filename, bool loop, uint32_t start, uint32_t length, bool thumbnail_mode)
-       : impl_(new impl(graph, filename, loop, start, length, thumbnail_mode)){}
-int input::get_actual_audio_stream_index(int audio_stream_index) const { return impl_->get_actual_audio_stream_index(audio_stream_index); };
-int input::num_audio_streams() const { return static_cast<int>(impl_->audio_streams_.size()); }
-bool input::try_pop_video(std::shared_ptr<AVPacket>& packet){return impl_->try_pop_video(packet);}
-bool input::try_pop_audio(std::shared_ptr<AVPacket>& packet, int audio_stream_index){return impl_->try_pop_audio(packet, audio_stream_index);}
-AVFormatContext& input::context(){return *impl_->format_context_;}
-void input::loop(bool value){impl_->loop_ = value;}
-bool input::loop() const{return impl_->loop_;}
-void input::seek(uint32_t target){impl_->seek(target);}
+input::input(const spl::shared_ptr<diagnostics::graph>& graph, const std::wstring& url_or_file, bool loop, uint32_t start, uint32_t length, bool thumbnail_mode, const ffmpeg_options& vid_params)
+       : impl_(new implementation(graph, url_or_file, loop, start, length, thumbnail_mode, vid_params)){}
+bool input::eof() const {return !impl_->executor_.is_running();}
+bool input::try_pop(std::shared_ptr<AVPacket>& packet){return impl_->try_pop(packet);}
+spl::shared_ptr<AVFormatContext> input::context(){return impl_->format_context_;}
 void input::start(uint32_t value){impl_->start_ = value;}
 uint32_t input::start() const{return impl_->start_;}
 void input::length(uint32_t value){impl_->length_ = value;}
 uint32_t input::length() const{return impl_->length_;}
-bool input::eof() const { return impl_->eof_; }
+void input::loop(bool value){impl_->loop_ = value;}
+bool input::loop() const{return impl_->loop_;}
+int input::num_audio_streams() const { return impl_->num_audio_streams(); }
+std::future<bool> input::seek(uint32_t target){return impl_->seek(target);}
 }}
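
The rewritten open_input() copies the user-supplied ffmpeg_options into an AVDictionary, lets avformat_open_input() consume the entries it understands, and treats anything left in the dictionary as an unsupported option. The sketch below pulls that leftover check out into a free function for illustration (includes assumed; the function name is made up and not part of the commit):

    extern "C"
    {
        #include <libavformat/avformat.h>
        #include <libavutil/dict.h>
    }

    #include <string>

    // avformat_open_input() removes every option it consumed from the dictionary,
    // so any entry that survives is one the demuxer did not recognise.
    std::string list_unsupported_options(AVDictionary* remaining)
    {
        std::string unsupported;
        AVDictionaryEntry* entry = nullptr;

        while ((entry = av_dict_get(remaining, "", entry, AV_DICT_IGNORE_SUFFIX)) != nullptr)
        {
            if (!unsupported.empty())
                unsupported += ", ";

            unsupported += entry->key;
        }

        return unsupported;
    }
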
index 5f843b9d11770c8973a151cf7a695c6baa52dd2b..e1c70631237ec0be47bab47af2fe7cbd591ba118 100644 (file)
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
+* Copyright 2013 Sveriges Television AB http://casparcg.com/
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 
 #pragma once
 
+#include "../util/util.h"
+
 #include <common/memory.h>
 
 #include <memory>
 #include <string>
 #include <cstdint>
+#include <future>
 
 #include <boost/noncopyable.hpp>
+#include <boost/rational.hpp>
 
 struct AVFormatContext;
 struct AVPacket;
@@ -39,43 +43,33 @@ namespace diagnostics {
 class graph;
 
 }
-        
+
 namespace ffmpeg {
 
 class input : boost::noncopyable
 {
 public:
-       explicit input(
-                       const spl::shared_ptr<diagnostics::graph>& graph,
-                       const std::wstring& filename,
-                       bool loop, uint32_t start,
-                       uint32_t length,
-                       bool thumbnail_mode);
-
-       int                     num_audio_streams() const;
-       int                     get_actual_audio_stream_index(int audio_stream_index) const;
+       explicit input(const spl::shared_ptr<diagnostics::graph>& graph, const std::wstring& url_or_file, bool loop, uint32_t start, uint32_t length, bool thumbnail_mode, const ffmpeg_options& vid_params);
 
-       bool            try_pop_video(std::shared_ptr<AVPacket>& packet);
-       bool            try_pop_audio(std::shared_ptr<AVPacket>& packet, int audio_stream_index);
+       bool                                                            try_pop(std::shared_ptr<AVPacket>& packet);
+       bool                                                            eof() const;
 
-       void            loop(bool value);
-       bool            loop() const;
+       void                                                            start(uint32_t value);
+       uint32_t                                                        start() const;
+       void                                                            length(uint32_t value);
+       uint32_t                                                        length() const;
+       void                                                            loop(bool value);
+       bool                                                            loop() const;
 
-       void            start(uint32_t value);
-       uint32_t        start() const;
+       int                                                                     num_audio_streams() const;
 
-       void            length(uint32_t value);
-       uint32_t        length() const;
+       std::future<bool>                                       seek(uint32_t target);
 
-       bool            eof() const;
-
-       void            seek(uint32_t target);
-
-       AVFormatContext& context();
+       spl::shared_ptr<AVFormatContext>        context();
 private:
-       struct impl;
-       std::shared_ptr<impl> impl_;
+       struct implementation;
+       std::shared_ptr<implementation> impl_;
 };
 
-       
+
 }}
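
The per-stream try_pop_video/try_pop_audio pair is gone: callers now drain one shared packet queue, dispatch on stream_index themselves, and use the asynchronous seek(). A rough caller-side sketch against the interface above; the diagnostics graph and ffmpeg_options are assumed to be supplied by the surrounding producer, and the file name is made up.

    #include "input.h"   // the header shown above

    #include <limits>

    void consume(const caspar::spl::shared_ptr<caspar::diagnostics::graph>& graph,
                 const caspar::ffmpeg::ffmpeg_options& options)
    {
        caspar::ffmpeg::input in(
            graph,
            L"media/clip.mov",                        // hypothetical URL or file
            false,                                    // loop
            0,                                        // start frame
            std::numeric_limits<uint32_t>::max(),     // length
            false,                                    // thumbnail_mode
            options);

        in.seek(250).get();                           // asynchronous; the returned future signals completion

        std::shared_ptr<AVPacket> packet;

        while (!in.eof())
        {
            if (!in.try_pop(packet))
                continue;   // buffer momentarily empty (a real caller would wait rather than spin)

            // packet->data == nullptr marks a flush packet, pushed after a seek, a loop or end
            // of file; forward it so the decoders can drain their buffered frames, otherwise
            // dispatch on packet->stream_index to the matching decoder.
        }
    }
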
index 3a469d8e122da656b08851eb5a2f2de2f0d972f5..4e3882a4bcb72aafd2e3745f821366ba8ba6e686 100644 (file)
@@ -30,13 +30,7 @@ namespace caspar { namespace ffmpeg {
 enum class display_mode
 {
        simple,
-       duplicate,
-       half,
-       interlace,
        deinterlace_bob,
-       deinterlace_bob_reinterlace,
-       deinterlace,
-       count,
        invalid
 };
 
@@ -46,65 +40,14 @@ std::basic_ostream< CharT, TraitsT >& operator<< (std::basic_ostream<CharT, Trai
        switch(value)
        {
        case display_mode::simple:                                              return o << L"simple";
-       case display_mode::duplicate:                                   return o << L"duplicate";
-       case display_mode::half:                                                return o << L"half";
-       case display_mode::interlace:                                   return o << L"interlace";
        case display_mode::deinterlace_bob:                             return o << L"deinterlace_bob";
-       case display_mode::deinterlace_bob_reinterlace: return o << L"deinterlace_bob_reinterlace";
-       case display_mode::deinterlace:                                 return o << L"deinterlace";
        default:                                                                                return o << L"invalid";
        }
 }
 
-static display_mode get_display_mode(const core::field_mode in_mode, double in_fps, const core::field_mode out_mode, double out_fps)
-{              
-       static const auto epsilon = 2.0;
-
-       if(in_fps < 20.0 || in_fps > 80.0)
-       {
-               //if(out_mode != core::field_mode::progressive && in_mode == core::field_mode::progressive)
-               //      return display_mode::interlace;
-               
-               if(out_mode == core::field_mode::progressive && in_mode != core::field_mode::progressive)
-               {
-                       if(in_fps < 35.0)
-                               return display_mode::deinterlace;
-                       else
-                               return display_mode::deinterlace_bob;
-               }
-       }
-
-       if(std::abs(in_fps - out_fps) < epsilon)
-       {
-               if(in_mode != core::field_mode::progressive && out_mode == core::field_mode::progressive)
-                       return display_mode::deinterlace;
-               //else if(in_mode == core::field_mode::progressive && out_mode != core::field_mode::progressive)
-               //      simple(); // interlace_duplicate();
-               else
-                       return display_mode::simple;
-       }
-       else if(std::abs(in_fps/2.0 - out_fps) < epsilon)
-       {
-               if(in_mode != core::field_mode::progressive)
-                       return display_mode::invalid;
-
-               if(out_mode != core::field_mode::progressive)
-                       return display_mode::interlace;
-               else
-                       return display_mode::half;
-       }
-       else if(std::abs(in_fps - out_fps/2.0) < epsilon)
-       {
-               if(out_mode != core::field_mode::progressive)
-                       return display_mode::invalid;
-
-               if(in_mode != core::field_mode::progressive)
-                       return display_mode::deinterlace_bob;
-               else
-                       return display_mode::duplicate;
-       }
-
-       return display_mode::invalid;
+static display_mode get_display_mode(const core::field_mode in_mode)
+{
+       return in_mode == core::field_mode::progressive ? display_mode::simple : display_mode::deinterlace_bob;
 }
 
 }}
\ No newline at end of file
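
get_display_mode() now looks only at the source field mode: progressive input maps to simple and anything interlaced maps to deinterlace_bob, with the old frame-rate heuristics dropped. A short illustration (a sketch; field_mode::upper is assumed to be one of the core field_mode values, and the operator<< defined above needs a wide stream):

    #include <iostream>

    void print_display_modes()
    {
        std::wcout << caspar::ffmpeg::get_display_mode(caspar::core::field_mode::progressive) << L"\n"; // simple
        std::wcout << caspar::ffmpeg::get_display_mode(caspar::core::field_mode::upper)       << L"\n"; // deinterlace_bob
    }
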
index 055ec9d0fd6572fa87a3112b4b3667d31ea412ae..0ee507054652f02d14896d8908b50e7cd580ff8a 100644 (file)
@@ -24,6 +24,7 @@
 #include "frame_muxer.h"
 
 #include "../filter/filter.h"
+#include "../filter/audio_filter.h"
 #include "../util/util.h"
 #include "../../ffmpeg.h"
 
@@ -43,7 +44,7 @@
 #pragma warning (push)
 #pragma warning (disable : 4244)
 #endif
-extern "C" 
+extern "C"
 {
        #define __STDC_CONSTANT_MACROS
        #define __STDC_LIMIT_MACROS
@@ -57,6 +58,8 @@ extern "C"
 #include <common/assert.h>
 #include <boost/range/algorithm_ext/push_back.hpp>
 #include <boost/algorithm/string/predicate.hpp>
+#include <boost/thread/mutex.hpp>
+#include <boost/optional.hpp>
 
 #include <deque>
 #include <queue>
@@ -66,62 +69,123 @@ using namespace caspar::core;
 
 namespace caspar { namespace ffmpeg {
 
-bool is_frame_format_changed(const AVFrame& lhs, const AVFrame& rhs)
+struct av_frame_format
 {
-       if (lhs.format != rhs.format)
-               return true;
+       int                                                                             pix_format;
+       std::array<int, AV_NUM_DATA_POINTERS>   line_sizes;
+       int                                                                             width;
+       int                                                                             height;
+
+       av_frame_format(const AVFrame& frame)
+               : pix_format(frame.format)
+               , width(frame.width)
+               , height(frame.height)
+       {
+               boost::copy(frame.linesize, line_sizes.begin());
+       }
+
+       bool operator==(const av_frame_format& other) const
+       {
+               return pix_format == other.pix_format
+                       && line_sizes == other.line_sizes
+                       && width == other.width
+                       && height == other.height;
+       }
+
+       bool operator!=(const av_frame_format& other) const
+       {
+               return !(*this == other);
+       }
+};
+
+std::unique_ptr<audio_filter> create_amerge_filter(std::vector<audio_input_pad> input_pads, const core::audio_channel_layout& layout)
+{
+       std::vector<audio_output_pad> output_pads;
+       std::wstring amerge;
+
+       output_pads.emplace_back(
+                       std::vector<int>                        { 48000 },
+                       std::vector<AVSampleFormat>     { AVSampleFormat::AV_SAMPLE_FMT_S32 },
+                       std::vector<uint64_t>           { static_cast<uint64_t>(av_get_default_channel_layout(layout.num_channels)) });
 
-       for (int i = 0; i < AV_NUM_DATA_POINTERS; ++i)
+       if (input_pads.size() > 1)
        {
-               if (lhs.linesize[i] != rhs.linesize[i])
-                       return true;
+               for (int i = 0; i < input_pads.size(); ++i)
+                       amerge += L"[a:" + boost::lexical_cast<std::wstring>(i) + L"]";
+
+               amerge += L"amerge=inputs=" + boost::lexical_cast<std::wstring>(input_pads.size());
        }
 
-       return false;
+       std::wstring afilter;
+
+       if (!amerge.empty())
+       {
+               afilter = amerge;
+               afilter += L"[aout:0]";
+       }
+
+       return std::unique_ptr<audio_filter>(new audio_filter(input_pads, output_pads, u8(afilter)));
 }
-       
+
 struct frame_muxer::impl : boost::noncopyable
-{      
-       std::queue<core::mutable_frame>                                 video_stream_;
-       core::mutable_audio_buffer                                              audio_stream_;
-       std::queue<draw_frame>                                                  frame_buffer_;
-       display_mode                                                                    display_mode_                   = display_mode::invalid;
-       const double                                                                    in_fps_;
+{
+       static constexpr std::size_t                                    max_stream_size                         = 120;
+       std::queue<std::queue<core::mutable_frame>>             video_streams_;
+       std::queue<core::mutable_audio_buffer>                  audio_streams_;
+       std::queue<core::draw_frame>                                    frame_buffer_;
+       display_mode                                                                    display_mode_                           = display_mode::invalid;
+       const boost::rational<int>                                              in_framerate_;
        const video_format_desc                                                 format_desc_;
-       audio_channel_layout                                                    channel_layout_;
-       
-       std::vector<int>                                                                audio_cadence_                  = format_desc_.audio_cadence;
-                       
+       const audio_channel_layout                                              audio_channel_layout_;
+
+       std::vector<int>                                                                audio_cadence_                          = format_desc_.audio_cadence;
+
        spl::shared_ptr<core::frame_factory>                    frame_factory_;
-       std::shared_ptr<AVFrame>                                                previous_frame_;
+       boost::optional<av_frame_format>                                previously_filtered_frame_;
 
        std::unique_ptr<filter>                                                 filter_;
        const std::wstring                                                              filter_str_;
-       bool                                                                                    force_deinterlacing_    = env::properties().get(L"configuration.force-deinterlace", true);
-               
+       std::unique_ptr<audio_filter>                                   audio_filter_;
+       const bool                                                                              multithreaded_filter_;
+       bool                                                                                    force_deinterlacing_            = env::properties().get(L"configuration.force-deinterlace", false);
+
+       mutable boost::mutex                                                    out_framerate_mutex_;
+       boost::rational<int>                                                    out_framerate_;
+
        impl(
-                       double in_fps,
+                       boost::rational<int> in_framerate,
+                       std::vector<audio_input_pad> audio_input_pads,
                        const spl::shared_ptr<core::frame_factory>& frame_factory,
                        const core::video_format_desc& format_desc,
                        const core::audio_channel_layout& channel_layout,
-                       const std::wstring& filter_str)
-               : in_fps_(in_fps)
+                       const std::wstring& filter_str,
+                       bool multithreaded_filter)
+               : in_framerate_(in_framerate)
                , format_desc_(format_desc)
-               , channel_layout_(channel_layout)
+               , audio_channel_layout_(channel_layout)
                , frame_factory_(frame_factory)
                , filter_str_(filter_str)
-       {               
-               // Note: Uses 1 step rotated cadence for 1001 modes (1602, 1602, 1601, 1602, 1601)
-               // This cadence fills the audio mixer most optimally.
-               boost::range::rotate(audio_cadence_, std::end(audio_cadence_)-1);
+               , multithreaded_filter_(multithreaded_filter)
+       {
+               video_streams_.push(std::queue<core::mutable_frame>());
+               audio_streams_.push(core::mutable_audio_buffer());
+
+               set_out_framerate(in_framerate_);
+
+               if (!audio_input_pads.empty())
+               {
+                       audio_filter_ = create_amerge_filter(std::move(audio_input_pads), audio_channel_layout_);
+               }
        }
-       
-       void push_video(const std::shared_ptr<AVFrame>& video)
-       {               
-               if(!video)
+
+       void push(const std::shared_ptr<AVFrame>& video_frame)
+       {
+               if (!video_frame)
                        return;
 
-               if (previous_frame_ && video->data[0] && is_frame_format_changed(*previous_frame_, *video))
+               av_frame_format current_frame_format(*video_frame);
+
+               if (previously_filtered_frame_ && video_frame->data[0] && *previously_filtered_frame_ != current_frame_format)
                {
                        // Fixes bug where avfilter crashes server on some DV files (starts in YUV420p but changes to YUV411p after the first frame).
                        if (ffmpeg::is_logging_quiet_for_thread())
@@ -130,290 +194,281 @@ struct frame_muxer::impl : boost::noncopyable
                                CASPAR_LOG(info) << L"[frame_muxer] Frame format has changed. Resetting display mode.";
 
                        display_mode_ = display_mode::invalid;
+                       filter_.reset();
+                       previously_filtered_frame_ = boost::none;
                }
 
-               if(!video->data[0])
+               if (video_frame == flush_video())
+               {
+                       video_streams_.push(std::queue<core::mutable_frame>());
+               }
+               else if (video_frame == empty_video())
                {
-                       auto empty_frame = frame_factory_->create_frame(this, core::pixel_format_desc(core::pixel_format::invalid), channel_layout_);
-                       video_stream_.push(std::move(empty_frame));
+                       video_streams_.back().push(frame_factory_->create_frame(this, core::pixel_format::invalid, audio_channel_layout_));
                        display_mode_ = display_mode::simple;
                }
                else
                {
-                       if(!filter_ || display_mode_ == display_mode::invalid)
-                               update_display_mode(video);
-                               
-                       filter_->push(video);
-                       previous_frame_ = video;
-                       for (auto& av_frame : filter_->poll_all())
-                               video_stream_.push(make_frame(this, av_frame, *frame_factory_, channel_layout_));
+                       if (!filter_ || display_mode_ == display_mode::invalid)
+                               update_display_mode(video_frame);
+
+                       if (filter_)
+                       {
+                               filter_->push(video_frame);
+                               previously_filtered_frame_ = current_frame_format;
+
+                               for (auto& av_frame : filter_->poll_all())
+                                       video_streams_.back().push(make_frame(this, av_frame, *frame_factory_, audio_channel_layout_));
+                       }
                }
 
-               merge();
+               if (video_streams_.back().size() > max_stream_size)
+                       CASPAR_THROW_EXCEPTION(invalid_operation() << source_info("frame_muxer") << msg_info("video-stream overflow. This can be caused by incorrect frame-rate. Check clip meta-data."));
        }
 
-       void push_audio(const std::shared_ptr<AVFrame>& audio)
+       void push(const std::vector<std::shared_ptr<core::mutable_audio_buffer>>& audio_samples_per_stream)
        {
-               if(!audio)
+               if (audio_samples_per_stream.empty())
                        return;
 
-               if(!audio->data[0])             
-               {
-                       if (channel_layout_ == core::audio_channel_layout::invalid())
-                               channel_layout_ = *core::audio_channel_layout_repository::get_default()->get_layout(L"stereo");
+               bool is_flush = boost::count_if(
+                               audio_samples_per_stream,
+                               [](std::shared_ptr<core::mutable_audio_buffer> a) { return a == flush_audio(); }) > 0;
 
-                       boost::range::push_back(audio_stream_, core::mutable_audio_buffer(audio_cadence_.front() * channel_layout_.num_channels, 0));
+               if (is_flush)
+               {
+                       audio_streams_.push(core::mutable_audio_buffer());
+               }
+               else if (audio_samples_per_stream.at(0) == empty_audio())
+               {
+                       boost::range::push_back(audio_streams_.back(), core::mutable_audio_buffer(audio_cadence_.front() * audio_channel_layout_.num_channels, 0));
                }
                else
                {
-                       auto ptr = reinterpret_cast<int32_t*>(audio->data[0]);
-                       audio_stream_.insert(audio_stream_.end(), ptr, ptr + audio->linesize[0]/sizeof(int32_t));
+                       for (int i = 0; i < audio_samples_per_stream.size(); ++i)
+                       {
+                               auto range = boost::make_iterator_range_n(
+                                               audio_samples_per_stream.at(i)->data(),
+                                               audio_samples_per_stream.at(i)->size());
+
+                               audio_filter_->push(i, range);
+                       }
+
+                       for (auto frame : audio_filter_->poll_all(0))
+                       {
+                               auto audio = boost::make_iterator_range_n(
+                                               reinterpret_cast<std::int32_t*>(frame->extended_data[0]),
+                                               frame->nb_samples * frame->channels);
+
+                               boost::range::push_back(audio_streams_.back(), audio);
+                       }
                }
 
-               merge();
+               if (audio_streams_.back().size() > max_stream_size * audio_cadence_.front() * audio_channel_layout_.num_channels)
+                       CASPAR_THROW_EXCEPTION(invalid_operation() << source_info("frame_muxer") << msg_info("audio-stream overflow. This can be caused by incorrect frame-rate. Check clip meta-data."));
        }
-       
+
        bool video_ready() const
        {
-               switch(display_mode_)
-               {
-               case display_mode::deinterlace_bob_reinterlace:                                 
-               case display_mode::interlace:   
-               case display_mode::half:
-                       return video_stream_.size() >= 2;
-               default:                                                                                
-                       return video_stream_.size() >= 1;
-               }
+               return video_streams_.size() > 1 || (video_streams_.size() >= audio_streams_.size() && video_ready2());
        }
-       
+
        bool audio_ready() const
        {
-               switch(display_mode_)
-               {
-               case display_mode::duplicate:                                   
-                       return audio_stream_.size() >= static_cast<size_t>(audio_cadence_[0] + audio_cadence_[1 % audio_cadence_.size()]) * channel_layout_.num_channels;
-               default:                                                                                
-                       return audio_stream_.size() >= static_cast<size_t>(audio_cadence_.front()) * channel_layout_.num_channels;
-               }
+               return audio_streams_.size() > 1 || (audio_streams_.size() >= video_streams_.size() && audio_ready2());
        }
 
-       bool empty() const
+       bool video_ready2() const
        {
-               return frame_buffer_.empty();
+               return video_streams_.front().size() >= 1;
        }
 
-       core::draw_frame front() const
+       bool audio_ready2() const
        {
-               return frame_buffer_.front();
+               return audio_streams_.front().size() >= audio_cadence_.front() * audio_channel_layout_.num_channels;
        }
 
-       void pop()
+       core::draw_frame poll()
        {
-               frame_buffer_.pop();
-       }
-               
-       void merge()
-       {
-               while(video_ready() && audio_ready() && display_mode_ != display_mode::invalid)
-               {                               
-                       auto frame1                     = pop_video();
-                       frame1.audio_data()     = pop_audio();
+               if (!frame_buffer_.empty())
+               {
+                       auto frame = frame_buffer_.front();
+                       frame_buffer_.pop();
+                       return frame;
+               }
 
-                       switch(display_mode_)
-                       {
-                       case display_mode::simple:                                              
-                       case display_mode::deinterlace_bob:                             
-                       case display_mode::deinterlace: 
-                               {
-                                       frame_buffer_.push(core::draw_frame(std::move(frame1)));
-                                       break;
-                               }
-                       case display_mode::interlace:                                   
-                       case display_mode::deinterlace_bob_reinterlace: 
-                               {                               
-                                       auto frame2 = pop_video();
-
-                                       frame_buffer_.push(core::draw_frame::interlace(
-                                               core::draw_frame(std::move(frame1)),
-                                               core::draw_frame(std::move(frame2)),
-                                               format_desc_.field_mode));      
-                                       break;
-                               }
-                       case display_mode::duplicate:   
-                               {
-                                       //boost::range::push_back(frame1.audio_data(), pop_audio());
-
-                                       auto second_audio_frame = core::mutable_frame(
-                                                       std::vector<array<std::uint8_t>>(),
-                                                       pop_audio(),
-                                                       frame1.stream_tag(),
-                                                       core::pixel_format_desc(),
-                                                       channel_layout_);
-                                       auto first_frame = core::draw_frame(std::move(frame1));
-                                       auto muted_first_frame = core::draw_frame(first_frame);
-                                       muted_first_frame.transform().audio_transform.volume = 0;
-                                       auto second_frame = core::draw_frame({ core::draw_frame(std::move(second_audio_frame)), muted_first_frame });
-
-                                       // Same video but different audio.
-                                       frame_buffer_.push(first_frame);
-                                       frame_buffer_.push(second_frame);
-                                       break;
-                               }
-                       case display_mode::half:        
-                               {                               
-                                       pop_video(); // Throw away
-
-                                       frame_buffer_.push(core::draw_frame(std::move(frame1)));
-                                       break;
-                               }
-                       default:
-                               CASPAR_THROW_EXCEPTION(invalid_operation());
-                       }
+               if (video_streams_.size() > 1 && audio_streams_.size() > 1 && (!video_ready2() || !audio_ready2()))
+               {
+                       if (!video_streams_.front().empty() || !audio_streams_.front().empty())
+                               CASPAR_LOG(trace) << L"Truncating: " << video_streams_.front().size() << L" video-frames, " << audio_streams_.front().size() << L" audio-samples.";
+
+                       video_streams_.pop();
+                       audio_streams_.pop();
                }
+
+               if (!video_ready2() || !audio_ready2() || display_mode_ == display_mode::invalid)
+                       return core::draw_frame::empty();
+
+               auto frame                      = pop_video();
+               frame.audio_data()      = pop_audio();
+
+               frame_buffer_.push(core::draw_frame(std::move(frame)));
+
+               return poll();
        }
-       
+
        core::mutable_frame pop_video()
        {
-               auto frame = std::move(video_stream_.front());
-               video_stream_.pop();            
-               return std::move(frame);
+               auto frame = std::move(video_streams_.front().front());
+               video_streams_.front().pop();
+               return frame;
        }
 
        core::mutable_audio_buffer pop_audio()
        {
-               if (audio_stream_.size() < audio_cadence_.front() * channel_layout_.num_channels)
-                       CASPAR_THROW_EXCEPTION(out_of_range());
+               CASPAR_VERIFY(audio_streams_.front().size() >= audio_cadence_.front() * audio_channel_layout_.num_channels);
 
-               auto begin = audio_stream_.begin();
-               auto end   = begin + audio_cadence_.front() * channel_layout_.num_channels;
+               auto begin      = audio_streams_.front().begin();
+               auto end        = begin + (audio_cadence_.front() * audio_channel_layout_.num_channels);
 
                core::mutable_audio_buffer samples(begin, end);
-               audio_stream_.erase(begin, end);
-               
-               boost::range::rotate(audio_cadence_, std::begin(audio_cadence_)+1);
+               audio_streams_.front().erase(begin, end);
+
+               boost::range::rotate(audio_cadence_, std::begin(audio_cadence_) + 1);
 
                return samples;
        }
-                               
+
+       uint32_t calc_nb_frames(uint32_t nb_frames) const
+       {
+               uint64_t nb_frames2 = nb_frames;
+
+               if(filter_ && filter_->is_double_rate()) // Take into account transformations in filter.
+                       nb_frames2 *= 2;
+
+               return static_cast<uint32_t>(nb_frames2);
+       }
+
+       boost::rational<int> out_framerate() const
+       {
+               boost::lock_guard<boost::mutex> lock(out_framerate_mutex_);
+
+               return out_framerate_;
+       }
+private:
        void update_display_mode(const std::shared_ptr<AVFrame>& frame)
        {
-               std::wstring filter_str = filter_str_;
+               std::wstring filter_str = filter_str_;
 
                display_mode_ = display_mode::simple;
 
                auto mode = get_mode(*frame);
-               if(mode == core::field_mode::progressive && frame->height < 720 && in_fps_ < 50.0) // SD frames are interlaced. Probably incorrect meta-data. Fix it.
-                       mode = core::field_mode::upper;
-
-               auto fps  = in_fps_;
-
-               if(filter::is_deinterlacing(filter_str_))
-                       mode = core::field_mode::progressive;
-
-               if(filter::is_double_rate(filter_str_))
-                       fps *= 2;
-                       
-               display_mode_ = get_display_mode(mode, fps, format_desc_.field_mode, format_desc_.fps);
-                       
-               if((frame->height != 480 || format_desc_.height != 486) && // don't deinterlace for NTSC DV
-                               display_mode_ == display_mode::simple && mode != core::field_mode::progressive && format_desc_.field_mode != core::field_mode::progressive && 
-                               frame->height != format_desc_.height)
+
+               if (filter::is_deinterlacing(filter_str_))
                {
-                       display_mode_ = display_mode::deinterlace_bob_reinterlace; // The frame will most likely be scaled, we need to deinterlace->reinterlace 
+                       display_mode_ = display_mode::simple;
+               }
+               else if (mode != core::field_mode::progressive)
+               {
+                       if (force_deinterlacing_)
+                       {
+                               display_mode_ = display_mode::deinterlace_bob;
+                       }
+                       else
+                       {
+                               bool output_also_interlaced = format_desc_.field_mode != core::field_mode::progressive;
+                               bool interlaced_output_compatible =
+                                               output_also_interlaced
+                                               && (
+                                                               (frame->height == 480 && format_desc_.height == 486) // don't deinterlace for NTSC DV
+                                                               || frame->height == format_desc_.height
+                                               )
+                                               && in_framerate_ == format_desc_.framerate;
+
+                               display_mode_ = interlaced_output_compatible ? display_mode::simple : display_mode::deinterlace_bob;
+                       }
                }
 
-               // ALWAYS de-interlace, until we have GPU de-interlacing.
-               if(force_deinterlacing_ && frame->interlaced_frame && display_mode_ != display_mode::deinterlace_bob && display_mode_ != display_mode::deinterlace)
-                       display_mode_ = display_mode::deinterlace_bob_reinterlace;
-               
-               if(display_mode_ == display_mode::deinterlace)
-                       filter_str = append_filter(filter_str, L"YADIF=0:-1");
-               else if(display_mode_ == display_mode::deinterlace_bob || display_mode_ == display_mode::deinterlace_bob_reinterlace)
+               if (display_mode_ == display_mode::deinterlace_bob)
                        filter_str = append_filter(filter_str, L"YADIF=1:-1");
 
-               if(display_mode_ == display_mode::invalid)
-               {
-                       if (ffmpeg::is_logging_quiet_for_thread())
-                               CASPAR_LOG(debug) << L"[frame_muxer] Auto-transcode: Failed to detect display-mode.";
-                       else
-                               CASPAR_LOG(warning) << L"[frame_muxer] Auto-transcode: Failed to detect display-mode.";
+               auto out_framerate = in_framerate_;
 
-                       display_mode_ = display_mode::simple;
-               }
+               if (filter::is_double_rate(filter_str))
+                       out_framerate *= 2;
 
-               if(frame->height == 480) // NTSC DV
+               if (frame->height == 480) // NTSC DV
                {
                        auto pad_str = L"PAD=" + boost::lexical_cast<std::wstring>(frame->width) + L":486:0:2:black";
                        filter_str = append_filter(filter_str, pad_str);
                }
 
                filter_.reset (new filter(
-                       frame->width,
-                       frame->height,
-                       boost::rational<int>(1000000, static_cast<int>(in_fps_ * 1000000)),
-                       boost::rational<int>(static_cast<int>(in_fps_ * 1000000), 1000000),
-                       boost::rational<int>(frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den),
-                       static_cast<AVPixelFormat>(frame->format),
-                       std::vector<AVPixelFormat>(),
-                       u8(filter_str)));
+                               frame->width,
+                               frame->height,
+                               1 / in_framerate_,
+                               in_framerate_,
+                               boost::rational<int>(frame->sample_aspect_ratio.num, frame->sample_aspect_ratio.den),
+                               static_cast<AVPixelFormat>(frame->format),
+                               std::vector<AVPixelFormat>(),
+                               u8(filter_str)));
+
+               set_out_framerate(out_framerate);
+
+               auto in_fps = static_cast<double>(in_framerate_.numerator()) / static_cast<double>(in_framerate_.denominator());
 
                if (ffmpeg::is_logging_quiet_for_thread())
-                       CASPAR_LOG(debug) << L"[frame_muxer] " << display_mode_ << L" " << print_mode(frame->width, frame->height, in_fps_, frame->interlaced_frame > 0);
+                       CASPAR_LOG(debug) << L"[frame_muxer] " << display_mode_ << L" " << print_mode(frame->width, frame->height, in_fps, frame->interlaced_frame > 0);
                else
-                       CASPAR_LOG(info) << L"[frame_muxer] " << display_mode_ << L" " << print_mode(frame->width, frame->height, in_fps_, frame->interlaced_frame > 0);
+                       CASPAR_LOG(info) << L"[frame_muxer] " << display_mode_ << L" " << print_mode(frame->width, frame->height, in_fps, frame->interlaced_frame > 0);
        }
-       
-       uint32_t calc_nb_frames(uint32_t nb_frames) const
-       {
-               uint64_t nb_frames2 = nb_frames;
-               
-               if(filter_ && filter_->is_double_rate()) // Take into account transformations in filter.
-                       nb_frames2 *= 2;
 
-               switch(display_mode_) // Take into account transformation in run.
+       void merge()
+       {
+               while (video_ready() && audio_ready() && display_mode_ != display_mode::invalid)
                {
-               case display_mode::deinterlace_bob_reinterlace:
-               case display_mode::interlace:   
-               case display_mode::half:
-                       nb_frames2 /= 2;
-                       break;
-               case display_mode::duplicate:
-                       nb_frames2 *= 2;
-                       break;
-               }
+                       auto frame1 = pop_video();
+                       frame1.audio_data() = pop_audio();
 
-               return static_cast<uint32_t>(nb_frames2);
+                       frame_buffer_.push(core::draw_frame(std::move(frame1)));
+               }
        }
 
-       void clear()
+       void set_out_framerate(boost::rational<int> out_framerate)
        {
-               while(!video_stream_.empty())
-                       video_stream_.pop();    
+               boost::lock_guard<boost::mutex> lock(out_framerate_mutex_);
 
-               audio_stream_.clear();
+               bool changed = out_framerate != out_framerate_;
+               out_framerate_ = std::move(out_framerate);
 
-               while(!frame_buffer_.empty())
-                       frame_buffer_.pop();
-               
-               filter_.reset();
+               if (changed)
+                       update_audio_cadence();
+       }
+
+       void update_audio_cadence()
+       {
+               audio_cadence_ = find_audio_cadence(out_framerate_);
+
+               // Note: Uses 1 step rotated cadence for 1001 modes (1602, 1602, 1601, 1602, 1601)
+               // This cadence fills the audio mixer most optimally.
+               boost::range::rotate(audio_cadence_, std::end(audio_cadence_) - 1);
        }
 };
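
Editorial sketch (not part of the commit): the cadence handling above rotates once backwards in update_audio_cadence() and once forwards after every pop_audio(), which is easy to misread. The snippet below reproduces the two rotations with the usual 29.97 fps / 48 kHz values; treat the concrete numbers as illustrative rather than read from video_format_desc.

        #include <boost/range/algorithm/rotate.hpp>
        #include <iostream>
        #include <iterator>
        #include <vector>

        int main()
        {
                // Nominal NTSC cadence as stored in the format description (illustrative).
                std::vector<int> cadence { 1602, 1601, 1602, 1601, 1602 };

                // update_audio_cadence(): one step backwards so playback starts on the
                // larger sample count -> 1602, 1602, 1601, 1602, 1601.
                boost::range::rotate(cadence, std::end(cadence) - 1);

                // pop_audio(): consume cadence.front() samples per channel, then rotate
                // one step forwards so the next frame uses the next cadence entry.
                for (int frame = 0; frame < 5; ++frame)
                {
                        std::cout << cadence.front() << " ";
                        boost::range::rotate(cadence, std::begin(cadence) + 1);
                }
                std::cout << std::endl; // prints: 1602 1602 1601 1602 1601
        }

Over one five-frame cycle this consumes 8008 samples per channel, matching the average of 48000 * 1001 / 30000 samples per frame.
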
 
 frame_muxer::frame_muxer(
-               double in_fps,
+               boost::rational<int> in_framerate,
+               std::vector<audio_input_pad> audio_input_pads,
                const spl::shared_ptr<core::frame_factory>& frame_factory,
                const core::video_format_desc& format_desc,
                const core::audio_channel_layout& channel_layout,
-               const std::wstring& filter)
-       : impl_(new impl(in_fps, frame_factory, format_desc, channel_layout, filter)){}
-void frame_muxer::push_video(const std::shared_ptr<AVFrame>& frame){impl_->push_video(frame);}
-void frame_muxer::push_audio(const std::shared_ptr<AVFrame>& frame){impl_->push_audio(frame);}
-bool frame_muxer::empty() const{return impl_->empty();}
-core::draw_frame frame_muxer::front() const{return impl_->front();}
-void frame_muxer::pop(){return impl_->pop();}
-void frame_muxer::clear(){impl_->clear();}
+               const std::wstring& filter,
+               bool multithreaded_filter)
+       : impl_(new impl(std::move(in_framerate), std::move(audio_input_pads), frame_factory, format_desc, channel_layout, filter, multithreaded_filter)){}
+void frame_muxer::push(const std::shared_ptr<AVFrame>& video){impl_->push(video);}
+void frame_muxer::push(const std::vector<std::shared_ptr<core::mutable_audio_buffer>>& audio_samples_per_stream){impl_->push(audio_samples_per_stream);}
+core::draw_frame frame_muxer::poll(){return impl_->poll();}
 uint32_t frame_muxer::calc_nb_frames(uint32_t nb_frames) const {return impl_->calc_nb_frames(nb_frames);}
 bool frame_muxer::video_ready() const{return impl_->video_ready();}
 bool frame_muxer::audio_ready() const{return impl_->audio_ready();}
-
-}}
\ No newline at end of file
+boost::rational<int> frame_muxer::out_framerate() const { return impl_->out_framerate(); }
+}}
index 0a78863635f688fdefc212c37198bbc1c52f634c..8364a282b3709b6855c6b8be59cfdf69b9defcbe 100644 (file)
 #pragma once
 
 #include "display_mode.h"
+#include "../filter/audio_filter.h"
 
 #include <common/forward.h>
 #include <common/memory.h>
 
+#include <core/frame/frame.h>
 #include <core/mixer/audio/audio_mixer.h>
 #include <core/fwd.h>
 
 #include <boost/noncopyable.hpp>
+#include <boost/rational.hpp>
 
 #include <vector>
 
@@ -41,23 +44,23 @@ class frame_muxer : boost::noncopyable
 {
 public:
        frame_muxer(
-                       double in_fps,
+                       boost::rational<int> in_framerate,
+                       std::vector<audio_input_pad> audio_input_pads,
                        const spl::shared_ptr<core::frame_factory>& frame_factory,
                        const core::video_format_desc& format_desc,
                        const core::audio_channel_layout& channel_layout,
-                       const std::wstring& filter);
-       
-       void push_video(const std::shared_ptr<AVFrame>& frame);
-       void push_audio(const std::shared_ptr<AVFrame>& frame);
-       
+                       const std::wstring& filter,
+                       bool multithreaded_filter);
+
+       void push(const std::shared_ptr<AVFrame>& video_frame);
+       void push(const std::vector<std::shared_ptr<core::mutable_audio_buffer>>& audio_samples_per_stream);
+
        bool video_ready() const;
        bool audio_ready() const;
 
-       void clear();
+       core::draw_frame poll();
 
-       bool empty() const;
-       core::draw_frame front() const;
-       void pop();
+       boost::rational<int> out_framerate() const;
 
        uint32_t calc_nb_frames(uint32_t nb_frames) const;
 private:
@@ -65,4 +68,4 @@ private:
        spl::shared_ptr<impl> impl_;
 };
 
-}}
\ No newline at end of file
+}}
index 3546305f55ae61eeee43b1dd0d33a08b249c96ed..6ed51aa1c8797beb7469dcc3b159871636b52fda 100644 (file)
@@ -27,6 +27,7 @@
 
 #include "../tbb_avcodec.h"
 #include "../../ffmpeg_error.h"
+#include "../../ffmpeg.h"
 
 #include <tbb/concurrent_unordered_map.h>
 #include <tbb/concurrent_queue.h>
@@ -57,7 +58,7 @@
 #pragma warning (push)
 #pragma warning (disable : 4244)
 #endif
-extern "C" 
+extern "C"
 {
        #include <libswscale/swscale.h>
        #include <libavcodec/avcodec.h>
@@ -68,7 +69,7 @@ extern "C"
 #endif
 
 namespace caspar { namespace ffmpeg {
-               
+
 core::field_mode get_mode(const AVFrame& frame)
 {
        if(!frame.interlaced_frame)
@@ -101,23 +102,23 @@ core::pixel_format get_pixel_format(PixelFormat pix_fmt)
 core::pixel_format_desc pixel_format_desc(PixelFormat pix_fmt, int width, int height)
 {
        // Get linesizes
-       AVPicture dummy_pict;   
+       AVPicture dummy_pict;
        avpicture_fill(&dummy_pict, nullptr, pix_fmt, width, height);
 
        core::pixel_format_desc desc = get_pixel_format(pix_fmt);
-               
+
        switch(desc.format)
        {
        case core::pixel_format::gray:
        case core::pixel_format::luma:
                {
-                       desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0], height, 1));                                               
+                       desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0], height, 1));
                        return desc;
                }
        case core::pixel_format::bgr:
        case core::pixel_format::rgb:
                {
-                       desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0]/3, height, 3));                                             
+                       desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0]/3, height, 3));
                        return desc;
                }
        case core::pixel_format::bgra:
@@ -125,41 +126,41 @@ core::pixel_format_desc pixel_format_desc(PixelFormat pix_fmt, int width, int he
        case core::pixel_format::rgba:
        case core::pixel_format::abgr:
                {
-                       desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0]/4, height, 4));                                             
+                       desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0]/4, height, 4));
                        return desc;
                }
        case core::pixel_format::ycbcr:
        case core::pixel_format::ycbcra:
-               {               
+               {
                        // Find chroma height
                        int size2 = static_cast<int>(dummy_pict.data[2] - dummy_pict.data[1]);
-                       int h2 = size2/dummy_pict.linesize[1];                  
+                       int h2 = size2/dummy_pict.linesize[1];
 
                        desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0], height, 1));
                        desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[1], h2, 1));
                        desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[2], h2, 1));
 
-                       if(desc.format == core::pixel_format::ycbcra)                                           
-                               desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[3], height, 1));       
+                       if(desc.format == core::pixel_format::ycbcra)
+                               desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[3], height, 1));
                        return desc;
-               }               
-       default:                
+               }
+       default:
                desc.format = core::pixel_format::invalid;
                return desc;
        }
 }
 
 core::mutable_frame make_frame(const void* tag, const spl::shared_ptr<AVFrame>& decoded_frame, core::frame_factory& frame_factory, const core::audio_channel_layout& channel_layout)
-{                      
+{
        static tbb::concurrent_unordered_map<int64_t, tbb::concurrent_queue<std::shared_ptr<SwsContext>>> sws_contvalid_exts_;
-       
+
        if(decoded_frame->width < 1 || decoded_frame->height < 1)
                return frame_factory.create_frame(tag, core::pixel_format_desc(core::pixel_format::invalid), core::audio_channel_layout::invalid());
 
        const auto width  = decoded_frame->width;
        const auto height = decoded_frame->height;
        auto desc                 = pixel_format_desc(static_cast<PixelFormat>(decoded_frame->format), width, height);
-               
+
        if(desc.format == core::pixel_format::invalid)
        {
                auto pix_fmt = static_cast<PixelFormat>(decoded_frame->format);
@@ -177,7 +178,7 @@ core::mutable_frame make_frame(const void* tag, const spl::shared_ptr<AVFrame>&
                        target_pix_fmt = PIX_FMT_YUV422P;
                else if(pix_fmt == PIX_FMT_YUV444P10)
                        target_pix_fmt = PIX_FMT_YUV444P;
-               
+
                auto target_desc = pixel_format_desc(target_pix_fmt, width, height);
 
                auto write = frame_factory.create_frame(tag, target_desc, channel_layout);
@@ -185,31 +186,31 @@ core::mutable_frame make_frame(const void* tag, const spl::shared_ptr<AVFrame>&
                std::shared_ptr<SwsContext> sws_context;
 
                //CASPAR_LOG(warning) << "Hardware accelerated color transform not supported.";
-               
-               int64_t key = ((static_cast<int64_t>(width)                      << 32) & 0xFFFF00000000) | 
-                                         ((static_cast<int64_t>(height)                 << 16) & 0xFFFF0000) | 
-                                         ((static_cast<int64_t>(pix_fmt)                <<  8) & 0xFF00) | 
+
+               int64_t key = ((static_cast<int64_t>(width)                      << 32) & 0xFFFF00000000) |
+                                         ((static_cast<int64_t>(height)                 << 16) & 0xFFFF0000) |
+                                         ((static_cast<int64_t>(pix_fmt)                <<  8) & 0xFF00) |
                                          ((static_cast<int64_t>(target_pix_fmt) <<  0) & 0xFF);
-                       
+
                auto& pool = sws_contvalid_exts_[key];
-                                               
+
                if(!pool.try_pop(sws_context))
                {
                        double param;
                        sws_context.reset(sws_getContext(width, height, pix_fmt, width, height, target_pix_fmt, SWS_BILINEAR, nullptr, nullptr, &param), sws_freeContext);
                }
-                       
+
                if(!sws_context)
                {
-                       CASPAR_THROW_EXCEPTION(operation_failed() << msg_info("Could not create software scaling context.") << 
+                       CASPAR_THROW_EXCEPTION(operation_failed() << msg_info("Could not create software scaling context.") <<
                                                                        boost::errinfo_api_function("sws_getContext"));
-               }       
-               
+               }
+
                auto av_frame = create_frame();
                if(target_pix_fmt == PIX_FMT_BGRA)
                {
                        auto size = avpicture_fill(reinterpret_cast<AVPicture*>(av_frame.get()), write.image_data(0).begin(), PIX_FMT_BGRA, width, height);
-                       CASPAR_VERIFY(size == write.image_data(0).size()); 
+                       CASPAR_VERIFY(size == write.image_data(0).size());
                }
                else
                {
@@ -222,22 +223,22 @@ core::mutable_frame make_frame(const void* tag, const spl::shared_ptr<AVFrame>&
                        }
                }
 
-               sws_scale(sws_context.get(), decoded_frame->data, decoded_frame->linesize, 0, height, av_frame->data, av_frame->linesize);      
-               pool.push(sws_context); 
+               sws_scale(sws_context.get(), decoded_frame->data, decoded_frame->linesize, 0, height, av_frame->data, av_frame->linesize);
+               pool.push(sws_context);
 
                return std::move(write);
        }
        else
        {
                auto write = frame_factory.create_frame(tag, desc, channel_layout);
-               
+
                for(int n = 0; n < static_cast<int>(desc.planes.size()); ++n)
                {
                        auto plane            = desc.planes[n];
                        auto result           = write.image_data(n).begin();
                        auto decoded          = decoded_frame->data[n];
                        auto decoded_linesize = decoded_frame->linesize[n];
-                       
+
                        CASPAR_ASSERT(decoded);
                        CASPAR_ASSERT(write.image_data(n).begin());
 
@@ -256,7 +257,7 @@ core::mutable_frame make_frame(const void* tag, const spl::shared_ptr<AVFrame>&
                                fast_memcpy(result, decoded, plane.size);
                        }
                }
-       
+
                return std::move(write);
        }
 }
@@ -273,16 +274,16 @@ spl::shared_ptr<AVFrame> make_av_frame(core::mutable_frame& frame)
 spl::shared_ptr<AVFrame> make_av_frame(std::array<uint8_t*, 4> data, const core::pixel_format_desc& pix_desc)
 {
        auto av_frame = create_frame();
-       
+
        auto planes              = pix_desc.planes;
        auto format              = pix_desc.format;
 
        av_frame->width  = planes[0].width;
        av_frame->height = planes[0].height;
-       for(int n = 0; n < planes.size(); ++n)  
+       for(int n = 0; n < planes.size(); ++n)
        {
                av_frame->data[n]         = data[n];
-               av_frame->linesize[n] = planes[n].linesize;     
+               av_frame->linesize[n] = planes[n].linesize;
        }
 
        switch(format)
@@ -294,19 +295,19 @@ spl::shared_ptr<AVFrame> make_av_frame(std::array<uint8_t*, 4> data, const core:
                av_frame->format = PIX_FMT_BGR24;
                break;
        case core::pixel_format::rgba:
-               av_frame->format = PIX_FMT_RGBA; 
+               av_frame->format = PIX_FMT_RGBA;
                break;
        case core::pixel_format::argb:
-               av_frame->format = PIX_FMT_ARGB; 
+               av_frame->format = PIX_FMT_ARGB;
                break;
        case core::pixel_format::bgra:
-               av_frame->format = PIX_FMT_BGRA; 
+               av_frame->format = PIX_FMT_BGRA;
                break;
        case core::pixel_format::abgr:
-               av_frame->format = PIX_FMT_ABGR; 
+               av_frame->format = PIX_FMT_ABGR;
                break;
        case core::pixel_format::gray:
-               av_frame->format = PIX_FMT_GRAY8; 
+               av_frame->format = PIX_FMT_GRAY8;
                break;
        case core::pixel_format::ycbcr:
        {
@@ -344,8 +345,8 @@ bool is_sane_fps(AVRational time_base)
 AVRational fix_time_base(AVRational time_base)
 {
        if(time_base.num == 1)
-               time_base.num = static_cast<int>(std::pow(10.0, static_cast<int>(std::log10(static_cast<float>(time_base.den)))-1));    
-                       
+               time_base.num = static_cast<int>(std::pow(10.0, static_cast<int>(std::log10(static_cast<float>(time_base.den)))-1));
+
        if(!is_sane_fps(time_base))
        {
                auto tmp = time_base;
@@ -360,7 +361,7 @@ AVRational fix_time_base(AVRational time_base)
 double read_fps(AVFormatContext& context, double fail_value)
 {
        auto framerate = read_framerate(context, boost::rational<int>(static_cast<int>(fail_value * 1000000.0), 1000000));
-       
+
        return static_cast<double>(framerate.numerator()) / static_cast<double>(framerate.denominator());
 }
 
@@ -445,7 +446,7 @@ void fix_meta_data(AVFormatContext& context)
        {
                auto video_stream   = context.streams[video_index];
                auto video_context  = context.streams[video_index]->codec;
-                                               
+
                if(boost::filesystem::path(context.filename).extension().string() == ".flv")
                {
                        try
@@ -464,7 +465,7 @@ void fix_meta_data(AVFormatContext& context)
                        auto ticks               = video_context->ticks_per_frame;
 
                        if(video_stream->nb_frames == 0)
-                               video_stream->nb_frames = (duration*stream_time.num*codec_time.den)/(stream_time.den*codec_time.num*ticks);     
+                               video_stream->nb_frames = (duration*stream_time.num*codec_time.den)/(stream_time.den*codec_time.num*ticks);
                }
        }
 }
@@ -476,23 +477,46 @@ spl::shared_ptr<AVPacket> create_packet()
                av_free_packet(p);
                delete p;
        });
-       
+
        av_init_packet(packet.get());
        return packet;
 }
 
 spl::shared_ptr<AVFrame> create_frame()
-{      
+{
        spl::shared_ptr<AVFrame> frame(av_frame_alloc(), [](AVFrame* p)
        {
                av_frame_free(&p);
        });
-       avcodec_get_frame_defaults(frame.get());
        return frame;
 }
 
+std::shared_ptr<core::mutable_audio_buffer> flush_audio()
+{
+       static std::shared_ptr<core::mutable_audio_buffer> audio(new core::mutable_audio_buffer());
+       return audio;
+}
+
+std::shared_ptr<core::mutable_audio_buffer> empty_audio()
+{
+       static std::shared_ptr<core::mutable_audio_buffer> audio(new core::mutable_audio_buffer());
+       return audio;
+}
+
+std::shared_ptr<AVFrame> flush_video()
+{
+       static auto video = create_frame();
+       return video;
+}
+
+std::shared_ptr<AVFrame> empty_video()
+{
+       static auto video = create_frame();
+       return video;
+}
+
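
The four helpers above hand out process-wide sentinel objects that callers such as frame_muxer::impl::push() compare by pointer identity, never by content. A minimal, self-contained sketch of that pattern (names here are hypothetical):

        #include <iostream>
        #include <memory>

        struct frame {};

        // One static instance per process; every call returns the same pointer.
        std::shared_ptr<frame> flush_marker()
        {
                static auto marker = std::make_shared<frame>();
                return marker;
        }

        int main()
        {
                auto regular = std::make_shared<frame>();

                // Sentinels are recognised by identity, so a default-constructed frame
                // is never mistaken for a flush marker.
                std::cout << (regular == flush_marker()) << std::endl;        // 0
                std::cout << (flush_marker() == flush_marker()) << std::endl; // 1
        }
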
 spl::shared_ptr<AVCodecContext> open_codec(AVFormatContext& context, enum AVMediaType type, int& index, bool single_threaded)
-{      
+{
        AVCodec* decoder;
        index = THROW_ON_ERROR2(av_find_best_stream(&context, type, index, -1, &decoder, 0), "");
        //if(strcmp(decoder->name, "prores") == 0 && decoder->next && strcmp(decoder->next->name, "prores_lgpl") == 0)
@@ -524,7 +548,7 @@ std::wstring print_mode(int width, int height, double fps, bool interlaced)
 }
 
 bool is_valid_file(const std::wstring& filename, bool only_video)
-{                              
+{
        static const auto invalid_exts = {
                L".png",
                L".tga",
@@ -555,25 +579,27 @@ bool is_valid_file(const std::wstring& filename, bool only_video)
                L".mpg",
                L".dnxhd",
                L".h264",
-               L".prores"
+               L".prores",
+               L".mkv",
+               L".mxf"
        };
 
        auto ext = boost::to_lower_copy(boost::filesystem::path(filename).extension().wstring());
-               
+
        if(std::find(valid_exts.begin(), valid_exts.end(), ext) != valid_exts.end())
                return true;
 
        if (!only_video && std::find(only_audio.begin(), only_audio.end(), ext) != only_audio.end())
                return true;
-       
+
        if(std::find(invalid_exts.begin(), invalid_exts.end(), ext) != invalid_exts.end())
-               return false;   
+               return false;
 
        if (only_video && std::find(only_audio.begin(), only_audio.end(), ext) != only_audio.end())
                return false;
 
        auto u8filename = u8(filename);
-       
+
        int score = 0;
        AVProbeData pb = {};
        pb.filename = u8filename.c_str();
@@ -748,7 +774,7 @@ core::audio_channel_layout get_audio_channel_layout(int num_channels, std::uint6
 }
 
 // av_get_default_channel_layout does not work for layouts not predefined in ffmpeg. This is needed to support > 8 channels.
-std::int64_t create_channel_layout_bitmask(int num_channels)
+std::uint64_t create_channel_layout_bitmask(int num_channels)
 {
        if (num_channels > 63)
                CASPAR_THROW_EXCEPTION(invalid_argument() << msg_info(L"FFmpeg cannot handle more than 63 audio channels"));
@@ -758,7 +784,61 @@ std::int64_t create_channel_layout_bitmask(int num_channels)
        auto to_shift = 63 - num_channels;
        auto result = ALL_63_CHANNELS >> to_shift;
 
-       return static_cast<std::int64_t>(result);
+       return static_cast<std::uint64_t>(result);
+}
+
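
Editorial sketch of the bitmask construction in create_channel_layout_bitmask() above, assuming ALL_63_CHANNELS is the 63-bit all-ones constant that the shift implies; the channel counts are illustrative:

        #include <cstdint>
        #include <iostream>

        int main()
        {
                // Assumed value: 63 set bits, matching the 63-channel limit checked above.
                const std::uint64_t ALL_63_CHANNELS = 0x7FFFFFFFFFFFFFFFULL;

                for (int num_channels : { 2, 8, 16 })
                {
                        // Keep the lowest num_channels bits: a custom layout FFmpeg accepts
                        // even when no predefined layout exists for that channel count.
                        auto mask = ALL_63_CHANNELS >> (63 - num_channels);
                        std::cout << num_channels << " channels -> 0x" << std::hex << mask << std::dec << "\n";
                }
                // 2 channels -> 0x3, 8 channels -> 0xff, 16 channels -> 0xffff
        }
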
+std::string to_string(const boost::rational<int>& framerate)
+{
+       return boost::lexical_cast<std::string>(framerate.numerator())
+               + "/" + boost::lexical_cast<std::string>(framerate.denominator()) + " (" + boost::lexical_cast<std::string>(static_cast<double>(framerate.numerator()) / static_cast<double>(framerate.denominator())) + ") fps";
+}
+
+std::vector<int> find_audio_cadence(const boost::rational<int>& framerate)
+{
+       static std::map<boost::rational<int>, std::vector<int>> CADENCES_BY_FRAMERATE = []
+       {
+               std::map<boost::rational<int>, std::vector<int>> result;
+
+               for (core::video_format format : enum_constants<core::video_format>())
+               {
+                       core::video_format_desc desc(format);
+                       boost::rational<int> format_rate(desc.time_scale, desc.duration);
+
+                       result.insert(std::make_pair(format_rate, desc.audio_cadence));
+               }
+
+               return result;
+       }();
+
+       auto exact_match = CADENCES_BY_FRAMERATE.find(framerate);
+
+       if (exact_match != CADENCES_BY_FRAMERATE.end())
+               return exact_match->second;
+
+       boost::rational<int> closest_framerate_diff = std::numeric_limits<int>::max();
+       boost::rational<int> closest_framerate = 0;
+
+       for (auto format_framerate : CADENCES_BY_FRAMERATE | boost::adaptors::map_keys)
+       {
+               auto diff = boost::abs(framerate - format_framerate);
+
+               if (diff < closest_framerate_diff)
+               {
+                       closest_framerate_diff = diff;
+                       closest_framerate = format_framerate;
+               }
+       }
+
+       if (is_logging_quiet_for_thread())
+               CASPAR_LOG(debug) << "No exact audio cadence match found for framerate " << to_string(framerate)
+               << "\nClosest match is " << to_string(closest_framerate)
+               << "\nwhich is a " << to_string(closest_framerate_diff) << " difference.";
+       else
+               CASPAR_LOG(warning) << "No exact audio cadence match found for framerate " << to_string(framerate)
+               << "\nClosest match is " << to_string(closest_framerate)
+               << "\nwhich is a " << to_string(closest_framerate_diff) << " difference.";
+
+       return CADENCES_BY_FRAMERATE[closest_framerate];
 }
 
 //
index 626cccfb454639d6f6ed80c4838acf38fdcfecc3..4c1f45c06c51d294562a89ed3ef4409b55eab79d 100644 (file)
 
 #include <core/video_format.h>
 #include <core/frame/pixel_format.h>
-#include <core/mixer/audio/audio_mixer.h>
+#include <core/frame/audio_channel_layout.h>
+#include <core/frame/frame.h>
 #include <core/fwd.h>
 
 #include <boost/rational.hpp>
 
 #include <array>
+#include <vector>
+#include <utility>
 
 #if defined(_MSC_VER)
 #pragma warning (push)
@@ -52,7 +55,9 @@ struct AVRational;
 struct AVCodecContext;
 
 namespace caspar { namespace ffmpeg {
-               
+
+typedef std::vector<std::pair<std::string, std::string>> ffmpeg_options;
+
 // Utils
 
 core::field_mode                                       get_mode(const AVFrame& frame);
@@ -71,6 +76,11 @@ spl::shared_ptr<AVFormatContext> open_input(const std::wstring& filename);
 bool is_sane_fps(AVRational time_base);
 AVRational fix_time_base(AVRational time_base);
 
+std::shared_ptr<core::mutable_audio_buffer>    flush_audio();
+std::shared_ptr<core::mutable_audio_buffer>    empty_audio();
+std::shared_ptr<AVFrame>                                       flush_video();
+std::shared_ptr<AVFrame>                                       empty_video();
+
 double read_fps(AVFormatContext& context, double fail_value);
 boost::rational<int> read_framerate(AVFormatContext& context, const boost::rational<int>& fail_value);
 
@@ -83,6 +93,8 @@ bool try_get_duration(const std::wstring filename, std::int64_t& duration, boost
 core::audio_channel_layout get_audio_channel_layout(int num_channels, std::uint64_t layout, const std::wstring& channel_layout_spec);
 
 // av_get_default_channel_layout does not work for layouts not predefined in ffmpeg. This is needed to support > 8 channels.
-std::int64_t create_channel_layout_bitmask(int num_channels);
+std::uint64_t create_channel_layout_bitmask(int num_channels);
+
+std::vector<int> find_audio_cadence(const boost::rational<int>& framerate);
 
 }}
index 87ee744dc8a56133de3d5c9808f72ae30875c147..9322baad4ba1db6a67669e8df3c184ace5eaef85 100644 (file)
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
+* Copyright 2013 Sveriges Television AB http://casparcg.com/
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 #include "video_decoder.h"
 
 #include "../util/util.h"
-#include "../input/input.h"
 
 #include "../../ffmpeg_error.h"
 
-#include <common/log.h>
 #include <core/frame/frame_transform.h>
 #include <core/frame/frame_factory.h>
 
+#include <boost/range/algorithm_ext/push_back.hpp>
 #include <boost/filesystem.hpp>
 
 #include <queue>
@@ -40,7 +39,7 @@
 #pragma warning (push)
 #pragma warning (disable : 4244)
 #endif
-extern "C" 
+extern "C"
 {
        #include <libavcodec/avcodec.h>
        #include <libavformat/avformat.h>
@@ -50,129 +49,125 @@ extern "C"
 #endif
 
 namespace caspar { namespace ffmpeg {
-       
-struct video_decoder::impl : boost::noncopyable
+
+struct video_decoder::implementation : boost::noncopyable
 {
-       core::monitor::subject                                  monitor_subject_;
-       input*                                                                  input_;
        int                                                                             index_                          = -1;
        const spl::shared_ptr<AVCodecContext>   codec_context_;
 
        std::queue<spl::shared_ptr<AVPacket>>   packets_;
-       
-       const AVStream*                                                 stream_;
+
        const uint32_t                                                  nb_frames_;
-       const int                                                               width_;
-       const int                                                               height_;
 
+       const int                                                               width_                          = codec_context_->width;
+       const int                                                               height_                         = codec_context_->height;
        bool                                                                    is_progressive_;
-       uint32_t                                                                file_frame_number_;
-       boost::rational<int>                                    framerate_;
-       
-       std::shared_ptr<AVPacket>                               current_packet_;
+
+       tbb::atomic<uint32_t>                                   file_frame_number_;
 
 public:
-       explicit impl(input& in, bool single_threaded)
-               : input_(&in)
-               , codec_context_(open_codec(input_->context(), AVMEDIA_TYPE_VIDEO, index_, single_threaded))
-               , stream_(input_->context().streams[index_])
-               , nb_frames_(static_cast<uint32_t>(stream_->nb_frames))
-               , width_(codec_context_->width)
-               , height_(codec_context_->height)
-               , file_frame_number_(0)
-               , framerate_(read_framerate(input_->context(), 0))
+       explicit implementation(const spl::shared_ptr<AVFormatContext>& context)
+               : codec_context_(open_codec(*context, AVMEDIA_TYPE_VIDEO, index_, false))
+               , nb_frames_(static_cast<uint32_t>(context->streams[index_]->nb_frames))
+       {
+               file_frame_number_ = 0;
+
+               codec_context_->refcounted_frames = 1;
+       }
+
+       void push(const std::shared_ptr<AVPacket>& packet)
        {
+               if(!packet)
+                       return;
+
+               if(packet->stream_index == index_ || packet->data == nullptr)
+                       packets_.push(spl::make_shared_ptr(packet));
        }
-       
+
        std::shared_ptr<AVFrame> poll()
-       {                       
-               if(!current_packet_ && !input_->try_pop_video(current_packet_))
+       {
+               if(packets_.empty())
                        return nullptr;
-               
-               std::shared_ptr<AVFrame> frame;
 
-               if(!current_packet_)            
-               {
-                       avcodec_flush_buffers(codec_context_.get());    
-               }
-               else if(!current_packet_->data)
-               {
-                       if(codec_context_->codec->capabilities & CODEC_CAP_DELAY)                       
-                               frame = decode(*current_packet_);
-                       
-                       if(!frame)
-                               current_packet_.reset();
-               }
-               else
+               auto packet = packets_.front();
+
+               if(packet->data == nullptr)
                {
-                       frame = decode(*current_packet_);
-                       
-                       if(current_packet_->size == 0)
-                               current_packet_.reset();
+                       if(codec_context_->codec->capabilities & CODEC_CAP_DELAY)
+                       {
+                               auto video = decode(packet);
+                               if(video)
+                                       return video;
+                       }
+
+                       packets_.pop();
+
+                       if (packet->pos != -1)
+                       {
+                               file_frame_number_ = static_cast<uint32_t>(packet->pos);
+                               avcodec_flush_buffers(codec_context_.get());
+                               return flush_video();
+                       }
+                       else // Really EOF
+                               return nullptr;
                }
-                       
-               return frame;
+
+               packets_.pop();
+               return decode(packet);
        }
 
-       std::shared_ptr<AVFrame> decode(AVPacket& pkt)
+       std::shared_ptr<AVFrame> decode(spl::shared_ptr<AVPacket> pkt)
        {
-               auto frame = create_frame();
+               auto decoded_frame = create_frame();
 
-               int got_frame = 0;
-               auto len = THROW_ON_ERROR2(avcodec_decode_video2(codec_context_.get(), frame.get(), &got_frame, &pkt), "[video_decocer]");
-                               
-               if(len == 0)
-               {
-                       pkt.size = 0;
-                       return nullptr;
-               }
+               int frame_finished = 0;
+               THROW_ON_ERROR2(avcodec_decode_video2(codec_context_.get(), decoded_frame.get(), &frame_finished, pkt.get()), "[video_decoder]");
 
-        pkt.data += len;
-        pkt.size -= len;
+               // If the decoder consumes less than the whole packet then something is wrong.
+               // That might be just harmless padding at the end, or a problem with the
+               // AVParser or demuxer which put more than one frame in an AVPacket.
 
-               if(got_frame == 0)      
+               if(frame_finished == 0)
                        return nullptr;
-               
-               auto stream_time_base           = stream_->time_base;
-               auto fps = static_cast<double>(framerate_.numerator()) / static_cast<double>(framerate_.denominator());
-               auto packet_frame_number        = static_cast<uint32_t>((static_cast<double>(pkt.pts * stream_time_base.num) / stream_time_base.den) * fps);
-
-               file_frame_number_ = packet_frame_number;
-
-               is_progressive_ = !frame->interlaced_frame;
-               
-               if(frame->repeat_pict > 0)
-                       CASPAR_LOG(warning) << "[video_decoder] repeat_pict not implemented.";
-                               
-               monitor_subject_  << core::monitor::message("/file/video/width")        % width_
-                                               << core::monitor::message("/file/video/height") % height_
-                                               << core::monitor::message("/file/video/field")  % u8(!frame->interlaced_frame ? "progressive" : (frame->top_field_first ? "upper" : "lower"))
-                                               << core::monitor::message("/file/video/codec")  % u8(codec_context_->codec->long_name);
-               
-               return frame;
+
+               is_progressive_ = !decoded_frame->interlaced_frame;
+
+               if(decoded_frame->repeat_pict > 0)
+                       CASPAR_LOG(warning) << "[video_decoder] Field repeat_pict not implemented.";
+
+               ++file_frame_number_;
+
+               // This ties the life of the decoded_frame to the packet that it came from. For the
+               // current version of ffmpeg (0.8 or c17808c) the RAW_VIDEO codec returns frame data
+               // owned by the packet.
+               return std::shared_ptr<AVFrame>(decoded_frame.get(), [decoded_frame, pkt](AVFrame*){});
+       }
+
+       bool ready() const
+       {
+               return packets_.size() >= 8;
        }
-       
+
        uint32_t nb_frames() const
        {
-               return std::max(nb_frames_, file_frame_number_);
+               return std::max(nb_frames_, static_cast<uint32_t>(file_frame_number_));
        }
 
        std::wstring print() const
-       {               
+       {
                return L"[video-decoder] " + u16(codec_context_->codec->long_name);
        }
 };
 
-video_decoder::video_decoder(input& in, bool single_threaded) : impl_(new impl(in, single_threaded)){}
-video_decoder::video_decoder(video_decoder&& other) : impl_(std::move(other.impl_)){}
-video_decoder& video_decoder::operator=(video_decoder&& other){impl_ = std::move(other.impl_); return *this;}
-std::shared_ptr<AVFrame> video_decoder::operator()(){return impl_->poll();}
+video_decoder::video_decoder(const spl::shared_ptr<AVFormatContext>& context) : impl_(new implementation(context)){}
+void video_decoder::push(const std::shared_ptr<AVPacket>& packet){impl_->push(packet);}
+std::shared_ptr<AVFrame> video_decoder::poll(){return impl_->poll();}
+bool video_decoder::ready() const{return impl_->ready();}
 int video_decoder::width() const{return impl_->width_;}
 int video_decoder::height() const{return impl_->height_;}
 uint32_t video_decoder::nb_frames() const{return impl_->nb_frames();}
-uint32_t video_decoder::file_frame_number() const{return impl_->file_frame_number_;}
-boost::rational<int> video_decoder::framerate() const { return impl_->framerate_; }
-bool video_decoder::is_progressive() const{return impl_->is_progressive_;}
+uint32_t video_decoder::file_frame_number() const{return static_cast<uint32_t>(impl_->file_frame_number_);}
+bool   video_decoder::is_progressive() const{return impl_->is_progressive_;}
 std::wstring video_decoder::print() const{return impl_->print();}
-core::monitor::subject& video_decoder::monitor_output() { return impl_->monitor_subject_; }
+
 }}
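
Aside (not part of the diff): the custom-deleter trick in decode() above ties the decoded frame's lifetime to its source packet, which matters because for some codecs (the RAW_VIDEO case noted in the comment) the frame data is owned by the packet. A minimal standalone sketch of the same idiom, using hypothetical Frame/Packet placeholder types:

#include <memory>

struct Frame {};
struct Packet {};

// Returns a pointer to the same Frame, but whose deleter captures shared
// ownership of both the frame and the packet, so neither is released until
// every alias returned from here has been dropped.
std::shared_ptr<Frame> tie_lifetime(std::shared_ptr<Frame> frame, std::shared_ptr<Packet> packet)
{
        return std::shared_ptr<Frame>(frame.get(), [frame, packet](Frame*) {});
}
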
index 5cc1aea1dcca1ab2867ee8bc5553e328003049a8..d954dc05b05610a1d8b24d26b8cd03b7a75e573a 100644 (file)
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
+* Copyright 2013 Sveriges Television AB http://casparcg.com/
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 #pragma once
 
 #include <common/memory.h>
-#include <common/forward.h>
-
-#include <core/monitor/monitor.h>
 
 #include <boost/noncopyable.hpp>
-#include <boost/rational.hpp>
 
 struct AVFormatContext;
 struct AVFrame;
 struct AVPacket;
 
-namespace caspar { namespace ffmpeg {
+namespace caspar {
+
+namespace core {
+       class frame_factory;
+       class write_frame;
+}
 
-class video_decoder : public boost::noncopyable
+namespace ffmpeg {
+
+class video_decoder : boost::noncopyable
 {
 public:
-       explicit video_decoder(class input& input, bool single_threaded);
+       explicit video_decoder(const spl::shared_ptr<AVFormatContext>& context);
        
-       video_decoder(video_decoder&& other);
-       video_decoder& operator=(video_decoder&& other);
-
-       std::shared_ptr<AVFrame> operator()();
+       bool                                            ready() const;
+       void                                            push(const std::shared_ptr<AVPacket>& packet);
+       std::shared_ptr<AVFrame>        poll();
        
-       int      width() const;
-       int      height() const;
-       bool is_progressive() const;
-       uint32_t file_frame_number() const;
-       boost::rational<int> framerate() const;
+       int                                                     width() const;
+       int                                                     height() const;
 
-       uint32_t nb_frames() const;
+       uint32_t                                        nb_frames() const;
+       uint32_t                                        file_frame_number() const;
+       bool                                            is_progressive() const;
 
-       std::wstring print() const;
-               
-       core::monitor::subject& monitor_output();
+       std::wstring                            print() const;
 
 private:
-       struct impl;
-       spl::shared_ptr<impl> impl_;
+       struct implementation;
+       spl::shared_ptr<implementation> impl_;
 };
 
 }}
\ No newline at end of file
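
Usage sketch (illustrative only, not from this commit): the reworked decoder is fed demuxed packets with push() and drained with poll(); ready() reports whether its internal queue holds enough packets (eight in the implementation above), and a packet with null data acts as a flush marker. read_packet() and consume() below are hypothetical stand-ins for the demuxer and the frame sink.

#include "video_decoder.h"

namespace caspar { namespace ffmpeg {

// Hypothetical helpers: read_packet() would wrap the demuxer (av_read_frame),
// consume() would hand the frame on to the muxer/mixer.
std::shared_ptr<AVPacket> read_packet();
void consume(const std::shared_ptr<AVFrame>& frame);

void pump(video_decoder& decoder)
{
        while (!decoder.ready())
                decoder.push(read_packet());    // push() ignores null packets

        while (auto frame = decoder.poll())     // nullptr once the packet queue is drained
                consume(frame);
}

}}
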
index e0b85e92e65819228bc394d3c219d6bc59970a89..a5d4fcde9108b698da5a4d65717da09542074aab 100644 (file)
@@ -205,8 +205,35 @@ spl::shared_ptr<core::frame_producer> create_ct_producer(
        return producer;
 }
 
+void copy_template_hosts()
+{
+       try
+       {
+               for (auto it = boost::filesystem::directory_iterator(env::initial_folder()); it != boost::filesystem::directory_iterator(); ++it)
+               {
+                       if (it->path().wstring().find(L".fth") != std::wstring::npos)
+                       {
+                               auto from_path = *it;
+                               auto to_path = boost::filesystem::path(env::template_folder() + L"/" + it->path().filename().wstring());
+
+                               if (boost::filesystem::exists(to_path))
+                                       boost::filesystem::remove(to_path);
+
+                               boost::filesystem::copy_file(from_path, to_path);
+                       }
+               }
+       }
+       catch (...)
+       {
+               CASPAR_LOG_CURRENT_EXCEPTION();
+               CASPAR_LOG(error) << L"Failed to copy template-hosts from initial-path to template-path.";
+       }
+}
+
 void init(core::module_dependencies dependencies)
 {
+       copy_template_hosts();
+
        dependencies.producer_registry->register_producer_factory(L"Flash Producer (.ct)", create_ct_producer, describe_ct_producer);
        dependencies.producer_registry->register_producer_factory(L"Flash Producer (.swf)", create_swf_producer, describe_swf_producer);
        dependencies.media_info_repo->register_extractor([](const std::wstring& file, const std::wstring& extension, core::media_info& info)
@@ -249,11 +276,11 @@ std::wstring cg_version()
 }
 
 std::wstring version()
-{              
+{
        std::wstring version = L"Not found";
-#ifdef WIN32 
+#ifdef WIN32
        HKEY   hkey;
+
        DWORD dwType, dwSize;
        if(RegOpenKeyEx(HKEY_LOCAL_MACHINE, TEXT("SOFTWARE\\Macromedia\\FlashPlayerActiveX"), 0, KEY_QUERY_VALUE, &hkey) == ERROR_SUCCESS)
        {
@@ -262,7 +289,7 @@ std::wstring version()
                dwType = REG_SZ;
                dwSize = sizeof(ver_str);
                RegQueryValueEx(hkey, TEXT("Version"), NULL, &dwType, (PBYTE)&ver_str, &dwSize);
+
                version = ver_str;
 
                RegCloseKey(hkey);
index 8cefe4d4cb633bef78e53d4813b1ea9874cddd21..cbbae1aa2286c8dcee28a132523619ebaaac08f6 100644 (file)
@@ -625,7 +625,7 @@ spl::shared_ptr<core::frame_producer> create_producer(const core::frame_producer
        auto filename = env::template_folder() + L"\\" + template_host.filename;
        
        if(!boost::filesystem::exists(filename))
-               CASPAR_THROW_EXCEPTION(file_not_found() << boost::errinfo_file_name(u8(filename)));     
+               CASPAR_THROW_EXCEPTION(file_not_found() << msg_info(L"Could not open flash movie " + filename));        
 
        return create_destroy_proxy(spl::make_shared<flash_producer>(dependencies.frame_factory, dependencies.format_desc, filename, template_host.width, template_host.height));
 }
index fdfe31e9365cf16f46cb4f630b06f3333629556a..dc7365b4a4a05b62456b0bb50f693a2475ffb360 100644 (file)
@@ -26,6 +26,7 @@
 
 #include <common/executor.h>
 #include <common/future.h>
+#include <common/env.h>
 
 #include <core/producer/cg_proxy.h>
 
@@ -234,6 +235,7 @@ void init(core::module_dependencies dependencies)
        {
                CefSettings settings;
                settings.no_sandbox = true;
+               settings.remote_debugging_port = env::properties().get(L"configuration.html.remote-debugging-port", 0);
                //settings.windowless_rendering_enabled = true;
                CefInitialize(main_args, settings, nullptr, nullptr);
        });
index ab466f046ce6e804d9ba718db0f795da577a0a2e..4730402d7566fae1dba51a455bcf176811e4ff5a 100644 (file)
@@ -106,7 +106,7 @@ public:
                        try
                        {
                                auto filename2 = filename;
-                               
+
                                if (filename2.empty())
                                        filename2 = env::media_folder() +  boost::posix_time::to_iso_wstring(boost::posix_time::second_clock::local_time()) + L".png";
                                else
@@ -135,7 +135,7 @@ public:
        {
                return L"image[]";
        }
-       
+
        std::wstring name() const override
        {
                return L"image";
@@ -176,7 +176,8 @@ void describe_consumer(core::help_sink& sink, const core::help_repository& repo)
        sink.example(L">> ADD 1 IMAGE", L"creating media/20130228T210946.png if the current time is 2013-02-28 21:09:46.");
 }
 
-spl::shared_ptr<core::frame_consumer> create_consumer(const std::vector<std::wstring>& params, core::interaction_sink*)
+spl::shared_ptr<core::frame_consumer> create_consumer(
+               const std::vector<std::wstring>& params, core::interaction_sink*, std::vector<spl::shared_ptr<core::video_channel>> channels)
 {
        if (params.size() < 1 || !boost::iequals(params.at(0), L"IMAGE"))
                return core::frame_consumer::empty();
index fc47c62543d371783c5bd8293a4b08efb88ae4a5..111e319c1425006bee55bfba2322bb6bef0280b2 100644 (file)
 #include <string>
 #include <vector>
 
-namespace caspar { 
+namespace caspar {
 
 namespace image {
-       
+
 void write_cropped_png(
                const class core::const_frame& frame,
                const core::video_format_desc& format_desc,
@@ -43,6 +43,7 @@ void write_cropped_png(
                int height);
 
 void describe_consumer(core::help_sink& sink, const core::help_repository& repo);
-spl::shared_ptr<core::frame_consumer> create_consumer(const std::vector<std::wstring>& params, struct core::interaction_sink*);
+spl::shared_ptr<core::frame_consumer> create_consumer(
+               const std::vector<std::wstring>& params, struct core::interaction_sink*, std::vector<spl::shared_ptr<core::video_channel>> channels);
 
 }}
index 0da1a981c39e69ec3efdf7093db789262eadf51b..112922ac5f68fe450de2feec264398800f94066f 100644 (file)
@@ -106,6 +106,11 @@ struct image_producer : public core::frame_producer_base
        void load(const std::shared_ptr<FIBITMAP>& bitmap)
        {
                FreeImage_FlipVertical(bitmap.get());
+               auto longest_side = static_cast<int>(std::max(FreeImage_GetWidth(bitmap.get()), FreeImage_GetHeight(bitmap.get())));
+
+               if (longest_side > frame_factory_->get_max_frame_size())
+                       CASPAR_THROW_EXCEPTION(user_error() << msg_info("Image too large for texture"));
+
                core::pixel_format_desc desc;
                desc.format = core::pixel_format::bgra;
                desc.planes.push_back(core::pixel_format_desc::plane(FreeImage_GetWidth(bitmap.get()), FreeImage_GetHeight(bitmap.get()), 4));
index 57c8f7b2b028ba4cbcf78449dd846629ebb62770..6bef3da8ee693c19fadb2507b262e948b0f9f56c 100644 (file)
@@ -302,12 +302,12 @@ struct image_scroll_producer : public core::frame_producer_base
                if (boost::iequals(cmd, L"SPEED"))
                {
                        if (params.size() == 1)
-                               return make_ready_future(boost::lexical_cast<std::wstring>(speed_.fetch()));
+                               return make_ready_future(boost::lexical_cast<std::wstring>(-speed_.fetch()));
 
                        auto val = boost::lexical_cast<double>(params.at(1));
                        int duration = params.size() > 2 ? boost::lexical_cast<int>(params.at(2)) : 0;
                        std::wstring tween = params.size() > 3 ? params.at(3) : L"linear";
-                       speed_ = speed_tweener(speed_.fetch(), val, duration, tween);
+                       speed_ = speed_tweener(speed_.fetch(), -val, duration, tween);
                }
 
                return make_ready_future<std::wstring>(L"");
index feade373c45b12e5f5f7e752cb8e012cecc9e9ff..f3d391795b6f733fae79d06ab79380f69abe2390 100644 (file)
@@ -18,7 +18,7 @@
 *
 * Author: Robert Nagy, ronag@live.com
 */
+
 #include "../StdAfx.h"
 
 #include "newtek_ivga_consumer.h"
@@ -76,13 +76,13 @@ public:
                graph_->set_color("dropped-frame", diagnostics::color(0.3f, 0.6f, 0.3f));
                diagnostics::register_graph(graph_);
        }
-       
+
        ~newtek_ivga_consumer()
        {
        }
 
        // frame_consumer
-       
+
        virtual void initialize(
                        const core::video_format_desc& format_desc,
                        const core::audio_channel_layout& channel_layout,
@@ -176,7 +176,7 @@ public:
        {
                return -1;
        }
-       
+
        virtual int index() const override
        {
                return 900;
@@ -191,7 +191,7 @@ public:
        {
                return false;
        }
-};     
+};
 
 void describe_ivga_consumer(core::help_sink& sink, const core::help_repository& repo)
 {
@@ -202,7 +202,7 @@ void describe_ivga_consumer(core::help_sink& sink, const core::help_repository&
        sink.example(L">> ADD 1 NEWTEK_IVGA");
 }
 
-spl::shared_ptr<core::frame_consumer> create_ivga_consumer(const std::vector<std::wstring>& params, core::interaction_sink*)
+spl::shared_ptr<core::frame_consumer> create_ivga_consumer(const std::vector<std::wstring>& params, core::interaction_sink*, std::vector<spl::shared_ptr<core::video_channel>> channels)
 {
        if (params.size() < 1 || !boost::iequals(params.at(0), L"NEWTEK_IVGA"))
                return core::frame_consumer::empty();
@@ -210,9 +210,9 @@ spl::shared_ptr<core::frame_consumer> create_ivga_consumer(const std::vector<std
        return spl::make_shared<newtek_ivga_consumer>();
 }
 
-spl::shared_ptr<core::frame_consumer> create_preconfigured_ivga_consumer(const boost::property_tree::wptree& ptree, core::interaction_sink*)
-{      
+spl::shared_ptr<core::frame_consumer> create_preconfigured_ivga_consumer(const boost::property_tree::wptree& ptree, core::interaction_sink*, std::vector<spl::shared_ptr<core::video_channel>> channels)
+{
        return spl::make_shared<newtek_ivga_consumer>();
 }
 
-}}
\ No newline at end of file
+}}
index fc41ca726c84e5bd3a7b913c314027f3e813b539..1fa43b7eec1333360d6a32cc409b656bc8059617 100644 (file)
@@ -32,7 +32,7 @@
 namespace caspar { namespace newtek {
 
 void describe_ivga_consumer(core::help_sink& sink, const core::help_repository& repo);
-spl::shared_ptr<core::frame_consumer> create_ivga_consumer(const std::vector<std::wstring>& params, core::interaction_sink*);
-spl::shared_ptr<core::frame_consumer> create_preconfigured_ivga_consumer(const boost::property_tree::wptree& ptree, core::interaction_sink*);
+spl::shared_ptr<core::frame_consumer> create_ivga_consumer(const std::vector<std::wstring>& params, core::interaction_sink*, std::vector<spl::shared_ptr<core::video_channel>> channels);
+spl::shared_ptr<core::frame_consumer> create_preconfigured_ivga_consumer(const boost::property_tree::wptree& ptree, core::interaction_sink*, std::vector<spl::shared_ptr<core::video_channel>> channels);
 
-}}
\ No newline at end of file
+}}
index da6108c6917f497c499ca2c8bdb23301e1ba1023..d7c010ea27cb9959d6e47e0ce08af382e8e58888 100644 (file)
@@ -72,7 +72,7 @@ public:
 
                if(!context_)
                        CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info("Failed to create audio context."));
-                       
+
                if(alcMakeContextCurrent(context_) == ALC_FALSE)
                        CASPAR_THROW_EXCEPTION(invalid_operation() << msg_info("Failed to activate audio context."));
        }
@@ -98,7 +98,7 @@ void init_device()
 {
        static std::unique_ptr<device> instance;
        static boost::once_flag f = BOOST_ONCE_INIT;
-       
+
        boost::call_once(f, []{instance.reset(new device());});
 }
 
@@ -110,7 +110,7 @@ struct oal_consumer : public core::frame_consumer
        boost::timer                                                                    perf_timer_;
        tbb::atomic<int64_t>                                                    presentation_age_;
        int                                                                                             channel_index_          = -1;
-       
+
        core::video_format_desc                                                 format_desc_;
        core::audio_channel_layout                                              out_channel_layout_;
        std::unique_ptr<core::audio_channel_remapper>   channel_remapper_;
@@ -130,7 +130,7 @@ public:
 
                init_device();
 
-               graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f));   
+               graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f));
                graph_->set_color("dropped-frame", diagnostics::color(0.3f, 0.6f, 0.3f));
                graph_->set_color("late-frame", diagnostics::color(0.6f, 0.3f, 0.3f));
                diagnostics::register_graph(graph_);
@@ -139,7 +139,7 @@ public:
        ~oal_consumer()
        {
                executor_.invoke([=]
-               {               
+               {
                        if(source_)
                        {
                                alSourceStop(source_);
@@ -158,7 +158,7 @@ public:
 
        void initialize(const core::video_format_desc& format_desc, const core::audio_channel_layout& channel_layout, int channel_index) override
        {
-               format_desc_    = format_desc;          
+               format_desc_    = format_desc;
                channel_index_  = channel_index;
                if (out_channel_layout_ == core::audio_channel_layout::invalid())
                        out_channel_layout_ = channel_layout.num_channels == 2 ? channel_layout : *core::audio_channel_layout_repository::get_default()->get_layout(L"stereo");
@@ -169,21 +169,21 @@ public:
                graph_->set_text(print());
 
                executor_.begin_invoke([=]
-               {               
+               {
                        buffers_.resize(format_desc_.fps > 30 ? 8 : 4);
                        alGenBuffers(static_cast<ALsizei>(buffers_.size()), buffers_.data());
                        alGenSources(1, &source_);
 
                        for(std::size_t n = 0; n < buffers_.size(); ++n)
                        {
-                               audio_buffer_16 audio(format_desc_.audio_cadence[n % format_desc_.audio_cadence.size()]*2, 0);
+                               audio_buffer_16 audio(format_desc_.audio_cadence[n % format_desc_.audio_cadence.size()] * 2, 0);
                                alBufferData(buffers_[n], AL_FORMAT_STEREO16, audio.data(), static_cast<ALsizei>(audio.size()*sizeof(int16_t)), format_desc_.audio_sample_rate);
                                alSourceQueueBuffers(source_, 1, &buffers_[n]);
                        }
-                       
+
                        alSourcei(source_, AL_LOOPING, AL_FALSE);
 
-                       alSourcePlay(source_);  
+                       alSourcePlay(source_);
                });
        }
 
@@ -198,28 +198,28 @@ public:
                // exhausted, which should not happen
                executor_.begin_invoke([=]
                {
-                       ALenum state; 
+                       ALenum state;
                        alGetSourcei(source_, AL_SOURCE_STATE,&state);
                        if(state != AL_PLAYING)
                        {
                                for(int n = 0; n < buffers_.size()-1; ++n)
-                               {                                       
-                                       ALuint buffer = 0;  
+                               {
+                                       ALuint buffer = 0;
                                        alSourceUnqueueBuffers(source_, 1, &buffer);
                                        if(buffer)
                                        {
-                                               std::vector<int16_t> audio(format_desc_.audio_cadence[n % format_desc_.audio_cadence.size()] * 2, 0);
+                                               audio_buffer_16 audio(format_desc_.audio_cadence[n % format_desc_.audio_cadence.size()] * 2, 0);
                                                alBufferData(buffer, AL_FORMAT_STEREO16, audio.data(), static_cast<ALsizei>(audio.size()*sizeof(int16_t)), format_desc_.audio_sample_rate);
                                                alSourceQueueBuffers(source_, 1, &buffer);
                                        }
                                }
-                               alSourcePlay(source_);          
+                               alSourcePlay(source_);
                                graph_->set_tag(diagnostics::tag_severity::WARNING, "late-frame");
                        }
 
                        auto audio = core::audio_32_to_16(channel_remapper_->mix_and_rearrange(frame.audio_data()));
-                       
-                       ALuint buffer = 0;  
+
+                       ALuint buffer = 0;
                        alSourceUnqueueBuffers(source_, 1, &buffer);
                        if(buffer)
                        {
@@ -229,14 +229,14 @@ public:
                        else
                                graph_->set_tag(diagnostics::tag_severity::WARNING, "dropped-frame");
 
-                       graph_->set_value("tick-time", perf_timer_.elapsed()*format_desc_.fps*0.5);             
+                       graph_->set_value("tick-time", perf_timer_.elapsed()*format_desc_.fps*0.5);
                        perf_timer_.restart();
                        presentation_age_ = frame.get_age_millis() + latency_millis();
                });
 
                return make_ready_future(true);
        }
-       
+
        std::wstring print() const override
        {
                return L"oal[" + boost::lexical_cast<std::wstring>(channel_index_) + L"|" + format_desc_.name + L"]";
@@ -253,7 +253,7 @@ public:
                info.add(L"type", L"system-audio");
                return info;
        }
-       
+
        bool has_synchronization_clock() const override
        {
                return false;
@@ -263,14 +263,14 @@ public:
        {
                return latency_millis_;
        }
-       
+
        int buffer_depth() const override
        {
                int delay_in_frames = static_cast<int>(latency_millis() / (1000.0 / format_desc_.fps));
-               
+
                return delay_in_frames;
        }
-               
+
        int index() const override
        {
                return 500;
@@ -293,7 +293,8 @@ void describe_consumer(core::help_sink& sink, const core::help_repository& repo)
        sink.example(L">> ADD 1 AUDIO LATENCY 500", L"Specifies that the system-audio chain: openal => driver => sound card => speaker output is 500ms");
 }
 
-spl::shared_ptr<core::frame_consumer> create_consumer(const std::vector<std::wstring>& params, core::interaction_sink*)
+spl::shared_ptr<core::frame_consumer> create_consumer(
+               const std::vector<std::wstring>& params, core::interaction_sink*, std::vector<spl::shared_ptr<core::video_channel>> channels)
 {
        if(params.size() < 1 || !boost::iequals(params.at(0), L"AUDIO"))
                return core::frame_consumer::empty();
@@ -316,7 +317,8 @@ spl::shared_ptr<core::frame_consumer> create_consumer(const std::vector<std::wst
        return spl::make_shared<oal_consumer>(channel_layout, latency_millis);
 }
 
-spl::shared_ptr<core::frame_consumer> create_preconfigured_consumer(const boost::property_tree::wptree& ptree, core::interaction_sink*)
+spl::shared_ptr<core::frame_consumer> create_preconfigured_consumer(
+               const boost::property_tree::wptree& ptree, core::interaction_sink*, std::vector<spl::shared_ptr<core::video_channel>> channels)
 {
        auto channel_layout                     = core::audio_channel_layout::invalid();
        auto channel_layout_spec        = ptree.get_optional<std::wstring>(L"channel-layout");
index abb7b7c32c00b5e79046ad4459a0c0381aaef980..a688d10948589e3efbdeca48118a223a084320f7 100644 (file)
 #include <boost/property_tree/ptree_fwd.hpp>
 
 namespace caspar { namespace oal {
-       
+
 void describe_consumer(core::help_sink& sink, const core::help_repository& repo);
 spl::shared_ptr<core::frame_consumer> create_consumer(
-               const std::vector<std::wstring>& params, core::interaction_sink*);
+               const std::vector<std::wstring>& params, core::interaction_sink*, std::vector<spl::shared_ptr<core::video_channel>> channels);
 spl::shared_ptr<core::frame_consumer> create_preconfigured_consumer(
-               const boost::property_tree::wptree&, core::interaction_sink*);
+               const boost::property_tree::wptree&, core::interaction_sink*, std::vector<spl::shared_ptr<core::video_channel>> channels);
 
-}}
\ No newline at end of file
+}}
index b7435021035bc62b16324ff5d5338e615d8a3cfa..d62cf5119b83e9ff98c4383a5490af5cfb0cd2d5 100644 (file)
@@ -454,7 +454,7 @@ public:
 \r
                auto shear_factor = (yx*c + yy*s) / (yy*c - yx * s);\r
                auto scale_y = 1.0;\r
-               if (abs(shear_factor) < 0.0001 || isnan(shear_factor)) {\r
+               if (abs(shear_factor) < 0.0001 || std::isnan(shear_factor)) {\r
                        shear_factor = 0;\r
                        scale_y = (abs(c) > 0.1) ? yy / c : yx / -s;\r
                }\r
@@ -644,11 +644,11 @@ bool layer::is_visible() { return (impl_->flags_ & 2) == 0; }     //the (PSD file-fo
 bool layer::is_position_protected() { return (impl_->protection_flags_& 4) == 4; }\r
 \r
 const layer::mask_info& layer::mask() const { return impl_->mask_; }\r
-
-const psd::point<double>& layer::text_pos() const { return impl_->text_pos_; }
-const psd::point<double>& layer::scale() const { return impl_->scale_; }
-const double layer::angle() const { return impl_->angle_; }
-const double layer::shear() const { return impl_->shear_; }
+\r
+const psd::point<double>& layer::text_pos() const { return impl_->text_pos_; }\r
+const psd::point<double>& layer::scale() const { return impl_->scale_; }\r
+const double layer::angle() const { return impl_->angle_; }\r
+const double layer::shear() const { return impl_->shear_; }\r
 \r
 bool layer::is_text() const { return !impl_->text_layer_info_.empty(); }\r
 const boost::property_tree::wptree& layer::text_data() const { return impl_->text_layer_info_; }\r
index 84b3c5a0766addd9aec503978272ee8a7b77408f..b992cfb69f12773d79f1be9f959fcd14d1110201 100644 (file)
@@ -42,6 +42,7 @@
 //#include <windows.h>
 
 #include <ffmpeg/producer/filter/filter.h>
+#include <ffmpeg/producer/util/util.h>
 
 #include <core/video_format.h>
 #include <core/frame/frame.h>
@@ -69,7 +70,7 @@
 #pragma warning (push)
 #pragma warning (disable : 4244)
 #endif
-extern "C" 
+extern "C"
 {
        #define __STDC_CONSTANT_MACROS
        #define __STDC_LIMIT_MACROS
@@ -81,7 +82,7 @@ extern "C"
 #endif
 
 namespace caspar { namespace screen {
-               
+
 enum class stretch
 {
        none,
@@ -98,7 +99,7 @@ struct configuration
                aspect_16_9,
                aspect_invalid,
        };
-               
+
        std::wstring    name                            = L"Screen consumer";
        int                             screen_index            = 0;
        screen::stretch stretch                         = screen::stretch::fill;
@@ -119,7 +120,7 @@ struct screen_consumer : boost::noncopyable
 
        GLuint                                                                                          texture_                = 0;
        std::vector<GLuint>                                                                     pbos_                   = std::vector<GLuint> { 0, 0 };
-                       
+
        float                                                                                           width_;
        float                                                                                           height_;
        int                                                                                                     screen_x_;
@@ -152,20 +153,20 @@ public:
                        const configuration& config,
                        const core::video_format_desc& format_desc,
                        int channel_index,
-                       core::interaction_sink* sink) 
+                       core::interaction_sink* sink)
                : config_(config)
                , format_desc_(format_desc)
                , channel_index_(channel_index)
                , pts_(0)
                , sink_(sink)
                , filter_([&]() -> ffmpeg::filter
-               {                       
-                       const auto sample_aspect_ratio = 
+               {
+                       const auto sample_aspect_ratio =
                                boost::rational<int>(
-                                       format_desc.square_width, 
+                                       format_desc.square_width,
                                        format_desc.square_height) /
                                boost::rational<int>(
-                                       format_desc.width, 
+                                       format_desc.width,
                                        format_desc.height);
 
                        return ffmpeg::filter(
@@ -178,7 +179,7 @@ public:
                                { AV_PIX_FMT_BGRA },
                                format_desc.field_mode == core::field_mode::progressive || !config.auto_deinterlace ? "" : "format=pix_fmts=gbrp,YADIF=1:-1");
                }())
-       {               
+       {
                if (format_desc_.format == core::video_format::ntsc && config_.aspect == configuration::aspect_ratio::aspect_4_3)
                {
                        // Use default values which are 4:3.
@@ -192,13 +193,13 @@ public:
                }
 
                frame_buffer_.set_capacity(1);
-               
-               graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f));   
+
+               graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f));
                graph_->set_color("frame-time", diagnostics::color(0.1f, 1.0f, 0.1f));
                graph_->set_color("dropped-frame", diagnostics::color(0.3f, 0.6f, 0.3f));
                graph_->set_text(print());
                diagnostics::register_graph(graph_);
-                                                                       
+
                /*DISPLAY_DEVICE d_device = {sizeof(d_device), 0};
                std::vector<DISPLAY_DEVICE> displayDevices;
                for(int n = 0; EnumDisplayDevices(NULL, n, &d_device, NULL); ++n)
@@ -206,11 +207,11 @@ public:
 
                if(config_.screen_index >= displayDevices.size())
                        CASPAR_LOG(warning) << print() << L" Invalid screen-index: " << config_.screen_index;
-               
+
                DEVMODE devmode = {};
                if(!EnumDisplaySettings(displayDevices[config_.screen_index].DeviceName, ENUM_CURRENT_SETTINGS, &devmode))
                        CASPAR_LOG(warning) << print() << L" Could not find display settings for screen-index: " << config_.screen_index;
-               
+
                screen_x_               = devmode.dmPosition.x;
                screen_y_               = devmode.dmPosition.y;
                screen_width_   = config_.windowed ? square_width_ : devmode.dmPelsWidth;
@@ -219,13 +220,13 @@ public:
                screen_y_               = 0;
                screen_width_   = square_width_;
                screen_height_  = square_height_;
-               
+
                polling_event_ = false;
                is_running_ = true;
                current_presentation_age_ = 0;
                thread_ = boost::thread([this]{run();});
        }
-       
+
        ~screen_consumer()
        {
                is_running_ = false;
@@ -245,7 +246,7 @@ public:
                window_.setPosition(sf::Vector2i(screen_x_, screen_y_));
                window_.setSize(sf::Vector2u(screen_width_, screen_height_));
                window_.setActive();
-               
+
                if(!GLEW_VERSION_2_1 && glewInit() != GLEW_OK)
                        CASPAR_THROW_EXCEPTION(gl::ogl_exception() << msg_info("Failed to initialize GLEW."));
 
@@ -253,13 +254,13 @@ public:
                        CASPAR_THROW_EXCEPTION(not_supported() << msg_info("Missing OpenGL 2.1 support."));
 
                GL(glEnable(GL_TEXTURE_2D));
-               GL(glDisable(GL_DEPTH_TEST));           
+               GL(glDisable(GL_DEPTH_TEST));
                GL(glClearColor(0.0, 0.0, 0.0, 0.0));
                GL(glViewport(0, 0, format_desc_.width, format_desc_.height));
                GL(glLoadIdentity());
-                               
+
                calculate_aspect();
-                       
+
                GL(glGenTextures(1, &texture_));
                GL(glBindTexture(GL_TEXTURE_2D, texture_));
                GL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR));
@@ -268,15 +269,15 @@ public:
                GL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP));
                GL(glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, format_desc_.width, format_desc_.height, 0, GL_BGRA, GL_UNSIGNED_BYTE, 0));
                GL(glBindTexture(GL_TEXTURE_2D, 0));
-                                       
+
                GL(glGenBuffers(2, pbos_.data()));
-                       
+
                glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, pbos_[0]);
                glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_ARB, format_desc_.size, 0, GL_STREAM_DRAW_ARB);
                glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, pbos_[1]);
                glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_ARB, format_desc_.size, 0, GL_STREAM_DRAW_ARB);
                glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
-               
+
                window_.setVerticalSyncEnabled(config_.vsync);
 
                if (config_.vsync)
@@ -286,7 +287,7 @@ public:
        }
 
        void uninit()
-       {               
+       {
                if(texture_)
                        glDeleteTextures(1, &texture_);
 
@@ -306,7 +307,7 @@ public:
                        init();
 
                        while(is_running_)
-                       {                       
+                       {
                                try
                                {
                                        auto poll_event = [this](sf::Event& e)
@@ -364,20 +365,20 @@ public:
                                                        }
                                                }
                                        }
-                       
+
                                        core::const_frame frame;
                                        frame_buffer_.pop(frame);
 
                                        render_and_draw_frame(frame);
-                                       
+
                                        /*perf_timer_.restart();
                                        render(frame);
-                                       graph_->set_value("frame-time", perf_timer_.elapsed()*format_desc_.fps*0.5);    
+                                       graph_->set_value("frame-time", perf_timer_.elapsed()*format_desc_.fps*0.5);
 
                                        window_.Display();*/
 
                                        current_presentation_age_ = frame.get_age_millis();
-                                       graph_->set_value("tick-time", tick_timer_.elapsed()*format_desc_.fps*0.5);     
+                                       graph_->set_value("tick-time", tick_timer_.elapsed()*format_desc_.fps*0.5);
                                        tick_timer_.restart();
                                }
                                catch(...)
@@ -414,11 +415,10 @@ public:
        }
 
        spl::shared_ptr<AVFrame> get_av_frame()
-       {               
-               spl::shared_ptr<AVFrame> av_frame(av_frame_alloc(), [](AVFrame* p) { av_frame_free(&p); });
-               avcodec_get_frame_defaults(av_frame.get());
-                                               
-               av_frame->linesize[0]           = format_desc_.width*4;                 
+       {
+               auto av_frame = ffmpeg::create_frame();
+
+               av_frame->linesize[0]           = format_desc_.width*4;
                av_frame->format                        = PIX_FMT_BGRA;
                av_frame->width                         = format_desc_.width;
                av_frame->height                        = format_desc_.height;
@@ -436,7 +436,7 @@ public:
 
                if(screen_width_ == 0 && screen_height_ == 0)
                        return;
-                                       
+
                perf_timer_.restart();
                auto av_frame = get_av_frame();
                av_frame->data[0] = const_cast<uint8_t*>(input_frame.image_data().begin());
@@ -497,20 +497,20 @@ public:
                        {
                                fast_memcpy(ptr, av_frame->data[0], format_desc_.size);
                        }
-                       
+
                        GL(glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER)); // release the mapped buffer
                }
 
                GL(glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0));
-                               
-               GL(glClear(GL_COLOR_BUFFER_BIT));                       
+
+               GL(glClear(GL_COLOR_BUFFER_BIT));
                glBegin(GL_QUADS);
                                glTexCoord2f(0.0f,        1.0f);        glVertex2f(-width_, -height_);
                                glTexCoord2f(1.0f,        1.0f);        glVertex2f( width_, -height_);
                                glTexCoord2f(1.0f,        0.0f);        glVertex2f( width_,  height_);
                                glTexCoord2f(0.0f,        0.0f);        glVertex2f(-width_,  height_);
                glEnd();
-               
+
                GL(glBindTexture(GL_TEXTURE_2D, 0));
 
                std::rotate(pbos_.begin(), pbos_.begin() + 1, pbos_.end());
@@ -531,10 +531,10 @@ public:
        }
 
        std::wstring print() const
-       {       
+       {
                return config_.name + L" " + channel_and_format();
        }
-       
+
        void calculate_aspect()
        {
                if(config_.windowed)
@@ -542,7 +542,7 @@ public:
                        screen_height_ = window_.getSize().y;
                        screen_width_ = window_.getSize().x;
                }
-               
+
                GL(glViewport(0, 0, screen_width_, screen_height_));
 
                std::pair<float, float> target_ratio = None();
@@ -556,7 +556,7 @@ public:
                width_ = target_ratio.first;
                height_ = target_ratio.second;
        }
-               
+
        std::pair<float, float> None()
        {
                float width = static_cast<float>(square_width_)/static_cast<float>(screen_width_);
@@ -607,7 +607,7 @@ public:
                , sink_(sink)
        {
        }
-       
+
        // frame_consumer
 
        void initialize(const core::video_format_desc& format_desc, const core::audio_channel_layout&, int channel_index) override
@@ -625,7 +625,7 @@ public:
        {
                return consumer_->send(frame);
        }
-       
+
        std::wstring print() const override
        {
                return consumer_ ? consumer_->print() : L"[screen_consumer]";
@@ -651,7 +651,7 @@ public:
        {
                return false;
        }
-       
+
        int buffer_depth() const override
        {
                return 1;
@@ -666,7 +666,7 @@ public:
        {
                return monitor_subject_;
        }
-};     
+};
 
 void describe_consumer(core::help_sink& sink, const core::help_repository& repo)
 {
@@ -696,16 +696,17 @@ void describe_consumer(core::help_sink& sink, const core::help_repository& repo)
        sink.example(L">> ADD 1 SCREEN 1 BORDERLESS", L"opens a screen consumer without borders/window decorations on screen 1.");
 }
 
-spl::shared_ptr<core::frame_consumer> create_consumer(const std::vector<std::wstring>& params, core::interaction_sink* sink)
+spl::shared_ptr<core::frame_consumer> create_consumer(
+               const std::vector<std::wstring>& params, core::interaction_sink* sink, std::vector<spl::shared_ptr<core::video_channel>> channels)
 {
        if (params.size() < 1 || !boost::iequals(params.at(0), L"SCREEN"))
                return core::frame_consumer::empty();
-       
+
        configuration config;
-               
+
        if (params.size() > 1)
                config.screen_index = boost::lexical_cast<int>(params.at(1));
-               
+
        config.windowed                 = !contains_param(L"FULLSCREEN", params);
        config.key_only                 =  contains_param(L"KEY_ONLY", params);
        config.interactive              = !contains_param(L"NON_INTERACTIVE", params);
@@ -718,7 +719,8 @@ spl::shared_ptr<core::frame_consumer> create_consumer(const std::vector<std::wst
        return spl::make_shared<screen_consumer_proxy>(config, sink);
 }
 
-spl::shared_ptr<core::frame_consumer> create_preconfigured_consumer(const boost::property_tree::wptree& ptree, core::interaction_sink* sink) 
+spl::shared_ptr<core::frame_consumer> create_preconfigured_consumer(
+               const boost::property_tree::wptree& ptree, core::interaction_sink* sink, std::vector<spl::shared_ptr<core::video_channel>> channels)
 {
        configuration config;
        config.name                             = ptree.get(L"name",                            config.name);
@@ -741,7 +743,7 @@ spl::shared_ptr<core::frame_consumer> create_preconfigured_consumer(const boost:
                config.aspect = configuration::aspect_ratio::aspect_16_9;
        else if(aspect_str == L"4:3")
                config.aspect = configuration::aspect_ratio::aspect_4_3;
-       
+
        return spl::make_shared<screen_consumer_proxy>(config, sink);
 }
 
index f4293c1f3ef6c4fe224b57d4eaff7e85808b6f20..fb754db6e8287954e8839578a62e725bbf1e8112 100644 (file)
@@ -33,9 +33,11 @@ namespace caspar { namespace screen {
 void describe_consumer(core::help_sink& sink, const core::help_repository& repo);
 spl::shared_ptr<core::frame_consumer> create_consumer(
                const std::vector<std::wstring>& params,
-               core::interaction_sink* sink);
+               core::interaction_sink* sink,
+               std::vector<spl::shared_ptr<core::video_channel>> channels);
 spl::shared_ptr<core::frame_consumer> create_preconfigured_consumer(
                const boost::property_tree::wptree& ptree,
-               core::interaction_sink* sink);
+               core::interaction_sink* sink,
+               std::vector<spl::shared_ptr<core::video_channel>> channels);
 
-}}
\ No newline at end of file
+}}
index 7022a7bf98dc663ad2d47b1e784f77bd53bc68a9..a490da5d714ffb71db4de3eff5a9ac36d1c51e6d 100644 (file)
 /* Return codes
 
 102 [action]                   Information that [action] has happened
-101 [action]                   Information that [action] has happened plus one row of data  
+101 [action]                   Information that [action] has happened plus one row of data
 
 202 [command] OK               [command] has been executed
-201 [command] OK               [command] has been executed, plus one row of data  
+201 [command] OK               [command] has been executed, plus one row of data
 200 [command] OK               [command] has been executed, plus multiple lines of data. ends with an empty line
 
 400 ERROR                              the command could not be understood
 401 [command] ERROR            invalid/missing channel
 402 [command] ERROR            parameter missing
-403 [command] ERROR            invalid parameter  
+403 [command] ERROR            invalid parameter
 404 [command] ERROR            file not found
 
 500 FAILED                             internal error
@@ -134,7 +134,7 @@ std::wstring read_utf8_file(const boost::filesystem::path& file)
        std::wstringstream result;
        boost::filesystem::wifstream filestream(file);
 
-       if (filestream) 
+       if (filestream)
        {
                // Consume BOM first
                filestream.get();
@@ -234,11 +234,11 @@ std::wstring MediaInfo(const boost::filesystem::path& path, const spl::shared_pt
 }
 
 std::wstring ListMedia(const spl::shared_ptr<media_info_repository>& media_info_repo)
-{      
+{
        std::wstringstream replyString;
        for (boost::filesystem::recursive_directory_iterator itr(env::media_folder()), end; itr != end; ++itr)
                replyString << MediaInfo(itr->path(), media_info_repo);
-       
+
        return boost::to_upper_copy(replyString.str());
 }
 
@@ -247,7 +247,7 @@ std::wstring ListTemplates(const spl::shared_ptr<core::cg_producer_registry>& cg
        std::wstringstream replyString;
 
        for (boost::filesystem::recursive_directory_iterator itr(env::template_folder()), end; itr != end; ++itr)
-       {               
+       {
                if(boost::filesystem::is_regular_file(itr->path()) && cg_registry->is_cg_extension(itr->path().extension().wstring()))
                {
                        auto relativePath = get_relative_without_extension(itr->path(), env::template_folder());
@@ -264,7 +264,7 @@ std::wstring ListTemplates(const spl::shared_ptr<core::cg_producer_registry>& cg
                        auto dir = relativePath.parent_path();
                        auto file = boost::to_upper_copy(relativePath.filename().wstring());
                        relativePath = dir / file;
-                                               
+
                        auto str = relativePath.generic_wstring();
                        boost::trim_if(str, boost::is_any_of("\\/"));
 
@@ -280,13 +280,18 @@ std::wstring ListTemplates(const spl::shared_ptr<core::cg_producer_registry>& cg
        return replyString.str();
 }
 
+std::vector<spl::shared_ptr<core::video_channel>> get_channels(const command_context& ctx)
+{
+       return cpplinq::from(ctx.channels)
+               .select([](channel_context c) { return spl::make_shared_ptr(c.channel); })
+               .to_vector();
+}
+
 core::frame_producer_dependencies get_producer_dependencies(const std::shared_ptr<core::video_channel>& channel, const command_context& ctx)
 {
        return core::frame_producer_dependencies(
                        channel->frame_factory(),
-                       cpplinq::from(ctx.channels)
-                                       .select([](channel_context c) { return spl::make_shared_ptr(c.channel); })
-                                       .to_vector(),
+                       get_channels(ctx),
                        channel->video_format_desc(),
                        ctx.producer_registry);
 }
@@ -536,7 +541,7 @@ void call_describer(core::help_sink& sink, const core::help_repository& repo)
 
 std::wstring call_command(command_context& ctx)
 {
-       auto result = ctx.channel.channel->stage().call(ctx.layer_index(), ctx.parameters);
+       auto result = ctx.channel.channel->stage().call(ctx.layer_index(), ctx.parameters).get();
 
        // TODO: because of std::async deferred timed waiting does not work
 
@@ -545,10 +550,10 @@ std::wstring call_command(command_context& ctx)
        CASPAR_THROW_EXCEPTION(timed_out());*/
 
        std::wstringstream replyString;
-       if (result.get().empty())
+       if (result.empty())
                replyString << L"202 CALL OK\r\n";
        else
-               replyString << L"201 CALL OK\r\n" << result.get() << L"\r\n";
+               replyString << L"201 CALL OK\r\n" << result << L"\r\n";
 
        return replyString.str();
 }
@@ -616,6 +621,7 @@ void add_describer(core::help_sink& sink, const core::help_repository& repo)
        sink.example(L">> ADD 1 SCREEN");
        sink.example(L">> ADD 1 AUDIO");
        sink.example(L">> ADD 1 IMAGE filename");
+       sink.example(L">> ADD 2 SYNCTO 1");
        sink.example(L">> ADD 1 FILE filename.mov");
        sink.example(L">> ADD 1 FILE filename.mov SEPARATE_KEY");
        sink.example(
@@ -635,7 +641,7 @@ std::wstring add_command(command_context& ctx)
        core::diagnostics::scoped_call_context save;
        core::diagnostics::call_context::for_thread().video_channel = ctx.channel_index + 1;
 
-       auto consumer = ctx.consumer_registry->create_consumer(ctx.parameters, &ctx.channel.channel->stage());
+       auto consumer = ctx.consumer_registry->create_consumer(ctx.parameters, &ctx.channel.channel->stage(), get_channels(ctx));
        ctx.channel.channel->output().add(ctx.layer_index(consumer->index()), consumer);
 
        return L"202 ADD OK\r\n";
@@ -660,7 +666,7 @@ void remove_describer(core::help_sink& sink, const core::help_repository& repo)
 std::wstring remove_command(command_context& ctx)
 {
        auto index = ctx.layer_index(std::numeric_limits<int>::min());
-       
+
        if (index == std::numeric_limits<int>::min())
        {
                replace_placeholders(
@@ -668,7 +674,7 @@ std::wstring remove_command(command_context& ctx)
                                ctx.client->address(),
                                ctx.parameters);
 
-               index = ctx.consumer_registry->create_consumer(ctx.parameters, &ctx.channel.channel->stage())->index();
+               index = ctx.consumer_registry->create_consumer(ctx.parameters, &ctx.channel.channel->stage(), get_channels(ctx))->index();
        }
 
        ctx.channel.channel->output().remove(index);
@@ -689,7 +695,7 @@ void print_describer(core::help_sink& sink, const core::help_repository& repo)
 
 std::wstring print_command(command_context& ctx)
 {
-       ctx.channel.channel->output().add(ctx.consumer_registry->create_consumer({ L"IMAGE" }, &ctx.channel.channel->stage()));
+       ctx.channel.channel->output().add(ctx.consumer_registry->create_consumer({ L"IMAGE" }, &ctx.channel.channel->stage(), get_channels(ctx)));
 
        return L"202 PRINT OK\r\n";
 }
@@ -818,7 +824,7 @@ std::wstring data_store_command(command_context& ctx)
 void data_retrieve_describer(core::help_sink& sink, const core::help_repository& repo)
 {
        sink.short_description(L"Retrieve a dataset.");
-       sink.syntax(L"DATA RETRIEVE [name:string] [data:string]");
+       sink.syntax(L"DATA RETRIEVE [name:string]");
        sink.para()->text(L"Returns the data saved under the name ")->code(L"name")->text(L".");
        sink.para()->text(L"Examples:");
        sink.example(L">> DATA RETRIEVE my_data");
@@ -2045,7 +2051,7 @@ std::wstring channel_grid_command(command_context& ctx)
        params.push_back(L"0");
        params.push_back(L"NAME");
        params.push_back(L"Channel Grid Window");
-       auto screen = ctx.consumer_registry->create_consumer(params, &self.channel->stage());
+       auto screen = ctx.consumer_registry->create_consumer(params, &self.channel->stage(), get_channels(ctx));
 
        self.channel->output().add(screen);
 
@@ -2093,14 +2099,14 @@ std::wstring thumbnail_list_command(command_context& ctx)
        std::wstringstream replyString;
        replyString << L"200 THUMBNAIL LIST OK\r\n";
 
-       for (boost::filesystem::recursive_directory_iterator itr(env::thumbnails_folder()), end; itr != end; ++itr)
+       for (boost::filesystem::recursive_directory_iterator itr(env::thumbnail_folder()), end; itr != end; ++itr)
        {
                if (boost::filesystem::is_regular_file(itr->path()))
                {
                        if (!boost::iequals(itr->path().extension().wstring(), L".png"))
                                continue;
 
-                       auto relativePath = get_relative_without_extension(itr->path(), env::thumbnails_folder());
+                       auto relativePath = get_relative_without_extension(itr->path(), env::thumbnail_folder());
                        auto str = relativePath.generic_wstring();
 
                        if (str[0] == '\\' || str[0] == '/')
@@ -2133,7 +2139,7 @@ void thumbnail_retrieve_describer(core::help_sink& sink, const core::help_reposi
 
 std::wstring thumbnail_retrieve_command(command_context& ctx)
 {
-       std::wstring filename = env::thumbnails_folder();
+       std::wstring filename = env::thumbnail_folder();
        filename.append(ctx.parameters.at(0));
        filename.append(L".png");
 
@@ -2300,6 +2306,14 @@ void version_describer(core::help_sink& sink, const core::help_repository& repo)
                L">> VERSION FLASH\n"
                L"<< 201 VERSION OK\n"
                L"<< 11.8.800.94");
+       sink.example(
+               L">> VERSION TEMPLATEHOST\n"
+               L"<< 201 VERSION OK\n"
+               L"<< unknown");
+       sink.example(
+               L">> VERSION CEF\n"
+               L"<< 201 VERSION OK\n"
+               L"<< 3.1750.1805");
 }
 
 std::wstring version_command(command_context& ctx)
@@ -2429,8 +2443,14 @@ void info_paths_describer(core::help_sink& sink, const core::help_repository& re
 std::wstring info_paths_command(command_context& ctx)
 {
        boost::property_tree::wptree info;
-       info.add_child(L"paths", caspar::env::properties().get_child(L"configuration.paths"));
-       info.add(L"paths.initial-path", boost::filesystem::initial_path().wstring() + L"/");
+
+       info.add(L"paths.media-path",           caspar::env::media_folder());
+       info.add(L"paths.log-path",                     caspar::env::log_folder());
+       info.add(L"paths.data-path",                    caspar::env::data_folder());
+       info.add(L"paths.template-path",                caspar::env::template_folder());
+       info.add(L"paths.thumbnail-path",       caspar::env::thumbnail_folder());
+       info.add(L"paths.font-path",                    caspar::env::font_folder());
+       info.add(L"paths.initial-path",         caspar::env::initial_folder() + L"/");
 
        return create_info_xml_reply(info, L"PATHS");
 }
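
INFO PATHS now assembles its reply by adding the individual folders to a fresh boost::property_tree::wptree instead of copying the configuration subtree. A minimal self-contained sketch of that pattern; the folder values here are placeholders, not the server's configured paths.

	#include <boost/property_tree/ptree.hpp>
	#include <boost/property_tree/xml_parser.hpp>
	#include <iostream>
	#include <sstream>
	#include <string>

	int main()
	{
		boost::property_tree::wptree info;
		info.add(L"paths.media-path",   std::wstring(L"media/"));        // placeholder values
		info.add(L"paths.log-path",     std::wstring(L"log/"));
		info.add(L"paths.initial-path", std::wstring(L"/opt/casparcg/"));

		std::wstringstream xml;
		boost::property_tree::xml_writer_settings<std::wstring> settings(L' ', 3);
		boost::property_tree::write_xml(xml, info, settings);
		std::wcout << xml.str();   // <paths><media-path>media/</media-path>...</paths>
	}
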
index 8348c56b97f4754d7bbaf3ddd992643e6b7b77c9..d404becdbb9c5711f5d6f2203a8a14864ef49441 100644 (file)
@@ -329,6 +329,12 @@ struct AsyncEventServer::implementation : public spl::enable_shared_from_this<im
                
         if (!error)
                {
+                       boost::system::error_code ec;
+                       socket->set_option(boost::asio::socket_base::keep_alive(true), ec);
+
+                       if (ec)
+                               CASPAR_LOG(warning) << print() << L" Failed to enable TCP keep-alive on socket";
+                       
                        auto conn = connection::create(service_, socket, protocol_factory_, connection_set_);
                        connection_set_->insert(conn);
 
@@ -341,6 +347,11 @@ struct AsyncEventServer::implementation : public spl::enable_shared_from_this<im
                start_accept();
     }
 
+       std::wstring print() const
+       {
+               return L"async_event_server[:" + boost::lexical_cast<std::wstring>(acceptor_.local_endpoint().port()) + L"]";
+       }
+
        void add_client_lifecycle_object_factory(const lifecycle_factory_t& factory)
        {
                auto self = shared_from_this();
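
The accept handler above turns on TCP keep-alive using the non-throwing set_option overload, so a failure is only logged rather than thrown. A minimal sketch of the same call on a plain boost::asio socket, outside of the server class.

	#include <boost/asio.hpp>
	#include <iostream>

	void enable_keep_alive(boost::asio::ip::tcp::socket& socket)
	{
		// Non-throwing overload: collect the error instead of raising an exception.
		boost::system::error_code ec;
		socket.set_option(boost::asio::socket_base::keep_alive(true), ec);

		if (ec)
			std::cerr << "Failed to enable TCP keep-alive: " << ec.message() << "\n";
	}
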
index 9a5be08856f8a8d42f61a3bf7637b8c6a471437f..c549e44aa130cf0ce449d7d9aa52bae7000e153b 100644 (file)
 <!--\r
 <log-level>           info  [trace|debug|info|warning|error|fatal]</log-level>\r
 <log-categories>      communication  [calltrace|communication|calltrace,communication]</log-categories>\r
-<force-deinterlace>   true  [true|false]</force-deinterlacing>\r
+<force-deinterlace>   false  [true|false]</force-deinterlace>\r
 <channel-grid>        false [true|false]</channel-grid>\r
 <mixer>\r
     <blend-modes>          false [true|false]</blend-modes>\r
-    <mipmapping_default_on>false [true|false]</mipmapping_default_on>\r
+    <mipmapping-default-on>false [true|false]</mipmapping-default-on>\r
     <straight-alpha>       false [true|false]</straight-alpha>\r
 </mixer>\r
-<auto-transcode>      true  [true|false]</auto-transcode>\r
 <accelerator>auto [cpu|gpu|auto]</accelerator>\r
 <template-hosts>\r
     <template-host>\r
-        <video-mode/>\r
-        <filename/>\r
-        <width/>\r
-        <height/>\r
+        <video-mode />\r
+        <filename />\r
+        <width />\r
+        <height />\r
     </template-host>\r
 </template-hosts>\r
 <flash>\r
     <buffer-depth>auto [auto|1..]</buffer-depth>\r
 </flash>\r
+<html>\r
+    <remote-debugging-port>0 [0|1024-65535]</remote-debugging-port>\r
+</html>\r
 <thumbnails>\r
     <generate-thumbnails>true [true|false]</generate-thumbnails>\r
     <width>256</width>\r
                 <borderless>false [true|false]</borderless>\r
             </screen>\r
             <newtek-ivga></newtek-ivga>\r
-            <file>\r
-                <path></path>\r
-                <vcodec>libx264 [libx264|qtrle]</vcodec>\r
+            <ffmpeg>\r
+                <path>[file|url]</path>\r
+                <args>[most ffmpeg arguments related to filtering and output codecs]</args>\r
                 <separate-key>false [true|false]</separate-key>\r
-            </file>\r
-            <stream>\r
-                <path>udp://localhost:9250</path>\r
-                <args>-format mpegts -vcodec libx264 -crf 25 -tune zerolatency -preset ultrafast</args>\r
-            </stream>\r
+                <mono-streams>false [true|false]</mono-streams>\r
+            </ffmpeg>\r
+            <syncto>\r
+                <channel-id>1</channel-id>\r
+            </syncto>\r
         </consumers>\r
     </channel>\r
 </channels>\r
 <osc>\r
   <default-port>6250</default-port>\r
+  <disable-send-to-amcp-clients>false [true|false]</disable-send-to-amcp-clients>\r
   <predefined-clients>\r
     <predefined-client>\r
       <address>127.0.0.1</address>\r
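
Several of the settings introduced above, such as the OSC disable-send-to-amcp-clients flag, are optional and fall back to defaults; they are read later in this commit with boost::property_tree's defaulted get. A minimal sketch of that read, assuming pt already holds the parsed casparcg.config tree.

	#include <boost/property_tree/ptree.hpp>

	struct osc_settings
	{
		unsigned short default_port;
		bool           disable_send_to_amcp_clients;
	};

	// Reads the OSC block with the same defaults used further down in server.cpp.
	osc_settings read_osc_settings(const boost::property_tree::wptree& pt)
	{
		osc_settings settings;
		settings.default_port                 = pt.get<unsigned short>(L"configuration.osc.default-port", 6250);
		settings.disable_send_to_amcp_clients = pt.get(L"configuration.osc.disable-send-to-amcp-clients", false);
		return settings;
	}
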
index d742cb0d9c633f6fe2361cae894035aaa11178f3..7db474e7e21f990dcbc663cd2b3788f5f9b069cd 100644 (file)
@@ -19,9 +19,9 @@
 * Author: Robert Nagy, ronag89@gmail.com
 */
 
-// tbbmalloc_proxy: 
-// Replace the standard memory allocation routines in Microsoft* C/C++ RTL 
-// (malloc/free, global new/delete, etc.) with the TBB memory allocator. 
+// tbbmalloc_proxy:
+// Replace the standard memory allocation routines in Microsoft* C/C++ RTL
+// (malloc/free, global new/delete, etc.) with the TBB memory allocator.
 
 #include "stdafx.h"
 
 #include <set>
 
 #include <csignal>
+#include <clocale>
 
 using namespace caspar;
-       
+
 void setup_global_locale()
 {
        boost::locale::generator gen;
        gen.categories(boost::locale::codepage_facet);
 
        std::locale::global(gen(""));
+
+       // sscanf is used in, for example, FFmpeg, where we want decimals to be parsed with '.' regardless of the system locale.
+       std::setlocale(LC_ALL, "C");
 }
 
 void print_info()
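
The added std::setlocale(LC_ALL, "C") pins the C library's numeric locale, which matters because sscanf honours LC_NUMERIC: in a comma-decimal locale, parsing "29.97" stops at the '.'. A minimal sketch of the difference; the German locale is only an example and must be installed for the second branch to run.

	#include <clocale>
	#include <cstdio>

	int main()
	{
		double fps = 0.0;

		std::setlocale(LC_ALL, "C");
		std::sscanf("29.97", "%lf", &fps);
		std::printf("C locale: %f\n", fps);                  // 29.970000

		// With a comma-decimal locale (if installed), '.' is not the decimal separator:
		if (std::setlocale(LC_ALL, "de_DE.UTF-8"))
		{
			fps = 0.0;
			std::sscanf("29.97", "%lf", &fps);
			std::printf("comma-decimal locale: %f\n", fps);  // parsing stops before ".97", so fps is 29
		}
	}
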
@@ -110,11 +114,12 @@ void do_run(
                std::promise<bool>& shutdown_server_now,
                tbb::atomic<bool>& should_wait_for_keypress)
 {
+       ensure_gpf_handler_installed_for_thread("Console thread");
        std::wstring wcmd;
        while(true)
        {
                std::getline(std::wcin, wcmd); // TODO: It's blocking...
-                               
+
                //boost::to_upper(wcmd);
 
                if(boost::iequals(wcmd, L"EXIT") || boost::iequals(wcmd, L"Q") || boost::iequals(wcmd, L"QUIT") || boost::iequals(wcmd, L"BYE"))
@@ -139,11 +144,11 @@ void do_run(
                        else if(wcmd.substr(0, 1) == L"5")
                        {
                                auto file = wcmd.substr(2, wcmd.length()-1);
-                               wcmd = L"PLAY 1-1 " + file + L" LOOP\r\n" 
-                                               L"PLAY 1-2 " + file + L" LOOP\r\n" 
+                               wcmd = L"PLAY 1-1 " + file + L" LOOP\r\n"
+                                               L"PLAY 1-2 " + file + L" LOOP\r\n"
                                                L"PLAY 1-3 " + file + L" LOOP\r\n"
-                                               L"PLAY 2-1 " + file + L" LOOP\r\n" 
-                                               L"PLAY 2-2 " + file + L" LOOP\r\n" 
+                                               L"PLAY 2-1 " + file + L" LOOP\r\n"
+                                               L"PLAY 2-2 " + file + L" LOOP\r\n"
                                                L"PLAY 2-3 " + file + L" LOOP\r\n";
                        }
                        else if(wcmd.substr(0, 1) == L"7")
@@ -217,6 +222,9 @@ bool run(const std::wstring& config_file_name, tbb::atomic<bool>& should_wait_fo
        // Create server object which initializes channels, protocols and controllers.
        std::unique_ptr<server> caspar_server(new server(shutdown_server_now));
 
+       // CEF, for example, resets the global locale, so reset it back to our preferred locale here.
+       setup_global_locale();
+
        // Print environment information.
        print_system_info(caspar_server->get_system_info_provider_repo());
 
@@ -224,7 +232,7 @@ bool run(const std::wstring& config_file_name, tbb::atomic<bool>& should_wait_fo
        boost::property_tree::xml_writer_settings<std::wstring> w(' ', 3);
        boost::property_tree::write_xml(str, env::properties(), w);
        CASPAR_LOG(info) << config_file_name << L":\n-----------------------------------------\n" << str.str() << L"-----------------------------------------";
-       
+
        {
                CASPAR_SCOPED_CONTEXT_MSG(config_file_name + L": ")
                caspar_server->start();
@@ -242,7 +250,7 @@ bool run(const std::wstring& config_file_name, tbb::atomic<bool>& should_wait_fo
                                                        caspar_server->get_amcp_command_repository())))->create(console_client);
        std::weak_ptr<IO::protocol_strategy<wchar_t>> weak_amcp = amcp;
 
-       // Use separate thread for the blocking console input, will be terminated 
+       // Use separate thread for the blocking console input, will be terminated
        // anyway when the main thread terminates.
        boost::thread stdin_thread(std::bind(do_run, weak_amcp, std::ref(shutdown_server_now), std::ref(should_wait_for_keypress)));    //compiler didn't like lambda here...
        stdin_thread.detach();
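
do_run is handed only a std::weak_ptr to the AMCP strategy, so the detached console thread never extends the protocol's lifetime; each line of input has to lock() the pointer first. A minimal sketch of that guard, with hypothetical names.

	#include <iostream>
	#include <memory>
	#include <string>

	// Hypothetical stand-in for IO::protocol_strategy<wchar_t>.
	struct strategy
	{
		void parse(const std::wstring& cmd) { std::wcout << L"parsed: " << cmd; }
	};

	void handle_console_line(const std::weak_ptr<strategy>& weak_amcp, const std::wstring& line)
	{
		if (auto amcp = weak_amcp.lock())   // still alive, i.e. the server has not shut down
			amcp->parse(line + L"\r\n");
		// otherwise drop the input; the detached thread dies with the process
	}
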
@@ -271,7 +279,7 @@ int main(int argc, char** argv)
        setup_global_locale();
 
        std::wcout << L"Type \"q\" to close application." << std::endl;
-       
+
        // Set debug mode.
        auto debugging_environment = setup_debugging_environment();
 
@@ -293,8 +301,8 @@ int main(int argc, char** argv)
 
        tbb::task_scheduler_init init;
        std::wstring config_file_name(L"casparcg.config");
-       
-       try 
+
+       try
        {
                // Configure environment properties from configuration.
                if (argc >= 2)
@@ -316,7 +324,10 @@ int main(int argc, char** argv)
                log::add_file_sink(env::log_folder() + L"caspar",               caspar::log::category != caspar::log::log_category::calltrace);
                log::add_file_sink(env::log_folder() + L"calltrace",    caspar::log::category == caspar::log::log_category::calltrace);
                std::wcout << L"Logging [info] or higher severity to " << env::log_folder() << std::endl << std::endl;
-               
+
+               // Now that file logging is set up, log any configuration warnings.
+               env::log_configuration_warnings();
+
                // Setup console window.
                setup_console_window();
 
@@ -330,7 +341,7 @@ int main(int argc, char** argv)
                        if (thread->name != "main thread" && thread->name != "tbb-worker-thread")
                                CASPAR_LOG(warning) << L"Thread left running: " << thread->name << L" (" << thread->native_id << L")";
                }
-               
+
                CASPAR_LOG(info) << "Successfully shutdown CasparCG Server.";
 
                if (should_wait_for_keypress)
@@ -338,14 +349,12 @@ int main(int argc, char** argv)
        }
        catch(const boost::property_tree::file_parser_error& e)
        {
-               CASPAR_LOG_CURRENT_EXCEPTION();
                CASPAR_LOG(fatal) << "At " << u8(config_file_name) << ":" << e.line() << ": " << e.message() << ". Please check the configuration file (" << u8(config_file_name) << ") for errors.";
                wait_for_keypress();
        }
        catch (const user_error& e)
        {
-               CASPAR_LOG_CURRENT_EXCEPTION_AT_LEVEL(debug);
-               CASPAR_LOG(fatal) << get_message_and_context(e) << " Please check the configuration file (" << u8(config_file_name) << ") for errors. Turn on log level debug for stacktrace.";
+               CASPAR_LOG(fatal) << get_message_and_context(e) << " Please check the configuration file (" << u8(config_file_name) << ") for errors.";
                wait_for_keypress();
        }
        catch(...)
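
Configuration parse failures are now reported without a stack trace, relying on the location carried by boost::property_tree::file_parser_error. A minimal standalone sketch of surfacing those fields; the file name is a placeholder.

	#include <boost/property_tree/ptree.hpp>
	#include <boost/property_tree/xml_parser.hpp>
	#include <iostream>

	int main()
	{
		boost::property_tree::wptree pt;

		try
		{
			boost::property_tree::read_xml("casparcg.config", pt);   // placeholder path
		}
		catch (const boost::property_tree::file_parser_error& e)
		{
			// filename(), line() and message() pinpoint the problem without a stack trace.
			std::cerr << "At " << e.filename() << ":" << e.line() << ": " << e.message()
			          << ". Please check the configuration file for errors.\n";
			return 1;
		}
	}
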
index 2c2c5f3d36be07e467ff86a8f2b877acddcd7177..172979fdd239d402baafce2c80b245f4f6ea1138 100644 (file)
@@ -44,6 +44,7 @@
 #include <core/producer/text/text_producer.h>
 #include <core/producer/color/color_producer.h>
 #include <core/consumer/output.h>
+#include <core/consumer/syncto/syncto_consumer.h>
 #include <core/mixer/mixer.h>
 #include <core/mixer/image/image_mixer.h>
 #include <core/thumbnail_generator.h>
@@ -108,12 +109,15 @@ std::shared_ptr<boost::asio::io_service> create_running_io_service()
                                CASPAR_LOG_CURRENT_EXCEPTION();
                        }
                }
+
+               CASPAR_LOG(info) << "[asio] Global io_service uninitialized.";
        });
 
        return std::shared_ptr<boost::asio::io_service>(
                        service.get(),
                        [service, work, thread](void*) mutable
                        {
+                               CASPAR_LOG(info) << "[asio] Shutting down global io_service.";
                                work.reset();
                                service->stop();
                                if (thread->get_id() != boost::this_thread::get_id())
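
create_running_io_service keeps the service, its work guard and the runner thread alive through the captures of the returned shared_ptr's deleter; shutdown happens when the last reference is dropped. A minimal sketch of that pattern using the same pre-Boost-1.66 io_service/work API; the detach branch at the end is an assumption, since the hunk above is cut off after the thread-id check.

	#include <boost/asio.hpp>
	#include <boost/thread.hpp>
	#include <memory>

	std::shared_ptr<boost::asio::io_service> create_running_io_service()
	{
		auto service = std::make_shared<boost::asio::io_service>();
		auto work    = std::make_shared<boost::asio::io_service::work>(*service);    // keeps run() from returning
		auto thread  = std::make_shared<boost::thread>([service] { service->run(); });

		// Lifetime is carried by the deleter's captures, not by the pointee.
		return std::shared_ptr<boost::asio::io_service>(
				service.get(),
				[service, work, thread](void*) mutable
				{
					work.reset();       // allow run() to finish once pending handlers drain
					service->stop();
					if (thread->get_id() != boost::this_thread::get_id())
						thread->join();
					else
						thread->detach();   // assumption: avoid self-join if released on the runner thread
				});
	}
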
@@ -146,7 +150,7 @@ struct server::impl : boost::noncopyable
        std::shared_ptr<thumbnail_generator>                            thumbnail_generator_;
        std::promise<bool>&                                                                     shutdown_server_now_;
 
-       explicit impl(std::promise<bool>& shutdown_server_now)          
+       explicit impl(std::promise<bool>& shutdown_server_now)
                : accelerator_(env::properties().get(L"configuration.accelerator", L"auto"))
                , media_info_repo_(create_in_memory_media_info_repository())
                , producer_registry_(spl::make_shared<core::frame_producer_registry>(help_repo_))
@@ -168,6 +172,7 @@ struct server::impl : boost::noncopyable
                initialize_modules(dependencies);
                core::text::init(dependencies);
                core::scene::init(dependencies);
+               core::syncto::init(dependencies);
                help_repo_->register_item({ L"producer" }, L"Color Producer", &core::describe_color_producer);
        }
 
@@ -212,7 +217,7 @@ struct server::impl : boost::noncopyable
                destroy_producers_synchronously();
                destroy_consumers_synchronously();
                channels_.clear();
-               
+
                while (weak_io_service.lock())
                        boost::this_thread::sleep_for(boost::chrono::milliseconds(100));
 
@@ -248,10 +253,14 @@ struct server::impl : boost::noncopyable
        }
 
        void setup_channels(const boost::property_tree::wptree& pt)
-       {   
+       {
                using boost::property_tree::wptree;
+
+               std::vector<wptree> xml_channels;
+
                for (auto& xml_channel : pt | witerate_children(L"configuration.channels") | welement_context_iteration)
                {
+                       xml_channels.push_back(xml_channel.second);
                        ptree_verify_element_name(xml_channel, L"channel");
 
                        auto format_desc_str = xml_channel.second.get(L"video-mode", L"PAL");
@@ -267,17 +276,24 @@ struct server::impl : boost::noncopyable
                        auto channel_id = static_cast<int>(channels_.size() + 1);
                        auto channel = spl::make_shared<video_channel>(channel_id, format_desc, *channel_layout, accelerator_.create_image_mixer(channel_id));
 
+                       channel->monitor_output().attach_parent(monitor_subject_);
+                       channel->mixer().set_straight_alpha_output(xml_channel.second.get(L"straight-alpha-output", false));
+                       channels_.push_back(channel);
+               }
+
+               for (auto& channel : channels_)
+               {
                        core::diagnostics::scoped_call_context save;
                        core::diagnostics::call_context::for_thread().video_channel = channel->index();
 
-                       for (auto& xml_consumer : xml_channel.second | witerate_children(L"consumers") | welement_context_iteration)
+                       for (auto& xml_consumer : xml_channels.at(channel->index() - 1) | witerate_children(L"consumers") | welement_context_iteration)
                        {
                                auto name = xml_consumer.first;
 
                                try
                                {
                                        if (name != L"<xmlcomment>")
-                                               channel->output().add(consumer_registry_->create_consumer(name, xml_consumer.second, &channel->stage()));
+                                               channel->output().add(consumer_registry_->create_consumer(name, xml_consumer.second, &channel->stage(), channels_));
                                }
                                catch (const user_error& e)
                                {
@@ -288,11 +304,7 @@ struct server::impl : boost::noncopyable
                                {
                                        CASPAR_LOG_CURRENT_EXCEPTION();
                                }
-                       }               
-
-                   channel->monitor_output().attach_parent(monitor_subject_);
-                       channel->mixer().set_straight_alpha_output(xml_channel.second.get(L"straight-alpha-output", false));
-                       channels_.push_back(channel);
+                       }
                }
 
                // Dummy diagnostics channel
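
Channel construction and consumer creation are now two separate passes, so every consumer factory receives the complete channels_ list, presumably so the new syncto consumer can target a channel declared later in the configuration. A minimal sketch of the two-pass shape, with hypothetical types.

	#include <functional>
	#include <memory>
	#include <vector>

	struct channel       { int index; };                 // stand-in for core::video_channel
	struct consumer_conf { int owner; };                 // stand-in for the per-consumer ptree

	using channel_list = std::vector<std::shared_ptr<channel>>;

	void setup_channels(
			int channel_count,
			const std::vector<consumer_conf>& consumer_configs,
			const std::function<void(const consumer_conf&, const channel_list&)>& create_consumer)
	{
		channel_list channels;

		// Pass 1: construct every channel first.
		for (int i = 1; i <= channel_count; ++i)
			channels.push_back(std::make_shared<channel>(channel{ i }));

		// Pass 2: create consumers, now that any channel can be looked up.
		for (auto& conf : consumer_configs)
			create_consumer(conf, channels);
	}
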
@@ -309,14 +321,16 @@ struct server::impl : boost::noncopyable
        }
 
        void setup_osc(const boost::property_tree::wptree& pt)
-       {               
+       {
                using boost::property_tree::wptree;
                using namespace boost::asio::ip;
 
                monitor_subject_->attach_parent(osc_client_->sink());
-               
+
                auto default_port =
                                pt.get<unsigned short>(L"configuration.osc.default-port", 6250);
+               auto disable_send_to_amcp_clients =
+                               pt.get(L"configuration.osc.disable-send-to-amcp-clients", false);
                auto predefined_clients =
                                pt.get_child_optional(L"configuration.osc.predefined-clients");
 
@@ -337,7 +351,7 @@ struct server::impl : boost::noncopyable
                        }
                }
 
-               if (primary_amcp_server_)
+               if (!disable_send_to_amcp_clients && primary_amcp_server_)
                        primary_amcp_server_->add_client_lifecycle_object_factory(
                                        [=] (const std::string& ipv4_address)
                                                        -> std::pair<std::wstring, std::shared_ptr<void>>
@@ -363,9 +377,9 @@ struct server::impl : boost::noncopyable
 
                polling_filesystem_monitor_factory monitor_factory(io_service_, scan_interval_millis);
                thumbnail_generator_.reset(new thumbnail_generator(
-                       monitor_factory, 
+                       monitor_factory,
                        env::media_folder(),
-                       env::thumbnails_folder(),
+                       env::thumbnail_folder(),
                        pt.get(L"configuration.thumbnails.width", 256),
                        pt.get(L"configuration.thumbnails.height", 144),
                        core::video_format_desc(pt.get(L"configuration.thumbnails.video-mode", L"720p2500")),
@@ -376,7 +390,7 @@ struct server::impl : boost::noncopyable
                        producer_registry_,
                        pt.get(L"configuration.thumbnails.mipmap", true)));
        }
-               
+
        void setup_controllers(const boost::property_tree::wptree& pt)
        {
                amcp_command_repo_ = spl::make_shared<amcp::amcp_command_repository>(
@@ -399,7 +413,7 @@ struct server::impl : boost::noncopyable
                        auto protocol = ptree_get<std::wstring>(xml_controller.second, L"protocol");
 
                        if(name == L"tcp")
-                       {                                       
+                       {
                                auto port = ptree_get<unsigned int>(xml_controller.second, L"port");
                                auto asyncbootstrapper = spl::make_shared<IO::AsyncEventServer>(
                                                io_service_,
@@ -411,7 +425,7 @@ struct server::impl : boost::noncopyable
                                        primary_amcp_server_ = asyncbootstrapper;
                        }
                        else
-                               CASPAR_LOG(warning) << "Invalid controller: " << name;  
+                               CASPAR_LOG(warning) << "Invalid controller: " << name;
                }
        }
 
index 75c35dd54d5ed03976150f7b9a6e01ce40d1565c..9d4367974a9dafe9bc851b2b0e9572c6cc18842b 100644 (file)
Binary files a/shell/shell.rc and b/shell/shell.rc differ
index 88529a9bbd160813ea7c3cca9da3912d3bb19612..456a2b3aa2de223ee566983c1989ff311ef5619b 100644 (file)
@@ -1,5 +1,6 @@
 #define CASPAR_GEN 2\r
 #define CASPAR_MAYOR 1\r
 #define CASPAR_MINOR 0\r
-#define CASPAR_TAG "UNSTABLE"\r
-#define CASPAR_REV "${GIT_REV}"\r
+#define CASPAR_TAG "Beta 1"\r
+#define CASPAR_REV ${GIT_REV}\r
+#define CASPAR_HASH "${GIT_HASH}"\r
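
CASPAR_REV is no longer quoted and a CASPAR_HASH macro is added. A hypothetical sketch of combining these into a display string, with placeholder values substituted for the CMake variables; the unquoted revision is stringized at the use site.

	#include <iostream>

	#define CASPAR_GEN 2
	#define CASPAR_MAYOR 1
	#define CASPAR_MINOR 0
	#define CASPAR_TAG "Beta 1"
	#define CASPAR_REV 12345                 // placeholder for ${GIT_REV}
	#define CASPAR_HASH "abcdef0"            // placeholder for ${GIT_HASH}

	#define CASPAR_STR2(x) #x
	#define CASPAR_STR(x)  CASPAR_STR2(x)    // stringize the unquoted revision

	int main()
	{
		std::cout << CASPAR_GEN << "." << CASPAR_MAYOR << "." << CASPAR_MINOR
		          << " " << CASPAR_TAG
		          << " rev " << CASPAR_STR(CASPAR_REV)
		          << " (" << CASPAR_HASH << ")\n";
	}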