#pragma warning (push)
#pragma warning (disable : 4244)
#endif
-extern "C"
+extern "C"
{
#include <libswscale/swscale.h>
#include <libavcodec/avcodec.h>
#endif
namespace caspar { namespace accelerator { namespace cpu {
-
+
struct item
{
core::pixel_format_desc pix_desc = core::pixel_format::invalid;
{
return !(lhs == rhs);
}
-
+
// 100% accurate blending with correct rounding.
//
// Premultiplied-alpha "over" composite of 4 packed BGRA pixels (16 bytes):
//   C(S, D) = S + D - (((T >> 8) + T) >> 8)
//   T(S, D) = S * D[A] + 0x80
// where ((T >> 8) + T) >> 8 is the exact, rounding-correct substitute for T / 255.
// NOTE(review): this span contained interleaved unified-diff -/+ lines; resolved
// to the post-change side without altering the arithmetic.
inline xmm::s8_x blend(xmm::s8_x d, xmm::s8_x s)
{
	using namespace xmm;

	// Broadcast each pixel's alpha byte (offset 3 within each 4-byte pixel)
	// across all 4 channels of that pixel.
	auto aaaa = s8_x::shuffle(d, s8_x(15, 15, 15, 15, 11, 11, 11, 11, 7, 7, 7, 7, 3, 3, 3, 3));
	d = s8_x(u8_x::min(u8_x(d), u8_x(aaaa))); // Overflow guard. Some source files have color values which incorrectly exceed pre-multiplied alpha values, e.g. red(255) > alpha(254).
	auto xaxa = s16_x(aaaa) >> 8;

	// Multiply even and odd bytes separately in 16-bit lanes, adding the
	// 0x80 rounding bias from the formula above.
	auto t1 = s16_x::multiply_low(s16_x(s) & 0x00FF, xaxa) + 0x80;
	auto t2 = s16_x::multiply_low(s16_x(s) >> 8, xaxa) + 0x80;

	// Fold the /255 correction and re-interleave the even/odd byte results.
	auto xyxy = s8_x(((t1 >> 8) + t1) >> 8);
	auto yxyx = s8_x((t2 >> 8) + t2);
	auto argb = s8_x::blend(xyxy, yxyx, s8_x(-1, 0, -1, 0));

	return s8_x(s) + (d - argb);
}
-
+
template<typename temporal, typename alignment>
static void kernel(uint8_t* dest, const uint8_t* source, size_t count)
-{
+{
using namespace xmm;
- for(auto n = 0; n < count; n += 32)
+ for(auto n = 0; n < count; n += 32)
{
auto s0 = s8_x::load<temporal_tag, alignment>(dest+n+0);
auto s1 = s8_x::load<temporal_tag, alignment>(dest+n+16);
auto d0 = s8_x::load<temporal_tag, alignment>(source+n+0);
auto d1 = s8_x::load<temporal_tag, alignment>(source+n+16);
-
+
auto argb0 = blend(d0, s0);
auto argb1 = blend(d1, s1);
s8_x::store<temporal, alignment>(argb0, dest+n+0 );
s8_x::store<temporal, alignment>(argb1, dest+n+16);
- }
+ }
}
template<typename temporal>
static void kernel(uint8_t* dest, const uint8_t* source, size_t count)
-{
+{
using namespace xmm;
if(reinterpret_cast<std::uint64_t>(dest) % 16 != 0 || reinterpret_cast<std::uint64_t>(source) % 16 != 0)
tbb::concurrent_unordered_map<int64_t, tbb::concurrent_bounded_queue<std::shared_ptr<SwsContext>>> sws_devices_;
tbb::concurrent_bounded_queue<spl::shared_ptr<buffer>> temp_buffers_;
core::video_format_desc format_desc_;
-public:
+public:
std::future<array<const std::uint8_t>> operator()(std::vector<item> items, const core::video_format_desc& format_desc)
{
if (format_desc != format_desc_)
sws_devices_.clear();
}
- convert(items, format_desc.width, format_desc.height);
-
+ convert(items, format_desc.width, format_desc.height);
+
// Remove first field stills.
boost::range::remove_erase_if(items, [&](const item& item)
{
return item.transform.is_still && item.transform.field_mode == format_desc.field_mode; // only us last field for stills.
});
-
+
// Stills are progressive
for (auto& item : items)
{
auto result = spl::make_shared<buffer>(format_desc.size, 0);
if(format_desc.field_mode != core::field_mode::progressive)
- {
+ {
draw(items, result->data(), format_desc.width, format_desc.height, core::field_mode::upper);
draw(items, result->data(), format_desc.width, format_desc.height, core::field_mode::lower);
}
}
temp_buffers_.clear();
-
+
return make_ready_future(array<const std::uint8_t>(result->data(), format_desc.size, true, result));
}
private:
void draw(std::vector<item> items, uint8_t* dest, std::size_t width, std::size_t height, core::field_mode field_mode)
- {
+ {
for (auto& item : items)
item.transform.field_mode &= field_mode;
-
+
// Remove empty items.
boost::range::remove_erase_if(items, [&](const item& item)
{
if(items.empty())
return;
-
+
auto start = field_mode == core::field_mode::lower ? 1 : 0;
auto step = field_mode == core::field_mode::progressive ? 1 : 2;
-
+
// TODO: Add support for fill translations.
// TODO: Add support for mask rect.
// TODO: Add support for opacity.
for(std::size_t n = 0; n < items.size()-1; ++n)
kernel<xmm::temporal_tag>(dest + y*width*4, items[n].data.at(0) + y*width*4, width*4);
-
- std::size_t n = items.size()-1;
+
+ std::size_t n = items.size()-1;
kernel<xmm::nontemporal_tag>(dest + y*width*4, items[n].data.at(0) + y*width*4, width*4);
}
_mm_mfence();
});
}
-
+
void convert(std::vector<item>& source_items, int width, int height)
{
std::set<std::array<const uint8_t*, 4>> buffers;
for (auto& item : source_items)
buffers.insert(item.data);
-
+
auto dest_items = source_items;
tbb::parallel_for_each(buffers.begin(), buffers.end(), [&](const std::array<const uint8_t*, 4>& data)
- {
+ {
auto pix_desc = std::find_if(source_items.begin(), source_items.end(), [&](const item& item){return item.data == data;})->pix_desc;
- if(pix_desc.format == core::pixel_format::bgra &&
+ if(pix_desc.format == core::pixel_format::bgra &&
pix_desc.planes.at(0).width == width &&
pix_desc.planes.at(0).height == height)
return;
auto input_av_frame = ffmpeg::make_av_frame(data2, pix_desc);
-
- int64_t key = ((static_cast<int64_t>(input_av_frame->width) << 32) & 0xFFFF00000000) |
- ((static_cast<int64_t>(input_av_frame->height) << 16) & 0xFFFF0000) |
+
+ int64_t key = ((static_cast<int64_t>(input_av_frame->width) << 32) & 0xFFFF00000000) |
+ ((static_cast<int64_t>(input_av_frame->height) << 16) & 0xFFFF0000) |
((static_cast<int64_t>(input_av_frame->format) << 8) & 0xFF00);
auto& pool = sws_devices_[key];
if(!pool.try_pop(sws_device))
{
double param;
- sws_device.reset(sws_getContext(input_av_frame->width, input_av_frame->height, static_cast<PixelFormat>(input_av_frame->format), width, height, PIX_FMT_BGRA, SWS_BILINEAR, nullptr, nullptr, ¶m), sws_freeContext);
+ sws_device.reset(sws_getContext(input_av_frame->width, input_av_frame->height, static_cast<AVPixelFormat>(input_av_frame->format), width, height, AVPixelFormat::AV_PIX_FMT_BGRA, SWS_BILINEAR, nullptr, nullptr, ¶m), sws_freeContext);
}
-
- if(!sws_device)
- CASPAR_THROW_EXCEPTION(operation_failed() << msg_info("Could not create software scaling device.") << boost::errinfo_api_function("sws_getContext"));
-
+
+ if(!sws_device)
+ CASPAR_THROW_EXCEPTION(operation_failed() << msg_info("Could not create software scaling device.") << boost::errinfo_api_function("sws_getContext"));
+
auto dest_frame = spl::make_shared<buffer>(width*height*4);
temp_buffers_.push(dest_frame);
{
auto dest_av_frame = ffmpeg::create_frame();
- avpicture_fill(reinterpret_cast<AVPicture*>(dest_av_frame.get()), dest_frame->data(), PIX_FMT_BGRA, width, height);
-
- sws_scale(sws_device.get(), input_av_frame->data, input_av_frame->linesize, 0, input_av_frame->height, dest_av_frame->data, dest_av_frame->linesize);
+ avpicture_fill(reinterpret_cast<AVPicture*>(dest_av_frame.get()), dest_frame->data(), AVPixelFormat::AV_PIX_FMT_BGRA, width, height);
+
+ sws_scale(sws_device.get(), input_av_frame->data, input_av_frame->linesize, 0, input_av_frame->height, dest_av_frame->data, dest_av_frame->linesize);
pool.push(sws_device);
}
-
+
for(std::size_t n = 0; n < source_items.size(); ++n)
{
if(source_items[n].data == data)
dest_items[n].transform = source_items[n].transform;
}
}
- });
+ });
source_items = std::move(dest_items);
}
};
-
+
struct image_mixer::impl : boost::noncopyable
-{
+{
image_renderer renderer_;
std::vector<core::image_transform> transform_stack_;
std::vector<item> items_; // layer/stream/items
public:
impl(int channel_id)
- : transform_stack_(1)
+ : transform_stack_(1)
{
CASPAR_LOG(info) << L"Initialized Streaming SIMD Extensions Accelerated CPU Image Mixer for channel " << channel_id;
}
-
+
// Composes the incoming image transform with the current top of the
// stack and pushes the combined result.
void push(const core::frame_transform& transform)
{
	auto combined = transform_stack_.back() * transform.image_transform;
	transform_stack_.push_back(combined);
}
-
+
void visit(const core::const_frame& frame)
- {
+ {
if(frame.pixel_format_desc().format == core::pixel_format::invalid)
return;
if(frame.pixel_format_desc().planes.empty())
return;
-
+
if(frame.pixel_format_desc().planes.at(0).size < 16)
return;
item.pix_desc = frame.pixel_format_desc();
item.transform = transform_stack_.back();
for(int n = 0; n < item.pix_desc.planes.size(); ++n)
- item.data.at(n) = frame.image_data(n).begin();
+ item.data.at(n) = frame.image_data(n).begin();
items_.push_back(item);
}
{
transform_stack_.pop_back();
}
-
+
// Hands the accumulated items off to the renderer for the given format and
// returns its future. items_ is moved from and left empty for the next frame.
std::future<array<const std::uint8_t>> render(const core::video_format_desc& format_desc)
{
	return renderer_(std::move(items_), format_desc);
}
-
+
core::mutable_frame create_frame(const void* tag, const core::pixel_format_desc& desc, const core::audio_channel_layout& channel_layout)
{
std::vector<array<std::uint8_t>> buffers;
video_frame->data[0] = reinterpret_cast<uint8_t*>(video_bytes);
video_frame->linesize[0] = video->GetRowBytes();
- video_frame->format = PIX_FMT_UYVY422;
+ video_frame->format = AVPixelFormat::AV_PIX_FMT_UYVY422;
video_frame->width = video->GetWidth();
video_frame->height = video->GetHeight();
video_frame->interlaced_frame = in_format_desc_.field_mode != core::field_mode::progressive;
#pragma warning(push, 1)
-extern "C"
+extern "C"
{
#define __STDC_CONSTANT_MACROS
#define __STDC_LIMIT_MACROS
#include <libavcodec/avcodec.h>
- #include <libavfilter/avcodec.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/buffersink.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libavfilter/avfilter.h>
- #include <libavfilter/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
}
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libavfilter/avfilter.h>
- #include <libavfilter/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
}
#pragma warning (push)
#pragma warning (disable : 4244)
#endif
-extern "C"
+extern "C"
{
#define __STDC_CONSTANT_MACROS
#define __STDC_LIMIT_MACROS
#endif
namespace caspar {
-
+
static const int MAX_THREADS = 16; // See mpegvideo.h
// libavcodec execute() callback: runs `func` over `count` slice jobs in
// parallel via TBB instead of ffmpeg's internal thread pool, optionally
// collecting per-job return codes into `ret`.
// NOTE(review): the opening brace and trailing `return` were lost from this
// mangled span; restored to satisfy the avcodec execute() contract
// (returns 0 on success).
int thread_execute(AVCodecContext* s, int (*func)(AVCodecContext *c2, void *arg2), void* arg, int* ret, int count, int size)
{
	tbb::parallel_for(0, count, 1, [&](int i)
	{
		int r = func(s, (char*)arg + i*size);
		if(ret)
			ret[i] = r; // Store the job's return code when the caller asked for them.
	});

	return 0;
}
// libavcodec execute2() callback: partitions `count` jobs over MAX_THREADS
// TBB tasks; `func` also receives the job number and the "thread" number.
int thread_execute2(AVCodecContext* s, int (*func)(AVCodecContext* c2, void* arg2, int, int), void* arg, int* ret, int count)
{
	// TODO: Micro-optimize...

	// Sized by MAX_THREADS rather than the magic literal 16 the original
	// used, so the two can no longer drift apart.
	std::array<std::vector<int>, MAX_THREADS> jobs;

	// Distribute job indices over the task slots.
	for(int n = 0; n < count; ++n)
		jobs[(n*MAX_THREADS) / count].push_back(n);

	tbb::parallel_for(0, MAX_THREADS, [&](int n)
	{
		for (auto k : jobs[n])
		{
			int r = func(s, arg, k, n);
			if(ret)
				ret[k] = r; // Per-job return code, indexed by job number.
		}
	});

	return 0;
}
// Installs the TBB-based executors on the codec context. The non-null
// `opaque` marker is what keeps ffmpeg's own ff_thread_init from running
// (see tbb_avcodec_open below).
void thread_init(AVCodecContext* s)
{
	static int dummy_opaque;

	s->active_thread_type = FF_THREAD_SLICE;
	s->opaque             = &dummy_opaque;
	s->execute            = thread_execute;
	s->execute2           = thread_execute2;
	s->thread_count       = MAX_THREADS; // We are using a task-scheduler, so use as many "threads/tasks" as possible.
}
// Clears the marker set by thread_init. Safe to call on contexts that
// thread_init never touched (no-op when opaque is already null).
void thread_free(AVCodecContext* s)
{
	if(!s->opaque)
		return;

	s->opaque = nullptr;
}
// Opens the codec, first wiring in TBB slice-threading when the codec
// supports it and the caller did not request single-threaded operation.
// Returns avcodec_open2's result (0 on success, negative AVERROR otherwise).
// NOTE(review): the opening brace was lost from this mangled span; restored.
int tbb_avcodec_open(AVCodecContext* avctx, AVCodec* codec, bool single_threaded)
{
	if(!single_threaded && codec->capabilities & CODEC_CAP_SLICE_THREADS)
		thread_init(avctx);

	// ff_thread_init will not be executed since thread_opaque != nullptr || thread_count == 1.
	return avcodec_open2(avctx, codec, nullptr);
}
// Closes the codec after detaching the TBB executors installed by
// thread_init (detach is a no-op if they were never installed).
int tbb_avcodec_close(AVCodecContext* avctx)
{
	thread_free(avctx);
	// ff_thread_free will not be executed since thread_opaque == nullptr.
	return avcodec_close(avctx);
}
}
return frame.top_field_first ? core::field_mode::upper : core::field_mode::lower;
}
-core::pixel_format get_pixel_format(PixelFormat pix_fmt)
+core::pixel_format get_pixel_format(AVPixelFormat pix_fmt)
{
switch(pix_fmt)
{
- case PIX_FMT_GRAY8: return core::pixel_format::gray;
- case PIX_FMT_RGB24: return core::pixel_format::rgb;
- case PIX_FMT_BGR24: return core::pixel_format::bgr;
- case PIX_FMT_BGRA: return core::pixel_format::bgra;
- case PIX_FMT_ARGB: return core::pixel_format::argb;
- case PIX_FMT_RGBA: return core::pixel_format::rgba;
- case PIX_FMT_ABGR: return core::pixel_format::abgr;
- case PIX_FMT_YUV444P: return core::pixel_format::ycbcr;
- case PIX_FMT_YUV422P: return core::pixel_format::ycbcr;
- case PIX_FMT_YUV420P: return core::pixel_format::ycbcr;
- case PIX_FMT_YUV411P: return core::pixel_format::ycbcr;
- case PIX_FMT_YUV410P: return core::pixel_format::ycbcr;
- case PIX_FMT_YUVA420P: return core::pixel_format::ycbcra;
- default: return core::pixel_format::invalid;
+ case AVPixelFormat::AV_PIX_FMT_GRAY8: return core::pixel_format::gray;
+ case AVPixelFormat::AV_PIX_FMT_RGB24: return core::pixel_format::rgb;
+ case AVPixelFormat::AV_PIX_FMT_BGR24: return core::pixel_format::bgr;
+ case AVPixelFormat::AV_PIX_FMT_BGRA: return core::pixel_format::bgra;
+ case AVPixelFormat::AV_PIX_FMT_ARGB: return core::pixel_format::argb;
+ case AVPixelFormat::AV_PIX_FMT_RGBA: return core::pixel_format::rgba;
+ case AVPixelFormat::AV_PIX_FMT_ABGR: return core::pixel_format::abgr;
+ case AVPixelFormat::AV_PIX_FMT_YUV444P: return core::pixel_format::ycbcr;
+ case AVPixelFormat::AV_PIX_FMT_YUV422P: return core::pixel_format::ycbcr;
+ case AVPixelFormat::AV_PIX_FMT_YUV420P: return core::pixel_format::ycbcr;
+ case AVPixelFormat::AV_PIX_FMT_YUV411P: return core::pixel_format::ycbcr;
+ case AVPixelFormat::AV_PIX_FMT_YUV410P: return core::pixel_format::ycbcr;
+ case AVPixelFormat::AV_PIX_FMT_YUVA420P: return core::pixel_format::ycbcra;
+ default: return core::pixel_format::invalid;
}
}
-core::pixel_format_desc pixel_format_desc(PixelFormat pix_fmt, int width, int height)
+core::pixel_format_desc pixel_format_desc(AVPixelFormat pix_fmt, int width, int height)
{
// Get linesizes
AVPicture dummy_pict;
const auto width = decoded_frame->width;
const auto height = decoded_frame->height;
- auto desc = pixel_format_desc(static_cast<PixelFormat>(decoded_frame->format), width, height);
+ auto desc = pixel_format_desc(static_cast<AVPixelFormat>(decoded_frame->format), width, height);
if(desc.format == core::pixel_format::invalid)
{
- auto pix_fmt = static_cast<PixelFormat>(decoded_frame->format);
- auto target_pix_fmt = PIX_FMT_BGRA;
-
- if(pix_fmt == PIX_FMT_UYVY422)
- target_pix_fmt = PIX_FMT_YUV422P;
- else if(pix_fmt == PIX_FMT_YUYV422)
- target_pix_fmt = PIX_FMT_YUV422P;
- else if(pix_fmt == PIX_FMT_UYYVYY411)
- target_pix_fmt = PIX_FMT_YUV411P;
- else if(pix_fmt == PIX_FMT_YUV420P10)
- target_pix_fmt = PIX_FMT_YUV420P;
- else if(pix_fmt == PIX_FMT_YUV422P10)
- target_pix_fmt = PIX_FMT_YUV422P;
- else if(pix_fmt == PIX_FMT_YUV444P10)
- target_pix_fmt = PIX_FMT_YUV444P;
+ auto pix_fmt = static_cast<AVPixelFormat>(decoded_frame->format);
+ auto target_pix_fmt = AVPixelFormat::AV_PIX_FMT_BGRA;
+
+ if(pix_fmt == AVPixelFormat::AV_PIX_FMT_UYVY422)
+ target_pix_fmt = AVPixelFormat::AV_PIX_FMT_YUV422P;
+ else if(pix_fmt == AVPixelFormat::AV_PIX_FMT_YUYV422)
+ target_pix_fmt = AVPixelFormat::AV_PIX_FMT_YUV422P;
+ else if(pix_fmt == AVPixelFormat::AV_PIX_FMT_UYYVYY411)
+ target_pix_fmt = AVPixelFormat::AV_PIX_FMT_YUV411P;
+ else if(pix_fmt == AVPixelFormat::AV_PIX_FMT_YUV420P10)
+ target_pix_fmt = AVPixelFormat::AV_PIX_FMT_YUV420P;
+ else if(pix_fmt == AVPixelFormat::AV_PIX_FMT_YUV422P10)
+ target_pix_fmt = AVPixelFormat::AV_PIX_FMT_YUV422P;
+ else if(pix_fmt == AVPixelFormat::AV_PIX_FMT_YUV444P10)
+ target_pix_fmt = AVPixelFormat::AV_PIX_FMT_YUV444P;
auto target_desc = pixel_format_desc(target_pix_fmt, width, height);
}
auto av_frame = create_frame();
- if(target_pix_fmt == PIX_FMT_BGRA)
+ if(target_pix_fmt == AVPixelFormat::AV_PIX_FMT_BGRA)
{
- auto size = avpicture_fill(reinterpret_cast<AVPicture*>(av_frame.get()), write.image_data(0).begin(), PIX_FMT_BGRA, width, height);
+ auto size = avpicture_fill(reinterpret_cast<AVPicture*>(av_frame.get()), write.image_data(0).begin(), AVPixelFormat::AV_PIX_FMT_BGRA, width, height);
CASPAR_VERIFY(size == write.image_data(0).size());
}
else
switch(format)
{
case core::pixel_format::rgb:
- av_frame->format = PIX_FMT_RGB24;
+ av_frame->format = AVPixelFormat::AV_PIX_FMT_RGB24;
break;
case core::pixel_format::bgr:
- av_frame->format = PIX_FMT_BGR24;
+ av_frame->format = AVPixelFormat::AV_PIX_FMT_BGR24;
break;
case core::pixel_format::rgba:
- av_frame->format = PIX_FMT_RGBA;
+ av_frame->format = AVPixelFormat::AV_PIX_FMT_RGBA;
break;
case core::pixel_format::argb:
- av_frame->format = PIX_FMT_ARGB;
+ av_frame->format = AVPixelFormat::AV_PIX_FMT_ARGB;
break;
case core::pixel_format::bgra:
- av_frame->format = PIX_FMT_BGRA;
+ av_frame->format = AVPixelFormat::AV_PIX_FMT_BGRA;
break;
case core::pixel_format::abgr:
- av_frame->format = PIX_FMT_ABGR;
+ av_frame->format = AVPixelFormat::AV_PIX_FMT_ABGR;
break;
case core::pixel_format::gray:
- av_frame->format = PIX_FMT_GRAY8;
+ av_frame->format = AVPixelFormat::AV_PIX_FMT_GRAY8;
break;
case core::pixel_format::ycbcr:
{
int c_h = planes[1].height;
if(c_h == y_h && c_w == y_w)
- av_frame->format = PIX_FMT_YUV444P;
+ av_frame->format = AVPixelFormat::AV_PIX_FMT_YUV444P;
else if(c_h == y_h && c_w*2 == y_w)
- av_frame->format = PIX_FMT_YUV422P;
+ av_frame->format = AVPixelFormat::AV_PIX_FMT_YUV422P;
else if(c_h == y_h && c_w*4 == y_w)
- av_frame->format = PIX_FMT_YUV411P;
+ av_frame->format = AVPixelFormat::AV_PIX_FMT_YUV411P;
else if(c_h*2 == y_h && c_w*2 == y_w)
- av_frame->format = PIX_FMT_YUV420P;
+ av_frame->format = AVPixelFormat::AV_PIX_FMT_YUV420P;
else if(c_h*2 == y_h && c_w*4 == y_w)
- av_frame->format = PIX_FMT_YUV410P;
+ av_frame->format = AVPixelFormat::AV_PIX_FMT_YUV410P;
break;
}
case core::pixel_format::ycbcra:
- av_frame->format = PIX_FMT_YUVA420P;
+ av_frame->format = AVPixelFormat::AV_PIX_FMT_YUVA420P;
break;
}
return av_frame;
spl::shared_ptr<AVFrame> make_av_frame(core::mutable_frame& frame);
spl::shared_ptr<AVFrame> make_av_frame(std::array<uint8_t*, 4> data, const core::pixel_format_desc& pix_desc);
-core::pixel_format_desc pixel_format_desc(PixelFormat pix_fmt, int width, int height);
+core::pixel_format_desc pixel_format_desc(AVPixelFormat pix_fmt, int width, int height);
spl::shared_ptr<AVPacket> create_packet();
spl::shared_ptr<AVFrame> create_frame();
auto av_frame = ffmpeg::create_frame();
av_frame->linesize[0] = format_desc_.width*4;
- av_frame->format = PIX_FMT_BGRA;
+ av_frame->format = AVPixelFormat::AV_PIX_FMT_BGRA;
av_frame->width = format_desc_.width;
- av_frame->height = format_desc_.height;
+ av_frame->height = format_desc_.height;
av_frame->interlaced_frame = format_desc_.field_mode != core::field_mode::progressive;
av_frame->top_field_first = format_desc_.field_mode == core::field_mode::upper ? 1 : 0;
av_frame->pts = pts_++;