#endif\r
\r
namespace caspar { namespace ffmpeg {\r
- \r
+\r
+// Forces the filter (yadif, below) to negotiate planar YUV 4:4:4 output.
+// Three near-identical callbacks exist because AVFilter's query_formats
+// hook takes only the context — the target format cannot be parameterized.
+static int query_formats_444(AVFilterContext *ctx)\r
+{\r
+ static const int pix_fmts[] = {PIX_FMT_YUV444P, PIX_FMT_NONE};\r
+ avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));\r
+ return 0;\r
+}\r
+\r
+// Forces the filter (yadif, below) to negotiate planar YUV 4:2:2 output;
+// used for UYVY422 and 10-bit 4:2:2 inputs.
+static int query_formats_422(AVFilterContext *ctx)\r
+{\r
+ static const int pix_fmts[] = {PIX_FMT_YUV422P, PIX_FMT_NONE};\r
+ avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));\r
+ return 0;\r
+}\r
+\r
+// Forces the filter (yadif, below) to negotiate planar YUV 4:2:0 output;
+// used for 10-bit 4:2:0 inputs.
+static int query_formats_420(AVFilterContext *ctx)\r
+{\r
+ static const int pix_fmts[] = {PIX_FMT_YUV420P, PIX_FMT_NONE};\r
+ avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));\r
+ return 0;\r
+}\r
+\r
struct filter::implementation\r
{\r
std::string filters_;\r
\r
THROW_ON_ERROR2(avfilter_graph_parse(graph_.get(), filters_.c_str(), &inputs, &outputs, NULL), "[filter]");\r
\r
+ for(size_t n = 0; n < graph_->filter_count; ++n)\r
+ {\r
+ auto filter_name = graph_->filters[n]->name;\r
+ if(strstr(filter_name, "yadif") != 0)\r
+ {\r
+ if(frame->format == PIX_FMT_UYVY422)\r
+ graph_->filters[n]->filter->query_formats = query_formats_422;\r
+ else if(frame->format == PIX_FMT_YUV420P10)\r
+ graph_->filters[n]->filter->query_formats = query_formats_420;\r
+ else if(frame->format == PIX_FMT_YUV422P10)\r
+ graph_->filters[n]->filter->query_formats = query_formats_422;\r
+ else if(frame->format == PIX_FMT_YUV444P10)\r
+ graph_->filters[n]->filter->query_formats = query_formats_444;\r
+ }\r
+ }\r
+\r
avfilter_inout_free(&inputs);\r
avfilter_inout_free(&outputs);\r
\r
for(size_t n = 0; n < graph_->filter_count; ++n)\r
{\r
auto filter_name = graph_->filters[n]->name;\r
- if(strstr(filter_name, "yadif") != 0)\r
- parallel_yadif_ctx_ = make_parallel_yadif(graph_->filters[n]);\r
+ if(strstr(filter_name, "yadif") != 0) \r
+ parallel_yadif_ctx_ = make_parallel_yadif(graph_->filters[n]); \r
}\r
}\r
catch(...)\r
if(desc.pix_fmt == core::pixel_format::invalid)\r
{\r
auto pix_fmt = static_cast<PixelFormat>(decoded_frame->format);\r
+ auto target_pix_fmt = PIX_FMT_BGRA;\r
+\r
+ if(pix_fmt == PIX_FMT_UYVY422)\r
+ target_pix_fmt = PIX_FMT_YUV422P;\r
+ else if(pix_fmt == PIX_FMT_YUV420P10)\r
+ target_pix_fmt = PIX_FMT_YUV420P;\r
+ else if(pix_fmt == PIX_FMT_YUV422P10)\r
+ target_pix_fmt = PIX_FMT_YUV422P;\r
+ else if(pix_fmt == PIX_FMT_YUV444P10)\r
+ target_pix_fmt = PIX_FMT_YUV444P;\r
+ \r
+ auto target_desc = get_pixel_format_desc(target_pix_fmt, width, height);\r
\r
- write = frame_factory->create_frame(tag, get_pixel_format_desc(PIX_FMT_BGRA, width, height));\r
+ write = frame_factory->create_frame(tag, target_desc);\r
write->set_type(get_mode(*decoded_frame));\r
\r
std::shared_ptr<SwsContext> sws_context;\r
\r
//CASPAR_LOG(warning) << "Hardware accelerated color transform not supported.";\r
\r
- size_t key = width << 20 | height << 8 | pix_fmt;\r
+ size_t key = ((width << 22) & 0xFFC00000) | ((height << 6) & 0x003FC000) | ((pix_fmt << 7) & 0x00007F00) | ((target_pix_fmt << 0) & 0x0000007F);\r
\r
auto& pool = sws_contexts_[key];\r
\r
if(!pool.try_pop(sws_context))\r
{\r
double param;\r
- sws_context.reset(sws_getContext(width, height, pix_fmt, width, height, PIX_FMT_BGRA, SWS_BILINEAR, nullptr, nullptr, ¶m), sws_freeContext);\r
+ sws_context.reset(sws_getContext(width, height, pix_fmt, width, height, target_pix_fmt, SWS_BILINEAR, nullptr, nullptr, ¶m), sws_freeContext);\r
}\r
\r
if(!sws_context)\r
BOOST_THROW_EXCEPTION(operation_failed() << msg_info("Could not create software scaling context.") << \r
boost::errinfo_api_function("sws_getContext"));\r
} \r
-\r
- // Use sws_scale when provided colorspace has no hw-accel.\r
+ \r
safe_ptr<AVFrame> av_frame(avcodec_alloc_frame(), av_free); \r
avcodec_get_frame_defaults(av_frame.get()); \r
- auto size = avpicture_fill(reinterpret_cast<AVPicture*>(av_frame.get()), write->image_data().begin(), PIX_FMT_BGRA, width, height);\r
- CASPAR_VERIFY(size == write->image_data().size()); \r
+ if(target_pix_fmt == PIX_FMT_BGRA)\r
+ {\r
+ auto size = avpicture_fill(reinterpret_cast<AVPicture*>(av_frame.get()), write->image_data().begin(), PIX_FMT_BGRA, width, height);\r
+ CASPAR_VERIFY(size == write->image_data().size()); \r
+ }\r
+ else\r
+ {\r
+ av_frame->width = width;\r
+ av_frame->height = height;\r
+ for(size_t n = 0; n < target_desc.planes.size(); ++n)\r
+ {\r
+ av_frame->data[n] = write->image_data(n).begin();\r
+ av_frame->linesize[n] = target_desc.planes[n].linesize;\r
+ }\r
+ }\r
\r
sws_scale(sws_context.get(), decoded_frame->data, decoded_frame->linesize, 0, height, av_frame->data, av_frame->linesize); \r
pool.push(sws_context);\r
\r
- write->commit();\r
+ write->commit(); \r
}\r
else\r
{\r