GL(glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE));\r
GL(glTexImage2D(GL_TEXTURE_2D, 0, INTERNAL_FORMAT[stride_], width_, height_, 0, FORMAT[stride_], GL_UNSIGNED_BYTE, NULL));\r
GL(glBindTexture(GL_TEXTURE_2D, 0));\r
- CASPAR_LOG(trace) << "[device_buffer] allocated size:" << width*height*stride; \r
+ CASPAR_LOG(debug) << "[device_buffer] allocated size:" << width*height*stride; \r
clear();\r
} \r
\r
if(!pbo_)\r
BOOST_THROW_EXCEPTION(caspar_exception() << msg_info("Failed to allocate buffer."));\r
\r
- CASPAR_LOG(trace) << "[host_buffer] allocated size:" << size_ << " usage: " << (usage == write_only ? "write_only" : "read_only");\r
+ CASPAR_LOG(debug) << "[host_buffer] allocated size:" << size_ << " usage: " << (usage == write_only ? "write_only" : "read_only");\r
} \r
\r
~implementation()\r
safe_ptr<core::basic_frame> tail_;\r
\r
std::exception_ptr exception_;\r
- std::unique_ptr<filter> filter_;\r
+ filter filter_;\r
\r
core::frame_muxer muxer_;\r
\r
, device_index_(device_index)\r
, frame_factory_(frame_factory)\r
, tail_(core::basic_frame::empty())\r
- , filter_(filter.empty() ? nullptr : new caspar::filter(filter))\r
+ , filter_(filter)\r
, muxer_(double_rate(filter) ? format_desc.fps * 2.0 : format_desc.fps, frame_factory->get_video_format_desc().mode, frame_factory->get_video_format_desc().fps)\r
{\r
frame_buffer_.set_capacity(2);\r
av_frame->height = video->GetHeight();\r
av_frame->interlaced_frame = format_desc_.mode != core::video_mode::progressive;\r
av_frame->top_field_first = format_desc_.mode == core::video_mode::upper ? 1 : 0;\r
- \r
- if(filter_)\r
- {\r
- filter_->push(av_frame);\r
- BOOST_FOREACH(auto& av_frame2, filter_->poll())\r
- muxer_.push(make_write_frame(this, av_frame2, frame_factory_));\r
- }\r
- else \r
- muxer_.push(make_write_frame(this, av_frame, frame_factory_)); \r
+ \r
+ filter_.push(av_frame);\r
+ BOOST_FOREACH(auto& av_frame2, filter_.poll())\r
+ muxer_.push(make_write_frame(this, av_frame2, frame_factory_)); \r
\r
// It is assumed that audio is always equal or ahead of video.\r
if(audio && SUCCEEDED(audio->GetBytes(&bytes)))\r
* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.\r
*\r
*/\r
+\r
#include "StdAfx.h"\r
\r
#include "consumer/ffmpeg_consumer.h"\r
#include "producer/ffmpeg_producer.h"\r
\r
+#include <common/log/log.h>\r
+\r
#include <core/consumer/frame_consumer.h>\r
#include <core/producer/frame_producer.h>\r
\r
\r
#if defined(_MSC_VER)\r
#pragma warning (disable : 4244)\r
+#pragma warning (disable : 4603)\r
+#pragma warning (disable : 4996)\r
#endif\r
\r
extern "C" \r
return 0; \r
} \r
\r
+// Replaces control bytes in a NUL-terminated byte string with '?' so that
+// terminal escape sequences embedded in FFmpeg log messages cannot corrupt
+// the log output. Bytes 0x08-0x0D (backspace..carriage-return, incl. '\n'
+// and '\t') are deliberately kept; everything else below 0x20 is masked.
+// Mirrors the sanitize() helper in FFmpeg's cmdutils.c.
+static void sanitize(uint8_t *line)
+{
+	while(*line)
+	{
+		if(*line < 0x08 || (*line > 0x0D && *line < 0x20))
+			*line='?';
+		line++;
+	}
+}
+\r
+// FFmpeg av_log() callback: formats the message with the same
+// "[item_name @ ptr] " context prefix FFmpeg's default logger uses
+// (including the parent context, if any), sanitizes control bytes, and
+// forwards the line to the CasparCG log at trace level.
+//   ptr   - optional pointer to a struct whose first member is an AVClass*
+//   level - AV_LOG_* severity; messages above av_log_get_level() are dropped
+//   fmt   - printf-style format string, arguments in vl
+void log_callback(void* ptr, int level, const char* fmt, va_list vl)\r
+{
+    // FFmpeg emits multi-part messages; only the first part of a line gets
+    // the context prefix. Tracked across calls, as in FFmpeg's own logger.
+    static int print_prefix = 1;
+    char line[8192];
+    AVClass* avc = ptr ? *(AVClass**)ptr : NULL;
+    if(level > av_log_get_level())
+        return;
+    line[0] = 0;
+
+    if(print_prefix && avc)
+    {
+        if(avc->parent_log_context_offset)
+        {
+            AVClass** parent = *(AVClass***)(((uint8_t*)ptr) + avc->parent_log_context_offset);
+            if(parent && *parent)
+                snprintf(line, sizeof(line), "[%s @ %p] ", (*parent)->item_name(parent), parent);
+        }
+        snprintf(line + strlen(line), sizeof(line) - strlen(line), "[%s @ %p] ", avc->item_name(ptr), ptr);
+    }
+
+    // vsnprintf, NOT vsprintf: the message length is caller-controlled and
+    // can exceed any fixed buffer — truncate instead of smashing the stack.
+    vsnprintf(line + strlen(line), sizeof(line) - strlen(line), fmt, vl);
+
+    print_prefix = strlen(line) && line[strlen(line) - 1] == '\n';
+
+    sanitize((uint8_t*)line);
+
+    // level>>3 maps AV_LOG_* constants onto 0..6, matching FFmpeg's own
+    // colored_fputs() class index.
+    CASPAR_LOG(trace) << L"[FFMPEG] [" << av_clip(level>>3, 0, 6) << L"] " << line;\r
+}\r
+\r
void init_ffmpeg()\r
{\r
avfilter_register_all();\r
avcodec_init();\r
avcodec_register_all();\r
av_lockmgr_register(ffmpeg_lock_callback);\r
+ av_log_set_callback(log_callback);\r
\r
core::register_consumer_factory([](const std::vector<std::wstring>& params){return create_ffmpeg_consumer(params);});\r
core::register_producer_factory(create_ffmpeg_producer);\r
AVFilterContext* buffersrc_ctx_;\r
\r
implementation(const std::wstring& filters) \r
- : filters_(narrow(filters))\r
+ : filters_(filters.empty() ? "null" : narrow(filters))\r
{\r
std::transform(filters_.begin(), filters_.end(), filters_.begin(), ::tolower);\r
}\r
inputs->next = NULL;\r
\r
errn = avfilter_graph_parse(graph_.get(), filters_.c_str(), &inputs, &outputs, NULL);\r
+\r
+ avfilter_inout_free(&inputs);\r
+ avfilter_inout_free(&outputs);\r
+\r
if(errn < 0)\r
{\r
BOOST_THROW_EXCEPTION(caspar_exception() << msg_info(av_error_str(errn)) <<\r
safe_ptr<AVFrame> frame(avcodec_alloc_frame(), [=](AVFrame* p)\r
{\r
av_free(p);\r
- avfilter_unref_buffer(picref);\r
+ avfilter_unref_buffer(picref);\r
});\r
\r
avcodec_get_frame_defaults(frame.get()); \r
\r
- for(size_t n = 0; n < 4; ++n)\r
- {\r
- frame->data[n] = picref->data[n];\r
- frame->linesize[n] = picref->linesize[n];\r
- }\r
- \r
- frame->format = picref->format;\r
- frame->width = picref->video->w;\r
- frame->height = picref->video->h;\r
- frame->interlaced_frame = picref->video->interlaced;\r
- frame->top_field_first = picref->video->top_field_first;\r
- frame->key_frame = picref->video->key_frame;\r
+ memcpy(frame->data, picref->data, sizeof(frame->data));\r
+ memcpy(frame->linesize, picref->linesize, sizeof(frame->linesize));\r
+ frame->format = picref->format;\r
+ frame->width = picref->video->w;\r
+ frame->height = picref->video->h;\r
+ frame->pkt_pos = picref->pos;\r
+ frame->interlaced_frame = picref->video->interlaced;\r
+ frame->top_field_first = picref->video->top_field_first;\r
+ frame->key_frame = picref->video->key_frame;\r
+ frame->pict_type = picref->video->pict_type;\r
+ frame->sample_aspect_ratio = picref->video->sample_aspect_ratio;\r
\r
result.push_back(frame);\r
}\r