return frame2;\r
\r
auto my_frame1 = spl::make_shared<draw_frame>(frame1);\r
- auto my_frame2 = draw_frame::mute_audio(spl::make_shared<draw_frame>(frame2));\r
+ auto my_frame2 = spl::make_shared<draw_frame>(frame2);\r
if(mode == field_mode::upper)\r
{\r
my_frame1->get_frame_transform().field_mode = field_mode::upper; \r
return spl::make_shared<draw_frame>(std::move(frames));\r
}\r
\r
-spl::shared_ptr<draw_frame> draw_frame::mute_audio(const spl::shared_ptr<draw_frame>& frame)\r
+spl::shared_ptr<draw_frame> draw_frame::mute(const spl::shared_ptr<draw_frame>& frame)\r
{\r
auto frame2 = spl::make_shared<draw_frame>(frame);\r
frame2->get_frame_transform().volume = 0.0;\r
static spl::shared_ptr<draw_frame> interlace(const spl::shared_ptr<draw_frame>& frame1, const spl::shared_ptr<draw_frame>& frame2, field_mode mode);\r
static spl::shared_ptr<draw_frame> over(const spl::shared_ptr<draw_frame>& frame1, const spl::shared_ptr<draw_frame>& frame2);\r
static spl::shared_ptr<draw_frame> mask(const spl::shared_ptr<draw_frame>& fill, const spl::shared_ptr<draw_frame>& key);\r
- static spl::shared_ptr<draw_frame> mute_audio(const spl::shared_ptr<draw_frame>& frame);\r
+ static spl::shared_ptr<draw_frame> mute(const spl::shared_ptr<draw_frame>& frame);\r
\r
static const spl::shared_ptr<draw_frame>& eof();\r
static const spl::shared_ptr<draw_frame>& empty();\r
} \r
\r
std::map<const void*, audio_stream> next_audio_streams;\r
+ std::vector<const void*> used_tags;\r
\r
BOOST_FOREACH(auto& item, items_)\r
{ \r
auto next_transform = item.transform;\r
auto prev_transform = next_transform;\r
\r
- const auto it = audio_streams_.find(item.tag);\r
+ auto tag = item.tag;\r
+\r
+ if(boost::range::find(used_tags, tag) != used_tags.end())\r
+ continue;\r
+ \r
+ used_tags.push_back(tag);\r
+\r
+ const auto it = audio_streams_.find(tag);\r
if(it != audio_streams_.end())\r
{ \r
prev_transform = it->second.prev_transform;\r
for(size_t n = 0; n < item.audio_data.size(); ++n)\r
next_audio.push_back(item.audio_data[n] * (prev_volume + (n/format_desc_.audio_channels) * alpha));\r
\r
- next_audio_streams[item.tag].prev_transform = std::move(next_transform); // Store all active tags, inactive tags will be removed at the end.\r
- next_audio_streams[item.tag].audio_data = std::move(next_audio); \r
+ next_audio_streams[tag].prev_transform = std::move(next_transform); // Store all active tags, inactive tags will be removed at the end.\r
+ next_audio_streams[tag].audio_data = std::move(next_audio); \r
} \r
\r
items_.clear();\r
try\r
{\r
if(is_paused_)\r
- return draw_frame::mute_audio(foreground_->last_frame());\r
+ return draw_frame::mute(foreground_->last_frame());\r
\r
auto frame = receive_and_follow(foreground_, flags.value());\r
if(frame == core::draw_frame::late())\r
- return draw_frame::mute_audio(foreground_->last_frame());\r
+ return draw_frame::mute(foreground_->last_frame());\r
\r
++frame_number_;\r
\r
\r
	// For interlaced transitions. Separate fields into separate frames which are transitioned accordingly.
\r
+ src_frame->get_frame_transform().volume = 1.0-delta2;\r
auto s_frame1 = spl::make_shared<draw_frame>(src_frame);\r
auto s_frame2 = spl::make_shared<draw_frame>(src_frame);\r
-\r
- s_frame1->get_frame_transform().volume = 0.0;\r
- s_frame2->get_frame_transform().volume = 1.0-delta2;\r
-\r
+ \r
+ dest_frame->get_frame_transform().volume = delta2;\r
auto d_frame1 = spl::make_shared<draw_frame>(dest_frame);\r
auto d_frame2 = spl::make_shared<draw_frame>(dest_frame);\r
\r
- d_frame1->get_frame_transform().volume = 0.0;\r
- d_frame2->get_frame_transform().volume = delta2;\r
-\r
if(info_.type == transition_type::mix)\r
{\r
d_frame1->get_frame_transform().opacity = delta1; \r
\r
virtual spl::shared_ptr<core::draw_frame> last_frame() const override\r
{\r
- return core::draw_frame::mute_audio(last_frame_);\r
+ return core::draw_frame::mute(last_frame_);\r
}\r
\r
virtual uint32_t nb_frames() const override\r