Implement fades.
author     Steinar H. Gunderson <sgunderson@bigfoot.com>
           Wed, 10 Oct 2018 22:43:20 +0000 (00:43 +0200)
committer  Steinar H. Gunderson <sgunderson@bigfoot.com>
           Wed, 10 Oct 2018 22:43:20 +0000 (00:43 +0200)
clip_list.cpp
clip_list.h
jpeg_frame_view.cpp
jpeg_frame_view.h
player.cpp
state.proto
video_stream.cpp
video_stream.h
ycbcr_converter.cpp
ycbcr_converter.h

diff --git a/clip_list.cpp b/clip_list.cpp
index 2f805756003264470a37b25682e12db736c1ac3f..cdffba9365db3306919b9cb18cd4bc4bd76eea8f 100644
@@ -122,6 +122,7 @@ QVariant PlayList::data(const QModelIndex &parent, int role) const {
                case Column::IN:
                case Column::OUT:
                case Column::DURATION:
+               case Column::FADE_TIME:
                        return Qt::AlignRight + Qt::AlignVCenter;
                case Column::CAMERA:
                        return Qt::AlignCenter;
@@ -173,6 +174,13 @@ QVariant PlayList::data(const QModelIndex &parent, int role) const {
                return qlonglong(clips[row].stream_idx + 1);
        case Column::DESCRIPTION:
                return QString::fromStdString(clips[row].descriptions[clips[row].stream_idx]);
+       case Column::FADE_TIME: {
+               stringstream ss;
+               ss.imbue(locale("C"));
+               ss.precision(3);
+               ss << fixed << clips[row].fade_time_seconds;
+               return QString::fromStdString(ss.str());
+       }
        default:
                return "";
        }
@@ -223,6 +231,8 @@ QVariant PlayList::headerData(int section, Qt::Orientation orientation, int role
                return "Camera";
        case Column::DESCRIPTION:
                return "Description";
+       case Column::FADE_TIME:
+               return "Fade time";
        default:
                return "";
        }
@@ -258,6 +268,7 @@ Qt::ItemFlags PlayList::flags(const QModelIndex &index) const
        switch (Column(column)) {
        case Column::DESCRIPTION:
        case Column::CAMERA:
+       case Column::FADE_TIME:
                return Qt::ItemIsEnabled | Qt::ItemIsSelectable | Qt::ItemIsEditable;
        default:
@@ -315,6 +326,16 @@ bool PlayList::setData(const QModelIndex &index, const QVariant &value, int role
                emit_data_changed(row);
                return true;
        }
+       case Column::FADE_TIME: {
+               bool ok;
+               double val = value.toDouble(&ok);
+               if (!ok || !(val >= 0.0)) {
+                       return false;
+               }
+               clips[row].fade_time_seconds = val;
+               emit_data_changed(row);
+               return true;
+       }
        default:
                return false;
        }
@@ -409,6 +430,7 @@ Clip deserialize_clip(const ClipProto &clip_proto)
                clip.descriptions[camera_idx] = clip_proto.description(camera_idx);
        }
        clip.stream_idx = clip_proto.stream_idx();
+       clip.fade_time_seconds = clip_proto.fade_time_seconds();
        return clip;
 }
 
@@ -420,6 +442,7 @@ void serialize_clip(const Clip &clip, ClipProto *clip_proto)
                *clip_proto->add_description() = clip.descriptions[camera_idx];
        }
        clip_proto->set_stream_idx(clip.stream_idx);
+       clip_proto->set_fade_time_seconds(clip.fade_time_seconds);
 }
 
 }  // namespace
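
Editor's note: the FADE_TIME column above is formatted through a stringstream pinned to the "C" locale, so the value always renders with '.' as the decimal separator regardless of the user's locale. A minimal standalone sketch of that formatting (the helper name is ours, not the commit's):

    #include <locale>
    #include <sstream>
    #include <string>

    // Format a fade time the way PlayList::data() does above: fixed
    // notation, three decimals, '.' as separator even under e.g. de_DE.
    std::string format_fade_time(double seconds)
    {
            std::stringstream ss;
            ss.imbue(std::locale("C"));
            ss.precision(3);
            ss << std::fixed << seconds;
            return ss.str();  // 0.5 -> "0.500"
    }

Without the imbue(), a comma-decimal locale would render "0,500", which the C-locale QVariant::toDouble() in setData() would then refuse to parse back. Note also that setData() rejects bad input with !(val >= 0.0), a form that (unlike val < 0.0) also rejects NaN.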
diff --git a/clip_list.h b/clip_list.h
index eb1340f2514702046632c7c5317b46e415b14710..c853263c13e28047048fcaa7c53aa59df6253d41 100644
@@ -15,6 +15,7 @@ struct Clip {
        int64_t pts_in = -1, pts_out = -1;  // pts_in is inclusive, pts_out is exclusive.
        std::string descriptions[NUM_CAMERAS];
        unsigned stream_idx = 0;  // For the playlist only.
+       double fade_time_seconds = 0.5;  // For the playlist only.
 };
 
 class DataChangedReceiver {
@@ -99,6 +100,7 @@ public:
                DURATION,
                CAMERA,
                DESCRIPTION,
+               FADE_TIME,
                NUM_COLUMNS
        };
 
diff --git a/jpeg_frame_view.cpp b/jpeg_frame_view.cpp
index 3d95a7701299a6c96ccf51c6cc57e50b6dab4af9..4598383b9dd6b30c40bd1d54f785ee0aa641a71b 100644
@@ -45,11 +45,17 @@ struct LRUFrame {
        size_t last_used;
 };
 
+struct PendingDecode {
+       JPEGID primary, secondary;
+       float fade_alpha;  // Irrelevant if secondary.stream_idx == -1.
+       JPEGFrameView *destination;
+};
+
 thread JPEGFrameView::jpeg_decoder_thread;
 mutex cache_mu;
 map<JPEGID, LRUFrame, JPEGIDLexicalOrder> cache;  // Under cache_mu.
 condition_variable any_pending_decodes, cache_updated;
-deque<pair<JPEGID, JPEGFrameView *>> pending_decodes;  // Under cache_mu.
+deque<PendingDecode> pending_decodes;  // Under cache_mu.
 atomic<size_t> event_counter{0};
 extern QGLWidget *global_share_widget;
 extern atomic<bool> should_quit;
@@ -202,8 +208,7 @@ void jpeg_decoder_thread_func()
 
        pthread_setname_np(pthread_self(), "JPEGDecoder");
        while (!should_quit.load()) {
-               JPEGID id;
-               JPEGFrameView *dest;
+               PendingDecode decode;
                CacheMissBehavior cache_miss_behavior = DECODE_IF_NOT_IN_CACHE;
                {
                        unique_lock<mutex> lock(cache_mu);  // TODO: Perhaps under another lock?
@@ -211,13 +216,12 @@ void jpeg_decoder_thread_func()
                                return !pending_decodes.empty() || should_quit.load();
                        });
                        if (should_quit.load()) break;
-                       id = pending_decodes.front().first;
-                       dest = pending_decodes.front().second;
+                       decode = pending_decodes.front();
                        pending_decodes.pop_front();
 
                        size_t num_pending = 0;
-                       for (const pair<JPEGID, JPEGFrameView *> &decode : pending_decodes) {
-                               if (decode.second == dest) {
+                       for (const PendingDecode &other_decode : pending_decodes) {
+                               if (other_decode.destination == decode.destination) {
                                        ++num_pending;
                                }
                        }
@@ -226,49 +230,68 @@ void jpeg_decoder_thread_func()
                        }
                }
 
-               bool found_in_cache;
-               shared_ptr<Frame> frame;
-               if (id.interpolated) {
-                       // Interpolated frames are never decoded by us,
-                       // put directly into the cache from VideoStream.
-                       unique_lock<mutex> lock(cache_mu);
-                       cache_updated.wait(lock, [id] {
-                               return cache.count(id) != 0 || should_quit.load();
-                       });
-                       if (should_quit.load()) break;
-                       found_in_cache = true;  // Don't count it as a decode.
+               shared_ptr<Frame> primary_frame, secondary_frame;
+               bool drop = false;
+               for (int subframe_idx = 0; subframe_idx < 2; ++subframe_idx) {
+                       const JPEGID &id = (subframe_idx == 0 ? decode.primary : decode.secondary);
+                       if (id.stream_idx == (unsigned)-1) {
+                               // No secondary frame.
+                               continue;
+                       }
 
-                       auto it = cache.find(id);
-                       assert(it != cache.end());
+                       bool found_in_cache;
+                       shared_ptr<Frame> frame;
+                       if (id.interpolated) {
+                               // Interpolated frames are never decoded by us,
+                               // put directly into the cache from VideoStream.
+                               unique_lock<mutex> lock(cache_mu);
+                               cache_updated.wait(lock, [id] {
+                                       return cache.count(id) != 0 || should_quit.load();
+                               });
+                               if (should_quit.load()) break;
+                               found_in_cache = true;  // Don't count it as a decode.
+
+                               auto it = cache.find(id);
+                               assert(it != cache.end());
+
+                               it->second.last_used = event_counter++;
+                               frame = it->second.frame;
+                               if (frame == nullptr) {
+                                       // We inserted a nullptr as signal that the frame was never
+                                       // interpolated and that we should stop waiting.
+                                       // But don't let it linger in the cache anymore.
+                                       cache.erase(it);
+                               }
+                       } else {
+                               frame = decode_jpeg_with_cache(id, cache_miss_behavior, &found_in_cache);
+                       }
 
-                       it->second.last_used = event_counter++;
-                       frame = it->second.frame;
                        if (frame == nullptr) {
-                               // We inserted a nullptr as signal that the frame was never
-                               // interpolated and that we should stop waiting.
-                               // But don't let it linger in the cache anymore.
-                               cache.erase(it);
+                               assert(id.interpolated || cache_miss_behavior == RETURN_NULLPTR_IF_NOT_IN_CACHE);
+                               drop = true;
+                               break;
                        }
-               } else {
-                       frame = decode_jpeg_with_cache(id, cache_miss_behavior, &found_in_cache);
-               }
 
-               if (frame == nullptr) {
-                       assert(id.interpolated || cache_miss_behavior == RETURN_NULLPTR_IF_NOT_IN_CACHE);
+                       if (!found_in_cache) {
+                               ++num_decoded;
+                               if (num_decoded % 1000 == 0) {
+                                       fprintf(stderr, "Decoded %zu images, dropped %zu (%.2f%% dropped)\n",
+                                               num_decoded, num_dropped, (100.0 * num_dropped) / (num_decoded + num_dropped));
+                               }
+                       }
+                       if (subframe_idx == 0) {
+                               primary_frame = move(frame);
+                       } else {
+                               secondary_frame = move(frame);
+                       }
+               }
+               if (drop) {
                        ++num_dropped;
                        continue;
                }
 
-               if (!found_in_cache) {
-                       ++num_decoded;
-                       if (num_decoded % 1000 == 0) {
-                               fprintf(stderr, "Decoded %zu images, dropped %zu (%.2f%% dropped)\n",
-                                       num_decoded, num_dropped, (100.0 * num_dropped) / (num_decoded + num_dropped));
-                       }
-               }
-
                // TODO: Could we get jitter between non-interpolated and interpolated frames here?
-               dest->setDecodedFrame(frame);
+               decode.destination->setDecodedFrame(primary_frame, secondary_frame, decode.fade_alpha);
        }
 }
 
@@ -282,12 +305,17 @@ JPEGFrameView::JPEGFrameView(QWidget *parent)
        : QGLWidget(parent, global_share_widget) {
 }
 
-void JPEGFrameView::setFrame(unsigned stream_idx, int64_t pts, bool interpolated)
+void JPEGFrameView::setFrame(unsigned stream_idx, int64_t pts, bool interpolated, int secondary_stream_idx, int64_t secondary_pts, float fade_alpha)
 {
-       current_stream_idx = stream_idx;
+       current_stream_idx = stream_idx;  // TODO: Does this interact with fades?
 
        unique_lock<mutex> lock(cache_mu);
-       pending_decodes.emplace_back(JPEGID{ stream_idx, pts, interpolated }, this);
+       PendingDecode decode;
+       decode.primary = JPEGID{ stream_idx, pts, interpolated };
+       decode.secondary = JPEGID{ (unsigned)secondary_stream_idx, secondary_pts, /*interpolated=*/false };
+       decode.fade_alpha = fade_alpha;
+       decode.destination = this;
+       pending_decodes.push_back(decode);
        any_pending_decodes.notify_all();
 }
 
@@ -365,11 +393,22 @@ void JPEGFrameView::paintGL()
        }
 }
 
-void JPEGFrameView::setDecodedFrame(std::shared_ptr<Frame> frame)
+void JPEGFrameView::setDecodedFrame(shared_ptr<Frame> frame, shared_ptr<Frame> secondary_frame, float fade_alpha)
 {
-       post_to_main_thread([this, frame] {
+       post_to_main_thread([this, frame, secondary_frame, fade_alpha] {
                current_frame = frame;
-               current_chain = ycbcr_converter->prepare_chain_for_conversion(frame);
+               current_secondary_frame = secondary_frame;
+
+               if (secondary_frame != nullptr) {
+                       current_chain = ycbcr_converter->prepare_chain_for_fade(frame, secondary_frame, fade_alpha);
+               } else {
+                       current_chain = ycbcr_converter->prepare_chain_for_conversion(frame);
+               }
                update();
        });
 }
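
Editor's note: the decoder loop above now treats every queued item as a potential pair: subframe 0 is the primary JPEG, subframe 1 the fade partner, and stream_idx == (unsigned)-1 in the secondary JPEGID is the "no fade" sentinel. A compact sketch of that dispatch (a simplification, assuming the surrounding file's declarations; the helper itself is hypothetical):

    // Decode the primary and, if present, the secondary frame.
    // Returns false if either is unavailable and the pair should be dropped.
    bool decode_pair(const PendingDecode &d,
                     std::shared_ptr<Frame> *primary,
                     std::shared_ptr<Frame> *secondary)
    {
            for (int i = 0; i < 2; ++i) {
                    const JPEGID &id = (i == 0) ? d.primary : d.secondary;
                    if (id.stream_idx == (unsigned)-1) continue;  // No secondary frame.
                    bool found_in_cache;
                    std::shared_ptr<Frame> frame =
                            decode_jpeg_with_cache(id, DECODE_IF_NOT_IN_CACHE, &found_in_cache);
                    if (frame == nullptr) return false;
                    *(i == 0 ? primary : secondary) = std::move(frame);
            }
            return true;
    }

The real loop additionally waits on cache_updated for interpolated frames, which are inserted into the cache by VideoStream rather than decoded here.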
diff --git a/jpeg_frame_view.h b/jpeg_frame_view.h
index d8babbb17ed7dc72a6e014cbc8537920bb0bb9cd..9a4621d46b3bc512b27a04613bc7cd42d6d8642f 100644
@@ -8,6 +8,7 @@
 
 #include <movit/effect_chain.h>
 #include <movit/flat_input.h>
+#include <movit/mix_effect.h>
 #include <movit/ycbcr_input.h>
 
 #include <memory>
@@ -36,14 +37,14 @@ class JPEGFrameView : public QGLWidget {
 public:
        JPEGFrameView(QWidget *parent);
 
-       void setFrame(unsigned stream_idx, int64_t pts, bool interpolated);
+       void setFrame(unsigned stream_idx, int64_t pts, bool interpolated, int secondary_stream_idx = -1, int64_t secondary_pts = -1, float fade_alpha = 0.0f);
        static void insert_interpolated_frame(unsigned stream_idx, int64_t pts, std::shared_ptr<Frame> frame);
 
        void mousePressEvent(QMouseEvent *event) override;
 
        unsigned get_stream_idx() const { return current_stream_idx; }
 
-       void setDecodedFrame(std::shared_ptr<Frame> frame);
+       void setDecodedFrame(std::shared_ptr<Frame> frame, std::shared_ptr<Frame> secondary_frame, float fade_alpha);
        void set_overlay(const std::string &text);  // Blank for none.
 
        static void shutdown();
@@ -64,6 +65,7 @@ private:
        movit::EffectChain *current_chain = nullptr;  // Owned by ycbcr_converter.
 
        std::shared_ptr<Frame> current_frame;  // So that we hold on to the pixels.
+       std::shared_ptr<Frame> current_secondary_frame;  // Same.
 
        static constexpr int overlay_base_width = 16, overlay_base_height = 16;
        int overlay_width = overlay_base_width, overlay_height = overlay_base_height;
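
Editor's note: the new parameters on setFrame() are defaulted, so existing call sites compile unchanged; a fade is requested simply by passing a real secondary stream. Hypothetical call sites (not from the commit):

    view->setFrame(/*stream_idx=*/2, pts, /*interpolated=*/false);  // Plain frame, as before.

    view->setFrame(/*stream_idx=*/2, pts, /*interpolated=*/false,
                   /*secondary_stream_idx=*/3, secondary_pts,
                   /*fade_alpha=*/0.25f);  // A quarter of the way faded towards stream 3.

fade_alpha weights the secondary frame: 0.0 shows only the primary, 1.0 only the secondary.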
diff --git a/player.cpp b/player.cpp
index 079f5cf3116d3f603e181d05bdfc3bdc8c930d51..0e5b225798988be9c9c8615c56b8ca8c74b24091 100644
@@ -50,6 +50,9 @@ void Player::thread_func(bool also_output_to_stream)
 
        constexpr double output_framerate = 60000.0 / 1001.0;  // FIXME: make configurable
        int64_t pts = 0;
+       Clip next_clip;
+       bool got_next_clip = false;
+       double next_clip_fade_time = -1.0;
 
        for ( ;; ) {
                // Wait until we're supposed to play something.
@@ -104,6 +107,42 @@ got_clip:
                                continue;
                        }
 
+                       double time_left_this_clip = double(clip.pts_out - in_pts) / TIMEBASE / speed;
+                       if (!got_next_clip && next_clip_callback != nullptr && time_left_this_clip <= clip.fade_time_seconds) {
+                               // Find the next clip so that we can begin a fade.
+                               next_clip = next_clip_callback();
+                               if (next_clip.pts_in != -1) {
+                                       got_next_clip = true;
+
+                                       double duration_next_clip = double(next_clip.pts_out - next_clip.pts_in) / TIMEBASE / speed;
+                                       next_clip_fade_time = std::min(time_left_this_clip, duration_next_clip);
+                                       fprintf(stderr, "decided on %.3f seconds fade time [%f %f]\n", next_clip_fade_time, time_left_this_clip, duration_next_clip);
+                               }
+                       }
+
+                       // TODO: If more than half-way through the fade, interpolate the next clip
+                       // instead of the current one.
+
+                       int secondary_stream_idx = -1;
+                       int64_t secondary_pts = -1;
+                       float fade_alpha = 0.0f;
+                       if (got_next_clip) {
+                               int64_t in_pts_lower, in_pts_upper;
+                               bool ok = find_surrounding_frames(in_pts, next_clip.stream_idx, &in_pts_lower, &in_pts_upper);
+                               if (ok) {
+                                       secondary_stream_idx = next_clip.stream_idx;
+                                       secondary_pts = in_pts_lower;
+                                       fade_alpha = 1.0f - time_left_this_clip / next_clip_fade_time;
+                               }
+                       }
+
+                       if (progress_callback != nullptr) {
+                               // NOTE: None of this will take into account any snapping done below.
+                               double played_this_clip = double(in_pts - clip.pts_in) / TIMEBASE / speed;
+                               double total_length = double(clip.pts_out - clip.pts_in) / TIMEBASE / speed;
+                               progress_callback(played_this_clip, total_length);
+                       }
+
                        int64_t in_pts_lower, in_pts_upper;
                        bool ok = find_surrounding_frames(in_pts, stream_idx, &in_pts_lower, &in_pts_upper);
                        if (!ok || in_pts_upper >= clip.pts_out) {
@@ -124,17 +163,14 @@ got_clip:
                                }
                        }
 
-                       if (progress_callback != nullptr) {
-                               // NOTE: None of this will take into account any snapping done below.
-                               double played_this_clip = double(in_pts - clip.pts_in) / TIMEBASE / speed;
-                               double total_length = double(clip.pts_out - clip.pts_in) / TIMEBASE / speed;
-                               progress_callback(played_this_clip, total_length);
-                       }
-
                        if (in_pts_lower == in_pts_upper) {
-                               destination->setFrame(stream_idx, in_pts_lower, /*interpolated=*/false);
+                               destination->setFrame(stream_idx, in_pts_lower, /*interpolated=*/false, secondary_stream_idx, secondary_pts, fade_alpha);
                                if (video_stream != nullptr) {
-                                       video_stream->schedule_original_frame(pts, stream_idx, in_pts_lower);
+                                       if (secondary_stream_idx == -1) {
+                                               video_stream->schedule_original_frame(pts, stream_idx, in_pts_lower);
+                                       } else {
+                                               video_stream->schedule_faded_frame(pts, stream_idx, in_pts_lower, secondary_stream_idx, secondary_pts, fade_alpha);
+                                       }
                                }
                                continue;
                        }
@@ -145,9 +181,13 @@ got_clip:
                        for (int64_t snap_pts : { in_pts_lower, in_pts_upper }) {
                                double snap_pts_as_frameno = (snap_pts - in_pts_origin) * output_framerate / TIMEBASE / speed;
                                if (fabs(snap_pts_as_frameno - frameno) < 0.01) {
-                                       destination->setFrame(stream_idx, snap_pts, /*interpolated=*/false);
+                                       destination->setFrame(stream_idx, snap_pts, /*interpolated=*/false, secondary_stream_idx, secondary_pts, fade_alpha);
                                        if (video_stream != nullptr) {
-                                               video_stream->schedule_original_frame(pts, stream_idx, snap_pts);
+                                               if (secondary_stream_idx == -1) {
+                                                       video_stream->schedule_original_frame(pts, stream_idx, snap_pts);
+                                               } else {
+                                                       video_stream->schedule_faded_frame(pts, stream_idx, snap_pts, secondary_stream_idx, secondary_pts, fade_alpha);
+                                               }
                                        }
                                        in_pts_origin += snap_pts - in_pts;
                                        snapped = true;
@@ -168,25 +208,35 @@ got_clip:
 
                        if (video_stream == nullptr) {
                                // Previews don't do any interpolation.
-                               destination->setFrame(stream_idx, in_pts_lower, /*interpolated=*/false);
+                               assert(secondary_stream_idx == -1);
+                               destination->setFrame(stream_idx, in_pts_lower, /*interpolated=*/false, secondary_stream_idx, secondary_pts, fade_alpha);
                        } else {
                                // Calculate the interpolated frame. When it's done, the destination
                                // will be unblocked.
-                               destination->setFrame(stream_idx, pts, /*interpolated=*/true);
-                               video_stream->schedule_interpolated_frame(pts, stream_idx, in_pts_lower, in_pts_upper, alpha);
+                               destination->setFrame(stream_idx, pts, /*interpolated=*/true, secondary_stream_idx, secondary_pts, fade_alpha);
+                               video_stream->schedule_interpolated_frame(pts, stream_idx, in_pts_lower, in_pts_upper, alpha, secondary_stream_idx, secondary_pts, fade_alpha);
                        }
                }
 
-               if (next_clip_callback != nullptr) {
-                       Clip next_clip = next_clip_callback();
+               // The clip ended.
+
+               // Last-ditch effort to get the next clip (if e.g. the fade time was zero seconds).
+               if (!got_next_clip && next_clip_callback != nullptr) {
+                       next_clip = next_clip_callback();
                        if (next_clip.pts_in != -1) {
-                               clip = next_clip;
-                               stream_idx = next_clip.stream_idx;  // Override is used for previews only, and next_clip is used for live ony.
-                               if (done_callback != nullptr) {
-                                       done_callback();
-                               }
-                               goto got_clip;
+                               got_next_clip = true;
+                       }
+               }
+
+               // Switch to next clip if we got it.
+               if (got_next_clip) {
+                       clip = next_clip;
+                       stream_idx = next_clip.stream_idx;  // Override is used for previews only, and next_clip is used for live only.
+                       if (done_callback != nullptr) {
+                               done_callback();
                        }
+                       got_next_clip = false;
+                       goto got_clip;
                }
 
                {
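
Editor's note: the fade scheduling above works in three steps: once time_left_this_clip drops below the clip's fade_time_seconds, the next clip is fetched early; the effective fade length is clamped to min(time left, duration of the next clip); and fade_alpha then ramps linearly from 0 to 1 as the current clip runs out. The ramp, written out as a sketch:

    double fade_alpha_for(double time_left_this_clip, double next_clip_fade_time)
    {
            return 1.0 - time_left_this_clip / next_clip_fade_time;
    }
    // fade_alpha_for(0.50, 0.5) == 0.0  (fade just starting)
    // fade_alpha_for(0.25, 0.5) == 0.5  (halfway through)
    // fade_alpha_for(0.00, 0.5) == 1.0  (next clip fully visible)

If find_surrounding_frames() cannot locate a frame in the secondary stream, the fade is simply skipped for that output frame (secondary_stream_idx stays -1).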
diff --git a/state.proto b/state.proto
index 7fe78dc34957dbda82d2f3dc57fd2d4cc799c297..d76bf02c6c66472a4f4d0b32727af1b46b98a00a 100644
@@ -6,6 +6,7 @@ message ClipProto {
        int64 pts_out = 2;
        repeated string description = 3;
        int64 stream_idx = 4;
+       double fade_time_seconds = 5;
 }
 
 message ClipListProto {
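
Editor's note: one compatibility wrinkle (assuming proto3 semantics, where an absent scalar reads back as zero): playlists saved before this commit carry no fade_time_seconds, so deserialize_clip() will overwrite the 0.5-second default in struct Clip with 0.0, and old clips load as hard cuts rather than half-second fades:

    Clip clip;            // clip.fade_time_seconds == 0.5 (C++ default)
    ClipProto old_proto;  // Field 5 absent in pre-fade saves; reads as 0.0.
    clip.fade_time_seconds = old_proto.fade_time_seconds();  // Now 0.0: a hard cut.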
diff --git a/video_stream.cpp b/video_stream.cpp
index c01ec5ce1e557ca236db22c26f532900553a89b7..69dfbe6f0fea07ff22a09f60fabf09122b0e93e4 100644
@@ -150,15 +150,20 @@ vector<uint8_t> encode_jpeg(const uint8_t *y_data, const uint8_t *cb_data, const
 VideoStream::VideoStream()
 {
        ycbcr_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_DUAL_YCBCR, /*resource_pool=*/nullptr));
+       ycbcr_semiplanar_converter.reset(new YCbCrConverter(YCbCrConverter::OUTPUT_TO_SEMIPLANAR, /*resource_pool=*/nullptr));
 
        GLuint input_tex[num_interpolate_slots], gray_tex[num_interpolate_slots];
+       GLuint fade_y_output_tex[num_interpolate_slots], fade_cbcr_output_tex[num_interpolate_slots];
        GLuint cb_tex[num_interpolate_slots], cr_tex[num_interpolate_slots];
 
        glCreateTextures(GL_TEXTURE_2D_ARRAY, 10, input_tex);
        glCreateTextures(GL_TEXTURE_2D_ARRAY, 10, gray_tex);
+       glCreateTextures(GL_TEXTURE_2D, 10, fade_y_output_tex);
+       glCreateTextures(GL_TEXTURE_2D, 10, fade_cbcr_output_tex);
        glCreateTextures(GL_TEXTURE_2D, 10, cb_tex);
        glCreateTextures(GL_TEXTURE_2D, 10, cr_tex);
        check_error();
+
        constexpr size_t width = 1280, height = 720;  // FIXME: adjustable width, height
        int levels = find_num_levels(width, height);
        for (size_t i = 0; i < num_interpolate_slots; ++i) {
@@ -166,6 +171,10 @@ VideoStream::VideoStream()
                check_error();
                glTextureStorage3D(gray_tex[i], levels, GL_R8, width, height, 2);
                check_error();
+               glTextureStorage2D(fade_y_output_tex[i], 1, GL_R8, width, height);
+               check_error();
+               glTextureStorage2D(fade_cbcr_output_tex[i], 1, GL_RG8, width, height);
+               check_error();
                glTextureStorage2D(cb_tex[i], 1, GL_R8, width / 2, height);
                check_error();
                glTextureStorage2D(cr_tex[i], 1, GL_R8, width / 2, height);
@@ -174,10 +183,14 @@ VideoStream::VideoStream()
                InterpolatedFrameResources resource;
                resource.input_tex = input_tex[i];
                resource.gray_tex = gray_tex[i];
+               resource.fade_y_output_tex = fade_y_output_tex[i];
+               resource.fade_cbcr_output_tex = fade_cbcr_output_tex[i];
                resource.cb_tex = cb_tex[i];
                resource.cr_tex = cr_tex[i];
                glCreateFramebuffers(2, resource.input_fbos);
                check_error();
+               glCreateFramebuffers(1, &resource.fade_fbo);
+               check_error();
 
                glNamedFramebufferTextureLayer(resource.input_fbos[0], GL_COLOR_ATTACHMENT0, input_tex[i], 0, 0);
                check_error();
@@ -187,12 +200,18 @@ VideoStream::VideoStream()
                check_error();
                glNamedFramebufferTextureLayer(resource.input_fbos[1], GL_COLOR_ATTACHMENT1, gray_tex[i], 0, 1);
                check_error();
+               glNamedFramebufferTexture(resource.fade_fbo, GL_COLOR_ATTACHMENT0, fade_y_output_tex[i], 0);
+               check_error();
+               glNamedFramebufferTexture(resource.fade_fbo, GL_COLOR_ATTACHMENT1, fade_cbcr_output_tex[i], 0);
+               check_error();
 
                GLuint bufs[] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 };
                glNamedFramebufferDrawBuffers(resource.input_fbos[0], 2, bufs);
                check_error();
                glNamedFramebufferDrawBuffers(resource.input_fbos[1], 2, bufs);
                check_error();
+               glNamedFramebufferDrawBuffers(resource.fade_fbo, 2, bufs);
+               check_error();
 
                glCreateBuffers(1, &resource.pbo);
                check_error();
@@ -206,6 +225,7 @@ VideoStream::VideoStream()
 
        compute_flow.reset(new DISComputeFlow(width, height, operating_point2));
        interpolate.reset(new Interpolate(operating_point2, /*split_ycbcr_output=*/true));
+       interpolate_no_split.reset(new Interpolate(operating_point2, /*split_ycbcr_output=*/false));
        chroma_subsampler.reset(new ChromaSubsampler);
        check_error();
 }
@@ -256,9 +276,84 @@ void VideoStream::schedule_original_frame(int64_t output_pts, unsigned stream_id
        queue_nonempty.notify_all();
 }
 
-void VideoStream::schedule_interpolated_frame(int64_t output_pts, unsigned stream_idx, int64_t input_first_pts, int64_t input_second_pts, float alpha)
+void VideoStream::schedule_faded_frame(int64_t output_pts, unsigned stream_idx, int64_t input_pts, int secondary_stream_idx, int64_t secondary_input_pts, float fade_alpha)
+{
+       fprintf(stderr, "output_pts=%ld  faded         input_pts=%ld,%ld  fade_alpha=%.2f\n", output_pts, input_pts, secondary_input_pts, fade_alpha);
+
+       // Get the temporary OpenGL resources we need for doing the fade.
+       // (We share these with interpolated frames, which is slightly
+       // overkill, but there's no need to waste resources on keeping
+       // separate pools around.)
+       InterpolatedFrameResources resources;
+       {
+               unique_lock<mutex> lock(queue_lock);
+               if (interpolate_resources.empty()) {
+                       fprintf(stderr, "WARNING: Too many interpolated frames already in transit; dropping one.\n");
+                       return;
+               }
+               resources = interpolate_resources.front();
+               interpolate_resources.pop_front();
+       }
+
+       bool did_decode;
+
+       JPEGID jpeg_id1;
+       jpeg_id1.stream_idx = stream_idx;
+       jpeg_id1.pts = input_pts;
+       jpeg_id1.interpolated = false;
+       shared_ptr<Frame> frame1 = decode_jpeg_with_cache(jpeg_id1, DECODE_IF_NOT_IN_CACHE, &did_decode);
+
+       JPEGID jpeg_id2;
+       jpeg_id2.stream_idx = secondary_stream_idx;
+       jpeg_id2.pts = secondary_input_pts;
+       jpeg_id2.interpolated = false;
+       shared_ptr<Frame> frame2 = decode_jpeg_with_cache(jpeg_id2, DECODE_IF_NOT_IN_CACHE, &did_decode);
+
+       ycbcr_semiplanar_converter->prepare_chain_for_fade(frame1, frame2, fade_alpha)->render_to_fbo(resources.fade_fbo, 1280, 720);
+
+       QueuedFrame qf;
+       qf.type = QueuedFrame::FADED;
+       qf.output_pts = output_pts;
+       qf.stream_idx = stream_idx;
+       qf.resources = resources;
+       qf.input_first_pts = input_pts;
+
+       qf.secondary_stream_idx = secondary_stream_idx;
+       qf.secondary_input_pts = secondary_input_pts;
+
+       // Subsample and split Cb/Cr.
+       chroma_subsampler->subsample_chroma(resources.fade_cbcr_output_tex, 1280, 720, resources.cb_tex, resources.cr_tex);
+
+       // Read it down (asynchronously) to the CPU.
+       glPixelStorei(GL_PACK_ROW_LENGTH, 0);
+       glBindBuffer(GL_PIXEL_PACK_BUFFER, resources.pbo);
+       check_error();
+       glGetTextureImage(resources.fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
+       check_error();
+       glGetTextureImage(resources.cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3, BUFFER_OFFSET(1280 * 720));
+       check_error();
+       glGetTextureImage(resources.cr_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3 - 640 * 720, BUFFER_OFFSET(1280 * 720 + 640 * 720));
+       check_error();
+       glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+
+       // Set a fence we can wait for to make sure the CPU sees the read.
+       glMemoryBarrier(GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
+       check_error();
+       qf.fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
+       check_error();
+
+       unique_lock<mutex> lock(queue_lock);
+       frame_queue.push_back(qf);
+       queue_nonempty.notify_all();
+}
+
+void VideoStream::schedule_interpolated_frame(int64_t output_pts, unsigned stream_idx, int64_t input_first_pts, int64_t input_second_pts, float alpha, int secondary_stream_idx, int64_t secondary_input_pts, float fade_alpha)
 {
-       fprintf(stderr, "output_pts=%ld  interpolated  input_pts1=%ld input_pts2=%ld alpha=%.3f\n", output_pts, input_first_pts, input_second_pts, alpha);
+       if (secondary_stream_idx != -1) {
+               fprintf(stderr, "output_pts=%ld  interpolated  input_pts1=%ld input_pts2=%ld alpha=%.3f  secondary_pts=%ld  fade_alpha=%.2f\n", output_pts, input_first_pts, input_second_pts, alpha, secondary_input_pts, fade_alpha);
+       } else {
+               fprintf(stderr, "output_pts=%ld  interpolated  input_pts1=%ld input_pts2=%ld alpha=%.3f\n", output_pts, input_first_pts, input_second_pts, alpha);
+       }
 
        // Get the temporary OpenGL resources we need for doing the interpolation.
        InterpolatedFrameResources resources;
@@ -274,7 +369,7 @@ void VideoStream::schedule_interpolated_frame(int64_t output_pts, unsigned strea
        }
 
        QueuedFrame qf;
-       qf.type = QueuedFrame::INTERPOLATED;
+       qf.type = (secondary_stream_idx == -1) ? QueuedFrame::INTERPOLATED : QueuedFrame::FADED_INTERPOLATED;
        qf.output_pts = output_pts;
        qf.stream_idx = stream_idx;
        qf.resources = resources;
@@ -300,11 +395,33 @@ void VideoStream::schedule_interpolated_frame(int64_t output_pts, unsigned strea
        // Compute the interpolated frame.
        qf.flow_tex = compute_flow->exec(resources.gray_tex, DISComputeFlow::FORWARD_AND_BACKWARD, DISComputeFlow::DO_NOT_RESIZE_FLOW);
        check_error();
-       tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources.input_tex, resources.gray_tex, qf.flow_tex, 1280, 720, alpha);
-       check_error();
 
-       // Subsample and split Cb/Cr.
-       chroma_subsampler->subsample_chroma(qf.cbcr_tex, 1280, 720, resources.cb_tex, resources.cr_tex);
+       if (secondary_stream_idx != -1) {
+               // Fade. First kick off the interpolation.
+               tie(qf.output_tex, ignore) = interpolate_no_split->exec(resources.input_tex, resources.gray_tex, qf.flow_tex, 1280, 720, alpha);
+               check_error();
+
+               // Now decode the image we are fading against.
+               JPEGID jpeg_id;
+               jpeg_id.stream_idx = secondary_stream_idx;
+               jpeg_id.pts = secondary_input_pts;
+               jpeg_id.interpolated = false;
+               bool did_decode;
+               shared_ptr<Frame> frame2 = decode_jpeg_with_cache(jpeg_id, DECODE_IF_NOT_IN_CACHE, &did_decode);
+
+               // Then fade against it, putting it into the fade Y' and CbCr textures.
+               ycbcr_semiplanar_converter->prepare_chain_for_fade_from_texture(qf.output_tex, frame2, fade_alpha)->render_to_fbo(resources.fade_fbo, 1280, 720);
+
+               // Subsample and split Cb/Cr.
+               chroma_subsampler->subsample_chroma(resources.fade_cbcr_output_tex, 1280, 720, resources.cb_tex, resources.cr_tex);
+       } else {
+               tie(qf.output_tex, qf.cbcr_tex) = interpolate->exec(resources.input_tex, resources.gray_tex, qf.flow_tex, 1280, 720, alpha);
+               check_error();
+
+               // Subsample and split Cb/Cr.
+               chroma_subsampler->subsample_chroma(qf.cbcr_tex, 1280, 720, resources.cb_tex, resources.cr_tex);
+       }
 
        // We could have released qf.flow_tex here, but to make sure we don't cause a stall
        // when trying to reuse it for the next frame, we can just as well hold on to it
@@ -314,7 +431,11 @@ void VideoStream::schedule_interpolated_frame(int64_t output_pts, unsigned strea
        glPixelStorei(GL_PACK_ROW_LENGTH, 0);
        glBindBuffer(GL_PIXEL_PACK_BUFFER, resources.pbo);
        check_error();
-       glGetTextureImage(qf.output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
+       if (secondary_stream_idx != -1) {
+               glGetTextureImage(resources.fade_y_output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
+       } else {
+               glGetTextureImage(qf.output_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 4, BUFFER_OFFSET(0));
+       }
        check_error();
        glGetTextureImage(resources.cb_tex, 0, GL_RED, GL_UNSIGNED_BYTE, 1280 * 720 * 3, BUFFER_OFFSET(1280 * 720));
        check_error();
@@ -395,19 +516,38 @@ void VideoStream::encode_thread_func()
                        pkt.data = (uint8_t *)jpeg.data();
                        pkt.size = jpeg.size();
                        stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
-               } else if (qf.type == QueuedFrame::INTERPOLATED) {
+               } else if (qf.type == QueuedFrame::FADED) {
                        glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
 
+                       shared_ptr<Frame> frame = frame_from_pbo(qf.resources.pbo_contents, 1280, 720);
+
+                       // Now JPEG encode it, and send it on to the stream.
+                       vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), 1280, 720);
+
+                       AVPacket pkt;
+                       av_init_packet(&pkt);
+                       pkt.stream_index = 0;
+                       pkt.data = (uint8_t *)jpeg.data();
+                       pkt.size = jpeg.size();
+                       stream_mux->add_packet(pkt, qf.output_pts, qf.output_pts);
+
+                       // Put the frame resources back.
+                       unique_lock<mutex> lock(queue_lock);
+                       interpolate_resources.push_back(qf.resources);
+               } else if (qf.type == QueuedFrame::INTERPOLATED || qf.type == QueuedFrame::FADED_INTERPOLATED) {
+                       glClientWaitSync(qf.fence.get(), /*flags=*/0, GL_TIMEOUT_IGNORED);
 
                        // Send a copy of the frame on to display.
                        shared_ptr<Frame> frame = frame_from_pbo(qf.resources.pbo_contents, 1280, 720);
-                       JPEGFrameView::insert_interpolated_frame(qf.stream_idx, qf.output_pts, frame);
+                       JPEGFrameView::insert_interpolated_frame(qf.stream_idx, qf.output_pts, frame);  // TODO: this is wrong for fades
 
                        // Now JPEG encode it, and send it on to the stream.
                        vector<uint8_t> jpeg = encode_jpeg(frame->y.get(), frame->cb.get(), frame->cr.get(), 1280, 720);
                        compute_flow->release_texture(qf.flow_tex);
-                       interpolate->release_texture(qf.output_tex);
-                       interpolate->release_texture(qf.cbcr_tex);
+                       if (qf.type == QueuedFrame::FADED_INTERPOLATED) {
+                               // This texture came from interpolate_no_split, so return it
+                               // to that pool; otherwise it leaks once per faded frame.
+                               interpolate_no_split->release_texture(qf.output_tex);
+                       } else {
+                               interpolate->release_texture(qf.output_tex);
+                               interpolate->release_texture(qf.cbcr_tex);
+                       }
 
                        AVPacket pkt;
                        av_init_packet(&pkt);
@@ -419,6 +559,8 @@ void VideoStream::encode_thread_func()
                        // Put the frame resources back.
                        unique_lock<mutex> lock(queue_lock);
                        interpolate_resources.push_back(qf.resources);
+               } else {
+                       assert(false);
                }
        }
 }
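
Editor's note: the three glGetTextureImage() readbacks in the fade and interpolation paths imply a fixed planar layout in the persistently mapped PBO, for the hardcoded 1280x720 frame with 2x horizontal chroma subsampling (a sketch; the FIXMEs note the dimensions should become adjustable):

    constexpr size_t luma_size   = 1280 * 720;                // Y', full resolution.
    constexpr size_t chroma_size = 640 * 720;                 // Cb or Cr, half width.
    constexpr size_t y_offset    = 0;
    constexpr size_t cb_offset   = luma_size;                 // 921600
    constexpr size_t cr_offset   = luma_size + chroma_size;   // 1382400

The bufSize arguments (e.g. 1280 * 720 * 3 - 640 * 720 for Cr) appear to be the space remaining in a 1280 * 720 * 4-byte buffer after each offset, not the number of bytes actually written.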
diff --git a/video_stream.h b/video_stream.h
index f05a10a5b3b2c839087145596df1f9f2d9f2d029..146df0bedbae2abbb4887869b21d536319dc98ac 100644
@@ -15,6 +15,7 @@ extern "C" {
 #include <thread>
 
 #include <movit/effect_chain.h>
+#include <movit/mix_effect.h>
 #include <movit/ycbcr_input.h>
 
 #include "ref_counted_gl_sync.h"
@@ -35,7 +36,8 @@ public:
        void stop();
 
        void schedule_original_frame(int64_t output_pts, unsigned stream_idx, int64_t input_pts);
-       void schedule_interpolated_frame(int64_t output_pts, unsigned stream_idx, int64_t input_first_pts, int64_t input_second_pts, float alpha);
+       void schedule_faded_frame(int64_t output_pts, unsigned stream_idx, int64_t input_pts, int secondary_stream_idx, int64_t secondary_input_pts, float fade_alpha);
+       void schedule_interpolated_frame(int64_t output_pts, unsigned stream_idx, int64_t input_first_pts, int64_t input_second_pts, float alpha, int secondary_stream_idx = -1, int64_t secondary_input_pts = -1, float fade_alpha = 0.0f); // -1 = no secondary frame.
 
 private:
 
@@ -48,10 +50,16 @@ private:
        // Allocated at the very start; if we're empty, we start dropping frames
        // (so that we don't build up an infinite interpolation backlog).
        struct InterpolatedFrameResources {
-               GLuint input_tex;  // Layered (contains both input frames).
-               GLuint gray_tex;  // Same.
-               GLuint cb_tex, cr_tex;
+               GLuint input_tex;  // Layered (contains both input frames), Y'CbCr.
+               GLuint gray_tex;  // Same, but Y only.
                GLuint input_fbos[2];  // For rendering to the two layers of input_tex.
+
+               // Destination textures and FBO if there is a fade.
+               GLuint fade_y_output_tex, fade_cbcr_output_tex;
+               GLuint fade_fbo;
+
+               GLuint cb_tex, cr_tex;  // Subsampled, final output.
+
                GLuint pbo;  // For reading the data back.
                void *pbo_contents;  // Persistently mapped.
        };
@@ -60,10 +68,14 @@ private:
 
        struct QueuedFrame {
                int64_t output_pts;
-               enum Type { ORIGINAL, INTERPOLATED } type;
+               enum Type { ORIGINAL, FADED, INTERPOLATED, FADED_INTERPOLATED } type;
                unsigned stream_idx;
                int64_t input_first_pts;  // The only pts for original frames.  
 
+               // For fades only (including fades against interpolated frames).
+               int secondary_stream_idx = -1;
+               int64_t secondary_input_pts;
+
                // For interpolated frames only.
                int64_t input_second_pts;
                float alpha;
@@ -80,10 +92,11 @@ private:
        bool seen_sync_markers = false;
 
        std::unique_ptr<YCbCrConverter> ycbcr_converter;
+       std::unique_ptr<YCbCrConverter> ycbcr_semiplanar_converter;
 
        // Frame interpolation.
        std::unique_ptr<DISComputeFlow> compute_flow;
-       std::unique_ptr<Interpolate> interpolate;
+       std::unique_ptr<Interpolate> interpolate, interpolate_no_split;
        std::unique_ptr<ChromaSubsampler> chroma_subsampler;
 };
 
diff --git a/ycbcr_converter.cpp b/ycbcr_converter.cpp
index d038fc46f518f862a684cf96baae7090cb44375c..40d1a9cd771d04cc1f37d1d809dba47ed000f964 100644
@@ -8,6 +8,30 @@
 using namespace std;
 using namespace movit;
 
+namespace {
+
+void setup_outputs(YCbCrConverter::OutputMode output_mode, const ImageFormat &output_format, const YCbCrFormat &ycbcr_output_format, EffectChain *chain)
+{
+       if (output_mode == YCbCrConverter::OUTPUT_TO_RGBA) {
+               chain->add_output(output_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED);
+               chain->set_output_origin(OUTPUT_ORIGIN_BOTTOM_LEFT);
+       } else if (output_mode == YCbCrConverter::OUTPUT_TO_SEMIPLANAR) {
+               chain->add_ycbcr_output(output_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED, ycbcr_output_format, YCBCR_OUTPUT_SPLIT_Y_AND_CBCR);
+               chain->set_output_origin(OUTPUT_ORIGIN_TOP_LEFT);
+       } else {
+               assert(output_mode == YCbCrConverter::OUTPUT_TO_DUAL_YCBCR);
+
+               // One full Y'CbCr texture (for interpolation), one that's just Y (throwing away the
+               // Cb and Cr channels). The second copy is sort of redundant, but it's the easiest way
+               // of getting the gray data into a layered texture.
+               chain->add_ycbcr_output(output_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED, ycbcr_output_format);
+               chain->add_ycbcr_output(output_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED, ycbcr_output_format);
+               chain->set_output_origin(OUTPUT_ORIGIN_TOP_LEFT);
+       }
+}
+
+}  // namespace
+
 YCbCrConverter::YCbCrConverter(YCbCrConverter::OutputMode output_mode, ResourcePool *resource_pool)
 {
        ImageFormat inout_format;
@@ -30,36 +54,58 @@ YCbCrConverter::YCbCrConverter(YCbCrConverter::OutputMode output_mode, ResourceP
        // Planar Y'CbCr decoding chain.
        planar_chain.reset(new EffectChain(1280, 720, resource_pool));
        ycbcr_planar_input = (YCbCrInput *)planar_chain->add_input(new YCbCrInput(inout_format, ycbcr_format, 1280, 720, YCBCR_INPUT_PLANAR));
-       if (output_mode == OUTPUT_TO_RGBA) {
-               planar_chain->add_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED);
-               planar_chain->set_output_origin(OUTPUT_ORIGIN_BOTTOM_LEFT);
-       } else {
-               assert(output_mode == OUTPUT_TO_DUAL_YCBCR);
-
-               // One full Y'CbCr texture (for interpolation), one that's just Y (throwing away the
-               // Cb and Cr channels). The second copy is sort of redundant, but it's the easiest way
-               // of getting the gray data into a layered texture.
-               planar_chain->add_ycbcr_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED, ycbcr_output_format);
-               planar_chain->add_ycbcr_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED, ycbcr_output_format);
-               planar_chain->set_output_origin(OUTPUT_ORIGIN_TOP_LEFT);
-       }
+       setup_outputs(output_mode, inout_format, ycbcr_output_format, planar_chain.get());
        planar_chain->set_dither_bits(8);
        planar_chain->finalize();
 
        // Semiplanar Y'CbCr decoding chain (for images coming from VA-API).
        semiplanar_chain.reset(new EffectChain(1280, 720, resource_pool));
        ycbcr_semiplanar_input = (YCbCrInput *)semiplanar_chain->add_input(new YCbCrInput(inout_format, ycbcr_format, 1280, 720, YCBCR_INPUT_SPLIT_Y_AND_CBCR));
-       if (output_mode == OUTPUT_TO_RGBA) {
-               semiplanar_chain->add_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED);
-               semiplanar_chain->set_output_origin(OUTPUT_ORIGIN_BOTTOM_LEFT);
-       } else {
-               // See above.
-               semiplanar_chain->add_ycbcr_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED, ycbcr_output_format);
-               semiplanar_chain->add_ycbcr_output(inout_format, OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED, ycbcr_output_format);
-               semiplanar_chain->set_output_origin(OUTPUT_ORIGIN_TOP_LEFT);
-       }
+       setup_outputs(output_mode, inout_format, ycbcr_output_format, semiplanar_chain.get());
        semiplanar_chain->set_dither_bits(8);
        semiplanar_chain->finalize();
+
+       // Fade chains.
+       for (bool first_input_is_semiplanar : { false, true }) {
+               for (bool second_input_is_semiplanar : { false, true }) {
+                       FadeChain &fade_chain = fade_chains[first_input_is_semiplanar][second_input_is_semiplanar];
+                       fade_chain.chain.reset(new EffectChain(1280, 720, resource_pool));
+                       fade_chain.input[0] = (movit::YCbCrInput *)fade_chain.chain->add_input(
+                               new YCbCrInput(inout_format, ycbcr_format, 1280, 720,
+                                       first_input_is_semiplanar ? YCBCR_INPUT_SPLIT_Y_AND_CBCR : YCBCR_INPUT_PLANAR));
+                       fade_chain.input[1] = (movit::YCbCrInput *)fade_chain.chain->add_input(
+                               new YCbCrInput(inout_format, ycbcr_format, 1280, 720,
+                                       second_input_is_semiplanar ? YCBCR_INPUT_SPLIT_Y_AND_CBCR : YCBCR_INPUT_PLANAR));
+                       fade_chain.mix_effect = (movit::MixEffect *)fade_chain.chain->add_effect(
+                               new MixEffect, fade_chain.input[0], fade_chain.input[1]);
+                       setup_outputs(output_mode, inout_format, ycbcr_output_format, fade_chain.chain.get());
+                       fade_chain.chain->set_dither_bits(8);
+                       fade_chain.chain->finalize();
+               }
+       }
+
+       // Fade from interleaved chain (ie., first input is interleaved, since it comes
+       // directly from the GPU anyway).
+       for (bool second_input_is_semiplanar : { false, true }) {
+               FadeChain &fade_chain = interleaved_fade_chains[second_input_is_semiplanar];
+               fade_chain.chain.reset(new EffectChain(1280, 720, resource_pool));
+
+               ycbcr_format.chroma_subsampling_x = 1;
+               fade_chain.input[0] = (movit::YCbCrInput *)fade_chain.chain->add_input(
+                       new YCbCrInput(inout_format, ycbcr_format, 1280, 720,
+                               YCBCR_INPUT_INTERLEAVED));
+
+               ycbcr_format.chroma_subsampling_x = 2;
+               fade_chain.input[1] = (movit::YCbCrInput *)fade_chain.chain->add_input(
+                       new YCbCrInput(inout_format, ycbcr_format, 1280, 720,
+                               second_input_is_semiplanar ? YCBCR_INPUT_SPLIT_Y_AND_CBCR : YCBCR_INPUT_PLANAR));
+
+               fade_chain.mix_effect = (movit::MixEffect *)fade_chain.chain->add_effect(
+                       new MixEffect, fade_chain.input[0], fade_chain.input[1]);
+               setup_outputs(output_mode, inout_format, ycbcr_output_format, fade_chain.chain.get());
+               fade_chain.chain->set_dither_bits(8);
+               fade_chain.chain->finalize();
+       }
 }
 
 EffectChain *YCbCrConverter::prepare_chain_for_conversion(shared_ptr<Frame> frame)
@@ -73,6 +119,42 @@ EffectChain *YCbCrConverter::prepare_chain_for_conversion(shared_ptr<Frame> fram
        }
 }
 
+EffectChain *YCbCrConverter::prepare_chain_for_fade(shared_ptr<Frame> frame, shared_ptr<Frame> secondary_frame, float fade_alpha)
+{
+       const FadeChain &fade_chain = fade_chains[frame->is_semiplanar][secondary_frame->is_semiplanar];
+       setup_input_for_frame(frame, ycbcr_format, fade_chain.input[0]);
+       setup_input_for_frame(secondary_frame, ycbcr_format, fade_chain.input[1]);
+       bool ok = fade_chain.mix_effect->set_float("strength_first", 1.0f - fade_alpha);
+       ok &= fade_chain.mix_effect->set_float("strength_second", fade_alpha);
+       assert(ok);
+       return fade_chain.chain.get();
+}
+
+EffectChain *YCbCrConverter::prepare_chain_for_fade_from_texture(GLuint tex, std::shared_ptr<Frame> secondary_frame, float fade_alpha)
+{
+       const FadeChain &fade_chain = interleaved_fade_chains[secondary_frame->is_semiplanar];
+       {
+               YCbCrFormat format_copy = ycbcr_format;
+               format_copy.chroma_subsampling_x = 1;
+               format_copy.chroma_subsampling_y = 1;
+               fade_chain.input[0]->change_ycbcr_format(format_copy);
+
+               fade_chain.input[0]->set_width(1280);  // FIXME
+               fade_chain.input[0]->set_height(720);
+               fade_chain.input[0]->set_texture_num(0, tex);
+
+               glTextureParameteri(tex, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+               glTextureParameteri(tex, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+               glTextureParameteri(tex, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
+               glTextureParameteri(tex, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
+       }
+       setup_input_for_frame(secondary_frame, ycbcr_format, fade_chain.input[1]);
+       bool ok = fade_chain.mix_effect->set_float("strength_first", 1.0f - fade_alpha);
+       ok &= fade_chain.mix_effect->set_float("strength_second", fade_alpha);
+       assert(ok);
+       return fade_chain.chain.get();
+}
+
 void setup_input_for_frame(shared_ptr<Frame> frame, const YCbCrFormat &ycbcr_format, YCbCrInput *input)
 {
        YCbCrFormat format_copy = ycbcr_format;
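
Editor's note: the MixEffect parametrization used by both prepare_chain_for_fade variants is a plain linear crossfade; written out (a sketch of the blend the two strength parameters express, not code from the commit):

    // out = (1 - fade_alpha) * first_input + fade_alpha * second_input
    float crossfade(float first, float second, float fade_alpha)
    {
            return (1.0f - fade_alpha) * first + fade_alpha * second;
    }

Since the two strengths always sum to 1.0, the output stays within range and no renormalization is needed.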
diff --git a/ycbcr_converter.h b/ycbcr_converter.h
index 928ebed5950f8f8ecfff9ed70b24fcdf6daaabce..a7a6179269fbb60d82a4d24ec9129ec5fec93d21 100644
@@ -3,6 +3,7 @@
 
 #include <memory>
 
+#include <epoxy/gl.h>
 #include <movit/ycbcr_input.h>
 
 namespace movit {
@@ -20,12 +21,17 @@ struct YCbCrConverter {
 public:
        enum OutputMode {
                OUTPUT_TO_RGBA,         // One texture (bottom-left origin): RGBA
+               OUTPUT_TO_SEMIPLANAR,   // Two textures (top-left origin):   Y, CbCr
                OUTPUT_TO_DUAL_YCBCR    // Two textures (top-left origin):   Y'CbCr, Y'CbCr
        };
        YCbCrConverter(OutputMode output_mode, movit::ResourcePool *resource_pool);
 
        // Returns the appropriate chain for rendering.
        movit::EffectChain *prepare_chain_for_conversion(std::shared_ptr<Frame> frame);
+       movit::EffectChain *prepare_chain_for_fade(std::shared_ptr<Frame> frame, std::shared_ptr<Frame> secondary_frame, float fade_alpha);
+
+       // <tex> must be interleaved Y'CbCr.
+       movit::EffectChain *prepare_chain_for_fade_from_texture(GLuint tex, std::shared_ptr<Frame> secondary_frame, float fade_alpha);
 
 private:
        movit::YCbCrFormat ycbcr_format;
@@ -34,6 +40,19 @@ private:
        // TODO: Have a separate version with ResampleEffect, for scaling?
        std::unique_ptr<movit::EffectChain> planar_chain, semiplanar_chain;
        movit::YCbCrInput *ycbcr_planar_input, *ycbcr_semiplanar_input;
+
+       // These do fades, parametrized on whether the two inputs are planar
+       // or semiplanar.
+       struct FadeChain {
+               std::unique_ptr<movit::EffectChain> chain;
+               movit::YCbCrInput *input[2];
+               movit::MixEffect *mix_effect;
+       };
+       FadeChain fade_chains[2][2];
+
+       // These do fades, where the first input is interleaved and the second is
+       // either planar or semiplanar.
+       FadeChain interleaved_fade_chains[2];
 };
 
 // TODO: make private
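
Editor's note: for reference, the lookup convention the two arrays above imply, mirroring the .cpp (the local variable names here are ours):

    // Both inputs decoded from JPEGs (each either planar or semiplanar):
    const FadeChain &fade_chain =
            fade_chains[frame->is_semiplanar][secondary_frame->is_semiplanar];

    // First input is already an interleaved Y'CbCr texture on the GPU
    // (the interpolation output); only the second input varies:
    const FadeChain &interleaved_chain =
            interleaved_fade_chains[secondary_frame->is_semiplanar];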