git.sesse.net Git - nageru/blobdiff - video_stream.h
Do not try to show a frame until we've computed it (do it in a callback instead).
[nageru] / video_stream.h
index 925cace4c62f5465c7bfb4d21bd0e64aca17a3c3..c1b808e9baf069e0b6d65258fb6fe063ea1e60f2 100644 (file)
@@ -1,29 +1,33 @@
 #ifndef _VIDEO_STREAM_H
 #define _VIDEO_STREAM_H 1
 
-#include <stdint.h>
 #include <epoxy/gl.h>
+#include <stdint.h>
 
 extern "C" {
 #include <libavformat/avio.h>
 }
 
+#include "jpeg_frame_view.h"
+#include "ref_counted_gl_sync.h"
+
 #include <condition_variable>
 #include <deque>
+#include <functional>
+#include <movit/effect_chain.h>
+#include <movit/mix_effect.h>
+#include <movit/ycbcr_input.h>
 #include <mutex>
 #include <string>
 #include <thread>
 
-#include <movit/effect_chain.h>
-#include <movit/ycbcr_input.h>
-
-#include "ref_counted_gl_sync.h"
-
+class ChromaSubsampler;
 class DISComputeFlow;
 class Interpolate;
 class Mux;
 class QSurface;
 class QSurfaceFormat;
+class YCbCrConverter;
 
 class VideoStream {
 public:
@@ -32,11 +36,14 @@ public:
        void start();
        void stop();
 
-       void schedule_original_frame(int64_t output_pts, unsigned stream_idx, int64_t input_pts);
-       void schedule_interpolated_frame(int64_t output_pts, unsigned stream_idx, int64_t input_first_pts, int64_t input_second_pts, float alpha);
+       // “display_func” is called after the frame has been calculated (if needed)
+       // and has gone out to the stream.
+       void schedule_original_frame(int64_t output_pts, std::function<void()> &&display_func, unsigned stream_idx, int64_t input_pts);
+       void schedule_faded_frame(int64_t output_pts, std::function<void()> &&display_func, unsigned stream_idx, int64_t input_pts, int secondary_stream_idx, int64_t secondary_input_pts, float fade_alpha);
+       void schedule_interpolated_frame(int64_t output_pts, std::function<void()> &&display_func, unsigned stream_idx, int64_t input_first_pts, int64_t input_second_pts, float alpha, int secondary_stream_idx = -1, int64_t secondary_input_pts = -1, float fade_alpha = 0.0f);  // -1 = no secondary frame.
+       void schedule_refresh_frame(int64_t output_pts, std::function<void()> &&display_func);
 
 private:
-
        void encode_thread_func();
        std::thread encode_thread;
 
@@ -46,9 +53,16 @@ private:
        // Allocated at the very start; if we're empty, we start dropping frames
        // (so that we don't build up an infinite interpolation backlog).
        struct InterpolatedFrameResources {
-               GLuint input_tex;  // Layered (contains both input frames).
-               GLuint gray_tex;  // Same.
+               GLuint input_tex;  // Layered (contains both input frames), Y'CbCr.
+               GLuint gray_tex;  // Same, but Y only.
                GLuint input_fbos[2];  // For rendering to the two layers of input_tex.
+
+               // Destination textures and FBO if there is a fade.
+               GLuint fade_y_output_tex, fade_cbcr_output_tex;
+               GLuint fade_fbo;
+
+               GLuint cb_tex, cr_tex;  // Subsampled, final output.
+
                GLuint pbo;  // For reading the data back.
                void *pbo_contents;  // Persistently mapped.
        };
@@ -57,16 +71,23 @@ private:
 
        struct QueuedFrame {
                int64_t output_pts;
-               enum Type { ORIGINAL, INTERPOLATED } type;
+               enum Type { ORIGINAL, FADED, INTERPOLATED, FADED_INTERPOLATED, REFRESH } type;
                unsigned stream_idx;
-               int64_t input_first_pts;  // The only pts for original frames.  
+               int64_t input_first_pts;  // The only pts for original frames.
+
+               // For fades only (including fades against interpolated frames).
+               int secondary_stream_idx = -1;
+               int64_t secondary_input_pts;
 
                // For interpolated frames only.
                int64_t input_second_pts;
                float alpha;
                InterpolatedFrameResources resources;
                RefCountedGLsync fence;  // Set when the interpolated image is read back to the CPU.
-               GLuint flow_tex, output_tex;  // Released in the receiving thread; not really used for anything else.
+               GLuint flow_tex, output_tex, cbcr_tex;  // Released in the receiving thread; not really used for anything else.
+               JPEGID id;
+
+               std::function<void()> display_func;  // Called after the frame has been computed (if needed) and sent out to the stream.
        };
        std::deque<QueuedFrame> frame_queue;  // Under <queue_lock>.
        std::mutex queue_lock;
@@ -76,18 +97,15 @@ private:
        std::string stream_mux_header;
        bool seen_sync_markers = false;
 
-       QSurface *gl_surface;
-
-       // Effectively only converts from 4:2:2 to 4:4:4.
-       // TODO: Have a separate version with ResampleEffect, for scaling?
-       std::unique_ptr<movit::EffectChain> ycbcr_convert_chain;
-
-       movit::YCbCrInput *ycbcr_input;
-       movit::YCbCrFormat ycbcr_format;
+       std::unique_ptr<YCbCrConverter> ycbcr_converter;
+       std::unique_ptr<YCbCrConverter> ycbcr_semiplanar_converter;
 
        // Frame interpolation.
        std::unique_ptr<DISComputeFlow> compute_flow;
-       std::unique_ptr<Interpolate> interpolate;
+       std::unique_ptr<Interpolate> interpolate, interpolate_no_split;
+       std::unique_ptr<ChromaSubsampler> chroma_subsampler;
+
+       std::vector<uint8_t> last_frame;
 };
 
 #endif  // !defined(_VIDEO_STREAM_H)