X-Git-Url: https://git.sesse.net/?p=movit;a=blobdiff_plain;f=ycbcr_input.h;h=bf6d800eeb0c918c1b535bd2f6bf2c65c0bfa354;hp=eb8d0ec335db4b90bb8b138f45b3c67ddb31fbde;hb=refs%2Fheads%2Fepoxy;hpb=a164e03033bce0a1fff8044468b12e600722b188

diff --git a/ycbcr_input.h b/ycbcr_input.h
index eb8d0ec..bf6d800 100644
--- a/ycbcr_input.h
+++ b/ycbcr_input.h
@@ -5,7 +5,7 @@
 // imprecisely, called “YUV”), which is typically what you get from a video decoder.
 // It upsamples planes as needed, using the default linear upsampling OpenGL gives you.
 
-#include <GL/glew.h>
+#include <epoxy/gl.h>
 #include <assert.h>
 #include <string>
 
@@ -14,6 +14,8 @@
 #include "image_format.h"
 #include "input.h"
 
+namespace movit {
+
 class ResourcePool;
 
 struct YCbCrFormat {
@@ -44,11 +46,6 @@ public:
 
 	virtual std::string effect_type_id() const { return "YCbCrInput"; }
 
-	// Create the texture itself. We cannot do this in the constructor,
-	// because we don't necessarily know all the settings (sRGB texture,
-	// mipmap generation) at that point.
-	void finalize();
-
 	virtual bool can_output_linear_gamma() const { return false; }
 	virtual AlphaHandling alpha_handling() const { return OUTPUT_BLANK_ALPHA; }
 
@@ -80,14 +77,12 @@ public:
 		invalidate_pixel_data();
 	}
 
-	void invalidate_pixel_data()
-	{
-		needs_update = true;
-	}
+	void invalidate_pixel_data();
 
 	void set_pitch(unsigned channel, unsigned pitch) {
 		assert(channel >= 0 && channel < 3);
 		this->pitch[channel] = pitch;
+		invalidate_pixel_data();
 	}
 
 	virtual void inform_added(EffectChain *chain)
@@ -99,7 +94,6 @@ private:
 	ImageFormat image_format;
 	YCbCrFormat ycbcr_format;
 	GLuint pbos[3], texture_num[3];
-	bool needs_update, finalized;
 
 	int needs_mipmaps;
 
@@ -109,4 +103,6 @@ private:
 	ResourcePool *resource_pool;
 };
 
+} // namespace movit
+
 #endif // !defined(_MOVIT_YCBCR_INPUT_H)
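
For orientation, here is a minimal, hedged usage sketch of how an application might feed decoded 4:2:0 planes into a YCbCrInput and hand it to an EffectChain. Only set_pitch(), invalidate_pixel_data(), the effect_type_id()/alpha_handling() overrides, and the new movit namespace come from this diff; the constructor signature, set_pixel_data(), the ImageFormat/YCbCrFormat fields not visible in the excerpt, the COLORSPACE_/GAMMA_/YCBCR_REC_709 enum values, the channel numbering, and the <movit/...> install paths are assumptions about the wider movit API and may not match this exact revision.

// Usage sketch under the assumptions stated above; not part of ycbcr_input.h itself.
#include <movit/effect_chain.h>   // Assumed installed header locations.
#include <movit/ycbcr_input.h>

using namespace movit;  // Matches the namespace introduced in this diff.

// Set up one YCbCr input for Rec. 709, limited-range, 4:2:0 material.
YCbCrInput *make_ycbcr_input(EffectChain *chain, unsigned width, unsigned height)
{
	ImageFormat image_format;
	image_format.color_space = COLORSPACE_REC_709;
	image_format.gamma_curve = GAMMA_REC_709;

	YCbCrFormat ycbcr_format;
	ycbcr_format.luma_coefficients = YCBCR_REC_709;
	ycbcr_format.full_range = false;
	ycbcr_format.chroma_subsampling_x = 2;  // 4:2:0: chroma is half-resolution
	ycbcr_format.chroma_subsampling_y = 2;  // in both dimensions.
	ycbcr_format.cb_x_position = 0.0f;      // MPEG-2-style chroma siting.
	ycbcr_format.cb_y_position = 0.5f;
	ycbcr_format.cr_x_position = 0.0f;
	ycbcr_format.cr_y_position = 0.5f;

	YCbCrInput *input = new YCbCrInput(image_format, ycbcr_format, width, height);
	chain->add_input(input);  // Assumption: the chain takes ownership of the input.
	return input;
}

// Per decoded frame: point the input at the new planes, honoring the decoder's
// row strides. Per this diff, set_pitch() invalidates the cached pixel data, and
// the pixel-data setters are expected to do the same, so the textures are
// re-uploaded on the next render.
void feed_frame(YCbCrInput *input,
                const unsigned char *y, const unsigned char *cb, const unsigned char *cr,
                unsigned luma_stride, unsigned chroma_stride)
{
	input->set_pitch(0, luma_stride);      // Channel 0 = Y (assumed ordering).
	input->set_pitch(1, chroma_stride);    // Channel 1 = Cb.
	input->set_pitch(2, chroma_stride);    // Channel 2 = Cr.
	input->set_pixel_data(0, y);
	input->set_pixel_data(1, cb);
	input->set_pixel_data(2, cr);
}

The relevant behavioral change in this diff: set_pitch() now calls invalidate_pixel_data(), so changing only the stride between frames is enough to force a re-upload, whereas before this change a stride change alone would not have marked the input dirty.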