// imprecisely, called “YUV”), which is typically what you get from a video decoder.
// It upsamples planes as needed, using the default linear upsampling OpenGL gives you.
-#include <GL/glew.h>
+#include <epoxy/gl.h>
#include <assert.h>
#include <string>
#include "effect.h"
+#include "effect_chain.h"
#include "image_format.h"
#include "input.h"
+namespace movit {
+
+class ResourcePool;
+
struct YCbCrFormat {
// Which formula for Y' to use.
YCbCrLumaCoefficients luma_coefficients;
virtual std::string effect_type_id() const { return "YCbCrInput"; }
- // Create the texture itself. We cannot do this in the constructor,
- // because we don't necessarily know all the settings (sRGB texture,
- // mipmap generation) at that point.
- void finalize();
-
virtual bool can_output_linear_gamma() const { return false; }
virtual AlphaHandling alpha_handling() const { return OUTPUT_BLANK_ALPHA; }
invalidate_pixel_data();
}
- void invalidate_pixel_data()
- {
- needs_update = true;
- }
+ void invalidate_pixel_data();
void set_pitch(unsigned channel, unsigned pitch) {
assert(channel >= 0 && channel < 3);
this->pitch[channel] = pitch;
+ invalidate_pixel_data();
+ }
+
+	// Called by the chain when this input is added; we grab the chain's
+	// ResourcePool so textures/PBOs can be allocated from the shared pool
+	// instead of being created ad hoc (NOTE(review): presumably replaces the
+	// removed finalize() path — confirm against effect_chain.h).
+	virtual void inform_added(EffectChain *chain)
+	{
+		resource_pool = chain->get_resource_pool();
+	}
private:
ImageFormat image_format;
YCbCrFormat ycbcr_format;
GLuint pbos[3], texture_num[3];
- bool needs_update, finalized;
int needs_mipmaps;
unsigned width, height, widths[3], heights[3];
const unsigned char *pixel_data[3];
unsigned pitch[3];
+ ResourcePool *resource_pool;
};
+} // namespace movit
+
#endif // !defined(_MOVIT_YCBCR_INPUT_H)