#ifndef _MOVIT_FLAT_INPUT_H
#define _MOVIT_FLAT_INPUT_H 1
-#include <GL/glew.h>
+#include <epoxy/gl.h>
#include <assert.h>
#include <string>
#include "effect_chain.h"
#include "fp16.h"
#include "image_format.h"
-#include "init.h"
#include "input.h"
namespace movit {
virtual std::string effect_type_id() const { return "FlatInput"; }
virtual bool can_output_linear_gamma() const {
- return (movit_srgb_textures_supported &&
- type == GL_UNSIGNED_BYTE &&
+ // On desktop OpenGL, there's also GL_SLUMINANCE8 which could give us
+ // support for single-channel sRGB decoding, but it's not supported
+ // on GLES, and we're already actively rewriting single-channel inputs
+ // to GL_RED (even on desktop), so we stick to 3- and 4-channel inputs.
+ return (type == GL_UNSIGNED_BYTE &&
+ (pixel_format == FORMAT_RGB ||
+ pixel_format == FORMAT_RGBA_POSTMULTIPLIED_ALPHA) &&
(image_format.gamma_curve == GAMMA_LINEAR ||
image_format.gamma_curve == GAMMA_sRGB));
}
virtual AlphaHandling alpha_handling() const {
switch (pixel_format) {
case FORMAT_RGBA_PREMULTIPLIED_ALPHA:
- case FORMAT_BGRA_PREMULTIPLIED_ALPHA:
return INPUT_AND_OUTPUT_PREMULTIPLIED_ALPHA;
case FORMAT_RGBA_POSTMULTIPLIED_ALPHA:
- case FORMAT_BGRA_POSTMULTIPLIED_ALPHA:
return OUTPUT_POSTMULTIPLIED_ALPHA;
+ case FORMAT_R:
case FORMAT_RG:
case FORMAT_RGB:
- case FORMAT_BGR:
- case FORMAT_GRAYSCALE:
return OUTPUT_BLANK_ALPHA;
default:
assert(false);
void invalidate_pixel_data();
+ // Note: This also sets the pitch equal to the new width, so if you use
+ // a custom pitch, you will need to re-set it after this call.
+ void set_width(unsigned width)
+ {
+ assert(width != 0);
+ this->pitch = this->width = width;
+ invalidate_pixel_data();
+ }
+
+ void set_height(unsigned height)
+ {
+ assert(height != 0);
+ this->height = height;
+ invalidate_pixel_data();
+ }
+
void set_pitch(unsigned pitch) {
+ assert(pitch != 0);
this->pitch = pitch;
invalidate_pixel_data();
}
+ // Tells the input to use the given OpenGL texture as pixel data.
+ // This is useful if you want to share the same texture between multiple
+ // EffectChain instances, or if you can somehow get the data into a texture more
+ // efficiently than through a normal upload (e.g. a video codec decoding straight
+ // into a texture). Note that you are responsible for setting the right sampler
+ // parameters (e.g. clamp-to-edge) yourself, as well as for generating any
+ // mipmaps if they are needed.
+ //
+ // NOTE: The input does not take ownership of this texture; you are responsible
+ // for releasing it yourself. In particular, if you call invalidate_pixel_data()
+ // or anything that calls it, the texture will silently be removed from the input.
+ void set_texture_num(GLuint texture_num)
+ {
+ possibly_release_texture();
+ this->texture_num = texture_num;
+ this->owns_texture = false;
+ }
+
virtual void inform_added(EffectChain *chain)
{
resource_pool = chain->get_resource_pool();
}
private:
+ // Release the texture if we have any, and it is owned by us.
+ void possibly_release_texture();
+
ImageFormat image_format;
MovitPixelFormat pixel_format;
GLenum type;
GLuint pbo, texture_num;
int output_linear_gamma, needs_mipmaps;
unsigned width, height, pitch;
+ bool owns_texture;
const void *pixel_data;
ResourcePool *resource_pool;
+ bool fixup_swap_rb, fixup_red_to_grayscale;
+ GLint uniform_tex;
};
} // namespace movit