// but if so, the threads' contexts need to be set up to share resources, since
// the EffectChain holds textures and other OpenGL objects that are tied to the
// context.
+//
+// Memory management (only relevant if you use multiple contexts):
+// See corresponding comment in resource_pool.h. This holds even if you don't
+// allocate your own ResourcePool, but let EffectChain hold its own.
#include <epoxy/gl.h>
#include <stdio.h>
+#include <list>
#include <map>
#include <set>
#include <string>
#include <vector>
+#include <Eigen/Core>
+#include "effect.h"
#include "image_format.h"
+#include "ycbcr.h"
namespace movit {
OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED,
};
+// RGBA output is nearly always packed; Y'CbCr, however, is often planar
+// due to chroma subsampling. This enum controls how add_ycbcr_output()
+// distributes the color channels between the fragment shader outputs.
+// Obviously, anything except YCBCR_OUTPUT_INTERLEAVED will be meaningless
+// unless you use render_to_fbo() and have an FBO with multiple render
+// targets attached (the other outputs will be discarded).
+enum YCbCrOutputSplitting {
+	// Only one output: Store Y'CbCr into the first three output channels,
+	// respectively, plus alpha. This is also called “chunked” or
+	// “packed” mode.
+	YCBCR_OUTPUT_INTERLEAVED,
+
+	// Store Y' and alpha into the first output (in the red and alpha
+	// channels; effect on the others is undefined), and Cb and Cr into
+	// the first two channels of the second output. This is particularly
+	// useful if you want to end up in a format like NV12, where all the
+	// Y' samples come first and then Cb and Cr come interleaved afterwards.
+	// You will still need to do the chroma subsampling yourself to actually
+	// get down to NV12, though.
+	YCBCR_OUTPUT_SPLIT_Y_AND_CBCR,
+
+	// Store Y' and alpha into the first output, Cb into the first channel
+	// of the second output and Cr into the first channel of the third output.
+	// (Effect on the other channels is undefined.) Essentially gives you
+	// 4:4:4 planar, or “yuv444p”.
+	YCBCR_OUTPUT_PLANAR,
+};
+
+// Where (0,0) is taken to be in the output. If you want to render to an
+// OpenGL screen, you should keep the default of bottom-left, as that is
+// OpenGL's natural coordinate system. However, there are cases, such as if you
+// render to an FBO and read the pixels back into some other system, where
+// you'd want a top-left origin; if so, an additional flip step will be added
+// at the very end (but done in a vertex shader, so it will have zero extra
+// cost).
+//
+// Note that Movit's coordinate system in general consistently puts (0,0) in
+// the top left for _input_, no matter what you set as output origin.
+enum OutputOrigin {
+	// (0,0) at the bottom left; OpenGL's natural convention. No extra flip.
+	OUTPUT_ORIGIN_BOTTOM_LEFT,
+
+	// (0,0) at the top left; adds a final flip in a vertex shader (zero cost).
+	OUTPUT_ORIGIN_TOP_LEFT,
+};
+
+// Transformation to apply (if any) to pixel data in temporary buffers.
+// See set_intermediate_format() below for more information.
+enum FramebufferTransformation {
+	// The default; just store the value. This is what you usually want.
+	NO_FRAMEBUFFER_TRANSFORMATION,
+
+	// If the values are in linear light, store sqrt(x) to the framebuffer
+	// instead of x itself, of course undoing it with x² on read. Useful as
+	// a rough approximation to the sRGB curve. (If the values are not in
+	// linear light, just store them as-is.)
+	// Primarily intended for pairing with a GL_RGB10_A2 intermediate format;
+	// see the discussion at set_intermediate_format() below.
+	SQUARE_ROOT_FRAMEBUFFER_TRANSFORMATION,
+};
+
// A node in the graph; basically an effect and some associated information.
class Node {
public:
std::vector<Node *> outgoing_links;
std::vector<Node *> incoming_links;
+	// For unit tests only. Do not use from other code.
+	// Will contain an arbitrary choice if the node is in multiple phases.
+	Phase *containing_phase;
+
private:
// Logical size of the output of this effect, ie. the resolution
// you would get if you sampled it as a texture. If it is undefined
// they will be equal.
// NOTE(review): the sentence above reads as truncated (context lines
// appear to have been elided by the patch); consult the full header.
unsigned output_width, output_height;
-	// If output goes to RTT, which phase it is in (otherwise unset).
-	// This is a bit ugly; we should probably fix so that Phase takes other
-	// phases as inputs, instead of Node.
-	Phase *phase;
-
// If the effect has is_single_texture(), or if the output went to RTT
// and that texture has been bound to a sampler, the sampler number
// will be stored here.
// NOTE(review): the sampler-number member the comment above describes is
// not visible in this chunk; the fields below hold the node's output format.
Colorspace output_color_space;
GammaCurve output_gamma_curve;
AlphaType output_alpha_type;
+	bool needs_mipmaps; // Directly or indirectly.
+
+	// Set if this effect, and all effects consuming output from this node
+	// (in the same phase) have one_to_one_sampling() set.
+	bool one_to_one_sampling;
friend class EffectChain;
};
// A rendering phase; a single GLSL program rendering a single quad.
struct Phase {
+	Node *output_node;
+
GLuint glsl_program_num; // Owned by the resource_pool.
+
+	// Position and texcoord attribute indexes, although it doesn't matter
+	// which is which, because they contain the same data.
+	std::set<GLint> attribute_indexes;
+
// Whether the textures sampled by this phase must have mipmaps available
// (presumably aggregated from Node::needs_mipmaps — TODO confirm; the
// code that fills this in is outside this chunk).
bool input_needs_mipmaps;
// Inputs are only inputs from other phases (ie., those that come from RTT);
-	// input textures are not counted here.
-	std::vector<Node *> inputs;
-
+	// input textures are counted as part of <effects>.
+	std::vector<Phase *> inputs;
+	// Bound sampler numbers for each input. Redundant in a sense
+	// (it always corresponds to the index), but we need somewhere
+	// to hold the value for the uniform.
+	std::vector<int> input_samplers;
std::vector<Node *> effects; // In order.
unsigned output_width, output_height, virtual_output_width, virtual_output_height;
+	// Whether this phase is compiled as a compute shader, ie., the last effect is
+	// marked as one.
+	bool is_compute_shader;
+
+	// If <is_compute_shader>, which image unit the output buffer is bound to.
+	// This is used as source for a Uniform<int> below.
+	int outbuf_image_unit;
+
+	// These are used in transforming from unnormalized to normalized coordinates
+	// in compute shaders.
+	Point2D inv_output_size, output_texcoord_adjust;
+
// Identifier used to create unique variables in GLSL.
// Unique per-phase to increase cacheability of compiled shaders.
std::map<Node *, std::string> effect_ids;
+
+	// Uniforms for this phase; combined from all the effects.
+	// Note that the vec2/vec3/vec4 vectors deliberately hold Uniform<float>:
+	// each uniform's components are stored flattened.
+	std::vector<Uniform<int>> uniforms_image2d;
+	std::vector<Uniform<int>> uniforms_sampler2d;
+	std::vector<Uniform<bool>> uniforms_bool;
+	std::vector<Uniform<int>> uniforms_int;
+	std::vector<Uniform<float>> uniforms_float;
+	std::vector<Uniform<float>> uniforms_vec2;
+	std::vector<Uniform<float>> uniforms_vec3;
+	std::vector<Uniform<float>> uniforms_vec4;
+	std::vector<Uniform<Eigen::Matrix3d>> uniforms_mat3;
+
+	// For measurement of GPU time used.
+	std::list<GLuint> timer_query_objects_running; // Queries still in flight.
+	std::list<GLuint> timer_query_objects_free; // Retired; kept for reuse.
+	uint64_t time_elapsed_ns;
+	uint64_t num_measured_iterations;
};
class EffectChain {
public:
// Aspect: e.g. 16.0f, 9.0f for 16:9.
// resource_pool is a pointer to a ResourcePool with which to share shaders
- // and other resources (see resource_pool.h). If NULL (the default),
+ // and other resources (see resource_pool.h). If nullptr (the default),
// will create its own that is not shared with anything else. Does not take
// ownership of the passed-in ResourcePool, but will naturally take ownership
// of its own internal one if created.
- EffectChain(float aspect_nom, float aspect_denom, ResourcePool *resource_pool = NULL);
+ EffectChain(float aspect_nom, float aspect_denom, ResourcePool *resource_pool = nullptr);
~EffectChain();
// User API:
inputs.push_back(input3);
return add_effect(effect, inputs);
}
+	// Convenience overload: chain <effect> onto four inputs by forwarding
+	// to the vector-based add_effect().
+	Effect *add_effect(Effect *effect, Effect *input1, Effect *input2, Effect *input3, Effect *input4) {
+		const std::vector<Effect *> inputs{input1, input2, input3, input4};
+		return add_effect(effect, inputs);
+	}
+	// Convenience overload: chain <effect> onto five inputs by forwarding
+	// to the vector-based add_effect().
+	Effect *add_effect(Effect *effect, Effect *input1, Effect *input2, Effect *input3, Effect *input4, Effect *input5) {
+		const std::vector<Effect *> inputs{input1, input2, input3, input4, input5};
+		return add_effect(effect, inputs);
+	}
Effect *add_effect(Effect *effect, const std::vector<Effect *> &inputs);
+ // Adds an RGBA output. Note that you can have at most one RGBA output and two
+ // Y'CbCr outputs (see below for details).
void add_output(const ImageFormat &format, OutputAlphaFormat alpha_format);
+	// Adds a Y'CbCr output. Note that you can have at most two Y'CbCr
+	// outputs, and they must have the same <ycbcr_format> and <output_type>.
+ // (This limitation may be lifted in the future, to allow e.g. simultaneous
+ // 8- and 10-bit output. Currently, multiple Y'CbCr outputs are only
+ // useful in some very limited circumstances, like if one texture goes
+ // to some place you cannot easily read from later.)
+ //
+ // Only 4:4:4 output is supported due to fragment shader limitations,
+ // so chroma_subsampling_x and chroma_subsampling_y must both be 1.
+	// <output_type> should match the data type of the FBO you are rendering to,
+ // so that if you use 16-bit output (GL_UNSIGNED_SHORT), you will get
+ // 8-, 10- or 12-bit output correctly as determined by <ycbcr_format.num_levels>.
+ // Using e.g. ycbcr_format.num_levels == 1024 with GL_UNSIGNED_BYTE is
+ // nonsensical and invokes undefined behavior.
+ //
+ // If you have both RGBA and Y'CbCr output(s), the RGBA output will come
+ // in the last draw buffer. Also, <format> and <alpha_format> must be
+ // identical between the two.
+ void add_ycbcr_output(const ImageFormat &format, OutputAlphaFormat alpha_format,
+ const YCbCrFormat &ycbcr_format,
+ YCbCrOutputSplitting output_splitting = YCBCR_OUTPUT_INTERLEAVED,
+ GLenum output_type = GL_UNSIGNED_BYTE);
+
+ // Change Y'CbCr output format. (This can be done also after finalize()).
+ // Note that you are not allowed to change subsampling parameters;
+ // however, you can change the color space parameters, ie.,
+ // luma_coefficients, full_range and num_levels.
+ void change_ycbcr_output_format(const YCbCrFormat &ycbcr_format);
+
// Set number of output bits, to scale the dither.
// 8 is the right value for most outputs.
+ //
+ // Special note for 10- and 12-bit Y'CbCr packed into GL_UNSIGNED_SHORT:
+ // This is relative to the actual output, not the logical one, so you should
+ // specify 16 here, not 10 or 12.
+ //
// The default, 0, is a special value that means no dither.
void set_dither_bits(unsigned num_bits)
{
// Merely records the value; it is consumed by the dither stage
// (cf. add_dither_if_needed()). See the comment above for semantics.
this->num_dither_bits = num_bits;
}
- void finalize();
+	// Set where (0,0) is taken to be in the output. The default is
+	// OUTPUT_ORIGIN_BOTTOM_LEFT, which is usually what you want
+	// (see OutputOrigin above for more details).
+	// NOTE(review): presumably this must be called before finalize(),
+	// like set_intermediate_format() — confirm against the implementation.
+	void set_output_origin(OutputOrigin output_origin)
+	{
+		this->output_origin = output_origin;
+	}
+ // Set intermediate format for framebuffers used when we need to bounce
+ // to a temporary texture. The default, GL_RGBA16F, is good for most uses;
+ // it is precise, has good range, and is relatively efficient. However,
+ // if you need even more speed and your chain can do with some loss of
+ // accuracy, you can change the format here (before calling finalize).
+ // Calculations between bounce buffers are still in 32-bit floating-point
+ // no matter what you specify.
+ //
+ // Of special interest is GL_SRGB8_ALPHA8, which stores sRGB-encoded RGB
+ // and linear alpha; this is half the memory bandwidth of GL_RGBA16F,
+ // while retaining reasonable precision for typical image data. It will,
+ // however, cause some gamut clipping if your colorspace is far from sRGB,
+ // as it cannot represent values outside [0,1]. NOTE: If you construct
+ // a chain where you end up bouncing pixels in non-linear light
+ // (gamma different from GAMMA_LINEAR), this will be the wrong thing.
+ // However, it's hard to see how this could happen in a non-contrived
+ // chain; few effects ever need texture bounce or resizing without also
+ // combining multiple pixels, which really needs linear light and thus
+ // triggers a conversion before the bounce.
+ //
+ // If you don't need alpha (or can do with very little of it), GL_RGB10_A2
+ // is even better, as it has two more bits for each color component. There
+ // is no GL_SRGB10, unfortunately, so on its own, it is somewhat worse than
+ // GL_SRGB8, but you can set <transformation> to SQUARE_ROOT_FRAMEBUFFER_TRANSFORMATION,
+ // and sqrt(x) will be stored instead of x. This is a rough approximation to
+ // the sRGB curve, and reduces maximum error (in sRGB distance) by almost an
+ // order of magnitude, well below what you can get from 8-bit true sRGB.
+ // (Note that this strategy avoids the problem with bounced non-linear data
+ // above, since the square root is turned off in that case.) However, texture
+ // filtering will happen on the transformed values, so if you have heavy
+ // downscaling or the likes (e.g. mipmaps), you could get subtly bad results.
+ // You'll need to see which of the two that works the best for you in practice.
+	void set_intermediate_format(
+		GLenum intermediate_format,
+		FramebufferTransformation transformation = NO_FRAMEBUFFER_TRANSFORMATION)
+	{
+		// Just records the values; per the comment above, call this
+		// before finalize().
+		this->intermediate_format = intermediate_format;
+		this->intermediate_transformation = transformation;
+	}
+
+ void finalize();
- //void render(unsigned char *src, unsigned char *dst);
+ // Measure the GPU time used for each actual phase during rendering.
+ // Note that this is only available if GL_ARB_timer_query
+ // (or, equivalently, OpenGL 3.3) is available. Also note that measurement
+ // will incur a performance cost, as we wait for the measurements to
+ // complete at the end of rendering.
+ void enable_phase_timing(bool enable);
+ void reset_phase_timing();
+ void print_phase_timing();
+
+ // Note: If you already know the width and height of the viewport,
+ // calling render_to_fbo() directly will be slightly more efficient,
+ // as it saves it from getting it from OpenGL.
void render_to_screen()
{
render_to_fbo(0, 0, 0);
Effect *last_added_effect() {
if (nodes.empty()) {
- return NULL;
+ return nullptr;
} else {
return nodes.back()->effect;
}
// single-sampler input, or from an RTT texture.
GLenum get_input_sampler(Node *node, unsigned input_num) const;
+	// Whether input <input_num> of <node> corresponds to a single sampler
+	// (see get_input_sampler()). Normally, you should not need to call this;
+	// however, if the input Effect has set override_texture_bounce(),
+	// this will return false, and you could be flexible and check it first
+	// if you want.
+	// NOTE(review): the comment describes a yes/no query ("will return
+	// false"), so the GLenum return type looks wrong — presumably it should
+	// be bool. Confirm against the out-of-chunk definition before changing.
+	GLenum has_input_sampler(Node *node, unsigned input_num) const;
+
// Get the current resource pool assigned to this EffectChain.
// Primarily to let effects allocate textures as needed.
// Any resources you get from the pool must be returned to the pool
// output gamma different from GAMMA_LINEAR.
void find_all_nonlinear_inputs(Node *effect, std::vector<Node *> *nonlinear_inputs);
- // Create a GLSL program computing the given effects in order.
- Phase *compile_glsl_program(const std::vector<Node *> &inputs,
- const std::vector<Node *> &effects);
+ // Create a GLSL program computing the effects for this phase in order.
+ void compile_glsl_program(Phase *phase);
// Create all GLSL programs needed to compute the given effect, and all outputs
- // that depends on it (whenever possible).
- void construct_glsl_programs(Node *output);
+ // that depend on it (whenever possible). Returns the phase that has <output>
+ // as the last effect. Also pushes all phases in order onto <phases>.
+ Phase *construct_phase(Node *output, std::map<Node *, Phase *> *completed_effects);
+
+ // Execute one phase, ie. set up all inputs, effects and outputs, and render the quad.
+ void execute_phase(Phase *phase, bool last_phase,
+ std::map<Phase *, GLuint> *output_textures,
+ std::set<Phase *> *generated_mipmaps);
+
+ // Set up uniforms for one phase. The program must already be bound.
+ void setup_uniforms(Phase *phase);
+
+ // Set up the given sampler number for sampling from an RTT texture.
+ void setup_rtt_sampler(int sampler_num, bool use_mipmaps);
// Output the current graph to the given file in a Graphviz-compatible format;
// only useful for debugging.
void fix_internal_gamma_by_asking_inputs(unsigned step);
void fix_internal_gamma_by_inserting_nodes(unsigned step);
void fix_output_gamma();
+ void add_ycbcr_conversion_if_needed();
void add_dither_if_needed();
+ void add_dummy_effect_if_needed();
float aspect_nom, aspect_denom;
ImageFormat output_format;
OutputAlphaFormat output_alpha_format;
+ bool output_color_rgba;
+ int num_output_color_ycbcr; // Max 2.
+ YCbCrFormat output_ycbcr_format; // If num_output_color_ycbcr is > 0.
+ GLenum output_ycbcr_type; // If num_output_color_ycbcr is > 0.
+	YCbCrOutputSplitting output_ycbcr_splitting[2]; // Element N is used if num_output_color_ycbcr is > N.
+
std::vector<Node *> nodes;
std::map<Effect *, Node *> node_map;
Effect *dither_effect;
+ Node *ycbcr_conversion_effect_node;
std::vector<Input *> inputs; // Also contained in nodes.
std::vector<Phase *> phases;
+ GLenum intermediate_format;
+ FramebufferTransformation intermediate_transformation;
unsigned num_dither_bits;
+ OutputOrigin output_origin;
bool finalized;
+ GLuint vbo; // Contains vertex and texture coordinate data.
ResourcePool *resource_pool;
bool owns_resource_pool;
+
+ bool do_phase_timing;
};
} // namespace movit