1 #ifndef _MOVIT_EFFECT_CHAIN_H
2 #define _MOVIT_EFFECT_CHAIN_H 1
4 // An EffectChain is the largest basic entity in Movit; it contains everything
// needed to connect a series of effects, from inputs to outputs, and render
6 // them. Generally you set up your effect chain once and then call its render
7 // functions once per frame; setting one up can be relatively expensive,
8 // but rendering is fast.
10 // Threading considerations: EffectChain is “thread-compatible”; you can use
11 // different EffectChains in multiple threads at the same time (assuming the
12 // threads do not use the same OpenGL context, but this is a good idea anyway),
13 // but you may not use one EffectChain from multiple threads simultaneously.
14 // You _are_ allowed to use one EffectChain from multiple threads as long as
15 // you only use it from one at a time (possibly by doing your own locking),
16 // but if so, the threads' contexts need to be set up to share resources, since
// the EffectChain holds textures and other OpenGL objects that are tied to the
// context.
20 // Memory management (only relevant if you use multiple contexts):
21 // See corresponding comment in resource_pool.h. This holds even if you don't
22 // allocate your own ResourcePool, but let EffectChain hold its own.
34 #include "image_format.h"
44 // For internal use within Node.
52 // Whether you want pre- or postmultiplied alpha in the output
53 // (see effect.h for a discussion of pre- versus postmultiplied alpha).
54 enum OutputAlphaFormat {
55 OUTPUT_ALPHA_FORMAT_PREMULTIPLIED,
56 OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED,
59 // RGBA output is nearly always packed; Y'CbCr, however, is often planar
60 // due to chroma subsampling. This enum controls how add_ycbcr_output()
61 // distributes the color channels between the fragment shader outputs.
62 // Obviously, anything except YCBCR_OUTPUT_INTERLEAVED will be meaningless
63 // unless you use render_to_fbo() and have an FBO with multiple render
64 // targets attached (the other outputs will be discarded).
65 enum YCbCrOutputSplitting {
66 // Only one output: Store Y'CbCr into the first three output channels,
67 // respectively, plus alpha. This is also called “chunked” or
69 YCBCR_OUTPUT_INTERLEAVED,
71 // Store Y' and alpha into the first output (in the red and alpha
72 // channels; effect to the others is undefined), and Cb and Cr into
73 // the first two channels of the second output. This is particularly
74 // useful if you want to end up in a format like NV12, where all the
// Y' samples come first and then Cb and Cr come interleaved afterwards.
76 // You will still need to do the chroma subsampling yourself to actually
77 // get down to NV12, though.
78 YCBCR_OUTPUT_SPLIT_Y_AND_CBCR,
80 // Store Y' and alpha into the first output, Cb into the first channel
81 // of the second output and Cr into the first channel of the third output.
82 // (Effect on the other channels is undefined.) Essentially gives you
// 4:4:4 planar, or “yuv444p”.
87 // Where (0,0) is taken to be in the output. If you want to render to an
88 // OpenGL screen, you should keep the default of bottom-left, as that is
89 // OpenGL's natural coordinate system. However, there are cases, such as if you
90 // render to an FBO and read the pixels back into some other system, where
91 // you'd want a top-left origin; if so, an additional flip step will be added
// at the very end (but done in a vertex shader, so it will have zero extra
// cost).
95 // Note that Movit's coordinate system in general consistently puts (0,0) in
96 // the top left for _input_, no matter what you set as output origin.
98 OUTPUT_ORIGIN_BOTTOM_LEFT,
99 OUTPUT_ORIGIN_TOP_LEFT,
102 // Transformation to apply (if any) to pixel data in temporary buffers.
103 // See set_intermediate_format() below for more information.
104 enum FramebufferTransformation {
105 // The default; just store the value. This is what you usually want.
106 NO_FRAMEBUFFER_TRANSFORMATION,
108 // If the values are in linear light, store sqrt(x) to the framebuffer
109 // instead of x itself, of course undoing it with x² on read. Useful as
110 // a rough approximation to the sRGB curve. (If the values are not in
111 // linear light, just store them as-is.)
112 SQUARE_ROOT_FRAMEBUFFER_TRANSFORMATION,
115 // A node in the graph; basically an effect and some associated information.
121 // Edges in the graph (forward and backward).
122 std::vector<Node *> outgoing_links;
123 std::vector<Node *> incoming_links;
125 // For unit tests only. Do not use from other code.
126 // Will contain an arbitrary choice if the node is in multiple phases.
127 Phase *containing_phase;
130 // Logical size of the output of this effect, ie. the resolution
131 // you would get if you sampled it as a texture. If it is undefined
132 // (since the inputs differ in resolution), it will be 0x0.
133 // If both this and output_texture_{width,height} are set,
134 // they will be equal.
135 unsigned output_width, output_height;
137 // If the effect has is_single_texture(), or if the output went to RTT
138 // and that texture has been bound to a sampler, the sampler number
139 // will be stored here.
141 // TODO: Can an RTT texture be used as inputs to multiple effects
142 // within the same phase? If so, we have a problem with modifying
143 // sampler state here.
144 int bound_sampler_num;
146 // Used during the building of the effect chain.
147 Colorspace output_color_space;
148 GammaCurve output_gamma_curve;
149 AlphaType output_alpha_type;
150 bool needs_mipmaps; // Directly or indirectly.
152 // Set if this effect, and all effects consuming output from this node
153 // (in the same phase) have one_to_one_sampling() set.
154 bool one_to_one_sampling;
156 friend class EffectChain;
159 // A rendering phase; a single GLSL program rendering a single quad.
163 GLuint glsl_program_num; // Owned by the resource_pool.
165 // Position and texcoord attribute indexes, although it doesn't matter
166 // which is which, because they contain the same data.
167 std::set<GLint> attribute_indexes;
169 bool input_needs_mipmaps;
171 // Inputs are only inputs from other phases (ie., those that come from RTT);
172 // input textures are counted as part of <effects>.
173 std::vector<Phase *> inputs;
174 // Bound sampler numbers for each input. Redundant in a sense
175 // (it always corresponds to the index), but we need somewhere
176 // to hold the value for the uniform.
177 std::vector<int> input_samplers;
178 std::vector<Node *> effects; // In order.
179 unsigned output_width, output_height, virtual_output_width, virtual_output_height;
181 // Identifier used to create unique variables in GLSL.
182 // Unique per-phase to increase cacheability of compiled shaders.
183 std::map<Node *, std::string> effect_ids;
185 // Uniforms for this phase; combined from all the effects.
186 std::vector<Uniform<int> > uniforms_sampler2d;
187 std::vector<Uniform<bool> > uniforms_bool;
188 std::vector<Uniform<int> > uniforms_int;
189 std::vector<Uniform<float> > uniforms_float;
190 std::vector<Uniform<float> > uniforms_vec2;
191 std::vector<Uniform<float> > uniforms_vec3;
192 std::vector<Uniform<float> > uniforms_vec4;
193 std::vector<Uniform<Eigen::Matrix3d> > uniforms_mat3;
195 // For measurement of GPU time used.
196 std::list<GLuint> timer_query_objects_running;
197 std::list<GLuint> timer_query_objects_free;
198 uint64_t time_elapsed_ns;
199 uint64_t num_measured_iterations;
204 // Aspect: e.g. 16.0f, 9.0f for 16:9.
205 // resource_pool is a pointer to a ResourcePool with which to share shaders
206 // and other resources (see resource_pool.h). If NULL (the default),
207 // will create its own that is not shared with anything else. Does not take
208 // ownership of the passed-in ResourcePool, but will naturally take ownership
209 // of its own internal one if created.
210 EffectChain(float aspect_nom, float aspect_denom, ResourcePool *resource_pool = NULL);
214 // input, effects, output, finalize need to come in that specific order.
216 // EffectChain takes ownership of the given input.
217 // input is returned back for convenience.
218 Input *add_input(Input *input);
220 // EffectChain takes ownership of the given effect.
221 // effect is returned back for convenience.
222 Effect *add_effect(Effect *effect) {
223 return add_effect(effect, last_added_effect());
225 Effect *add_effect(Effect *effect, Effect *input) {
226 std::vector<Effect *> inputs;
227 inputs.push_back(input);
228 return add_effect(effect, inputs);
230 Effect *add_effect(Effect *effect, Effect *input1, Effect *input2) {
231 std::vector<Effect *> inputs;
232 inputs.push_back(input1);
233 inputs.push_back(input2);
234 return add_effect(effect, inputs);
236 Effect *add_effect(Effect *effect, Effect *input1, Effect *input2, Effect *input3) {
237 std::vector<Effect *> inputs;
238 inputs.push_back(input1);
239 inputs.push_back(input2);
240 inputs.push_back(input3);
241 return add_effect(effect, inputs);
243 Effect *add_effect(Effect *effect, Effect *input1, Effect *input2, Effect *input3, Effect *input4) {
244 std::vector<Effect *> inputs;
245 inputs.push_back(input1);
246 inputs.push_back(input2);
247 inputs.push_back(input3);
248 inputs.push_back(input4);
249 return add_effect(effect, inputs);
251 Effect *add_effect(Effect *effect, Effect *input1, Effect *input2, Effect *input3, Effect *input4, Effect *input5) {
252 std::vector<Effect *> inputs;
253 inputs.push_back(input1);
254 inputs.push_back(input2);
255 inputs.push_back(input3);
256 inputs.push_back(input4);
257 inputs.push_back(input5);
258 return add_effect(effect, inputs);
260 Effect *add_effect(Effect *effect, const std::vector<Effect *> &inputs);
262 // Adds an RGBA output. Note that you can have at most one RGBA output and two
263 // Y'CbCr outputs (see below for details).
264 void add_output(const ImageFormat &format, OutputAlphaFormat alpha_format);
266 // Adds an YCbCr output. Note that you can only have at most two Y'CbCr
267 // outputs, and they must have the same <ycbcr_format> and <type>.
268 // (This limitation may be lifted in the future, to allow e.g. simultaneous
269 // 8- and 10-bit output. Currently, multiple Y'CbCr outputs are only
270 // useful in some very limited circumstances, like if one texture goes
271 // to some place you cannot easily read from later.)
273 // Only 4:4:4 output is supported due to fragment shader limitations,
274 // so chroma_subsampling_x and chroma_subsampling_y must both be 1.
275 // <type> should match the data type of the FBO you are rendering to,
276 // so that if you use 16-bit output (GL_UNSIGNED_SHORT), you will get
277 // 8-, 10- or 12-bit output correctly as determined by <ycbcr_format.num_levels>.
278 // Using e.g. ycbcr_format.num_levels == 1024 with GL_UNSIGNED_BYTE is
279 // nonsensical and invokes undefined behavior.
281 // If you have both RGBA and Y'CbCr output(s), the RGBA output will come
282 // in the last draw buffer. Also, <format> and <alpha_format> must be
283 // identical between the two.
284 void add_ycbcr_output(const ImageFormat &format, OutputAlphaFormat alpha_format,
285 const YCbCrFormat &ycbcr_format,
286 YCbCrOutputSplitting output_splitting = YCBCR_OUTPUT_INTERLEAVED,
287 GLenum output_type = GL_UNSIGNED_BYTE);
289 // Change Y'CbCr output format. (This can be done also after finalize()).
290 // Note that you are not allowed to change subsampling parameters;
291 // however, you can change the color space parameters, ie.,
292 // luma_coefficients, full_range and num_levels.
293 void change_ycbcr_output_format(const YCbCrFormat &ycbcr_format);
295 // Set number of output bits, to scale the dither.
296 // 8 is the right value for most outputs.
298 // Special note for 10- and 12-bit Y'CbCr packed into GL_UNSIGNED_SHORT:
299 // This is relative to the actual output, not the logical one, so you should
300 // specify 16 here, not 10 or 12.
302 // The default, 0, is a special value that means no dither.
303 void set_dither_bits(unsigned num_bits)
305 this->num_dither_bits = num_bits;
308 // Set where (0,0) is taken to be in the output. The default is
309 // OUTPUT_ORIGIN_BOTTOM_LEFT, which is usually what you want
310 // (see OutputOrigin above for more details).
311 void set_output_origin(OutputOrigin output_origin)
313 this->output_origin = output_origin;
316 // Set intermediate format for framebuffers used when we need to bounce
317 // to a temporary texture. The default, GL_RGBA16F, is good for most uses;
318 // it is precise, has good range, and is relatively efficient. However,
319 // if you need even more speed and your chain can do with some loss of
320 // accuracy, you can change the format here (before calling finalize).
321 // Calculations between bounce buffers are still in 32-bit floating-point
322 // no matter what you specify.
324 // Of special interest is GL_SRGB8_ALPHA8, which stores sRGB-encoded RGB
325 // and linear alpha; this is half the memory bandwidth of GL_RGBA16F,
326 // while retaining reasonable precision for typical image data. It will,
327 // however, cause some gamut clipping if your colorspace is far from sRGB,
328 // as it cannot represent values outside [0,1]. NOTE: If you construct
329 // a chain where you end up bouncing pixels in non-linear light
330 // (gamma different from GAMMA_LINEAR), this will be the wrong thing.
331 // However, it's hard to see how this could happen in a non-contrived
332 // chain; few effects ever need texture bounce or resizing without also
333 // combining multiple pixels, which really needs linear light and thus
334 // triggers a conversion before the bounce.
336 // If you don't need alpha (or can do with very little of it), GL_RGB10_A2
337 // is even better, as it has two more bits for each color component. There
338 // is no GL_SRGB10, unfortunately, so on its own, it is somewhat worse than
339 // GL_SRGB8, but you can set <transformation> to SQUARE_ROOT_FRAMEBUFFER_TRANSFORMATION,
340 // and sqrt(x) will be stored instead of x. This is a rough approximation to
341 // the sRGB curve, and reduces maximum error (in sRGB distance) by almost an
342 // order of magnitude, well below what you can get from 8-bit true sRGB.
343 // (Note that this strategy avoids the problem with bounced non-linear data
344 // above, since the square root is turned off in that case.) However, texture
345 // filtering will happen on the transformed values, so if you have heavy
346 // downscaling or the likes (e.g. mipmaps), you could get subtly bad results.
347 // You'll need to see which of the two that works the best for you in practice.
348 void set_intermediate_format(
349 GLenum intermediate_format,
350 FramebufferTransformation transformation = NO_FRAMEBUFFER_TRANSFORMATION)
352 this->intermediate_format = intermediate_format;
353 this->intermediate_transformation = transformation;
358 // Measure the GPU time used for each actual phase during rendering.
359 // Note that this is only available if GL_ARB_timer_query
360 // (or, equivalently, OpenGL 3.3) is available. Also note that measurement
361 // will incur a performance cost, as we wait for the measurements to
362 // complete at the end of rendering.
363 void enable_phase_timing(bool enable);
364 void reset_phase_timing();
365 void print_phase_timing();
367 void render_to_screen()
369 render_to_fbo(0, 0, 0);
372 // Render the effect chain to the given FBO. If width=height=0, keeps
373 // the current viewport.
374 void render_to_fbo(GLuint fbo, unsigned width, unsigned height);
376 Effect *last_added_effect() {
380 return nodes.back()->effect;
384 // API for manipulating the graph directly. Intended to be used from
385 // effects and by EffectChain itself.
387 // Note that for nodes with multiple inputs, the order of calls to
388 // connect_nodes() will matter.
389 Node *add_node(Effect *effect);
390 void connect_nodes(Node *sender, Node *receiver);
391 void replace_receiver(Node *old_receiver, Node *new_receiver);
392 void replace_sender(Node *new_sender, Node *receiver);
393 void insert_node_between(Node *sender, Node *middle, Node *receiver);
394 Node *find_node_for_effect(Effect *effect) { return node_map[effect]; }
396 // Get the OpenGL sampler (GL_TEXTURE0, GL_TEXTURE1, etc.) for the
397 // input of the given node, so that one can modify the sampler state
398 // directly. Only valid to call during set_gl_state().
400 // Also, for this to be allowed, <node>'s effect must have
401 // needs_texture_bounce() set, so that it samples directly from a
402 // single-sampler input, or from an RTT texture.
403 GLenum get_input_sampler(Node *node, unsigned input_num) const;
405 // Whether input <input_num> of <node> corresponds to a single sampler
406 // (see get_input_sampler()). Normally, you should not need to call this;
407 // however, if the input Effect has set override_texture_bounce(),
408 // this will return false, and you could be flexible and check it first
410 GLenum has_input_sampler(Node *node, unsigned input_num) const;
412 // Get the current resource pool assigned to this EffectChain.
413 // Primarily to let effects allocate textures as needed.
414 // Any resources you get from the pool must be returned to the pool
415 // no later than in the Effect's destructor.
416 ResourcePool *get_resource_pool() { return resource_pool; }
419 // Make sure the output rectangle is at least large enough to hold
420 // the given input rectangle in both dimensions, and is of the
421 // current aspect ratio (aspect_nom/aspect_denom).
422 void size_rectangle_to_fit(unsigned width, unsigned height, unsigned *output_width, unsigned *output_height);
424 // Compute the input sizes for all inputs for all effects in a given phase,
425 // and inform the effects about the results.
426 void inform_input_sizes(Phase *phase);
428 // Determine the preferred output size of a given phase.
429 // Requires that all input phases (if any) already have output sizes set.
430 void find_output_size(Phase *phase);
432 // Find all inputs eventually feeding into this effect that have
433 // output gamma different from GAMMA_LINEAR.
434 void find_all_nonlinear_inputs(Node *effect, std::vector<Node *> *nonlinear_inputs);
436 // Create a GLSL program computing the effects for this phase in order.
437 void compile_glsl_program(Phase *phase);
439 // Create all GLSL programs needed to compute the given effect, and all outputs
440 // that depend on it (whenever possible). Returns the phase that has <output>
441 // as the last effect. Also pushes all phases in order onto <phases>.
442 Phase *construct_phase(Node *output, std::map<Node *, Phase *> *completed_effects);
444 // Execute one phase, ie. set up all inputs, effects and outputs, and render the quad.
445 void execute_phase(Phase *phase, bool last_phase,
446 std::map<Phase *, GLuint> *output_textures,
447 std::set<Phase *> *generated_mipmaps);
449 // Set up uniforms for one phase. The program must already be bound.
450 void setup_uniforms(Phase *phase);
452 // Set up the given sampler number for sampling from an RTT texture.
453 void setup_rtt_sampler(int sampler_num, bool use_mipmaps);
455 // Output the current graph to the given file in a Graphviz-compatible format;
456 // only useful for debugging.
457 void output_dot(const char *filename);
458 std::vector<std::string> get_labels_for_edge(const Node *from, const Node *to);
459 void output_dot_edge(FILE *fp,
460 const std::string &from_node_id,
461 const std::string &to_node_id,
462 const std::vector<std::string> &labels);
464 // Some of the graph algorithms assume that the nodes array is sorted
465 // topologically (inputs are always before outputs), but some operations
466 // (like graph rewriting) can change that. This function restores that order.
467 void sort_all_nodes_topologically();
469 // Do the actual topological sort. <nodes> must be a connected, acyclic subgraph;
470 // links that go to nodes not in the set will be ignored.
471 std::vector<Node *> topological_sort(const std::vector<Node *> &nodes);
473 // Utility function used by topological_sort() to do a depth-first search.
474 // The reason why we store nodes left to visit instead of a more conventional
475 // list of nodes to visit is that we want to be able to limit ourselves to
476 // a subgraph instead of all nodes. The set thus serves a dual purpose.
477 void topological_sort_visit_node(Node *node, std::set<Node *> *nodes_left_to_visit, std::vector<Node *> *sorted_list);
479 // Used during finalize().
480 void find_color_spaces_for_inputs();
481 void propagate_alpha();
482 void propagate_gamma_and_color_space();
483 Node *find_output_node();
485 bool node_needs_colorspace_fix(Node *node);
486 void fix_internal_color_spaces();
487 void fix_output_color_space();
489 bool node_needs_alpha_fix(Node *node);
490 void fix_internal_alpha(unsigned step);
491 void fix_output_alpha();
493 bool node_needs_gamma_fix(Node *node);
494 void fix_internal_gamma_by_asking_inputs(unsigned step);
495 void fix_internal_gamma_by_inserting_nodes(unsigned step);
496 void fix_output_gamma();
497 void add_ycbcr_conversion_if_needed();
498 void add_dither_if_needed();
500 float aspect_nom, aspect_denom;
501 ImageFormat output_format;
502 OutputAlphaFormat output_alpha_format;
504 bool output_color_rgba;
505 int num_output_color_ycbcr; // Max 2.
506 YCbCrFormat output_ycbcr_format; // If num_output_color_ycbcr is > 0.
507 GLenum output_ycbcr_type; // If num_output_color_ycbcr is > 0.
508 YCbCrOutputSplitting output_ycbcr_splitting[2]; // If num_output_color_ycbcr is > N.
510 std::vector<Node *> nodes;
511 std::map<Effect *, Node *> node_map;
512 Effect *dither_effect;
513 Node *ycbcr_conversion_effect_node;
515 std::vector<Input *> inputs; // Also contained in nodes.
516 std::vector<Phase *> phases;
518 GLenum intermediate_format;
519 FramebufferTransformation intermediate_transformation;
520 unsigned num_dither_bits;
521 OutputOrigin output_origin;
523 GLuint vbo; // Contains vertex and texture coordinate data.
525 ResourcePool *resource_pool;
526 bool owns_resource_pool;
528 bool do_phase_timing;
533 #endif // !defined(_MOVIT_EFFECT_CHAIN_H)