15 #include "alpha_division_effect.h"
16 #include "alpha_multiplication_effect.h"
17 #include "colorspace_conversion_effect.h"
18 #include "dither_effect.h"
20 #include "effect_chain.h"
21 #include "effect_util.h"
22 #include "gamma_compression_effect.h"
23 #include "gamma_expansion_effect.h"
26 #include "resource_pool.h"
28 #include "ycbcr_conversion_effect.h"
30 using namespace Eigen;
37 // An effect whose only purpose is to sit in a phase on its own and take the
38 // texture output from a compute shader and display it to the normal backbuffer
39 // (or any FBO). That phase can be skipped when rendering using render_to_textures().
40 class ComputeShaderOutputDisplayEffect : public Effect {
// Stateless; nothing to construct.
42 ComputeShaderOutputDisplayEffect() {}
43 string effect_type_id() const override { return "ComputeShaderOutputDisplayEffect"; }
// Pass-through: uses the stock identity fragment shader.
44 string output_fragment_shader() override { return read_file("identity.frag"); }
// Forces the upstream compute-shader phase to end in a texture we can sample.
45 bool needs_texture_bounce() const override { return true; }
// Construct an empty effect chain with the given output aspect ratio
// (aspect_nom / aspect_denom). If resource_pool is null, a private
// ResourcePool is created and owned (and freed) by this chain.
50 EffectChain::EffectChain(float aspect_nom, float aspect_denom, ResourcePool *resource_pool)
51 : aspect_nom(aspect_nom),
52 aspect_denom(aspect_denom),
53 output_color_rgba(false),
54 num_output_color_ycbcr(0),
55 dither_effect(nullptr),
56 ycbcr_conversion_effect_node(nullptr),
// Intermediates default to 16-bit float RGBA to preserve precision between phases.
57 intermediate_format(GL_RGBA16F),
58 intermediate_transformation(NO_FRAMEBUFFER_TRANSFORMATION),
60 output_origin(OUTPUT_ORIGIN_BOTTOM_LEFT),
62 resource_pool(resource_pool),
63 do_phase_timing(false) {
64 if (resource_pool == nullptr) {
65 this->resource_pool = new ResourcePool();
66 owns_resource_pool = true;
// (else branch elided in this listing) — caller-supplied pool is not owned.
68 owns_resource_pool = false;
71 // Generate a VBO with some data in (shared position and texture coordinate data).
77 vbo = generate_vbo(2, GL_FLOAT, sizeof(vertices), vertices);
// Tear down the chain: the chain owns every Effect added to it, every
// compiled GLSL program (via the resource pool), the pool itself if we
// created it, and the shared vertex VBO.
80 EffectChain::~EffectChain()
82 for (unsigned i = 0; i < nodes.size(); ++i) {
83 delete nodes[i]->effect;
// Programs are refcounted by the pool; release our reference per phase.
86 for (unsigned i = 0; i < phases.size(); ++i) {
87 resource_pool->release_glsl_program(phases[i]->glsl_program_num);
90 if (owns_resource_pool) {
93 glDeleteBuffers(1, &vbo);
// Register an input (source) effect with the chain. Ownership passes to
// the chain; the same pointer is returned for caller convenience.
97 Input *EffectChain::add_input(Input *input)
100 inputs.push_back(input);
// Declare the chain's RGBA output format and alpha handling.
// May only be called once (asserted); finalization uses these values.
105 void EffectChain::add_output(const ImageFormat &format, OutputAlphaFormat alpha_format)
108 assert(!output_color_rgba);
109 output_format = format;
110 output_alpha_format = alpha_format;
111 output_color_rgba = true;
// Declare a Y'CbCr output (up to two are allowed). When a second output is
// added, its format must match the first in everything except the splitting
// mode; only the splitting may differ per output (asserted below).
// Chroma subsampling is not supported on output (must be 1x1).
114 void EffectChain::add_ycbcr_output(const ImageFormat &format, OutputAlphaFormat alpha_format,
115 const YCbCrFormat &ycbcr_format, YCbCrOutputSplitting output_splitting,
119 assert(num_output_color_ycbcr < 2);
120 output_format = format;
121 output_alpha_format = alpha_format;
123 if (num_output_color_ycbcr == 1) {
124 // Check that the format is the same.
125 assert(output_ycbcr_format.luma_coefficients == ycbcr_format.luma_coefficients);
126 assert(output_ycbcr_format.full_range == ycbcr_format.full_range);
127 assert(output_ycbcr_format.num_levels == ycbcr_format.num_levels);
128 assert(output_ycbcr_format.chroma_subsampling_x == 1);
129 assert(output_ycbcr_format.chroma_subsampling_y == 1);
130 assert(output_ycbcr_type == output_type);
// (else branch elided) — first Y'CbCr output: record format and type.
132 output_ycbcr_format = ycbcr_format;
133 output_ycbcr_type = output_type;
135 output_ycbcr_splitting[num_output_color_ycbcr++] = output_splitting;
137 assert(ycbcr_format.chroma_subsampling_x == 1);
138 assert(ycbcr_format.chroma_subsampling_y == 1);
// Change the Y'CbCr output parameters after finalization. Requires that a
// Y'CbCr output was already declared; forwards the new format to the
// YCbCrConversionEffect node that was inserted during finalization.
141 void EffectChain::change_ycbcr_output_format(const YCbCrFormat &ycbcr_format)
143 assert(num_output_color_ycbcr > 0);
144 assert(output_ycbcr_format.chroma_subsampling_x == 1);
145 assert(output_ycbcr_format.chroma_subsampling_y == 1);
147 output_ycbcr_format = ycbcr_format;
149 YCbCrConversionEffect *effect = (YCbCrConversionEffect *)(ycbcr_conversion_effect_node->effect);
150 effect->change_output_format(ycbcr_format);
// Wrap an Effect in a new graph Node with all analysis fields reset to
// "unknown", register it in the node map, and notify the effect that it
// joined this chain. Each Effect may be added only once (asserted).
154 Node *EffectChain::add_node(Effect *effect)
156 for (unsigned i = 0; i < nodes.size(); ++i) {
157 assert(nodes[i]->effect != effect);
160 Node *node = new Node;
161 node->effect = effect;
162 node->disabled = false;
// Color space / gamma / alpha are filled in later by the analysis passes.
163 node->output_color_space = COLORSPACE_INVALID;
164 node->output_gamma_curve = GAMMA_INVALID;
165 node->output_alpha_type = ALPHA_INVALID;
166 node->needs_mipmaps = false;
167 node->one_to_one_sampling = false;
168 node->strong_one_to_one_sampling = false;
170 nodes.push_back(node);
171 node_map[effect] = node;
172 effect->inform_added(this);
// Add a directed edge sender -> receiver in the effect graph,
// recording it symmetrically on both nodes.
176 void EffectChain::connect_nodes(Node *sender, Node *receiver)
178 sender->outgoing_links.push_back(receiver);
179 receiver->incoming_links.push_back(sender);
// Move all incoming edges from old_receiver onto new_receiver, and patch
// each upstream sender's outgoing list so it points at the new node.
182 void EffectChain::replace_receiver(Node *old_receiver, Node *new_receiver)
184 new_receiver->incoming_links = old_receiver->incoming_links;
185 old_receiver->incoming_links.clear();
187 for (unsigned i = 0; i < new_receiver->incoming_links.size(); ++i) {
188 Node *sender = new_receiver->incoming_links[i];
189 for (unsigned j = 0; j < sender->outgoing_links.size(); ++j) {
190 if (sender->outgoing_links[j] == old_receiver) {
191 sender->outgoing_links[j] = new_receiver;
// Mirror image of replace_receiver(): move all outgoing edges from
// old_sender onto new_sender, and patch each downstream receiver's
// incoming list so it points at the new node.
197 void EffectChain::replace_sender(Node *old_sender, Node *new_sender)
199 new_sender->outgoing_links = old_sender->outgoing_links;
200 old_sender->outgoing_links.clear();
202 for (unsigned i = 0; i < new_sender->outgoing_links.size(); ++i) {
203 Node *receiver = new_sender->outgoing_links[i];
204 for (unsigned j = 0; j < receiver->incoming_links.size(); ++j) {
205 if (receiver->incoming_links[j] == old_sender) {
206 receiver->incoming_links[j] = new_sender;
// Splice `middle` into the existing edge sender -> receiver, producing
// sender -> middle -> receiver. Afterwards, middle must have exactly as
// many inputs as its effect expects (asserted).
212 void EffectChain::insert_node_between(Node *sender, Node *middle, Node *receiver)
214 for (unsigned i = 0; i < sender->outgoing_links.size(); ++i) {
215 if (sender->outgoing_links[i] == receiver) {
216 sender->outgoing_links[i] = middle;
217 middle->incoming_links.push_back(sender);
220 for (unsigned i = 0; i < receiver->incoming_links.size(); ++i) {
221 if (receiver->incoming_links[i] == sender) {
222 receiver->incoming_links[i] = middle;
223 middle->outgoing_links.push_back(receiver);
227 assert(middle->incoming_links.size() == middle->effect->num_inputs());
// Return the GL texture unit (GL_TEXTUREn) that the given input of `node`
// was bound to during rendering. Only valid for effects that requested
// texture bounce, and only while a sampler is actually bound (asserted;
// bound_sampler_num must be in [0, 8)).
230 GLenum EffectChain::get_input_sampler(Node *node, unsigned input_num) const
232 assert(node->effect->needs_texture_bounce());
233 assert(input_num < node->incoming_links.size());
234 assert(node->incoming_links[input_num]->bound_sampler_num >= 0);
235 assert(node->incoming_links[input_num]->bound_sampler_num < 8);
236 return GL_TEXTURE0 + node->incoming_links[input_num]->bound_sampler_num;
// Whether the given input of `node` currently has a valid bound sampler
// in range [0, 8). NOTE(review): declared GLenum but returns a boolean
// expression — presumably historical; confirm against the header.
239 GLenum EffectChain::has_input_sampler(Node *node, unsigned input_num) const
241 assert(input_num < node->incoming_links.size());
242 return node->incoming_links[input_num]->bound_sampler_num >= 0 &&
243 node->incoming_links[input_num]->bound_sampler_num < 8;
// Recursively walk upstream from `node` and collect all leaf inputs
// (zero-input effects) that do not already deliver linear-gamma data.
// The walk stops early at subtrees whose output is already GAMMA_LINEAR
// (unless produced by a GammaCompressionEffect).
246 void EffectChain::find_all_nonlinear_inputs(Node *node, vector<Node *> *nonlinear_inputs)
248 if (node->output_gamma_curve == GAMMA_LINEAR &&
249 node->effect->effect_type_id() != "GammaCompressionEffect") {
252 if (node->effect->num_inputs() == 0) {
253 nonlinear_inputs->push_back(node);
// (else branch elided) — recurse into all upstream links.
255 assert(node->effect->num_inputs() == node->incoming_links.size());
256 for (unsigned i = 0; i < node->incoming_links.size(); ++i) {
257 find_all_nonlinear_inputs(node->incoming_links[i], nonlinear_inputs);
// Add an effect to the chain, wiring the given already-added effects as its
// inputs (their count must match the effect's declared num_inputs()).
// Ownership of `effect` passes to the chain.
262 Effect *EffectChain::add_effect(Effect *effect, const vector<Effect *> &inputs)
265 assert(inputs.size() == effect->num_inputs());
266 Node *node = add_node(effect);
267 for (unsigned i = 0; i < inputs.size(); ++i) {
268 assert(node_map.count(inputs[i]) != 0);
269 connect_nodes(node_map[inputs[i]], node);
274 // ESSL doesn't support token pasting. Replace PREFIX(x) with <effect_id>_x.
275 string replace_prefix(const string &text, const string &prefix)
// Scan the shader text, rewriting each PREFIX(arg) occurrence; text outside
// the pattern is copied through unchanged.
280 while (start < text.size()) {
281 size_t pos = text.find("PREFIX(", start);
282 if (pos == string::npos) {
// No more occurrences: flush the rest of the input and stop.
283 output.append(text.substr(start, string::npos));
287 output.append(text.substr(start, pos - start));
288 output.append(prefix);
291 pos += strlen("PREFIX(");
293 // Output stuff until we find the matching ), which we then eat.
// Tracks nesting so parentheses inside the argument are balanced correctly.
295 size_t end_arg_pos = pos;
296 while (end_arg_pos < text.size()) {
297 if (text[end_arg_pos] == '(') {
299 } else if (text[end_arg_pos] == ')') {
307 output.append(text.substr(pos, end_arg_pos - pos));
// Copy each effect-level uniform into the phase's uniform list (prefixed
// with the effect's ID so names stay unique within the phase), and append
// a matching "uniform <type> <effect_id>_<name>;" declaration to the GLSL
// source being assembled.
318 void extract_uniform_declarations(const vector<Uniform<T>> &effect_uniforms,
319 const string &type_specifier,
320 const string &effect_id,
321 vector<Uniform<T>> *phase_uniforms,
324 for (unsigned i = 0; i < effect_uniforms.size(); ++i) {
325 phase_uniforms->push_back(effect_uniforms[i]);
326 phase_uniforms->back().prefix = effect_id;
328 *glsl_string += string("uniform ") + type_specifier + " " + effect_id
329 + "_" + effect_uniforms[i].name + ";\n";
// Array-uniform variant of extract_uniform_declarations(): emits
// "uniform <type> <effect_id>_<name>[N];" declarations, where N is the
// uniform's declared element count.
334 void extract_uniform_array_declarations(const vector<Uniform<T>> &effect_uniforms,
335 const string &type_specifier,
336 const string &effect_id,
337 vector<Uniform<T>> *phase_uniforms,
340 for (unsigned i = 0; i < effect_uniforms.size(); ++i) {
341 phase_uniforms->push_back(effect_uniforms[i]);
342 phase_uniforms->back().prefix = effect_id;
345 snprintf(buf, sizeof(buf), "uniform %s %s_%s[%d];\n",
346 type_specifier.c_str(), effect_id.c_str(),
347 effect_uniforms[i].name.c_str(),
348 int(effect_uniforms[i].num_values));
// After the program is linked, resolve and store the GL location of every
// uniform the phase collected (looked up as "<prefix>_<name>").
354 void collect_uniform_locations(GLuint glsl_program_num, vector<Uniform<T>> *phase_uniforms)
356 for (unsigned i = 0; i < phase_uniforms->size(); ++i) {
357 Uniform<T> &uniform = (*phase_uniforms)[i];
358 uniform.location = get_uniform_location(glsl_program_num, uniform.prefix, uniform.name);
// Build and compile the complete GLSL program for one phase. The shader
// source is assembled by concatenating: a header (fragment or compute),
// the collected uniform declarations, per-input sampler wrapper functions,
// each effect's fragment snippet (glued together with FUNCNAME/INPUT
// #defines), Y'CbCr output #defines if this is the last phase, and a
// footer. Finally the program is compiled via the resource pool and all
// uniform locations are resolved.
364 void EffectChain::compile_glsl_program(Phase *phase)
366 string frag_shader_header;
367 if (phase->is_compute_shader) {
368 frag_shader_header = read_file("header.comp");
// (else branch elided) — regular fragment-shader header.
370 frag_shader_header = read_version_dependent_file("header", "frag");
372 string frag_shader = "";
374 // Create functions and uniforms for all the texture inputs that we need.
375 for (unsigned i = 0; i < phase->inputs.size(); ++i) {
376 Node *input = phase->inputs[i]->output_node;
378 sprintf(effect_id, "in%u", i);
379 phase->effect_ids.insert(make_pair(input, effect_id));
// Each bounced input becomes a sampler plus a small wrapper function in<i>(tc).
381 frag_shader += string("uniform sampler2D tex_") + effect_id + ";\n";
382 frag_shader += string("vec4 ") + effect_id + "(vec2 tc) {\n";
383 frag_shader += "\tvec4 tmp = tex2D(tex_" + string(effect_id) + ", tc);\n";
// Undo the square-root storage transformation for linear-gamma intermediates.
385 if (intermediate_transformation == SQUARE_ROOT_FRAMEBUFFER_TRANSFORMATION &&
386 phase->inputs[i]->output_node->output_gamma_curve == GAMMA_LINEAR) {
387 frag_shader += "\ttmp.rgb *= tmp.rgb;\n";
390 frag_shader += "\treturn tmp;\n";
391 frag_shader += "}\n";
// Register the sampler uniform ("tex_in<i>") pointing at the phase's sampler slot.
394 Uniform<int> uniform;
395 uniform.name = effect_id;
396 uniform.value = &phase->input_samplers[i];
397 uniform.prefix = "tex";
398 uniform.num_values = 1;
399 uniform.location = -1;
400 phase->uniforms_sampler2d.push_back(uniform);
403 // Give each effect in the phase its own ID.
404 for (unsigned i = 0; i < phase->effects.size(); ++i) {
405 Node *node = phase->effects[i];
407 sprintf(effect_id, "eff%u", i);
408 phase->effect_ids.insert(make_pair(node, effect_id));
// Emit each effect's snippet, wiring its INPUT/INPUTn macros to the
// upstream effect's function name.
411 for (unsigned i = 0; i < phase->effects.size(); ++i) {
412 Node *node = phase->effects[i];
413 const string effect_id = phase->effect_ids[node];
414 if (node->incoming_links.size() == 1) {
415 Node *input = node->incoming_links[0];
416 if (i != 0 && input->effect->is_compute_shader()) {
417 // First effect after the compute shader reads the value
418 // that cs_output() wrote to a global variable.
419 frag_shader += string("#define INPUT(tc) CS_OUTPUT_VAL\n");
421 frag_shader += string("#define INPUT ") + phase->effect_ids[input] + "\n";
// (else branch elided) — multi-input effects get INPUT1..INPUTn macros.
424 for (unsigned j = 0; j < node->incoming_links.size(); ++j) {
425 assert(!node->incoming_links[j]->effect->is_compute_shader());
427 sprintf(buf, "#define INPUT%d %s\n", j + 1, phase->effect_ids[node->incoming_links[j]].c_str());
433 frag_shader += string("#define FUNCNAME ") + effect_id + "\n";
434 if (node->effect->is_compute_shader()) {
435 frag_shader += string("#define NORMALIZE_TEXTURE_COORDS(tc) ((tc) * ") + effect_id + "_inv_output_size + " + effect_id + "_output_texcoord_adjust)\n";
// PREFIX(x) in the snippet becomes <effect_id>_x (see replace_prefix()).
437 frag_shader += replace_prefix(node->effect->output_fragment_shader(), effect_id);
438 frag_shader += "#undef FUNCNAME\n";
439 if (node->incoming_links.size() == 1) {
440 frag_shader += "#undef INPUT\n";
442 for (unsigned j = 0; j < node->incoming_links.size(); ++j) {
444 sprintf(buf, "#undef INPUT%d\n", j + 1);
// Wire the final INPUT/CS_POSTPROC macros that the footer will call.
450 if (phase->is_compute_shader) {
451 frag_shader += string("#define INPUT ") + phase->effect_ids[phase->compute_shader_node] + "\n";
452 if (phase->compute_shader_node == phase->effects.back()) {
453 // No postprocessing.
454 frag_shader += "#define CS_POSTPROC(tc) CS_OUTPUT_VAL\n";
456 frag_shader += string("#define CS_POSTPROC ") + phase->effect_ids[phase->effects.back()] + "\n";
459 frag_shader += string("#define INPUT ") + phase->effect_ids[phase->effects.back()] + "\n";
462 // If we're the last phase, add the right #defines for Y'CbCr multi-output as needed.
463 vector<string> frag_shader_outputs; // In order.
464 if (phase->output_node->outgoing_links.empty() && num_output_color_ycbcr > 0) {
465 switch (output_ycbcr_splitting[0]) {
466 case YCBCR_OUTPUT_INTERLEAVED:
468 frag_shader_outputs.push_back("FragColor");
470 case YCBCR_OUTPUT_SPLIT_Y_AND_CBCR:
471 frag_shader += "#define YCBCR_OUTPUT_SPLIT_Y_AND_CBCR 1\n";
472 frag_shader_outputs.push_back("Y");
473 frag_shader_outputs.push_back("Chroma");
475 case YCBCR_OUTPUT_PLANAR:
476 frag_shader += "#define YCBCR_OUTPUT_PLANAR 1\n";
477 frag_shader_outputs.push_back("Y");
478 frag_shader_outputs.push_back("Cb");
479 frag_shader_outputs.push_back("Cr");
// Second Y'CbCr output, if any, gets its own SECOND_* defines and buffers.
485 if (num_output_color_ycbcr > 1) {
486 switch (output_ycbcr_splitting[1]) {
487 case YCBCR_OUTPUT_INTERLEAVED:
488 frag_shader += "#define SECOND_YCBCR_OUTPUT_INTERLEAVED 1\n";
489 frag_shader_outputs.push_back("YCbCr2");
491 case YCBCR_OUTPUT_SPLIT_Y_AND_CBCR:
492 frag_shader += "#define SECOND_YCBCR_OUTPUT_SPLIT_Y_AND_CBCR 1\n";
493 frag_shader_outputs.push_back("Y2");
494 frag_shader_outputs.push_back("Chroma2");
496 case YCBCR_OUTPUT_PLANAR:
497 frag_shader += "#define SECOND_YCBCR_OUTPUT_PLANAR 1\n";
498 frag_shader_outputs.push_back("Y2");
499 frag_shader_outputs.push_back("Cb2");
500 frag_shader_outputs.push_back("Cr2");
507 if (output_color_rgba) {
508 // Note: Needs to come in the header, because not only the
509 // output needs to see it (YCbCrConversionEffect and DitherEffect
511 frag_shader_header += "#define YCBCR_ALSO_OUTPUT_RGBA 1\n";
512 frag_shader_outputs.push_back("RGBA");
516 // If we're bouncing to a temporary texture, signal transformation if desired.
517 if (!phase->output_node->outgoing_links.empty()) {
518 if (intermediate_transformation == SQUARE_ROOT_FRAMEBUFFER_TRANSFORMATION &&
519 phase->output_node->output_gamma_curve == GAMMA_LINEAR) {
520 frag_shader += "#define SQUARE_ROOT_TRANSFORMATION 1\n";
// Compute phases also register the output-size uniforms the footer needs.
524 if (phase->is_compute_shader) {
525 frag_shader.append(read_file("footer.comp"));
526 phase->compute_shader_node->effect->register_uniform_ivec2("output_size", phase->uniform_output_size);
527 phase->compute_shader_node->effect->register_uniform_vec2("inv_output_size", (float *)&phase->inv_output_size);
528 phase->compute_shader_node->effect->register_uniform_vec2("output_texcoord_adjust", (float *)&phase->output_texcoord_adjust);
530 frag_shader.append(read_file("footer.frag"));
533 // Collect uniforms from all effects and output them. Note that this needs
534 // to happen after output_fragment_shader(), even though the uniforms come
535 // before in the output source, since output_fragment_shader() is allowed
536 // to register new uniforms (e.g. arrays that are of unknown length until
537 // finalization time).
538 // TODO: Make a uniform block for platforms that support it.
539 string frag_shader_uniforms = "";
540 for (unsigned i = 0; i < phase->effects.size(); ++i) {
541 Node *node = phase->effects[i];
542 Effect *effect = node->effect;
543 const string effect_id = phase->effect_ids[node];
544 extract_uniform_declarations(effect->uniforms_image2d, "image2D", effect_id, &phase->uniforms_image2d, &frag_shader_uniforms);
545 extract_uniform_declarations(effect->uniforms_sampler2d, "sampler2D", effect_id, &phase->uniforms_sampler2d, &frag_shader_uniforms);
546 extract_uniform_declarations(effect->uniforms_bool, "bool", effect_id, &phase->uniforms_bool, &frag_shader_uniforms);
547 extract_uniform_declarations(effect->uniforms_int, "int", effect_id, &phase->uniforms_int, &frag_shader_uniforms);
548 extract_uniform_declarations(effect->uniforms_ivec2, "ivec2", effect_id, &phase->uniforms_ivec2, &frag_shader_uniforms);
549 extract_uniform_declarations(effect->uniforms_float, "float", effect_id, &phase->uniforms_float, &frag_shader_uniforms);
550 extract_uniform_declarations(effect->uniforms_vec2, "vec2", effect_id, &phase->uniforms_vec2, &frag_shader_uniforms);
551 extract_uniform_declarations(effect->uniforms_vec3, "vec3", effect_id, &phase->uniforms_vec3, &frag_shader_uniforms);
552 extract_uniform_declarations(effect->uniforms_vec4, "vec4", effect_id, &phase->uniforms_vec4, &frag_shader_uniforms);
553 extract_uniform_array_declarations(effect->uniforms_float_array, "float", effect_id, &phase->uniforms_float, &frag_shader_uniforms);
554 extract_uniform_array_declarations(effect->uniforms_vec2_array, "vec2", effect_id, &phase->uniforms_vec2, &frag_shader_uniforms);
555 extract_uniform_array_declarations(effect->uniforms_vec3_array, "vec3", effect_id, &phase->uniforms_vec3, &frag_shader_uniforms);
556 extract_uniform_array_declarations(effect->uniforms_vec4_array, "vec4", effect_id, &phase->uniforms_vec4, &frag_shader_uniforms);
557 extract_uniform_declarations(effect->uniforms_mat3, "mat3", effect_id, &phase->uniforms_mat3, &frag_shader_uniforms);
560 string vert_shader = read_version_dependent_file("vs", "vert");
562 // If we're the last phase and need to flip the picture to compensate for
563 // the origin, tell the vertex or compute shader so.
// With the dummy display effect present, the "real" last phase is the one
// feeding ComputeShaderOutputDisplayEffect, not the display phase itself.
565 if (has_dummy_effect) {
566 is_last_phase = (phase->output_node->outgoing_links.size() == 1 &&
567 phase->output_node->outgoing_links[0]->effect->effect_type_id() == "ComputeShaderOutputDisplayEffect");
569 is_last_phase = phase->output_node->outgoing_links.empty();
571 if (is_last_phase && output_origin == OUTPUT_ORIGIN_TOP_LEFT) {
572 if (phase->is_compute_shader) {
573 frag_shader_header += "#define FLIP_ORIGIN 1\n";
// (else branch elided) — patch "#define FLIP_ORIGIN 0" in the vertex shader
// source in place, flipping the trailing '0' to '1'.
575 const string needle = "#define FLIP_ORIGIN 0";
576 size_t pos = vert_shader.find(needle);
577 assert(pos != string::npos);
579 vert_shader[pos + needle.size() - 1] = '1';
583 frag_shader = frag_shader_header + frag_shader_uniforms + frag_shader;
585 if (phase->is_compute_shader) {
586 phase->glsl_program_num = resource_pool->compile_glsl_compute_program(frag_shader);
// The compute shader writes via an image unit ("tex_outbuf") instead of FragColor.
588 Uniform<int> uniform;
589 uniform.name = "outbuf";
590 uniform.value = &phase->outbuf_image_unit;
591 uniform.prefix = "tex";
592 uniform.num_values = 1;
593 uniform.location = -1;
594 phase->uniforms_image2d.push_back(uniform);
596 phase->glsl_program_num = resource_pool->compile_glsl_program(vert_shader, frag_shader, frag_shader_outputs);
// Attributes may be optimized out by the driver; only record ones that exist.
598 GLint position_attribute_index = glGetAttribLocation(phase->glsl_program_num, "position");
599 GLint texcoord_attribute_index = glGetAttribLocation(phase->glsl_program_num, "texcoord");
600 if (position_attribute_index != -1) {
601 phase->attribute_indexes.insert(position_attribute_index);
603 if (texcoord_attribute_index != -1) {
604 phase->attribute_indexes.insert(texcoord_attribute_index);
607 // Collect the resulting location numbers for each uniform.
608 collect_uniform_locations(phase->glsl_program_num, &phase->uniforms_image2d);
609 collect_uniform_locations(phase->glsl_program_num, &phase->uniforms_sampler2d);
610 collect_uniform_locations(phase->glsl_program_num, &phase->uniforms_bool);
611 collect_uniform_locations(phase->glsl_program_num, &phase->uniforms_int);
612 collect_uniform_locations(phase->glsl_program_num, &phase->uniforms_ivec2);
613 collect_uniform_locations(phase->glsl_program_num, &phase->uniforms_float);
614 collect_uniform_locations(phase->glsl_program_num, &phase->uniforms_vec2);
615 collect_uniform_locations(phase->glsl_program_num, &phase->uniforms_vec3);
616 collect_uniform_locations(phase->glsl_program_num, &phase->uniforms_vec4);
617 collect_uniform_locations(phase->glsl_program_num, &phase->uniforms_mat3);
620 // Construct GLSL programs, starting at the given effect and following
621 // the chain from there. We end a program every time we come to an effect
622 // marked as "needs texture bounce", one that is used by multiple other
623 // effects, every time we need to bounce due to output size change
624 // (not all size changes require ending), and of course at the end.
626 // We follow a quite simple depth-first search from the output, although
627 // without recursing explicitly within each phase.
628 Phase *EffectChain::construct_phase(Node *output, map<Node *, Phase *> *completed_effects)
// Memoized: a node that already heads a phase returns the existing one.
630 if (completed_effects->count(output)) {
631 return (*completed_effects)[output];
634 Phase *phase = new Phase;
635 phase->output_node = output;
636 phase->is_compute_shader = false;
637 phase->compute_shader_node = nullptr;
639 // If the output effect has one-to-one sampling, we try to trace this
640 // status down through the dependency chain. This is important in case
641 // we hit an effect that changes output size (and not sets a virtual
642 // output size); if we have one-to-one sampling, we don't have to break
644 output->one_to_one_sampling = output->effect->one_to_one_sampling();
645 output->strong_one_to_one_sampling = output->effect->strong_one_to_one_sampling();
647 // Effects that we have yet to calculate, but that we know should
648 // be in the current phase.
649 stack<Node *> effects_todo_this_phase;
650 effects_todo_this_phase.push(output);
652 while (!effects_todo_this_phase.empty()) {
653 Node *node = effects_todo_this_phase.top();
654 effects_todo_this_phase.pop();
// strong one-to-one implies one-to-one, never the other way around.
656 assert(node->effect->one_to_one_sampling() >= node->effect->strong_one_to_one_sampling());
658 if (node->effect->needs_mipmaps()) {
659 node->needs_mipmaps = true;
662 // This should currently only happen for effects that are inputs
663 // (either true inputs or phase outputs). We special-case inputs,
664 // and then deduplicate phase outputs below.
665 if (node->effect->num_inputs() == 0) {
666 if (find(phase->effects.begin(), phase->effects.end(), node) != phase->effects.end()) {
670 assert(completed_effects->count(node) == 0);
673 phase->effects.push_back(node);
674 if (node->effect->is_compute_shader()) {
675 phase->is_compute_shader = true;
676 phase->compute_shader_node = node;
679 // Find all the dependencies of this effect, and add them to the stack.
680 vector<Node *> deps = node->incoming_links;
681 assert(node->effect->num_inputs() == deps.size());
682 for (unsigned i = 0; i < deps.size(); ++i) {
683 bool start_new_phase = false;
// Explicit bounce request ends the phase at this edge (unless the
// dependency is itself just a texture, or opts out of bouncing).
685 if (node->effect->needs_texture_bounce() &&
686 !deps[i]->effect->is_single_texture() &&
687 !deps[i]->effect->override_disable_bounce()) {
688 start_new_phase = true;
691 // Propagate information about needing mipmaps down the chain,
692 // breaking the phase if we notice an incompatibility.
694 // Note that we cannot do this propagation as a normal pass,
695 // because it needs information about where the phases end
696 // (we should not propagate the flag across phases).
697 if (node->needs_mipmaps) {
698 if (deps[i]->effect->num_inputs() == 0) {
699 Input *input = static_cast<Input *>(deps[i]->effect);
700 start_new_phase |= !input->can_supply_mipmaps();
702 deps[i]->needs_mipmaps = true;
706 if (deps[i]->outgoing_links.size() > 1) {
707 if (!deps[i]->effect->is_single_texture()) {
708 // More than one effect uses this as the input,
709 // and it is not a texture itself.
710 // The easiest thing to do (and probably also the safest
711 // performance-wise in most cases) is to bounce it to a texture
712 // and then let the next passes read from that.
713 start_new_phase = true;
715 assert(deps[i]->effect->num_inputs() == 0);
717 // For textures, we try to be slightly more clever;
718 // if none of our outputs need a bounce, we don't bounce
719 // but instead simply use the effect many times.
721 // Strictly speaking, we could bounce it for some outputs
722 // and use it directly for others, but the processing becomes
723 // somewhat simpler if the effect is only used in one such way.
724 for (unsigned j = 0; j < deps[i]->outgoing_links.size(); ++j) {
725 Node *rdep = deps[i]->outgoing_links[j];
726 start_new_phase |= rdep->effect->needs_texture_bounce();
731 if (deps[i]->effect->is_compute_shader()) {
732 // Only one compute shader per phase; we should have been stopped
733 // already due to the fact that compute shaders are not one-to-one.
734 assert(!phase->is_compute_shader);
736 // If all nodes so far are strong one-to-one, we can put them after
737 // the compute shader (ie., process them on the output).
738 start_new_phase = !node->strong_one_to_one_sampling;
739 } else if (deps[i]->effect->sets_virtual_output_size()) {
740 assert(deps[i]->effect->changes_output_size());
741 // If the next effect sets a virtual size to rely on OpenGL's
742 // bilinear sampling, we'll really need to break the phase here.
743 start_new_phase = true;
744 } else if (deps[i]->effect->changes_output_size() && !node->one_to_one_sampling) {
745 // If the next effect changes size and we don't have one-to-one sampling,
746 // we also need to break here.
747 start_new_phase = true;
750 if (start_new_phase) {
// Recurse: the dependency heads its own phase, which becomes our input.
751 phase->inputs.push_back(construct_phase(deps[i], completed_effects));
753 effects_todo_this_phase.push(deps[i]);
755 // Propagate the one-to-one status down through the dependency.
756 deps[i]->one_to_one_sampling = node->one_to_one_sampling &&
757 deps[i]->effect->one_to_one_sampling();
758 deps[i]->strong_one_to_one_sampling = node->strong_one_to_one_sampling &&
759 deps[i]->effect->strong_one_to_one_sampling();
764 // No more effects to do this phase. Take all the ones we have,
765 // and create a GLSL program for it.
766 assert(!phase->effects.empty());
768 // Deduplicate the inputs, but don't change the ordering e.g. by sorting;
769 // that would be nondeterministic and thus reduce cacheability.
770 // TODO: Make this even more deterministic.
771 vector<Phase *> dedup_inputs;
772 set<Phase *> seen_inputs;
773 for (size_t i = 0; i < phase->inputs.size(); ++i) {
774 if (seen_inputs.insert(phase->inputs[i]).second) {
775 dedup_inputs.push_back(phase->inputs[i]);
778 swap(phase->inputs, dedup_inputs);
780 // Allocate samplers for each input.
781 phase->input_samplers.resize(phase->inputs.size());
783 // We added the effects from the output and back, but we need to output
784 // them in topological sort order in the shader.
785 phase->effects = topological_sort(phase->effects);
787 // Figure out if we need mipmaps or not, and if so, tell the inputs that.
788 phase->input_needs_mipmaps = false;
789 for (unsigned i = 0; i < phase->effects.size(); ++i) {
790 Node *node = phase->effects[i];
791 phase->input_needs_mipmaps |= node->effect->needs_mipmaps();
793 for (unsigned i = 0; i < phase->effects.size(); ++i) {
794 Node *node = phase->effects[i];
795 if (node->effect->num_inputs() == 0) {
796 Input *input = static_cast<Input *>(node->effect);
797 assert(!phase->input_needs_mipmaps || input->can_supply_mipmaps());
798 CHECK(input->set_int("needs_mipmaps", phase->input_needs_mipmaps));
802 // Tell each node which phase it ended up in, so that the unit test
803 // can check that the phases were split in the right place.
804 // Note that this ignores that effects may be part of multiple phases;
805 // if the unit tests need to test such cases, we'll reconsider.
806 for (unsigned i = 0; i < phase->effects.size(); ++i) {
807 phase->effects[i]->containing_phase = phase;
810 // Actually make the shader for this phase.
811 compile_glsl_program(phase);
813 // Initialize timers.
814 if (movit_timer_queries_supported) {
815 phase->time_elapsed_ns = 0;
816 phase->num_measured_iterations = 0;
819 assert(completed_effects->count(output) == 0);
820 completed_effects->insert(make_pair(output, phase));
821 phases.push_back(phase);
// Debugging aid: dump the effect graph as a Graphviz .dot file, coloring
// each node by the phase it belongs to and labeling edges with
// bounce/resize/colorspace/gamma/alpha information. No-op unless the
// Movit debug level is MOVIT_DEBUG_ON.
825 void EffectChain::output_dot(const char *filename)
827 if (movit_debug_level != MOVIT_DEBUG_ON) {
831 FILE *fp = fopen(filename, "w");
837 fprintf(fp, "digraph G {\n");
838 fprintf(fp, " output [shape=box label=\"(output)\"];\n");
839 for (unsigned i = 0; i < nodes.size(); ++i) {
840 // Find out which phase this event belongs to.
841 vector<int> in_phases;
842 for (unsigned j = 0; j < phases.size(); ++j) {
843 const Phase* p = phases[j];
844 if (find(p->effects.begin(), p->effects.end(), nodes[i]) != p->effects.end()) {
845 in_phases.push_back(j);
// Node ids are derived from the Node pointer value ("n%ld"); fill color
// cycles through the accent8 Graphviz palette by phase number.
849 if (in_phases.empty()) {
850 fprintf(fp, " n%ld [label=\"%s\"];\n", (long)nodes[i], nodes[i]->effect->effect_type_id().c_str());
851 } else if (in_phases.size() == 1) {
852 fprintf(fp, " n%ld [label=\"%s\" style=\"filled\" fillcolor=\"/accent8/%d\"];\n",
853 (long)nodes[i], nodes[i]->effect->effect_type_id().c_str(),
854 (in_phases[0] % 8) + 1);
856 // If we had new enough Graphviz, style="wedged" would probably be ideal here.
858 fprintf(fp, " n%ld [label=\"%s [in multiple phases]\" style=\"filled\" fillcolor=\"/accent8/%d\"];\n",
859 (long)nodes[i], nodes[i]->effect->effect_type_id().c_str(),
860 (in_phases[0] % 8) + 1);
863 char from_node_id[256];
864 snprintf(from_node_id, 256, "n%ld", (long)nodes[i]);
866 for (unsigned j = 0; j < nodes[i]->outgoing_links.size(); ++j) {
867 char to_node_id[256];
868 snprintf(to_node_id, 256, "n%ld", (long)nodes[i]->outgoing_links[j]);
870 vector<string> labels = get_labels_for_edge(nodes[i], nodes[i]->outgoing_links[j]);
871 output_dot_edge(fp, from_node_id, to_node_id, labels);
// Terminal (enabled) nodes also get an edge to the synthetic "output" box.
874 if (nodes[i]->outgoing_links.empty() && !nodes[i]->disabled) {
876 vector<string> labels = get_labels_for_edge(nodes[i], nullptr);
877 output_dot_edge(fp, from_node_id, "output", labels);
// Build the list of Graphviz edge labels describing the edge from -> to:
// whether the receiver needs a bounce, whether the sender resizes, and the
// sender's analyzed color space, gamma curve, and alpha type. `to` may be
// null (edge to the final output). Note: several switch cases are elided
// in this listing; only a representative subset is visible.
885 vector<string> EffectChain::get_labels_for_edge(const Node *from, const Node *to)
887 vector<string> labels;
889 if (to != nullptr && to->effect->needs_texture_bounce()) {
890 labels.push_back("needs_bounce");
892 if (from->effect->changes_output_size()) {
893 labels.push_back("resize");
896 switch (from->output_color_space) {
897 case COLORSPACE_INVALID:
898 labels.push_back("spc[invalid]");
900 case COLORSPACE_REC_601_525:
901 labels.push_back("spc[rec601-525]");
903 case COLORSPACE_REC_601_625:
904 labels.push_back("spc[rec601-625]");
910 switch (from->output_gamma_curve) {
912 labels.push_back("gamma[invalid]");
915 labels.push_back("gamma[sRGB]");
917 case GAMMA_REC_601: // and GAMMA_REC_709
918 labels.push_back("gamma[rec601/709]");
924 switch (from->output_alpha_type) {
926 labels.push_back("alpha[invalid]");
929 labels.push_back("alpha[blank]");
931 case ALPHA_POSTMULTIPLIED:
932 labels.push_back("alpha[postmult]");
// Write one Graphviz edge; if labels were supplied, they are joined with
// ", " into a single [label="..."] attribute.
941 void EffectChain::output_dot_edge(FILE *fp,
942 const string &from_node_id,
943 const string &to_node_id,
944 const vector<string> &labels)
946 if (labels.empty()) {
947 fprintf(fp, " %s -> %s;\n", from_node_id.c_str(), to_node_id.c_str());
949 string label = labels[0];
950 for (unsigned k = 1; k < labels.size(); ++k) {
951 label += ", " + labels[k];
953 fprintf(fp, " %s -> %s [label=\"%s\"];\n", from_node_id.c_str(), to_node_id.c_str(), label.c_str());
// Grow *output_width/*output_height so that a width x height rectangle,
// scaled to the chain's aspect ratio (aspect_nom:aspect_denom), fits.
// The candidate size only replaces the current one if it is at least as
// large in both dimensions; consistency is asserted since all candidates
// share the same aspect.
957 void EffectChain::size_rectangle_to_fit(unsigned width, unsigned height, unsigned *output_width, unsigned *output_height)
959 unsigned scaled_width, scaled_height;
961 if (float(width) * aspect_denom >= float(height) * aspect_nom) {
962 // Same aspect, or W/H > aspect (image is wider than the frame).
963 // In either case, keep width, and adjust height.
964 scaled_width = width;
965 scaled_height = lrintf(width * aspect_denom / aspect_nom);
967 // W/H < aspect (image is taller than the frame), so keep height,
969 scaled_width = lrintf(height * aspect_nom / aspect_denom);
970 scaled_height = height;
973 // We should be consistently larger or smaller then the existing choice,
974 // since we have the same aspect.
975 assert(!(scaled_width < *output_width && scaled_height > *output_height));
976 assert(!(scaled_height < *output_height && scaled_width > *output_width));
978 if (scaled_width >= *output_width && scaled_height >= *output_height) {
979 *output_width = scaled_width;
980 *output_height = scaled_height;
984 // Propagate input texture sizes throughout, and inform effects downstream.
985 // (Like a lot of other code, we depend on effects being in topological order.)
986 void EffectChain::inform_input_sizes(Phase *phase)
988 // All effects that have a defined size (inputs and RTT inputs)
989 // get that. Reset all others.
990 for (unsigned i = 0; i < phase->effects.size(); ++i) {
991 Node *node = phase->effects[i];
992 if (node->effect->num_inputs() == 0) {
993 Input *input = static_cast<Input *>(node->effect);
994 node->output_width = input->get_width();
995 node->output_height = input->get_height();
996 assert(node->output_width != 0);
997 assert(node->output_height != 0);
999 node->output_width = node->output_height = 0;
1002 for (unsigned i = 0; i < phase->inputs.size(); ++i) {
1003 Phase *input = phase->inputs[i];
1004 input->output_node->output_width = input->virtual_output_width;
1005 input->output_node->output_height = input->virtual_output_height;
1006 assert(input->output_node->output_width != 0);
1007 assert(input->output_node->output_height != 0);
1010 // Now propagate from the inputs towards the end, and inform as we go.
1011 // The rules are simple:
1013 // 1. Don't touch effects that already have given sizes (ie., inputs
1014 // or effects that change the output size).
1015 // 2. If all of your inputs have the same size, that will be your output size.
1016 // 3. Otherwise, your output size is 0x0.
1017 for (unsigned i = 0; i < phase->effects.size(); ++i) {
1018 Node *node = phase->effects[i];
1019 if (node->effect->num_inputs() == 0) {
1022 unsigned this_output_width = 0;
1023 unsigned this_output_height = 0;
1024 for (unsigned j = 0; j < node->incoming_links.size(); ++j) {
1025 Node *input = node->incoming_links[j];
1026 node->effect->inform_input_size(j, input->output_width, input->output_height);
1028 this_output_width = input->output_width;
1029 this_output_height = input->output_height;
1030 } else if (input->output_width != this_output_width || input->output_height != this_output_height) {
1032 this_output_width = 0;
1033 this_output_height = 0;
1036 if (node->effect->changes_output_size()) {
1037 // We cannot call get_output_size() before we've done inform_input_size()
1039 unsigned real_width, real_height;
1040 node->effect->get_output_size(&real_width, &real_height,
1041 &node->output_width, &node->output_height);
1042 assert(node->effect->sets_virtual_output_size() ||
1043 (real_width == node->output_width &&
1044 real_height == node->output_height));
1046 node->output_width = this_output_width;
1047 node->output_height = this_output_height;
1052 // Note: You should call inform_input_sizes() before this, as the last effect's
1053 // desired output size might change based on the inputs.
1054 void EffectChain::find_output_size(Phase *phase)
1056 Node *output_node = phase->is_compute_shader ? phase->compute_shader_node : phase->effects.back();
1058 // If the last effect explicitly sets an output size, use that.
1059 if (output_node->effect->changes_output_size()) {
1060 output_node->effect->get_output_size(&phase->output_width, &phase->output_height,
1061 &phase->virtual_output_width, &phase->virtual_output_height);
1062 assert(output_node->effect->sets_virtual_output_size() ||
1063 (phase->output_width == phase->virtual_output_width &&
1064 phase->output_height == phase->virtual_output_height));
1068 // If all effects have the same size, use that.
1069 unsigned output_width = 0, output_height = 0;
1070 bool all_inputs_same_size = true;
1072 for (unsigned i = 0; i < phase->inputs.size(); ++i) {
1073 Phase *input = phase->inputs[i];
1074 assert(input->output_width != 0);
1075 assert(input->output_height != 0);
1076 if (output_width == 0 && output_height == 0) {
1077 output_width = input->virtual_output_width;
1078 output_height = input->virtual_output_height;
1079 } else if (output_width != input->virtual_output_width ||
1080 output_height != input->virtual_output_height) {
1081 all_inputs_same_size = false;
1084 for (unsigned i = 0; i < phase->effects.size(); ++i) {
1085 Effect *effect = phase->effects[i]->effect;
1086 if (effect->num_inputs() != 0) {
1090 Input *input = static_cast<Input *>(effect);
1091 if (output_width == 0 && output_height == 0) {
1092 output_width = input->get_width();
1093 output_height = input->get_height();
1094 } else if (output_width != input->get_width() ||
1095 output_height != input->get_height()) {
1096 all_inputs_same_size = false;
1100 if (all_inputs_same_size) {
1101 assert(output_width != 0);
1102 assert(output_height != 0);
1103 phase->virtual_output_width = phase->output_width = output_width;
1104 phase->virtual_output_height = phase->output_height = output_height;
1108 // If not, fit all the inputs into the current aspect, and select the largest one.
1111 for (unsigned i = 0; i < phase->inputs.size(); ++i) {
1112 Phase *input = phase->inputs[i];
1113 assert(input->output_width != 0);
1114 assert(input->output_height != 0);
1115 size_rectangle_to_fit(input->output_width, input->output_height, &output_width, &output_height);
1117 for (unsigned i = 0; i < phase->effects.size(); ++i) {
1118 Effect *effect = phase->effects[i]->effect;
1119 if (effect->num_inputs() != 0) {
1123 Input *input = static_cast<Input *>(effect);
1124 size_rectangle_to_fit(input->get_width(), input->get_height(), &output_width, &output_height);
1126 assert(output_width != 0);
1127 assert(output_height != 0);
1128 phase->virtual_output_width = phase->output_width = output_width;
1129 phase->virtual_output_height = phase->output_height = output_height;
1132 void EffectChain::sort_all_nodes_topologically()
1134 nodes = topological_sort(nodes);
1137 vector<Node *> EffectChain::topological_sort(const vector<Node *> &nodes)
1139 set<Node *> nodes_left_to_visit(nodes.begin(), nodes.end());
1140 vector<Node *> sorted_list;
1141 for (unsigned i = 0; i < nodes.size(); ++i) {
1142 topological_sort_visit_node(nodes[i], &nodes_left_to_visit, &sorted_list);
1144 reverse(sorted_list.begin(), sorted_list.end());
1148 void EffectChain::topological_sort_visit_node(Node *node, set<Node *> *nodes_left_to_visit, vector<Node *> *sorted_list)
1150 if (nodes_left_to_visit->count(node) == 0) {
1153 nodes_left_to_visit->erase(node);
1154 for (unsigned i = 0; i < node->outgoing_links.size(); ++i) {
1155 topological_sort_visit_node(node->outgoing_links[i], nodes_left_to_visit, sorted_list);
1157 sorted_list->push_back(node);
1160 void EffectChain::find_color_spaces_for_inputs()
1162 for (unsigned i = 0; i < nodes.size(); ++i) {
1163 Node *node = nodes[i];
1164 if (node->disabled) {
1167 if (node->incoming_links.size() == 0) {
1168 Input *input = static_cast<Input *>(node->effect);
1169 node->output_color_space = input->get_color_space();
1170 node->output_gamma_curve = input->get_gamma_curve();
1172 Effect::AlphaHandling alpha_handling = input->alpha_handling();
1173 switch (alpha_handling) {
1174 case Effect::OUTPUT_BLANK_ALPHA:
1175 node->output_alpha_type = ALPHA_BLANK;
1177 case Effect::INPUT_AND_OUTPUT_PREMULTIPLIED_ALPHA:
1178 node->output_alpha_type = ALPHA_PREMULTIPLIED;
1180 case Effect::OUTPUT_POSTMULTIPLIED_ALPHA:
1181 node->output_alpha_type = ALPHA_POSTMULTIPLIED;
1183 case Effect::INPUT_PREMULTIPLIED_ALPHA_KEEP_BLANK:
1184 case Effect::DONT_CARE_ALPHA_TYPE:
1189 if (node->output_alpha_type == ALPHA_PREMULTIPLIED) {
1190 assert(node->output_gamma_curve == GAMMA_LINEAR);
1196 // Propagate gamma and color space information as far as we can in the graph.
1197 // The rules are simple: Anything where all the inputs agree, get that as
1198 // output as well. Anything else keeps having *_INVALID.
1199 void EffectChain::propagate_gamma_and_color_space()
1201 // We depend on going through the nodes in order.
1202 sort_all_nodes_topologically();
1204 for (unsigned i = 0; i < nodes.size(); ++i) {
1205 Node *node = nodes[i];
1206 if (node->disabled) {
1209 assert(node->incoming_links.size() == node->effect->num_inputs());
1210 if (node->incoming_links.size() == 0) {
1211 assert(node->output_color_space != COLORSPACE_INVALID);
1212 assert(node->output_gamma_curve != GAMMA_INVALID);
1216 Colorspace color_space = node->incoming_links[0]->output_color_space;
1217 GammaCurve gamma_curve = node->incoming_links[0]->output_gamma_curve;
1218 for (unsigned j = 1; j < node->incoming_links.size(); ++j) {
1219 if (node->incoming_links[j]->output_color_space != color_space) {
1220 color_space = COLORSPACE_INVALID;
1222 if (node->incoming_links[j]->output_gamma_curve != gamma_curve) {
1223 gamma_curve = GAMMA_INVALID;
1227 // The conversion effects already have their outputs set correctly,
1228 // so leave them alone.
1229 if (node->effect->effect_type_id() != "ColorspaceConversionEffect") {
1230 node->output_color_space = color_space;
1232 if (node->effect->effect_type_id() != "GammaCompressionEffect" &&
1233 node->effect->effect_type_id() != "GammaExpansionEffect") {
1234 node->output_gamma_curve = gamma_curve;
1239 // Propagate alpha information as far as we can in the graph.
1240 // Similar to propagate_gamma_and_color_space().
1241 void EffectChain::propagate_alpha()
1243 // We depend on going through the nodes in order.
1244 sort_all_nodes_topologically();
1246 for (unsigned i = 0; i < nodes.size(); ++i) {
1247 Node *node = nodes[i];
1248 if (node->disabled) {
1251 assert(node->incoming_links.size() == node->effect->num_inputs());
1252 if (node->incoming_links.size() == 0) {
1253 assert(node->output_alpha_type != ALPHA_INVALID);
1257 // The alpha multiplication/division effects are special cases.
1258 if (node->effect->effect_type_id() == "AlphaMultiplicationEffect") {
1259 assert(node->incoming_links.size() == 1);
1260 assert(node->incoming_links[0]->output_alpha_type == ALPHA_POSTMULTIPLIED);
1261 node->output_alpha_type = ALPHA_PREMULTIPLIED;
1264 if (node->effect->effect_type_id() == "AlphaDivisionEffect") {
1265 assert(node->incoming_links.size() == 1);
1266 assert(node->incoming_links[0]->output_alpha_type == ALPHA_PREMULTIPLIED);
1267 node->output_alpha_type = ALPHA_POSTMULTIPLIED;
1271 // GammaCompressionEffect and GammaExpansionEffect are also a special case,
1272 // because they are the only one that _need_ postmultiplied alpha.
1273 if (node->effect->effect_type_id() == "GammaCompressionEffect" ||
1274 node->effect->effect_type_id() == "GammaExpansionEffect") {
1275 assert(node->incoming_links.size() == 1);
1276 if (node->incoming_links[0]->output_alpha_type == ALPHA_BLANK) {
1277 node->output_alpha_type = ALPHA_BLANK;
1278 } else if (node->incoming_links[0]->output_alpha_type == ALPHA_POSTMULTIPLIED) {
1279 node->output_alpha_type = ALPHA_POSTMULTIPLIED;
1281 node->output_alpha_type = ALPHA_INVALID;
1286 // Only inputs can have unconditional alpha output (OUTPUT_BLANK_ALPHA
1287 // or OUTPUT_POSTMULTIPLIED_ALPHA), and they have already been
1288 // taken care of above. Rationale: Even if you could imagine
1289 // e.g. an effect that took in an image and set alpha=1.0
1290 // unconditionally, it wouldn't make any sense to have it as
1291 // e.g. OUTPUT_BLANK_ALPHA, since it wouldn't know whether it
1292 // got its input pre- or postmultiplied, so it wouldn't know
1293 // whether to divide away the old alpha or not.
1294 Effect::AlphaHandling alpha_handling = node->effect->alpha_handling();
1295 assert(alpha_handling == Effect::INPUT_AND_OUTPUT_PREMULTIPLIED_ALPHA ||
1296 alpha_handling == Effect::INPUT_PREMULTIPLIED_ALPHA_KEEP_BLANK ||
1297 alpha_handling == Effect::DONT_CARE_ALPHA_TYPE);
1299 // If the node has multiple inputs, check that they are all valid and
1301 bool any_invalid = false;
1302 bool any_premultiplied = false;
1303 bool any_postmultiplied = false;
1305 for (unsigned j = 0; j < node->incoming_links.size(); ++j) {
1306 switch (node->incoming_links[j]->output_alpha_type) {
1311 // Blank is good as both pre- and postmultiplied alpha,
1312 // so just ignore it.
1314 case ALPHA_PREMULTIPLIED:
1315 any_premultiplied = true;
1317 case ALPHA_POSTMULTIPLIED:
1318 any_postmultiplied = true;
1326 node->output_alpha_type = ALPHA_INVALID;
1330 // Inputs must be of the same type.
1331 if (any_premultiplied && any_postmultiplied) {
1332 node->output_alpha_type = ALPHA_INVALID;
1336 if (alpha_handling == Effect::INPUT_AND_OUTPUT_PREMULTIPLIED_ALPHA ||
1337 alpha_handling == Effect::INPUT_PREMULTIPLIED_ALPHA_KEEP_BLANK) {
1338 // This combination (requiring premultiplied alpha, but _not_ requiring
1339 // linear light) is illegal, since the combination of premultiplied alpha
1340 // and nonlinear inputs is meaningless.
1341 assert(node->effect->needs_linear_light());
1343 // If the effect has asked for premultiplied alpha, check that it has got it.
1344 if (any_postmultiplied) {
1345 node->output_alpha_type = ALPHA_INVALID;
1346 } else if (!any_premultiplied &&
1347 alpha_handling == Effect::INPUT_PREMULTIPLIED_ALPHA_KEEP_BLANK) {
1348 // Blank input alpha, and the effect preserves blank alpha.
1349 node->output_alpha_type = ALPHA_BLANK;
1351 node->output_alpha_type = ALPHA_PREMULTIPLIED;
1354 // OK, all inputs are the same, and this effect is not going
1356 assert(alpha_handling == Effect::DONT_CARE_ALPHA_TYPE);
1357 if (any_premultiplied) {
1358 node->output_alpha_type = ALPHA_PREMULTIPLIED;
1359 } else if (any_postmultiplied) {
1360 node->output_alpha_type = ALPHA_POSTMULTIPLIED;
1362 node->output_alpha_type = ALPHA_BLANK;
1368 bool EffectChain::node_needs_colorspace_fix(Node *node)
1370 if (node->disabled) {
1373 if (node->effect->num_inputs() == 0) {
1377 // propagate_gamma_and_color_space() has already set our output
1378 // to COLORSPACE_INVALID if the inputs differ, so we can rely on that.
1379 if (node->output_color_space == COLORSPACE_INVALID) {
1382 return (node->effect->needs_srgb_primaries() && node->output_color_space != COLORSPACE_sRGB);
1385 // Fix up color spaces so that there are no COLORSPACE_INVALID nodes left in
1386 // the graph. Our strategy is not always optimal, but quite simple:
1387 // Find an effect that's as early as possible where the inputs are of
1388 // unacceptable colorspaces (that is, either different, or, if the effect only
1389 // wants sRGB, not sRGB.) Add appropriate conversions on all its inputs,
1390 // propagate the information anew, and repeat until there are no more such
1392 void EffectChain::fix_internal_color_spaces()
1394 unsigned colorspace_propagation_pass = 0;
1398 for (unsigned i = 0; i < nodes.size(); ++i) {
1399 Node *node = nodes[i];
1400 if (!node_needs_colorspace_fix(node)) {
1404 // Go through each input that is not sRGB, and insert
1405 // a colorspace conversion after it.
1406 for (unsigned j = 0; j < node->incoming_links.size(); ++j) {
1407 Node *input = node->incoming_links[j];
1408 assert(input->output_color_space != COLORSPACE_INVALID);
1409 if (input->output_color_space == COLORSPACE_sRGB) {
1412 Node *conversion = add_node(new ColorspaceConversionEffect());
1413 CHECK(conversion->effect->set_int("source_space", input->output_color_space));
1414 CHECK(conversion->effect->set_int("destination_space", COLORSPACE_sRGB));
1415 conversion->output_color_space = COLORSPACE_sRGB;
1416 replace_sender(input, conversion);
1417 connect_nodes(input, conversion);
1420 // Re-sort topologically, and propagate the new information.
1421 propagate_gamma_and_color_space();
1428 sprintf(filename, "step5-colorspacefix-iter%u.dot", ++colorspace_propagation_pass);
1429 output_dot(filename);
1430 assert(colorspace_propagation_pass < 100);
1431 } while (found_any);
1433 for (unsigned i = 0; i < nodes.size(); ++i) {
1434 Node *node = nodes[i];
1435 if (node->disabled) {
1438 assert(node->output_color_space != COLORSPACE_INVALID);
1442 bool EffectChain::node_needs_alpha_fix(Node *node)
1444 if (node->disabled) {
1448 // propagate_alpha() has already set our output to ALPHA_INVALID if the
1449 // inputs differ or we are otherwise in mismatch, so we can rely on that.
1450 return (node->output_alpha_type == ALPHA_INVALID);
1453 // Fix up alpha so that there are no ALPHA_INVALID nodes left in
1454 // the graph. Similar to fix_internal_color_spaces().
1455 void EffectChain::fix_internal_alpha(unsigned step)
1457 unsigned alpha_propagation_pass = 0;
1461 for (unsigned i = 0; i < nodes.size(); ++i) {
1462 Node *node = nodes[i];
1463 if (!node_needs_alpha_fix(node)) {
1467 // If we need to fix up GammaExpansionEffect, then clearly something
1468 // is wrong, since the combination of premultiplied alpha and nonlinear inputs
1470 assert(node->effect->effect_type_id() != "GammaExpansionEffect");
1472 AlphaType desired_type = ALPHA_PREMULTIPLIED;
1474 // GammaCompressionEffect is special; it needs postmultiplied alpha.
1475 if (node->effect->effect_type_id() == "GammaCompressionEffect") {
1476 assert(node->incoming_links.size() == 1);
1477 assert(node->incoming_links[0]->output_alpha_type == ALPHA_PREMULTIPLIED);
1478 desired_type = ALPHA_POSTMULTIPLIED;
1481 // Go through each input that is not premultiplied alpha, and insert
1482 // a conversion before it.
1483 for (unsigned j = 0; j < node->incoming_links.size(); ++j) {
1484 Node *input = node->incoming_links[j];
1485 assert(input->output_alpha_type != ALPHA_INVALID);
1486 if (input->output_alpha_type == desired_type ||
1487 input->output_alpha_type == ALPHA_BLANK) {
1491 if (desired_type == ALPHA_PREMULTIPLIED) {
1492 conversion = add_node(new AlphaMultiplicationEffect());
1494 conversion = add_node(new AlphaDivisionEffect());
1496 conversion->output_alpha_type = desired_type;
1497 replace_sender(input, conversion);
1498 connect_nodes(input, conversion);
1501 // Re-sort topologically, and propagate the new information.
1502 propagate_gamma_and_color_space();
1510 sprintf(filename, "step%u-alphafix-iter%u.dot", step, ++alpha_propagation_pass);
1511 output_dot(filename);
1512 assert(alpha_propagation_pass < 100);
1513 } while (found_any);
1515 for (unsigned i = 0; i < nodes.size(); ++i) {
1516 Node *node = nodes[i];
1517 if (node->disabled) {
1520 assert(node->output_alpha_type != ALPHA_INVALID);
1524 // Make so that the output is in the desired color space.
1525 void EffectChain::fix_output_color_space()
1527 Node *output = find_output_node();
1528 if (output->output_color_space != output_format.color_space) {
1529 Node *conversion = add_node(new ColorspaceConversionEffect());
1530 CHECK(conversion->effect->set_int("source_space", output->output_color_space));
1531 CHECK(conversion->effect->set_int("destination_space", output_format.color_space));
1532 conversion->output_color_space = output_format.color_space;
1533 connect_nodes(output, conversion);
1535 propagate_gamma_and_color_space();
1539 // Make so that the output is in the desired pre-/postmultiplication alpha state.
1540 void EffectChain::fix_output_alpha()
1542 Node *output = find_output_node();
1543 assert(output->output_alpha_type != ALPHA_INVALID);
1544 if (output->output_alpha_type == ALPHA_BLANK) {
1545 // No alpha output, so we don't care.
1548 if (output->output_alpha_type == ALPHA_PREMULTIPLIED &&
1549 output_alpha_format == OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED) {
1550 Node *conversion = add_node(new AlphaDivisionEffect());
1551 connect_nodes(output, conversion);
1553 propagate_gamma_and_color_space();
1555 if (output->output_alpha_type == ALPHA_POSTMULTIPLIED &&
1556 output_alpha_format == OUTPUT_ALPHA_FORMAT_PREMULTIPLIED) {
1557 Node *conversion = add_node(new AlphaMultiplicationEffect());
1558 connect_nodes(output, conversion);
1560 propagate_gamma_and_color_space();
1564 bool EffectChain::node_needs_gamma_fix(Node *node)
1566 if (node->disabled) {
1570 // Small hack since the output is not an explicit node:
1571 // If we are the last node and our output is in the wrong
1572 // space compared to EffectChain's output, we need to fix it.
1573 // This will only take us to linear, but fix_output_gamma()
1574 // will come and take us to the desired output gamma
1577 // This needs to be before everything else, since it could
1578 // even apply to inputs (if they are the only effect).
1579 if (node->outgoing_links.empty() &&
1580 node->output_gamma_curve != output_format.gamma_curve &&
1581 node->output_gamma_curve != GAMMA_LINEAR) {
1585 if (node->effect->num_inputs() == 0) {
1589 // propagate_gamma_and_color_space() has already set our output
1590 // to GAMMA_INVALID if the inputs differ, so we can rely on that,
1591 // except for GammaCompressionEffect.
1592 if (node->output_gamma_curve == GAMMA_INVALID) {
1595 if (node->effect->effect_type_id() == "GammaCompressionEffect") {
1596 assert(node->incoming_links.size() == 1);
1597 return node->incoming_links[0]->output_gamma_curve != GAMMA_LINEAR;
1600 return (node->effect->needs_linear_light() && node->output_gamma_curve != GAMMA_LINEAR);
1603 // Very similar to fix_internal_color_spaces(), but for gamma.
1604 // There is one difference, though; before we start adding conversion nodes,
1605 // we see if we can get anything out of asking the sources to deliver
1606 // linear gamma directly. fix_internal_gamma_by_asking_inputs()
1607 // does that part, while fix_internal_gamma_by_inserting_nodes()
1608 // inserts nodes as needed afterwards.
1609 void EffectChain::fix_internal_gamma_by_asking_inputs(unsigned step)
1611 unsigned gamma_propagation_pass = 0;
1615 for (unsigned i = 0; i < nodes.size(); ++i) {
1616 Node *node = nodes[i];
1617 if (!node_needs_gamma_fix(node)) {
1621 // See if all inputs can give us linear gamma. If not, leave it.
1622 vector<Node *> nonlinear_inputs;
1623 find_all_nonlinear_inputs(node, &nonlinear_inputs);
1624 assert(!nonlinear_inputs.empty());
1627 for (unsigned i = 0; i < nonlinear_inputs.size(); ++i) {
1628 Input *input = static_cast<Input *>(nonlinear_inputs[i]->effect);
1629 all_ok &= input->can_output_linear_gamma();
1636 for (unsigned i = 0; i < nonlinear_inputs.size(); ++i) {
1637 CHECK(nonlinear_inputs[i]->effect->set_int("output_linear_gamma", 1));
1638 nonlinear_inputs[i]->output_gamma_curve = GAMMA_LINEAR;
1641 // Re-sort topologically, and propagate the new information.
1642 propagate_gamma_and_color_space();
1649 sprintf(filename, "step%u-gammafix-iter%u.dot", step, ++gamma_propagation_pass);
1650 output_dot(filename);
1651 assert(gamma_propagation_pass < 100);
1652 } while (found_any);
1655 void EffectChain::fix_internal_gamma_by_inserting_nodes(unsigned step)
1657 unsigned gamma_propagation_pass = 0;
1661 for (unsigned i = 0; i < nodes.size(); ++i) {
1662 Node *node = nodes[i];
1663 if (!node_needs_gamma_fix(node)) {
1667 // Special case: We could be an input and still be asked to
1668 // fix our gamma; if so, we should be the only node
1669 // (as node_needs_gamma_fix() would only return true in
1670 // for an input in that case). That means we should insert
1671 // a conversion node _after_ ourselves.
1672 if (node->incoming_links.empty()) {
1673 assert(node->outgoing_links.empty());
1674 Node *conversion = add_node(new GammaExpansionEffect());
1675 CHECK(conversion->effect->set_int("source_curve", node->output_gamma_curve));
1676 conversion->output_gamma_curve = GAMMA_LINEAR;
1677 connect_nodes(node, conversion);
1680 // If not, go through each input that is not linear gamma,
1681 // and insert a gamma conversion after it.
1682 for (unsigned j = 0; j < node->incoming_links.size(); ++j) {
1683 Node *input = node->incoming_links[j];
1684 assert(input->output_gamma_curve != GAMMA_INVALID);
1685 if (input->output_gamma_curve == GAMMA_LINEAR) {
1688 Node *conversion = add_node(new GammaExpansionEffect());
1689 CHECK(conversion->effect->set_int("source_curve", input->output_gamma_curve));
1690 conversion->output_gamma_curve = GAMMA_LINEAR;
1691 replace_sender(input, conversion);
1692 connect_nodes(input, conversion);
1695 // Re-sort topologically, and propagate the new information.
1697 propagate_gamma_and_color_space();
1704 sprintf(filename, "step%u-gammafix-iter%u.dot", step, ++gamma_propagation_pass);
1705 output_dot(filename);
1706 assert(gamma_propagation_pass < 100);
1707 } while (found_any);
1709 for (unsigned i = 0; i < nodes.size(); ++i) {
1710 Node *node = nodes[i];
1711 if (node->disabled) {
1714 assert(node->output_gamma_curve != GAMMA_INVALID);
1718 // Make so that the output is in the desired gamma.
1719 // Note that this assumes linear input gamma, so it might create the need
1720 // for another pass of fix_internal_gamma().
1721 void EffectChain::fix_output_gamma()
1723 Node *output = find_output_node();
1724 if (output->output_gamma_curve != output_format.gamma_curve) {
1725 Node *conversion = add_node(new GammaCompressionEffect());
1726 CHECK(conversion->effect->set_int("destination_curve", output_format.gamma_curve));
1727 conversion->output_gamma_curve = output_format.gamma_curve;
1728 connect_nodes(output, conversion);
1732 // If the user has requested Y'CbCr output, we need to do this conversion
1733 // _after_ GammaCompressionEffect etc., but before dither (see below).
1734 // This is because Y'CbCr, with the exception of a special optional mode
1735 // in Rec. 2020 (which we currently don't support), is defined to work on
1736 // gamma-encoded data.
1737 void EffectChain::add_ycbcr_conversion_if_needed()
1739 assert(output_color_rgba || num_output_color_ycbcr > 0);
1740 if (num_output_color_ycbcr == 0) {
1743 Node *output = find_output_node();
1744 ycbcr_conversion_effect_node = add_node(new YCbCrConversionEffect(output_ycbcr_format, output_ycbcr_type));
1745 connect_nodes(output, ycbcr_conversion_effect_node);
1748 // If the user has requested dither, add a DitherEffect right at the end
1749 // (after GammaCompressionEffect etc.). This needs to be done after everything else,
1750 // since dither is about the only effect that can _not_ be done in linear space.
1751 void EffectChain::add_dither_if_needed()
1753 if (num_dither_bits == 0) {
1756 Node *output = find_output_node();
1757 Node *dither = add_node(new DitherEffect());
1758 CHECK(dither->effect->set_int("num_bits", num_dither_bits));
1759 connect_nodes(output, dither);
1761 dither_effect = dither->effect;
1764 // Compute shaders can't output to the framebuffer, so if the last
1765 // phase ends in a compute shader, add a dummy phase at the end that
1766 // only blits directly from the temporary texture.
1768 // TODO: Add an API for rendering directly to textures, for the cases
1769 // where we're only rendering to an FBO anyway.
1770 void EffectChain::add_dummy_effect_if_needed()
1772 Node *output = find_output_node();
1774 // See if the last effect that's not strong one-to-one is a compute shader.
1775 Node *last_effect = output;
1776 while (last_effect->effect->num_inputs() == 1 &&
1777 last_effect->effect->strong_one_to_one_sampling()) {
1778 last_effect = last_effect->incoming_links[0];
1780 if (last_effect->effect->is_compute_shader()) {
1781 Node *dummy = add_node(new ComputeShaderOutputDisplayEffect());
1782 connect_nodes(output, dummy);
1783 has_dummy_effect = true;
1787 // Find the output node. This is, simply, one that has no outgoing links.
1788 // If there are multiple ones, the graph is malformed (we do not support
1789 // multiple outputs right now).
1790 Node *EffectChain::find_output_node()
1792 vector<Node *> output_nodes;
1793 for (unsigned i = 0; i < nodes.size(); ++i) {
1794 Node *node = nodes[i];
1795 if (node->disabled) {
1798 if (node->outgoing_links.empty()) {
1799 output_nodes.push_back(node);
1802 assert(output_nodes.size() == 1);
1803 return output_nodes[0];
1806 void EffectChain::finalize()
1808 // Output the graph as it is before we do any conversions on it.
1809 output_dot("step0-start.dot");
1811 // Give each effect in turn a chance to rewrite its own part of the graph.
1812 // Note that if more effects are added as part of this, they will be
1813 // picked up as part of the same for loop, since they are added at the end.
1814 for (unsigned i = 0; i < nodes.size(); ++i) {
1815 nodes[i]->effect->rewrite_graph(this, nodes[i]);
1817 output_dot("step1-rewritten.dot");
1819 find_color_spaces_for_inputs();
1820 output_dot("step2-input-colorspace.dot");
1823 output_dot("step3-propagated-alpha.dot");
1825 propagate_gamma_and_color_space();
1826 output_dot("step4-propagated-all.dot");
1828 fix_internal_color_spaces();
1829 fix_internal_alpha(6);
1830 fix_output_color_space();
1831 output_dot("step7-output-colorspacefix.dot");
1833 output_dot("step8-output-alphafix.dot");
1835 // Note that we need to fix gamma after colorspace conversion,
1836 // because colorspace conversions might create needs for gamma conversions.
1837 // Also, we need to run an extra pass of fix_internal_gamma() after
1838 // fixing the output gamma, as we only have conversions to/from linear,
1839 // and fix_internal_alpha() since GammaCompressionEffect needs
1840 // postmultiplied input.
1841 fix_internal_gamma_by_asking_inputs(9);
1842 fix_internal_gamma_by_inserting_nodes(10);
1844 output_dot("step11-output-gammafix.dot");
1846 output_dot("step12-output-alpha-propagated.dot");
1847 fix_internal_alpha(13);
1848 output_dot("step14-output-alpha-fixed.dot");
1849 fix_internal_gamma_by_asking_inputs(15);
1850 fix_internal_gamma_by_inserting_nodes(16);
1852 output_dot("step17-before-ycbcr.dot");
1853 add_ycbcr_conversion_if_needed();
1855 output_dot("step18-before-dither.dot");
1856 add_dither_if_needed();
1858 output_dot("step19-before-dummy-effect.dot");
1859 add_dummy_effect_if_needed();
1861 output_dot("step20-final.dot");
1863 // Construct all needed GLSL programs, starting at the output.
1864 // We need to keep track of which effects have already been computed,
1865 // as an effect with multiple users could otherwise be calculated
1867 map<Node *, Phase *> completed_effects;
1868 construct_phase(find_output_node(), &completed_effects);
1870 output_dot("step21-split-to-phases.dot");
1872 assert(phases[0]->inputs.empty());
1877 void EffectChain::render_to_fbo(GLuint dest_fbo, unsigned width, unsigned height)
1879 // Save original viewport.
1880 GLuint x = 0, y = 0;
1882 if (width == 0 && height == 0) {
1884 glGetIntegerv(GL_VIEWPORT, viewport);
1887 width = viewport[2];
1888 height = viewport[3];
1891 render(dest_fbo, {}, x, y, width, height);
1894 void EffectChain::render_to_texture(const vector<DestinationTexture> &destinations, unsigned width, unsigned height)
1897 assert(!destinations.empty());
1899 if (!has_dummy_effect) {
1900 // We don't end in a compute shader, so there's nothing specific for us to do.
1901 // Create an FBO for this set of textures, and just render to that.
1902 GLuint texnums[4] = { 0, 0, 0, 0 };
1903 for (unsigned i = 0; i < destinations.size() && i < 4; ++i) {
1904 texnums[i] = destinations[i].texnum;
1906 GLuint dest_fbo = resource_pool->create_fbo(texnums[0], texnums[1], texnums[2], texnums[3]);
1907 render(dest_fbo, {}, 0, 0, width, height);
1908 resource_pool->release_fbo(dest_fbo);
1910 render((GLuint)-1, destinations, 0, 0, width, height);
1914 void EffectChain::render(GLuint dest_fbo, const vector<DestinationTexture> &destinations, unsigned x, unsigned y, unsigned width, unsigned height)
1917 assert(destinations.size() <= 1);
1919 // This needs to be set anew, in case we are coming from a different context
1920 // from when we initialized.
1922 glDisable(GL_DITHER);
1925 const bool final_srgb = glIsEnabled(GL_FRAMEBUFFER_SRGB);
1927 bool current_srgb = final_srgb;
1931 glDisable(GL_BLEND);
1933 glDisable(GL_DEPTH_TEST);
1935 glDepthMask(GL_FALSE);
1938 set<Phase *> generated_mipmaps;
1940 // We keep one texture per output, but only for as long as we actually have any
1941 // phases that need it as an input. (We don't make any effort to reorder phases
1942 // to minimize the number of textures in play, as register allocation can be
1943 // complicated and we rarely have much to gain, since our graphs are typically
1945 map<Phase *, GLuint> output_textures;
1946 map<Phase *, int> ref_counts;
1947 for (Phase *phase : phases) {
1948 for (Phase *input : phase->inputs) {
1949 ++ref_counts[input];
1953 size_t num_phases = phases.size();
1954 if (destinations.empty()) {
1955 assert(dest_fbo != (GLuint)-1);
1957 assert(has_dummy_effect);
1960 assert(num_phases >= 2);
1961 assert(!phases.back()->is_compute_shader);
1962 assert(phases.back()->effects.size() == 1);
1963 assert(phases.back()->effects[0]->effect->effect_type_id() == "ComputeShaderOutputDisplayEffect");
1965 // We are rendering to a set of textures, so we can run the compute shader
1966 // directly and skip the dummy phase.
1970 for (unsigned phase_num = 0; phase_num < num_phases; ++phase_num) {
1971 Phase *phase = phases[phase_num];
1973 if (do_phase_timing) {
1974 GLuint timer_query_object;
1975 if (phase->timer_query_objects_free.empty()) {
1976 glGenQueries(1, &timer_query_object);
1978 timer_query_object = phase->timer_query_objects_free.front();
1979 phase->timer_query_objects_free.pop_front();
1981 glBeginQuery(GL_TIME_ELAPSED, timer_query_object);
1982 phase->timer_query_objects_running.push_back(timer_query_object);
1984 bool last_phase = (phase_num == num_phases - 1);
1985 if (phase_num == num_phases - 1) {
1986 // Last phase goes to the output the user specified.
1987 if (!phase->is_compute_shader) {
1988 glBindFramebuffer(GL_FRAMEBUFFER, dest_fbo);
1990 GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT);
1991 assert(status == GL_FRAMEBUFFER_COMPLETE);
1992 glViewport(x, y, width, height);
1994 if (dither_effect != nullptr) {
1995 CHECK(dither_effect->set_int("output_width", width));
1996 CHECK(dither_effect->set_int("output_height", height));
2000 // Enable sRGB rendering for intermediates in case we are
2001 // rendering to an sRGB format.
2002 // TODO: Support this for compute shaders.
2003 bool needs_srgb = last_phase ? final_srgb : true;
2004 if (needs_srgb && !current_srgb) {
2005 glEnable(GL_FRAMEBUFFER_SRGB);
2007 current_srgb = true;
2008 } else if (!needs_srgb && current_srgb) {
2009 glDisable(GL_FRAMEBUFFER_SRGB);
2011 current_srgb = true;
2014 // Find a texture for this phase.
2015 inform_input_sizes(phase);
2016 find_output_size(phase);
2017 vector<DestinationTexture> phase_destinations;
2019 GLuint tex_num = resource_pool->create_2d_texture(intermediate_format, phase->output_width, phase->output_height);
2020 output_textures.insert(make_pair(phase, tex_num));
2021 phase_destinations.push_back(DestinationTexture{ tex_num, intermediate_format });
2023 // The output texture needs to have valid state to be written to by a compute shader.
2024 glActiveTexture(GL_TEXTURE0);
2026 glBindTexture(GL_TEXTURE_2D, tex_num);
2028 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
2030 } else if (phase->is_compute_shader) {
2031 assert(!destinations.empty());
2032 phase_destinations = destinations;
2035 execute_phase(phase, output_textures, phase_destinations, &generated_mipmaps);
2036 if (do_phase_timing) {
2037 glEndQuery(GL_TIME_ELAPSED);
2040 // Drop any input textures we don't need anymore.
2041 for (Phase *input : phase->inputs) {
2042 assert(ref_counts[input] > 0);
2043 if (--ref_counts[input] == 0) {
2044 resource_pool->release_2d_texture(output_textures[input]);
2045 output_textures.erase(input);
2050 for (const auto &phase_and_texnum : output_textures) {
2051 resource_pool->release_2d_texture(phase_and_texnum.second);
2054 glBindFramebuffer(GL_FRAMEBUFFER, 0);
2059 glBindBuffer(GL_ARRAY_BUFFER, 0);
2061 glBindVertexArray(0);
2064 if (do_phase_timing) {
2065 // Get back the timer queries.
2066 for (unsigned phase_num = 0; phase_num < phases.size(); ++phase_num) {
2067 Phase *phase = phases[phase_num];
2068 for (auto timer_it = phase->timer_query_objects_running.cbegin();
2069 timer_it != phase->timer_query_objects_running.cend(); ) {
2070 GLint timer_query_object = *timer_it;
2072 glGetQueryObjectiv(timer_query_object, GL_QUERY_RESULT_AVAILABLE, &available);
2074 GLuint64 time_elapsed;
2075 glGetQueryObjectui64v(timer_query_object, GL_QUERY_RESULT, &time_elapsed);
2076 phase->time_elapsed_ns += time_elapsed;
2077 ++phase->num_measured_iterations;
2078 phase->timer_query_objects_free.push_back(timer_query_object);
2079 phase->timer_query_objects_running.erase(timer_it++);
2088 void EffectChain::enable_phase_timing(bool enable)
2091 assert(movit_timer_queries_supported);
2093 this->do_phase_timing = enable;
2096 void EffectChain::reset_phase_timing()
2098 for (unsigned phase_num = 0; phase_num < phases.size(); ++phase_num) {
2099 Phase *phase = phases[phase_num];
2100 phase->time_elapsed_ns = 0;
2101 phase->num_measured_iterations = 0;
2105 void EffectChain::print_phase_timing()
2107 double total_time_ms = 0.0;
2108 for (unsigned phase_num = 0; phase_num < phases.size(); ++phase_num) {
2109 Phase *phase = phases[phase_num];
2110 double avg_time_ms = phase->time_elapsed_ns * 1e-6 / phase->num_measured_iterations;
2111 printf("Phase %d: %5.1f ms [", phase_num, avg_time_ms);
2112 for (unsigned effect_num = 0; effect_num < phase->effects.size(); ++effect_num) {
2113 if (effect_num != 0) {
2116 printf("%s", phase->effects[effect_num]->effect->effect_type_id().c_str());
2119 total_time_ms += avg_time_ms;
2121 printf("Total: %5.1f ms\n", total_time_ms);
2124 void EffectChain::execute_phase(Phase *phase,
2125 const map<Phase *, GLuint> &output_textures,
2126 const std::vector<DestinationTexture> &destinations,
2127 set<Phase *> *generated_mipmaps)
2129 // Set up RTT inputs for this phase.
2130 for (unsigned sampler = 0; sampler < phase->inputs.size(); ++sampler) {
2131 glActiveTexture(GL_TEXTURE0 + sampler);
2132 Phase *input = phase->inputs[sampler];
2133 input->output_node->bound_sampler_num = sampler;
2134 const auto it = output_textures.find(input);
2135 assert(it != output_textures.end());
2136 glBindTexture(GL_TEXTURE_2D, it->second);
2138 if (phase->input_needs_mipmaps && generated_mipmaps->count(input) == 0) {
2139 glGenerateMipmap(GL_TEXTURE_2D);
2141 generated_mipmaps->insert(input);
2143 setup_rtt_sampler(sampler, phase->input_needs_mipmaps);
2144 phase->input_samplers[sampler] = sampler; // Bind the sampler to the right uniform.
2147 GLuint instance_program_num = resource_pool->use_glsl_program(phase->glsl_program_num);
2150 // And now the output.
2152 if (phase->is_compute_shader) {
2153 assert(!destinations.empty());
2155 // This is currently the only place where we use image units,
2156 // so we can always start at 0. TODO: Support multiple destinations.
2157 phase->outbuf_image_unit = 0;
2158 glBindImageTexture(phase->outbuf_image_unit, destinations[0].texnum, 0, GL_FALSE, 0, GL_WRITE_ONLY, destinations[0].format);
2160 phase->uniform_output_size[0] = phase->output_width;
2161 phase->uniform_output_size[1] = phase->output_height;
2162 phase->inv_output_size.x = 1.0f / phase->output_width;
2163 phase->inv_output_size.y = 1.0f / phase->output_height;
2164 phase->output_texcoord_adjust.x = 0.5f / phase->output_width;
2165 phase->output_texcoord_adjust.y = 0.5f / phase->output_height;
2166 } else if (!destinations.empty()) {
2167 assert(destinations.size() == 1);
2168 fbo = resource_pool->create_fbo(destinations[0].texnum);
2169 glBindFramebuffer(GL_FRAMEBUFFER, fbo);
2170 glViewport(0, 0, phase->output_width, phase->output_height);
2173 // Give the required parameters to all the effects.
2174 unsigned sampler_num = phase->inputs.size();
2175 for (unsigned i = 0; i < phase->effects.size(); ++i) {
2176 Node *node = phase->effects[i];
2177 unsigned old_sampler_num = sampler_num;
2178 node->effect->set_gl_state(instance_program_num, phase->effect_ids[node], &sampler_num);
2181 if (node->effect->is_single_texture()) {
2182 assert(sampler_num - old_sampler_num == 1);
2183 node->bound_sampler_num = old_sampler_num;
2185 node->bound_sampler_num = -1;
2189 if (phase->is_compute_shader) {
2191 phase->compute_shader_node->effect->get_compute_dimensions(phase->output_width, phase->output_height, &x, &y, &z);
2193 // Uniforms need to come after set_gl_state() _and_ get_compute_dimensions(),
2194 // since they can be updated from there.
2195 setup_uniforms(phase);
2196 glDispatchCompute(x, y, z);
2198 glMemoryBarrier(GL_TEXTURE_FETCH_BARRIER_BIT | GL_TEXTURE_UPDATE_BARRIER_BIT);
2201 // Uniforms need to come after set_gl_state(), since they can be updated
2203 setup_uniforms(phase);
2205 // Bind the vertex data.
2206 GLuint vao = resource_pool->create_vec2_vao(phase->attribute_indexes, vbo);
2207 glBindVertexArray(vao);
2209 glDrawArrays(GL_TRIANGLES, 0, 3);
2212 resource_pool->release_vec2_vao(vao);
2215 for (unsigned i = 0; i < phase->effects.size(); ++i) {
2216 Node *node = phase->effects[i];
2217 node->effect->clear_gl_state();
2220 resource_pool->unuse_glsl_program(instance_program_num);
2223 resource_pool->release_fbo(fbo);
2227 void EffectChain::setup_uniforms(Phase *phase)
2229 // TODO: Use UBO blocks.
2230 for (size_t i = 0; i < phase->uniforms_image2d.size(); ++i) {
2231 const Uniform<int> &uniform = phase->uniforms_image2d[i];
2232 if (uniform.location != -1) {
2233 glUniform1iv(uniform.location, uniform.num_values, uniform.value);
2236 for (size_t i = 0; i < phase->uniforms_sampler2d.size(); ++i) {
2237 const Uniform<int> &uniform = phase->uniforms_sampler2d[i];
2238 if (uniform.location != -1) {
2239 glUniform1iv(uniform.location, uniform.num_values, uniform.value);
2242 for (size_t i = 0; i < phase->uniforms_bool.size(); ++i) {
2243 const Uniform<bool> &uniform = phase->uniforms_bool[i];
2244 assert(uniform.num_values == 1);
2245 if (uniform.location != -1) {
2246 glUniform1i(uniform.location, *uniform.value);
2249 for (size_t i = 0; i < phase->uniforms_int.size(); ++i) {
2250 const Uniform<int> &uniform = phase->uniforms_int[i];
2251 if (uniform.location != -1) {
2252 glUniform1iv(uniform.location, uniform.num_values, uniform.value);
2255 for (size_t i = 0; i < phase->uniforms_ivec2.size(); ++i) {
2256 const Uniform<int> &uniform = phase->uniforms_ivec2[i];
2257 if (uniform.location != -1) {
2258 glUniform2iv(uniform.location, uniform.num_values, uniform.value);
2261 for (size_t i = 0; i < phase->uniforms_float.size(); ++i) {
2262 const Uniform<float> &uniform = phase->uniforms_float[i];
2263 if (uniform.location != -1) {
2264 glUniform1fv(uniform.location, uniform.num_values, uniform.value);
2267 for (size_t i = 0; i < phase->uniforms_vec2.size(); ++i) {
2268 const Uniform<float> &uniform = phase->uniforms_vec2[i];
2269 if (uniform.location != -1) {
2270 glUniform2fv(uniform.location, uniform.num_values, uniform.value);
2273 for (size_t i = 0; i < phase->uniforms_vec3.size(); ++i) {
2274 const Uniform<float> &uniform = phase->uniforms_vec3[i];
2275 if (uniform.location != -1) {
2276 glUniform3fv(uniform.location, uniform.num_values, uniform.value);
2279 for (size_t i = 0; i < phase->uniforms_vec4.size(); ++i) {
2280 const Uniform<float> &uniform = phase->uniforms_vec4[i];
2281 if (uniform.location != -1) {
2282 glUniform4fv(uniform.location, uniform.num_values, uniform.value);
2285 for (size_t i = 0; i < phase->uniforms_mat3.size(); ++i) {
2286 const Uniform<Matrix3d> &uniform = phase->uniforms_mat3[i];
2287 assert(uniform.num_values == 1);
2288 if (uniform.location != -1) {
2289 // Convert to float (GLSL has no double matrices).
2291 for (unsigned y = 0; y < 3; ++y) {
2292 for (unsigned x = 0; x < 3; ++x) {
2293 matrixf[y + x * 3] = (*uniform.value)(y, x);
2296 glUniformMatrix3fv(uniform.location, 1, GL_FALSE, matrixf);
2301 void EffectChain::setup_rtt_sampler(int sampler_num, bool use_mipmaps)
2303 glActiveTexture(GL_TEXTURE0 + sampler_num);
2306 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
2309 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
2312 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
2314 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
2318 } // namespace movit