15 #include "alpha_division_effect.h"
16 #include "alpha_multiplication_effect.h"
17 #include "colorspace_conversion_effect.h"
18 #include "dither_effect.h"
20 #include "effect_chain.h"
21 #include "effect_util.h"
22 #include "gamma_compression_effect.h"
23 #include "gamma_expansion_effect.h"
26 #include "resource_pool.h"
28 #include "ycbcr_conversion_effect.h"
30 using namespace Eigen;
37 // An effect whose only purpose is to sit in a phase on its own and take the
38 // texture output from a compute shader and display it to the normal backbuffer
39 // (or any FBO). That phase can be skipped when rendering using render_to_textures().
40 class ComputeShaderOutputDisplayEffect : public Effect {
42 ComputeShaderOutputDisplayEffect() {}
43 string effect_type_id() const override { return "ComputeShaderOutputDisplayEffect"; }
// Identity pass-through shader; the effect adds no processing of its own.
44 string output_fragment_shader() override { return read_file("identity.frag"); }
// The compute shader wrote its result to a texture, so this effect must
// read its input from a bounced texture rather than chaining inline.
45 bool needs_texture_bounce() const override { return true; }
// Construct an empty chain with the given output aspect ratio
// (aspect_nom / aspect_denom). If no ResourcePool is supplied, a private
// one is created and owned (owns_resource_pool), to be deleted in the
// destructor. Intermediate textures default to GL_RGBA16F with no
// framebuffer transformation; output origin defaults to bottom-left.
50 EffectChain::EffectChain(float aspect_nom, float aspect_denom, ResourcePool *resource_pool)
51 : aspect_nom(aspect_nom),
52 aspect_denom(aspect_denom),
53 output_color_rgba(false),
54 num_output_color_ycbcr(0),
55 dither_effect(nullptr),
56 ycbcr_conversion_effect_node(nullptr),
57 intermediate_format(GL_RGBA16F),
58 intermediate_transformation(NO_FRAMEBUFFER_TRANSFORMATION),
60 output_origin(OUTPUT_ORIGIN_BOTTOM_LEFT),
62 resource_pool(resource_pool),
63 do_phase_timing(false) {
64 if (resource_pool == nullptr) {
65 this->resource_pool = new ResourcePool();
66 owns_resource_pool = true;
68 owns_resource_pool = false;
71 // Generate a VBO with some data in (shared position and texture coordinate data).
77 vbo = generate_vbo(2, GL_FLOAT, sizeof(vertices), vertices);
// Tear down the chain: delete every node's effect, release each phase's
// compiled GLSL program back to the resource pool, and free the shared VBO.
80 EffectChain::~EffectChain()
82 for (unsigned i = 0; i < nodes.size(); ++i) {
83 delete nodes[i]->effect;
86 for (unsigned i = 0; i < phases.size(); ++i) {
87 resource_pool->release_glsl_program(phases[i]->glsl_program_num);
// Only delete the pool if we created it ourselves in the constructor.
90 if (owns_resource_pool) {
93 glDeleteBuffers(1, &vbo);
// Register an input (a pixel source) with the chain and keep track of it
// in the inputs list.
97 Input *EffectChain::add_input(Input *input)
100 inputs.push_back(input);
// Declare the chain's RGBA output format and alpha handling. The assert
// guards against registering an RGBA output twice.
105 void EffectChain::add_output(const ImageFormat &format, OutputAlphaFormat alpha_format)
108 assert(!output_color_rgba);
109 output_format = format;
110 output_alpha_format = alpha_format;
111 output_color_rgba = true;
// Declare a Y'CbCr output. Up to two Y'CbCr outputs are supported
// (see the < 2 assert); a second one must match the first in luma
// coefficients, range, number of levels and output type. Only 4:4:4
// output (chroma_subsampling 1x1) is accepted here.
114 void EffectChain::add_ycbcr_output(const ImageFormat &format, OutputAlphaFormat alpha_format,
115 const YCbCrFormat &ycbcr_format, YCbCrOutputSplitting output_splitting,
119 assert(num_output_color_ycbcr < 2);
120 output_format = format;
121 output_alpha_format = alpha_format;
123 if (num_output_color_ycbcr == 1) {
124 // Check that the format is the same.
125 assert(output_ycbcr_format.luma_coefficients == ycbcr_format.luma_coefficients);
126 assert(output_ycbcr_format.full_range == ycbcr_format.full_range);
127 assert(output_ycbcr_format.num_levels == ycbcr_format.num_levels);
128 assert(output_ycbcr_format.chroma_subsampling_x == 1);
129 assert(output_ycbcr_format.chroma_subsampling_y == 1);
130 assert(output_ycbcr_type == output_type);
132 output_ycbcr_format = ycbcr_format;
133 output_ycbcr_type = output_type;
// Record the splitting mode for this output and bump the output count.
135 output_ycbcr_splitting[num_output_color_ycbcr++] = output_splitting;
137 assert(ycbcr_format.chroma_subsampling_x == 1);
138 assert(ycbcr_format.chroma_subsampling_y == 1);
// Change the Y'CbCr output format after the fact. Requires that a Y'CbCr
// output already exists and that it is 4:4:4; forwards the new format to
// the already-inserted YCbCrConversionEffect node.
141 void EffectChain::change_ycbcr_output_format(const YCbCrFormat &ycbcr_format)
143 assert(num_output_color_ycbcr > 0);
144 assert(output_ycbcr_format.chroma_subsampling_x == 1);
145 assert(output_ycbcr_format.chroma_subsampling_y == 1);
147 output_ycbcr_format = ycbcr_format;
149 YCbCrConversionEffect *effect = (YCbCrConversionEffect *)(ycbcr_conversion_effect_node->effect);
150 effect->change_output_format(ycbcr_format);
// Wrap an effect in a new graph Node with all analysis fields reset to
// their "unknown" values, register it in nodes/node_map, and tell the
// effect which chain it now belongs to. The initial loop asserts that the
// same effect is not added twice.
154 Node *EffectChain::add_node(Effect *effect)
156 for (unsigned i = 0; i < nodes.size(); ++i) {
157 assert(nodes[i]->effect != effect);
160 Node *node = new Node;
161 node->effect = effect;
162 node->disabled = false;
// Color space / gamma / alpha are unknown until the analysis passes run.
163 node->output_color_space = COLORSPACE_INVALID;
164 node->output_gamma_curve = GAMMA_INVALID;
165 node->output_alpha_type = ALPHA_INVALID;
166 node->needs_mipmaps = Effect::DOES_NOT_NEED_MIPMAPS;
167 node->one_to_one_sampling = false;
168 node->strong_one_to_one_sampling = false;
170 nodes.push_back(node);
171 node_map[effect] = node;
172 effect->inform_added(this);
// Create a directed edge sender -> receiver by recording it on both
// endpoints' link lists.
176 void EffectChain::connect_nodes(Node *sender, Node *receiver)
178 sender->outgoing_links.push_back(receiver);
179 receiver->incoming_links.push_back(sender);
// Move all of old_receiver's incoming edges onto new_receiver, and patch
// each sender's outgoing list so it points at new_receiver instead.
182 void EffectChain::replace_receiver(Node *old_receiver, Node *new_receiver)
184 new_receiver->incoming_links = old_receiver->incoming_links;
185 old_receiver->incoming_links.clear();
187 for (unsigned i = 0; i < new_receiver->incoming_links.size(); ++i) {
188 Node *sender = new_receiver->incoming_links[i];
189 for (unsigned j = 0; j < sender->outgoing_links.size(); ++j) {
190 if (sender->outgoing_links[j] == old_receiver) {
191 sender->outgoing_links[j] = new_receiver;
// Mirror image of replace_receiver(): move all of old_sender's outgoing
// edges onto new_sender, and patch each receiver's incoming list so it
// points at new_sender instead.
197 void EffectChain::replace_sender(Node *old_sender, Node *new_sender)
199 new_sender->outgoing_links = old_sender->outgoing_links;
200 old_sender->outgoing_links.clear();
202 for (unsigned i = 0; i < new_sender->outgoing_links.size(); ++i) {
203 Node *receiver = new_sender->outgoing_links[i];
204 for (unsigned j = 0; j < receiver->incoming_links.size(); ++j) {
205 if (receiver->incoming_links[j] == old_sender) {
206 receiver->incoming_links[j] = new_sender;
// Splice `middle` into the existing edge sender -> receiver, so the graph
// becomes sender -> middle -> receiver. Only the one matching edge is
// rewritten on each side. The final assert checks that `middle` ends up
// with exactly as many inputs as its effect declares.
212 void EffectChain::insert_node_between(Node *sender, Node *middle, Node *receiver)
214 for (unsigned i = 0; i < sender->outgoing_links.size(); ++i) {
215 if (sender->outgoing_links[i] == receiver) {
216 sender->outgoing_links[i] = middle;
217 middle->incoming_links.push_back(sender);
220 for (unsigned i = 0; i < receiver->incoming_links.size(); ++i) {
221 if (receiver->incoming_links[i] == sender) {
222 receiver->incoming_links[i] = middle;
223 middle->outgoing_links.push_back(receiver);
227 assert(middle->incoming_links.size() == middle->effect->num_inputs());
// Return the GL texture unit (GL_TEXTURE0 + n) that the given input of
// `node` is bound to. Only valid for effects that requested texture
// bounce, and only while a sampler is actually bound (0 <= n < 8).
230 GLenum EffectChain::get_input_sampler(Node *node, unsigned input_num) const
232 assert(node->effect->needs_texture_bounce());
233 assert(input_num < node->incoming_links.size());
234 assert(node->incoming_links[input_num]->bound_sampler_num >= 0);
235 assert(node->incoming_links[input_num]->bound_sampler_num < 8);
236 return GL_TEXTURE0 + node->incoming_links[input_num]->bound_sampler_num;
// Report whether the given input of `node` currently has a sampler bound
// (bound_sampler_num in [0, 8)).
// NOTE(review): declared as returning GLenum but the expression is a
// boolean; callers presumably treat the result as true/false — consider
// confirming and changing the return type to bool.
239 GLenum EffectChain::has_input_sampler(Node *node, unsigned input_num) const
241 assert(input_num < node->incoming_links.size());
242 return node->incoming_links[input_num]->bound_sampler_num >= 0 &&
243 node->incoming_links[input_num]->bound_sampler_num < 8;
// Recursively walk up from `node` and collect all leaf inputs
// (num_inputs() == 0) that feed it with non-linear gamma. The first test
// stops the walk once the data is already linear (unless the node is the
// GammaCompressionEffect itself, which outputs linear only nominally).
246 void EffectChain::find_all_nonlinear_inputs(Node *node, vector<Node *> *nonlinear_inputs)
248 if (node->output_gamma_curve == GAMMA_LINEAR &&
249 node->effect->effect_type_id() != "GammaCompressionEffect") {
252 if (node->effect->num_inputs() == 0) {
253 nonlinear_inputs->push_back(node);
255 assert(node->effect->num_inputs() == node->incoming_links.size());
256 for (unsigned i = 0; i < node->incoming_links.size(); ++i) {
257 find_all_nonlinear_inputs(node->incoming_links[i], nonlinear_inputs);
// Add an effect to the chain with explicit inputs. Each input effect must
// already have been added (node_map lookup), and the number of inputs
// must match what the effect declares.
262 Effect *EffectChain::add_effect(Effect *effect, const vector<Effect *> &inputs)
265 assert(inputs.size() == effect->num_inputs());
266 Node *node = add_node(effect);
267 for (unsigned i = 0; i < inputs.size(); ++i) {
268 assert(node_map.count(inputs[i]) != 0);
269 connect_nodes(node_map[inputs[i]], node);
274 // ESSL doesn't support token pasting. Replace PREFIX(x) with <effect_id>_x.
// Scans `text` for each occurrence of "PREFIX(", emits the text before it
// unchanged, substitutes the prefix, then copies the (possibly
// parenthesized) argument up to the matching close paren.
275 string replace_prefix(const string &text, const string &prefix)
280 while (start < text.size()) {
281 size_t pos = text.find("PREFIX(", start);
282 if (pos == string::npos) {
// No more occurrences; copy the tail verbatim and finish.
283 output.append(text.substr(start, string::npos));
287 output.append(text.substr(start, pos - start));
288 output.append(prefix);
291 pos += strlen("PREFIX(");
293 // Output stuff until we find the matching ), which we then eat.
295 size_t end_arg_pos = pos;
296 while (end_arg_pos < text.size()) {
297 if (text[end_arg_pos] == '(') {
299 } else if (text[end_arg_pos] == ')') {
307 output.append(text.substr(pos, end_arg_pos - pos));
// Copy each of an effect's uniforms into the phase-wide uniform list,
// tagging it with the effect's ID as prefix, and append a matching
// "uniform <type> <effect_id>_<name>;" declaration to the GLSL source
// being built in *glsl_string.
318 void extract_uniform_declarations(const vector<Uniform<T>> &effect_uniforms,
319 const string &type_specifier,
320 const string &effect_id,
321 vector<Uniform<T>> *phase_uniforms,
324 for (unsigned i = 0; i < effect_uniforms.size(); ++i) {
325 phase_uniforms->push_back(effect_uniforms[i]);
326 phase_uniforms->back().prefix = effect_id;
328 *glsl_string += string("uniform ") + type_specifier + " " + effect_id
329 + "_" + effect_uniforms[i].name + ";\n";
// Array variant of extract_uniform_declarations(): same bookkeeping, but
// the emitted declaration carries the element count, e.g.
// "uniform <type> <effect_id>_<name>[N];".
334 void extract_uniform_array_declarations(const vector<Uniform<T>> &effect_uniforms,
335 const string &type_specifier,
336 const string &effect_id,
337 vector<Uniform<T>> *phase_uniforms,
340 for (unsigned i = 0; i < effect_uniforms.size(); ++i) {
341 phase_uniforms->push_back(effect_uniforms[i]);
342 phase_uniforms->back().prefix = effect_id;
345 snprintf(buf, sizeof(buf), "uniform %s %s_%s[%d];\n",
346 type_specifier.c_str(), effect_id.c_str(),
347 effect_uniforms[i].name.c_str(),
348 int(effect_uniforms[i].num_values));
// After the program is linked, resolve and cache the GL uniform location
// for every prefixed uniform in the phase's list.
354 void collect_uniform_locations(GLuint glsl_program_num, vector<Uniform<T>> *phase_uniforms)
356 for (unsigned i = 0; i < phase_uniforms->size(); ++i) {
357 Uniform<T> &uniform = (*phase_uniforms)[i];
358 uniform.location = get_uniform_location(glsl_program_num, uniform.prefix, uniform.name);
// Build and compile the complete GLSL program for one phase:
//  1. Emit a sampler function ("in<i>") for each phase input texture.
//  2. Assign every effect an ID ("eff<i>") and concatenate its fragment
//     shader snippet, wiring INPUT/INPUT<n>/FUNCNAME via #defines.
//  3. Add Y'CbCr multi-output #defines if this is the last phase.
//  4. Collect all effect uniforms into phase-wide lists and declarations.
//  5. Compile as a compute or vertex+fragment program, then resolve
//     attribute indexes and uniform locations.
364 void EffectChain::compile_glsl_program(Phase *phase)
366 string frag_shader_header;
367 if (phase->is_compute_shader) {
368 frag_shader_header = read_file("header.comp")
370 frag_shader_header = read_version_dependent_file("header", "frag");
372 string frag_shader = "";
374 // Create functions and uniforms for all the texture inputs that we need.
375 for (unsigned i = 0; i < phase->inputs.size(); ++i) {
376 Node *input = phase->inputs[i]->output_node;
378 sprintf(effect_id, "in%u", i);
379 phase->effect_ids.insert(make_pair(input, effect_id));
// Emit: uniform sampler2D tex_in<i>; plus a small wrapper function
// vec4 in<i>(vec2 tc) that samples it (tex2D is presumably a
// compatibility macro from the shader header — confirm there).
381 frag_shader += string("uniform sampler2D tex_") + effect_id + ";\n";
382 frag_shader += string("vec4 ") + effect_id + "(vec2 tc) {\n";
383 frag_shader += "\tvec4 tmp = tex2D(tex_" + string(effect_id) + ", tc);\n";
// Undo the square-root intermediate storage transformation on read,
// but only for linear-gamma data (the only case where it was applied).
385 if (intermediate_transformation == SQUARE_ROOT_FRAMEBUFFER_TRANSFORMATION &&
386 phase->inputs[i]->output_node->output_gamma_curve == GAMMA_LINEAR) {
387 frag_shader += "\ttmp.rgb *= tmp.rgb;\n";
390 frag_shader += "\treturn tmp;\n";
391 frag_shader += "}\n";
// Register the sampler uniform, pointing at the phase's sampler slot.
394 Uniform<int> uniform;
395 uniform.name = effect_id;
396 uniform.value = &phase->input_samplers[i];
397 uniform.prefix = "tex";
398 uniform.num_values = 1;
399 uniform.location = -1;
400 phase->uniforms_sampler2d.push_back(uniform);
403 // Give each effect in the phase its own ID.
404 for (unsigned i = 0; i < phase->effects.size(); ++i) {
405 Node *node = phase->effects[i];
407 sprintf(effect_id, "eff%u", i);
408 phase->effect_ids.insert(make_pair(node, effect_id));
// Emit each effect's shader snippet, chained together via #defines.
411 for (unsigned i = 0; i < phase->effects.size(); ++i) {
412 Node *node = phase->effects[i];
413 const string effect_id = phase->effect_ids[node];
414 if (node->incoming_links.size() == 1) {
415 Node *input = node->incoming_links[0];
416 if (i != 0 && input->effect->is_compute_shader()) {
417 // First effect after the compute shader reads the value
418 // that cs_output() wrote to a global variable.
419 frag_shader += string("#define INPUT(tc) CS_OUTPUT_VAL\n");
421 frag_shader += string("#define INPUT ") + phase->effect_ids[input] + "\n";
// Multiple inputs: INPUT1, INPUT2, ... (1-based). None of them may
// be a compute shader.
424 for (unsigned j = 0; j < node->incoming_links.size(); ++j) {
425 assert(!node->incoming_links[j]->effect->is_compute_shader());
427 sprintf(buf, "#define INPUT%d %s\n", j + 1, phase->effect_ids[node->incoming_links[j]].c_str());
433 frag_shader += string("#define FUNCNAME ") + effect_id + "\n";
434 if (node->effect->is_compute_shader()) {
435 frag_shader += string("#define NORMALIZE_TEXTURE_COORDS(tc) ((tc) * ") + effect_id + "_inv_output_size + " + effect_id + "_output_texcoord_adjust)\n";
437 frag_shader += replace_prefix(node->effect->output_fragment_shader(), effect_id);
438 frag_shader += "#undef FUNCNAME\n";
439 if (node->incoming_links.size() == 1) {
440 frag_shader += "#undef INPUT\n";
442 for (unsigned j = 0; j < node->incoming_links.size(); ++j) {
444 sprintf(buf, "#undef INPUT%d\n", j + 1);
// Wire up the final INPUT (and compute-shader postprocessing chain).
450 if (phase->is_compute_shader) {
451 frag_shader += string("#define INPUT ") + phase->effect_ids[phase->compute_shader_node] + "\n";
452 if (phase->compute_shader_node == phase->effects.back()) {
453 // No postprocessing.
454 frag_shader += "#define CS_POSTPROC(tc) CS_OUTPUT_VAL\n";
456 frag_shader += string("#define CS_POSTPROC ") + phase->effect_ids[phase->effects.back()] + "\n";
459 frag_shader += string("#define INPUT ") + phase->effect_ids[phase->effects.back()] + "\n";
462 // If we're the last phase, add the right #defines for Y'CbCr multi-output as needed.
463 vector<string> frag_shader_outputs; // In order.
464 if (phase->output_node->outgoing_links.empty() && num_output_color_ycbcr > 0) {
465 switch (output_ycbcr_splitting[0]) {
466 case YCBCR_OUTPUT_INTERLEAVED:
468 frag_shader_outputs.push_back("FragColor");
470 case YCBCR_OUTPUT_SPLIT_Y_AND_CBCR:
471 frag_shader += "#define YCBCR_OUTPUT_SPLIT_Y_AND_CBCR 1\n";
472 frag_shader_outputs.push_back("Y");
473 frag_shader_outputs.push_back("Chroma");
475 case YCBCR_OUTPUT_PLANAR:
476 frag_shader += "#define YCBCR_OUTPUT_PLANAR 1\n";
477 frag_shader_outputs.push_back("Y");
478 frag_shader_outputs.push_back("Cb");
479 frag_shader_outputs.push_back("Cr");
// Second Y'CbCr output, if configured (see add_ycbcr_output()).
485 if (num_output_color_ycbcr > 1) {
486 switch (output_ycbcr_splitting[1]) {
487 case YCBCR_OUTPUT_INTERLEAVED:
488 frag_shader += "#define SECOND_YCBCR_OUTPUT_INTERLEAVED 1\n";
489 frag_shader_outputs.push_back("YCbCr2");
491 case YCBCR_OUTPUT_SPLIT_Y_AND_CBCR:
492 frag_shader += "#define SECOND_YCBCR_OUTPUT_SPLIT_Y_AND_CBCR 1\n";
493 frag_shader_outputs.push_back("Y2");
494 frag_shader_outputs.push_back("Chroma2");
496 case YCBCR_OUTPUT_PLANAR:
497 frag_shader += "#define SECOND_YCBCR_OUTPUT_PLANAR 1\n";
498 frag_shader_outputs.push_back("Y2");
499 frag_shader_outputs.push_back("Cb2");
500 frag_shader_outputs.push_back("Cr2");
507 if (output_color_rgba) {
508 // Note: Needs to come in the header, because not only the
509 // output needs to see it (YCbCrConversionEffect and DitherEffect
511 frag_shader_header += "#define YCBCR_ALSO_OUTPUT_RGBA 1\n";
512 frag_shader_outputs.push_back("RGBA");
516 // If we're bouncing to a temporary texture, signal transformation if desired.
517 if (!phase->output_node->outgoing_links.empty()) {
518 if (intermediate_transformation == SQUARE_ROOT_FRAMEBUFFER_TRANSFORMATION &&
519 phase->output_node->output_gamma_curve == GAMMA_LINEAR) {
520 frag_shader += "#define SQUARE_ROOT_TRANSFORMATION 1\n";
524 if (phase->is_compute_shader) {
525 frag_shader.append(read_file("footer.comp"));
// The compute footer needs the output size/coordinate-adjust uniforms,
// registered on the compute shader node so they get the right prefix.
526 phase->compute_shader_node->effect->register_uniform_ivec2("output_size", phase->uniform_output_size);
527 phase->compute_shader_node->effect->register_uniform_vec2("inv_output_size", (float *)&phase->inv_output_size);
528 phase->compute_shader_node->effect->register_uniform_vec2("output_texcoord_adjust", (float *)&phase->output_texcoord_adjust);
530 frag_shader.append(read_file("footer.frag"));
533 // Collect uniforms from all effects and output them. Note that this needs
534 // to happen after output_fragment_shader(), even though the uniforms come
535 // before in the output source, since output_fragment_shader() is allowed
536 // to register new uniforms (e.g. arrays that are of unknown length until
537 // finalization time).
538 // TODO: Make a uniform block for platforms that support it.
539 string frag_shader_uniforms = "";
540 for (unsigned i = 0; i < phase->effects.size(); ++i) {
541 Node *node = phase->effects[i];
542 Effect *effect = node->effect;
543 const string effect_id = phase->effect_ids[node];
544 extract_uniform_declarations(effect->uniforms_image2d, "image2D", effect_id, &phase->uniforms_image2d, &frag_shader_uniforms);
545 extract_uniform_declarations(effect->uniforms_sampler2d, "sampler2D", effect_id, &phase->uniforms_sampler2d, &frag_shader_uniforms);
546 extract_uniform_declarations(effect->uniforms_bool, "bool", effect_id, &phase->uniforms_bool, &frag_shader_uniforms);
547 extract_uniform_declarations(effect->uniforms_int, "int", effect_id, &phase->uniforms_int, &frag_shader_uniforms);
548 extract_uniform_declarations(effect->uniforms_ivec2, "ivec2", effect_id, &phase->uniforms_ivec2, &frag_shader_uniforms);
549 extract_uniform_declarations(effect->uniforms_float, "float", effect_id, &phase->uniforms_float, &frag_shader_uniforms);
550 extract_uniform_declarations(effect->uniforms_vec2, "vec2", effect_id, &phase->uniforms_vec2, &frag_shader_uniforms);
551 extract_uniform_declarations(effect->uniforms_vec3, "vec3", effect_id, &phase->uniforms_vec3, &frag_shader_uniforms);
552 extract_uniform_declarations(effect->uniforms_vec4, "vec4", effect_id, &phase->uniforms_vec4, &frag_shader_uniforms);
// Array uniforms fold into the same phase lists as their scalar kin.
553 extract_uniform_array_declarations(effect->uniforms_float_array, "float", effect_id, &phase->uniforms_float, &frag_shader_uniforms);
554 extract_uniform_array_declarations(effect->uniforms_vec2_array, "vec2", effect_id, &phase->uniforms_vec2, &frag_shader_uniforms);
555 extract_uniform_array_declarations(effect->uniforms_vec3_array, "vec3", effect_id, &phase->uniforms_vec3, &frag_shader_uniforms);
556 extract_uniform_array_declarations(effect->uniforms_vec4_array, "vec4", effect_id, &phase->uniforms_vec4, &frag_shader_uniforms);
557 extract_uniform_declarations(effect->uniforms_mat3, "mat3", effect_id, &phase->uniforms_mat3, &frag_shader_uniforms);
560 string vert_shader = read_version_dependent_file("vs", "vert");
562 // If we're the last phase and need to flip the picture to compensate for
563 // the origin, tell the vertex or compute shader so.
565 if (has_dummy_effect) {
// With a dummy display effect appended, the "real" last phase is the
// one feeding the ComputeShaderOutputDisplayEffect.
566 is_last_phase = (phase->output_node->outgoing_links.size() == 1 &&
567 phase->output_node->outgoing_links[0]->effect->effect_type_id() == "ComputeShaderOutputDisplayEffect");
569 is_last_phase = phase->output_node->outgoing_links.empty();
571 if (is_last_phase && output_origin == OUTPUT_ORIGIN_TOP_LEFT) {
572 if (phase->is_compute_shader) {
573 frag_shader_header += "#define FLIP_ORIGIN 1\n";
// Fragment path: patch the vertex shader source in place, flipping
// the trailing '0' of "#define FLIP_ORIGIN 0" to '1'.
575 const string needle = "#define FLIP_ORIGIN 0";
576 size_t pos = vert_shader.find(needle);
577 assert(pos != string::npos);
579 vert_shader[pos + needle.size() - 1] = '1';
583 frag_shader = frag_shader_header + frag_shader_uniforms + frag_shader;
585 if (phase->is_compute_shader) {
586 phase->glsl_program_num = resource_pool->compile_glsl_compute_program(frag_shader);
// The compute shader writes through an image unit; register it as the
// "outbuf" image2D uniform.
588 Uniform<int> uniform;
589 uniform.name = "outbuf";
590 uniform.value = &phase->outbuf_image_unit;
591 uniform.prefix = "tex";
592 uniform.num_values = 1;
593 uniform.location = -1;
594 phase->uniforms_image2d.push_back(uniform);
596 phase->glsl_program_num = resource_pool->compile_glsl_program(vert_shader, frag_shader, frag_shader_outputs);
// Cache vertex attribute indexes (-1 means the attribute was optimized
// out or absent, e.g. for compute programs).
598 GLint position_attribute_index = glGetAttribLocation(phase->glsl_program_num, "position");
599 GLint texcoord_attribute_index = glGetAttribLocation(phase->glsl_program_num, "texcoord");
600 if (position_attribute_index != -1) {
601 phase->attribute_indexes.insert(position_attribute_index);
603 if (texcoord_attribute_index != -1) {
604 phase->attribute_indexes.insert(texcoord_attribute_index);
607 // Collect the resulting location numbers for each uniform.
608 collect_uniform_locations(phase->glsl_program_num, &phase->uniforms_image2d);
609 collect_uniform_locations(phase->glsl_program_num, &phase->uniforms_sampler2d);
610 collect_uniform_locations(phase->glsl_program_num, &phase->uniforms_bool);
611 collect_uniform_locations(phase->glsl_program_num, &phase->uniforms_int);
612 collect_uniform_locations(phase->glsl_program_num, &phase->uniforms_ivec2);
613 collect_uniform_locations(phase->glsl_program_num, &phase->uniforms_float);
614 collect_uniform_locations(phase->glsl_program_num, &phase->uniforms_vec2);
615 collect_uniform_locations(phase->glsl_program_num, &phase->uniforms_vec3);
616 collect_uniform_locations(phase->glsl_program_num, &phase->uniforms_vec4);
617 collect_uniform_locations(phase->glsl_program_num, &phase->uniforms_mat3);
620 // Construct GLSL programs, starting at the given effect and following
621 // the chain from there. We end a program every time we come to an effect
622 // marked as "needs texture bounce", one that is used by multiple other
623 // effects, every time we need to bounce due to output size change
624 // (not all size changes require ending), and of course at the end.
626 // We follow a quite simple depth-first search from the output, although
627 // without recursing explicitly within each phase.
// Returns the (memoized) Phase for `output`; recursion happens only at
// phase boundaries, via construct_phase() on each phase input.
628 Phase *EffectChain::construct_phase(Node *output, map<Node *, Phase *> *completed_effects)
630 if (completed_effects->count(output)) {
631 return (*completed_effects)[output];
634 Phase *phase = new Phase;
635 phase->output_node = output;
636 phase->is_compute_shader = false;
637 phase->compute_shader_node = nullptr;
639 // If the output effect has one-to-one sampling, we try to trace this
640 // status down through the dependency chain. This is important in case
641 // we hit an effect that changes output size (and not sets a virtual
642 // output size); if we have one-to-one sampling, we don't have to break
644 output->one_to_one_sampling = output->effect->one_to_one_sampling();
645 output->strong_one_to_one_sampling = output->effect->strong_one_to_one_sampling();
647 // Effects that we have yet to calculate, but that we know should
648 // be in the current phase.
649 stack<Node *> effects_todo_this_phase;
650 effects_todo_this_phase.push(output);
652 while (!effects_todo_this_phase.empty()) {
653 Node *node = effects_todo_this_phase.top();
654 effects_todo_this_phase.pop();
// Sanity: "strong one-to-one" must imply "one-to-one".
656 assert(node->effect->one_to_one_sampling() >= node->effect->strong_one_to_one_sampling());
658 if (node->effect->needs_mipmaps() != Effect::DOES_NOT_NEED_MIPMAPS) {
659 // Can't have incompatible requirements imposed on us from a dependent effect;
660 // if so, it should have started a new phase instead.
661 assert(node->needs_mipmaps == Effect::DOES_NOT_NEED_MIPMAPS ||
662 node->needs_mipmaps == node->effect->needs_mipmaps());
663 node->needs_mipmaps = node->effect->needs_mipmaps();
666 // This should currently only happen for effects that are inputs
667 // (either true inputs or phase outputs). We special-case inputs,
668 // and then deduplicate phase outputs below.
669 if (node->effect->num_inputs() == 0) {
670 if (find(phase->effects.begin(), phase->effects.end(), node) != phase->effects.end()) {
674 assert(completed_effects->count(node) == 0);
677 phase->effects.push_back(node);
678 if (node->effect->is_compute_shader()) {
679 phase->is_compute_shader = true;
680 phase->compute_shader_node = node;
683 // Find all the dependencies of this effect, and add them to the stack.
684 vector<Node *> deps = node->incoming_links;
685 assert(node->effect->num_inputs() == deps.size());
686 for (unsigned i = 0; i < deps.size(); ++i) {
687 bool start_new_phase = false;
// Texture bounce forces a phase break unless the dependency is a
// plain texture or explicitly opts out of bouncing.
689 if (node->effect->needs_texture_bounce() &&
690 !deps[i]->effect->is_single_texture() &&
691 !deps[i]->effect->override_disable_bounce()) {
692 start_new_phase = true;
695 // Propagate information about needing mipmaps down the chain,
696 // breaking the phase if we notice an incompatibility.
698 // Note that we cannot do this propagation as a normal pass,
699 // because it needs information about where the phases end
700 // (we should not propagate the flag across phases).
701 if (node->needs_mipmaps != Effect::DOES_NOT_NEED_MIPMAPS) {
702 if (deps[i]->effect->num_inputs() == 0 && node->needs_mipmaps == Effect::NEEDS_MIPMAPS) {
703 Input *input = static_cast<Input *>(deps[i]->effect);
704 start_new_phase |= !input->can_supply_mipmaps();
705 } else if (deps[i]->effect->needs_mipmaps() == Effect::DOES_NOT_NEED_MIPMAPS) {
706 deps[i]->needs_mipmaps = node->needs_mipmaps;
707 } else if (deps[i]->effect->needs_mipmaps() != node->needs_mipmaps) {
708 start_new_phase = true;
712 if (deps[i]->outgoing_links.size() > 1) {
713 if (!deps[i]->effect->is_single_texture()) {
714 // More than one effect uses this as the input,
715 // and it is not a texture itself.
716 // The easiest thing to do (and probably also the safest
717 // performance-wise in most cases) is to bounce it to a texture
718 // and then let the next passes read from that.
719 start_new_phase = true;
721 assert(deps[i]->effect->num_inputs() == 0);
723 // For textures, we try to be slightly more clever;
724 // if none of our outputs need a bounce, we don't bounce
725 // but instead simply use the effect many times.
727 // Strictly speaking, we could bounce it for some outputs
728 // and use it directly for others, but the processing becomes
729 // somewhat simpler if the effect is only used in one such way.
730 for (unsigned j = 0; j < deps[i]->outgoing_links.size(); ++j) {
731 Node *rdep = deps[i]->outgoing_links[j];
732 start_new_phase |= rdep->effect->needs_texture_bounce();
737 if (deps[i]->effect->is_compute_shader()) {
738 // Only one compute shader per phase; we should have been stopped
739 // already due to the fact that compute shaders are not one-to-one.
740 assert(!phase->is_compute_shader);
742 // If all nodes so far are strong one-to-one, we can put them after
743 // the compute shader (ie., process them on the output).
744 start_new_phase = !node->strong_one_to_one_sampling;
745 } else if (deps[i]->effect->sets_virtual_output_size()) {
746 assert(deps[i]->effect->changes_output_size());
747 // If the next effect sets a virtual size to rely on OpenGL's
748 // bilinear sampling, we'll really need to break the phase here.
749 start_new_phase = true;
750 } else if (deps[i]->effect->changes_output_size() && !node->one_to_one_sampling) {
751 // If the next effect changes size and we don't have one-to-one sampling,
752 // we also need to break here.
753 start_new_phase = true;
756 if (start_new_phase) {
// The dependency becomes (the output of) its own phase, which we
// record as an input to this one.
757 phase->inputs.push_back(construct_phase(deps[i], completed_effects));
759 effects_todo_this_phase.push(deps[i]);
761 // Propagate the one-to-one status down through the dependency.
762 deps[i]->one_to_one_sampling = node->one_to_one_sampling &&
763 deps[i]->effect->one_to_one_sampling();
764 deps[i]->strong_one_to_one_sampling = node->strong_one_to_one_sampling &&
765 deps[i]->effect->strong_one_to_one_sampling();
770 // No more effects to do this phase. Take all the ones we have,
771 // and create a GLSL program for it.
772 assert(!phase->effects.empty());
774 // Deduplicate the inputs, but don't change the ordering e.g. by sorting;
775 // that would be nondeterministic and thus reduce cacheability.
776 // TODO: Make this even more deterministic.
777 vector<Phase *> dedup_inputs;
778 set<Phase *> seen_inputs;
779 for (size_t i = 0; i < phase->inputs.size(); ++i) {
780 if (seen_inputs.insert(phase->inputs[i]).second) {
781 dedup_inputs.push_back(phase->inputs[i]);
784 swap(phase->inputs, dedup_inputs);
786 // Allocate samplers for each input.
787 phase->input_samplers.resize(phase->inputs.size());
789 // We added the effects from the output and back, but we need to output
790 // them in topological sort order in the shader.
791 phase->effects = topological_sort(phase->effects);
793 // Figure out if we need mipmaps or not, and if so, tell the inputs that.
794 phase->input_needs_mipmaps = false;
795 for (unsigned i = 0; i < phase->effects.size(); ++i) {
796 Node *node = phase->effects[i];
797 if (node->effect->needs_mipmaps() == Effect::NEEDS_MIPMAPS) {
798 phase->input_needs_mipmaps = true;
801 for (unsigned i = 0; i < phase->effects.size(); ++i) {
802 Node *node = phase->effects[i];
803 if (node->effect->num_inputs() == 0) {
804 Input *input = static_cast<Input *>(node->effect);
805 assert(!phase->input_needs_mipmaps || input->can_supply_mipmaps());
806 CHECK(input->set_int("needs_mipmaps", phase->input_needs_mipmaps));
810 // Tell each node which phase it ended up in, so that the unit test
811 // can check that the phases were split in the right place.
812 // Note that this ignores that effects may be part of multiple phases;
813 // if the unit tests need to test such cases, we'll reconsider.
814 for (unsigned i = 0; i < phase->effects.size(); ++i) {
815 phase->effects[i]->containing_phase = phase;
818 // Actually make the shader for this phase.
819 compile_glsl_program(phase);
821 // Initialize timers.
822 if (movit_timer_queries_supported) {
823 phase->time_elapsed_ns = 0;
824 phase->num_measured_iterations = 0;
// Memoize the finished phase so shared subgraphs are built only once.
827 assert(completed_effects->count(output) == 0);
828 completed_effects->insert(make_pair(output, phase));
829 phases.push_back(phase);
// Dump the effect graph in Graphviz DOT format to `filename`, coloring
// each node by the phase(s) it belongs to. No-op unless debugging is on.
833 void EffectChain::output_dot(const char *filename)
835 if (movit_debug_level != MOVIT_DEBUG_ON) {
839 FILE *fp = fopen(filename, "w");
845 fprintf(fp, "digraph G {\n");
846 fprintf(fp, " output [shape=box label=\"(output)\"];\n");
847 for (unsigned i = 0; i < nodes.size(); ++i) {
848 // Find out which phase this event belongs to.
849 vector<int> in_phases;
850 for (unsigned j = 0; j < phases.size(); ++j) {
851 const Phase* p = phases[j];
852 if (find(p->effects.begin(), p->effects.end(), nodes[i]) != p->effects.end()) {
853 in_phases.push_back(j);
// Nodes use their pointer value as a unique DOT identifier; fill color
// comes from the Graphviz "accent8" palette, cycling per phase index.
857 if (in_phases.empty()) {
858 fprintf(fp, " n%ld [label=\"%s\"];\n", (long)nodes[i], nodes[i]->effect->effect_type_id().c_str());
859 } else if (in_phases.size() == 1) {
860 fprintf(fp, " n%ld [label=\"%s\" style=\"filled\" fillcolor=\"/accent8/%d\"];\n",
861 (long)nodes[i], nodes[i]->effect->effect_type_id().c_str(),
862 (in_phases[0] % 8) + 1);
864 // If we had new enough Graphviz, style="wedged" would probably be ideal here.
866 fprintf(fp, " n%ld [label=\"%s [in multiple phases]\" style=\"filled\" fillcolor=\"/accent8/%d\"];\n",
867 (long)nodes[i], nodes[i]->effect->effect_type_id().c_str(),
868 (in_phases[0] % 8) + 1);
871 char from_node_id[256];
872 snprintf(from_node_id, 256, "n%ld", (long)nodes[i]);
874 for (unsigned j = 0; j < nodes[i]->outgoing_links.size(); ++j) {
875 char to_node_id[256];
876 snprintf(to_node_id, 256, "n%ld", (long)nodes[i]->outgoing_links[j]);
878 vector<string> labels = get_labels_for_edge(nodes[i], nodes[i]->outgoing_links[j]);
879 output_dot_edge(fp, from_node_id, to_node_id, labels);
// Nodes with no consumers feed the synthetic "output" box.
882 if (nodes[i]->outgoing_links.empty() && !nodes[i]->disabled) {
884 vector<string> labels = get_labels_for_edge(nodes[i], nullptr);
885 output_dot_edge(fp, from_node_id, "output", labels);
// Build the list of DOT edge labels describing the edge from -> to:
// bounce/resize requirements, plus the sender's color space, gamma curve
// and alpha type. `to` may be nullptr for the edge to the final output.
893 vector<string> EffectChain::get_labels_for_edge(const Node *from, const Node *to)
895 vector<string> labels;
897 if (to != nullptr && to->effect->needs_texture_bounce()) {
898 labels.push_back("needs_bounce");
900 if (from->effect->changes_output_size()) {
901 labels.push_back("resize");
904 switch (from->output_color_space) {
905 case COLORSPACE_INVALID:
906 labels.push_back("spc[invalid]");
908 case COLORSPACE_REC_601_525:
909 labels.push_back("spc[rec601-525]");
911 case COLORSPACE_REC_601_625:
912 labels.push_back("spc[rec601-625]");
918 switch (from->output_gamma_curve) {
920 labels.push_back("gamma[invalid]");
923 labels.push_back("gamma[sRGB]");
925 case GAMMA_REC_601: // and GAMMA_REC_709
926 labels.push_back("gamma[rec601/709]");
932 switch (from->output_alpha_type) {
934 labels.push_back("alpha[invalid]");
937 labels.push_back("alpha[blank]");
939 case ALPHA_POSTMULTIPLIED:
940 labels.push_back("alpha[postmult]");
// Emit a single DOT edge; if labels are present, join them with ", " into
// one label attribute.
949 void EffectChain::output_dot_edge(FILE *fp,
950 const string &from_node_id,
951 const string &to_node_id,
952 const vector<string> &labels)
954 if (labels.empty()) {
955 fprintf(fp, " %s -> %s;\n", from_node_id.c_str(), to_node_id.c_str());
957 string label = labels[0];
958 for (unsigned k = 1; k < labels.size(); ++k) {
959 label += ", " + labels[k];
961 fprintf(fp, " %s -> %s [label=\"%s\"];\n", from_node_id.c_str(), to_node_id.c_str(), label.c_str());
965 void EffectChain::size_rectangle_to_fit(unsigned width, unsigned height, unsigned *output_width, unsigned *output_height)
967 unsigned scaled_width, scaled_height;
969 if (float(width) * aspect_denom >= float(height) * aspect_nom) {
970 // Same aspect, or W/H > aspect (image is wider than the frame).
971 // In either case, keep width, and adjust height.
972 scaled_width = width;
973 scaled_height = lrintf(width * aspect_denom / aspect_nom);
975 // W/H < aspect (image is taller than the frame), so keep height,
977 scaled_width = lrintf(height * aspect_nom / aspect_denom);
978 scaled_height = height;
981 // We should be consistently larger or smaller then the existing choice,
982 // since we have the same aspect.
983 assert(!(scaled_width < *output_width && scaled_height > *output_height));
984 assert(!(scaled_height < *output_height && scaled_width > *output_width));
986 if (scaled_width >= *output_width && scaled_height >= *output_height) {
987 *output_width = scaled_width;
988 *output_height = scaled_height;
992 // Propagate input texture sizes throughout, and inform effects downstream.
993 // (Like a lot of other code, we depend on effects being in topological order.)
994 void EffectChain::inform_input_sizes(Phase *phase)
996 // All effects that have a defined size (inputs and RTT inputs)
997 // get that. Reset all others.
998 for (unsigned i = 0; i < phase->effects.size(); ++i) {
999 Node *node = phase->effects[i];
1000 if (node->effect->num_inputs() == 0) {
1001 Input *input = static_cast<Input *>(node->effect);
1002 node->output_width = input->get_width();
1003 node->output_height = input->get_height();
1004 assert(node->output_width != 0);
1005 assert(node->output_height != 0);
1007 node->output_width = node->output_height = 0;
1010 for (unsigned i = 0; i < phase->inputs.size(); ++i) {
1011 Phase *input = phase->inputs[i];
1012 input->output_node->output_width = input->virtual_output_width;
1013 input->output_node->output_height = input->virtual_output_height;
1014 assert(input->output_node->output_width != 0);
1015 assert(input->output_node->output_height != 0);
1018 // Now propagate from the inputs towards the end, and inform as we go.
1019 // The rules are simple:
1021 // 1. Don't touch effects that already have given sizes (ie., inputs
1022 // or effects that change the output size).
1023 // 2. If all of your inputs have the same size, that will be your output size.
1024 // 3. Otherwise, your output size is 0x0.
1025 for (unsigned i = 0; i < phase->effects.size(); ++i) {
1026 Node *node = phase->effects[i];
1027 if (node->effect->num_inputs() == 0) {
1030 unsigned this_output_width = 0;
1031 unsigned this_output_height = 0;
1032 for (unsigned j = 0; j < node->incoming_links.size(); ++j) {
1033 Node *input = node->incoming_links[j];
1034 node->effect->inform_input_size(j, input->output_width, input->output_height);
1036 this_output_width = input->output_width;
1037 this_output_height = input->output_height;
1038 } else if (input->output_width != this_output_width || input->output_height != this_output_height) {
1040 this_output_width = 0;
1041 this_output_height = 0;
1044 if (node->effect->changes_output_size()) {
1045 // We cannot call get_output_size() before we've done inform_input_size()
1047 unsigned real_width, real_height;
1048 node->effect->get_output_size(&real_width, &real_height,
1049 &node->output_width, &node->output_height);
1050 assert(node->effect->sets_virtual_output_size() ||
1051 (real_width == node->output_width &&
1052 real_height == node->output_height));
1054 node->output_width = this_output_width;
1055 node->output_height = this_output_height;
1060 // Note: You should call inform_input_sizes() before this, as the last effect's
1061 // desired output size might change based on the inputs.
1062 void EffectChain::find_output_size(Phase *phase)
1064 Node *output_node = phase->is_compute_shader ? phase->compute_shader_node : phase->effects.back();
1066 // If the last effect explicitly sets an output size, use that.
1067 if (output_node->effect->changes_output_size()) {
1068 output_node->effect->get_output_size(&phase->output_width, &phase->output_height,
1069 &phase->virtual_output_width, &phase->virtual_output_height);
1070 assert(output_node->effect->sets_virtual_output_size() ||
1071 (phase->output_width == phase->virtual_output_width &&
1072 phase->output_height == phase->virtual_output_height));
1076 // If all effects have the same size, use that.
1077 unsigned output_width = 0, output_height = 0;
1078 bool all_inputs_same_size = true;
1080 for (unsigned i = 0; i < phase->inputs.size(); ++i) {
1081 Phase *input = phase->inputs[i];
1082 assert(input->output_width != 0);
1083 assert(input->output_height != 0);
1084 if (output_width == 0 && output_height == 0) {
1085 output_width = input->virtual_output_width;
1086 output_height = input->virtual_output_height;
1087 } else if (output_width != input->virtual_output_width ||
1088 output_height != input->virtual_output_height) {
1089 all_inputs_same_size = false;
1092 for (unsigned i = 0; i < phase->effects.size(); ++i) {
1093 Effect *effect = phase->effects[i]->effect;
1094 if (effect->num_inputs() != 0) {
1098 Input *input = static_cast<Input *>(effect);
1099 if (output_width == 0 && output_height == 0) {
1100 output_width = input->get_width();
1101 output_height = input->get_height();
1102 } else if (output_width != input->get_width() ||
1103 output_height != input->get_height()) {
1104 all_inputs_same_size = false;
1108 if (all_inputs_same_size) {
1109 assert(output_width != 0);
1110 assert(output_height != 0);
1111 phase->virtual_output_width = phase->output_width = output_width;
1112 phase->virtual_output_height = phase->output_height = output_height;
1116 // If not, fit all the inputs into the current aspect, and select the largest one.
1119 for (unsigned i = 0; i < phase->inputs.size(); ++i) {
1120 Phase *input = phase->inputs[i];
1121 assert(input->output_width != 0);
1122 assert(input->output_height != 0);
1123 size_rectangle_to_fit(input->output_width, input->output_height, &output_width, &output_height);
1125 for (unsigned i = 0; i < phase->effects.size(); ++i) {
1126 Effect *effect = phase->effects[i]->effect;
1127 if (effect->num_inputs() != 0) {
1131 Input *input = static_cast<Input *>(effect);
1132 size_rectangle_to_fit(input->get_width(), input->get_height(), &output_width, &output_height);
1134 assert(output_width != 0);
1135 assert(output_height != 0);
1136 phase->virtual_output_width = phase->output_width = output_width;
1137 phase->virtual_output_height = phase->output_height = output_height;
1140 void EffectChain::sort_all_nodes_topologically()
1142 nodes = topological_sort(nodes);
1145 vector<Node *> EffectChain::topological_sort(const vector<Node *> &nodes)
1147 set<Node *> nodes_left_to_visit(nodes.begin(), nodes.end());
1148 vector<Node *> sorted_list;
1149 for (unsigned i = 0; i < nodes.size(); ++i) {
1150 topological_sort_visit_node(nodes[i], &nodes_left_to_visit, &sorted_list);
1152 reverse(sorted_list.begin(), sorted_list.end());
1156 void EffectChain::topological_sort_visit_node(Node *node, set<Node *> *nodes_left_to_visit, vector<Node *> *sorted_list)
1158 if (nodes_left_to_visit->count(node) == 0) {
1161 nodes_left_to_visit->erase(node);
1162 for (unsigned i = 0; i < node->outgoing_links.size(); ++i) {
1163 topological_sort_visit_node(node->outgoing_links[i], nodes_left_to_visit, sorted_list);
1165 sorted_list->push_back(node);
1168 void EffectChain::find_color_spaces_for_inputs()
1170 for (unsigned i = 0; i < nodes.size(); ++i) {
1171 Node *node = nodes[i];
1172 if (node->disabled) {
1175 if (node->incoming_links.size() == 0) {
1176 Input *input = static_cast<Input *>(node->effect);
1177 node->output_color_space = input->get_color_space();
1178 node->output_gamma_curve = input->get_gamma_curve();
1180 Effect::AlphaHandling alpha_handling = input->alpha_handling();
1181 switch (alpha_handling) {
1182 case Effect::OUTPUT_BLANK_ALPHA:
1183 node->output_alpha_type = ALPHA_BLANK;
1185 case Effect::INPUT_AND_OUTPUT_PREMULTIPLIED_ALPHA:
1186 node->output_alpha_type = ALPHA_PREMULTIPLIED;
1188 case Effect::OUTPUT_POSTMULTIPLIED_ALPHA:
1189 node->output_alpha_type = ALPHA_POSTMULTIPLIED;
1191 case Effect::INPUT_PREMULTIPLIED_ALPHA_KEEP_BLANK:
1192 case Effect::DONT_CARE_ALPHA_TYPE:
1197 if (node->output_alpha_type == ALPHA_PREMULTIPLIED) {
1198 assert(node->output_gamma_curve == GAMMA_LINEAR);
1204 // Propagate gamma and color space information as far as we can in the graph.
1205 // The rules are simple: Anything where all the inputs agree, get that as
1206 // output as well. Anything else keeps having *_INVALID.
1207 void EffectChain::propagate_gamma_and_color_space()
1209 // We depend on going through the nodes in order.
1210 sort_all_nodes_topologically();
1212 for (unsigned i = 0; i < nodes.size(); ++i) {
1213 Node *node = nodes[i];
1214 if (node->disabled) {
1217 assert(node->incoming_links.size() == node->effect->num_inputs());
1218 if (node->incoming_links.size() == 0) {
1219 assert(node->output_color_space != COLORSPACE_INVALID);
1220 assert(node->output_gamma_curve != GAMMA_INVALID);
1224 Colorspace color_space = node->incoming_links[0]->output_color_space;
1225 GammaCurve gamma_curve = node->incoming_links[0]->output_gamma_curve;
1226 for (unsigned j = 1; j < node->incoming_links.size(); ++j) {
1227 if (node->incoming_links[j]->output_color_space != color_space) {
1228 color_space = COLORSPACE_INVALID;
1230 if (node->incoming_links[j]->output_gamma_curve != gamma_curve) {
1231 gamma_curve = GAMMA_INVALID;
1235 // The conversion effects already have their outputs set correctly,
1236 // so leave them alone.
1237 if (node->effect->effect_type_id() != "ColorspaceConversionEffect") {
1238 node->output_color_space = color_space;
1240 if (node->effect->effect_type_id() != "GammaCompressionEffect" &&
1241 node->effect->effect_type_id() != "GammaExpansionEffect") {
1242 node->output_gamma_curve = gamma_curve;
1247 // Propagate alpha information as far as we can in the graph.
1248 // Similar to propagate_gamma_and_color_space().
1249 void EffectChain::propagate_alpha()
1251 // We depend on going through the nodes in order.
1252 sort_all_nodes_topologically();
1254 for (unsigned i = 0; i < nodes.size(); ++i) {
1255 Node *node = nodes[i];
1256 if (node->disabled) {
1259 assert(node->incoming_links.size() == node->effect->num_inputs());
1260 if (node->incoming_links.size() == 0) {
1261 assert(node->output_alpha_type != ALPHA_INVALID);
1265 // The alpha multiplication/division effects are special cases.
1266 if (node->effect->effect_type_id() == "AlphaMultiplicationEffect") {
1267 assert(node->incoming_links.size() == 1);
1268 assert(node->incoming_links[0]->output_alpha_type == ALPHA_POSTMULTIPLIED);
1269 node->output_alpha_type = ALPHA_PREMULTIPLIED;
1272 if (node->effect->effect_type_id() == "AlphaDivisionEffect") {
1273 assert(node->incoming_links.size() == 1);
1274 assert(node->incoming_links[0]->output_alpha_type == ALPHA_PREMULTIPLIED);
1275 node->output_alpha_type = ALPHA_POSTMULTIPLIED;
1279 // GammaCompressionEffect and GammaExpansionEffect are also a special case,
1280 // because they are the only one that _need_ postmultiplied alpha.
1281 if (node->effect->effect_type_id() == "GammaCompressionEffect" ||
1282 node->effect->effect_type_id() == "GammaExpansionEffect") {
1283 assert(node->incoming_links.size() == 1);
1284 if (node->incoming_links[0]->output_alpha_type == ALPHA_BLANK) {
1285 node->output_alpha_type = ALPHA_BLANK;
1286 } else if (node->incoming_links[0]->output_alpha_type == ALPHA_POSTMULTIPLIED) {
1287 node->output_alpha_type = ALPHA_POSTMULTIPLIED;
1289 node->output_alpha_type = ALPHA_INVALID;
1294 // Only inputs can have unconditional alpha output (OUTPUT_BLANK_ALPHA
1295 // or OUTPUT_POSTMULTIPLIED_ALPHA), and they have already been
1296 // taken care of above. Rationale: Even if you could imagine
1297 // e.g. an effect that took in an image and set alpha=1.0
1298 // unconditionally, it wouldn't make any sense to have it as
1299 // e.g. OUTPUT_BLANK_ALPHA, since it wouldn't know whether it
1300 // got its input pre- or postmultiplied, so it wouldn't know
1301 // whether to divide away the old alpha or not.
1302 Effect::AlphaHandling alpha_handling = node->effect->alpha_handling();
1303 assert(alpha_handling == Effect::INPUT_AND_OUTPUT_PREMULTIPLIED_ALPHA ||
1304 alpha_handling == Effect::INPUT_PREMULTIPLIED_ALPHA_KEEP_BLANK ||
1305 alpha_handling == Effect::DONT_CARE_ALPHA_TYPE);
1307 // If the node has multiple inputs, check that they are all valid and
1309 bool any_invalid = false;
1310 bool any_premultiplied = false;
1311 bool any_postmultiplied = false;
1313 for (unsigned j = 0; j < node->incoming_links.size(); ++j) {
1314 switch (node->incoming_links[j]->output_alpha_type) {
1319 // Blank is good as both pre- and postmultiplied alpha,
1320 // so just ignore it.
1322 case ALPHA_PREMULTIPLIED:
1323 any_premultiplied = true;
1325 case ALPHA_POSTMULTIPLIED:
1326 any_postmultiplied = true;
1334 node->output_alpha_type = ALPHA_INVALID;
1338 // Inputs must be of the same type.
1339 if (any_premultiplied && any_postmultiplied) {
1340 node->output_alpha_type = ALPHA_INVALID;
1344 if (alpha_handling == Effect::INPUT_AND_OUTPUT_PREMULTIPLIED_ALPHA ||
1345 alpha_handling == Effect::INPUT_PREMULTIPLIED_ALPHA_KEEP_BLANK) {
1346 // This combination (requiring premultiplied alpha, but _not_ requiring
1347 // linear light) is illegal, since the combination of premultiplied alpha
1348 // and nonlinear inputs is meaningless.
1349 assert(node->effect->needs_linear_light());
1351 // If the effect has asked for premultiplied alpha, check that it has got it.
1352 if (any_postmultiplied) {
1353 node->output_alpha_type = ALPHA_INVALID;
1354 } else if (!any_premultiplied &&
1355 alpha_handling == Effect::INPUT_PREMULTIPLIED_ALPHA_KEEP_BLANK) {
1356 // Blank input alpha, and the effect preserves blank alpha.
1357 node->output_alpha_type = ALPHA_BLANK;
1359 node->output_alpha_type = ALPHA_PREMULTIPLIED;
1362 // OK, all inputs are the same, and this effect is not going
1364 assert(alpha_handling == Effect::DONT_CARE_ALPHA_TYPE);
1365 if (any_premultiplied) {
1366 node->output_alpha_type = ALPHA_PREMULTIPLIED;
1367 } else if (any_postmultiplied) {
1368 node->output_alpha_type = ALPHA_POSTMULTIPLIED;
1370 node->output_alpha_type = ALPHA_BLANK;
1376 bool EffectChain::node_needs_colorspace_fix(Node *node)
1378 if (node->disabled) {
1381 if (node->effect->num_inputs() == 0) {
1385 // propagate_gamma_and_color_space() has already set our output
1386 // to COLORSPACE_INVALID if the inputs differ, so we can rely on that.
1387 if (node->output_color_space == COLORSPACE_INVALID) {
1390 return (node->effect->needs_srgb_primaries() && node->output_color_space != COLORSPACE_sRGB);
1393 // Fix up color spaces so that there are no COLORSPACE_INVALID nodes left in
1394 // the graph. Our strategy is not always optimal, but quite simple:
1395 // Find an effect that's as early as possible where the inputs are of
1396 // unacceptable colorspaces (that is, either different, or, if the effect only
1397 // wants sRGB, not sRGB.) Add appropriate conversions on all its inputs,
1398 // propagate the information anew, and repeat until there are no more such
1400 void EffectChain::fix_internal_color_spaces()
1402 unsigned colorspace_propagation_pass = 0;
1406 for (unsigned i = 0; i < nodes.size(); ++i) {
1407 Node *node = nodes[i];
1408 if (!node_needs_colorspace_fix(node)) {
1412 // Go through each input that is not sRGB, and insert
1413 // a colorspace conversion after it.
1414 for (unsigned j = 0; j < node->incoming_links.size(); ++j) {
1415 Node *input = node->incoming_links[j];
1416 assert(input->output_color_space != COLORSPACE_INVALID);
1417 if (input->output_color_space == COLORSPACE_sRGB) {
1420 Node *conversion = add_node(new ColorspaceConversionEffect());
1421 CHECK(conversion->effect->set_int("source_space", input->output_color_space));
1422 CHECK(conversion->effect->set_int("destination_space", COLORSPACE_sRGB));
1423 conversion->output_color_space = COLORSPACE_sRGB;
1424 replace_sender(input, conversion);
1425 connect_nodes(input, conversion);
1428 // Re-sort topologically, and propagate the new information.
1429 propagate_gamma_and_color_space();
1436 sprintf(filename, "step5-colorspacefix-iter%u.dot", ++colorspace_propagation_pass);
1437 output_dot(filename);
1438 assert(colorspace_propagation_pass < 100);
1439 } while (found_any);
1441 for (unsigned i = 0; i < nodes.size(); ++i) {
1442 Node *node = nodes[i];
1443 if (node->disabled) {
1446 assert(node->output_color_space != COLORSPACE_INVALID);
1450 bool EffectChain::node_needs_alpha_fix(Node *node)
1452 if (node->disabled) {
1456 // propagate_alpha() has already set our output to ALPHA_INVALID if the
1457 // inputs differ or we are otherwise in mismatch, so we can rely on that.
1458 return (node->output_alpha_type == ALPHA_INVALID);
1461 // Fix up alpha so that there are no ALPHA_INVALID nodes left in
1462 // the graph. Similar to fix_internal_color_spaces().
1463 void EffectChain::fix_internal_alpha(unsigned step)
1465 unsigned alpha_propagation_pass = 0;
1469 for (unsigned i = 0; i < nodes.size(); ++i) {
1470 Node *node = nodes[i];
1471 if (!node_needs_alpha_fix(node)) {
1475 // If we need to fix up GammaExpansionEffect, then clearly something
1476 // is wrong, since the combination of premultiplied alpha and nonlinear inputs
1478 assert(node->effect->effect_type_id() != "GammaExpansionEffect");
1480 AlphaType desired_type = ALPHA_PREMULTIPLIED;
1482 // GammaCompressionEffect is special; it needs postmultiplied alpha.
1483 if (node->effect->effect_type_id() == "GammaCompressionEffect") {
1484 assert(node->incoming_links.size() == 1);
1485 assert(node->incoming_links[0]->output_alpha_type == ALPHA_PREMULTIPLIED);
1486 desired_type = ALPHA_POSTMULTIPLIED;
1489 // Go through each input that is not premultiplied alpha, and insert
1490 // a conversion before it.
1491 for (unsigned j = 0; j < node->incoming_links.size(); ++j) {
1492 Node *input = node->incoming_links[j];
1493 assert(input->output_alpha_type != ALPHA_INVALID);
1494 if (input->output_alpha_type == desired_type ||
1495 input->output_alpha_type == ALPHA_BLANK) {
1499 if (desired_type == ALPHA_PREMULTIPLIED) {
1500 conversion = add_node(new AlphaMultiplicationEffect());
1502 conversion = add_node(new AlphaDivisionEffect());
1504 conversion->output_alpha_type = desired_type;
1505 replace_sender(input, conversion);
1506 connect_nodes(input, conversion);
1509 // Re-sort topologically, and propagate the new information.
1510 propagate_gamma_and_color_space();
1518 sprintf(filename, "step%u-alphafix-iter%u.dot", step, ++alpha_propagation_pass);
1519 output_dot(filename);
1520 assert(alpha_propagation_pass < 100);
1521 } while (found_any);
1523 for (unsigned i = 0; i < nodes.size(); ++i) {
1524 Node *node = nodes[i];
1525 if (node->disabled) {
1528 assert(node->output_alpha_type != ALPHA_INVALID);
1532 // Make so that the output is in the desired color space.
1533 void EffectChain::fix_output_color_space()
1535 Node *output = find_output_node();
1536 if (output->output_color_space != output_format.color_space) {
1537 Node *conversion = add_node(new ColorspaceConversionEffect());
1538 CHECK(conversion->effect->set_int("source_space", output->output_color_space));
1539 CHECK(conversion->effect->set_int("destination_space", output_format.color_space));
1540 conversion->output_color_space = output_format.color_space;
1541 connect_nodes(output, conversion);
1543 propagate_gamma_and_color_space();
1547 // Make so that the output is in the desired pre-/postmultiplication alpha state.
1548 void EffectChain::fix_output_alpha()
1550 Node *output = find_output_node();
1551 assert(output->output_alpha_type != ALPHA_INVALID);
1552 if (output->output_alpha_type == ALPHA_BLANK) {
1553 // No alpha output, so we don't care.
1556 if (output->output_alpha_type == ALPHA_PREMULTIPLIED &&
1557 output_alpha_format == OUTPUT_ALPHA_FORMAT_POSTMULTIPLIED) {
1558 Node *conversion = add_node(new AlphaDivisionEffect());
1559 connect_nodes(output, conversion);
1561 propagate_gamma_and_color_space();
1563 if (output->output_alpha_type == ALPHA_POSTMULTIPLIED &&
1564 output_alpha_format == OUTPUT_ALPHA_FORMAT_PREMULTIPLIED) {
1565 Node *conversion = add_node(new AlphaMultiplicationEffect());
1566 connect_nodes(output, conversion);
1568 propagate_gamma_and_color_space();
1572 bool EffectChain::node_needs_gamma_fix(Node *node)
1574 if (node->disabled) {
1578 // Small hack since the output is not an explicit node:
1579 // If we are the last node and our output is in the wrong
1580 // space compared to EffectChain's output, we need to fix it.
1581 // This will only take us to linear, but fix_output_gamma()
1582 // will come and take us to the desired output gamma
1585 // This needs to be before everything else, since it could
1586 // even apply to inputs (if they are the only effect).
1587 if (node->outgoing_links.empty() &&
1588 node->output_gamma_curve != output_format.gamma_curve &&
1589 node->output_gamma_curve != GAMMA_LINEAR) {
1593 if (node->effect->num_inputs() == 0) {
1597 // propagate_gamma_and_color_space() has already set our output
1598 // to GAMMA_INVALID if the inputs differ, so we can rely on that,
1599 // except for GammaCompressionEffect.
1600 if (node->output_gamma_curve == GAMMA_INVALID) {
1603 if (node->effect->effect_type_id() == "GammaCompressionEffect") {
1604 assert(node->incoming_links.size() == 1);
1605 return node->incoming_links[0]->output_gamma_curve != GAMMA_LINEAR;
1608 return (node->effect->needs_linear_light() && node->output_gamma_curve != GAMMA_LINEAR);
1611 // Very similar to fix_internal_color_spaces(), but for gamma.
1612 // There is one difference, though; before we start adding conversion nodes,
1613 // we see if we can get anything out of asking the sources to deliver
1614 // linear gamma directly. fix_internal_gamma_by_asking_inputs()
1615 // does that part, while fix_internal_gamma_by_inserting_nodes()
1616 // inserts nodes as needed afterwards.
1617 void EffectChain::fix_internal_gamma_by_asking_inputs(unsigned step)
1619 unsigned gamma_propagation_pass = 0;
1623 for (unsigned i = 0; i < nodes.size(); ++i) {
1624 Node *node = nodes[i];
1625 if (!node_needs_gamma_fix(node)) {
1629 // See if all inputs can give us linear gamma. If not, leave it.
1630 vector<Node *> nonlinear_inputs;
1631 find_all_nonlinear_inputs(node, &nonlinear_inputs);
1632 assert(!nonlinear_inputs.empty());
1635 for (unsigned i = 0; i < nonlinear_inputs.size(); ++i) {
1636 Input *input = static_cast<Input *>(nonlinear_inputs[i]->effect);
1637 all_ok &= input->can_output_linear_gamma();
1644 for (unsigned i = 0; i < nonlinear_inputs.size(); ++i) {
1645 CHECK(nonlinear_inputs[i]->effect->set_int("output_linear_gamma", 1));
1646 nonlinear_inputs[i]->output_gamma_curve = GAMMA_LINEAR;
1649 // Re-sort topologically, and propagate the new information.
1650 propagate_gamma_and_color_space();
1657 sprintf(filename, "step%u-gammafix-iter%u.dot", step, ++gamma_propagation_pass);
1658 output_dot(filename);
1659 assert(gamma_propagation_pass < 100);
1660 } while (found_any);
1663 void EffectChain::fix_internal_gamma_by_inserting_nodes(unsigned step)
1665 unsigned gamma_propagation_pass = 0;
1669 for (unsigned i = 0; i < nodes.size(); ++i) {
1670 Node *node = nodes[i];
1671 if (!node_needs_gamma_fix(node)) {
1675 // Special case: We could be an input and still be asked to
1676 // fix our gamma; if so, we should be the only node
1677 // (as node_needs_gamma_fix() would only return true in
1678 // for an input in that case). That means we should insert
1679 // a conversion node _after_ ourselves.
1680 if (node->incoming_links.empty()) {
1681 assert(node->outgoing_links.empty());
1682 Node *conversion = add_node(new GammaExpansionEffect());
1683 CHECK(conversion->effect->set_int("source_curve", node->output_gamma_curve));
1684 conversion->output_gamma_curve = GAMMA_LINEAR;
1685 connect_nodes(node, conversion);
1688 // If not, go through each input that is not linear gamma,
1689 // and insert a gamma conversion after it.
1690 for (unsigned j = 0; j < node->incoming_links.size(); ++j) {
1691 Node *input = node->incoming_links[j];
1692 assert(input->output_gamma_curve != GAMMA_INVALID);
1693 if (input->output_gamma_curve == GAMMA_LINEAR) {
1696 Node *conversion = add_node(new GammaExpansionEffect());
1697 CHECK(conversion->effect->set_int("source_curve", input->output_gamma_curve));
1698 conversion->output_gamma_curve = GAMMA_LINEAR;
1699 replace_sender(input, conversion);
1700 connect_nodes(input, conversion);
1703 // Re-sort topologically, and propagate the new information.
1705 propagate_gamma_and_color_space();
1712 sprintf(filename, "step%u-gammafix-iter%u.dot", step, ++gamma_propagation_pass);
1713 output_dot(filename);
1714 assert(gamma_propagation_pass < 100);
1715 } while (found_any);
1717 for (unsigned i = 0; i < nodes.size(); ++i) {
1718 Node *node = nodes[i];
1719 if (node->disabled) {
1722 assert(node->output_gamma_curve != GAMMA_INVALID);
1726 // Make so that the output is in the desired gamma.
1727 // Note that this assumes linear input gamma, so it might create the need
1728 // for another pass of fix_internal_gamma().
1729 void EffectChain::fix_output_gamma()
1731 Node *output = find_output_node();
1732 if (output->output_gamma_curve != output_format.gamma_curve) {
1733 Node *conversion = add_node(new GammaCompressionEffect());
1734 CHECK(conversion->effect->set_int("destination_curve", output_format.gamma_curve));
1735 conversion->output_gamma_curve = output_format.gamma_curve;
1736 connect_nodes(output, conversion);
1740 // If the user has requested Y'CbCr output, we need to do this conversion
1741 // _after_ GammaCompressionEffect etc., but before dither (see below).
1742 // This is because Y'CbCr, with the exception of a special optional mode
1743 // in Rec. 2020 (which we currently don't support), is defined to work on
1744 // gamma-encoded data.
1745 void EffectChain::add_ycbcr_conversion_if_needed()
1747 assert(output_color_rgba || num_output_color_ycbcr > 0);
1748 if (num_output_color_ycbcr == 0) {
1751 Node *output = find_output_node();
1752 ycbcr_conversion_effect_node = add_node(new YCbCrConversionEffect(output_ycbcr_format, output_ycbcr_type));
1753 connect_nodes(output, ycbcr_conversion_effect_node);
1756 // If the user has requested dither, add a DitherEffect right at the end
1757 // (after GammaCompressionEffect etc.). This needs to be done after everything else,
1758 // since dither is about the only effect that can _not_ be done in linear space.
1759 void EffectChain::add_dither_if_needed()
1761 if (num_dither_bits == 0) {
1764 Node *output = find_output_node();
1765 Node *dither = add_node(new DitherEffect());
1766 CHECK(dither->effect->set_int("num_bits", num_dither_bits));
1767 connect_nodes(output, dither);
1769 dither_effect = dither->effect;
1772 // Compute shaders can't output to the framebuffer, so if the last
1773 // phase ends in a compute shader, add a dummy phase at the end that
1774 // only blits directly from the temporary texture.
1776 // TODO: Add an API for rendering directly to textures, for the cases
1777 // where we're only rendering to an FBO anyway.
1778 void EffectChain::add_dummy_effect_if_needed()
1780 Node *output = find_output_node();
1782 // See if the last effect that's not strong one-to-one is a compute shader.
1783 Node *last_effect = output;
1784 while (last_effect->effect->num_inputs() == 1 &&
1785 last_effect->effect->strong_one_to_one_sampling()) {
1786 last_effect = last_effect->incoming_links[0];
1788 if (last_effect->effect->is_compute_shader()) {
1789 Node *dummy = add_node(new ComputeShaderOutputDisplayEffect());
1790 connect_nodes(output, dummy);
1791 has_dummy_effect = true;
1795 // Find the output node. This is, simply, one that has no outgoing links.
1796 // If there are multiple ones, the graph is malformed (we do not support
1797 // multiple outputs right now).
1798 Node *EffectChain::find_output_node()
1800 vector<Node *> output_nodes;
1801 for (unsigned i = 0; i < nodes.size(); ++i) {
1802 Node *node = nodes[i];
1803 if (node->disabled) {
1806 if (node->outgoing_links.empty()) {
1807 output_nodes.push_back(node);
1810 assert(output_nodes.size() == 1);
1811 return output_nodes[0];
1814 void EffectChain::finalize()
1816 // Output the graph as it is before we do any conversions on it.
1817 output_dot("step0-start.dot");
1819 // Give each effect in turn a chance to rewrite its own part of the graph.
1820 // Note that if more effects are added as part of this, they will be
1821 // picked up as part of the same for loop, since they are added at the end.
1822 for (unsigned i = 0; i < nodes.size(); ++i) {
1823 nodes[i]->effect->rewrite_graph(this, nodes[i]);
1825 output_dot("step1-rewritten.dot");
1827 find_color_spaces_for_inputs();
1828 output_dot("step2-input-colorspace.dot");
1831 output_dot("step3-propagated-alpha.dot");
1833 propagate_gamma_and_color_space();
1834 output_dot("step4-propagated-all.dot");
1836 fix_internal_color_spaces();
1837 fix_internal_alpha(6);
1838 fix_output_color_space();
1839 output_dot("step7-output-colorspacefix.dot");
1841 output_dot("step8-output-alphafix.dot");
1843 // Note that we need to fix gamma after colorspace conversion,
1844 // because colorspace conversions might create needs for gamma conversions.
1845 // Also, we need to run an extra pass of fix_internal_gamma() after
1846 // fixing the output gamma, as we only have conversions to/from linear,
1847 // and fix_internal_alpha() since GammaCompressionEffect needs
1848 // postmultiplied input.
1849 fix_internal_gamma_by_asking_inputs(9);
1850 fix_internal_gamma_by_inserting_nodes(10);
1852 output_dot("step11-output-gammafix.dot");
1854 output_dot("step12-output-alpha-propagated.dot");
1855 fix_internal_alpha(13);
1856 output_dot("step14-output-alpha-fixed.dot");
1857 fix_internal_gamma_by_asking_inputs(15);
1858 fix_internal_gamma_by_inserting_nodes(16);
1860 output_dot("step17-before-ycbcr.dot");
1861 add_ycbcr_conversion_if_needed();
1863 output_dot("step18-before-dither.dot");
1864 add_dither_if_needed();
1866 output_dot("step19-before-dummy-effect.dot");
1867 add_dummy_effect_if_needed();
1869 output_dot("step20-final.dot");
1871 // Construct all needed GLSL programs, starting at the output.
1872 // We need to keep track of which effects have already been computed,
1873 // as an effect with multiple users could otherwise be calculated
1875 map<Node *, Phase *> completed_effects;
1876 construct_phase(find_output_node(), &completed_effects);
1878 output_dot("step21-split-to-phases.dot");
1880 assert(phases[0]->inputs.empty());
1885 void EffectChain::render_to_fbo(GLuint dest_fbo, unsigned width, unsigned height)
1887 // Save original viewport.
1888 GLuint x = 0, y = 0;
1890 if (width == 0 && height == 0) {
1892 glGetIntegerv(GL_VIEWPORT, viewport);
1895 width = viewport[2];
1896 height = viewport[3];
1899 render(dest_fbo, {}, x, y, width, height);
1902 void EffectChain::render_to_texture(const vector<DestinationTexture> &destinations, unsigned width, unsigned height)
1905 assert(!destinations.empty());
1907 if (!has_dummy_effect) {
1908 // We don't end in a compute shader, so there's nothing specific for us to do.
1909 // Create an FBO for this set of textures, and just render to that.
1910 GLuint texnums[4] = { 0, 0, 0, 0 };
1911 for (unsigned i = 0; i < destinations.size() && i < 4; ++i) {
1912 texnums[i] = destinations[i].texnum;
1914 GLuint dest_fbo = resource_pool->create_fbo(texnums[0], texnums[1], texnums[2], texnums[3]);
1915 render(dest_fbo, {}, 0, 0, width, height);
1916 resource_pool->release_fbo(dest_fbo);
1918 render((GLuint)-1, destinations, 0, 0, width, height);
// Core render loop. Exactly one of dest_fbo / destinations is used:
// if <destinations> is empty, the last phase renders into <dest_fbo> at
// (x, y, width, height); otherwise, the chain must end in a compute shader
// (followed by the dummy display phase, which is then skipped) writing
// directly into the given destination textures.
1922 void EffectChain::render(GLuint dest_fbo, const vector<DestinationTexture> &destinations, unsigned x, unsigned y, unsigned width, unsigned height)
1925 assert(destinations.size() <= 1);
1927 // This needs to be set anew, in case we are coming from a different context
1928 // from when we initialized.
1930 glDisable(GL_DITHER);
// Record whether the final target wants sRGB conversion, so it can be
// restored for the last phase; intermediates always render with sRGB on.
1933 const bool final_srgb = glIsEnabled(GL_FRAMEBUFFER_SRGB);
1935 bool current_srgb = final_srgb;
// Standard state for 2D post-processing: no blending, no depth handling.
1939 glDisable(GL_BLEND);
1941 glDisable(GL_DEPTH_TEST);
1943 glDepthMask(GL_FALSE);
1946 set<Phase *> generated_mipmaps;
1948 // We keep one texture per output, but only for as long as we actually have any
1949 // phases that need it as an input. (We don't make any effort to reorder phases
1950 // to minimize the number of textures in play, as register allocation can be
1951 // complicated and we rarely have much to gain, since our graphs are typically
1953 map<Phase *, GLuint> output_textures;
1954 map<Phase *, int> ref_counts;
1955 for (Phase *phase : phases) {
1956 for (Phase *input : phase->inputs) {
1957 ++ref_counts[input];
1961 size_t num_phases = phases.size();
1962 if (destinations.empty()) {
1963 assert(dest_fbo != (GLuint)-1);
1965 assert(has_dummy_effect);
// When rendering straight into textures, the trailing dummy display phase
// is skipped, so there must be at least one real phase before it.
1968 assert(num_phases >= 2);
1969 assert(!phases.back()->is_compute_shader);
1970 assert(phases.back()->effects.size() == 1);
1971 assert(phases.back()->effects[0]->effect->effect_type_id() == "ComputeShaderOutputDisplayEffect");
1973 // We are rendering to a set of textures, so we can run the compute shader
1974 // directly and skip the dummy phase.
1978 for (unsigned phase_num = 0; phase_num < num_phases; ++phase_num) {
1979 Phase *phase = phases[phase_num];
// Optionally wrap the phase in a GL timer query, reusing finished query
// objects from earlier frames where possible.
1981 if (do_phase_timing) {
1982 GLuint timer_query_object;
1983 if (phase->timer_query_objects_free.empty()) {
1984 glGenQueries(1, &timer_query_object);
1986 timer_query_object = phase->timer_query_objects_free.front();
1987 phase->timer_query_objects_free.pop_front();
1989 glBeginQuery(GL_TIME_ELAPSED, timer_query_object);
1990 phase->timer_query_objects_running.push_back(timer_query_object);
1992 bool last_phase = (phase_num == num_phases - 1);
1993 if (phase_num == num_phases - 1) {
1994 // Last phase goes to the output the user specified.
1995 if (!phase->is_compute_shader) {
1996 glBindFramebuffer(GL_FRAMEBUFFER, dest_fbo);
1998 GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT);
1999 assert(status == GL_FRAMEBUFFER_COMPLETE);
2000 glViewport(x, y, width, height);
// The dither effect needs the final output size to size its dither
// pattern correctly; it is only known at render time.
2002 if (dither_effect != nullptr) {
2003 CHECK(dither_effect->set_int("output_width", width));
2004 CHECK(dither_effect->set_int("output_height", height));
2008 // Enable sRGB rendering for intermediates in case we are
2009 // rendering to an sRGB format.
2010 // TODO: Support this for compute shaders.
2011 bool needs_srgb = last_phase ? final_srgb : true;
2012 if (needs_srgb && !current_srgb) {
2013 glEnable(GL_FRAMEBUFFER_SRGB);
2015 current_srgb = true;
2016 } else if (!needs_srgb && current_srgb) {
2017 glDisable(GL_FRAMEBUFFER_SRGB);
// Fix: after disabling sRGB the cached state must read false; it was
// wrongly set to true, making later phases that need sRGB skip glEnable.
2019 current_srgb = false;
2022 // Find a texture for this phase.
2023 inform_input_sizes(phase);
2024 find_output_size(phase);
2025 vector<DestinationTexture> phase_destinations;
// Intermediate phase: allocate an output texture from the pool and
// remember it, so that later phases can sample from it.
2027 GLuint tex_num = resource_pool->create_2d_texture(intermediate_format, phase->output_width, phase->output_height);
2028 output_textures.insert(make_pair(phase, tex_num));
2029 phase_destinations.push_back(DestinationTexture{ tex_num, intermediate_format });
2031 // The output texture needs to have valid state to be written to by a compute shader.
2032 glActiveTexture(GL_TEXTURE0);
2034 glBindTexture(GL_TEXTURE_2D, tex_num);
2036 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
2038 } else if (phase->is_compute_shader) {
// Last phase is a compute shader: write into the caller's textures.
2039 assert(!destinations.empty());
2040 phase_destinations = destinations;
2043 execute_phase(phase, output_textures, phase_destinations, &generated_mipmaps);
2044 if (do_phase_timing) {
2045 glEndQuery(GL_TIME_ELAPSED);
2048 // Drop any input textures we don't need anymore.
2049 for (Phase *input : phase->inputs) {
2050 assert(ref_counts[input] > 0);
2051 if (--ref_counts[input] == 0) {
2052 resource_pool->release_2d_texture(output_textures[input]);
2053 output_textures.erase(input);
// Return any leftover intermediate textures to the pool.
2058 for (const auto &phase_and_texnum : output_textures) {
2059 resource_pool->release_2d_texture(phase_and_texnum.second);
// Unbind everything we may have bound, so no GL state leaks to the caller.
2062 glBindFramebuffer(GL_FRAMEBUFFER, 0);
2067 glBindBuffer(GL_ARRAY_BUFFER, 0);
2069 glBindVertexArray(0);
2072 if (do_phase_timing) {
2073 // Get back the timer queries.
2074 for (unsigned phase_num = 0; phase_num < phases.size(); ++phase_num) {
2075 Phase *phase = phases[phase_num];
2076 for (auto timer_it = phase->timer_query_objects_running.cbegin();
2077 timer_it != phase->timer_query_objects_running.cend(); ) {
2078 GLint timer_query_object = *timer_it;
2080 glGetQueryObjectiv(timer_query_object, GL_QUERY_RESULT_AVAILABLE, &available);
2082 GLuint64 time_elapsed;
2083 glGetQueryObjectui64v(timer_query_object, GL_QUERY_RESULT, &time_elapsed);
2084 phase->time_elapsed_ns += time_elapsed;
2085 ++phase->num_measured_iterations;
// Recycle the finished query object; erase(it++) is safe on a
// node-based container (the copy survives the erase).
2086 phase->timer_query_objects_free.push_back(timer_query_object);
2087 phase->timer_query_objects_running.erase(timer_it++);
// Turn per-phase GPU timing (GL timer queries) on or off.
// Enabling requires timer-query support from the driver.
2096 void EffectChain::enable_phase_timing(bool enable)
2099 assert(movit_timer_queries_supported);
2101 this->do_phase_timing = enable;
// Clear all accumulated phase-timing statistics, so a fresh measurement
// window can begin.
2104 void EffectChain::reset_phase_timing()
2106 for (unsigned phase_num = 0; phase_num < phases.size(); ++phase_num) {
2107 Phase *phase = phases[phase_num];
2108 phase->time_elapsed_ns = 0;
2109 phase->num_measured_iterations = 0;
// Print the average measured GPU time per phase (with the effects it
// contains) and the total, to stdout.
2113 void EffectChain::print_phase_timing()
2115 double total_time_ms = 0.0;
2116 for (unsigned phase_num = 0; phase_num < phases.size(); ++phase_num) {
2117 Phase *phase = phases[phase_num];
// Average over all measured iterations, converted from ns to ms.
2118 double avg_time_ms = phase->time_elapsed_ns * 1e-6 / phase->num_measured_iterations;
2119 printf("Phase %d: %5.1f ms [", phase_num, avg_time_ms);
2120 for (unsigned effect_num = 0; effect_num < phase->effects.size(); ++effect_num) {
2121 if (effect_num != 0) {
2124 printf("%s", phase->effects[effect_num]->effect->effect_type_id().c_str());
2127 total_time_ms += avg_time_ms;
2129 printf("Total: %5.1f ms\n", total_time_ms);
// Run a single phase: bind its RTT input textures, set up its output
// (an image unit for compute shaders, an FBO otherwise), let each effect
// set its GL state, upload uniforms, and finally dispatch/draw.
2132 void EffectChain::execute_phase(Phase *phase,
2133 const map<Phase *, GLuint> &output_textures,
2134 const std::vector<DestinationTexture> &destinations,
2135 set<Phase *> *generated_mipmaps)
2137 // Set up RTT inputs for this phase.
2138 for (unsigned sampler = 0; sampler < phase->inputs.size(); ++sampler) {
2139 glActiveTexture(GL_TEXTURE0 + sampler);
2140 Phase *input = phase->inputs[sampler];
2141 input->output_node->bound_sampler_num = sampler;
2142 const auto it = output_textures.find(input);
2143 assert(it != output_textures.end());
2144 glBindTexture(GL_TEXTURE_2D, it->second);
// Generate mipmaps lazily, at most once per input texture per render pass.
2146 if (phase->input_needs_mipmaps && generated_mipmaps->count(input) == 0) {
2147 glGenerateMipmap(GL_TEXTURE_2D);
2149 generated_mipmaps->insert(input);
2151 setup_rtt_sampler(sampler, phase->input_needs_mipmaps);
2152 phase->input_samplers[sampler] = sampler; // Bind the sampler to the right uniform.
2155 GLuint instance_program_num = resource_pool->use_glsl_program(phase->glsl_program_num);
2158 // And now the output.
2160 if (phase->is_compute_shader) {
2161 assert(!destinations.empty());
2163 // This is currently the only place where we use image units,
2164 // so we can always start at 0. TODO: Support multiple destinations.
2165 phase->outbuf_image_unit = 0;
2166 glBindImageTexture(phase->outbuf_image_unit, destinations[0].texnum, 0, GL_FALSE, 0, GL_WRITE_ONLY, destinations[0].format);
// Compute shaders have no viewport, so output geometry is passed as uniforms.
2168 phase->uniform_output_size[0] = phase->output_width;
2169 phase->uniform_output_size[1] = phase->output_height;
2170 phase->inv_output_size.x = 1.0f / phase->output_width;
2171 phase->inv_output_size.y = 1.0f / phase->output_height;
// Half-texel offset, used to sample at texel centers.
2172 phase->output_texcoord_adjust.x = 0.5f / phase->output_width;
2173 phase->output_texcoord_adjust.y = 0.5f / phase->output_height;
2174 } else if (!destinations.empty()) {
// Fragment-shader phase rendering into a caller-supplied texture:
// wrap it in a temporary FBO from the resource pool.
2175 assert(destinations.size() == 1);
2176 fbo = resource_pool->create_fbo(destinations[0].texnum);
2177 glBindFramebuffer(GL_FRAMEBUFFER, fbo);
2178 glViewport(0, 0, phase->output_width, phase->output_height);
2181 // Give the required parameters to all the effects.
2182 unsigned sampler_num = phase->inputs.size();
2183 for (unsigned i = 0; i < phase->effects.size(); ++i) {
2184 Node *node = phase->effects[i];
2185 unsigned old_sampler_num = sampler_num;
2186 node->effect->set_gl_state(instance_program_num, phase->effect_ids[node], &sampler_num);
// A single-texture effect binds exactly one sampler; remember which,
// so its input texture can be identified later.
2189 if (node->effect->is_single_texture()) {
2190 assert(sampler_num - old_sampler_num == 1);
2191 node->bound_sampler_num = old_sampler_num;
2193 node->bound_sampler_num = -1;
2197 if (phase->is_compute_shader) {
2199 phase->compute_shader_node->effect->get_compute_dimensions(phase->output_width, phase->output_height, &x, &y, &z);
2201 // Uniforms need to come after set_gl_state() _and_ get_compute_dimensions(),
2202 // since they can be updated from there.
2203 setup_uniforms(phase);
2204 glDispatchCompute(x, y, z);
// Make the compute shader's writes visible to subsequent reads.
// NOTE(review): GL_TEXTURE_UPDATE_BARRIER_BIT covers glTexSubImage-style
// updates; confirm this is the intended bit for image-store writes.
2206 glMemoryBarrier(GL_TEXTURE_FETCH_BARRIER_BIT | GL_TEXTURE_UPDATE_BARRIER_BIT);
2209 // Uniforms need to come after set_gl_state(), since they can be updated
2211 setup_uniforms(phase);
2213 // Bind the vertex data.
2214 GLuint vao = resource_pool->create_vec2_vao(phase->attribute_indexes, vbo);
2215 glBindVertexArray(vao);
// A single triangle covering the entire output (see the shared VBO
// generated in the constructor).
2217 glDrawArrays(GL_TRIANGLES, 0, 3);
2220 resource_pool->release_vec2_vao(vao);
// Let each effect undo any special GL state it set above.
2223 for (unsigned i = 0; i < phase->effects.size(); ++i) {
2224 Node *node = phase->effects[i];
2225 node->effect->clear_gl_state();
2228 resource_pool->unuse_glsl_program(instance_program_num);
2231 resource_pool->release_fbo(fbo);
// Upload all of the phase's collected uniforms to the currently bound GLSL
// program, one typed list at a time. Uniforms whose location is -1 were
// optimized away by the GLSL compiler and are skipped.
2235 void EffectChain::setup_uniforms(Phase *phase)
2237 // TODO: Use UBO blocks.
2238 for (size_t i = 0; i < phase->uniforms_image2d.size(); ++i) {
2239 const Uniform<int> &uniform = phase->uniforms_image2d[i];
2240 if (uniform.location != -1) {
2241 glUniform1iv(uniform.location, uniform.num_values, uniform.value);
2244 for (size_t i = 0; i < phase->uniforms_sampler2d.size(); ++i) {
2245 const Uniform<int> &uniform = phase->uniforms_sampler2d[i];
2246 if (uniform.location != -1) {
2247 glUniform1iv(uniform.location, uniform.num_values, uniform.value);
2250 for (size_t i = 0; i < phase->uniforms_bool.size(); ++i) {
2251 const Uniform<bool> &uniform = phase->uniforms_bool[i];
2252 assert(uniform.num_values == 1);
2253 if (uniform.location != -1) {
// GLSL bools are set through the int interface.
2254 glUniform1i(uniform.location, *uniform.value);
2257 for (size_t i = 0; i < phase->uniforms_int.size(); ++i) {
2258 const Uniform<int> &uniform = phase->uniforms_int[i];
2259 if (uniform.location != -1) {
2260 glUniform1iv(uniform.location, uniform.num_values, uniform.value);
2263 for (size_t i = 0; i < phase->uniforms_ivec2.size(); ++i) {
2264 const Uniform<int> &uniform = phase->uniforms_ivec2[i];
2265 if (uniform.location != -1) {
2266 glUniform2iv(uniform.location, uniform.num_values, uniform.value);
2269 for (size_t i = 0; i < phase->uniforms_float.size(); ++i) {
2270 const Uniform<float> &uniform = phase->uniforms_float[i];
2271 if (uniform.location != -1) {
2272 glUniform1fv(uniform.location, uniform.num_values, uniform.value);
2275 for (size_t i = 0; i < phase->uniforms_vec2.size(); ++i) {
2276 const Uniform<float> &uniform = phase->uniforms_vec2[i];
2277 if (uniform.location != -1) {
2278 glUniform2fv(uniform.location, uniform.num_values, uniform.value);
2281 for (size_t i = 0; i < phase->uniforms_vec3.size(); ++i) {
2282 const Uniform<float> &uniform = phase->uniforms_vec3[i];
2283 if (uniform.location != -1) {
2284 glUniform3fv(uniform.location, uniform.num_values, uniform.value);
2287 for (size_t i = 0; i < phase->uniforms_vec4.size(); ++i) {
2288 const Uniform<float> &uniform = phase->uniforms_vec4[i];
2289 if (uniform.location != -1) {
2290 glUniform4fv(uniform.location, uniform.num_values, uniform.value);
2293 for (size_t i = 0; i < phase->uniforms_mat3.size(); ++i) {
2294 const Uniform<Matrix3d> &uniform = phase->uniforms_mat3[i];
2295 assert(uniform.num_values == 1);
2296 if (uniform.location != -1) {
2297 // Convert to float (GLSL has no double matrices).
// Written in column-major order, as glUniformMatrix3fv expects
// (transpose flag below is GL_FALSE).
2299 for (unsigned y = 0; y < 3; ++y) {
2300 for (unsigned x = 0; x < 3; ++x) {
2301 matrixf[y + x * 3] = (*uniform.value)(y, x);
2304 glUniformMatrix3fv(uniform.location, 1, GL_FALSE, matrixf);
// Configure filtering and clamping for a render-to-texture input bound to
// the given texture unit.
2309 void EffectChain::setup_rtt_sampler(int sampler_num, bool use_mipmaps)
2311 glActiveTexture(GL_TEXTURE0 + sampler_num);
// Mipmapped minification when the phase asked for mipmaps...
2314 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
// ...plain bilinear filtering otherwise.
2317 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
// Clamp so that filtering never samples outside the rendered area.
2320 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
2322 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
2326 } // namespace movit