}
}
-unsigned EffectChain::fit_rectangle_to_aspect(unsigned width, unsigned height)
+void EffectChain::size_rectangle_to_fit(unsigned width, unsigned height, unsigned *output_width, unsigned *output_height)
{
+ unsigned scaled_width, scaled_height;
+
if (float(width) * aspect_denom >= float(height) * aspect_nom) {
// Same aspect, or W/H > aspect (image is wider than the frame).
- // In either case, keep width.
- return width;
+ // In either case, keep width, and adjust height.
+ scaled_width = width;
+ scaled_height = lrintf(width * aspect_denom / aspect_nom);
} else {
// W/H < aspect (image is taller than the frame), so keep height,
- // and adjust width correspondingly.
- return lrintf(height * aspect_nom / aspect_denom);
+ // and adjust width.
+ scaled_width = lrintf(height * aspect_nom / aspect_denom);
+ scaled_height = height;
+ }
+
+ // We should be consistently larger or smaller than the existing choice,
+ // since we have the same aspect.
+ assert(!(scaled_width < *output_width && scaled_height > *output_height));
+ assert(!(scaled_height < *output_height && scaled_width > *output_width));
+
+ if (scaled_width >= *output_width && scaled_height >= *output_height) {
+ *output_width = scaled_width;
+ *output_height = scaled_height;
}
}
return;
}
+ unsigned output_width = 0, output_height = 0;
+
// If not, look at the input phases and textures.
// We select the largest one (by fit into the current aspect).
- unsigned best_width = 0;
for (unsigned i = 0; i < phase->inputs.size(); ++i) {
Node *input = phase->inputs[i];
assert(input->phase->output_width != 0);
assert(input->phase->output_height != 0);
- unsigned width = fit_rectangle_to_aspect(input->phase->output_width, input->phase->output_height);
- if (width > best_width) {
- best_width = width;
- }
+ size_rectangle_to_fit(input->phase->output_width, input->phase->output_height, &output_width, &output_height);
}
for (unsigned i = 0; i < phase->effects.size(); ++i) {
Effect *effect = phase->effects[i]->effect;
}
Input *input = static_cast<Input *>(effect);
- unsigned width = fit_rectangle_to_aspect(input->get_width(), input->get_height());
- if (width > best_width) {
- best_width = width;
- }
+ size_rectangle_to_fit(input->get_width(), input->get_height(), &output_width, &output_height);
}
- assert(best_width != 0);
- phase->output_width = best_width;
- phase->output_height = best_width * aspect_denom / aspect_nom;
+ assert(output_width != 0);
+ assert(output_height != 0);
+ phase->output_width = output_width;
+ phase->output_height = output_height;
}
void EffectChain::sort_all_nodes_topologically()
void insert_node_between(Node *sender, Node *middle, Node *receiver);
private:
- // Fits a rectangle of the given size to the current aspect ratio
- // (aspect_nom/aspect_denom) and returns the new width and height.
- unsigned fit_rectangle_to_aspect(unsigned width, unsigned height);
+ // Make sure the output rectangle is at least large enough to hold
+ // the given input rectangle in both dimensions, and is of the
+ // current aspect ratio (aspect_nom/aspect_denom).
+ void size_rectangle_to_fit(unsigned width, unsigned height, unsigned *output_width, unsigned *output_height);
// Compute the input sizes for all inputs for all effects in a given phase,
// and inform the effects about the results.
EXPECT_EQ("FlatInput", node->incoming_links[0]->effect->effect_type_id());
EXPECT_EQ("ColorspaceConversionEffect", node->outgoing_links[0]->effect->effect_type_id());
}
+
+// An effect that does nothing, but requests texture bounce and stores
+// its input size.
+class SizeStoringEffect : public BouncingIdentityEffect {
+public:
+ SizeStoringEffect() : input_width(-1), input_height(-1) {}
+ virtual void inform_input_size(unsigned input_num, unsigned width, unsigned height) {
+ assert(input_num == 0);
+ input_width = width;
+ input_height = height;
+ }
+
+ int input_width, input_height;
+};
+
+TEST(EffectChainTest, AspectRatioConversion) {
+ float data1[4 * 3] = {
+ 0.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f,
+ };
+ float data2[7 * 7] = {
+ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+ 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+ };
+
+ // The right conversion here is that the 7x7 image decides the size,
+ // since it is the biggest, so everything is scaled up to 9x7
+ // (keep the height, round the width 9.333 to 9).
+ float out_data[9 * 7];
+
+ EffectChainTester tester(NULL, 4, 3);
+
+ ImageFormat format;
+ format.color_space = COLORSPACE_sRGB;
+ format.gamma_curve = GAMMA_LINEAR;
+
+ FlatInput *input1 = new FlatInput(format, FORMAT_GRAYSCALE, GL_FLOAT, 4, 3);
+ input1->set_pixel_data(data1);
+
+ FlatInput *input2 = new FlatInput(format, FORMAT_GRAYSCALE, GL_FLOAT, 7, 7);
+ input2->set_pixel_data(data2);
+
+ SizeStoringEffect *input_store = new SizeStoringEffect();
+
+ tester.get_chain()->add_input(input1);
+ tester.get_chain()->add_input(input2);
+ tester.get_chain()->add_effect(new AddEffect(), input1, input2);
+ tester.get_chain()->add_effect(input_store);
+ tester.run(out_data, GL_RED, COLORSPACE_sRGB, GAMMA_LINEAR);
+
+ EXPECT_EQ(9, input_store->input_width);
+ EXPECT_EQ(7, input_store->input_height);
+}