2 * This file is part of FFmpeg.
4 * FFmpeg is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2.1 of the License, or (at your option) any later version.
9 * FFmpeg is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with FFmpeg; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
18 * Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
19 * Copyright (C) 2009, Willow Garage Inc., all rights reserved.
20 * Copyright (C) 2013, OpenCV Foundation, all rights reserved.
21 * Third party copyrights are property of their respective owners.
23 * Redistribution and use in source and binary forms, with or without modification,
24 * are permitted provided that the following conditions are met:
26 * * Redistribution's of source code must retain the above copyright notice,
27 * this list of conditions and the following disclaimer.
29 * * Redistribution's in binary form must reproduce the above copyright notice,
30 * this list of conditions and the following disclaimer in the documentation
31 * and/or other materials provided with the distribution.
33 * * The name of the copyright holders may not be used to endorse or promote products
34 * derived from this software without specific prior written permission.
36 * This software is provided by the copyright holders and contributors "as is" and
37 * any express or implied warranties, including, but not limited to, the implied
38 * warranties of merchantability and fitness for a particular purpose are disclaimed.
39 * In no event shall the Intel Corporation or contributors be liable for any direct,
40 * indirect, incidental, special, exemplary, or consequential damages
41 * (including, but not limited to, procurement of substitute goods or services;
42 * loss of use, data, or profits; or business interruption) however caused
43 * and on any theory of liability, whether in contract, strict liability,
44 * or tort (including negligence or otherwise) arising in any way out of
45 * the use of this software, even if advised of the possibility of such damage.
#include "libavutil/lfg.h"
50 #include "libavutil/opt.h"
51 #include "libavutil/imgutils.h"
52 #include "libavutil/mem.h"
53 #include "libavutil/fifo.h"
54 #include "libavutil/common.h"
55 #include "libavutil/avassert.h"
56 #include "libavutil/pixfmt.h"
58 #include "framequeue.h"
60 #include "transform.h"
64 #include "opencl_source.h"
68 This filter matches feature points between frames (dealing with outliers) and then
69 uses the matches to estimate an affine transform between frames. This transform is
70 decomposed into various values (translation, scale, rotation) and the values are
summed relative to the start of the video to obtain an absolute camera position
for each frame. This "camera path" is then smoothed via a gaussian filter, resulting
in a new path that is turned back into an affine transform and applied to each
frame to render a smoothed video.
78 All of the work to extract motion data from frames occurs in queue_frame. Motion data
79 is buffered in a smoothing window, so queue_frame simply computes the absolute camera
80 positions and places them in ringbuffers.
82 filter_frame is responsible for looking at the absolute camera positions currently
83 in the ringbuffers, applying the gaussian filter, and then transforming the frames.
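
Example invocation (illustrative; the exact device and format options depend on
the local OpenCL setup):

ffmpeg -init_hw_device opencl=gpu -filter_hw_device gpu -i shaky.mp4 \
-vf 'format=nv12,hwupload,deshake_opencl=smooth_strength=0.5,hwdownload,format=nv12' \
stable.mp4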
86 // Number of bits for BRIEF descriptors
88 // Size of the patch from which a BRIEF descriptor is extracted
89 // This is the size used in OpenCV
90 #define BRIEF_PATCH_SIZE 31
91 #define BRIEF_PATCH_SIZE_HALF (BRIEF_PATCH_SIZE / 2)
93 #define MATCHES_CONTIG_SIZE 2000
#define ROUNDED_UP_DIV(a, b) (((a) + (b) - 1) / (b))
97 typedef struct PointPair {
104 typedef struct MotionVector {
106 // Used to mark vectors as potential outliers
107 cl_int should_consider;
110 // Denotes the indices for the different types of motion in the ringbuffers array
111 enum RingbufferIndices {
118 // Should always be last
122 // Struct that holds data for drawing point match debug data
123 typedef struct DebugMatches {
124 MotionVector *matches;
125 // The points used to calculate the affine transform for a frame
126 MotionVector model_matches[3];
129 // For cases where we couldn't calculate a model
130 int num_model_matches;
133 // Groups together the ringbuffers that store absolute distortion / position values
135 typedef struct AbsoluteFrameMotion {
136 // Array with the various ringbuffers, indexed via the RingbufferIndices enum
137 AVFifoBuffer *ringbuffers[RingbufCount];
139 // Offset to get to the current frame being processed
141 int curr_frame_offset;
142 // Keeps track of where the start and end of contiguous motion data is (to
143 // deal with cases where no motion data is found between two frames)
144 int data_start_offset;
147 AVFifoBuffer *debug_matches;
148 } AbsoluteFrameMotion;
150 // Takes care of freeing the arrays within the DebugMatches inside of the
151 // debug_matches ringbuffer and then freeing the buffer itself.
152 static void free_debug_matches(AbsoluteFrameMotion *afm) {
155 if (!afm->debug_matches) {
159 while (av_fifo_size(afm->debug_matches) > 0) {
160 av_fifo_generic_read(
163 sizeof(DebugMatches),
167 av_freep(&dm.matches);
170 av_fifo_freep(&afm->debug_matches);
173 // Stores the translation, scale, rotation, and skew deltas between two frames
174 typedef struct FrameDelta {
175 cl_float2 translation;
181 typedef struct SimilarityMatrix {
182 // The 2x3 similarity matrix
186 typedef struct CropInfo {
187 // The top left corner of the bounding box for the crop
189 // The bottom right corner of the bounding box for the crop
190 cl_float2 bottom_right;
193 // Returned from function that determines start and end values for iteration
194 // around the current frame in a ringbuffer
195 typedef struct IterIndices {
200 typedef struct DeshakeOpenCLContext {
201 OpenCLFilterContext ocf;
202 // Whether or not the above `OpenCLFilterContext` has been initialized
205 // These variables are used in the activate callback
209 // State for random number generation
212 // FIFO frame queue used to buffer future frames for processing
214 // Ringbuffers for frame positions
215 AbsoluteFrameMotion abs_motion;
// The number of frames' motion to consider before and after the frame we are smoothing
220 // The number of the frame we are currently processing
223 // Stores a 1d array of normalised gaussian kernel values for convolution
226 // Buffer for error values used in RANSAC code
229 // Information regarding how to crop the smoothed luminance (or RGB) planes
231 // Information regarding how to crop the smoothed chroma planes
// Whether or not we are processing YUV input (as opposed to RGB)
236 // The underlying format of the hardware surfaces
239 // Buffer to copy `matches` into for the CPU to work with
240 MotionVector *matches_host;
241 MotionVector *matches_contig_host;
243 MotionVector *inliers;
245 cl_command_queue command_queue;
246 cl_kernel kernel_grayscale;
247 cl_kernel kernel_harris_response;
248 cl_kernel kernel_refine_features;
249 cl_kernel kernel_brief_descriptors;
250 cl_kernel kernel_match_descriptors;
251 cl_kernel kernel_transform;
252 cl_kernel kernel_crop_upscale;
254 // Stores a frame converted to grayscale
256 // Stores the harris response for a frame (measure of "cornerness" for each pixel)
259 // Detected features after non-maximum suppression and sub-pixel refinement
260 cl_mem refined_features;
261 // Saved from the previous frame
262 cl_mem prev_refined_features;
264 // BRIEF sampling pattern that is randomly initialized
265 cl_mem brief_pattern;
266 // Feature point descriptors for the current frame
268 // Feature point descriptors for the previous frame
269 cl_mem prev_descriptors;
270 // Vectors between points in current and previous frame
272 cl_mem matches_contig;
273 // Holds the matrix to transform luminance (or RGB) with
275 // Holds the matrix to transform chroma with
278 // Configurable options
284 // Whether or not feature points should be refined at a sub-pixel level
285 cl_int refine_features;
286 // If the user sets a value other than the default, 0, this percentage is
287 // translated into a sigma value ranging from 0.5 to 40.0
288 float smooth_percent;
289 // This number is multiplied by the video frame rate to determine the size
290 // of the smooth window
291 float smooth_window_multiplier;
295 cl_kernel kernel_draw_debug_info;
296 cl_mem debug_matches;
297 cl_mem debug_model_matches;
299 // These store the total time spent executing the different kernels in nanoseconds
300 unsigned long long grayscale_time;
301 unsigned long long harris_response_time;
302 unsigned long long refine_features_time;
303 unsigned long long brief_descriptors_time;
304 unsigned long long match_descriptors_time;
305 unsigned long long transform_time;
306 unsigned long long crop_upscale_time;
308 // Time spent copying matched features from the device to the host
309 unsigned long long read_buf_time;
310 } DeshakeOpenCLContext;
// Returns a random uniformly-distributed number in [low, high)
313 static int rand_in(int low, int high, AVLFG *alfg) {
314 return (av_lfg_get(alfg) % (high - low)) + low;
317 // Returns the average execution time for an event given the total time and the
318 // number of frames processed.
319 static double averaged_event_time_ms(unsigned long long total_time, int num_frames) {
320 return (double)total_time / (double)num_frames / 1000000.0;
323 // The following code is loosely ported from OpenCV
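//
// A sketch of the math involved (not part of the port itself): for three point
// correspondences (x_i, y_i) -> (X_i, Y_i), the affine model
//
//     X_i = model[0]*x_i + model[1]*y_i + model[2]
//     Y_i = model[3]*x_i + model[4]*y_i + model[5]
//
// splits into two independent 3x3 linear systems that share one coefficient
// matrix. run_estimate_kernel solves both with Cramer's rule; the shared
// factor d is the reciprocal of det = x1*(y2-y3) + x2*(y3-y1) + x3*(y1-y2),
// which is twice the signed area of the source triangle. Collinear triples
// (det ~= 0) therefore have to be rejected beforehand (see check_subset).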
325 // Estimates affine transform from 3 point pairs
326 // model is a 2x3 matrix:
329 static void run_estimate_kernel(const MotionVector *point_pairs, double *model)
332 double x1 = point_pairs[0].p.p1.s[0];
333 double y1 = point_pairs[0].p.p1.s[1];
334 double x2 = point_pairs[1].p.p1.s[0];
335 double y2 = point_pairs[1].p.p1.s[1];
336 double x3 = point_pairs[2].p.p1.s[0];
337 double y3 = point_pairs[2].p.p1.s[1];
340 double X1 = point_pairs[0].p.p2.s[0];
341 double Y1 = point_pairs[0].p.p2.s[1];
342 double X2 = point_pairs[1].p.p2.s[0];
343 double Y2 = point_pairs[1].p.p2.s[1];
344 double X3 = point_pairs[2].p.p2.s[0];
345 double Y3 = point_pairs[2].p.p2.s[1];
347 double d = 1.0 / ( x1*(y2-y3) + x2*(y3-y1) + x3*(y1-y2) );
349 model[0] = d * ( X1*(y2-y3) + X2*(y3-y1) + X3*(y1-y2) );
350 model[1] = d * ( X1*(x3-x2) + X2*(x1-x3) + X3*(x2-x1) );
351 model[2] = d * ( X1*(x2*y3 - x3*y2) + X2*(x3*y1 - x1*y3) + X3*(x1*y2 - x2*y1) );
353 model[3] = d * ( Y1*(y2-y3) + Y2*(y3-y1) + Y3*(y1-y2) );
354 model[4] = d * ( Y1*(x3-x2) + Y2*(x1-x3) + Y3*(x2-x1) );
355 model[5] = d * ( Y1*(x2*y3 - x3*y2) + Y2*(x3*y1 - x1*y3) + Y3*(x1*y2 - x2*y1) );
358 // Checks that the 3 points in the given array are not collinear
359 static int points_not_collinear(const cl_float2 **points)
363 for (j = 0; j < i; j++) {
364 double dx1 = points[j]->s[0] - points[i]->s[0];
365 double dy1 = points[j]->s[1] - points[i]->s[1];
367 for (k = 0; k < j; k++) {
368 double dx2 = points[k]->s[0] - points[i]->s[0];
369 double dy2 = points[k]->s[1] - points[i]->s[1];
371 // Assuming a 3840 x 2160 video with a point at (0, 0) and one at
372 // (3839, 2159), this prevents a third point from being within roughly
373 // 0.5 of a pixel of the line connecting the two on both axes
374 if (fabs(dx2*dy1 - dy2*dx1) <= 1.0) {
383 // Checks a subset of 3 point pairs to make sure that the points are not collinear
384 // and not too close to each other
385 static int check_subset(const MotionVector *pairs_subset)
387 const cl_float2 *prev_points[] = {
388 &pairs_subset[0].p.p1,
389 &pairs_subset[1].p.p1,
390 &pairs_subset[2].p.p1
393 const cl_float2 *curr_points[] = {
394 &pairs_subset[0].p.p2,
395 &pairs_subset[1].p.p2,
396 &pairs_subset[2].p.p2
399 return points_not_collinear(prev_points) && points_not_collinear(curr_points);
402 // Selects a random subset of 3 points from point_pairs and places them in pairs_subset
403 static int get_subset(
405 const MotionVector *point_pairs,
406 const int num_point_pairs,
407 MotionVector *pairs_subset,
411 int i = 0, j, iters = 0;
413 for (; iters < max_attempts; iters++) {
414 for (i = 0; i < 3 && iters < max_attempts;) {
418 idx_i = idx[i] = rand_in(0, num_point_pairs, alfg);
420 for (j = 0; j < i; j++) {
421 if (idx_i == idx[j]) {
431 pairs_subset[i] = point_pairs[idx[i]];
435 if (i == 3 && !check_subset(pairs_subset)) {
441 return i == 3 && iters < max_attempts;
444 // Computes the error for each of the given points based on the given model.
445 static void compute_error(
446 const MotionVector *point_pairs,
447 const int num_point_pairs,
451 double F0 = model[0], F1 = model[1], F2 = model[2];
452 double F3 = model[3], F4 = model[4], F5 = model[5];
454 for (int i = 0; i < num_point_pairs; i++) {
455 const cl_float2 *f = &point_pairs[i].p.p1;
456 const cl_float2 *t = &point_pairs[i].p.p2;
458 double a = F0*f->s[0] + F1*f->s[1] + F2 - t->s[0];
459 double b = F3*f->s[0] + F4*f->s[1] + F5 - t->s[1];
465 // Determines which of the given point matches are inliers for the given model
466 // based on the specified threshold.
468 // err must be an array of num_point_pairs length
469 static int find_inliers(
470 MotionVector *point_pairs,
471 const int num_point_pairs,
476 float t = (float)(thresh * thresh);
477 int i, n = num_point_pairs, num_inliers = 0;
479 compute_error(point_pairs, num_point_pairs, model, err);
481 for (i = 0; i < n; i++) {
484 point_pairs[i].should_consider = 1;
487 point_pairs[i].should_consider = 0;
494 // Determines the number of iterations required to achieve the desired confidence level.
496 // The equation used to determine the number of iterations to do is:
497 // 1 - confidence = (1 - inlier_probability^num_points)^num_iters
499 // Solving for num_iters:
501 // num_iters = log(1 - confidence) / log(1 - inlier_probability^num_points)
503 // A more in-depth explanation can be found at https://en.wikipedia.org/wiki/Random_sample_consensus
504 // under the 'Parameters' heading
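//
// As a worked example with illustrative numbers: for confidence = 0.99 and an
// outlier fraction of 0.5, a random 3-point sample is all inliers with
// probability 0.5^3 = 0.125, giving
// num_iters = log(0.01) / log(1 - 0.125) ~= 34.5, i.e. roughly 35 iterations.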
505 static int ransac_update_num_iters(double confidence, double num_outliers, int max_iters)
509 confidence = av_clipd(confidence, 0.0, 1.0);
510 num_outliers = av_clipd(num_outliers, 0.0, 1.0);
512 // avoid inf's & nan's
513 num = FFMAX(1.0 - confidence, DBL_MIN);
514 denom = 1.0 - pow(1.0 - num_outliers, 3);
if (denom < DBL_MIN) {
return 0;
}

num = log(num);
denom = log(denom);

return denom >= 0 || -num >= max_iters * (-denom) ? max_iters : (int)round(num / denom);
// Estimates an affine transform between the given pairs of points using RANdom SAmple Consensus (RANSAC)
527 static int estimate_affine_2d(
528 DeshakeOpenCLContext *deshake_ctx,
529 MotionVector *point_pairs,
530 DebugMatches *debug_matches,
531 const int num_point_pairs,
533 const double threshold,
535 const double confidence
538 double best_model[6], model[6];
539 MotionVector pairs_subset[3], best_pairs[3];
541 int iter, niters = FFMAX(max_iters, 1);
542 int good_count, max_good_count = 0;
544 // We need at least 3 points to build a model from
545 if (num_point_pairs < 3) {
547 } else if (num_point_pairs == 3) {
548 // There are only 3 points, so RANSAC doesn't apply here
549 run_estimate_kernel(point_pairs, model_out);
551 for (int i = 0; i < 3; ++i) {
552 point_pairs[i].should_consider = 1;
558 for (iter = 0; iter < niters; ++iter) {
559 int found = get_subset(&deshake_ctx->alfg, point_pairs, num_point_pairs, pairs_subset, 10000);
569 run_estimate_kernel(pairs_subset, model);
570 good_count = find_inliers(point_pairs, num_point_pairs, model, deshake_ctx->ransac_err, threshold);
572 if (good_count > FFMAX(max_good_count, 2)) {
573 for (int mi = 0; mi < 6; ++mi) {
574 best_model[mi] = model[mi];
577 for (int pi = 0; pi < 3; pi++) {
578 best_pairs[pi] = pairs_subset[pi];
581 max_good_count = good_count;
582 niters = ransac_update_num_iters(
584 (double)(num_point_pairs - good_count) / num_point_pairs,
590 if (max_good_count > 0) {
591 for (int mi = 0; mi < 6; ++mi) {
592 model_out[mi] = best_model[mi];
595 for (int pi = 0; pi < 3; ++pi) {
596 debug_matches->model_matches[pi] = best_pairs[pi];
598 debug_matches->num_model_matches = 3;
600 // Find the inliers again for the best model for debugging
601 find_inliers(point_pairs, num_point_pairs, best_model, deshake_ctx->ransac_err, threshold);
608 // "Wiggles" the first point in best_pairs around a tiny bit in order to decrease the
610 static void optimize_model(
611 DeshakeOpenCLContext *deshake_ctx,
612 MotionVector *best_pairs,
613 MotionVector *inliers,
614 const int num_inliers,
618 float move_x_val = 0.01;
619 float move_y_val = 0.01;
621 float old_move_x_val = 0;
623 int last_changed = 0;
625 for (int iters = 0; iters < 200; iters++) {
629 best_pairs[0].p.p2.s[0] += move_x_val;
best_pairs[0].p.p2.s[1] += move_y_val;
634 run_estimate_kernel(best_pairs, model);
635 compute_error(inliers, num_inliers, model, deshake_ctx->ransac_err);
637 for (int j = 0; j < num_inliers; j++) {
638 total_err += deshake_ctx->ransac_err[j];
641 if (total_err < best_err) {
642 for (int mi = 0; mi < 6; ++mi) {
643 model_out[mi] = model[mi];
646 best_err = total_err;
647 last_changed = iters;
651 best_pairs[0].p.p2.s[0] -= move_x_val;
best_pairs[0].p.p2.s[1] -= move_y_val;
656 if (iters - last_changed > 4) {
657 // We've already improved the model as much as we can
661 old_move_x_val = move_x_val;
669 if (old_move_x_val < 0) {
678 // Uses a process similar to that of RANSAC to find a transform that minimizes
679 // the total error for a set of point matches determined to be inliers
// (Pick random subsets, compute model, find total error, iterate until error is minimized.)
683 static int minimize_error(
684 DeshakeOpenCLContext *deshake_ctx,
685 MotionVector *inliers,
686 DebugMatches *debug_matches,
687 const int num_inliers,
692 float best_err = FLT_MAX;
693 double best_model[6], model[6];
694 MotionVector pairs_subset[3], best_pairs[3];
696 for (int i = 0; i < max_iters; i++) {
698 int found = get_subset(&deshake_ctx->alfg, inliers, num_inliers, pairs_subset, 10000);
708 run_estimate_kernel(pairs_subset, model);
709 compute_error(inliers, num_inliers, model, deshake_ctx->ransac_err);
711 for (int j = 0; j < num_inliers; j++) {
712 total_err += deshake_ctx->ransac_err[j];
715 if (total_err < best_err) {
716 for (int mi = 0; mi < 6; ++mi) {
717 best_model[mi] = model[mi];
720 for (int pi = 0; pi < 3; pi++) {
721 best_pairs[pi] = pairs_subset[pi];
724 best_err = total_err;
728 for (int mi = 0; mi < 6; ++mi) {
729 model_out[mi] = best_model[mi];
732 for (int pi = 0; pi < 3; ++pi) {
733 debug_matches->model_matches[pi] = best_pairs[pi];
735 debug_matches->num_model_matches = 3;
738 optimize_model(deshake_ctx, best_pairs, inliers, num_inliers, best_err, model_out);
742 // End code from OpenCV
744 // Decomposes a similarity matrix into translation, rotation, scale, and skew
746 // See http://frederic-wang.fr/decomposition-of-2d-transform-matrices.html
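//
// Rough shape of the QR branch below: with delta the determinant of the 2x2
// linear part, r = hypot(a, b) gives one scale factor, sign(b) * acos(a / r)
// the rotation, delta / r the other scale (so a reflection surfaces as a
// negative scale), and atan((a*c + b*d) / r^2) the skew. The second branch
// handles the symmetric degenerate case.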
747 static FrameDelta decompose_transform(double *model)
757 double delta = a * d - b * c;
759 memset(&ret, 0, sizeof(ret));
761 ret.translation.s[0] = e;
762 ret.translation.s[1] = f;
764 // This is the QR method
765 if (a != 0 || b != 0) {
766 double r = hypot(a, b);
768 ret.rotation = FFSIGN(b) * acos(a / r);
770 ret.scale.s[1] = delta / r;
771 ret.skew.s[0] = atan((a * c + b * d) / (r * r));
773 } else if (c != 0 || d != 0) {
774 double s = sqrt(c * c + d * d);
776 ret.rotation = M_PI / 2 - FFSIGN(d) * acos(-c / s);
777 ret.scale.s[0] = delta / s;
780 ret.skew.s[1] = atan((a * c + b * d) / (s * s));
781 } // otherwise there is only translation
786 // Move valid vectors from the 2d buffer into a 1d buffer where they are contiguous
787 static int make_vectors_contig(
788 DeshakeOpenCLContext *deshake_ctx,
794 for (int i = 0; i < size_y; ++i) {
795 for (int j = 0; j < size_x; ++j) {
796 MotionVector v = deshake_ctx->matches_host[j + i * size_x];
798 if (v.should_consider) {
799 deshake_ctx->matches_contig_host[num_vectors] = v;
803 // Make sure we do not exceed the amount of space we allocated for these vectors
804 if (num_vectors == MATCHES_CONTIG_SIZE - 1) {
// Returns the unnormalized gaussian kernel value for the given x coordinate and
// sigma value (note 1 / exp(u) == exp(-u)); the constant factor is omitted
// because make_gauss_kernel normalizes the kernel to sum to 1 afterwards
813 static float gaussian_for(int x, float sigma) {
814 return 1.0f / expf(((float)x * (float)x) / (2.0f * sigma * sigma));
817 // Makes a normalized gaussian kernel of the given length for the given sigma
818 // and places it in gauss_kernel
819 static void make_gauss_kernel(float *gauss_kernel, float length, float sigma)
822 int window_half = length / 2;
824 for (int i = 0; i < length; ++i) {
825 float val = gaussian_for(i - window_half, sigma);
828 gauss_kernel[i] = val;
831 // Normalize the gaussian values
832 for (int i = 0; i < length; ++i) {
833 gauss_kernel[i] /= gauss_sum;
// Returns indices to start and end iteration at in order to iterate over a window
// of the given length centered at the current frame in a ringbuffer

// Always returns indices that result in a window of the given length, even if that
// means specifying negative indices or indices past the end of the values in the
// ringbuffers. Make sure you clip indices appropriately within your loop.
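//
// For example, with curr_frame_offset == 2 and length == 7 this returns
// start == -1 and end == 6: seven indices (-1 through 5), of which the
// negative one gets clipped to 0 by ringbuf_float_at.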
843 static IterIndices start_end_for(DeshakeOpenCLContext *deshake_ctx, int length) {
846 indices.start = deshake_ctx->abs_motion.curr_frame_offset - (length / 2);
847 indices.end = deshake_ctx->abs_motion.curr_frame_offset + (length / 2) + (length % 2);
852 // Sets val to the value in the given ringbuffer at the given offset, taking care of
853 // clipping the offset into the appropriate range
854 static void ringbuf_float_at(
855 DeshakeOpenCLContext *deshake_ctx,
856 AVFifoBuffer *values,
860 int clip_start, clip_end, offset_clipped;
861 if (deshake_ctx->abs_motion.data_end_offset != -1) {
862 clip_end = deshake_ctx->abs_motion.data_end_offset;
864 // This expression represents the last valid index in the buffer,
865 // which we use repeatedly at the end of the video.
866 clip_end = deshake_ctx->smooth_window - (av_fifo_space(values) / sizeof(float)) - 1;
869 if (deshake_ctx->abs_motion.data_start_offset != -1) {
870 clip_start = deshake_ctx->abs_motion.data_start_offset;
872 // Negative indices will occur at the start of the video, and we want
// them to be clipped to 0 in order to repeatedly use the position of the first frame
878 offset_clipped = av_clip(
884 av_fifo_generic_peek_at(
887 offset_clipped * sizeof(float),
// Returns the smoothed value for the current frame from the given buffer of floats, based on the
894 // given Gaussian kernel and its length (also the window length, centered around the
895 // current frame) and the "maximum value" of the motion.
897 // This "maximum value" should be the width / height of the image in the case of
898 // translation and an empirically chosen constant for rotation / scale.
// The sigma used to generate the final gaussian kernel that smooths the camera
// path is either fixed (derived from the user-set deshake_ctx->smooth_percent)
// or adaptively chosen.
904 DeshakeOpenCLContext *deshake_ctx,
910 float new_large_s = 0, new_small_s = 0, new_best = 0, old, diff_between,
911 percent_of_max, inverted_percent;
912 IterIndices indices = start_end_for(deshake_ctx, length);
913 float large_sigma = 40.0f;
914 float small_sigma = 2.0f;
917 if (deshake_ctx->smooth_percent) {
918 best_sigma = (large_sigma - 0.5f) * deshake_ctx->smooth_percent + 0.5f;
920 // Strategy to adaptively smooth trajectory:
922 // 1. Smooth path with large and small sigma values
923 // 2. Take the absolute value of the difference between them
924 // 3. Get a percentage by putting the difference over the "max value"
// 4. Invert the percentage
926 // 5. Calculate a new sigma value weighted towards the larger sigma value
927 // 6. Determine final smoothed trajectory value using that sigma
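//
// Illustrative numbers: for x translation on a 1920-pixel-wide frame, if the
// large- and small-sigma trajectories differ by ~96 px then
// percent_of_max = 0.05 and best_sigma = 40 * 0.95^40 ~= 5.1; the more the
// two estimates agree, the closer the final sigma stays to 40.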
929 make_gauss_kernel(gauss_kernel, length, large_sigma);
930 for (int i = indices.start, j = 0; i < indices.end; ++i, ++j) {
931 ringbuf_float_at(deshake_ctx, values, &old, i);
932 new_large_s += old * gauss_kernel[j];
935 make_gauss_kernel(gauss_kernel, length, small_sigma);
936 for (int i = indices.start, j = 0; i < indices.end; ++i, ++j) {
937 ringbuf_float_at(deshake_ctx, values, &old, i);
938 new_small_s += old * gauss_kernel[j];
941 diff_between = fabsf(new_large_s - new_small_s);
942 percent_of_max = diff_between / max_val;
943 inverted_percent = 1 - percent_of_max;
944 best_sigma = large_sigma * powf(inverted_percent, 40);
947 make_gauss_kernel(gauss_kernel, length, best_sigma);
948 for (int i = indices.start, j = 0; i < indices.end; ++i, ++j) {
949 ringbuf_float_at(deshake_ctx, values, &old, i);
950 new_best += old * gauss_kernel[j];
956 // Returns the position of the given point after the transform is applied
957 static cl_float2 transformed_point(float x, float y, float *transform) {
960 ret.s[0] = x * transform[0] + y * transform[1] + transform[2];
961 ret.s[1] = x * transform[3] + y * transform[4] + transform[5];
966 // Creates an affine transform that scales from the center of a frame
967 static void transform_center_scale(
978 float center_s_w, center_s_h;
989 center_s = transformed_point(center_w, center_h, matrix);
990 center_s_w = center_w - center_s.s[0];
991 center_s_h = center_h - center_s.s[1];
994 x_shift + center_s_w,
995 y_shift + center_s_h,
// Determines the crop necessary to eliminate black borders from a smoothed frame
// and updates the target crop accordingly
1005 static void update_needed_crop(
1011 float new_width, new_height, adjusted_width, adjusted_height, adjusted_x, adjusted_y;
1013 cl_float2 top_left = transformed_point(0, 0, transform);
1014 cl_float2 top_right = transformed_point(frame_width, 0, transform);
1015 cl_float2 bottom_left = transformed_point(0, frame_height, transform);
1016 cl_float2 bottom_right = transformed_point(frame_width, frame_height, transform);
1017 float ar_h = frame_height / frame_width;
1018 float ar_w = frame_width / frame_height;
1020 if (crop->bottom_right.s[0] == 0) {
// The crop hasn't yet been initialized to the original size of the plane
1022 crop->bottom_right.s[0] = frame_width;
1023 crop->bottom_right.s[1] = frame_height;
1026 crop->top_left.s[0] = FFMAX3(
1027 crop->top_left.s[0],
1032 crop->top_left.s[1] = FFMAX3(
1033 crop->top_left.s[1],
1038 crop->bottom_right.s[0] = FFMIN3(
1039 crop->bottom_right.s[0],
1044 crop->bottom_right.s[1] = FFMIN3(
1045 crop->bottom_right.s[1],
1050 // Make sure our potentially new bounding box has the same aspect ratio
1051 new_height = crop->bottom_right.s[1] - crop->top_left.s[1];
1052 new_width = crop->bottom_right.s[0] - crop->top_left.s[0];
1054 adjusted_width = new_height * ar_w;
1055 adjusted_x = crop->bottom_right.s[0] - adjusted_width;
1057 if (adjusted_x >= crop->top_left.s[0]) {
1058 crop->top_left.s[0] = adjusted_x;
1060 adjusted_height = new_width * ar_h;
1061 adjusted_y = crop->bottom_right.s[1] - adjusted_height;
1062 crop->top_left.s[1] = adjusted_y;
1066 static av_cold void deshake_opencl_uninit(AVFilterContext *avctx)
1068 DeshakeOpenCLContext *ctx = avctx->priv;
1071 for (int i = 0; i < RingbufCount; i++)
1072 av_fifo_freep(&ctx->abs_motion.ringbuffers[i]);
1075 free_debug_matches(&ctx->abs_motion);
1077 if (ctx->gauss_kernel)
1078 av_freep(&ctx->gauss_kernel);
1080 if (ctx->ransac_err)
1081 av_freep(&ctx->ransac_err);
1083 if (ctx->matches_host)
1084 av_freep(&ctx->matches_host);
1086 if (ctx->matches_contig_host)
1087 av_freep(&ctx->matches_contig_host);
1090 av_freep(&ctx->inliers);
1092 ff_framequeue_free(&ctx->fq);
1094 CL_RELEASE_KERNEL(ctx->kernel_grayscale);
1095 CL_RELEASE_KERNEL(ctx->kernel_harris_response);
1096 CL_RELEASE_KERNEL(ctx->kernel_refine_features);
1097 CL_RELEASE_KERNEL(ctx->kernel_brief_descriptors);
1098 CL_RELEASE_KERNEL(ctx->kernel_match_descriptors);
1099 CL_RELEASE_KERNEL(ctx->kernel_crop_upscale);
1101 CL_RELEASE_KERNEL(ctx->kernel_draw_debug_info);
1103 CL_RELEASE_QUEUE(ctx->command_queue);
1106 CL_RELEASE_MEMORY(ctx->grayscale);
1107 CL_RELEASE_MEMORY(ctx->harris_buf);
1108 CL_RELEASE_MEMORY(ctx->refined_features);
1109 CL_RELEASE_MEMORY(ctx->prev_refined_features);
1110 CL_RELEASE_MEMORY(ctx->brief_pattern);
1111 CL_RELEASE_MEMORY(ctx->descriptors);
1112 CL_RELEASE_MEMORY(ctx->prev_descriptors);
1113 CL_RELEASE_MEMORY(ctx->matches);
1114 CL_RELEASE_MEMORY(ctx->matches_contig);
1115 CL_RELEASE_MEMORY(ctx->transform_y);
1116 CL_RELEASE_MEMORY(ctx->transform_uv);
1117 if (ctx->debug_on) {
1118 CL_RELEASE_MEMORY(ctx->debug_matches);
1119 CL_RELEASE_MEMORY(ctx->debug_model_matches);
1122 ff_opencl_filter_uninit(avctx);
1125 static int deshake_opencl_init(AVFilterContext *avctx)
1127 DeshakeOpenCLContext *ctx = avctx->priv;
1128 AVFilterLink *outlink = avctx->outputs[0];
1129 AVFilterLink *inlink = avctx->inputs[0];
// Pointer to the host-side pattern buffer, to be initialized and then copied to the device
1132 PointPair *pattern_host = NULL;
1135 cl_ulong8 zeroed_ulong8;
1136 FFFrameQueueGlobal fqg;
1137 cl_image_format grayscale_format;
1138 cl_image_desc grayscale_desc;
1139 cl_command_queue_properties queue_props;
1141 const enum AVPixelFormat disallowed_formats[14] = {
1145 AV_PIX_FMT_GBRP10BE,
1146 AV_PIX_FMT_GBRP10LE,
1147 AV_PIX_FMT_GBRP16BE,
1148 AV_PIX_FMT_GBRP16LE,
1150 AV_PIX_FMT_GBRAP16BE,
1151 AV_PIX_FMT_GBRAP16LE,
1152 AV_PIX_FMT_GBRAP12BE,
1153 AV_PIX_FMT_GBRAP12LE,
1154 AV_PIX_FMT_GBRAP10BE,
1155 AV_PIX_FMT_GBRAP10LE
// Number of 32x32 blocks in the output frame; used to size the per-block feature and descriptor buffers
1159 const int image_grid_32 = ROUNDED_UP_DIV(outlink->h, 32) * ROUNDED_UP_DIV(outlink->w, 32);
1161 const int descriptor_buf_size = image_grid_32 * (BREIFN / 8);
1162 const int features_buf_size = image_grid_32 * sizeof(cl_float2);
1164 const AVHWFramesContext *hw_frames_ctx = (AVHWFramesContext*)inlink->hw_frames_ctx->data;
1165 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(hw_frames_ctx->sw_format);
1167 av_assert0(hw_frames_ctx);
1170 ff_framequeue_global_init(&fqg);
1171 ff_framequeue_init(&ctx->fq, &fqg);
1173 ctx->smooth_window = (int)(av_q2d(avctx->inputs[0]->frame_rate) * ctx->smooth_window_multiplier);
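// e.g. 30 fps input with the default multiplier of 2.0 yields a 60-frame
// window, i.e. roughly one second of future motion data gets buffered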
1174 ctx->curr_frame = 0;
1176 memset(&zeroed_ulong8, 0, sizeof(cl_ulong8));
1178 ctx->gauss_kernel = av_malloc_array(ctx->smooth_window, sizeof(float));
1179 if (!ctx->gauss_kernel) {
1180 err = AVERROR(ENOMEM);
1184 ctx->ransac_err = av_malloc_array(MATCHES_CONTIG_SIZE, sizeof(float));
1185 if (!ctx->ransac_err) {
1186 err = AVERROR(ENOMEM);
1190 for (int i = 0; i < RingbufCount; i++) {
1191 ctx->abs_motion.ringbuffers[i] = av_fifo_alloc_array(
1196 if (!ctx->abs_motion.ringbuffers[i]) {
1197 err = AVERROR(ENOMEM);
1202 if (ctx->debug_on) {
1203 ctx->abs_motion.debug_matches = av_fifo_alloc_array(
1204 ctx->smooth_window / 2,
1205 sizeof(DebugMatches)
1208 if (!ctx->abs_motion.debug_matches) {
1209 err = AVERROR(ENOMEM);
1214 ctx->abs_motion.curr_frame_offset = 0;
1215 ctx->abs_motion.data_start_offset = -1;
1216 ctx->abs_motion.data_end_offset = -1;
1218 pattern_host = av_malloc_array(BREIFN, sizeof(PointPair));
1219 if (!pattern_host) {
1220 err = AVERROR(ENOMEM);
1224 ctx->matches_host = av_malloc_array(image_grid_32, sizeof(MotionVector));
1225 if (!ctx->matches_host) {
1226 err = AVERROR(ENOMEM);
1230 ctx->matches_contig_host = av_malloc_array(MATCHES_CONTIG_SIZE, sizeof(MotionVector));
1231 if (!ctx->matches_contig_host) {
1232 err = AVERROR(ENOMEM);
1236 ctx->inliers = av_malloc_array(MATCHES_CONTIG_SIZE, sizeof(MotionVector));
1237 if (!ctx->inliers) {
1238 err = AVERROR(ENOMEM);
// Initialize the random sampling pattern used to build BRIEF descriptors: each
// pair holds two offsets within the 31x31 patch around a feature point, and
// each descriptor bit comes from comparing image intensity at those two offsets
1243 av_lfg_init(&ctx->alfg, 234342424);
1244 for (int i = 0; i < BREIFN; ++i) {
1247 for (int j = 0; j < 2; ++j) {
1248 pair.p1.s[j] = rand_in(-BRIEF_PATCH_SIZE_HALF, BRIEF_PATCH_SIZE_HALF + 1, &ctx->alfg);
1249 pair.p2.s[j] = rand_in(-BRIEF_PATCH_SIZE_HALF, BRIEF_PATCH_SIZE_HALF + 1, &ctx->alfg);
1252 pattern_host[i] = pair;
for (int i = 0; i < FF_ARRAY_ELEMS(disallowed_formats); i++) {
if (hw_frames_ctx->sw_format == disallowed_formats[i]) {
1257 av_log(avctx, AV_LOG_ERROR, "unsupported format in deshake_opencl.\n");
1258 err = AVERROR(ENOSYS);
1263 if (desc->flags & AV_PIX_FMT_FLAG_RGB) {
1268 ctx->sw_format = hw_frames_ctx->sw_format;
1270 err = ff_opencl_filter_load_program(avctx, &ff_opencl_source_deshake, 1);
1274 if (ctx->debug_on) {
1275 queue_props = CL_QUEUE_PROFILING_ENABLE;
1279 ctx->command_queue = clCreateCommandQueue(
1280 ctx->ocf.hwctx->context,
1281 ctx->ocf.hwctx->device_id,
1285 CL_FAIL_ON_ERROR(AVERROR(EIO), "Failed to create OpenCL command queue %d.\n", cle);
1287 CL_CREATE_KERNEL(ctx, grayscale);
1288 CL_CREATE_KERNEL(ctx, harris_response);
1289 CL_CREATE_KERNEL(ctx, refine_features);
1290 CL_CREATE_KERNEL(ctx, brief_descriptors);
1291 CL_CREATE_KERNEL(ctx, match_descriptors);
1292 CL_CREATE_KERNEL(ctx, transform);
1293 CL_CREATE_KERNEL(ctx, crop_upscale);
1295 CL_CREATE_KERNEL(ctx, draw_debug_info);
1298 grayscale_format.image_channel_order = CL_R;
1299 grayscale_format.image_channel_data_type = CL_FLOAT;
1301 grayscale_desc = (cl_image_desc) {
1302 .image_type = CL_MEM_OBJECT_IMAGE2D,
1303 .image_width = outlink->w,
1304 .image_height = outlink->h,
1306 .image_array_size = 0,
1307 .image_row_pitch = 0,
1308 .image_slice_pitch = 0,
1309 .num_mip_levels = 0,
1314 ctx->grayscale = clCreateImage(
1315 ctx->ocf.hwctx->context,
1322 CL_FAIL_ON_ERROR(AVERROR(EIO), "Failed to create grayscale image: %d.\n", cle);
1325 CL_CREATE_BUFFER(ctx, harris_buf, outlink->h * outlink->w * sizeof(float));
1326 CL_CREATE_BUFFER(ctx, refined_features, features_buf_size);
1327 CL_CREATE_BUFFER(ctx, prev_refined_features, features_buf_size);
1328 CL_CREATE_BUFFER_FLAGS(
1331 CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
1332 BREIFN * sizeof(PointPair),
1335 CL_CREATE_BUFFER(ctx, descriptors, descriptor_buf_size);
1336 CL_CREATE_BUFFER(ctx, prev_descriptors, descriptor_buf_size);
1337 CL_CREATE_BUFFER(ctx, matches, image_grid_32 * sizeof(MotionVector));
1338 CL_CREATE_BUFFER(ctx, matches_contig, MATCHES_CONTIG_SIZE * sizeof(MotionVector));
1339 CL_CREATE_BUFFER(ctx, transform_y, 9 * sizeof(float));
1340 CL_CREATE_BUFFER(ctx, transform_uv, 9 * sizeof(float));
1341 if (ctx->debug_on) {
1342 CL_CREATE_BUFFER(ctx, debug_matches, MATCHES_CONTIG_SIZE * sizeof(MotionVector));
1343 CL_CREATE_BUFFER(ctx, debug_model_matches, 3 * sizeof(MotionVector));
1346 ctx->initialized = 1;
1347 av_freep(&pattern_host);
1352 av_freep(&pattern_host);
1356 // Logs debug information about the transform data
1357 static void transform_debug(AVFilterContext *avctx, float *new_vals, float *old_vals, int curr_frame) {
1358 av_log(avctx, AV_LOG_VERBOSE,
1360 "\tframe moved from: %f x, %f y\n"
1361 "\t to: %f x, %f y\n"
1362 "\t rotated from: %f degrees\n"
1363 "\t to: %f degrees\n"
1364 "\t scaled from: %f x, %f y\n"
1365 "\t to: %f x, %f y\n"
1367 "\tframe moved by: %f x, %f y\n"
1368 "\t rotated by: %f degrees\n"
1369 "\t scaled by: %f x, %f y\n",
1371 old_vals[RingbufX], old_vals[RingbufY],
1372 new_vals[RingbufX], new_vals[RingbufY],
1373 old_vals[RingbufRot] * (180.0 / M_PI),
1374 new_vals[RingbufRot] * (180.0 / M_PI),
1375 old_vals[RingbufScaleX], old_vals[RingbufScaleY],
1376 new_vals[RingbufScaleX], new_vals[RingbufScaleY],
1377 old_vals[RingbufX] - new_vals[RingbufX], old_vals[RingbufY] - new_vals[RingbufY],
1378 old_vals[RingbufRot] * (180.0 / M_PI) - new_vals[RingbufRot] * (180.0 / M_PI),
1379 new_vals[RingbufScaleX] / old_vals[RingbufScaleX], new_vals[RingbufScaleY] / old_vals[RingbufScaleY]
1383 // Uses the buffered motion information to determine a transform that smooths the
1384 // given frame and applies it
1385 static int filter_frame(AVFilterLink *link, AVFrame *input_frame)
1387 AVFilterContext *avctx = link->dst;
1388 AVFilterLink *outlink = avctx->outputs[0];
1389 DeshakeOpenCLContext *deshake_ctx = avctx->priv;
1390 AVFrame *cropped_frame = NULL, *transformed_frame = NULL;
1393 float new_vals[RingbufCount];
1394 float old_vals[RingbufCount];
1395 // Luma (in the case of YUV) transform, or just the transform in the case of RGB
1396 float transform_y[9];
1398 float transform_uv[9];
1399 // Luma crop transform (or RGB)
1400 float transform_crop_y[9];
1401 // Chroma crop transform
1402 float transform_crop_uv[9];
1403 float transform_debug_rgb[9];
1404 size_t global_work[2];
1406 cl_mem src, transformed, dst;
1407 cl_mem transforms[3];
1409 cl_event transform_event, crop_upscale_event;
1410 DebugMatches debug_matches;
1411 cl_int num_model_matches;
1413 const float center_w = (float)input_frame->width / 2;
1414 const float center_h = (float)input_frame->height / 2;
1416 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(deshake_ctx->sw_format);
1417 const int chroma_width = AV_CEIL_RSHIFT(input_frame->width, desc->log2_chroma_w);
1418 const int chroma_height = AV_CEIL_RSHIFT(input_frame->height, desc->log2_chroma_h);
1420 const float center_w_chroma = (float)chroma_width / 2;
1421 const float center_h_chroma = (float)chroma_height / 2;
1423 const float luma_w_over_chroma_w = ((float)input_frame->width / (float)chroma_width);
1424 const float luma_h_over_chroma_h = ((float)input_frame->height / (float)chroma_height);
1426 if (deshake_ctx->debug_on) {
1427 av_fifo_generic_read(
1428 deshake_ctx->abs_motion.debug_matches,
1430 sizeof(DebugMatches),
1435 if (input_frame->pkt_duration) {
1436 duration = input_frame->pkt_duration;
1438 duration = av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base);
1440 deshake_ctx->duration = input_frame->pts + duration;
1442 // Get the absolute transform data for this frame
1443 for (int i = 0; i < RingbufCount; i++) {
1444 av_fifo_generic_peek_at(
1445 deshake_ctx->abs_motion.ringbuffers[i],
1447 deshake_ctx->abs_motion.curr_frame_offset * sizeof(float),
1453 if (deshake_ctx->tripod_mode) {
1454 // If tripod mode is turned on we simply undo all motion relative to the
1457 new_vals[RingbufX] = 0.0f;
1458 new_vals[RingbufY] = 0.0f;
1459 new_vals[RingbufRot] = 0.0f;
1460 new_vals[RingbufScaleX] = 1.0f;
1461 new_vals[RingbufScaleY] = 1.0f;
1463 // Tripod mode is off and we need to smooth a moving camera
1465 new_vals[RingbufX] = smooth(
1467 deshake_ctx->gauss_kernel,
1468 deshake_ctx->smooth_window,
1470 deshake_ctx->abs_motion.ringbuffers[RingbufX]
1472 new_vals[RingbufY] = smooth(
1474 deshake_ctx->gauss_kernel,
1475 deshake_ctx->smooth_window,
1476 input_frame->height,
1477 deshake_ctx->abs_motion.ringbuffers[RingbufY]
1479 new_vals[RingbufRot] = smooth(
1481 deshake_ctx->gauss_kernel,
1482 deshake_ctx->smooth_window,
1484 deshake_ctx->abs_motion.ringbuffers[RingbufRot]
1486 new_vals[RingbufScaleX] = smooth(
1488 deshake_ctx->gauss_kernel,
1489 deshake_ctx->smooth_window,
1491 deshake_ctx->abs_motion.ringbuffers[RingbufScaleX]
1493 new_vals[RingbufScaleY] = smooth(
1495 deshake_ctx->gauss_kernel,
1496 deshake_ctx->smooth_window,
1498 deshake_ctx->abs_motion.ringbuffers[RingbufScaleY]
1502 transform_center_scale(
1503 old_vals[RingbufX] - new_vals[RingbufX],
1504 old_vals[RingbufY] - new_vals[RingbufY],
1505 old_vals[RingbufRot] - new_vals[RingbufRot],
1506 new_vals[RingbufScaleX] / old_vals[RingbufScaleX],
1507 new_vals[RingbufScaleY] / old_vals[RingbufScaleY],
1513 transform_center_scale(
1514 (old_vals[RingbufX] - new_vals[RingbufX]) / luma_w_over_chroma_w,
1515 (old_vals[RingbufY] - new_vals[RingbufY]) / luma_h_over_chroma_h,
1516 old_vals[RingbufRot] - new_vals[RingbufRot],
1517 new_vals[RingbufScaleX] / old_vals[RingbufScaleX],
1518 new_vals[RingbufScaleY] / old_vals[RingbufScaleY],
1524 CL_BLOCKING_WRITE_BUFFER(deshake_ctx->command_queue, deshake_ctx->transform_y, 9 * sizeof(float), transform_y, NULL);
1525 CL_BLOCKING_WRITE_BUFFER(deshake_ctx->command_queue, deshake_ctx->transform_uv, 9 * sizeof(float), transform_uv, NULL);
1527 if (deshake_ctx->debug_on)
1528 transform_debug(avctx, new_vals, old_vals, deshake_ctx->curr_frame);
1530 cropped_frame = ff_get_video_buffer(outlink, outlink->w, outlink->h);
1531 if (!cropped_frame) {
1532 err = AVERROR(ENOMEM);
1536 transformed_frame = ff_get_video_buffer(outlink, outlink->w, outlink->h);
1537 if (!transformed_frame) {
1538 err = AVERROR(ENOMEM);
1542 transforms[0] = deshake_ctx->transform_y;
1543 transforms[1] = transforms[2] = deshake_ctx->transform_uv;
1545 for (int p = 0; p < FF_ARRAY_ELEMS(transformed_frame->data); p++) {
1546 // Transform all of the planes appropriately
1547 src = (cl_mem)input_frame->data[p];
1548 transformed = (cl_mem)transformed_frame->data[p];
1553 err = ff_opencl_filter_work_size_from_image(avctx, global_work, input_frame, p, 0);
1557 CL_RUN_KERNEL_WITH_ARGS(
1558 deshake_ctx->command_queue,
1559 deshake_ctx->kernel_transform,
1563 { sizeof(cl_mem), &src },
1564 { sizeof(cl_mem), &transformed },
1565 { sizeof(cl_mem), &transforms[p] },
1569 if (deshake_ctx->debug_on && !deshake_ctx->is_yuv && debug_matches.num_matches > 0) {
1570 CL_BLOCKING_WRITE_BUFFER(
1571 deshake_ctx->command_queue,
1572 deshake_ctx->debug_matches,
1573 debug_matches.num_matches * sizeof(MotionVector),
1574 debug_matches.matches,
1578 CL_BLOCKING_WRITE_BUFFER(
1579 deshake_ctx->command_queue,
1580 deshake_ctx->debug_model_matches,
1581 debug_matches.num_model_matches * sizeof(MotionVector),
1582 debug_matches.model_matches,
1586 num_model_matches = debug_matches.num_model_matches;
1588 // Invert the transform
1589 transform_center_scale(
1590 new_vals[RingbufX] - old_vals[RingbufX],
1591 new_vals[RingbufY] - old_vals[RingbufY],
1592 new_vals[RingbufRot] - old_vals[RingbufRot],
1593 old_vals[RingbufScaleX] / new_vals[RingbufScaleX],
1594 old_vals[RingbufScaleY] / new_vals[RingbufScaleY],
1600 CL_BLOCKING_WRITE_BUFFER(deshake_ctx->command_queue, deshake_ctx->transform_y, 9 * sizeof(float), transform_debug_rgb, NULL);
1602 transformed = (cl_mem)transformed_frame->data[0];
1603 CL_RUN_KERNEL_WITH_ARGS(
1604 deshake_ctx->command_queue,
1605 deshake_ctx->kernel_draw_debug_info,
1606 (size_t[]){ debug_matches.num_matches },
1609 { sizeof(cl_mem), &transformed },
1610 { sizeof(cl_mem), &deshake_ctx->debug_matches },
1611 { sizeof(cl_mem), &deshake_ctx->debug_model_matches },
1612 { sizeof(cl_int), &num_model_matches },
1613 { sizeof(cl_mem), &deshake_ctx->transform_y }
1617 if (deshake_ctx->should_crop) {
1618 // Generate transforms for cropping
1619 transform_center_scale(
1620 (old_vals[RingbufX] - new_vals[RingbufX]) / 5,
1621 (old_vals[RingbufY] - new_vals[RingbufY]) / 5,
1622 (old_vals[RingbufRot] - new_vals[RingbufRot]) / 5,
1623 new_vals[RingbufScaleX] / old_vals[RingbufScaleX],
1624 new_vals[RingbufScaleY] / old_vals[RingbufScaleY],
1629 update_needed_crop(&deshake_ctx->crop_y, transform_crop_y, input_frame->width, input_frame->height);
1631 transform_center_scale(
1632 (old_vals[RingbufX] - new_vals[RingbufX]) / (5 * luma_w_over_chroma_w),
1633 (old_vals[RingbufY] - new_vals[RingbufY]) / (5 * luma_h_over_chroma_h),
1634 (old_vals[RingbufRot] - new_vals[RingbufRot]) / 5,
1635 new_vals[RingbufScaleX] / old_vals[RingbufScaleX],
1636 new_vals[RingbufScaleY] / old_vals[RingbufScaleY],
1641 update_needed_crop(&deshake_ctx->crop_uv, transform_crop_uv, chroma_width, chroma_height);
1643 crops[0] = deshake_ctx->crop_y;
1644 crops[1] = crops[2] = deshake_ctx->crop_uv;
1646 for (int p = 0; p < FF_ARRAY_ELEMS(cropped_frame->data); p++) {
1647 // Crop all of the planes appropriately
1648 dst = (cl_mem)cropped_frame->data[p];
1649 transformed = (cl_mem)transformed_frame->data[p];
1654 err = ff_opencl_filter_work_size_from_image(avctx, global_work, input_frame, p, 0);
1658 CL_RUN_KERNEL_WITH_ARGS(
1659 deshake_ctx->command_queue,
1660 deshake_ctx->kernel_crop_upscale,
1663 &crop_upscale_event,
1664 { sizeof(cl_mem), &transformed },
1665 { sizeof(cl_mem), &dst },
1666 { sizeof(cl_float2), &crops[p].top_left },
1667 { sizeof(cl_float2), &crops[p].bottom_right },
1672 if (deshake_ctx->curr_frame < deshake_ctx->smooth_window / 2) {
1673 // This means we are somewhere at the start of the video. We need to
1674 // increment the current frame offset until it reaches the center of
1675 // the ringbuffers (as the current frame will be located there for
1676 // the rest of the video).
1678 // The end of the video is taken care of by draining motion data
1679 // one-by-one out of the buffer, causing the (at that point fixed)
1680 // offset to move towards later frames' data.
1681 ++deshake_ctx->abs_motion.curr_frame_offset;
1684 if (deshake_ctx->abs_motion.data_end_offset != -1) {
// Keep the end offset in sync with the frame it's supposed to be tied to
1687 --deshake_ctx->abs_motion.data_end_offset;
1689 if (deshake_ctx->abs_motion.data_end_offset == deshake_ctx->abs_motion.curr_frame_offset - 1) {
// The end offset would be the start of the new video sequence; flip to a start offset
1692 deshake_ctx->abs_motion.data_end_offset = -1;
1693 deshake_ctx->abs_motion.data_start_offset = deshake_ctx->abs_motion.curr_frame_offset;
1695 } else if (deshake_ctx->abs_motion.data_start_offset != -1) {
// Keep the start offset in sync with the frame it's supposed to be tied to
1698 --deshake_ctx->abs_motion.data_start_offset;
1701 if (deshake_ctx->debug_on) {
1702 deshake_ctx->transform_time += ff_opencl_get_event_time(transform_event);
1703 if (deshake_ctx->should_crop) {
1704 deshake_ctx->crop_upscale_time += ff_opencl_get_event_time(crop_upscale_event);
1708 ++deshake_ctx->curr_frame;
1710 if (deshake_ctx->debug_on)
1711 av_freep(&debug_matches.matches);
1713 if (deshake_ctx->should_crop) {
1714 err = av_frame_copy_props(cropped_frame, input_frame);
1718 av_frame_free(&transformed_frame);
1719 av_frame_free(&input_frame);
1720 return ff_filter_frame(outlink, cropped_frame);
1723 err = av_frame_copy_props(transformed_frame, input_frame);
1727 av_frame_free(&cropped_frame);
1728 av_frame_free(&input_frame);
1729 return ff_filter_frame(outlink, transformed_frame);
1733 clFinish(deshake_ctx->command_queue);
1735 if (deshake_ctx->debug_on)
1736 if (debug_matches.matches)
1737 av_freep(&debug_matches.matches);
1739 av_frame_free(&input_frame);
1740 av_frame_free(&transformed_frame);
1741 av_frame_free(&cropped_frame);
1745 // Add the given frame to the frame queue to eventually be processed.
1747 // Also determines the motion from the previous frame and updates the stored
1748 // motion information accordingly.
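//
// Per-frame device pipeline, as sketched by the kernel launches below:
// grayscale -> harris_response -> refine_features -> brief_descriptors ->
// match_descriptors, then a blocking read of the matches back to the host,
// where estimate_affine_2d (RANSAC) and minimize_error recover the relative
// transform that gets accumulated into the ringbuffers.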
1749 static int queue_frame(AVFilterLink *link, AVFrame *input_frame)
1751 AVFilterContext *avctx = link->dst;
1752 DeshakeOpenCLContext *deshake_ctx = avctx->priv;
1755 int num_inliers = 0;
1757 FrameDelta relative;
1758 SimilarityMatrix model;
1759 size_t global_work[2];
1760 size_t harris_global_work[2];
1761 size_t grid_32_global_work[2];
1762 int grid_32_h, grid_32_w;
1763 size_t local_work[2];
1767 cl_event grayscale_event, harris_response_event, refine_features_event,
1768 brief_event, match_descriptors_event, read_buf_event;
1769 DebugMatches debug_matches;
1776 err = ff_opencl_filter_work_size_from_image(avctx, global_work, input_frame, 0, 0);
1780 err = ff_opencl_filter_work_size_from_image(avctx, harris_global_work, input_frame, 0, 8);
1784 err = ff_opencl_filter_work_size_from_image(avctx, grid_32_global_work, input_frame, 0, 32);
1788 // We want a single work-item for each 32x32 block of pixels in the input frame
1789 grid_32_global_work[0] /= 32;
1790 grid_32_global_work[1] /= 32;
1792 grid_32_h = ROUNDED_UP_DIV(input_frame->height, 32);
1793 grid_32_w = ROUNDED_UP_DIV(input_frame->width, 32);
1795 if (deshake_ctx->is_yuv) {
1796 deshake_ctx->grayscale = (cl_mem)input_frame->data[0];
1798 src = (cl_mem)input_frame->data[0];
1800 CL_RUN_KERNEL_WITH_ARGS(
1801 deshake_ctx->command_queue,
1802 deshake_ctx->kernel_grayscale,
1806 { sizeof(cl_mem), &src },
1807 { sizeof(cl_mem), &deshake_ctx->grayscale }
1811 CL_RUN_KERNEL_WITH_ARGS(
1812 deshake_ctx->command_queue,
1813 deshake_ctx->kernel_harris_response,
1816 &harris_response_event,
1817 { sizeof(cl_mem), &deshake_ctx->grayscale },
1818 { sizeof(cl_mem), &deshake_ctx->harris_buf }
1821 CL_RUN_KERNEL_WITH_ARGS(
1822 deshake_ctx->command_queue,
1823 deshake_ctx->kernel_refine_features,
1824 grid_32_global_work,
1826 &refine_features_event,
1827 { sizeof(cl_mem), &deshake_ctx->grayscale },
1828 { sizeof(cl_mem), &deshake_ctx->harris_buf },
1829 { sizeof(cl_mem), &deshake_ctx->refined_features },
1830 { sizeof(cl_int), &deshake_ctx->refine_features }
1833 CL_RUN_KERNEL_WITH_ARGS(
1834 deshake_ctx->command_queue,
1835 deshake_ctx->kernel_brief_descriptors,
1836 grid_32_global_work,
1839 { sizeof(cl_mem), &deshake_ctx->grayscale },
1840 { sizeof(cl_mem), &deshake_ctx->refined_features },
1841 { sizeof(cl_mem), &deshake_ctx->descriptors },
1842 { sizeof(cl_mem), &deshake_ctx->brief_pattern}
1845 if (av_fifo_size(deshake_ctx->abs_motion.ringbuffers[RingbufX]) == 0) {
1846 // This is the first frame we've been given to queue, meaning there is
1847 // no previous frame to match descriptors to
1849 goto no_motion_data;
1852 CL_RUN_KERNEL_WITH_ARGS(
1853 deshake_ctx->command_queue,
1854 deshake_ctx->kernel_match_descriptors,
1855 grid_32_global_work,
1857 &match_descriptors_event,
1858 { sizeof(cl_mem), &deshake_ctx->prev_refined_features },
1859 { sizeof(cl_mem), &deshake_ctx->refined_features },
1860 { sizeof(cl_mem), &deshake_ctx->descriptors },
1861 { sizeof(cl_mem), &deshake_ctx->prev_descriptors },
1862 { sizeof(cl_mem), &deshake_ctx->matches }
1865 cle = clEnqueueReadBuffer(
1866 deshake_ctx->command_queue,
1867 deshake_ctx->matches,
1870 grid_32_h * grid_32_w * sizeof(MotionVector),
1871 deshake_ctx->matches_host,
1876 CL_FAIL_ON_ERROR(AVERROR(EIO), "Failed to read matches to host: %d.\n", cle);
1878 num_vectors = make_vectors_contig(deshake_ctx, grid_32_h, grid_32_w);
1880 if (num_vectors < 10) {
1881 // Not enough matches to get reliable motion data for this frame
1883 // From this point on all data is relative to this frame rather than the
1884 // original frame. We have to make sure that we don't mix values that were
1885 // relative to the original frame with the new values relative to this
1886 // frame when doing the gaussian smoothing. We keep track of where the old
// values end using this data_end_offset field in order to accomplish that.
1890 // If no motion data is present for multiple frames in a short window of
1891 // time, we leave the end where it was to avoid mixing 0s in with the
1892 // old data (and just treat them all as part of the new values)
1893 if (deshake_ctx->abs_motion.data_end_offset == -1) {
1894 deshake_ctx->abs_motion.data_end_offset =
1895 av_fifo_size(deshake_ctx->abs_motion.ringbuffers[RingbufX]) / sizeof(float) - 1;
1898 goto no_motion_data;
1901 if (!estimate_affine_2d(
1903 deshake_ctx->matches_contig_host,
1911 goto no_motion_data;
1914 for (int i = 0; i < num_vectors; i++) {
1915 if (deshake_ctx->matches_contig_host[i].should_consider) {
1916 deshake_ctx->inliers[num_inliers] = deshake_ctx->matches_contig_host[i];
1921 if (!minimize_error(
1923 deshake_ctx->inliers,
1929 goto no_motion_data;
1933 relative = decompose_transform(model.matrix);
1935 // Get the absolute transform data for the previous frame
1936 for (int i = 0; i < RingbufCount; i++) {
1937 av_fifo_generic_peek_at(
1938 deshake_ctx->abs_motion.ringbuffers[i],
1940 av_fifo_size(deshake_ctx->abs_motion.ringbuffers[i]) - sizeof(float),
1946 new_vals[RingbufX] = prev_vals[RingbufX] + relative.translation.s[0];
1947 new_vals[RingbufY] = prev_vals[RingbufY] + relative.translation.s[1];
1948 new_vals[RingbufRot] = prev_vals[RingbufRot] + relative.rotation;
1949 new_vals[RingbufScaleX] = prev_vals[RingbufScaleX] / relative.scale.s[0];
1950 new_vals[RingbufScaleY] = prev_vals[RingbufScaleY] / relative.scale.s[1];
1952 if (deshake_ctx->debug_on) {
1953 if (!deshake_ctx->is_yuv) {
1954 deshake_ctx->grayscale_time += ff_opencl_get_event_time(grayscale_event);
1956 deshake_ctx->harris_response_time += ff_opencl_get_event_time(harris_response_event);
1957 deshake_ctx->refine_features_time += ff_opencl_get_event_time(refine_features_event);
1958 deshake_ctx->brief_descriptors_time += ff_opencl_get_event_time(brief_event);
1959 deshake_ctx->match_descriptors_time += ff_opencl_get_event_time(match_descriptors_event);
1960 deshake_ctx->read_buf_time += ff_opencl_get_event_time(read_buf_event);
1966 new_vals[RingbufX] = 0.0f;
1967 new_vals[RingbufY] = 0.0f;
1968 new_vals[RingbufRot] = 0.0f;
1969 new_vals[RingbufScaleX] = 1.0f;
1970 new_vals[RingbufScaleY] = 1.0f;
1972 for (int i = 0; i < num_vectors; i++) {
1973 deshake_ctx->matches_contig_host[i].should_consider = 0;
1975 debug_matches.num_model_matches = 0;
1977 if (deshake_ctx->debug_on) {
1978 av_log(avctx, AV_LOG_VERBOSE,
1979 "\n[ALERT] No motion data found in queue_frame, motion reset to 0\n\n"
1986 // Swap the descriptor buffers (we don't need the previous frame's descriptors
1987 // again so we will use that space for the next frame's descriptors)
1988 temp = deshake_ctx->prev_descriptors;
1989 deshake_ctx->prev_descriptors = deshake_ctx->descriptors;
1990 deshake_ctx->descriptors = temp;
1992 // Same for the refined features
1993 temp = deshake_ctx->prev_refined_features;
1994 deshake_ctx->prev_refined_features = deshake_ctx->refined_features;
1995 deshake_ctx->refined_features = temp;
1997 if (deshake_ctx->debug_on) {
1998 if (num_vectors == 0) {
1999 debug_matches.matches = NULL;
2001 debug_matches.matches = av_malloc_array(num_vectors, sizeof(MotionVector));
2003 if (!debug_matches.matches) {
2004 err = AVERROR(ENOMEM);
2009 for (int i = 0; i < num_vectors; i++) {
2010 debug_matches.matches[i] = deshake_ctx->matches_contig_host[i];
2012 debug_matches.num_matches = num_vectors;
2014 av_fifo_generic_write(
2015 deshake_ctx->abs_motion.debug_matches,
2017 sizeof(DebugMatches),
2022 for (int i = 0; i < RingbufCount; i++) {
2023 av_fifo_generic_write(
2024 deshake_ctx->abs_motion.ringbuffers[i],
2031 return ff_framequeue_add(&deshake_ctx->fq, input_frame);
2034 clFinish(deshake_ctx->command_queue);
2035 av_frame_free(&input_frame);
2039 static int activate(AVFilterContext *ctx)
2041 AVFilterLink *inlink = ctx->inputs[0];
2042 AVFilterLink *outlink = ctx->outputs[0];
2043 DeshakeOpenCLContext *deshake_ctx = ctx->priv;
2044 AVFrame *frame = NULL;
2048 FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
2050 if (!deshake_ctx->eof) {
2051 ret = ff_inlink_consume_frame(inlink, &frame);
2055 if (!frame->hw_frames_ctx)
2056 return AVERROR(EINVAL);
2058 if (!deshake_ctx->initialized) {
2059 ret = deshake_opencl_init(ctx);
2064 // If there is no more space in the ringbuffers, remove the oldest
2065 // values to make room for the new ones
2066 if (av_fifo_space(deshake_ctx->abs_motion.ringbuffers[RingbufX]) == 0) {
2067 for (int i = 0; i < RingbufCount; i++) {
2068 av_fifo_drain(deshake_ctx->abs_motion.ringbuffers[i], sizeof(float));
2071 ret = queue_frame(inlink, frame);
2075 // See if we have enough buffered frames to process one
2077 // "enough" is half the smooth window of queued frames into the future
2078 if (ff_framequeue_queued_frames(&deshake_ctx->fq) >= deshake_ctx->smooth_window / 2) {
2079 return filter_frame(inlink, ff_framequeue_take(&deshake_ctx->fq));
2085 if (!deshake_ctx->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
2086 if (status == AVERROR_EOF) {
2087 deshake_ctx->eof = 1;
2091 if (deshake_ctx->eof) {
2092 // Finish processing the rest of the frames in the queue.
while (ff_framequeue_queued_frames(&deshake_ctx->fq) != 0) {
2094 for (int i = 0; i < RingbufCount; i++) {
2095 av_fifo_drain(deshake_ctx->abs_motion.ringbuffers[i], sizeof(float));
2098 ret = filter_frame(inlink, ff_framequeue_take(&deshake_ctx->fq));
2104 if (deshake_ctx->debug_on) {
2105 av_log(ctx, AV_LOG_VERBOSE,
2106 "Average kernel execution times:\n"
2107 "\t grayscale: %0.3f ms\n"
2108 "\t harris_response: %0.3f ms\n"
2109 "\t refine_features: %0.3f ms\n"
2110 "\tbrief_descriptors: %0.3f ms\n"
2111 "\tmatch_descriptors: %0.3f ms\n"
2112 "\t transform: %0.3f ms\n"
2113 "\t crop_upscale: %0.3f ms\n"
2114 "Average buffer read times:\n"
2115 "\t features buf: %0.3f ms\n",
2116 averaged_event_time_ms(deshake_ctx->grayscale_time, deshake_ctx->curr_frame),
2117 averaged_event_time_ms(deshake_ctx->harris_response_time, deshake_ctx->curr_frame),
2118 averaged_event_time_ms(deshake_ctx->refine_features_time, deshake_ctx->curr_frame),
2119 averaged_event_time_ms(deshake_ctx->brief_descriptors_time, deshake_ctx->curr_frame),
2120 averaged_event_time_ms(deshake_ctx->match_descriptors_time, deshake_ctx->curr_frame),
2121 averaged_event_time_ms(deshake_ctx->transform_time, deshake_ctx->curr_frame),
2122 averaged_event_time_ms(deshake_ctx->crop_upscale_time, deshake_ctx->curr_frame),
2123 averaged_event_time_ms(deshake_ctx->read_buf_time, deshake_ctx->curr_frame)
2127 ff_outlink_set_status(outlink, AVERROR_EOF, deshake_ctx->duration);
2131 if (!deshake_ctx->eof) {
2132 FF_FILTER_FORWARD_WANTED(outlink, inlink);
2135 return FFERROR_NOT_READY;
2138 static const AVFilterPad deshake_opencl_inputs[] = {
2141 .type = AVMEDIA_TYPE_VIDEO,
2142 .config_props = &ff_opencl_filter_config_input,
2147 static const AVFilterPad deshake_opencl_outputs[] = {
2150 .type = AVMEDIA_TYPE_VIDEO,
2151 .config_props = &ff_opencl_filter_config_output,
2156 #define OFFSET(x) offsetof(DeshakeOpenCLContext, x)
2157 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
2159 static const AVOption deshake_opencl_options[] = {
2161 "tripod", "simulates a tripod by preventing any camera movement whatsoever "
2162 "from the original frame",
2163 OFFSET(tripod_mode), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS
2166 "debug", "turn on additional debugging information",
2167 OFFSET(debug_on), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS
2170 "adaptive_crop", "attempt to subtly crop borders to reduce mirrored content",
2171 OFFSET(should_crop), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS
2174 "refine_features", "refine feature point locations at a sub-pixel level",
2175 OFFSET(refine_features), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS
2178 "smooth_strength", "smoothing strength (0 attempts to adaptively determine optimal strength)",
2179 OFFSET(smooth_percent), AV_OPT_TYPE_FLOAT, {.dbl = 0.0f}, 0.0f, 1.0f, FLAGS
2182 "smooth_window_multiplier", "multiplier for number of frames to buffer for motion data",
2183 OFFSET(smooth_window_multiplier), AV_OPT_TYPE_FLOAT, {.dbl = 2.0}, 0.1, 10.0, FLAGS
2188 AVFILTER_DEFINE_CLASS(deshake_opencl);
2190 AVFilter ff_vf_deshake_opencl = {
2191 .name = "deshake_opencl",
2192 .description = NULL_IF_CONFIG_SMALL("Feature-point based video stabilization filter"),
2193 .priv_size = sizeof(DeshakeOpenCLContext),
2194 .priv_class = &deshake_opencl_class,
2195 .init = &ff_opencl_filter_init,
2196 .uninit = &deshake_opencl_uninit,
2197 .query_formats = &ff_opencl_filter_query_formats,
2198 .activate = activate,
2199 .inputs = deshake_opencl_inputs,
2200 .outputs = deshake_opencl_outputs,
2201 .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE