}
}
+/** Convert an MLT image to one that can be used by VS.
+ * Use free_vsimage() when done with the resulting image.
+ */
+
VSPixelFormat mltimage_to_vsimage( mlt_image_format mlt_format, int width, int height, uint8_t* mlt_img, uint8_t** vs_img )
{
switch( mlt_format )
}
}
+/** Convert a VS image back to the MLT image it originally came from in mltimage_to_vsimage().
+ */
+
void vsimage_to_mltimage( uint8_t* vs_img, uint8_t* mlt_img, mlt_image_format mlt_format, int width, int height )
{
switch( mlt_format )
}
break;
default:
- return PF_NONE;
+ break;
}
}
+/** Free an image allocated by mltimage_to_vsimage().
+ */
+
void free_vsimage( uint8_t* vs_img, VSPixelFormat format )
{
if( format != PF_YUV420P )
mlt_pool_release( vs_img );
}
}
+
+/** Compare two VSMotionDetectConfig structures.
+ * Return 1 if they are different. 0 if they are the same.
+ */
+
+int compare_motion_config( VSMotionDetectConfig* a, VSMotionDetectConfig* b )
+{
+    // algo (deprecated) and modName (inconsequential) are deliberately
+    // excluded from the comparison.
+    int same = a->shakiness == b->shakiness &&
+               a->accuracy == b->accuracy &&
+               a->stepSize == b->stepSize &&
+               a->virtualTripod == b->virtualTripod &&
+               a->show == b->show &&
+               a->contrastThreshold == b->contrastThreshold;
+    return same ? 0 : 1;
+}
+
+/** Compare two VSTransformConfig structures.
+ * Return 1 if they are different. 0 if they are the same.
+ */
+
+int compare_transform_config( VSTransformConfig* a, VSTransformConfig* b )
+{
+    // modName, verbose and storeTransforms are deliberately excluded from
+    // the comparison.
+    int same = a->relative == b->relative &&
+               a->smoothing == b->smoothing &&
+               a->crop == b->crop &&
+               a->invert == b->invert &&
+               a->zoom == b->zoom &&
+               a->optZoom == b->optZoom &&
+               a->zoomSpeed == b->zoomSpeed &&
+               a->interpolType == b->interpolType &&
+               a->maxShift == b->maxShift &&
+               a->maxAngle == b->maxAngle &&
+               a->simpleMotionCalculation == b->simpleMotionCalculation &&
+               a->smoothZoom == b->smoothZoom &&
+               a->camPathAlgo == b->camPathAlgo;
+    return !same;
+}
void vsimage_to_mltimage( uint8_t* vs_img, uint8_t* mlt_img, mlt_image_format mlt_format, int width, int height );
void free_vsimage( uint8_t* vs_img, VSPixelFormat format );
+int compare_motion_config( VSMotionDetectConfig* a, VSMotionDetectConfig* b );
+int compare_transform_config( VSTransformConfig* a, VSTransformConfig* b );
+
#endif /* VIDSTAB_COMMON_H_ */
#include <string.h>
#include <assert.h>
-#define FILTER_NAME "vid.stab.deshake"
-
typedef struct _deshake_data
{
	bool initialized;
	VSMotionDetect md;
	VSTransformData td;
	VSSlidingAvgTrans avg;
-
+	VSMotionDetectConfig mconf; // saved config; compared by check_config()
+	VSTransformConfig tconf; // saved config; compared by check_config()
	mlt_position lastFrame;
} DeshakeData;
-int init_deshake(DeshakeData *data, mlt_properties properties,
- VSPixelFormat vs_format, int *width, int *height, char* interps)
+/** Read the motion-detection and transform parameters from the filter
+ * properties into the supplied config structures. The interpolation type
+ * is taken from the frame's "rescale.interp" property.
+ */
+static void get_config( VSTransformConfig* tconf, VSMotionDetectConfig* mconf, mlt_filter filter, mlt_frame frame )
+{
+    mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
+    const char* filterName = mlt_properties_get( properties, "mlt_service" );
+
+    memset( mconf, 0, sizeof(VSMotionDetectConfig) );
+    *mconf = vsMotionDetectGetDefaultConfig( filterName );
+    mconf->shakiness = mlt_properties_get_int( properties, "shakiness" );
+    mconf->accuracy = mlt_properties_get_int( properties, "accuracy" );
+    mconf->stepSize = mlt_properties_get_int( properties, "stepsize" );
+    mconf->contrastThreshold = mlt_properties_get_double( properties, "mincontrast" );
+
+    memset( tconf, 0, sizeof(VSTransformConfig) );
+    *tconf = vsTransformGetDefaultConfig( filterName );
+    tconf->smoothing = mlt_properties_get_int( properties, "smoothing" );
+    tconf->maxShift = mlt_properties_get_int( properties, "maxshift" );
+    tconf->maxAngle = mlt_properties_get_double( properties, "maxangle" );
+    tconf->crop = (VSBorderType) mlt_properties_get_int( properties, "crop" );
+    tconf->zoom = mlt_properties_get_int( properties, "zoom" );
+    tconf->optZoom = mlt_properties_get_int( properties, "optzoom" );
+    tconf->zoomSpeed = mlt_properties_get_double( properties, "zoomspeed" );
+    tconf->relative = 1;
+
+    // by default a bicubic interpolation is selected
+    const char *interps = mlt_properties_get( MLT_FRAME_PROPERTIES( frame ), "rescale.interp" );
+    tconf->interpolType = VS_BiCubic;
+    // "rescale.interp" may be unset on the frame; keep the bicubic default
+    // rather than calling strcmp() on NULL (undefined behavior).
+    if ( interps )
+    {
+        if ( strcmp( interps, "nearest" ) == 0 || strcmp( interps, "neighbor" ) == 0 )
+            tconf->interpolType = VS_Zero;
+        else if ( strcmp( interps, "tiles" ) == 0 || strcmp( interps, "fast_bilinear" ) == 0 )
+            tconf->interpolType = VS_Linear;
+        else if ( strcmp( interps, "bilinear" ) == 0 )
+            tconf->interpolType = VS_BiLinear;
+    }
+}
+
+/** Return 1 when the current filter properties no longer match the
+ * configuration the deshake data was initialized with, 0 otherwise.
+ */
+static int check_config( mlt_filter filter, mlt_frame frame )
+{
+    DeshakeData *data = static_cast<DeshakeData*>( filter->child );
+    VSTransformConfig tconf;
+    VSMotionDetectConfig mconf;
+
+    get_config( &tconf, &mconf, filter, frame );
+
+    return compare_transform_config( &data->tconf, &tconf ) ||
+           compare_motion_config( &data->mconf, &mconf );
+}
+
+/** Initialize the motion-detection and transform structures for the current
+ * image size and filter parameters. The configurations are stored in
+ * data->mconf / data->tconf so check_config() can later detect parameter
+ * changes.
+ */
+static void init_deshake( DeshakeData *data, mlt_filter filter, mlt_frame frame,
+        VSPixelFormat vs_format, int *width, int *height )
{
	VSFrameInfo fiIn, fiOut;
-	vsFrameInfoInit(&fiIn, *width, *height, vs_format);
-	vsFrameInfoInit(&fiOut, *width, *height, vs_format);
-
-	VSMotionDetectConfig conf = vsMotionDetectGetDefaultConfig(FILTER_NAME);
-	conf.shakiness = mlt_properties_get_int(properties, "shakiness");
-	conf.accuracy = mlt_properties_get_int(properties, "accuracy");
-	conf.stepSize = mlt_properties_get_int(properties, "stepsize");
-	conf.algo = mlt_properties_get_int(properties, "algo");
-	conf.contrastThreshold = mlt_properties_get_double(properties, "mincontrast");
-	conf.show = 0;
-
-	vsMotionDetectInit(&data->md, &conf, &fiIn);
-
-	VSTransformConfig tdconf = vsTransformGetDefaultConfig(FILTER_NAME);
-	tdconf.smoothing = mlt_properties_get_int(properties, "smoothing");
-	tdconf.maxShift = mlt_properties_get_int(properties, "maxshift");
-	tdconf.maxAngle = mlt_properties_get_double(properties, "maxangle");
-	tdconf.crop = (VSBorderType) mlt_properties_get_int(properties, "crop");
-	tdconf.zoom = mlt_properties_get_int(properties, "zoom");
-	tdconf.optZoom = mlt_properties_get_int(properties, "optzoom");
-	tdconf.zoomSpeed = mlt_properties_get_double(properties, "zoomspeed");
-	tdconf.relative = 1;
-	tdconf.invert = 0;
-
-	// by default a bilinear interpolation is selected
-	tdconf.interpolType = VS_BiLinear;
-	if (strcmp(interps, "nearest") == 0 || strcmp(interps, "neighbor") == 0)
-		tdconf.interpolType = VS_Zero;
-	else if (strcmp(interps, "tiles") == 0 || strcmp(interps, "fast_bilinear") == 0)
-		tdconf.interpolType = VS_Linear;
-
-	vsTransformDataInit(&data->td, &tdconf, &fiIn, &fiOut);
+
+	vsFrameInfoInit( &fiIn, *width, *height, vs_format );
+	vsFrameInfoInit( &fiOut, *width, *height, vs_format );
+	get_config( &data->tconf, &data->mconf, filter, frame );
+	vsMotionDetectInit( &data->md, &data->mconf, &fiIn );
+	vsTransformDataInit(&data->td, &data->tconf, &fiIn, &fiOut);
	data->avg.initialized = 0;
-	return 0;
}
-void clear_deshake(DeshakeData *data)
+static void clear_deshake(DeshakeData *data)
{
if (data->initialized)
{
int *width, int *height, int writable)
{
mlt_filter filter = (mlt_filter) mlt_frame_pop_service(frame);
- mlt_properties properties = MLT_FILTER_PROPERTIES(filter);
uint8_t* vs_image = NULL;
VSPixelFormat vs_format = PF_NONE;
// Service locks are for concurrency control
mlt_service_lock(MLT_FILTER_SERVICE(filter));
- // Handle signal from app to re-init data
- if (mlt_properties_get_int(properties, "refresh"))
- {
- mlt_properties_set(properties, "refresh", NULL);
- clear_deshake(data);
- data->initialized = false;
- }
-
// clear deshake data, when seeking or dropping frames
- mlt_position pos = mlt_filter_get_position(filter, frame);
- if(pos != data->lastFrame+1) {
- clear_deshake(data);
+ mlt_position pos = mlt_filter_get_position( filter, frame );
+ if( pos != data->lastFrame + 1 ||
+ check_config( filter, frame) == 1 )
+ {
+ clear_deshake( data );
data->initialized = false;
}
data->lastFrame = pos;
- if (!data->initialized)
+ if ( !data->initialized )
{
- char *interps = mlt_properties_get(MLT_FRAME_PROPERTIES(frame), "rescale.interp");
- init_deshake(data, properties, vs_format, width, height,
- interps);
+ init_deshake( data, filter, frame, vs_format, width, height );
data->initialized = true;
}
vsFrameFillFromBuffer(&vsFrame, vs_image, &md->fi);
vsMotionDetection(md, &localmotions, &vsFrame);
- motion = vsSimpleMotionsToTransform(md->fi, FILTER_NAME, &localmotions);
+ const char* filterName = mlt_properties_get( MLT_FILTER_PROPERTIES( filter ), "mlt_service" );
+ motion = vsSimpleMotionsToTransform(md->fi, filterName, &localmotions);
vs_vector_del(&localmotions);
vsTransformPrepare(td, &vsFrame, &vsFrame);
extern "C"
{
-mlt_filter filter_deshake_init(mlt_profile profile, mlt_service_type type,
- const char *id, char *arg)
+mlt_filter filter_deshake_init( mlt_profile profile, mlt_service_type type, const char *id, char *arg )
{
mlt_filter filter = NULL;
mlt_properties_set(properties, "shakiness", "4");
mlt_properties_set(properties, "accuracy", "4");
mlt_properties_set(properties, "stepsize", "6");
- mlt_properties_set(properties, "algo", "1");
mlt_properties_set(properties, "mincontrast", "0.3");
//properties for transform
notes: >
Deshakes a video clip by extracting relative transformations
of subsequent frames and transforms the high-frequency away.
- This is a single pass verion of stabilize and transform plugin.
+ This is a single pass version of the vidstab filter.
parameters:
- identifier: shakiness
title: Shakiness
type: integer
- description: How shaky is the video and how quick is the camera? (analysis)
+ description: How shaky the video is.
readonly: no
required: no
minimum: 1
- identifier: accuracy
title: Accuracy
type: integer
- description: Accuracy of shakiness detection (analysis)
+ description: The accuracy of shakiness detection.
readonly: no
required: no
minimum: 1
- identifier: stepsize
title: Stepsize
type: integer
- description: Step size of search process, region around minimum is scanned with 1 pixel resolution (analysis)
+ description: The step size of the search process.
readonly: no
required: no
minimum: 0
mutable: yes
widget: spinner
- - identifier: algo
- title: Algorithm
- type: integer
- description: 0 = brute force (translation only), 1 = small measurement fields (analysis)
- readonly: no
- required: no
- minimum: 0
- maximum: 1
- default: 1
- mutable: yes
- widget: spinner
-
- identifier: mincontrast
title: Minimum Contrast
type: float
- description: Below this contrast a field is discarded (analysis)
+ description: Below this contrast, a field is discarded.
readonly: no
required: no
minimum: 0
- identifier: smoothing
title: Smoothing
type: integer
- description: number of frames for lowpass filtering (2N + 1 frames) (transform)
+ description: Number of frames for lowpass filtering (2N + 1 frames)
readonly: no
required: no
minimum: 0
- identifier: maxshift
title: Maxshift
type: integer
- description: maximum translation, -1 = no limit (transform)
+ description: Maximum number of pixels to transform the image. -1 = no limit
unit: pixels
readonly: no
required: no
- identifier: maxangle
title: Maxangle
type: float
- description: max angle to rotate, -1 = no limit (transform)
+ description: Maximum angle to rotate, -1 = no limit
unit: radians
readonly: no
required: no
- identifier: crop
title: Crop
type: integer
- description: 0 = keep border, 1 = black background (transform)
+ description: 0 = keep border, 1 = black background
readonly: no
required: no
minimum: 0
- identifier: zoom
title: Zoom
type: integer
- description: additional zoom amount (transform)
+ description: Additional zoom amount
unit: percent
readonly: no
required: no
- identifier: optzoom
title: Optimal Zoom
type: integer
- description: automatically determine optimal zoom. 1 - static zoom, 2 - adaptive zoom (transform)
+ description: Automatically determine optimal zoom. 1 - static zoom, 2 - adaptive zoom
readonly: no
required: no
minimum: 0
- identifier: zoomspeed
title: Optimal Zoom Speed
type: float
- description: zoom per frame in percent, (used when optzoom = 2) (transform)
+ description: Zoom per frame (used when optzoom = 2)
+ unit: percent
readonly: no
required: no
minimum: 0
default: 0.25
mutable: yes
widget: spinner
-
- - identifier: refresh
- description: >
- Applications should set this when it updates a transform parameter.
- type: integer
- minimum: 0
- maximum: 1
{
VSMotionDetect md;
VSManyLocalMotions mlms;
+ mlt_position last_position;
} vs_analyze;
typedef struct
{
VSTransformData td;
+ VSTransformConfig conf;
VSTransformations trans;
} vs_apply;
mlt_animation_close( animation );
}
-static vs_apply* init_apply_data( mlt_filter filter, mlt_frame frame, VSPixelFormat vs_format, int width, int height )
+/** Read the transform-related filter properties into a VSTransformConfig.
+ * Tripod mode overrides relative and smoothing; the interpolation type is
+ * taken from the frame's "rescale.interp" property.
+ */
+static void get_transform_config( VSTransformConfig* conf, mlt_filter filter, mlt_frame frame )
{
	mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
-	vs_apply* apply_data = (vs_apply*)calloc( 1, sizeof(vs_apply) );
-	memset( apply_data, 0, sizeof( vs_apply ) );
-
	const char* filterName = mlt_properties_get( properties, "mlt_service" );
-	VSTransformConfig conf = vsTransformGetDefaultConfig( filterName );
-	conf.smoothing = mlt_properties_get_int( properties, "smoothing" );
-	conf.maxShift = mlt_properties_get_int( properties, "maxshift" );
-	conf.maxAngle = mlt_properties_get_double( properties, "maxangle" );
-	conf.crop = (VSBorderType)mlt_properties_get_int( properties, "crop" );
-	conf.zoom = mlt_properties_get_int( properties, "zoom" );
-	conf.optZoom = mlt_properties_get_int( properties, "optzoom" );
-	conf.zoomSpeed = mlt_properties_get_double( properties, "zoomspeed" );
-	conf.relative = mlt_properties_get_int( properties, "relative" );
-	conf.invert = mlt_properties_get_int( properties, "invert" );
+
+	*conf = vsTransformGetDefaultConfig( filterName );
+	conf->smoothing = mlt_properties_get_int( properties, "smoothing" );
+	conf->maxShift = mlt_properties_get_int( properties, "maxshift" );
+	conf->maxAngle = mlt_properties_get_double( properties, "maxangle" );
+	conf->crop = (VSBorderType)mlt_properties_get_int( properties, "crop" );
+	conf->zoom = mlt_properties_get_int( properties, "zoom" );
+	conf->optZoom = mlt_properties_get_int( properties, "optzoom" );
+	conf->zoomSpeed = mlt_properties_get_double( properties, "zoomspeed" );
+	conf->relative = mlt_properties_get_int( properties, "relative" );
+	conf->invert = mlt_properties_get_int( properties, "invert" );
	if ( mlt_properties_get_int( properties, "tripod" ) != 0 )
	{
		// Virtual tripod mode: relative=False, smoothing=0
-		conf.relative = 0;
-		conf.smoothing = 0;
+		conf->relative = 0;
+		conf->smoothing = 0;
	}
-	// by default a bilinear interpolation is selected
+	// by default a bicubic interpolation is selected
	const char *interps = mlt_properties_get( MLT_FRAME_PROPERTIES( frame ), "rescale.interp" );
-	conf.interpolType = VS_BiLinear;
-	if (strcmp(interps, "nearest") == 0 || strcmp(interps, "neighbor") == 0)
-		conf.interpolType = VS_Zero;
-	else if (strcmp(interps, "tiles") == 0 || strcmp(interps, "fast_bilinear") == 0)
-		conf.interpolType = VS_Linear;
+	conf->interpolType = VS_BiCubic;
+	// "rescale.interp" may be unset on the frame; keep the bicubic default
+	// rather than calling strcmp() on NULL (undefined behavior).
+	if ( interps )
+	{
+		if ( strcmp( interps, "nearest" ) == 0 || strcmp( interps, "neighbor" ) == 0 )
+			conf->interpolType = VS_Zero;
+		else if ( strcmp( interps, "tiles" ) == 0 || strcmp( interps, "fast_bilinear" ) == 0 )
+			conf->interpolType = VS_Linear;
+		else if ( strcmp( interps, "bilinear" ) == 0 )
+			conf->interpolType = VS_BiLinear;
+	}
+}
- // load motions
- VSManyLocalMotions mlms;
- vs_vector_init( &mlms, mlt_filter_get_length2( filter, frame ) );
- read_manylocalmotions( properties, &mlms );
+/** Return 1 if the transform configuration has changed since apply_data
+ * was initialized, 0 otherwise (or if there is no apply_data yet).
+ */
+static int check_apply_config( mlt_filter filter, mlt_frame frame )
+{
+    vs_apply* apply_data = ((vs_data*)filter->child)->apply_data;
+    VSTransformConfig conf;
+
+    if( !apply_data )
+        return 0;
+
+    memset( &conf, 0, sizeof(VSTransformConfig) );
+    get_transform_config( &conf, filter, frame );
+    return compare_transform_config( &apply_data->conf, &conf );
+}
+
+/** Allocate and initialize the transform (apply) data and store it in
+ * data->apply_data.
+ * NOTE(review): calloc() already zero-fills, so the memset() below is
+ * redundant (but harmless).
+ */
+static void init_apply_data( mlt_filter filter, mlt_frame frame, VSPixelFormat vs_format, int width, int height )
+{
+	vs_data* data = (vs_data*)filter->child;
+	vs_apply* apply_data = (vs_apply*)calloc( 1, sizeof(vs_apply) );
+	memset( apply_data, 0, sizeof( vs_apply ) );
+
+	// Initialize the VSTransformConfig
+	get_transform_config( &apply_data->conf, filter, frame );
-	// Convert motions to VSTransformations
-	VSTransformData* td = &apply_data->td;
-	VSTransformations* trans = &apply_data->trans;
+	// Initialize VSTransformData
	VSFrameInfo fi_src, fi_dst;
	vsFrameInfoInit( &fi_src, width, height, vs_format );
	vsFrameInfoInit( &fi_dst, width, height, vs_format );
-	vsTransformDataInit( td, &conf, &fi_src, &fi_dst );
-	vsTransformationsInit( trans );
-	vsLocalmotions2Transforms( td, &mlms, trans );
-	vsPreprocessTransforms( td, trans );
+	vsTransformDataInit( &apply_data->td, &apply_data->conf, &fi_src, &fi_dst );
+	// Initialize VSTransformations
+	vsTransformationsInit( &apply_data->trans );
+
+	// Load the motions from the analyze step and convert them to VSTransformations
+	VSManyLocalMotions mlms;
+	vs_vector_init( &mlms, mlt_filter_get_length2( filter, frame ) );
+	read_manylocalmotions( MLT_FILTER_PROPERTIES( filter ), &mlms );
+	vsLocalmotions2Transforms( &apply_data->td, &mlms, &apply_data->trans );
+	vsPreprocessTransforms( &apply_data->td, &apply_data->trans );
	free_manylocalmotions( &mlms );
-	return apply_data;
+	data->apply_data = apply_data;
}
static void destory_apply_data( vs_apply* apply_data )
}
}
-static vs_analyze* init_analyze_data( mlt_filter filter, mlt_frame frame, VSPixelFormat vs_format, int width, int height )
+/** Allocate and initialize the motion-analysis data and store it in
+ * data->analyze_data.
+ * NOTE(review): calloc() already zero-fills, so the memset() below is
+ * redundant (but harmless).
+ */
+static void init_analyze_data( mlt_filter filter, mlt_frame frame, VSPixelFormat vs_format, int width, int height )
{
	mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
+	vs_data* data = (vs_data*)filter->child;
	vs_analyze* analyze_data = (vs_analyze*)calloc( 1, sizeof(vs_analyze) );
	memset( analyze_data, 0, sizeof(vs_analyze) );
-	// Initialize the VSManyLocalMotions vector where motion data will be
+	// Initialize the saved VSManyLocalMotions vector where motion data will be
	// stored for each frame.
	vs_vector_init( &analyze_data->mlms, mlt_filter_get_length2( filter, frame ) );
-	// Initialize a VSFrameInfo to be used below
-	VSFrameInfo fi;
-	vsFrameInfoInit( &fi, width, height, vs_format );
-
-	// Initialize a VSMotionDetect
+	// Initialize a VSMotionDetectConfig
	const char* filterName = mlt_properties_get( properties, "mlt_service" );
	VSMotionDetectConfig conf = vsMotionDetectGetDefaultConfig( filterName );
	conf.shakiness = mlt_properties_get_int( properties, "shakiness" );
	conf.accuracy = mlt_properties_get_int( properties, "accuracy" );
	conf.stepSize = mlt_properties_get_int( properties, "stepsize" );
-	conf.algo = mlt_properties_get_int( properties, "algo" );
	conf.contrastThreshold = mlt_properties_get_double( properties, "mincontrast" );
	conf.show = mlt_properties_get_int( properties, "show" );
	conf.virtualTripod = mlt_properties_get_int( properties, "tripod" );
+
+	// Initialize a VSFrameInfo
+	VSFrameInfo fi;
+	vsFrameInfoInit( &fi, width, height, vs_format );
+
+	// Initialize the saved VSMotionDetect
	vsMotionDetectInit( &analyze_data->md, &conf, &fi );
-	return analyze_data;
+	data->analyze_data = analyze_data;
}
void destory_analyze_data( vs_analyze* analyze_data )
}
}
-static int transform_image( mlt_filter filter, mlt_frame frame, uint8_t* vs_image, VSPixelFormat vs_format, int width, int height )
+static int apply_results( mlt_filter filter, mlt_frame frame, uint8_t* vs_image, VSPixelFormat vs_format, int width, int height )
{
int error = 0;
mlt_properties properties = MLT_FILTER_PROPERTIES( filter );
vs_data* data = (vs_data*)filter->child;
- // Handle signal from app to re-init data
- if ( mlt_properties_get_int(properties, "refresh") )
+ if ( check_apply_config( filter, frame ) ||
+ mlt_properties_get_int( properties, "reload" ) )
{
- mlt_properties_set(properties, "refresh", NULL);
+ mlt_properties_set_int( properties, "reload", 0 );
destory_apply_data( data->apply_data );
data->apply_data = NULL;
}
// Init transform data if necessary (first time)
if ( !data->apply_data )
{
- data->apply_data = init_apply_data( filter, frame, vs_format, width, height );
+ init_apply_data( filter, frame, vs_format, width, height );
}
// Apply transformations to this image
vs_data* data = (vs_data*)filter->child;
mlt_position pos = mlt_filter_get_position( filter, frame );
- if ( !data->analyze_data )
+ // If any frames are skipped, analysis data will be incomplete.
+ if( data->analyze_data && pos != data->analyze_data->last_position + 1 )
{
- data->analyze_data = init_analyze_data( filter, frame, vs_format, width, height );
+ destory_analyze_data( data->analyze_data );
+ data->analyze_data = NULL;
}
- // Initialize the VSFrame to be analyzed.
- VSMotionDetect* md = &data->analyze_data->md;
- LocalMotions localmotions;
- VSFrame vsFrame;
- vsFrameFillFromBuffer( &vsFrame, vs_image, &md->fi );
-
- // Detect and save motions.
- vsMotionDetection( md, &localmotions, &vsFrame );
- vs_vector_set_dup( &data->analyze_data->mlms, pos, &localmotions, sizeof(LocalMotions) );
+ if ( !data->analyze_data && pos == 0 )
+ {
+ // Analysis must start on the first frame
+ init_analyze_data( filter, frame, vs_format, width, height );
+ }
- // Publish the motions if this is the last frame.
- if ( pos + 1 == mlt_filter_get_length2( filter, frame ) )
+ if( data->analyze_data )
{
- publish_manylocalmotions( properties, &data->analyze_data->mlms );
+ // Initialize the VSFrame to be analyzed.
+ VSMotionDetect* md = &data->analyze_data->md;
+ LocalMotions localmotions;
+ VSFrame vsFrame;
+ vsFrameFillFromBuffer( &vsFrame, vs_image, &md->fi );
+
+ // Detect and save motions.
+ vsMotionDetection( md, &localmotions, &vsFrame );
+ vs_vector_set_dup( &data->analyze_data->mlms, pos, &localmotions, sizeof(LocalMotions) );
+
+ // Publish the motions if this is the last frame.
+ if ( pos + 1 == mlt_filter_get_length2( filter, frame ) )
+ {
+ publish_manylocalmotions( properties, &data->analyze_data->mlms );
+ }
+
+ data->analyze_data->last_position = pos;
}
}
{
mlt_service_lock( MLT_FILTER_SERVICE(filter) );
- if( mlt_properties_get( properties, "results" ) )
+ char* results = mlt_properties_get( properties, "results" );
+ if( results && strcmp( results, "" ) )
{
- transform_image( filter, frame, vs_image, vs_format, *width, *height );
+ apply_results( filter, frame, vs_image, vs_format, *width, *height );
vsimage_to_mltimage( vs_image, *image, *format, *width, *height );
}
else
mlt_properties_set(properties, "zoom", "0");
mlt_properties_set(properties, "optzoom", "1");
mlt_properties_set(properties, "zoomspeed", "0.25");
+ mlt_properties_set( properties, "reload", "0" );
mlt_properties_set(properties, "vid.stab.version", LIBVIDSTAB_VERSION);
}
description: Stabilize Video (for wiggly/rolling video)
notes: >
This filter requires two passes. The first pass performs analysis and stores
- the result in the vectors property. The second pass applies the vectors to
+ the result in the "results" property. The second pass applies the results to
the image.
To use with melt, use 'melt ... -consumer xml:output.mlt all=1' for the
first pass. For the second pass, use output.mlt as the input.
parameters:
- - identifier: vectors (transform)
- title: Vectors
- type: geometry
+ - identifier: results
+ title: Analysis Results
+ type: string
description: >
- A set of X/Y coordinates by which to adjust the image.
- When this is not supplied, the filter computes the vectors and stores
+ Set after analysis. Used during application.
+ A set of image motion information that describes the analyzed clip.
+ When results are not supplied, the filter computes the results and stores
them in this property when the last frame has been processed.
+ mutable: no
- identifier: shakiness
title: Shakiness
type: integer
- description: How shaky is the video (analysis)
+ description: >
+ Used during analysis.
+ How shaky the video is.
readonly: no
required: no
minimum: 1
maximum: 10
default: 4
- mutable: yes
+ mutable: no
widget: spinner
- identifier: accuracy
title: Accuracy
type: integer
- description: Accuracy of shakiness detection (analysis)
+ description: >
+ Used during analysis.
+ The accuracy of shakiness detection.
readonly: no
required: no
minimum: 1
maximum: 15
default: 4
- mutable: yes
+ mutable: no
widget: spinner
- identifier: stepsize
title: Stepsize
type: integer
- description: Step size of search process (analysis)
+ description: >
+ Used during analysis.
+ The step size of the search process.
readonly: no
required: no
minimum: 0
maximum: 100
default: 6
- mutable: yes
- widget: spinner
-
- - identifier: algo
- title: Algorithm
- type: integer
- description: 0 = brute force (translation only), 1 = small measurement fields (analysis)
- readonly: no
- required: no
- minimum: 0
- maximum: 1
- default: 1
- mutable: yes
+ mutable: no
widget: spinner
- identifier: mincontrast
title: Minimum Contrast
type: float
- description: Below this contrast, a field is discarded (analysis)
+ description: >
+ Used during analysis.
+ Below this contrast, a field is discarded.
readonly: no
required: no
minimum: 0
maximum: 1
default: 0.3
- mutable: yes
+ mutable: no
widget: spinner
- identifier: show
title: Show
type: integer
- description: 0 = draw nothing, 1 or 2 = show fields and transforms (analysis)
+ description: >
+ Used during analysis.
+ 0 = draw nothing
+ 1 or 2 = show fields and transforms
readonly: no
required: no
minimum: 0
maximum: 2
default: 0
- mutable: yes
+ mutable: no
widget: spinner
- identifier: tripod
title: Tripod
type: integer
- description: virtual tripod mode (if >0) motion is compared to a reference frame (frame N is the value)
+ description: >
+ Used during analysis and application.
+        If 0, tripod mode is disabled.
+        If > 0, specifies the frame to be used as a reference frame for tripod mode.
+        During application, the relative and smoothing properties are both ignored if tripod mode is in use.
readonly: no
required: no
minimum: 0
maximum: 100000
default: 0
- mutable: yes
+ mutable: no
widget: spinner
- identifier: smoothing
title: Smoothing
type: integer
- description: number of frames for lowpass filtering (2N + 1 frames) (transform)
+ description: >
+ Used during application.
+ Number of frames for lowpass filtering (2N + 1 frames)
readonly: no
required: no
minimum: 0
- identifier: maxshift
title: Maxshift
type: integer
- description: maximum translation, -1 = no limit (transform)
+ description: >
+ Used during application.
+ Maximum number of pixels to transform the image. -1 = no limit
unit: pixels
readonly: no
required: no
- identifier: maxangle
title: Maxangle
type: float
- description: max angle to rotate, -1 = no limit (transform)
+ description: >
+ Used during application.
+ Maximum angle to rotate, -1 = no limit
unit: radians
readonly: no
required: no
- identifier: crop
title: Crop
type: integer
- description: 0 = keep border, 1 = black background (transform)
+ description: >
+ Used during application.
+ 0 = keep border, 1 = black background
readonly: no
required: no
minimum: 0
- identifier: invert
title: Invert
type: integer
- description: Invert transforms (transform)
+ description: >
+ Used during application.
+ Invert transforms
readonly: no
required: no
minimum: 0
widget: spinner
- identifier: relative
- title: Relative Transform
+ title: Relative
type: integer
- description: 0 = absolute, 1 = relative (transform)
+ description: >
+ Used during application.
+ Consider transforms as absolute (0) or relative (1)
readonly: no
required: no
minimum: 0
- identifier: zoom
title: Zoom
type: integer
- description: additional zoom amount (transform)
+ description: >
+ Used during application.
+ Additional zoom amount
unit: percent
readonly: no
required: no
- identifier: optzoom
title: Optimal Zoom
type: integer
- description: automatically determine optimal zoom. 1 - static zoom, 2 - adaptive zoom (transform)
+ description: >
+ Used during application.
+ Automatically determine optimal zoom. 1 - static zoom, 2 - adaptive zoom
readonly: no
required: no
minimum: 0
- identifier: zoomspeed
title: Optimal Zoom Speed
type: float
- description: zoom per frame in percent, (used when optzoom = 2) (transform)
+ description: >
+ Used during application.
+ Zoom per frame (used when optzoom = 2)
+ unit: percent
readonly: no
required: no
minimum: 0
mutable: yes
widget: spinner
- - identifier: refresh
+ - identifier: reload
+ title: Reload Results
description: >
- Applications should set this when it updates a transform parameter.
+ The application should set this to 1 when it updates the results property to indicate that the results should be reloaded.
type: integer
minimum: 0
maximum: 1
+ mutable: yes