+/** Flush the read/render thread's buffer.
+ *
+ * Discards the frame parked in the put slot, invokes the consumer's
+ * purge virtual method, drains the worker queue, and wakes every thread
+ * that may be blocked on one of the condition variables so the pipeline
+ * can restart cleanly (e.g. after a seek).
+ *
+ * \public \memberof mlt_consumer_s
+ * \param self a consumer
+ */
+
+void mlt_consumer_purge( mlt_consumer self )
+{
+ if ( self )
+ {
+ consumer_private *priv = self->local;
+
+ // Discard any frame waiting in the put slot and wake whoever is
+ // blocked on put_cond (presumably mlt_consumer_put_frame — confirm).
+ pthread_mutex_lock( &priv->put_mutex );
+ if ( priv->put ) {
+ mlt_frame_close( priv->put );
+ priv->put = NULL;
+ }
+ pthread_cond_broadcast( &priv->put_cond );
+ pthread_mutex_unlock( &priv->put_mutex );
+
+ // The queue lock is only needed when worker/read-ahead threads are
+ // running, i.e. the consumer is started in a real-time mode.
+ if ( priv->started && priv->real_time )
+ pthread_mutex_lock( &priv->queue_mutex );
+
+ // Let the concrete consumer implementation purge its own state.
+ if ( self->purge )
+ self->purge( self );
+
+ // Drop every frame still queued for rendering.
+ while ( priv->started && mlt_deque_count( priv->queue ) )
+ mlt_frame_close( mlt_deque_pop_back( priv->queue ) );
+ if ( priv->started && priv->real_time )
+ {
+ // Flag the purge so the frame-fetching path knows to discard the
+ // frame it is currently handling, then wake the worker threads.
+ priv->is_purge = 1;
+ pthread_cond_broadcast( &priv->queue_cond );
+ pthread_mutex_unlock( &priv->queue_mutex );
+ // With multiple workers (|real_time| > 1), also wake any thread
+ // waiting on done_cond for a frame to finish rendering.
+ if ( abs( priv->real_time ) > 1 )
+ {
+ pthread_mutex_lock( &priv->done_mutex );
+ pthread_cond_broadcast( &priv->done_cond );
+ pthread_mutex_unlock( &priv->done_mutex );
+ }
+ }
+
+ // Flush the put slot a second time: another thread may have refilled
+ // it while the queue was being drained above — NOTE(review): confirm.
+ pthread_mutex_lock( &priv->put_mutex );
+ if ( priv->put ) {
+ mlt_frame_close( priv->put );
+ priv->put = NULL;
+ }
+ pthread_cond_broadcast( &priv->put_cond );
+ pthread_mutex_unlock( &priv->put_mutex );
+ }
+}
+
+/** Use multiple worker threads and a work queue.
+ *
+ * Fetches the next frame for a consumer running with worker threads:
+ * starts the workers and prefills the queue on the first call, keeps the
+ * queue topped up to the buffer level, optionally blocks until the head
+ * frame is rendered (non-realtime mode), and adapts the worker process
+ * head to runtime drop/render behavior.
+ *
+ * \private \memberof mlt_consumer_s
+ * \param self a consumer
+ * \param properties the consumer's properties
+ * \return the next frame, or NULL (e.g. when purging or at end)
+ */
+
+static mlt_frame worker_get_frame( mlt_consumer self, mlt_properties properties )
+{
+ // Frame to return
+ mlt_frame frame = NULL;
+ consumer_private *priv = self->local;
+ double fps = mlt_properties_get_double( properties, "fps" );
+ // real_time's magnitude is the worker thread count; its sign selects
+ // realtime (drop frames) vs. non-realtime (wait for rendering) below.
+ int threads = abs( priv->real_time );
+ // "_buffer" is the internal auto-scaled level (see drop_max handling
+ // below); it overrides the user-facing "buffer" property when set.
+ int buffer = mlt_properties_get_int( properties, "_buffer" );
+ buffer = buffer > 0 ? buffer : mlt_properties_get_int( properties, "buffer" );
+ // This is a heuristic to determine a suitable minimum buffer size for the number of threads.
+ int headroom = 2 + threads * threads;
+ buffer = buffer < headroom ? headroom : buffer;
+
+ // Start worker threads if not already started.
+ if ( ! priv->ahead )
+ {
+ // Clamp prefill to (0, buffer]; out-of-range values mean "fill fully".
+ int prefill = mlt_properties_get_int( properties, "prefill" );
+ prefill = prefill > 0 && prefill < buffer ? prefill : buffer;
+
+ consumer_work_start( self );
+
+ // Fill the work queue.
+ int i = buffer;
+ while ( priv->ahead && i-- )
+ {
+ frame = mlt_consumer_get_frame( self );
+ if ( frame )
+ {
+ pthread_mutex_lock( &priv->queue_mutex );
+ mlt_deque_push_back( priv->queue, frame );
+ // Signal a single worker that a frame is available.
+ pthread_cond_signal( &priv->queue_cond );
+ pthread_mutex_unlock( &priv->queue_mutex );
+ }
+ }
+
+ // Wait for prefill
+ // Block until the workers have rendered at least "prefill" frames;
+ // done_cond is broadcast as each frame completes.
+ while ( priv->ahead && first_unprocessed_frame( self ) < prefill )
+ {
+ pthread_mutex_lock( &priv->done_mutex );
+ pthread_cond_wait( &priv->done_cond, &priv->done_mutex );
+ pthread_mutex_unlock( &priv->done_mutex );
+ }
+ // Start with the minimum process head: one frame in flight per thread.
+ priv->process_head = threads;
+ }
+
+// mlt_log_verbose( MLT_CONSUMER_SERVICE(self), "size %d done count %d work count %d process_head %d\n",
+// threads, first_unprocessed_frame( self ), mlt_deque_count( priv->queue ), priv->process_head );
+
+ // Feed the work queue
+ while ( priv->ahead && mlt_deque_count( priv->queue ) < buffer )
+ {
+ frame = mlt_consumer_get_frame( self );
+ if ( frame )
+ {
+ pthread_mutex_lock( &priv->queue_mutex );
+ mlt_deque_push_back( priv->queue, frame );
+ pthread_cond_signal( &priv->queue_cond );
+ pthread_mutex_unlock( &priv->queue_mutex );
+ }
+ }
+
+ // Wait if not realtime.
+ // In non-realtime mode (real_time < 0) never return an unrendered
+ // frame: sleep on done_cond until the head of the queue is "rendered".
+ // NOTE(review): peek_front is dereferenced without a NULL check —
+ // assumes the feed loop above guarantees a non-empty queue here while
+ // priv->ahead is set; confirm.
+ while ( priv->ahead && priv->real_time < 0 && !priv->is_purge &&
+ !( mlt_properties_get_int( MLT_FRAME_PROPERTIES( MLT_FRAME( mlt_deque_peek_front( priv->queue ) ) ), "rendered" ) ) )
+ {
+ pthread_mutex_lock( &priv->done_mutex );
+ pthread_cond_wait( &priv->done_cond, &priv->done_mutex );
+ pthread_mutex_unlock( &priv->done_mutex );
+ }
+
+ // Get the frame from the queue.
+ pthread_mutex_lock( &priv->queue_mutex );
+ frame = mlt_deque_pop_front( priv->queue );
+ pthread_mutex_unlock( &priv->queue_mutex );
+ if ( ! frame ) {
+ // An empty queue here can be the result of a purge; clear the flag.
+ priv->is_purge = 0;
+ return frame;
+ }
+
+ // Adapt the worker process head to the runtime conditions.
+ // In realtime mode: after a run of rendered frames shrink process_head
+ // back toward the thread count; after repeated drops grow it (bounded
+ // by buffer - threads) to give the workers more lead time.
+ if ( priv->real_time > 0 )
+ {
+ if ( mlt_properties_get_int( MLT_FRAME_PROPERTIES( frame ), "rendered" ) )
+ {
+ priv->consecutive_dropped = 0;
+ if ( priv->process_head > threads && priv->consecutive_rendered >= priv->process_head )
+ priv->process_head--;
+ else
+ priv->consecutive_rendered++;
+ }
+ else
+ {
+ priv->consecutive_rendered = 0;
+ if ( priv->process_head < buffer - threads && priv->consecutive_dropped > threads )
+ priv->process_head++;
+ else
+ priv->consecutive_dropped++;
+ }
+// mlt_log_verbose( MLT_CONSUMER_SERVICE(self), "dropped %d rendered %d process_head %d\n",
+// priv->consecutive_dropped, priv->consecutive_rendered, priv->process_head );
+
+ // Check for too many consecutively dropped frames
+ if ( priv->consecutive_dropped > mlt_properties_get_int( properties, "drop_max" ) )
+ {
+ int orig_buffer = mlt_properties_get_int( properties, "buffer" );
+ int prefill = mlt_properties_get_int( properties, "prefill" );
+ mlt_log_verbose( self, "too many frames dropped - " );
+
+ // If using a default low-latency buffer level (SDL) and below the limit
+ if ( ( orig_buffer == 1 || prefill == 1 ) && buffer < (threads + 1) * 10 )
+ {
+ // Auto-scale the buffer to compensate
+ mlt_log_verbose( self, "increasing buffer to %d\n", buffer + threads );
+ mlt_properties_set_int( properties, "_buffer", buffer + threads );
+ // Back off the drop counter by half a second's worth of frames
+ // so the larger buffer gets a chance before escalating again.
+ priv->consecutive_dropped = fps / 2;
+ }
+ else
+ {
+ // Tell the consumer to render it
+ mlt_log_verbose( self, "forcing next frame\n" );
+ mlt_properties_set_int( MLT_FRAME_PROPERTIES( frame ), "rendered", 1 );
+ priv->consecutive_dropped = 0;
+ }
+ }
+ }
+ // A purge happened while we were fetching: discard this stale frame
+ // and return NULL so the caller can resynchronize.
+ if ( priv->is_purge ) {
+ priv->is_purge = 0;
+ mlt_frame_close( frame );
+ frame = NULL;
+ }
+ return frame;
+}
+
+/** Get the next frame from the producer connected to a consumer.
+ *
+ * Typically, one uses this instead of \p mlt_consumer_get_frame to make
+ * the asynchronous/real-time behavior configurable at runtime.
+ * You should close the frame returned from this when you are done with it.
+ *
+ * \public \memberof mlt_consumer_s
+ * \param self a consumer
+ * \return a frame
+ */
+
+mlt_frame mlt_consumer_rt_frame( mlt_consumer self )