/*****************************************************************************
* video.c: transcoding stream output module (video)
*****************************************************************************
- * Copyright (C) 2003-2009 the VideoLAN team
+ * Copyright (C) 2003-2009 VLC authors and VideoLAN
* $Id$
*
* Authors: Laurent Aimar <fenrir@via.ecp.fr>
* Jean-Paul Saman <jpsaman #_at_# m2x dot nl>
* Antoine Cellerier <dionoea at videolan dot org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
*****************************************************************************/
/*****************************************************************************
 * Preamble
 *****************************************************************************/
#include <vlc_spu.h>
#include <vlc_modules.h>
-#define ENC_FRAMERATE (25 * 1001 + .5)
-#define ENC_FRAMERATE_BASE 1001
+#define ENC_FRAMERATE (25 * 1000)
+#define ENC_FRAMERATE_BASE 1000
struct decoder_owner_sys_t
{
{
sout_stream_sys_t *p_sys = (sout_stream_sys_t*)obj;
sout_stream_id_t *id = p_sys->id_video;
- picture_t *p_pic;
+ picture_t *p_pic = NULL;
int canc = vlc_savecancel ();
+ block_t *p_block = NULL;
for( ;; )
{
- block_t *p_block;
vlc_mutex_lock( &p_sys->lock_out );
while( !p_sys->b_abort &&
(p_pic = picture_fifo_Pop( p_sys->pp_pics )) == NULL )
vlc_cond_wait( &p_sys->cond, &p_sys->lock_out );
- if( p_sys->b_abort )
+ if( p_sys->b_abort && !p_pic )
{
vlc_mutex_unlock( &p_sys->lock_out );
break;
}
vlc_mutex_unlock( &p_sys->lock_out );
- p_block = id->p_encoder->pf_encode_video( id->p_encoder, p_pic );
+ if( p_pic )
+ {
+ p_block = id->p_encoder->pf_encode_video( id->p_encoder, p_pic );
+
+ vlc_mutex_lock( &p_sys->lock_out );
+ block_ChainAppend( &p_sys->p_buffers, p_block );
+
+ vlc_mutex_unlock( &p_sys->lock_out );
+ picture_Release( p_pic );
+ }
vlc_mutex_lock( &p_sys->lock_out );
+ if( p_sys->b_abort )
+ {
+ vlc_mutex_unlock( &p_sys->lock_out );
+ break;
+ }
+ vlc_mutex_unlock( &p_sys->lock_out );
+ }
+
+ /*Encode what we have in the buffer on closing*/
+ vlc_mutex_lock( &p_sys->lock_out );
+ while( (p_pic = picture_fifo_Pop( p_sys->pp_pics )) != NULL )
+ {
+ p_block = id->p_encoder->pf_encode_video( id->p_encoder, p_pic );
+
block_ChainAppend( &p_sys->p_buffers, p_block );
- vlc_mutex_unlock( &p_sys->lock_out );
picture_Release( p_pic );
}
- block_ChainRelease( p_sys->p_buffers );
+ /*Now flush encoder*/
+ do {
+ p_block = id->p_encoder->pf_encode_video(id->p_encoder, NULL );
+ block_ChainAppend( &p_sys->p_buffers, p_block );
+ } while( p_block );
+
+ vlc_mutex_unlock( &p_sys->lock_out );
+
vlc_restorecancel (canc);
return NULL;
? id->p_encoder->fmt_out.video.i_height
: id->p_decoder->fmt_in.video.i_height
? id->p_decoder->fmt_in.video.i_height : 16;
- id->p_encoder->fmt_in.video.i_frame_rate = ENC_FRAMERATE;
- id->p_encoder->fmt_in.video.i_frame_rate_base = ENC_FRAMERATE_BASE;
+ id->p_encoder->fmt_in.video.i_visible_width =
+ id->p_encoder->fmt_out.video.i_visible_width
+ ? id->p_encoder->fmt_out.video.i_visible_width
+ : id->p_decoder->fmt_in.video.i_visible_width
+ ? id->p_decoder->fmt_in.video.i_visible_width : id->p_encoder->fmt_in.video.i_width;
+ id->p_encoder->fmt_in.video.i_visible_height =
+ id->p_encoder->fmt_out.video.i_visible_height
+ ? id->p_encoder->fmt_out.video.i_visible_height
+ : id->p_decoder->fmt_in.video.i_visible_height
+ ? id->p_decoder->fmt_in.video.i_visible_height : id->p_encoder->fmt_in.video.i_height;
+ id->p_encoder->fmt_in.video.i_frame_rate = id->p_decoder->fmt_out.video.i_frame_rate;
+ id->p_encoder->fmt_in.video.i_frame_rate_base = id->p_decoder->fmt_out.video.i_frame_rate_base;
id->p_encoder->i_threads = p_sys->i_threads;
id->p_encoder->p_cfg = p_sys->p_video_cfg;
static void transcode_video_filter_init( sout_stream_t *p_stream,
sout_stream_id_t *id )
{
- const es_format_t *p_fmt_out = &id->p_decoder->fmt_out;
+ es_format_t *p_fmt_out = &id->p_decoder->fmt_out;
+ id->p_encoder->fmt_in.video.i_chroma = id->p_encoder->fmt_in.i_codec;
id->p_f_chain = filter_chain_New( p_stream, "video filter2",
false,
p_fmt_out = filter_chain_GetFmtOut( id->p_f_chain );
}
+ /* Check that we have visible_width/height*/
+ if( !p_fmt_out->video.i_visible_height )
+ p_fmt_out->video.i_visible_height = p_fmt_out->video.i_height;
+ if( !p_fmt_out->video.i_visible_width )
+ p_fmt_out->video.i_visible_width = p_fmt_out->video.i_width;
+
if( p_stream->p_sys->psz_vf2 )
{
- const es_format_t *p_fmt_out;
id->p_uf_chain = filter_chain_New( p_stream, "video filter2",
true,
transcode_video_filter_allocation_init,
transcode_video_filter_allocation_clear,
p_stream->p_sys );
- filter_chain_Reset( id->p_uf_chain, &id->p_encoder->fmt_in,
+ filter_chain_Reset( id->p_uf_chain, p_fmt_out,
&id->p_encoder->fmt_in );
filter_chain_AppendFromString( id->p_uf_chain, p_stream->p_sys->psz_vf2 );
p_fmt_out = filter_chain_GetFmtOut( id->p_uf_chain );
}
-/* Take care of the scaling and chroma conversions.
- *
- * XXX: Shouldn't this really be after p_uf_chain, not p_f_chain,
- * in case p_uf_chain changes the format?
- */
+/* Take care of the scaling and chroma conversions. */
static void conversion_video_filter_append( sout_stream_id_t *id )
{
const es_format_t *p_fmt_out = &id->p_decoder->fmt_out;
if( id->p_f_chain )
p_fmt_out = filter_chain_GetFmtOut( id->p_f_chain );
+ if( id->p_uf_chain )
+ p_fmt_out = filter_chain_GetFmtOut( id->p_uf_chain );
if( ( p_fmt_out->video.i_chroma != id->p_encoder->fmt_in.video.i_chroma ) ||
( p_fmt_out->video.i_width != id->p_encoder->fmt_in.video.i_width ) ||
/* Calculate scaling
* width/height of source */
- int i_src_width = p_fmt_out->video.i_width;
- int i_src_height = p_fmt_out->video.i_height;
+ int i_src_visible_width = p_fmt_out->video.i_visible_width;
+ int i_src_visible_height = p_fmt_out->video.i_visible_height;
+
+ if (i_src_visible_width == 0)
+ i_src_visible_width = p_fmt_out->video.i_width;
+ if (i_src_visible_height == 0)
+ i_src_visible_height = p_fmt_out->video.i_height;
+
/* with/height scaling */
float f_scale_width = 1;
float f_scale_height = 1;
- /* width/height of output stream */
- int i_dst_width;
- int i_dst_height;
-
/* aspect ratio */
float f_aspect = (double)p_fmt_out->video.i_sar_num *
p_fmt_out->video.i_width /
msg_Dbg( p_stream, "decoder aspect is %f:1", f_aspect );
/* Change f_aspect from source frame to source pixel */
- f_aspect = f_aspect * i_src_height / i_src_width;
+ f_aspect = f_aspect * i_src_visible_height / i_src_visible_width;
msg_Dbg( p_stream, "source pixel aspect is %f:1", f_aspect );
/* Calculate scaling factor for specified parameters */
/* Global scaling. Make sure width will remain a factor of 16 */
float f_real_scale;
int i_new_height;
- int i_new_width = i_src_width * p_sys->f_scale;
+ int i_new_width = i_src_visible_width * p_sys->f_scale;
if( i_new_width % 16 <= 7 && i_new_width >= 16 )
i_new_width -= i_new_width % 16;
else
i_new_width += 16 - i_new_width % 16;
- f_real_scale = (float)( i_new_width ) / (float) i_src_width;
+ f_real_scale = (float)( i_new_width ) / (float) i_src_visible_width;
- i_new_height = __MAX( 16, i_src_height * (float)f_real_scale );
+ i_new_height = __MAX( 16, i_src_visible_height * (float)f_real_scale );
f_scale_width = f_real_scale;
- f_scale_height = (float) i_new_height / (float) i_src_height;
+ f_scale_height = (float) i_new_height / (float) i_src_visible_height;
}
else if( id->p_encoder->fmt_out.video.i_width > 0 &&
id->p_encoder->fmt_out.video.i_height <= 0 )
{
/* Only width specified */
- f_scale_width = (float)id->p_encoder->fmt_out.video.i_width/i_src_width;
+ f_scale_width = (float)id->p_encoder->fmt_out.video.i_width/i_src_visible_width;
f_scale_height = f_scale_width;
}
else if( id->p_encoder->fmt_out.video.i_width <= 0 &&
id->p_encoder->fmt_out.video.i_height > 0 )
{
/* Only height specified */
- f_scale_height = (float)id->p_encoder->fmt_out.video.i_height/i_src_height;
+ f_scale_height = (float)id->p_encoder->fmt_out.video.i_height/i_src_visible_height;
f_scale_width = f_scale_height;
}
else if( id->p_encoder->fmt_out.video.i_width > 0 &&
id->p_encoder->fmt_out.video.i_height > 0 )
{
/* Width and height specified */
- f_scale_width = (float)id->p_encoder->fmt_out.video.i_width/i_src_width;
- f_scale_height = (float)id->p_encoder->fmt_out.video.i_height/i_src_height;
+ f_scale_width = (float)id->p_encoder->fmt_out.video.i_width/i_src_visible_width;
+ f_scale_height = (float)id->p_encoder->fmt_out.video.i_height/i_src_visible_height;
}
/* check maxwidth and maxheight */
if( p_sys->i_maxwidth && f_scale_width > (float)p_sys->i_maxwidth /
- i_src_width )
+ i_src_visible_width )
{
- f_scale_width = (float)p_sys->i_maxwidth / i_src_width;
+ f_scale_width = (float)p_sys->i_maxwidth / i_src_visible_width;
}
if( p_sys->i_maxheight && f_scale_height > (float)p_sys->i_maxheight /
- i_src_height )
+ i_src_visible_height )
{
- f_scale_height = (float)p_sys->i_maxheight / i_src_height;
+ f_scale_height = (float)p_sys->i_maxheight / i_src_visible_height;
}
/* Calculate width, height from scaling
* Make sure its multiple of 2
*/
- i_dst_width = 2 * (int)(f_scale_width*i_src_width/2+0.5);
- i_dst_height = 2 * (int)(f_scale_height*i_src_height/2+0.5);
+ /* width/height of output stream */
+ int i_dst_visible_width = 2 * (int)(f_scale_width*i_src_visible_width/2+0.5);
+ int i_dst_visible_height = 2 * (int)(f_scale_height*i_src_visible_height/2+0.5);
+ int i_dst_width = 2 * (int)(f_scale_width*p_fmt_out->video.i_width/2+0.5);
+ int i_dst_height = 2 * (int)(f_scale_height*p_fmt_out->video.i_height/2+0.5);
/* Change aspect ratio from scaled pixel to output frame */
- f_aspect = f_aspect * i_dst_width / i_dst_height;
+ f_aspect = f_aspect * i_dst_visible_width / i_dst_visible_height;
/* Store calculated values */
- id->p_encoder->fmt_out.video.i_width =
- id->p_encoder->fmt_out.video.i_visible_width = i_dst_width;
- id->p_encoder->fmt_out.video.i_height =
- id->p_encoder->fmt_out.video.i_visible_height = i_dst_height;
+ id->p_encoder->fmt_out.video.i_width = i_dst_width;
+ id->p_encoder->fmt_out.video.i_visible_width = i_dst_visible_width;
+ id->p_encoder->fmt_out.video.i_height = i_dst_height;
+ id->p_encoder->fmt_out.video.i_visible_height = i_dst_visible_height;
- id->p_encoder->fmt_in.video.i_width =
- id->p_encoder->fmt_in.video.i_visible_width = i_dst_width;
- id->p_encoder->fmt_in.video.i_height =
- id->p_encoder->fmt_in.video.i_visible_height = i_dst_height;
+ id->p_encoder->fmt_in.video.i_width = i_dst_width;
+ id->p_encoder->fmt_in.video.i_visible_width = i_dst_visible_width;
+ id->p_encoder->fmt_in.video.i_height = i_dst_height;
+ id->p_encoder->fmt_in.video.i_visible_height = i_dst_visible_height;
msg_Dbg( p_stream, "source %ix%i, destination %ix%i",
- i_src_width, i_src_height,
- i_dst_width, i_dst_height
+ i_src_visible_width, i_src_visible_height,
+ i_dst_visible_width, i_dst_visible_height
);
/* Handle frame rate conversion */
id->p_encoder->fmt_in.video.i_frame_rate_base =
id->p_encoder->fmt_out.video.i_frame_rate_base;
+ vlc_ureduce( &id->p_encoder->fmt_in.video.i_frame_rate,
+ &id->p_encoder->fmt_in.video.i_frame_rate_base,
+ id->p_encoder->fmt_in.video.i_frame_rate,
+ id->p_encoder->fmt_in.video.i_frame_rate_base,
+ 0 );
+ msg_Dbg( p_stream, "source fps %d/%d, destination %d/%d",
+ id->p_decoder->fmt_out.video.i_frame_rate,
+ id->p_decoder->fmt_out.video.i_frame_rate_base,
+ id->p_encoder->fmt_in.video.i_frame_rate,
+ id->p_encoder->fmt_in.video.i_frame_rate_base );
+
+ id->i_output_frame_interval = id->p_encoder->fmt_out.video.i_frame_rate_base * CLOCK_FREQ / id->p_encoder->fmt_out.video.i_frame_rate;
+ id->i_input_frame_interval = id->p_decoder->fmt_out.video.i_frame_rate_base * CLOCK_FREQ / id->p_decoder->fmt_out.video.i_frame_rate;
+ msg_Info( p_stream, "input interval %d (base %d) output interval %d (base %d)", id->i_input_frame_interval, id->p_decoder->fmt_out.video.i_frame_rate_base,
+ id->i_output_frame_interval, id->p_encoder->fmt_in.video.i_frame_rate_base );
+
date_Init( &id->interpolated_pts,
- id->p_encoder->fmt_out.video.i_frame_rate,
- id->p_encoder->fmt_out.video.i_frame_rate_base );
+ id->p_decoder->fmt_out.video.i_frame_rate,
+ 1 );
+ date_Init( &id->next_output_pts,
+ id->p_encoder->fmt_in.video.i_frame_rate,
+ 1 );
/* Check whether a particular aspect ratio was requested */
if( id->p_encoder->fmt_out.video.i_sar_num <= 0 ||
{
vlc_ureduce( &id->p_encoder->fmt_out.video.i_sar_num,
&id->p_encoder->fmt_out.video.i_sar_den,
- (uint64_t)p_fmt_out->video.i_sar_num * i_src_width * i_dst_height,
- (uint64_t)p_fmt_out->video.i_sar_den * i_src_height * i_dst_width,
+ (uint64_t)p_fmt_out->video.i_sar_num * i_src_visible_width * i_dst_visible_height,
+ (uint64_t)p_fmt_out->video.i_sar_den * i_src_visible_height * i_dst_visible_width,
0 );
}
else
id->p_encoder->fmt_out.video.i_sar_num * id->p_encoder->fmt_out.video.i_width,
id->p_encoder->fmt_out.video.i_sar_den * id->p_encoder->fmt_out.video.i_height );
- id->p_encoder->fmt_in.video.i_chroma = id->p_encoder->fmt_in.i_codec;
}
static int transcode_video_encoder_open( sout_stream_t *p_stream,
vlc_cond_destroy( &p_stream->p_sys->cond );
picture_fifo_Delete( p_stream->p_sys->pp_pics );
+ block_ChainRelease( p_stream->p_sys->p_buffers );
p_stream->p_sys->pp_pics = NULL;
}
filter_chain_Delete( id->p_uf_chain );
}
-static void OutputFrame( sout_stream_sys_t *p_sys, picture_t *p_pic, bool b_need_duplicate, sout_stream_t *p_stream, sout_stream_id_t *id, block_t **out )
+static void OutputFrame( sout_stream_sys_t *p_sys, picture_t *p_pic, sout_stream_t *p_stream, sout_stream_id_t *id, block_t **out )
{
+
picture_t *p_pic2 = NULL;
+ bool b_need_duplicate=false;
+ /* If input pts + input_frame_interval is lower than next_output_pts - output_frame_interval
+ * Then the future input frame should fit better and we can drop this one
+ *
+ * Duplication need is checked in OutputFrame */
+ if( ( p_pic->date + (mtime_t)id->i_input_frame_interval ) <
+ ( date_Get( &id->next_output_pts ) ) )
+ {
+#if 0
+ msg_Dbg( p_stream, "dropping frame (%"PRId64" + %"PRId64" vs %"PRId64")",
+ p_pic->date, id->i_input_frame_interval, date_Get(&id->next_output_pts) );
+#endif
+ picture_Release( p_pic );
+ return;
+ }
/*
* Encoding
*/
-
/* Check if we have a subpicture to overlay */
if( p_sys->p_spu )
{
subpicture_Delete( p_subpic );
}
}
+ /*This pts is handled, increase clock to next one*/
+ date_Increment( &id->next_output_pts, id->p_encoder->fmt_in.video.i_frame_rate_base );
if( p_sys->i_threads == 0 )
{
block_ChainAppend( out, p_block );
}
- if( p_sys->b_master_sync )
+ /* we need to duplicate while next_output_pts + output_frame_interval < input_pts (next input pts)*/
+ b_need_duplicate = ( date_Get( &id->next_output_pts ) + id->i_output_frame_interval ) <
+ ( date_Get( &id->interpolated_pts ) );
+
+ if( p_sys->i_threads )
{
- mtime_t i_pts = date_Get( &id->interpolated_pts ) + 1;
- mtime_t i_video_drift = p_pic->date - i_pts;
- if (unlikely ( i_video_drift > MASTER_SYNC_MAX_DRIFT
- || i_video_drift < -MASTER_SYNC_MAX_DRIFT ) )
+ if( p_sys->b_master_sync )
{
- msg_Dbg( p_stream,
- "drift is too high (%"PRId64"), resetting master sync",
- i_video_drift );
- date_Set( &id->interpolated_pts, p_pic->date );
- i_pts = p_pic->date + 1;
+ p_pic2 = video_new_buffer_encoder( id->p_encoder );
+ if( likely( p_pic2 != NULL ) )
+ picture_Copy( p_pic2, p_pic );
}
- date_Increment( &id->interpolated_pts, 1 );
-
- if( unlikely( b_need_duplicate ) )
- {
-
- if( p_sys->i_threads >= 1 )
- {
- /* We can't modify the picture, we need to duplicate it */
- p_pic2 = video_new_buffer_encoder( id->p_encoder );
- if( likely( p_pic2 != NULL ) )
- {
- picture_Copy( p_pic2, p_pic );
- p_pic2->date = i_pts;
- }
- }
- else
- {
- block_t *p_block;
- p_pic->date = i_pts;
- p_block = id->p_encoder->pf_encode_video(id->p_encoder, p_pic);
- block_ChainAppend( out, p_block );
- }
- }
+ vlc_mutex_lock( &p_sys->lock_out );
+ picture_fifo_Push( p_sys->pp_pics, p_pic );
+ vlc_cond_signal( &p_sys->cond );
+ vlc_mutex_unlock( &p_sys->lock_out );
}
- if( p_sys->i_threads == 0 )
- {
- picture_Release( p_pic );
- }
- else
+ while( (p_sys->b_master_sync && b_need_duplicate ))
{
- vlc_mutex_lock( &p_sys->lock_out );
- picture_fifo_Push( p_sys->pp_pics, p_pic );
- if( p_pic2 != NULL )
+ if( p_sys->i_threads >= 1 )
{
- picture_fifo_Push( p_sys->pp_pics, p_pic2 );
+ picture_t *p_tmp = NULL;
+ /* We can't modify the picture, we need to duplicate it */
+ p_tmp = video_new_buffer_encoder( id->p_encoder );
+ if( likely( p_tmp != NULL ) )
+ {
+ picture_Copy( p_tmp, p_pic2 );
+ p_tmp->date = date_Get( &id->next_output_pts );
+ vlc_mutex_lock( &p_sys->lock_out );
+ picture_fifo_Push( p_sys->pp_pics, p_tmp );
+ vlc_cond_signal( &p_sys->cond );
+ vlc_mutex_unlock( &p_sys->lock_out );
+ }
}
- vlc_cond_signal( &p_sys->cond );
- vlc_mutex_unlock( &p_sys->lock_out );
+ else
+ {
+ block_t *p_block;
+ p_pic->date = date_Get( &id->next_output_pts );
+ p_block = id->p_encoder->pf_encode_video(id->p_encoder, p_pic);
+ block_ChainAppend( out, p_block );
+ }
+#if 0
+ msg_Dbg( p_stream, "duplicated frame");
+#endif
+ date_Increment( &id->next_output_pts, id->p_encoder->fmt_in.video.i_frame_rate_base );
+ b_need_duplicate = ( date_Get( &id->next_output_pts ) + id->i_output_frame_interval ) <
+ ( date_Get( &id->interpolated_pts ) );
}
+
+ if( p_sys->i_threads && p_pic2 )
+ picture_Release( p_pic2 );
+ else if ( p_sys->i_threads == 0 )
+ picture_Release( p_pic );
}
int transcode_video_process( sout_stream_t *p_stream, sout_stream_id_t *id,
block_t *in, block_t **out )
{
sout_stream_sys_t *p_sys = p_stream->p_sys;
- bool b_need_duplicate = false;
- picture_t *p_pic;
+ picture_t *p_pic = NULL;
*out = NULL;
if( unlikely( in == NULL ) )
}
else
{
- /*
- * FIXME: we need EncoderThread() to flush buffers and signal us
- * when it's done so we can send the last frames to the chain
- */
+ msg_Dbg( p_stream, "Flushing thread and waiting that");
+ vlc_mutex_lock( &p_stream->p_sys->lock_out );
+ p_stream->p_sys->b_abort = true;
+ vlc_cond_signal( &p_stream->p_sys->cond );
+ vlc_mutex_unlock( &p_stream->p_sys->lock_out );
+
+ vlc_join( p_stream->p_sys->thread, NULL );
+ vlc_mutex_lock( &p_sys->lock_out );
+ *out = p_sys->p_buffers;
+ p_sys->p_buffers = NULL;
+ vlc_mutex_unlock( &p_sys->lock_out );
+
+ msg_Dbg( p_stream, "Flushing done");
}
return VLC_SUCCESS;
}
if( p_stream->p_sout->i_out_pace_nocontrol && p_sys->b_hurry_up )
{
mtime_t current_date = mdate();
- if( unlikely( current_date + 50000 > p_pic->date ) )
+ if( unlikely( (current_date - 50000) > p_pic->date ) )
{
msg_Dbg( p_stream, "late picture skipped (%"PRId64")",
- current_date + 50000 - p_pic->date );
+ current_date - 50000 - p_pic->date );
picture_Release( p_pic );
continue;
}
}
- if( p_sys->b_master_sync )
- {
- mtime_t i_master_drift = p_sys->i_master_drift;
- mtime_t i_pts = date_Get( &id->interpolated_pts ) + 1;
- mtime_t i_video_drift = p_pic->date - i_pts;
-
- if ( unlikely( i_video_drift > MASTER_SYNC_MAX_DRIFT
- || i_video_drift < -MASTER_SYNC_MAX_DRIFT ) )
- {
- msg_Dbg( p_stream,
- "drift is too high (%"PRId64", resetting master sync",
- i_video_drift );
- date_Set( &id->interpolated_pts, p_pic->date );
- i_pts = p_pic->date + 1;
- }
- i_video_drift = p_pic->date - i_pts;
- b_need_duplicate = false;
-
- /* Set the pts of the frame being encoded */
- p_pic->date = i_pts;
-
- if( unlikely( i_video_drift < (i_master_drift - 50000) ) )
- {
-#if 0
- msg_Dbg( p_stream, "dropping frame (%i)",
- (int)(i_video_drift - i_master_drift) );
-#endif
- picture_Release( p_pic );
- continue;
- }
- else if( unlikely( i_video_drift > (i_master_drift + 50000) ) )
- {
-#if 0
- msg_Dbg( p_stream, "adding frame (%i)",
- (int)(i_video_drift - i_master_drift) );
-#endif
- b_need_duplicate = true;
- }
- }
if( unlikely (
id->p_encoder->p_module &&
!video_format_IsSimilar( &p_sys->fmt_input_video, &id->p_decoder->fmt_out.video )
}
}
+ /*Input lipsync and drop check */
+ if( p_sys->b_master_sync )
+ {
+ /* How much audio has drifted */
+ mtime_t i_master_drift = p_sys->i_master_drift;
+
+ /* This is the pts input should have now with constant frame rate */
+ mtime_t i_pts = date_Get( &id->interpolated_pts );
+
+ /* How much video pts is ahead of calculated pts */
+ mtime_t i_video_drift = p_pic->date - i_pts;
+
+ /* Check that we are having lipsync with input here */
+ if( unlikely ( ( (i_video_drift - i_master_drift ) > MASTER_SYNC_MAX_DRIFT
+ || (i_video_drift + i_master_drift ) < -MASTER_SYNC_MAX_DRIFT ) ) )
+ {
+ msg_Warn( p_stream,
+ "video drift too big, resetting sync %"PRId64" to %"PRId64,
+ (i_video_drift + i_master_drift),
+ p_pic->date
+ );
+ date_Set( &id->interpolated_pts, p_pic->date );
+ date_Set( &id->next_output_pts, p_pic->date );
+ i_pts = date_Get( &id->interpolated_pts );
+ }
+
+ /* Set the pts of the frame being encoded */
+ p_pic->date = i_pts;
+
+ /* now take next input pts, pts dates are only enabled if p_module is set*/
+ date_Increment( &id->interpolated_pts, id->p_decoder->fmt_out.video.i_frame_rate_base );
+
+
+ /* If input pts + input_frame_interval is lower than next_output_pts - output_frame_interval
+ * Then the future input frame should fit better and we can drop this one
+ *
+ * Duplication need is checked in OutputFrame */
+ if( ( p_pic->date + (mtime_t)id->i_input_frame_interval ) <
+ ( date_Get( &id->next_output_pts ) ) )
+ {
+#if 0
+ msg_Dbg( p_stream, "dropping frame (%"PRId64" + %"PRId64" vs %"PRId64")",
+ p_pic->date, id->i_input_frame_interval, date_Get(&id->next_output_pts) );
+#endif
+ picture_Release( p_pic );
+ continue;
+ }
+#if 0
+ msg_Dbg( p_stream, "not dropping frame");
+#endif
+
+ /* input calculated pts isn't necessary what pts output should be, so use output pts*/
+ p_pic->date = date_Get( &id->next_output_pts );
+
+
+ }
+
/* Run the filter and output chains; first with the picture,
* and then with NULL as many times as we need until they
* stop outputting frames.
if( !p_user_filtered_pic )
break;
- OutputFrame( p_sys, p_user_filtered_pic, b_need_duplicate, p_stream, id, out );
- b_need_duplicate = false;
+ OutputFrame( p_sys, p_user_filtered_pic, p_stream, id, out );
p_filtered_pic = NULL;
}
if( p_sys->f_fps > 0 )
{
- id->p_encoder->fmt_out.video.i_frame_rate = (p_sys->f_fps * ENC_FRAMERATE_BASE) + 0.5;
+ id->p_encoder->fmt_out.video.i_frame_rate = (p_sys->f_fps * ENC_FRAMERATE_BASE);
id->p_encoder->fmt_out.video.i_frame_rate_base = ENC_FRAMERATE_BASE;
}