if ( codec->coded_frame && codec->coded_frame->pts != AV_NOPTS_VALUE )
{
pkt.pts = av_rescale_q( codec->coded_frame->pts, codec->time_base, stream->time_base );
- mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), "audio stream %d pkt pts %lld frame pts %lld",
+ mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), "audio stream %d pkt pts %"PRId64" frame pts %"PRId64,
stream->index, pkt.pts, codec->coded_frame->pts );
}
pkt.flags |= PKT_FLAG_KEY;
#endif
struct SwsContext *context = sws_getContext( width, height, PIX_FMT_YUYV422,
width, height, video_st->codec->pix_fmt, flags, NULL, NULL, NULL);
- sws_scale( context, input->data, input->linesize, 0, height,
+ sws_scale( context, (const uint8_t* const*) input->data, input->linesize, 0, height,
output->data, output->linesize);
sws_freeContext( context );
#else
if ( c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE )
pkt.pts= av_rescale_q( c->coded_frame->pts, c->time_base, video_st->time_base );
- mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), "video pkt pts %lld frame pts %lld", pkt.pts, c->coded_frame->pts );
+ mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), "video pkt pts %"PRId64" frame pts %"PRId64, pkt.pts, c->coded_frame->pts );
if( c->coded_frame && c->coded_frame->key_frame )
pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index= video_st->index;
}
}
if ( audio_st[0] )
- mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), "audio pts %lld (%f) ", audio_st[0]->pts.val, audio_pts );
+ mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), "audio pts %"PRId64" (%f) ", audio_st[0]->pts.val, audio_pts );
if ( video_st )
- mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), "video pts %lld (%f) ", video_st->pts.val, video_pts );
+ mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), "video pts %"PRId64" (%f) ", video_st->pts.val, video_pts );
mlt_log_debug( MLT_CONSUMER_SERVICE( consumer ), "\n" );
}
{
#if defined(SWSCALE) && (LIBSWSCALE_VERSION_INT >= ((0<<16)+(7<<8)+2))
- int *coefficients;
+ /* sws_getCoefficients() returns const int*; start from the defined default
+    table so the pointer is never indeterminate if no case below matches. */
+ const int *new_coefficients = sws_getCoefficients( SWS_CS_DEFAULT );
int full_range;
int brightness, contrast, saturation;
case 470:
case 601:
case 624:
- coefficients = sws_getCoefficients( SWS_CS_ITU601 );
+ new_coefficients = sws_getCoefficients( SWS_CS_ITU601 );
break;
case 240:
- coefficients = sws_getCoefficients( SWS_CS_SMPTE240M );
+ new_coefficients = sws_getCoefficients( SWS_CS_SMPTE240M );
break;
case 709:
- coefficients = sws_getCoefficients( SWS_CS_ITU709 );
+ new_coefficients = sws_getCoefficients( SWS_CS_ITU709 );
break;
}
- sws_setColorspaceDetails( context, coefficients, full_range, coefficients, full_range,
+ sws_setColorspaceDetails( context, new_coefficients, full_range, new_coefficients, full_range,
brightness, contrast, saturation );
}
#endif
struct SwsContext *context = sws_getContext( width, height, in_fmt,
width, height, out_fmt, flags, NULL, NULL, NULL);
set_luma_transfer( context, colorspace, use_full_range );
- sws_scale( context, input.data, input.linesize, 0, height,
+ sws_scale( context, (const uint8_t* const*) input.data, input.linesize, 0, height,
output.data, output.linesize);
sws_freeContext( context );
#else
{
#if defined(SWSCALE) && (LIBSWSCALE_VERSION_INT >= ((0<<16)+(7<<8)+2))
- int *coefficients;
+ /* sws_getCoefficients() returns const int*; start from the defined default
+    table so the pointer is never indeterminate if no case below matches. */
+ const int *new_coefficients = sws_getCoefficients( SWS_CS_DEFAULT );
int full_range;
int brightness, contrast, saturation;
case 470:
case 601:
case 624:
- coefficients = sws_getCoefficients( SWS_CS_ITU601 );
+ new_coefficients = sws_getCoefficients( SWS_CS_ITU601 );
break;
case 240:
- coefficients = sws_getCoefficients( SWS_CS_SMPTE240M );
+ new_coefficients = sws_getCoefficients( SWS_CS_SMPTE240M );
break;
case 709:
- coefficients = sws_getCoefficients( SWS_CS_ITU709 );
+ new_coefficients = sws_getCoefficients( SWS_CS_ITU709 );
break;
}
- sws_setColorspaceDetails( context, coefficients, full_range, coefficients, full_range,
+ sws_setColorspaceDetails( context, new_coefficients, full_range, new_coefficients, full_range,
brightness, contrast, saturation );
}
#endif
AVPicture output;
avpicture_fill( &output, buffer, PIX_FMT_RGBA, width, height );
set_luma_transfer( context, colorspace, full_range );
- sws_scale( context, frame->data, frame->linesize, 0, height,
+ sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
output.data, output.linesize);
sws_freeContext( context );
}
output.linesize[1] = width >> 1;
output.linesize[2] = width >> 1;
set_luma_transfer( context, colorspace, full_range );
- sws_scale( context, frame->data, frame->linesize, 0, height,
+ sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
output.data, output.linesize);
sws_freeContext( context );
}
AVPicture output;
avpicture_fill( &output, buffer, PIX_FMT_RGB24, width, height );
set_luma_transfer( context, colorspace, full_range );
- sws_scale( context, frame->data, frame->linesize, 0, height,
+ sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
output.data, output.linesize);
sws_freeContext( context );
}
AVPicture output;
avpicture_fill( &output, buffer, PIX_FMT_RGBA, width, height );
set_luma_transfer( context, colorspace, full_range );
- sws_scale( context, frame->data, frame->linesize, 0, height,
+ sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
output.data, output.linesize);
sws_freeContext( context );
}
AVPicture output;
avpicture_fill( &output, buffer, PIX_FMT_YUYV422, width, height );
set_luma_transfer( context, colorspace, full_range );
- sws_scale( context, frame->data, frame->linesize, 0, height,
+ sws_scale( context, (const uint8_t* const*) frame->data, frame->linesize, 0, height,
output.data, output.linesize);
sws_freeContext( context );
}
ret = av_read_frame( context, &pkt );
if ( ret >= 0 && ( pkt.flags & PKT_FLAG_KEY ) && pkt.stream_index == self->video_index )
{
- mlt_log_verbose( MLT_PRODUCER_SERVICE(producer), "first_pts %lld dts %lld pts_dts_delta %d\n", pkt.pts, pkt.dts, (int)(pkt.pts - pkt.dts) );
+ mlt_log_verbose( MLT_PRODUCER_SERVICE(producer), "first_pts %"PRId64" dts %"PRId64" pts_dts_delta %d\n", pkt.pts, pkt.dts, (int)(pkt.pts - pkt.dts) );
self->first_pts = pkt.pts;
toscan = 0;
}
{
timestamp = ( req_position - 0.1 / source_fps ) /
( av_q2d( stream->time_base ) * source_fps );
- mlt_log_verbose( MLT_PRODUCER_SERVICE(producer), "pos %d pts %lld ", req_position, timestamp );
+ mlt_log_verbose( MLT_PRODUCER_SERVICE(producer), "pos %d pts %"PRId64" ", req_position, timestamp );
if ( self->first_pts > 0 )
timestamp += self->first_pts;
else if ( context->start_time != AV_NOPTS_VALUE )
timestamp -= AV_TIME_BASE;
if ( timestamp < 0 )
timestamp = 0;
- mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "seeking timestamp %lld position %d expected %d last_pos %d\n",
+ mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "seeking timestamp %"PRId64" position %d expected %d last_pos %d\n",
timestamp, position, self->video_expected, last_position );
// Seek to the timestamp
{
self->invalid_pts_counter = 0;
}
- mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "pkt.pts %llu req_pos %d cur_pos %d pkt_pos %d\n",
+ mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "pkt.pts %"PRId64" req_pos %d cur_pos %d pkt_pos %d\n",
pkt.pts, req_position, self->current_position, int_position );
}
else
{
int_position = req_position;
}
- mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "pkt.dts %llu req_pos %d cur_pos %d pkt_pos %d\n",
+ mlt_log_debug( MLT_PRODUCER_SERVICE(producer), "pkt.dts %"PRId64" req_pos %d cur_pos %d pkt_pos %d\n",
pkt.dts, req_position, self->current_position, int_position );
// Make a dumb assumption on streams that contain wild timestamps
if ( abs( req_position - int_position ) > 999 )