#include "libavutil/avstring.h"
#include "libavutil/colorspace.h"
+#include "libavutil/display.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
typedef struct VideoPicture {
double pts; // presentation timestamp for this picture
- double target_clock; // av_gettime() time at which this should be displayed ideally
+ double target_clock; // av_gettime_relative() time at which this should be displayed ideally
int64_t pos; // byte position in file
SDL_Overlay *bmp;
int width, height; /* source height & width */
AVStream *video_st;
PacketQueue videoq;
double video_current_pts; // current displayed pts (different from video_clock if frame fifos are used)
- double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
+ double video_current_pts_drift; // video_current_pts - time (av_gettime_relative) at which we updated video_current_pts - used to keep the video pts running between updates
int64_t video_current_pos; // current displayed file pos
VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
int pictq_size, pictq_rindex, pictq_windex;
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif
+static int autorotate = 1;
/* current context */
static int is_full_screen;
SDL_LockMutex(q->mutex);
for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
pkt1 = pkt->next;
- av_free_packet(&pkt->pkt);
+ av_packet_unref(&pkt->pkt);
av_freep(&pkt);
}
q->last_pkt = NULL;
{
AVPacketList *pkt1;
- /* duplicate the packet */
- if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
- return -1;
-
pkt1 = av_malloc(sizeof(AVPacketList));
if (!pkt1)
return -1;
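The av_dup_packet() call removed above was a deep copy; with reference-counted packets the queue node can simply take over the caller's reference, and the consumer releases it later with av_packet_unref(). A minimal sketch of that ownership model, assuming avplay's existing PacketQueue and packet_queue_put() (which copies the AVPacket struct into the node, as the demuxing thread in this file already relies on); enqueue_demuxed is a hypothetical helper, error handling trimmed:

static int enqueue_demuxed(PacketQueue *q, AVFormatContext *ic)
{
    AVPacket pkt;

    int ret = av_read_frame(ic, &pkt);  /* pkt now owns a refcounted buffer */
    if (ret < 0)
        return ret;
    /* the queue copies the AVPacket struct and takes over its reference;
     * no deep copy of the payload is needed any more */
    return packet_queue_put(q, &pkt);
}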
#define BPP 1
-static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
+static void blend_subrect(uint8_t *dst[4], uint16_t dst_linesize[4],
+ const AVSubtitleRect *rect, int imgw, int imgh)
{
int wrap, wrap3, width2, skip2;
int y, u, v, a, u1, v1, a1, w, h;
dsth = av_clip(rect->h, 0, imgh);
dstx = av_clip(rect->x, 0, imgw - dstw);
dsty = av_clip(rect->y, 0, imgh - dsth);
- lum = dst->data[0] + dsty * dst->linesize[0];
- cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
- cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
+ /* SDL has U and V inverted */
+ lum = dst[0] + dsty * dst_linesize[0];
+ cb = dst[2] + (dsty >> 1) * dst_linesize[2];
+ cr = dst[1] + (dsty >> 1) * dst_linesize[1];
width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
skip2 = dstx >> 1;
- wrap = dst->linesize[0];
- wrap3 = rect->pict.linesize[0];
- p = rect->pict.data[0];
- pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
+ wrap = dst_linesize[0];
+ wrap3 = rect->linesize[0];
+ p = rect->data[0];
+ pal = (const uint32_t *)rect->data[1]; /* Now in YCrCb! */
if (dsty & 1) {
lum += dstx;
}
p += wrap3 - dstw * BPP;
lum += wrap - dstw - dstx;
- cb += dst->linesize[1] - width2 - skip2;
- cr += dst->linesize[2] - width2 - skip2;
+ cb += dst_linesize[2] - width2 - skip2;
+ cr += dst_linesize[1] - width2 - skip2;
}
for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
lum += dstx;
}
p += wrap3 + (wrap3 - dstw * BPP);
lum += wrap + (wrap - dstw - dstx);
- cb += dst->linesize[1] - width2 - skip2;
- cr += dst->linesize[2] - width2 - skip2;
+ cb += dst_linesize[2] - width2 - skip2;
+ cr += dst_linesize[1] - width2 - skip2;
}
/* handle odd height */
if (h) {
{
VideoPicture *vp;
SubPicture *sp;
- AVPicture pict;
float aspect_ratio;
int width, height, x, y;
SDL_Rect rect;
{
SDL_LockYUVOverlay (vp->bmp);
- pict.data[0] = vp->bmp->pixels[0];
- pict.data[1] = vp->bmp->pixels[2];
- pict.data[2] = vp->bmp->pixels[1];
-
- pict.linesize[0] = vp->bmp->pitches[0];
- pict.linesize[1] = vp->bmp->pitches[2];
- pict.linesize[2] = vp->bmp->pitches[1];
-
for (i = 0; i < sp->sub.num_rects; i++)
- blend_subrect(&pict, sp->sub.rects[i],
- vp->bmp->w, vp->bmp->h);
+ blend_subrect(vp->bmp->pixels, vp->bmp->pitches,
+ sp->sub.rects[i], vp->bmp->w, vp->bmp->h);
SDL_UnlockYUVOverlay (vp->bmp);
}
/* to be more precise, we take into account the time spent since
the last buffer computation */
if (audio_callback_time) {
- time_diff = av_gettime() - audio_callback_time;
+ time_diff = av_gettime_relative() - audio_callback_time;
delay -= (time_diff * s->sdl_sample_rate) / 1000000;
}
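A quick worked example of the correction above (illustrative numbers, not from the patch): time_diff is in microseconds and sdl_sample_rate in Hz, so their product divided by 1,000,000 is the number of samples played out since the audio callback fired.

/* e.g. time_diff = 20000 us and s->sdl_sample_rate = 48000 Hz:
 *     20000 * 48000 / 1000000 = 960 samples,
 * which are subtracted from the reported delay. */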
if (is->paused) {
return is->video_current_pts;
} else {
- return is->video_current_pts_drift + av_gettime() / 1000000.0;
+ return is->video_current_pts_drift + av_gettime_relative() / 1000000.0;
}
}
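The drift arithmetic used here is easier to see in isolation. A minimal sketch, assuming only av_gettime_relative() from libavutil/time.h; set_clock()/get_clock() are hypothetical names, not functions from avplay:

#include "libavutil/time.h"

static double clock_drift;   /* pts minus wall clock, in seconds */

static void set_clock(double pts)
{
    /* remember the offset between the media timestamp and the wall clock */
    clock_drift = pts - av_gettime_relative() / 1000000.0;
}

static double get_clock(void)
{
    /* stored offset plus the current wall clock: the returned pts keeps
     * advancing in real time between set_clock() updates */
    return clock_drift + av_gettime_relative() / 1000000.0;
}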
static double get_external_clock(VideoState *is)
{
int64_t ti;
- ti = av_gettime();
+ ti = av_gettime_relative();
return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
}
static void stream_pause(VideoState *is)
{
if (is->paused) {
- is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
+ is->frame_timer += av_gettime_relative() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
if (is->read_pause_return != AVERROR(ENOSYS)) {
- is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
+ is->video_current_pts = is->video_current_pts_drift + av_gettime_relative() / 1000000.0;
}
- is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
+ is->video_current_pts_drift = is->video_current_pts - av_gettime_relative() / 1000000.0;
}
is->paused = !is->paused;
}
static double compute_target_time(double frame_current_pts, VideoState *is)
{
- double delay, sync_threshold, diff;
+ double delay, sync_threshold, diff = 0;
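Editorial note: diff is initialized to 0 here because it is assigned only in the audio/external-clock synchronisation branch of this function, yet it is passed unconditionally to the av_log() call further down; without the initializer an uninitialized value could be logged.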
/* compute nominal delay */
delay = frame_current_pts - is->frame_last_pts;
}
is->frame_timer += delay;
- av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
+ av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
delay, frame_current_pts, -diff);
return is->frame_timer;
if (is->pictq_size == 0) {
// nothing to do, no picture to display in the queue
} else {
- double time = av_gettime() / 1000000.0;
+ double time = av_gettime_relative() / 1000000.0;
double next_target;
/* dequeue the picture */
vp = &is->pictq[is->pictq_rindex];
int aqsize, vqsize, sqsize;
double av_diff;
- cur_time = av_gettime();
+ cur_time = av_gettime_relative();
if (!last_time || (cur_time - last_time) >= 30000) {
aqsize = 0;
vqsize = 0;
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
VideoPicture *vp;
-#if CONFIG_AVFILTER
- AVPicture pict_src;
-#else
+#if !CONFIG_AVFILTER
int dst_pix_fmt = AV_PIX_FMT_YUV420P;
#endif
/* wait until we have space to put a new picture */
/* if the frame is not skipped, then display it */
if (vp->bmp) {
- AVPicture pict = { { 0 } };
+ uint8_t *data[4];
+ int linesize[4];
/* get a pointer on the bitmap */
SDL_LockYUVOverlay (vp->bmp);
- pict.data[0] = vp->bmp->pixels[0];
- pict.data[1] = vp->bmp->pixels[2];
- pict.data[2] = vp->bmp->pixels[1];
+ data[0] = vp->bmp->pixels[0];
+ data[1] = vp->bmp->pixels[2];
+ data[2] = vp->bmp->pixels[1];
- pict.linesize[0] = vp->bmp->pitches[0];
- pict.linesize[1] = vp->bmp->pitches[2];
- pict.linesize[2] = vp->bmp->pitches[1];
+ linesize[0] = vp->bmp->pitches[0];
+ linesize[1] = vp->bmp->pitches[2];
+ linesize[2] = vp->bmp->pitches[1];
#if CONFIG_AVFILTER
- pict_src.data[0] = src_frame->data[0];
- pict_src.data[1] = src_frame->data[1];
- pict_src.data[2] = src_frame->data[2];
-
- pict_src.linesize[0] = src_frame->linesize[0];
- pict_src.linesize[1] = src_frame->linesize[1];
- pict_src.linesize[2] = src_frame->linesize[2];
-
// FIXME use direct rendering
- av_picture_copy(&pict, &pict_src,
- vp->pix_fmt, vp->width, vp->height);
+ av_image_copy(data, linesize, src_frame->data, src_frame->linesize,
+ vp->pix_fmt, vp->width, vp->height);
#else
av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
exit(1);
}
sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
- 0, vp->height, pict.data, pict.linesize);
+ 0, vp->height, data, linesize);
#endif
/* update the bitmap content */
SDL_UnlockYUVOverlay(vp->bmp);
init_pts_correction(&is->pts_ctx);
is->frame_last_pts = AV_NOPTS_VALUE;
is->frame_last_delay = 0;
- is->frame_timer = (double)av_gettime() / 1000000.0;
+ is->frame_timer = (double)av_gettime_relative() / 1000000.0;
is->skip_frames = 1;
is->skip_frames_index = 0;
return 0;
char sws_flags_str[128];
char buffersrc_args[256];
int ret;
- AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
+ AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter;
AVCodecContext *codec = is->video_st->codec;
snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
"out", NULL, NULL, graph)) < 0)
return ret;
- if ((ret = avfilter_graph_create_filter(&filt_format,
- avfilter_get_by_name("format"),
- "format", "yuv420p", NULL, graph)) < 0)
- return ret;
- if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
- return ret;
-
+ last_filter = filt_out;
+
+/* Note: this macro adds a filter before the last added filter, so the
+ * filters are processed in the reverse of the order in which they are added */
+#define INSERT_FILT(name, arg) do { \
+ AVFilterContext *filt_ctx; \
+ \
+ ret = avfilter_graph_create_filter(&filt_ctx, \
+ avfilter_get_by_name(name), \
+ "avplay_" name, arg, NULL, graph); \
+ if (ret < 0) \
+ return ret; \
+ \
+ ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
+ if (ret < 0) \
+ return ret; \
+ \
+ last_filter = filt_ctx; \
+} while (0)
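An illustrative expansion of that ordering (not part of the patch): each INSERT_FILT() links its new filter in front of last_filter, which starts out as filt_out, so filters added later take effect earlier in the chain.

/* after
 *     INSERT_FILT("format",    "yuv420p");
 *     INSERT_FILT("transpose", "dir=clock");
 * the links are  transpose -> format -> filt_out, and filt_src (or the tail
 * of a user-supplied -vf chain) is attached to last_filter == transpose
 * below, so frames are transposed first and converted to yuv420p last. */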
+
+ INSERT_FILT("format", "yuv420p");
+
+ if (autorotate) {
+ uint8_t* displaymatrix = av_stream_get_side_data(is->video_st,
+ AV_PKT_DATA_DISPLAYMATRIX, NULL);
+ if (displaymatrix) {
+ double rot = av_display_rotation_get((int32_t*) displaymatrix);
+ if (rot < -135 || rot > 135) {
+ INSERT_FILT("vflip", NULL);
+ INSERT_FILT("hflip", NULL);
+ } else if (rot < -45) {
+ INSERT_FILT("transpose", "dir=clock");
+ } else if (rot > 45) {
+ INSERT_FILT("transpose", "dir=cclock");
+ }
+ }
+ }
if (vfilters) {
AVFilterInOut *outputs = avfilter_inout_alloc();
outputs->next = NULL;
inputs->name = av_strdup("out");
- inputs->filter_ctx = filt_format;
+ inputs->filter_ctx = last_filter;
inputs->pad_idx = 0;
inputs->next = NULL;
if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
return ret;
} else {
- if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
+ if ((ret = avfilter_link(filt_src, 0, last_filter, 0)) < 0)
return ret;
}
AVFilterContext *filt_out = NULL, *filt_in = NULL;
int last_w = is->video_st->codec->width;
int last_h = is->video_st->codec->height;
+ if (!graph) {
+ av_frame_free(&frame);
+ return AVERROR(ENOMEM);
+ }
if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
goto the_end;
filt_out = is->out_video_filter;
#endif
+ if (!frame) {
+#if CONFIG_AVFILTER
+ avfilter_graph_free(&graph);
+#endif
+ return AVERROR(ENOMEM);
+ }
+
for (;;) {
#if CONFIG_AVFILTER
AVRational tb;
while (is->paused && !is->videoq.abort_request)
SDL_Delay(10);
- av_free_packet(&pkt);
+ av_packet_unref(&pkt);
ret = get_video_frame(is, frame, &pts_int, &pkt);
if (ret < 0)
#if CONFIG_AVFILTER
if ( last_w != is->video_st->codec->width
|| last_h != is->video_st->codec->height) {
- av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
+ av_log(NULL, AV_LOG_TRACE, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
is->video_st->codec->width, is->video_st->codec->height);
avfilter_graph_free(&graph);
graph = avfilter_graph_alloc();
if (av_cmp_q(tb, is->video_st->time_base)) {
av_unused int64_t pts1 = pts_int;
pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
- av_dlog(NULL, "video_thread(): "
+ av_log(NULL, AV_LOG_TRACE, "video_thread(): "
"tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
tb.num, tb.den, pts1,
is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
av_freep(&vfilters);
avfilter_graph_free(&graph);
#endif
- av_free_packet(&pkt);
+ av_packet_unref(&pkt);
av_frame_free(&frame);
return 0;
}
{
for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
{
- RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
+ RGBA_IN(r, g, b, a, (uint32_t *)sp->sub.rects[i]->data[1] + j);
y = RGB_TO_Y_CCIR(r, g, b);
u = RGB_TO_U_CCIR(r, g, b, 0);
v = RGB_TO_V_CCIR(r, g, b, 0);
- YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
+ YUVA_OUT((uint32_t *)sp->sub.rects[i]->data[1] + j, y, u, v, a);
}
}
is->subpq_size++;
SDL_UnlockMutex(is->subpq_mutex);
}
- av_free_packet(pkt);
+ av_packet_unref(pkt);
}
return 0;
}
samples_size = wanted_size;
}
}
- av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
+ av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
diff, avg_diff, samples_size - samples_size1,
is->audio_clock, is->video_clock, is->audio_diff_threshold);
}
if (!got_frame) {
/* stop sending empty packets if the decoder is finished */
- if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
+ if (!pkt_temp->data && (dec->codec->capabilities & AV_CODEC_CAP_DELAY))
flush_complete = 1;
continue;
}
/* free the current packet */
if (pkt->data)
- av_free_packet(pkt);
+ av_packet_unref(pkt);
memset(pkt_temp, 0, sizeof(*pkt_temp));
if (is->paused || is->audioq.abort_request) {
int audio_size, len1;
double pts;
- audio_callback_time = av_gettime();
+ audio_callback_time = av_gettime_relative();
while (len > 0) {
if (is->audio_buf_index >= is->audio_buf_size) {
avctx->skip_loop_filter = skip_loop_filter;
avctx->error_concealment = error_concealment;
- if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
+ if (fast)
+ avctx->flags2 |= AV_CODEC_FLAG2_FAST;
if (!av_dict_get(opts, "threads", NULL, 0))
av_dict_set(&opts, "threads", "auto", 0);
SDL_CloseAudio();
packet_queue_end(&is->audioq);
- av_free_packet(&is->audio_pkt);
+ av_packet_unref(&is->audio_pkt);
if (is->avr)
avresample_free(&is->avr);
av_freep(&is->audio_buf1);
global_video_state = is;
ic = avformat_alloc_context();
+ if (!ic) {
+ av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
+ ret = AVERROR(ENOMEM);
+ goto fail;
+ }
ic->interrupt_callback.callback = decode_interrupt_cb;
err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
if (err < 0) {
packet_queue_put(&is->videoq, pkt);
}
if (is->audio_stream >= 0 &&
- is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
+ (is->audio_st->codec->codec->capabilities & AV_CODEC_CAP_DELAY)) {
av_init_packet(pkt);
pkt->data = NULL;
pkt->size = 0;
} else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
packet_queue_put(&is->subtitleq, pkt);
} else {
- av_free_packet(pkt);
+ av_packet_unref(pkt);
}
}
/* wait until the end */
{ "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
{ "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { opt_default }, "generic catch all option", "" },
{ "i", 0, { NULL }, "avconv compatibility dummy option", ""},
+ { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
{ NULL, },
};
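Hedged usage note: assuming the usual cmdutils handling of OPT_BOOL options (which also accepts a no- prefixed form), rotation can be disabled from the command line with `avplay -noautorotate input.mp4` (filename illustrative); the default, `-autorotate`, inserts the transpose/flip filters according to the stream's display matrix side data.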