X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=ffplay.c;h=cde88db71e72daa4e06e443489328aface1de7da;hb=0cbae3763d96f40e0eae0c9f04794a38e1f78ca6;hp=15bc6f1e2da6467e8fc7b0d421c423871858f071;hpb=e3ec2cde2c394af6c9b64bccd39bc206a88a4e58;p=ffmpeg diff --git a/ffplay.c b/ffplay.c index 15bc6f1e2da..cde88db71e7 100644 --- a/ffplay.c +++ b/ffplay.c @@ -66,7 +66,9 @@ const char program_name[] = "ffplay"; const int program_birth_year = 2003; #define MAX_QUEUE_SIZE (15 * 1024 * 1024) -#define MIN_FRAMES 5 +#define MIN_FRAMES 25 +#define EXTERNAL_CLOCK_MIN_FRAMES 2 +#define EXTERNAL_CLOCK_MAX_FRAMES 10 /* Minimum SDL audio buffer size, in samples. */ #define SDL_AUDIO_MIN_BUFFER_SIZE 512 @@ -102,7 +104,7 @@ const int program_birth_year = 2003; #define CURSOR_HIDE_DELAY 1000000 -static int64_t sws_flags = SWS_BICUBIC; +static unsigned sws_flags = SWS_BICUBIC; typedef struct MyAVPacketList { AVPacket pkt; @@ -223,6 +225,9 @@ typedef struct VideoState { Decoder viddec; Decoder subdec; + int viddec_width; + int viddec_height; + int audio_stream; int av_sync_type; @@ -278,6 +283,7 @@ typedef struct VideoState { #if !CONFIG_AVFILTER struct SwsContext *img_convert_ctx; #endif + struct SwsContext *sub_convert_ctx; SDL_Rect last_display_rect; int eof; @@ -829,229 +835,50 @@ static void fill_border(int xleft, int ytop, int width, int height, int x, int y #define ALPHA_BLEND(a, oldp, newp, s)\ ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s)) -#define RGBA_IN(r, g, b, a, s)\ -{\ - unsigned int v = ((const uint32_t *)(s))[0];\ - a = (v >> 24) & 0xff;\ - r = (v >> 16) & 0xff;\ - g = (v >> 8) & 0xff;\ - b = v & 0xff;\ -} - -#define YUVA_IN(y, u, v, a, s, pal)\ -{\ - unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\ - a = (val >> 24) & 0xff;\ - y = (val >> 16) & 0xff;\ - u = (val >> 8) & 0xff;\ - v = val & 0xff;\ -} - -#define YUVA_OUT(d, y, u, v, a)\ -{\ - ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\ -} #define BPP 1 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh) { - int wrap, wrap3, width2, skip2; - int y, u, v, a, u1, v1, a1, w, h; + int x, y, Y, U, V, A; uint8_t *lum, *cb, *cr; - const uint8_t *p; - const uint32_t *pal; int dstx, dsty, dstw, dsth; + const AVPicture *src = &rect->pict; dstw = av_clip(rect->w, 0, imgw); dsth = av_clip(rect->h, 0, imgh); dstx = av_clip(rect->x, 0, imgw - dstw); dsty = av_clip(rect->y, 0, imgh - dsth); - lum = dst->data[0] + dsty * dst->linesize[0]; - cb = dst->data[1] + (dsty >> 1) * dst->linesize[1]; - cr = dst->data[2] + (dsty >> 1) * dst->linesize[2]; - - width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1); - skip2 = dstx >> 1; - wrap = dst->linesize[0]; - wrap3 = rect->pict.linesize[0]; - p = rect->pict.data[0]; - pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! 
*/
-
-    if (dsty & 1) {
-        lum += dstx;
-        cb += skip2;
-        cr += skip2;
-
-        if (dstx & 1) {
-            YUVA_IN(y, u, v, a, p, pal);
-            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
-            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
-            cb++;
-            cr++;
+    lum = dst->data[0] + dstx + dsty * dst->linesize[0];
+    cb = dst->data[1] + dstx/2 + (dsty >> 1) * dst->linesize[1];
+    cr = dst->data[2] + dstx/2 + (dsty >> 1) * dst->linesize[2];
+
+    for (y = 0; y<dsth; y++) {
+        for (x = 0; x<dstw; x++) {
+            Y = src->data[0][x + y*src->linesize[0]];
+            A = src->data[3][x + y*src->linesize[3]];
+            lum[0] = ALPHA_BLEND(A, lum[0], Y, 0);
             lum++;
-            p += BPP;
-        }
-        for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
-            YUVA_IN(y, u, v, a, p, pal);
-            u1 = u;
-            v1 = v;
-            a1 = a;
-            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-
-            YUVA_IN(y, u, v, a, p + BPP, pal);
-            u1 += u;
-            v1 += v;
-            a1 += a;
-            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
-            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
-            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
-            cb++;
-            cr++;
-            p += 2 * BPP;
-            lum += 2;
         }
-        if (w) {
-            YUVA_IN(y, u, v, a, p, pal);
-            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
-            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
-            p++;
-            lum++;
-        }
-        p += wrap3 - dstw * BPP;
-        lum += wrap - dstw - dstx;
-        cb += dst->linesize[1] - width2 - skip2;
-        cr += dst->linesize[2] - width2 - skip2;
-    }
-    for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
-        lum += dstx;
-        cb += skip2;
-        cr += skip2;
-
-        if (dstx & 1) {
-            YUVA_IN(y, u, v, a, p, pal);
-            u1 = u;
-            v1 = v;
-            a1 = a;
-            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-            p += wrap3;
-            lum += wrap;
-            YUVA_IN(y, u, v, a, p, pal);
-            u1 += u;
-            v1 += v;
-            a1 += a;
-            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
-            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
-            cb++;
-            cr++;
-            p += -wrap3 + BPP;
-            lum += -wrap + 1;
-        }
-        for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
-            YUVA_IN(y, u, v, a, p, pal);
-            u1 = u;
-            v1 = v;
-            a1 = a;
-            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-
-            YUVA_IN(y, u, v, a, p + BPP, pal);
-            u1 += u;
-            v1 += v;
-            a1 += a;
-            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
-            p += wrap3;
-            lum += wrap;
-
-            YUVA_IN(y, u, v, a, p, pal);
-            u1 += u;
-            v1 += v;
-            a1 += a;
-            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-
-            YUVA_IN(y, u, v, a, p + BPP, pal);
-            u1 += u;
-            v1 += v;
-            a1 += a;
-            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
-
-            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
-            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
+        lum += dst->linesize[0] - dstw;
+    }
+    for (y = 0; y<dsth/2; y++) {
+        for (x = 0; x<dstw/2; x++) {
+            U = src->data[1][x + y*src->linesize[1]];
+            V = src->data[2][x + y*src->linesize[2]];
+            A = src->data[3][2*x     +  2*y   *src->linesize[3]]
+              + src->data[3][2*x + 1 +  2*y   *src->linesize[3]]
+              + src->data[3][2*x + 1 + (2*y+1)*src->linesize[3]]
+              + src->data[3][2*x     + (2*y+1)*src->linesize[3]];
+            cb[0] = ALPHA_BLEND(A>>2, cb[0], U, 0);
+            cr[0] = ALPHA_BLEND(A>>2, cr[0], V, 0);
             cb++;
             cr++;
-            p += -wrap3 + 2 * BPP;
-            lum += -wrap + 2;
-        }
-        if (w) {
-            YUVA_IN(y, u, v, a, p, pal);
-            u1 = u;
-            v1 = v;
-            a1 = a;
-            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-            p += wrap3;
-            lum += wrap;
-            YUVA_IN(y, u, v, a, p, pal);
-            u1 += u;
-            v1 += v;
-            a1 += a;
-            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
-            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
-            cb++;
-            cr++;
-            p += -wrap3 + BPP;
-            lum += -wrap + 1;
-        }
-        p += wrap3 + (wrap3 - dstw * BPP);
-        lum += wrap + (wrap - dstw - dstx);
-        cb += dst->linesize[1] - width2 - skip2;
-        cr += dst->linesize[2] - width2 - skip2;
-    }
-    /* handle odd height */
-    if (h) {
-        lum += dstx;
-        cb += 
skip2; - cr += skip2; - - if (dstx & 1) { - YUVA_IN(y, u, v, a, p, pal); - lum[0] = ALPHA_BLEND(a, lum[0], y, 0); - cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0); - cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0); - cb++; - cr++; - lum++; - p += BPP; - } - for (w = dstw - (dstx & 1); w >= 2; w -= 2) { - YUVA_IN(y, u, v, a, p, pal); - u1 = u; - v1 = v; - a1 = a; - lum[0] = ALPHA_BLEND(a, lum[0], y, 0); - - YUVA_IN(y, u, v, a, p + BPP, pal); - u1 += u; - v1 += v; - a1 += a; - lum[1] = ALPHA_BLEND(a, lum[1], y, 0); - cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1); - cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1); - cb++; - cr++; - p += 2 * BPP; - lum += 2; - } - if (w) { - YUVA_IN(y, u, v, a, p, pal); - lum[0] = ALPHA_BLEND(a, lum[0], y, 0); - cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0); - cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0); } + cb += dst->linesize[1] - dstw/2; + cr += dst->linesize[2] - dstw/2; } } @@ -1306,6 +1133,7 @@ static void stream_close(VideoState *is) #if !CONFIG_AVFILTER sws_freeContext(is->img_convert_ctx); #endif + sws_freeContext(is->sub_convert_ctx); av_free(is); } @@ -1475,11 +1303,11 @@ static double get_master_clock(VideoState *is) } static void check_external_clock_speed(VideoState *is) { - if (is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2 || - is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2) { + if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES || + is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) { set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP)); - } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) && - (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) { + } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) && + (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) { set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP)); } else { double speed = is->extclk.speed; @@ -1851,7 +1679,18 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double av_picture_copy(&pict, (AVPicture *)src_frame, src_frame->format, vp->width, vp->height); #else - av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags); + { + AVDictionaryEntry *e = av_dict_get(sws_dict, "sws_flags", NULL, 0); + if (e) { + const AVClass *class = sws_get_class(); + const AVOption *o = av_opt_find(&class, "sws_flags", NULL, 0, + AV_OPT_SEARCH_FAKE_OBJ); + int ret = av_opt_eval_flags(&class, o, e->value, &sws_flags); + if (ret < 0) + exit(1); + } + } + is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx, vp->width, vp->height, src_frame->format, vp->width, vp->height, AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL); @@ -1893,6 +1732,9 @@ static int get_video_frame(VideoState *is, AVFrame *frame) frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame); + is->viddec_width = frame->width; + is->viddec_height = frame->height; + if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) { if (frame->pts != AV_NOPTS_VALUE) { double diff = dpts - get_master_clock(is); @@ -1958,15 +1800,23 @@ fail: static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame) { static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE }; - char sws_flags_str[128]; + char sws_flags_str[512] = ""; char 
buffersrc_args[256]; int ret; AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL; AVCodecContext *codec = is->video_st->codec; AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL); + AVDictionaryEntry *e = NULL; + + while ((e = av_dict_get(sws_dict, "", e, AV_DICT_IGNORE_SUFFIX))) { + if (!strcmp(e->key, "sws_flags")) { + av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value); + } else + av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value); + } + if (strlen(sws_flags_str)) + sws_flags_str[strlen(sws_flags_str)-1] = '\0'; - av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags); - snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags); graph->scale_sws_opts = av_strdup(sws_flags_str); snprintf(buffersrc_args, sizeof(buffersrc_args), @@ -2328,8 +2178,7 @@ static int subtitle_thread(void *arg) Frame *sp; int got_subtitle; double pts; - int i, j; - int r, g, b, y, u, v, a; + int i; for (;;) { if (!(sp = frame_queue_peek_writable(&is->subpq))) @@ -2348,14 +2197,41 @@ static int subtitle_thread(void *arg) for (i = 0; i < sp->sub.num_rects; i++) { - for (j = 0; j < sp->sub.rects[i]->nb_colors; j++) - { - RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j); - y = RGB_TO_Y_CCIR(r, g, b); - u = RGB_TO_U_CCIR(r, g, b, 0); - v = RGB_TO_V_CCIR(r, g, b, 0); - YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a); + int in_w = sp->sub.rects[i]->w; + int in_h = sp->sub.rects[i]->h; + int subw = is->subdec.avctx->width ? is->subdec.avctx->width : is->viddec_width; + int subh = is->subdec.avctx->height ? is->subdec.avctx->height : is->viddec_height; + int out_w = is->viddec_width ? in_w * is->viddec_width / subw : in_w; + int out_h = is->viddec_height ? 
in_h * is->viddec_height / subh : in_h; + AVPicture newpic; + + //can not use avpicture_alloc as it is not compatible with avsubtitle_free() + av_image_fill_linesizes(newpic.linesize, AV_PIX_FMT_YUVA420P, out_w); + newpic.data[0] = av_malloc(newpic.linesize[0] * out_h); + newpic.data[3] = av_malloc(newpic.linesize[3] * out_h); + newpic.data[1] = av_malloc(newpic.linesize[1] * ((out_h+1)/2)); + newpic.data[2] = av_malloc(newpic.linesize[2] * ((out_h+1)/2)); + + is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx, + in_w, in_h, AV_PIX_FMT_PAL8, out_w, out_h, + AV_PIX_FMT_YUVA420P, sws_flags, NULL, NULL, NULL); + if (!is->sub_convert_ctx || !newpic.data[0] || !newpic.data[3] || + !newpic.data[1] || !newpic.data[2] + ) { + av_log(NULL, AV_LOG_FATAL, "Cannot initialize the sub conversion context\n"); + exit(1); } + sws_scale(is->sub_convert_ctx, + (void*)sp->sub.rects[i]->pict.data, sp->sub.rects[i]->pict.linesize, + 0, in_h, newpic.data, newpic.linesize); + + av_free(sp->sub.rects[i]->pict.data[0]); + av_free(sp->sub.rects[i]->pict.data[1]); + sp->sub.rects[i]->pict = newpic; + sp->sub.rects[i]->w = out_w; + sp->sub.rects[i]->h = out_h; + sp->sub.rects[i]->x = sp->sub.rects[i]->x * out_w / in_w; + sp->sub.rects[i]->y = sp->sub.rects[i]->y * out_h / in_h; } /* now we can update the picture count */ @@ -2448,6 +2324,13 @@ static int audio_decode_frame(VideoState *is) return -1; do { +#if defined(_WIN32) + while (frame_queue_nb_remaining(&is->sampq) == 0) { + if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2) + return -1; + av_usleep (1000); + } +#endif if (!(af = frame_queue_peek_readable(&is->sampq))) return -1; frame_queue_next(&is->sampq); @@ -2772,6 +2655,9 @@ static int stream_component_open(VideoState *is, int stream_index) is->video_stream = stream_index; is->video_st = ic->streams[stream_index]; + is->viddec_width = avctx->width; + is->viddec_height = avctx->height; + decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread); decoder_start(&is->viddec, video_thread, is); is->queue_attachments_req = 1;
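
The subtitle_thread hunk above replaces the old per-palette-entry RGBA_IN/YUVA_OUT
conversion with a single libswscale pass: each PAL8 subtitle rect is rescaled to the
video dimensions and expanded to planar YUVA420P, which is exactly the layout the new
blend_subrect() consumes. Below is a minimal standalone sketch of that conversion,
assuming an FFmpeg-2.x-era tree matching this patch; convert_pal8_rect() and its
parameter names are illustrative only, and SWS_BICUBIC stands in for whatever
sws_flags value the patch resolves from the option dictionary.

#include <libswscale/swscale.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>

/* Convert one palettized subtitle bitmap (data[0] = 8-bit palette indices,
 * data[1] = 256-entry 32-bit palette) to planar YUVA420P, rescaling from
 * in_w x in_h to out_w x out_h on the way. */
static int convert_pal8_rect(uint8_t *const src_data[4], const int src_linesize[4],
                             int in_w, int in_h, int out_w, int out_h,
                             uint8_t *dst_data[4], int dst_linesize[4])
{
    struct SwsContext *ctx;
    int ret;

    /* YUVA420P: full-resolution luma and alpha planes, half-resolution chroma. */
    ret = av_image_alloc(dst_data, dst_linesize, out_w, out_h,
                         AV_PIX_FMT_YUVA420P, 16);
    if (ret < 0)
        return ret;

    ctx = sws_getContext(in_w,  in_h,  AV_PIX_FMT_PAL8,
                         out_w, out_h, AV_PIX_FMT_YUVA420P,
                         SWS_BICUBIC, NULL, NULL, NULL);
    if (!ctx) {
        av_freep(&dst_data[0]);
        return AVERROR(EINVAL);
    }

    /* sws_scale() expands each palette index through data[1] and writes the
     * four planes Y, U, V and A in one pass. */
    sws_scale(ctx, (const uint8_t *const *)src_data, src_linesize,
              0, in_h, dst_data, dst_linesize);
    sws_freeContext(ctx);
    return 0;
}

Note one deliberate difference from this sketch: the patch allocates the four planes
with separate av_malloc() calls instead of one av_image_alloc() buffer, because
avsubtitle_free() frees each pict.data[] pointer individually (hence the "can not use
avpicture_alloc" comment in the hunk).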