X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libavcodec%2Fv4l2_m2m.c;h=cdfd579810f21f89ea933a0f7bc2e41bcef8d8d0;hb=252500a78fe1a31abc79e6070d16f50382c39343;hp=427e165f586f940f14fdef16dc411364580cc1b8;hpb=0dda0f3bdb7e8a2d5bef7457375f72f38a100ccb;p=ffmpeg

diff --git a/libavcodec/v4l2_m2m.c b/libavcodec/v4l2_m2m.c
index 427e165f586..cdfd579810f 100644
--- a/libavcodec/v4l2_m2m.c
+++ b/libavcodec/v4l2_m2m.c
@@ -60,14 +60,15 @@ static inline int v4l2_mplane_video(struct v4l2_capability *cap)
     return 0;
 }
 
-static int v4l2_prepare_contexts(V4L2m2mContext* s)
+static int v4l2_prepare_contexts(V4L2m2mContext *s, int probe)
 {
     struct v4l2_capability cap;
+    void *log_ctx = s->avctx;
     int ret;
 
     s->capture.done = s->output.done = 0;
     s->capture.name = "capture";
-    s->output.name = "output ";
+    s->output.name = "output";
     atomic_init(&s->refcount, 0);
     sem_init(&s->refsync, 0, 0);
 
@@ -76,7 +77,10 @@ static int v4l2_prepare_contexts(V4L2m2mContext* s)
     if (ret < 0)
         return ret;
 
-    av_log(s->avctx, AV_LOG_INFO, "driver '%s' on card '%s'\n", cap.driver, cap.card);
+    av_log(log_ctx, probe ? AV_LOG_DEBUG : AV_LOG_INFO,
+           "driver '%s' on card '%s' in %s mode\n", cap.driver, cap.card,
+           v4l2_mplane_video(&cap) ? "mplane" :
+           v4l2_splane_video(&cap) ? "splane" : "unknown");
 
     if (v4l2_mplane_video(&cap)) {
         s->capture.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
@@ -93,34 +97,35 @@ static int v4l2_prepare_contexts(V4L2m2mContext* s)
     return AVERROR(EINVAL);
 }
 
-static int v4l2_probe_driver(V4L2m2mContext* s)
+static int v4l2_probe_driver(V4L2m2mContext *s)
 {
+    void *log_ctx = s->avctx;
     int ret;
 
     s->fd = open(s->devname, O_RDWR | O_NONBLOCK, 0);
     if (s->fd < 0)
         return AVERROR(errno);
 
-    ret = v4l2_prepare_contexts(s);
+    ret = v4l2_prepare_contexts(s, 1);
     if (ret < 0)
         goto done;
 
-    ret = ff_v4l2_context_get_format(&s->output);
+    ret = ff_v4l2_context_get_format(&s->output, 1);
     if (ret) {
-        av_log(s->avctx, AV_LOG_DEBUG, "v4l2 output format not supported\n");
+        av_log(log_ctx, AV_LOG_DEBUG, "v4l2 output format not supported\n");
         goto done;
     }
 
-    ret = ff_v4l2_context_get_format(&s->capture);
+    ret = ff_v4l2_context_get_format(&s->capture, 1);
     if (ret) {
-        av_log(s->avctx, AV_LOG_DEBUG, "v4l2 capture format not supported\n");
+        av_log(log_ctx, AV_LOG_DEBUG, "v4l2 capture format not supported\n");
         goto done;
     }
 
 done:
     if (close(s->fd) < 0) {
         ret = AVERROR(errno);
-        av_log(s->avctx, AV_LOG_ERROR, "failure closing %s (%s)\n", s->devname, av_err2str(AVERROR(errno)));
+        av_log(log_ctx, AV_LOG_ERROR, "failure closing %s (%s)\n", s->devname, av_err2str(AVERROR(errno)));
     }
 
     s->fd = -1;
@@ -128,19 +133,30 @@ done:
     return ret;
 }
 
-static int v4l2_configure_contexts(V4L2m2mContext* s)
+static int v4l2_configure_contexts(V4L2m2mContext *s)
 {
     void *log_ctx = s->avctx;
     int ret;
+    struct v4l2_format ofmt, cfmt;
 
     s->fd = open(s->devname, O_RDWR | O_NONBLOCK, 0);
     if (s->fd < 0)
         return AVERROR(errno);
 
-    ret = v4l2_prepare_contexts(s);
+    ret = v4l2_prepare_contexts(s, 0);
     if (ret < 0)
         goto error;
 
+    ofmt = s->output.format;
+    cfmt = s->capture.format;
+    av_log(log_ctx, AV_LOG_INFO, "requesting formats: output=%s capture=%s\n",
+           av_fourcc2str(V4L2_TYPE_IS_MULTIPLANAR(ofmt.type) ?
+                         ofmt.fmt.pix_mp.pixelformat :
+                         ofmt.fmt.pix.pixelformat),
+           av_fourcc2str(V4L2_TYPE_IS_MULTIPLANAR(cfmt.type) ?
+                         cfmt.fmt.pix_mp.pixelformat :
+                         cfmt.fmt.pix.pixelformat));
+
     ret = ff_v4l2_context_set_format(&s->output);
     if (ret) {
         av_log(log_ctx, AV_LOG_ERROR, "can't set v4l2 output format\n");
@@ -160,7 +176,7 @@ static int v4l2_configure_contexts(V4L2m2mContext* s)
     }
 
     /* decoder's buffers need to be updated at a later stage */
-    if (!av_codec_is_decoder(s->avctx->codec)) {
+    if (s->avctx && !av_codec_is_decoder(s->avctx->codec)) {
         ret = ff_v4l2_context_init(&s->capture);
         if (ret) {
             av_log(log_ctx, AV_LOG_ERROR, "no v4l2 capture context's buffers\n");
@@ -186,38 +202,39 @@ error:
  *
  *                  V4L2 M2M Interface
  *
  ******************************************************************************/
-int ff_v4l2_m2m_codec_reinit(V4L2m2mContext* s)
+int ff_v4l2_m2m_codec_reinit(V4L2m2mContext *s)
 {
+    void *log_ctx = s->avctx;
     int ret;
 
-    av_log(s->avctx, AV_LOG_DEBUG, "reinit context\n");
+    av_log(log_ctx, AV_LOG_DEBUG, "reinit context\n");
 
     /* 1. streamoff */
     ret = ff_v4l2_context_set_status(&s->capture, VIDIOC_STREAMOFF);
     if (ret)
-        av_log(s->avctx, AV_LOG_ERROR, "capture VIDIOC_STREAMOFF\n");
+        av_log(log_ctx, AV_LOG_ERROR, "capture VIDIOC_STREAMOFF\n");
 
     /* 2. unmap the capture buffers (v4l2 and ffmpeg):
      *    we must wait for all references to be released before being allowed
      *    to queue new buffers.
      */
-    av_log(s->avctx, AV_LOG_DEBUG, "waiting for user to release AVBufferRefs\n");
+    av_log(log_ctx, AV_LOG_DEBUG, "waiting for user to release AVBufferRefs\n");
     if (atomic_load(&s->refcount))
         while(sem_wait(&s->refsync) == -1 && errno == EINTR);
 
     ff_v4l2_context_release(&s->capture);
 
     /* 3. get the new capture format */
-    ret = ff_v4l2_context_get_format(&s->capture);
+    ret = ff_v4l2_context_get_format(&s->capture, 0);
     if (ret) {
-        av_log(s->avctx, AV_LOG_ERROR, "query the new capture format\n");
+        av_log(log_ctx, AV_LOG_ERROR, "query the new capture format\n");
         return ret;
     }
 
     /* 4. set the capture format */
     ret = ff_v4l2_context_set_format(&s->capture);
     if (ret) {
-        av_log(s->avctx, AV_LOG_ERROR, "setting capture format\n");
+        av_log(log_ctx, AV_LOG_ERROR, "setting capture format\n");
         return ret;
     }
 
@@ -241,14 +258,14 @@ int ff_v4l2_m2m_codec_full_reinit(V4L2m2mContext *s)
 
     ret = ff_v4l2_context_set_status(&s->output, VIDIOC_STREAMOFF);
     if (ret) {
-        av_log(s->avctx, AV_LOG_ERROR, "output VIDIOC_STREAMOFF\n");
+        av_log(log_ctx, AV_LOG_ERROR, "output VIDIOC_STREAMOFF\n");
         goto error;
     }
 
     ret = ff_v4l2_context_set_status(&s->capture, VIDIOC_STREAMOFF);
     if (ret) {
-        av_log(s->avctx, AV_LOG_ERROR, "capture VIDIOC_STREAMOFF\n");
-            goto error;
+        av_log(log_ctx, AV_LOG_ERROR, "capture VIDIOC_STREAMOFF\n");
+        goto error;
     }
 
     /* release and unmmap the buffers */
@@ -259,13 +276,13 @@ int ff_v4l2_m2m_codec_full_reinit(V4L2m2mContext *s)
     s->draining = 0;
     s->reinit = 0;
 
-    ret = ff_v4l2_context_get_format(&s->output);
+    ret = ff_v4l2_context_get_format(&s->output, 0);
     if (ret) {
         av_log(log_ctx, AV_LOG_DEBUG, "v4l2 output format not supported\n");
         goto error;
     }
 
-    ret = ff_v4l2_context_get_format(&s->capture);
+    ret = ff_v4l2_context_get_format(&s->capture, 0);
     if (ret) {
         av_log(log_ctx, AV_LOG_DEBUG, "v4l2 capture format not supported\n");
         goto error;
     }
@@ -290,7 +307,7 @@ int ff_v4l2_m2m_codec_full_reinit(V4L2m2mContext *s)
     }
 
     /* decoder's buffers need to be updated at a later stage */
-    if (!av_codec_is_decoder(s->avctx->codec)) {
+    if (s->avctx && !av_codec_is_decoder(s->avctx->codec)) {
         ret = ff_v4l2_context_init(&s->capture);
         if (ret) {
             av_log(log_ctx, AV_LOG_ERROR, "no v4l2 capture context's buffers\n");
@@ -312,23 +329,30 @@ static void v4l2_m2m_destroy_context(void *opaque, uint8_t *context)
     sem_destroy(&s->refsync);
 
     close(s->fd);
+    av_frame_unref(s->frame);
+    av_frame_free(&s->frame);
+    av_packet_unref(&s->buf_pkt);
 
     av_free(s);
 }
 
-int ff_v4l2_m2m_codec_end(AVCodecContext *avctx)
+int ff_v4l2_m2m_codec_end(V4L2m2mPriv *priv)
 {
-    V4L2m2mPriv *priv = avctx->priv_data;
-    V4L2m2mContext* s = priv->context;
+    V4L2m2mContext *s = priv->context;
     int ret;
 
-    ret = ff_v4l2_context_set_status(&s->output, VIDIOC_STREAMOFF);
-    if (ret)
-        av_log(avctx, AV_LOG_ERROR, "VIDIOC_STREAMOFF %s\n", s->output.name);
+    if (!s)
+        return 0;
 
-    ret = ff_v4l2_context_set_status(&s->capture, VIDIOC_STREAMOFF);
-    if (ret)
-        av_log(avctx, AV_LOG_ERROR, "VIDIOC_STREAMOFF %s\n", s->capture.name);
+    if (s->fd >= 0) {
+        ret = ff_v4l2_context_set_status(&s->output, VIDIOC_STREAMOFF);
+        if (ret)
+            av_log(s->avctx, AV_LOG_ERROR, "VIDIOC_STREAMOFF %s\n", s->output.name);
+
+        ret = ff_v4l2_context_set_status(&s->capture, VIDIOC_STREAMOFF);
+        if (ret)
+            av_log(s->avctx, AV_LOG_ERROR, "VIDIOC_STREAMOFF %s\n", s->capture.name);
+    }
 
     ff_v4l2_context_release(&s->output);
 
@@ -338,15 +362,13 @@ int ff_v4l2_m2m_codec_end(AVCodecContext *avctx)
     return 0;
 }
 
-int ff_v4l2_m2m_codec_init(AVCodecContext *avctx)
+int ff_v4l2_m2m_codec_init(V4L2m2mPriv *priv)
 {
     int ret = AVERROR(EINVAL);
     struct dirent *entry;
-    char node[PATH_MAX];
     DIR *dirp;
 
-    V4L2m2mContext *s = ((V4L2m2mPriv*)avctx->priv_data)->context;
-    s->avctx = avctx;
+    V4L2m2mContext *s = priv->context;
 
     dirp = opendir("/dev");
     if (!dirp)
@@ -357,12 +379,11 @@ int ff_v4l2_m2m_codec_init(AVCodecContext *avctx)
         if (strncmp(entry->d_name, "video", 5))
             continue;
 
-        snprintf(node, sizeof(node), "/dev/%s", entry->d_name);
-        av_log(s->avctx, AV_LOG_DEBUG, "probing device %s\n", node);
-        strncpy(s->devname, node, strlen(node) + 1);
+        snprintf(s->devname, sizeof(s->devname), "/dev/%s", entry->d_name);
+        av_log(s->avctx, AV_LOG_DEBUG, "probing device %s\n", s->devname);
         ret = v4l2_probe_driver(s);
         if (!ret)
-                break;
+            break;
     }
 
     closedir(dirp);
@@ -374,15 +395,13 @@ int ff_v4l2_m2m_codec_init(AVCodecContext *avctx)
         return ret;
     }
 
-    av_log(s->avctx, AV_LOG_INFO, "Using device %s\n", node);
+    av_log(s->avctx, AV_LOG_INFO, "Using device %s\n", s->devname);
 
     return v4l2_configure_contexts(s);
 }
 
-int ff_v4l2_m2m_create_context(AVCodecContext *avctx, V4L2m2mContext **s)
+int ff_v4l2_m2m_create_context(V4L2m2mPriv *priv, V4L2m2mContext **s)
 {
-    V4L2m2mPriv *priv = avctx->priv_data;
-
     *s = av_mallocz(sizeof(V4L2m2mContext));
     if (!*s)
         return AVERROR(ENOMEM);
@@ -396,11 +415,20 @@ int ff_v4l2_m2m_create_context(AVCodecContext *avctx, V4L2m2mContext **s)
 
     /* assign the context */
     priv->context = *s;
+    (*s)->priv = priv;
 
     /* populate it */
     priv->context->capture.num_buffers = priv->num_capture_buffers;
     priv->context->output.num_buffers = priv->num_output_buffers;
     priv->context->self_ref = priv->context_ref;
+    priv->context->fd = -1;
+
+    priv->context->frame = av_frame_alloc();
+    if (!priv->context->frame) {
+        av_buffer_unref(&priv->context_ref);
+        *s = NULL; /* freed when unreferencing context_ref */
+        return AVERROR(ENOMEM);
+    }
 
     return 0;
 }
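
Illustrative usage note (not part of the patch): after this change ff_v4l2_m2m_create_context(), ff_v4l2_m2m_codec_init() and ff_v4l2_m2m_codec_end() take the codec's V4L2m2mPriv instead of an AVCodecContext, and codec_init no longer stores the AVCodecContext in the context (the "s->avctx = avctx" assignment is removed), so a caller that wants logging and the decoder/encoder distinction has to set s->avctx itself. Below is a minimal sketch of how a codec wrapper might drive the new entry points; the wrapper function name is hypothetical, while the ff_v4l2_m2m_* calls, types and fields come from this diff.

#include "avcodec.h"
#include "v4l2_m2m.h"   /* V4L2m2mContext, V4L2m2mPriv, ff_v4l2_m2m_* */

/* Hypothetical init wrapper, assuming avctx->priv_data is a V4L2m2mPriv
 * (as in the code removed above); a sketch, not FFmpeg's actual
 * v4l2_m2m_dec.c / v4l2_m2m_enc.c. */
static int v4l2_m2m_init_sketch(AVCodecContext *avctx)
{
    V4L2m2mPriv *priv = avctx->priv_data;
    V4L2m2mContext *s;
    int ret;

    /* allocate the refcounted context; on success priv->context points to it,
     * s->fd is initialized to -1 and s->frame is allocated (see the last hunk) */
    ret = ff_v4l2_m2m_create_context(priv, &s);
    if (ret < 0)
        return ret;

    /* ff_v4l2_m2m_codec_init() no longer does this itself, so hand over the
     * AVCodecContext for logging and the av_codec_is_decoder() check */
    s->avctx = avctx;

    /* probe /dev/video* and configure the output/capture queues */
    ret = ff_v4l2_m2m_codec_init(priv);
    if (ret < 0)
        ff_v4l2_m2m_codec_end(priv); /* tolerates s->fd < 0 after this patch */

    return ret;
}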