}
}
-/**
+/*
* Update the requested input sample format based on the output sample format.
* This is currently only used to request float output from decoders which
* support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
}
}
-/**
+/*
* Read one frame for lavfi output for ost and encode it.
*/
static int poll_filter(OutputStream *ost)
return 0;
}
-/**
+/*
- * Read as many frames from possible from lavfi and encode them.
+ * Read as many frames as possible from lavfi and encode them.
*
* Always read from the active stream with the lowest timestamp. If no frames
return 0;
}
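To make the selection rule in the comment above concrete, here is a minimal sketch of picking "the active stream with the lowest timestamp"; output_streams/nb_output_streams are the usual avconv globals, while last_filter_pts is a hypothetical field standing in for the timestamp of the last frame read for that stream:

/* Sketch only: one way to implement "read from the active stream with the
 * lowest timestamp".  last_filter_pts is a made-up field used purely for
 * illustration. */
static OutputStream *choose_output_sketch(void)
{
    OutputStream *ost_min = NULL;
    int64_t pts_min = INT64_MAX;
    int i;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        if (ost->finished)
            continue;                       /* stream already done */
        if (!ost_min || ost->last_filter_pts < pts_min) {
            pts_min = ost->last_filter_pts;
            ost_min = ost;
        }
    }
    return ost_min;                         /* NULL: nothing left to read */
}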
-/**
- * @return 1 if there are still streams where more output is wanted,
- * 0 otherwise
- */
+/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
static int need_output(void)
{
int i;
input_files[i]->eagain = 0;
}
-/**
+/*
* Read one packet from an input file and send it for
* - decoding -> lavfi (audio/video)
* - decoding -> encoding -> muxing (subtitles)
* - muxing (streamcopy)
*
- * @return
+ * Return
* - 0 -- one packet was read and processed
* - AVERROR(EAGAIN) -- no packets were available for selected file,
* this function should be called again
/* select an input stream for an output stream */
typedef struct StreamMap {
- int disabled; /** 1 is this mapping is disabled by a negative map */
+ int disabled; /* 1 if this mapping is disabled by a negative map */
int file_index;
int stream_index;
int sync_file_index;
int sync_stream_index;
- char *linklabel; /** name of an output link, for mapping lavfi outputs */
+ char *linklabel; /* name of an output link, for mapping lavfi outputs */
} StreamMap;
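For illustration only, a mapping such as "-map 1:3" could be expected to fill the structure roughly like this (the values are hypothetical):

/* Hypothetical example: what "-map 1:3" could translate to. */
StreamMap m = {
    .disabled          = 0,    /* not switched off by a negative map */
    .file_index        = 1,    /* second input file */
    .stream_index      = 3,    /* fourth stream of that file */
    .sync_file_index   = 1,    /* sync against the same file... */
    .sync_stream_index = 3,    /* ...and the same stream */
    .linklabel         = NULL, /* set only when mapping a lavfi output */
};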
-/**
- * select an input file for an output file
- */
+/* select an input file for an output file */
typedef struct MetadataMap {
- int file; ///< file index
- char type; ///< type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram
- int index; ///< stream/chapter/program number
+ int file; // file index
+ char type; // type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram
+ int index; // stream/chapter/program number
} MetadataMap;
typedef struct OptionsContext {
#include "libavutil/pixfmt.h"
#include "libavutil/samplefmt.h"
-/**
- * Define a function for building a string containing a list of
- * allowed formats,
- */
+/* Define a function for building a string containing a list of
+ * allowed formats. */
#define DEF_CHOOSE_FORMAT(type, var, supported_list, none, get_name, separator)\
static char *choose_ ## var ## s(OutputStream *ost) \
{ \
}
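As a rough illustration of how the macro is meant to be used (the real call sites may differ, and older trees spell the pixel format enum PixelFormat/PIX_FMT_NONE), a pixel-format instantiation could look like:

/* Hypothetical instantiation: would define choose_pix_fmts(), returning the
 * names of all pixel formats supported by the encoder, joined by ':'. */
DEF_CHOOSE_FORMAT(enum AVPixelFormat, pix_fmt, pix_fmts, AV_PIX_FMT_NONE,
                  av_get_pix_fmt_name, ":")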
/**
- * Parse a metadata specifier in arg.
+ * Parse a metadata specifier passed as the 'arg' parameter.
* @param type metadata type is written here -- g(lobal)/s(tream)/c(hapter)/p(rogram)
* @param index for type c/p, chapter/program index is written here
* @param stream_spec for type s, the stream specifier is written here
return avcodec_find_decoder(st->codec->codec_id);
}
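To make the metadata specifier syntax documented above concrete, a few illustrative inputs and the values the parser is expected to produce, derived only from the parameter descriptions:

/* Illustrative metadata specifiers and the values the parser is expected to
 * produce (derived from the @param descriptions above, not verified against
 * a particular version):
 *   "g"      -> type = 'g'                       global metadata
 *   "s:a:0"  -> type = 's', stream_spec = "a:0"  per-stream metadata
 *   "c:2"    -> type = 'c', index = 2            chapter metadata
 *   "p:0"    -> type = 'p', index = 0            program metadata
 */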
-/**
- * Add all the streams from the given input file to the global
- * list of input streams.
- */
+/* Add all the streams from the given input file to the global
+ * list of input streams. */
static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
{
int i;
#define SUBPICTURE_QUEUE_SIZE 4
typedef struct VideoPicture {
- double pts; ///< presentation time stamp for this picture
- double target_clock; ///< av_gettime() time at which this should be displayed ideally
- int64_t pos; ///< byte position in file
+ double pts; // presentation timestamp for this picture
+ double target_clock; // av_gettime() time at which this should be displayed ideally
+ int64_t pos; // byte position in file
SDL_Overlay *bmp;
int width, height; /* source height & width */
int allocated;
double frame_timer;
double frame_last_pts;
double frame_last_delay;
- double video_clock; ///< pts of last decoded frame / predicted pts of next decoded frame
+ double video_clock; // pts of last decoded frame / predicted pts of next decoded frame
int video_stream;
AVStream *video_st;
PacketQueue videoq;
- double video_current_pts; ///< current displayed pts (different from video_clock if frame fifos are used)
- double video_current_pts_drift; ///< video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
- int64_t video_current_pos; ///< current displayed file pos
+ double video_current_pts; // current displayed pts (different from video_clock if frame fifos are used)
+ double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
+ int64_t video_current_pos; // current displayed file pos
VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
int pictq_size, pictq_rindex, pictq_windex;
SDL_mutex *pictq_mutex;
PtsCorrectionContext pts_ctx;
#if CONFIG_AVFILTER
- AVFilterContext *in_video_filter; ///< the first filter in the video chain
- AVFilterContext *out_video_filter; ///< the last filter in the video chain
+ AVFilterContext *in_video_filter; // the first filter in the video chain
+ AVFilterContext *out_video_filter; // the last filter in the video chain
int use_dr1;
FrameBuffer *buffer_pool;
#endif
SDL_UnlockMutex(is->pictq_mutex);
}
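Because the drift fields documented above are easy to misread, here is a small sketch of how they yield a running video clock; it only restates the field comments and is not necessarily the exact avplay code:

/* Sketch: how the drift fields above give a running video clock. */
static double video_clock_sketch(VideoState *is)
{
    /* video_current_pts_drift == video_current_pts - av_gettime() at the last
     * update, so adding the current time back yields a continuously running
     * pts in seconds. */
    return is->video_current_pts_drift + av_gettime() / 1000000.0;
}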
-/**
- *
- * @param pts the dts of the pkt / pts of the frame and guessed if not known
- */
+/* The 'pts' parameter is the dts of the packet / pts of the frame,
+ * guessed if not known. */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
VideoPicture *vp;
return 0;
}
-/**
- * compute the exact PTS for the picture if it is omitted in the stream
- * @param pts1 the dts of the pkt / pts of the frame
- */
+/* Compute the exact PTS for the picture if it is omitted in the stream.
+ * The 'pts1' parameter is the dts of the packet / pts of the frame. */
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
{
double frame_delay, pts;