+ return;
+}
+
+/* Copy AviSynth clip data into an AVPacket.
+ * Returns 0 on success, AVERROR_EOF past the last frame, and a negative
+ * error code on allocation or AviSynth failure (also latching avs->error). */
+static int avisynth_read_packet_video(AVFormatContext *s, AVPacket *pkt,
+                                      int discard)
+{
+    AviSynthContext *avs = s->priv_data;
+    AVS_VideoFrame *frame;
+    unsigned char *dst_p;
+    const unsigned char *src_p;
+    int n, i, plane, rowsize, planeheight, pitch, bits;
+    int64_t size;
+    const char *error;
+
+    if (avs->curr_frame >= avs->vi->num_frames)
+        return AVERROR_EOF;
+
+    /* This must happen even if the stream is discarded to prevent desync. */
+    n = avs->curr_frame++;
+    if (discard)
+        return 0;
+
+#ifdef USING_AVISYNTH
+    /* Define the bpp values for the new AviSynth 2.6 colorspaces.
+     * Since AvxSynth doesn't have these functions, special-case
+     * it in order to avoid implicit declaration errors. */
+
+    if (avs_is_yv24(avs->vi))
+        bits = 24;
+    else if (avs_is_yv16(avs->vi))
+        bits = 16;
+    else if (avs_is_yv411(avs->vi))
+        bits = 12;
+    else if (avs_is_y8(avs->vi))
+        bits = 8;
+    else
+#endif
+        bits = avs_bits_per_pixel(avs->vi);
+
+    /* Without the cast to int64_t, calculation overflows at about 9k x 9k
+     * resolution. Keep the result 64-bit until it is validated: assigning
+     * it straight to the int-sized pkt->size would silently truncate
+     * oversized values, so reject anything outside (0, INT_MAX]. */
+    size = (((int64_t)avs->vi->width *
+             (int64_t)avs->vi->height) * bits) / 8;
+    if (size <= 0 || size > INT_MAX)
+        return AVERROR_UNKNOWN;
+
+    /* av_new_packet() fills in pkt->size on success. */
+    if (av_new_packet(pkt, size) < 0)
+        return AVERROR(ENOMEM);
+
+    pkt->pts          = n;
+    pkt->dts          = n;
+    pkt->duration     = 1;
+    pkt->stream_index = avs->curr_stream;
+
+    frame = avs_library.avs_get_frame(avs->clip, n);
+    error = avs_library.avs_clip_get_error(avs->clip);
+    if (error) {
+        av_log(s, AV_LOG_ERROR, "%s\n", error);
+        avs->error = 1;
+        av_packet_unref(pkt);
+        return AVERROR_UNKNOWN;
+    }
+
+    /* Copy each plane in turn into the packet buffer. */
+    dst_p = pkt->data;
+    for (i = 0; i < avs->n_planes; i++) {
+        plane = avs->planes[i];
+        src_p = avs_get_read_ptr_p(frame, plane);
+        pitch = avs_get_pitch_p(frame, plane);
+
+        rowsize     = avs_get_row_size_p(frame, plane);
+        planeheight = avs_get_height_p(frame, plane);
+
+        /* Flip RGB video: walk the source bottom-up by starting at the
+         * last row and negating the pitch. */
+        if (avs_is_rgb24(avs->vi) || avs_is_rgb(avs->vi)) {
+            src_p = src_p + (planeheight - 1) * pitch;
+            pitch = -pitch;
+        }
+
+        avs_library.avs_bit_blt(avs->env, dst_p, rowsize, src_p, pitch,
+                                rowsize, planeheight);
+        dst_p += rowsize * planeheight;
+    }
+
+    avs_library.avs_release_video_frame(frame);
+    return 0;
+}
+
+/* Copy AviSynth audio samples into an AVPacket.
+ * Returns 0 on success, AVERROR_EOF past the last sample, and a negative
+ * error code on allocation or AviSynth failure (also latching avs->error). */
+static int avisynth_read_packet_audio(AVFormatContext *s, AVPacket *pkt,
+                                      int discard)
+{
+    AviSynthContext *avs = s->priv_data;
+    AVRational fps, samplerate;
+    int samples;
+    int64_t n;
+    const char *error;
+
+    if (avs->curr_sample >= avs->vi->num_audio_samples)
+        return AVERROR_EOF;
+
+    fps        = (AVRational) { avs->vi->fps_numerator,
+                                avs->vi->fps_denominator };
+    samplerate = (AVRational) { avs->vi->audio_samples_per_second, 1 };
+
+    /* Size the read so audio keeps pace with the video timeline; with no
+     * video present, fall back to fixed 1000-sample chunks. */
+    if (!avs_has_video(avs->vi)) {
+        samples = 1000;
+    } else if (avs->curr_frame < avs->vi->num_frames) {
+        samples = av_rescale_q(avs->curr_frame, samplerate, fps) -
+                  avs->curr_sample;
+    } else {
+        samples = av_rescale_q(1, samplerate, fps);
+    }
+
+    /* After seeking, audio may catch up with video. */
+    if (samples <= 0) {
+        pkt->size = 0;
+        pkt->data = NULL;
+        return 0;
+    }
+
+    /* Clamp the final read to the end of the clip. */
+    if (avs->curr_sample + samples > avs->vi->num_audio_samples)
+        samples = avs->vi->num_audio_samples - avs->curr_sample;
+
+    /* This must happen even if the stream is discarded to prevent desync. */
+    n = avs->curr_sample;
+    avs->curr_sample += samples;
+    if (discard)
+        return 0;
+
+    pkt->size = avs_bytes_per_channel_sample(avs->vi) *
+                samples * avs->vi->nchannels;
+    if (!pkt->size)
+        return AVERROR_UNKNOWN;
+
+    if (av_new_packet(pkt, pkt->size) < 0)
+        return AVERROR(ENOMEM);
+
+    pkt->pts          = n;
+    pkt->dts          = n;
+    pkt->duration     = samples;
+    pkt->stream_index = avs->curr_stream;
+
+    avs_library.avs_get_audio(avs->clip, pkt->data, n, samples);
+    error = avs_library.avs_clip_get_error(avs->clip);
+    if (error) {
+        av_log(s, AV_LOG_ERROR, "%s\n", error);
+        avs->error = 1;
+        av_packet_unref(pkt);
+        return AVERROR_UNKNOWN;
+    }
+    return 0;
+}
+
+/* Open the AviSynth script under the global avformat lock. */
+static av_cold int avisynth_read_header(AVFormatContext *s)
+{
+    int ret;
+
+    // Calling library must implement a lock for thread-safe opens.
+    ret = avpriv_lock_avformat();
+    if (ret)
+        return ret;
+
+    /* Unlock on both the success and failure path before returning. */
+    ret = avisynth_open_file(s);
+    avpriv_unlock_avformat();
+
+    return ret;
+}
+
+/* Demux the next packet, alternating between streams as scheduled by
+ * avisynth_next_stream(). */
+static int avisynth_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+    AviSynthContext *avs = s->priv_data;
+    AVStream *st;
+    int discard = 0;
+    int ret;
+
+    if (avs->error)
+        return AVERROR_UNKNOWN;
+
+    /* Pick whichever stream is due next; when it hits EOF, fall back to
+     * the other stream before giving up. */
+    avisynth_next_stream(s, &st, pkt, &discard);
+    if (st->codec->codec_type != AVMEDIA_TYPE_VIDEO) {
+        ret = avisynth_read_packet_audio(s, pkt, discard);
+        if (ret == AVERROR_EOF && avs_has_video(avs->vi)) {
+            avisynth_next_stream(s, &st, pkt, &discard);
+            ret = avisynth_read_packet_video(s, pkt, discard);
+        }
+    } else {
+        ret = avisynth_read_packet_video(s, pkt, discard);
+        if (ret == AVERROR_EOF && avs_has_audio(avs->vi)) {
+            avisynth_next_stream(s, &st, pkt, &discard);
+            ret = avisynth_read_packet_audio(s, pkt, discard);
+        }
+    }
+
+    return ret;
+}
+
+/* Tear down the AviSynth context under the global avformat lock. */
+static av_cold int avisynth_read_close(AVFormatContext *s)
+{
+    /* Serialize against other AviSynth opens/closes. */
+    if (avpriv_lock_avformat() != 0)
+        return AVERROR_UNKNOWN;
+
+    avisynth_context_destroy(s->priv_data);
+    avpriv_unlock_avformat();
+
+    return 0;
+}
+
+static int avisynth_read_seek(AVFormatContext *s, int stream_index,
+ int64_t timestamp, int flags)
+{
+ AviSynthContext *avs = s->priv_data;
+ AVStream *st;
+ AVRational fps, samplerate;
+
+ if (avs->error)
+ return AVERROR_UNKNOWN;
+
+ fps = (AVRational) { avs->vi->fps_numerator,
+ avs->vi->fps_denominator };
+ samplerate = (AVRational) { avs->vi->audio_samples_per_second, 1 };
+
+ st = s->streams[stream_index];
+ if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
+ /* AviSynth frame counts are signed int. */
+ if ((timestamp >= avs->vi->num_frames) ||
+ (timestamp > INT_MAX) ||
+ (timestamp < 0))
+ return AVERROR_EOF;
+ avs->curr_frame = timestamp;
+ if (avs_has_audio(avs->vi))
+ avs->curr_sample = av_rescale_q(timestamp, samplerate, fps);
+ } else {
+ if ((timestamp >= avs->vi->num_audio_samples) || (timestamp < 0))
+ return AVERROR_EOF;
+ /* Force frame granularity for seeking. */
+ if (avs_has_video(avs->vi)) {
+ avs->curr_frame = av_rescale_q(timestamp, fps, samplerate);
+ avs->curr_sample = av_rescale_q(avs->curr_frame, samplerate, fps);
+ } else {
+ avs->curr_sample = timestamp;
+ }