const char *avformat_license(void)
{
#define LICENSE_PREFIX "libavformat license: "
- return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
+ /* Literal concatenation gives "libavformat license: <FFMPEG_LICENSE>"; index
+ * past the prefix so only the license text is returned. Behaviorally identical
+ * to the removed pointer-arithmetic form; the &s[i] spelling presumably avoids
+ * a compiler warning about arithmetic on a string literal — TODO confirm. */
+ return &LICENSE_PREFIX FFMPEG_LICENSE[sizeof(LICENSE_PREFIX) - 1];
}
int ff_lock_avformat(void)
* Return the number of bytes read or an error. */
static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
{
- int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
int orig_size = pkt->size;
int ret;
if (size > 0)
pkt->flags |= AV_PKT_FLAG_CORRUPT;
- pkt->pos = orig_pos;
if (!pkt->size)
av_packet_unref(pkt);
return pkt->size > orig_size ? pkt->size - orig_size : ret;
{ 0 }
};
int score;
- AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
+ const AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
if (fmt) {
int i;
av_log(s, AV_LOG_DEBUG,
"Probe with size=%d, packets=%d detected %s with score=%d\n",
- pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets,
+ pd->buf_size, s->max_probe_packets - st->probe_packets,
fmt->name, score);
for (i = 0; fmt_id_type[i].name; i++) {
if (!strcmp(fmt->name, fmt_id_type[i].name)) {
return ret;
}
} else {
- // TODO: Adapt callers in this file so the line below can use
- // av_packet_move_ref() to effectively move the reference
- // to the list.
- pktl->pkt = *pkt;
+ ret = av_packet_make_refcounted(pkt);
+ if (ret < 0) {
+ av_free(pktl);
+ return ret;
+ }
+ av_packet_move_ref(&pktl->pkt, pkt);
}
if (*packet_buffer)
int avformat_open_input(AVFormatContext **ps, const char *filename,
- AVInputFormat *fmt, AVDictionary **options)
+ ff_const59 AVInputFormat *fmt, AVDictionary **options)
{
AVFormatContext *s = *ps;
int i, ret = 0;
level = AV_LOG_ERROR;
av_log(s, level, "Discarding ID3 tags because more suitable tags were found.\n");
av_dict_free(&s->internal->id3v2_meta);
- if (s->error_recognition & AV_EF_EXPLODE)
- return AVERROR_INVALIDDATA;
+ if (s->error_recognition & AV_EF_EXPLODE) {
+ ret = AVERROR_INVALIDDATA;
+ goto close;
+ }
}
if (id3v2_extra_meta) {
if (!strcmp(s->iformat->name, "mp3") || !strcmp(s->iformat->name, "aac") ||
!strcmp(s->iformat->name, "tta") || !strcmp(s->iformat->name, "wav")) {
if ((ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
- goto fail;
+ goto close;
if ((ret = ff_id3v2_parse_chapters(s, &id3v2_extra_meta)) < 0)
- goto fail;
+ goto close;
if ((ret = ff_id3v2_parse_priv(s, &id3v2_extra_meta)) < 0)
- goto fail;
+ goto close;
} else
av_log(s, AV_LOG_DEBUG, "demuxer does not support additional id3 data, skipping\n");
}
ff_id3v2_free_extra_meta(&id3v2_extra_meta);
if ((ret = avformat_queue_attached_pictures(s)) < 0)
- goto fail;
+ goto close;
if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->internal->data_offset)
s->internal->data_offset = avio_tell(s->pb);
*ps = s;
return 0;
+close:
+ if (s->iformat->read_close)
+ s->iformat->read_close(s);
fail:
ff_id3v2_free_extra_meta(&id3v2_extra_meta);
av_dict_free(&tmp);
int ret, i, err;
AVStream *st;
+ pkt->data = NULL;
+ pkt->size = 0;
+ av_init_packet(pkt);
+
for (;;) {
AVPacketList *pktl = s->internal->raw_packet_buffer;
+ const AVPacket *pkt1;
if (pktl) {
- *pkt = pktl->pkt;
- st = s->streams[pkt->stream_index];
+ st = s->streams[pktl->pkt.stream_index];
if (s->internal->raw_packet_buffer_remaining_size <= 0)
if ((err = probe_codec(s, st, NULL)) < 0)
return err;
if (st->request_probe <= 0) {
- s->internal->raw_packet_buffer = pktl->next;
+ ff_packet_list_get(&s->internal->raw_packet_buffer,
+ &s->internal->raw_packet_buffer_end, pkt);
s->internal->raw_packet_buffer_remaining_size += pkt->size;
- av_free(pktl);
return 0;
}
}
- pkt->data = NULL;
- pkt->size = 0;
- av_init_packet(pkt);
ret = s->iformat->read_packet(s, pkt);
if (ret < 0) {
+ av_packet_unref(pkt);
+
/* Some demuxers return FFERROR_REDO when they consume
data and discard it (ignored streams, junk, extradata).
We must re-call the demuxer to get the real packet. */
}
err = av_packet_make_refcounted(pkt);
- if (err < 0)
+ if (err < 0) {
+ av_packet_unref(pkt);
return err;
+ }
- if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
- (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
+ if (pkt->flags & AV_PKT_FLAG_CORRUPT) {
av_log(s, AV_LOG_WARNING,
- "Dropped corrupted packet (stream = %d)\n",
- pkt->stream_index);
- av_packet_unref(pkt);
- continue;
+ "Packet corrupt (stream = %d, dts = %s)",
+ pkt->stream_index, av_ts2str(pkt->dts));
+ if (s->flags & AVFMT_FLAG_DISCARD_CORRUPT) {
+ av_log(s, AV_LOG_WARNING, ", dropping it.\n");
+ av_packet_unref(pkt);
+ continue;
+ }
+ av_log(s, AV_LOG_WARNING, ".\n");
}
- if (pkt->stream_index >= (unsigned)s->nb_streams) {
- av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
- continue;
- }
+ av_assert0(pkt->stream_index < (unsigned)s->nb_streams &&
+ "Invalid stream index.\n");
st = s->streams[pkt->stream_index];
err = ff_packet_list_put(&s->internal->raw_packet_buffer,
&s->internal->raw_packet_buffer_end,
pkt, 0);
- if (err)
+ if (err < 0) {
+ av_packet_unref(pkt);
return err;
- s->internal->raw_packet_buffer_remaining_size -= pkt->size;
+ }
+ pkt1 = &s->internal->raw_packet_buffer_end->pkt;
+ s->internal->raw_packet_buffer_remaining_size -= pkt1->size;
- if ((err = probe_codec(s, st, pkt)) < 0)
+ if ((err = probe_codec(s, st, pkt1)) < 0)
return err;
}
}
const AVCodecDescriptor *d = avcodec_descriptor_get(id);
if (!d)
return 0;
- if (d->type == AVMEDIA_TYPE_VIDEO && !(d->props & AV_CODEC_PROP_INTRA_ONLY))
+ if ((d->type == AVMEDIA_TYPE_VIDEO || d->type == AVMEDIA_TYPE_AUDIO) &&
+ !(d->props & AV_CODEC_PROP_INTRA_ONLY))
return 0;
return 1;
}
}
duration = av_mul_q((AVRational) {pkt->duration, 1}, st->time_base);
- if (pkt->duration == 0) {
+ if (pkt->duration <= 0) {
ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
if (den && num) {
duration = (AVRational) {num, den};
}
}
- if (pkt->duration != 0 && (s->internal->packet_buffer || s->internal->parse_queue))
+ if (pkt->duration > 0 && (s->internal->packet_buffer || s->internal->parse_queue))
update_initial_durations(s, st, pkt->stream_index, pkt->duration);
/* Correct timestamps with byte offset if demuxers only have timestamps
* by knowing the future. */
} else if (pkt->pts != AV_NOPTS_VALUE ||
pkt->dts != AV_NOPTS_VALUE ||
- pkt->duration ) {
+ pkt->duration > 0 ) {
/* presentation is not delayed : PTS and DTS are the same */
if (pkt->pts == AV_NOPTS_VALUE)
if (pkt->pts == AV_NOPTS_VALUE)
pkt->pts = st->cur_dts;
pkt->dts = pkt->pts;
- if (pkt->pts != AV_NOPTS_VALUE)
+ if (pkt->pts != AV_NOPTS_VALUE && duration.num >= 0)
st->cur_dts = av_add_stable(st->time_base, pkt->pts, duration, 1);
}
}
st->cur_dts = pkt->dts;
if (s->debug & FF_FDEBUG_TS)
- av_log(s, AV_LOG_DEBUG, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s\n",
- presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts));
+ av_log(s, AV_LOG_DEBUG, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s st:%d (%d)\n",
+ presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), st->index, st->id);
/* update flags */
if (st->codecpar->codec_type == AVMEDIA_TYPE_DATA || is_intra_only(st->codecpar->codec_id))
/**
* Parse a packet, add all split parts to parse_queue.
*
- * @param pkt Packet to parse, NULL when flushing the parser at end of stream.
+ * @param pkt Packet to parse; must not be NULL.
+ * @param flush Indicates whether to flush. If set, pkt must be blank.
*/
-static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
+static int parse_packet(AVFormatContext *s, AVPacket *pkt,
+ int stream_index, int flush)
{
- AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
+ AVPacket out_pkt;
AVStream *st = s->streams[stream_index];
- uint8_t *data = pkt ? pkt->data : NULL;
- int size = pkt ? pkt->size : 0;
- int ret = 0, got_output = 0;
-
- if (!pkt) {
- av_init_packet(&flush_pkt);
- pkt = &flush_pkt;
- got_output = 1;
- } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
+ uint8_t *data = pkt->data;
+ int size = pkt->size;
+ int ret = 0, got_output = flush;
+
+ if (size || flush) {
+ av_init_packet(&out_pkt);
+ } else if (st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
// preserve 0-size sync packets
compute_pkt_fields(s, st, st->parser, pkt, AV_NOPTS_VALUE, AV_NOPTS_VALUE);
}
- while (size > 0 || (pkt == &flush_pkt && got_output)) {
+ while (size > 0 || (flush && got_output)) {
int len;
int64_t next_pts = pkt->pts;
int64_t next_dts = pkt->dts;
- av_init_packet(&out_pkt);
len = av_parser_parse2(st->parser, st->internal->avctx,
&out_pkt.data, &out_pkt.size, data, size,
pkt->pts, pkt->dts, pkt->pos);
}
/* end of the stream => close and free the parser */
- if (pkt == &flush_pkt) {
+ if (flush) {
av_parser_close(st->parser);
st->parser = NULL;
}
static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
- int ret = 0, i, got_packet = 0;
+ int ret, i, got_packet = 0;
AVDictionary *metadata = NULL;
- av_init_packet(pkt);
-
while (!got_packet && !s->internal->parse_queue) {
AVStream *st;
- AVPacket cur_pkt;
/* read next packet */
- ret = ff_read_packet(s, &cur_pkt);
+ ret = ff_read_packet(s, pkt);
if (ret < 0) {
if (ret == AVERROR(EAGAIN))
return ret;
for (i = 0; i < s->nb_streams; i++) {
st = s->streams[i];
if (st->parser && st->need_parsing)
- parse_packet(s, NULL, st->index);
+ parse_packet(s, pkt, st->index, 1);
}
/* all remaining packets are now in parse_queue =>
* really terminate parsing */
break;
}
ret = 0;
- st = s->streams[cur_pkt.stream_index];
+ st = s->streams[pkt->stream_index];
/* update context if required */
if (st->internal->need_context_update) {
}
ret = avcodec_parameters_to_context(st->internal->avctx, st->codecpar);
- if (ret < 0)
+ if (ret < 0) {
+ av_packet_unref(pkt);
return ret;
+ }
#if FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
/* update deprecated public codec context */
ret = avcodec_parameters_to_context(st->codec, st->codecpar);
- if (ret < 0)
+ if (ret < 0) {
+ av_packet_unref(pkt);
return ret;
+ }
FF_ENABLE_DEPRECATION_WARNINGS
#endif
st->internal->need_context_update = 0;
}
- if (cur_pkt.pts != AV_NOPTS_VALUE &&
- cur_pkt.dts != AV_NOPTS_VALUE &&
- cur_pkt.pts < cur_pkt.dts) {
+ if (pkt->pts != AV_NOPTS_VALUE &&
+ pkt->dts != AV_NOPTS_VALUE &&
+ pkt->pts < pkt->dts) {
av_log(s, AV_LOG_WARNING,
"Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
- cur_pkt.stream_index,
- av_ts2str(cur_pkt.pts),
- av_ts2str(cur_pkt.dts),
- cur_pkt.size);
+ pkt->stream_index,
+ av_ts2str(pkt->pts),
+ av_ts2str(pkt->dts),
+ pkt->size);
}
if (s->debug & FF_FDEBUG_TS)
av_log(s, AV_LOG_DEBUG,
"ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%"PRId64", flags=%d\n",
- cur_pkt.stream_index,
- av_ts2str(cur_pkt.pts),
- av_ts2str(cur_pkt.dts),
- cur_pkt.size, cur_pkt.duration, cur_pkt.flags);
+ pkt->stream_index,
+ av_ts2str(pkt->pts),
+ av_ts2str(pkt->dts),
+ pkt->size, pkt->duration, pkt->flags);
if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
st->parser = av_parser_init(st->codecpar->codec_id);
if (!st->need_parsing || !st->parser) {
/* no parsing needed: we just output the packet as is */
- *pkt = cur_pkt;
compute_pkt_fields(s, st, NULL, pkt, AV_NOPTS_VALUE, AV_NOPTS_VALUE);
if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
(pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
}
got_packet = 1;
} else if (st->discard < AVDISCARD_ALL) {
- if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
+ if ((ret = parse_packet(s, pkt, pkt->stream_index, 0)) < 0)
return ret;
st->codecpar->sample_rate = st->internal->avctx->sample_rate;
st->codecpar->bit_rate = st->internal->avctx->bit_rate;
st->codecpar->codec_id = st->internal->avctx->codec_id;
} else {
/* free packet */
- av_packet_unref(&cur_pkt);
+ av_packet_unref(pkt);
}
if (pkt->flags & AV_PKT_FLAG_KEY)
st->skip_to_keyframe = 0;
if (st->skip_to_keyframe) {
- av_packet_unref(&cur_pkt);
- if (got_packet) {
- *pkt = cur_pkt;
- }
+ av_packet_unref(pkt);
got_packet = 0;
}
}
av_ts2str(pkt->dts),
pkt->size, pkt->duration, pkt->flags);
+ /* A demuxer might have returned EOF because of an IO error, let's
+ * propagate this back to the user. */
+ if (ret == AVERROR_EOF && s->pb && s->pb->error < 0 && s->pb->error != AVERROR(EAGAIN))
+ ret = s->pb->error;
+
return ret;
}
ret = ff_packet_list_put(&s->internal->packet_buffer,
&s->internal->packet_buffer_end,
- pkt, FF_PACKETLIST_FLAG_REF_PACKET);
- av_packet_unref(pkt);
- if (ret < 0)
+ pkt, 0);
+ if (ret < 0) {
+ av_packet_unref(pkt);
return ret;
+ }
}
return_packet:
/* We set the current DTS to an unspecified origin. */
st->cur_dts = AV_NOPTS_VALUE;
- st->probe_packets = MAX_PROBE_PACKETS;
+ st->probe_packets = s->max_probe_packets;
for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
st->pts_buffer[j] = AV_NOPTS_VALUE;
//We could use URLProtocol flags here but as many user applications do not use URLProtocols this would be unreliable
const char *proto = avio_find_protocol_name(s->url);
+ av_assert0(time_tolerance >= 0);
+
if (!proto) {
av_log(s, AV_LOG_INFO,
"Protocol name not provided, cannot determine if input is local or "
for (; i2 < st2->nb_index_entries; i2++) {
AVIndexEntry *e2 = &st2->index_entries[i2];
int64_t e2_pts = av_rescale_q(e2->timestamp, st2->time_base, AV_TIME_BASE_Q);
- if (e2_pts - e1_pts < time_tolerance)
+ if (e2_pts < e1_pts || e2_pts - (uint64_t)e1_pts < time_tolerance)
continue;
pos_delta = FFMAX(pos_delta, e1->pos - e2->pos);
break;
/* XXX This could be adjusted depending on protocol*/
if (s->pb->buffer_size < pos_delta && pos_delta < (1<<24)) {
av_log(s, AV_LOG_VERBOSE, "Reconfiguring buffers to size %"PRId64"\n", pos_delta);
- ffio_set_buf_size(s->pb, pos_delta);
+
+ /* realloc the buffer and the original data will be retained */
+ if (ffio_realloc_buf(s->pb, pos_delta)) {
+ av_log(s, AV_LOG_ERROR, "Realloc buffer fail.\n");
+ return;
+ }
+
s->pb->short_seek_threshold = FFMAX(s->pb->short_seek_threshold, pos_delta/2);
}
int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
int64_t target_ts, int flags)
{
- AVInputFormat *avif = s->iformat;
+ const AVInputFormat *avif = s->iformat;
int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
int64_t ts_min, ts_max, ts;
int index;
case AVMEDIA_TYPE_VIDEO:
case AVMEDIA_TYPE_AUDIO:
if (st->start_time != AV_NOPTS_VALUE || st->first_dts != AV_NOPTS_VALUE) {
- av_log(ic, AV_LOG_DEBUG, "stream %d : no PTS found at end of file, duration not set\n", i);
+ av_log(ic, AV_LOG_WARNING, "stream %d : no PTS found at end of file, duration not set\n", i);
} else
- av_log(ic, AV_LOG_DEBUG, "stream %d : no TS found at start of file, duration not set\n", i);
+ av_log(ic, AV_LOG_WARNING, "stream %d : no TS found at start of file, duration not set\n", i);
}
}
}
}
}
+/* Human-readable names indexed 1:1 by AVDurationEstimationMethod;
+ * must be kept in sync with that enum. */
+static const char *duration_name[] = {
+ [AVFMT_DURATION_FROM_PTS] = "pts",
+ [AVFMT_DURATION_FROM_STREAM] = "stream",
+ [AVFMT_DURATION_FROM_BITRATE] = "bit rate",
+};
+
+/* Map a duration-estimation method to its display name for log output.
+ * No bounds check: the caller must pass a valid enum value. */
+static const char *duration_estimate_name(enum AVDurationEstimationMethod method)
+{
+ return duration_name[method];
+}
+
static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
{
int64_t file_size;
/* at least one component has timings - we use them for all
* the components */
fill_all_stream_timings(ic);
- ic->duration_estimation_method = AVFMT_DURATION_FROM_STREAM;
+ /* The nut demuxer estimates the duration from PTS. */
+ if(!strcmp(ic->iformat->name, "nut"))
+ ic->duration_estimation_method = AVFMT_DURATION_FROM_PTS;
+ else
+ ic->duration_estimation_method = AVFMT_DURATION_FROM_STREAM;
} else {
/* less precise: use bitrate info */
estimate_timings_from_bit_rate(ic);
AVStream av_unused *st;
for (i = 0; i < ic->nb_streams; i++) {
st = ic->streams[i];
- av_log(ic, AV_LOG_TRACE, "stream %d: start_time: %0.3f duration: %0.3f\n", i,
- (double) st->start_time * av_q2d(st->time_base),
- (double) st->duration * av_q2d(st->time_base));
+ if (st->time_base.den)
+ av_log(ic, AV_LOG_TRACE, "stream %d: start_time: %0.3f duration: %0.3f\n", i,
+ (double) st->start_time * av_q2d(st->time_base),
+ (double) st->duration * av_q2d(st->time_base));
}
av_log(ic, AV_LOG_TRACE,
- "format: start_time: %0.3f duration: %0.3f bitrate=%"PRId64" kb/s\n",
+ "format: start_time: %0.3f duration: %0.3f (estimate from %s) bitrate=%"PRId64" kb/s\n",
(double) ic->start_time / AV_TIME_BASE,
(double) ic->duration / AV_TIME_BASE,
+ duration_estimate_name(ic->duration_estimation_method),
(int64_t)ic->bit_rate / 1000);
}
}
}
/* returns 1 or 0 if or if not decoded data was returned, or a negative error */
-static int try_decode_frame(AVFormatContext *s, AVStream *st, AVPacket *avpkt,
- AVDictionary **options)
+static int try_decode_frame(AVFormatContext *s, AVStream *st,
+ const AVPacket *avpkt, AVDictionary **options)
{
AVCodecContext *avctx = st->internal->avctx;
const AVCodec *codec;
}
}
}
- st->info->duration_count++;
- st->info->rfps_duration_sum += duration;
+ if (st->info->rfps_duration_sum <= INT64_MAX - duration) {
+ st->info->duration_count++;
+ st->info->rfps_duration_sum += duration;
+ }
if (st->info->duration_count % 10 == 0) {
int n = st->info->duration_count;
return ret;
}
-static int extract_extradata(AVStream *st, AVPacket *pkt)
+static int extract_extradata(AVStream *st, const AVPacket *pkt)
{
AVStreamInternal *sti = st->internal;
AVPacket *pkt_ref;
int64_t read_size;
AVStream *st;
AVCodecContext *avctx;
- AVPacket pkt1, *pkt;
+ AVPacket pkt1;
int64_t old_offset = avio_tell(ic->pb);
// new streams might appear, no options for those
int orig_nb_streams = ic->nb_streams;
read_size = 0;
for (;;) {
+ const AVPacket *pkt;
int analyzed_all_streams;
if (ff_check_interrupt(&ic->interrupt_callback)) {
ret = AVERROR_EXIT;
}
analyzed_all_streams = 0;
if (!missing_streams || !*missing_streams)
- if (i == ic->nb_streams) {
- analyzed_all_streams = 1;
- /* NOTE: If the format has no header, then we need to read some
- * packets to get most of the streams, so we cannot stop here. */
- if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
- /* If we found the info for all the codecs, we can stop. */
- ret = count;
- av_log(ic, AV_LOG_DEBUG, "All info found\n");
- flush_codecs = 0;
- break;
+ if (i == ic->nb_streams) {
+ analyzed_all_streams = 1;
+ /* NOTE: If the format has no header, then we need to read some
+ * packets to get most of the streams, so we cannot stop here. */
+ if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
+ /* If we found the info for all the codecs, we can stop. */
+ ret = count;
+ av_log(ic, AV_LOG_DEBUG, "All info found\n");
+ flush_codecs = 0;
+ break;
+ }
}
- }
/* We did not get all the codec info, but we read too much data. */
if (read_size >= probesize) {
ret = count;
break;
}
- pkt = &pkt1;
-
if (!(ic->flags & AVFMT_FLAG_NOBUFFER)) {
ret = ff_packet_list_put(&ic->internal->packet_buffer,
&ic->internal->packet_buffer_end,
- pkt, 0);
+ &pkt1, 0);
if (ret < 0)
- goto find_stream_info_err;
+ goto unref_then_goto_end;
+
+ pkt = &ic->internal->packet_buffer_end->pkt;
+ } else {
+ pkt = &pkt1;
}
st = ic->streams[pkt->stream_index];
if (!st->internal->avctx_inited) {
ret = avcodec_parameters_to_context(avctx, st->codecpar);
if (ret < 0)
- goto find_stream_info_err;
+ goto unref_then_goto_end;
st->internal->avctx_inited = 1;
}
limit,
t, pkt->stream_index);
if (ic->flags & AVFMT_FLAG_NOBUFFER)
- av_packet_unref(pkt);
+ av_packet_unref(&pkt1);
break;
}
if (pkt->duration) {
if (!st->internal->avctx->extradata) {
ret = extract_extradata(st, pkt);
if (ret < 0)
- goto find_stream_info_err;
+ goto unref_then_goto_end;
}
/* If still no information, we try to open the codec and to
(options && i < orig_nb_streams) ? &options[i] : NULL);
if (ic->flags & AVFMT_FLAG_NOBUFFER)
- av_packet_unref(pkt);
+ av_packet_unref(&pkt1);
st->codec_info_nb_frames++;
count++;
av_log(ic, AV_LOG_DEBUG, "After avformat_find_stream_info() pos: %"PRId64" bytes read:%"PRId64" seeks:%d frames:%d\n",
avio_tell(ic->pb), ic->pb->bytes_read, ic->pb->seek_count, count);
return ret;
+
+unref_then_goto_end:
+ av_packet_unref(&pkt1);
+ goto find_stream_info_err;
}
AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
continue;
}
}
- disposition = !(st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED | AV_DISPOSITION_VISUAL_IMPAIRED));
+ disposition = !(st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED | AV_DISPOSITION_VISUAL_IMPAIRED))
+ + !! (st->disposition & AV_DISPOSITION_DEFAULT);
count = st->codec_info_nb_frames;
bitrate = par->bit_rate;
multiframe = FFMIN(5, count);
if (!s)
return;
+ if (s->oformat && s->oformat->deinit && s->internal->initialized)
+ s->oformat->deinit(s);
+
av_opt_free(s);
if (s->iformat && s->iformat->priv_class && s->priv_data)
av_opt_free(s->priv_data);
st->start_time = AV_NOPTS_VALUE;
st->duration = AV_NOPTS_VALUE;
st->first_dts = AV_NOPTS_VALUE;
- st->probe_packets = MAX_PROBE_PACKETS;
+ st->probe_packets = s->max_probe_packets;
st->pts_wrap_reference = AV_NOPTS_VALUE;
st->pts_wrap_behavior = AV_PTS_WRAP_IGNORE;
char *hostname, int hostname_size,
int *port_ptr, char *path, int path_size, const char *url)
{
- const char *p, *ls, *ls2, *at, *at2, *col, *brk;
+ const char *p, *ls, *at, *at2, *col, *brk;
if (port_ptr)
*port_ptr = -1;
}
/* separate path from hostname */
- ls = strchr(p, '/');
- ls2 = strchr(p, '?');
- if (!ls)
- ls = ls2;
- else if (ls && ls2)
- ls = FFMIN(ls, ls2);
- if (ls)
- av_strlcpy(path, ls, path_size);
- else
- ls = &p[strlen(p)]; // XXX
+ ls = p + strcspn(p, "/?#");
+ av_strlcpy(path, ls, path_size);
/* the rest is hostname, use that to parse auth/port */
if (ls != p) {
* >0 if st is a matching stream
*/
static int match_stream_specifier(AVFormatContext *s, AVStream *st,
- const char *spec, const char **indexptr)
+ const char *spec, const char **indexptr, AVProgram **p)
{
int match = 1; /* Stores if the specifier matches so far. */
while (*spec) {
for (j = 0; j < s->programs[i]->nb_stream_indexes; j++) {
if (st->index == s->programs[i]->stream_index[j]) {
found = 1;
+ if (p)
+ *p = s->programs[i];
i = s->nb_programs;
break;
}
int ret, index;
char *endptr;
const char *indexptr = NULL;
+ AVProgram *p = NULL;
+ int nb_streams;
- ret = match_stream_specifier(s, st, spec, &indexptr);
+ ret = match_stream_specifier(s, st, spec, &indexptr, &p);
if (ret < 0)
goto error;
return (index == st->index);
/* If we requested a matching stream index, we have to ensure st is that. */
- for (int i = 0; i < s->nb_streams && index >= 0; i++) {
- ret = match_stream_specifier(s, s->streams[i], spec, NULL);
+ nb_streams = p ? p->nb_stream_indexes : s->nb_streams;
+ for (int i = 0; i < nb_streams && index >= 0; i++) {
+ AVStream *candidate = p ? s->streams[p->stream_index[i]] : s->streams[i];
+ ret = match_stream_specifier(s, candidate, spec, NULL, NULL);
if (ret < 0)
goto error;
- if (ret > 0 && index-- == 0 && st == s->streams[i])
+ if (ret > 0 && index-- == 0 && st == candidate)
return 1;
}
return 0;
};
const uint8_t *data = NULL;
- int size = 0;
+ int ret, size = 0;
if (st->codecpar->width == 1920) {
if (st->codecpar->field_order == AV_FIELD_PROGRESSIVE) {
if (!size)
return 0;
- av_freep(&st->codecpar->extradata);
- if (ff_alloc_extradata(st->codecpar, size))
- return AVERROR(ENOMEM);
+ if ((ret = ff_alloc_extradata(st->codecpar, size)) < 0)
+ return ret;
memcpy(st->codecpar->extradata, data, size);
return 0;