#include "riff.h"
#include "audiointerleave.h"
#include "url.h"
-#include <sys/time.h>
-#include <time.h>
#include <stdarg.h>
#if CONFIG_NETWORK
#include "network.h"
score_max = 0;
while ((fmt = av_oformat_next(fmt))) {
score = 0;
- if (fmt->name && short_name && !strcmp(fmt->name, short_name))
+ if (fmt->name && short_name && !av_strcasecmp(fmt->name, short_name))
score += 100;
if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
score += 10;
if (s->pb) {
s->flags |= AVFMT_FLAG_CUSTOM_IO;
if (!s->iformat)
- return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
+ return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
else if (s->iformat->flags & AVFMT_NOFILE)
return AVERROR(EINVAL);
return 0;
return ret;
if (s->iformat)
return 0;
- return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, 0);
+ return av_probe_input_buffer(s->pb, &s->iformat, filename, s, 0, s->probesize);
}
static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
return &pktl->pkt;
}
+/**
+ * Queue a copy of each stream's attached picture (cover art) onto the raw
+ * packet buffer so it is delivered once by the read path after open/seek.
+ * Streams the user set to AVDISCARD_ALL are skipped.
+ */
+static void queue_attached_pictures(AVFormatContext *s)
+{
+    int i;
+    for (i = 0; i < s->nb_streams; i++)
+        if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
+            s->streams[i]->discard < AVDISCARD_ALL) {
+            AVPacket copy = s->streams[i]->attached_pic;
+            /* the stream keeps ownership of attached_pic: clear destruct so
+             * freeing the queued copy cannot free the original data */
+            copy.destruct = NULL;
+            add_to_pktbuf(&s->raw_packet_buffer, &copy, &s->raw_packet_buffer_end);
+        }
+}
+
int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
{
AVFormatContext *s = *ps;
- int i, ret = 0;
+ int ret = 0;
AVDictionary *tmp = NULL;
ID3v2ExtraMeta *id3v2_extra_meta = NULL;
}
s->duration = s->start_time = AV_NOPTS_VALUE;
- av_strlcpy(s->filename, filename, sizeof(s->filename));
+ av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
/* allocate private data */
if (s->iformat->priv_data_size > 0) {
goto fail;
ff_id3v2_free_extra_meta(&id3v2_extra_meta);
- /* queue attached pictures */
- for (i = 0; i < s->nb_streams; i++)
- if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC) {
- AVPacket copy = s->streams[i]->attached_pic;
- copy.destruct = NULL;
-            add_to_pktbuf(&s->raw_packet_buffer, &copy, &s->raw_packet_buffer_end);
- }
+ queue_attached_pictures(s);
if (s->pb && !s->data_offset)
s->data_offset = avio_tell(s->pb);
/*******************************************************/
-int av_read_packet(AVFormatContext *s, AVPacket *pkt)
+/**
+ * Accumulate a packet's payload into st->probe_data and try to detect the
+ * codec of a stream still marked CODEC_ID_PROBE.
+ *
+ * @param pkt next packet for the stream, or NULL at end of stream to force
+ *            a final detection attempt with the data gathered so far
+ */
+static void probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
+{
+    if(st->codec->codec_id == CODEC_ID_PROBE){
+        AVProbeData *pd = &st->probe_data;
+        av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
+        --st->probe_packets;
+
+        if (pkt) {
+            uint8_t *new_buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
+            if (!new_buf) {
+                /* OOM: keep the old buffer and force a final probe attempt
+                 * with whatever data was already accumulated */
+                st->probe_packets = 0;
+            } else {
+                pd->buf = new_buf;
+                memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
+                pd->buf_size += pkt->size;
+                memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
+            }
+        } else {
+            st->probe_packets = 0;
+        }
+
+        /* probe when out of probe packets or when buf_size crossed a
+         * power-of-two boundary; on the pkt==NULL path probe_packets is 0,
+         * so the pkt->size dereference below is never evaluated */
+        if (!st->probe_packets ||
+            av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) {
+            set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0);
+            if(st->codec->codec_id != CODEC_ID_PROBE){
+                pd->buf_size=0;
+                av_freep(&pd->buf);
+                av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
+            }
+        }
+    }
+}
+
+int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
{
int ret, i;
AVStream *st;
if (pktl) {
*pkt = pktl->pkt;
- if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
- !s->streams[pkt->stream_index]->probe_packets ||
- s->raw_packet_buffer_remaining_size < pkt->size){
- AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
+ st = s->streams[pkt->stream_index];
+ if (st->codec->codec_id != CODEC_ID_PROBE || !st->probe_packets ||
+ s->raw_packet_buffer_remaining_size < pkt->size) {
+ AVProbeData *pd;
+ if (st->probe_packets) {
+ probe_codec(s, st, NULL);
+ }
+ pd = &st->probe_data;
av_freep(&pd->buf);
pd->buf_size = 0;
s->raw_packet_buffer = pktl->next;
if (ret < 0) {
if (!pktl || ret == AVERROR(EAGAIN))
return ret;
- for (i = 0; i < s->nb_streams; i++)
- s->streams[i]->probe_packets = 0;
+ for (i = 0; i < s->nb_streams; i++) {
+ st = s->streams[i];
+ if (st->probe_packets) {
+ probe_codec(s, st, NULL);
+ }
+ }
continue;
}
add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
s->raw_packet_buffer_remaining_size -= pkt->size;
- if(st->codec->codec_id == CODEC_ID_PROBE){
- AVProbeData *pd = &st->probe_data;
- av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);
- --st->probe_packets;
-
- pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
- memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
- pd->buf_size += pkt->size;
- memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
-
- if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
- //FIXME we do not reduce score to 0 for the case of running out of buffer space in bytes
- set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0);
- if(st->codec->codec_id != CODEC_ID_PROBE){
- pd->buf_size=0;
- av_freep(&pd->buf);
- av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
- }
- }
- }
+ probe_codec(s, st, pkt);
}
}
+#if FF_API_READ_PACKET
+/* deprecated public entry point kept for ABI compatibility;
+ * simply forwards to the internal ff_read_packet() */
+int av_read_packet(AVFormatContext *s, AVPacket *pkt)
+{
+    return ff_read_packet(s, pkt);
+}
+#endif
+
+
/**********************************************************/
/**
* Get the number of samples of an audio frame. Return -1 on error.
*/
-static int get_audio_frame_size(AVCodecContext *enc, int size)
+static int get_audio_frame_size(AVCodecContext *enc, int size, int mux)
 {
     int frame_size;
+    /* give frame_size priority if demuxing */
+    if (!mux && enc->frame_size > 1)
+        return enc->frame_size;
-    if (bits_per_sample) {
-        if (enc->channels == 0)
-            return -1;
-        frame_size = (size << 3) / (bits_per_sample * enc->channels);
-    } else {
-        /* used for example by ADPCM codecs */
-        if (enc->bit_rate == 0)
-            return -1;
-        frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
-    }
-    } else {
-        frame_size = enc->frame_size;
-    }
-    return frame_size;
+    /* let libavcodec derive the duration from codec id/parameters
+     * (covers the fixed-bits-per-sample and ADPCM cases removed above) */
+    if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
+        return frame_size;
+
+    /* fallback to using frame_size if muxing */
+    if (enc->frame_size > 1)
+        return enc->frame_size;
+
+    return -1;
 }
}
break;
case AVMEDIA_TYPE_AUDIO:
- frame_size = get_audio_frame_size(st->codec, pkt->size);
+ frame_size = get_audio_frame_size(st->codec, pkt->size, 0);
if (frame_size <= 0 || st->codec->sample_rate <= 0)
break;
*pnum = frame_size;
case CODEC_ID_VCR1:
case CODEC_ID_DNXHD:
case CODEC_ID_JPEG2000:
+ case CODEC_ID_MDEC:
return 1;
default: break;
}
st->start_time = pts;
}
-static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
+static void update_initial_durations(AVFormatContext *s, AVStream *st,
+ int stream_index, int duration)
{
AVPacketList *pktl= s->packet_buffer;
int64_t cur_dts= 0;
if(st->first_dts != AV_NOPTS_VALUE){
cur_dts= st->first_dts;
for(; pktl; pktl= pktl->next){
- if(pktl->pkt.stream_index == pkt->stream_index){
+ if(pktl->pkt.stream_index == stream_index){
if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
break;
- cur_dts -= pkt->duration;
+ cur_dts -= duration;
}
}
pktl= s->packet_buffer;
return;
for(; pktl; pktl= pktl->next){
- if(pktl->pkt.stream_index != pkt->stream_index)
+ if(pktl->pkt.stream_index != stream_index)
continue;
if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
&& !pktl->pkt.duration){
pktl->pkt.dts= cur_dts;
if(!st->codec->has_b_frames)
pktl->pkt.pts= cur_dts;
- cur_dts += pkt->duration;
- pktl->pkt.duration= pkt->duration;
+ cur_dts += duration;
+ if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
+ pktl->pkt.duration = duration;
}else
break;
}
pkt->dts= pkt->pts= AV_NOPTS_VALUE;
}
- if (pkt->duration == 0) {
+ if (pkt->duration == 0 && st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
compute_frame_duration(&num, &den, st, pc, pkt);
if (den && num) {
pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
if(pkt->duration != 0 && s->packet_buffer)
- update_initial_durations(s, st, pkt);
+ update_initial_durations(s, st, pkt->stream_index, pkt->duration);
}
}
st->last_IP_pts= pkt->pts;
/* cannot compute PTS if not present (we can compute it only
by knowing the future */
- } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
- if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
- int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
- int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
- if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
- pkt->pts += pkt->duration;
- // av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
+ } else if (pkt->pts != AV_NOPTS_VALUE ||
+ pkt->dts != AV_NOPTS_VALUE ||
+ pkt->duration ||
+ st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
+ int duration = pkt->duration;
+ if (!duration && st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
+ compute_frame_duration(&num, &den, st, pc, pkt);
+ if (den && num) {
+ duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den,
+ den * (int64_t)st->time_base.num,
+ AV_ROUND_DOWN);
+ if (duration != 0 && s->packet_buffer) {
+ update_initial_durations(s, st, pkt->stream_index,
+ duration);
+ }
}
}
- /* presentation is not delayed : PTS and DTS are the same */
- if(pkt->pts == AV_NOPTS_VALUE)
- pkt->pts = pkt->dts;
- update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
- if(pkt->pts == AV_NOPTS_VALUE)
- pkt->pts = st->cur_dts;
- pkt->dts = pkt->pts;
- if(pkt->pts != AV_NOPTS_VALUE)
- st->cur_dts = pkt->pts + pkt->duration;
+ if (pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE ||
+ duration) {
+ /* presentation is not delayed : PTS and DTS are the same */
+ if (pkt->pts == AV_NOPTS_VALUE)
+ pkt->pts = pkt->dts;
+ update_initial_timestamps(s, pkt->stream_index, pkt->pts,
+ pkt->pts);
+ if (pkt->pts == AV_NOPTS_VALUE)
+ pkt->pts = st->cur_dts;
+ pkt->dts = pkt->pts;
+ if (pkt->pts != AV_NOPTS_VALUE)
+ st->cur_dts = pkt->pts + duration;
+ }
}
}
*pkt_buf_end = NULL;
}
+/**
+ * Parse a packet, add all split parts to parse_queue
+ *
+ * @param pkt packet to parse, NULL when flushing the parser at end of stream
+ */
+static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
+{
+    AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
+    AVStream *st = s->streams[stream_index];
+    uint8_t *data = pkt ? pkt->data : NULL;
+    int size = pkt ? pkt->size : 0;
+    int ret = 0, got_output = 0;
+
+    if (!pkt) {
+        av_init_packet(&flush_pkt);
+        pkt = &flush_pkt;
+        /* seed got_output so the loop runs at least once with a NULL/0
+         * buffer, draining whatever the parser has buffered */
+        got_output = 1;
+    }
+
+    while (size > 0 || (pkt == &flush_pkt && got_output)) {
+        int len;
+
+        av_init_packet(&out_pkt);
+        len = av_parser_parse2(st->parser, st->codec,
+                               &out_pkt.data, &out_pkt.size, data, size,
+                               pkt->pts, pkt->dts, pkt->pos);
+
+        /* timestamps belong to the first parser call only */
+        pkt->pts = pkt->dts = AV_NOPTS_VALUE;
+        /* increment read pointer */
+        data += len;
+        size -= len;
+
+        got_output = !!out_pkt.size;
+
+        if (!out_pkt.size)
+            continue;
+
+        /* set the duration */
+        out_pkt.duration = 0;
+        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
+            if (st->codec->sample_rate > 0) {
+                /* parser duration is in samples; convert to stream timebase */
+                out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
+                                                    (AVRational){ 1, st->codec->sample_rate },
+                                                    st->time_base,
+                                                    AV_ROUND_DOWN);
+            }
+        } else if (st->codec->time_base.num != 0 &&
+                   st->codec->time_base.den != 0) {
+            out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
+                                                st->codec->time_base,
+                                                st->time_base,
+                                                AV_ROUND_DOWN);
+        }
+
+        out_pkt.stream_index = st->index;
+        out_pkt.pts = st->parser->pts;
+        out_pkt.dts = st->parser->dts;
+        out_pkt.pos = st->parser->pos;
+
+        if (st->parser->key_frame == 1 ||
+            (st->parser->key_frame == -1 &&
+             st->parser->pict_type == AV_PICTURE_TYPE_I))
+            out_pkt.flags |= AV_PKT_FLAG_KEY;
+
+        compute_pkt_fields(s, st, st->parser, &out_pkt);
+
+        if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
+            out_pkt.flags & AV_PKT_FLAG_KEY) {
+            ff_reduce_index(s, st->index);
+            av_add_index_entry(st, st->parser->frame_offset, out_pkt.dts,
+                               0, 0, AVINDEX_KEYFRAME);
+        }
+
+        /* parser returned the input buffer unchanged: steal ownership from
+         * the input packet instead of duplicating the data below */
+        if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
+            out_pkt.destruct = pkt->destruct;
+            pkt->destruct = NULL;
+        }
+        if ((ret = av_dup_packet(&out_pkt)) < 0)
+            goto fail;
+
+        if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
+            av_free_packet(&out_pkt);
+            ret = AVERROR(ENOMEM);
+            goto fail;
+        }
+    }
+
+
+    /* end of the stream => close and free the parser */
+    if (pkt == &flush_pkt) {
+        av_parser_close(st->parser);
+        st->parser = NULL;
+    }
+
+fail:
+    /* frees the caller's packet on both paths (no-op for flush_pkt) */
+    av_free_packet(pkt);
+    return ret;
+}
+
+
static int read_from_packet_buffer(AVPacketList **pkt_buffer,
AVPacketList **pkt_buffer_end,
AVPacket *pkt)
return 0;
}
-
static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
- AVStream *st;
- int len, ret, i;
+ int ret = 0, i, got_packet = 0;
av_init_packet(pkt);
- for(;;) {
- /* select current input stream component */
- st = s->cur_st;
- if (st) {
- if (!st->need_parsing || !st->parser) {
- /* no parsing needed: we just output the packet as is */
- /* raw data support */
- *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
- compute_pkt_fields(s, st, NULL, pkt);
- s->cur_st = NULL;
- if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
- (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
- ff_reduce_index(s, st->index);
- av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
- }
- break;
- } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
- len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
- st->cur_ptr, st->cur_len,
- st->cur_pkt.pts, st->cur_pkt.dts,
- st->cur_pkt.pos);
- st->cur_pkt.pts = AV_NOPTS_VALUE;
- st->cur_pkt.dts = AV_NOPTS_VALUE;
- /* increment read pointer */
- st->cur_ptr += len;
- st->cur_len -= len;
-
- /* return packet if any */
- if (pkt->size) {
- got_packet:
- pkt->duration = 0;
- if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
- if (st->codec->sample_rate > 0) {
- pkt->duration = av_rescale_q_rnd(st->parser->duration,
- (AVRational){ 1, st->codec->sample_rate },
- st->time_base,
- AV_ROUND_DOWN);
- }
- } else if (st->codec->time_base.num != 0 &&
- st->codec->time_base.den != 0) {
- pkt->duration = av_rescale_q_rnd(st->parser->duration,
- st->codec->time_base,
- st->time_base,
- AV_ROUND_DOWN);
- }
- pkt->stream_index = st->index;
- pkt->pts = st->parser->pts;
- pkt->dts = st->parser->dts;
- pkt->pos = st->parser->pos;
- if (st->parser->key_frame == 1 ||
- (st->parser->key_frame == -1 &&
- st->parser->pict_type == AV_PICTURE_TYPE_I))
- pkt->flags |= AV_PKT_FLAG_KEY;
- if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){
- s->cur_st = NULL;
- pkt->destruct= st->cur_pkt.destruct;
- st->cur_pkt.destruct= NULL;
- st->cur_pkt.data = NULL;
- assert(st->cur_len == 0);
- }else{
- pkt->destruct = NULL;
- }
- compute_pkt_fields(s, st, st->parser, pkt);
-
- if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
- ff_reduce_index(s, st->index);
- av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
- 0, 0, AVINDEX_KEYFRAME);
- }
+ while (!got_packet && !s->parse_queue) {
+ AVStream *st;
+ AVPacket cur_pkt;
- break;
- }
- } else {
- /* free packet */
- av_free_packet(&st->cur_pkt);
- s->cur_st = NULL;
- }
- } else {
- AVPacket cur_pkt;
- /* read next packet */
- ret = av_read_packet(s, &cur_pkt);
- if (ret < 0) {
- if (ret == AVERROR(EAGAIN))
- return ret;
- /* return the last frames, if any */
- for(i = 0; i < s->nb_streams; i++) {
- st = s->streams[i];
- if (st->parser && st->need_parsing) {
- av_parser_parse2(st->parser, st->codec,
- &pkt->data, &pkt->size,
- NULL, 0,
- AV_NOPTS_VALUE, AV_NOPTS_VALUE,
- AV_NOPTS_VALUE);
- if (pkt->size)
- goto got_packet;
- }
- }
- /* no more packets: really terminate parsing */
+ /* read next packet */
+ ret = ff_read_packet(s, &cur_pkt);
+ if (ret < 0) {
+ if (ret == AVERROR(EAGAIN))
return ret;
+ /* flush the parsers */
+ for(i = 0; i < s->nb_streams; i++) {
+ st = s->streams[i];
+ if (st->parser && st->need_parsing)
+ parse_packet(s, NULL, st->index);
}
- st = s->streams[cur_pkt.stream_index];
- st->cur_pkt= cur_pkt;
-
- if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
- st->cur_pkt.dts != AV_NOPTS_VALUE &&
- st->cur_pkt.pts < st->cur_pkt.dts){
- av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
- st->cur_pkt.stream_index,
- st->cur_pkt.pts,
- st->cur_pkt.dts,
- st->cur_pkt.size);
-// av_free_packet(&st->cur_pkt);
-// return -1;
+ /* all remaining packets are now in parse_queue =>
+ * really terminate parsing */
+ break;
+ }
+ ret = 0;
+ st = s->streams[cur_pkt.stream_index];
+
+ if (cur_pkt.pts != AV_NOPTS_VALUE &&
+ cur_pkt.dts != AV_NOPTS_VALUE &&
+ cur_pkt.pts < cur_pkt.dts) {
+ av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
+ cur_pkt.stream_index,
+ cur_pkt.pts,
+ cur_pkt.dts,
+ cur_pkt.size);
+ }
+ if (s->debug & FF_FDEBUG_TS)
+ av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
+ cur_pkt.stream_index,
+ cur_pkt.pts,
+ cur_pkt.dts,
+ cur_pkt.size,
+ cur_pkt.duration,
+ cur_pkt.flags);
+
+ if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
+ st->parser = av_parser_init(st->codec->codec_id);
+ if (!st->parser) {
+ /* no parser available: just output the raw packets */
+ st->need_parsing = AVSTREAM_PARSE_NONE;
+ } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
+ st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
+ } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
+ st->parser->flags |= PARSER_FLAG_ONCE;
}
+ }
- if(s->debug & FF_FDEBUG_TS)
- av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
- st->cur_pkt.stream_index,
- st->cur_pkt.pts,
- st->cur_pkt.dts,
- st->cur_pkt.size,
- st->cur_pkt.duration,
- st->cur_pkt.flags);
-
- s->cur_st = st;
- st->cur_ptr = st->cur_pkt.data;
- st->cur_len = st->cur_pkt.size;
- if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
- st->parser = av_parser_init(st->codec->codec_id);
- if (!st->parser) {
- /* no parser available: just output the raw packets */
- st->need_parsing = AVSTREAM_PARSE_NONE;
- }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
- st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
- }else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){
- st->parser->flags |= PARSER_FLAG_ONCE;
- }
+ if (!st->need_parsing || !st->parser) {
+ /* no parsing needed: we just output the packet as is */
+ *pkt = cur_pkt;
+ compute_pkt_fields(s, st, NULL, pkt);
+ if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
+ (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
+ ff_reduce_index(s, st->index);
+ av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
}
+ got_packet = 1;
+ } else if (st->discard < AVDISCARD_ALL) {
+ if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
+ return ret;
+ } else {
+ /* free packet */
+ av_free_packet(&cur_pkt);
}
}
+
+ if (!got_packet && s->parse_queue)
+ ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
+
if(s->debug & FF_FDEBUG_TS)
av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
pkt->stream_index,
pkt->duration,
pkt->flags);
- return 0;
+ return ret;
}
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
/* XXX: suppress the packet queue */
static void flush_packet_queue(AVFormatContext *s)
{
+ free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
return -1;
for(i = 0; i < s->nb_streams; i++) {
st = s->streams[i];
- if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
+ if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
+ !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
return i;
}
if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
flush_packet_queue(s);
- s->cur_st = NULL;
-
/* for each stream, reset read state */
for(i = 0; i < s->nb_streams; i++) {
st = s->streams[i];
if (st->parser) {
av_parser_close(st->parser);
st->parser = NULL;
- av_free_packet(&st->cur_pkt);
}
st->last_IP_pts = AV_NOPTS_VALUE;
st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
st->reference_dts = AV_NOPTS_VALUE;
- /* fail safe */
- st->cur_ptr = NULL;
- st->cur_len = 0;
st->probe_packets = MAX_PROBE_PACKETS;
static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
int64_t pos_min, pos_max;
-#if 0
- AVStream *st;
-
- if (stream_index < 0)
- return -1;
-
- st= s->streams[stream_index];
-#endif
pos_min = s->data_offset;
pos_max = avio_size(s->pb) - 1;
avio_seek(s->pb, pos, SEEK_SET);
-#if 0
- av_update_cur_dts(s, st, ts);
-#endif
return 0;
}
return 0;
}
-int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
+static int seek_frame_internal(AVFormatContext *s, int stream_index,
+ int64_t timestamp, int flags)
{
int ret;
AVStream *st;
return -1;
}
+int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
+{
+    int ret = seek_frame_internal(s, stream_index, timestamp, flags);
+
+    /* a successful seek flushes the packet queues, so re-queue attached
+     * pictures to have them delivered again */
+    if (ret >= 0)
+        queue_attached_pictures(s);
+
+    return ret;
+}
+
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
{
if(min_ts > ts || max_ts < ts)
return -1;
if (s->iformat->read_seek2) {
+ int ret;
ff_read_frame_flush(s);
- return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
+ ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
+
+ if (ret >= 0)
+ queue_attached_pictures(s);
+ return ret;
}
if(s->iformat->read_timestamp){
if (st->duration != AV_NOPTS_VALUE)
return 1;
}
+ if (ic->duration != AV_NOPTS_VALUE)
+ return 1;
return 0;
}
int64_t filesize, offset, duration;
int retry=0;
- ic->cur_st = NULL;
-
/* flush packet queue */
flush_packet_queue(ic);
if (st->parser) {
av_parser_close(st->parser);
st->parser= NULL;
- av_free_packet(&st->cur_pkt);
}
}
break;
do {
- ret = av_read_packet(ic, pkt);
+ ret = ff_read_packet(ic, pkt);
} while(ret == AVERROR(EAGAIN));
if (ret != 0)
break;
}
}
-static int has_codec_parameters(AVCodecContext *avctx)
+static int has_codec_parameters(AVStream *st)
{
+ AVCodecContext *avctx = st->codec;
int val;
switch (avctx->codec_type) {
case AVMEDIA_TYPE_AUDIO:
- val = avctx->sample_rate && avctx->channels && avctx->sample_fmt != AV_SAMPLE_FMT_NONE;
- if (!avctx->frame_size &&
- (avctx->codec_id == CODEC_ID_AAC ||
- avctx->codec_id == CODEC_ID_MP1 ||
- avctx->codec_id == CODEC_ID_MP2 ||
- avctx->codec_id == CODEC_ID_MP3 ||
- avctx->codec_id == CODEC_ID_CELT))
+ val = avctx->sample_rate && avctx->channels;
+ if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
return 0;
break;
case AVMEDIA_TYPE_VIDEO:
- val = avctx->width && avctx->pix_fmt != PIX_FMT_NONE;
+ val = avctx->width;
+ if (st->info->found_decoder >= 0 && avctx->pix_fmt == PIX_FMT_NONE)
+ return 0;
break;
default:
val = 1;
AVFrame picture;
AVPacket pkt = *avpkt;
- if (!avcodec_is_open(st->codec)) {
+ if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
AVDictionary *thread_opt = NULL;
codec = st->codec->codec ? st->codec->codec :
avcodec_find_decoder(st->codec->codec_id);
- if (!codec)
+ if (!codec) {
+ st->info->found_decoder = -1;
return -1;
+ }
/* force thread count to 1 since the h264 decoder will not extract SPS
* and PPS to extradata during multi-threaded decoding */
ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
if (!options)
av_dict_free(&thread_opt);
- if (ret < 0)
+ if (ret < 0) {
+ st->info->found_decoder = -1;
return ret;
- }
+ }
+ st->info->found_decoder = 1;
+ } else if (!st->info->found_decoder)
+ st->info->found_decoder = 1;
+
+ if (st->info->found_decoder < 0)
+ return -1;
while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
ret >= 0 &&
- (!has_codec_parameters(st->codec) ||
+ (!has_codec_parameters(st) ||
!has_decode_delay_been_guessed(st) ||
(!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
got_picture = 0;
: &thread_opt);
//try to just open decoders, in case this is enough to get parameters
- if(!has_codec_parameters(st->codec)){
+ if (!has_codec_parameters(st)) {
if (codec && !st->codec->codec)
avcodec_open2(st->codec, codec, options ? &options[i]
: &thread_opt);
int fps_analyze_framecount = 20;
st = ic->streams[i];
- if (!has_codec_parameters(st->codec))
+ if (!has_codec_parameters(st))
break;
/* if the timebase is coarse (like the usual millisecond precision
of mkv), we need to analyze more frames to reliably arrive at
break;
if(st->parser && st->parser->parser->split && !st->codec->extradata)
break;
- if(st->first_dts == AV_NOPTS_VALUE)
+ if (st->first_dts == AV_NOPTS_VALUE &&
+ (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
+ st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
break;
}
if (i == ic->nb_streams) {
if (ret < 0) {
/* EOF or error*/
AVPacket empty_pkt = { 0 };
- int err;
+ int err = 0;
av_init_packet(&empty_pkt);
ret = -1; /* we could not have all the codec parameters before EOF */
st = ic->streams[i];
/* flush the decoders */
- do {
- err = try_decode_frame(st, &empty_pkt,
- (options && i < orig_nb_streams) ?
- &options[i] : NULL);
- } while (err > 0 && !has_codec_parameters(st->codec));
+ if (st->info->found_decoder == 1) {
+ do {
+ err = try_decode_frame(st, &empty_pkt,
+ (options && i < orig_nb_streams) ?
+ &options[i] : NULL);
+ } while (err > 0 && !has_codec_parameters(st));
+ }
if (err < 0) {
av_log(ic, AV_LOG_WARNING,
"decoding for stream %d failed\n", st->index);
- } else if (!has_codec_parameters(st->codec)){
+ } else if (!has_codec_parameters(st)) {
char buf[256];
avcodec_string(buf, sizeof(buf), st->codec, 0);
av_log(ic, AV_LOG_WARNING,
st = ic->streams[pkt->stream_index];
if (st->codec_info_nb_frames>1) {
- if (st->time_base.den > 0 && av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
+ if (av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration) {
av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
break;
}
}
for(i=0;i<ic->nb_streams;i++) {
st = ic->streams[i];
- if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
- av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
- (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
- st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
+ if (st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && st->info->codec_info_duration)
+ av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
+ (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
+ st->info->codec_info_duration*(int64_t)st->time_base.num, 60000);
// the check for tb_unreliable() is not completely correct, since this is not about handling
// a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
// ipmovie.c produces.
compute_chapters_end(ic);
-#if 0
- /* correct DTS for B-frame streams with no timestamps */
- for(i=0;i<ic->nb_streams;i++) {
- st = ic->streams[i];
- if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
- if(b-frames){
- ppktl = &ic->packet_buffer;
- while(ppkt1){
- if(ppkt1->stream_index != i)
- continue;
- if(ppkt1->pkt->dts < 0)
- break;
- if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
- break;
- ppkt1->pkt->dts -= delta;
- ppkt1= ppkt1->next;
- }
- if(ppkt1)
- continue;
- st->cur_dts -= delta;
- }
- }
- }
-#endif
-
find_stream_info_err:
for (i=0; i < ic->nb_streams; i++) {
if (ic->streams[i]->codec)
st = s->streams[i];
if (st->parser) {
av_parser_close(st->parser);
- av_free_packet(&st->cur_pkt);
}
if (st->attached_pic.data)
av_free_packet(&st->attached_pic);
pkt->dts= st->pts_buffer[0];
}
- if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
+ if (st->cur_dts && st->cur_dts != AV_NOPTS_VALUE &&
+ ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) &&
+ st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)) {
av_log(s, AV_LOG_ERROR,
"Application provided invalid, non monotonically increasing dts to muxer in stream %d: %"PRId64" >= %"PRId64"\n",
st->index, st->cur_dts, pkt->dts);
/* update pts */
switch (st->codec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
- frame_size = get_audio_frame_size(st->codec, pkt->size);
+ frame_size = get_audio_frame_size(st->codec, pkt->size, 1);
/* HACK/FIXME, we skip the initial 0 size packets as they are most
likely equal to the encoder delay, but it would be better if we
return comp > 0;
}
-int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
+int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
+ AVPacket *pkt, int flush)
+{
AVPacketList *pktl;
int stream_count=0;
int i;
}
}
+#if FF_API_INTERLEAVE_PACKET
+/* deprecated public entry point kept for ABI compatibility;
+ * simply forwards to the internal ff_interleave_packet_per_dts() */
+int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
+                                 AVPacket *pkt, int flush)
+{
+    return ff_interleave_packet_per_dts(s, out, pkt, flush);
+}
+#endif
+
/**
* Interleave an AVPacket correctly so it can be muxed.
* @param out the interleaved packet will be output here
av_free_packet(in);
return ret;
} else
- return av_interleave_packet_per_dts(s, out, in, flush);
+ return ff_interleave_packet_per_dts(s, out, in, flush);
}
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
- AVStream *st= s->streams[ pkt->stream_index];
- int ret;
+ int ret, flush = 0;
- //FIXME/XXX/HACK drop zero sized packets
- if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
- return 0;
+ if (pkt) {
+ AVStream *st= s->streams[ pkt->stream_index];
- av_dlog(s, "av_interleaved_write_frame size:%d dts:%"PRId64" pts:%"PRId64"\n",
- pkt->size, pkt->dts, pkt->pts);
- if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
- return ret;
+ //FIXME/XXX/HACK drop zero sized packets
+ if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
+ return 0;
- if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
- return AVERROR(EINVAL);
+ av_dlog(s, "av_interleaved_write_frame size:%d dts:%"PRId64" pts:%"PRId64"\n",
+ pkt->size, pkt->dts, pkt->pts);
+ if((ret = compute_pkt_fields2(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
+ return ret;
+
+ if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
+ return AVERROR(EINVAL);
+ } else {
+ av_dlog(s, "av_interleaved_write_frame FLUSH\n");
+ flush = 1;
+ }
for(;;){
AVPacket opkt;
- int ret= interleave_packet(s, &opkt, pkt, 0);
+ int ret= interleave_packet(s, &opkt, pkt, flush);
if(ret<=0) //FIXME cleanup needed for ret<0 ?
return ret;
av_free(printed);
}
-int64_t av_gettime(void)
+#if FF_API_AV_GETTIME && CONFIG_SHARED && HAVE_SYMVER
+/* compatibility shim: the implementation moved out of this library; keep a
+ * versioned LIBAVFORMAT_54 symbol that forwards to the current av_gettime()
+ * so existing shared-library users keep linking */
+FF_SYMVER(int64_t, av_gettime, (void), "LIBAVFORMAT_54")
 {
-    struct timeval tv;
-    gettimeofday(&tv,NULL);
-    return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
+    return av_gettime();
 }
+#endif
uint64_t ff_ntp_time(void)
{
int port, const char *fmt, ...)
{
#if CONFIG_NETWORK
- struct addrinfo hints, *ai;
+ struct addrinfo hints = { 0 }, *ai;
#endif
str[0] = '\0';
#if CONFIG_NETWORK && defined(AF_INET6)
/* Determine if hostname is a numerical IPv6 address,
* properly escape it within [] in that case. */
- memset(&hints, 0, sizeof(hints));
hints.ai_flags = AI_NUMERICHOST;
if (!getaddrinfo(hostname, NULL, &hints, &ai)) {
if (ai->ai_family == AF_INET6) {