}
if(!ap->prealloced_context)
- ic = av_alloc_format_context();
+ ic = avformat_alloc_context();
else
ic = *ic_ptr;
if (!ic) {
case CODEC_ID_ASV2:
case CODEC_ID_VCR1:
case CODEC_ID_DNXHD:
+ case CODEC_ID_JPEG2000:
return 1;
default: break;
}
if (!st->need_parsing || !st->parser) {
/* no parsing needed: we just output the packet as is */
/* raw data support */
- *pkt = s->cur_pkt;
+ *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
compute_pkt_fields(s, st, NULL, pkt);
s->cur_st = NULL;
break;
- } else if (s->cur_len > 0 && st->discard < AVDISCARD_ALL) {
+ } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
len = av_parser_parse(st->parser, st->codec, &pkt->data, &pkt->size,
- s->cur_ptr, s->cur_len,
- s->cur_pkt.pts, s->cur_pkt.dts);
- s->cur_pkt.pts = AV_NOPTS_VALUE;
- s->cur_pkt.dts = AV_NOPTS_VALUE;
+ st->cur_ptr, st->cur_len,
+ st->cur_pkt.pts, st->cur_pkt.dts);
+ st->cur_pkt.pts = AV_NOPTS_VALUE;
+ st->cur_pkt.dts = AV_NOPTS_VALUE;
/* increment read pointer */
- s->cur_ptr += len;
- s->cur_len -= len;
+ st->cur_ptr += len;
+ st->cur_len -= len;
/* return packet if any */
if (pkt->size) {
+ pkt->pos = st->cur_pkt.pos; // Isn't quite accurate but close.
got_packet:
- pkt->pos = s->cur_pkt.pos; // Isn't quite accurate but close.
pkt->duration = 0;
pkt->stream_index = st->index;
pkt->pts = st->parser->pts;
}
} else {
/* free packet */
- av_free_packet(&s->cur_pkt);
+ av_free_packet(&st->cur_pkt);
s->cur_st = NULL;
}
} else {
+ AVPacket cur_pkt;
/* read next packet */
- ret = av_read_packet(s, &s->cur_pkt);
+ ret = av_read_packet(s, &cur_pkt);
if (ret < 0) {
if (ret == AVERROR(EAGAIN))
return ret;
/* no more packets: really terminate parsing */
return ret;
}
+ st = s->streams[cur_pkt.stream_index];
+ st->cur_pkt= cur_pkt;
- if(s->cur_pkt.pts != AV_NOPTS_VALUE &&
- s->cur_pkt.dts != AV_NOPTS_VALUE &&
- s->cur_pkt.pts < s->cur_pkt.dts){
+ if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
+ st->cur_pkt.dts != AV_NOPTS_VALUE &&
+ st->cur_pkt.pts < st->cur_pkt.dts){
av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
- s->cur_pkt.stream_index,
- s->cur_pkt.pts,
- s->cur_pkt.dts,
- s->cur_pkt.size);
-// av_free_packet(&s->cur_pkt);
+ st->cur_pkt.stream_index,
+ st->cur_pkt.pts,
+ st->cur_pkt.dts,
+ st->cur_pkt.size);
+// av_free_packet(&st->cur_pkt);
// return -1;
}
- st = s->streams[s->cur_pkt.stream_index];
if(s->debug & FF_FDEBUG_TS)
av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, flags=%d\n",
- s->cur_pkt.stream_index,
- s->cur_pkt.pts,
- s->cur_pkt.dts,
- s->cur_pkt.size,
- s->cur_pkt.flags);
+ st->cur_pkt.stream_index,
+ st->cur_pkt.pts,
+ st->cur_pkt.dts,
+ st->cur_pkt.size,
+ st->cur_pkt.flags);
s->cur_st = st;
- s->cur_ptr = s->cur_pkt.data;
- s->cur_len = s->cur_pkt.size;
+ st->cur_ptr = st->cur_pkt.data;
+ st->cur_len = st->cur_pkt.size;
if (st->need_parsing && !st->parser) {
st->parser = av_parser_init(st->codec->codec_id);
if (!st->parser) {
}
if(st->parser && (s->iformat->flags & AVFMT_GENERIC_INDEX)){
st->parser->next_frame_offset=
- st->parser->cur_offset= s->cur_pkt.pos;
+ st->parser->cur_offset= st->cur_pkt.pos;
}
}
}
flush_packet_queue(s);
- /* free previous packet */
- if (s->cur_st) {
- if (s->cur_st->parser)
- av_free_packet(&s->cur_pkt);
- s->cur_st = NULL;
- }
- /* fail safe */
- s->cur_ptr = NULL;
- s->cur_len = 0;
+ s->cur_st = NULL;
/* for each stream, reset read state */
for(i = 0; i < s->nb_streams; i++) {
if (st->parser) {
av_parser_close(st->parser);
st->parser = NULL;
+ av_free_packet(&st->cur_pkt);
}
st->last_IP_pts = AV_NOPTS_VALUE;
st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
+ /* fail safe */
+ st->cur_ptr = NULL;
+ st->cur_len = 0;
}
}
return ret;
}
for(i=0;; i++) {
- int ret = av_read_frame(s, &pkt);
+ int ret;
+ do{
+ ret = av_read_frame(s, &pkt);
+ }while(ret == AVERROR(EAGAIN));
if(ret<0)
break;
av_free_packet(&pkt);
int64_t end_time;
int64_t filesize, offset, duration;
- /* free previous packet */
- if (ic->cur_st && ic->cur_st->parser)
- av_free_packet(&ic->cur_pkt);
ic->cur_st = NULL;
/* flush packet queue */
if (st->parser) {
av_parser_close(st->parser);
st->parser= NULL;
+ av_free_packet(&st->cur_pkt);
}
}
if (i == ic->nb_streams)
break;
- ret = av_read_packet(ic, pkt);
+ do{
+ ret = av_read_packet(ic, pkt);
+ }while(ret == AVERROR(EAGAIN));
if (ret != 0)
break;
read_size += pkt->size;
if (read_size >= DURATION_MAX_READ_SIZE)
break;
- ret = av_read_packet(ic, pkt);
+ do{
+ ret = av_read_packet(ic, pkt);
+ }while(ret == AVERROR(EAGAIN));
if (ret != 0)
break;
read_size += pkt->size;
/* NOTE: a new stream can be added there if no header in file
(AVFMTCTX_NOHEADER) */
ret = av_read_frame_internal(ic, &pkt1);
+ if(ret == AVERROR(EAGAIN))
+ continue;
if (ret < 0) {
/* EOF or error */
ret = -1; /* we could not have all the codec parameters before EOF */
int i;
AVStream *st;
- /* free previous packet */
- if (s->cur_st && s->cur_st->parser)
- av_free_packet(&s->cur_pkt);
-
if (s->iformat->read_close)
s->iformat->read_close(s);
for(i=0;i<s->nb_streams;i++) {
st = s->streams[i];
if (st->parser) {
av_parser_close(st->parser);
+ av_free_packet(&st->cur_pkt);
}
av_metadata_free(&st->metadata);
av_free(st->index_entries);
return program;
}
-void av_set_program_name(AVProgram *program, char *provider_name, char *name)
-{
- assert(!provider_name == !name);
- if(name){
- av_free(program->provider_name);
- av_free(program-> name);
- program->provider_name = av_strdup(provider_name);
- program-> name = av_strdup( name);
- }
-}
-
AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
{
AVChapter *chapter = NULL;
}else
st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
}
+
+ if(s->oformat->flags & AVFMT_GLOBALHEADER &&
+ !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
+ av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
}
if (!s->priv_data && s->oformat->priv_data_size > 0) {
return ret;
}
+/**
+ * Insert *pkt into s->packet_buffer, keeping the list ordered according to
+ * the caller-supplied comparator 'compare' (insertion before the first
+ * buffered packet for which compare() returns nonzero).
+ * Ownership: if the packet owns its data (destruct == av_destruct_packet)
+ * that ownership is moved into the list node and the caller's destruct is
+ * cleared; otherwise the data is duplicated so the node owns a copy.
+ * NOTE(review): av_mallocz() result is dereferenced without a NULL check,
+ * so an allocation failure crashes here — confirm intended OOM policy.
+ */
+void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
+ int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
+{
+ AVPacketList **next_point, *this_pktl;
+
+ this_pktl = av_mallocz(sizeof(AVPacketList));
+ this_pktl->pkt= *pkt;
+ if(pkt->destruct == av_destruct_packet)
+ pkt->destruct= NULL; // not shared -> must keep original from being freed
+ else
+ av_dup_packet(&this_pktl->pkt); //shared -> must dup
+
+ /* walk the sorted buffer until the new packet should precede *next_point */
+ next_point = &s->packet_buffer;
+ while(*next_point){
+ if(compare(s, &(*next_point)->pkt, pkt))
+ break;
+ next_point= &(*next_point)->next;
+ }
+ /* splice the new node in at the insertion point found above */
+ this_pktl->next= *next_point;
+ *next_point= this_pktl;
+}
+
+/**
+ * Comparator for ff_interleave_add_packet(): returns nonzero when the
+ * buffered packet 'next' has a strictly later dts than 'pkt', i.e. 'pkt'
+ * should be inserted before 'next'.
+ * The two dts values live in their respective streams' time bases; they are
+ * compared by cross-multiplying with the opposite stream's time_base so no
+ * division (and no rounding) is needed.
+ * A 'pkt' with dts == AV_NOPTS_VALUE never compares as earlier (returns 0),
+ * so such packets end up appended at the end of the buffer.
+ */
+int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
+{
+ AVStream *st = s->streams[ pkt ->stream_index];
+ AVStream *st2= s->streams[ next->stream_index];
+ /* cross-multiplication factors: next->dts*left vs pkt->dts*right puts both
+ dts values on the common denominator st->den * st2->den */
+ int64_t left = st2->time_base.num * (int64_t)st ->time_base.den;
+ int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
+
+ if (pkt->dts == AV_NOPTS_VALUE)
+ return 0;
+
+ return next->dts * left > pkt->dts * right; //FIXME this can overflow
+}
+
int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
- AVPacketList *pktl, **next_point, *this_pktl;
+ AVPacketList *pktl;
int stream_count=0;
int streams[MAX_STREAMS];
if(pkt){
- AVStream *st= s->streams[ pkt->stream_index];
-
-// assert(pkt->destruct != av_destruct_packet); //FIXME
-
- this_pktl = av_mallocz(sizeof(AVPacketList));
- this_pktl->pkt= *pkt;
- if(pkt->destruct == av_destruct_packet)
- pkt->destruct= NULL; // not shared -> must keep original from being freed
- else
- av_dup_packet(&this_pktl->pkt); //shared -> must dup
-
- next_point = &s->packet_buffer;
- while(*next_point){
- AVStream *st2= s->streams[ (*next_point)->pkt.stream_index];
- int64_t left= st2->time_base.num * (int64_t)st ->time_base.den;
- int64_t right= st ->time_base.num * (int64_t)st2->time_base.den;
- if((*next_point)->pkt.dts * left > pkt->dts * right) //FIXME this can overflow
- break;
- next_point= &(*next_point)->next;
- }
- this_pktl->next= *next_point;
- *next_point= this_pktl;
+ ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
}
memset(streams, 0, sizeof(streams));
if(compute_pkt_fields2(st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
return -1;
- if(pkt->dts == AV_NOPTS_VALUE)
+ if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
return -1;
for(;;){
av_log(NULL, AV_LOG_INFO, "(%s)", st->language);
av_log(NULL, AV_LOG_DEBUG, ", %d/%d", st->time_base.num/g, st->time_base.den/g);
av_log(NULL, AV_LOG_INFO, ": %s", buf);
+ if (st->sample_aspect_ratio.num && // default
+ av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
+ AVRational display_aspect_ratio;
+ av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
+ st->codec->width*st->sample_aspect_ratio.num,
+ st->codec->height*st->sample_aspect_ratio.den,
+ 1024*1024);
+ av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
+ st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
+ display_aspect_ratio.num, display_aspect_ratio.den);
+ }
if(st->codec->codec_type == CODEC_TYPE_VIDEO){
if(st->r_frame_rate.den && st->r_frame_rate.num)
av_log(NULL, AV_LOG_INFO, ", %5.2f tb(r)", av_q2d(st->r_frame_rate));
p = datestr;
q = NULL;
if (!duration) {
+ if (!strncasecmp(datestr, "now", len))
+ return (int64_t) now * 1000000;
+
/* parse the year-month-day part */
for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) {
q = small_strptime(p, date_fmt[i], &dt);