AVFormatContext *ctx;
int ctx_inited;
uint8_t iobuf[32768];
- URLContext *out;
+ AVIOContext *out;
int packets_written;
char initfile[1024];
int64_t init_start_pos;
int init_range_length;
int nb_segments, segments_size, segment_index;
Segment **segments;
- int64_t first_dts, start_dts, end_dts;
+ int64_t first_pts, start_pts, max_pts;
+ int64_t last_dts;
int bit_rate;
char bandwidth_str[64];
{
OutputStream *os = opaque;
if (os->out)
- ffurl_write(os->out, buf, buf_size);
+ avio_write(os->out, buf, buf_size);
return buf_size;
}
// RFC 6381
-static void set_codec_str(AVFormatContext *s, AVCodecContext *codec,
+static void set_codec_str(AVFormatContext *s, AVCodecParameters *par,
char *str, int size)
{
const AVCodecTag *tags[2] = { NULL, NULL };
uint32_t tag;
- if (codec->codec_type == AVMEDIA_TYPE_VIDEO)
+ if (par->codec_type == AVMEDIA_TYPE_VIDEO)
tags[0] = ff_codec_movvideo_tags;
- else if (codec->codec_type == AVMEDIA_TYPE_AUDIO)
+ else if (par->codec_type == AVMEDIA_TYPE_AUDIO)
tags[0] = ff_codec_movaudio_tags;
else
return;
- tag = av_codec_get_tag(tags, codec->codec_id);
+ tag = av_codec_get_tag(tags, par->codec_id);
if (!tag)
return;
if (size < 5)
if (!strcmp(str, "mp4a") || !strcmp(str, "mp4v")) {
uint32_t oti;
tags[0] = ff_mp4_obj_type;
- oti = av_codec_get_tag(tags, codec->codec_id);
+ oti = av_codec_get_tag(tags, par->codec_id);
if (oti)
av_strlcatf(str, size, ".%02x", oti);
else
return;
if (tag == MKTAG('m', 'p', '4', 'a')) {
- if (codec->extradata_size >= 2) {
- int aot = codec->extradata[0] >> 3;
+ if (par->extradata_size >= 2) {
+ int aot = par->extradata[0] >> 3;
if (aot == 31)
- aot = ((AV_RB16(codec->extradata) >> 5) & 0x3f) + 32;
+ aot = ((AV_RB16(par->extradata) >> 5) & 0x3f) + 32;
av_strlcatf(str, size, ".%d", aot);
}
} else if (tag == MKTAG('m', 'p', '4', 'v')) {
}
} else if (!strcmp(str, "avc1")) {
uint8_t *tmpbuf = NULL;
- uint8_t *extradata = codec->extradata;
- int extradata_size = codec->extradata_size;
+ uint8_t *extradata = par->extradata;
+ int extradata_size = par->extradata_size;
if (!extradata_size)
return;
if (extradata[0] != 1) {
if (avio_open_dyn_buf(&pb) < 0)
return;
if (ff_isom_write_avcc(pb, extradata, extradata_size) < 0) {
- avio_close_dyn_buf(pb, &tmpbuf);
- av_free(tmpbuf);
+ ffio_free_dyn_buf(&pb);
return;
}
extradata_size = avio_close_dyn_buf(pb, &extradata);
av_write_trailer(os->ctx);
if (os->ctx && os->ctx->pb)
av_free(os->ctx->pb);
- ffurl_close(os->out);
- os->out = NULL;
+ ff_format_io_close(s, &os->out);
if (os->ctx)
avformat_free_context(os->ctx);
for (j = 0; j < os->nb_segments; j++)
// next parse the dash format-tag and generate a c-string format tag
// (next_ptr now points at the first '%' at the beginning of the format-tag)
if (id_type != DASH_TMPL_ID_UNDEFINED) {
- const char *number_format = DASH_TMPL_ID_TIME ? "lld" : "d";
+ const char *number_format = (id_type == DASH_TMPL_ID_TIME) ? PRId64 : "d";
if (next_ptr[0] == '$') { // no dash format-tag
snprintf(format_tag, format_tag_size, "%%%s", number_format);
*ptr = &next_ptr[1];
avio_printf(out, "%d.%dS", seconds, fractions / (AV_TIME_BASE / 10));
}
+/* Write the current UTC time into buf as an ISO-8601 date-time
+ * ("%Y-%m-%dT%H:%M:%S"), for MPD attributes such as availabilityStartTime
+ * and publishTime. On any failure buf is set to the empty string, so
+ * callers can simply test buf[0]. */
+static void format_date_now(char *buf, int size)
+{
+    time_t t = time(NULL);
+    struct tm *ptm, tmbuf;
+
+    /* Guarantee a terminated (empty) string even if gmtime_r fails;
+     * otherwise callers would read an uninitialized buffer. */
+    buf[0] = '\0';
+    ptm = gmtime_r(&t, &tmbuf);
+    if (ptm) {
+        if (!strftime(buf, size, "%Y-%m-%dT%H:%M:%S", ptm))
+            buf[0] = '\0';
+    }
+}
+
static int write_manifest(AVFormatContext *s, int final)
{
DASHContext *c = s->priv_data;
AVDictionaryEntry *title = av_dict_get(s->metadata, "title", NULL, 0);
snprintf(temp_filename, sizeof(temp_filename), "%s.tmp", s->filename);
- ret = avio_open2(&out, temp_filename, AVIO_FLAG_WRITE, &s->interrupt_callback, NULL);
+ ret = s->io_open(s, &out, temp_filename, AVIO_FLAG_WRITE, NULL);
if (ret < 0) {
av_log(s, AV_LOG_ERROR, "Unable to open %s for writing\n", temp_filename);
return ret;
avio_printf(out, "\"\n");
} else {
int64_t update_period = c->last_duration / AV_TIME_BASE;
+ char now_str[100];
if (c->use_template && !c->use_timeline)
update_period = 500;
avio_printf(out, "\tminimumUpdatePeriod=\"PT%"PRId64"S\"\n", update_period);
avio_printf(out, "\tsuggestedPresentationDelay=\"PT%"PRId64"S\"\n", c->last_duration / AV_TIME_BASE);
if (!c->availability_start_time[0] && s->nb_streams > 0 && c->streams[0].nb_segments > 0) {
- time_t t = time(NULL);
- struct tm *ptm, tmbuf;
- ptm = gmtime_r(&t, &tmbuf);
- if (ptm) {
- if (!strftime(c->availability_start_time, sizeof(c->availability_start_time),
- "%Y-%m-%dT%H:%M:%S", ptm))
- c->availability_start_time[0] = '\0';
- }
+ format_date_now(c->availability_start_time, sizeof(c->availability_start_time));
}
if (c->availability_start_time[0])
avio_printf(out, "\tavailabilityStartTime=\"%s\"\n", c->availability_start_time);
+ format_date_now(now_str, sizeof(now_str));
+ if (now_str[0])
+ avio_printf(out, "\tpublishTime=\"%s\"\n", now_str);
if (c->window_size && c->use_template) {
avio_printf(out, "\ttimeShiftBufferDepth=\"");
write_time(out, c->last_duration * c->window_size);
}
if (c->has_video) {
- avio_printf(out, "\t\t<AdaptationSet id=\"video\" segmentAlignment=\"true\" bitstreamSwitching=\"true\">\n");
+ avio_printf(out, "\t\t<AdaptationSet contentType=\"video\" segmentAlignment=\"true\" bitstreamSwitching=\"true\">\n");
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
OutputStream *os = &c->streams[i];
- if (s->streams[i]->codec->codec_type != AVMEDIA_TYPE_VIDEO)
+ if (st->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
continue;
- avio_printf(out, "\t\t\t<Representation id=\"%d\" mimeType=\"video/mp4\" codecs=\"%s\"%s width=\"%d\" height=\"%d\">\n", i, os->codec_str, os->bandwidth_str, st->codec->width, st->codec->height);
+ avio_printf(out, "\t\t\t<Representation id=\"%d\" mimeType=\"video/mp4\" codecs=\"%s\"%s width=\"%d\" height=\"%d\">\n", i, os->codec_str, os->bandwidth_str, st->codecpar->width, st->codecpar->height);
output_segment_list(&c->streams[i], out, c);
avio_printf(out, "\t\t\t</Representation>\n");
}
avio_printf(out, "\t\t</AdaptationSet>\n");
}
if (c->has_audio) {
- avio_printf(out, "\t\t<AdaptationSet id=\"audio\" segmentAlignment=\"true\" bitstreamSwitching=\"true\">\n");
+ avio_printf(out, "\t\t<AdaptationSet contentType=\"audio\" segmentAlignment=\"true\" bitstreamSwitching=\"true\">\n");
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
OutputStream *os = &c->streams[i];
- if (s->streams[i]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
+ if (st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO)
continue;
- avio_printf(out, "\t\t\t<Representation id=\"%d\" mimeType=\"audio/mp4\" codecs=\"%s\"%s audioSamplingRate=\"%d\">\n", i, os->codec_str, os->bandwidth_str, st->codec->sample_rate);
- avio_printf(out, "\t\t\t\t<AudioChannelConfiguration schemeIdUri=\"urn:mpeg:dash:23003:3:audio_channel_configuration:2011\" value=\"%d\" />\n", st->codec->channels);
+ avio_printf(out, "\t\t\t<Representation id=\"%d\" mimeType=\"audio/mp4\" codecs=\"%s\"%s audioSamplingRate=\"%d\">\n", i, os->codec_str, os->bandwidth_str, st->codecpar->sample_rate);
+ avio_printf(out, "\t\t\t\t<AudioChannelConfiguration schemeIdUri=\"urn:mpeg:dash:23003:3:audio_channel_configuration:2011\" value=\"%d\" />\n", st->codecpar->channels);
output_segment_list(&c->streams[i], out, c);
avio_printf(out, "\t\t\t</Representation>\n");
}
avio_printf(out, "\t</Period>\n");
avio_printf(out, "</MPD>\n");
avio_flush(out);
- avio_close(out);
+ ff_format_io_close(s, &out);
return ff_rename(temp_filename, s->filename);
}
AVDictionary *opts = NULL;
char filename[1024];
- os->bit_rate = s->streams[i]->codec->bit_rate;
+ os->bit_rate = s->streams[i]->codecpar->bit_rate;
if (os->bit_rate) {
snprintf(os->bandwidth_str, sizeof(os->bandwidth_str),
" bandwidth=\"%d\"", os->bit_rate);
os->ctx = ctx;
ctx->oformat = oformat;
ctx->interrupt_callback = s->interrupt_callback;
+ ctx->opaque = s->opaque;
+ ctx->io_close = s->io_close;
+ ctx->io_open = s->io_open;
if (!(st = avformat_new_stream(ctx, NULL))) {
ret = AVERROR(ENOMEM);
goto fail;
}
- avcodec_copy_context(st->codec, s->streams[i]->codec);
+ avcodec_parameters_copy(st->codecpar, s->streams[i]->codecpar);
st->sample_aspect_ratio = s->streams[i]->sample_aspect_ratio;
st->time_base = s->streams[i]->time_base;
ctx->avoid_negative_ts = s->avoid_negative_ts;
dash_fill_tmpl_params(os->initfile, sizeof(os->initfile), c->init_seg_name, i, 0, os->bit_rate, 0);
}
snprintf(filename, sizeof(filename), "%s%s", c->dirname, os->initfile);
- ret = ffurl_open(&os->out, filename, AVIO_FLAG_WRITE, &s->interrupt_callback, NULL);
+ ret = s->io_open(s, &os->out, filename, AVIO_FLAG_WRITE, NULL);
if (ret < 0)
goto fail;
os->init_start_pos = 0;
- av_dict_set(&opts, "movflags", "frag_custom+dash", 0);
+ av_dict_set(&opts, "movflags", "frag_custom+dash+delay_moov", 0);
if ((ret = avformat_write_header(ctx, &opts)) < 0) {
goto fail;
}
avio_flush(ctx->pb);
av_dict_free(&opts);
- if (c->single_file) {
- os->init_range_length = avio_tell(ctx->pb);
- } else {
- ffurl_close(os->out);
- os->out = NULL;
- }
- av_log(s, AV_LOG_VERBOSE, "Representation %d init segment written to: %s\n", i, filename);
+ av_log(s, AV_LOG_VERBOSE, "Representation %d init segment will be written to: %s\n", i, filename);
s->streams[i]->time_base = st->time_base;
// If the muxer wants to shift timestamps, request to have them shifted
// already before being handed to this muxer, so we don't have mismatches
// between the MPD and the actual segments.
s->avoid_negative_ts = ctx->avoid_negative_ts;
- if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
+ if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
c->has_video = 1;
- else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
+ else if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
c->has_audio = 1;
- set_codec_str(s, os->ctx->streams[0]->codec, os->codec_str, sizeof(os->codec_str));
- os->first_dts = AV_NOPTS_VALUE;
- os->end_dts = AV_NOPTS_VALUE;
+ set_codec_str(s, st->codecpar, os->codec_str, sizeof(os->codec_str));
+ os->first_pts = AV_NOPTS_VALUE;
+ os->max_pts = AV_NOPTS_VALUE;
+ os->last_dts = AV_NOPTS_VALUE;
os->segment_index = 1;
}
av_strlcpy(seg->file, file, sizeof(seg->file));
seg->time = time;
seg->duration = duration;
+ if (seg->time < 0) { // If pts<0, it is expected to be cut away with an edit list
+ seg->duration += seg->time;
+ seg->time = 0;
+ }
seg->start_pos = start_pos;
seg->range_length = range_length;
seg->index_length = index_length;
int64_t pos, int *index_length)
{
uint8_t buf[8];
- URLContext *fd;
+ AVIOContext *pb;
int ret;
- ret = ffurl_open(&fd, full_path, AVIO_FLAG_READ, &s->interrupt_callback, NULL);
+ ret = s->io_open(s, &pb, full_path, AVIO_FLAG_READ, NULL);
if (ret < 0)
return;
- if (ffurl_seek(fd, pos, SEEK_SET) != pos) {
- ffurl_close(fd);
+ if (avio_seek(pb, pos, SEEK_SET) != pos) {
+ ff_format_io_close(s, &pb);
return;
}
- ret = ffurl_read(fd, buf, 8);
- ffurl_close(fd);
+ ret = avio_read(pb, buf, 8);
+ ff_format_io_close(s, &pb);
if (ret < 8)
return;
if (AV_RL32(&buf[4]) != MKTAG('s', 'i', 'd', 'x'))
*index_length = AV_RB32(&buf[0]);
}
+/* Propagate codec extradata from the caller's stream parameters (par) into
+ * the per-representation mp4 muxer context once it becomes available, and
+ * regenerate the RFC 6381 codec string used in the manifest. Does nothing
+ * if the inner stream already has extradata or par has none.
+ *
+ * Returns 0 on success or AVERROR(ENOMEM) on allocation failure. */
+static int update_stream_extradata(AVFormatContext *s, OutputStream *os,
+                                   AVCodecParameters *par)
+{
+    uint8_t *extradata;
+
+    if (os->ctx->streams[0]->codecpar->extradata_size || !par->extradata_size)
+        return 0;
+
+    /* FFmpeg requires extradata buffers to be allocated with
+     * AV_INPUT_BUFFER_PADDING_SIZE extra zeroed bytes at the end, since
+     * bitstream parsers may over-read; av_mallocz zeroes the padding. */
+    extradata = av_mallocz(par->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
+
+    if (!extradata)
+        return AVERROR(ENOMEM);
+
+    memcpy(extradata, par->extradata, par->extradata_size);
+
+    os->ctx->streams[0]->codecpar->extradata = extradata;
+    os->ctx->streams[0]->codecpar->extradata_size = par->extradata_size;
+
+    set_codec_str(s, par, os->codec_str, sizeof(os->codec_str));
+
+    return 0;
+}
+
static int dash_flush(AVFormatContext *s, int final, int stream)
{
DASHContext *c = s->priv_data;
for (i = 0; i < s->nb_streams; i++) {
OutputStream *os = &c->streams[i];
char filename[1024] = "", full_path[1024], temp_path[1024];
- int64_t start_pos = avio_tell(os->ctx->pb);
+ int64_t start_pos;
int range_length, index_length = 0;
if (!os->packets_written)
// Flush all audio streams as well, in sync with video keyframes,
// but not the other video streams.
if (stream >= 0 && i != stream) {
- if (s->streams[i]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
+ if (s->streams[i]->codecpar->codec_type != AVMEDIA_TYPE_AUDIO)
continue;
// Make sure we don't flush audio streams multiple times, when
// all video streams are flushed one at a time.
continue;
}
+ if (!os->init_range_length) {
+ av_write_frame(os->ctx, NULL);
+ os->init_range_length = avio_tell(os->ctx->pb);
+ if (!c->single_file)
+ ff_format_io_close(s, &os->out);
+ }
+
+ start_pos = avio_tell(os->ctx->pb);
+
if (!c->single_file) {
- dash_fill_tmpl_params(filename, sizeof(filename), c->media_seg_name, i, os->segment_index, os->bit_rate, os->start_dts);
+ dash_fill_tmpl_params(filename, sizeof(filename), c->media_seg_name, i, os->segment_index, os->bit_rate, os->start_pts);
snprintf(full_path, sizeof(full_path), "%s%s", c->dirname, filename);
snprintf(temp_path, sizeof(temp_path), "%s.tmp", full_path);
- ret = ffurl_open(&os->out, temp_path, AVIO_FLAG_WRITE, &s->interrupt_callback, NULL);
+ ret = s->io_open(s, &os->out, temp_path, AVIO_FLAG_WRITE, NULL);
if (ret < 0)
break;
write_styp(os->ctx->pb);
if (c->single_file) {
find_index_range(s, full_path, start_pos, &index_length);
} else {
- ffurl_close(os->out);
- os->out = NULL;
+ ff_format_io_close(s, &os->out);
ret = ff_rename(temp_path, full_path);
if (ret < 0)
break;
}
- add_segment(os, filename, os->start_dts, os->end_dts - os->start_dts, start_pos, range_length, index_length);
+ add_segment(os, filename, os->start_pts, os->max_pts - os->start_pts, start_pos, range_length, index_length);
av_log(s, AV_LOG_VERBOSE, "Representation %d media segment %d written to: %s\n", i, os->segment_index, full_path);
}
int64_t seg_end_duration = (os->segment_index) * (int64_t) c->min_seg_duration;
int ret;
+ ret = update_stream_extradata(s, os, st->codecpar);
+ if (ret < 0)
+ return ret;
+
+ // Fill in a heuristic guess of the packet duration, if none is available.
+ // The mp4 muxer will do something similar (for the last packet in a fragment)
+ // if nothing is set (setting it for the other packets doesn't hurt).
+ // By setting a nonzero duration here, we can be sure that the mp4 muxer won't
+ // invoke its heuristic (this doesn't have to be identical to that algorithm),
+ // so that we know the exact timestamps of fragments.
+ if (!pkt->duration && os->last_dts != AV_NOPTS_VALUE)
+ pkt->duration = pkt->dts - os->last_dts;
+ os->last_dts = pkt->dts;
+
// If forcing the stream to start at 0, the mp4 muxer will set the start
// timestamps to 0. Do the same here, to avoid mismatches in duration/timestamps.
- if (os->first_dts == AV_NOPTS_VALUE &&
+ if (os->first_pts == AV_NOPTS_VALUE &&
s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_MAKE_ZERO) {
pkt->pts -= pkt->dts;
pkt->dts = 0;
}
- if (os->first_dts == AV_NOPTS_VALUE)
- os->first_dts = pkt->dts;
+ if (os->first_pts == AV_NOPTS_VALUE)
+ os->first_pts = pkt->pts;
- if ((!c->has_video || st->codec->codec_type == AVMEDIA_TYPE_VIDEO) &&
+ if ((!c->has_video || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
pkt->flags & AV_PKT_FLAG_KEY && os->packets_written &&
- av_compare_ts(pkt->dts - os->first_dts, st->time_base,
+ av_compare_ts(pkt->pts - os->first_pts, st->time_base,
seg_end_duration, AV_TIME_BASE_Q) >= 0) {
int64_t prev_duration = c->last_duration;
- c->last_duration = av_rescale_q(pkt->dts - os->start_dts,
+ c->last_duration = av_rescale_q(pkt->pts - os->start_pts,
st->time_base,
AV_TIME_BASE_Q);
- c->total_duration = av_rescale_q(pkt->dts - os->first_dts,
+ c->total_duration = av_rescale_q(pkt->pts - os->first_pts,
st->time_base,
AV_TIME_BASE_Q);
// If we wrote a previous segment, adjust the start time of the segment
// to the end of the previous one (which is the same as the mp4 muxer
// does). This avoids gaps in the timeline.
- if (os->end_dts != AV_NOPTS_VALUE)
- os->start_dts = os->end_dts;
+ if (os->max_pts != AV_NOPTS_VALUE)
+ os->start_pts = os->max_pts;
else
- os->start_dts = pkt->dts;
+ os->start_pts = pkt->pts;
}
- os->end_dts = pkt->dts + pkt->duration;
+ if (os->max_pts == AV_NOPTS_VALUE)
+ os->max_pts = pkt->pts + pkt->duration;
+ else
+ os->max_pts = FFMAX(os->max_pts, pkt->pts + pkt->duration);
os->packets_written++;
return ff_write_chained(os->ctx, 0, pkt, s);
}
// If no segments have been written so far, try to do a crude
// guess of the segment duration
if (!c->last_duration)
- c->last_duration = av_rescale_q(os->end_dts - os->start_dts,
+ c->last_duration = av_rescale_q(os->max_pts - os->start_pts,
s->streams[0]->time_base,
AV_TIME_BASE_Q);
- c->total_duration = av_rescale_q(os->end_dts - os->first_dts,
+ c->total_duration = av_rescale_q(os->max_pts - os->first_pts,
s->streams[0]->time_base,
AV_TIME_BASE_Q);
}
{ "use_timeline", "Use SegmentTimeline in SegmentTemplate", OFFSET(use_timeline), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, E },
{ "single_file", "Store all segments in one file, accessed using byte ranges", OFFSET(single_file), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, E },
{ "single_file_name", "DASH-templated name to be used for baseURL. Implies storing all segments in one file, accessed using byte ranges", OFFSET(single_file_name), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, E },
- { "init_seg_name", "DASH-templated name to used for the initialization segment", OFFSET(init_seg_name), AV_OPT_TYPE_STRING, {.str = "init-stream$RepresentationID$.m4s"}, 0, 0, E },
- { "media_seg_name", "DASH-templated name to used for the media segments", OFFSET(media_seg_name), AV_OPT_TYPE_STRING, {.str = "chunk-stream$RepresentationID$-$Number%05d$.m4s"}, 0, 0, E },
+ { "init_seg_name", "DASH-templated name to used for the initialization segment", OFFSET(init_seg_name), AV_OPT_TYPE_STRING, {.str = "init-stream$RepresentationID$.m4s"}, 0, 0, E },
+ { "media_seg_name", "DASH-templated name to used for the media segments", OFFSET(media_seg_name), AV_OPT_TYPE_STRING, {.str = "chunk-stream$RepresentationID$-$Number%05d$.m4s"}, 0, 0, E },
{ NULL },
};