diff --git a/libavformat/dashenc.c b/libavformat/dashenc.c
index 9a8169e16795feb073fec96daa8f9d7be92611fc..9c7e23aa8e56a66f27fa69a83f1f8de753691def 100644
--- a/libavformat/dashenc.c
+++ b/libavformat/dashenc.c
@@ -61,14 +61,15 @@ typedef struct OutputStream {
     AVFormatContext *ctx;
     int ctx_inited;
     uint8_t iobuf[32768];
-    URLContext *out;
+    AVIOContext *out;
     int packets_written;
     char initfile[1024];
     int64_t init_start_pos;
     int init_range_length;
     int nb_segments, segments_size, segment_index;
     Segment **segments;
-    int64_t first_dts, start_dts, end_dts;
+    int64_t first_pts, start_pts, max_pts;
+    int64_t last_dts;
     int bit_rate;
     char bandwidth_str[64];
 
@@ -86,8 +87,8 @@ typedef struct DASHContext {
     int single_file;
     OutputStream *streams;
     int has_video, has_audio;
-    int last_duration;
-    int total_duration;
+    int64_t last_duration;
+    int64_t total_duration;
     char availability_start_time[100];
     char dirname[1024];
     const char *single_file_name;
@@ -99,24 +100,24 @@ static int dash_write(void *opaque, uint8_t *buf, int buf_size)
 {
     OutputStream *os = opaque;
     if (os->out)
-        ffurl_write(os->out, buf, buf_size);
+        avio_write(os->out, buf, buf_size);
     return buf_size;
 }
 
 // RFC 6381
-static void set_codec_str(AVFormatContext *s, AVCodecContext *codec,
+static void set_codec_str(AVFormatContext *s, AVCodecParameters *par,
                           char *str, int size)
 {
     const AVCodecTag *tags[2] = { NULL, NULL };
     uint32_t tag;
-    if (codec->codec_type == AVMEDIA_TYPE_VIDEO)
+    if (par->codec_type == AVMEDIA_TYPE_VIDEO)
         tags[0] = ff_codec_movvideo_tags;
-    else if (codec->codec_type == AVMEDIA_TYPE_AUDIO)
+    else if (par->codec_type == AVMEDIA_TYPE_AUDIO)
         tags[0] = ff_codec_movaudio_tags;
     else
         return;
 
-    tag = av_codec_get_tag(tags, codec->codec_id);
+    tag = av_codec_get_tag(tags, par->codec_id);
     if (!tag)
         return;
     if (size < 5)
@@ -127,17 +128,17 @@ static void set_codec_str(AVFormatContext *s, AVCodecContext *codec,
     if (!strcmp(str, "mp4a") || !strcmp(str, "mp4v")) {
         uint32_t oti;
         tags[0] = ff_mp4_obj_type;
-        oti = av_codec_get_tag(tags, codec->codec_id);
+        oti = av_codec_get_tag(tags, par->codec_id);
         if (oti)
             av_strlcatf(str, size, ".%02x", oti);
         else
             return;
 
         if (tag == MKTAG('m', 'p', '4', 'a')) {
-            if (codec->extradata_size >= 2) {
-                int aot = codec->extradata[0] >> 3;
+            if (par->extradata_size >= 2) {
+                int aot = par->extradata[0] >> 3;
                 if (aot == 31)
-                    aot = ((AV_RB16(codec->extradata) >> 5) & 0x3f) + 32;
+                    aot = ((AV_RB16(par->extradata) >> 5) & 0x3f) + 32;
                 av_strlcatf(str, size, ".%d", aot);
             }
         } else if (tag == MKTAG('m', 'p', '4', 'v')) {
@@ -146,8 +147,8 @@ static void set_codec_str(AVFormatContext *s, AVCodecContext *codec,
         }
     } else if (!strcmp(str, "avc1")) {
         uint8_t *tmpbuf = NULL;
-        uint8_t *extradata = codec->extradata;
-        int extradata_size = codec->extradata_size;
+        uint8_t *extradata = par->extradata;
+        int extradata_size = par->extradata_size;
         if (!extradata_size)
             return;
         if (extradata[0] != 1) {
@@ -155,8 +156,7 @@ static void set_codec_str(AVFormatContext *s, AVCodecContext *codec,
             if (avio_open_dyn_buf(&pb) < 0)
                 return;
             if (ff_isom_write_avcc(pb, extradata, extradata_size) < 0) {
-                avio_close_dyn_buf(pb, &tmpbuf);
-                av_free(tmpbuf);
+                ffio_free_dyn_buf(&pb);
                 return;
             }
             extradata_size = avio_close_dyn_buf(pb, &extradata);
@@ -182,8 +182,7 @@ static void dash_free(AVFormatContext *s)
             av_write_trailer(os->ctx);
         if (os->ctx && os->ctx->pb)
             av_free(os->ctx->pb);
-        ffurl_close(os->out);
-        os->out =  NULL;
+        ff_format_io_close(s, &os->out);
         if (os->ctx)
             avformat_free_context(os->ctx);
         for (j = 0; j < os->nb_segments; j++)
@@ -205,30 +204,36 @@ static void output_segment_list(OutputStream *os, AVIOContext *out, DASHContext
         int timescale = c->use_timeline ? os->ctx->streams[0]->time_base.den : AV_TIME_BASE;
         avio_printf(out, "\t\t\t\t<SegmentTemplate timescale=\"%d\" ", timescale);
         if (!c->use_timeline)
-            avio_printf(out, "duration=\"%d\" ", c->last_duration);
+            avio_printf(out, "duration=\"%"PRId64"\" ", c->last_duration);
         avio_printf(out, "initialization=\"%s\" media=\"%s\" startNumber=\"%d\">\n", c->init_seg_name, c->media_seg_name, c->use_timeline ? start_number : 1);
         if (c->use_timeline) {
+            int64_t cur_time = 0;
             avio_printf(out, "\t\t\t\t\t<SegmentTimeline>\n");
             for (i = start_index; i < os->nb_segments; ) {
                 Segment *seg = os->segments[i];
                 int repeat = 0;
                 avio_printf(out, "\t\t\t\t\t\t<S ");
-                if (i == start_index)
+                if (i == start_index || seg->time != cur_time) {
+                    cur_time = seg->time;
                     avio_printf(out, "t=\"%"PRId64"\" ", seg->time);
+                }
                 avio_printf(out, "d=\"%d\" ", seg->duration);
-                while (i + repeat + 1 < os->nb_segments && os->segments[i + repeat + 1]->duration == seg->duration)
+                while (i + repeat + 1 < os->nb_segments &&
+                       os->segments[i + repeat + 1]->duration == seg->duration &&
+                       os->segments[i + repeat + 1]->time == os->segments[i + repeat]->time + os->segments[i + repeat]->duration)
                     repeat++;
                 if (repeat > 0)
                     avio_printf(out, "r=\"%d\" ", repeat);
                 avio_printf(out, "/>\n");
                 i += 1 + repeat;
+                cur_time += (1 + repeat) * seg->duration;
             }
             avio_printf(out, "\t\t\t\t\t</SegmentTimeline>\n");
         }
         avio_printf(out, "\t\t\t\t</SegmentTemplate>\n");
     } else if (c->single_file) {
         avio_printf(out, "\t\t\t\t<BaseURL>%s</BaseURL>\n", os->initfile);
-        avio_printf(out, "\t\t\t\t<SegmentList timescale=\"%d\" duration=\"%d\" startNumber=\"%d\">\n", AV_TIME_BASE, c->last_duration, start_number);
+        avio_printf(out, "\t\t\t\t<SegmentList timescale=\"%d\" duration=\"%"PRId64"\" startNumber=\"%d\">\n", AV_TIME_BASE, c->last_duration, start_number);
         avio_printf(out, "\t\t\t\t\t<Initialization range=\"%"PRId64"-%"PRId64"\" />\n", os->init_start_pos, os->init_start_pos + os->init_range_length - 1);
         for (i = start_index; i < os->nb_segments; i++) {
             Segment *seg = os->segments[i];
@@ -239,7 +244,7 @@ static void output_segment_list(OutputStream *os, AVIOContext *out, DASHContext
         }
         avio_printf(out, "\t\t\t\t</SegmentList>\n");
     } else {
-        avio_printf(out, "\t\t\t\t<SegmentList timescale=\"%d\" duration=\"%d\" startNumber=\"%d\">\n", AV_TIME_BASE, c->last_duration, start_number);
+        avio_printf(out, "\t\t\t\t<SegmentList timescale=\"%d\" duration=\"%"PRId64"\" startNumber=\"%d\">\n", AV_TIME_BASE, c->last_duration, start_number);
         avio_printf(out, "\t\t\t\t\t<Initialization sourceURL=\"%s\" />\n", os->initfile);
         for (i = start_index; i < os->nb_segments; i++) {
             Segment *seg = os->segments[i];
@@ -276,7 +281,7 @@ static DASHTmplId dash_read_tmpl_id(const char *identifier, char *format_tag,
         // next parse the dash format-tag and generate a c-string format tag
         // (next_ptr now points at the first '%' at the beginning of the format-tag)
         if (id_type != DASH_TMPL_ID_UNDEFINED) {
-            const char *number_format = DASH_TMPL_ID_TIME ? "lld" : "d";
+            const char *number_format = (id_type == DASH_TMPL_ID_TIME) ? PRId64 : "d";
             if (next_ptr[0] == '$') { // no dash format-tag
                 snprintf(format_tag, format_tag_size, "%%%s", number_format);
                 *ptr = &next_ptr[1];
@@ -308,8 +313,7 @@ static void dash_fill_tmpl_params(char *dst, size_t buffer_size,
     int dst_pos = 0;
     const char *t_cur = template;
     while (dst_pos < buffer_size - 1 && *t_cur) {
-        int format_tag_size = 7;
-        char format_tag[format_tag_size]; // May be "%d", "%0Xd", or "%0Xlld" (for $Time$), where X is in [0-9]
+        char format_tag[7]; // May be "%d", "%0Xd", or "%0Xlld" (for $Time$), where X is in [0-9]
         int n = 0;
         DASHTmplId id_type;
         const char *t_next = strchr(t_cur, '$'); // copy over everything up to the first '$' character
@@ -328,7 +332,7 @@ static void dash_fill_tmpl_params(char *dst, size_t buffer_size,
             break;
 
         // t_cur is now pointing to a '$' character
-        id_type = dash_read_tmpl_id(t_cur, format_tag, format_tag_size, &t_next);
+        id_type = dash_read_tmpl_id(t_cur, format_tag, sizeof(format_tag), &t_next);
         switch (id_type) {
         case DASH_TMPL_ID_ESCAPE:
             av_strlcpy(&dst[dst_pos], "$", 2);
@@ -419,6 +423,17 @@ static void write_time(AVIOContext *out, int64_t time)
     avio_printf(out, "%d.%dS", seconds, fractions / (AV_TIME_BASE / 10));
 }
 
+static void format_date_now(char *buf, int size)
+{
+    time_t t = time(NULL);
+    struct tm *ptm, tmbuf;
+    ptm = gmtime_r(&t, &tmbuf);
+    if (ptm) {
+        if (!strftime(buf, size, "%Y-%m-%dT%H:%M:%S", ptm))
+            buf[0] = '\0';
+    }
+}
+
 static int write_manifest(AVFormatContext *s, int final)
 {
     DASHContext *c = s->priv_data;
@@ -428,7 +443,7 @@ static int write_manifest(AVFormatContext *s, int final)
     AVDictionaryEntry *title = av_dict_get(s->metadata, "title", NULL, 0);
 
     snprintf(temp_filename, sizeof(temp_filename), "%s.tmp", s->filename);
-    ret = avio_open2(&out, temp_filename, AVIO_FLAG_WRITE, &s->interrupt_callback, NULL);
+    ret = s->io_open(s, &out, temp_filename, AVIO_FLAG_WRITE, NULL);
     if (ret < 0) {
         av_log(s, AV_LOG_ERROR, "Unable to open %s for writing\n", temp_filename);
         return ret;
@@ -445,23 +460,20 @@ static int write_manifest(AVFormatContext *s, int final)
         write_time(out, c->total_duration);
         avio_printf(out, "\"\n");
     } else {
-        int update_period = c->last_duration / AV_TIME_BASE;
+        int64_t update_period = c->last_duration / AV_TIME_BASE;
+        char now_str[100];
         if (c->use_template && !c->use_timeline)
             update_period = 500;
-        avio_printf(out, "\tminimumUpdatePeriod=\"PT%dS\"\n", update_period);
-        avio_printf(out, "\tsuggestedPresentationDelay=\"PT%dS\"\n", c->last_duration / AV_TIME_BASE);
+        avio_printf(out, "\tminimumUpdatePeriod=\"PT%"PRId64"S\"\n", update_period);
+        avio_printf(out, "\tsuggestedPresentationDelay=\"PT%"PRId64"S\"\n", c->last_duration / AV_TIME_BASE);
         if (!c->availability_start_time[0] && s->nb_streams > 0 && c->streams[0].nb_segments > 0) {
-            time_t t = time(NULL);
-            struct tm *ptm, tmbuf;
-            ptm = gmtime_r(&t, &tmbuf);
-            if (ptm) {
-                if (!strftime(c->availability_start_time, sizeof(c->availability_start_time),
-                              "%Y-%m-%dT%H:%M:%S", ptm))
-                    c->availability_start_time[0] = '\0';
-            }
+            format_date_now(c->availability_start_time, sizeof(c->availability_start_time));
         }
         if (c->availability_start_time[0])
             avio_printf(out, "\tavailabilityStartTime=\"%s\"\n", c->availability_start_time);
+        format_date_now(now_str, sizeof(now_str));
+        if (now_str[0])
+            avio_printf(out, "\tpublishTime=\"%s\"\n", now_str);
         if (c->window_size && c->use_template) {
             avio_printf(out, "\ttimeShiftBufferDepth=\"");
             write_time(out, c->last_duration * c->window_size);
@@ -490,27 +502,27 @@ static int write_manifest(AVFormatContext *s, int final)
     }
 
     if (c->has_video) {
-        avio_printf(out, "\t\t<AdaptationSet id=\"video\" segmentAlignment=\"true\" bitstreamSwitching=\"true\">\n");
+        avio_printf(out, "\t\t<AdaptationSet contentType=\"video\" segmentAlignment=\"true\" bitstreamSwitching=\"true\">\n");
         for (i = 0; i < s->nb_streams; i++) {
             AVStream *st = s->streams[i];
             OutputStream *os = &c->streams[i];
-            if (s->streams[i]->codec->codec_type != AVMEDIA_TYPE_VIDEO)
+            if (st->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
                 continue;
-            avio_printf(out, "\t\t\t<Representation id=\"%d\" mimeType=\"video/mp4\" codecs=\"%s\"%s width=\"%d\" height=\"%d\">\n", i, os->codec_str, os->bandwidth_str, st->codec->width, st->codec->height);
+            avio_printf(out, "\t\t\t<Representation id=\"%d\" mimeType=\"video/mp4\" codecs=\"%s\"%s width=\"%d\" height=\"%d\">\n", i, os->codec_str, os->bandwidth_str, st->codecpar->width, st->codecpar->height);
             output_segment_list(&c->streams[i], out, c);
             avio_printf(out, "\t\t\t</Representation>\n");
         }
         avio_printf(out, "\t\t</AdaptationSet>\n");
     }
     if (c->has_audio) {
-        avio_printf(out, "\t\t<AdaptationSet id=\"audio\" segmentAlignment=\"true\" bitstreamSwitching=\"true\">\n");
+        avio_printf(out, "\t\t<AdaptationSet contentType=\"audio\" segmentAlignment=\"true\" bitstreamSwitching=\"true\">\n");
         for (i = 0; i < s->nb_streams; i++) {
             AVStream *st = s->streams[i];
             OutputStream *os = &c->streams[i];
-            if (s->streams[i]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
+            if (st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO)
                 continue;
-            avio_printf(out, "\t\t\t<Representation id=\"%d\" mimeType=\"audio/mp4\" codecs=\"%s\"%s audioSamplingRate=\"%d\">\n", i, os->codec_str, os->bandwidth_str, st->codec->sample_rate);
-            avio_printf(out, "\t\t\t\t<AudioChannelConfiguration schemeIdUri=\"urn:mpeg:dash:23003:3:audio_channel_configuration:2011\" value=\"%d\" />\n", st->codec->channels);
+            avio_printf(out, "\t\t\t<Representation id=\"%d\" mimeType=\"audio/mp4\" codecs=\"%s\"%s audioSamplingRate=\"%d\">\n", i, os->codec_str, os->bandwidth_str, st->codecpar->sample_rate);
+            avio_printf(out, "\t\t\t\t<AudioChannelConfiguration schemeIdUri=\"urn:mpeg:dash:23003:3:audio_channel_configuration:2011\" value=\"%d\" />\n", st->codecpar->channels);
             output_segment_list(&c->streams[i], out, c);
             avio_printf(out, "\t\t\t</Representation>\n");
         }
@@ -519,7 +531,7 @@ static int write_manifest(AVFormatContext *s, int final)
     avio_printf(out, "\t</Period>\n");
     avio_printf(out, "</MPD>\n");
     avio_flush(out);
-    avio_close(out);
+    ff_format_io_close(s, &out);
     return ff_rename(temp_filename, s->filename);
 }
 
@@ -569,7 +581,7 @@ static int dash_write_header(AVFormatContext *s)
         AVDictionary *opts = NULL;
         char filename[1024];
 
-        os->bit_rate = s->streams[i]->codec->bit_rate;
+        os->bit_rate = s->streams[i]->codecpar->bit_rate;
         if (os->bit_rate) {
             snprintf(os->bandwidth_str, sizeof(os->bandwidth_str),
                      " bandwidth=\"%d\"", os->bit_rate);
@@ -591,12 +603,15 @@ static int dash_write_header(AVFormatContext *s)
         os->ctx = ctx;
         ctx->oformat = oformat;
         ctx->interrupt_callback = s->interrupt_callback;
+        ctx->opaque             = s->opaque;
+        ctx->io_close           = s->io_close;
+        ctx->io_open            = s->io_open;
 
         if (!(st = avformat_new_stream(ctx, NULL))) {
             ret = AVERROR(ENOMEM);
             goto fail;
         }
-        avcodec_copy_context(st->codec, s->streams[i]->codec);
+        avcodec_parameters_copy(st->codecpar, s->streams[i]->codecpar);
         st->sample_aspect_ratio = s->streams[i]->sample_aspect_ratio;
         st->time_base = s->streams[i]->time_base;
         ctx->avoid_negative_ts = s->avoid_negative_ts;
@@ -616,12 +631,12 @@ static int dash_write_header(AVFormatContext *s)
             dash_fill_tmpl_params(os->initfile, sizeof(os->initfile), c->init_seg_name, i, 0, os->bit_rate, 0);
         }
         snprintf(filename, sizeof(filename), "%s%s", c->dirname, os->initfile);
-        ret = ffurl_open(&os->out, filename, AVIO_FLAG_WRITE, &s->interrupt_callback, NULL);
+        ret = s->io_open(s, &os->out, filename, AVIO_FLAG_WRITE, NULL);
         if (ret < 0)
             goto fail;
         os->init_start_pos = 0;
 
-        av_dict_set(&opts, "movflags", "frag_custom+dash", 0);
+        av_dict_set(&opts, "movflags", "frag_custom+dash+delay_moov", 0);
         if ((ret = avformat_write_header(ctx, &opts)) < 0) {
              goto fail;
         }
@@ -629,25 +644,22 @@ static int dash_write_header(AVFormatContext *s)
         avio_flush(ctx->pb);
         av_dict_free(&opts);
 
-        if (c->single_file) {
-            os->init_range_length = avio_tell(ctx->pb);
-        } else {
-            ffurl_close(os->out);
-            os->out = NULL;
-        }
+        av_log(s, AV_LOG_VERBOSE, "Representation %d init segment will be written to: %s\n", i, filename);
 
         s->streams[i]->time_base = st->time_base;
         // If the muxer wants to shift timestamps, request to have them shifted
         // already before being handed to this muxer, so we don't have mismatches
         // between the MPD and the actual segments.
         s->avoid_negative_ts = ctx->avoid_negative_ts;
-        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
+        if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
             c->has_video = 1;
-        else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
+        else if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
             c->has_audio = 1;
 
-        set_codec_str(s, os->ctx->streams[0]->codec, os->codec_str, sizeof(os->codec_str));
-        os->first_dts = AV_NOPTS_VALUE;
+        set_codec_str(s, st->codecpar, os->codec_str, sizeof(os->codec_str));
+        os->first_pts = AV_NOPTS_VALUE;
+        os->max_pts = AV_NOPTS_VALUE;
+        os->last_dts = AV_NOPTS_VALUE;
         os->segment_index = 1;
     }
 
@@ -656,6 +668,8 @@ static int dash_write_header(AVFormatContext *s)
         ret = AVERROR(EINVAL);
     }
     ret = write_manifest(s, 0);
+    if (!ret)
+        av_log(s, AV_LOG_VERBOSE, "Manifest written to: %s\n", s->filename);
 
 fail:
     if (ret)
@@ -685,6 +699,10 @@ static int add_segment(OutputStream *os, const char *file,
     av_strlcpy(seg->file, file, sizeof(seg->file));
     seg->time = time;
     seg->duration = duration;
+    if (seg->time < 0) { // If pts<0, it is expected to be cut away with an edit list
+        seg->duration += seg->time;
+        seg->time = 0;
+    }
     seg->start_pos = start_pos;
     seg->range_length = range_length;
     seg->index_length = index_length;
@@ -703,25 +721,22 @@ static void write_styp(AVIOContext *pb)
     ffio_wfourcc(pb, "msix");
 }
 
-static void find_index_range(AVFormatContext *s, const char *dirname,
-                             const char *filename, int64_t pos,
-                             int *index_length)
+static void find_index_range(AVFormatContext *s, const char *full_path,
+                             int64_t pos, int *index_length)
 {
-    char full_path[1024];
     uint8_t buf[8];
-    URLContext *fd;
+    AVIOContext *pb;
     int ret;
 
-    snprintf(full_path, sizeof(full_path), "%s%s", dirname, filename);
-    ret = ffurl_open(&fd, full_path, AVIO_FLAG_READ, &s->interrupt_callback, NULL);
+    ret = s->io_open(s, &pb, full_path, AVIO_FLAG_READ, NULL);
     if (ret < 0)
         return;
-    if (ffurl_seek(fd, pos, SEEK_SET) != pos) {
-        ffurl_close(fd);
+    if (avio_seek(pb, pos, SEEK_SET) != pos) {
+        ff_format_io_close(s, &pb);
         return;
     }
-    ret = ffurl_read(fd, buf, 8);
-    ffurl_close(fd);
+    ret = avio_read(pb, buf, 8);
+    ff_format_io_close(s, &pb);
     if (ret < 8)
         return;
     if (AV_RL32(&buf[4]) != MKTAG('s', 'i', 'd', 'x'))
@@ -729,6 +744,29 @@ static void find_index_range(AVFormatContext *s, const char *dirname,
     *index_length = AV_RB32(&buf[0]);
 }
 
+static int update_stream_extradata(AVFormatContext *s, OutputStream *os,
+                                   AVCodecParameters *par)
+{
+    uint8_t *extradata;
+
+    if (os->ctx->streams[0]->codecpar->extradata_size || !par->extradata_size)
+        return 0;
+
+    extradata = av_malloc(par->extradata_size);
+
+    if (!extradata)
+        return AVERROR(ENOMEM);
+
+    memcpy(extradata, par->extradata, par->extradata_size);
+
+    os->ctx->streams[0]->codecpar->extradata = extradata;
+    os->ctx->streams[0]->codecpar->extradata_size = par->extradata_size;
+
+    set_codec_str(s, par, os->codec_str, sizeof(os->codec_str));
+
+    return 0;
+}
+
 static int dash_flush(AVFormatContext *s, int final, int stream)
 {
     DASHContext *c = s->priv_data;
@@ -740,7 +778,7 @@ static int dash_flush(AVFormatContext *s, int final, int stream)
     for (i = 0; i < s->nb_streams; i++) {
         OutputStream *os = &c->streams[i];
         char filename[1024] = "", full_path[1024], temp_path[1024];
-        int64_t start_pos = avio_tell(os->ctx->pb);
+        int64_t start_pos;
         int range_length, index_length = 0;
 
         if (!os->packets_written)
@@ -750,7 +788,7 @@ static int dash_flush(AVFormatContext *s, int final, int stream)
         // Flush all audio streams as well, in sync with video keyframes,
         // but not the other video streams.
         if (stream >= 0 && i != stream) {
-            if (s->streams[i]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
+            if (s->streams[i]->codecpar->codec_type != AVMEDIA_TYPE_AUDIO)
                 continue;
             // Make sure we don't flush audio streams multiple times, when
             // all video streams are flushed one at a time.
@@ -758,30 +796,42 @@ static int dash_flush(AVFormatContext *s, int final, int stream)
                 continue;
         }
 
+        if (!os->init_range_length) {
+            av_write_frame(os->ctx, NULL);
+            os->init_range_length = avio_tell(os->ctx->pb);
+            if (!c->single_file)
+                ff_format_io_close(s, &os->out);
+        }
+
+        start_pos = avio_tell(os->ctx->pb);
+
         if (!c->single_file) {
-            dash_fill_tmpl_params(filename, sizeof(filename), c->media_seg_name, i, os->segment_index, os->bit_rate, os->start_dts);
+            dash_fill_tmpl_params(filename, sizeof(filename), c->media_seg_name, i, os->segment_index, os->bit_rate, os->start_pts);
             snprintf(full_path, sizeof(full_path), "%s%s", c->dirname, filename);
             snprintf(temp_path, sizeof(temp_path), "%s.tmp", full_path);
-            ret = ffurl_open(&os->out, temp_path, AVIO_FLAG_WRITE, &s->interrupt_callback, NULL);
+            ret = s->io_open(s, &os->out, temp_path, AVIO_FLAG_WRITE, NULL);
             if (ret < 0)
                 break;
             write_styp(os->ctx->pb);
+        } else {
+            snprintf(full_path, sizeof(full_path), "%s%s", c->dirname, os->initfile);
         }
+
         av_write_frame(os->ctx, NULL);
         avio_flush(os->ctx->pb);
         os->packets_written = 0;
 
         range_length = avio_tell(os->ctx->pb) - start_pos;
         if (c->single_file) {
-            find_index_range(s, c->dirname, os->initfile, start_pos, &index_length);
+            find_index_range(s, full_path, start_pos, &index_length);
         } else {
-            ffurl_close(os->out);
-            os->out = NULL;
+            ff_format_io_close(s, &os->out);
             ret = ff_rename(temp_path, full_path);
             if (ret < 0)
                 break;
         }
-        add_segment(os, filename, os->start_dts, os->end_dts - os->start_dts, start_pos, range_length, index_length);
+        add_segment(os, filename, os->start_pts, os->max_pts - os->start_pts, start_pos, range_length, index_length);
+        av_log(s, AV_LOG_VERBOSE, "Representation %d media segment %d written to: %s\n", i, os->segment_index, full_path);
     }
 
     if (c->window_size || (final && c->remove_at_exit)) {
@@ -817,27 +867,41 @@ static int dash_write_packet(AVFormatContext *s, AVPacket *pkt)
     int64_t seg_end_duration = (os->segment_index) * (int64_t) c->min_seg_duration;
     int ret;
 
+    ret = update_stream_extradata(s, os, st->codecpar);
+    if (ret < 0)
+        return ret;
+
+    // Fill in a heuristic guess of the packet duration, if none is available.
+    // The mp4 muxer will do something similar (for the last packet in a fragment)
+    // if nothing is set (setting it for the other packets doesn't hurt).
+    // By setting a nonzero duration here, we can be sure that the mp4 muxer won't
+    // invoke its heuristic (this doesn't have to be identical to that algorithm),
+    // so that we know the exact timestamps of fragments.
+    if (!pkt->duration && os->last_dts != AV_NOPTS_VALUE)
+        pkt->duration = pkt->dts - os->last_dts;
+    os->last_dts = pkt->dts;
+
     // If forcing the stream to start at 0, the mp4 muxer will set the start
     // timestamps to 0. Do the same here, to avoid mismatches in duration/timestamps.
-    if (os->first_dts == AV_NOPTS_VALUE &&
+    if (os->first_pts == AV_NOPTS_VALUE &&
         s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_MAKE_ZERO) {
         pkt->pts -= pkt->dts;
         pkt->dts  = 0;
     }
 
-    if (os->first_dts == AV_NOPTS_VALUE)
-        os->first_dts = pkt->dts;
+    if (os->first_pts == AV_NOPTS_VALUE)
+        os->first_pts = pkt->pts;
 
-    if ((!c->has_video || st->codec->codec_type == AVMEDIA_TYPE_VIDEO) &&
+    if ((!c->has_video || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
         pkt->flags & AV_PKT_FLAG_KEY && os->packets_written &&
-        av_compare_ts(pkt->dts - os->first_dts, st->time_base,
+        av_compare_ts(pkt->pts - os->first_pts, st->time_base,
                       seg_end_duration, AV_TIME_BASE_Q) >= 0) {
         int64_t prev_duration = c->last_duration;
 
-        c->last_duration = av_rescale_q(pkt->dts - os->start_dts,
+        c->last_duration = av_rescale_q(pkt->pts - os->start_pts,
                                         st->time_base,
                                         AV_TIME_BASE_Q);
-        c->total_duration = av_rescale_q(pkt->dts - os->first_dts,
+        c->total_duration = av_rescale_q(pkt->pts - os->first_pts,
                                          st->time_base,
                                          AV_TIME_BASE_Q);
 
@@ -854,9 +918,19 @@ static int dash_write_packet(AVFormatContext *s, AVPacket *pkt)
             return ret;
     }
 
-    if (!os->packets_written)
-        os->start_dts = pkt->dts;
-    os->end_dts = pkt->dts + pkt->duration;
+    if (!os->packets_written) {
+        // If we wrote a previous segment, adjust the start time of the segment
+        // to the end of the previous one (which is the same as the mp4 muxer
+        // does). This avoids gaps in the timeline.
+        if (os->max_pts != AV_NOPTS_VALUE)
+            os->start_pts = os->max_pts;
+        else
+            os->start_pts = pkt->pts;
+    }
+    if (os->max_pts == AV_NOPTS_VALUE)
+        os->max_pts = pkt->pts + pkt->duration;
+    else
+        os->max_pts = FFMAX(os->max_pts, pkt->pts + pkt->duration);
     os->packets_written++;
     return ff_write_chained(os->ctx, 0, pkt, s);
 }
@@ -870,10 +944,10 @@ static int dash_write_trailer(AVFormatContext *s)
         // If no segments have been written so far, try to do a crude
         // guess of the segment duration
         if (!c->last_duration)
-            c->last_duration = av_rescale_q(os->end_dts - os->start_dts,
+            c->last_duration = av_rescale_q(os->max_pts - os->start_pts,
                                             s->streams[0]->time_base,
                                             AV_TIME_BASE_Q);
-        c->total_duration = av_rescale_q(os->end_dts - os->first_dts,
+        c->total_duration = av_rescale_q(os->max_pts - os->first_pts,
                                          s->streams[0]->time_base,
                                          AV_TIME_BASE_Q);
     }
@@ -905,8 +979,8 @@ static const AVOption options[] = {
     { "use_timeline", "Use SegmentTimeline in SegmentTemplate", OFFSET(use_timeline), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, E },
     { "single_file", "Store all segments in one file, accessed using byte ranges", OFFSET(single_file), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, E },
     { "single_file_name", "DASH-templated name to be used for baseURL. Implies storing all segments in one file, accessed using byte ranges", OFFSET(single_file_name), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, E },
-    { "init_seg_name", "DASH-templated name to used for the initialization segment", OFFSET(init_seg_name), AV_OPT_TYPE_STRING, {.str = "init-stream$RepresentationID$.m4s"},  0, 0, E },
-    { "media_seg_name", "DASH-templated name to used for the media segments", OFFSET(media_seg_name), AV_OPT_TYPE_STRING, {.str = "chunk-stream$RepresentationID$-$Number%05d$.m4s"},  0, 0, E },
+    { "init_seg_name", "DASH-templated name to used for the initialization segment", OFFSET(init_seg_name), AV_OPT_TYPE_STRING, {.str = "init-stream$RepresentationID$.m4s"}, 0, 0, E },
+    { "media_seg_name", "DASH-templated name to used for the media segments", OFFSET(media_seg_name), AV_OPT_TYPE_STRING, {.str = "chunk-stream$RepresentationID$-$Number%05d$.m4s"}, 0, 0, E },
     { NULL },
 };