struct representation **videos;
int n_audios;
struct representation **audios;
+ int n_subtitles;
+ struct representation **subtitles;
/* MediaPresentationDescription Attribute */
uint64_t media_presentation_duration;
c->n_audios = 0;
}
+/* Free every subtitle representation held by the DASH context, release
+ * the subtitle pointer array itself and reset the count to zero.
+ * Mirrors the existing free_audio_list()/free_video_list() helpers so
+ * all three media-type lists are torn down the same way. */
+static void free_subtitle_list(DASHContext *c)
+{
+ int i;
+ for (i = 0; i < c->n_subtitles; i++) {
+ struct representation *pls = c->subtitles[i];
+ free_representation(pls);
+ }
+ av_freep(&c->subtitles);
+ c->n_subtitles = 0;
+}
+
static int open_url(AVFormatContext *s, AVIOContext **pb, const char *url,
AVDictionary *opts, AVDictionary *opts2, int *is_http)
{
}
if (val)
- av_strlcat(tmp_str, (const char*)val, max_url_size);
+ ff_make_absolute_url(tmp_str, max_url_size, tmp_str, val);
if (rep_id_val) {
url = av_strireplace(tmp_str, "$RepresentationID$", (const char*)rep_id_val);
type = AVMEDIA_TYPE_VIDEO;
} else if (av_stristr((const char *)val, "audio")) {
type = AVMEDIA_TYPE_AUDIO;
+ } else if (av_stristr((const char *)val, "text")) {
+ type = AVMEDIA_TYPE_SUBTITLE;
}
xmlFree(val);
}
xmlNodePtr adaptionset_supplementalproperty_node)
{
int32_t ret = 0;
+ int32_t subtitle_rep_idx = 0;
int32_t audio_rep_idx = 0;
int32_t video_rep_idx = 0;
DASHContext *c = s->priv_data;
type = get_content_type(adaptionset_node);
if (type == AVMEDIA_TYPE_UNKNOWN) {
av_log(s, AV_LOG_VERBOSE, "Parsing '%s' - skipp not supported representation type\n", url);
- } else if (type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO) {
+ } else if (type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO || type == AVMEDIA_TYPE_SUBTITLE) {
// convert selected representation to our internal struct
rep = av_mallocz(sizeof(struct representation));
if (!rep) {
av_log(s, AV_LOG_VERBOSE, "Ignoring invalid frame rate '%s'\n", rep_framerate_val);
}
- if (type == AVMEDIA_TYPE_VIDEO) {
- rep->rep_idx = video_rep_idx;
- dynarray_add(&c->videos, &c->n_videos, rep);
- } else {
- rep->rep_idx = audio_rep_idx;
- dynarray_add(&c->audios, &c->n_audios, rep);
+ /* Record the per-type representation index and append the
+  * representation to the matching list on the DASH context. */
+ switch (type) {
+ case AVMEDIA_TYPE_VIDEO:
+ rep->rep_idx = video_rep_idx;
+ dynarray_add(&c->videos, &c->n_videos, rep);
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ rep->rep_idx = audio_rep_idx;
+ dynarray_add(&c->audios, &c->n_audios, rep);
+ break;
+ case AVMEDIA_TYPE_SUBTITLE:
+ rep->rep_idx = subtitle_rep_idx;
+ dynarray_add(&c->subtitles, &c->n_subtitles, rep);
+ break;
+ default:
+ /* Unreachable in practice: the enclosing check only admits
+  * video/audio/subtitle, but warn defensively. */
+ av_log(s, AV_LOG_WARNING, "Unsupported stream type %d\n", type);
+ break;
 }
}
}
video_rep_idx += type == AVMEDIA_TYPE_VIDEO;
audio_rep_idx += type == AVMEDIA_TYPE_AUDIO;
+ subtitle_rep_idx += type == AVMEDIA_TYPE_SUBTITLE;
end:
if (rep_id_val)
} else if (pls->fragment_duration){
av_log(s, AV_LOG_TRACE, "in fragment_duration mode fragment_timescale = %"PRId64", presentation_timeoffset = %"PRId64"\n", pls->fragment_timescale, pls->presentation_timeoffset);
if (pls->presentation_timeoffset) {
- num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time) * pls->fragment_timescale)-pls->presentation_timeoffset) / pls->fragment_duration;
+ num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time) * pls->fragment_timescale)-pls->presentation_timeoffset) / pls->fragment_duration - c->min_buffer_time;
} else if (c->publish_time > 0 && !c->availability_start_time) {
if (c->min_buffer_time) {
num = pls->first_seq_no + (((c->publish_time + pls->fragment_duration) - c->suggested_presentation_delay) * pls->fragment_timescale) / pls->fragment_duration - c->min_buffer_time;
struct representation **videos = c->videos;
int n_audios = c->n_audios;
struct representation **audios = c->audios;
+ int n_subtitles = c->n_subtitles;
+ struct representation **subtitles = c->subtitles;
char *base_url = c->base_url;
c->base_url = NULL;
c->videos = NULL;
c->n_audios = 0;
c->audios = NULL;
+ c->n_subtitles = 0;
+ c->subtitles = NULL;
ret = parse_manifest(s, s->url, NULL);
if (ret)
goto finish;
n_audios, c->n_audios);
return AVERROR_INVALIDDATA;
}
+ if (c->n_subtitles != n_subtitles) {
+ av_log(c, AV_LOG_ERROR,
+ "new manifest has mismatched no. of subtitles representations, %d -> %d\n",
+ n_subtitles, c->n_subtitles);
+ return AVERROR_INVALIDDATA;
+ }
for (i = 0; i < n_videos; i++) {
struct representation *cur_video = videos[i];
av_free(base_url);
else
c->base_url = base_url;
+
+ if (c->subtitles)
+ free_subtitle_list(c);
if (c->audios)
free_audio_list(c);
if (c->videos)
free_video_list(c);
+
+ c->n_subtitles = n_subtitles;
+ c->subtitles = subtitles;
c->n_audios = n_audios;
c->audios = audios;
c->n_videos = n_videos;
if ((ret = save_avio_options(s)) < 0)
goto fail;
- av_dict_set(&c->avio_opts, "seekable", "0", 0);
-
if ((ret = parse_manifest(s, s->url, s->pb)) < 0)
goto fail;
* stream. */
if (!c->is_live) {
s->duration = (int64_t) c->media_presentation_duration * AV_TIME_BASE;
+ } else {
+ av_dict_set(&c->avio_opts, "seekable", "0", 0);
}
if(c->n_videos)
++stream_index;
}
+ /* Open a demuxer for every subtitle representation; when all of
+  * them share a common init section, download it once for the first
+  * representation and copy it to the rest. */
+ /* FIXME(review): this reuses is_init_section_common_audio for the
+  * SUBTITLE list — both the assignment below and the check inside the
+  * loop — which clobbers the result computed for the audio list above.
+  * A dedicated is_init_section_common_subtitle flag on DASHContext is
+  * needed; confirm against the audio/video handling earlier in this
+  * function. */
+ if (c->n_subtitles)
+ c->is_init_section_common_audio = is_common_init_section_exist(c->subtitles, c->n_subtitles);
+
+ for (i = 0; i < c->n_subtitles; i++) {
+ struct representation *cur_subtitle = c->subtitles[i];
+ if (i > 0 && c->is_init_section_common_audio) {
+ copy_init_section(cur_subtitle,c->subtitles[0]);
+ }
+ ret = open_demux_for_component(s, cur_subtitle);
+
+ if (ret)
+ goto fail;
+ cur_subtitle->stream_index = stream_index;
+ ++stream_index;
+ }
+
+
if (!stream_index) {
ret = AVERROR_INVALIDDATA;
goto fail;
if (pls->id[0])
av_dict_set(&pls->assoc_stream->metadata, "id", pls->id, 0);
}
+ for (i = 0; i < c->n_subtitles; i++) {
+ struct representation *pls = c->subtitles[i];
+ av_program_add_stream_index(s, 0, pls->stream_index);
+ pls->assoc_stream = s->streams[pls->stream_index];
+ if (pls->id[0])
+ av_dict_set(&pls->assoc_stream->metadata, "id", pls->id, 0);
+ }
+
}
return 0;
recheck_discard_flags(s, c->videos, c->n_videos);
recheck_discard_flags(s, c->audios, c->n_audios);
+ recheck_discard_flags(s, c->subtitles, c->n_subtitles);
for (i = 0; i < c->n_videos; i++) {
struct representation *pls = c->videos[i];
}
}
+ for (i = 0; i < c->n_subtitles; i++) {
+ struct representation *pls = c->subtitles[i];
+ if (!pls->ctx)
+ continue;
+ if (!cur || pls->cur_timestamp < mints) {
+ cur = pls;
+ mints = pls->cur_timestamp;
+ }
+ }
+
if (!cur) {
return AVERROR_INVALIDDATA;
}
if (!ret)
ret = dash_seek(s, c->audios[i], seek_pos_msec, flags, !c->audios[i]->ctx);
}
+ for (i = 0; i < c->n_subtitles; i++) {
+ if (!ret)
+ ret = dash_seek(s, c->subtitles[i], seek_pos_msec, flags, !c->subtitles[i]->ctx);
+ }
return ret;
}
static const AVOption dash_options[] = {
    {"allowed_extensions", "List of file extensions that dash is allowed to access",
        OFFSET(allowed_extensions), AV_OPT_TYPE_STRING,
-        {.str = "aac,m4a,m4s,m4v,mov,mp4"},
+        /* NOTE(review): "webm" presumably admitted so WebM-packaged
+         * (e.g. WebVTT) subtitle segments can be fetched — confirm
+         * against the segment URLs produced for subtitle
+         * representations. */
+        {.str = "aac,m4a,m4s,m4v,mov,mp4,webm"},
    INT_MIN, INT_MAX, FLAGS},
    {NULL}
};