typedef struct DASHContext {
const AVClass *class;
char *base_url;
+ char *adaptionset_contenttype_val;
+ char *adaptionset_par_val;
+ char *adaptionset_lang_val;
+ char *adaptionset_minbw_val;
+ char *adaptionset_maxbw_val;
+ char *adaptionset_minwidth_val;
+ char *adaptionset_maxwidth_val;
+ char *adaptionset_minheight_val;
+ char *adaptionset_maxheight_val;
+ char *adaptionset_minframerate_val;
+ char *adaptionset_maxframerate_val;
+ char *adaptionset_segmentalignment_val;
+ char *adaptionset_bitstreamswitching_val;
int n_videos;
struct representation **videos;
int n_audios;
struct representation **audios;
+ int n_subtitles;
+ struct representation **subtitles;
/* MediaPresentationDescription Attribute */
uint64_t media_presentation_duration;
uint64_t suggested_presentation_delay;
uint64_t availability_start_time;
+ uint64_t availability_end_time;
uint64_t publish_time;
uint64_t minimum_update_period;
uint64_t time_shift_buffer_depth;
c->n_audios = 0;
}
+static void free_subtitle_list(DASHContext *c)
+{
+ int i;
+ for (i = 0; i < c->n_subtitles; i++) {
+ struct representation *pls = c->subtitles[i];
+ free_representation(pls);
+ }
+ av_freep(&c->subtitles);
+ c->n_subtitles = 0;
+}
+
static int open_url(AVFormatContext *s, AVIOContext **pb, const char *url,
AVDictionary *opts, AVDictionary *opts2, int *is_http)
{
}
if (val)
- av_strlcat(tmp_str, (const char*)val, max_url_size);
+ ff_make_absolute_url(tmp_str, max_url_size, tmp_str, val);
if (rep_id_val) {
url = av_strireplace(tmp_str, "$RepresentationID$", (const char*)rep_id_val);
type = AVMEDIA_TYPE_VIDEO;
} else if (av_stristr((const char *)val, "audio")) {
type = AVMEDIA_TYPE_AUDIO;
+ } else if (av_stristr((const char *)val, "text")) {
+ type = AVMEDIA_TYPE_SUBTITLE;
}
xmlFree(val);
}
return 0;
}
-static int resolve_content_path(AVFormatContext *s, const char *url, int *max_url_size, xmlNodePtr *baseurl_nodes, int n_baseurl_nodes) {
-
+static int resolve_content_path(AVFormatContext *s, const char *url, int *max_url_size, xmlNodePtr *baseurl_nodes, int n_baseurl_nodes)
+{
char *tmp_str = NULL;
char *path = NULL;
char *mpdName = NULL;
char *root_url = NULL;
char *text = NULL;
char *tmp = NULL;
-
int isRootHttp = 0;
char token ='/';
int start = 0;
xmlNodePtr adaptionset_supplementalproperty_node)
{
int32_t ret = 0;
+ int32_t subtitle_rep_idx = 0;
int32_t audio_rep_idx = 0;
int32_t video_rep_idx = 0;
DASHContext *c = s->priv_data;
xmlNodePtr representation_segmenttemplate_node = NULL;
xmlNodePtr representation_baseurl_node = NULL;
xmlNodePtr representation_segmentlist_node = NULL;
- xmlNodePtr segmentlists_tab[2];
+ xmlNodePtr segmentlists_tab[3];
xmlNodePtr fragment_timeline_node = NULL;
xmlNodePtr fragment_templates_tab[5];
char *duration_val = NULL;
type = get_content_type(adaptionset_node);
if (type == AVMEDIA_TYPE_UNKNOWN) {
av_log(s, AV_LOG_VERBOSE, "Parsing '%s' - skipp not supported representation type\n", url);
- } else if (type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO) {
+ } else if (type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO || type == AVMEDIA_TYPE_SUBTITLE) {
// convert selected representation to our internal struct
rep = av_mallocz(sizeof(struct representation));
if (!rep) {
if (presentation_timeoffset_val) {
rep->presentation_timeoffset = (int64_t) strtoll(presentation_timeoffset_val, NULL, 10);
+ av_log(s, AV_LOG_TRACE, "rep->presentation_timeoffset = [%"PRId64"]\n", rep->presentation_timeoffset);
xmlFree(presentation_timeoffset_val);
}
if (duration_val) {
rep->fragment_duration = (int64_t) strtoll(duration_val, NULL, 10);
+ av_log(s, AV_LOG_TRACE, "rep->fragment_duration = [%"PRId64"]\n", rep->fragment_duration);
xmlFree(duration_val);
}
if (timescale_val) {
rep->fragment_timescale = (int64_t) strtoll(timescale_val, NULL, 10);
+ av_log(s, AV_LOG_TRACE, "rep->fragment_timescale = [%"PRId64"]\n", rep->fragment_timescale);
xmlFree(timescale_val);
}
if (startnumber_val) {
rep->first_seq_no = (int64_t) strtoll(startnumber_val, NULL, 10);
+ av_log(s, AV_LOG_TRACE, "rep->first_seq_no = [%"PRId64"]\n", rep->first_seq_no);
xmlFree(startnumber_val);
}
if (adaptionset_supplementalproperty_node) {
xmlNodePtr fragmenturl_node = NULL;
segmentlists_tab[0] = representation_segmentlist_node;
segmentlists_tab[1] = adaptionset_segmentlist_node;
+ segmentlists_tab[2] = period_segmentlist_node;
- duration_val = get_val_from_nodes_tab(segmentlists_tab, 2, "duration");
- timescale_val = get_val_from_nodes_tab(segmentlists_tab, 2, "timescale");
+ duration_val = get_val_from_nodes_tab(segmentlists_tab, 3, "duration");
+ timescale_val = get_val_from_nodes_tab(segmentlists_tab, 3, "timescale");
if (duration_val) {
rep->fragment_duration = (int64_t) strtoll(duration_val, NULL, 10);
+ av_log(s, AV_LOG_TRACE, "rep->fragment_duration = [%"PRId64"]\n", rep->fragment_duration);
xmlFree(duration_val);
}
if (timescale_val) {
rep->fragment_timescale = (int64_t) strtoll(timescale_val, NULL, 10);
+ av_log(s, AV_LOG_TRACE, "rep->fragment_timescale = [%"PRId64"]\n", rep->fragment_timescale);
xmlFree(timescale_val);
}
fragmenturl_node = xmlFirstElementChild(representation_segmentlist_node);
av_log(s, AV_LOG_VERBOSE, "Ignoring invalid frame rate '%s'\n", rep_framerate_val);
}
- if (type == AVMEDIA_TYPE_VIDEO) {
- rep->rep_idx = video_rep_idx;
- dynarray_add(&c->videos, &c->n_videos, rep);
- } else {
- rep->rep_idx = audio_rep_idx;
- dynarray_add(&c->audios, &c->n_audios, rep);
+ switch (type) {
+ case AVMEDIA_TYPE_VIDEO:
+ rep->rep_idx = video_rep_idx;
+ dynarray_add(&c->videos, &c->n_videos, rep);
+ break;
+ case AVMEDIA_TYPE_AUDIO:
+ rep->rep_idx = audio_rep_idx;
+ dynarray_add(&c->audios, &c->n_audios, rep);
+ break;
+ case AVMEDIA_TYPE_SUBTITLE:
+ rep->rep_idx = subtitle_rep_idx;
+ dynarray_add(&c->subtitles, &c->n_subtitles, rep);
+ break;
+ default:
+ av_log(s, AV_LOG_WARNING, "Unsupported the stream type %d\n", type);
+ break;
}
}
}
video_rep_idx += type == AVMEDIA_TYPE_VIDEO;
audio_rep_idx += type == AVMEDIA_TYPE_AUDIO;
+ subtitle_rep_idx += type == AVMEDIA_TYPE_SUBTITLE;
end:
if (rep_id_val)
xmlNodePtr period_segmentlist_node)
{
int ret = 0;
+ DASHContext *c = s->priv_data;
xmlNodePtr fragment_template_node = NULL;
xmlNodePtr content_component_node = NULL;
xmlNodePtr adaptionset_baseurl_node = NULL;
xmlNodePtr adaptionset_segmentlist_node = NULL;
xmlNodePtr adaptionset_supplementalproperty_node = NULL;
xmlNodePtr node = NULL;
+ c->adaptionset_contenttype_val = xmlGetProp(adaptionset_node, "contentType");
+ c->adaptionset_par_val = xmlGetProp(adaptionset_node, "par");
+ c->adaptionset_lang_val = xmlGetProp(adaptionset_node, "lang");
+ c->adaptionset_minbw_val = xmlGetProp(adaptionset_node, "minBandwidth");
+ c->adaptionset_maxbw_val = xmlGetProp(adaptionset_node, "maxBandwidth");
+ c->adaptionset_minwidth_val = xmlGetProp(adaptionset_node, "minWidth");
+ c->adaptionset_maxwidth_val = xmlGetProp(adaptionset_node, "maxWidth");
+ c->adaptionset_minheight_val = xmlGetProp(adaptionset_node, "minHeight");
+ c->adaptionset_maxheight_val = xmlGetProp(adaptionset_node, "maxHeight");
+ c->adaptionset_minframerate_val = xmlGetProp(adaptionset_node, "minFrameRate");
+ c->adaptionset_maxframerate_val = xmlGetProp(adaptionset_node, "maxFrameRate");
+ c->adaptionset_segmentalignment_val = xmlGetProp(adaptionset_node, "segmentAlignment");
+ c->adaptionset_bitstreamswitching_val = xmlGetProp(adaptionset_node, "bitstreamSwitching");
node = xmlFirstElementChild(adaptionset_node);
while (node) {
return 0;
}
+static int parse_programinformation(AVFormatContext *s, xmlNodePtr node)
+{
+ xmlChar *val = NULL;
+
+ node = xmlFirstElementChild(node);
+ while (node) {
+ if (!av_strcasecmp(node->name, "Title")) {
+ val = xmlNodeGetContent(node);
+ if (val) {
+ av_dict_set(&s->metadata, "Title", val, 0);
+ }
+ } else if (!av_strcasecmp(node->name, "Source")) {
+ val = xmlNodeGetContent(node);
+ if (val) {
+ av_dict_set(&s->metadata, "Source", val, 0);
+ }
+ } else if (!av_strcasecmp(node->name, "Copyright")) {
+ val = xmlNodeGetContent(node);
+ if (val) {
+ av_dict_set(&s->metadata, "Copyright", val, 0);
+ }
+ }
+ node = xmlNextElementSibling(node);
+ xmlFree(val);
+ }
+ return 0;
+}
+
static int parse_manifest(AVFormatContext *s, const char *url, AVIOContext *in)
{
DASHContext *c = s->priv_data;
if (!av_strcasecmp(attr->name, (const char *)"availabilityStartTime")) {
c->availability_start_time = get_utc_date_time_insec(s, (const char *)val);
+ av_log(s, AV_LOG_TRACE, "c->availability_start_time = [%"PRId64"]\n", c->availability_start_time);
+ } else if (!av_strcasecmp(attr->name, (const char *)"availabilityEndTime")) {
+ c->availability_end_time = get_utc_date_time_insec(s, (const char *)val);
+ av_log(s, AV_LOG_TRACE, "c->availability_end_time = [%"PRId64"]\n", c->availability_end_time);
} else if (!av_strcasecmp(attr->name, (const char *)"publishTime")) {
c->publish_time = get_utc_date_time_insec(s, (const char *)val);
+ av_log(s, AV_LOG_TRACE, "c->publish_time = [%"PRId64"]\n", c->publish_time);
} else if (!av_strcasecmp(attr->name, (const char *)"minimumUpdatePeriod")) {
c->minimum_update_period = get_duration_insec(s, (const char *)val);
+ av_log(s, AV_LOG_TRACE, "c->minimum_update_period = [%"PRId64"]\n", c->minimum_update_period);
} else if (!av_strcasecmp(attr->name, (const char *)"timeShiftBufferDepth")) {
c->time_shift_buffer_depth = get_duration_insec(s, (const char *)val);
+ av_log(s, AV_LOG_TRACE, "c->time_shift_buffer_depth = [%"PRId64"]\n", c->time_shift_buffer_depth);
} else if (!av_strcasecmp(attr->name, (const char *)"minBufferTime")) {
c->min_buffer_time = get_duration_insec(s, (const char *)val);
+ av_log(s, AV_LOG_TRACE, "c->min_buffer_time = [%"PRId64"]\n", c->min_buffer_time);
} else if (!av_strcasecmp(attr->name, (const char *)"suggestedPresentationDelay")) {
c->suggested_presentation_delay = get_duration_insec(s, (const char *)val);
+ av_log(s, AV_LOG_TRACE, "c->suggested_presentation_delay = [%"PRId64"]\n", c->suggested_presentation_delay);
} else if (!av_strcasecmp(attr->name, (const char *)"mediaPresentationDuration")) {
c->media_presentation_duration = get_duration_insec(s, (const char *)val);
+ av_log(s, AV_LOG_TRACE, "c->media_presentation_duration = [%"PRId64"]\n", c->media_presentation_duration);
}
attr = attr->next;
xmlFree(val);
if (c->period_start > 0)
c->media_presentation_duration = c->period_duration;
}
+ } else if (!av_strcasecmp(node->name, "ProgramInformation")) {
+ parse_programinformation(s, node);
}
node = xmlNextElementSibling(node);
}
if (c->is_live) {
if (pls->n_fragments) {
+ av_log(s, AV_LOG_TRACE, "in n_fragments mode\n");
num = pls->first_seq_no;
} else if (pls->n_timelines) {
+ av_log(s, AV_LOG_TRACE, "in n_timelines mode\n");
start_time_offset = get_segment_start_time_based_on_timeline(pls, 0xFFFFFFFF) - 60 * pls->fragment_timescale; // 60 seconds before end
num = calc_next_seg_no_from_timelines(pls, start_time_offset);
if (num == -1)
else
num += pls->first_seq_no;
} else if (pls->fragment_duration){
+ av_log(s, AV_LOG_TRACE, "in fragment_duration mode fragment_timescale = %"PRId64", presentation_timeoffset = %"PRId64"\n", pls->fragment_timescale, pls->presentation_timeoffset);
if (pls->presentation_timeoffset) {
- num = pls->presentation_timeoffset * pls->fragment_timescale / pls->fragment_duration;
+ num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time) * pls->fragment_timescale)-pls->presentation_timeoffset) / pls->fragment_duration - c->min_buffer_time;
} else if (c->publish_time > 0 && !c->availability_start_time) {
- num = pls->first_seq_no + (((c->publish_time - c->time_shift_buffer_depth + pls->fragment_duration) - c->suggested_presentation_delay) * pls->fragment_timescale) / pls->fragment_duration;
+ if (c->min_buffer_time) {
+ num = pls->first_seq_no + (((c->publish_time + pls->fragment_duration) - c->suggested_presentation_delay) * pls->fragment_timescale) / pls->fragment_duration - c->min_buffer_time;
+ } else {
+ num = pls->first_seq_no + (((c->publish_time - c->time_shift_buffer_depth + pls->fragment_duration) - c->suggested_presentation_delay) * pls->fragment_timescale) / pls->fragment_duration;
+ }
} else {
num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time) - c->suggested_presentation_delay) * pls->fragment_timescale) / pls->fragment_duration;
}
int64_t num = 0;
if (c->is_live && pls->fragment_duration) {
+ av_log(s, AV_LOG_TRACE, "in live mode\n");
num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time) - c->time_shift_buffer_depth) * pls->fragment_timescale) / pls->fragment_duration;
} else {
num = pls->first_seq_no;
static int refresh_manifest(AVFormatContext *s)
{
-
int ret = 0, i;
DASHContext *c = s->priv_data;
-
// save current context
int n_videos = c->n_videos;
struct representation **videos = c->videos;
int n_audios = c->n_audios;
struct representation **audios = c->audios;
+ int n_subtitles = c->n_subtitles;
+ struct representation **subtitles = c->subtitles;
char *base_url = c->base_url;
c->base_url = NULL;
c->videos = NULL;
c->n_audios = 0;
c->audios = NULL;
+ c->n_subtitles = 0;
+ c->subtitles = NULL;
ret = parse_manifest(s, s->url, NULL);
if (ret)
goto finish;
n_audios, c->n_audios);
return AVERROR_INVALIDDATA;
}
+ if (c->n_subtitles != n_subtitles) {
+ av_log(c, AV_LOG_ERROR,
+ "new manifest has mismatched no. of subtitles representations, %d -> %d\n",
+ n_subtitles, c->n_subtitles);
+ return AVERROR_INVALIDDATA;
+ }
for (i = 0; i < n_videos; i++) {
struct representation *cur_video = videos[i];
av_free(base_url);
else
c->base_url = base_url;
+
+ if (c->subtitles)
+ free_subtitle_list(c);
if (c->audios)
free_audio_list(c);
if (c->videos)
free_video_list(c);
+
+ c->n_subtitles = n_subtitles;
+ c->subtitles = subtitles;
c->n_audios = n_audios;
c->audios = audios;
c->n_videos = n_videos;
return seg;
}
-enum ReadFromURLMode {
- READ_NORMAL,
- READ_COMPLETE,
-};
-
static int read_from_url(struct representation *pls, struct fragment *seg,
- uint8_t *buf, int buf_size,
- enum ReadFromURLMode mode)
+ uint8_t *buf, int buf_size)
{
int ret;
if (seg->size >= 0)
buf_size = FFMIN(buf_size, pls->cur_seg_size - pls->cur_seg_offset);
- if (mode == READ_COMPLETE) {
- ret = avio_read(pls->input, buf, buf_size);
- if (ret < buf_size) {
- av_log(pls->parent, AV_LOG_WARNING, "Could not read complete fragment.\n");
- }
- } else {
- ret = avio_read(pls->input, buf, buf_size);
- }
+ ret = avio_read(pls->input, buf, buf_size);
if (ret > 0)
pls->cur_seg_offset += ret;
url = av_mallocz(c->max_url_size);
if (!url) {
+ ret = AVERROR(ENOMEM);
goto cleanup;
}
av_log(pls->parent, AV_LOG_VERBOSE, "DASH request for url '%s', offset %"PRId64", playlist %d\n",
url, seg->url_offset, pls->rep_idx);
ret = open_url(pls->parent, &pls->input, url, c->avio_opts, opts, NULL);
- if (ret < 0) {
- goto cleanup;
- }
cleanup:
av_free(url);
av_fast_malloc(&pls->init_sec_buf, &pls->init_sec_buf_size, sec_size);
ret = read_from_url(pls, pls->init_section, pls->init_sec_buf,
- pls->init_sec_buf_size, READ_COMPLETE);
+ pls->init_sec_buf_size);
ff_format_io_close(pls->parent, &pls->input);
if (ret < 0)
ret = open_input(c, v, v->cur_seg);
if (ret < 0) {
if (ff_check_interrupt(c->interrupt_callback)) {
- goto end;
ret = AVERROR_EXIT;
+ goto end;
}
av_log(v->parent, AV_LOG_WARNING, "Failed to open fragment of playlist %d\n", v->rep_idx);
v->cur_seq_no++;
ret = AVERROR_EOF;
goto end;
}
- ret = read_from_url(v, v->cur_seg, buf, buf_size, READ_NORMAL);
+ ret = read_from_url(v, v->cur_seg, buf, buf_size);
if (ret > 0)
goto end;
static int save_avio_options(AVFormatContext *s)
{
DASHContext *c = s->priv_data;
- const char *opts[] = { "headers", "user_agent", "cookies", NULL }, **opt = opts;
+ const char *opts[] = {
+ "headers", "user_agent", "cookies", "http_proxy", "referer", "rw_timeout", NULL };
+ const char **opt = opts;
uint8_t *buf = NULL;
int ret = 0;
static int reopen_demux_for_component(AVFormatContext *s, struct representation *pls)
{
DASHContext *c = s->priv_data;
- AVInputFormat *in_fmt = NULL;
+ ff_const59 AVInputFormat *in_fmt = NULL;
AVDictionary *in_fmt_opts = NULL;
uint8_t *avio_ctx_buffer = NULL;
int ret = 0, i;
pls->ctx->streams[i]->r_frame_rate = pls->framerate;
}
#endif
-
ret = avformat_find_stream_info(pls->ctx, NULL);
if (ret < 0)
goto fail;
goto fail;
}
st->id = i;
- avcodec_parameters_copy(st->codecpar, pls->ctx->streams[i]->codecpar);
+ avcodec_parameters_copy(st->codecpar, ist->codecpar);
avpriv_set_pts_info(st, ist->pts_wrap_bits, ist->time_base.num, ist->time_base.den);
}
return 1;
}
/*
 * Duplicate the initialization-section buffer of rep_src into rep_dest,
 * used when several representations share a common init section so it is
 * downloaded only once.
 *
 * Returns 0 on success or AVERROR(ENOMEM) if the buffer allocation fails.
 */
static int copy_init_section(struct representation *rep_dest, struct representation *rep_src)
{
    /* Allocate the full source buffer size; only init_sec_data_len bytes
     * are copied below, the zeroed tail keeps buf_size/data_len consistent. */
    rep_dest->init_sec_buf = av_mallocz(rep_src->init_sec_buf_size);
    if (!rep_dest->init_sec_buf) {
        /* NOTE(review): rep_dest->ctx may still be NULL here (callers copy
         * before opening the demuxer) — av_log tolerates a NULL context. */
        av_log(rep_dest->ctx, AV_LOG_WARNING, "Cannot alloc memory for init_sec_buf\n");
        return AVERROR(ENOMEM);
    }
    memcpy(rep_dest->init_sec_buf, rep_src->init_sec_buf, rep_src->init_sec_data_len);
    rep_dest->init_sec_buf_size = rep_src->init_sec_buf_size;
    rep_dest->init_sec_data_len = rep_src->init_sec_data_len;
    /* Keep the destination's clock aligned with the source representation. */
    rep_dest->cur_timestamp = rep_src->cur_timestamp;

    return 0;
}
static int dash_read_header(AVFormatContext *s)
{
DASHContext *c = s->priv_data;
+ struct representation *rep;
int ret = 0;
int stream_index = 0;
int i;
if ((ret = save_avio_options(s)) < 0)
goto fail;
- av_dict_set(&c->avio_opts, "seekable", "0", 0);
-
if ((ret = parse_manifest(s, s->url, s->pb)) < 0)
goto fail;
* stream. */
if (!c->is_live) {
s->duration = (int64_t) c->media_presentation_duration * AV_TIME_BASE;
+ } else {
+ av_dict_set(&c->avio_opts, "seekable", "0", 0);
}
if(c->n_videos)
/* Open the demuxer for video and audio components if available */
for (i = 0; i < c->n_videos; i++) {
- struct representation *cur_video = c->videos[i];
+ rep = c->videos[i];
if (i > 0 && c->is_init_section_common_video) {
- copy_init_section(cur_video,c->videos[0]);
+ ret = copy_init_section(rep, c->videos[0]);
+ if (ret < 0)
+ goto fail;
}
- ret = open_demux_for_component(s, cur_video);
+ ret = open_demux_for_component(s, rep);
if (ret)
goto fail;
- cur_video->stream_index = stream_index;
+ rep->stream_index = stream_index;
++stream_index;
}
c->is_init_section_common_audio = is_common_init_section_exist(c->audios, c->n_audios);
for (i = 0; i < c->n_audios; i++) {
- struct representation *cur_audio = c->audios[i];
+ rep = c->audios[i];
if (i > 0 && c->is_init_section_common_audio) {
- copy_init_section(cur_audio,c->audios[0]);
+ ret = copy_init_section(rep, c->audios[0]);
+ if (ret < 0)
+ goto fail;
}
- ret = open_demux_for_component(s, cur_audio);
+ ret = open_demux_for_component(s, rep);
if (ret)
goto fail;
- cur_audio->stream_index = stream_index;
+ rep->stream_index = stream_index;
+ ++stream_index;
+ }
+
+ if (c->n_subtitles)
+ c->is_init_section_common_audio = is_common_init_section_exist(c->subtitles, c->n_subtitles);
+
+ for (i = 0; i < c->n_subtitles; i++) {
+ rep = c->subtitles[i];
+ if (i > 0 && c->is_init_section_common_audio) {
+ ret = copy_init_section(rep, c->subtitles[0]);
+ if (ret < 0)
+ goto fail;
+ }
+ ret = open_demux_for_component(s, rep);
+
+ if (ret)
+ goto fail;
+ rep->stream_index = stream_index;
++stream_index;
}
}
for (i = 0; i < c->n_videos; i++) {
- struct representation *pls = c->videos[i];
-
- av_program_add_stream_index(s, 0, pls->stream_index);
- pls->assoc_stream = s->streams[pls->stream_index];
- if (pls->bandwidth > 0)
- av_dict_set_int(&pls->assoc_stream->metadata, "variant_bitrate", pls->bandwidth, 0);
- if (pls->id[0])
- av_dict_set(&pls->assoc_stream->metadata, "id", pls->id, 0);
+ rep = c->videos[i];
+ av_program_add_stream_index(s, 0, rep->stream_index);
+ rep->assoc_stream = s->streams[rep->stream_index];
+ if (rep->bandwidth > 0)
+ av_dict_set_int(&rep->assoc_stream->metadata, "variant_bitrate", rep->bandwidth, 0);
+ if (rep->id[0])
+ av_dict_set(&rep->assoc_stream->metadata, "id", rep->id, 0);
}
for (i = 0; i < c->n_audios; i++) {
- struct representation *pls = c->audios[i];
-
- av_program_add_stream_index(s, 0, pls->stream_index);
- pls->assoc_stream = s->streams[pls->stream_index];
- if (pls->bandwidth > 0)
- av_dict_set_int(&pls->assoc_stream->metadata, "variant_bitrate", pls->bandwidth, 0);
- if (pls->id[0])
- av_dict_set(&pls->assoc_stream->metadata, "id", pls->id, 0);
+ rep = c->audios[i];
+ av_program_add_stream_index(s, 0, rep->stream_index);
+ rep->assoc_stream = s->streams[rep->stream_index];
+ if (rep->bandwidth > 0)
+ av_dict_set_int(&rep->assoc_stream->metadata, "variant_bitrate", rep->bandwidth, 0);
+ if (rep->id[0])
+ av_dict_set(&rep->assoc_stream->metadata, "id", rep->id, 0);
+ }
+ for (i = 0; i < c->n_subtitles; i++) {
+ rep = c->subtitles[i];
+ av_program_add_stream_index(s, 0, rep->stream_index);
+ rep->assoc_stream = s->streams[rep->stream_index];
+ if (rep->id[0])
+ av_dict_set(&rep->assoc_stream->metadata, "id", rep->id, 0);
}
}
for (i = 0; i < n; i++) {
struct representation *pls = p[i];
-
int needed = !pls->assoc_stream || pls->assoc_stream->discard < AVDISCARD_ALL;
+
if (needed && !pls->ctx) {
pls->cur_seg_offset = 0;
pls->init_sec_buf_read_offset = 0;
int ret = 0, i;
int64_t mints = 0;
struct representation *cur = NULL;
+ struct representation *rep = NULL;
recheck_discard_flags(s, c->videos, c->n_videos);
recheck_discard_flags(s, c->audios, c->n_audios);
+ recheck_discard_flags(s, c->subtitles, c->n_subtitles);
for (i = 0; i < c->n_videos; i++) {
- struct representation *pls = c->videos[i];
- if (!pls->ctx)
+ rep = c->videos[i];
+ if (!rep->ctx)
continue;
- if (!cur || pls->cur_timestamp < mints) {
- cur = pls;
- mints = pls->cur_timestamp;
+ if (!cur || rep->cur_timestamp < mints) {
+ cur = rep;
+ mints = rep->cur_timestamp;
}
}
for (i = 0; i < c->n_audios; i++) {
- struct representation *pls = c->audios[i];
- if (!pls->ctx)
+ rep = c->audios[i];
+ if (!rep->ctx)
continue;
- if (!cur || pls->cur_timestamp < mints) {
- cur = pls;
- mints = pls->cur_timestamp;
+ if (!cur || rep->cur_timestamp < mints) {
+ cur = rep;
+ mints = rep->cur_timestamp;
+ }
+ }
+
+ for (i = 0; i < c->n_subtitles; i++) {
+ rep = c->subtitles[i];
+ if (!rep->ctx)
+ continue;
+ if (!cur || rep->cur_timestamp < mints) {
+ cur = rep;
+ mints = rep->cur_timestamp;
}
}
DASHContext *c = s->priv_data;
free_audio_list(c);
free_video_list(c);
-
av_dict_free(&c->avio_opts);
av_freep(&c->base_url);
return 0;
if (!ret)
ret = dash_seek(s, c->audios[i], seek_pos_msec, flags, !c->audios[i]->ctx);
}
+ for (i = 0; i < c->n_subtitles; i++) {
+ if (!ret)
+ ret = dash_seek(s, c->subtitles[i], seek_pos_msec, flags, !c->subtitles[i]->ctx);
+ }
return ret;
}
-static int dash_probe(AVProbeData *p)
+static int dash_probe(const AVProbeData *p)
{
if (!av_stristr(p->buf, "<MPD"))
return 0;
/* Demuxer private options. The extension whitelist limits which segment
 * file types the DASH demuxer will open, mirroring the HLS demuxer's
 * allowed_extensions safeguard. */
static const AVOption dash_options[] = {
    {"allowed_extensions", "List of file extensions that dash is allowed to access",
        OFFSET(allowed_extensions), AV_OPT_TYPE_STRING,
        {.str = "aac,m4a,m4s,m4v,mov,mp4,webm"},
        INT_MIN, INT_MAX, FLAGS},
    {NULL}
};