#include <assert.h>
#include <errno.h>
+#include <math.h>
#include <netdb.h>
#include <netinet/in.h>
#include <poll.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
+#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#include <map>
#include <string>
#include <utility>
#include "httpinput.h"
#include "log.h"
#include "metacube2.h"
-#include "mutexlock.h"
#include "parse.h"
#include "serverpool.h"
#include "state.pb.h"
+#include "stream.h"
+#include "timespec.h"
#include "util.h"
#include "version.h"
using namespace std;
-extern ServerPool *servers;
-
namespace {
-// Compute b-a.
-timespec clock_diff(const timespec &a, const timespec &b)
+string host_header(const string &host, const string &port)
{
- timespec ret;
- ret.tv_sec = b.tv_sec - a.tv_sec;
- ret.tv_nsec = b.tv_nsec - a.tv_nsec;
- if (ret.tv_nsec < 0) {
- ret.tv_sec--;
- ret.tv_nsec += 1000000000;
+ if (port == "http" || atoi(port.c_str()) == 80) {
+ return host;
+ } else {
+ return host + ":" + port;
}
- assert(ret.tv_nsec >= 0);
- return ret;
}
} // namespace
-HTTPInput::HTTPInput(const string &url)
+extern ServerPool *servers;
+
+HTTPInput::HTTPInput(const string &url, Input::Encoding encoding)
: state(NOT_CONNECTED),
url(url),
- has_metacube_header(false),
- sock(-1)
+ encoding(encoding)
{
- pthread_mutex_init(&stats_mutex, NULL);
stats.url = url;
stats.bytes_received = 0;
stats.data_bytes_received = 0;
+ stats.metadata_bytes_received = 0;
stats.connect_time = -1;
+ stats.latency_sec = HUGE_VAL;
}
HTTPInput::HTTPInput(const InputProto &serialized)
: state(State(serialized.state())),
url(serialized.url()),
+ encoding(serialized.is_metacube_encoded() ?
+ Input::INPUT_ENCODING_METACUBE :
+ Input::INPUT_ENCODING_RAW),
request(serialized.request()),
request_bytes_sent(serialized.request_bytes_sent()),
response(serialized.response()),
pending_data.resize(serialized.pending_data().size());
memcpy(&pending_data[0], serialized.pending_data().data(), serialized.pending_data().size());
- string protocol;
- parse_url(url, &protocol, &host, &port, &path); // Don't care if it fails.
+ string protocol, user;
+ parse_url(url, &protocol, &user, &host, &port, &path); // Don't care if it fails.
- // Older versions stored the extra \r\n in the HTTP header.
- // Strip it if we find it.
- if (http_header.size() >= 4 &&
- memcmp(http_header.data() + http_header.size() - 4, "\r\n\r\n", 4) == 0) {
- http_header.resize(http_header.size() - 2);
- }
-
- pthread_mutex_init(&stats_mutex, NULL);
stats.url = url;
stats.bytes_received = serialized.bytes_received();
stats.data_bytes_received = serialized.data_bytes_received();
+ stats.metadata_bytes_received = serialized.metadata_bytes_received();
if (serialized.has_connect_time()) {
stats.connect_time = serialized.connect_time();
} else {
- stats.connect_time = time(NULL);
+ stats.connect_time = time(nullptr);
+ }
+ if (serialized.has_latency_sec()) {
+ stats.latency_sec = serialized.latency_sec();
+ } else {
+ stats.latency_sec = HUGE_VAL;
}
+
+ last_verbose_connection.tv_sec = -3600;
+ last_verbose_connection.tv_nsec = 0;
}
void HTTPInput::close_socket()
{
if (sock != -1) {
safe_close(sock);
+ sock = -1;
}
- MutexLock lock(&stats_mutex);
+ lock_guard<mutex> lock(stats_mutex);
stats.connect_time = -1;
}
serialized.set_sock(sock);
serialized.set_bytes_received(stats.bytes_received);
serialized.set_data_bytes_received(stats.data_bytes_received);
+ if (isfinite(stats.latency_sec)) {
+ serialized.set_latency_sec(stats.latency_sec);
+ }
serialized.set_connect_time(stats.connect_time);
+ if (encoding == Input::INPUT_ENCODING_METACUBE) {
+ serialized.set_is_metacube_encoded(true);
+ } else {
+ assert(encoding == Input::INPUT_ENCODING_RAW);
+ serialized.set_is_metacube_encoded(false);
+ }
return serialized;
}
int HTTPInput::lookup_and_connect(const string &host, const string &port)
{
addrinfo *ai;
- int err = getaddrinfo(host.c_str(), port.c_str(), NULL, &ai);
+ int err = getaddrinfo(host.c_str(), port.c_str(), nullptr, &ai);
if (err != 0) {
- log(WARNING, "[%s] Lookup of '%s' failed (%s).",
- url.c_str(), host.c_str(), gai_strerror(err));
+ if (!suppress_logging) {
+ log(WARNING, "[%s] Lookup of '%s' failed (%s).",
+ url.c_str(), host.c_str(), gai_strerror(err));
+ }
return -1;
}
if (ioctl(sock, FIONBIO, &one) == -1) {
log_perror("ioctl(FIONBIO)");
safe_close(sock);
+ freeaddrinfo(base_ai);
return -1;
}
// Wait for the connect to complete, or an error to happen.
for ( ;; ) {
- bool complete = wait_for_activity(sock, POLLIN | POLLOUT, NULL);
+ bool complete = wait_for_activity(sock, POLLIN | POLLOUT, nullptr);
if (should_stop()) {
safe_close(sock);
return -1;
}
// Give the last one as error.
- log(WARNING, "[%s] Connect to '%s' failed (%s)",
- url.c_str(), host.c_str(), strerror(errno));
+ if (!suppress_logging) {
+ log(WARNING, "[%s] Connect to '%s' failed (%s)",
+ url.c_str(), host.c_str(), strerror(errno));
+ }
freeaddrinfo(base_ai);
return -1;
}
-bool HTTPInput::parse_response(const std::string &request)
+bool HTTPInput::parse_response(const string &request)
{
vector<string> lines = split_lines(response);
if (lines.empty()) {
- log(WARNING, "[%s] Empty HTTP response from input.", url.c_str());
+ if (!suppress_logging) {
+ log(WARNING, "[%s] Empty HTTP response from input.", url.c_str());
+ }
return false;
}
vector<string> first_line_tokens = split_tokens(lines[0]);
if (first_line_tokens.size() < 2) {
- log(WARNING, "[%s] Malformed response line '%s' from input.",
- url.c_str(), lines[0].c_str());
+ if (!suppress_logging) {
+ log(WARNING, "[%s] Malformed response line '%s' from input.",
+ url.c_str(), lines[0].c_str());
+ }
return false;
}
int response = atoi(first_line_tokens[1].c_str());
if (response != 200) {
- log(WARNING, "[%s] Non-200 response '%s' from input.",
- url.c_str(), lines[0].c_str());
+ if (!suppress_logging) {
+ log(WARNING, "[%s] Non-200 response '%s' from input.",
+ url.c_str(), lines[0].c_str());
+ }
return false;
}
- multimap<string, string> parameters;
- for (size_t i = 1; i < lines.size(); ++i) {
- size_t split = lines[i].find(":");
- if (split == string::npos) {
- log(WARNING, "[%s] Ignoring malformed HTTP response line '%s'",
- url.c_str(), lines[i].c_str());
- continue;
- }
+ HTTPHeaderMultimap parameters = extract_headers(lines, url);
- string key(lines[i].begin(), lines[i].begin() + split);
-
- // Skip any spaces after the colon.
- do {
- ++split;
- } while (split < lines[i].size() && lines[i][split] == ' ');
-
- string value(lines[i].begin() + split, lines[i].end());
-
- // Remove “Content-encoding: metacube”.
- // TODO: Make case-insensitive.
- if (key == "Content-encoding" && value == "metacube") {
- continue;
- }
-
- parameters.insert(make_pair(key, value));
+ // Remove “Content-encoding: metacube”.
+ const auto encoding_it = parameters.find("Content-Encoding");
+ if (encoding_it != parameters.end() && encoding_it->second == "metacube") {
+ parameters.erase(encoding_it);
}
// Change “Server: foo” to “Server: metacube/0.1 (reflecting: foo)”
- // TODO: Make case-insensitive.
// XXX: Use a Via: instead?
if (parameters.count("Server") == 0) {
parameters.insert(make_pair("Server", SERVER_IDENTIFICATION));
} else {
- for (multimap<string, string>::iterator it = parameters.begin();
- it != parameters.end();
- ++it) {
- if (it->first != "Server") {
+ for (auto &key_and_value : parameters) {
+ if (key_and_value.first != "Server") {
continue;
}
- it->second = SERVER_IDENTIFICATION " (reflecting: " + it->second + ")";
+ key_and_value.second = SERVER_IDENTIFICATION " (reflecting: " + key_and_value.second + ")";
}
}
- // Set “Connection: close”.
- // TODO: Make case-insensitive.
+ // Erase “Connection: close”; we'll set it on the sending side if needed.
parameters.erase("Connection");
- parameters.insert(make_pair("Connection", "close"));
// Construct the new HTTP header.
http_header = "HTTP/1.0 200 OK\r\n";
- for (multimap<string, string>::iterator it = parameters.begin();
- it != parameters.end();
- ++it) {
- http_header.append(it->first + ": " + it->second + "\r\n");
+ for (const auto &key_and_value : parameters) {
+ http_header.append(key_and_value.first + ": " + key_and_value.second + "\r\n");
}
- for (size_t i = 0; i < stream_indices.size(); ++i) {
- servers->set_header(stream_indices[i], http_header, stream_header);
+ for (int stream_index : stream_indices) {
+ servers->set_header(stream_index, http_header, stream_header);
}
return true;
// TODO: Make the timeout persist across restarts.
if (state == SENDING_REQUEST || state == RECEIVING_HEADER || state == RECEIVING_DATA) {
- int err = clock_gettime(CLOCK_MONOTONIC, &last_activity);
+ int err = clock_gettime(CLOCK_MONOTONIC_COARSE, &last_activity);
assert(err != -1);
}
static const int timeout_secs = 30;
timespec now;
- int err = clock_gettime(CLOCK_MONOTONIC, &now);
+ int err = clock_gettime(CLOCK_MONOTONIC_COARSE, &now);
assert(err != -1);
timespec elapsed = clock_diff(last_activity, now);
if (elapsed.tv_sec >= timeout_secs) {
// Timeout!
- log(ERROR, "[%s] Timeout after %d seconds, closing.", url.c_str(), elapsed.tv_sec);
+ if (!suppress_logging) {
+ log(ERROR, "[%s] Timeout after %d seconds, closing.", url.c_str(), elapsed.tv_sec);
+ }
state = CLOSING_SOCKET;
continue;
}
bool activity = wait_for_activity(sock, (state == SENDING_REQUEST) ? POLLOUT : POLLIN, &timeout);
if (activity) {
- err = clock_gettime(CLOCK_MONOTONIC, &last_activity);
+ err = clock_gettime(CLOCK_MONOTONIC_COARSE, &last_activity);
assert(err != -1);
} else {
// OK. Most likely, should_stop was set, or we have timed out.
response.clear();
pending_data.clear();
has_metacube_header = false;
- for (size_t i = 0; i < stream_indices.size(); ++i) {
- servers->set_header(stream_indices[i], "", "");
+ for (int stream_index : stream_indices) {
+ // Don't zero out the header; it might still be of use to HLS clients.
+ servers->set_unavailable(stream_index);
}
{
- string protocol; // Thrown away.
- if (!parse_url(url, &protocol, &host, &port, &path)) {
- log(WARNING, "[%s] Failed to parse URL '%s'", url.c_str(), url.c_str());
+ string protocol, user; // Thrown away.
+ if (!parse_url(url, &protocol, &user, &host, &port, &path)) {
+ if (!suppress_logging) {
+ log(WARNING, "[%s] Failed to parse URL '%s'", url.c_str(), url.c_str());
+ }
break;
}
+
+ // Remove the brackets around IPv6 address literals.
+ // TODO: See if we can join this with the code in parse_ip_address(),
+	// or maybe even move it into parse_url().
+ if (!host.empty() && host[0] == '[' && host[host.size() - 1] == ']') {
+ host = host.substr(1, host.size() - 2);
+ }
}
+ if (suppress_logging) {
+ // See if there's more than one minute since last time we made a connection
+ // with logging enabled. If so, turn it on again.
+ timespec now;
+ int err = clock_gettime(CLOCK_MONOTONIC_COARSE, &now);
+ assert(err != -1);
+
+ double elapsed = now.tv_sec - last_verbose_connection.tv_sec +
+ 1e-9 * (now.tv_nsec - last_verbose_connection.tv_nsec);
+ if (elapsed > 60.0) {
+ suppress_logging = false;
+ }
+ }
+ if (!suppress_logging) {
+ int err = clock_gettime(CLOCK_MONOTONIC_COARSE, &last_verbose_connection);
+ assert(err != -1);
+ }
+ ++num_connection_attempts;
sock = lookup_and_connect(host, port);
if (sock != -1) {
// Yay, successful connect. Try to set it as nonblocking.
state = CLOSING_SOCKET;
} else {
state = SENDING_REQUEST;
- request = "GET " + path + " HTTP/1.0\r\nUser-Agent: cubemap\r\n\r\n";
+ request = "GET " + path + " HTTP/1.0\r\nHost: " + host_header(host, port) + "\r\nUser-Agent: cubemap\r\n\r\n";
request_bytes_sent = 0;
}
- MutexLock lock(&stats_mutex);
- stats.connect_time = time(NULL);
- clock_gettime(CLOCK_MONOTONIC, &last_activity);
+ lock_guard<mutex> lock(stats_mutex);
+ stats.connect_time = time(nullptr);
+ clock_gettime(CLOCK_MONOTONIC_COARSE, &last_activity);
}
break;
case SENDING_REQUEST: {
if (ret == 0) {
// This really shouldn't happen...
- log(ERROR, "[%s] Socket unexpectedly closed while reading header",
- url.c_str());
+ if (!suppress_logging) {
+ log(ERROR, "[%s] Socket unexpectedly closed while reading header",
+ url.c_str());
+ }
state = CLOSING_SOCKET;
continue;
}
RequestParseStatus status = wait_for_double_newline(&response, buf, ret);
if (status == RP_OUT_OF_SPACE) {
- log(WARNING, "[%s] Sever sent overlong HTTP response!", url.c_str());
+ if (!suppress_logging) {
+ log(WARNING, "[%s] Server sent overlong HTTP response!", url.c_str());
+ }
state = CLOSING_SOCKET;
continue;
} else if (status == RP_NOT_FINISHED_YET) {
if (status == RP_EXTRA_DATA) {
char *ptr = static_cast<char *>(
memmem(response.data(), response.size(), "\r\n\r\n", 4));
- assert(ptr != NULL);
+ assert(ptr != nullptr);
extra_data = string(ptr + 4, &response[0] + response.size());
response.resize(ptr - response.data());
}
process_data(&extra_data[0], extra_data.size());
}
- log(INFO, "[%s] Connected to '%s', receiving data.",
- url.c_str(), url.c_str());
+ if (!suppress_logging) {
+ if (encoding == Input::INPUT_ENCODING_RAW) {
+ log(INFO, "[%s] Connected to '%s', receiving raw data.",
+ url.c_str(), url.c_str());
+ } else {
+ assert(encoding == Input::INPUT_ENCODING_METACUBE);
+ log(INFO, "[%s] Connected to '%s', receiving data.",
+ url.c_str(), url.c_str());
+ }
+ }
state = RECEIVING_DATA;
break;
}
if (ret == 0) {
// This really shouldn't happen...
- log(ERROR, "[%s] Socket unexpectedly closed while reading data",
- url.c_str());
+ if (!suppress_logging) {
+ log(ERROR, "[%s] Socket unexpectedly closed while reading data",
+ url.c_str());
+ }
state = CLOSING_SOCKET;
continue;
}
+ num_connection_attempts = 0; // Reset, since we have a successful read.
+ if (suppress_logging) {
+ // This was suppressed earlier, so print it out now.
+ if (encoding == Input::INPUT_ENCODING_RAW) {
+ log(INFO, "[%s] Connected to '%s', receiving raw data.",
+ url.c_str(), url.c_str());
+ } else {
+ assert(encoding == Input::INPUT_ENCODING_METACUBE);
+ log(INFO, "[%s] Connected to '%s', receiving data.",
+ url.c_str(), url.c_str());
+ }
+ suppress_logging = false;
+ }
+
process_data(buf, ret);
break;
}
// or the connection just got closed.
// The earlier steps have already given the error message, if any.
if (state == NOT_CONNECTED && !should_stop()) {
- log(INFO, "[%s] Waiting 0.2 second and restarting...", url.c_str());
+ if (!suppress_logging) {
+ log(INFO, "[%s] Waiting 0.2 seconds and restarting...", url.c_str());
+ }
+
+ if (num_connection_attempts >= 3 && !suppress_logging) {
+ log(INFO, "[%s] %d failed connection attempts, suppressing logging for one minute.",
+ url.c_str(), num_connection_attempts);
+ suppress_logging = true;
+ }
timespec timeout_ts;
timeout_ts.tv_sec = 0;
timeout_ts.tv_nsec = 200000000;
void HTTPInput::process_data(char *ptr, size_t bytes)
{
- pending_data.insert(pending_data.end(), ptr, ptr + bytes);
{
- MutexLock mutex(&stats_mutex);
+ lock_guard<mutex> lock(stats_mutex);
stats.bytes_received += bytes;
}
+ if (encoding == Input::INPUT_ENCODING_RAW) {
+ for (int stream_index : stream_indices) {
+ servers->add_data(stream_index, ptr, bytes, /*metacube_flags=*/0, /*pts=*/RationalPTS());
+ }
+ return;
+ }
+
+ assert(encoding == Input::INPUT_ENCODING_METACUBE);
+ pending_data.insert(pending_data.end(), ptr, ptr + bytes);
+
for ( ;; ) {
// If we don't have enough data (yet) for even the Metacube header, just return.
if (pending_data.size() < sizeof(metacube2_block_header)) {
char *ptr = static_cast<char *>(
memmem(pending_data.data(), pending_data.size(),
METACUBE2_SYNC, strlen(METACUBE2_SYNC)));
- if (ptr == NULL) {
+ if (ptr == nullptr) {
// OK, so we didn't find the sync marker. We know then that
// we do not have the _full_ marker in the buffer, but we
// could have N-1 bytes. Drop everything before that,
}
// Now it's safe to read the header.
- metacube2_block_header *hdr = reinterpret_cast<metacube2_block_header *>(pending_data.data());
- assert(memcmp(hdr->sync, METACUBE2_SYNC, sizeof(hdr->sync)) == 0);
- uint32_t size = ntohl(hdr->size);
- uint16_t flags = ntohs(hdr->flags);
- uint16_t expected_csum = metacube2_compute_crc(hdr);
-
- if (expected_csum != ntohs(hdr->csum)) {
+ metacube2_block_header hdr;
+ memcpy(&hdr, pending_data.data(), sizeof(hdr));
+ assert(memcmp(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync)) == 0);
+ uint32_t size = ntohl(hdr.size);
+ uint16_t flags = ntohs(hdr.flags);
+ uint16_t expected_csum = metacube2_compute_crc(&hdr);
+
+ if (expected_csum != ntohs(hdr.csum)) {
log(WARNING, "[%s] Metacube checksum failed (expected 0x%x, got 0x%x), "
"not reading block claiming to be %d bytes (flags=%x).",
- url.c_str(), expected_csum, ntohs(hdr->csum),
+ url.c_str(), expected_csum, ntohs(hdr.csum),
size, flags);
// Drop only the first byte, and let the rest of the code handle resync.
has_metacube_header = false;
continue;
}
- if (size > 262144) {
- log(WARNING, "[%s] Metacube block of %d bytes (flags=%x); corrupted header?",
+ if (size > 10485760) {
+ log(WARNING, "[%s] Metacube block of %d bytes (flags=%x); corrupted header??",
url.c_str(), size, flags);
}
return;
}
- // Send this block on to the servers.
- {
- MutexLock lock(&stats_mutex);
- stats.data_bytes_received += size;
- }
- char *inner_data = pending_data.data() + sizeof(metacube2_block_header);
- if (flags & METACUBE_FLAGS_HEADER) {
- stream_header = string(inner_data, inner_data + size);
- for (size_t i = 0; i < stream_indices.size(); ++i) {
- servers->set_header(stream_indices[i], http_header, stream_header);
+ // See if this is a metadata block. If so, we don't want to send it on,
+ // but rather process it ourselves.
+ // TODO: Keep metadata when sending on to other Metacube users.
+ if (flags & METACUBE_FLAGS_METADATA) {
+ {
+ lock_guard<mutex> lock(stats_mutex);
+ stats.metadata_bytes_received += size;
}
+ process_metacube_metadata_block(hdr, pending_data.data() + sizeof(hdr), size);
} else {
- StreamStartSuitability suitable_for_stream_start;
- if (flags & METACUBE_FLAGS_NOT_SUITABLE_FOR_STREAM_START) {
- suitable_for_stream_start = NOT_SUITABLE_FOR_STREAM_START;
- } else {
- suitable_for_stream_start = SUITABLE_FOR_STREAM_START;
+ // Send this block on to the servers.
+ {
+ lock_guard<mutex> lock(stats_mutex);
+ stats.data_bytes_received += size;
+ }
+ char *inner_data = pending_data.data() + sizeof(metacube2_block_header);
+ if (flags & METACUBE_FLAGS_HEADER) {
+ stream_header = string(inner_data, inner_data + size);
+ for (int stream_index : stream_indices) {
+ servers->set_header(stream_index, http_header, stream_header);
+ }
}
- for (size_t i = 0; i < stream_indices.size(); ++i) {
- servers->add_data(stream_indices[i], inner_data, size, suitable_for_stream_start);
+ for (int stream_index : stream_indices) {
+ servers->add_data(stream_index, inner_data, size, flags, next_block_pts);
}
+ next_block_pts = RationalPTS();
}
// Consume the block. This isn't the most efficient way of dealing with things
if (num_bytes == 0) {
return;
}
- log(WARNING, "[%s] Dropping %lld junk bytes from stream, maybe it is not a Metacube2 stream?",
+ log(WARNING, "[%s] Dropping %lld junk bytes; not a Metacube2 stream, or data was dropped from the middle of the stream.",
url.c_str(), (long long)num_bytes);
assert(pending_data.size() >= num_bytes);
pending_data.erase(pending_data.begin(), pending_data.begin() + num_bytes);
InputStats HTTPInput::get_stats() const
{
- MutexLock lock(&stats_mutex);
+ lock_guard<mutex> lock(stats_mutex);
return stats;
}
+
+void HTTPInput::process_metacube_metadata_block(const metacube2_block_header &hdr, const char *payload, uint32_t payload_size)
+{
+ if (payload_size < sizeof(uint64_t)) {
+ log(WARNING, "[%s] Undersized Metacube metadata block (%d bytes); corrupted header?",
+ url.c_str(), payload_size);
+ return;
+ }
+
+ uint64_t type = be64toh(*(const uint64_t *)payload);
+ if (type == METACUBE_METADATA_TYPE_ENCODER_TIMESTAMP) {
+ timespec now;
+ clock_gettime(CLOCK_REALTIME, &now);
+
+ const metacube2_timestamp_packet *pkt = (const metacube2_timestamp_packet *)payload;
+ if (payload_size != sizeof(*pkt)) {
+ log(WARNING, "[%s] Metacube timestamp block of wrong size (%d bytes); ignoring.",
+ url.c_str(), payload_size);
+ return;
+ }
+
+ double elapsed = now.tv_sec - be64toh(pkt->tv_sec) +
+ 1e-9 * (now.tv_nsec - long(be64toh(pkt->tv_nsec)));
+ {
+ lock_guard<mutex> lock(stats_mutex);
+ stats.latency_sec = elapsed;
+ }
+ } else if (type == METACUBE_METADATA_TYPE_NEXT_BLOCK_PTS) {
+ const metacube2_pts_packet *pkt = (const metacube2_pts_packet *)payload;
+ if (payload_size != sizeof(*pkt)) {
+ log(WARNING, "[%s] Metacube pts block of wrong size (%d bytes); ignoring.",
+ url.c_str(), payload_size);
+ return;
+ }
+ next_block_pts.pts = be64toh(pkt->pts);
+ next_block_pts.timebase_num = be64toh(pkt->timebase_num);
+ next_block_pts.timebase_den = be64toh(pkt->timebase_den);
+ } else {
+ // Unknown metadata block, ignore
+ log(INFO, "[%s] Metadata block %llu\n", url.c_str(), type);
+ return;
+ }
+}