X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=server.cpp;h=852ecddf1f6212bfb1efb93d050d4fc37473a673;hb=a8c43c314bbda25fa53b487042b4f8f85cb34c2d;hp=7f07483390a8b2846013e594668831b48d5b884b;hpb=b10d63c534fda113e65e24d252e509e616067ef9;p=cubemap

diff --git a/server.cpp b/server.cpp
index 7f07483..852ecdd 100644
--- a/server.cpp
+++ b/server.cpp
@@ -23,7 +23,6 @@
 #include "accesslog.h"
 #include "log.h"
 #include "metacube2.h"
-#include "mutexlock.h"
 #include "parse.h"
 #include "server.h"
 #include "state.pb.h"
@@ -57,9 +56,6 @@ inline bool is_earlier(timespec a, timespec b)
 
 Server::Server()
 {
-	pthread_mutex_init(&mutex, NULL);
-	pthread_mutex_init(&queued_clients_mutex, NULL);
-
 	epoll_fd = epoll_create(1024);  // Size argument is ignored.
 	if (epoll_fd == -1) {
 		log_perror("epoll_fd");
@@ -69,10 +65,6 @@ Server::Server()
 
 Server::~Server()
 {
-	for (size_t i = 0; i < streams.size(); ++i) {
-		delete streams[i];
-	}
-
 	safe_close(epoll_fd);
 }
 
@@ -80,11 +72,9 @@ vector<ClientStats> Server::get_client_stats() const
 {
 	vector<ClientStats> ret;
 
-	MutexLock lock(&mutex);
-	for (map<int, Client>::const_iterator client_it = clients.begin();
-	     client_it != clients.end();
-	     ++client_it) {
-		ret.push_back(client_it->second.get_stats());
+	lock_guard<mutex> lock(mu);
+	for (const auto &fd_and_client : clients) {
+		ret.push_back(fd_and_client.second.get_stats());
 	}
 	return ret;
 }
@@ -106,13 +96,13 @@ void Server::do_work()
 		exit(1);
 	}
 
-	MutexLock lock(&mutex);  // We release the mutex between iterations.
+	lock_guard<mutex> lock(mu);  // We release the mutex between iterations.
 
 	process_queued_data();
 
 	// Process each client where we have socket activity.
 	for (int i = 0; i < nfds; ++i) {
-		Client *client = reinterpret_cast<Client *>(events[i].data.u64);
+		Client *client = reinterpret_cast<Client *>(events[i].data.ptr);
 
 		if (events[i].events & (EPOLLERR | EPOLLRDHUP | EPOLLHUP)) {
 			close_client(client);
@@ -124,11 +114,11 @@
 
 	// Process each client where its stream has new data,
 	// even if there was no socket activity.
-	for (size_t i = 0; i < streams.size(); ++i) {
+	for (unique_ptr<Stream> &stream : streams) {
 		vector<Client *> to_process;
-		swap(streams[i]->to_process, to_process);
-		for (size_t i = 0; i < to_process.size(); ++i) {
-			process_client(to_process[i]);
+		swap(stream->to_process, to_process);
+		for (Client *client : to_process) {
+			process_client(client);
 		}
 	}
 
@@ -152,7 +142,7 @@
 
 		// If this client doesn't exist anymore, just ignore it
 		// (it was deleted earlier).
-		map<int, Client>::iterator client_it = clients.find(connect_time_and_fd.second);
+		auto client_it = clients.find(connect_time_and_fd.second);
 		if (client_it == clients.end()) {
 			clients_ordered_by_connect_time.pop();
 			continue;
 		}
@@ -189,37 +179,32 @@ CubemapStateProto Server::serialize()
 	//
 	// TODO: Do this when clients are added back from serialized state instead;
 	// it would probably be less wasteful.
-	for (map<int, Client>::iterator client_it = clients.begin();
-	     client_it != clients.end();
-	     ++client_it) {
-		skip_lost_data(&client_it->second);
+	for (auto &fd_and_client : clients) {
+		skip_lost_data(&fd_and_client.second);
 	}
 
 	CubemapStateProto serialized;
-	for (map<int, Client>::const_iterator client_it = clients.begin();
-	     client_it != clients.end();
-	     ++client_it) {
-		serialized.add_clients()->MergeFrom(client_it->second.serialize());
+	for (const auto &fd_and_client : clients) {
+		serialized.add_clients()->MergeFrom(fd_and_client.second.serialize());
 	}
-	for (size_t i = 0; i < streams.size(); ++i) {
-		serialized.add_streams()->MergeFrom(streams[i]->serialize());
+	for (unique_ptr<Stream> &stream : streams) {
+		serialized.add_streams()->MergeFrom(stream->serialize());
 	}
 	return serialized;
 }
 
 void Server::add_client_deferred(int sock, Acceptor *acceptor)
 {
-	MutexLock lock(&queued_clients_mutex);
+	lock_guard<mutex> lock(queued_clients_mutex);
 	queued_add_clients.push_back(std::make_pair(sock, acceptor));
 }
 
 void Server::add_client(int sock, Acceptor *acceptor)
 {
 	const bool is_tls = acceptor->is_tls();
-	pair<map<int, Client>::iterator, bool> ret =
-		clients.insert(make_pair(sock, Client(sock)));
-	assert(ret.second == true);  // Should not already exist.
-	Client *client_ptr = &ret.first->second;
+	auto inserted = clients.insert(make_pair(sock, Client(sock)));
+	assert(inserted.second == true);  // Should not already exist.
+	Client *client_ptr = &inserted.first->second;
 
 	// Connection timestamps must be nondecreasing. I can't find any guarantee
 	// that even the monotonic clock can't go backwards by a small amount
@@ -243,7 +228,7 @@ void Server::add_client(int sock, Acceptor *acceptor)
 		// EPOLLOUT will be added once we go out of READING_REQUEST.
 		ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
 	}
-	ev.data.u64 = reinterpret_cast<uint64_t>(client_ptr);
+	ev.data.ptr = client_ptr;
 	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock, &ev) == -1) {
 		log_perror("epoll_ctl(EPOLL_CTL_ADD)");
 		exit(1);
@@ -252,7 +237,7 @@
 	if (is_tls) {
 		assert(tls_server_contexts.count(acceptor));
 		client_ptr->tls_context = tls_accept(tls_server_contexts[acceptor]);
-		if (client_ptr->tls_context == NULL) {
+		if (client_ptr->tls_context == nullptr) {
 			log(ERROR, "tls_accept() failed");
 			close_client(client_ptr);
 			return;
@@ -265,19 +250,18 @@
 
 void Server::add_client_from_serialized(const ClientProto &client)
 {
-	MutexLock lock(&mutex);
+	lock_guard<mutex> lock(mu);
 	Stream *stream;
 	int stream_index = lookup_stream_by_url(client.url());
 	if (stream_index == -1) {
 		assert(client.state() != Client::SENDING_DATA);
-		stream = NULL;
+		stream = nullptr;
 	} else {
-		stream = streams[stream_index];
+		stream = streams[stream_index].get();
 	}
-	pair<map<int, Client>::iterator, bool> ret =
-		clients.insert(make_pair(client.sock(), Client(client, stream)));
-	assert(ret.second == true);  // Should not already exist.
-	Client *client_ptr = &ret.first->second;
+	auto inserted = clients.insert(make_pair(client.sock(), Client(client, stream)));
+	assert(inserted.second == true);  // Should not already exist.
+	Client *client_ptr = &inserted.first->second;
 
 	// Connection timestamps must be nondecreasing.
 	assert(clients_ordered_by_connect_time.empty() ||
@@ -298,7 +282,7 @@ void Server::add_client_from_serialized(const ClientProto &client)
 		// the sleeping array again soon.
 		ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
 	}
-	ev.data.u64 = reinterpret_cast<uint64_t>(client_ptr);
+	ev.data.ptr = client_ptr;
 	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, client.sock(), &ev) == -1) {
 		log_perror("epoll_ctl(EPOLL_CTL_ADD)");
 		exit(1);
@@ -325,51 +309,51 @@ int Server::lookup_stream_by_url(const string &url) const
 
 int Server::add_stream(const string &url, size_t backlog_size, size_t prebuffering_bytes, Stream::Encoding encoding, Stream::Encoding src_encoding)
 {
-	MutexLock lock(&mutex);
+	lock_guard<mutex> lock(mu);
 	stream_url_map.insert(make_pair(url, streams.size()));
-	streams.push_back(new Stream(url, backlog_size, prebuffering_bytes, encoding, src_encoding));
+	streams.emplace_back(new Stream(url, backlog_size, prebuffering_bytes, encoding, src_encoding));
 	return streams.size() - 1;
 }
 
 int Server::add_stream_from_serialized(const StreamProto &stream, int data_fd)
 {
-	MutexLock lock(&mutex);
+	lock_guard<mutex> lock(mu);
 	stream_url_map.insert(make_pair(stream.url(), streams.size()));
-	streams.push_back(new Stream(stream, data_fd));
+	streams.emplace_back(new Stream(stream, data_fd));
 	return streams.size() - 1;
 }
 
 void Server::set_backlog_size(int stream_index, size_t new_size)
 {
-	MutexLock lock(&mutex);
+	lock_guard<mutex> lock(mu);
 	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
 	streams[stream_index]->set_backlog_size(new_size);
 }
 
 void Server::set_prebuffering_bytes(int stream_index, size_t new_amount)
 {
-	MutexLock lock(&mutex);
+	lock_guard<mutex> lock(mu);
 	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
 	streams[stream_index]->prebuffering_bytes = new_amount;
 }
 
 void Server::set_encoding(int stream_index, Stream::Encoding encoding)
 {
-	MutexLock lock(&mutex);
+	lock_guard<mutex> lock(mu);
 	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
 	streams[stream_index]->encoding = encoding;
 }
 
 void Server::set_src_encoding(int stream_index, Stream::Encoding encoding)
 {
-	MutexLock lock(&mutex);
+	lock_guard<mutex> lock(mu);
 	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
 	streams[stream_index]->src_encoding = encoding;
 }
 
 void Server::set_header(int stream_index, const string &http_header, const string &stream_header)
 {
-	MutexLock lock(&mutex);
+	lock_guard<mutex> lock(mu);
 	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
 	streams[stream_index]->http_header = http_header;
@@ -388,7 +372,7 @@ void Server::set_header(int stream_index, const string &http_header, const strin
 
 void Server::set_pacing_rate(int stream_index, uint32_t pacing_rate)
 {
-	MutexLock lock(&mutex);
+	lock_guard<mutex> lock(mu);
 	assert(clients.empty());
 	assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
 	streams[stream_index]->pacing_rate = pacing_rate;
@@ -396,7 +380,7 @@
 
 void Server::add_gen204(const std::string &url, const std::string &allow_origin)
 {
-	MutexLock lock(&mutex);
+	lock_guard<mutex> lock(mu);
 	assert(clients.empty());
 	ping_url_map[url] = allow_origin;
 }
@@ -430,7 +414,7 @@ void Server::process_client(Client *client)
 {
 	switch (client->state) {
 	case Client::READING_REQUEST: {
-		if (client->tls_context != NULL) {
+		if (client->tls_context != nullptr) {
 			if (send_pending_tls_data(client)) {
 				// send_pending_tls_data() hit postconditions #1 or #4.
 				return;
@@ -441,7 +425,7 @@ read_request_again:
 		// Try to read more of the request.
 		char buf[1024];
 		int ret;
-		if (client->tls_context == NULL) {
+		if (client->tls_context == nullptr) {
 			ret = read_nontls_data(client, buf, sizeof(buf));
 			if (ret == -1) {
 				// read_nontls_data() hit postconditions #1 or #2.
@@ -673,9 +657,9 @@ sending_data_again:
 bool Server::send_pending_tls_data(Client *client)
 {
 	// See if there's data from the TLS library to write.
-	if (client->tls_data_to_send == NULL) {
+	if (client->tls_data_to_send == nullptr) {
 		client->tls_data_to_send = tls_get_write_buffer(client->tls_context, &client->tls_data_left_to_send);
-		if (client->tls_data_to_send == NULL) {
+		if (client->tls_data_to_send == nullptr) {
 			// Really no data to send.
 			return false;
 		}
@@ -704,7 +688,7 @@ send_data_again:
 	if (ret > 0 && size_t(ret) == client->tls_data_left_to_send) {
 		// All data has been sent, so we don't need to go to sleep.
 		tls_buffer_clear(client->tls_context);
-		client->tls_data_to_send = NULL;
+		client->tls_data_to_send = nullptr;
 		return false;
 	}
 
@@ -802,7 +786,7 @@ read_again:
 void Server::skip_lost_data(Client *client)
 {
 	Stream *stream = client->stream;
-	if (stream == NULL) {
+	if (stream == nullptr) {
 		return;
 	}
 	size_t bytes_to_send = stream->bytes_received - client->stream_pos;
@@ -860,7 +844,7 @@ int Server::parse_request(Client *client)
 		}
 	}
 
-	Stream *stream = streams[stream_url_map_it->second];
+	Stream *stream = streams[stream_url_map_it->second].get();
 	if (stream->http_header.empty()) {
 		return 503;  // Service unavailable.
 	}
@@ -903,15 +887,7 @@ void Server::construct_header(Client *client)
 
 	// Switch states.
 	client->state = Client::SENDING_HEADER;
-
-	epoll_event ev;
-	ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
-	ev.data.u64 = reinterpret_cast<uint64_t>(client);
-
-	if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
-		log_perror("epoll_ctl(EPOLL_CTL_MOD)");
-		exit(1);
-	}
+	change_epoll_events(client, EPOLLOUT | EPOLLET | EPOLLRDHUP);
 }
 
 void Server::construct_error(Client *client, int error_code)
@@ -923,15 +899,7 @@
 
 	// Switch states.
 	client->state = Client::SENDING_SHORT_RESPONSE;
-
-	epoll_event ev;
-	ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
-	ev.data.u64 = reinterpret_cast<uint64_t>(client);
-
-	if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
-		log_perror("epoll_ctl(EPOLL_CTL_MOD)");
-		exit(1);
-	}
+	change_epoll_events(client, EPOLLOUT | EPOLLET | EPOLLRDHUP);
 }
 
 void Server::construct_204(Client *client)
@@ -955,15 +923,7 @@
 	// Switch states.
 	client->state = Client::SENDING_SHORT_RESPONSE;
-
-	epoll_event ev;
-	ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
-	ev.data.u64 = reinterpret_cast<uint64_t>(client);
-
-	if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
-		log_perror("epoll_ctl(EPOLL_CTL_MOD)");
-		exit(1);
-	}
+	change_epoll_events(client, EPOLLOUT | EPOLLET | EPOLLRDHUP);
 }
 
 template<class T>
 void delete_from(vector<T> *v, T elem)
@@ -975,13 +935,13 @@
 
 void Server::close_client(Client *client)
 {
-	if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, client->sock, NULL) == -1) {
+	if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, client->sock, nullptr) == -1) {
 		log_perror("epoll_ctl(EPOLL_CTL_DEL)");
 		exit(1);
 	}
 
 	// This client could be sleeping, so we'll need to fix that. (Argh, O(n).)
-	if (client->stream != NULL) {
+	if (client->stream != nullptr) {
 		delete_from(&client->stream->sleeping_clients, client);
 		delete_from(&client->stream->to_process, client);
 	}
@@ -998,19 +958,31 @@
 
 	clients.erase(client->sock);
 }
-
+
+void Server::change_epoll_events(Client *client, uint32_t events)
+{
+	epoll_event ev;
+	ev.events = events;
+	ev.data.ptr = client;
+
+	if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
+		log_perror("epoll_ctl(EPOLL_CTL_MOD)");
+		exit(1);
+	}
+}
+
 void Server::process_queued_data()
 {
 	{
-		MutexLock lock(&queued_clients_mutex);
+		lock_guard<mutex> lock(queued_clients_mutex);
 
-		for (size_t i = 0; i < queued_add_clients.size(); ++i) {
-			add_client(queued_add_clients[i].first, queued_add_clients[i].second);
+		for (const pair<int, Acceptor *> &id_and_acceptor : queued_add_clients) {
+			add_client(id_and_acceptor.first, id_and_acceptor.second);
 		}
 		queued_add_clients.clear();
 	}
 
-	for (size_t i = 0; i < streams.size(); ++i) {
-		streams[i]->process_queued_data();
+	for (unique_ptr<Stream> &stream : streams) {
+		stream->process_queued_data();
 	}
 }
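
---

Note on the recurring conversions in this patch: the homegrown MutexLock wrapper around pthread mutexes becomes std::lock_guard over std::mutex, raw owned Stream pointers become vector<unique_ptr<Stream>>, and the Client pointer stored in epoll's per-fd user data moves from data.u64 (with a reinterpret_cast on both ends) to data.ptr, with the three identical EPOLL_CTL_MOD blocks folded into the new Server::change_epoll_events() helper. The stand-alone sketch below illustrates the epoll side of that pattern. It is illustrative only, not cubemap code: Conn and change_events() are hypothetical names standing in for Client and Server::change_epoll_events().

    #include <sys/epoll.h>
    #include <sys/socket.h>
    #include <unistd.h>
    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    struct Conn {
    	int sock;  // Stand-in for cubemap's per-connection Client state.
    };

    // Counterpart of the change_epoll_events() helper the patch introduces:
    // one place that rebuilds the epoll_event and issues EPOLL_CTL_MOD.
    static void change_events(int epoll_fd, Conn *conn, uint32_t events)
    {
    	epoll_event ev;
    	ev.events = events;
    	ev.data.ptr = conn;  // No u64/reinterpret_cast round trip needed.
    	if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, conn->sock, &ev) == -1) {
    		perror("epoll_ctl(EPOLL_CTL_MOD)");
    		exit(1);
    	}
    }

    int main()
    {
    	int fds[2];
    	if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == -1) {
    		perror("socketpair");
    		return 1;
    	}
    	int epoll_fd = epoll_create1(0);
    	if (epoll_fd == -1) {
    		perror("epoll_create1");
    		return 1;
    	}

    	Conn conn;
    	conn.sock = fds[0];

    	// Register the fd with the object pointer stored directly in data.ptr.
    	epoll_event ev;
    	ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
    	ev.data.ptr = &conn;
    	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, conn.sock, &ev) == -1) {
    		perror("epoll_ctl(EPOLL_CTL_ADD)");
    		return 1;
    	}

    	write(fds[1], "x", 1);  // Make conn.sock readable so epoll_wait() fires.

    	epoll_event events[16];
    	int nfds = epoll_wait(epoll_fd, events, 16, -1);
    	for (int i = 0; i < nfds; ++i) {
    		// Recover the object without any integer cast.
    		Conn *c = static_cast<Conn *>(events[i].data.ptr);
    		printf("activity on sock %d\n", c->sock);
    		// Once a response is queued, interest flips to writability,
    		// as construct_header() etc. now do via the shared helper.
    		change_events(epoll_fd, c, EPOLLOUT | EPOLLET | EPOLLRDHUP);
    	}

    	close(fds[0]);
    	close(fds[1]);
    	close(epoll_fd);
    }

Since epoll_data is a union the kernel never interprets, ptr and u64 occupy the same storage; using ptr merely drops the casts and documents that the field holds an object pointer.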