X-Git-Url: https://git.sesse.net/?p=cubemap;a=blobdiff_plain;f=server.cpp;h=f47de543e2b69a1350138ab0cb3c0f979fe07484;hp=d6b5b5b8b783446ce08e87972b34430b9bf4e547;hb=0dd17aefa9103f0ad53e19d1ee08771304512886;hpb=a31f0050d6f82fdfec7218dfbe86f777777e3029

diff --git a/server.cpp b/server.cpp
index d6b5b5b..f47de54 100644
--- a/server.cpp
+++ b/server.cpp
@@ -13,13 +13,79 @@
 #include
 #include
 #include
+#include
 
 #include "metacube.h"
 #include "server.h"
 #include "mutexlock.h"
+#include "state.pb.h"
 
 using namespace std;
 
+Client::Client(int sock)
+	: sock(sock),
+	  state(Client::READING_REQUEST),
+	  header_bytes_sent(0),
+	  bytes_sent(0)
+{
+	request.reserve(1024);
+}
+
+Client::Client(const ClientProto &serialized)
+	: sock(serialized.sock()),
+	  state(State(serialized.state())),
+	  request(serialized.request()),
+	  stream_id(serialized.stream_id()),
+	  header(serialized.header()),
+	  header_bytes_sent(serialized.header_bytes_sent()),
+	  bytes_sent(serialized.bytes_sent())
+{
+}
+
+ClientProto Client::serialize() const
+{
+	ClientProto serialized;
+	serialized.set_sock(sock);
+	serialized.set_state(state);
+	serialized.set_request(request);
+	serialized.set_stream_id(stream_id);
+	serialized.set_header(header);
+	serialized.set_header_bytes_sent(header_bytes_sent);
+	serialized.set_bytes_sent(bytes_sent);
+	return serialized;
+}
+
+Stream::Stream(const string &stream_id)
+	: stream_id(stream_id),
+	  data(new char[BACKLOG_SIZE]),
+	  data_size(0)
+{
+	memset(data, 0, BACKLOG_SIZE);
+}
+
+Stream::~Stream()
+{
+	delete[] data;
+}
+
+Stream::Stream(const StreamProto &serialized)
+	: header(serialized.header()),
+	  data(new char[BACKLOG_SIZE]),
+	  data_size(serialized.data_size())
+{
+	assert(serialized.data().size() == BACKLOG_SIZE);
+	memcpy(data, serialized.data().data(), BACKLOG_SIZE);
+}
+
+StreamProto Stream::serialize() const
+{
+	StreamProto serialized;
+	serialized.set_header(header);
+	serialized.set_data(string(data, data + BACKLOG_SIZE));
+	serialized.set_data_size(data_size);
+	return serialized;
+}
+
 Server::Server()
 {
 	pthread_mutex_init(&mutex, NULL);
@@ -33,8 +99,26 @@ Server::Server()
 
 void Server::run()
 {
-	pthread_t thread;
-	pthread_create(&thread, NULL, Server::do_work_thunk, this);
+	should_stop = false;
+
+	// Joinable is already the default, but it's good to be certain.
+	pthread_attr_t attr;
+	pthread_attr_init(&attr);
+	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+	pthread_create(&worker_thread, &attr, Server::do_work_thunk, this);
+}
+
+void Server::stop()
+{
+	{
+		MutexLock lock(&mutex);
+		should_stop = true;
+	}
+
+	if (pthread_join(worker_thread, NULL) == -1) {
+		perror("pthread_join");
+		exit(1);
+	}
 }
 
 void *Server::do_work_thunk(void *arg)
@@ -48,13 +132,17 @@ void Server::do_work()
 {
 	for ( ;; ) {
 		int nfds = epoll_wait(epoll_fd, events, EPOLL_MAX_EVENTS, EPOLL_TIMEOUT_MS);
-
-		MutexLock lock(&mutex);	// We release the mutex between iterations.
 		if (nfds == -1) {
 			perror("epoll_wait");
 			exit(1);
 		}
-
+
+		MutexLock lock(&mutex);	// We release the mutex between iterations.
+
+		if (should_stop) {
+			return;
+		}
+
 		for (int i = 0; i < nfds; ++i) {
 			int fd = events[i].data.fd;
 			assert(clients.count(fd) != 0);
@@ -69,22 +157,32 @@ void Server::do_work()
 		}
 	}
 }
-
+
+CubemapStateProto Server::serialize() const
+{
+	CubemapStateProto serialized;
+	for (map<int, Client>::const_iterator client_it = clients.begin();
+	     client_it != clients.end();
+	     ++client_it) {
+		serialized.add_clients()->MergeFrom(client_it->second.serialize());
+	}
+	for (map<string, Stream *>::const_iterator stream_it = streams.begin();
+	     stream_it != streams.end();
+	     ++stream_it) {
+		serialized.add_streams()->MergeFrom(stream_it->second->serialize());
+	}
+	return serialized;
+}
+
 void Server::add_client(int sock)
 {
 	MutexLock lock(&mutex);
-	Client new_client;
-	new_client.sock = sock;
-	new_client.client_request.reserve(1024);
-	new_client.state = Client::READING_REQUEST;
-	new_client.header_bytes_sent = 0;
-	new_client.bytes_sent = 0;
-
-	clients.insert(make_pair(sock, new_client));
+	clients.insert(make_pair(sock, Client(sock)));
 
 	// Start listening on data from this socket.
 	epoll_event ev;
 	ev.events = EPOLLIN | EPOLLRDHUP;
+	ev.data.u64 = 0;  // Keep Valgrind happy.
 	ev.data.fd = sock;
 	if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock, &ev) == -1) {
 		perror("epoll_ctl(EPOLL_CTL_ADD)");
@@ -95,19 +193,48 @@ void Server::add_client(int sock)
 void Server::add_stream(const string &stream_id)
 {
 	MutexLock lock(&mutex);
-	streams.insert(make_pair(stream_id, Stream()));
+	streams.insert(make_pair(stream_id, new Stream(stream_id)));
 }
 
 void Server::set_header(const string &stream_id, const string &header)
 {
 	MutexLock lock(&mutex);
-	assert(streams.count(stream_id) != 0);
-	streams[stream_id].header = header;
+	find_stream(stream_id)->header = header;
+
+	// If there are clients we haven't sent anything to yet, we should give
+	// them the header, so push back into the SENDING_HEADER state.
+	for (map<int, Client>::iterator client_it = clients.begin();
+	     client_it != clients.end();
+	     ++client_it) {
+		Client *client = &client_it->second;
+		if (client->state == Client::SENDING_DATA &&
+		    client->bytes_sent == 0) {
+			construct_header(client);
+		}
+	}
 }
 
 void Server::add_data(const string &stream_id, const char *data, size_t bytes)
 {
-	// TODO
+	if (bytes == 0) {
+		return;
+	}
+
+	MutexLock lock(&mutex);
+	Stream *stream = find_stream(stream_id);
+	size_t pos = stream->data_size % BACKLOG_SIZE;
+	stream->data_size += bytes;
+
+	if (pos + bytes > BACKLOG_SIZE) {
+		size_t to_copy = BACKLOG_SIZE - pos;
+		memcpy(stream->data + pos, data, to_copy);
+		data += to_copy;
+		bytes -= to_copy;
+		pos = 0;
+	}
+
+	memcpy(stream->data + pos, data, bytes);
+	wake_up_all_clients();
 }
 
 void Server::process_client(Client *client)
@@ -131,7 +258,7 @@ void Server::process_client(Client *client)
 		}
 
 		// Guard against overlong requests gobbling up all of our space.
-		if (client->client_request.size() + ret > MAX_CLIENT_REQUEST) {
+		if (client->request.size() + ret > MAX_CLIENT_REQUEST) {
 			fprintf(stderr, "WARNING: fd %d sent overlong request!\n", client->sock);
 			close_client(client);
 			return;
@@ -139,25 +266,26 @@ void Server::process_client(Client *client)
 		// See if we have \r\n\r\n anywhere in the request. We start three bytes
 		// before what we just appended, in case we just got the final character.
-		size_t existing_req_bytes = client->client_request.size();
-		client->client_request.append(string(buf, buf + ret));
+		size_t existing_req_bytes = client->request.size();
+		client->request.append(string(buf, buf + ret));
 
 		size_t start_at = (existing_req_bytes >= 3 ? existing_req_bytes - 3 : 0);
 		const char *ptr = reinterpret_cast<const char *>(
-			memmem(client->client_request.data() + start_at, client->client_request.size() - start_at,
+			memmem(client->request.data() + start_at, client->request.size() - start_at,
 			       "\r\n\r\n", 4));
 		if (ptr == NULL) {
 			// OK, we don't have the entire header yet. Fine; we'll get it later.
 			return;
 		}
-		if (ptr != client->client_request.data() + client->client_request.size() - 4) {
+		if (ptr != client->request.data() + client->request.size() - 4) {
 			fprintf(stderr, "WARNING: fd %d had junk data after request!\n", client->sock);
 			close_client(client);
 			return;
 		}
 
 		parse_request(client);
+		construct_header(client);
 		break;
 	}
 	case Client::SENDING_HEADER: {
@@ -184,27 +312,35 @@ void Server::process_client(Client *client)
 		// Start sending from the end. In other words, we won't send any of the backlog,
 		// but we'll start sending immediately as we get data.
 		client->state = Client::SENDING_DATA;
-		client->bytes_sent = streams[client->stream_id].data_size;
+		client->bytes_sent = find_stream(client->stream_id)->data_size;
 		break;
 	}
 	case Client::SENDING_DATA: {
 		// See if there's some data we've lost. Ideally, we should drop to a block boundary,
 		// but resync will be the mux's problem.
-		const Stream &stream = streams[client->stream_id];
+		const Stream &stream = *find_stream(client->stream_id);
 		size_t bytes_to_send = stream.data_size - client->bytes_sent;
 		if (bytes_to_send > BACKLOG_SIZE) {
 			fprintf(stderr, "WARNING: fd %d lost %lld bytes, maybe too slow connection\n",
 				client->sock,
 				(long long int)(bytes_to_send - BACKLOG_SIZE));
-			client->bytes_sent = streams[client->stream_id].data_size - BACKLOG_SIZE;
+			client->bytes_sent = find_stream(client->stream_id)->data_size - BACKLOG_SIZE;
 			bytes_to_send = BACKLOG_SIZE;
 		}
 
 		// See if we need to split across the circular buffer.
-		int ret;
+		ssize_t ret;
 		if ((client->bytes_sent % BACKLOG_SIZE) + bytes_to_send > BACKLOG_SIZE) {
-			// TODO: writev
-			assert(false);
+			size_t bytes_first_part = BACKLOG_SIZE - (client->bytes_sent % BACKLOG_SIZE);
+
+			iovec iov[2];
+			iov[0].iov_base = const_cast<char *>(stream.data + (client->bytes_sent % BACKLOG_SIZE));
+			iov[0].iov_len = bytes_first_part;
+
+			iov[1].iov_base = const_cast<char *>(stream.data);
+			iov[1].iov_len = bytes_to_send - bytes_first_part;
+
+			ret = writev(client->sock, iov, 2);
 		} else {
 			ret = write(client->sock,
 				    stream.data + (client->bytes_sent % BACKLOG_SIZE),
@@ -215,11 +351,15 @@ void Server::process_client(Client *client)
 			close_client(client);
 			return;
 		}
-		client->bytes_sent += ret;
+		client->bytes_sent += ret;
+
+		if (client->bytes_sent == stream.data_size) {
+			// We don't have any more data for this client, so put it to sleep.
+			put_client_to_sleep(client);
+		}
 		break;
 	}
 	default:
-		// TODO
 		assert(false);
 	}
 }
@@ -228,16 +368,20 @@ void Server::parse_request(Client *client)
 {
 	// TODO: Actually parse the request. :-)
 	client->stream_id = "stream";
+	client->request.clear();
+}
 
-	// Construct the header.
-	client->header = "HTTP/1.0 200 OK\r\nContent-type: todo/fixme\r\n\r\n" +
-		streams[client->stream_id].header;
+void Server::construct_header(Client *client)
+{
+	client->header = "HTTP/1.0 200 OK\r\nContent-type: video/x-flv\r\nCache-Control: no-cache\r\n\r\n" +
+		find_stream(client->stream_id)->header;
 
 	// Switch states.
 	client->state = Client::SENDING_HEADER;
 
 	epoll_event ev;
 	ev.events = EPOLLOUT | EPOLLRDHUP;
+	ev.data.u64 = 0;  // Keep Valgrind happy.
 	ev.data.fd = client->sock;
 
 	if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
@@ -252,8 +396,50 @@ void Server::close_client(Client *client)
 		perror("epoll_ctl(EPOLL_CTL_DEL)");
 		exit(1);
 	}
+
+	// This client could be sleeping, so we'll need to fix that. (Argh, O(n).)
+	vector<int>::iterator new_end =
+		remove(sleeping_clients.begin(), sleeping_clients.end(), client->sock);
+	sleeping_clients.erase(new_end, sleeping_clients.end());
 
 	// Bye-bye!
 	close(client->sock);
 	clients.erase(client->sock);
 }
+
+void Server::put_client_to_sleep(Client *client)
+{
+	epoll_event ev;
+	ev.events = EPOLLRDHUP;
+	ev.data.u64 = 0;  // Keep Valgrind happy.
+	ev.data.fd = client->sock;
+
+	if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
+		perror("epoll_ctl(EPOLL_CTL_MOD)");
+		exit(1);
+	}
+
+	sleeping_clients.push_back(client->sock);
+}
+
+void Server::wake_up_all_clients()
+{
+	for (unsigned i = 0; i < sleeping_clients.size(); ++i) {
+		epoll_event ev;
+		ev.events = EPOLLOUT | EPOLLRDHUP;
+		ev.data.u64 = 0;  // Keep Valgrind happy.
+		ev.data.fd = sleeping_clients[i];
+		if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, sleeping_clients[i], &ev) == -1) {
+			perror("epoll_ctl(EPOLL_CTL_MOD)");
+			exit(1);
+		}
+	}
+	sleeping_clients.clear();
+}
+
+Stream *Server::find_stream(const string &stream_id)
+{
+	map<string, Stream *>::iterator it = streams.find(stream_id);
+	assert(it != streams.end());
+	return it->second;
+}
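
The add_data() and SENDING_DATA changes above both index a fixed BACKLOG_SIZE ring buffer with a byte counter that only ever grows, and split any copy or send that crosses the end of the buffer into two chunks. Below is a minimal standalone sketch of that wraparound arithmetic; it is not part of the patch, kBacklogSize, ring_append() and ring_send() are hypothetical names, and the caller is assumed to have already capped the length at the backlog size (which the "lost %lld bytes" branch above takes care of).

#include <stddef.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

static const size_t kBacklogSize = 1048576;  // stand-in for BACKLOG_SIZE

// Append "bytes" bytes at logical offset "data_size", wrapping around the ring
// the same way add_data() does: copy up to the end first, then restart at 0.
void ring_append(char *ring, size_t data_size, const char *data, size_t bytes)
{
	size_t pos = data_size % kBacklogSize;
	if (pos + bytes > kBacklogSize) {
		size_t to_copy = kBacklogSize - pos;
		memcpy(ring + pos, data, to_copy);
		data += to_copy;
		bytes -= to_copy;
		pos = 0;
	}
	memcpy(ring + pos, data, bytes);
}

// Send the logical range [bytes_sent, bytes_sent + bytes_to_send) to a socket,
// using writev() for the two-chunk case, as in the SENDING_DATA state above.
ssize_t ring_send(int sock, const char *ring, size_t bytes_sent, size_t bytes_to_send)
{
	size_t pos = bytes_sent % kBacklogSize;
	if (pos + bytes_to_send > kBacklogSize) {
		size_t first_part = kBacklogSize - pos;
		iovec iov[2];
		iov[0].iov_base = const_cast<char *>(ring + pos);
		iov[0].iov_len = first_part;
		iov[1].iov_base = const_cast<char *>(ring);
		iov[1].iov_len = bytes_to_send - first_part;
		return writev(sock, iov, 2);
	}
	return write(sock, ring + pos, bytes_to_send);
}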
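
The new ClientProto/StreamProto constructors and Server::serialize() point towards handing the whole server state over to another process; the actual .proto definitions sit behind state.pb.h and are not part of this diff. A hedged sketch of how a caller might dump and reload that state, assuming stop() and serialize() are publicly accessible and that a file descriptor is kept open across the handover (save_state() and load_state() are hypothetical helpers, not functions from the patch):

#include "server.h"
#include "state.pb.h"

// Old process: stop the worker thread so the state can no longer change,
// then write the whole CubemapStateProto to a file descriptor.
bool save_state(Server *server, int state_fd)
{
	server->stop();
	CubemapStateProto state = server->serialize();
	return state.SerializeToFileDescriptor(state_fd);
}

// New process: parse the blob back; Client(const ClientProto &) and
// Stream(const StreamProto &) can then rebuild the in-memory objects.
bool load_state(int state_fd, CubemapStateProto *state)
{
	return state->ParseFromFileDescriptor(state_fd);
}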
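
put_client_to_sleep() and wake_up_all_clients() reduce to toggling EPOLLOUT on an already-registered socket via EPOLL_CTL_MOD, so a client that is fully caught up costs no wakeups until new data arrives. The same pattern in isolation, with error handling elided; the function names here are hypothetical:

#include <sys/epoll.h>

// Drop EPOLLOUT for a socket with no pending data; EPOLLRDHUP stays set so a
// client hanging up is still noticed.
void epoll_sleep_socket(int epoll_fd, int sock)
{
	epoll_event ev;
	ev.events = EPOLLRDHUP;
	ev.data.u64 = 0;  // Keep Valgrind happy.
	ev.data.fd = sock;
	epoll_ctl(epoll_fd, EPOLL_CTL_MOD, sock, &ev);
}

// Re-arm EPOLLOUT once there is new data to push to the socket.
void epoll_wake_socket(int epoll_fd, int sock)
{
	epoll_event ev;
	ev.events = EPOLLOUT | EPOLLRDHUP;
	ev.data.u64 = 0;
	ev.data.fd = sock;
	epoll_ctl(epoll_fd, EPOLL_CTL_MOD, sock, &ev);
}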