#include <stdio.h>
#include <string.h>
#include <stdint.h>
+#include <unistd.h>
#include <assert.h>
#include <arpa/inet.h>
-#include <curl/curl.h>
#include <sys/socket.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/epoll.h>
+#include <sys/sendfile.h>
+#include <time.h>
+#include <signal.h>
#include <errno.h>
#include <vector>
#include <string>
#include <map>
#include <algorithm>
+#include "markpool.h"
#include "metacube.h"
#include "server.h"
#include "mutexlock.h"
+#include "parse.h"
+#include "util.h"
+#include "state.pb.h"
using namespace std;
Client::Client(int sock)
- : state(Client::READING_REQUEST),
- header_bytes_sent(0),
+ : sock(sock),
+ fwmark(0),
+ connect_time(time(NULL)),
+ state(Client::READING_REQUEST),
+ stream(NULL),
+ header_or_error_bytes_sent(0),
bytes_sent(0)
{
request.reserve(1024);
+
+ // Find the remote address, and convert it to ASCII.
+ sockaddr_in6 addr;
+ socklen_t addr_len = sizeof(addr);
+
+ if (getpeername(sock, reinterpret_cast<sockaddr *>(&addr), &addr_len) == -1) {
+ perror("getpeername");
+ remote_addr = "";
+ } else {
+ char buf[INET6_ADDRSTRLEN];
+ if (inet_ntop(addr.sin6_family, &addr.sin6_addr, buf, sizeof(buf)) == NULL) {
+ perror("inet_ntop");
+ remote_addr = "";
+ } else {
+ remote_addr = buf;
+ }
+ }
+}
+
+Client::Client(const ClientProto &serialized, Stream *stream)
+ : sock(serialized.sock()),
+ remote_addr(serialized.remote_addr()),
+ connect_time(serialized.connect_time()),
+ state(State(serialized.state())),
+ request(serialized.request()),
+ stream_id(serialized.stream_id()),
+ stream(stream),
+ header_or_error(serialized.header_or_error()),
+ header_or_error_bytes_sent(serialized.header_or_error_bytes_sent()),
+ bytes_sent(serialized.bytes_sent())
+{
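+	// fwmarks are not part of the serialized state, so get a fresh one from
+	// the stream's mark pool (if any) and re-apply it to the socket.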
+ if (stream->mark_pool != NULL) {
+ fwmark = stream->mark_pool->get_mark();
+ } else {
+ fwmark = 0; // No mark.
+ }
+ if (setsockopt(sock, SOL_SOCKET, SO_MARK, &fwmark, sizeof(fwmark)) == -1) {
+ if (fwmark != 0) {
+ perror("setsockopt(SO_MARK)");
+ }
+ }
}
-Server::Server()
+ClientProto Client::serialize() const
{
- pthread_mutex_init(&mutex, NULL);
+ ClientProto serialized;
+ serialized.set_sock(sock);
+ serialized.set_remote_addr(remote_addr);
+ serialized.set_connect_time(connect_time);
+ serialized.set_state(state);
+ serialized.set_request(request);
+ serialized.set_stream_id(stream_id);
+ serialized.set_header_or_error(header_or_error);
+	serialized.set_header_or_error_bytes_sent(header_or_error_bytes_sent);
+ serialized.set_bytes_sent(bytes_sent);
+ return serialized;
+}
+
+ClientStats Client::get_stats() const
+{
+ ClientStats stats;
+ stats.stream_id = stream_id;
+ stats.remote_addr = remote_addr;
+ stats.connect_time = connect_time;
+ stats.bytes_sent = bytes_sent;
+ return stats;
+}
- epoll_fd = epoll_create(1024); // Size argument is ignored.
- if (epoll_fd == -1) {
- perror("epoll_fd");
+Stream::Stream(const string &stream_id, size_t backlog_size)
+ : stream_id(stream_id),
+ data_fd(make_tempfile("")),
+ backlog_size(backlog_size),
+ bytes_received(0),
+ mark_pool(NULL)
+{
+ if (data_fd == -1) {
exit(1);
}
}
-void Server::run()
+Stream::~Stream()
{
- should_stop = false;
-
- // Joinable is already the default, but it's good to be certain.
- pthread_attr_t attr;
- pthread_attr_init(&attr);
- pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
- pthread_create(&worker_thread, &attr, Server::do_work_thunk, this);
+ if (data_fd != -1) {
+ int ret;
+ do {
+ ret = close(data_fd);
+ } while (ret == -1 && errno == EINTR);
+ if (ret == -1) {
+ perror("close");
+ }
+ }
}
-void Server::stop()
+Stream::Stream(const StreamProto &serialized)
+ : stream_id(serialized.stream_id()),
+ header(serialized.header()),
+ data_fd(make_tempfile(serialized.data())),
+ backlog_size(serialized.backlog_size()),
+ bytes_received(serialized.bytes_received()),
+ mark_pool(NULL)
{
- {
- MutexLock lock(&mutex);
- should_stop = true;
+ if (data_fd == -1) {
+ exit(1);
}
+}
- if (pthread_join(worker_thread, NULL) == -1) {
- perror("pthread_join");
+StreamProto Stream::serialize()
+{
+ StreamProto serialized;
+ serialized.set_header(header);
+ if (!read_tempfile(data_fd, serialized.mutable_data())) { // Closes data_fd.
exit(1);
}
+ serialized.set_backlog_size(backlog_size);
+ serialized.set_bytes_received(bytes_received);
+ serialized.set_stream_id(stream_id);
+ data_fd = -1;
+ return serialized;
}
-void *Server::do_work_thunk(void *arg)
+void Stream::put_client_to_sleep(Client *client)
{
- Server *server = static_cast<Server *>(arg);
- server->do_work();
- return NULL;
+ sleeping_clients.push_back(client);
+}
+
+void Stream::wake_up_all_clients()
+{
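+	// Move the sleeping clients over to the to_process queue; when to_process
+	// is empty, a swap is cheaper than copying the whole vector.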
+ if (to_process.empty()) {
+ swap(sleeping_clients, to_process);
+ } else {
+ to_process.insert(to_process.end(), sleeping_clients.begin(), sleeping_clients.end());
+ sleeping_clients.clear();
+ }
+}
+
+Server::Server()
+{
+ pthread_mutex_init(&mutex, NULL);
+ pthread_mutex_init(&queued_data_mutex, NULL);
+
+ epoll_fd = epoll_create(1024); // Size argument is ignored.
+ if (epoll_fd == -1) {
+ perror("epoll_fd");
+ exit(1);
+ }
+}
+
+Server::~Server()
+{
+ int ret;
+ do {
+ ret = close(epoll_fd);
+ } while (ret == -1 && errno == EINTR);
+
+ if (ret == -1) {
+ perror("close(epoll_fd)");
+ }
+}
+
+vector<ClientStats> Server::get_client_stats() const
+{
+ vector<ClientStats> ret;
+
+ MutexLock lock(&mutex);
+ for (map<int, Client>::const_iterator client_it = clients.begin();
+ client_it != clients.end();
+ ++client_it) {
+ ret.push_back(client_it->second.get_stats());
+ }
+ return ret;
}
void Server::do_work()
{
for ( ;; ) {
int nfds = epoll_wait(epoll_fd, events, EPOLL_MAX_EVENTS, EPOLL_TIMEOUT_MS);
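+		// epoll_wait() returns EINTR if we were interrupted by a signal
+		// (e.g., when being told to stop); check the flag and try again.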
+ if (nfds == -1 && errno == EINTR) {
+ if (should_stop) {
+ return;
+ }
+ continue;
+ }
if (nfds == -1) {
perror("epoll_wait");
			exit(1);
		}

		MutexLock lock(&mutex);  // We release the mutex between iterations.
- if (should_stop) {
- return;
- }
-
+ process_queued_data();
+
for (int i = 0; i < nfds; ++i) {
int fd = events[i].data.fd;
			assert(clients.count(fd) != 0);
			Client *client = &clients[fd];
			process_client(client);
}
+
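+		// Process the clients that add_data() just woke up; they were moved
+		// from sleeping_clients to to_process by wake_up_all_clients().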
+ for (map<string, Stream *>::iterator stream_it = streams.begin();
+ stream_it != streams.end();
+ ++stream_it) {
+ vector<Client *> to_process;
+ swap(stream_it->second->to_process, to_process);
+ for (size_t i = 0; i < to_process.size(); ++i) {
+ process_client(to_process[i]);
+ }
+ }
+
+ if (should_stop) {
+ return;
+ }
}
}
-
+
+CubemapStateProto Server::serialize()
+{
+ // We don't serialize anything queued, so empty the queues.
+ process_queued_data();
+
+ CubemapStateProto serialized;
+ for (map<int, Client>::const_iterator client_it = clients.begin();
+ client_it != clients.end();
+ ++client_it) {
+ serialized.add_clients()->MergeFrom(client_it->second.serialize());
+ }
+ for (map<string, Stream *>::const_iterator stream_it = streams.begin();
+ stream_it != streams.end();
+ ++stream_it) {
+ serialized.add_streams()->MergeFrom(stream_it->second->serialize());
+ }
+ return serialized;
+}
+
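+// Queue a new client for the worker thread to pick up in process_queued_data().
+// This is presumably called from another thread, so only queued_data_mutex is taken.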
+void Server::add_client_deferred(int sock)
+{
+ MutexLock lock(&queued_data_mutex);
+ queued_add_clients.push_back(sock);
+}
+
void Server::add_client(int sock)
{
- MutexLock lock(&mutex);
clients.insert(make_pair(sock, Client(sock)));
// Start listening on data from this socket.
epoll_event ev;
- ev.events = EPOLLIN | EPOLLRDHUP;
+ ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
+ ev.data.u64 = 0; // Keep Valgrind happy.
ev.data.fd = sock;
if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock, &ev) == -1) {
perror("epoll_ctl(EPOLL_CTL_ADD)");
exit(1);
}
+
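+	// Start processing the client right away, in case data is already waiting.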
+ process_client(&clients[sock]);
}
-
-void Server::add_stream(const string &stream_id)
+
+void Server::add_client_from_serialized(const ClientProto &client)
{
MutexLock lock(&mutex);
- streams.insert(make_pair(stream_id, Stream()));
+ Stream *stream = find_stream(client.stream_id());
+ clients.insert(make_pair(client.sock(), Client(client, stream)));
+ Client *client_ptr = &clients[client.sock()];
+
+ // Start listening on data from this socket.
+ epoll_event ev;
+ if (client.state() == Client::READING_REQUEST) {
+ ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
+ } else {
+ // If we don't have more data for this client, we'll be putting it into
+ // the sleeping array again soon.
+ ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
+ }
+ ev.data.u64 = 0; // Keep Valgrind happy.
+ ev.data.fd = client.sock();
+ if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, client.sock(), &ev) == -1) {
+ perror("epoll_ctl(EPOLL_CTL_ADD)");
+ exit(1);
+ }
+
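+	// If the client was fully caught up when it was serialized, put it
+	// directly to sleep; otherwise, process it right away.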
+ if (client_ptr->state == Client::SENDING_DATA &&
+ client_ptr->bytes_sent == client_ptr->stream->bytes_received) {
+ client_ptr->stream->put_client_to_sleep(client_ptr);
+ } else {
+ process_client(client_ptr);
+ }
+}
+
+void Server::add_stream(const string &stream_id, size_t backlog_size)
+{
+ MutexLock lock(&mutex);
+ streams.insert(make_pair(stream_id, new Stream(stream_id, backlog_size)));
+}
+
+void Server::add_stream_from_serialized(const StreamProto &stream)
+{
+ MutexLock lock(&mutex);
+ streams.insert(make_pair(stream.stream_id(), new Stream(stream)));
}
void Server::set_header(const string &stream_id, const string &header)
{
MutexLock lock(&mutex);
- assert(streams.count(stream_id) != 0);
- streams[stream_id].header = header;
+ find_stream(stream_id)->header = header;
+
+	// If there are clients we haven't sent anything to yet, we should give
+	// them the header, so push them back into the SENDING_HEADER state.
+ for (map<int, Client>::iterator client_it = clients.begin();
+ client_it != clients.end();
+ ++client_it) {
+ Client *client = &client_it->second;
+ if (client->state == Client::SENDING_DATA &&
+ client->bytes_sent == 0) {
+ construct_header(client);
+ }
+ }
}
-void Server::add_data(const string &stream_id, const char *data, size_t bytes)
+void Server::set_mark_pool(const std::string &stream_id, MarkPool *mark_pool)
{
- if (bytes == 0) {
- return;
- }
-
MutexLock lock(&mutex);
- assert(streams.count(stream_id) != 0);
- Stream *stream = &streams[stream_id];
- size_t pos = stream->data_size % BACKLOG_SIZE;
- stream->data_size += bytes;
-
- if (pos + bytes > BACKLOG_SIZE) {
- size_t to_copy = BACKLOG_SIZE - pos;
- memcpy(stream->data + pos, data, to_copy);
- data += to_copy;
- bytes -= to_copy;
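+	// Changing the mark pool with clients already connected is not supported,
+	// since marks are handed out when a client is bound to the stream.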
+ assert(clients.empty());
+ find_stream(stream_id)->mark_pool = mark_pool;
+}
+
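+// Buffer incoming stream data; the worker thread moves it into the stream
+// in process_queued_data(), so we only need queued_data_mutex here.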
+void Server::add_data_deferred(const string &stream_id, const char *data, size_t bytes)
+{
+ MutexLock lock(&queued_data_mutex);
+ queued_data[stream_id].append(string(data, data + bytes));
+}
+
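+// Append data to the stream's backlog, which is a fixed-size circular buffer
+// backed by a file (data_fd); writes wrap around at backlog_size.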
+void Server::add_data(const string &stream_id, const char *data, ssize_t bytes)
+{
+ Stream *stream = find_stream(stream_id);
+ size_t pos = stream->bytes_received % stream->backlog_size;
+ stream->bytes_received += bytes;
+
+ if (pos + bytes > stream->backlog_size) {
+ ssize_t to_copy = stream->backlog_size - pos;
+ while (to_copy > 0) {
+ int ret = pwrite(stream->data_fd, data, to_copy, pos);
+ if (ret == -1 && errno == EINTR) {
+ continue;
+ }
+ if (ret == -1) {
+ perror("pwrite");
+ // Dazed and confused, but trying to continue...
+ break;
+ }
+ pos += ret;
+ data += ret;
+ to_copy -= ret;
+ bytes -= ret;
+ }
pos = 0;
}
- memcpy(stream->data + pos, data, bytes);
- wake_up_all_clients();
+ while (bytes > 0) {
+ int ret = pwrite(stream->data_fd, data, bytes, pos);
+ if (ret == -1 && errno == EINTR) {
+ continue;
+ }
+ if (ret == -1) {
+ perror("pwrite");
+ // Dazed and confused, but trying to continue...
+ break;
+ }
+ pos += ret;
+ data += ret;
+ bytes -= ret;
+ }
+
+ stream->wake_up_all_clients();
}
-
+
+// See the .h file for postconditions after this function.
void Server::process_client(Client *client)
{
switch (client->state) {
case Client::READING_REQUEST: {
+read_request_again:
// Try to read more of the request.
char buf[1024];
- int ret = read(client->sock, buf, sizeof(buf));
+ int ret;
+ do {
+ ret = read(client->sock, buf, sizeof(buf));
+ } while (ret == -1 && errno == EINTR);
+
+ if (ret == -1 && errno == EAGAIN) {
+ // No more data right now. Nothing to do.
+ // This is postcondition #2.
+ return;
+ }
if (ret == -1) {
perror("read");
close_client(client);
return;
}
if (ret == 0) {
- // No data? This really means that we were triggered for something else than
- // POLLIN (which suggests a logic error in epoll).
- fprintf(stderr, "WARNING: fd %d returned unexpectedly 0 bytes!\n", client->sock);
+ // OK, the socket is closed.
close_client(client);
return;
}
- // Guard against overlong requests gobbling up all of our space.
- if (client->request.size() + ret > MAX_CLIENT_REQUEST) {
+ RequestParseStatus status = wait_for_double_newline(&client->request, buf, ret);
+
+ switch (status) {
+ case RP_OUT_OF_SPACE:
fprintf(stderr, "WARNING: fd %d sent overlong request!\n", client->sock);
close_client(client);
return;
- }
-
- // See if we have \r\n\r\n anywhere in the request. We start three bytes
- // before what we just appended, in case we just got the final character.
- size_t existing_req_bytes = client->request.size();
- client->request.append(string(buf, buf + ret));
-
- size_t start_at = (existing_req_bytes >= 3 ? existing_req_bytes - 3 : 0);
- const char *ptr = reinterpret_cast<char *>(
- memmem(client->request.data() + start_at, client->request.size() - start_at,
- "\r\n\r\n", 4));
- if (ptr == NULL) {
+ case RP_NOT_FINISHED_YET:
// OK, we don't have the entire header yet. Fine; we'll get it later.
- return;
- }
-
- if (ptr != client->request.data() + client->request.size() - 4) {
+ // See if there's more data for us.
+ goto read_request_again;
+ case RP_EXTRA_DATA:
fprintf(stderr, "WARNING: fd %d had junk data after request!\n", client->sock);
close_client(client);
return;
+ case RP_FINISHED:
+ break;
}
- parse_request(client);
- break;
+ assert(status == RP_FINISHED);
+
+ int error_code = parse_request(client);
+ if (error_code == 200) {
+ construct_header(client);
+ } else {
+ construct_error(client, error_code);
+ }
+
+ // We've changed states, so fall through.
+ assert(client->state == Client::SENDING_ERROR ||
+ client->state == Client::SENDING_HEADER);
}
+ case Client::SENDING_ERROR:
case Client::SENDING_HEADER: {
- int ret = write(client->sock,
- client->header.data() + client->header_bytes_sent,
- client->header.size() - client->header_bytes_sent);
+sending_header_or_error_again:
+ int ret;
+ do {
+ ret = write(client->sock,
+ client->header_or_error.data() + client->header_or_error_bytes_sent,
+ client->header_or_error.size() - client->header_or_error_bytes_sent);
+ } while (ret == -1 && errno == EINTR);
+
+ if (ret == -1 && errno == EAGAIN) {
+ // We're out of socket space, so now we're at the “low edge” of epoll's
+ // edge triggering. epoll will tell us when there is more room, so for now,
+ // just return.
+ // This is postcondition #4.
+ return;
+ }
+
if (ret == -1) {
+ // Error! Postcondition #1.
perror("write");
close_client(client);
return;
}
- client->header_bytes_sent += ret;
- assert(client->header_bytes_sent <= client->header.size());
+ client->header_or_error_bytes_sent += ret;
+ assert(client->header_or_error_bytes_sent <= client->header_or_error.size());
- if (client->header_bytes_sent < client->header.size()) {
- // We haven't sent all yet. Fine; we'll do that later.
- return;
+ if (client->header_or_error_bytes_sent < client->header_or_error.size()) {
+ // We haven't sent all yet. Fine; go another round.
+ goto sending_header_or_error_again;
}
- // We're done sending the header! Clear the entire header to release some memory.
- client->header.clear();
+ // We're done sending the header or error! Clear it to release some memory.
+ client->header_or_error.clear();
+
+ if (client->state == Client::SENDING_ERROR) {
+ // We're done sending the error, so now close.
+ // This is postcondition #1.
+ close_client(client);
+ return;
+ }
// Start sending from the end. In other words, we won't send any of the backlog,
// but we'll start sending immediately as we get data.
+ // This is postcondition #3.
client->state = Client::SENDING_DATA;
- client->bytes_sent = streams[client->stream_id].data_size;
- break;
+ client->bytes_sent = client->stream->bytes_received;
+ client->stream->put_client_to_sleep(client);
+ return;
}
case Client::SENDING_DATA: {
+sending_data_again:
// See if there's some data we've lost. Ideally, we should drop to a block boundary,
// but resync will be the mux's problem.
- const Stream &stream = streams[client->stream_id];
- size_t bytes_to_send = stream.data_size - client->bytes_sent;
- if (bytes_to_send > BACKLOG_SIZE) {
+ Stream *stream = client->stream;
+ size_t bytes_to_send = stream->bytes_received - client->bytes_sent;
+ if (bytes_to_send == 0) {
+ return;
+ }
+ if (bytes_to_send > stream->backlog_size) {
fprintf(stderr, "WARNING: fd %d lost %lld bytes, maybe too slow connection\n",
client->sock,
- (long long int)(bytes_to_send - BACKLOG_SIZE));
- client->bytes_sent = streams[client->stream_id].data_size - BACKLOG_SIZE;
- bytes_to_send = BACKLOG_SIZE;
+ (long long int)(bytes_to_send - stream->backlog_size));
+ client->bytes_sent = stream->bytes_received - stream->backlog_size;
+ bytes_to_send = stream->backlog_size;
}
// See if we need to split across the circular buffer.
- ssize_t ret;
- if ((client->bytes_sent % BACKLOG_SIZE) + bytes_to_send > BACKLOG_SIZE) {
- size_t bytes_first_part = BACKLOG_SIZE - (client->bytes_sent % BACKLOG_SIZE);
-
- iovec iov[2];
- iov[0].iov_base = const_cast<char *>(stream.data + (client->bytes_sent % BACKLOG_SIZE));
- iov[0].iov_len = bytes_first_part;
-
- iov[1].iov_base = const_cast<char *>(stream.data);
- iov[1].iov_len = bytes_to_send - bytes_first_part;
+ bool more_data = false;
+ if ((client->bytes_sent % stream->backlog_size) + bytes_to_send > stream->backlog_size) {
+ bytes_to_send = stream->backlog_size - (client->bytes_sent % stream->backlog_size);
+ more_data = true;
+ }
- ret = writev(client->sock, iov, 2);
- } else {
- ret = write(client->sock,
- stream.data + (client->bytes_sent % BACKLOG_SIZE),
- bytes_to_send);
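+		// Send the data with zero-copy sendfile() straight from the backlog file.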
+ ssize_t ret;
+ do {
+ loff_t offset = client->bytes_sent % stream->backlog_size;
+ ret = sendfile(client->sock, stream->data_fd, &offset, bytes_to_send);
+ } while (ret == -1 && errno == EINTR);
+
+ if (ret == -1 && errno == EAGAIN) {
+ // We're out of socket space, so return; epoll will wake us up
+ // when there is more room.
+ // This is postcondition #4.
+ return;
}
if (ret == -1) {
- perror("write/writev");
+ // Error, close; postcondition #1.
+ perror("sendfile");
close_client(client);
return;
}
client->bytes_sent += ret;
- if (client->bytes_sent == stream.data_size) {
+ if (client->bytes_sent == stream->bytes_received) {
// We don't have any more data for this client, so put it to sleep.
- put_client_to_sleep(client);
+ // This is postcondition #3.
+ stream->put_client_to_sleep(client);
+ } else if (more_data) {
+ goto sending_data_again;
}
break;
}
}
}
-void Server::parse_request(Client *client)
+int Server::parse_request(Client *client)
{
- // TODO: Actually parse the request. :-)
- client->stream_id = "stream";
+ vector<string> lines = split_lines(client->request);
+ if (lines.empty()) {
+ return 400; // Bad request (empty).
+ }
+
+ vector<string> request_tokens = split_tokens(lines[0]);
+ if (request_tokens.size() < 2) {
+		return 400;  // Bad request (malformed request line).
+ }
+ if (request_tokens[0] != "GET") {
+ return 400; // Should maybe be 405 instead?
+ }
+ if (streams.count(request_tokens[1]) == 0) {
+ return 404; // Not found.
+ }
+
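+	// Found the stream; bind the client to it, and give it a mark from the
+	// stream's mark pool, if any.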
+ client->stream_id = request_tokens[1];
+ client->stream = find_stream(client->stream_id);
+ if (client->stream->mark_pool != NULL) {
+ client->fwmark = client->stream->mark_pool->get_mark();
+ } else {
+ client->fwmark = 0; // No mark.
+ }
+ if (setsockopt(client->sock, SOL_SOCKET, SO_MARK, &client->fwmark, sizeof(client->fwmark)) == -1) {
+ if (client->fwmark != 0) {
+ perror("setsockopt(SO_MARK)");
+ }
+ }
client->request.clear();
- // Construct the header.
- client->header = "HTTP/1.0 200 OK\r\n Content-type: video/x-flv\r\nCache-Control: no-cache\r\nContent-type: todo/fixme\r\n\r\n" +
- streams[client->stream_id].header;
+ return 200; // OK!
+}
+
+void Server::construct_header(Client *client)
+{
+ client->header_or_error = find_stream(client->stream_id)->header;
// Switch states.
client->state = Client::SENDING_HEADER;
epoll_event ev;
- ev.events = EPOLLOUT | EPOLLRDHUP;
+ ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
+ ev.data.u64 = 0; // Keep Valgrind happy.
+ ev.data.fd = client->sock;
+
+ if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
+ perror("epoll_ctl(EPOLL_CTL_MOD)");
+ exit(1);
+ }
+}
+
+void Server::construct_error(Client *client, int error_code)
+{
+ char error[256];
+ snprintf(error, 256, "HTTP/1.0 %d Error\r\nContent-type: text/plain\r\n\r\nSomething went wrong. Sorry.\r\n",
+ error_code);
+ client->header_or_error = error;
+
+ // Switch states.
+ client->state = Client::SENDING_ERROR;
+
+ epoll_event ev;
+ ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
+ ev.data.u64 = 0; // Keep Valgrind happy.
ev.data.fd = client->sock;
if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
		perror("epoll_ctl(EPOLL_CTL_MOD)");
		exit(1);
}
}
+
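+// Remove all occurrences of <elem> from the vector (the erase-remove idiom).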
+template<class T>
+void delete_from(vector<T> *v, T elem)
+{
+ typename vector<T>::iterator new_end = remove(v->begin(), v->end(), elem);
+ v->erase(new_end, v->end());
+}
void Server::close_client(Client *client)
{
	if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, client->sock, NULL) == -1) {
		perror("epoll_ctl(EPOLL_CTL_DEL)");
		exit(1);
	}

	// This client could be sleeping, so we'll need to fix that. (Argh, O(n).)
- vector<int>::iterator new_end =
- remove(sleeping_clients.begin(), sleeping_clients.end(), client->sock);
- sleeping_clients.erase(new_end, sleeping_clients.end());
-
+ if (client->stream != NULL) {
+ delete_from(&client->stream->sleeping_clients, client);
+ delete_from(&client->stream->to_process, client);
+ if (client->stream->mark_pool != NULL) {
+ int fwmark = client->fwmark;
+ client->stream->mark_pool->release_mark(fwmark);
+ }
+ }
+
// Bye-bye!
- close(client->sock);
+ int ret;
+ do {
+ ret = close(client->sock);
+ } while (ret == -1 && errno == EINTR);
+
+ if (ret == -1) {
+ perror("close");
+ }
+
clients.erase(client->sock);
}
-void Server::put_client_to_sleep(Client *client)
+Stream *Server::find_stream(const string &stream_id)
{
- epoll_event ev;
- ev.events = EPOLLRDHUP;
- ev.data.fd = client->sock;
-
- if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
- perror("epoll_ctl(EPOLL_CTL_MOD)");
- exit(1);
- }
-
- sleeping_clients.push_back(client->sock);
+ map<string, Stream *>::iterator it = streams.find(stream_id);
+ assert(it != streams.end());
+ return it->second;
}
-void Server::wake_up_all_clients()
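+// Move deferred clients and stream data (added from other threads via the
+// *_deferred() functions) into the main data structures.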
+void Server::process_queued_data()
{
- for (unsigned i = 0; i < sleeping_clients.size(); ++i) {
- epoll_event ev;
- ev.events = EPOLLOUT | EPOLLRDHUP;
- ev.data.fd = sleeping_clients[i];
- if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, sleeping_clients[i], &ev) == -1) {
- perror("epoll_ctl(EPOLL_CTL_MOD)");
- exit(1);
- }
+ MutexLock lock(&queued_data_mutex);
+
+ for (size_t i = 0; i < queued_add_clients.size(); ++i) {
+ add_client(queued_add_clients[i]);
+ }
+ queued_add_clients.clear();
+
+ for (map<string, string>::iterator queued_it = queued_data.begin();
+ queued_it != queued_data.end();
+ ++queued_it) {
+ add_data(queued_it->first, queued_it->second.data(), queued_it->second.size());
}
- sleeping_clients.clear();
+ queued_data.clear();
}