// cubemap: server.cpp
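//
// A Server runs an epoll-based event loop (do_work()) that serves HTTP
// streaming clients. Other threads hand it new clients and new stream data
// through the *_deferred() functions, which queue work under a mutex;
// the loop drains those queues via process_queued_data().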
#include <assert.h>
#include <errno.h>
#include <netinet/in.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#include <algorithm>
#include <deque>
#include <map>
#include <string>
#include <utility>
#include <vector>

#include "accesslog.h"
#include "log.h"
#include "metacube2.h"
#include "mutexlock.h"
#include "parse.h"
#include "server.h"
#include "state.pb.h"
#include "stream.h"
#include "util.h"

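// SO_MAX_PACING_RATE (value 47) caps a socket's transmit rate in bytes/sec.
// It was added in Linux 3.13, so define it ourselves if the system headers
// are too old to know about it.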
#ifndef SO_MAX_PACING_RATE
#define SO_MAX_PACING_RATE 47
#endif

using namespace std;

extern AccessLogThread *access_log;

namespace {

inline bool is_equal(timespec a, timespec b)
{
        return a.tv_sec == b.tv_sec &&
               a.tv_nsec == b.tv_nsec;
}

inline bool is_earlier(timespec a, timespec b)
{
        if (a.tv_sec != b.tv_sec)
                return a.tv_sec < b.tv_sec;
        return a.tv_nsec < b.tv_nsec;
}

}  // namespace

Server::Server()
{
        pthread_mutex_init(&mutex, NULL);
        pthread_mutex_init(&queued_clients_mutex, NULL);

        epoll_fd = epoll_create(1024);  // Size argument is ignored.
        if (epoll_fd == -1) {
                log_perror("epoll_create");
                exit(1);
        }
}

Server::~Server()
{
        for (size_t i = 0; i < streams.size(); ++i) {
                delete streams[i];
        }

        safe_close(epoll_fd);
}

vector<ClientStats> Server::get_client_stats() const
{
        vector<ClientStats> ret;

        MutexLock lock(&mutex);
        for (map<int, Client>::const_iterator client_it = clients.begin();
             client_it != clients.end();
             ++client_it) {
                ret.push_back(client_it->second.get_stats());
        }
        return ret;
}

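// The main event loop: runs until should_stop() is set, alternating between
// waiting for epoll events, draining queued work from other threads, waking
// clients whose streams got new data, and timing out idle request readers.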
void Server::do_work()
{
        while (!should_stop()) {
                // Wait until there's activity on at least one of the fds,
                // or 20 ms (about one frame at 50 fps) has elapsed.
                //
                // We could in theory wait forever and rely on wakeup()
                // from add_client_deferred() and add_data_deferred(),
                // but wakeup is a pretty expensive operation, and the
                // two threads might end up fighting over a lock, so it's
                // seemingly (much) more efficient to just have a timeout here.
                int nfds = epoll_pwait(epoll_fd, events, EPOLL_MAX_EVENTS, EPOLL_TIMEOUT_MS, &sigset_without_usr1_block);
                if (nfds == -1 && errno != EINTR) {
                        log_perror("epoll_pwait");
                        exit(1);
                }

                MutexLock lock(&mutex);  // We release the mutex between iterations.

                process_queued_data();

                // Process each client where we have socket activity.
                for (int i = 0; i < nfds; ++i) {
                        Client *client = reinterpret_cast<Client *>(events[i].data.u64);

                        if (events[i].events & (EPOLLERR | EPOLLRDHUP | EPOLLHUP)) {
                                close_client(client);
                                continue;
                        }

                        process_client(client);
                }

                // Process each client where its stream has new data,
                // even if there was no socket activity.
                for (size_t i = 0; i < streams.size(); ++i) {
                        vector<Client *> to_process;
                        swap(streams[i]->to_process, to_process);
                        for (size_t j = 0; j < to_process.size(); ++j) {
                                process_client(to_process[j]);
                        }
                }

                // Finally, go through each client to see if it's timed out
                // in the READING_REQUEST state. (Seemingly there are clients
                // that can hold sockets up for days at a time without sending
                // anything at all.)
                timespec timeout_time;
                if (clock_gettime(CLOCK_MONOTONIC_COARSE, &timeout_time) == -1) {
                        log_perror("clock_gettime(CLOCK_MONOTONIC_COARSE)");
                        continue;
                }
                timeout_time.tv_sec -= REQUEST_READ_TIMEOUT_SEC;
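                // clients_ordered_by_connect_time is ordered by nondecreasing
                // connect time (add_client() enforces this), so we can stop at
                // the first client that connected too recently to have timed out.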
                while (!clients_ordered_by_connect_time.empty()) {
                        const pair<timespec, int> &connect_time_and_fd = clients_ordered_by_connect_time.front();

                        // See if we have reached the end of clients to process.
                        if (is_earlier(timeout_time, connect_time_and_fd.first)) {
                                break;
                        }

                        // If this client doesn't exist anymore, just ignore it
                        // (it was deleted earlier).
                        map<int, Client>::iterator client_it = clients.find(connect_time_and_fd.second);
                        if (client_it == clients.end()) {
                                clients_ordered_by_connect_time.pop();
                                continue;
                        }
                        Client *client = &client_it->second;
                        if (!is_equal(client->connect_time, connect_time_and_fd.first)) {
                                // Another client has taken this fd in the meantime.
                                clients_ordered_by_connect_time.pop();
                                continue;
                        }

                        if (client->state != Client::READING_REQUEST) {
                                // Only READING_REQUEST can time out.
                                clients_ordered_by_connect_time.pop();
                                continue;
                        }

                        // OK, it timed out.
                        close_client(client);
                        clients_ordered_by_connect_time.pop();
                }
        }
}

CubemapStateProto Server::serialize()
{
        // We don't serialize anything queued, so empty the queues.
        process_queued_data();

        // Set all clients in a consistent state before serializing
        // (i.e., they have no remaining lost data). Otherwise, increasing
        // the backlog could take clients into a newly valid area of the backlog,
        // sending a stream of zeros instead of skipping the data as it should.
        //
        // TODO: Do this when clients are added back from serialized state instead;
        // it would probably be less wasteful.
        for (map<int, Client>::iterator client_it = clients.begin();
             client_it != clients.end();
             ++client_it) {
                skip_lost_data(&client_it->second);
        }

        CubemapStateProto serialized;
        for (map<int, Client>::const_iterator client_it = clients.begin();
             client_it != clients.end();
             ++client_it) {
                serialized.add_clients()->MergeFrom(client_it->second.serialize());
        }
        for (size_t i = 0; i < streams.size(); ++i) {
                serialized.add_streams()->MergeFrom(streams[i]->serialize());
        }
        return serialized;
}

void Server::add_client_deferred(int sock)
{
        MutexLock lock(&queued_clients_mutex);
        queued_add_clients.push_back(sock);
}

void Server::add_client(int sock)
{
        pair<map<int, Client>::iterator, bool> ret =
                clients.insert(make_pair(sock, Client(sock)));
        assert(ret.second == true);  // Should not already exist.
        Client *client_ptr = &ret.first->second;

        // Connection timestamps must be nondecreasing. I can't find any guarantee
        // that even the monotonic clock can't go backwards by a small amount
        // (think switching between CPUs with non-synchronized TSCs), so if
        // this actually should happen, we hack around it by fudging
        // connect_time.
        if (!clients_ordered_by_connect_time.empty() &&
            is_earlier(client_ptr->connect_time, clients_ordered_by_connect_time.back().first)) {
                client_ptr->connect_time = clients_ordered_by_connect_time.back().first;
        }
        clients_ordered_by_connect_time.push(make_pair(client_ptr->connect_time, sock));

        // Start listening on data from this socket. Note that we use
        // edge-triggered epoll (EPOLLET), so we must always read or write
        // until we get EAGAIN before going back to sleep on the fd.
        epoll_event ev;
        ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
        ev.data.u64 = reinterpret_cast<uint64_t>(client_ptr);
        if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sock, &ev) == -1) {
                log_perror("epoll_ctl(EPOLL_CTL_ADD)");
                exit(1);
        }

        process_client(client_ptr);
}

void Server::add_client_from_serialized(const ClientProto &client)
{
        MutexLock lock(&mutex);
        Stream *stream;
        int stream_index = lookup_stream_by_url(client.url());
        if (stream_index == -1) {
                assert(client.state() != Client::SENDING_DATA);
                stream = NULL;
        } else {
                stream = streams[stream_index];
        }
        pair<map<int, Client>::iterator, bool> ret =
                clients.insert(make_pair(client.sock(), Client(client, stream)));
        assert(ret.second == true);  // Should not already exist.
        Client *client_ptr = &ret.first->second;

        // Connection timestamps must be nondecreasing.
        assert(clients_ordered_by_connect_time.empty() ||
               !is_earlier(client_ptr->connect_time, clients_ordered_by_connect_time.back().first));
        clients_ordered_by_connect_time.push(make_pair(client_ptr->connect_time, client.sock()));

        // Start listening on data from this socket.
        epoll_event ev;
        if (client.state() == Client::READING_REQUEST) {
                ev.events = EPOLLIN | EPOLLET | EPOLLRDHUP;
        } else {
                // If we don't have more data for this client, we'll be putting it into
                // the sleeping array again soon.
                ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
        }
        ev.data.u64 = reinterpret_cast<uint64_t>(client_ptr);
        if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, client.sock(), &ev) == -1) {
                log_perror("epoll_ctl(EPOLL_CTL_ADD)");
                exit(1);
        }

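        // Clients in these states are all waiting for more stream data to
        // arrive, so put them directly back on the stream's sleeping list
        // instead of processing them right away.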
        if (client_ptr->state == Client::WAITING_FOR_KEYFRAME ||
            client_ptr->state == Client::PREBUFFERING ||
            (client_ptr->state == Client::SENDING_DATA &&
             client_ptr->stream_pos == client_ptr->stream->bytes_received)) {
                client_ptr->stream->put_client_to_sleep(client_ptr);
        } else {
                process_client(client_ptr);
        }
}

int Server::lookup_stream_by_url(const string &url) const
{
        map<string, int>::const_iterator url_it = url_map.find(url);
        if (url_it == url_map.end()) {
                return -1;
        }
        return url_it->second;
}

int Server::add_stream(const string &url, size_t backlog_size, size_t prebuffering_bytes, Stream::Encoding encoding)
{
        MutexLock lock(&mutex);
        url_map.insert(make_pair(url, streams.size()));
        streams.push_back(new Stream(url, backlog_size, prebuffering_bytes, encoding));
        return streams.size() - 1;
}

int Server::add_stream_from_serialized(const StreamProto &stream, int data_fd)
{
        MutexLock lock(&mutex);
        url_map.insert(make_pair(stream.url(), streams.size()));
        streams.push_back(new Stream(stream, data_fd));
        return streams.size() - 1;
}

void Server::set_backlog_size(int stream_index, size_t new_size)
{
        MutexLock lock(&mutex);
        assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
        streams[stream_index]->set_backlog_size(new_size);
}

void Server::set_prebuffering_bytes(int stream_index, size_t new_amount)
{
        MutexLock lock(&mutex);
        assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
        streams[stream_index]->prebuffering_bytes = new_amount;
}

void Server::set_encoding(int stream_index, Stream::Encoding encoding)
{
        MutexLock lock(&mutex);
        assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
        streams[stream_index]->encoding = encoding;
}

void Server::set_header(int stream_index, const string &http_header, const string &stream_header)
{
        MutexLock lock(&mutex);
        assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
        streams[stream_index]->http_header = http_header;

        if (stream_header != streams[stream_index]->stream_header) {
                // We cannot start at any of the older starting points anymore,
                // since they'd get the wrong header for the stream (not to mention
                // that a changed header probably means the stream restarted,
                // which means any client starting on the old one would probably
                // stop playing properly at the change point). The next block
                // should be a suitable starting point (if not, something is
                // pretty strange), so it will fill up again soon enough.
                streams[stream_index]->suitable_starting_points.clear();
        }
        streams[stream_index]->stream_header = stream_header;
}

void Server::set_pacing_rate(int stream_index, uint32_t pacing_rate)
{
        MutexLock lock(&mutex);
        assert(clients.empty());
        assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
        streams[stream_index]->pacing_rate = pacing_rate;
}

void Server::add_data_deferred(int stream_index, const char *data, size_t bytes, StreamStartSuitability suitable_for_stream_start)
{
        assert(stream_index >= 0 && stream_index < ssize_t(streams.size()));
        streams[stream_index]->add_data_deferred(data, bytes, suitable_for_stream_start);
}

// See the .h file for postconditions after this function.
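//
// The state machine below moves clients through READING_REQUEST ->
// SENDING_HEADER (or SENDING_ERROR), then WAITING_FOR_KEYFRAME and/or
// PREBUFFERING as needed, and finally SENDING_DATA; the cases fall
// through whenever a state finishes with more work immediately available.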
void Server::process_client(Client *client)
{
        switch (client->state) {
        case Client::READING_REQUEST: {
read_request_again:
                // Try to read more of the request.
                char buf[1024];
                int ret;
                do {
                        ret = read(client->sock, buf, sizeof(buf));
                } while (ret == -1 && errno == EINTR);

                if (ret == -1 && errno == EAGAIN) {
                        // No more data right now. Nothing to do.
                        // This is postcondition #2.
                        return;
                }
                if (ret == -1) {
                        log_perror("read");
                        close_client(client);
                        return;
                }
                if (ret == 0) {
                        // OK, the socket is closed.
                        close_client(client);
                        return;
                }

                RequestParseStatus status = wait_for_double_newline(&client->request, buf, ret);

                switch (status) {
                case RP_OUT_OF_SPACE:
                        log(WARNING, "[%s] Client sent overlong request!", client->remote_addr.c_str());
                        close_client(client);
                        return;
                case RP_NOT_FINISHED_YET:
                        // OK, we don't have the entire header yet. Fine; we'll get it later.
                        // See if there's more data for us.
                        goto read_request_again;
                case RP_EXTRA_DATA:
                        log(WARNING, "[%s] Junk data after request!", client->remote_addr.c_str());
                        close_client(client);
                        return;
                case RP_FINISHED:
                        break;
                }

                assert(status == RP_FINISHED);

                int error_code = parse_request(client);
                if (error_code == 200) {
                        construct_header(client);
                } else {
                        construct_error(client, error_code);
                }

                // We've changed states, so fall through.
                assert(client->state == Client::SENDING_ERROR ||
                       client->state == Client::SENDING_HEADER);
        }
        case Client::SENDING_ERROR:
        case Client::SENDING_HEADER: {
sending_header_or_error_again:
                int ret;
                do {
                        ret = write(client->sock,
                                    client->header_or_error.data() + client->header_or_error_bytes_sent,
                                    client->header_or_error.size() - client->header_or_error_bytes_sent);
                } while (ret == -1 && errno == EINTR);

                if (ret == -1 && errno == EAGAIN) {
                        // We're out of socket space, so now we're at the “low edge” of epoll's
                        // edge triggering. epoll will tell us when there is more room, so for now,
                        // just return.
                        // This is postcondition #4.
                        return;
                }

                if (ret == -1) {
                        // Error! Postcondition #1.
                        log_perror("write");
                        close_client(client);
                        return;
                }

                client->header_or_error_bytes_sent += ret;
                assert(client->header_or_error_bytes_sent <= client->header_or_error.size());

                if (client->header_or_error_bytes_sent < client->header_or_error.size()) {
                        // We haven't sent all yet. Fine; go another round.
                        goto sending_header_or_error_again;
                }

                // We're done sending the header or error! Clear it to release some memory.
                client->header_or_error.clear();

                if (client->state == Client::SENDING_ERROR) {
                        // We're done sending the error, so now close.
                        // This is postcondition #1.
                        close_client(client);
                        return;
                }

                Stream *stream = client->stream;
                if (client->stream_pos == size_t(-2)) {
                        // Start sending from the beginning of the backlog.
                        // (The subtraction is clamped at zero via ssize_t, for
                        // the case where we have received less than a full
                        // backlog so far.)
                        client->stream_pos = std::max<ssize_t>(
                            stream->bytes_received - stream->backlog_size,
                            0);
                        client->state = Client::SENDING_DATA;
                        goto sending_data;
                } else if (stream->prebuffering_bytes == 0) {
                        // Start sending from the first keyframe we get. In other
                        // words, we won't send any of the backlog, but we'll start
                        // sending immediately as we get the next keyframe block.
                        // Note that this is functionally identical to the next if branch,
                        // except that we save a binary search.
                        client->stream_pos = stream->bytes_received;
                        client->state = Client::WAITING_FOR_KEYFRAME;
                } else {
                        // We're not going to send anything to the client before we have
                        // N bytes. However, this wait might be boring; we can just as well
                        // use it to send older data if we have it. We use lower_bound()
                        // so that we are conservative and never add extra latency over just
                        // waiting (assuming CBR or nearly so); otherwise, we could want e.g.
                        // 100 kB prebuffer but end up sending a 10 MB GOP.
                        deque<size_t>::const_iterator starting_point_it =
                                lower_bound(stream->suitable_starting_points.begin(),
                                            stream->suitable_starting_points.end(),
                                            stream->bytes_received - stream->prebuffering_bytes);
                        if (starting_point_it == stream->suitable_starting_points.end()) {
                                // None found. Just put us at the end, and then wait for the
                                // first keyframe to appear.
                                client->stream_pos = stream->bytes_received;
                                client->state = Client::WAITING_FOR_KEYFRAME;
                        } else {
                                client->stream_pos = *starting_point_it;
                                client->state = Client::PREBUFFERING;
                                goto prebuffering;
                        }
                }
                // Fall through.
        }
        case Client::WAITING_FOR_KEYFRAME: {
                Stream *stream = client->stream;
                if (stream->suitable_starting_points.empty() ||
                    client->stream_pos > stream->suitable_starting_points.back()) {
                        // We haven't received a keyframe since this stream started waiting,
                        // so keep on waiting for one.
                        // This is postcondition #3.
                        stream->put_client_to_sleep(client);
                        return;
                }
                client->stream_pos = stream->suitable_starting_points.back();
                client->state = Client::PREBUFFERING;
                // Fall through.
        }
        case Client::PREBUFFERING: {
prebuffering:
                Stream *stream = client->stream;
                size_t bytes_to_send = stream->bytes_received - client->stream_pos;
                assert(bytes_to_send <= stream->backlog_size);
                if (bytes_to_send < stream->prebuffering_bytes) {
                        // We don't have enough bytes buffered to start this client yet.
                        // This is postcondition #3.
                        stream->put_client_to_sleep(client);
                        return;
                }
                client->state = Client::SENDING_DATA;
                // Fall through.
        }
        case Client::SENDING_DATA: {
sending_data:
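                // client->stream_pos is a logical byte count since the start
                // of the stream; the backlog is a ring buffer of backlog_size
                // bytes backed by data_fd, so (stream_pos % backlog_size)
                // gives the physical file offset to send from.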
                skip_lost_data(client);
                Stream *stream = client->stream;

sending_data_again:
                size_t bytes_to_send = stream->bytes_received - client->stream_pos;
                assert(bytes_to_send <= stream->backlog_size);
                if (bytes_to_send == 0) {
                        return;
                }

                // See if we need to split across the circular buffer.
                bool more_data = false;
                if ((client->stream_pos % stream->backlog_size) + bytes_to_send > stream->backlog_size) {
                        bytes_to_send = stream->backlog_size - (client->stream_pos % stream->backlog_size);
                        more_data = true;
                }

                ssize_t ret;
                do {
                        off_t offset = client->stream_pos % stream->backlog_size;
                        ret = sendfile(client->sock, stream->data_fd, &offset, bytes_to_send);
                } while (ret == -1 && errno == EINTR);

                if (ret == -1 && errno == EAGAIN) {
                        // We're out of socket space, so return; epoll will wake us up
                        // when there is more room.
                        // This is postcondition #4.
                        return;
                }
                if (ret == -1) {
                        // Error, close; postcondition #1.
                        log_perror("sendfile");
                        close_client(client);
                        return;
                }
                client->stream_pos += ret;
                client->bytes_sent += ret;

                if (client->stream_pos == stream->bytes_received) {
                        // We don't have any more data for this client, so put it to sleep.
                        // This is postcondition #3.
                        stream->put_client_to_sleep(client);
                } else if (more_data && size_t(ret) == bytes_to_send) {
                        goto sending_data_again;
                }
                break;
        }
        default:
                assert(false);
        }
}

// See if there's some data we've lost. Ideally, we should drop to a block boundary,
// but resync will be the mux's problem.
void Server::skip_lost_data(Client *client)
{
        Stream *stream = client->stream;
        if (stream == NULL) {
                return;
        }
        size_t bytes_to_send = stream->bytes_received - client->stream_pos;
        if (bytes_to_send > stream->backlog_size) {
                size_t bytes_lost = bytes_to_send - stream->backlog_size;
                client->stream_pos = stream->bytes_received - stream->backlog_size;
                client->bytes_lost += bytes_lost;
                ++client->num_loss_events;
        }
}

int Server::parse_request(Client *client)
{
        vector<string> lines = split_lines(client->request);
        if (lines.empty()) {
                return 400;  // Bad request (empty).
        }

        // Parse the headers, for logging purposes.
        // TODO: Case-insensitivity.
        multimap<string, string> headers = extract_headers(lines, client->remote_addr);
        multimap<string, string>::const_iterator referer_it = headers.find("Referer");
        if (referer_it != headers.end()) {
                client->referer = referer_it->second;
        }
        multimap<string, string>::const_iterator user_agent_it = headers.find("User-Agent");
        if (user_agent_it != headers.end()) {
                client->user_agent = user_agent_it->second;
        }

        vector<string> request_tokens = split_tokens(lines[0]);
        if (request_tokens.size() < 2) {
                return 400;  // Bad request (malformed request line).
        }
        if (request_tokens[0] != "GET") {
                return 400;  // Should maybe be 405 instead?
        }

        string url = request_tokens[1];
        // A trailing "?backlog" means the client wants us to start at the
        // beginning of the backlog (stream_pos sentinel -2) instead of at
        // the head of the stream (sentinel -1).
        if (url.size() >= 8 && url.compare(url.size() - 8, 8, "?backlog") == 0) {
                client->stream_pos = -2;
                url = url.substr(0, url.size() - 8);
        } else {
                client->stream_pos = -1;
        }

        map<string, int>::const_iterator url_map_it = url_map.find(url);
        if (url_map_it == url_map.end()) {
                return 404;  // Not found.
        }

        Stream *stream = streams[url_map_it->second];
        if (stream->http_header.empty()) {
                return 503;  // Service unavailable.
        }

        client->url = request_tokens[1];

        client->stream = stream;
        if (setsockopt(client->sock, SOL_SOCKET, SO_MAX_PACING_RATE, &client->stream->pacing_rate, sizeof(client->stream->pacing_rate)) == -1) {
                // Don't complain if we're just setting the default (no pacing);
                // the kernel might simply be too old to know the option.
                if (client->stream->pacing_rate != ~0U) {
                        log_perror("setsockopt(SO_MAX_PACING_RATE)");
                }
        }
        client->request.clear();

        return 200;  // OK!
}

void Server::construct_header(Client *client)
{
        Stream *stream = client->stream;
        if (stream->encoding == Stream::STREAM_ENCODING_RAW) {
                client->header_or_error = stream->http_header +
                        "\r\n" +
                        stream->stream_header;
        } else if (stream->encoding == Stream::STREAM_ENCODING_METACUBE) {
                client->header_or_error = stream->http_header +
                        "Content-encoding: metacube\r\n" +
                        "\r\n";
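                // The stream header is framed as a Metacube2 block with the
                // HEADER flag set, so that downstream Metacube parsers can
                // tell it apart from regular stream data.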
                if (!stream->stream_header.empty()) {
                        metacube2_block_header hdr;
                        memcpy(hdr.sync, METACUBE2_SYNC, sizeof(hdr.sync));
                        hdr.size = htonl(stream->stream_header.size());
                        hdr.flags = htons(METACUBE_FLAGS_HEADER);
                        hdr.csum = htons(metacube2_compute_crc(&hdr));
                        client->header_or_error.append(
                                string(reinterpret_cast<char *>(&hdr), sizeof(hdr)));
                }
                client->header_or_error.append(stream->stream_header);
        } else {
                assert(false);
        }

        // Switch states.
        client->state = Client::SENDING_HEADER;

        epoll_event ev;
        ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
        ev.data.u64 = reinterpret_cast<uint64_t>(client);

        if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
                log_perror("epoll_ctl(EPOLL_CTL_MOD)");
                exit(1);
        }
}

void Server::construct_error(Client *client, int error_code)
{
        char error[256];
        snprintf(error, sizeof(error),
                 "HTTP/1.0 %d Error\r\nContent-type: text/plain\r\n\r\nSomething went wrong. Sorry.\r\n",
                 error_code);
        client->header_or_error = error;

        // Switch states.
        client->state = Client::SENDING_ERROR;

        epoll_event ev;
        ev.events = EPOLLOUT | EPOLLET | EPOLLRDHUP;
        ev.data.u64 = reinterpret_cast<uint64_t>(client);

        if (epoll_ctl(epoll_fd, EPOLL_CTL_MOD, client->sock, &ev) == -1) {
                log_perror("epoll_ctl(EPOLL_CTL_MOD)");
                exit(1);
        }
}

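// Remove all instances of elem from v, using the erase-remove idiom.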
template<class T>
void delete_from(vector<T> *v, T elem)
{
        typename vector<T>::iterator new_end = remove(v->begin(), v->end(), elem);
        v->erase(new_end, v->end());
}

void Server::close_client(Client *client)
{
        if (epoll_ctl(epoll_fd, EPOLL_CTL_DEL, client->sock, NULL) == -1) {
                log_perror("epoll_ctl(EPOLL_CTL_DEL)");
                exit(1);
        }

        // This client could be sleeping, so we'll need to fix that. (Argh, O(n).)
        if (client->stream != NULL) {
                delete_from(&client->stream->sleeping_clients, client);
                delete_from(&client->stream->to_process, client);
        }

        // Log to access_log.
        access_log->write(client->get_stats());

        // Bye-bye!
        safe_close(client->sock);

        clients.erase(client->sock);
}

void Server::process_queued_data()
{
        {
                MutexLock lock(&queued_clients_mutex);

                for (size_t i = 0; i < queued_add_clients.size(); ++i) {
                        add_client(queued_add_clients[i]);
                }
                queued_add_clients.clear();
        }

        for (size_t i = 0; i < streams.size(); ++i) {
                streams[i]->process_queued_data();
        }
}