}
}
+// Serialize the given state to a file descriptor, and return the (still open)
+// descriptor.
+int make_tempfile(const CubemapStateProto &state)
+{
+ char tmpl[] = "/tmp/cubemapstate.XXXXXX";
+ int state_fd = mkstemp(tmpl);
+ if (state_fd == -1) {
+ perror("mkstemp");
+ exit(1);
+ }
+
+ // Unlink the file immediately, so that it disappears as soon as the
+ // descriptor is closed (or the process dies).
+ unlink(tmpl);
+
+ string serialized;
+ if (!state.SerializeToString(&serialized)) {
+ fprintf(stderr, "PANIC: Failed to serialize state.\n");
+ exit(1);
+ }
+
+ const char *ptr = serialized.data();
+ size_t to_write = serialized.size();
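+ // write() may write out fewer bytes than asked for, so loop until the
+ // entire blob has hit the disk.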
+ while (to_write > 0) {
+ ssize_t ret = write(state_fd, ptr, to_write);
+ if (ret == -1) {
+ perror("write");
+ exit(1);
+ }
+
+ ptr += ret;
+ to_write -= ret;
+ }
+
+ return state_fd;
+}
+
+// Read the state back from the file descriptor made by make_tempfile,
+// and close it.
+CubemapStateProto read_tempfile(int state_fd)
+{
+ if (lseek(state_fd, 0, SEEK_SET) == -1) {
+ perror("lseek");
+ exit(1);
+ }
+
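+ // Read back the file in 4 kB chunks; keep going until EOF, since read()
+ // may return short reads.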
+ string serialized;
+ char buf[4096];
+ for ( ;; ) {
+ ssize_t ret = read(state_fd, buf, sizeof(buf));
+ if (ret == -1) {
+ perror("read");
+ exit(1);
+ }
+ if (ret == 0) {
+ // EOF.
+ break;
+ }
+
+ serialized.append(buf, ret);
+ }
+
+ close(state_fd); // Implicitly deletes the file.
+
+ CubemapStateProto state;
+ if (!state.ParseFromString(serialized)) {
+ fprintf(stderr, "PANIC: Failed deserialization of state.\n");
+ exit(1);
+ }
+
+ return state;
+}
+
int main(int argc, char **argv)
{
+ fprintf(stderr, "\nCubemap starting.\n");
+
servers = new Server[NUM_SERVERS];
+
+ int server_sock;
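+ // "-state <fd>" means we were just re-execed; the descriptor points at a
+ // temporary file holding the previous process' state.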
+ if (argc == 3 && strcmp(argv[1], "-state") == 0) {
+ fprintf(stderr, "Deserializing state from previous process... ");
+ int state_fd = atoi(argv[2]);
+ CubemapStateProto loaded_state = read_tempfile(state_fd);
+
+ // Deserialize the streams. Every server keeps its own copy of each stream.
+ for (int i = 0; i < loaded_state.streams_size(); ++i) {
+ for (int j = 0; j < NUM_SERVERS; ++j) {
+ servers[j].add_stream_from_serialized(loaded_state.streams(i));
+ }
+ }
+
+ // Put back the existing clients. It doesn't matter which server we
+ // allocate them to, so just do round-robin.
+ for (int i = 0; i < loaded_state.clients_size(); ++i) {
+ servers[i % NUM_SERVERS].add_client_from_serialized(loaded_state.clients(i));
+ }
+
+ // Pick up the still-open listening socket from the previous process.
+ server_sock = loaded_state.server_sock();
+
+ fprintf(stderr, "done.\n");
+ } else {
+ // TODO: This should come from a config file.
+ server_sock = create_server_socket(PORT);
+ for (int i = 0; i < NUM_SERVERS; ++i) {
+ servers[i].add_stream(STREAM_ID);
+ }
+ }
+
for (int i = 0; i < NUM_SERVERS; ++i) {
- servers[i].add_stream(STREAM_ID);
servers[i].run();
}
- int server_sock = create_server_socket(PORT);
-
pthread_t acceptor_thread;
pthread_create(&acceptor_thread, NULL, acceptor_thread_run, reinterpret_cast<void *>(server_sock));
input.stop();
CubemapStateProto state;
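+ // The listening socket survives the exec, so the new process can take it
+ // over by file descriptor number.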
+ state.set_server_sock(server_sock);
for (int i = 0; i < NUM_SERVERS; ++i) {
servers[i].stop();
}
delete[] servers;
- printf("SERIALIZED: [%s]\n", state.DebugString().c_str());
+ fprintf(stderr, "Serializing state and re-execing...\n");
+ int state_fd = make_tempfile(state);
+
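+ // The state descriptor survives the exec (mkstemp does not set
+ // FD_CLOEXEC), so its number can be passed as a plain string argument.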
+ char buf[16];
+ sprintf(buf, "%d", state_fd);
+
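+ // execlp() replaces this process on success, so if we get past it, it
+ // failed; keep retrying until it goes through.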
+ for ( ;; ) {
+ execlp(argv[0], argv[0], "-state", buf, (char *)NULL);
+ perror("execlp");
+ fprintf(stderr, "PANIC: re-exec of %s failed. Waiting 0.2 seconds and trying again...\n", argv[0]);
+ usleep(200000);
+ }
}
}
Stream::Stream(const StreamProto &serialized)
- : header(serialized.header()),
+ : stream_id(serialized.stream_id()),
+ header(serialized.header()),
data(new char[BACKLOG_SIZE]),
data_size(serialized.data_size())
{
serialized.set_header(header);
serialized.set_data(string(data, data + BACKLOG_SIZE));
serialized.set_data_size(data_size);
+ serialized.set_stream_id(stream_id);
return serialized;
}
}
}
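+// Close the epoll descriptor; it would otherwise stay open into the
+// re-execed process.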
+Server::~Server()
+{
+ close(epoll_fd);
+}
+
void Server::run()
{
should_stop = false;
exit(1);
}
}
-
+
+void Server::add_client_from_serialized(const ClientProto &client)
+{
+ MutexLock lock(&mutex);
+ clients.insert(make_pair(client.sock(), Client(client)));
+
+ // Start listening on data from this socket.
+ epoll_event ev;
+ if (client.state() == Client::READING_REQUEST) {
+ ev.events = EPOLLIN | EPOLLRDHUP;
+ } else {
+ // If we don't have more data for this client, we'll be putting it into
+ // the sleeping array again soon.
+ ev.events = EPOLLOUT | EPOLLRDHUP;
+ }
+ ev.data.u64 = 0; // Keep Valgrind happy.
+ ev.data.fd = client.sock();
+ if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, client.sock(), &ev) == -1) {
+ perror("epoll_ctl(EPOLL_CTL_ADD)");
+ exit(1);
+ }
+}
+
void Server::add_stream(const string &stream_id)
{
MutexLock lock(&mutex);
streams.insert(make_pair(stream_id, new Stream(stream_id)));
}
+
+void Server::add_stream_from_serialized(const StreamProto &stream)
+{
+ MutexLock lock(&mutex);
+ streams.insert(make_pair(stream.stream_id(), new Stream(stream)));
+}
+
void Server::set_header(const string &stream_id, const string &header)
{