]> git.sesse.net Git - cubemap/blobdiff - stream.cpp
Drop NO_LEVEL; it does not look very pretty now.
[cubemap] / stream.cpp
index 322391ef5c2158dfc04c440301a11b9cc4e792fb..2b9a69593a478605800fd21c9585063eebec8d0c 100644 (file)
@@ -1,13 +1,14 @@
+#include <errno.h>
 #include <stdio.h>
+#include <stdlib.h>
 #include <unistd.h>
-#include <errno.h>
-#include <algorithm>
 #include <string>
 #include <vector>
 
+#include "state.pb.h"
+#include "log.h"
 #include "stream.h"
 #include "util.h"
-#include "state.pb.h"
 
 using namespace std;
 
@@ -31,7 +32,7 @@ Stream::~Stream()
                        ret = close(data_fd);
                } while (ret == -1 && errno == EINTR);
                if (ret == -1) {
-                       perror("close");
+                       log_perror("close");
                }
        }
 }
@@ -62,12 +63,88 @@ StreamProto Stream::serialize()
        data_fd = -1;
        return serialized;
 }
+       
+void Stream::set_backlog_size(size_t new_size)
+{
+       if (backlog_size == new_size) {
+               return;
+       }
+
+       string existing_data;
+       if (!read_tempfile(data_fd, &existing_data)) {  // Closes data_fd.
+               exit(1);
+       }
+
+       // Unwrap the data so it's no longer circular.
+       if (bytes_received <= backlog_size) {
+               existing_data.resize(bytes_received);
+       } else {
+               size_t pos = bytes_received % backlog_size;
+               existing_data = existing_data.substr(pos, string::npos) +
+                       existing_data.substr(0, pos);
+       }
+
+       // See if we need to discard data.
+       if (new_size < existing_data.size()) {
+               size_t to_discard = existing_data.size() - new_size;
+               existing_data = existing_data.substr(to_discard, string::npos);
+       }
+
+       // Create a new, empty data file.
+       data_fd = make_tempfile("");
+       backlog_size = new_size;
+
+       // Now cheat a bit by rewinding, and adding all the old data back.
+       bytes_received -= existing_data.size();
+       add_data(existing_data.data(), existing_data.size());
+}
 
void Stream::put_client_to_sleep(Client *client)
{
	// The client has (for now) no more data to consume; park it on this
	// stream's sleep list. It is presumably re-awakened when new data
	// arrives (see wake_up_all_clients()) — no wakeup happens here.
	sleeping_clients.push_back(client);
}
 
+void Stream::add_data(const char *data, ssize_t bytes)
+{
+       size_t pos = bytes_received % backlog_size;
+       bytes_received += bytes;
+
+       if (pos + bytes > backlog_size) {
+               ssize_t to_copy = backlog_size - pos;
+               while (to_copy > 0) {
+                       int ret = pwrite(data_fd, data, to_copy, pos);
+                       if (ret == -1 && errno == EINTR) {
+                               continue;
+                       }
+                       if (ret == -1) {
+                               log_perror("pwrite");
+                               // Dazed and confused, but trying to continue...
+                               break;
+                       }
+                       pos += ret;
+                       data += ret;
+                       to_copy -= ret;
+                       bytes -= ret;
+               }
+               pos = 0;
+       }
+
+       while (bytes > 0) {
+               int ret = pwrite(data_fd, data, bytes, pos);
+               if (ret == -1 && errno == EINTR) {
+                       continue;
+               }
+               if (ret == -1) {
+                       log_perror("pwrite");
+                       // Dazed and confused, but trying to continue...
+                       break;
+               }
+               pos += ret;
+               data += ret;
+               bytes -= ret;
+       }
+}
+
 void Stream::wake_up_all_clients()
 {
        if (to_process.empty()) {