Add support for uncompressed video instead of H.264 (while still storing H.264 to...

diff --git a/h264encode.cpp b/h264encode.cpp
index 7fc62b3a920cde84d44ea68913138aa51acd9764..fbeeba707949284bb0ba982cdd86d46a4b0c01b6 100644
--- a/h264encode.cpp
+++ b/h264encode.cpp
@@ -1,6 +1,7 @@
 //#include "sysdeps.h"
 #include "h264encode.h"
 
+#include <movit/util.h>
 #include <EGL/eglplatform.h>
 #include <X11/X.h>
 #include <X11/Xlib.h>
@@ -18,10 +19,13 @@ extern "C" {
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <fcntl.h>
 #include <va/va.h>
+#include <va/va_drm.h>
 #include <va/va_drmcommon.h>
 #include <va/va_enc_h264.h>
 #include <va/va_x11.h>
+#include <algorithm>
 #include <condition_variable>
 #include <cstdint>
 #include <map>
@@ -34,6 +38,7 @@ extern "C" {
 
 #include "context.h"
 #include "defs.h"
+#include "flags.h"
 #include "httpd.h"
 #include "timebase.h"
 
@@ -48,6 +53,8 @@ class QSurface;
         exit(1);                                                        \
     }
 
+#define BUFFER_OFFSET(i) ((char *)NULL + (i))
+
 //#include "loadsurface.h"
 
 #define NAL_REF_IDC_NONE        0
@@ -78,6 +85,8 @@ class QSurface;
    
 #define BITSTREAM_ALLOCATE_STEPPING     4096
 #define SURFACE_NUM 16 /* 16 surfaces for source YUV */
+#define MAX_NUM_REF1 16 // Fixed by the libva API (VAEncPictureParameterBufferH264 has 16 ReferenceFrames entries), not related to SURFACE_NUM
+#define MAX_NUM_REF2 32 // Fixed by the libva API (the slice RefPicList0/1 arrays have 32 entries), not related to SURFACE_NUM
 
 static constexpr unsigned int MaxFrameNum = (2<<16);
 static constexpr unsigned int MaxPicOrderCntLsb = (2<<8);
@@ -105,29 +114,107 @@ typedef struct __bitstream bitstream;
 
 using namespace std;
 
+// H.264 video comes out in encoding order (e.g. with two B-frames:
+// 0, 3, 1, 2, 6, 4, 5, etc.), but uncompressed video needs to
+// come in the right order. Since we do everything, including waiting
+// for the frames to come out of OpenGL, in encoding order, we need
+// a reordering buffer for uncompressed frames so that they come out
+// correctly. We go the super-lazy way of not making it understand
+// anything about the true order (which introduces some extra latency,
+// though); we know that for N B-frames we need at most (N-1) frames
+// in the reorder buffer, and can just sort on that.
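+// (Example: with two B-frames, queue_length == 2, feeding in pts 0, 3, 1, 2, 6, 4
+// outputs nothing for the first frame and then 0, 1, 2, 3, 4, in order.)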
+//
+// The class also deals with keeping a freelist as needed.
+class FrameReorderer {
+public:
+       FrameReorderer(unsigned queue_length, int width, int height);
+
+       // Inserts the given frame, and returns the next frame to output with its
+       // pts, if one is ready; otherwise -1 and nullptr.
+       // Does _not_ take ownership of data; a copy is taken if needed.
+       // The returned pointer is valid until the next call to reorder_frame, or destruction.
+       // As a special case, if queue_length == 0, will just return pts and data (no reordering needed).
+       pair<int64_t, const uint8_t *> reorder_frame(int64_t pts, const uint8_t *data);
+
+       // The same as reorder_frame, but without inserting anything. Used to empty the queue.
+       pair<int64_t, const uint8_t *> get_first_frame();
+
+       bool empty() const { return frames.empty(); }
+
+private:
+       unsigned queue_length;
+       int width, height;
+
+       priority_queue<pair<int64_t, uint8_t *>> frames;
+       stack<uint8_t *> freelist;  // Includes the last value returned from reorder_frame.
+
+       // Owns all the pointers. Normally, freelist and frames could do this themselves,
+       // except priority_queue doesn't work well with movable-only types.
+       vector<unique_ptr<uint8_t[]>> owner;
+};
+
+FrameReorderer::FrameReorderer(unsigned queue_length, int width, int height)
+    : queue_length(queue_length), width(width), height(height)
+{
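+       // Each slot holds a full readback buffer: Y plane at offset 0,
+       // CbCr plane at offset width * height (matching the PBO layout).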
+       for (unsigned i = 0; i < queue_length; ++i) {
+               owner.emplace_back(new uint8_t[width * height * 2]);
+               freelist.push(owner.back().get());
+       }
+}
+
+pair<int64_t, const uint8_t *> FrameReorderer::reorder_frame(int64_t pts, const uint8_t *data)
+{
+       if (queue_length == 0) {
+               return make_pair(pts, data);
+       }
+
+       assert(!freelist.empty());
+       uint8_t *storage = freelist.top();
+       freelist.pop();
+       memcpy(storage, data, width * height * 2);
+       frames.emplace(-pts, storage);  // Invert pts to get smallest first.
+
+       if (frames.size() >= queue_length) {
+               return get_first_frame();
+       } else {
+               return make_pair(-1, nullptr);
+       }
+}
+
+pair<int64_t, const uint8_t *> FrameReorderer::get_first_frame()
+{
+       assert(!frames.empty());
+       pair<int64_t, uint8_t *> storage = frames.top();
+       frames.pop();
+       int64_t pts = storage.first;
+       freelist.push(storage.second);
+       return make_pair(-pts, storage.second);  // Re-invert pts (see reorder_frame()).
+}
+
 class H264EncoderImpl {
 public:
-       H264EncoderImpl(QSurface *surface, int width, int height, HTTPD *httpd);
+       H264EncoderImpl(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd);
        ~H264EncoderImpl();
-       void add_audio(int64_t pts, std::vector<float> audio);  // Needs to come before end_frame() of same pts.
+       void add_audio(int64_t pts, vector<float> audio);
        bool begin_frame(GLuint *y_tex, GLuint *cbcr_tex);
-       void end_frame(RefCountedGLsync fence, int64_t pts, const std::vector<RefCountedFrame> &input_frames);
+       void end_frame(RefCountedGLsync fence, int64_t pts, const vector<RefCountedFrame> &input_frames);
+       void shutdown();
 
 private:
        struct storage_task {
                unsigned long long display_order;
                int frame_type;
-               std::vector<float> audio;
+               vector<float> audio;
                int64_t pts, dts;
        };
        struct PendingFrame {
                RefCountedGLsync fence;
-               std::vector<RefCountedFrame> input_frames;
+               vector<RefCountedFrame> input_frames;
                int64_t pts;
        };
 
        void encode_thread_func();
        void encode_remaining_frames_as_p(int encoding_frame_num, int gop_start_display_frame_num, int64_t last_dts);
+       void add_packet_for_uncompressed_frame(int64_t pts, const uint8_t *data);
        void encode_frame(PendingFrame frame, int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num,
                          int frame_type, int64_t pts, int64_t dts);
        void storage_task_thread();
@@ -145,38 +232,43 @@ private:
        void slice_header(bitstream *bs);
        int build_packed_seq_buffer(unsigned char **header_buffer);
        int build_packed_slice_buffer(unsigned char **header_buffer);
-       int init_va();
+       int init_va(const string &va_display);
        int deinit_va();
-       VADisplay va_open_display(void);
+       VADisplay va_open_display(const string &va_display);
        void va_close_display(VADisplay va_dpy);
        int setup_encode();
        int release_encode();
        void update_ReferenceFrames(int frame_type);
        int update_RefPicList(int frame_type);
 
-       std::thread encode_thread, storage_thread;
+       bool is_shutdown = false;
+       bool use_zerocopy;
+       int drm_fd = -1;
 
-       std::mutex storage_task_queue_mutex;
-       std::condition_variable storage_task_queue_changed;
+       thread encode_thread, storage_thread;
+
+       mutex storage_task_queue_mutex;
+       condition_variable storage_task_queue_changed;
        int srcsurface_status[SURFACE_NUM];  // protected by storage_task_queue_mutex
-       std::queue<storage_task> storage_task_queue;  // protected by storage_task_queue_mutex
+       queue<storage_task> storage_task_queue;  // protected by storage_task_queue_mutex
        bool storage_thread_should_quit = false;  // protected by storage_task_queue_mutex
 
-       std::mutex frame_queue_mutex;
-       std::condition_variable frame_queue_nonempty;
+       mutex frame_queue_mutex;
+       condition_variable frame_queue_nonempty;
        bool encode_thread_should_quit = false;  // under frame_queue_mutex
 
        int current_storage_frame;
 
-       std::map<int, PendingFrame> pending_video_frames;  // under frame_queue_mutex
-       std::map<int64_t, std::vector<float>> pending_audio_frames;  // under frame_queue_mutex
+       map<int, PendingFrame> pending_video_frames;  // under frame_queue_mutex
+       map<int64_t, vector<float>> pending_audio_frames;  // under frame_queue_mutex
        QSurface *surface;
 
        AVCodecContext *context_audio;
+       AVFrame *audio_frame = nullptr;
        HTTPD *httpd;
+       unique_ptr<FrameReorderer> reorderer;
 
-       Display *x11_display;
-       Window x11_window;
+       Display *x11_display = nullptr;
 
        // Encoder parameters
        VADisplay va_dpy;
@@ -190,7 +282,14 @@ private:
 
                VAImage surface_image;
                GLuint y_tex, cbcr_tex;
+
+               // Only if use_zerocopy == true.
                EGLImage y_egl_image, cbcr_egl_image;
+
+               // Only if use_zerocopy == false.
+               GLuint pbo;
+               uint8_t *y_ptr, *cbcr_ptr;
+               size_t y_offset, cbcr_offset;
        };
        GLSurface gl_surfaces[SURFACE_NUM];
 
@@ -200,7 +299,7 @@ private:
        VAEncPictureParameterBufferH264 pic_param;
        VAEncSliceParameterBufferH264 slice_param;
        VAPictureH264 CurrentCurrPic;
-       VAPictureH264 ReferenceFrames[16], RefPicList0_P[32], RefPicList0_B[32], RefPicList1_B[32];
+       VAPictureH264 ReferenceFrames[MAX_NUM_REF1], RefPicList0_P[MAX_NUM_REF2], RefPicList0_B[MAX_NUM_REF2], RefPicList1_B[MAX_NUM_REF2];
 
        // Static quality settings.
        static constexpr unsigned int frame_bitrate = 15000000 / 60;  // Doesn't really matter; only initial_qp does.
@@ -228,7 +327,6 @@ private:
        int frame_height_mbaligned;
 };
 
-
 // vaRenderPicture() is supposedly meant to destroy the buffer implicitly,
 // but if we don't delete it here, we get leaks. The GStreamer implementation
 // does the same.
@@ -290,7 +388,11 @@ bitstream_put_ui(bitstream *bs, unsigned int val, int size_in_bits)
         bs->buffer[pos] = (bs->buffer[pos] << size_in_bits | val);
     } else {
         size_in_bits -= bit_left;
-        bs->buffer[pos] = (bs->buffer[pos] << bit_left) | (val >> size_in_bits);
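+        // bit_left can be 32 here (the current word is empty), and shifting
+        // a 32-bit value by 32 bits is undefined behavior, so special-case it.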
+        if (bit_left >= 32) {
+            bs->buffer[pos] = (val >> size_in_bits);
+        } else {
+            bs->buffer[pos] = (bs->buffer[pos] << bit_left) | (val >> size_in_bits);
+        }
         bs->buffer[pos] = va_swap32(bs->buffer[pos]);
 
         if (pos + 1 == bs->max_size_in_dword) {
@@ -824,31 +926,55 @@ static const char *rc_to_string(int rc_mode)
     }
 }
 
-VADisplay H264EncoderImpl::va_open_display(void)
+VADisplay H264EncoderImpl::va_open_display(const string &va_display)
 {
-    x11_display = XOpenDisplay(NULL);
-    if (!x11_display) {
-        fprintf(stderr, "error: can't connect to X server!\n");
-        return NULL;
-    }
-    return vaGetDisplay(x11_display);
+       if (va_display.empty()) {
+               x11_display = XOpenDisplay(NULL);
+               if (!x11_display) {
+                       fprintf(stderr, "error: can't connect to X server!\n");
+                       return NULL;
+               }
+               use_zerocopy = true;
+               if (global_flags.uncompressed_video_to_http) {
+                       fprintf(stderr, "Disabling zerocopy H.264 encoding due to --uncompressed_video_to_http.\n");
+                       use_zerocopy = false;
+               }
+               return vaGetDisplay(x11_display);
+       } else if (va_display[0] != '/') {
+               x11_display = XOpenDisplay(va_display.c_str());
+               if (!x11_display) {
+                       fprintf(stderr, "error: can't connect to X server!\n");
+                       return NULL;
+               }
+               use_zerocopy = true;
+               if (global_flags.uncompressed_video_to_http) {
+                       fprintf(stderr, "Disabling zerocopy H.264 encoding due to --uncompressed_video_to_http.\n");
+                       use_zerocopy = false;
+               }
+               return vaGetDisplay(x11_display);
+       } else {
+               drm_fd = open(va_display.c_str(), O_RDWR);
+               if (drm_fd == -1) {
+                       perror(va_display.c_str());
+                       return NULL;
+               }
+               use_zerocopy = false;
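+               // (The EGL dma-buf zerocopy path assumes the GL context runs on
+               // the same device, which we cannot assume for an arbitrary DRM node.)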
+               return vaGetDisplayDRM(drm_fd);
+       }
 }
 
 void H264EncoderImpl::va_close_display(VADisplay va_dpy)
 {
-    if (!x11_display)
-        return;
-
-    if (x11_window) {
-        XUnmapWindow(x11_display, x11_window);
-        XDestroyWindow(x11_display, x11_window);
-        x11_window = None;
-    }
-    XCloseDisplay(x11_display);
-    x11_display = NULL;
+       if (x11_display) {
+               XCloseDisplay(x11_display);
+               x11_display = nullptr;
+       }
+       if (drm_fd != -1) {
+               close(drm_fd);
+       }
 }
 
-int H264EncoderImpl::init_va()
+int H264EncoderImpl::init_va(const string &va_display)
 {
     VAProfile profile_list[]={VAProfileH264High, VAProfileH264Main, VAProfileH264Baseline, VAProfileH264ConstrainedBaseline};
     VAEntrypoint *entrypoints;
@@ -858,7 +984,7 @@ int H264EncoderImpl::init_va()
     VAStatus va_status;
     unsigned int i;
 
-    va_dpy = va_open_display();
+    va_dpy = va_open_display(va_display);
     va_status = vaInitialize(va_dpy, &major_ver, &minor_ver);
     CHECK_VASTATUS(va_status, "vaInitialize");
 
@@ -887,7 +1013,9 @@ int H264EncoderImpl::init_va()
     }
     
     if (support_encode == 0) {
-        printf("Can't find VAEntrypointEncSlice for H264 profiles\n");
+        printf("Can't find VAEntrypointEncSlice for H264 profiles. If you are using a non-Intel GPU\n");
+        printf("but have one in your system, try launching Nageru with --va-display /dev/dri/renderD128\n");
+        printf("to use VA-API against DRM instead of X11.\n");
         exit(1);
     } else {
         switch (h264_profile) {
@@ -1057,6 +1185,28 @@ int H264EncoderImpl::setup_encode()
     for (i = 0; i < SURFACE_NUM; i++) {
         glGenTextures(1, &gl_surfaces[i].y_tex);
         glGenTextures(1, &gl_surfaces[i].cbcr_tex);
+
+        if (!use_zerocopy) {
+            // Create Y image.
+            glBindTexture(GL_TEXTURE_2D, gl_surfaces[i].y_tex);
+            glTexStorage2D(GL_TEXTURE_2D, 1, GL_R8, frame_width, frame_height);
+
+            // Create CbCr image.
+            glBindTexture(GL_TEXTURE_2D, gl_surfaces[i].cbcr_tex);
+            glTexStorage2D(GL_TEXTURE_2D, 1, GL_RG8, frame_width / 2, frame_height / 2);
+
+            // Generate a PBO to read into. It doesn't necessarily fit 1:1 with the VA-API
+            // buffers, due to potentially differing pitch.
+            glGenBuffers(1, &gl_surfaces[i].pbo);
+            glBindBuffer(GL_PIXEL_PACK_BUFFER, gl_surfaces[i].pbo);
+            glBufferStorage(GL_PIXEL_PACK_BUFFER, frame_width * frame_height * 2, nullptr, GL_MAP_READ_BIT | GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
+            uint8_t *ptr = (uint8_t *)glMapBufferRange(GL_PIXEL_PACK_BUFFER, 0, frame_width * frame_height * 2, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
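+            // The mapping is persistent, so the pointer stays valid until
+            // release_encode() unmaps it; GPU writes become visible to the CPU
+            // after the memory barrier and fence wait (see end_frame()/encode_frame()).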
+            gl_surfaces[i].y_offset = 0;
+            gl_surfaces[i].cbcr_offset = frame_width * frame_height;
+            gl_surfaces[i].y_ptr = ptr + gl_surfaces[i].y_offset;
+            gl_surfaces[i].cbcr_ptr = ptr + gl_surfaces[i].cbcr_offset;
+            glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+        }
     }
 
     for (i = 0; i < SURFACE_NUM; i++) {
@@ -1067,68 +1217,15 @@ int H264EncoderImpl::setup_encode()
     return 0;
 }
 
-
-
-#define partition(ref, field, key, ascending)   \
-    while (i <= j) {                            \
-        if (ascending) {                        \
-            while (ref[i].field < key)          \
-                i++;                            \
-            while (ref[j].field > key)          \
-                j--;                            \
-        } else {                                \
-            while (ref[i].field > key)          \
-                i++;                            \
-            while (ref[j].field < key)          \
-                j--;                            \
-        }                                       \
-        if (i <= j) {                           \
-            tmp = ref[i];                       \
-            ref[i] = ref[j];                    \
-            ref[j] = tmp;                       \
-            i++;                                \
-            j--;                                \
-        }                                       \
-    }                                           \
-
-static void sort_one(VAPictureH264 ref[], int left, int right,
-                     int ascending, int frame_idx)
+// Given a list like 1 9 3 0 2 8 4 and a pivot element 3, will produce
+//
+//   2 1 0 [3] 4 8 9
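+//
+// This matches the reference list ordering H.264 wants for B-frames:
+// the frames closest to the current one come first on either side.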
+template<class T, class C>
+static void sort_two(T *begin, T *end, const T &pivot, const C &less_than)
 {
-    int i = left, j = right;
-    unsigned int key;
-    VAPictureH264 tmp;
-
-    if (frame_idx) {
-        key = ref[(left + right) / 2].frame_idx;
-        partition(ref, frame_idx, key, ascending);
-    } else {
-        key = ref[(left + right) / 2].TopFieldOrderCnt;
-        partition(ref, TopFieldOrderCnt, (signed int)key, ascending);
-    }
-    
-    /* recursion */
-    if (left < j)
-        sort_one(ref, left, j, ascending, frame_idx);
-    
-    if (i < right)
-        sort_one(ref, i, right, ascending, frame_idx);
-}
-
-static void sort_two(VAPictureH264 ref[], int left, int right, unsigned int key, unsigned int frame_idx,
-                     int partition_ascending, int list0_ascending, int list1_ascending)
-{
-    int i = left, j = right;
-    VAPictureH264 tmp;
-
-    if (frame_idx) {
-        partition(ref, frame_idx, key, partition_ascending);
-    } else {
-        partition(ref, TopFieldOrderCnt, (signed int)key, partition_ascending);
-    }
-    
-
-    sort_one(ref, left, i-1, list0_ascending, frame_idx);
-    sort_one(ref, j+1, right, list1_ascending, frame_idx);
+       T *middle = partition(begin, end, [&](const T &elem) { return less_than(elem, pivot); });
+       sort(begin, middle, [&](const T &a, const T &b) { return less_than(b, a); });
+       sort(middle, end, less_than);
 }
 
 void H264EncoderImpl::update_ReferenceFrames(int frame_type)
@@ -1154,21 +1251,25 @@ void H264EncoderImpl::update_ReferenceFrames(int frame_type)
 
 int H264EncoderImpl::update_RefPicList(int frame_type)
 {
-    unsigned int current_poc = CurrentCurrPic.TopFieldOrderCnt;
+    const auto descending_by_frame_idx = [](const VAPictureH264 &a, const VAPictureH264 &b) {
+        return a.frame_idx > b.frame_idx;
+    };
+    const auto ascending_by_top_field_order_cnt = [](const VAPictureH264 &a, const VAPictureH264 &b) {
+        return a.TopFieldOrderCnt < b.TopFieldOrderCnt;
+    };
+    const auto descending_by_top_field_order_cnt = [](const VAPictureH264 &a, const VAPictureH264 &b) {
+        return a.TopFieldOrderCnt > b.TopFieldOrderCnt;
+    };
     
     if (frame_type == FRAME_P) {
         memcpy(RefPicList0_P, ReferenceFrames, numShortTerm * sizeof(VAPictureH264));
-        sort_one(RefPicList0_P, 0, numShortTerm-1, 0, 1);
-    }
-    
-    if (frame_type == FRAME_B) {
+        sort(&RefPicList0_P[0], &RefPicList0_P[numShortTerm], descending_by_frame_idx);
+    } else if (frame_type == FRAME_B) {
         memcpy(RefPicList0_B, ReferenceFrames, numShortTerm * sizeof(VAPictureH264));
-        sort_two(RefPicList0_B, 0, numShortTerm-1, current_poc, 0,
-                 1, 0, 1);
+        sort_two(&RefPicList0_B[0], &RefPicList0_B[numShortTerm], CurrentCurrPic, ascending_by_top_field_order_cnt);
 
         memcpy(RefPicList1_B, ReferenceFrames, numShortTerm * sizeof(VAPictureH264));
-        sort_two(RefPicList1_B, 0, numShortTerm-1, current_poc, 0,
-                 0, 1, 0);
+        sort_two(&RefPicList1_B[0], &RefPicList1_B[numShortTerm], CurrentCurrPic, descending_by_top_field_order_cnt);
     }
     
     return 0;
@@ -1287,7 +1388,7 @@ int H264EncoderImpl::render_picture(int frame_type, int display_frame_num, int g
     CurrentCurrPic = pic_param.CurrPic;
 
     memcpy(pic_param.ReferenceFrames, ReferenceFrames, numShortTerm*sizeof(VAPictureH264));
-    for (i = numShortTerm; i < SURFACE_NUM; i++) {
+    for (i = numShortTerm; i < MAX_NUM_REF1; i++) {
         pic_param.ReferenceFrames[i].picture_id = VA_INVALID_SURFACE;
         pic_param.ReferenceFrames[i].flags = VA_PICTURE_H264_INVALID;
     }
@@ -1437,7 +1538,7 @@ int H264EncoderImpl::render_slice(int encoding_frame_num, int display_frame_num,
         int refpiclist0_max = h264_maxref & 0xffff;
         memcpy(slice_param.RefPicList0, RefPicList0_P, refpiclist0_max*sizeof(VAPictureH264));
 
-        for (i = refpiclist0_max; i < 32; i++) {
+        for (i = refpiclist0_max; i < MAX_NUM_REF2; i++) {
             slice_param.RefPicList0[i].picture_id = VA_INVALID_SURFACE;
             slice_param.RefPicList0[i].flags = VA_PICTURE_H264_INVALID;
         }
@@ -1446,13 +1547,13 @@ int H264EncoderImpl::render_slice(int encoding_frame_num, int display_frame_num,
         int refpiclist1_max = (h264_maxref >> 16) & 0xffff;
 
         memcpy(slice_param.RefPicList0, RefPicList0_B, refpiclist0_max*sizeof(VAPictureH264));
-        for (i = refpiclist0_max; i < 32; i++) {
+        for (i = refpiclist0_max; i < MAX_NUM_REF2; i++) {
             slice_param.RefPicList0[i].picture_id = VA_INVALID_SURFACE;
             slice_param.RefPicList0[i].flags = VA_PICTURE_H264_INVALID;
         }
 
         memcpy(slice_param.RefPicList1, RefPicList1_B, refpiclist1_max*sizeof(VAPictureH264));
-        for (i = refpiclist1_max; i < 32; i++) {
+        for (i = refpiclist1_max; i < MAX_NUM_REF2; i++) {
             slice_param.RefPicList1[i].picture_id = VA_INVALID_SURFACE;
             slice_param.RefPicList1[i].flags = VA_PICTURE_H264_INVALID;
         }
@@ -1486,7 +1587,7 @@ void H264EncoderImpl::save_codeddata(storage_task task)
 
     string data;
 
-    const int64_t global_delay = (ip_period - 1) * (TIMEBASE / MAX_FPS);  // So we never get negative dts.
+    const int64_t global_delay = int64_t(ip_period - 1) * (TIMEBASE / MAX_FPS);  // So we never get negative dts.
 
     va_status = vaMapBuffer(va_dpy, gl_surfaces[task.display_order % SURFACE_NUM].coded_buf, (void **)(&buf_list));
     CHECK_VASTATUS(va_status, "vaMapBuffer");
@@ -1510,7 +1611,8 @@ void H264EncoderImpl::save_codeddata(storage_task task)
             pkt.flags = 0;
         }
         //pkt.duration = 1;
-        httpd->add_packet(pkt, task.pts + global_delay, task.dts + global_delay);
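+        // With --uncompressed_video_to_http, the H.264 stream goes to disk only;
+        // the HTTP stream gets raw frames instead (see add_packet_for_uncompressed_frame()).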
+        httpd->add_packet(pkt, task.pts + global_delay, task.dts + global_delay,
+               global_flags.uncompressed_video_to_http ? HTTPD::DESTINATION_FILE_ONLY : HTTPD::DESTINATION_FILE_AND_HTTP);
     }
     // Encode and add all audio frames up to and including the pts of this video frame.
     for ( ;; ) {
@@ -1527,18 +1629,17 @@ void H264EncoderImpl::save_codeddata(storage_task task)
              pending_audio_frames.erase(it); 
         }
 
-        AVFrame *frame = avcodec_alloc_frame();
-        frame->nb_samples = audio.size() / 2;
-        frame->format = AV_SAMPLE_FMT_S32;
-        frame->channel_layout = AV_CH_LAYOUT_STEREO;
+        audio_frame->nb_samples = audio.size() / 2;
+        audio_frame->format = AV_SAMPLE_FMT_S32;
+        audio_frame->channel_layout = AV_CH_LAYOUT_STEREO;
 
         unique_ptr<int32_t[]> int_samples(new int32_t[audio.size()]);
-        int ret = avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_S32, (const uint8_t*)int_samples.get(), audio.size() * sizeof(int32_t), 1);
+        int ret = avcodec_fill_audio_frame(audio_frame, 2, AV_SAMPLE_FMT_S32, (const uint8_t*)int_samples.get(), audio.size() * sizeof(int32_t), 1);
         if (ret < 0) {
             fprintf(stderr, "avcodec_fill_audio_frame() failed with %d\n", ret);
             exit(1);
         }
-        for (int i = 0; i < frame->nb_samples * 2; ++i) {
+        for (int i = 0; i < audio_frame->nb_samples * 2; ++i) {
             if (audio[i] >= 1.0f) {
                 int_samples[i] = 2147483647;
             } else if (audio[i] <= -1.0f) {
@@ -1553,35 +1654,16 @@ void H264EncoderImpl::save_codeddata(storage_task task)
         pkt.data = nullptr;
         pkt.size = 0;
         int got_output;
-        avcodec_encode_audio2(context_audio, &pkt, frame, &got_output);
+        avcodec_encode_audio2(context_audio, &pkt, audio_frame, &got_output);
         if (got_output) {
             pkt.stream_index = 1;
-            httpd->add_packet(pkt, audio_pts + global_delay, audio_pts + global_delay);
+            httpd->add_packet(pkt, audio_pts + global_delay, audio_pts + global_delay, HTTPD::DESTINATION_FILE_AND_HTTP);
         }
         // TODO: Delayed frames.
-        avcodec_free_frame(&frame);
+        av_frame_unref(audio_frame);
         av_free_packet(&pkt);
         if (audio_pts == task.pts) break;
     }
-
-#if 0
-    printf("\r      "); /* return back to startpoint */
-    switch (encode_order % 4) {
-        case 0:
-            printf("|");
-            break;
-        case 1:
-            printf("/");
-            break;
-        case 2:
-            printf("-");
-            break;
-        case 3:
-            printf("\\");
-            break;
-    }
-    printf("%08lld", encode_order);
-#endif
 }
 
 
@@ -1590,7 +1672,6 @@ void H264EncoderImpl::storage_task_enqueue(storage_task task)
 {
        unique_lock<mutex> lock(storage_task_queue_mutex);
        storage_task_queue.push(move(task));
-       srcsurface_status[task.display_order % SURFACE_NUM] = SRC_SURFACE_IN_ENCODING;
        storage_task_queue_changed.notify_all();
 }
 
@@ -1624,18 +1705,25 @@ void H264EncoderImpl::storage_task_thread()
 
 int H264EncoderImpl::release_encode()
 {
-    int i;
-    
-    for (i = 0; i < SURFACE_NUM; i++) {
-        vaDestroyBuffer(va_dpy, gl_surfaces[i].coded_buf);
-        vaDestroySurfaces(va_dpy, &gl_surfaces[i].src_surface, 1);
-        vaDestroySurfaces(va_dpy, &gl_surfaces[i].ref_surface, 1);
-    }
-    
-    vaDestroyContext(va_dpy, context_id);
-    vaDestroyConfig(va_dpy, config_id);
+       for (unsigned i = 0; i < SURFACE_NUM; i++) {
+               vaDestroyBuffer(va_dpy, gl_surfaces[i].coded_buf);
+               vaDestroySurfaces(va_dpy, &gl_surfaces[i].src_surface, 1);
+               vaDestroySurfaces(va_dpy, &gl_surfaces[i].ref_surface, 1);
+
+               if (!use_zerocopy) {
+                       glBindBuffer(GL_PIXEL_PACK_BUFFER, gl_surfaces[i].pbo);
+                       glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
+                       glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+                       glDeleteBuffers(1, &gl_surfaces[i].pbo);
+               }
+               glDeleteTextures(1, &gl_surfaces[i].y_tex);
+               glDeleteTextures(1, &gl_surfaces[i].cbcr_tex);
+       }
 
-    return 0;
+       vaDestroyContext(va_dpy, context_id);
+       vaDestroyConfig(va_dpy, config_id);
+
+       return 0;
 }
 
 int H264EncoderImpl::deinit_va()
@@ -1648,7 +1736,7 @@ int H264EncoderImpl::deinit_va()
 }
 
 
-H264EncoderImpl::H264EncoderImpl(QSurface *surface, int width, int height, HTTPD *httpd)
+H264EncoderImpl::H264EncoderImpl(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd)
        : current_storage_frame(0), surface(surface), httpd(httpd)
 {
        AVCodec *codec_audio = avcodec_find_encoder(AUDIO_OUTPUT_CODEC);
@@ -1663,6 +1751,7 @@ H264EncoderImpl::H264EncoderImpl(QSurface *surface, int width, int height, HTTPD
                fprintf(stderr, "Could not open codec\n");
                exit(1);
        }
+       audio_frame = av_frame_alloc();
 
        frame_width = width;
        frame_height = height;
@@ -1671,7 +1760,11 @@ H264EncoderImpl::H264EncoderImpl(QSurface *surface, int width, int height, HTTPD
 
        //print_input();
 
-       init_va();
+       if (global_flags.uncompressed_video_to_http) {
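+               // ip_period - 1 is the number of B-frames between each I/P frame,
+               // which is also how far frames can arrive out of encoding order.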
+               reorderer.reset(new FrameReorderer(ip_period - 1, frame_width, frame_height));
+       }
+
+       init_va(va_display);
        setup_encode();
 
        // No frames are ready yet.
@@ -1698,30 +1791,24 @@ H264EncoderImpl::H264EncoderImpl(QSurface *surface, int width, int height, HTTPD
 
 H264EncoderImpl::~H264EncoderImpl()
 {
-       {
-               unique_lock<mutex> lock(frame_queue_mutex);
-               encode_thread_should_quit = true;
-               frame_queue_nonempty.notify_all();
-       }
-       encode_thread.join();
-       {
-               unique_lock<mutex> lock(storage_task_queue_mutex);
-               storage_thread_should_quit = true;
-               frame_queue_nonempty.notify_all();
-               storage_task_queue_changed.notify_all();
-       }
-       storage_thread.join();
+       shutdown();
+       av_frame_free(&audio_frame);
 
-       release_encode();
-       deinit_va();
+       // TODO: Destroy context.
 }
 
 bool H264EncoderImpl::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
 {
+       assert(!is_shutdown);
        {
                // Wait until this frame slot is done encoding.
                unique_lock<mutex> lock(storage_task_queue_mutex);
+               if (srcsurface_status[current_storage_frame % SURFACE_NUM] != SRC_SURFACE_FREE) {
+                       fprintf(stderr, "Warning: Slot %d (for frame %d) is still encoding, rendering has to wait for H.264 encoder\n",
+                               current_storage_frame % SURFACE_NUM, current_storage_frame);
+               }
                storage_task_queue_changed.wait(lock, [this]{ return storage_thread_should_quit || (srcsurface_status[current_storage_frame % SURFACE_NUM] == SRC_SURFACE_FREE); });
+               srcsurface_status[current_storage_frame % SURFACE_NUM] = SRC_SURFACE_IN_ENCODING;
                if (storage_thread_should_quit) return false;
        }
 
@@ -1730,58 +1817,60 @@ bool H264EncoderImpl::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
        *y_tex = surf->y_tex;
        *cbcr_tex = surf->cbcr_tex;
 
-       VASurfaceID surface = surf->src_surface;
-        VAStatus va_status = vaDeriveImage(va_dpy, surface, &surf->surface_image);
-        CHECK_VASTATUS(va_status, "vaDeriveImage");
-
-       VABufferInfo buf_info;
-       buf_info.mem_type = VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME;  // or VA_SURFACE_ATTRIB_MEM_TYPE_KERNEL_DRM?
-       va_status = vaAcquireBufferHandle(va_dpy, surf->surface_image.buf, &buf_info);
-        CHECK_VASTATUS(va_status, "vaAcquireBufferHandle");
-
-       // Create Y image.
-       surf->y_egl_image = EGL_NO_IMAGE_KHR;
-       EGLint y_attribs[] = {
-               EGL_WIDTH, frame_width,
-               EGL_HEIGHT, frame_height,
-               EGL_LINUX_DRM_FOURCC_EXT, fourcc_code('R', '8', ' ', ' '),
-               EGL_DMA_BUF_PLANE0_FD_EXT, EGLint(buf_info.handle),
-               EGL_DMA_BUF_PLANE0_OFFSET_EXT, EGLint(surf->surface_image.offsets[0]),
-               EGL_DMA_BUF_PLANE0_PITCH_EXT, EGLint(surf->surface_image.pitches[0]),
-               EGL_NONE
-       };
-
-       surf->y_egl_image = eglCreateImageKHR(eglGetCurrentDisplay(), EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, y_attribs);
-       assert(surf->y_egl_image != EGL_NO_IMAGE_KHR);
-
-       // Associate Y image to a texture.
-       glBindTexture(GL_TEXTURE_2D, *y_tex);
-       glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, surf->y_egl_image);
-
-       // Create CbCr image.
-       surf->cbcr_egl_image = EGL_NO_IMAGE_KHR;
-       EGLint cbcr_attribs[] = {
-               EGL_WIDTH, frame_width,
-               EGL_HEIGHT, frame_height,
-               EGL_LINUX_DRM_FOURCC_EXT, fourcc_code('G', 'R', '8', '8'),
-               EGL_DMA_BUF_PLANE0_FD_EXT, EGLint(buf_info.handle),
-               EGL_DMA_BUF_PLANE0_OFFSET_EXT, EGLint(surf->surface_image.offsets[1]),
-               EGL_DMA_BUF_PLANE0_PITCH_EXT, EGLint(surf->surface_image.pitches[1]),
-               EGL_NONE
-       };
-
-       surf->cbcr_egl_image = eglCreateImageKHR(eglGetCurrentDisplay(), EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, cbcr_attribs);
-       assert(surf->cbcr_egl_image != EGL_NO_IMAGE_KHR);
-
-       // Associate CbCr image to a texture.
-       glBindTexture(GL_TEXTURE_2D, *cbcr_tex);
-       glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, surf->cbcr_egl_image);
+       VAStatus va_status = vaDeriveImage(va_dpy, surf->src_surface, &surf->surface_image);
+       CHECK_VASTATUS(va_status, "vaDeriveImage");
+
+       if (use_zerocopy) {
+               VABufferInfo buf_info;
+               buf_info.mem_type = VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME;  // or VA_SURFACE_ATTRIB_MEM_TYPE_KERNEL_DRM?
+               va_status = vaAcquireBufferHandle(va_dpy, surf->surface_image.buf, &buf_info);
+               CHECK_VASTATUS(va_status, "vaAcquireBufferHandle");
+
+               // Create Y image.
+               surf->y_egl_image = EGL_NO_IMAGE_KHR;
+               EGLint y_attribs[] = {
+                       EGL_WIDTH, frame_width,
+                       EGL_HEIGHT, frame_height,
+                       EGL_LINUX_DRM_FOURCC_EXT, fourcc_code('R', '8', ' ', ' '),
+                       EGL_DMA_BUF_PLANE0_FD_EXT, EGLint(buf_info.handle),
+                       EGL_DMA_BUF_PLANE0_OFFSET_EXT, EGLint(surf->surface_image.offsets[0]),
+                       EGL_DMA_BUF_PLANE0_PITCH_EXT, EGLint(surf->surface_image.pitches[0]),
+                       EGL_NONE
+               };
+
+               surf->y_egl_image = eglCreateImageKHR(eglGetCurrentDisplay(), EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, y_attribs);
+               assert(surf->y_egl_image != EGL_NO_IMAGE_KHR);
+
+               // Associate Y image to a texture.
+               glBindTexture(GL_TEXTURE_2D, *y_tex);
+               glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, surf->y_egl_image);
+
+               // Create CbCr image.
+               surf->cbcr_egl_image = EGL_NO_IMAGE_KHR;
+               EGLint cbcr_attribs[] = {
+                       EGL_WIDTH, frame_width,
+                       EGL_HEIGHT, frame_height,
+                       EGL_LINUX_DRM_FOURCC_EXT, fourcc_code('G', 'R', '8', '8'),
+                       EGL_DMA_BUF_PLANE0_FD_EXT, EGLint(buf_info.handle),
+                       EGL_DMA_BUF_PLANE0_OFFSET_EXT, EGLint(surf->surface_image.offsets[1]),
+                       EGL_DMA_BUF_PLANE0_PITCH_EXT, EGLint(surf->surface_image.pitches[1]),
+                       EGL_NONE
+               };
+
+               surf->cbcr_egl_image = eglCreateImageKHR(eglGetCurrentDisplay(), EGL_NO_CONTEXT, EGL_LINUX_DMA_BUF_EXT, NULL, cbcr_attribs);
+               assert(surf->cbcr_egl_image != EGL_NO_IMAGE_KHR);
+
+               // Associate CbCr image to a texture.
+               glBindTexture(GL_TEXTURE_2D, *cbcr_tex);
+               glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, surf->cbcr_egl_image);
+       }
 
        return true;
 }
 
 void H264EncoderImpl::add_audio(int64_t pts, vector<float> audio)
 {
+       assert(!is_shutdown);
        {
                unique_lock<mutex> lock(frame_queue_mutex);
                pending_audio_frames[pts] = move(audio);
@@ -1791,6 +1880,38 @@ void H264EncoderImpl::add_audio(int64_t pts, vector<float> audio)
 
 void H264EncoderImpl::end_frame(RefCountedGLsync fence, int64_t pts, const vector<RefCountedFrame> &input_frames)
 {
+       assert(!is_shutdown);
+
+       if (!use_zerocopy) {
+               GLSurface *surf = &gl_surfaces[current_storage_frame % SURFACE_NUM];
+
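+               // Read the finished frame back from the GPU into the persistently
+               // mapped PBO; encode_frame() later copies it into the VA-API surface.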
+               glPixelStorei(GL_PACK_ROW_LENGTH, 0);
+               check_error();
+
+               glBindBuffer(GL_PIXEL_PACK_BUFFER, surf->pbo);
+               check_error();
+
+               glBindTexture(GL_TEXTURE_2D, surf->y_tex);
+               check_error();
+               glGetTexImage(GL_TEXTURE_2D, 0, GL_RED, GL_UNSIGNED_BYTE, BUFFER_OFFSET(surf->y_offset));
+               check_error();
+
+               glBindTexture(GL_TEXTURE_2D, surf->cbcr_tex);
+               check_error();
+               glGetTexImage(GL_TEXTURE_2D, 0, GL_RG, GL_UNSIGNED_BYTE, BUFFER_OFFSET(surf->cbcr_offset));
+               check_error();
+
+               glBindTexture(GL_TEXTURE_2D, 0);
+               check_error();
+               glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+               check_error();
+
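+               // Make the PBO writes visible through the persistent mapping;
+               // once the fence below has signaled, the CPU can safely read
+               // y_ptr and cbcr_ptr.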
+               glMemoryBarrier(GL_TEXTURE_UPDATE_BARRIER_BIT | GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT);
+               check_error();
+               fence = RefCountedGLsync(GL_SYNC_GPU_COMMANDS_COMPLETE, /*flags=*/0);
+               check_error();
+       }
+
        {
                unique_lock<mutex> lock(frame_queue_mutex);
                pending_video_frames[current_storage_frame] = PendingFrame{ fence, input_frames, pts };
@@ -1799,6 +1920,31 @@ void H264EncoderImpl::end_frame(RefCountedGLsync fence, int64_t pts, const vecto
        frame_queue_nonempty.notify_all();
 }
 
+void H264EncoderImpl::shutdown()
+{
+       if (is_shutdown) {
+               return;
+       }
+
+       {
+               unique_lock<mutex> lock(frame_queue_mutex);
+               encode_thread_should_quit = true;
+               frame_queue_nonempty.notify_all();
+       }
+       encode_thread.join();
+       {
+               unique_lock<mutex> lock(storage_task_queue_mutex);
+               storage_thread_should_quit = true;
+               frame_queue_nonempty.notify_all();
+               storage_task_queue_changed.notify_all();
+       }
+       storage_thread.join();
+
+       release_encode();
+       deinit_va();
+       is_shutdown = true;
+}
+
 void H264EncoderImpl::encode_thread_func()
 {
        int64_t last_dts = -1;
@@ -1863,30 +2009,96 @@ void H264EncoderImpl::encode_remaining_frames_as_p(int encoding_frame_num, int g
                encode_frame(frame, encoding_frame_num++, display_frame_num, gop_start_display_frame_num, FRAME_P, frame.pts, dts);
                last_dts = dts;
        }
+
+       if (global_flags.uncompressed_video_to_http) {
+               // Add frames left in reorderer.
+               while (!reorderer->empty()) {
+                       pair<int64_t, const uint8_t *> output_frame = reorderer->get_first_frame();
+                       add_packet_for_uncompressed_frame(output_frame.first, output_frame.second);
+               }
+       }
 }
 
+void H264EncoderImpl::add_packet_for_uncompressed_frame(int64_t pts, const uint8_t *data)
+{
+       AVPacket pkt;
+       memset(&pkt, 0, sizeof(pkt));
+       pkt.buf = nullptr;
+       pkt.data = const_cast<uint8_t *>(data);
+       pkt.size = frame_width * frame_height * 2;
+       pkt.stream_index = 0;
+       pkt.flags = AV_PKT_FLAG_KEY;
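+       // Note that pkt.buf is nullptr, so the packet does not own its data;
+       // it points straight into the reorderer's buffer, which is only valid
+       // until the next reorder_frame() call.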
+       httpd->add_packet(pkt, pts, pts, HTTPD::DESTINATION_HTTP_ONLY);
+}
+
+namespace {
+
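+// Copy a tightly packed image into a destination with a possibly larger
+// row pitch; VA-API surfaces are typically padded for alignment.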
+void memcpy_with_pitch(uint8_t *dst, const uint8_t *src, size_t src_width, size_t dst_pitch, size_t height)
+{
+       if (src_width == dst_pitch) {
+               memcpy(dst, src, src_width * height);
+       } else {
+               for (size_t y = 0; y < height; ++y) {
+                       const uint8_t *sptr = src + y * src_width;
+                       uint8_t *dptr = dst + y * dst_pitch;
+                       memcpy(dptr, sptr, src_width);
+               }
+       }
+}
+
+}  // namespace
+
 void H264EncoderImpl::encode_frame(H264EncoderImpl::PendingFrame frame, int encoding_frame_num, int display_frame_num, int gop_start_display_frame_num,
                                    int frame_type, int64_t pts, int64_t dts)
 {
        // Wait for the GPU to be done with the frame.
-       glClientWaitSync(frame.fence.get(), 0, 0);
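+       // Note that glClientWaitSync() with a timeout of zero only polls the
+       // fence, so loop with a real timeout to actually block until the GPU
+       // is done.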
+       GLenum sync_status;
+       do {
+               sync_status = glClientWaitSync(frame.fence.get(), 0, 1000000000);
+               check_error();
+       } while (sync_status == GL_TIMEOUT_EXPIRED);
+       assert(sync_status != GL_WAIT_FAILED);
 
        // Release back any input frames we needed to render this frame.
        frame.input_frames.clear();
 
-       // Unmap the image.
        GLSurface *surf = &gl_surfaces[display_frame_num % SURFACE_NUM];
-       eglDestroyImageKHR(eglGetCurrentDisplay(), surf->y_egl_image);
-       eglDestroyImageKHR(eglGetCurrentDisplay(), surf->cbcr_egl_image);
-       VAStatus va_status = vaReleaseBufferHandle(va_dpy, surf->surface_image.buf);
-       CHECK_VASTATUS(va_status, "vaReleaseBufferHandle");
+       VAStatus va_status;
+
+       if (use_zerocopy) {
+               eglDestroyImageKHR(eglGetCurrentDisplay(), surf->y_egl_image);
+               eglDestroyImageKHR(eglGetCurrentDisplay(), surf->cbcr_egl_image);
+               va_status = vaReleaseBufferHandle(va_dpy, surf->surface_image.buf);
+               CHECK_VASTATUS(va_status, "vaReleaseBufferHandle");
+       } else {
+               unsigned char *surface_p = nullptr;
+               vaMapBuffer(va_dpy, surf->surface_image.buf, (void **)&surface_p);
+
+               unsigned char *va_y_ptr = (unsigned char *)surface_p + surf->surface_image.offsets[0];
+               memcpy_with_pitch(va_y_ptr, surf->y_ptr, frame_width, surf->surface_image.pitches[0], frame_height);
+
+               unsigned char *va_cbcr_ptr = (unsigned char *)surface_p + surf->surface_image.offsets[1];
+               memcpy_with_pitch(va_cbcr_ptr, surf->cbcr_ptr, (frame_width / 2) * sizeof(uint16_t), surf->surface_image.pitches[1], frame_height / 2);
+
+               va_status = vaUnmapBuffer(va_dpy, surf->surface_image.buf);
+               CHECK_VASTATUS(va_status, "vaUnmapBuffer");
+
+               if (global_flags.uncompressed_video_to_http) {
+                       // Add uncompressed video. (Note that pts == dts here.)
+                       const int64_t global_delay = int64_t(ip_period - 1) * (TIMEBASE / MAX_FPS);  // Needs to match audio.
+                       pair<int64_t, const uint8_t *> output_frame = reorderer->reorder_frame(pts + global_delay, reinterpret_cast<uint8_t *>(surf->y_ptr));
+                       if (output_frame.second != nullptr) {
+                               add_packet_for_uncompressed_frame(output_frame.first, output_frame.second);
+                       }
+               }
+       }
+
        va_status = vaDestroyImage(va_dpy, surf->surface_image.image_id);
        CHECK_VASTATUS(va_status, "vaDestroyImage");
 
-       VASurfaceID surface = surf->src_surface;
-
        // Schedule the frame for encoding.
-       va_status = vaBeginPicture(va_dpy, context_id, surface);
+       VASurfaceID va_surface = surf->src_surface;
+       va_status = vaBeginPicture(va_dpy, context_id, va_surface);
        CHECK_VASTATUS(va_status, "vaBeginPicture");
 
        if (frame_type == FRAME_IDR) {
@@ -1918,13 +2130,13 @@ void H264EncoderImpl::encode_frame(H264EncoderImpl::PendingFrame frame, int enco
 }
 
 // Proxy object.
-H264Encoder::H264Encoder(QSurface *surface, int width, int height, HTTPD *httpd)
-       : impl(new H264EncoderImpl(surface, width, height, httpd)) {}
+H264Encoder::H264Encoder(QSurface *surface, const string &va_display, int width, int height, HTTPD *httpd)
+       : impl(new H264EncoderImpl(surface, va_display, width, height, httpd)) {}
 
 // Must be defined here because unique_ptr<> destructor needs to know the impl.
 H264Encoder::~H264Encoder() {}
 
-void H264Encoder::add_audio(int64_t pts, std::vector<float> audio)
+void H264Encoder::add_audio(int64_t pts, vector<float> audio)
 {
        impl->add_audio(pts, audio);
 }
@@ -1934,9 +2146,14 @@ bool H264Encoder::begin_frame(GLuint *y_tex, GLuint *cbcr_tex)
        return impl->begin_frame(y_tex, cbcr_tex);
 }
 
-void H264Encoder::end_frame(RefCountedGLsync fence, int64_t pts, const std::vector<RefCountedFrame> &input_frames)
+void H264Encoder::end_frame(RefCountedGLsync fence, int64_t pts, const vector<RefCountedFrame> &input_frames)
 {
        impl->end_frame(fence, pts, input_frames);
 }
 
+void H264Encoder::shutdown()
+{
+       impl->shutdown();
+}
+
 // Real class.