#include <stddef.h>
#include <stdint.h>
+#include <sys/types.h>
+#include <sys/uio.h>
+#include <deque>
+#include <memory>
+#include <mutex>
#include <string>
#include <vector>
-class MarkPool;
+#include "metacube2.h"
+
class StreamProto;
struct Client;
+// Like metacube2_pts_packet, except without the packet type and the byteswapping.
+struct RationalPTS {
+ int64_t pts = 0;
+ uint64_t timebase_num = 0, timebase_den = 0; // 0/0 for unknown PTS.
+};
+
struct Stream {
- Stream(const std::string &stream_id, size_t backlog_size);
+ // Must be in sync with StreamConfig::Encoding.
+ enum Encoding { STREAM_ENCODING_RAW = 0, STREAM_ENCODING_METACUBE };
+
+ Stream(const std::string &url,
+ size_t backlog_size,
+ uint64_t prebuffering_bytes,
+ Encoding encoding,
+ Encoding src_encoding,
+ unsigned hls_frag_duration,
+ size_t hls_backlog_margin,
+ const std::string &allow_origin);
~Stream();
// Serialization/deserialization.
- Stream(const StreamProto &serialized);
+ Stream(const StreamProto &serialized, int data_fd);
StreamProto serialize();
- std::string stream_id;
+ // Changes the backlog size, restructuring the data as needed.
+ void set_backlog_size(size_t new_size);
+
+ // Mutex protecting <queued_data> and <queued_data_last_starting_point>.
+ // Note that if you want to hold both this and the owning server's
+ // <mutex> you will need to take <mutex> before this one.
+ mutable std::mutex queued_data_mutex;
+
+ std::string url;
- // The HTTP response header, plus the video stream header (if any).
- std::string header;
+ // The HTTP response header, without the trailing double newline.
+ std::string http_header;
+
+ // The video stream header (if any).
+ std::string stream_header;
+
+ // What encoding we apply to the outgoing data (usually raw, but can also
+ // be Metacube, for reflecting to another Cubemap instance).
+ Encoding encoding;
+
+ // What encoding we expect the incoming data to be in (usually Metacube).
+ Encoding src_encoding;
+
+ // Contents of CORS header (Access-Control-Allow-Origin), if any.
+ std::string allow_origin;
// The stream data itself, stored in a circular buffer.
//
// How many bytes <data_fd> can hold (the buffer size).
size_t backlog_size;
+ // How many bytes we need to have in the backlog before we start
+ // sending (in practice, we will then send all of them at once,
+ // and then start sending at the normal rate thereafter).
+ // This is basically to force a buffer on the client, which can help
+ // if the client expects us to be able to fill up the buffer much
+ // faster than realtime (ie., it expects a static file).
+ uint64_t prebuffering_bytes;
+
// How many bytes this stream has received. Can very well be larger
// than <backlog_size>, since the buffer wraps.
- size_t bytes_received;
-
+ uint64_t bytes_received = 0;
+
+ // A list of points in the stream that are suitable to start new clients at
+ // (after having sent the header). Empty if no such point exists yet.
+ std::deque<uint64_t> suitable_starting_points;
+
+ // A list of HLS fragment boundaries currently in the backlog; the first fragment
+ // is between point 0 and 1, the second is between 1 and 2, and so on.
+ // This roughly mirrors suitable_starting_points, but we generally make much
+ // larger fragments (we try to get as close as possible without exceeding
+ // <hls_frag_duration> seconds by too much).
+ //
+ // We keep this list even if we don't have HLS, given that we have pts data
+ // from the input stream.
+ //
+ // NOTE: The last fragment is an in-progress fragment, which can still be
+ // extended and thus should not be output. So the last fragment output is
+ // from points N-3..N-2.
+ struct FragmentStart {
+ uint64_t byte_position;
+ double pts;
+ };
+ std::deque<FragmentStart> fragments;
+ size_t first_fragment_index = 0, discontinuity_counter = 0;
+
+ // HLS target duration, in seconds.
+ unsigned hls_frag_duration = 6;
+
+ // Don't advertise new HLS fragments beginning before this point after the
+ // start of the backlog, so that we're reasonably sure that we can actually
+ // serve them even if the client can't completely keep up.
+ size_t hls_backlog_margin = 0;
+
+ // HLS playlists for this stream, in the form of an HTTP response, with
+ // headers and all. These are created on-demand, re-used by clients as
+ // needed, and cleared when they are no longer valid (e.g., when new fragments
+ // are added).
+ std::shared_ptr<const std::string> hls_playlist_http10;
+ std::shared_ptr<const std::string> hls_playlist_http11_close;
+ std::shared_ptr<const std::string> hls_playlist_http11_persistent;
+
// Clients that are in SENDING_DATA, but that we don't listen on,
// because we currently don't have any data for them.
// See put_client_to_sleep() and wake_up_all_clients().
// Clients that we recently got data for (when they were in
// <sleeping_clients>).
std::vector<Client *> to_process;
- // What pool to fetch marks from, or NULL.
- MarkPool *mark_pool;
+ // Maximum pacing rate for the stream.
+ uint32_t pacing_rate = ~0U;
+
+ // Queued data, if any. Protected by <queued_data_mutex>.
+ // The data pointers in the iovec are owned by us.
+ struct DataElement {
+ iovec data;
+ uint16_t metacube_flags;
+ RationalPTS pts;
+ };
+ std::vector<DataElement> queued_data;
// Put client to sleep, since there is no more data for it; we will no
// longer listen on POLLOUT until we get more data. Also, it will be put
// in the list of clients to wake up when we do.
void put_client_to_sleep(Client *client);
- // Add more input data to the stream, and wake up all clients that are sleeping.
- void add_data(const char *data, ssize_t bytes);
+ // Add more data to <queued_data>, adding Metacube headers if needed.
+ // Does not take ownership of <data>.
+ void add_data_deferred(const char *data, size_t bytes, uint16_t metacube_flags, const RationalPTS &pts);
+
+ // Add queued data to the stream, if any.
+ // You should hold the owning Server's <mutex>.
+ void process_queued_data();
+
+ // Generate an HLS playlist based on the current state, including HTTP headers.
+ std::shared_ptr<const std::string> generate_hls_playlist(bool http_11, bool close_after_response);
+
+ void clear_hls_playlist_cache();
private:
Stream(const Stream& other);
- // We have more data, so mark all clients that are sleeping as ready to go.
- void wake_up_all_clients();
+ // Adds data directly to the stream file descriptor, without adding headers or
+ // going through <queued_data>.
+ // You should hold the owning Server's <mutex>, and probably call
+ // remove_obsolete_starting_points() afterwards.
+ void add_data_raw(const std::vector<DataElement> &data);
+
+ // Remove points from <suitable_starting_points> that are no longer
+ // in the backlog.
+ // You should hold the owning Server's <mutex>.
+ void remove_obsolete_starting_points();
+
+ // Extend the in-progress fragment to the given position, or finish it and start
+ // a new one if that would make it too long. Returns true if a new fragment
+ // was created (ie., the HLS playlists need to be regenerated).
+ bool add_fragment_boundary(size_t byte_position, const RationalPTS &pts);
};
#endif // !defined(_STREAM_H)