/*
* RTMP network protocol
- * Copyright (c) 2009 Kostya Shishkov
+ * Copyright (c) 2009 Konstantin Shishkov
*
* This file is part of Libav.
*
#include "libavcodec/bytestream.h"
#include "libavutil/avstring.h"
+#include "libavutil/base64.h"
#include "libavutil/intfloat.h"
#include "libavutil/lfg.h"
+#include "libavutil/md5.h"
#include "libavutil/opt.h"
+#include "libavutil/random_seed.h"
#include "libavutil/sha.h"
#include "avformat.h"
#include "internal.h"
#include "rtmppkt.h"
#include "url.h"
-//#define DEBUG
+#if CONFIG_ZLIB
+#include <zlib.h>
+#endif
#define APP_MAX_LENGTH 128
#define PLAYPATH_MAX_LENGTH 256
#define TCURL_MAX_LENGTH 512
#define FLASHVER_MAX_LENGTH 64
+#define RTMP_PKTDATA_DEFAULT_SIZE 4096
+#define RTMP_HEADER 11
/** RTMP protocol handler state */
typedef enum {
STATE_HANDSHAKED, ///< client has performed handshake
STATE_FCPUBLISH, ///< client FCPublishing stream (for output)
STATE_PLAYING, ///< client has started receiving multimedia data from server
+ STATE_SEEKING, ///< client has started the seek operation. Back on STATE_PLAYING when the time comes
STATE_PUBLISHING, ///< client has started sending multimedia data to server (for output)
+ STATE_RECEIVING, ///< received a publish command (for input)
+ STATE_SENDING, ///< received a play command (for output)
STATE_STOPPED, ///< the broadcast has been stopped
} ClientState;
typedef struct RTMPContext {
const AVClass *class;
URLContext* stream; ///< TCP stream used in interactions with RTMP server
- RTMPPacket prev_pkt[2][RTMP_CHANNELS]; ///< packet history used when reading and sending packets
+ RTMPPacket *prev_pkt[2]; ///< packet history used when reading and sending packets ([0] for reading, [1] for writing)
+ int nb_prev_pkt[2]; ///< number of elements in prev_pkt
int in_chunk_size; ///< size of the chunks incoming RTMP packets are divided into
int out_chunk_size; ///< size of the chunks outgoing RTMP packets are divided into
int is_input; ///< input/output flag
char *app; ///< name of application
char *conn; ///< append arbitrary AMF data to the Connect message
ClientState state; ///< current state
- int main_channel_id; ///< an additional channel ID which is used for some invocations
+ int stream_id; ///< ID assigned by the server for the stream
uint8_t* flv_data; ///< buffer with data for demuxer
int flv_size; ///< current buffer size
int flv_off; ///< number of bytes read from current buffer
uint32_t client_report_size; ///< number of bytes after which client should report to server
uint32_t bytes_read; ///< number of bytes read from server
uint32_t last_bytes_read; ///< number of bytes read last reported to server
+ uint32_t last_timestamp; ///< last timestamp received in a packet
int skip_bytes; ///< number of bytes to skip from the input FLV stream in the next write call
- uint8_t flv_header[11]; ///< partial incoming flv packet header
+ int has_audio; ///< presence of audio data
+ int has_video; ///< presence of video data
+ int received_metadata; ///< Indicates if we have received metadata about the streams
+ uint8_t flv_header[RTMP_HEADER]; ///< partial incoming flv packet header
int flv_header_bytes; ///< number of initialized bytes in flv_header
int nb_invokes; ///< keeps track of invoke messages
char* tcurl; ///< url of the target stream
int swfhash_len; ///< length of the SHA256 hash
int swfsize; ///< size of the decompressed SWF file
char* swfurl; ///< url of the swf player
+ char* swfverify; ///< URL to player swf file, compute hash/size automatically
char swfverification[42]; ///< hash of the SWF verification
char* pageurl; ///< url of the web page
char* subscribe; ///< name of live stream to subscribe
TrackedMethod*tracked_methods; ///< tracked methods buffer
int nb_tracked_methods; ///< number of tracked methods
int tracked_methods_size; ///< size of the tracked methods buffer
+ int listen; ///< listen mode flag
+ int listen_timeout; ///< listen timeout to wait for new connections
+ int nb_streamid; ///< The next stream id to return on createStream calls
+ double duration; ///< Duration of the stream in seconds as returned by the server (only valid if non-zero)
+ char username[50];
+ char password[50];
+ char auth_params[500];
+ int do_reconnect;
+ int auth_tried;
} RTMPContext;
#define PLAYER_KEY_OPEN_PART_LEN 30 ///< length of partial key used for first client digest signing
0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE
};
+static int handle_chunk_size(URLContext *s, RTMPPacket *pkt);
+
static int add_tracked_method(RTMPContext *rt, const char *name, int id)
{
- void *ptr;
+ int err;
if (rt->nb_tracked_methods + 1 > rt->tracked_methods_size) {
rt->tracked_methods_size = (rt->nb_tracked_methods + 1) * 2;
- ptr = av_realloc(rt->tracked_methods,
- rt->tracked_methods_size * sizeof(*rt->tracked_methods));
- if (!ptr)
- return AVERROR(ENOMEM);
- rt->tracked_methods = ptr;
+ if ((err = av_reallocp(&rt->tracked_methods, rt->tracked_methods_size *
+ sizeof(*rt->tracked_methods))) < 0) {
+ rt->nb_tracked_methods = 0;
+ rt->tracked_methods_size = 0;
+ return err;
+ }
}
rt->tracked_methods[rt->nb_tracked_methods].name = av_strdup(name);
int ret;
int i;
- bytestream2_init(&gbc, pkt->data + offset, pkt->data_size - offset);
+ bytestream2_init(&gbc, pkt->data + offset, pkt->size - offset);
if ((ret = ff_amf_read_number(&gbc, &pkt_id)) < 0)
return ret;
for (i = 0; i < rt->nb_tracked_methods; i ++)
av_free(rt->tracked_methods[i].name);
av_free(rt->tracked_methods);
+ rt->tracked_methods = NULL;
+ rt->tracked_methods_size = 0;
+ rt->nb_tracked_methods = 0;
}
static int rtmp_send_packet(RTMPContext *rt, RTMPPacket *pkt, int track)
double pkt_id;
int len;
- bytestream2_init(&gbc, pkt->data, pkt->data_size);
+ bytestream2_init(&gbc, pkt->data, pkt->size);
if ((ret = ff_amf_read_string(&gbc, name, sizeof(name), &len)) < 0)
goto fail;
}
ret = ff_rtmp_packet_write(rt->stream, pkt, rt->out_chunk_size,
- rt->prev_pkt[1]);
+ &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
fail:
ff_rtmp_packet_destroy(pkt);
return ret;
*value = '\0';
value++;
- if (!field || !value)
- goto fail;
-
ff_amf_write_field_name(p, field);
} else {
goto fail;
ff_amf_write_number(&p, ++rt->nb_invokes);
ff_amf_write_object_start(&p);
ff_amf_write_field_name(&p, "app");
- ff_amf_write_string(&p, rt->app);
+ ff_amf_write_string2(&p, rt->app, rt->auth_params);
if (!rt->is_input) {
ff_amf_write_field_name(&p, "type");
}
ff_amf_write_field_name(&p, "tcUrl");
- ff_amf_write_string(&p, rt->tcurl);
+ ff_amf_write_string2(&p, rt->tcurl, rt->auth_params);
if (rt->is_input) {
ff_amf_write_field_name(&p, "fpad");
ff_amf_write_bool(&p, 0);
char *param = rt->conn;
// Write arbitrary AMF data to the Connect message.
- while (param != NULL) {
+ while (param) {
char *sep;
param += strspn(param, " ");
if (!*param)
}
}
- pkt.data_size = p - pkt.data;
+ pkt.size = p - pkt.data;
return rtmp_send_packet(rt, &pkt, 1);
}
+/**
+ * Listen-mode helper: read and validate the client's "connect" invoke,
+ * then send the standard server replies — Window Acknowledgement Size,
+ * Peer Bandwidth, a Stream Begin ping, the outgoing chunk size, the
+ * "_result" NetConnection.Connect.Success invoke and "onBWDone".
+ *
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+static int read_connect(URLContext *s, RTMPContext *rt)
+{
+    RTMPPacket pkt = { 0 };
+    uint8_t *p;
+    const uint8_t *cp;
+    int ret;
+    char command[64];
+    int stringlen;
+    double seqnum;
+    uint8_t tmpstr[256];
+    GetByteContext gbc;
+
+    if ((ret = ff_rtmp_packet_read(rt->stream, &pkt, rt->in_chunk_size,
+                                   &rt->prev_pkt[0], &rt->nb_prev_pkt[0])) < 0)
+        return ret;
+
+    // The client may announce a new chunk size before sending "connect";
+    // apply it, then read the next packet, which must be the invoke itself.
+    if (pkt.type == RTMP_PT_CHUNK_SIZE) {
+        if ((ret = handle_chunk_size(s, &pkt)) < 0)
+            return ret;
+        // NOTE(review): pkt is not destroyed when handle_chunk_size() fails
+        // above — confirm whether that error path leaks the packet.
+        ff_rtmp_packet_destroy(&pkt);
+        if ((ret = ff_rtmp_packet_read(rt->stream, &pkt, rt->in_chunk_size,
+                                       &rt->prev_pkt[0], &rt->nb_prev_pkt[0])) < 0)
+            return ret;
+    }
+
+    cp = pkt.data;
+    bytestream2_init(&gbc, cp, pkt.size);
+    if (ff_amf_read_string(&gbc, command, sizeof(command), &stringlen)) {
+        av_log(s, AV_LOG_ERROR, "Unable to read command string\n");
+        ff_rtmp_packet_destroy(&pkt);
+        return AVERROR_INVALIDDATA;
+    }
+    if (strcmp(command, "connect")) {
+        av_log(s, AV_LOG_ERROR, "Expecting connect, got %s\n", command);
+        ff_rtmp_packet_destroy(&pkt);
+        return AVERROR_INVALIDDATA;
+    }
+    // A missing sequence number or "app" field is tolerated with a warning;
+    // only a malformed command string or wrong command is fatal.
+    ret = ff_amf_read_number(&gbc, &seqnum);
+    if (ret)
+        av_log(s, AV_LOG_WARNING, "SeqNum not found\n");
+    /* Here one could parse an AMF Object with data as flashVers and others. */
+    ret = ff_amf_get_field_value(gbc.buffer,
+                                 gbc.buffer + bytestream2_get_bytes_left(&gbc),
+                                 "app", tmpstr, sizeof(tmpstr));
+    if (ret)
+        av_log(s, AV_LOG_WARNING, "App field not found in connect\n");
+    if (!ret && strcmp(tmpstr, rt->app))
+        av_log(s, AV_LOG_WARNING, "App field don't match up: %s <-> %s\n",
+               tmpstr, rt->app);
+    ff_rtmp_packet_destroy(&pkt);
+
+    // Send Window Acknowledgement Size (as defined in specification)
+    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL,
+                                     RTMP_PT_SERVER_BW, 0, 4)) < 0)
+        return ret;
+    p = pkt.data;
+    bytestream_put_be32(&p, rt->server_bw);
+    pkt.size = p - pkt.data;
+    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
+                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
+    ff_rtmp_packet_destroy(&pkt);
+    if (ret < 0)
+        return ret;
+    // Send Peer Bandwidth
+    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL,
+                                     RTMP_PT_CLIENT_BW, 0, 5)) < 0)
+        return ret;
+    p = pkt.data;
+    bytestream_put_be32(&p, rt->server_bw);
+    bytestream_put_byte(&p, 2); // dynamic
+    pkt.size = p - pkt.data;
+    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
+                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
+    ff_rtmp_packet_destroy(&pkt);
+    if (ret < 0)
+        return ret;
+
+    // Ping request
+    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL,
+                                     RTMP_PT_PING, 0, 6)) < 0)
+        return ret;
+
+    p = pkt.data;
+    bytestream_put_be16(&p, 0); // 0 -> Stream Begin
+    bytestream_put_be32(&p, 0);
+    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
+                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
+    ff_rtmp_packet_destroy(&pkt);
+    if (ret < 0)
+        return ret;
+
+    // Chunk size
+    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL,
+                                     RTMP_PT_CHUNK_SIZE, 0, 4)) < 0)
+        return ret;
+
+    p = pkt.data;
+    bytestream_put_be32(&p, rt->out_chunk_size);
+    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
+                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
+    ff_rtmp_packet_destroy(&pkt);
+    if (ret < 0)
+        return ret;
+
+    // Send _result NetConnection.Connect.Success to connect
+    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL,
+                                     RTMP_PT_INVOKE, 0,
+                                     RTMP_PKTDATA_DEFAULT_SIZE)) < 0)
+        return ret;
+
+    p = pkt.data;
+    ff_amf_write_string(&p, "_result");
+    // Echo the client's transaction id back in the reply.
+    ff_amf_write_number(&p, seqnum);
+
+    ff_amf_write_object_start(&p);
+    ff_amf_write_field_name(&p, "fmsVer");
+    ff_amf_write_string(&p, "FMS/3,0,1,123");
+    ff_amf_write_field_name(&p, "capabilities");
+    ff_amf_write_number(&p, 31);
+    ff_amf_write_object_end(&p);
+
+    ff_amf_write_object_start(&p);
+    ff_amf_write_field_name(&p, "level");
+    ff_amf_write_string(&p, "status");
+    ff_amf_write_field_name(&p, "code");
+    ff_amf_write_string(&p, "NetConnection.Connect.Success");
+    ff_amf_write_field_name(&p, "description");
+    ff_amf_write_string(&p, "Connection succeeded.");
+    ff_amf_write_field_name(&p, "objectEncoding");
+    ff_amf_write_number(&p, 0);
+    ff_amf_write_object_end(&p);
+
+    pkt.size = p - pkt.data;
+    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
+                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
+    ff_rtmp_packet_destroy(&pkt);
+    if (ret < 0)
+        return ret;
+
+    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL,
+                                     RTMP_PT_INVOKE, 0, 30)) < 0)
+        return ret;
+    p = pkt.data;
+    ff_amf_write_string(&p, "onBWDone");
+    ff_amf_write_number(&p, 0);
+    ff_amf_write_null(&p);
+    ff_amf_write_number(&p, 8192);
+    pkt.size = p - pkt.data;
+    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
+                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
+    ff_rtmp_packet_destroy(&pkt);
+
+    return ret;
+}
+
/**
* Generate 'releaseStream' call and send it to the server. It should make
* the server release some channel for media streams.
ff_amf_write_null(&p);
ff_amf_write_string(&p, rt->playpath);
- return rtmp_send_packet(rt, &pkt, 0);
+ return rtmp_send_packet(rt, &pkt, 1);
}
/**
ff_amf_write_null(&p);
ff_amf_write_string(&p, rt->playpath);
- return rtmp_send_packet(rt, &pkt, 0);
+ return rtmp_send_packet(rt, &pkt, 1);
}
/**
ff_amf_write_string(&p, "deleteStream");
ff_amf_write_number(&p, ++rt->nb_invokes);
ff_amf_write_null(&p);
- ff_amf_write_number(&p, rt->main_channel_id);
+ ff_amf_write_number(&p, rt->stream_id);
return rtmp_send_packet(rt, &pkt, 0);
}
+/**
+ * Generate 'getStreamLength' call and send it to the server. If the server
+ * knows the duration of the selected stream, it will reply with the duration
+ * in seconds.
+ *
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+static int gen_get_stream_length(URLContext *s, RTMPContext *rt)
+{
+    RTMPPacket pkt;
+    uint8_t *p;
+    int ret;
+
+    // 31 bytes cover the fixed AMF prefix; the playpath string is variable.
+    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SOURCE_CHANNEL, RTMP_PT_INVOKE,
+                                     0, 31 + strlen(rt->playpath))) < 0)
+        return ret;
+
+    p = pkt.data;
+    ff_amf_write_string(&p, "getStreamLength");
+    ff_amf_write_number(&p, ++rt->nb_invokes);
+    ff_amf_write_null(&p);
+    ff_amf_write_string(&p, rt->playpath);
+
+    // Track the invoke so the "_result" reply can be matched to this call.
+    return rtmp_send_packet(rt, &pkt, 1);
+}
+
/**
* Generate client buffer time and send it to the server.
*/
p = pkt.data;
bytestream_put_be16(&p, 3);
- bytestream_put_be32(&p, rt->main_channel_id);
+ bytestream_put_be32(&p, rt->stream_id);
bytestream_put_be32(&p, rt->client_buffer_time);
return rtmp_send_packet(rt, &pkt, 0);
av_log(s, AV_LOG_DEBUG, "Sending play command for '%s'\n", rt->playpath);
- if ((ret = ff_rtmp_packet_create(&pkt, RTMP_VIDEO_CHANNEL, RTMP_PT_INVOKE,
+ if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SOURCE_CHANNEL, RTMP_PT_INVOKE,
0, 29 + strlen(rt->playpath))) < 0)
return ret;
- pkt.extra = rt->main_channel_id;
+ pkt.extra = rt->stream_id;
p = pkt.data;
ff_amf_write_string(&p, "play");
ff_amf_write_number(&p, ++rt->nb_invokes);
ff_amf_write_null(&p);
ff_amf_write_string(&p, rt->playpath);
- ff_amf_write_number(&p, rt->live);
+ ff_amf_write_number(&p, rt->live * 1000);
+
+ return rtmp_send_packet(rt, &pkt, 1);
+}
+
+/**
+ * Generate a 'seek' call for the given timestamp (in milliseconds) and send
+ * it to the server.
+ *
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+static int gen_seek(URLContext *s, RTMPContext *rt, int64_t timestamp)
+{
+    RTMPPacket pkt;
+    uint8_t *p;
+    int ret;
+
+    av_log(s, AV_LOG_DEBUG, "Sending seek command for timestamp %"PRId64"\n",
+           timestamp);
+
+    if ((ret = ff_rtmp_packet_create(&pkt, 3, RTMP_PT_INVOKE, 0, 26)) < 0)
+        return ret;
+
+    pkt.extra = rt->stream_id;
+
+    p = pkt.data;
+    ff_amf_write_string(&p, "seek");
+    ff_amf_write_number(&p, 0); //no tracking back responses
+    ff_amf_write_null(&p); //as usual, the first null param
+    ff_amf_write_number(&p, timestamp); //where we want to jump
+
+    return rtmp_send_packet(rt, &pkt, 1);
+}
+
+/**
+ * Generate a pause packet that either pauses or unpauses the current stream.
+ *
+ * @param pause     non-zero to pause, zero to unpause
+ * @param timestamp position (milliseconds) at which the stream is paused
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+static int gen_pause(URLContext *s, RTMPContext *rt, int pause, uint32_t timestamp)
+{
+    RTMPPacket pkt;
+    uint8_t *p;
+    int ret;
+
+    av_log(s, AV_LOG_DEBUG, "Sending pause command for timestamp %d\n",
+           timestamp);
+
+    if ((ret = ff_rtmp_packet_create(&pkt, 3, RTMP_PT_INVOKE, 0, 29)) < 0)
+        return ret;
+
+    pkt.extra = rt->stream_id;
+
+    p = pkt.data;
+    ff_amf_write_string(&p, "pause");
+    ff_amf_write_number(&p, 0); //no tracking back responses
+    ff_amf_write_null(&p); //as usual, the first null param
+    ff_amf_write_bool(&p, pause); // pause or unpause
+    ff_amf_write_number(&p, timestamp); //where we pause the stream
     return rtmp_send_packet(rt, &pkt, 1);
 }
0, 30 + strlen(rt->playpath))) < 0)
return ret;
- pkt.extra = rt->main_channel_id;
+ pkt.extra = rt->stream_id;
p = pkt.data;
ff_amf_write_string(&p, "publish");
uint8_t *p;
int ret;
- if (ppkt->data_size < 6) {
+ if (ppkt->size < 6) {
av_log(s, AV_LOG_ERROR, "Too short ping packet (%d)\n",
- ppkt->data_size);
+ ppkt->size);
return AVERROR_INVALIDDATA;
}
uint8_t hmac_buf[64+32] = {0};
int i;
- sha = av_mallocz(av_sha_size);
+ sha = av_sha_alloc();
if (!sha)
return AVERROR(ENOMEM);
return 0;
}
+#if CONFIG_ZLIB
+/**
+ * Inflate a zlib-compressed SWF player body, appending the decompressed
+ * bytes to *out_data (grown with av_realloc) and adding to *out_size.
+ * The caller owns *out_data and must initialize *out_size before the call.
+ *
+ * NOTE(review): on success this may return a positive zlib status
+ * (Z_STREAM_END); the caller only treats negative values as errors.
+ */
+static int rtmp_uncompress_swfplayer(uint8_t *in_data, int64_t in_size,
+                                     uint8_t **out_data, int64_t *out_size)
+{
+    z_stream zs = { 0 };
+    void *ptr;
+    int size;
+    int ret = 0;
+
+    zs.avail_in = in_size;
+    zs.next_in  = in_data;
+    ret = inflateInit(&zs);
+    if (ret != Z_OK)
+        return AVERROR_UNKNOWN;
+
+    do {
+        uint8_t tmp_buf[16384];
+
+        zs.avail_out = sizeof(tmp_buf);
+        zs.next_out  = tmp_buf;
+
+        ret = inflate(&zs, Z_NO_FLUSH);
+        if (ret != Z_OK && ret != Z_STREAM_END) {
+            ret = AVERROR_UNKNOWN;
+            goto fail;
+        }
+
+        // Append whatever this iteration produced to the output buffer.
+        size = sizeof(tmp_buf) - zs.avail_out;
+        if (!(ptr = av_realloc(*out_data, *out_size + size))) {
+            ret = AVERROR(ENOMEM);
+            goto fail;
+        }
+        *out_data = ptr;
+
+        memcpy(*out_data + *out_size, tmp_buf, size);
+        *out_size += size;
+    } while (zs.avail_out == 0); // full buffer means more input may remain
+
+fail:
+    inflateEnd(&zs);
+    return ret;
+}
+#endif
+
+/**
+ * Download the SWF player referenced by rt->swfverify, decompress it if it
+ * is a compressed ("CWS") file, and store its size and HMAC-SHA256 digest
+ * (keyed with "Genuine Adobe Flash Player 001") in the rtmp_swfhash option
+ * and rt->swfsize for SWFVerification.
+ *
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+static int rtmp_calc_swfhash(URLContext *s)
+{
+    RTMPContext *rt = s->priv_data;
+    uint8_t *in_data = NULL, *out_data = NULL, *swfdata;
+    int64_t in_size, out_size;
+    URLContext *stream;
+    char swfhash[32]; // SHA-256 digest length
+    int swfsize;
+    int ret = 0;
+
+    /* Get the SWF player file. */
+    if ((ret = ffurl_open(&stream, rt->swfverify, AVIO_FLAG_READ,
+                          &s->interrupt_callback, NULL)) < 0) {
+        av_log(s, AV_LOG_ERROR, "Cannot open connection %s.\n", rt->swfverify);
+        // NOTE(review): the fail path calls ffurl_close(stream); this relies
+        // on ffurl_open leaving stream NULL on failure — confirm.
+        goto fail;
+    }
+
+    if ((in_size = ffurl_seek(stream, 0, AVSEEK_SIZE)) < 0) {
+        ret = AVERROR(EIO);
+        goto fail;
+    }
+
+    if (!(in_data = av_malloc(in_size))) {
+        ret = AVERROR(ENOMEM);
+        goto fail;
+    }
+
+    if ((ret = ffurl_read_complete(stream, in_data, in_size)) < 0)
+        goto fail;
+
+    if (in_size < 3) {
+        ret = AVERROR_INVALIDDATA;
+        goto fail;
+    }
+
+    if (!memcmp(in_data, "CWS", 3)) {
+        /* Decompress the SWF player file using Zlib. */
+        if (!(out_data = av_malloc(8))) {
+            ret = AVERROR(ENOMEM);
+            goto fail;
+        }
+        // The hash is computed over an uncompressed file: patch the magic
+        // from "CWS" to "FWS" and keep the 8-byte header verbatim.
+        *in_data = 'F'; // magic stuff
+        memcpy(out_data, in_data, 8);
+        out_size = 8;
+
+#if CONFIG_ZLIB
+        if ((ret = rtmp_uncompress_swfplayer(in_data + 8, in_size - 8,
+                                             &out_data, &out_size)) < 0)
+            goto fail;
+#else
+        av_log(s, AV_LOG_ERROR,
+               "Zlib is required for decompressing the SWF player file.\n");
+        ret = AVERROR(EINVAL);
+        goto fail;
+#endif
+        swfsize = out_size;
+        swfdata = out_data;
+    } else {
+        swfsize = in_size;
+        swfdata = in_data;
+    }
+
+    /* Compute the SHA256 hash of the SWF player file. */
+    if ((ret = ff_rtmp_calc_digest(swfdata, swfsize, 0,
+                                   "Genuine Adobe Flash Player 001", 30,
+                                   swfhash)) < 0)
+        goto fail;
+
+    /* Set SWFVerification parameters. */
+    av_opt_set_bin(rt, "rtmp_swfhash", swfhash, 32, 0);
+    rt->swfsize = swfsize;
+
+fail:
+    av_freep(&in_data);
+    av_freep(&out_data);
+    ffurl_close(stream);
+    return ret;
+}
+
/**
* Perform handshake with the server by means of exchanging pseudorandom data
* signed with HMAC-SHA2 digest.
for (i = 9; i <= RTMP_HANDSHAKE_PACKET_SIZE; i++)
tosend[i] = av_lfg_get(&rnd) >> 24;
- if (rt->encrypted && CONFIG_FFRTMPCRYPT_PROTOCOL) {
+ if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
/* When the client wants to use RTMPE, we have to change the command
* byte to 0x06 which means to use encrypted data and we have to set
* the flash version to at least 9.0.115.0. */
if (ret < 0)
return ret;
- if (rt->encrypted && CONFIG_FFRTMPCRYPT_PROTOCOL) {
+ if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
/* Compute the shared secret key sent by the server and initialize
* the RC4 encryption. */
if ((ret = ff_rtmpe_compute_secret_key(rt->stream, serverdata + 1,
if (ret < 0)
return ret;
- if (rt->encrypted && CONFIG_FFRTMPCRYPT_PROTOCOL) {
+ if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
/* Encrypt the signature to be send to the server. */
ff_rtmpe_encrypt_sig(rt->stream, tosend +
RTMP_HANDSHAKE_PACKET_SIZE - 32, digest,
RTMP_HANDSHAKE_PACKET_SIZE)) < 0)
return ret;
- if (rt->encrypted && CONFIG_FFRTMPCRYPT_PROTOCOL) {
+ if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
/* Set RC4 keys for encryption and update the keystreams. */
if ((ret = ff_rtmpe_update_keystream(rt->stream)) < 0)
return ret;
}
} else {
- if (rt->encrypted && CONFIG_FFRTMPCRYPT_PROTOCOL) {
+ if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
/* Compute the shared secret key sent by the server and initialize
* the RC4 encryption. */
if ((ret = ff_rtmpe_compute_secret_key(rt->stream, serverdata + 1,
RTMP_HANDSHAKE_PACKET_SIZE)) < 0)
return ret;
- if (rt->encrypted && CONFIG_FFRTMPCRYPT_PROTOCOL) {
+ if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
/* Set RC4 keys for encryption and update the keystreams. */
if ((ret = ff_rtmpe_update_keystream(rt->stream)) < 0)
return ret;
return 0;
}
+/**
+ * Receive one RTMP handshake packet (C1/C2) and extract its first two
+ * big-endian 32-bit fields (epoch timestamp and the following word).
+ *
+ * NOTE(review): the size parameter is currently unused; the read is always
+ * RTMP_HANDSHAKE_PACKET_SIZE bytes, so arraydata must be at least that big.
+ *
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+static int rtmp_receive_hs_packet(RTMPContext* rt, uint32_t *first_int,
+                                  uint32_t *second_int, char *arraydata,
+                                  int size)
+{
+    int inoutsize;
+
+    inoutsize = ffurl_read_complete(rt->stream, arraydata,
+                                    RTMP_HANDSHAKE_PACKET_SIZE);
+    if (inoutsize <= 0)
+        return AVERROR(EIO);
+    if (inoutsize != RTMP_HANDSHAKE_PACKET_SIZE) {
+        av_log(rt, AV_LOG_ERROR, "Erroneous Message size %d"
+               " not following standard\n", (int)inoutsize);
+        return AVERROR(EINVAL);
+    }
+
+    *first_int  = AV_RB32(arraydata);
+    *second_int = AV_RB32(arraydata + 4);
+    return 0;
+}
+
+/**
+ * Send one RTMP handshake packet (S1/S2): patch the first two big-endian
+ * 32-bit fields into arraydata, then write the whole buffer.
+ *
+ * NOTE(review): the size parameter is currently unused; the write is always
+ * RTMP_HANDSHAKE_PACKET_SIZE bytes.
+ *
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+static int rtmp_send_hs_packet(RTMPContext* rt, uint32_t first_int,
+                               uint32_t second_int, char *arraydata, int size)
+{
+    int inoutsize;
+
+    AV_WB32(arraydata, first_int);
+    AV_WB32(arraydata + 4, second_int);
+    inoutsize = ffurl_write(rt->stream, arraydata,
+                            RTMP_HANDSHAKE_PACKET_SIZE);
+    if (inoutsize != RTMP_HANDSHAKE_PACKET_SIZE) {
+        av_log(rt, AV_LOG_ERROR, "Unable to write answer\n");
+        return AVERROR(EIO);
+    }
+
+    return 0;
+}
+
+/**
+ * Perform the server side of the RTMP handshake: receive C0/C1 from the
+ * client, send S0/S1/S2, then receive and sanity-check C2 (epoch and random
+ * payload mismatches are only warned about, not treated as errors).
+ *
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+static int rtmp_server_handshake(URLContext *s, RTMPContext *rt)
+{
+    uint8_t buffer[RTMP_HANDSHAKE_PACKET_SIZE];
+    uint32_t hs_epoch;
+    uint32_t hs_my_epoch;
+    uint8_t hs_c1[RTMP_HANDSHAKE_PACKET_SIZE];
+    uint8_t hs_s1[RTMP_HANDSHAKE_PACKET_SIZE];
+    uint32_t zeroes;
+    uint32_t temp = 0;
+    int randomidx = 0;
+    int inoutsize = 0;
+    int ret;
+
+    inoutsize = ffurl_read_complete(rt->stream, buffer, 1);       // Receive C0
+    if (inoutsize <= 0) {
+        av_log(s, AV_LOG_ERROR, "Unable to read handshake\n");
+        return AVERROR(EIO);
+    }
+    // Check Version
+    if (buffer[0] != 3) {
+        av_log(s, AV_LOG_ERROR, "RTMP protocol version mismatch\n");
+        return AVERROR(EIO);
+    }
+    // Echo the same version byte back as S0.
+    if (ffurl_write(rt->stream, buffer, 1) <= 0) {                 // Send S0
+        av_log(s, AV_LOG_ERROR,
+               "Unable to write answer - RTMP S0\n");
+        return AVERROR(EIO);
+    }
+    /* Receive C1 */
+    ret = rtmp_receive_hs_packet(rt, &hs_epoch, &zeroes, hs_c1,
+                                 RTMP_HANDSHAKE_PACKET_SIZE);
+    if (ret) {
+        av_log(s, AV_LOG_ERROR, "RTMP Handshake C1 Error\n");
+        return ret;
+    }
+    /* Send S1 */
+    /* By now same epoch will be sent */
+    hs_my_epoch = hs_epoch;
+    /* Generate random */
+    // Bytes 0-7 (epoch + zero word) are filled in by rtmp_send_hs_packet.
+    for (randomidx = 8; randomidx < (RTMP_HANDSHAKE_PACKET_SIZE);
+         randomidx += 4)
+        AV_WB32(hs_s1 + randomidx, av_get_random_seed());
+
+    ret = rtmp_send_hs_packet(rt, hs_my_epoch, 0, hs_s1,
+                              RTMP_HANDSHAKE_PACKET_SIZE);
+    if (ret) {
+        av_log(s, AV_LOG_ERROR, "RTMP Handshake S1 Error\n");
+        return ret;
+    }
+    /* Send S2: echo the client's C1 back with its own epoch. */
+    ret = rtmp_send_hs_packet(rt, hs_epoch, 0, hs_c1,
+                              RTMP_HANDSHAKE_PACKET_SIZE);
+    if (ret) {
+        av_log(s, AV_LOG_ERROR, "RTMP Handshake S2 Error\n");
+        return ret;
+    }
+    /* Receive C2 */
+    ret = rtmp_receive_hs_packet(rt, &temp, &zeroes, buffer,
+                                 RTMP_HANDSHAKE_PACKET_SIZE);
+    if (ret) {
+        av_log(s, AV_LOG_ERROR, "RTMP Handshake C2 Error\n");
+        return ret;
+    }
+    // C2 should echo our S1; mismatches are tolerated with warnings.
+    if (temp != hs_my_epoch)
+        av_log(s, AV_LOG_WARNING,
+               "Erroneous C2 Message epoch does not match up with C1 epoch\n");
+    if (memcmp(buffer + 8, hs_s1 + 8,
+               RTMP_HANDSHAKE_PACKET_SIZE - 8))
+        av_log(s, AV_LOG_WARNING,
+               "Erroneous C2 Message random does not match up\n");
+
+    return 0;
+}
+
static int handle_chunk_size(URLContext *s, RTMPPacket *pkt)
{
RTMPContext *rt = s->priv_data;
int ret;
- if (pkt->data_size < 4) {
+ if (pkt->size < 4) {
av_log(s, AV_LOG_ERROR,
"Too short chunk size change packet (%d)\n",
- pkt->data_size);
+ pkt->size);
return AVERROR_INVALIDDATA;
}
/* Send the same chunk size change packet back to the server,
* setting the outgoing chunk size to the same as the incoming one. */
if ((ret = ff_rtmp_packet_write(rt->stream, pkt, rt->out_chunk_size,
- rt->prev_pkt[1])) < 0)
+ &rt->prev_pkt[1], &rt->nb_prev_pkt[1])) < 0)
return ret;
rt->out_chunk_size = AV_RB32(pkt->data);
}
RTMPContext *rt = s->priv_data;
int t, ret;
- if (pkt->data_size < 2) {
+ if (pkt->size < 2) {
av_log(s, AV_LOG_ERROR, "Too short ping packet (%d)\n",
- pkt->data_size);
+ pkt->size);
return AVERROR_INVALIDDATA;
}
{
RTMPContext *rt = s->priv_data;
- if (pkt->data_size < 4) {
+ if (pkt->size < 4) {
av_log(s, AV_LOG_ERROR,
"Client bandwidth report packet is less than 4 bytes long (%d)\n",
- pkt->data_size);
+ pkt->size);
return AVERROR_INVALIDDATA;
}
{
RTMPContext *rt = s->priv_data;
- if (pkt->data_size < 4) {
+ if (pkt->size < 4) {
av_log(s, AV_LOG_ERROR,
"Too short server bandwidth report packet (%d)\n",
- pkt->data_size);
+ pkt->size);
return AVERROR_INVALIDDATA;
}
return 0;
}
+/**
+ * Compute the Adobe-style connect authentication response:
+ * base64(md5(base64(md5(user+salt+password)) + opaque/challenge + challenge2)),
+ * and store the resulting query string in rt->auth_params.
+ *
+ * @param opaque    server-supplied opaque token, may be NULL
+ * @param challenge server-supplied challenge, used only when opaque is NULL
+ * @return 0 on success, AVERROR(ENOMEM) if the MD5 context allocation fails
+ */
+static int do_adobe_auth(RTMPContext *rt, const char *user, const char *salt,
+                         const char *opaque, const char *challenge)
+{
+    uint8_t hash[16];
+    char hashstr[AV_BASE64_SIZE(sizeof(hash))], challenge2[10];
+    struct AVMD5 *md5 = av_md5_alloc();
+    if (!md5)
+        return AVERROR(ENOMEM);
+
+    // Client-side challenge: 8 random hex digits.
+    snprintf(challenge2, sizeof(challenge2), "%08x", av_get_random_seed());
+
+    av_md5_init(md5);
+    av_md5_update(md5, user, strlen(user));
+    av_md5_update(md5, salt, strlen(salt));
+    av_md5_update(md5, rt->password, strlen(rt->password));
+    av_md5_final(md5, hash);
+    av_base64_encode(hashstr, sizeof(hashstr), hash,
+                     sizeof(hash));
+    av_md5_init(md5);
+    av_md5_update(md5, hashstr, strlen(hashstr));
+    if (opaque)
+        av_md5_update(md5, opaque, strlen(opaque));
+    else if (challenge)
+        av_md5_update(md5, challenge, strlen(challenge));
+    av_md5_update(md5, challenge2, strlen(challenge2));
+    av_md5_final(md5, hash);
+    av_base64_encode(hashstr, sizeof(hashstr), hash,
+                     sizeof(hash));
+    snprintf(rt->auth_params, sizeof(rt->auth_params),
+             "?authmod=%s&user=%s&challenge=%s&response=%s",
+             "adobe", user, challenge2, hashstr);
+    if (opaque)
+        av_strlcatf(rt->auth_params, sizeof(rt->auth_params),
+                    "&opaque=%s", opaque);
+
+    av_free(md5);
+    return 0;
+}
+
+/**
+ * Compute the Limelight (llnw) connect authentication response using an
+ * HTTP-Digest-style scheme (realm "live", method "publish", qop "auth"),
+ * and store the resulting query string in rt->auth_params.
+ *
+ * NOTE(review): nonce may be NULL here; it is skipped in the digest but
+ * still printed with %s into auth_params — confirm callers always supply it.
+ *
+ * @return 0 on success, AVERROR(ENOMEM) if the MD5 context allocation fails
+ */
+static int do_llnw_auth(RTMPContext *rt, const char *user, const char *nonce)
+{
+    uint8_t hash[16];
+    char hashstr1[33], hashstr2[33];
+    const char *realm = "live";
+    const char *method = "publish";
+    const char *qop = "auth";
+    const char *nc = "00000001";
+    char cnonce[10];
+    struct AVMD5 *md5 = av_md5_alloc();
+    if (!md5)
+        return AVERROR(ENOMEM);
+
+    // Client nonce: 8 random hex digits.
+    snprintf(cnonce, sizeof(cnonce), "%08x", av_get_random_seed());
+
+    // HA1 = md5(user:realm:password)
+    av_md5_init(md5);
+    av_md5_update(md5, user, strlen(user));
+    av_md5_update(md5, ":", 1);
+    av_md5_update(md5, realm, strlen(realm));
+    av_md5_update(md5, ":", 1);
+    av_md5_update(md5, rt->password, strlen(rt->password));
+    av_md5_final(md5, hash);
+    ff_data_to_hex(hashstr1, hash, 16, 1);
+    hashstr1[32] = '\0';
+
+    // HA2 = md5(method:/app[/_definst_])
+    av_md5_init(md5);
+    av_md5_update(md5, method, strlen(method));
+    av_md5_update(md5, ":/", 2);
+    av_md5_update(md5, rt->app, strlen(rt->app));
+    if (!strchr(rt->app, '/'))
+        av_md5_update(md5, "/_definst_", strlen("/_definst_"));
+    av_md5_final(md5, hash);
+    ff_data_to_hex(hashstr2, hash, 16, 1);
+    hashstr2[32] = '\0';
+
+    // response = md5(HA1:nonce:nc:cnonce:qop:HA2)
+    av_md5_init(md5);
+    av_md5_update(md5, hashstr1, strlen(hashstr1));
+    av_md5_update(md5, ":", 1);
+    if (nonce)
+        av_md5_update(md5, nonce, strlen(nonce));
+    av_md5_update(md5, ":", 1);
+    av_md5_update(md5, nc, strlen(nc));
+    av_md5_update(md5, ":", 1);
+    av_md5_update(md5, cnonce, strlen(cnonce));
+    av_md5_update(md5, ":", 1);
+    av_md5_update(md5, qop, strlen(qop));
+    av_md5_update(md5, ":", 1);
+    av_md5_update(md5, hashstr2, strlen(hashstr2));
+    av_md5_final(md5, hash);
+    ff_data_to_hex(hashstr1, hash, 16, 1);
+
+    snprintf(rt->auth_params, sizeof(rt->auth_params),
+             "?authmod=%s&user=%s&nonce=%s&cnonce=%s&nc=%s&response=%s",
+             "llnw", user, nonce, cnonce, nc, hashstr1);
+
+    av_free(md5);
+    return 0;
+}
+
+/**
+ * Parse the error description returned for a failed "connect" invoke.
+ * If it advertises a supported authentication mode (adobe or llnw), build
+ * the authentication parameters in rt->auth_params so the caller can retry
+ * the connection once; otherwise report the failure.
+ *
+ * @param desc error description string from the server
+ * @return 0 if a retry with rt->auth_params makes sense,
+ *         AVERROR_UNKNOWN (or another negative code) otherwise
+ */
+static int handle_connect_error(URLContext *s, const char *desc)
+{
+    RTMPContext *rt = s->priv_data;
+    char buf[300], *ptr, authmod[15];
+    int i = 0, ret = 0;
+    const char *user = "", *salt = "", *opaque = NULL,
+               *challenge = NULL, *cptr = NULL, *nonce = NULL;
+
+    if (!(cptr = strstr(desc, "authmod=adobe")) &&
+        !(cptr = strstr(desc, "authmod=llnw"))) {
+        av_log(s, AV_LOG_ERROR,
+               "Unknown connect error (unsupported authentication method?)\n");
+        return AVERROR_UNKNOWN;
+    }
+    // Copy the authmod token (up to the next space) into authmod[].
+    cptr += strlen("authmod=");
+    while (*cptr && *cptr != ' ' && i < sizeof(authmod) - 1)
+        authmod[i++] = *cptr++;
+    authmod[i] = '\0';
+
+    if (!rt->username[0] || !rt->password[0]) {
+        av_log(s, AV_LOG_ERROR, "No credentials set\n");
+        return AVERROR_UNKNOWN;
+    }
+
+    if (strstr(desc, "?reason=authfailed")) {
+        av_log(s, AV_LOG_ERROR, "Incorrect username/password\n");
+        return AVERROR_UNKNOWN;
+    } else if (strstr(desc, "?reason=nosuchuser")) {
+        av_log(s, AV_LOG_ERROR, "Incorrect username\n");
+        return AVERROR_UNKNOWN;
+    }
+
+    // Only one authenticated retry is attempted; a second failure is final.
+    if (rt->auth_tried) {
+        av_log(s, AV_LOG_ERROR, "Authentication failed\n");
+        return AVERROR_UNKNOWN;
+    }
+
+    rt->auth_params[0] = '\0';
+
+    // First stage: the server only asked who we are; resend with the user
+    // name so it can reply with salt/challenge parameters.
+    if (strstr(desc, "code=403 need auth")) {
+        snprintf(rt->auth_params, sizeof(rt->auth_params),
+                 "?authmod=%s&user=%s", authmod, rt->username);
+        return 0;
+    }
+
+    if (!(cptr = strstr(desc, "?reason=needauth"))) {
+        av_log(s, AV_LOG_ERROR, "No auth parameters found\n");
+        return AVERROR_UNKNOWN;
+    }
+
+    // Tokenize the "key=value&key=value" tail in a local copy of desc.
+    av_strlcpy(buf, cptr + 1, sizeof(buf));
+    ptr = buf;
+
+    while (ptr) {
+        char *next  = strchr(ptr, '&');
+        char *value = strchr(ptr, '=');
+        if (next)
+            *next++ = '\0';
+        if (value)
+            *value++ = '\0';
+        if (!strcmp(ptr, "user")) {
+            user = value;
+        } else if (!strcmp(ptr, "salt")) {
+            salt = value;
+        } else if (!strcmp(ptr, "opaque")) {
+            opaque = value;
+        } else if (!strcmp(ptr, "challenge")) {
+            challenge = value;
+        } else if (!strcmp(ptr, "nonce")) {
+            nonce = value;
+        }
+        ptr = next;
+    }
+
+    if (!strcmp(authmod, "adobe")) {
+        if ((ret = do_adobe_auth(rt, user, salt, opaque, challenge)) < 0)
+            return ret;
+    } else {
+        if ((ret = do_llnw_auth(rt, user, nonce)) < 0)
+            return ret;
+    }
+
+    rt->auth_tried = 1;
+    return 0;
+}
+
static int handle_invoke_error(URLContext *s, RTMPPacket *pkt)
{
- const uint8_t *data_end = pkt->data + pkt->data_size;
+ RTMPContext *rt = s->priv_data;
+ const uint8_t *data_end = pkt->data + pkt->size;
char *tracked_method = NULL;
int level = AV_LOG_ERROR;
uint8_t tmpstr[256];
if (!ff_amf_get_field_value(pkt->data + 9, data_end,
"description", tmpstr, sizeof(tmpstr))) {
- if (tracked_method && !strcmp(tracked_method, "_checkbw")) {
- /* Ignore _checkbw errors. */
+ if (tracked_method && (!strcmp(tracked_method, "_checkbw") ||
+ !strcmp(tracked_method, "releaseStream") ||
+ !strcmp(tracked_method, "FCSubscribe") ||
+ !strcmp(tracked_method, "FCPublish"))) {
+ /* Gracefully ignore Adobe-specific historical artifact errors. */
level = AV_LOG_WARNING;
ret = 0;
+ } else if (tracked_method && !strcmp(tracked_method, "getStreamLength")) {
+ level = rt->live ? AV_LOG_DEBUG : AV_LOG_WARNING;
+ ret = 0;
+ } else if (tracked_method && !strcmp(tracked_method, "connect")) {
+ ret = handle_connect_error(s, tmpstr);
+ if (!ret) {
+ rt->do_reconnect = 1;
+ level = AV_LOG_VERBOSE;
+ }
} else
- ret = -1;
+ ret = AVERROR_UNKNOWN;
av_log(s, level, "Server error: %s\n", tmpstr);
}
return ret;
}
+/**
+ * Send a Stream Begin message (user control / RTMP_PT_PING with event
+ * type 0) for the stream id currently tracked in rt->nb_streamid.
+ * Used in listen (server) mode to acknowledge publish/play requests.
+ *
+ * @return 0 on success, negative AVERROR on failure
+ */
+static int write_begin(URLContext *s)
+{
+    RTMPContext *rt = s->priv_data;
+    PutByteContext pbc;
+    RTMPPacket spkt = { 0 };
+    int ret;
+
+    // Send Stream Begin 1
+    if ((ret = ff_rtmp_packet_create(&spkt, RTMP_NETWORK_CHANNEL,
+                                     RTMP_PT_PING, 0, 6)) < 0) {
+        av_log(s, AV_LOG_ERROR, "Unable to create response packet\n");
+        return ret;
+    }
+
+    bytestream2_init_writer(&pbc, spkt.data, spkt.size);
+    bytestream2_put_be16(&pbc, 0); // 0 -> Stream Begin
+    bytestream2_put_be32(&pbc, rt->nb_streamid);
+
+    ret = ff_rtmp_packet_write(rt->stream, &spkt, rt->out_chunk_size,
+                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
+
+    ff_rtmp_packet_destroy(&spkt);
+
+    return ret;
+}
+
+/**
+ * Send an onStatus invoke on the system channel carrying the given status
+ * code (e.g. "NetStream.Publish.Start") for the given stream name.
+ *
+ * NOTE(review): the description field is always "<filename> is now
+ * published", even when the status code is a Play status — confirm whether
+ * any client depends on this text before changing it.
+ *
+ * @param pkt      the request packet being answered (its extra/stream id
+ *                 is echoed back in the reply)
+ * @param status   status code string to send
+ * @param filename stream name used in description/details fields
+ * @return 0 on success, negative AVERROR on failure
+ */
+static int write_status(URLContext *s, RTMPPacket *pkt,
+                        const char *status, const char *filename)
+{
+    RTMPContext *rt = s->priv_data;
+    RTMPPacket spkt = { 0 };
+    char statusmsg[128];
+    uint8_t *pp;
+    int ret;
+
+    if ((ret = ff_rtmp_packet_create(&spkt, RTMP_SYSTEM_CHANNEL,
+                                     RTMP_PT_INVOKE, 0,
+                                     RTMP_PKTDATA_DEFAULT_SIZE)) < 0) {
+        av_log(s, AV_LOG_ERROR, "Unable to create response packet\n");
+        return ret;
+    }
+
+    pp = spkt.data;
+    spkt.extra = pkt->extra; // reply on the same stream id as the request
+    ff_amf_write_string(&pp, "onStatus");
+    ff_amf_write_number(&pp, 0);
+    ff_amf_write_null(&pp);
+
+    ff_amf_write_object_start(&pp);
+    ff_amf_write_field_name(&pp, "level");
+    ff_amf_write_string(&pp, "status");
+    ff_amf_write_field_name(&pp, "code");
+    ff_amf_write_string(&pp, status);
+    ff_amf_write_field_name(&pp, "description");
+    snprintf(statusmsg, sizeof(statusmsg),
+             "%s is now published", filename);
+    ff_amf_write_string(&pp, statusmsg);
+    ff_amf_write_field_name(&pp, "details");
+    ff_amf_write_string(&pp, filename);
+    ff_amf_write_field_name(&pp, "clientid");
+    snprintf(statusmsg, sizeof(statusmsg), "%s", LIBAVFORMAT_IDENT);
+    ff_amf_write_string(&pp, statusmsg);
+    ff_amf_write_object_end(&pp);
+
+    spkt.size = pp - spkt.data;
+    ret = ff_rtmp_packet_write(rt->stream, &spkt, rt->out_chunk_size,
+                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
+    ff_rtmp_packet_destroy(&spkt);
+
+    return ret;
+}
+
+/**
+ * Answer a client invoke command when acting in listen (server) mode.
+ *
+ * Handles FCPublish, publish, play and createStream specially and answers
+ * any other command with a generic "_result". For publish and play the
+ * onStatus reply is sent via write_status().
+ *
+ * Fix vs. previous revision: the "play" branch used the stack buffer
+ * `filename` without ever reading it (it is only filled in the
+ * FCPublish/publish branch), i.e. an uninitialized read. The play command
+ * carries the stream name after the null object, so read it there. The
+ * ff_amf_read_string() result in the publish path is now checked as well,
+ * for the same reason.
+ *
+ * @return 0 on success, negative AVERROR on failure
+ */
+static int send_invoke_response(URLContext *s, RTMPPacket *pkt)
+{
+    RTMPContext *rt = s->priv_data;
+    double seqnum;
+    char filename[64];
+    char command[64];
+    int stringlen;
+    char *pchar;
+    const uint8_t *p = pkt->data;
+    uint8_t *pp = NULL;
+    RTMPPacket spkt = { 0 };
+    GetByteContext gbc;
+    int ret;
+
+    bytestream2_init(&gbc, p, pkt->size);
+    if (ff_amf_read_string(&gbc, command, sizeof(command),
+                           &stringlen)) {
+        av_log(s, AV_LOG_ERROR, "Error in PT_INVOKE\n");
+        return AVERROR_INVALIDDATA;
+    }
+
+    ret = ff_amf_read_number(&gbc, &seqnum);
+    if (ret)
+        return ret;
+    ret = ff_amf_read_null(&gbc);
+    if (ret)
+        return ret;
+    if (!strcmp(command, "FCPublish") ||
+        !strcmp(command, "publish")) {
+        ret = ff_amf_read_string(&gbc, filename,
+                                 sizeof(filename), &stringlen);
+        if (ret) {
+            // Fail early rather than using an uninitialized stream name.
+            av_log(s, AV_LOG_ERROR, "Unable to parse stream name in %s\n",
+                   command);
+            return AVERROR_INVALIDDATA;
+        }
+        // check with url
+        if (s->filename) {
+            pchar = strrchr(s->filename, '/');
+            if (!pchar) {
+                av_log(s, AV_LOG_WARNING,
+                       "Unable to find / in url %s, bad format\n",
+                       s->filename);
+                pchar = s->filename;
+            }
+            pchar++;
+            if (strcmp(pchar, filename))
+                av_log(s, AV_LOG_WARNING, "Unexpected stream %s, expecting"
+                       " %s\n", filename, pchar);
+        }
+        rt->state = STATE_RECEIVING;
+    }
+
+    if (!strcmp(command, "FCPublish")) {
+        if ((ret = ff_rtmp_packet_create(&spkt, RTMP_SYSTEM_CHANNEL,
+                                         RTMP_PT_INVOKE, 0,
+                                         RTMP_PKTDATA_DEFAULT_SIZE)) < 0) {
+            av_log(s, AV_LOG_ERROR, "Unable to create response packet\n");
+            return ret;
+        }
+        pp = spkt.data;
+        ff_amf_write_string(&pp, "onFCPublish");
+    } else if (!strcmp(command, "publish")) {
+        ret = write_begin(s);
+        if (ret < 0)
+            return ret;
+
+        // Send onStatus(NetStream.Publish.Start)
+        return write_status(s, pkt, "NetStream.Publish.Start",
+                            filename);
+    } else if (!strcmp(command, "play")) {
+        ret = write_begin(s);
+        if (ret < 0)
+            return ret;
+        rt->state = STATE_SENDING;
+        // The play command carries the stream name after the null object;
+        // the publish branch above did not run for "play", so read it here
+        // (previously `filename` was passed to write_status uninitialized).
+        ret = ff_amf_read_string(&gbc, filename,
+                                 sizeof(filename), &stringlen);
+        if (ret) {
+            av_log(s, AV_LOG_ERROR, "Unable to parse stream name in play\n");
+            return AVERROR_INVALIDDATA;
+        }
+        // Send onStatus(NetStream.Play.Start)
+        return write_status(s, pkt, "NetStream.Play.Start",
+                            filename);
+    } else {
+        if ((ret = ff_rtmp_packet_create(&spkt, RTMP_SYSTEM_CHANNEL,
+                                         RTMP_PT_INVOKE, 0,
+                                         RTMP_PKTDATA_DEFAULT_SIZE)) < 0) {
+            av_log(s, AV_LOG_ERROR, "Unable to create response packet\n");
+            return ret;
+        }
+        pp = spkt.data;
+        ff_amf_write_string(&pp, "_result");
+        ff_amf_write_number(&pp, seqnum);
+        ff_amf_write_null(&pp);
+        if (!strcmp(command, "createStream")) {
+            rt->nb_streamid++;
+            if (rt->nb_streamid == 0 || rt->nb_streamid == 2)
+                rt->nb_streamid++; /* Values 0 and 2 are reserved */
+            ff_amf_write_number(&pp, rt->nb_streamid);
+            /* By now we don't control which streams are removed in
+             * deleteStream. There is no stream creation control
+             * if a client creates more than 2^32 - 2 streams. */
+        }
+    }
+    spkt.size = pp - spkt.data;
+    ret = ff_rtmp_packet_write(rt->stream, &spkt, rt->out_chunk_size,
+                               &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
+    ff_rtmp_packet_destroy(&spkt);
+    return ret;
+}
+
+/**
+ * Read the AMF_NUMBER response ("_result") to a function call
+ * (e.g. createStream()). This response should be made up of the AMF_STRING
+ * "_result", a NULL object and then the response encoded as AMF_NUMBER. On a
+ * successful response, we will set *number to the value read (otherwise
+ * number will not be changed).
+ *
+ * @return 0 if reading the value succeeds, negative value otherwise
+ */
+static int read_number_result(RTMPPacket *pkt, double *number)
+{
+    // We only need to fit "_result" in this.
+    uint8_t strbuffer[8];
+    int stringlen;
+    double numbuffer;
+    GetByteContext gbc;
+
+    bytestream2_init(&gbc, pkt->data, pkt->size);
+
+    // Value 1/4: "_result" as AMF_STRING
+    if (ff_amf_read_string(&gbc, strbuffer, sizeof(strbuffer), &stringlen))
+        return AVERROR_INVALIDDATA;
+    if (strcmp(strbuffer, "_result"))
+        return AVERROR_INVALIDDATA;
+    // Value 2/4: The callee reference number
+    if (ff_amf_read_number(&gbc, &numbuffer))
+        return AVERROR_INVALIDDATA;
+    // Value 3/4: Null
+    if (ff_amf_read_null(&gbc))
+        return AVERROR_INVALIDDATA;
+    // Value 4/4: The response as AMF_NUMBER
+    if (ff_amf_read_number(&gbc, &numbuffer))
+        return AVERROR_INVALIDDATA;
+    else
+        *number = numbuffer;
+
+    return 0;
+}
+
static int handle_invoke_result(URLContext *s, RTMPPacket *pkt)
{
RTMPContext *rt = s->priv_data;
return ret;
}
- if (!memcmp(tracked_method, "connect", 7)) {
+ if (!strcmp(tracked_method, "connect")) {
if (!rt->is_input) {
if ((ret = gen_release_stream(s, rt)) < 0)
goto fail;
goto fail;
}
}
- } else if (!memcmp(tracked_method, "createStream", 12)) {
- //extract a number from the result
- if (pkt->data[10] || pkt->data[19] != 5 || pkt->data[20]) {
+ } else if (!strcmp(tracked_method, "createStream")) {
+ double stream_id;
+ if (read_number_result(pkt, &stream_id)) {
av_log(s, AV_LOG_WARNING, "Unexpected reply on connect()\n");
} else {
- rt->main_channel_id = av_int2double(AV_RB64(pkt->data + 21));
+ rt->stream_id = stream_id;
}
if (!rt->is_input) {
if ((ret = gen_publish(s, rt)) < 0)
goto fail;
} else {
+ if (rt->live != -1) {
+ if ((ret = gen_get_stream_length(s, rt)) < 0)
+ goto fail;
+ }
if ((ret = gen_play(s, rt)) < 0)
goto fail;
if ((ret = gen_buffer_time(s, rt)) < 0)
goto fail;
}
+ } else if (!strcmp(tracked_method, "getStreamLength")) {
+ if (read_number_result(pkt, &rt->duration)) {
+ av_log(s, AV_LOG_WARNING, "Unexpected reply on getStreamLength()\n");
+ }
}
fail:
static int handle_invoke_status(URLContext *s, RTMPPacket *pkt)
{
RTMPContext *rt = s->priv_data;
- const uint8_t *data_end = pkt->data + pkt->data_size;
- const uint8_t *ptr = pkt->data + 11;
+ const uint8_t *data_end = pkt->data + pkt->size;
+ const uint8_t *ptr = pkt->data + RTMP_HEADER;
uint8_t tmpstr[256];
int i, t;
t = ff_amf_get_field_value(ptr, data_end, "level", tmpstr, sizeof(tmpstr));
if (!t && !strcmp(tmpstr, "error")) {
- if (!ff_amf_get_field_value(ptr, data_end,
- "description", tmpstr, sizeof(tmpstr)))
+ t = ff_amf_get_field_value(ptr, data_end,
+ "description", tmpstr, sizeof(tmpstr));
+ if (t || !tmpstr[0])
+ t = ff_amf_get_field_value(ptr, data_end, "code",
+ tmpstr, sizeof(tmpstr));
+ if (!t)
av_log(s, AV_LOG_ERROR, "Server error: %s\n", tmpstr);
return -1;
}
if (!t && !strcmp(tmpstr, "NetStream.Play.Stop")) rt->state = STATE_STOPPED;
if (!t && !strcmp(tmpstr, "NetStream.Play.UnpublishNotify")) rt->state = STATE_STOPPED;
if (!t && !strcmp(tmpstr, "NetStream.Publish.Start")) rt->state = STATE_PUBLISHING;
+ if (!t && !strcmp(tmpstr, "NetStream.Seek.Notify")) rt->state = STATE_PLAYING;
return 0;
}
int ret = 0;
//TODO: check for the messages sent for wrong state?
- if (!memcmp(pkt->data, "\002\000\006_error", 9)) {
+ if (ff_amf_match_string(pkt->data, pkt->size, "_error")) {
if ((ret = handle_invoke_error(s, pkt)) < 0)
return ret;
- } else if (!memcmp(pkt->data, "\002\000\007_result", 10)) {
+ } else if (ff_amf_match_string(pkt->data, pkt->size, "_result")) {
if ((ret = handle_invoke_result(s, pkt)) < 0)
return ret;
- } else if (!memcmp(pkt->data, "\002\000\010onStatus", 11)) {
+ } else if (ff_amf_match_string(pkt->data, pkt->size, "onStatus")) {
if ((ret = handle_invoke_status(s, pkt)) < 0)
return ret;
- } else if (!memcmp(pkt->data, "\002\000\010onBWDone", 11)) {
+ } else if (ff_amf_match_string(pkt->data, pkt->size, "onBWDone")) {
if ((ret = gen_check_bw(s, rt)) < 0)
return ret;
+ } else if (ff_amf_match_string(pkt->data, pkt->size, "releaseStream") ||
+ ff_amf_match_string(pkt->data, pkt->size, "FCPublish") ||
+ ff_amf_match_string(pkt->data, pkt->size, "publish") ||
+ ff_amf_match_string(pkt->data, pkt->size, "play") ||
+ ff_amf_match_string(pkt->data, pkt->size, "_checkbw") ||
+ ff_amf_match_string(pkt->data, pkt->size, "createStream")) {
+ if ((ret = send_invoke_response(s, pkt)) < 0)
+ return ret;
}
return ret;
}
+/**
+ * Update the FLV buffer bookkeeping before appending @p size more bytes.
+ *
+ * If there is still unread data in the buffer the new data is appended
+ * after it; otherwise the buffer is restarted from the beginning.
+ *
+ * @param rt   RTMP context holding the FLV buffer state
+ * @param size number of bytes about to be added to the buffer
+ * @return the offset at which the new data should be written
+ */
+static int update_offset(RTMPContext *rt, int size)
+{
+    int old_flv_size;
+
+    // generate packet header and put data into buffer for FLV demuxer
+    if (rt->flv_off < rt->flv_size) {
+        // There is old unread data in the buffer, thus append at the end
+        old_flv_size = rt->flv_size;
+        rt->flv_size += size;
+    } else {
+        // All data has been read, write the new data at the start of the buffer
+        old_flv_size = 0;
+        rt->flv_size = size;
+        rt->flv_off = 0;
+    }
+
+    return old_flv_size;
+}
+
+/**
+ * Wrap the payload of an RTMP packet into an FLV tag and append it to the
+ * buffer handed to the FLV demuxer.
+ *
+ * The extra 15 bytes reserved per packet are the 11-byte FLV tag header
+ * plus the 4-byte PreviousTagSize trailer.
+ *
+ * @param rt   RTMP context owning the FLV buffer
+ * @param pkt  received packet whose payload is appended
+ * @param skip number of leading payload bytes to drop (e.g. an AMF
+ *             "@setDataFrame" wrapper)
+ * @return 0 on success, negative AVERROR on allocation failure
+ */
+static int append_flv_data(RTMPContext *rt, RTMPPacket *pkt, int skip)
+{
+    int old_flv_size, ret;
+    PutByteContext pbc;
+    const uint8_t *data = pkt->data + skip;
+    const int size = pkt->size - skip;
+    uint32_t ts = pkt->timestamp;
+
+    // Remember which stream types were seen so the FLV header flags can be
+    // set accordingly (see rtmp_open()).
+    if (pkt->type == RTMP_PT_AUDIO) {
+        rt->has_audio = 1;
+    } else if (pkt->type == RTMP_PT_VIDEO) {
+        rt->has_video = 1;
+    }
+
+    old_flv_size = update_offset(rt, size + 15);
+
+    if ((ret = av_reallocp(&rt->flv_data, rt->flv_size)) < 0) {
+        rt->flv_size = rt->flv_off = 0;
+        return ret;
+    }
+    bytestream2_init_writer(&pbc, rt->flv_data, rt->flv_size);
+    bytestream2_skip_p(&pbc, old_flv_size);
+    bytestream2_put_byte(&pbc, pkt->type);
+    bytestream2_put_be24(&pbc, size);
+    bytestream2_put_be24(&pbc, ts);
+    bytestream2_put_byte(&pbc, ts >> 24); // extended (upper 8) timestamp bits
+    bytestream2_put_be24(&pbc, 0);        // stream id
+    bytestream2_put_buffer(&pbc, data, size);
+    bytestream2_put_be32(&pbc, 0);        // PreviousTagSize trailer (left 0)
+
+    return 0;
+}
+
+/**
+ * Parse an RTMP_PT_NOTIFY packet and pass it on to the FLV buffer.
+ *
+ * For onMetaData packets the metadata mixed array is scanned for the
+ * audiocodecid/videocodecid properties to learn which streams are present.
+ * A leading "@setDataFrame" wrapper (as sent by publishing clients) is
+ * stripped before the data is appended.
+ *
+ * @return 0 on success, negative AVERROR on invalid data
+ */
+static int handle_notify(URLContext *s, RTMPPacket *pkt)
+{
+    RTMPContext *rt = s->priv_data;
+    uint8_t commandbuffer[64];
+    char statusmsg[128];
+    int stringlen, ret, skip = 0;
+    GetByteContext gbc;
+
+    bytestream2_init(&gbc, pkt->data, pkt->size);
+    if (ff_amf_read_string(&gbc, commandbuffer, sizeof(commandbuffer),
+                           &stringlen))
+        return AVERROR_INVALIDDATA;
+
+    if (!strcmp(commandbuffer, "onMetaData")) {
+        // metadata properties should be stored in a mixed array
+        if (bytestream2_get_byte(&gbc) == AMF_DATA_TYPE_MIXEDARRAY) {
+            // We have found a metaData Array so flv can determine the streams
+            // from this.
+            rt->received_metadata = 1;
+            // skip 32-bit max array index
+            bytestream2_skip(&gbc, 4);
+            while (bytestream2_get_bytes_left(&gbc) > 3) {
+                if (ff_amf_get_string(&gbc, statusmsg, sizeof(statusmsg),
+                                      &stringlen))
+                    return AVERROR_INVALIDDATA;
+                // We do not care about the content of the property (yet).
+                stringlen = ff_amf_tag_size(gbc.buffer, gbc.buffer_end);
+                if (stringlen < 0)
+                    return AVERROR_INVALIDDATA;
+                bytestream2_skip(&gbc, stringlen);
+
+                // The presence of the following properties indicates that the
+                // respective streams are present.
+                if (!strcmp(statusmsg, "videocodecid")) {
+                    rt->has_video = 1;
+                }
+                if (!strcmp(statusmsg, "audiocodecid")) {
+                    rt->has_audio = 1;
+                }
+            }
+            if (bytestream2_get_be24(&gbc) != AMF_END_OF_OBJECT)
+                return AVERROR_INVALIDDATA;
+        }
+    }
+
+    // Skip the @setDataFrame string and validate it is a notification
+    if (!strcmp(commandbuffer, "@setDataFrame")) {
+        // skip ends right after the "@setDataFrame" string, so only the
+        // wrapped notification itself is appended below.
+        skip = gbc.buffer - pkt->data;
+        ret = ff_amf_read_string(&gbc, statusmsg,
+                                 sizeof(statusmsg), &stringlen);
+        if (ret < 0)
+            return AVERROR_INVALIDDATA;
+    }
+
+    return append_flv_data(rt, pkt, skip);
+}
+
/**
* Parse received packet and possibly perform some action depending on
* the packet contents.
case RTMP_PT_VIDEO:
case RTMP_PT_AUDIO:
case RTMP_PT_METADATA:
+ case RTMP_PT_NOTIFY:
/* Audio, Video and Metadata packets are parsed in get_packet() */
break;
default:
return 0;
}
+/**
+ * Copy the raw FLV tags contained in an RTMP_PT_METADATA packet into the
+ * FLV buffer, rewriting each tag's timestamp so the stream timeline stays
+ * based on the packet's own timestamp.
+ *
+ * @return 0 on success, negative AVERROR on allocation failure
+ */
+static int handle_metadata(RTMPContext *rt, RTMPPacket *pkt)
+{
+    int ret, old_flv_size, type;
+    const uint8_t *next;
+    uint8_t *p;
+    uint32_t size;
+    uint32_t ts, cts, pts = 0;
+
+    old_flv_size = update_offset(rt, pkt->size);
+
+    if ((ret = av_reallocp(&rt->flv_data, rt->flv_size)) < 0) {
+        rt->flv_size = rt->flv_off = 0;
+        return ret;
+    }
+
+    next = pkt->data;
+    p = rt->flv_data + old_flv_size;
+
+    /* copy data while rewriting timestamps */
+    ts = pkt->timestamp;
+
+    // Iterate while at least one full 11-byte FLV tag header remains.
+    while (next - pkt->data < pkt->size - RTMP_HEADER) {
+        // Tag header: type (1), data size (3), timestamp (3) + extended
+        // timestamp byte (1). The remaining 3 header bytes (stream id) are
+        // copied verbatim below.
+        type = bytestream_get_byte(&next);
+        size = bytestream_get_be24(&next);
+        cts = bytestream_get_be24(&next);
+        cts |= bytestream_get_byte(&next) << 24;
+        // Accumulate the delta between consecutive embedded timestamps on
+        // top of the packet timestamp.
+        if (!pts)
+            pts = cts;
+        ts += cts - pts;
+        pts = cts;
+        // Still to copy: stream id (3) + payload (size) + trailer (4);
+        // bail out on a truncated tag.
+        if (size + 3 + 4 > pkt->data + pkt->size - next)
+            break;
+        bytestream_put_byte(&p, type);
+        bytestream_put_be24(&p, size);
+        bytestream_put_be24(&p, ts);
+        bytestream_put_byte(&p, ts >> 24);
+        memcpy(p, next, size + 3 + 4);
+        next += size + 3 + 4;
+        p += size + 3 + 4;
+    }
+    if (p != rt->flv_data + rt->flv_size) {
+        av_log(NULL, AV_LOG_WARNING, "Incomplete flv packets in "
+               "RTMP_PT_METADATA packet\n");
+        // Shrink the logical size to what was actually written.
+        rt->flv_size = p - rt->flv_data;
+    }
+
+    return 0;
+}
+
/**
* Interact with the server by receiving and sending RTMP packets until
* there is some significant data (media data or expected status notification).
{
RTMPContext *rt = s->priv_data;
int ret;
- uint8_t *p;
- const uint8_t *next;
- uint32_t data_size;
- uint32_t ts, cts, pts=0;
if (rt->state == STATE_STOPPED)
return AVERROR_EOF;
for (;;) {
RTMPPacket rpkt = { 0 };
if ((ret = ff_rtmp_packet_read(rt->stream, &rpkt,
- rt->in_chunk_size, rt->prev_pkt[0])) <= 0) {
+ rt->in_chunk_size, &rt->prev_pkt[0],
+ &rt->nb_prev_pkt[0])) <= 0) {
if (ret == 0) {
return AVERROR(EAGAIN);
} else {
return AVERROR(EIO);
}
}
+
+ // Track timestamp for later use
+ rt->last_timestamp = rpkt.timestamp;
+
rt->bytes_read += ret;
if (rt->bytes_read > rt->last_bytes_read + rt->client_report_size) {
av_log(s, AV_LOG_DEBUG, "Sending bytes read report\n");
}
ret = rtmp_parse_result(s, rt, &rpkt);
+
+ // At this point we must check if we are in the seek state and continue
+ // with the next packet. handle_invoke will get us out of this state
+ // when the right message is encountered
+ if (rt->state == STATE_SEEKING) {
+ ff_rtmp_packet_destroy(&rpkt);
+ // We continue, let the natural flow of things happen:
+ // AVERROR(EAGAIN) or handle_invoke gets us out of here
+ continue;
+ }
+
if (ret < 0) {//serious error in current packet
ff_rtmp_packet_destroy(&rpkt);
return ret;
}
+ if (rt->do_reconnect && for_header) {
+ ff_rtmp_packet_destroy(&rpkt);
+ return 0;
+ }
if (rt->state == STATE_STOPPED) {
ff_rtmp_packet_destroy(&rpkt);
return AVERROR_EOF;
}
- if (for_header && (rt->state == STATE_PLAYING || rt->state == STATE_PUBLISHING)) {
+ if (for_header && (rt->state == STATE_PLAYING ||
+ rt->state == STATE_PUBLISHING ||
+ rt->state == STATE_SENDING ||
+ rt->state == STATE_RECEIVING)) {
ff_rtmp_packet_destroy(&rpkt);
return 0;
}
- if (!rpkt.data_size || !rt->is_input) {
+ if (!rpkt.size || !rt->is_input) {
ff_rtmp_packet_destroy(&rpkt);
continue;
}
- if (rpkt.type == RTMP_PT_VIDEO || rpkt.type == RTMP_PT_AUDIO ||
- (rpkt.type == RTMP_PT_NOTIFY && !memcmp("\002\000\012onMetaData", rpkt.data, 13))) {
- ts = rpkt.timestamp;
-
- // generate packet header and put data into buffer for FLV demuxer
- rt->flv_off = 0;
- rt->flv_size = rpkt.data_size + 15;
- rt->flv_data = p = av_realloc(rt->flv_data, rt->flv_size);
- bytestream_put_byte(&p, rpkt.type);
- bytestream_put_be24(&p, rpkt.data_size);
- bytestream_put_be24(&p, ts);
- bytestream_put_byte(&p, ts >> 24);
- bytestream_put_be24(&p, 0);
- bytestream_put_buffer(&p, rpkt.data, rpkt.data_size);
- bytestream_put_be32(&p, 0);
+ if (rpkt.type == RTMP_PT_VIDEO || rpkt.type == RTMP_PT_AUDIO) {
+ ret = append_flv_data(rt, &rpkt, 0);
ff_rtmp_packet_destroy(&rpkt);
- return 0;
+ return ret;
+ } else if (rpkt.type == RTMP_PT_NOTIFY) {
+ ret = handle_notify(s, &rpkt);
+ ff_rtmp_packet_destroy(&rpkt);
+ return ret;
} else if (rpkt.type == RTMP_PT_METADATA) {
- // we got raw FLV data, make it available for FLV demuxer
- rt->flv_off = 0;
- rt->flv_size = rpkt.data_size;
- rt->flv_data = av_realloc(rt->flv_data, rt->flv_size);
- /* rewrite timestamps */
- next = rpkt.data;
- ts = rpkt.timestamp;
- while (next - rpkt.data < rpkt.data_size - 11) {
- next++;
- data_size = bytestream_get_be24(&next);
- p=next;
- cts = bytestream_get_be24(&next);
- cts |= bytestream_get_byte(&next) << 24;
- if (pts==0)
- pts=cts;
- ts += cts - pts;
- pts = cts;
- bytestream_put_be24(&p, ts);
- bytestream_put_byte(&p, ts >> 24);
- next += data_size + 3 + 4;
- }
- memcpy(rt->flv_data, rpkt.data, rpkt.data_size);
+ ret = handle_metadata(rt, &rpkt);
ff_rtmp_packet_destroy(&rpkt);
return 0;
}
static int rtmp_close(URLContext *h)
{
RTMPContext *rt = h->priv_data;
- int ret = 0;
+ int ret = 0, i, j;
if (!rt->is_input) {
rt->flv_data = NULL;
- if (rt->out_pkt.data_size)
+ if (rt->out_pkt.size)
ff_rtmp_packet_destroy(&rt->out_pkt);
if (rt->state > STATE_FCPUBLISH)
ret = gen_fcunpublish_stream(h, rt);
}
if (rt->state > STATE_HANDSHAKED)
ret = gen_delete_stream(h, rt);
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < rt->nb_prev_pkt[i]; j++)
+ ff_rtmp_packet_destroy(&rt->prev_pkt[i][j]);
+ av_freep(&rt->prev_pkt[i]);
+ }
free_tracked_methods(rt);
av_freep(&rt->flv_data);
return ret;
}
+/**
+ * Insert a fake onMetadata packet into the FLV stream to notify the FLV
+ * demuxer about the duration of the stream.
+ *
+ * This should only be done if there was no real onMetadata packet sent by the
+ * server at the start of the stream and if we were able to retrieve a valid
+ * duration via a getStreamLength call.
+ *
+ * Fix vs. previous revision: the trailing PreviousTagSize field wrote only
+ * the data size (40); per the FLV spec it must include the 11-byte tag
+ * header as well (the old comment on that line was a copy-paste of the
+ * DataSize comment).
+ *
+ * @return 0 for successful operation, negative value in case of error
+ */
+static int inject_fake_duration_metadata(RTMPContext *rt)
+{
+    // We need to insert the metadata packet directly after the FLV
+    // header, i.e. we need to move all other already read data by the
+    // size of our fake metadata packet.
+
+    uint8_t* p;
+    // Keep old flv_data pointer
+    uint8_t* old_flv_data = rt->flv_data;
+    // Allocate a new flv_data pointer with enough space for the additional
+    // package: 55 bytes = 11 (tag header) + 40 (data) + 4 (PreviousTagSize)
+    if (!(rt->flv_data = av_malloc(rt->flv_size + 55))) {
+        rt->flv_data = old_flv_data;
+        return AVERROR(ENOMEM);
+    }
+
+    // Copy FLV header
+    memcpy(rt->flv_data, old_flv_data, 13);
+    // Copy remaining packets
+    memcpy(rt->flv_data + 13 + 55, old_flv_data + 13, rt->flv_size - 13);
+    // Increase the size by the injected packet
+    rt->flv_size += 55;
+    // Delete the old FLV data
+    av_free(old_flv_data);
+
+    p = rt->flv_data + 13;
+    bytestream_put_byte(&p, FLV_TAG_TYPE_META);
+    bytestream_put_be24(&p, 40); // size of data part (sum of all parts below)
+    bytestream_put_be24(&p, 0);  // timestamp
+    bytestream_put_be32(&p, 0);  // reserved
+
+    // first event name as a string
+    bytestream_put_byte(&p, AMF_DATA_TYPE_STRING);
+    // "onMetaData" as AMF string
+    bytestream_put_be16(&p, 10);
+    bytestream_put_buffer(&p, "onMetaData", 10);
+
+    // mixed array (hash) with size and string/type/data tuples
+    bytestream_put_byte(&p, AMF_DATA_TYPE_MIXEDARRAY);
+    bytestream_put_be32(&p, 1); // metadata_count
+
+    // "duration" as AMF string
+    bytestream_put_be16(&p, 8);
+    bytestream_put_buffer(&p, "duration", 8);
+    bytestream_put_byte(&p, AMF_DATA_TYPE_NUMBER);
+    bytestream_put_be64(&p, av_double2int(rt->duration));
+
+    // Finalise object
+    bytestream_put_be16(&p, 0); // Empty string
+    bytestream_put_byte(&p, AMF_END_OF_OBJECT);
+    bytestream_put_be32(&p, 11 + 40); // PreviousTagSize: tag header + data
+
+    return 0;
+}
+
/**
* Open RTMP connection and verify that the stream can be played.
*
static int rtmp_open(URLContext *s, const char *uri, int flags)
{
RTMPContext *rt = s->priv_data;
- char proto[8], hostname[256], path[1024], *fname;
- char *old_app;
+ char proto[8], hostname[256], path[1024], auth[100], *fname;
+ char *old_app, *qmark, fname_buffer[1024];
uint8_t buf[2048];
int port;
AVDictionary *opts = NULL;
int ret;
+ if (rt->listen_timeout > 0)
+ rt->listen = 1;
+
rt->is_input = !(flags & AVIO_FLAG_WRITE);
- av_url_split(proto, sizeof(proto), NULL, 0, hostname, sizeof(hostname), &port,
+ av_url_split(proto, sizeof(proto), auth, sizeof(auth),
+ hostname, sizeof(hostname), &port,
path, sizeof(path), s->filename);
+ if (strchr(path, ' ')) {
+ av_log(s, AV_LOG_WARNING,
+ "Detected librtmp style URL parameters, these aren't supported "
+ "by the libavformat internal RTMP handler currently enabled. "
+ "See the documentation for the correct way to pass parameters.\n");
+ }
+
+ if (auth[0]) {
+ char *ptr = strchr(auth, ':');
+ if (ptr) {
+ *ptr = '\0';
+ av_strlcpy(rt->username, auth, sizeof(rt->username));
+ av_strlcpy(rt->password, ptr + 1, sizeof(rt->password));
+ }
+ }
+
+ if (rt->listen && strcmp(proto, "rtmp")) {
+ av_log(s, AV_LOG_ERROR, "rtmp_listen not available for %s\n",
+ proto);
+ return AVERROR(EINVAL);
+ }
if (!strcmp(proto, "rtmpt") || !strcmp(proto, "rtmpts")) {
if (!strcmp(proto, "rtmpts"))
av_dict_set(&opts, "ffrtmphttp_tls", "1", 1);
/* open the tcp connection */
if (port < 0)
port = RTMP_DEFAULT_PORT;
- ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port, NULL);
+ if (rt->listen)
+ ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port,
+ "?listen&listen_timeout=%d",
+ rt->listen_timeout * 1000);
+ else
+ ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port, NULL);
}
+reconnect:
if ((ret = ffurl_open(&rt->stream, buf, AVIO_FLAG_READ_WRITE,
&s->interrupt_callback, &opts)) < 0) {
av_log(s , AV_LOG_ERROR, "Cannot open connection %s\n", buf);
goto fail;
}
+ if (rt->swfverify) {
+ if ((ret = rtmp_calc_swfhash(s)) < 0)
+ goto fail;
+ }
+
rt->state = STATE_START;
- if ((ret = rtmp_handshake(s, rt)) < 0)
+ if (!rt->listen && (ret = rtmp_handshake(s, rt)) < 0)
+ goto fail;
+ if (rt->listen && (ret = rtmp_server_handshake(s, rt)) < 0)
goto fail;
rt->out_chunk_size = 128;
}
//extract "app" part from path
- if (!strncmp(path, "/ondemand/", 10)) {
+ qmark = strchr(path, '?');
+ if (qmark && strstr(qmark, "slist=")) {
+ char* amp;
+ // After slist we have the playpath, before the params, the app
+ av_strlcpy(rt->app, path + 1, FFMIN(qmark - path, APP_MAX_LENGTH));
+ fname = strstr(path, "slist=") + 6;
+ // Strip any further query parameters from fname
+ amp = strchr(fname, '&');
+ if (amp) {
+ av_strlcpy(fname_buffer, fname, FFMIN(amp - fname + 1,
+ sizeof(fname_buffer)));
+ fname = fname_buffer;
+ }
+ } else if (!strncmp(path, "/ondemand/", 10)) {
fname = path + 10;
memcpy(rt->app, "ondemand", 9);
} else {
fname = strchr(p + 1, '/');
if (!fname || (c && c < fname)) {
fname = p + 1;
- av_strlcpy(rt->app, path + 1, p - path);
+ av_strlcpy(rt->app, path + 1, FFMIN(p - path, APP_MAX_LENGTH));
} else {
fname++;
- av_strlcpy(rt->app, path + 1, fname - path - 1);
+ av_strlcpy(rt->app, path + 1, FFMIN(fname - path - 1, APP_MAX_LENGTH));
}
}
}
(!strcmp(fname + len - 4, ".f4v") ||
!strcmp(fname + len - 4, ".mp4"))) {
memcpy(rt->playpath, "mp4:", 5);
- } else if (len >= 4 && !strcmp(fname + len - 4, ".flv")) {
- fname[len - 4] = '\0';
} else {
+ if (len >= 4 && !strcmp(fname + len - 4, ".flv"))
+ fname[len - 4] = '\0';
rt->playpath[0] = 0;
}
- strncat(rt->playpath, fname, PLAYPATH_MAX_LENGTH - 5);
+ av_strlcat(rt->playpath, fname, PLAYPATH_MAX_LENGTH);
}
if (!rt->tcurl) {
rt->client_report_size = 1048576;
rt->bytes_read = 0;
+ rt->has_audio = 0;
+ rt->has_video = 0;
+ rt->received_metadata = 0;
rt->last_bytes_read = 0;
rt->server_bw = 2500000;
+ rt->duration = 0;
av_log(s, AV_LOG_DEBUG, "Proto = %s, path = %s, app = %s, fname = %s\n",
proto, path, rt->app, rt->playpath);
- if ((ret = gen_connect(s, rt)) < 0)
- goto fail;
+ if (!rt->listen) {
+ if ((ret = gen_connect(s, rt)) < 0)
+ goto fail;
+ } else {
+ if ((ret = read_connect(s, s->priv_data)) < 0)
+ goto fail;
+ }
do {
ret = get_packet(s, 1);
- } while (ret == EAGAIN);
+ } while (ret == AVERROR(EAGAIN));
if (ret < 0)
goto fail;
+ if (rt->do_reconnect) {
+ int i;
+ ffurl_close(rt->stream);
+ rt->stream = NULL;
+ rt->do_reconnect = 0;
+ rt->nb_invokes = 0;
+ for (i = 0; i < 2; i++)
+ memset(rt->prev_pkt[i], 0,
+ sizeof(**rt->prev_pkt) * rt->nb_prev_pkt[i]);
+ free_tracked_methods(rt);
+ goto reconnect;
+ }
+
if (rt->is_input) {
// generate FLV header for demuxer
rt->flv_size = 13;
- rt->flv_data = av_realloc(rt->flv_data, rt->flv_size);
+ if ((ret = av_reallocp(&rt->flv_data, rt->flv_size)) < 0)
+ goto fail;
rt->flv_off = 0;
- memcpy(rt->flv_data, "FLV\1\5\0\0\0\011\0\0\0\0", rt->flv_size);
+ memcpy(rt->flv_data, "FLV\1\0\0\0\0\011\0\0\0\0", rt->flv_size);
+
+ // Read packets until we reach the first A/V packet or read metadata.
+ // If there was a metadata package in front of the A/V packets, we can
+ // build the FLV header from this. If we do not receive any metadata,
+ // the FLV decoder will allocate the needed streams when their first
+ // audio or video packet arrives.
+ while (!rt->has_audio && !rt->has_video && !rt->received_metadata) {
+ if ((ret = get_packet(s, 0)) < 0)
+ goto fail;
+ }
+
+ // Either after we have read the metadata or (if there is none) the
+ // first packet of an A/V stream, we have a better knowledge about the
+ // streams, so set the FLV header accordingly.
+ if (rt->has_audio) {
+ rt->flv_data[4] |= FLV_HEADER_FLAG_HASAUDIO;
+ }
+ if (rt->has_video) {
+ rt->flv_data[4] |= FLV_HEADER_FLAG_HASVIDEO;
+ }
+
+ // If we received the first packet of an A/V stream and no metadata but
+ // the server returned a valid duration, create a fake metadata packet
+ // to inform the FLV decoder about the duration.
+ if (!rt->received_metadata && rt->duration > 0) {
+ if ((ret = inject_fake_duration_metadata(rt)) < 0)
+ goto fail;
+ }
} else {
rt->flv_size = 0;
rt->flv_data = NULL;
return orig_size;
}
+/**
+ * Seek callback: send an RTMP seek command for the given timestamp and
+ * switch to STATE_SEEKING. The state goes back to STATE_PLAYING once the
+ * server confirms with NetStream.Seek.Notify (see handle_invoke_status()).
+ *
+ * @return the requested timestamp on success, negative AVERROR on failure
+ */
+static int64_t rtmp_seek(URLContext *s, int stream_index, int64_t timestamp,
+                         int flags)
+{
+    RTMPContext *rt = s->priv_data;
+    int ret;
+    av_log(s, AV_LOG_DEBUG,
+           "Seek on stream index %d at timestamp %"PRId64" with flags %08x\n",
+           stream_index, timestamp, flags);
+    if ((ret = gen_seek(s, rt, timestamp)) < 0) {
+        av_log(s, AV_LOG_ERROR,
+               "Unable to send seek command on stream index %d at timestamp "
+               "%"PRId64" with flags %08x\n",
+               stream_index, timestamp, flags);
+        return ret;
+    }
+    // Mark all buffered FLV data as consumed; it belongs before the seek.
+    rt->flv_off = rt->flv_size;
+    rt->state = STATE_SEEKING;
+    return timestamp;
+}
+
+/**
+ * Pause/resume callback: send an RTMP pause command at the timestamp of
+ * the last received packet.
+ *
+ * @param pause non-zero to pause, zero to resume
+ * @return 0 on success, negative AVERROR on failure
+ */
+static int rtmp_pause(URLContext *s, int pause)
+{
+    RTMPContext *rt = s->priv_data;
+    int ret;
+    av_log(s, AV_LOG_DEBUG, "Pause at timestamp %d\n",
+           rt->last_timestamp);
+    if ((ret = gen_pause(s, rt, pause, rt->last_timestamp)) < 0) {
+        av_log(s, AV_LOG_ERROR, "Unable to send pause command at timestamp %d\n",
+               rt->last_timestamp);
+        return ret;
+    }
+    return 0;
+}
+
static int rtmp_write(URLContext *s, const uint8_t *buf, int size)
{
RTMPContext *rt = s->priv_data;
int size_temp = size;
- int pktsize, pkttype;
+ int pktsize, pkttype, copy;
uint32_t ts;
const uint8_t *buf_temp = buf;
uint8_t c;
continue;
}
- if (rt->flv_header_bytes < 11) {
+ if (rt->flv_header_bytes < RTMP_HEADER) {
const uint8_t *header = rt->flv_header;
- int copy = FFMIN(11 - rt->flv_header_bytes, size_temp);
+ int channel = RTMP_AUDIO_CHANNEL;
+ copy = FFMIN(RTMP_HEADER - rt->flv_header_bytes, size_temp);
bytestream_get_buffer(&buf_temp, rt->flv_header + rt->flv_header_bytes, copy);
rt->flv_header_bytes += copy;
size_temp -= copy;
- if (rt->flv_header_bytes < 11)
+ if (rt->flv_header_bytes < RTMP_HEADER)
break;
pkttype = bytestream_get_byte(&header);
bytestream_get_be24(&header);
rt->flv_size = pktsize;
- //force 12bytes header
+ if (pkttype == RTMP_PT_VIDEO)
+ channel = RTMP_VIDEO_CHANNEL;
+
if (((pkttype == RTMP_PT_VIDEO || pkttype == RTMP_PT_AUDIO) && ts == 0) ||
pkttype == RTMP_PT_NOTIFY) {
- if (pkttype == RTMP_PT_NOTIFY)
- pktsize += 16;
- rt->prev_pkt[1][RTMP_SOURCE_CHANNEL].channel_id = 0;
+ if ((ret = ff_rtmp_check_alloc_array(&rt->prev_pkt[1],
+ &rt->nb_prev_pkt[1],
+ channel)) < 0)
+ return ret;
+ // Force sending a full 12 bytes header by clearing the
+ // channel id, to make it not match a potential earlier
+ // packet in the same channel.
+ rt->prev_pkt[1][channel].channel_id = 0;
}
//this can be a big packet, it's better to send it right here
- if ((ret = ff_rtmp_packet_create(&rt->out_pkt, RTMP_SOURCE_CHANNEL,
+ if ((ret = ff_rtmp_packet_create(&rt->out_pkt, channel,
pkttype, ts, pktsize)) < 0)
return ret;
- rt->out_pkt.extra = rt->main_channel_id;
+ rt->out_pkt.extra = rt->stream_id;
rt->flv_data = rt->out_pkt.data;
-
- if (pkttype == RTMP_PT_NOTIFY)
- ff_amf_write_string(&rt->flv_data, "@setDataFrame");
}
- if (rt->flv_size - rt->flv_off > size_temp) {
- bytestream_get_buffer(&buf_temp, rt->flv_data + rt->flv_off, size_temp);
- rt->flv_off += size_temp;
- size_temp = 0;
- } else {
- bytestream_get_buffer(&buf_temp, rt->flv_data + rt->flv_off, rt->flv_size - rt->flv_off);
- size_temp -= rt->flv_size - rt->flv_off;
- rt->flv_off += rt->flv_size - rt->flv_off;
- }
+ copy = FFMIN(rt->flv_size - rt->flv_off, size_temp);
+ bytestream_get_buffer(&buf_temp, rt->flv_data + rt->flv_off, copy);
+ rt->flv_off += copy;
+ size_temp -= copy;
if (rt->flv_off == rt->flv_size) {
rt->skip_bytes = 4;
+ if (rt->out_pkt.type == RTMP_PT_NOTIFY) {
+ // For onMetaData and |RtmpSampleAccess packets, we want
+ // @setDataFrame prepended to the packet before it gets sent.
+ // However, not all RTMP_PT_NOTIFY packets (e.g., onTextData
+ // and onCuePoint).
+ uint8_t commandbuffer[64];
+ int stringlen = 0;
+ GetByteContext gbc;
+
+ bytestream2_init(&gbc, rt->flv_data, rt->flv_size);
+ if (!ff_amf_read_string(&gbc, commandbuffer, sizeof(commandbuffer),
+ &stringlen)) {
+ if (!strcmp(commandbuffer, "onMetaData") ||
+ !strcmp(commandbuffer, "|RtmpSampleAccess")) {
+ uint8_t *ptr;
+ if ((ret = av_reallocp(&rt->out_pkt.data, rt->out_pkt.size + 16)) < 0) {
+ rt->flv_size = rt->flv_off = rt->flv_header_bytes = 0;
+ return ret;
+ }
+ memmove(rt->out_pkt.data + 16, rt->out_pkt.data, rt->out_pkt.size);
+ rt->out_pkt.size += 16;
+ ptr = rt->out_pkt.data;
+ ff_amf_write_string(&ptr, "@setDataFrame");
+ }
+ }
+ }
+
if ((ret = rtmp_send_packet(rt, &rt->out_pkt, 0)) < 0)
return ret;
rt->flv_size = 0;
if ((ret = ff_rtmp_packet_read_internal(rt->stream, &rpkt,
rt->in_chunk_size,
- rt->prev_pkt[0], c)) <= 0)
+ &rt->prev_pkt[0],
+ &rt->nb_prev_pkt[0], c)) <= 0)
return ret;
if ((ret = rtmp_parse_result(s, rt, &rpkt)) < 0)
static const AVOption rtmp_options[] = {
+ // NOTE(review): numeric defaults below now use designated .i64 union
+ // initializers so the intended member of AVOption's default_val union is
+ // set explicitly, independent of the union's member order.
{"rtmp_app", "Name of application to connect to on the RTMP server", OFFSET(app), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
- {"rtmp_buffer", "Set buffer time in milliseconds. The default is 3000.", OFFSET(client_buffer_time), AV_OPT_TYPE_INT, {3000}, 0, INT_MAX, DEC|ENC},
+ {"rtmp_buffer", "Set buffer time in milliseconds. The default is 3000.", OFFSET(client_buffer_time), AV_OPT_TYPE_INT, {.i64 = 3000}, 0, INT_MAX, DEC|ENC},
{"rtmp_conn", "Append arbitrary AMF data to the Connect message", OFFSET(conn), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
{"rtmp_flashver", "Version of the Flash plugin used to run the SWF player.", OFFSET(flashver), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
- {"rtmp_flush_interval", "Number of packets flushed in the same request (RTMPT only).", OFFSET(flush_interval), AV_OPT_TYPE_INT, {10}, 0, INT_MAX, ENC},
- {"rtmp_live", "Specify that the media is a live stream.", OFFSET(live), AV_OPT_TYPE_INT, {-2}, INT_MIN, INT_MAX, DEC, "rtmp_live"},
- {"any", "both", 0, AV_OPT_TYPE_CONST, {-2}, 0, 0, DEC, "rtmp_live"},
- {"live", "live stream", 0, AV_OPT_TYPE_CONST, {-1}, 0, 0, DEC, "rtmp_live"},
- {"recorded", "recorded stream", 0, AV_OPT_TYPE_CONST, {0}, 0, 0, DEC, "rtmp_live"},
+ {"rtmp_flush_interval", "Number of packets flushed in the same request (RTMPT only).", OFFSET(flush_interval), AV_OPT_TYPE_INT, {.i64 = 10}, 0, INT_MAX, ENC},
+ {"rtmp_live", "Specify that the media is a live stream.", OFFSET(live), AV_OPT_TYPE_INT, {.i64 = -2}, INT_MIN, INT_MAX, DEC, "rtmp_live"},
+ {"any", "both", 0, AV_OPT_TYPE_CONST, {.i64 = -2}, 0, 0, DEC, "rtmp_live"},
+ {"live", "live stream", 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, DEC, "rtmp_live"},
+ {"recorded", "recorded stream", 0, AV_OPT_TYPE_CONST, {.i64 = 0}, 0, 0, DEC, "rtmp_live"},
{"rtmp_pageurl", "URL of the web page in which the media was embedded. By default no value will be sent.", OFFSET(pageurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC},
{"rtmp_playpath", "Stream identifier to play or to publish", OFFSET(playpath), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
{"rtmp_subscribe", "Name of live stream to subscribe to. Defaults to rtmp_playpath.", OFFSET(subscribe), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC},
{"rtmp_swfhash", "SHA256 hash of the decompressed SWF file (32 bytes).", OFFSET(swfhash), AV_OPT_TYPE_BINARY, .flags = DEC},
- {"rtmp_swfsize", "Size of the decompressed SWF file, required for SWFVerification.", OFFSET(swfsize), AV_OPT_TYPE_INT, {0}, 0, INT_MAX, DEC},
+ {"rtmp_swfsize", "Size of the decompressed SWF file, required for SWFVerification.", OFFSET(swfsize), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, DEC},
{"rtmp_swfurl", "URL of the SWF player. By default no value will be sent", OFFSET(swfurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
+ {"rtmp_swfverify", "URL to player swf file, compute hash/size automatically.", OFFSET(swfverify), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC},
{"rtmp_tcurl", "URL of the target stream. Defaults to proto://host[:port]/app.", OFFSET(tcurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
+ // New server-mode options: "listen" is an alias of "rtmp_listen", and a
+ // negative "timeout" (default -1) means wait indefinitely for a client.
+ {"rtmp_listen", "Listen for incoming rtmp connections", OFFSET(listen), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, DEC, "rtmp_listen" },
+ {"listen", "Listen for incoming rtmp connections", OFFSET(listen), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, DEC, "rtmp_listen" },
+ {"timeout", "Maximum timeout (in seconds) to wait for incoming connections. -1 is infinite. Implies -rtmp_listen 1", OFFSET(listen_timeout), AV_OPT_TYPE_INT, {.i64 = -1}, INT_MIN, INT_MAX, DEC, "rtmp_listen" },
{ NULL },
};
.name = #flavor, \
.url_open = rtmp_open, \
.url_read = rtmp_read, \
+ .url_read_seek = rtmp_seek, \
+ .url_read_pause = rtmp_pause, \
.url_write = rtmp_write, \
.url_close = rtmp_close, \
.priv_data_size = sizeof(RTMPContext), \