typedef struct RTMPContext {
const AVClass *class;
URLContext* stream; ///< TCP stream used in interactions with RTMP server
- RTMPPacket prev_pkt[2][RTMP_CHANNELS]; ///< packet history used when reading and sending packets ([0] for reading, [1] for writing)
+ RTMPPacket *prev_pkt[2]; ///< packet history used when reading and sending packets ([0] for reading, [1] for writing)
+ int nb_prev_pkt[2]; ///< number of elements in prev_pkt
int in_chunk_size; ///< size of the chunks incoming RTMP packets are divided into
int out_chunk_size; ///< size of the chunks outgoing RTMP packets are divided into
int is_input; ///< input/output flag
uint32_t client_report_size; ///< number of bytes after which client should report to server
uint32_t bytes_read; ///< number of bytes read from server
uint32_t last_bytes_read; ///< number of bytes read last reported to server
+ uint32_t last_timestamp; ///< last timestamp received in a packet
int skip_bytes; ///< number of bytes to skip from the input FLV stream in the next write call
+ int has_audio; ///< presence of audio data
+ int has_video; ///< presence of video data
+ int received_metadata; ///< Indicates if we have received metadata about the streams
uint8_t flv_header[RTMP_HEADER]; ///< partial incoming flv packet header
int flv_header_bytes; ///< number of initialized bytes in flv_header
int nb_invokes; ///< keeps track of invoke messages
int listen; ///< listen mode flag
int listen_timeout; ///< listen timeout to wait for new connections
int nb_streamid; ///< The next stream id to return on createStream calls
+ double duration; ///< Duration of the stream in seconds as returned by the server (only valid if non-zero)
char username[50];
char password[50];
char auth_params[500];
0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE
};
+static int handle_chunk_size(URLContext *s, RTMPPacket *pkt);
+
static int add_tracked_method(RTMPContext *rt, const char *name, int id)
{
int err;
if (rt->nb_tracked_methods + 1 > rt->tracked_methods_size) {
rt->tracked_methods_size = (rt->nb_tracked_methods + 1) * 2;
if ((err = av_reallocp(&rt->tracked_methods, rt->tracked_methods_size *
- sizeof(*rt->tracked_methods))) < 0)
+ sizeof(*rt->tracked_methods))) < 0) {
+ rt->nb_tracked_methods = 0;
+ rt->tracked_methods_size = 0;
return err;
+ }
}
rt->tracked_methods[rt->nb_tracked_methods].name = av_strdup(name);
}
ret = ff_rtmp_packet_write(rt->stream, pkt, rt->out_chunk_size,
- rt->prev_pkt[1]);
+ &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
fail:
ff_rtmp_packet_destroy(pkt);
return ret;
*value = '\0';
value++;
- if (!field || !value)
- goto fail;
-
ff_amf_write_field_name(p, field);
} else {
goto fail;
char *param = rt->conn;
// Write arbitrary AMF data to the Connect message.
- while (param != NULL) {
+ while (param) {
char *sep;
param += strspn(param, " ");
if (!*param)
GetByteContext gbc;
if ((ret = ff_rtmp_packet_read(rt->stream, &pkt, rt->in_chunk_size,
- rt->prev_pkt[0])) < 0)
+ &rt->prev_pkt[0], &rt->nb_prev_pkt[0])) < 0)
return ret;
+
+ if (pkt.type == RTMP_PT_CHUNK_SIZE) {
+ if ((ret = handle_chunk_size(s, &pkt)) < 0)
+ return ret;
+ ff_rtmp_packet_destroy(&pkt);
+ if ((ret = ff_rtmp_packet_read(rt->stream, &pkt, rt->in_chunk_size,
+ &rt->prev_pkt[0], &rt->nb_prev_pkt[0])) < 0)
+ return ret;
+ }
+
cp = pkt.data;
bytestream2_init(&gbc, cp, pkt.size);
if (ff_amf_read_string(&gbc, command, sizeof(command), &stringlen)) {
bytestream_put_be32(&p, rt->server_bw);
pkt.size = p - pkt.data;
ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
- rt->prev_pkt[1]);
+ &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
ff_rtmp_packet_destroy(&pkt);
if (ret < 0)
return ret;
bytestream_put_byte(&p, 2); // dynamic
pkt.size = p - pkt.data;
ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
- rt->prev_pkt[1]);
+ &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
ff_rtmp_packet_destroy(&pkt);
if (ret < 0)
return ret;
bytestream_put_be16(&p, 0); // 0 -> Stream Begin
bytestream_put_be32(&p, 0);
ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
- rt->prev_pkt[1]);
+ &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
ff_rtmp_packet_destroy(&pkt);
if (ret < 0)
return ret;
p = pkt.data;
bytestream_put_be32(&p, rt->out_chunk_size);
ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
- rt->prev_pkt[1]);
+ &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
ff_rtmp_packet_destroy(&pkt);
if (ret < 0)
return ret;
- // Send result_ NetConnection.Connect.Success to connect
+ // Send _result NetConnection.Connect.Success to connect
if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL,
RTMP_PT_INVOKE, 0,
RTMP_PKTDATA_DEFAULT_SIZE)) < 0)
pkt.size = p - pkt.data;
ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
- rt->prev_pkt[1]);
+ &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
ff_rtmp_packet_destroy(&pkt);
if (ret < 0)
return ret;
ff_amf_write_number(&p, 8192);
pkt.size = p - pkt.data;
ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
- rt->prev_pkt[1]);
+ &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
ff_rtmp_packet_destroy(&pkt);
return ret;
return rtmp_send_packet(rt, &pkt, 0);
}
+/**
+ * Generate 'getStreamLength' call and send it to the server. If the server
+ * knows the duration of the selected stream, it will reply with the duration
+ * in seconds.
+ *
+ * @param s  URLContext of the RTMP connection (unused here, kept for
+ *           symmetry with the other gen_* helpers)
+ * @param rt RTMP protocol context; rt->playpath selects the stream queried
+ * @return 0 on success, a negative AVERROR value on failure
+ */
+static int gen_get_stream_length(URLContext *s, RTMPContext *rt)
+{
+    RTMPPacket pkt;
+    uint8_t *p;
+    int ret;
+
+    // Packet size: 18 ("getStreamLength" as AMF string: 1 type + 2 length
+    // + 15 chars) + 9 (AMF number) + 1 (AMF null) + 3 (string header of the
+    // playpath) = 31, plus the playpath characters themselves.
+    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SOURCE_CHANNEL, RTMP_PT_INVOKE,
+                                     0, 31 + strlen(rt->playpath))) < 0)
+        return ret;
+
+    p = pkt.data;
+    ff_amf_write_string(&p, "getStreamLength");
+    ff_amf_write_number(&p, ++rt->nb_invokes); // transaction id for the reply
+    ff_amf_write_null(&p);                     // mandatory null command object
+    ff_amf_write_string(&p, rt->playpath);
+
+    // track = 1 so the "_result" reply can be matched to this call
+    return rtmp_send_packet(rt, &pkt, 1);
+}
+
/**
* Generate client buffer time and send it to the server.
*/
ff_amf_write_number(&p, ++rt->nb_invokes);
ff_amf_write_null(&p);
ff_amf_write_string(&p, rt->playpath);
- ff_amf_write_number(&p, rt->live);
+ ff_amf_write_number(&p, rt->live * 1000);
return rtmp_send_packet(rt, &pkt, 1);
}
return rtmp_send_packet(rt, &pkt, 1);
}
+/**
+ * Generate a pause packet that either pauses or unpauses the current stream.
+ *
+ * @param s         URLContext of the RTMP connection (for logging)
+ * @param rt        RTMP protocol context
+ * @param pause     nonzero to pause the stream, zero to unpause it
+ * @param timestamp stream position (in ms) at which to pause/unpause
+ * @return 0 on success, a negative AVERROR value on failure
+ */
+static int gen_pause(URLContext *s, RTMPContext *rt, int pause, uint32_t timestamp)
+{
+    RTMPPacket pkt;
+    uint8_t *p;
+    int ret;
+
+    // %u, not %d: timestamp is uint32_t and may exceed INT_MAX
+    av_log(s, AV_LOG_DEBUG, "Sending pause command for timestamp %u\n",
+           timestamp);
+
+    // Packet size: 8 ("pause" as AMF string) + 9 (AMF number) + 1 (AMF null)
+    // + 2 (AMF bool) + 9 (AMF number) = 29 bytes
+    if ((ret = ff_rtmp_packet_create(&pkt, 3, RTMP_PT_INVOKE, 0, 29)) < 0)
+        return ret;
+
+    pkt.extra = rt->stream_id;
+
+    p = pkt.data;
+    ff_amf_write_string(&p, "pause");
+    ff_amf_write_number(&p, 0);         // no tracking back responses
+    ff_amf_write_null(&p);              // as usual, the first null param
+    ff_amf_write_bool(&p, pause);       // pause or unpause
+    ff_amf_write_number(&p, timestamp); // where we pause the stream
+
+    return rtmp_send_packet(rt, &pkt, 1);
+}
+
/**
* Generate 'publish' call and send it to the server.
*/
for (i = 9; i <= RTMP_HANDSHAKE_PACKET_SIZE; i++)
tosend[i] = av_lfg_get(&rnd) >> 24;
- if (rt->encrypted && CONFIG_FFRTMPCRYPT_PROTOCOL) {
+ if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
/* When the client wants to use RTMPE, we have to change the command
* byte to 0x06 which means to use encrypted data and we have to set
* the flash version to at least 9.0.115.0. */
if (ret < 0)
return ret;
- if (rt->encrypted && CONFIG_FFRTMPCRYPT_PROTOCOL) {
+ if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
/* Compute the shared secret key sent by the server and initialize
* the RC4 encryption. */
if ((ret = ff_rtmpe_compute_secret_key(rt->stream, serverdata + 1,
if (ret < 0)
return ret;
- if (rt->encrypted && CONFIG_FFRTMPCRYPT_PROTOCOL) {
+ if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
/* Encrypt the signature to be send to the server. */
ff_rtmpe_encrypt_sig(rt->stream, tosend +
RTMP_HANDSHAKE_PACKET_SIZE - 32, digest,
RTMP_HANDSHAKE_PACKET_SIZE)) < 0)
return ret;
- if (rt->encrypted && CONFIG_FFRTMPCRYPT_PROTOCOL) {
+ if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
/* Set RC4 keys for encryption and update the keystreams. */
if ((ret = ff_rtmpe_update_keystream(rt->stream)) < 0)
return ret;
}
} else {
- if (rt->encrypted && CONFIG_FFRTMPCRYPT_PROTOCOL) {
+ if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
/* Compute the shared secret key sent by the server and initialize
* the RC4 encryption. */
if ((ret = ff_rtmpe_compute_secret_key(rt->stream, serverdata + 1,
RTMP_HANDSHAKE_PACKET_SIZE)) < 0)
return ret;
- if (rt->encrypted && CONFIG_FFRTMPCRYPT_PROTOCOL) {
+ if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
/* Set RC4 keys for encryption and update the keystreams. */
if ((ret = ff_rtmpe_update_keystream(rt->stream)) < 0)
return ret;
/* Send the same chunk size change packet back to the server,
* setting the outgoing chunk size to the same as the incoming one. */
if ((ret = ff_rtmp_packet_write(rt->stream, pkt, rt->out_chunk_size,
- rt->prev_pkt[1])) < 0)
+ &rt->prev_pkt[1], &rt->nb_prev_pkt[1])) < 0)
return ret;
rt->out_chunk_size = AV_RB32(pkt->data);
}
bytestream2_put_be32(&pbc, rt->nb_streamid);
ret = ff_rtmp_packet_write(rt->stream, &spkt, rt->out_chunk_size,
- rt->prev_pkt[1]);
+ &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
ff_rtmp_packet_destroy(&spkt);
spkt.size = pp - spkt.data;
ret = ff_rtmp_packet_write(rt->stream, &spkt, rt->out_chunk_size,
- rt->prev_pkt[1]);
+ &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
ff_rtmp_packet_destroy(&spkt);
return ret;
}
spkt.size = pp - spkt.data;
ret = ff_rtmp_packet_write(rt->stream, &spkt, rt->out_chunk_size,
- rt->prev_pkt[1]);
+ &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
ff_rtmp_packet_destroy(&spkt);
return ret;
}
+/**
+ * Read the AMF_NUMBER response ("_result") to a function call
+ * (e.g. createStream()). The expected layout is: the AMF_STRING "_result",
+ * the callee reference number, a NULL object, and then the response encoded
+ * as AMF_NUMBER. On a successful parse, *number is set to that value
+ * (otherwise number will not be changed).
+ *
+ * @param pkt    received packet to parse
+ * @param number receives the decoded value on success
+ * @return 0 if reading the value succeeds, negative value otherwise
+ */
+static int read_number_result(RTMPPacket *pkt, double *number)
+{
+    // We only need to fit "_result" in this.
+    uint8_t strbuffer[8];
+    int stringlen;
+    double numbuffer;
+    GetByteContext gbc;
+
+    bytestream2_init(&gbc, pkt->data, pkt->size);
+
+    // Value 1/4: "_result" as AMF_STRING
+    if (ff_amf_read_string(&gbc, strbuffer, sizeof(strbuffer), &stringlen))
+        return AVERROR_INVALIDDATA;
+    if (strcmp(strbuffer, "_result"))
+        return AVERROR_INVALIDDATA;
+    // Value 2/4: The callee reference number (read and discarded)
+    if (ff_amf_read_number(&gbc, &numbuffer))
+        return AVERROR_INVALIDDATA;
+    // Value 3/4: Null
+    if (ff_amf_read_null(&gbc))
+        return AVERROR_INVALIDDATA;
+    // Value 4/4: The response as AMF_NUMBER
+    if (ff_amf_read_number(&gbc, &numbuffer))
+        return AVERROR_INVALIDDATA;
+    else
+        *number = numbuffer;
+
+    return 0;
+}
+
static int handle_invoke_result(URLContext *s, RTMPPacket *pkt)
{
RTMPContext *rt = s->priv_data;
}
}
} else if (!strcmp(tracked_method, "createStream")) {
- //extract a number from the result
- if (pkt->data[10] || pkt->data[19] != 5 || pkt->data[20]) {
+ double stream_id;
+ if (read_number_result(pkt, &stream_id)) {
av_log(s, AV_LOG_WARNING, "Unexpected reply on connect()\n");
} else {
- rt->stream_id = av_int2double(AV_RB64(pkt->data + 21));
+ rt->stream_id = stream_id;
}
if (!rt->is_input) {
if ((ret = gen_publish(s, rt)) < 0)
goto fail;
} else {
+ if (rt->live != -1) {
+ if ((ret = gen_get_stream_length(s, rt)) < 0)
+ goto fail;
+ }
if ((ret = gen_play(s, rt)) < 0)
goto fail;
if ((ret = gen_buffer_time(s, rt)) < 0)
goto fail;
}
+ } else if (!strcmp(tracked_method, "getStreamLength")) {
+ if (read_number_result(pkt, &rt->duration)) {
+ av_log(s, AV_LOG_WARNING, "Unexpected reply on getStreamLength()\n");
+ }
}
fail:
t = ff_amf_get_field_value(ptr, data_end, "level", tmpstr, sizeof(tmpstr));
if (!t && !strcmp(tmpstr, "error")) {
- if (!ff_amf_get_field_value(ptr, data_end,
- "description", tmpstr, sizeof(tmpstr)))
+ t = ff_amf_get_field_value(ptr, data_end,
+ "description", tmpstr, sizeof(tmpstr));
+ if (t || !tmpstr[0])
+ t = ff_amf_get_field_value(ptr, data_end, "code",
+ tmpstr, sizeof(tmpstr));
+ if (!t)
av_log(s, AV_LOG_ERROR, "Server error: %s\n", tmpstr);
return -1;
}
if (rt->flv_off < rt->flv_size) {
// There is old unread data in the buffer, thus append at the end
old_flv_size = rt->flv_size;
- rt->flv_size += size + 15;
+ rt->flv_size += size;
} else {
// All data has been read, write the new data at the start of the buffer
old_flv_size = 0;
- rt->flv_size = size + 15;
+ rt->flv_size = size;
rt->flv_off = 0;
}
const int size = pkt->size - skip;
uint32_t ts = pkt->timestamp;
- old_flv_size = update_offset(rt, size);
+ if (pkt->type == RTMP_PT_AUDIO) {
+ rt->has_audio = 1;
+ } else if (pkt->type == RTMP_PT_VIDEO) {
+ rt->has_video = 1;
+ }
+
+ old_flv_size = update_offset(rt, size + 15);
- if ((ret = av_reallocp(&rt->flv_data, rt->flv_size)) < 0)
+ if ((ret = av_reallocp(&rt->flv_data, rt->flv_size)) < 0) {
+ rt->flv_size = rt->flv_off = 0;
return ret;
+ }
bytestream2_init_writer(&pbc, rt->flv_data, rt->flv_size);
bytestream2_skip_p(&pbc, old_flv_size);
bytestream2_put_byte(&pbc, pkt->type);
&stringlen))
return AVERROR_INVALIDDATA;
+ if (!strcmp(commandbuffer, "onMetaData")) {
+ // metadata properties should be stored in a mixed array
+ if (bytestream2_get_byte(&gbc) == AMF_DATA_TYPE_MIXEDARRAY) {
+ // We have found a metaData Array so flv can determine the streams
+ // from this.
+ rt->received_metadata = 1;
+ // skip 32-bit max array index
+ bytestream2_skip(&gbc, 4);
+ while (bytestream2_get_bytes_left(&gbc) > 3) {
+ if (ff_amf_get_string(&gbc, statusmsg, sizeof(statusmsg),
+ &stringlen))
+ return AVERROR_INVALIDDATA;
+ // We do not care about the content of the property (yet).
+ stringlen = ff_amf_tag_size(gbc.buffer, gbc.buffer_end);
+ if (stringlen < 0)
+ return AVERROR_INVALIDDATA;
+ bytestream2_skip(&gbc, stringlen);
+
+ // The presence of the following properties indicates that the
+ // respective streams are present.
+ if (!strcmp(statusmsg, "videocodecid")) {
+ rt->has_video = 1;
+ }
+ if (!strcmp(statusmsg, "audiocodecid")) {
+ rt->has_audio = 1;
+ }
+ }
+ if (bytestream2_get_be24(&gbc) != AMF_END_OF_OBJECT)
+ return AVERROR_INVALIDDATA;
+ }
+ }
+
// Skip the @setDataFrame string and validate it is a notification
if (!strcmp(commandbuffer, "@setDataFrame")) {
skip = gbc.buffer - pkt->data;
old_flv_size = update_offset(rt, pkt->size);
- if ((ret = av_reallocp(&rt->flv_data, rt->flv_size)) < 0)
+ if ((ret = av_reallocp(&rt->flv_data, rt->flv_size)) < 0) {
+ rt->flv_size = rt->flv_off = 0;
return ret;
+ }
next = pkt->data;
p = rt->flv_data + old_flv_size;
pts = cts;
ts += cts - pts;
pts = cts;
+ if (size + 3 + 4 > pkt->data + pkt->size - next)
+ break;
bytestream_put_byte(&p, type);
bytestream_put_be24(&p, size);
bytestream_put_be24(&p, ts);
next += size + 3 + 4;
p += size + 3 + 4;
}
- memcpy(p, next, RTMP_HEADER);
+ if (p != rt->flv_data + rt->flv_size) {
+ av_log(NULL, AV_LOG_WARNING, "Incomplete flv packets in "
+ "RTMP_PT_METADATA packet\n");
+ rt->flv_size = p - rt->flv_data;
+ }
return 0;
}
for (;;) {
RTMPPacket rpkt = { 0 };
if ((ret = ff_rtmp_packet_read(rt->stream, &rpkt,
- rt->in_chunk_size, rt->prev_pkt[0])) <= 0) {
+ rt->in_chunk_size, &rt->prev_pkt[0],
+ &rt->nb_prev_pkt[0])) <= 0) {
if (ret == 0) {
return AVERROR(EAGAIN);
} else {
return AVERROR(EIO);
}
}
+
+ // Track timestamp for later use
+ rt->last_timestamp = rpkt.timestamp;
+
rt->bytes_read += ret;
if (rt->bytes_read > rt->last_bytes_read + rt->client_report_size) {
av_log(s, AV_LOG_DEBUG, "Sending bytes read report\n");
}
if (rt->state > STATE_HANDSHAKED)
ret = gen_delete_stream(h, rt);
- for (i = 0; i < 2; i++)
- for (j = 0; j < RTMP_CHANNELS; j++)
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < rt->nb_prev_pkt[i]; j++)
ff_rtmp_packet_destroy(&rt->prev_pkt[i][j]);
+ av_freep(&rt->prev_pkt[i]);
+ }
free_tracked_methods(rt);
av_freep(&rt->flv_data);
return ret;
}
+/**
+ * Insert a fake onMetaData packet into the FLV stream to notify the FLV
+ * demuxer about the duration of the stream.
+ *
+ * This should only be done if there was no real onMetadata packet sent by the
+ * server at the start of the stream and if we were able to retrieve a valid
+ * duration via a getStreamLength call.
+ *
+ * @return 0 for successful operation, negative value in case of error
+ */
+static int inject_fake_duration_metadata(RTMPContext *rt)
+{
+    // We need to insert the metadata packet directly after the FLV
+    // header, i.e. we need to move all other already read data by the
+    // size of our fake metadata packet.
+    // 55 = 11 (FLV tag header) + 40 (tag data) + 4 (PreviousTagSize field)
+
+    uint8_t* p;
+    // Keep old flv_data pointer
+    uint8_t* old_flv_data = rt->flv_data;
+    // Allocate a new flv_data pointer with enough space for the additional packet
+    if (!(rt->flv_data = av_malloc(rt->flv_size + 55))) {
+        // Restore the old buffer so the context remains usable on OOM
+        rt->flv_data = old_flv_data;
+        return AVERROR(ENOMEM);
+    }
+
+    // Copy the 13-byte FLV header (9 header bytes + 4-byte PreviousTagSize0)
+    memcpy(rt->flv_data, old_flv_data, 13);
+    // Copy remaining packets
+    memcpy(rt->flv_data + 13 + 55, old_flv_data + 13, rt->flv_size - 13);
+    // Increase the size by the injected packet
+    rt->flv_size += 55;
+    // Delete the old FLV data
+    av_free(old_flv_data);
+
+    p = rt->flv_data + 13;
+    bytestream_put_byte(&p, FLV_TAG_TYPE_META);
+    bytestream_put_be24(&p, 40); // size of data part (sum of all parts below)
+    bytestream_put_be24(&p, 0);  // timestamp
+    bytestream_put_be32(&p, 0);  // reserved (stream id / extended timestamp)
+
+    // first event name as a string
+    bytestream_put_byte(&p, AMF_DATA_TYPE_STRING);
+    // "onMetaData" as AMF string
+    bytestream_put_be16(&p, 10);
+    bytestream_put_buffer(&p, "onMetaData", 10);
+
+    // mixed array (hash) with size and string/type/data tuples
+    bytestream_put_byte(&p, AMF_DATA_TYPE_MIXEDARRAY);
+    bytestream_put_be32(&p, 1); // metadata_count
+
+    // "duration" as AMF string
+    bytestream_put_be16(&p, 8);
+    bytestream_put_buffer(&p, "duration", 8);
+    bytestream_put_byte(&p, AMF_DATA_TYPE_NUMBER);
+    bytestream_put_be64(&p, av_double2int(rt->duration));
+
+    // Finalise object
+    bytestream_put_be16(&p, 0); // Empty string
+    bytestream_put_byte(&p, AMF_END_OF_OBJECT);
+    // Trailing PreviousTagSize field. NOTE(review): the FLV spec defines this
+    // as the full previous tag size including the 11-byte header (i.e. 51),
+    // but only the data size (40) is written -- confirm the demuxer is
+    // tolerant of this before changing it.
+    bytestream_put_be32(&p, 40);
+
+    return 0;
+}
+
/**
* Open RTMP connection and verify that the stream can be played.
*
{
RTMPContext *rt = s->priv_data;
char proto[8], hostname[256], path[1024], auth[100], *fname;
- char *old_app;
+ char *old_app, *qmark, fname_buffer[1024];
uint8_t buf[2048];
int port;
AVDictionary *opts = NULL;
}
//extract "app" part from path
- if (!strncmp(path, "/ondemand/", 10)) {
+ qmark = strchr(path, '?');
+ if (qmark && strstr(qmark, "slist=")) {
+ char* amp;
+ // After slist we have the playpath, before the params, the app
+ av_strlcpy(rt->app, path + 1, FFMIN(qmark - path, APP_MAX_LENGTH));
+ fname = strstr(path, "slist=") + 6;
+ // Strip any further query parameters from fname
+ amp = strchr(fname, '&');
+ if (amp) {
+ av_strlcpy(fname_buffer, fname, FFMIN(amp - fname + 1,
+ sizeof(fname_buffer)));
+ fname = fname_buffer;
+ }
+ } else if (!strncmp(path, "/ondemand/", 10)) {
fname = path + 10;
memcpy(rt->app, "ondemand", 9);
} else {
fname = strchr(p + 1, '/');
if (!fname || (c && c < fname)) {
fname = p + 1;
- av_strlcpy(rt->app, path + 1, p - path);
+ av_strlcpy(rt->app, path + 1, FFMIN(p - path, APP_MAX_LENGTH));
} else {
fname++;
- av_strlcpy(rt->app, path + 1, fname - path - 1);
+ av_strlcpy(rt->app, path + 1, FFMIN(fname - path - 1, APP_MAX_LENGTH));
}
}
}
(!strcmp(fname + len - 4, ".f4v") ||
!strcmp(fname + len - 4, ".mp4"))) {
memcpy(rt->playpath, "mp4:", 5);
- } else if (len >= 4 && !strcmp(fname + len - 4, ".flv")) {
- fname[len - 4] = '\0';
} else {
+ if (len >= 4 && !strcmp(fname + len - 4, ".flv"))
+ fname[len - 4] = '\0';
rt->playpath[0] = 0;
}
av_strlcat(rt->playpath, fname, PLAYPATH_MAX_LENGTH);
rt->client_report_size = 1048576;
rt->bytes_read = 0;
+ rt->has_audio = 0;
+ rt->has_video = 0;
+ rt->received_metadata = 0;
rt->last_bytes_read = 0;
rt->server_bw = 2500000;
+ rt->duration = 0;
av_log(s, AV_LOG_DEBUG, "Proto = %s, path = %s, app = %s, fname = %s\n",
proto, path, rt->app, rt->playpath);
if ((ret = gen_connect(s, rt)) < 0)
goto fail;
} else {
- if (read_connect(s, s->priv_data) < 0)
+ if ((ret = read_connect(s, s->priv_data)) < 0)
goto fail;
}
goto fail;
if (rt->do_reconnect) {
+ int i;
ffurl_close(rt->stream);
rt->stream = NULL;
rt->do_reconnect = 0;
rt->nb_invokes = 0;
- memset(rt->prev_pkt, 0, sizeof(rt->prev_pkt));
+ for (i = 0; i < 2; i++)
+ memset(rt->prev_pkt[i], 0,
+ sizeof(**rt->prev_pkt) * rt->nb_prev_pkt[i]);
free_tracked_methods(rt);
goto reconnect;
}
if (rt->is_input) {
- int err;
// generate FLV header for demuxer
rt->flv_size = 13;
- if ((err = av_reallocp(&rt->flv_data, rt->flv_size)) < 0)
- return err;
+ if ((ret = av_reallocp(&rt->flv_data, rt->flv_size)) < 0)
+ goto fail;
rt->flv_off = 0;
- memcpy(rt->flv_data, "FLV\1\5\0\0\0\011\0\0\0\0", rt->flv_size);
+ memcpy(rt->flv_data, "FLV\1\0\0\0\0\011\0\0\0\0", rt->flv_size);
+
+ // Read packets until we reach the first A/V packet or read metadata.
+ // If there was a metadata package in front of the A/V packets, we can
+ // build the FLV header from this. If we do not receive any metadata,
+ // the FLV decoder will allocate the needed streams when their first
+ // audio or video packet arrives.
+ while (!rt->has_audio && !rt->has_video && !rt->received_metadata) {
+ if ((ret = get_packet(s, 0)) < 0)
+ goto fail;
+ }
+
+ // Either after we have read the metadata or (if there is none) the
+ // first packet of an A/V stream, we have a better knowledge about the
+ // streams, so set the FLV header accordingly.
+ if (rt->has_audio) {
+ rt->flv_data[4] |= FLV_HEADER_FLAG_HASAUDIO;
+ }
+ if (rt->has_video) {
+ rt->flv_data[4] |= FLV_HEADER_FLAG_HASVIDEO;
+ }
+
+ // If we received the first packet of an A/V stream and no metadata but
+ // the server returned a valid duration, create a fake metadata packet
+ // to inform the FLV decoder about the duration.
+ if (!rt->received_metadata && rt->duration > 0) {
+ if ((ret = inject_fake_duration_metadata(rt)) < 0)
+ goto fail;
+ }
} else {
rt->flv_size = 0;
rt->flv_data = NULL;
return timestamp;
}
+/**
+ * Pause or resume the stream at the last received packet timestamp.
+ *
+ * @param s     URLContext of the RTMP connection
+ * @param pause nonzero to pause the stream, zero to resume it
+ * @return 0 on success, a negative AVERROR value on failure
+ */
+static int rtmp_pause(URLContext *s, int pause)
+{
+    RTMPContext *rt = s->priv_data;
+    int ret;
+
+    // %u, not %d: last_timestamp is uint32_t and may exceed INT_MAX
+    av_log(s, AV_LOG_DEBUG, "Pause at timestamp %u\n",
+           rt->last_timestamp);
+    if ((ret = gen_pause(s, rt, pause, rt->last_timestamp)) < 0) {
+        av_log(s, AV_LOG_ERROR, "Unable to send pause command at timestamp %u\n",
+               rt->last_timestamp);
+        return ret;
+    }
+    return 0;
+}
+
static int rtmp_write(URLContext *s, const uint8_t *buf, int size)
{
RTMPContext *rt = s->priv_data;
pkttype == RTMP_PT_NOTIFY) {
if (pkttype == RTMP_PT_NOTIFY)
pktsize += 16;
+ if ((ret = ff_rtmp_check_alloc_array(&rt->prev_pkt[1],
+ &rt->nb_prev_pkt[1],
+ channel)) < 0)
+ return ret;
rt->prev_pkt[1][channel].channel_id = 0;
}
if ((ret = ff_rtmp_packet_read_internal(rt->stream, &rpkt,
rt->in_chunk_size,
- rt->prev_pkt[0], c)) <= 0)
+ &rt->prev_pkt[0],
+ &rt->nb_prev_pkt[0], c)) <= 0)
return ret;
if ((ret = rtmp_parse_result(s, rt, &rpkt)) < 0)
{"rtmp_swfverify", "URL to player swf file, compute hash/size automatically.", OFFSET(swfverify), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC},
{"rtmp_tcurl", "URL of the target stream. Defaults to proto://host[:port]/app.", OFFSET(tcurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
{"rtmp_listen", "Listen for incoming rtmp connections", OFFSET(listen), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, DEC, "rtmp_listen" },
+ {"listen", "Listen for incoming rtmp connections", OFFSET(listen), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, DEC, "rtmp_listen" },
{"timeout", "Maximum timeout (in seconds) to wait for incoming connections. -1 is infinite. Implies -rtmp_listen 1", OFFSET(listen_timeout), AV_OPT_TYPE_INT, {.i64 = -1}, INT_MIN, INT_MAX, DEC, "rtmp_listen" },
{ NULL },
};
.url_open = rtmp_open, \
.url_read = rtmp_read, \
.url_read_seek = rtmp_seek, \
+ .url_read_pause = rtmp_pause, \
.url_write = rtmp_write, \
.url_close = rtmp_close, \
.priv_data_size = sizeof(RTMPContext), \