#include <netinet/in.h>
#include <arpa/inet.h>
#include <netdb.h>
-#include <ctype.h>
#include <signal.h>
#ifdef CONFIG_HAVE_DLFCN
#include <dlfcn.h>
RTSPSTATE_WAIT_REQUEST,
RTSPSTATE_SEND_REPLY,
+ RTSPSTATE_SEND_PACKET,
};
const char *http_state[] = {
"RTSP_WAIT_REQUEST",
"RTSP_SEND_REPLY",
+ "RTSP_SEND_PACKET",
};
#define IOBUFFER_INIT_SIZE 8192
#define SYNC_TIMEOUT (10 * 1000)
typedef struct {
- INT64 count1, count2;
+ int64_t count1, count2;
long time1, time2;
} DataRateData;
struct sockaddr_in from_addr; /* origin */
struct pollfd *poll_entry; /* used when polling */
long timeout;
- UINT8 *buffer_ptr, *buffer_end;
+ uint8_t *buffer_ptr, *buffer_end;
int http_error;
struct HTTPContext *next;
int got_key_frame; /* stream 0 => 1, stream 1 => 2, stream 2=> 4 */
- INT64 data_count;
+ int64_t data_count;
/* feed input */
int feed_fd;
/* input format handling */
AVFormatContext *fmt_in;
long start_time; /* In milliseconds - this wraps fairly often */
- INT64 first_pts; /* initial pts value */
+ int64_t first_pts; /* initial pts value */
+ int64_t cur_pts; /* current pts value */
int pts_stream_index; /* stream we choose as clock reference */
/* output format handling */
struct FFStream *stream;
char method[16];
char url[128];
int buffer_size;
- UINT8 *buffer;
+ uint8_t *buffer;
int is_packetized; /* if true, the stream is packetized */
int packet_stream_index; /* current stream for output in state machine */
/* RTSP state specific */
- UINT8 *pb_buffer; /* XXX: use that in all the code */
+ uint8_t *pb_buffer; /* XXX: use that in all the code */
ByteIOContext *pb;
int seq; /* RTSP sequence number */
enum RTSPProtocol rtp_protocol;
char session_id[32]; /* session id */
AVFormatContext *rtp_ctx[MAX_STREAMS];
- URLContext *rtp_handles[MAX_STREAMS];
/* RTP short term bandwidth limitation */
int packet_byte_count;
int packet_start_time_us; /* used for short durations (a few
seconds max) */
+ /* RTP/UDP specific */
+ URLContext *rtp_handles[MAX_STREAMS];
+
+ /* RTP/TCP specific */
+ struct HTTPContext *rtsp_c;
+ uint8_t *packet_buffer, *packet_buffer_ptr, *packet_buffer_end;
} HTTPContext;
+static AVFrame dummy_frame;
+
/* each generated stream is described here */
enum StreamType {
STREAM_TYPE_LIVE,
typedef struct IPAddressACL {
struct IPAddressACL *next;
enum IPAddressAction action;
+ /* These are in host order */
struct in_addr first;
struct in_addr last;
} IPAddressACL;
/* feed specific */
int feed_opened; /* true if someone is writing to the feed */
int is_feed; /* true if it is a feed */
+ int readonly; /* True if writing is prohibited to the file */
int conns_served;
- INT64 bytes_served;
- INT64 feed_max_size; /* maximum storage size */
- INT64 feed_write_index; /* current write position in feed (it wraps round) */
- INT64 feed_size; /* current size of feed */
+ int64_t bytes_served;
+ int64_t feed_max_size; /* maximum storage size */
+ int64_t feed_write_index; /* current write position in feed (it wraps round) */
+ int64_t feed_size; /* current size of feed */
struct FFStream *next_feed;
} FFStream;
/* RTSP handling */
static int rtsp_parse_request(HTTPContext *c);
static void rtsp_cmd_describe(HTTPContext *c, const char *url);
+static void rtsp_cmd_options(HTTPContext *c, const char *url);
static void rtsp_cmd_setup(HTTPContext *c, const char *url, RTSPHeader *h);
static void rtsp_cmd_play(HTTPContext *c, const char *url, RTSPHeader *h);
static void rtsp_cmd_pause(HTTPContext *c, const char *url, RTSPHeader *h);
static void rtsp_cmd_teardown(HTTPContext *c, const char *url, RTSPHeader *h);
/* SDP handling */
-static int prepare_sdp_description(FFStream *stream, UINT8 **pbuffer,
+static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
struct in_addr my_ip);
/* RTP handling */
static HTTPContext *rtp_new_connection(struct sockaddr_in *from_addr,
- FFStream *stream, const char *session_id);
+ FFStream *stream, const char *session_id,
+ enum RTSPProtocol rtp_protocol);
static int rtp_new_av_stream(HTTPContext *c,
- int stream_index, struct sockaddr_in *dest_addr);
+ int stream_index, struct sockaddr_in *dest_addr,
+ HTTPContext *rtsp_c);
static const char *my_program_name;
static const char *my_program_dir;
static FILE *logfile = NULL;
-static void http_log(char *fmt, ...)
+static void __attribute__ ((format (printf, 1, 2))) http_log(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
c->protocol, (c->http_error ? c->http_error : 200), c->data_count);
}
-static void update_datarate(DataRateData *drd, INT64 count)
+static void update_datarate(DataRateData *drd, int64_t count)
{
if (!drd->time1 && !drd->count1) {
drd->time1 = drd->time2 = cur_time;
}
/* In bytes per second */
-static int compute_datarate(DataRateData *drd, INT64 count)
+static int compute_datarate(DataRateData *drd, int64_t count)
{
    /* no time elapsed since the first sample: avoid division by zero */
    if (cur_time == drd->time1)
        return 0;
    /* (delta bytes * 1000) / delta milliseconds -> bytes per second */
    return ((count - drd->count1) * 1000) / (cur_time - drd->time1);
}
-static int get_longterm_datarate(DataRateData *drd, INT64 count)
+static int get_longterm_datarate(DataRateData *drd, int64_t count)
{
/* You get the first 3 seconds flat out */
if (cur_time - drd->time1 < 3000)
/* This is needed to make relative pathnames work */
chdir(my_program_dir);
+ signal(SIGPIPE, SIG_DFL);
+
execvp(pathname, feed->child_argv);
_exit(1);
setsockopt(server_fd, SOL_SOCKET, SO_REUSEADDR, &tmp, sizeof(tmp));
if (bind (server_fd, (struct sockaddr *) my_addr, sizeof (*my_addr)) < 0) {
- perror ("bind");
+ char bindmsg[32];
+ snprintf(bindmsg, sizeof(bindmsg), "bind(port %d)", ntohs(my_addr->sin_port));
+ perror (bindmsg);
close(server_fd);
return -1;
}
dest_addr.sin_addr = stream->multicast_ip;
dest_addr.sin_port = htons(stream->multicast_port);
- rtp_c = rtp_new_connection(&dest_addr, stream, session_id);
+ rtp_c = rtp_new_connection(&dest_addr, stream, session_id,
+ RTSP_PROTOCOL_RTP_UDP_MULTICAST);
if (!rtp_c) {
continue;
}
continue;
}
- rtp_c->rtp_protocol = RTSP_PROTOCOL_RTP_UDP_MULTICAST;
-
/* open each RTP stream */
for(stream_index = 0; stream_index < stream->nb_streams;
stream_index++) {
dest_addr.sin_port = htons(stream->multicast_port +
2 * stream_index);
- if (rtp_new_av_stream(rtp_c, stream_index, &dest_addr) < 0) {
+ if (rtp_new_av_stream(rtp_c, stream_index, &dest_addr, NULL) < 0) {
fprintf(stderr, "Could not open output stream '%s/streamid=%d'\n",
stream->filename, stream_index);
exit(1);
first_http_ctx = NULL;
nb_connections = 0;
- first_http_ctx = NULL;
start_multicast();
switch(c->state) {
case HTTPSTATE_SEND_HEADER:
case RTSPSTATE_SEND_REPLY:
+ case RTSPSTATE_SEND_PACKET:
c->poll_entry = poll_entry;
poll_entry->fd = fd;
poll_entry->events = POLLOUT;
if (!c)
goto fail;
- c->next = first_http_ctx;
- first_http_ctx = c;
c->fd = fd;
c->poll_entry = NULL;
c->from_addr = from_addr;
c->buffer = av_malloc(c->buffer_size);
if (!c->buffer)
goto fail;
+
+ c->next = first_http_ctx;
+ first_http_ctx = c;
nb_connections++;
start_wait_request(c, is_rtsp);
}
}
+ /* remove references, if any (XXX: do it faster) */
+ for(c1 = first_http_ctx; c1 != NULL; c1 = c1->next) {
+ if (c1->rtsp_c == c)
+ c1->rtsp_c = NULL;
+ }
+
/* remove connection associated resources */
if (c->fd >= 0)
close(c->fd);
url_close(h);
}
}
+
+ ctx = &c->fmt_ctx;
+
+ if (!c->last_packet_sent) {
+ if (ctx->oformat) {
+ /* prepare header */
+ if (url_open_dyn_buf(&ctx->pb) >= 0) {
+ av_write_trailer(ctx);
+ url_close_dyn_buf(&ctx->pb, &c->pb_buffer);
+ }
+ }
+ }
+
+ for(i=0; i<ctx->nb_streams; i++)
+ av_free(ctx->streams[i]) ;
if (c->stream)
current_bandwidth -= c->stream->bandwidth;
av_freep(&c->pb_buffer);
+ av_freep(&c->packet_buffer);
av_free(c->buffer);
av_free(c);
nb_connections--;
if (!(c->poll_entry->revents & POLLIN))
return 0;
/* read the data */
- len = read(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr);
+ read_loop:
+ len = read(c->fd, c->buffer_ptr, 1);
if (len < 0) {
if (errno != EAGAIN && errno != EINTR)
return -1;
} else if (len == 0) {
return -1;
} else {
- /* search for end of request. XXX: not fully correct since garbage could come after the end */
- UINT8 *ptr;
+ /* search for end of request. */
+ uint8_t *ptr;
c->buffer_ptr += len;
ptr = c->buffer_ptr;
if ((ptr >= c->buffer + 2 && !memcmp(ptr-2, "\n\n", 2)) ||
} else if (ptr >= c->buffer_end) {
/* request too long: cannot do anything */
return -1;
- }
+ } else goto read_loop;
}
break;
}
}
break;
+ case RTSPSTATE_SEND_PACKET:
+ if (c->poll_entry->revents & (POLLERR | POLLHUP)) {
+ av_freep(&c->packet_buffer);
+ return -1;
+ }
+ /* no need to write if no events */
+ if (!(c->poll_entry->revents & POLLOUT))
+ return 0;
+ len = write(c->fd, c->packet_buffer_ptr,
+ c->packet_buffer_end - c->packet_buffer_ptr);
+ if (len < 0) {
+ if (errno != EAGAIN && errno != EINTR) {
+ /* error : close connection */
+ av_freep(&c->packet_buffer);
+ return -1;
+ }
+ } else {
+ c->packet_buffer_ptr += len;
+ if (c->packet_buffer_ptr >= c->packet_buffer_end) {
+ /* all the buffer was sent : wait for a new request */
+ av_freep(&c->packet_buffer);
+ c->state = RTSPSTATE_WAIT_REQUEST;
+ }
+ }
+ break;
case HTTPSTATE_READY:
/* nothing to do */
break;
FFStream *req = c->stream;
int action_required = 0;
+ /* Not much we can do for a feed */
+ if (!req->feed)
+ return 0;
+
for (i = 0; i < req->nb_streams; i++) {
AVCodecContext *codec = &req->streams[i]->codec;
enum IPAddressAction last_action = IP_DENY;
IPAddressACL *acl;
struct in_addr *src = &c->from_addr.sin_addr;
+ unsigned long src_addr = ntohl(src->s_addr);
for (acl = stream->acl; acl; acl = acl->next) {
- if (src->s_addr >= acl->first.s_addr && src->s_addr <= acl->last.s_addr) {
+ if (src_addr >= acl->first.s_addr && src_addr <= acl->last.s_addr) {
return (acl->action == IP_ALLOW) ? 1 : 0;
}
last_action = acl->action;
if (match_ext(filename, "asx")) {
redir_type = REDIR_ASX;
filename[strlen(filename)-1] = 'f';
- } else if (match_ext(filename, ".asf") &&
+ } else if (match_ext(filename, "asf") &&
(!useragent || strncasecmp(useragent, "NSPlayer", 8) != 0)) {
/* if this isn't WMP or lookalike, return the redirector file */
redir_type = REDIR_ASF;
break;
case REDIR_SDP:
{
- UINT8 *sdp_data;
+ uint8_t *sdp_data;
int sdp_data_size, len;
struct sockaddr_in my_addr;
}
sprintf(msg, "POST command not handled");
+ c->stream = 0;
goto send_error;
}
if (http_start_receive_data(c) < 0) {
c->wmp_client_id = random() & 0x7fffffff;
q += sprintf(q, "Server: Cougar 4.1.0.3923\r\nCache-Control: no-cache\r\nPragma: client-id=%d\r\nPragma: features=\"broadcast\"\r\n", c->wmp_client_id);
- mime_type = "application/octet-stream";
}
q += sprintf(q, "Content-Type: %s\r\n", mime_type);
q += sprintf(q, "\r\n");
return 0;
}
-static void fmt_bytecount(ByteIOContext *pb, INT64 count)
+static void fmt_bytecount(ByteIOContext *pb, int64_t count)
{
static const char *suffix = " kMGTP";
const char *s;
for (i = 0; i < stream->nb_streams; i++) {
AVStream *st = stream->streams[i];
AVCodec *codec = avcodec_find_encoder(st->codec.codec_id);
- char *type = "unknown";
+ const char *type = "unknown";
char parameters[64];
parameters[0] = 0;
case CODEC_TYPE_VIDEO:
type = "video";
sprintf(parameters, "%dx%d, q=%d-%d, fps=%d", st->codec.width, st->codec.height,
- st->codec.qmin, st->codec.qmax, st->codec.frame_rate / FRAME_RATE_BASE);
+ st->codec.qmin, st->codec.qmax, st->codec.frame_rate / st->codec.frame_rate_base);
break;
default:
av_abort();
char input_filename[1024];
AVFormatContext *s;
int buf_size, i;
- INT64 stream_pos;
+ int64_t stream_pos;
/* find file name */
if (c->stream->feed) {
stream_pos = parse_date(buf, 0);
} else if (find_info_tag(buf, sizeof(buf), "buffer", info)) {
int prebuffer = strtol(buf, 0, 10);
- stream_pos = av_gettime() - prebuffer * (INT64)1000000;
+ stream_pos = av_gettime() - prebuffer * (int64_t)1000000;
} else {
- stream_pos = av_gettime() - c->stream->prebuffer * (INT64)1000;
+ stream_pos = av_gettime() - c->stream->prebuffer * (int64_t)1000;
}
} else {
strcpy(input_filename, c->stream->feed_filename);
if (st->pts.den == 0) {
switch(st->codec.codec_type) {
case CODEC_TYPE_AUDIO:
- st->pts_incr = (INT64)s->pts_den;
+ st->pts_incr = (int64_t)s->pts_den;
av_frac_init(&st->pts, st->pts.val, 0,
- (INT64)s->pts_num * st->codec.sample_rate);
+ (int64_t)s->pts_num * st->codec.sample_rate);
break;
case CODEC_TYPE_VIDEO:
- st->pts_incr = (INT64)s->pts_den * FRAME_RATE_BASE;
+ st->pts_incr = (int64_t)s->pts_den * st->codec.frame_rate_base;
av_frac_init(&st->pts, st->pts.val, 0,
- (INT64)s->pts_num * st->codec.frame_rate);
+ (int64_t)s->pts_num * st->codec.frame_rate);
break;
default:
av_abort();
/* we use the codec indication because it is
more accurate than the demux flags */
pkt->flags = 0;
- if (st->codec.key_frame)
+ if (st->codec.coded_frame->key_frame)
pkt->flags |= PKT_FLAG_KEY;
return 0;
}
if (st->pts.den == 0) {
switch(st->codec.codec_type) {
case CODEC_TYPE_AUDIO:
- st->pts_incr = (INT64)s->pts_den * st->codec.frame_size;
+ st->pts_incr = (int64_t)s->pts_den * st->codec.frame_size;
av_frac_init(&st->pts, st->pts.val, 0,
- (INT64)s->pts_num * st->codec.sample_rate);
+ (int64_t)s->pts_num * st->codec.sample_rate);
break;
case CODEC_TYPE_VIDEO:
- st->pts_incr = (INT64)s->pts_den * FRAME_RATE_BASE;
+ st->pts_incr = (int64_t)s->pts_den * st->codec.frame_rate_base;
av_frac_init(&st->pts, st->pts.val, 0,
- (INT64)s->pts_num * st->codec.frame_rate);
+ (int64_t)s->pts_num * st->codec.frame_rate);
break;
default:
av_abort();
static int compute_send_delay(HTTPContext *c)
{
- INT64 cur_pts, delta_pts, next_pts;
+ int64_t cur_pts, delta_pts, next_pts;
int delay1;
/* compute current pts value from system time */
- cur_pts = ((INT64)(cur_time - c->start_time) * c->fmt_in->pts_den) /
+ cur_pts = ((int64_t)(cur_time - c->start_time) * c->fmt_in->pts_den) /
(c->fmt_in->pts_num * 1000LL);
/* compute the delta from the stream we choose as
main clock (we do that to avoid using explicit
#else
/* just fall backs */
-int av_read_frame(AVFormatContext *s, AVPacket *pkt)
+static int av_read_frame(AVFormatContext *s, AVPacket *pkt)
{
return av_read_packet(s, pkt);
}
static int compute_send_delay(HTTPContext *c)
{
int datarate = 8 * get_longterm_datarate(&c->datarate, c->data_count);
+ int64_t delta_pts;
+ int64_t time_pts;
+ int m_delay;
if (datarate > c->stream->bandwidth * 2000) {
return 1000;
}
- return 0;
+ if (!c->stream->feed && c->first_pts!=AV_NOPTS_VALUE) {
+ time_pts = ((int64_t)(cur_time - c->start_time) * c->fmt_in->pts_den) /
+ ((int64_t) c->fmt_in->pts_num*1000);
+ delta_pts = c->cur_pts - time_pts;
+ m_delay = (delta_pts * 1000 * c->fmt_in->pts_num) / c->fmt_in->pts_den;
+ return m_delay>0 ? m_delay : 0;
+ } else {
+ return 0;
+ }
}
#endif
int i, len, ret;
AVFormatContext *ctx;
+ av_freep(&c->pb_buffer);
switch(c->state) {
case HTTPSTATE_SEND_DATA_HEADER:
memset(&c->fmt_ctx, 0, sizeof(c->fmt_ctx));
sizeof(AVStream));
st->codec.frame_number = 0; /* XXX: should be done in
AVStream, not in codec */
+ /* I'm pretty sure that this is not correct...
+ * However, without it, we crash
+ */
+ st->codec.coded_frame = &dummy_frame;
}
c->got_key_frame = 0;
}
c->fmt_ctx.pb.is_streamed = 1;
+ av_set_parameters(&c->fmt_ctx, NULL);
av_write_header(&c->fmt_ctx);
len = url_close_dyn_buf(&c->fmt_ctx.pb, &c->pb_buffer);
}
} else {
/* update first pts if needed */
- if (c->first_pts == AV_NOPTS_VALUE)
+ if (c->first_pts == AV_NOPTS_VALUE) {
c->first_pts = pkt.pts;
-
+ c->start_time = cur_time;
+ }
+ c->cur_pts = pkt.pts;
/* send it to the appropriate stream */
if (c->stream->feed) {
/* if coming from a feed, select the right stream */
if (c->is_packetized) {
c->packet_stream_index = pkt.stream_index;
ctx = c->rtp_ctx[c->packet_stream_index];
+ if(!ctx) {
+ av_free_packet(&pkt);
+ break;
+ }
codec = &ctx->streams[0]->codec;
/* only one stream per RTP connection */
pkt.stream_index = 0;
codec = &ctx->streams[pkt.stream_index]->codec;
}
- codec->key_frame = ((pkt.flags & PKT_FLAG_KEY) != 0);
+ codec->coded_frame->key_frame = ((pkt.flags & PKT_FLAG_KEY) != 0);
#ifdef PJSG
if (codec->codec_type == CODEC_TYPE_AUDIO) {
#endif
if (c->is_packetized) {
- ret = url_open_dyn_packet_buf(&ctx->pb,
- url_get_max_packet_size(c->rtp_handles[c->packet_stream_index]));
+ int max_packet_size;
+ if (c->rtp_protocol == RTSP_PROTOCOL_RTP_TCP)
+ max_packet_size = RTSP_TCP_MAX_PACKET_SIZE;
+ else
+ max_packet_size = url_get_max_packet_size(c->rtp_handles[c->packet_stream_index]);
+ ret = url_open_dyn_packet_buf(&ctx->pb, max_packet_size);
c->packet_byte_count = 0;
c->packet_start_time_us = av_gettime();
} else {
#define SHORT_TERM_BANDWIDTH 8000000
/* should convert the format at the same time */
+/* send data starting at c->buffer_ptr to the output connection
+ (either UDP or TCP connection) */
static int http_send_data(HTTPContext *c)
{
int len, ret, dt;
-
- while (c->buffer_ptr >= c->buffer_end) {
- av_freep(&c->pb_buffer);
- ret = http_prepare_data(c);
- if (ret < 0)
- return -1;
- else if (ret == 0) {
- continue;
- } else {
- /* state change requested */
- return 0;
- }
- }
- if (c->buffer_ptr < c->buffer_end) {
- if (c->is_packetized) {
- /* RTP/UDP data output */
- len = c->buffer_end - c->buffer_ptr;
- if (len < 4) {
- /* fail safe - should never happen */
- fail1:
- c->buffer_ptr = c->buffer_end;
- return 0;
- }
- len = (c->buffer_ptr[0] << 24) |
- (c->buffer_ptr[1] << 16) |
- (c->buffer_ptr[2] << 8) |
- (c->buffer_ptr[3]);
- if (len > (c->buffer_end - c->buffer_ptr))
- goto fail1;
-
- /* short term bandwidth limitation */
- dt = av_gettime() - c->packet_start_time_us;
- if (dt < 1)
- dt = 1;
-
- if ((c->packet_byte_count + len) * (INT64)1000000 >=
- (SHORT_TERM_BANDWIDTH / 8) * (INT64)dt) {
- /* bandwidth overflow : wait at most one tick and retry */
- c->state = HTTPSTATE_WAIT_SHORT;
- return 0;
+ for(;;) {
+ if (c->buffer_ptr >= c->buffer_end) {
+ ret = http_prepare_data(c);
+ if (ret < 0)
+ return -1;
+ else if (ret != 0) {
+ /* state change requested */
+ break;
}
-
- c->buffer_ptr += 4;
- url_write(c->rtp_handles[c->packet_stream_index],
- c->buffer_ptr, len);
- c->buffer_ptr += len;
- c->packet_byte_count += len;
} else {
- /* TCP data output */
- len = write(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr);
- if (len < 0) {
- if (errno != EAGAIN && errno != EINTR) {
- /* error : close connection */
- return -1;
- } else {
+ if (c->is_packetized) {
+ /* RTP data output */
+ len = c->buffer_end - c->buffer_ptr;
+ if (len < 4) {
+ /* fail safe - should never happen */
+ fail1:
+ c->buffer_ptr = c->buffer_end;
return 0;
}
- } else {
+ len = (c->buffer_ptr[0] << 24) |
+ (c->buffer_ptr[1] << 16) |
+ (c->buffer_ptr[2] << 8) |
+ (c->buffer_ptr[3]);
+ if (len > (c->buffer_end - c->buffer_ptr))
+ goto fail1;
+
+ if (c->rtp_protocol == RTSP_PROTOCOL_RTP_TCP) {
+ /* RTP packets are sent inside the RTSP TCP connection */
+ ByteIOContext pb1, *pb = &pb1;
+ int interleaved_index, size;
+ uint8_t header[4];
+ HTTPContext *rtsp_c;
+
+ rtsp_c = c->rtsp_c;
+ /* if no RTSP connection left, error */
+ if (!rtsp_c)
+ return -1;
+ /* if already sending something, then wait. */
+ if (rtsp_c->state != RTSPSTATE_WAIT_REQUEST) {
+ break;
+ }
+ if (url_open_dyn_buf(pb) < 0)
+ goto fail1;
+ interleaved_index = c->packet_stream_index * 2;
+ /* RTCP packets are sent at odd indexes */
+ if (c->buffer_ptr[1] == 200)
+ interleaved_index++;
+ /* write RTSP TCP header */
+ header[0] = '$';
+ header[1] = interleaved_index;
+ header[2] = len >> 8;
+ header[3] = len;
+ put_buffer(pb, header, 4);
+ /* write RTP packet data */
+ c->buffer_ptr += 4;
+ put_buffer(pb, c->buffer_ptr, len);
+ size = url_close_dyn_buf(pb, &c->packet_buffer);
+ /* prepare asynchronous TCP sending */
+ rtsp_c->packet_buffer_ptr = c->packet_buffer;
+ rtsp_c->packet_buffer_end = c->packet_buffer + size;
+ rtsp_c->state = RTSPSTATE_SEND_PACKET;
+ } else {
+ /* send RTP packet directly in UDP */
+
+ /* short term bandwidth limitation */
+ dt = av_gettime() - c->packet_start_time_us;
+ if (dt < 1)
+ dt = 1;
+
+ if ((c->packet_byte_count + len) * (int64_t)1000000 >=
+ (SHORT_TERM_BANDWIDTH / 8) * (int64_t)dt) {
+ /* bandwidth overflow : wait at most one tick and retry */
+ c->state = HTTPSTATE_WAIT_SHORT;
+ return 0;
+ }
+
+ c->buffer_ptr += 4;
+ url_write(c->rtp_handles[c->packet_stream_index],
+ c->buffer_ptr, len);
+ }
c->buffer_ptr += len;
+ c->packet_byte_count += len;
+ } else {
+ /* TCP data output */
+ len = write(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr);
+ if (len < 0) {
+ if (errno != EAGAIN && errno != EINTR) {
+ /* error : close connection */
+ return -1;
+ } else {
+ return 0;
+ }
+ } else {
+ c->buffer_ptr += len;
+ }
}
+ c->data_count += len;
+ update_datarate(&c->datarate, c->data_count);
+ if (c->stream)
+ c->stream->bytes_served += len;
+ break;
}
- c->data_count += len;
- update_datarate(&c->datarate, c->data_count);
- if (c->stream)
- c->stream->bytes_served += len;
- }
+ } /* for(;;) */
return 0;
}
if (c->stream->feed_opened)
return -1;
+ /* Don't permit writing to this one */
+ if (c->stream->readonly)
+ return -1;
+
/* open feed */
fd = open(c->stream->feed_filename, O_RDWR);
if (fd < 0)
if (!fmt_in)
goto fail;
- s.priv_data = av_mallocz(fmt_in->priv_data_size);
- if (!s.priv_data)
- goto fail;
+ if (fmt_in->priv_data_size > 0) {
+ s.priv_data = av_mallocz(fmt_in->priv_data_size);
+ if (!s.priv_data)
+ goto fail;
+ } else
+ s.priv_data = NULL;
if (fmt_in->read_header(&s, 0) < 0) {
av_freep(&s.priv_data);
if (!strcmp(cmd, "DESCRIBE")) {
rtsp_cmd_describe(c, url);
+ } else if (!strcmp(cmd, "OPTIONS")) {
+ rtsp_cmd_options(c, url);
} else if (!strcmp(cmd, "SETUP")) {
rtsp_cmd_setup(c, url, header);
} else if (!strcmp(cmd, "PLAY")) {
/* XXX: move that to rtsp.c, but would need to replace FFStream by
AVFormatContext */
-static int prepare_sdp_description(FFStream *stream, UINT8 **pbuffer,
+static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
struct in_addr my_ip)
{
ByteIOContext pb1, *pb = &pb1;
/* we must also add the mpeg4 header */
data = st->codec.extradata;
if (data) {
- url_fprintf(pb, "a=fmtp:%d config=");
+ url_fprintf(pb, "a=fmtp:%d config=", payload_type);
for(j=0;j<st->codec.extradata_size;j++) {
url_fprintf(pb, "%02x", data[j]);
}
return -1;
}
+static void rtsp_cmd_options(HTTPContext *c, const char *url)
+{
+ /* RTSP OPTIONS: advertise the methods this server supports.
+  * The reply is written by hand (not via rtsp_reply_header) so that
+  * only the status line, CSeq echo and Public header are emitted;
+  * 'url' is unused — OPTIONS applies to the whole server. */
+// rtsp_reply_header(c, RTSP_STATUS_OK);
+ url_fprintf(c->pb, "RTSP/1.0 %d %s\r\n", RTSP_STATUS_OK, "OK");
+ url_fprintf(c->pb, "CSeq: %d\r\n", c->seq);
+ url_fprintf(c->pb, "Public: %s\r\n", "OPTIONS, DESCRIBE, SETUP, TEARDOWN, PLAY, PAUSE");
+ url_fprintf(c->pb, "\r\n");
+}
+
static void rtsp_cmd_describe(HTTPContext *c, const char *url)
{
FFStream *stream;
char path1[1024];
const char *path;
- UINT8 *content;
+ uint8_t *content;
int content_length, len;
struct sockaddr_in my_addr;
return NULL;
}
-RTSPTransportField *find_transport(RTSPHeader *h, enum RTSPProtocol protocol)
+static RTSPTransportField *find_transport(RTSPHeader *h, enum RTSPProtocol protocol)
{
RTSPTransportField *th;
int i;
/* find rtp session, and create it if none found */
rtp_c = find_rtp_session(h->session_id);
if (!rtp_c) {
- rtp_c = rtp_new_connection(&c->from_addr, stream, h->session_id);
+ /* always prefer UDP */
+ th = find_transport(h, RTSP_PROTOCOL_RTP_UDP);
+ if (!th) {
+ th = find_transport(h, RTSP_PROTOCOL_RTP_TCP);
+ if (!th) {
+ rtsp_reply_error(c, RTSP_STATUS_TRANSPORT);
+ return;
+ }
+ }
+
+ rtp_c = rtp_new_connection(&c->from_addr, stream, h->session_id,
+ th->protocol);
if (!rtp_c) {
rtsp_reply_error(c, RTSP_STATUS_BANDWIDTH);
return;
rtsp_reply_error(c, RTSP_STATUS_INTERNAL);
return;
}
-
- /* always prefer UDP */
- th = find_transport(h, RTSP_PROTOCOL_RTP_UDP);
- if (!th) {
- th = find_transport(h, RTSP_PROTOCOL_RTP_TCP);
- if (!th) {
- rtsp_reply_error(c, RTSP_STATUS_TRANSPORT);
- return;
- }
- }
- rtp_c->rtp_protocol = th->protocol;
}
/* test if stream is OK (test needed because several SETUP needs
}
/* setup stream */
- if (rtp_new_av_stream(rtp_c, stream_index, &dest_addr) < 0) {
+ if (rtp_new_av_stream(rtp_c, stream_index, &dest_addr, c) < 0) {
rtsp_reply_error(c, RTSP_STATUS_TRANSPORT);
return;
}
HTTPContext *rtp_c;
char path1[1024];
const char *path;
+ char buf[1024];
+ int s;
rtp_c = find_rtp_session(session_id);
if (!rtp_c)
path = path1;
if (*path == '/')
path++;
- if (strcmp(path, rtp_c->stream->filename) != 0)
- return NULL;
- return rtp_c;
+ if(!strcmp(path, rtp_c->stream->filename)) return rtp_c;
+ for(s=0; s<rtp_c->stream->nb_streams; ++s) {
+ snprintf(buf, sizeof(buf), "%s/streamid=%d",
+ rtp_c->stream->filename, s);
+ if(!strncmp(path, buf, sizeof(buf))) {
+ // XXX: Should we reply with RTSP_STATUS_ONLY_AGGREGATE if nb_streams>1?
+ return rtp_c;
+ }
+ }
+ return NULL;
}
static void rtsp_cmd_play(HTTPContext *c, const char *url, RTSPHeader *h)
}
rtp_c->state = HTTPSTATE_READY;
-
+ rtp_c->first_pts = AV_NOPTS_VALUE;
/* now everything is OK, so we can send the connection parameters */
rtsp_reply_header(c, RTSP_STATUS_OK);
/* session ID */
/* RTP handling */
static HTTPContext *rtp_new_connection(struct sockaddr_in *from_addr,
- FFStream *stream, const char *session_id)
+ FFStream *stream, const char *session_id,
+ enum RTSPProtocol rtp_protocol)
{
HTTPContext *c = NULL;
-
+ const char *proto_str;
+
/* XXX: should output a warning page when coming
close to the connection limit */
if (nb_connections >= nb_max_connections)
pstrcpy(c->session_id, sizeof(c->session_id), session_id);
c->state = HTTPSTATE_READY;
c->is_packetized = 1;
+ c->rtp_protocol = rtp_protocol;
+
/* protocol is shown in statistics */
- pstrcpy(c->protocol, sizeof(c->protocol), "RTP");
+ switch(c->rtp_protocol) {
+ case RTSP_PROTOCOL_RTP_UDP_MULTICAST:
+ proto_str = "MCAST";
+ break;
+ case RTSP_PROTOCOL_RTP_UDP:
+ proto_str = "UDP";
+ break;
+ case RTSP_PROTOCOL_RTP_TCP:
+ proto_str = "TCP";
+ break;
+ default:
+ proto_str = "???";
+ break;
+ }
+ pstrcpy(c->protocol, sizeof(c->protocol), "RTP/");
+ pstrcat(c->protocol, sizeof(c->protocol), proto_str);
current_bandwidth += stream->bandwidth;
}
/* add a new RTP stream in an RTP connection (used in RTSP SETUP
- command). if dest_addr is NULL, then TCP tunneling in RTSP is
+ command). If RTP/TCP protocol is used, TCP connection 'rtsp_c' is
used. */
static int rtp_new_av_stream(HTTPContext *c,
- int stream_index, struct sockaddr_in *dest_addr)
+ int stream_index, struct sockaddr_in *dest_addr,
+ HTTPContext *rtsp_c)
{
AVFormatContext *ctx;
AVStream *st;
char *ipaddr;
URLContext *h;
- UINT8 *dummy_buf;
+ uint8_t *dummy_buf;
char buf2[32];
+ int max_packet_size;
/* now we can open the relevant output stream */
ctx = av_mallocz(sizeof(AVFormatContext));
sizeof(AVStream));
}
- if (dest_addr) {
- /* build destination RTP address */
- ipaddr = inet_ntoa(dest_addr->sin_addr);
+ /* build destination RTP address */
+ ipaddr = inet_ntoa(dest_addr->sin_addr);
+
+ switch(c->rtp_protocol) {
+ case RTSP_PROTOCOL_RTP_UDP:
+ case RTSP_PROTOCOL_RTP_UDP_MULTICAST:
+ /* RTP/UDP case */
/* XXX: also pass as parameter to function ? */
if (c->stream->is_multicast) {
if (url_open(&h, ctx->filename, URL_WRONLY) < 0)
goto fail;
c->rtp_handles[stream_index] = h;
- } else {
+ max_packet_size = url_get_max_packet_size(h);
+ break;
+ case RTSP_PROTOCOL_RTP_TCP:
+ /* RTP/TCP case */
+ c->rtsp_c = rtsp_c;
+ max_packet_size = RTSP_TCP_MAX_PACKET_SIZE;
+ break;
+ default:
goto fail;
}
- http_log("%s:%d - - [%s] \"RTPSTART %s/streamid=%d\"\n",
+ http_log("%s:%d - - [%s] \"PLAY %s/streamid=%d %s\"\n",
ipaddr, ntohs(dest_addr->sin_port),
ctime1(buf2),
- c->stream->filename, stream_index);
+ c->stream->filename, stream_index, c->protocol);
/* normally, no packets should be output here, but the packet size may be checked */
- if (url_open_dyn_packet_buf(&ctx->pb,
- url_get_max_packet_size(h)) < 0) {
+ if (url_open_dyn_packet_buf(&ctx->pb, max_packet_size) < 0) {
/* XXX: close stream */
goto fail;
}
+ av_set_parameters(ctx, NULL);
if (av_write_header(ctx) < 0) {
fail:
if (h)
/********************************************************************/
/* ffserver initialization */
-AVStream *add_av_stream1(FFStream *stream, AVCodecContext *codec)
+static AVStream *add_av_stream1(FFStream *stream, AVCodecContext *codec)
{
AVStream *fst;
return NULL;
fst->priv_data = av_mallocz(sizeof(FeedData));
memcpy(&fst->codec, codec, sizeof(AVCodecContext));
+ fst->codec.coded_frame = &dummy_frame;
stream->streams[stream->nb_streams++] = fst;
return fst;
}
/* return the stream number in the feed */
-int add_av_stream(FFStream *feed,
- AVStream *st)
+static int add_av_stream(FFStream *feed, AVStream *st)
{
AVStream *fst;
AVCodecContext *av, *av1;
if (av1->width == av->width &&
av1->height == av->height &&
av1->frame_rate == av->frame_rate &&
+ av1->frame_rate_base == av->frame_rate_base &&
av1->gop_size == av->gop_size)
goto found;
break;
return i;
}
-void remove_stream(FFStream *stream)
+static void remove_stream(FFStream *stream)
{
FFStream **ps;
ps = &first_stream;
}
/* specific mpeg4 handling : we extract the raw parameters */
-void extract_mpeg4_header(AVFormatContext *infile)
+static void extract_mpeg4_header(AVFormatContext *infile)
{
int mpeg4_count, i, size;
AVPacket pkt;
AVStream *st;
- const UINT8 *p;
+ const uint8_t *p;
mpeg4_count = 0;
for(i=0;i<infile->nb_streams;i++) {
st = infile->streams[i];
if (st->codec.codec_id == CODEC_ID_MPEG4 &&
- st->codec.extradata == NULL) {
+ st->codec.extradata_size == 0) {
mpeg4_count++;
}
}
break;
st = infile->streams[pkt.stream_index];
if (st->codec.codec_id == CODEC_ID_MPEG4 &&
- st->codec.extradata == NULL) {
+ st->codec.extradata_size == 0) {
+ av_freep(&st->codec.extradata);
/* fill extradata with the header */
/* XXX: we make hard suppositions here ! */
p = pkt.data;
}
/* compute the needed AVStream for each file */
-void build_file_streams(void)
+static void build_file_streams(void)
{
FFStream *stream, *stream_next;
AVFormatContext *infile;
}
/* compute the needed AVStream for each feed */
-void build_feed_streams(void)
+static void build_feed_streams(void)
{
FFStream *stream, *feed;
int i;
matches = 0;
} else if (ccf->codec_type == CODEC_TYPE_VIDEO) {
if (CHECK_CODEC(frame_rate) ||
+ CHECK_CODEC(frame_rate_base) ||
CHECK_CODEC(width) ||
CHECK_CODEC(height)) {
printf("Codec width, height and framerate do not match for stream %d\n", i);
printf("Deleting feed file '%s' as it appears to be corrupt\n",
feed->feed_filename);
}
- if (!matches)
+ if (!matches) {
+ if (feed->readonly) {
+ printf("Unable to delete feed file '%s' as it is marked readonly\n",
+ feed->feed_filename);
+ exit(1);
+ }
unlink(feed->feed_filename);
+ }
}
if (!url_exist(feed->feed_filename)) {
AVFormatContext s1, *s = &s1;
+ if (feed->readonly) {
+ printf("Unable to create feed file '%s' as it is marked readonly\n",
+ feed->feed_filename);
+ exit(1);
+ }
+
/* only write the header of the ffm file */
if (url_fopen(&s->pb, feed->feed_filename, URL_WRONLY) < 0) {
fprintf(stderr, "Could not open output feed file '%s'\n",
st = feed->streams[i];
s->streams[i] = st;
}
+ av_set_parameters(s, NULL);
av_write_header(s);
/* XXX: need better api */
av_freep(&s->priv_data);
}
/* add a codec and set the default parameters */
-void add_codec(FFStream *stream, AVCodecContext *av)
+static void add_codec(FFStream *stream, AVCodecContext *av)
{
AVStream *st;
case CODEC_TYPE_VIDEO:
if (av->bit_rate == 0)
av->bit_rate = 64000;
- if (av->frame_rate == 0)
- av->frame_rate = 5 * FRAME_RATE_BASE;
+ if (av->frame_rate == 0){
+ av->frame_rate = 5;
+ av->frame_rate_base = 1;
+ }
if (av->width == 0 || av->height == 0) {
av->width = 160;
av->height = 128;
memcpy(&st->codec, av, sizeof(AVCodecContext));
}
-int opt_audio_codec(const char *arg)
+static int opt_audio_codec(const char *arg)
{
AVCodec *p;
return p->id;
}
-int opt_video_codec(const char *arg)
+static int opt_video_codec(const char *arg)
{
AVCodec *p;
}
#endif
-int parse_ffconfig(const char *filename)
+static int parse_ffconfig(const char *filename)
{
FILE *f;
char line[1024];
if (!argbuf[0])
break;
- feed->child_argv[i] = av_malloc(strlen(argbuf + 1));
+ feed->child_argv[i] = av_malloc(strlen(argbuf) + 1);
strcpy(feed->child_argv[i], argbuf);
}
snprintf(feed->child_argv[i], 256, "http://127.0.0.1:%d/%s",
ntohs(my_http_addr.sin_port), feed->filename);
}
+ } else if (!strcasecmp(cmd, "ReadOnlyFile")) {
+ if (feed) {
+ get_arg(feed->feed_filename, sizeof(feed->feed_filename), &p);
+ feed->readonly = 1;
+ } else if (stream) {
+ get_arg(stream->feed_filename, sizeof(stream->feed_filename), &p);
+ }
} else if (!strcasecmp(cmd, "File")) {
if (feed) {
get_arg(feed->feed_filename, sizeof(feed->feed_filename), &p);
fsize *= 1024 * 1024 * 1024;
break;
}
- feed->feed_max_size = (INT64)fsize;
+ feed->feed_max_size = (int64_t)fsize;
}
} else if (!strcasecmp(cmd, "</Feed>")) {
if (!feed) {
} else if (!strcasecmp(cmd, "AudioQuality")) {
get_arg(arg, sizeof(arg), &p);
if (stream) {
- audio_enc.quality = atof(arg) * 1000;
+// audio_enc.quality = atof(arg) * 1000;
}
} else if (!strcasecmp(cmd, "VideoBitRateRange")) {
if (stream) {
} else if (!strcasecmp(cmd, "VideoFrameRate")) {
get_arg(arg, sizeof(arg), &p);
if (stream) {
- video_enc.frame_rate = (int)(strtod(arg, NULL) * FRAME_RATE_BASE);
+ video_enc.frame_rate_base= DEFAULT_FRAME_RATE_BASE;
+ video_enc.frame_rate = (int)(strtod(arg, NULL) * video_enc.frame_rate_base);
}
} else if (!strcasecmp(cmd, "VideoGopSize")) {
get_arg(arg, sizeof(arg), &p);
}
} else if (!strcasecmp(cmd, "VideoHighQuality")) {
if (stream) {
- video_enc.flags |= CODEC_FLAG_HQ;
+ video_enc.mb_decision = FF_MB_DECISION_BITS;
+ }
+ } else if (!strcasecmp(cmd, "Video4MotionVector")) {
+ if (stream) {
+ video_enc.mb_decision = FF_MB_DECISION_BITS; //FIXME remove
+ video_enc.flags |= CODEC_FLAG_4MV;
}
} else if (!strcasecmp(cmd, "VideoQDiff")) {
get_arg(arg, sizeof(arg), &p);
errors++;
} else {
/* Only take the first */
- acl.first = *(struct in_addr *) he->h_addr_list[0];
+ acl.first.s_addr = ntohl(((struct in_addr *) he->h_addr_list[0])->s_addr);
acl.last = acl.first;
}
errors++;
} else {
/* Only take the first */
- acl.last = *(struct in_addr *) he->h_addr_list[0];
+ acl.last.s_addr = ntohl(((struct in_addr *) he->h_addr_list[0])->s_addr);
}
}
#if 0
static void write_packet(FFCodec *ffenc,
- UINT8 *buf, int size)
+ uint8_t *buf, int size)
{
PacketHeader hdr;
AVCodecContext *enc = &ffenc->enc;
- UINT8 *wptr;
+ uint8_t *wptr;
mk_header(&hdr, enc, size);
wptr = http_fifo.wptr;
- fifo_write(&http_fifo, (UINT8 *)&hdr, sizeof(hdr), &wptr);
+ fifo_write(&http_fifo, (uint8_t *)&hdr, sizeof(hdr), &wptr);
fifo_write(&http_fifo, buf, size, &wptr);
/* atomic modification of wptr */
http_fifo.wptr = wptr;
}
#endif
-void help(void)
+static void show_banner(void)
+{
+ /* version/copyright line shared by the -h and -L outputs below */
+ printf("ffserver version " FFMPEG_VERSION ", Copyright (c) 2000-2003 Fabrice Bellard\n");
+}
+
+static void show_help(void)
{
+ /* print the banner, then command-line usage (invoked from the 'h'/'?'
+    option cases) */
- printf("ffserver version " FFMPEG_VERSION ", Copyright (c) 2000, 2001, 2002 Fabrice Bellard\n"
- "usage: ffserver [-L] [-h] [-f configfile]\n"
+ show_banner();
+ printf("usage: ffserver [-L] [-h] [-f configfile]\n"
"Hyper fast multi format Audio/Video streaming server\n"
"\n"
- "-L : print the LICENCE\n"
+ "-L : print the LICENSE\n"
"-h : this help\n"
"-f configfile : use configfile instead of /etc/ffserver.conf\n"
);
}
-void licence(void)
+static void show_license(void)
{
+ show_banner();
printf(
- "ffserver version " FFMPEG_VERSION "\n"
- "Copyright (c) 2000, 2001, 2002 Fabrice Bellard\n"
"This library is free software; you can redistribute it and/or\n"
"modify it under the terms of the GNU Lesser General Public\n"
"License as published by the Free Software Foundation; either\n"
break;
switch(c) {
case 'L':
- licence();
+ show_license();
exit(1);
case '?':
case 'h':
- help();
+ show_help();
exit(1);
case 'n':
no_launch = 1;