* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define HAVE_AV_CONFIG_H
-#include "common.h"
#include "avformat.h"
#include <stdarg.h>
#include <sys/poll.h>
#include <errno.h>
#include <sys/time.h>
+#undef time // needed because HAVE_AV_CONFIG_H is defined at the top of this file
#include <time.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <netdb.h>
-#include <ctype.h>
#include <signal.h>
#ifdef CONFIG_HAVE_DLFCN
#include <dlfcn.h>
HTTPSTATE_SEND_DATA_TRAILER,
HTTPSTATE_RECEIVE_DATA,
HTTPSTATE_WAIT_FEED, /* wait for data from the feed */
- HTTPSTATE_WAIT, /* wait before sending next packets */
- HTTPSTATE_WAIT_SHORT, /* short wait for short term
- bandwidth limitation */
HTTPSTATE_READY,
RTSPSTATE_WAIT_REQUEST,
RTSPSTATE_SEND_REPLY,
+ RTSPSTATE_SEND_PACKET,
};
const char *http_state[] = {
"SEND_DATA_TRAILER",
"RECEIVE_DATA",
"WAIT_FEED",
- "WAIT",
- "WAIT_SHORT",
"READY",
"RTSP_WAIT_REQUEST",
"RTSP_SEND_REPLY",
+ "RTSP_SEND_PACKET",
};
#define IOBUFFER_INIT_SIZE 8192
long timeout;
uint8_t *buffer_ptr, *buffer_end;
int http_error;
+ int post;
struct HTTPContext *next;
int got_key_frame; /* stream 0 => 1, stream 1 => 2, stream 2=> 4 */
int64_t data_count;
AVFormatContext *fmt_in;
long start_time; /* In milliseconds - this wraps fairly often */
int64_t first_pts; /* initial pts value */
- int pts_stream_index; /* stream we choose as clock reference */
+ int64_t cur_pts; /* current pts value from the stream in us */
+ int64_t cur_frame_duration; /* duration of the current frame in us */
+ int cur_frame_bytes; /* output frame size, needed to compute
+ the time at which we send each
+ packet */
+ int pts_stream_index; /* stream we choose as clock reference */
+ int64_t cur_clock; /* current clock reference value in us */
/* output format handling */
struct FFStream *stream;
/* -1 is invalid stream */
uint8_t *pb_buffer; /* XXX: use that in all the code */
ByteIOContext *pb;
int seq; /* RTSP sequence number */
-
+
/* RTP state specific */
enum RTSPProtocol rtp_protocol;
char session_id[32]; /* session id */
AVFormatContext *rtp_ctx[MAX_STREAMS];
+
+ /* RTP/UDP specific */
URLContext *rtp_handles[MAX_STREAMS];
- /* RTP short term bandwidth limitation */
- int packet_byte_count;
- int packet_start_time_us; /* used for short durations (a few
- seconds max) */
+
+ /* RTP/TCP specific */
+ struct HTTPContext *rtsp_c;
+ uint8_t *packet_buffer, *packet_buffer_ptr, *packet_buffer_end;
} HTTPContext;
static AVFrame dummy_frame;
char filename[1024]; /* stream filename */
struct FFStream *feed; /* feed we are using (can be null if
coming from file) */
+ AVFormatParameters *ap_in; /* input parameters */
+ AVInputFormat *ifmt; /* if non NULL, force input format */
AVOutputFormat *fmt;
IPAddressACL *acl;
int nb_streams;
static int open_input_stream(HTTPContext *c, const char *info);
static int http_start_receive_data(HTTPContext *c);
static int http_receive_data(HTTPContext *c);
-static int compute_send_delay(HTTPContext *c);
/* RTSP handling */
static int rtsp_parse_request(HTTPContext *c);
/* RTP handling */
static HTTPContext *rtp_new_connection(struct sockaddr_in *from_addr,
- FFStream *stream, const char *session_id);
+ FFStream *stream, const char *session_id,
+ enum RTSPProtocol rtp_protocol);
static int rtp_new_av_stream(HTTPContext *c,
- int stream_index, struct sockaddr_in *dest_addr);
+ int stream_index, struct sockaddr_in *dest_addr,
+ HTTPContext *rtsp_c);
static const char *my_program_name;
static const char *my_program_dir;
static FILE *logfile = NULL;
-static void http_log(const char *fmt, ...)
+static void __attribute__ ((format (printf, 1, 2))) http_log(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
dest_addr.sin_addr = stream->multicast_ip;
dest_addr.sin_port = htons(stream->multicast_port);
- rtp_c = rtp_new_connection(&dest_addr, stream, session_id);
+ rtp_c = rtp_new_connection(&dest_addr, stream, session_id,
+ RTSP_PROTOCOL_RTP_UDP_MULTICAST);
if (!rtp_c) {
continue;
}
continue;
}
- rtp_c->rtp_protocol = RTSP_PROTOCOL_RTP_UDP_MULTICAST;
-
/* open each RTP stream */
for(stream_index = 0; stream_index < stream->nb_streams;
stream_index++) {
dest_addr.sin_port = htons(stream->multicast_port +
2 * stream_index);
- if (rtp_new_av_stream(rtp_c, stream_index, &dest_addr) < 0) {
+ if (rtp_new_av_stream(rtp_c, stream_index, &dest_addr, NULL) < 0) {
fprintf(stderr, "Could not open output stream '%s/streamid=%d'\n",
stream->filename, stream_index);
exit(1);
first_http_ctx = NULL;
nb_connections = 0;
- first_http_ctx = NULL;
start_multicast();
switch(c->state) {
case HTTPSTATE_SEND_HEADER:
case RTSPSTATE_SEND_REPLY:
+ case RTSPSTATE_SEND_PACKET:
c->poll_entry = poll_entry;
poll_entry->fd = fd;
poll_entry->events = POLLOUT;
poll_entry->events = POLLOUT;
poll_entry++;
} else {
- /* not strictly correct, but currently cannot add
- more than one fd in poll entry */
- delay = 0;
+ /* when ffserver is doing the timing, we work by
+                   looking at which packet needs to be sent every
+ 10 ms */
+ delay1 = 10; /* one tick wait XXX: 10 ms assumed */
+ if (delay1 < delay)
+ delay = delay1;
}
break;
case HTTPSTATE_WAIT_REQUEST:
poll_entry->events = POLLIN;/* Maybe this will work */
poll_entry++;
break;
- case HTTPSTATE_WAIT:
- c->poll_entry = NULL;
- delay1 = compute_send_delay(c);
- if (delay1 < delay)
- delay = delay1;
- break;
- case HTTPSTATE_WAIT_SHORT:
- c->poll_entry = NULL;
- delay1 = 10; /* one tick wait XXX: 10 ms assumed */
- if (delay1 < delay)
- delay = delay1;
- break;
default:
c->poll_entry = NULL;
break;
second to handle timeouts */
do {
ret = poll(poll_table, poll_entry - poll_table, delay);
- } while (ret == -1);
+ if (ret < 0 && errno != EAGAIN && errno != EINTR)
+ return -1;
+ } while (ret <= 0);
cur_time = gettime_ms();
if (!c)
goto fail;
- c->next = first_http_ctx;
- first_http_ctx = c;
c->fd = fd;
c->poll_entry = NULL;
c->from_addr = from_addr;
c->buffer = av_malloc(c->buffer_size);
if (!c->buffer)
goto fail;
+
+ c->next = first_http_ctx;
+ first_http_ctx = c;
nb_connections++;
start_wait_request(c, is_rtsp);
}
}
+ /* remove references, if any (XXX: do it faster) */
+ for(c1 = first_http_ctx; c1 != NULL; c1 = c1->next) {
+ if (c1->rtsp_c == c)
+ c1->rtsp_c = NULL;
+ }
+
/* remove connection associated resources */
if (c->fd >= 0)
close(c->fd);
/* close each frame parser */
for(i=0;i<c->fmt_in->nb_streams;i++) {
st = c->fmt_in->streams[i];
- if (st->codec.codec) {
- avcodec_close(&st->codec);
+ if (st->codec->codec) {
+ avcodec_close(st->codec);
}
}
av_close_input_file(c->fmt_in);
url_close(h);
}
}
-
+
ctx = &c->fmt_ctx;
if (!c->last_packet_sent) {
/* prepare header */
if (url_open_dyn_buf(&ctx->pb) >= 0) {
av_write_trailer(ctx);
- (void) url_close_dyn_buf(&ctx->pb, &c->pb_buffer);
+ url_close_dyn_buf(&ctx->pb, &c->pb_buffer);
}
}
}
for(i=0; i<ctx->nb_streams; i++)
av_free(ctx->streams[i]) ;
- if (c->stream)
+ if (c->stream && !c->post && c->stream->stream_type == STREAM_TYPE_LIVE)
current_bandwidth -= c->stream->bandwidth;
av_freep(&c->pb_buffer);
+ av_freep(&c->packet_buffer);
av_free(c->buffer);
av_free(c);
nb_connections--;
if (!(c->poll_entry->revents & POLLIN))
return 0;
/* read the data */
+ read_loop:
len = read(c->fd, c->buffer_ptr, 1);
if (len < 0) {
if (errno != EAGAIN && errno != EINTR)
} else if (ptr >= c->buffer_end) {
/* request too long: cannot do anything */
return -1;
- }
+ } else goto read_loop;
}
break;
/* nothing to do, we'll be waken up by incoming feed packets */
break;
- case HTTPSTATE_WAIT:
- /* if the delay expired, we can send new packets */
- if (compute_send_delay(c) <= 0)
- c->state = HTTPSTATE_SEND_DATA;
- break;
- case HTTPSTATE_WAIT_SHORT:
- /* just return back to send data */
- c->state = HTTPSTATE_SEND_DATA;
- break;
-
case RTSPSTATE_SEND_REPLY:
if (c->poll_entry->revents & (POLLERR | POLLHUP)) {
av_freep(&c->pb_buffer);
}
}
break;
+ case RTSPSTATE_SEND_PACKET:
+ if (c->poll_entry->revents & (POLLERR | POLLHUP)) {
+ av_freep(&c->packet_buffer);
+ return -1;
+ }
+ /* no need to write if no events */
+ if (!(c->poll_entry->revents & POLLOUT))
+ return 0;
+ len = write(c->fd, c->packet_buffer_ptr,
+ c->packet_buffer_end - c->packet_buffer_ptr);
+ if (len < 0) {
+ if (errno != EAGAIN && errno != EINTR) {
+ /* error : close connection */
+ av_freep(&c->packet_buffer);
+ return -1;
+ }
+ } else {
+ c->packet_buffer_ptr += len;
+ if (c->packet_buffer_ptr >= c->packet_buffer_end) {
+ /* all the buffer was sent : wait for a new request */
+ av_freep(&c->packet_buffer);
+ c->state = RTSPSTATE_WAIT_REQUEST;
+ }
+ }
+ break;
case HTTPSTATE_READY:
/* nothing to do */
break;
int best = -1;
for (i = 0; i < feed->nb_streams; i++) {
- AVCodecContext *feed_codec = &feed->streams[i]->codec;
+ AVCodecContext *feed_codec = feed->streams[i]->codec;
if (feed_codec->codec_id != codec->codec_id ||
feed_codec->sample_rate != codec->sample_rate ||
return 0;
for (i = 0; i < req->nb_streams; i++) {
- AVCodecContext *codec = &req->streams[i]->codec;
+ AVCodecContext *codec = req->streams[i]->codec;
switch(rates[i]) {
case 0:
static int http_parse_request(HTTPContext *c)
{
char *p;
- int post;
enum RedirType redir_type;
char cmd[32];
char info[1024], *filename;
pstrcpy(c->method, sizeof(c->method), cmd);
if (!strcmp(cmd, "GET"))
- post = 0;
+ c->post = 0;
else if (!strcmp(cmd, "POST"))
- post = 1;
+ c->post = 1;
else
return -1;
stream = stream->next;
}
if (stream == NULL) {
- sprintf(msg, "File '%s' not found", url);
+ snprintf(msg, sizeof(msg), "File '%s' not found", url);
goto send_error;
}
if (stream->stream_type == STREAM_TYPE_REDIRECT) {
c->http_error = 301;
q = c->buffer;
- q += sprintf(q, "HTTP/1.0 301 Moved\r\n");
- q += sprintf(q, "Location: %s\r\n", stream->feed_filename);
- q += sprintf(q, "Content-type: text/html\r\n");
- q += sprintf(q, "\r\n");
- q += sprintf(q, "<html><head><title>Moved</title></head><body>\r\n");
- q += sprintf(q, "You should be <a href=\"%s\">redirected</a>.\r\n", stream->feed_filename);
- q += sprintf(q, "</body></html>\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "HTTP/1.0 301 Moved\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "Location: %s\r\n", stream->feed_filename);
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "Content-type: text/html\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "<html><head><title>Moved</title></head><body>\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "You should be <a href=\"%s\">redirected</a>.\r\n", stream->feed_filename);
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "</body></html>\r\n");
/* prepare output buffer */
c->buffer_ptr = c->buffer;
}
}
- if (post == 0 && stream->stream_type == STREAM_TYPE_LIVE) {
+ if (c->post == 0 && stream->stream_type == STREAM_TYPE_LIVE) {
current_bandwidth += stream->bandwidth;
}
- if (post == 0 && max_bandwidth < current_bandwidth) {
+ if (c->post == 0 && max_bandwidth < current_bandwidth) {
c->http_error = 200;
q = c->buffer;
- q += sprintf(q, "HTTP/1.0 200 Server too busy\r\n");
- q += sprintf(q, "Content-type: text/html\r\n");
- q += sprintf(q, "\r\n");
- q += sprintf(q, "<html><head><title>Too busy</title></head><body>\r\n");
- q += sprintf(q, "The server is too busy to serve your request at this time.<p>\r\n");
- q += sprintf(q, "The bandwidth being served (including your stream) is %dkbit/sec, and this exceeds the limit of %dkbit/sec\r\n",
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "HTTP/1.0 200 Server too busy\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "Content-type: text/html\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "<html><head><title>Too busy</title></head><body>\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "The server is too busy to serve your request at this time.<p>\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "The bandwidth being served (including your stream) is %dkbit/sec, and this exceeds the limit of %dkbit/sec\r\n",
current_bandwidth, max_bandwidth);
- q += sprintf(q, "</body></html>\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "</body></html>\r\n");
/* prepare output buffer */
c->buffer_ptr = c->buffer;
q = c->buffer;
switch(redir_type) {
case REDIR_ASX:
- q += sprintf(q, "HTTP/1.0 200 ASX Follows\r\n");
- q += sprintf(q, "Content-type: video/x-ms-asf\r\n");
- q += sprintf(q, "\r\n");
- q += sprintf(q, "<ASX Version=\"3\">\r\n");
- q += sprintf(q, "<!-- Autogenerated by ffserver -->\r\n");
- q += sprintf(q, "<ENTRY><REF HREF=\"http://%s/%s%s\"/></ENTRY>\r\n",
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "HTTP/1.0 200 ASX Follows\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "Content-type: video/x-ms-asf\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "<ASX Version=\"3\">\r\n");
+ //q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "<!-- Autogenerated by ffserver -->\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "<ENTRY><REF HREF=\"http://%s/%s%s\"/></ENTRY>\r\n",
hostbuf, filename, info);
- q += sprintf(q, "</ASX>\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "</ASX>\r\n");
break;
case REDIR_RAM:
- q += sprintf(q, "HTTP/1.0 200 RAM Follows\r\n");
- q += sprintf(q, "Content-type: audio/x-pn-realaudio\r\n");
- q += sprintf(q, "\r\n");
- q += sprintf(q, "# Autogenerated by ffserver\r\n");
- q += sprintf(q, "http://%s/%s%s\r\n",
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "HTTP/1.0 200 RAM Follows\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "Content-type: audio/x-pn-realaudio\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "# Autogenerated by ffserver\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "http://%s/%s%s\r\n",
hostbuf, filename, info);
break;
case REDIR_ASF:
- q += sprintf(q, "HTTP/1.0 200 ASF Redirect follows\r\n");
- q += sprintf(q, "Content-type: video/x-ms-asf\r\n");
- q += sprintf(q, "\r\n");
- q += sprintf(q, "[Reference]\r\n");
- q += sprintf(q, "Ref1=http://%s/%s%s\r\n",
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "HTTP/1.0 200 ASF Redirect follows\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "Content-type: video/x-ms-asf\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "[Reference]\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "Ref1=http://%s/%s%s\r\n",
hostbuf, filename, info);
break;
case REDIR_RTSP:
p = strrchr(hostname, ':');
if (p)
*p = '\0';
- q += sprintf(q, "HTTP/1.0 200 RTSP Redirect follows\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "HTTP/1.0 200 RTSP Redirect follows\r\n");
/* XXX: incorrect mime type ? */
- q += sprintf(q, "Content-type: application/x-rtsp\r\n");
- q += sprintf(q, "\r\n");
- q += sprintf(q, "rtsp://%s:%d/%s\r\n",
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "Content-type: application/x-rtsp\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "rtsp://%s:%d/%s\r\n",
hostname, ntohs(my_rtsp_addr.sin_port),
filename);
}
int sdp_data_size, len;
struct sockaddr_in my_addr;
- q += sprintf(q, "HTTP/1.0 200 OK\r\n");
- q += sprintf(q, "Content-type: application/sdp\r\n");
- q += sprintf(q, "\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "HTTP/1.0 200 OK\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "Content-type: application/sdp\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "\r\n");
len = sizeof(my_addr);
getsockname(c->fd, (struct sockaddr *)&my_addr, &len);
}
}
- sprintf(msg, "ASX/RAM file not handled");
+ snprintf(msg, sizeof(msg), "ASX/RAM file not handled");
goto send_error;
}
/* XXX: add there authenticate and IP match */
- if (post) {
+ if (c->post) {
/* if post, it means a feed is being sent */
if (!stream->is_feed) {
/* However it might be a status report from WMP! Lets log the data
if (eol) {
if (eol[-1] == '\r')
eol--;
- http_log("%.*s\n", eol - logline, logline);
+ http_log("%.*s\n", (int) (eol - logline), logline);
c->suppress_log = 1;
}
}
}
}
- sprintf(msg, "POST command not handled");
+ snprintf(msg, sizeof(msg), "POST command not handled");
c->stream = 0;
goto send_error;
}
if (http_start_receive_data(c) < 0) {
- sprintf(msg, "could not open feed");
+ snprintf(msg, sizeof(msg), "could not open feed");
goto send_error;
}
c->http_error = 0;
/* open input stream */
if (open_input_stream(c, info) < 0) {
- sprintf(msg, "Input stream corresponding to '%s' not found", url);
+ snprintf(msg, sizeof(msg), "Input stream corresponding to '%s' not found", url);
goto send_error;
}
/* prepare http header */
q = c->buffer;
- q += sprintf(q, "HTTP/1.0 200 OK\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "HTTP/1.0 200 OK\r\n");
mime_type = c->stream->fmt->mime_type;
if (!mime_type)
mime_type = "application/x-octet_stream";
- q += sprintf(q, "Pragma: no-cache\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "Pragma: no-cache\r\n");
/* for asf, we need extra headers */
if (!strcmp(c->stream->fmt->name,"asf_stream")) {
c->wmp_client_id = random() & 0x7fffffff;
- q += sprintf(q, "Server: Cougar 4.1.0.3923\r\nCache-Control: no-cache\r\nPragma: client-id=%d\r\nPragma: features=\"broadcast\"\r\n", c->wmp_client_id);
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "Server: Cougar 4.1.0.3923\r\nCache-Control: no-cache\r\nPragma: client-id=%d\r\nPragma: features=\"broadcast\"\r\n", c->wmp_client_id);
}
- q += sprintf(q, "Content-Type: %s\r\n", mime_type);
- q += sprintf(q, "\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "Content-Type: %s\r\n", mime_type);
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "\r\n");
/* prepare output buffer */
c->http_error = 0;
send_error:
c->http_error = 404;
q = c->buffer;
- q += sprintf(q, "HTTP/1.0 404 Not Found\r\n");
- q += sprintf(q, "Content-type: %s\r\n", "text/html");
- q += sprintf(q, "\r\n");
- q += sprintf(q, "<HTML>\n");
- q += sprintf(q, "<HEAD><TITLE>404 Not Found</TITLE></HEAD>\n");
- q += sprintf(q, "<BODY>%s</BODY>\n", msg);
- q += sprintf(q, "</HTML>\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "HTTP/1.0 404 Not Found\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "Content-type: %s\r\n", "text/html");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "\r\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "<HTML>\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "<HEAD><TITLE>404 Not Found</TITLE></HEAD>\n");
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "<BODY>%s</BODY>\n", msg);
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "</HTML>\n");
/* prepare output buffer */
c->buffer_ptr = c->buffer;
for(i=0;i<stream->nb_streams;i++) {
AVStream *st = stream->streams[i];
- AVCodec *codec = avcodec_find_encoder(st->codec.codec_id);
- switch(st->codec.codec_type) {
+ AVCodec *codec = avcodec_find_encoder(st->codec->codec_id);
+ switch(st->codec->codec_type) {
case CODEC_TYPE_AUDIO:
- audio_bit_rate += st->codec.bit_rate;
+ audio_bit_rate += st->codec->bit_rate;
if (codec) {
if (*audio_codec_name)
audio_codec_name_extra = "...";
}
break;
case CODEC_TYPE_VIDEO:
- video_bit_rate += st->codec.bit_rate;
+ video_bit_rate += st->codec->bit_rate;
if (codec) {
if (*video_codec_name)
video_codec_name_extra = "...";
video_codec_name = codec->name;
}
break;
+ case CODEC_TYPE_DATA:
+ video_bit_rate += st->codec->bit_rate;
+ break;
default:
av_abort();
}
for (i = 0; i < stream->nb_streams; i++) {
AVStream *st = stream->streams[i];
- AVCodec *codec = avcodec_find_encoder(st->codec.codec_id);
+ AVCodec *codec = avcodec_find_encoder(st->codec->codec_id);
const char *type = "unknown";
char parameters[64];
parameters[0] = 0;
- switch(st->codec.codec_type) {
+ switch(st->codec->codec_type) {
case CODEC_TYPE_AUDIO:
type = "audio";
break;
case CODEC_TYPE_VIDEO:
type = "video";
- sprintf(parameters, "%dx%d, q=%d-%d, fps=%d", st->codec.width, st->codec.height,
- st->codec.qmin, st->codec.qmax, st->codec.frame_rate / st->codec.frame_rate_base);
+ snprintf(parameters, sizeof(parameters), "%dx%d, q=%d-%d, fps=%d", st->codec->width, st->codec->height,
+ st->codec->qmin, st->codec->qmax, st->codec->time_base.den / st->codec->time_base.num);
break;
default:
av_abort();
}
url_fprintf(pb, "<tr><td align=right>%d<td>%s<td align=right>%d<td>%s<td>%s\n",
- i, type, st->codec.bit_rate/1000, codec ? codec->name : "", parameters);
+ i, type, st->codec->bit_rate/1000, codec ? codec->name : "", parameters);
}
url_fprintf(pb, "</table>\n");
for(i=0;i<stream->nb_streams;i++) {
AVStream *st = stream->streams[i];
FeedData *fdata = st->priv_data;
- enc = &st->codec;
+ enc = st->codec;
avcodec_string(buf, sizeof(buf), enc);
avg = fdata->avg_frame_size * (float)enc->rate * 8.0;
if (c1->stream) {
for (j = 0; j < c1->stream->nb_streams; j++) {
if (!c1->stream->feed) {
- bitrate += c1->stream->streams[j]->codec.bit_rate;
+ bitrate += c1->stream->streams[j]->codec->bit_rate;
} else {
if (c1->feed_streams[j] >= 0) {
- bitrate += c1->stream->feed->streams[c1->feed_streams[j]]->codec.bit_rate;
+ bitrate += c1->stream->feed->streams[c1->feed_streams[j]]->codec->bit_rate;
}
}
}
AVStream *st = s->streams[i];
AVCodec *codec;
- if (!st->codec.codec) {
- codec = avcodec_find_decoder(st->codec.codec_id);
+ if (!st->codec->codec) {
+ codec = avcodec_find_decoder(st->codec->codec_id);
if (codec && (codec->capabilities & CODEC_CAP_PARSE_ONLY)) {
- st->codec.parse_only = 1;
- if (avcodec_open(&st->codec, codec) < 0) {
- st->codec.parse_only = 0;
+ st->codec->parse_only = 1;
+ if (avcodec_open(st->codec, codec) < 0) {
+ st->codec->parse_only = 0;
}
}
}
#endif
/* open stream */
- if (av_open_input_file(&s, input_filename, NULL, buf_size, NULL) < 0) {
+ if (av_open_input_file(&s, input_filename, c->stream->ifmt,
+ buf_size, c->stream->ap_in) < 0) {
http_log("%s not found", input_filename);
return -1;
}
c->pts_stream_index = 0;
for(i=0;i<c->stream->nb_streams;i++) {
if (c->pts_stream_index == 0 &&
- c->stream->streams[i]->codec.codec_type == CODEC_TYPE_VIDEO) {
+ c->stream->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
c->pts_stream_index = i;
}
}
+#if 1
if (c->fmt_in->iformat->read_seek) {
- c->fmt_in->iformat->read_seek(c->fmt_in, stream_pos);
+ c->fmt_in->iformat->read_seek(c->fmt_in, 0, stream_pos, 0);
}
+#endif
/* set the start time (needed for maxtime and RTP packet timing) */
c->start_time = cur_time;
c->first_pts = AV_NOPTS_VALUE;
return 0;
}
-/* currently desactivated because the new PTS handling is not
- satisfactory yet */
-//#define AV_READ_FRAME
-#ifdef AV_READ_FRAME
-
-/* XXX: generalize that in ffmpeg for picture/audio/data. Currently
- the return packet MUST NOT be freed */
-int av_read_frame(AVFormatContext *s, AVPacket *pkt)
+/* return the server clock (in us) */
+static int64_t get_server_clock(HTTPContext *c)
{
- AVStream *st;
- int len, ret, old_nb_streams, i;
-
- /* see if remaining frames must be parsed */
- for(;;) {
- if (s->cur_len > 0) {
- st = s->streams[s->cur_pkt.stream_index];
- len = avcodec_parse_frame(&st->codec, &pkt->data, &pkt->size,
- s->cur_ptr, s->cur_len);
- if (len < 0) {
- /* error: get next packet */
- s->cur_len = 0;
- } else {
- s->cur_ptr += len;
- s->cur_len -= len;
- if (pkt->size) {
- /* init pts counter if not done */
- if (st->pts.den == 0) {
- switch(st->codec.codec_type) {
- case CODEC_TYPE_AUDIO:
- st->pts_incr = (int64_t)s->pts_den;
- av_frac_init(&st->pts, st->pts.val, 0,
- (int64_t)s->pts_num * st->codec.sample_rate);
- break;
- case CODEC_TYPE_VIDEO:
- st->pts_incr = (int64_t)s->pts_den * st->codec.frame_rate_base;
- av_frac_init(&st->pts, st->pts.val, 0,
- (int64_t)s->pts_num * st->codec.frame_rate);
- break;
- default:
- av_abort();
- }
- }
-
- /* a frame was read: return it */
- pkt->pts = st->pts.val;
-#if 0
- printf("add pts=%Lx num=%Lx den=%Lx incr=%Lx\n",
- st->pts.val, st->pts.num, st->pts.den, st->pts_incr);
-#endif
- switch(st->codec.codec_type) {
- case CODEC_TYPE_AUDIO:
- av_frac_add(&st->pts, st->pts_incr * st->codec.frame_size);
- break;
- case CODEC_TYPE_VIDEO:
- av_frac_add(&st->pts, st->pts_incr);
- break;
- default:
- av_abort();
- }
- pkt->stream_index = s->cur_pkt.stream_index;
- /* we use the codec indication because it is
- more accurate than the demux flags */
- pkt->flags = 0;
- if (st->codec.coded_frame->key_frame)
- pkt->flags |= PKT_FLAG_KEY;
- return 0;
- }
- }
- } else {
- /* free previous packet */
- av_free_packet(&s->cur_pkt);
-
- old_nb_streams = s->nb_streams;
- ret = av_read_packet(s, &s->cur_pkt);
- if (ret)
- return ret;
- /* open parsers for each new streams */
- for(i = old_nb_streams; i < s->nb_streams; i++)
- open_parser(s, i);
- st = s->streams[s->cur_pkt.stream_index];
-
- /* update current pts (XXX: dts handling) from packet, or
- use current pts if none given */
- if (s->cur_pkt.pts != AV_NOPTS_VALUE) {
- av_frac_set(&st->pts, s->cur_pkt.pts);
- } else {
- s->cur_pkt.pts = st->pts.val;
- }
- if (!st->codec.codec) {
- /* no codec opened: just return the raw packet */
- *pkt = s->cur_pkt;
-
- /* no codec opened: just update the pts by considering we
- have one frame and free the packet */
- if (st->pts.den == 0) {
- switch(st->codec.codec_type) {
- case CODEC_TYPE_AUDIO:
- st->pts_incr = (int64_t)s->pts_den * st->codec.frame_size;
- av_frac_init(&st->pts, st->pts.val, 0,
- (int64_t)s->pts_num * st->codec.sample_rate);
- break;
- case CODEC_TYPE_VIDEO:
- st->pts_incr = (int64_t)s->pts_den * st->codec.frame_rate_base;
- av_frac_init(&st->pts, st->pts.val, 0,
- (int64_t)s->pts_num * st->codec.frame_rate);
- break;
- default:
- av_abort();
- }
- }
- av_frac_add(&st->pts, st->pts_incr);
- return 0;
- } else {
- s->cur_ptr = s->cur_pkt.data;
- s->cur_len = s->cur_pkt.size;
- }
- }
- }
+ /* compute current pts value from system time */
+ return (int64_t)(cur_time - c->start_time) * 1000LL;
}
-static int compute_send_delay(HTTPContext *c)
+/* return the estimated time at which the current packet must be sent
+ (in us) */
+static int64_t get_packet_send_clock(HTTPContext *c)
{
- int64_t cur_pts, delta_pts, next_pts;
- int delay1;
+ int bytes_left, bytes_sent, frame_bytes;
- /* compute current pts value from system time */
- cur_pts = ((int64_t)(cur_time - c->start_time) * c->fmt_in->pts_den) /
- (c->fmt_in->pts_num * 1000LL);
- /* compute the delta from the stream we choose as
- main clock (we do that to avoid using explicit
- buffers to do exact packet reordering for each
- stream */
- /* XXX: really need to fix the number of streams */
- if (c->pts_stream_index >= c->fmt_in->nb_streams)
- next_pts = cur_pts;
- else
- next_pts = c->fmt_in->streams[c->pts_stream_index]->pts.val;
- delta_pts = next_pts - cur_pts;
- if (delta_pts <= 0) {
- delay1 = 0;
+ frame_bytes = c->cur_frame_bytes;
+ if (frame_bytes <= 0) {
+ return c->cur_pts;
} else {
- delay1 = (delta_pts * 1000 * c->fmt_in->pts_num) / c->fmt_in->pts_den;
+ bytes_left = c->buffer_end - c->buffer_ptr;
+ bytes_sent = frame_bytes - bytes_left;
+ return c->cur_pts + (c->cur_frame_duration * bytes_sent) / frame_bytes;
}
- return delay1;
-}
-#else
-
-/* just fall backs */
-static int av_read_frame(AVFormatContext *s, AVPacket *pkt)
-{
- return av_read_packet(s, pkt);
}
-static int compute_send_delay(HTTPContext *c)
-{
- int datarate = 8 * get_longterm_datarate(&c->datarate, c->data_count);
-
- if (datarate > c->stream->bandwidth * 2000) {
- return 1000;
- }
- return 0;
-}
-#endif
-
static int http_prepare_data(HTTPContext *c)
{
int i, len, ret;
AVFormatContext *ctx;
+ av_freep(&c->pb_buffer);
switch(c->state) {
case HTTPSTATE_SEND_DATA_HEADER:
memset(&c->fmt_ctx, 0, sizeof(c->fmt_ctx));
c->fmt_ctx.nb_streams = c->stream->nb_streams;
for(i=0;i<c->fmt_ctx.nb_streams;i++) {
AVStream *st;
+ AVStream *src;
st = av_mallocz(sizeof(AVStream));
+ st->codec= avcodec_alloc_context();
c->fmt_ctx.streams[i] = st;
/* if file or feed, then just take streams from FFStream struct */
if (!c->stream->feed ||
c->stream->feed == c->stream)
- memcpy(st, c->stream->streams[i], sizeof(AVStream));
+ src = c->stream->streams[i];
else
- memcpy(st, c->stream->feed->streams[c->stream->feed_streams[i]],
- sizeof(AVStream));
- st->codec.frame_number = 0; /* XXX: should be done in
+ src = c->stream->feed->streams[c->stream->feed_streams[i]];
+
+ *st = *src;
+ st->priv_data = 0;
+ st->codec->frame_number = 0; /* XXX: should be done in
AVStream, not in codec */
/* I'm pretty sure that this is not correct...
* However, without it, we crash
*/
- st->codec.coded_frame = &dummy_frame;
+ st->codec->coded_frame = &dummy_frame;
}
c->got_key_frame = 0;
/* We have timed out */
c->state = HTTPSTATE_SEND_DATA_TRAILER;
} else {
- if (1 || c->is_packetized) {
- if (compute_send_delay(c) > 0) {
- c->state = HTTPSTATE_WAIT;
- return 1; /* state changed */
- }
- }
redo:
if (av_read_frame(c->fmt_in, &pkt) < 0) {
if (c->stream->feed && c->stream->feed->feed_opened) {
}
} else {
/* update first pts if needed */
- if (c->first_pts == AV_NOPTS_VALUE)
- c->first_pts = pkt.pts;
-
+ if (c->first_pts == AV_NOPTS_VALUE) {
+ c->first_pts = av_rescale_q(pkt.dts, c->fmt_in->streams[pkt.stream_index]->time_base, AV_TIME_BASE_Q);
+ c->start_time = cur_time;
+ }
/* send it to the appropriate stream */
if (c->stream->feed) {
/* if coming from a feed, select the right stream */
output stream (one for each RTP
connection). XXX: need more abstract handling */
if (c->is_packetized) {
+ AVStream *st;
+ /* compute send time and duration */
+ st = c->fmt_in->streams[pkt.stream_index];
+ c->cur_pts = av_rescale_q(pkt.dts, st->time_base, AV_TIME_BASE_Q);
+ if (st->start_time != AV_NOPTS_VALUE)
+ c->cur_pts -= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
+ c->cur_frame_duration = av_rescale_q(pkt.duration, st->time_base, AV_TIME_BASE_Q);
+#if 0
+ printf("index=%d pts=%0.3f duration=%0.6f\n",
+ pkt.stream_index,
+ (double)c->cur_pts /
+ AV_TIME_BASE,
+ (double)c->cur_frame_duration /
+ AV_TIME_BASE);
+#endif
+ /* find RTP context */
c->packet_stream_index = pkt.stream_index;
ctx = c->rtp_ctx[c->packet_stream_index];
if(!ctx) {
av_free_packet(&pkt);
- return -1;
+ break;
}
- codec = &ctx->streams[0]->codec;
+ codec = ctx->streams[0]->codec;
/* only one stream per RTP connection */
pkt.stream_index = 0;
} else {
ctx = &c->fmt_ctx;
/* Fudge here */
- codec = &ctx->streams[pkt.stream_index]->codec;
+ codec = ctx->streams[pkt.stream_index]->codec;
}
codec->coded_frame->key_frame = ((pkt.flags & PKT_FLAG_KEY) != 0);
-
-#ifdef PJSG
- if (codec->codec_type == CODEC_TYPE_AUDIO) {
- codec->frame_size = (codec->sample_rate * pkt.duration + 500000) / 1000000;
- /* printf("Calculated size %d, from sr %d, duration %d\n", codec->frame_size, codec->sample_rate, pkt.duration); */
- }
-#endif
-
if (c->is_packetized) {
- ret = url_open_dyn_packet_buf(&ctx->pb,
- url_get_max_packet_size(c->rtp_handles[c->packet_stream_index]));
- c->packet_byte_count = 0;
- c->packet_start_time_us = av_gettime();
+ int max_packet_size;
+ if (c->rtp_protocol == RTSP_PROTOCOL_RTP_TCP)
+ max_packet_size = RTSP_TCP_MAX_PACKET_SIZE;
+ else
+ max_packet_size = url_get_max_packet_size(c->rtp_handles[c->packet_stream_index]);
+ ret = url_open_dyn_packet_buf(&ctx->pb, max_packet_size);
} else {
ret = url_open_dyn_buf(&ctx->pb);
}
/* XXX: potential leak */
return -1;
}
- if (av_write_frame(ctx, pkt.stream_index, pkt.data, pkt.size)) {
+ if (av_write_frame(ctx, &pkt)) {
c->state = HTTPSTATE_SEND_DATA_TRAILER;
}
len = url_close_dyn_buf(&ctx->pb, &c->pb_buffer);
+ c->cur_frame_bytes = len;
c->buffer_ptr = c->pb_buffer;
c->buffer_end = c->pb_buffer + len;
codec->frame_number++;
+ if (len == 0)
+ goto redo;
}
-#ifndef AV_READ_FRAME
av_free_packet(&pkt);
-#endif
}
}
}
#define SHORT_TERM_BANDWIDTH 8000000
/* should convert the format at the same time */
+/* send data starting at c->buffer_ptr to the output connection
+   (either a UDP or a TCP connection) */
static int http_send_data(HTTPContext *c)
{
- int len, ret, dt;
-
- while (c->buffer_ptr >= c->buffer_end) {
- av_freep(&c->pb_buffer);
- ret = http_prepare_data(c);
- if (ret < 0)
- return -1;
- else if (ret == 0) {
- continue;
- } else {
- /* state change requested */
- return 0;
- }
- }
+ int len, ret;
- if (c->buffer_ptr < c->buffer_end) {
- if (c->is_packetized) {
- /* RTP/UDP data output */
- len = c->buffer_end - c->buffer_ptr;
- if (len < 4) {
- /* fail safe - should never happen */
- fail1:
- c->buffer_ptr = c->buffer_end;
- return 0;
- }
- len = (c->buffer_ptr[0] << 24) |
- (c->buffer_ptr[1] << 16) |
- (c->buffer_ptr[2] << 8) |
- (c->buffer_ptr[3]);
- if (len > (c->buffer_end - c->buffer_ptr))
- goto fail1;
-
- /* short term bandwidth limitation */
- dt = av_gettime() - c->packet_start_time_us;
- if (dt < 1)
- dt = 1;
-
- if ((c->packet_byte_count + len) * (int64_t)1000000 >=
- (SHORT_TERM_BANDWIDTH / 8) * (int64_t)dt) {
- /* bandwidth overflow : wait at most one tick and retry */
- c->state = HTTPSTATE_WAIT_SHORT;
- return 0;
+ for(;;) {
+ if (c->buffer_ptr >= c->buffer_end) {
+ ret = http_prepare_data(c);
+ if (ret < 0)
+ return -1;
+ else if (ret != 0) {
+ /* state change requested */
+ break;
}
-
- c->buffer_ptr += 4;
- url_write(c->rtp_handles[c->packet_stream_index],
- c->buffer_ptr, len);
- c->buffer_ptr += len;
- c->packet_byte_count += len;
} else {
- /* TCP data output */
- len = write(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr);
- if (len < 0) {
- if (errno != EAGAIN && errno != EINTR) {
- /* error : close connection */
- return -1;
- } else {
+ if (c->is_packetized) {
+ /* RTP data output */
+ len = c->buffer_end - c->buffer_ptr;
+ if (len < 4) {
+ /* fail safe - should never happen */
+ fail1:
+ c->buffer_ptr = c->buffer_end;
return 0;
}
+ len = (c->buffer_ptr[0] << 24) |
+ (c->buffer_ptr[1] << 16) |
+ (c->buffer_ptr[2] << 8) |
+ (c->buffer_ptr[3]);
+ if (len > (c->buffer_end - c->buffer_ptr))
+ goto fail1;
+ if ((get_packet_send_clock(c) - get_server_clock(c)) > 0) {
+ /* nothing to send yet: we can wait */
+ return 0;
+ }
+
+ c->data_count += len;
+ update_datarate(&c->datarate, c->data_count);
+ if (c->stream)
+ c->stream->bytes_served += len;
+
+ if (c->rtp_protocol == RTSP_PROTOCOL_RTP_TCP) {
+ /* RTP packets are sent inside the RTSP TCP connection */
+ ByteIOContext pb1, *pb = &pb1;
+ int interleaved_index, size;
+ uint8_t header[4];
+ HTTPContext *rtsp_c;
+
+ rtsp_c = c->rtsp_c;
+ /* if no RTSP connection left, error */
+ if (!rtsp_c)
+ return -1;
+ /* if already sending something, then wait. */
+ if (rtsp_c->state != RTSPSTATE_WAIT_REQUEST) {
+ break;
+ }
+ if (url_open_dyn_buf(pb) < 0)
+ goto fail1;
+ interleaved_index = c->packet_stream_index * 2;
+ /* RTCP packets (payload type 200) go on the odd interleaved channel index */
+ if (c->buffer_ptr[1] == 200)
+ interleaved_index++;
+ /* write RTSP TCP header */
+ header[0] = '$';
+ header[1] = interleaved_index;
+ header[2] = len >> 8;
+ header[3] = len;
+ put_buffer(pb, header, 4);
+ /* write RTP packet data */
+ c->buffer_ptr += 4;
+ put_buffer(pb, c->buffer_ptr, len);
+ size = url_close_dyn_buf(pb, &c->packet_buffer);
+ /* prepare asynchronous TCP sending */
+ rtsp_c->packet_buffer_ptr = c->packet_buffer;
+ rtsp_c->packet_buffer_end = c->packet_buffer + size;
+ c->buffer_ptr += len;
+
+ /* send everything we can NOW */
+ len = write(rtsp_c->fd, rtsp_c->packet_buffer_ptr,
+ rtsp_c->packet_buffer_end - rtsp_c->packet_buffer_ptr);
+ if (len > 0) {
+ rtsp_c->packet_buffer_ptr += len;
+ }
+ if (rtsp_c->packet_buffer_ptr < rtsp_c->packet_buffer_end) {
+ /* if we could not send all the data, we will
+ send it later, so a new state is needed to
+ "lock" the RTSP TCP connection */
+ rtsp_c->state = RTSPSTATE_SEND_PACKET;
+ break;
+ } else {
+ /* all data has been sent */
+ av_freep(&c->packet_buffer);
+ }
+ } else {
+ /* send RTP packet directly in UDP */
+ c->buffer_ptr += 4;
+ url_write(c->rtp_handles[c->packet_stream_index],
+ c->buffer_ptr, len);
+ c->buffer_ptr += len;
+ /* here we continue as we can send several packets per 10 ms slot */
+ }
} else {
- c->buffer_ptr += len;
+ /* TCP data output */
+ len = write(c->fd, c->buffer_ptr, c->buffer_end - c->buffer_ptr);
+ if (len < 0) {
+ if (errno != EAGAIN && errno != EINTR) {
+ /* error : close connection */
+ return -1;
+ } else {
+ return 0;
+ }
+ } else {
+ c->buffer_ptr += len;
+ }
+ c->data_count += len;
+ update_datarate(&c->datarate, c->data_count);
+ if (c->stream)
+ c->stream->bytes_served += len;
+ break;
}
}
- c->data_count += len;
- update_datarate(&c->datarate, c->data_count);
- if (c->stream)
- c->stream->bytes_served += len;
- }
+ } /* for(;;) */
return 0;
}
}
}
+ if (c->buffer_ptr - c->buffer >= 2 && c->data_count > FFM_PACKET_SIZE) {
+ if (c->buffer[0] != 'f' ||
+ c->buffer[1] != 'm') {
+ http_log("Feed stream has become desynchronized -- disconnecting\n");
+ goto fail;
+ }
+ }
+
if (c->buffer_ptr >= c->buffer_end) {
FFStream *feed = c->stream;
/* a packet has been received : write it in the store, except
goto fail;
}
for (i = 0; i < s.nb_streams; i++) {
- memcpy(&feed->streams[i]->codec,
- &s.streams[i]->codec, sizeof(AVCodecContext));
+ memcpy(feed->streams[i]->codec,
+ s.streams[i]->codec, sizeof(AVCodecContext));
}
av_freep(&s.priv_data);
}
url_fprintf(pb, "c=IN IP4 %s\n", inet_ntoa(stream->multicast_ip));
}
/* for each stream, we output the necessary info */
- private_payload_type = 96;
+ private_payload_type = RTP_PT_PRIVATE;
for(i = 0; i < stream->nb_streams; i++) {
st = stream->streams[i];
- switch(st->codec.codec_type) {
- case CODEC_TYPE_AUDIO:
- mediatype = "audio";
- break;
- case CODEC_TYPE_VIDEO:
+ if (st->codec->codec_id == CODEC_ID_MPEG2TS) {
mediatype = "video";
- break;
- default:
- mediatype = "application";
- break;
+ } else {
+ switch(st->codec->codec_type) {
+ case CODEC_TYPE_AUDIO:
+ mediatype = "audio";
+ break;
+ case CODEC_TYPE_VIDEO:
+ mediatype = "video";
+ break;
+ default:
+ mediatype = "application";
+ break;
+ }
}
/* NOTE: the port indication is not correct in case of
unicast. It is not an issue because RTSP gives it */
- payload_type = rtp_get_payload_type(&st->codec);
+ payload_type = rtp_get_payload_type(st->codec);
if (payload_type < 0)
payload_type = private_payload_type++;
if (stream->is_multicast) {
}
url_fprintf(pb, "m=%s %d RTP/AVP %d\n",
mediatype, port, payload_type);
- if (payload_type >= 96) {
+ if (payload_type >= RTP_PT_PRIVATE) {
/* for private payload type, we need to give more info */
- switch(st->codec.codec_id) {
+ switch(st->codec->codec_id) {
case CODEC_ID_MPEG4:
{
uint8_t *data;
url_fprintf(pb, "a=rtpmap:%d MP4V-ES/%d\n",
payload_type, 90000);
/* we must also add the mpeg4 header */
- data = st->codec.extradata;
+ data = st->codec->extradata;
if (data) {
- url_fprintf(pb, "a=fmtp:%d config=");
- for(j=0;j<st->codec.extradata_size;j++) {
+ url_fprintf(pb, "a=fmtp:%d config=", payload_type);
+ for(j=0;j<st->codec->extradata_size;j++) {
url_fprintf(pb, "%02x", data[j]);
}
url_fprintf(pb, "\n");
struct sockaddr_in my_addr;
/* find which url is asked */
- url_split(NULL, 0, NULL, 0, NULL, path1, sizeof(path1), url);
+ url_split(NULL, 0, NULL, 0, NULL, 0, NULL, path1, sizeof(path1), url);
path = path1;
if (*path == '/')
path++;
/* get the host IP */
len = sizeof(my_addr);
getsockname(c->fd, (struct sockaddr *)&my_addr, &len);
-
content_length = prepare_sdp_description(stream, &content, my_addr.sin_addr);
if (content_length < 0) {
rtsp_reply_error(c, RTSP_STATUS_INTERNAL);
RTSPActionServerSetup setup;
/* find which url is asked */
- url_split(NULL, 0, NULL, 0, NULL, path1, sizeof(path1), url);
+ url_split(NULL, 0, NULL, 0, NULL, 0, NULL, path1, sizeof(path1), url);
path = path1;
if (*path == '/')
path++;
/* find rtp session, and create it if none found */
rtp_c = find_rtp_session(h->session_id);
if (!rtp_c) {
- rtp_c = rtp_new_connection(&c->from_addr, stream, h->session_id);
+ /* always prefer UDP */
+ th = find_transport(h, RTSP_PROTOCOL_RTP_UDP);
+ if (!th) {
+ th = find_transport(h, RTSP_PROTOCOL_RTP_TCP);
+ if (!th) {
+ rtsp_reply_error(c, RTSP_STATUS_TRANSPORT);
+ return;
+ }
+ }
+
+ rtp_c = rtp_new_connection(&c->from_addr, stream, h->session_id,
+ th->protocol);
if (!rtp_c) {
rtsp_reply_error(c, RTSP_STATUS_BANDWIDTH);
return;
rtsp_reply_error(c, RTSP_STATUS_INTERNAL);
return;
}
-
- /* always prefer UDP */
- th = find_transport(h, RTSP_PROTOCOL_RTP_UDP);
- if (!th) {
- th = find_transport(h, RTSP_PROTOCOL_RTP_TCP);
- if (!th) {
- rtsp_reply_error(c, RTSP_STATUS_TRANSPORT);
- return;
- }
- }
- rtp_c->rtp_protocol = th->protocol;
}
/* test if stream is OK (test needed because several SETUP needs
}
/* setup stream */
- if (rtp_new_av_stream(rtp_c, stream_index, &dest_addr) < 0) {
+ if (rtp_new_av_stream(rtp_c, stream_index, &dest_addr, c) < 0) {
rtsp_reply_error(c, RTSP_STATUS_TRANSPORT);
return;
}
return NULL;
/* find which url is asked */
- url_split(NULL, 0, NULL, 0, NULL, path1, sizeof(path1), url);
+ url_split(NULL, 0, NULL, 0, NULL, 0, NULL, path1, sizeof(path1), url);
path = path1;
if (*path == '/')
path++;
return;
}
+#if 0
+ /* XXX: seek in stream */
+ if (h->range_start != AV_NOPTS_VALUE) {
+ printf("range_start=%0.3f\n", (double)h->range_start / AV_TIME_BASE);
+ av_seek_frame(rtp_c->fmt_in, -1, h->range_start);
+ }
+#endif
+
rtp_c->state = HTTPSTATE_SEND_DATA;
/* now everything is OK, so we can send the connection parameters */
}
rtp_c->state = HTTPSTATE_READY;
-
+ rtp_c->first_pts = AV_NOPTS_VALUE;
/* now everything is OK, so we can send the connection parameters */
rtsp_reply_header(c, RTSP_STATUS_OK);
/* session ID */
/* RTP handling */
static HTTPContext *rtp_new_connection(struct sockaddr_in *from_addr,
- FFStream *stream, const char *session_id)
+ FFStream *stream, const char *session_id,
+ enum RTSPProtocol rtp_protocol)
{
HTTPContext *c = NULL;
-
+ const char *proto_str;
+
/* XXX: should output a warning page when coming
close to the connection limit */
if (nb_connections >= nb_max_connections)
pstrcpy(c->session_id, sizeof(c->session_id), session_id);
c->state = HTTPSTATE_READY;
c->is_packetized = 1;
+ c->rtp_protocol = rtp_protocol;
+
/* protocol is shown in statistics */
- pstrcpy(c->protocol, sizeof(c->protocol), "RTP");
+ switch(c->rtp_protocol) {
+ case RTSP_PROTOCOL_RTP_UDP_MULTICAST:
+ proto_str = "MCAST";
+ break;
+ case RTSP_PROTOCOL_RTP_UDP:
+ proto_str = "UDP";
+ break;
+ case RTSP_PROTOCOL_RTP_TCP:
+ proto_str = "TCP";
+ break;
+ default:
+ proto_str = "???";
+ break;
+ }
+ pstrcpy(c->protocol, sizeof(c->protocol), "RTP/");
+ pstrcat(c->protocol, sizeof(c->protocol), proto_str);
current_bandwidth += stream->bandwidth;
}
/* add a new RTP stream in an RTP connection (used in RTSP SETUP
- command). if dest_addr is NULL, then TCP tunneling in RTSP is
+ command). If RTP/TCP protocol is used, TCP connection 'rtsp_c' is
used. */
static int rtp_new_av_stream(HTTPContext *c,
- int stream_index, struct sockaddr_in *dest_addr)
+ int stream_index, struct sockaddr_in *dest_addr,
+ HTTPContext *rtsp_c)
{
AVFormatContext *ctx;
AVStream *st;
URLContext *h;
uint8_t *dummy_buf;
char buf2[32];
+ int max_packet_size;
/* now we can open the relevant output stream */
- ctx = av_mallocz(sizeof(AVFormatContext));
+ ctx = av_alloc_format_context();
if (!ctx)
return -1;
ctx->oformat = &rtp_mux;
st = av_mallocz(sizeof(AVStream));
if (!st)
goto fail;
+ st->codec= avcodec_alloc_context();
ctx->nb_streams = 1;
ctx->streams[0] = st;
sizeof(AVStream));
}
- if (dest_addr) {
- /* build destination RTP address */
- ipaddr = inet_ntoa(dest_addr->sin_addr);
+ /* build destination RTP address */
+ ipaddr = inet_ntoa(dest_addr->sin_addr);
+
+ switch(c->rtp_protocol) {
+ case RTSP_PROTOCOL_RTP_UDP:
+ case RTSP_PROTOCOL_RTP_UDP_MULTICAST:
+ /* RTP/UDP case */
/* XXX: also pass as parameter to function ? */
if (c->stream->is_multicast) {
if (url_open(&h, ctx->filename, URL_WRONLY) < 0)
goto fail;
c->rtp_handles[stream_index] = h;
- } else {
+ max_packet_size = url_get_max_packet_size(h);
+ break;
+ case RTSP_PROTOCOL_RTP_TCP:
+ /* RTP/TCP case */
+ c->rtsp_c = rtsp_c;
+ max_packet_size = RTSP_TCP_MAX_PACKET_SIZE;
+ break;
+ default:
goto fail;
}
- http_log("%s:%d - - [%s] \"RTPSTART %s/streamid=%d\"\n",
+ http_log("%s:%d - - [%s] \"PLAY %s/streamid=%d %s\"\n",
ipaddr, ntohs(dest_addr->sin_port),
ctime1(buf2),
- c->stream->filename, stream_index);
+ c->stream->filename, stream_index, c->protocol);
/* normally, no packets should be output here, but the packet size may be checked */
- if (url_open_dyn_packet_buf(&ctx->pb,
- url_get_max_packet_size(h)) < 0) {
+ if (url_open_dyn_packet_buf(&ctx->pb, max_packet_size) < 0) {
/* XXX: close stream */
goto fail;
}
fst = av_mallocz(sizeof(AVStream));
if (!fst)
return NULL;
+ fst->codec= avcodec_alloc_context();
fst->priv_data = av_mallocz(sizeof(FeedData));
- memcpy(&fst->codec, codec, sizeof(AVCodecContext));
- fst->codec.coded_frame = &dummy_frame;
+ memcpy(fst->codec, codec, sizeof(AVCodecContext));
+ fst->codec->coded_frame = &dummy_frame;
+ fst->index = stream->nb_streams;
+ av_set_pts_info(fst, 33, 1, 90000);
stream->streams[stream->nb_streams++] = fst;
return fst;
}
AVCodecContext *av, *av1;
int i;
- av = &st->codec;
+ av = st->codec;
for(i=0;i<feed->nb_streams;i++) {
st = feed->streams[i];
- av1 = &st->codec;
+ av1 = st->codec;
if (av1->codec_id == av->codec_id &&
av1->codec_type == av->codec_type &&
av1->bit_rate == av->bit_rate) {
case CODEC_TYPE_VIDEO:
if (av1->width == av->width &&
av1->height == av->height &&
- av1->frame_rate == av->frame_rate &&
- av1->frame_rate_base == av->frame_rate_base &&
+ av1->time_base.den == av->time_base.den &&
+ av1->time_base.num == av->time_base.num &&
av1->gop_size == av->gop_size)
goto found;
break;
mpeg4_count = 0;
for(i=0;i<infile->nb_streams;i++) {
st = infile->streams[i];
- if (st->codec.codec_id == CODEC_ID_MPEG4 &&
- st->codec.extradata == NULL) {
+ if (st->codec->codec_id == CODEC_ID_MPEG4 &&
+ st->codec->extradata_size == 0) {
mpeg4_count++;
}
}
if (!mpeg4_count)
return;
- printf("MPEG4 without extra data: trying to find header\n");
+ printf("MPEG4 without extra data: trying to find header in %s\n", infile->filename);
while (mpeg4_count > 0) {
if (av_read_packet(infile, &pkt) < 0)
break;
st = infile->streams[pkt.stream_index];
- if (st->codec.codec_id == CODEC_ID_MPEG4 &&
- st->codec.extradata == NULL) {
+ if (st->codec->codec_id == CODEC_ID_MPEG4 &&
+ st->codec->extradata_size == 0) {
+ av_freep(&st->codec->extradata);
/* fill extradata with the header */
/* XXX: we make hard suppositions here ! */
p = pkt.data;
p[2] == 0x01 && p[3] == 0xb6) {
size = p - pkt.data;
// av_hex_dump(pkt.data, size);
- st->codec.extradata = av_malloc(size);
- st->codec.extradata_size = size;
- memcpy(st->codec.extradata, pkt.data, size);
+ st->codec->extradata = av_malloc(size);
+ st->codec->extradata_size = size;
+ memcpy(st->codec->extradata, pkt.data, size);
break;
}
p++;
/* the stream comes from a file */
/* try to open the file */
/* open stream */
+ stream->ap_in = av_mallocz(sizeof(AVFormatParameters));
+ if (stream->fmt == &rtp_mux) {
+ /* special case: when the output is a transport stream sent over RTP,
+ use a raw transport stream reader */
+ stream->ap_in->mpeg2ts_raw = 1;
+ stream->ap_in->mpeg2ts_compute_pcr = 1;
+ }
+
if (av_open_input_file(&infile, stream->feed_filename,
- NULL, 0, NULL) < 0) {
+ stream->ifmt, 0, stream->ap_in) < 0) {
http_log("%s not found", stream->feed_filename);
/* remove stream (no need to spend more time on it) */
fail:
extract_mpeg4_header(infile);
for(i=0;i<infile->nb_streams;i++) {
- add_av_stream1(stream, &infile->streams[i]->codec);
+ add_av_stream1(stream, infile->streams[i]->codec);
}
av_close_input_file(infile);
}
if (sf->index != ss->index ||
sf->id != ss->id) {
- printf("Index & Id do not match for stream %d\n", i);
+ printf("Index & Id do not match for stream %d (%s)\n",
+ i, feed->feed_filename);
matches = 0;
} else {
AVCodecContext *ccf, *ccs;
- ccf = &sf->codec;
- ccs = &ss->codec;
+ ccf = sf->codec;
+ ccs = ss->codec;
#define CHECK_CODEC(x) (ccf->x != ccs->x)
if (CHECK_CODEC(codec) || CHECK_CODEC(codec_type)) {
printf("Codec bitrates do not match for stream %d\n", i);
matches = 0;
} else if (ccf->codec_type == CODEC_TYPE_VIDEO) {
- if (CHECK_CODEC(frame_rate) ||
- CHECK_CODEC(frame_rate_base) ||
+ if (CHECK_CODEC(time_base.den) ||
+ CHECK_CODEC(time_base.num) ||
CHECK_CODEC(width) ||
CHECK_CODEC(height)) {
printf("Codec width, height and framerate do not match for stream %d\n", i);
bandwidth = 0;
for(i=0;i<stream->nb_streams;i++) {
AVStream *st = stream->streams[i];
- switch(st->codec.codec_type) {
+ switch(st->codec->codec_type) {
case CODEC_TYPE_AUDIO:
case CODEC_TYPE_VIDEO:
- bandwidth += st->codec.bit_rate;
+ bandwidth += st->codec->bit_rate;
break;
default:
break;
case CODEC_TYPE_VIDEO:
if (av->bit_rate == 0)
av->bit_rate = 64000;
- if (av->frame_rate == 0){
- av->frame_rate = 5;
- av->frame_rate_base = 1;
+ if (av->time_base.num == 0){
+ av->time_base.den = 5;
+ av->time_base.num = 1;
}
if (av->width == 0 || av->height == 0) {
av->width = 160;
av->qcompress = 0.5;
av->qblur = 0.5;
+ if (!av->nsse_weight)
+ av->nsse_weight = 8;
+
+ av->frame_skip_cmp = FF_CMP_DCTMAX;
+ av->me_method = ME_EPZS;
+ av->rc_buffer_aggressivity = 1.0;
+
if (!av->rc_eq)
av->rc_eq = "tex^qComp";
if (!av->i_quant_factor)
av->b_quant_factor = 1.25;
if (!av->b_quant_offset)
av->b_quant_offset = 1.25;
- if (!av->rc_min_rate)
- av->rc_min_rate = av->bit_rate / 2;
if (!av->rc_max_rate)
av->rc_max_rate = av->bit_rate * 2;
+ if (av->rc_max_rate && !av->rc_buffer_size) {
+ av->rc_buffer_size = av->rc_max_rate;
+ }
+
+
break;
default:
av_abort();
st = av_mallocz(sizeof(AVStream));
if (!st)
return;
+ st->codec = avcodec_alloc_context();
stream->streams[stream->nb_streams++] = st;
- memcpy(&st->codec, av, sizeof(AVCodecContext));
+ memcpy(st->codec, av, sizeof(AVCodecContext));
}
static int opt_audio_codec(const char *arg)
feed->child_argv = (char **) av_mallocz(64 * sizeof(char *));
- feed->child_argv[0] = av_malloc(7);
- strcpy(feed->child_argv[0], "ffmpeg");
-
- for (i = 1; i < 62; i++) {
+ for (i = 0; i < 62; i++) {
char argbuf[256];
get_arg(argbuf, sizeof(argbuf), &p);
feed->child_argv[i] = av_malloc(30 + strlen(feed->filename));
- snprintf(feed->child_argv[i], 256, "http://127.0.0.1:%d/%s",
- ntohs(my_http_addr.sin_port), feed->filename);
+ snprintf(feed->child_argv[i], 30+strlen(feed->filename),
+ "http://%s:%d/%s",
+ (my_http_addr.sin_addr.s_addr == INADDR_ANY) ? "127.0.0.1" :
+ inet_ntoa(my_http_addr.sin_addr),
+ ntohs(my_http_addr.sin_port), feed->filename);
}
} else if (!strcasecmp(cmd, "ReadOnlyFile")) {
if (feed) {
stream->stream_type = STREAM_TYPE_LIVE;
/* jpeg cannot be used here, so use single frame jpeg */
if (!strcmp(arg, "jpeg"))
- strcpy(arg, "singlejpeg");
+ strcpy(arg, "mjpeg");
stream->fmt = guess_stream_format(arg, NULL, NULL);
if (!stream->fmt) {
fprintf(stderr, "%s:%d: Unknown Format: %s\n",
audio_id = stream->fmt->audio_codec;
video_id = stream->fmt->video_codec;
}
+ } else if (!strcasecmp(cmd, "InputFormat")) {
+ stream->ifmt = av_find_input_format(arg);
+ if (!stream->ifmt) {
+ fprintf(stderr, "%s:%d: Unknown input format: %s\n",
+ filename, line_num, arg);
+ }
} else if (!strcasecmp(cmd, "FaviconURL")) {
if (stream && stream->stream_type == STREAM_TYPE_STATUS) {
get_arg(stream->feed_filename, sizeof(stream->feed_filename), &p);
errors++;
}
}
+ } else if (!strcasecmp(cmd, "Debug")) {
+ if (stream) {
+ get_arg(arg, sizeof(arg), &p);
+ video_enc.debug = strtol(arg,0,0);
+ }
+ } else if (!strcasecmp(cmd, "Strict")) {
+ if (stream) {
+ get_arg(arg, sizeof(arg), &p);
+ video_enc.strict_std_compliance = atoi(arg);
+ }
+ } else if (!strcasecmp(cmd, "VideoBufferSize")) {
+ if (stream) {
+ get_arg(arg, sizeof(arg), &p);
+ video_enc.rc_buffer_size = atoi(arg) * 8*1024;
+ }
} else if (!strcasecmp(cmd, "VideoBitRateTolerance")) {
if (stream) {
get_arg(arg, sizeof(arg), &p);
} else if (!strcasecmp(cmd, "VideoFrameRate")) {
get_arg(arg, sizeof(arg), &p);
if (stream) {
- video_enc.frame_rate_base= DEFAULT_FRAME_RATE_BASE;
- video_enc.frame_rate = (int)(strtod(arg, NULL) * video_enc.frame_rate_base);
+ video_enc.time_base.num= DEFAULT_FRAME_RATE_BASE;
+ video_enc.time_base.den = (int)(strtod(arg, NULL) * video_enc.time_base.num);
}
} else if (!strcasecmp(cmd, "VideoGopSize")) {
get_arg(arg, sizeof(arg), &p);
}
} else if (!strcasecmp(cmd, "VideoHighQuality")) {
if (stream) {
- video_enc.flags |= CODEC_FLAG_HQ;
+ video_enc.mb_decision = FF_MB_DECISION_BITS;
}
} else if (!strcasecmp(cmd, "Video4MotionVector")) {
if (stream) {
- video_enc.flags |= CODEC_FLAG_HQ;
+ video_enc.mb_decision = FF_MB_DECISION_BITS; //FIXME remove
video_enc.flags |= CODEC_FLAG_4MV;
}
} else if (!strcasecmp(cmd, "VideoQDiff")) {
}
#endif
-static void help(void)
+static void show_banner(void)
+{
+ printf("ffserver version " FFMPEG_VERSION ", Copyright (c) 2000-2003 Fabrice Bellard\n");
+}
+
+static void show_help(void)
{
- printf("ffserver version " FFMPEG_VERSION ", Copyright (c) 2000, 2001, 2002 Fabrice Bellard\n"
- "usage: ffserver [-L] [-h] [-f configfile]\n"
+ show_banner();
+ printf("usage: ffserver [-L] [-h] [-f configfile]\n"
"Hyper fast multi format Audio/Video streaming server\n"
"\n"
- "-L : print the LICENCE\n"
+ "-L : print the LICENSE\n"
"-h : this help\n"
"-f configfile : use configfile instead of /etc/ffserver.conf\n"
);
}
-static void licence(void)
+static void show_license(void)
{
+ show_banner();
printf(
- "ffserver version " FFMPEG_VERSION "\n"
- "Copyright (c) 2000, 2001, 2002 Fabrice Bellard\n"
"This library is free software; you can redistribute it and/or\n"
"modify it under the terms of the GNU Lesser General Public\n"
"License as published by the Free Software Foundation; either\n"
break;
switch(c) {
case 'L':
- licence();
+ show_license();
exit(1);
case '?':
case 'h':
- help();
+ show_help();
exit(1);
case 'n':
no_launch = 1;