#endif
#include <string.h>
#include <stdlib.h>
-#include "avformat.h"
-
+#include "libavutil/random.h"
+#include "libavutil/avstring.h"
+#include "libavformat/avformat.h"
+#include "libavformat/network.h"
+#include "libavformat/os_support.h"
+#include "libavformat/rtp.h"
+#include "libavformat/rtsp.h"
+#include "libavcodec/opt.h"
#include <stdarg.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
-#ifdef HAVE_SYS_POLL_H
-#include <sys/poll.h>
+#ifdef HAVE_POLL_H
+#include <poll.h>
#endif
#include <errno.h>
#include <sys/time.h>
#include <dlfcn.h>
#endif
-#include "network.h"
-#include "version.h"
-#include "ffserver.h"
-#include "random.h"
-#include "avstring.h"
+#include "cmdutils.h"
#undef exit
+const char program_name[] = "FFserver";
+const int program_birth_year = 2000;
+
+static const OptionDef options[];
+
/* maximum number of simultaneous HTTP connections */
#define HTTP_MAX_CONNECTIONS 2000
RTSPSTATE_SEND_PACKET,
};
-const char *http_state[] = {
+static const char *http_state[] = {
"HTTP_WAIT_REQUEST",
"HTTP_SEND_HEADER",
uint8_t *packet_buffer, *packet_buffer_ptr, *packet_buffer_end;
} HTTPContext;
-static AVFrame dummy_frame;
-
/* each generated stream is described here */
enum StreamType {
STREAM_TYPE_LIVE,
int conns_served;
int64_t bytes_served;
int64_t feed_max_size; /* maximum storage size, zero means unlimited */
- int64_t feed_write_index; /* current write position in feed (it wraps round) */
+ int64_t feed_write_index; /* current write position in feed (it wraps around) */
int64_t feed_size; /* current size of feed */
struct FFStream *next_feed;
} FFStream;
typedef struct FeedData {
long long data_count;
- float avg_frame_size; /* frame size averraged over last frames with exponential mean */
+ float avg_frame_size; /* frame size averaged over last frames with exponential mean */
} FeedData;
static struct sockaddr_in my_http_addr;
static int handle_connection(HTTPContext *c);
static int http_parse_request(HTTPContext *c);
static int http_send_data(HTTPContext *c);
-static void compute_stats(HTTPContext *c);
+static void compute_status(HTTPContext *c);
static int open_input_stream(HTTPContext *c, const char *info);
static int http_start_receive_data(HTTPContext *c);
static int http_receive_data(HTTPContext *c);
static const char *my_program_name;
static const char *my_program_dir;
+static const char *config_filename;
static int ffserver_debug;
static int ffserver_daemon;
static int no_launch;
static int nb_max_connections;
static int nb_connections;
-static int max_bandwidth;
-static int current_bandwidth;
+static uint64_t max_bandwidth;
+static uint64_t current_bandwidth;
static int64_t cur_time; // Making this global saves on passing it around everywhere
feed->pid = fork();
if (feed->pid < 0) {
- fprintf(stderr, "Unable to create children\n");
+ http_log("Unable to create children\n");
exit(1);
}
if (!feed->pid) {
continue;
if (open_input_stream(rtp_c, "") < 0) {
- fprintf(stderr, "Could not open input stream for stream '%s'\n",
- stream->filename);
+ http_log("Could not open input stream for stream '%s'\n",
+ stream->filename);
continue;
}
dest_addr.sin_port = htons(stream->multicast_port +
2 * stream_index);
if (rtp_new_av_stream(rtp_c, stream_index, &dest_addr, NULL) < 0) {
- fprintf(stderr, "Could not open output stream '%s/streamid=%d'\n",
- stream->filename, stream_index);
+ http_log("Could not open output stream '%s/streamid=%d'\n",
+ stream->filename, stream_index);
exit(1);
}
}
len = sizeof(from_addr);
fd = accept(server_fd, (struct sockaddr *)&from_addr,
&len);
- if (fd < 0)
+ if (fd < 0) {
+ http_log("error during accept %s\n", strerror(errno));
return;
+ }
ff_socket_nonblock(fd, 1);
/* XXX: should output a warning page when coming
/* prepare header */
if (url_open_dyn_buf(&ctx->pb) >= 0) {
av_write_trailer(ctx);
- url_close_dyn_buf(&ctx->pb, &c->pb_buffer);
+ url_close_dyn_buf(ctx->pb, &c->pb_buffer);
}
}
}
q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "\r\n");
q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "<html><head><title>Too busy</title></head><body>\r\n");
q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "<p>The server is too busy to serve your request at this time.</p>\r\n");
- q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "<p>The bandwidth being served (including your stream) is %dkbit/sec, and this exceeds the limit of %dkbit/sec.</p>\r\n",
+ q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "<p>The bandwidth being served (including your stream) is %lldkbit/sec, and this exceeds the limit of %lldkbit/sec.</p>\r\n",
current_bandwidth, max_bandwidth);
q += snprintf(q, q - (char *) c->buffer + c->buffer_size, "</body></html>\r\n");
#endif
if (c->stream->stream_type == STREAM_TYPE_STATUS)
- goto send_stats;
+ goto send_status;
/* open input stream */
if (open_input_stream(c, info) < 0) {
c->buffer_end = q;
c->state = HTTPSTATE_SEND_HEADER;
return 0;
- send_stats:
- compute_stats(c);
+ send_status:
+ compute_status(c);
c->http_error = 200; /* horrible : we use this value to avoid
going to the send data state */
c->state = HTTPSTATE_SEND_HEADER;
url_fprintf(pb, "%"PRId64"%c", count, *s);
}
-static void compute_stats(HTTPContext *c)
+static void compute_status(HTTPContext *c)
{
HTTPContext *c1;
FFStream *stream;
char *p;
time_t ti;
int i, len;
- ByteIOContext pb1, *pb = &pb1;
+ ByteIOContext *pb;
- if (url_open_dyn_buf(pb) < 0) {
+ if (url_open_dyn_buf(&pb) < 0) {
/* XXX: return an error ? */
c->buffer_ptr = c->buffer;
c->buffer_end = c->buffer;
url_fprintf(pb, "Pragma: no-cache\r\n");
url_fprintf(pb, "\r\n");
- url_fprintf(pb, "<HEAD><TITLE>FFServer Status</TITLE>\n");
+ url_fprintf(pb, "<HEAD><TITLE>%s Status</TITLE>\n", program_name);
if (c->stream->feed_filename)
url_fprintf(pb, "<link rel=\"shortcut icon\" href=\"%s\">\n", c->stream->feed_filename);
url_fprintf(pb, "</HEAD>\n<BODY>");
- url_fprintf(pb, "<H1>FFServer Status</H1>\n");
+ url_fprintf(pb, "<H1>%s Status</H1>\n", program_name);
/* format status */
url_fprintf(pb, "<H2>Available Streams</H2>\n");
url_fprintf(pb, "<TABLE cellspacing=0 cellpadding=4>\n");
strcpy(eosf - 4, ".asx");
else if (strcmp(eosf - 3, ".rm") == 0)
strcpy(eosf - 3, ".ram");
- else if (stream->fmt == &rtp_muxer) {
+ else if (stream->fmt && !strcmp(stream->fmt->name, "rtp")) {
/* generate a sample RTSP director if
unicast. Generate an SDP redirector if
multicast */
url_fprintf(pb, "Number of connections: %d / %d<BR>\n",
nb_connections, nb_max_connections);
- url_fprintf(pb, "Bandwidth in use: %dk / %dk<BR>\n",
+ url_fprintf(pb, "Bandwidth in use: %lldk / %lldk<BR>\n",
current_bandwidth, max_bandwidth);
url_fprintf(pb, "<TABLE>\n");
char buf[128];
char input_filename[1024];
AVFormatContext *s;
- int buf_size, i;
+ int buf_size, i, ret;
int64_t stream_pos;
/* find file name */
buf_size = FFM_PACKET_SIZE;
/* compute position (absolute time) */
if (find_info_tag(buf, sizeof(buf), "date", info))
+ {
stream_pos = parse_date(buf, 0);
+ if (stream_pos == INT64_MIN)
+ return -1;
+ }
else if (find_info_tag(buf, sizeof(buf), "buffer", info)) {
int prebuffer = strtol(buf, 0, 10);
stream_pos = av_gettime() - prebuffer * (int64_t)1000000;
buf_size = 0;
/* compute position (relative time) */
if (find_info_tag(buf, sizeof(buf), "date", info))
+ {
stream_pos = parse_date(buf, 1);
+ if (stream_pos == INT64_MIN)
+ return -1;
+ }
else
stream_pos = 0;
}
#endif
/* open stream */
- if (av_open_input_file(&s, input_filename, c->stream->ifmt,
- buf_size, c->stream->ap_in) < 0) {
- http_log("%s not found", input_filename);
+ if ((ret = av_open_input_file(&s, input_filename, c->stream->ifmt,
+ buf_size, c->stream->ap_in)) < 0) {
+ http_log("could not open %s: %d\n", input_filename, ret);
return -1;
}
+ s->flags |= AVFMT_FLAG_GENPTS;
c->fmt_in = s;
+ av_find_stream_info(c->fmt_in);
/* open each parser */
for(i=0;i<s->nb_streams;i++)
st->priv_data = 0;
st->codec->frame_number = 0; /* XXX: should be done in
AVStream, not in codec */
- /* I'm pretty sure that this is not correct...
- * However, without it, we crash
- */
- st->codec->coded_frame = &dummy_frame;
}
c->got_key_frame = 0;
/* XXX: potential leak */
return -1;
}
- c->fmt_ctx.pb.is_streamed = 1;
+ c->fmt_ctx.pb->is_streamed = 1;
av_set_parameters(&c->fmt_ctx, NULL);
- if (av_write_header(&c->fmt_ctx) < 0)
+ if (av_write_header(&c->fmt_ctx) < 0) {
+ http_log("Error writing output header\n");
return -1;
+ }
- len = url_close_dyn_buf(&c->fmt_ctx.pb, &c->pb_buffer);
+ len = url_close_dyn_buf(c->fmt_ctx.pb, &c->pb_buffer);
c->buffer_ptr = c->pb_buffer;
c->buffer_end = c->pb_buffer + len;
break;
case HTTPSTATE_SEND_DATA:
/* find a new packet */
- {
+ /* read a packet from the input stream */
+ if (c->stream->feed)
+ ffm_set_write_index(c->fmt_in,
+ c->stream->feed->feed_write_index,
+ c->stream->feed->feed_size);
+
+ if (c->stream->max_time &&
+ c->stream->max_time + c->start_time - cur_time < 0)
+ /* We have timed out */
+ c->state = HTTPSTATE_SEND_DATA_TRAILER;
+ else {
AVPacket pkt;
-
- /* read a packet from the input stream */
- if (c->stream->feed)
- ffm_set_write_index(c->fmt_in,
- c->stream->feed->feed_write_index,
- c->stream->feed->feed_size);
-
- if (c->stream->max_time &&
- c->stream->max_time + c->start_time - cur_time < 0)
- /* We have timed out */
- c->state = HTTPSTATE_SEND_DATA_TRAILER;
- else {
- redo:
- if (av_read_frame(c->fmt_in, &pkt) < 0) {
- if (c->stream->feed && c->stream->feed->feed_opened) {
- /* if coming from feed, it means we reached the end of the
- ffm file, so must wait for more data */
- c->state = HTTPSTATE_WAIT_FEED;
- return 1; /* state changed */
- } else {
- if (c->stream->loop) {
- av_close_input_file(c->fmt_in);
- c->fmt_in = NULL;
- if (open_input_stream(c, "") < 0)
- goto no_loop;
- goto redo;
- } else {
- no_loop:
- /* must send trailer now because eof or error */
- c->state = HTTPSTATE_SEND_DATA_TRAILER;
- }
- }
+ redo:
+ if (av_read_frame(c->fmt_in, &pkt) < 0) {
+ if (c->stream->feed && c->stream->feed->feed_opened) {
+ /* if coming from feed, it means we reached the end of the
+ ffm file, so must wait for more data */
+ c->state = HTTPSTATE_WAIT_FEED;
+ return 1; /* state changed */
} else {
- /* update first pts if needed */
- if (c->first_pts == AV_NOPTS_VALUE) {
- c->first_pts = av_rescale_q(pkt.dts, c->fmt_in->streams[pkt.stream_index]->time_base, AV_TIME_BASE_Q);
- c->start_time = cur_time;
+ if (c->stream->loop) {
+ av_close_input_file(c->fmt_in);
+ c->fmt_in = NULL;
+ if (open_input_stream(c, "") < 0)
+ goto no_loop;
+ goto redo;
+ } else {
+ no_loop:
+ /* must send trailer now because eof or error */
+ c->state = HTTPSTATE_SEND_DATA_TRAILER;
}
- /* send it to the appropriate stream */
- if (c->stream->feed) {
- /* if coming from a feed, select the right stream */
- if (c->switch_pending) {
- c->switch_pending = 0;
- for(i=0;i<c->stream->nb_streams;i++) {
- if (c->switch_feed_streams[i] == pkt.stream_index)
- if (pkt.flags & PKT_FLAG_KEY)
- do_switch_stream(c, i);
- if (c->switch_feed_streams[i] >= 0)
- c->switch_pending = 1;
- }
- }
+ }
+ } else {
+ int source_index = pkt.stream_index;
+ /* update first pts if needed */
+ if (c->first_pts == AV_NOPTS_VALUE) {
+ c->first_pts = av_rescale_q(pkt.dts, c->fmt_in->streams[pkt.stream_index]->time_base, AV_TIME_BASE_Q);
+ c->start_time = cur_time;
+ }
+ /* send it to the appropriate stream */
+ if (c->stream->feed) {
+ /* if coming from a feed, select the right stream */
+ if (c->switch_pending) {
+ c->switch_pending = 0;
for(i=0;i<c->stream->nb_streams;i++) {
- if (c->feed_streams[i] == pkt.stream_index) {
- pkt.stream_index = i;
+ if (c->switch_feed_streams[i] == pkt.stream_index)
if (pkt.flags & PKT_FLAG_KEY)
- c->got_key_frame |= 1 << i;
- /* See if we have all the key frames, then
- * we start to send. This logic is not quite
- * right, but it works for the case of a
- * single video stream with one or more
- * audio streams (for which every frame is
- * typically a key frame).
- */
- if (!c->stream->send_on_key ||
- ((c->got_key_frame + 1) >> c->stream->nb_streams))
- goto send_it;
- }
+ do_switch_stream(c, i);
+ if (c->switch_feed_streams[i] >= 0)
+ c->switch_pending = 1;
}
- } else {
- AVCodecContext *codec;
-
- send_it:
- /* specific handling for RTP: we use several
- output stream (one for each RTP
- connection). XXX: need more abstract handling */
- if (c->is_packetized) {
- AVStream *st;
- /* compute send time and duration */
- st = c->fmt_in->streams[pkt.stream_index];
- c->cur_pts = av_rescale_q(pkt.dts, st->time_base, AV_TIME_BASE_Q);
- if (st->start_time != AV_NOPTS_VALUE)
- c->cur_pts -= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
- c->cur_frame_duration = av_rescale_q(pkt.duration, st->time_base, AV_TIME_BASE_Q);
+ }
+ for(i=0;i<c->stream->nb_streams;i++) {
+ if (c->feed_streams[i] == pkt.stream_index) {
+ AVStream *st = c->fmt_in->streams[source_index];
+ pkt.stream_index = i;
+ if (pkt.flags & PKT_FLAG_KEY &&
+ (st->codec->codec_type == CODEC_TYPE_VIDEO ||
+ c->stream->nb_streams == 1))
+ c->got_key_frame = 1;
+ if (!c->stream->send_on_key || c->got_key_frame)
+ goto send_it;
+ }
+ }
+ } else {
+ AVCodecContext *codec;
+
+ send_it:
+                    /* specific handling for RTP: we use several
+                       output streams (one for each RTP
+                       connection). XXX: need more abstract handling */
+ if (c->is_packetized) {
+ AVStream *st;
+ /* compute send time and duration */
+ st = c->fmt_in->streams[pkt.stream_index];
+ c->cur_pts = av_rescale_q(pkt.dts, st->time_base, AV_TIME_BASE_Q);
+ if (st->start_time != AV_NOPTS_VALUE)
+ c->cur_pts -= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
+ c->cur_frame_duration = av_rescale_q(pkt.duration, st->time_base, AV_TIME_BASE_Q);
#if 0
- printf("index=%d pts=%0.3f duration=%0.6f\n",
- pkt.stream_index,
- (double)c->cur_pts /
- AV_TIME_BASE,
- (double)c->cur_frame_duration /
- AV_TIME_BASE);
+ printf("index=%d pts=%0.3f duration=%0.6f\n",
+ pkt.stream_index,
+ (double)c->cur_pts /
+ AV_TIME_BASE,
+ (double)c->cur_frame_duration /
+ AV_TIME_BASE);
#endif
- /* find RTP context */
- c->packet_stream_index = pkt.stream_index;
- ctx = c->rtp_ctx[c->packet_stream_index];
- if(!ctx) {
- av_free_packet(&pkt);
- break;
- }
- codec = ctx->streams[0]->codec;
- /* only one stream per RTP connection */
- pkt.stream_index = 0;
- } else {
- ctx = &c->fmt_ctx;
- /* Fudge here */
- codec = ctx->streams[pkt.stream_index]->codec;
+ /* find RTP context */
+ c->packet_stream_index = pkt.stream_index;
+ ctx = c->rtp_ctx[c->packet_stream_index];
+ if(!ctx) {
+ av_free_packet(&pkt);
+ break;
}
+ codec = ctx->streams[0]->codec;
+ /* only one stream per RTP connection */
+ pkt.stream_index = 0;
+ } else {
+ ctx = &c->fmt_ctx;
+ /* Fudge here */
+ codec = ctx->streams[pkt.stream_index]->codec;
+ }
- codec->coded_frame->key_frame = ((pkt.flags & PKT_FLAG_KEY) != 0);
- if (c->is_packetized) {
- int max_packet_size;
- if (c->rtp_protocol == RTSP_PROTOCOL_RTP_TCP)
- max_packet_size = RTSP_TCP_MAX_PACKET_SIZE;
- else
- max_packet_size = url_get_max_packet_size(c->rtp_handles[c->packet_stream_index]);
- ret = url_open_dyn_packet_buf(&ctx->pb, max_packet_size);
- } else {
- ret = url_open_dyn_buf(&ctx->pb);
- }
- if (ret < 0) {
- /* XXX: potential leak */
- return -1;
- }
- if (pkt.dts != AV_NOPTS_VALUE)
- pkt.dts = av_rescale_q(pkt.dts,
- c->fmt_in->streams[pkt.stream_index]->time_base,
- ctx->streams[pkt.stream_index]->time_base);
- if (pkt.pts != AV_NOPTS_VALUE)
- pkt.pts = av_rescale_q(pkt.pts,
- c->fmt_in->streams[pkt.stream_index]->time_base,
- ctx->streams[pkt.stream_index]->time_base);
- if (av_write_frame(ctx, &pkt))
- c->state = HTTPSTATE_SEND_DATA_TRAILER;
-
- len = url_close_dyn_buf(&ctx->pb, &c->pb_buffer);
- c->cur_frame_bytes = len;
- c->buffer_ptr = c->pb_buffer;
- c->buffer_end = c->pb_buffer + len;
-
- codec->frame_number++;
- if (len == 0)
- goto redo;
+ if (c->is_packetized) {
+ int max_packet_size;
+ if (c->rtp_protocol == RTSP_PROTOCOL_RTP_TCP)
+ max_packet_size = RTSP_TCP_MAX_PACKET_SIZE;
+ else
+ max_packet_size = url_get_max_packet_size(c->rtp_handles[c->packet_stream_index]);
+ ret = url_open_dyn_packet_buf(&ctx->pb, max_packet_size);
+ } else {
+ ret = url_open_dyn_buf(&ctx->pb);
+ }
+ if (ret < 0) {
+ /* XXX: potential leak */
+ return -1;
+ }
+ c->fmt_ctx.pb->is_streamed = 1;
+ if (pkt.dts != AV_NOPTS_VALUE)
+ pkt.dts = av_rescale_q(pkt.dts,
+ c->fmt_in->streams[source_index]->time_base,
+ ctx->streams[pkt.stream_index]->time_base);
+ if (pkt.pts != AV_NOPTS_VALUE)
+ pkt.pts = av_rescale_q(pkt.pts,
+ c->fmt_in->streams[source_index]->time_base,
+ ctx->streams[pkt.stream_index]->time_base);
+ pkt.duration = av_rescale_q(pkt.duration,
+ c->fmt_in->streams[source_index]->time_base,
+ ctx->streams[pkt.stream_index]->time_base);
+ if (av_write_frame(ctx, &pkt) < 0) {
+ http_log("Error writing frame to output\n");
+ c->state = HTTPSTATE_SEND_DATA_TRAILER;
+ }
+
+ len = url_close_dyn_buf(ctx->pb, &c->pb_buffer);
+ c->cur_frame_bytes = len;
+ c->buffer_ptr = c->pb_buffer;
+ c->buffer_end = c->pb_buffer + len;
+
+ codec->frame_number++;
+ if (len == 0) {
+ av_free_packet(&pkt);
+ goto redo;
}
- av_free_packet(&pkt);
}
+ av_free_packet(&pkt);
}
}
break;
/* XXX: potential leak */
return -1;
}
+ c->fmt_ctx.pb->is_streamed = 1;
av_write_trailer(ctx);
- len = url_close_dyn_buf(&ctx->pb, &c->pb_buffer);
+ len = url_close_dyn_buf(ctx->pb, &c->pb_buffer);
c->buffer_ptr = c->pb_buffer;
c->buffer_end = c->pb_buffer + len;
if (c->rtp_protocol == RTSP_PROTOCOL_RTP_TCP) {
/* RTP packets are sent inside the RTSP TCP connection */
- ByteIOContext pb1, *pb = &pb1;
+ ByteIOContext *pb;
int interleaved_index, size;
uint8_t header[4];
HTTPContext *rtsp_c;
/* if already sending something, then wait. */
if (rtsp_c->state != RTSPSTATE_WAIT_REQUEST)
break;
- if (url_open_dyn_buf(pb) < 0)
+ if (url_open_dyn_buf(&pb) < 0)
goto fail1;
interleaved_index = c->packet_stream_index * 2;
/* RTCP packets are sent at odd indexes */
/* open feed */
fd = open(c->stream->feed_filename, O_RDWR);
- if (fd < 0)
+ if (fd < 0) {
+ http_log("Error opening feeder file: %s\n", strerror(errno));
return -1;
+ }
c->feed_fd = fd;
c->stream->feed_write_index = ffm_read_write_index(fd);
// printf("writing pos=0x%"PRIx64" size=0x%"PRIx64"\n", feed->feed_write_index, feed->feed_size);
/* XXX: use llseek or url_seek */
lseek(c->feed_fd, feed->feed_write_index, SEEK_SET);
- write(c->feed_fd, c->buffer, FFM_PACKET_SIZE);
+ if (write(c->feed_fd, c->buffer, FFM_PACKET_SIZE) < 0) {
+ http_log("Error writing to feed file: %s\n", strerror(errno));
+ goto fail;
+ }
feed->feed_write_index += FFM_PACKET_SIZE;
/* update file size */
/* We have a header in our hands that contains useful data */
AVFormatContext s;
AVInputFormat *fmt_in;
- ByteIOContext *pb = &s.pb;
int i;
memset(&s, 0, sizeof(s));
- url_open_buf(pb, c->buffer, c->buffer_end - c->buffer, URL_RDONLY);
- pb->buf_end = c->buffer_end; /* ?? */
- pb->is_streamed = 1;
+ url_open_buf(&s.pb, c->buffer, c->buffer_end - c->buffer, URL_RDONLY);
+ s.pb->is_streamed = 1;
/* use feed output format name to find corresponding input format */
fmt_in = av_find_input_format(feed->fmt->name);
fail:
c->stream->feed_opened = 0;
close(c->feed_fd);
+ /* wake up any waiting connections to stop waiting for feed */
+ for(c1 = first_http_ctx; c1 != NULL; c1 = c1->next) {
+ if (c1->state == HTTPSTATE_WAIT_FEED &&
+ c1->stream->feed == c->stream->feed)
+ c1->state = HTTPSTATE_SEND_DATA_TRAILER;
+ }
return -1;
}
char url[1024];
char protocol[32];
char line[1024];
- ByteIOContext pb1;
int len;
RTSPHeader header1, *header = &header1;
av_strlcpy(c->url, url, sizeof(c->url));
av_strlcpy(c->protocol, protocol, sizeof(c->protocol));
- c->pb = &pb1;
- if (url_open_dyn_buf(c->pb) < 0) {
+ if (url_open_dyn_buf(&c->pb) < 0) {
/* XXX: cannot do more */
c->pb = NULL; /* safety */
return -1;
return 0;
}
-/* XXX: move that to rtsp.c, but would need to replace FFStream by
- AVFormatContext */
static int prepare_sdp_description(FFStream *stream, uint8_t **pbuffer,
struct in_addr my_ip)
{
- ByteIOContext pb1, *pb = &pb1;
- int i, payload_type, port, private_payload_type, j;
- const char *ipstr, *title, *mediatype;
- AVStream *st;
+ AVFormatContext *avc;
+ AVStream avs[MAX_STREAMS];
+ int i;
- if (url_open_dyn_buf(pb) < 0)
+ avc = av_alloc_format_context();
+ if (avc == NULL) {
return -1;
+ }
+ if (stream->title[0] != 0) {
+ av_strlcpy(avc->title, stream->title, sizeof(avc->title));
+ } else {
+ av_strlcpy(avc->title, "No Title", sizeof(avc->title));
+ }
+ avc->nb_streams = stream->nb_streams;
+ if (stream->is_multicast) {
+ snprintf(avc->filename, 1024, "rtp://%s:%d?multicast=1?ttl=%d",
+ inet_ntoa(stream->multicast_ip),
+ stream->multicast_port, stream->multicast_ttl);
+ }
- /* general media info */
-
- url_fprintf(pb, "v=0\n");
- ipstr = inet_ntoa(my_ip);
- url_fprintf(pb, "o=- 0 0 IN IP4 %s\n", ipstr);
- title = stream->title;
- if (title[0] == '\0')
- title = "No Title";
- url_fprintf(pb, "s=%s\n", title);
- if (stream->comment[0] != '\0')
- url_fprintf(pb, "i=%s\n", stream->comment);
- if (stream->is_multicast)
- url_fprintf(pb, "c=IN IP4 %s\n", inet_ntoa(stream->multicast_ip));
-
- /* for each stream, we output the necessary info */
- private_payload_type = RTP_PT_PRIVATE;
for(i = 0; i < stream->nb_streams; i++) {
- st = stream->streams[i];
- if (st->codec->codec_id == CODEC_ID_MPEG2TS)
- mediatype = "video";
- else {
- switch(st->codec->codec_type) {
- case CODEC_TYPE_AUDIO:
- mediatype = "audio";
- break;
- case CODEC_TYPE_VIDEO:
- mediatype = "video";
- break;
- default:
- mediatype = "application";
- break;
- }
- }
- /* NOTE: the port indication is not correct in case of
- unicast. It is not an issue because RTSP gives it */
- payload_type = rtp_get_payload_type(st->codec);
- if (payload_type < 0)
- payload_type = private_payload_type++;
- if (stream->is_multicast)
- port = stream->multicast_port + 2 * i;
- else
- port = 0;
-
- url_fprintf(pb, "m=%s %d RTP/AVP %d\n",
- mediatype, port, payload_type);
- if (payload_type >= RTP_PT_PRIVATE) {
- /* for private payload type, we need to give more info */
- switch(st->codec->codec_id) {
- case CODEC_ID_MPEG4:
- {
- uint8_t *data;
- url_fprintf(pb, "a=rtpmap:%d MP4V-ES/%d\n",
- payload_type, 90000);
- /* we must also add the mpeg4 header */
- data = st->codec->extradata;
- if (data) {
- url_fprintf(pb, "a=fmtp:%d config=", payload_type);
- for(j=0;j<st->codec->extradata_size;j++)
- url_fprintf(pb, "%02x", data[j]);
- url_fprintf(pb, "\n");
- }
- }
- break;
- default:
- /* XXX: add other codecs ? */
- goto fail;
- }
- }
- url_fprintf(pb, "a=control:streamid=%d\n", i);
+ avc->streams[i] = &avs[i];
+ avc->streams[i]->codec = stream->streams[i]->codec;
}
- return url_close_dyn_buf(pb, pbuffer);
- fail:
- url_close_dyn_buf(pb, pbuffer);
- av_free(*pbuffer);
- return -1;
+ *pbuffer = av_mallocz(2048);
+ avf_sdp_create(&avc, 1, *pbuffer, 2048);
+ av_free(avc);
+
+ return strlen(*pbuffer);
}
static void rtsp_cmd_options(HTTPContext *c, const char *url)
path++;
for(stream = first_stream; stream != NULL; stream = stream->next) {
- if (!stream->is_feed && stream->fmt == &rtp_muxer &&
+ if (!stream->is_feed &&
+ stream->fmt && !strcmp(stream->fmt->name, "rtp") &&
!strcmp(path, stream->filename)) {
goto found;
}
/* now check each stream */
for(stream = first_stream; stream != NULL; stream = stream->next) {
- if (!stream->is_feed && stream->fmt == &rtp_muxer) {
+ if (!stream->is_feed &&
+ stream->fmt && !strcmp(stream->fmt->name, "rtp")) {
/* accept aggregate filenames only if single stream */
if (!strcmp(path, stream->filename)) {
if (stream->nb_streams != 1) {
AVFormatContext *ctx;
AVStream *st;
char *ipaddr;
- URLContext *h;
+ URLContext *h = NULL;
uint8_t *dummy_buf;
char buf2[32];
int max_packet_size;
ctx = av_alloc_format_context();
if (!ctx)
return -1;
- ctx->oformat = &rtp_muxer;
+ ctx->oformat = guess_format("rtp", NULL, NULL);
st = av_mallocz(sizeof(AVStream));
if (!st)
av_free(ctx);
return -1;
}
- url_close_dyn_buf(&ctx->pb, &dummy_buf);
+ url_close_dyn_buf(ctx->pb, &dummy_buf);
av_free(dummy_buf);
c->rtp_ctx[stream_index] = ctx;
fst->codec= avcodec_alloc_context();
fst->priv_data = av_mallocz(sizeof(FeedData));
memcpy(fst->codec, codec, sizeof(AVCodecContext));
- fst->codec->coded_frame = &dummy_frame;
fst->index = stream->nb_streams;
av_set_pts_info(fst, 33, 1, 90000);
stream->streams[stream->nb_streams++] = fst;
{
FFStream *stream, *stream_next;
AVFormatContext *infile;
- int i;
+ int i, ret;
/* gather all streams */
for(stream = first_stream; stream != NULL; stream = stream_next) {
/* try to open the file */
/* open stream */
stream->ap_in = av_mallocz(sizeof(AVFormatParameters));
- if (stream->fmt == &rtp_muxer) {
+ if (stream->fmt && !strcmp(stream->fmt->name, "rtp")) {
/* specific case : if transport stream output to RTP,
we use a raw transport stream reader */
stream->ap_in->mpeg2ts_raw = 1;
stream->ap_in->mpeg2ts_compute_pcr = 1;
}
- if (av_open_input_file(&infile, stream->feed_filename,
- stream->ifmt, 0, stream->ap_in) < 0) {
- http_log("%s not found", stream->feed_filename);
+ if ((ret = av_open_input_file(&infile, stream->feed_filename,
+ stream->ifmt, 0, stream->ap_in)) < 0) {
+ http_log("could not open %s: %d\n", stream->feed_filename, ret);
/* remove stream (no need to spend more time on it) */
fail:
remove_stream(stream);
/* find all the AVStreams inside and reference them in
'stream' */
if (av_find_stream_info(infile) < 0) {
- http_log("Could not find codec parameters from '%s'",
+ http_log("Could not find codec parameters from '%s'\n",
stream->feed_filename);
av_close_input_file(infile);
goto fail;
/* only write the header of the ffm file */
if (url_fopen(&s->pb, feed->feed_filename, URL_WRONLY) < 0) {
- fprintf(stderr, "Could not open output feed file '%s'\n",
- feed->feed_filename);
+ http_log("Could not open output feed file '%s'\n",
+ feed->feed_filename);
exit(1);
}
s->oformat = feed->fmt;
}
av_set_parameters(s, NULL);
if (av_write_header(s) < 0) {
- fprintf(stderr, "Container doesn't supports the required parameters\n");
+ http_log("Container doesn't supports the required parameters\n");
exit(1);
}
/* XXX: need better api */
av_freep(&s->priv_data);
- url_fclose(&s->pb);
+ url_fclose(s->pb);
}
/* get feed size and write index */
fd = open(feed->feed_filename, O_RDONLY);
if (fd < 0) {
- fprintf(stderr, "Could not open output feed file '%s'\n",
+ http_log("Could not open output feed file '%s'\n",
feed->feed_filename);
exit(1);
}
}
/* Bitrate tolerance is less for streaming */
if (av->bit_rate_tolerance == 0)
- av->bit_rate_tolerance = av->bit_rate / 4;
+ av->bit_rate_tolerance = FFMAX(av->bit_rate / 4,
+ (int64_t)av->bit_rate*av->time_base.num/av->time_base.den);
if (av->qmin == 0)
av->qmin = 3;
if (av->qmax == 0)
static int opt_audio_codec(const char *arg)
{
- AVCodec *p;
+ AVCodec *p= avcodec_find_encoder_by_name(arg);
- p = first_avcodec;
- while (p) {
- if (!strcmp(p->name, arg) && p->type == CODEC_TYPE_AUDIO)
- break;
- p = p->next;
- }
- if (p == NULL)
+ if (p == NULL || p->type != CODEC_TYPE_AUDIO)
return CODEC_ID_NONE;
return p->id;
static int opt_video_codec(const char *arg)
{
- AVCodec *p;
+ AVCodec *p= avcodec_find_encoder_by_name(arg);
- p = first_avcodec;
- while (p) {
- if (!strcmp(p->name, arg) && p->type == CODEC_TYPE_VIDEO)
- break;
- p = p->next;
- }
- if (p == NULL)
+ if (p == NULL || p->type != CODEC_TYPE_VIDEO)
return CODEC_ID_NONE;
return p->id;
}
#endif
+static int opt_default(const char *opt, const char *arg,
+ AVCodecContext *avctx, int type)
+{
+ const AVOption *o = NULL;
+ const AVOption *o2 = av_find_opt(avctx, opt, NULL, type, type);
+ if(o2)
+ o = av_set_string(avctx, opt, arg);
+ if(!o)
+ return -1;
+ return 0;
+}
+
static int parse_ffconfig(const char *filename)
{
FILE *f;
nb_max_connections = val;
}
} else if (!strcasecmp(cmd, "MaxBandwidth")) {
+ int64_t llval;
get_arg(arg, sizeof(arg), &p);
- val = atoi(arg);
- if (val < 10 || val > 100000) {
+ llval = atoll(arg);
+ if (llval < 10 || llval > 10000000) {
fprintf(stderr, "%s:%d: Invalid MaxBandwidth: %s\n",
filename, line_num, arg);
errors++;
} else
- max_bandwidth = val;
+ max_bandwidth = llval;
} else if (!strcasecmp(cmd, "CustomLog")) {
- get_arg(logfilename, sizeof(logfilename), &p);
+ if (!ffserver_debug)
+ get_arg(logfilename, sizeof(logfilename), &p);
} else if (!strcasecmp(cmd, "<Feed")) {
/*********************************************/
/* Feed related options */
if (feed) {
int i;
- feed->child_argv = (char **) av_mallocz(64 * sizeof(char *));
+ feed->child_argv = av_mallocz(64 * sizeof(char *));
for (i = 0; i < 62; i++) {
get_arg(arg, sizeof(arg), &p);
get_arg(stream->feed_filename, sizeof(stream->feed_filename), &p);
} else if (!strcasecmp(cmd, "FileMaxSize")) {
if (feed) {
- const char *p1;
+ char *p1;
double fsize;
get_arg(arg, sizeof(arg), &p);
p1 = arg;
- fsize = strtod(p1, (char **)&p1);
+ fsize = strtod(p1, &p1);
switch(toupper(*p1)) {
case 'K':
fsize *= 1024;
fprintf(stderr, "%s:%d: Already in a tag\n",
filename, line_num);
} else {
+ const AVClass *class;
stream = av_mallocz(sizeof(FFStream));
*last_stream = stream;
last_stream = &stream->next;
if (*q)
*q = '\0';
stream->fmt = guess_stream_format(NULL, stream->filename, NULL);
+ /* fetch avclass so AVOption works
+ * FIXME try to use avcodec_get_context_defaults2
+ * without changing defaults too much */
+ avcodec_get_context_defaults(&video_enc);
+ class = video_enc.av_class;
memset(&audio_enc, 0, sizeof(AVCodecContext));
memset(&video_enc, 0, sizeof(AVCodecContext));
+ audio_enc.av_class = class;
+ video_enc.av_class = class;
audio_id = CODEC_ID_NONE;
video_id = CODEC_ID_NONE;
if (stream->fmt) {
}
} else if (!strcasecmp(cmd, "Format")) {
get_arg(arg, sizeof(arg), &p);
+ if (stream) {
if (!strcmp(arg, "status")) {
stream->stream_type = STREAM_TYPE_STATUS;
stream->fmt = NULL;
audio_id = stream->fmt->audio_codec;
video_id = stream->fmt->video_codec;
}
+ }
} else if (!strcasecmp(cmd, "InputFormat")) {
get_arg(arg, sizeof(arg), &p);
stream->ifmt = av_find_input_format(arg);
} else if (!strcasecmp(cmd, "VideoFrameRate")) {
get_arg(arg, sizeof(arg), &p);
if (stream) {
- video_enc.time_base.num= DEFAULT_FRAME_RATE_BASE;
- video_enc.time_base.den = (int)(strtod(arg, NULL) * video_enc.time_base.num);
+ AVRational frame_rate;
+ if (av_parse_video_frame_rate(&frame_rate, arg) < 0) {
+ fprintf(stderr, "Incorrect frame rate\n");
+ errors++;
+ } else {
+ video_enc.time_base.num = frame_rate.den;
+ video_enc.time_base.den = frame_rate.num;
+ }
}
} else if (!strcasecmp(cmd, "VideoGopSize")) {
get_arg(arg, sizeof(arg), &p);
video_enc.mb_decision = FF_MB_DECISION_BITS; //FIXME remove
video_enc.flags |= CODEC_FLAG_4MV;
}
+ } else if (!strcasecmp(cmd, "AVOptionVideo") ||
+ !strcasecmp(cmd, "AVOptionAudio")) {
+ char arg2[1024];
+ AVCodecContext *avctx;
+ int type;
+ get_arg(arg, sizeof(arg), &p);
+ get_arg(arg2, sizeof(arg2), &p);
+ if (!strcasecmp(cmd, "AVOptionVideo")) {
+ avctx = &video_enc;
+ type = AV_OPT_FLAG_VIDEO_PARAM;
+ } else {
+ avctx = &audio_enc;
+ type = AV_OPT_FLAG_AUDIO_PARAM;
+ }
+ if (opt_default(arg, arg2, avctx, type|AV_OPT_FLAG_ENCODING_PARAM)) {
+ fprintf(stderr, "AVOption error: %s %s\n", arg, arg2);
+ errors++;
+ }
} else if (!strcasecmp(cmd, "VideoTag")) {
get_arg(arg, sizeof(arg), &p);
if ((strlen(arg) == 4) && stream)
}
if (!errors) {
- IPAddressACL *nacl = (IPAddressACL *) av_mallocz(sizeof(*nacl));
+ IPAddressACL *nacl = av_mallocz(sizeof(*nacl));
IPAddressACL **naclp = 0;
acl.next = 0;
fprintf(stderr, "%s:%d: No corresponding <Stream> for </Stream>\n",
filename, line_num);
errors++;
- }
- if (stream->feed && stream->fmt && strcmp(stream->fmt->name, "ffm") != 0) {
- if (audio_id != CODEC_ID_NONE) {
- audio_enc.codec_type = CODEC_TYPE_AUDIO;
- audio_enc.codec_id = audio_id;
- add_codec(stream, &audio_enc);
- }
- if (video_id != CODEC_ID_NONE) {
- video_enc.codec_type = CODEC_TYPE_VIDEO;
- video_enc.codec_id = video_id;
- add_codec(stream, &video_enc);
+ } else {
+ if (stream->feed && stream->fmt && strcmp(stream->fmt->name, "ffm") != 0) {
+ if (audio_id != CODEC_ID_NONE) {
+ audio_enc.codec_type = CODEC_TYPE_AUDIO;
+ audio_enc.codec_id = audio_id;
+ add_codec(stream, &audio_enc);
+ }
+ if (video_id != CODEC_ID_NONE) {
+ video_enc.codec_type = CODEC_TYPE_VIDEO;
+ video_enc.codec_id = video_id;
+ add_codec(stream, &video_enc);
+ }
}
+ stream = NULL;
}
- stream = NULL;
} else if (!strcasecmp(cmd, "<Redirect")) {
/*********************************************/
char *q;
fprintf(stderr, "%s:%d: No corresponding <Redirect> for </Redirect>\n",
filename, line_num);
errors++;
+ } else {
+ if (!redirect->feed_filename[0]) {
+ fprintf(stderr, "%s:%d: No URL found for <Redirect>\n",
+ filename, line_num);
+ errors++;
+ }
+ redirect = NULL;
}
- if (!redirect->feed_filename[0]) {
- fprintf(stderr, "%s:%d: No URL found for <Redirect>\n",
- filename, line_num);
- errors++;
- }
- redirect = NULL;
} else if (!strcasecmp(cmd, "LoadModule")) {
get_arg(arg, sizeof(arg), &p);
#ifdef HAVE_DLOPEN
return 0;
}
-static void show_banner(void)
-{
- printf("ffserver version " FFMPEG_VERSION ", Copyright (c) 2000-2006 Fabrice Bellard, et al.\n");
-}
-
-static void show_help(void)
-{
- show_banner();
- printf("usage: ffserver [-L] [-h] [-f configfile]\n"
- "Hyper fast multi format Audio/Video streaming server\n"
- "\n"
- "-L : print the LICENSE\n"
- "-h : this help\n"
- "-f configfile : use configfile instead of /etc/ffserver.conf\n"
- );
-}
-
-static void show_license(void)
-{
- show_banner();
- printf(
- "FFmpeg is free software; you can redistribute it and/or\n"
- "modify it under the terms of the GNU Lesser General Public\n"
- "License as published by the Free Software Foundation; either\n"
- "version 2.1 of the License, or (at your option) any later version.\n"
- "\n"
- "FFmpeg is distributed in the hope that it will be useful,\n"
- "but WITHOUT ANY WARRANTY; without even the implied warranty of\n"
- "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n"
- "Lesser General Public License for more details.\n"
- "\n"
- "You should have received a copy of the GNU Lesser General Public\n"
- "License along with FFmpeg; if not, write to the Free Software\n"
- "Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n"
- );
-}
-
static void handle_child_exit(int sig)
{
pid_t pid;
need_to_start_children = 1;
}
+static void opt_debug(void) /* "-d": run in the foreground with debug logging */
+{
+    ffserver_debug = 1;
+    ffserver_daemon = 0;  /* debug mode implies no daemonizing */
+    logfilename[0] = '-'; /* "-" selects stdout logging; assumes logfilename[1] is already '\0' -- TODO confirm */
+}
+
+static void opt_show_help(void) /* "-h": print usage banner and the option table */
+{
+    printf("usage: ffserver [options]\n"
+           "Hyper fast multi format Audio/Video streaming server\n");
+    printf("\n");
+    show_help_options(options, "Main options:\n", 0, 0); /* one line per entry in options[] below */
+}
+
+static const OptionDef options[] = { /* command-line option table, consumed by parse_options() in main() */
+    { "h", OPT_EXIT, {(void*)opt_show_help}, "show help" },
+    { "version", OPT_EXIT, {(void*)show_version}, "show version" },
+    { "L", OPT_EXIT, {(void*)show_license}, "show license" },
+    { "formats", OPT_EXIT, {(void*)show_formats}, "show available formats, codecs, protocols, ..." },
+    { "n", OPT_BOOL, {(void *)&no_launch }, "enable no-launch mode" },
+    { "d", 0, {(void*)opt_debug}, "enable debug mode" },
+    { "f", HAS_ARG | OPT_STRING, {(void*)&config_filename }, "use configfile instead of /etc/ffserver.conf", "configfile" },
+    { NULL }, /* table terminator */
+};
+
int main(int argc, char **argv)
{
- const char *config_filename;
- int c;
struct sigaction sigact;
av_register_all();
+ show_banner();
+
config_filename = "/etc/ffserver.conf";
my_program_name = argv[0];
my_program_dir = getcwd(0, 0);
ffserver_daemon = 1;
- for(;;) {
- c = getopt(argc, argv, "ndLh?f:");
- if (c == -1)
- break;
- switch(c) {
- case 'L':
- show_license();
- exit(0);
- case '?':
- case 'h':
- show_help();
- exit(0);
- case 'n':
- no_launch = 1;
- break;
- case 'd':
- ffserver_debug = 1;
- ffserver_daemon = 0;
- break;
- case 'f':
- config_filename = optarg;
- break;
- default:
- exit(2);
- }
- }
+ parse_options(argc, argv, options, NULL);
- putenv("http_proxy"); /* Kill the http_proxy */
+ unsetenv("http_proxy"); /* Kill the http_proxy */
av_init_random(av_gettime() + (getpid() << 16), &random_state);
if (!strcmp(logfilename, "-"))
logfile = stdout;
else
- logfile = fopen(logfilename, "w");
+ logfile = fopen(logfilename, "a");
}
if (http_server() < 0) {