+ /* Sender loop: keeps num_sockets_per_sender sockets busy via epoll,
+  * pushing random data until each sender's byte quota is exhausted,
+  * then replaces it with a fresh one. Progress is folded into the
+  * shared total_bytes_sent counter under send_mutex. */
+ unsigned i;
+ char buf[65536];
+ unsigned long long bytes_sent = 0;
+ int ep_fd;
+ struct epoll_event *events;
+
+ // the size hint is ignored by modern kernels but must be > 0
+ ep_fd = epoll_create(num_sockets_per_sender);
+ if (ep_fd == -1) {
+ perror("epoll_create");
+ exit(1);
+ }
+
+ // malloc, since there might not be enough room on the stack
+ events = malloc(sizeof(*events) * num_sockets_per_sender);
+ if (events == NULL) {
+ perror("malloc");
+ exit(1);
+ }
+
+ // fill the buffer with random junk
+ for (i = 0; i < sizeof(buf); ++i)
+ buf[i] = rand() & 0xff;
+
+ // allocate all the senders
+ for (i = 0; i < num_sockets_per_sender; ++i)
+ generate_new_sender(ep_fd);
+
+ for ( ;; ) {
+ int num_active = epoll_wait(ep_fd, events, num_sockets_per_sender, -1);
+ if (num_active == -1) {
+ // a delivered signal interrupts the wait; retry, don't die
+ if (errno == EINTR)
+ continue;
+
+ perror("epoll_wait");
+ exit(1);
+ }
+
+ for (int n = 0; n < num_active; ++n) {
+ struct sender *s = (struct sender *)events[n].data.ptr;
+ unsigned long long bytes_to_send = s->bytes_left;
+ ssize_t ret; // send() returns ssize_t; an unsigned type hides the -1
+
+ // never send more than one buffer's worth per event
+ if (bytes_to_send > sizeof(buf)) {
+ bytes_to_send = sizeof(buf);
+ }
+
+ // MSG_NOSIGNAL: a peer that closed early gives us EPIPE, not SIGPIPE
+ ret = send(s->fd, buf, bytes_to_send, MSG_NOSIGNAL);
+ if (ret == -1) {
+ // socket not writable after all; wait for the next event
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
+ continue;
+
+ perror("send()");
+ exit(1);
+ }
+
+ s->bytes_left -= ret;
+ bytes_sent += ret;
+
+ // update the central counter after every 1MB (8ms
+ // at gigabit speeds, should be enough) of sent data
+ if (bytes_sent > 1048576) {
+ pthread_mutex_lock(&send_mutex);
+ total_bytes_sent += bytes_sent;
+ pthread_mutex_unlock(&send_mutex);
+
+ bytes_sent = 0;
+ }
+
+ // quota exhausted: retire this sender and start a replacement
+ if (s->bytes_left == 0) {
+ if (epoll_ctl(ep_fd, EPOLL_CTL_DEL, s->fd, NULL) == -1) {
+ perror("EPOLL_CTL_DEL"); // was mislabelled EPOLL_CTL_ADD
+ exit(1);
+ }
+ close(s->fd);
+
+ free(s);
+ generate_new_sender(ep_fd);
+ }
+ }
+ }
+
+ // NOTE(review): the loop above never breaks, so this tail is currently
+ // unreachable — confirm whether a termination condition is intended
+ pthread_mutex_lock(&send_mutex);
+ total_bytes_sent += bytes_sent;
+ pthread_mutex_unlock(&send_mutex);
+
+ free(events);
+ close(ep_fd);
+