*/
#include <stdlib.h>
#include <stdio.h>
+#include <string.h>
#include <math.h>
+#ifndef M_PI
+#define M_PI 3.1415926535897931
+#endif
+
#include "avformat.h"
/* 5 seconds stream duration */
-#define STREAM_DURATION 5.0
-
+#define STREAM_DURATION 5.0
+#define STREAM_FRAME_RATE 25 /* 25 images/s */
+#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
+#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */
/**************************************************************/
/* audio output */
exit(1);
}
- c = &st->codec;
+ c = st->codec;
c->codec_id = codec_id;
c->codec_type = CODEC_TYPE_AUDIO;
AVCodecContext *c;
AVCodec *codec;
- c = &st->codec;
+ c = st->codec;
/* find the audio encoder */
codec = avcodec_find_encoder(c->codec_id);
support to compute the input frame size in samples */
if (c->frame_size <= 1) {
audio_input_frame_size = audio_outbuf_size / c->channels;
- switch(st->codec.codec_id) {
+ switch(st->codec->codec_id) {
case CODEC_ID_PCM_S16LE:
case CODEC_ID_PCM_S16BE:
case CODEC_ID_PCM_U16LE:
samples = malloc(audio_input_frame_size * 2 * c->channels);
}
+/* prepare a 16-bit dummy audio frame of 'frame_size' samples and
+ 'nb_channels' channels */
+void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
+{
+ int j, i, v;
+ int16_t *q;
+
+ q = samples;
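+ /* t, tincr and tincr2 are file-scope variables set when the audio
+ codec is opened: the current sine phase, its per-sample increment,
+ and a slow frequency sweep */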
+ for(j=0;j<frame_size;j++) {
+ v = (int)(sin(t) * 10000);
+ for(i = 0; i < nb_channels; i++)
+ *q++ = v;
+ t += tincr;
+ tincr += tincr2;
+ }
+}
+
void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
- int j, out_size;
AVCodecContext *c;
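+ /* the encoded data is now handed to av_write_frame() inside an
+ AVPacket; av_init_packet() only resets the metadata fields, so
+ pkt.data and pkt.size are filled in explicitly below */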
+ AVPacket pkt;
+ av_init_packet(&pkt);
+
+ c = st->codec;
+ get_audio_frame(samples, audio_input_frame_size, c->channels);
- c = &st->codec;
+ pkt.size= avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);
- for(j=0;j<audio_input_frame_size;j++) {
- samples[2*j] = (int)(sin(t) * 10000);
- samples[2*j+1] = samples[2*j];
- t += tincr;
- tincr += tincr2;
- }
-
- out_size = avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);
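+ /* the encoder produces timestamps in the codec time base; rescale
+ them to the stream time base before muxing */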
+ pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
+ pkt.flags |= PKT_FLAG_KEY;
+ pkt.stream_index= st->index;
+ pkt.data= audio_outbuf;
/* write the compressed frame to the media file */
- if (av_write_frame(oc, st->index, audio_outbuf, out_size) != 0) {
+ if (av_write_frame(oc, &pkt) != 0) {
fprintf(stderr, "Error while writing audio frame\n");
exit(1);
}
void close_audio(AVFormatContext *oc, AVStream *st)
{
- avcodec_close(&st->codec);
+ avcodec_close(st->codec);
av_free(samples);
av_free(audio_outbuf);
exit(1);
}
- c = &st->codec;
+ c = st->codec;
c->codec_id = codec_id;
c->codec_type = CODEC_TYPE_VIDEO;
/* resolution must be a multiple of two */
c->width = 352;
c->height = 288;
- /* frames per second */
- c->frame_rate = 25;
- c->frame_rate_base= 1;
- c->gop_size = 12; /* emit one intra frame every twelve frames */
-
+ /* time base: this is the fundamental unit of time (in seconds) in terms
+ of which frame timestamps are represented. For fixed-fps content, the
+ time base should be 1/framerate and the timestamp increments should be
+ identically 1. */
+ c->time_base.den = STREAM_FRAME_RATE;
+ c->time_base.num = 1;
+ c->gop_size = 12; /* emit one intra frame every twelve frames at most */
+ c->pix_fmt = STREAM_PIX_FMT;
+ if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
+ /* just for testing, we also add B frames */
+ c->max_b_frames = 2;
+ }
+ if (c->codec_id == CODEC_ID_MPEG1VIDEO){
+ /* needed to avoid using macroblocks in which some coefficients overflow.
+ This does not happen with normal video, it just happens here as the
+ motion of the chroma plane does not match the luma plane. */
+ c->mb_decision=2;
+ }
+ // some formats want stream headers to be separate
+ if(!strcmp(oc->oformat->name, "mp4") || !strcmp(oc->oformat->name, "mov") || !strcmp(oc->oformat->name, "3gp"))
+ c->flags |= CODEC_FLAG_GLOBAL_HEADER;
+
return st;
}
AVCodec *codec;
AVCodecContext *c;
- c = &st->codec;
+ c = st->codec;
/* find the video encoder */
codec = avcodec_find_encoder(c->codec_id);
{
int out_size, ret;
AVCodecContext *c;
-
- c = &st->codec;
- if (c->pix_fmt != PIX_FMT_YUV420P) {
- /* as we only generate a YUV420P picture, we must convert it
- to the codec pixel format if needed */
- fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
- img_convert((AVPicture *)picture, c->pix_fmt,
- (AVPicture *)tmp_picture, PIX_FMT_YUV420P,
- c->width, c->height);
+ c = st->codec;
+
+ if (frame_count >= STREAM_NB_FRAMES) {
+ /* no more frames to compress. The codec has a latency of a few
+ frames if using B frames, so we get the last frames by
+ passing the same picture again */
} else {
- fill_yuv_image(picture, frame_count, c->width, c->height);
+ if (c->pix_fmt != PIX_FMT_YUV420P) {
+ /* as we only generate a YUV420P picture, we must convert it
+ to the codec pixel format if needed */
+ fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
+ img_convert((AVPicture *)picture, c->pix_fmt,
+ (AVPicture *)tmp_picture, PIX_FMT_YUV420P,
+ c->width, c->height);
+ } else {
+ fill_yuv_image(picture, frame_count, c->width, c->height);
+ }
}
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
/* raw video case. The API will change slightly in the near
future for that */
- ret = av_write_frame(oc, st->index,
- (uint8_t *)picture, sizeof(AVPicture));
+ AVPacket pkt;
+ av_init_packet(&pkt);
+
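+ /* a raw picture is always a key frame */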
+ pkt.flags |= PKT_FLAG_KEY;
+ pkt.stream_index= st->index;
+ pkt.data= (uint8_t *)picture;
+ pkt.size= sizeof(AVPicture);
+
+ ret = av_write_frame(oc, &pkt);
} else {
/* encode the image */
out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
-
- /* write the compressed frame in the media file */
- ret = av_write_frame(oc, st->index, video_outbuf, out_size);
+ /* if zero size, it means the image was buffered */
+ if (out_size > 0) {
+ AVPacket pkt;
+ av_init_packet(&pkt);
+
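+ /* as with audio, rescale the pts from the codec time base to the
+ stream time base */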
+ pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
+ if(c->coded_frame->key_frame)
+ pkt.flags |= PKT_FLAG_KEY;
+ pkt.stream_index= st->index;
+ pkt.data= video_outbuf;
+ pkt.size= out_size;
+
+ /* write the compressed frame to the media file */
+ ret = av_write_frame(oc, &pkt);
+ } else {
+ ret = 0;
+ }
}
if (ret != 0) {
fprintf(stderr, "Error while writing video frame\n");
void close_video(AVFormatContext *oc, AVStream *st)
{
- avcodec_close(&st->codec);
+ avcodec_close(st->codec);
av_free(picture->data[0]);
av_free(picture);
if (tmp_picture) {
}
/* allocate the output media context */
- oc = av_mallocz(sizeof(AVFormatContext));
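+ /* av_alloc_format_context() allocates the context and also sets its
+ fields to their default values */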
+ oc = av_alloc_format_context();
if (!oc) {
fprintf(stderr, "Memory error\n");
exit(1);
for(;;) {
/* compute current audio and video time */
if (audio_st)
- audio_pts = (double)audio_st->pts.val * oc->pts_num / oc->pts_den;
+ audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
else
audio_pts = 0.0;
if (video_st)
- video_pts = (double)video_st->pts.val * oc->pts_num / oc->pts_den;
+ video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
else
video_pts = 0.0;
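+ /* whichever stream has the smaller pts is written next, keeping the
+ audio and video packets interleaved */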