* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+
+/**
+ * @file utils.c
+ * Miscellaneous utility functions for libavcodec: memory helpers,
+ * default codec callbacks and rational-number arithmetic.
+ */
+
#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
/**
 * Allocate a block of size bytes and clear it to zero.
 *
 * @param size number of bytes to allocate
 * @return pointer to the zeroed block, or NULL on allocation failure
 */
void *av_mallocz(unsigned int size)
{
    void *ptr;

    ptr = av_malloc(size);
    if (!ptr)
        return NULL;
    /* BUG FIX: the "z" in mallocz promises zeroed memory; without this
       memset callers (e.g. code that tests freshly allocated struct
       fields against NULL/0) read uninitialized garbage. */
    memset(ptr, 0, size);
    return ptr;
}
/**
 * Duplicate the string s into freshly allocated memory.
 *
 * @param s string to copy; may be NULL
 * @return newly allocated copy (release with av_free()), or NULL if s
 *         is NULL or the allocation fails
 */
char *av_strdup(const char *s)
{
    char *ptr;
    int len;

    /* BUG FIX: strlen(NULL) is undefined behavior; mirror the usual
       av_strdup contract and return NULL for NULL input. */
    if (!s)
        return NULL;

    len = strlen(s) + 1;        /* include the terminating NUL */
    ptr = av_malloc(len);
    if (!ptr)
        return NULL;
    memcpy(ptr, s, len);
    return ptr;
}
+
/**
 * Realloc which does nothing if the block is already large enough.
 *
 * @param ptr      current block (may be NULL)
 * @param size     in/out: currently allocated size; updated on growth
 * @param min_size minimum size the caller needs
 * @return the (possibly moved) block, or NULL on allocation failure
 *
 * NOTE(review): on av_realloc() failure the old block is leaked if the
 * caller overwrites its only pointer with the NULL return value —
 * callers must keep a copy of ptr if they need to recover.
 */
void *av_fast_realloc(void *ptr, unsigned int *size, unsigned int min_size)
{
    /* BUG FIX: use <= — a block of exactly min_size bytes already
       suffices; the original '<' forced a needless realloc in that case. */
    if(min_size <= *size)
        return ptr;

    /* over-allocate a little to amortize repeated small growths */
    *size= min_size + 10*1024;

    return av_realloc(ptr, *size);
}
+
+
+/* allocation of static arrays - do not use for normal allocation */
+static unsigned int last_static = 0;
+static char*** array_static = NULL;
+static const unsigned int grow_static = 64; // ^2
+void *__av_mallocz_static(void** location, unsigned int size)
+{
+ unsigned int l = (last_static + grow_static) & ~(grow_static - 1);
+ void *ptr = av_mallocz(size);
+ if (!ptr)
+ return NULL;
+
+ if (location)
+ {
+ if (l > last_static)
+ array_static = av_realloc(array_static, l);
+ array_static[last_static++] = (char**) location;
+ *location = ptr;
+ }
+ return ptr;
+}
+/* free all static arrays and reset pointers to 0 */
+void av_free_static()
+{
+ if (array_static)
+ {
+ unsigned i;
+ for (i = 0; i < last_static; i++)
+ {
+ av_free(*array_static[i]);
+ *array_static[i] = NULL;
+ }
+ av_free(array_static);
+ array_static = 0;
+ }
+ last_static = 0;
+}
+
/* cannot call it directly because of 'void **' casting is not automatic */
void __av_freep(void **ptr)
{
format->next = NULL;
}
/* Bookkeeping the default get_buffer() keeps in AVFrame.opaque so the
   planes allocated for a frame can be reused and its age computed. */
+typedef struct DefaultPicOpaque{
+    int last_pic_num;
+    uint8_t *data[4];
+}DefaultPicOpaque;
+
/* Default AVCodecContext.get_buffer() implementation.
   Reuse path: when pic->opaque is already set, restore the previously
   allocated data pointers and derive pic->age from the picture numbers.
   First-use path: allocate gray-initialized planes, rounded up to an
   alignment of 63 (SVQ1) or 15 bytes, padded by EDGE_WIDTH on each side
   unless CODEC_FLAG_EMU_EDGE is set.
   Returns 0 on success, -1 on allocation failure.
   NOTE(review): on a mid-loop allocation failure the planes already
   allocated and the opaque struct are not freed here — confirm callers
   treat -1 as fatal and tear the context down. */
+int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
+    int i;
+    const int width = s->width;
+    const int height= s->height;
+    DefaultPicOpaque *opaque;
+
+    assert(pic->data[0]==NULL);
+    assert(pic->type==0 || pic->type==FF_TYPE_INTERNAL);
+
+    if(pic->opaque){
+        opaque= (DefaultPicOpaque *)pic->opaque;
+        for(i=0; i<3; i++)
+            pic->data[i]= opaque->data[i];
+
+//    printf("get_buffer %X coded_pic_num:%d last:%d\n", pic->opaque, pic->coded_picture_number, opaque->last_pic_num);
+        pic->age= pic->coded_picture_number - opaque->last_pic_num;
+        opaque->last_pic_num= pic->coded_picture_number;
+//printf("age: %d %d %d\n", pic->age, c->picture_number, pic->coded_picture_number);
+    }else{
+        int align, h_chroma_shift, v_chroma_shift;
+        int w, h, pixel_size;
+
+        avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
+
/* bytes per pixel of plane 0 for the packed formats; planar YUV is 1 */
+        switch(s->pix_fmt){
+        case PIX_FMT_YUV422:
+            pixel_size=2;
+            break;
+        case PIX_FMT_RGB24:
+        case PIX_FMT_BGR24:
+            pixel_size=3;
+            break;
+        case PIX_FMT_RGBA32:
+            pixel_size=4;
+            break;
+        default:
+            pixel_size=1;
+        }
+
+        if(s->codec_id==CODEC_ID_SVQ1) align=63;
+        else align=15;
+
+        w= (width +align)&~align;
+        h= (height+align)&~align;
+
+        if(!(s->flags&CODEC_FLAG_EMU_EDGE)){
+            w+= EDGE_WIDTH*2;
+            h+= EDGE_WIDTH*2;
+        }
+
+        opaque= av_mallocz(sizeof(DefaultPicOpaque));
+        if(opaque==NULL) return -1;
+
+        pic->opaque= opaque;
/* large negative sentinel so the first reuse yields a huge age */
+        opaque->last_pic_num= -256*256*256*64;
+
+        for(i=0; i<3; i++){
+            int h_shift= i==0 ? 0 : h_chroma_shift;
+            int v_shift= i==0 ? 0 : v_chroma_shift;
+
+            pic->linesize[i]= pixel_size*w>>h_shift;
+
+            pic->base[i]= av_mallocz((pic->linesize[i]*h>>v_shift)+16); //FIXME 16
+            if(pic->base[i]==NULL) return -1;
+
/* fill with mid-gray so uninitialized areas are visually neutral */
+            memset(pic->base[i], 128, pic->linesize[i]*h>>v_shift);
+
+            if(s->flags&CODEC_FLAG_EMU_EDGE)
+                pic->data[i] = pic->base[i];
+            else
+                pic->data[i] = pic->base[i] + (pic->linesize[i]*EDGE_WIDTH>>v_shift) + (EDGE_WIDTH>>h_shift);
+
+            opaque->data[i]= pic->data[i];
+        }
+        pic->age= 256*256*256*64;
+        pic->type= FF_BUFFER_TYPE_INTERNAL;
+    }
+
+    return 0;
+}
+
+void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic){
+ int i;
+
+ assert(pic->type==FF_BUFFER_TYPE_INTERNAL);
+
+ for(i=0; i<3; i++)
+ pic->data[i]=NULL;
+//printf("R%X\n", pic->opaque);
+}
+
+enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, enum PixelFormat * fmt){
+ return fmt[0];
+}
+
/* Fill an AVCodecContext with library default values: rate control,
   quantizer limits, frame rate, motion estimation and the default
   buffer/format callbacks. */
void avcodec_get_context_defaults(AVCodecContext *s){
    s->bit_rate= 800*1000;
    s->bit_rate_tolerance= s->bit_rate*10;
    s->qmin= 2;
    s->qmax= 31;
+    s->mb_qmin= 2;
+    s->mb_qmax= 31;
    s->rc_eq= "tex^qComp";
    s->qcompress= 0.5;
    s->max_qdiff= 3;
    s->error_concealment= 3;
    s->error_resilience= 1;
    s->workaround_bugs= FF_BUG_AUTODETECT;
-    s->frame_rate = 25 * FRAME_RATE_BASE;
/* frame rate is now expressed as the rational frame_rate/frame_rate_base */
+    s->frame_rate_base= 1;
+    s->frame_rate = 25;
    s->gop_size= 50;
    s->me_method= ME_EPZS;
+    s->get_buffer= avcodec_default_get_buffer;
+    s->release_buffer= avcodec_default_release_buffer;
+    s->get_format= avcodec_default_get_format;
+    s->me_subpel_quality=8;
}
/**
return avctx;
}
+/**
+ * Allocates an AVFrame and sets all fields to their default (zero) state.
+ * NOTE(review): the memory comes from av_mallocz()/av_malloc(), so it
+ * should presumably be released with av_free() rather than plain
+ * free() — confirm against av_malloc's implementation.
+ */
+AVFrame *avcodec_alloc_frame(void){
+    AVFrame *pic= av_mallocz(sizeof(AVFrame));
+
+    return pic;
+}
+
int avcodec_open(AVCodecContext *avctx, AVCodec *codec)
{
int ret;
avctx->codec = codec;
+ avctx->codec_id = codec->id;
avctx->frame_number = 0;
if (codec->priv_data_size > 0) {
avctx->priv_data = av_mallocz(codec->priv_data_size);
return 0;
}
-int avcodec_encode_audio(AVCodecContext *avctx, UINT8 *buf, int buf_size,
+int avcodec_encode_audio(AVCodecContext *avctx, uint8_t *buf, int buf_size,
const short *samples)
{
int ret;
return ret;
}
-int avcodec_encode_video(AVCodecContext *avctx, UINT8 *buf, int buf_size,
- const AVPicture *pict)
+int avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size,
+ const AVFrame *pict)
{
int ret;
/* decode a frame. return -1 if error, otherwise return the number of
bytes used. If no frame could be decompressed, *got_picture_ptr is
zero. Otherwise, it is non zero */
-int avcodec_decode_video(AVCodecContext *avctx, AVPicture *picture,
+int avcodec_decode_video(AVCodecContext *avctx, AVFrame *picture,
int *got_picture_ptr,
- UINT8 *buf, int buf_size)
+ uint8_t *buf, int buf_size)
{
int ret;
-
+
ret = avctx->codec->decode(avctx, picture, got_picture_ptr,
buf, buf_size);
emms_c(); //needed to avoid a emms_c() call before every return;
-
+
if (*got_picture_ptr)
avctx->frame_number++;
return ret;
*number of bytes used. If no frame could be decompressed,
*frame_size_ptr is zero. Otherwise, it is the decompressed frame
*size in BYTES. */
-int avcodec_decode_audio(AVCodecContext *avctx, INT16 *samples,
+int avcodec_decode_audio(AVCodecContext *avctx, int16_t *samples,
int *frame_size_ptr,
- UINT8 *buf, int buf_size)
+ uint8_t *buf, int buf_size)
{
int ret;
return NULL;
}
-const char *pix_fmt_str[] = {
- "yuv420p",
- "yuv422",
- "rgb24",
- "bgr24",
- "yuv422p",
- "yuv444p",
- "rgba32",
- "bgra32",
- "yuv410p",
- "yuv411p",
-};
-
void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode)
{
const char *codec_name;
if (enc->codec_id == CODEC_ID_RAWVIDEO) {
snprintf(buf + strlen(buf), buf_size - strlen(buf),
", %s",
- pix_fmt_str[enc->pix_fmt]);
+ avcodec_get_pix_fmt_name(enc->pix_fmt));
}
if (enc->width) {
snprintf(buf + strlen(buf), buf_size - strlen(buf),
", %dx%d, %0.2f fps",
enc->width, enc->height,
- (float)enc->frame_rate / FRAME_RATE_BASE);
+ (float)enc->frame_rate / enc->frame_rate_base);
}
if (encode) {
snprintf(buf + strlen(buf), buf_size - strlen(buf),
}
}
-/* Picture field are filled with 'ptr' addresses */
-void avpicture_fill(AVPicture *picture, UINT8 *ptr,
- int pix_fmt, int width, int height)
-{
- int size;
-
- size = width * height;
- switch(pix_fmt) {
- case PIX_FMT_YUV420P:
- picture->data[0] = ptr;
- picture->data[1] = picture->data[0] + size;
- picture->data[2] = picture->data[1] + size / 4;
- picture->linesize[0] = width;
- picture->linesize[1] = width / 2;
- picture->linesize[2] = width / 2;
- break;
- case PIX_FMT_YUV422P:
- picture->data[0] = ptr;
- picture->data[1] = picture->data[0] + size;
- picture->data[2] = picture->data[1] + size / 2;
- picture->linesize[0] = width;
- picture->linesize[1] = width / 2;
- picture->linesize[2] = width / 2;
- break;
- case PIX_FMT_YUV444P:
- picture->data[0] = ptr;
- picture->data[1] = picture->data[0] + size;
- picture->data[2] = picture->data[1] + size;
- picture->linesize[0] = width;
- picture->linesize[1] = width;
- picture->linesize[2] = width;
- break;
- case PIX_FMT_RGB24:
- case PIX_FMT_BGR24:
- picture->data[0] = ptr;
- picture->data[1] = NULL;
- picture->data[2] = NULL;
- picture->linesize[0] = width * 3;
- break;
- case PIX_FMT_RGBA32:
- case PIX_FMT_BGRA32:
- picture->data[0] = ptr;
- picture->data[1] = NULL;
- picture->data[2] = NULL;
- picture->linesize[0] = width * 4;
- break;
- case PIX_FMT_YUV422:
- picture->data[0] = ptr;
- picture->data[1] = NULL;
- picture->data[2] = NULL;
- picture->linesize[0] = width * 2;
- break;
- default:
- picture->data[0] = NULL;
- picture->data[1] = NULL;
- picture->data[2] = NULL;
- break;
- }
-}
-
-int avpicture_get_size(int pix_fmt, int width, int height)
-{
- int size;
-
- size = width * height;
- switch(pix_fmt) {
- case PIX_FMT_YUV420P:
- size = (size * 3) / 2;
- break;
- case PIX_FMT_YUV422P:
- size = (size * 2);
- break;
- case PIX_FMT_YUV444P:
- size = (size * 3);
- break;
- case PIX_FMT_RGB24:
- case PIX_FMT_BGR24:
- size = (size * 3);
- break;
- case PIX_FMT_RGBA32:
- case PIX_FMT_BGRA32:
- size = (size * 4);
- break;
- case PIX_FMT_YUV422:
- size = (size * 2);
- break;
- default:
- size = -1;
- break;
- }
- return size;
-}
-
unsigned avcodec_version( void )
{
return LIBAVCODEC_VERSION_INT;
//dsputil_init();
}
-/* this should be called after seeking and before trying to decode the next frame */
+/* this can be called after seeking and before trying to decode the next keyframe */
void avcodec_flush_buffers(AVCodecContext *avctx)
{
+    int i;
    MpegEncContext *s = avctx->priv_data;
-    s->num_available_buffers=0;
+
/* Only the MPEG-family codecs keep reference pictures in an
   MpegEncContext; release every internal/user picture buffer so
   decoding can restart cleanly at the next keyframe. */
+    switch(avctx->codec_id){
+    case CODEC_ID_MPEG1VIDEO:
+    case CODEC_ID_H263:
+    case CODEC_ID_RV10:
+    case CODEC_ID_MJPEG:
+    case CODEC_ID_MJPEGB:
+    case CODEC_ID_MPEG4:
+    case CODEC_ID_MSMPEG4V1:
+    case CODEC_ID_MSMPEG4V2:
+    case CODEC_ID_MSMPEG4V3:
+    case CODEC_ID_WMV1:
+    case CODEC_ID_WMV2:
+    case CODEC_ID_H263P:
+    case CODEC_ID_H263I:
+    case CODEC_ID_SVQ1:
+        for(i=0; i<MAX_PICTURE_COUNT; i++){
+            if(s->picture[i].data[0] && (   s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
+                                         || s->picture[i].type == FF_BUFFER_TYPE_USER))
+                avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
+        }
+        s->last_picture_ptr = s->next_picture_ptr = NULL;
+        break;
+    default:
+        //FIXME
+        break;
+    }
}
/* Reduce the fraction nom/den to lowest terms, clamping numerator and
   denominator to at most max (with rounding when clamping is needed).
   Returns 1 if the result is exact, 0 if it had to be approximated. */
+int av_reduce(int *dst_nom, int *dst_den, int64_t nom, int64_t den, int64_t max){
+    int exact=1, sign=0;
+    int64_t gcd, larger;
-static int raw_encode_init(AVCodecContext *s)
-{
-    return 0;
-}
+    assert(den != 0);
-static int raw_decode_frame(AVCodecContext *avctx,
-                            void *data, int *data_size,
-                            UINT8 *buf, int buf_size)
-{
-    return -1;
/* normalize so den > 0 and nom >= 0, remembering the overall sign */
+    if(den < 0){
+        den= -den;
+        nom= -nom;
+    }
+
+    if(nom < 0){
+        nom= -nom;
+        sign= 1;
+    }
+
+    for(;;){ //note is executed 1 or 2 times
+        gcd = ff_gcd(nom, den);
+        nom /= gcd;
+        den /= gcd;
+
+        larger= FFMAX(nom, den);
+
+        if(larger > max){
/* scale both terms down by the same rounded divisor, then re-reduce */
+            int64_t div= (larger + max - 1) / max;
+            nom = (nom + div/2)/div;
+            den = (den + div/2)/div;
+            exact=0;
+        }else
+            break;
+    }
+
+    if(sign) nom= -nom;
+
+    *dst_nom = nom;
+    *dst_den = den;
+
+    return exact;
}
-static int raw_encode_frame(AVCodecContext *avctx,
- unsigned char *frame, int buf_size, void *data)
-{
- return -1;
-}
/* Rescale a by b/c (i.e. compute a*b/c) while avoiding overflow of the
   intermediate product a*b by splitting a into 32-bit halves.
   NOTE(review): assumes the final a*b/c fits in 64 bits — confirm for
   callers with very large timestamps. */
+int64_t av_rescale(int64_t a, int b, int c){
+    uint64_t h, l;
+    assert(c > 0);
+    assert(b >=0);
+
/* reduce the signed case to the unsigned one */
+    if(a<0) return -av_rescale(-a, b, c);
+
/* split a into high and low 32-bit halves */
+    h= a>>32;
+    if(h==0) return a*b/c;
+
+    l= a&0xFFFFFFFF;
+    l *= b;
+    h *= b;
+
/* fold the remainder of the high half's division into the low half */
+    l += (h%c)<<32;
-AVCodec rawvideo_codec = {
-    "rawvideo",
-    CODEC_TYPE_VIDEO,
-    CODEC_ID_RAWVIDEO,
-    0,
-    raw_encode_init,
-    raw_encode_frame,
-    NULL,
-    raw_decode_frame,
-};
+    return ((h/c)<<32) + l/c;
+}