/*
 * MPEG-4 decoder / encoder common code
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2010 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/thread.h"

#include "mpegutils.h"
#include "mpegvideo.h"
#include "mpeg4video.h"
#include "mpeg4data.h"

static av_cold void mpeg4_init_rl_intra(void)
{
    static uint8_t mpeg4_rl_intra_table[2][2 * MAX_RUN + MAX_LEVEL + 3];
    ff_rl_init(&ff_mpeg4_rl_intra, mpeg4_rl_intra_table);
}

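/**
 * Initialize the static MPEG-4 intra run-level (RL) tables exactly once;
 * ff_thread_once() makes the initialization safe to call from multiple threads.
 */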
av_cold void ff_mpeg4_init_rl_intra(void)
{
    static AVOnce init_static_once = AV_ONCE_INIT;
    ff_thread_once(&init_static_once, mpeg4_init_rl_intra);
}

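/**
 * Return the length of the zero-bit prefix of the video packet resync marker
 * for the current picture type: 16 for I pictures, f_code + 15 for P and S
 * pictures, FFMAX3(f_code, b_code, 2) + 15 for B pictures, or -1 for an
 * unknown picture type.
 */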
int ff_mpeg4_get_video_packet_prefix_length(MpegEncContext *s)
{
    switch (s->pict_type) {
    case AV_PICTURE_TYPE_I:
        return 16;
    case AV_PICTURE_TYPE_P:
    case AV_PICTURE_TYPE_S:
        return s->f_code + 15;
    case AV_PICTURE_TYPE_B:
        return FFMAX3(s->f_code, s->b_code, 2) + 15;
    default:
        return -1;
    }
}

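/**
 * Reset the AC prediction values around the current macroblock position and
 * the last motion vector predictors; intended for video packet (resync)
 * boundaries, where prediction must not continue across the packet border.
 */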
void ff_mpeg4_clean_buffers(MpegEncContext *s)
{
    int c_wrap, c_xy, l_wrap, l_xy;

    l_wrap = s->b8_stride;
    l_xy   = (2 * s->mb_y - 1) * l_wrap + s->mb_x * 2 - 1;
    c_wrap = s->mb_stride;
    c_xy   = (s->mb_y - 1) * c_wrap + s->mb_x - 1;

    /* clean AC */
    memset(s->ac_val[0] + l_xy, 0, (l_wrap * 2 + 1) * 16 * sizeof(int16_t));
    memset(s->ac_val[1] + c_xy, 0, (c_wrap     + 1) * 16 * sizeof(int16_t));
    memset(s->ac_val[2] + c_xy, 0, (c_wrap     + 1) * 16 * sizeof(int16_t));

    /* clean MV */
    // we can't clear the MVs as they might be needed by a B-frame
    s->last_mv[0][0][0] =
    s->last_mv[0][0][1] =
    s->last_mv[1][0][0] =
    s->last_mv[1][0][1] = 0;
}

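/* Parameters of the direct_scale_mv lookup table: tab_bias recenters the
 * table so that negative co-located MV components can be used as indices. */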
#define tab_size ((signed)FF_ARRAY_ELEMS(s->direct_scale_mv[0]))
#define tab_bias (tab_size / 2)

// used by MPEG-4 and rv10 decoder
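/**
 * Precompute the direct-mode scaling tables: for each possible co-located MV
 * component, direct_scale_mv[0] holds the forward-scaled value
 * (pb_time / pp_time) and direct_scale_mv[1] the backward-scaled value
 * ((pb_time - pp_time) / pp_time), so that the per-vector divisions can
 * usually be replaced by table lookups.
 */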
void ff_mpeg4_init_direct_mv(MpegEncContext *s)
{
    int i;
    for (i = 0; i < tab_size; i++) {
        s->direct_scale_mv[0][i] = (i - tab_bias) * s->pb_time / s->pp_time;
        s->direct_scale_mv[1][i] = (i - tab_bias) * (s->pb_time - s->pp_time) /
                                   s->pp_time;
    }
}

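/**
 * Compute the forward (list 0) and backward (list 1) motion vectors of block i
 * in a direct-mode macroblock from the co-located vector of the next
 * P picture, scaled by the temporal distances pb_time / pp_time and offset by
 * the delta vector (mx, my). The precomputed tables are used when the
 * co-located vector is within range; otherwise the scaling is done with
 * divisions.
 */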
static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx,
                                              int my, int i)
{
    int xy           = s->block_index[i];
    uint16_t time_pp = s->pp_time;
    uint16_t time_pb = s->pb_time;
    int p_mx, p_my;

    p_mx = s->next_picture.motion_val[0][xy][0];
    if ((unsigned)(p_mx + tab_bias) < tab_size) {
        s->mv[0][i][0] = s->direct_scale_mv[0][p_mx + tab_bias] + mx;
        s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx
                            : s->direct_scale_mv[1][p_mx + tab_bias];
    } else {
        s->mv[0][i][0] = p_mx * time_pb / time_pp + mx;
        s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx
                            : p_mx * (time_pb - time_pp) / time_pp;
    }
    p_my = s->next_picture.motion_val[0][xy][1];
    if ((unsigned)(p_my + tab_bias) < tab_size) {
        s->mv[0][i][1] = s->direct_scale_mv[0][p_my + tab_bias] + my;
        s->mv[1][i][1] = my ? s->mv[0][i][1] - p_my
                            : s->direct_scale_mv[1][p_my + tab_bias];
    } else {
        s->mv[0][i][1] = p_my * time_pb / time_pp + my;
        s->mv[1][i][1] = my ? s->mv[0][i][1] - p_my
                            : p_my * (time_pb - time_pp) / time_pp;
    }
}

#undef tab_size
#undef tab_bias

/**
 * Set the direct-mode motion vectors of the current macroblock from the
 * co-located macroblock of the next picture, choosing 8x8, field or 16x16
 * vectors depending on the co-located macroblock type.
 * @return the mb_type
 */
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
{
    const int mb_index          = s->mb_x + s->mb_y * s->mb_stride;
    const int colocated_mb_type = s->next_picture.mb_type[mb_index];
    uint16_t time_pp;
    uint16_t time_pb;
    int i;

    // FIXME avoid divides
    // try special case with shifts for 1 and 3 B-frames?

    if (IS_8X8(colocated_mb_type)) {
        s->mv_type = MV_TYPE_8X8;
        for (i = 0; i < 4; i++)
            ff_mpeg4_set_one_direct_mv(s, mx, my, i);
        return MB_TYPE_DIRECT2 | MB_TYPE_8x8 | MB_TYPE_L0L1;
    } else if (IS_INTERLACED(colocated_mb_type)) {
        s->mv_type = MV_TYPE_FIELD;
        for (i = 0; i < 2; i++) {
            int field_select = s->next_picture.ref_index[0][4 * mb_index + 2 * i];
            s->field_select[0][i] = field_select;
            s->field_select[1][i] = i;
            if (s->top_field_first) {
                time_pp = s->pp_field_time - field_select + i;
                time_pb = s->pb_field_time - field_select + i;
            } else {
                time_pp = s->pp_field_time + field_select - i;
                time_pb = s->pb_field_time + field_select - i;
            }
            s->mv[0][i][0] = s->p_field_mv_table[i][0][mb_index][0] *
                             time_pb / time_pp + mx;
            s->mv[0][i][1] = s->p_field_mv_table[i][0][mb_index][1] *
                             time_pb / time_pp + my;
            s->mv[1][i][0] = mx ? s->mv[0][i][0] -
                                  s->p_field_mv_table[i][0][mb_index][0]
                                : s->p_field_mv_table[i][0][mb_index][0] *
                                  (time_pb - time_pp) / time_pp;
            s->mv[1][i][1] = my ? s->mv[0][i][1] -
                                  s->p_field_mv_table[i][0][mb_index][1]
                                : s->p_field_mv_table[i][0][mb_index][1] *
                                  (time_pb - time_pp) / time_pp;
        }
        return MB_TYPE_DIRECT2 | MB_TYPE_16x8 |
               MB_TYPE_L0L1    | MB_TYPE_INTERLACED;
    } else {
        ff_mpeg4_set_one_direct_mv(s, mx, my, 0);
        s->mv[0][1][0] =
        s->mv[0][2][0] =
        s->mv[0][3][0] = s->mv[0][0][0];
        s->mv[0][1][1] =
        s->mv[0][2][1] =
        s->mv[0][3][1] = s->mv[0][0][1];
        s->mv[1][1][0] =
        s->mv[1][2][0] =
        s->mv[1][3][0] = s->mv[1][0][0];
        s->mv[1][1][1] =
        s->mv[1][2][1] =
        s->mv[1][3][1] = s->mv[1][0][1];
        if ((s->avctx->workaround_bugs & FF_BUG_DIRECT_BLOCKSIZE) ||
            !s->quarter_sample)
            s->mv_type = MV_TYPE_16X16;
        else
            s->mv_type = MV_TYPE_8X8;
        // Note: the mb_type returned below reports 16x16 even when mv_type
        // was set to MV_TYPE_8X8 on the previous lines.
        return MB_TYPE_DIRECT2 | MB_TYPE_16x16 | MB_TYPE_L0L1;
    }
}