/*
 * arbitrary precision integers
 * Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * arbitrary precision integers
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#include <string.h>

#include "avassert.h"
#include "common.h"
#include "integer.h"

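/* An AVInteger is an array of AV_INTEGER_SIZE 16-bit limbs, least significant
 * limb first (see integer.h). Addition works limb by limb, carrying through
 * the upper half of the 32-bit accumulator. */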
AVInteger av_add_i(AVInteger a, AVInteger b){
    int i, carry=0;

    for(i=0; i<AV_INTEGER_SIZE; i++){
        carry= (carry>>16) + a.v[i] + b.v[i];
        a.v[i]= carry;
    }
    return a;
}

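/* Subtraction mirrors av_add_i; a negative accumulator propagates the borrow
 * through the arithmetic shift in (carry>>16). */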
AVInteger av_sub_i(AVInteger a, AVInteger b){
    int i, carry=0;

    for(i=0; i<AV_INTEGER_SIZE; i++){
        carry= (carry>>16) + a.v[i] - b.v[i];
        a.v[i]= carry;
    }
    return a;
}

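/* Position of the highest set bit, or -1 if the value is zero: find the top
 * non-zero limb and locate the bit within it. */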
int av_log2_i(AVInteger a){
    int i;

    for(i=AV_INTEGER_SIZE-1; i>=0; i--){
        if(a.v[i])
            return av_log2_16bit(a.v[i]) + 16*i;
    }
    return -1;
}

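/* Schoolbook multiplication over 16-bit limbs; na and nb limit the loops to
 * the limbs that are actually in use, and the product is truncated to
 * AV_INTEGER_SIZE limbs. */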
AVInteger av_mul_i(AVInteger a, AVInteger b){
    AVInteger out;
    int i, j;
    int na= (av_log2_i(a)+16) >> 4;
    int nb= (av_log2_i(b)+16) >> 4;

    memset(&out, 0, sizeof(out));

    for(i=0; i<na; i++){
        unsigned int carry=0;

        if(a.v[i])
            for(j=i; j<AV_INTEGER_SIZE && j-i<=nb; j++){
                carry= (carry>>16) + out.v[j] + a.v[i]*b.v[j-i];
                out.v[j]= carry;
            }
    }

    return out;
}

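/* Signed comparison: the top limb is compared as int16_t, the lower limbs as
 * unsigned; the result is negative, zero or positive. */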
int av_cmp_i(AVInteger a, AVInteger b){
    int i;
    int v= (int16_t)a.v[AV_INTEGER_SIZE-1] - (int16_t)b.v[AV_INTEGER_SIZE-1];
    if(v) return (v>>16)|1;

    for(i=AV_INTEGER_SIZE-2; i>=0; i--){
        int v= a.v[i] - b.v[i];
        if(v) return (v>>16)|1;
    }
    return 0;
}

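/* Shift right by s bits; a negative s shifts left (this is relied on by
 * av_mod_i below). */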
AVInteger av_shr_i(AVInteger a, int s){
    AVInteger out;
    int i;

    for(i=0; i<AV_INTEGER_SIZE; i++){
        unsigned int index= i + (s>>4);
        unsigned int v=0;
        if(index+1<AV_INTEGER_SIZE) v = a.v[index+1]<<16;
        if(index  <AV_INTEGER_SIZE) v+= a.v[index  ];
        out.v[i]= v >> (s&15);
    }
    return out;
}

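/* Binary long division: b is first shifted up to the magnitude of a, then one
 * quotient bit is produced per shift-and-subtract step. Returns the remainder
 * and, if quot is non-NULL, stores the quotient there. Both operands must be
 * non-negative and b must be non-zero. */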
AVInteger av_mod_i(AVInteger *quot, AVInteger a, AVInteger b){
    int i= av_log2_i(a) - av_log2_i(b);
    AVInteger quot_temp;
    if(!quot) quot = &quot_temp;

    av_assert2((int16_t)a.v[AV_INTEGER_SIZE-1] >= 0 && (int16_t)b.v[AV_INTEGER_SIZE-1] >= 0);
    av_assert2(av_log2_i(b)>=0);

    if(i > 0)
        b= av_shr_i(b, -i);

    memset(quot, 0, sizeof(AVInteger));

    while(i-- >= 0){
        *quot= av_shr_i(*quot, -1);
        if(av_cmp_i(a, b) >= 0){
            a= av_sub_i(a, b);
            quot->v[0] += 1;
        }
        b= av_shr_i(b, 1);
    }
    return a;
}

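/* Quotient only: a thin wrapper around av_mod_i(). */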
AVInteger av_div_i(AVInteger a, AVInteger b){
    AVInteger quot;
    av_mod_i(&quot, a, b);
    return quot;
}

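/* Convert an int64_t to an AVInteger, 16 bits per limb, least significant
 * limb first; the sign is extended into the upper limbs. */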
AVInteger av_int2i(int64_t a){
    AVInteger out;
    int i;

    for(i=0; i<AV_INTEGER_SIZE; i++){
        out.v[i]= a;
        a>>=16;
    }
    return out;
}

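/* Convert back to int64_t; values that do not fit keep only their least
 * significant 64 bits. */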
int64_t av_i2int(AVInteger a){
    int i;
    int64_t out= (int16_t)a.v[AV_INTEGER_SIZE-1];

    for(i= AV_INTEGER_SIZE-2; i>=0; i--){
        out = (out<<16) + a.v[i];
    }
    return out;
}

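/* Self-test, built only when TEST is defined: every operation is checked
 * against native 64-bit arithmetic. A local copy of ff_log2_tab is provided
 * so that av_log2_16bit() resolves in the standalone test binary. */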
#ifdef TEST

const uint8_t ff_log2_tab[256]={
        0,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
        5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
        6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
        6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
        7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
        7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
        7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
        7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
};

int main(void){
    int64_t a,b;

    for(a=7; a<256*256*256; a+=13215){
        for(b=3; b<256*256*256; b+=27118){
            AVInteger ai= av_int2i(a);
            AVInteger bi= av_int2i(b);

            av_assert0(av_i2int(ai) == a);
            av_assert0(av_i2int(bi) == b);
            av_assert0(av_i2int(av_add_i(ai,bi)) == a+b);
            av_assert0(av_i2int(av_sub_i(ai,bi)) == a-b);
            av_assert0(av_i2int(av_mul_i(ai,bi)) == a*b);
            av_assert0(av_i2int(av_shr_i(ai, 9)) == a>>9);
            av_assert0(av_i2int(av_shr_i(ai,-9)) == a<<9);
            av_assert0(av_i2int(av_shr_i(ai, 17)) == a>>17);
            av_assert0(av_i2int(av_shr_i(ai,-17)) == a<<17);
            av_assert0(av_log2_i(ai) == av_log2(a));
            av_assert0(av_i2int(av_div_i(ai,bi)) == a/b);
        }
    }
    return 0;
}
#endif /* TEST */