/*
 * Compute the Adler-32 checksum of a data stream.
 * This is a modified version based on adler32.c from the zlib library.
 *
 * Copyright (C) 1995 Mark Adler
 *
 * This software is provided 'as-is', without any express or implied
 * warranty. In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software. If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 */

/**
 * @file
 * Computes the Adler-32 checksum of a data stream
 *
 * This is a modified version based on adler32.c from the zlib library.
 *
 * @ingroup lavu_adler32
 */
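
/*
 * Usage sketch (illustrative only; 'data' and 'size' are assumed names):
 * the checksum can be computed incrementally over arbitrary chunks, with
 * 1 as the conventional starting value.
 *
 *     AVAdler sum = 1;
 *     sum = av_adler32_update(sum, data, size / 2);
 *     sum = av_adler32_update(sum, data + size / 2, size - size / 2);
 *     // sum now equals av_adler32_update(1, data, size)
 */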

#include "config.h"
#include "adler32.h"
#include "common.h"        /* FFMIN() */
#include "intreadwrite.h"  /* AV_RN64() */

#define BASE 65521L /* largest prime smaller than 65536 */

#define DO1(buf)  { s1 += *buf++; s2 += s1; } /* add one byte to the running sums */
#define DO4(buf)  DO1(buf); DO1(buf); DO1(buf); DO1(buf);
#define DO16(buf) DO4(buf); DO4(buf); DO4(buf); DO4(buf);
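
/*
 * For reference (RFC 1950): over bytes D1..Dn,
 *     s1 = (1 + D1 + ... + Dn)                   mod 65521
 *     s2 = ((s1 after D1) + ... + (s1 after Dn)) mod 65521
 *     Adler-32 = (s2 << 16) | s1
 * e.g. for "abc" (97, 98, 99): s1 = 295, s2 = 98 + 196 + 295 = 589,
 * i.e. 0x024D0127.
 */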

#if FF_API_CRYPTO_SIZE_T
unsigned long av_adler32_update(unsigned long adler, const uint8_t * buf,
                                unsigned int len)
#else
AVAdler av_adler32_update(AVAdler adler, const uint8_t *buf, size_t len)
#endif
{
    unsigned long s1 = adler & 0xffff;
    unsigned long s2 = adler >> 16;

    while (len > 0) {
#if HAVE_FAST_64BIT && HAVE_FAST_UNALIGNED && !CONFIG_SMALL
        unsigned len2 = FFMIN((len - 1) & ~7, 23 * 8);
        if (len2) {
            uint64_t a1 = 0, a2 = 0, b1 = 0, b2 = 0;
            len -= len2;
            s2  += s1 * len2;
            while (len2 >= 8) {
                uint64_t v = AV_RN64(buf);
                a2 += a1;
                b2 += b1;
                a1 +=  v       & 0x00FF00FF00FF00FF;
                b1 += (v >> 8) & 0x00FF00FF00FF00FF;
                len2 -= 8;
                buf  += 8;
            }
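
            /*
             * At this point a1/b1 hold, in four 16-bit lanes each, the sums
             * of the low and high bytes of every 16-bit lane of the 8-byte
             * words read above (which of the two corresponds to even stream
             * offsets depends on endianness, hence the HAVE_BIGENDIAN split
             * below), while a2/b2 hold their running prefix sums, needed for
             * s2. The 23*8 cap on len2 appears chosen so no lane overflows:
             * a prefix-sum lane is at most 255 * (22*23/2) = 64515 < 65536.
             */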
            // Combine the 8 interleaved Adler-32 checksums without overflow.
            // Decreasing the number of iterations would allow the code below
            // to be simplified, but would likely be slower due to the fewer
            // iterations of the inner loop.
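            /*
             * The multiply-and-shift expressions below are lane dot products:
             * for x = l0 + (l1<<16) + (l2<<32) + (l3<<48) with lanes small
             * enough not to carry, (x*0x1000100010001)>>48 equals
             * l0+l1+l2+l3, and e.g. (x*0x4000300020001)>>48 equals
             * 4*l0 + 3*l1 + 2*l2 + l3, i.e. the lanes weighted by their
             * position in the byte stream.
             */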
            s1 += ((a1 + b1) * 0x1000100010001) >> 48;
            s2 += ((((a2 & 0xFFFF0000FFFF) + (b2 & 0xFFFF0000FFFF) + ((a2 >> 16) & 0xFFFF0000FFFF) + ((b2 >> 16) & 0xFFFF0000FFFF)) * 0x800000008) >> 32)
#if HAVE_BIGENDIAN
                  + 2 * ((b1 * 0x1000200030004) >> 48)
                  +     ((a1 * 0x1000100010001) >> 48)
                  + 2 * ((a1 * 0x0000100020003) >> 48);
#else
                  + 2 * ((a1 * 0x4000300020001) >> 48)
                  +     ((b1 * 0x1000100010001) >> 48)
                  + 2 * ((b1 * 0x3000200010000) >> 48);
#endif
        }
#else
        /* Process 4 bytes at a time, deferring the expensive modulo for as
         * long as s2 cannot overflow. */
        while (len > 4 && s2 < (1U << 31)) {
            DO4(buf);
            len -= 4;
        }
#endif
        DO1(buf); len--;
        s1 %= BASE;
        s2 %= BASE;
    }
    return (s2 << 16) | s1;
}
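
/*
 * Illustrative scalar reference (a sketch, not part of libavutil): the
 * optimized loop above computes the same value as this byte-at-a-time
 * formulation.
 *
 *     static AVAdler adler32_ref(AVAdler adler, const uint8_t *buf, size_t len)
 *     {
 *         unsigned long s1 = adler & 0xffff, s2 = adler >> 16;
 *         while (len--) {
 *             s1 = (s1 + *buf++) % 65521;
 *             s2 = (s2 + s1)     % 65521;
 *         }
 *         return (s2 << 16) | s1;
 *     }
 */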