#ifndef AVUTIL_INTREADWRITE_H
#define AVUTIL_INTREADWRITE_H
#include <stdint.h>
-#include "config.h"
+#include "libavutil/avconfig.h"
+#include "attributes.h"
#include "bswap.h"
-#include "common.h"
typedef union {
uint64_t u64;
/*
 * Arch-specific headers can provide any combination of
 * AV_[RW][BLN](16|24|32|64) and AV_(COPY|SWAP|ZERO)(64|128) macros.
 * Preprocessor symbols must be defined, even if these are implemented
 * as inline functions.
 */
+#ifdef HAVE_AV_CONFIG_H
+
+#include "config.h"
+
#if ARCH_ARM
# include "arm/intreadwrite.h"
#elif ARCH_AVR32
# include "avr32/intreadwrite.h"
#elif ARCH_MIPS
# include "mips/intreadwrite.h"
#elif ARCH_PPC
# include "ppc/intreadwrite.h"
+#elif ARCH_TOMI
+# include "tomi/intreadwrite.h"
#elif ARCH_X86
# include "x86/intreadwrite.h"
#endif
+#endif /* HAVE_AV_CONFIG_H */
+
/*
* Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers.
*/
-#if HAVE_BIGENDIAN
+#if AV_HAVE_BIGENDIAN
# if defined(AV_RN16) && !defined(AV_RB16)
# define AV_RB16(p) AV_RN16(p)
# endif
# if defined(AV_WB64) && !defined(AV_WN64)
# define AV_WN64(p, v) AV_WB64(p, v)
# endif
-#else /* HAVE_BIGENDIAN */
+#else /* AV_HAVE_BIGENDIAN */
# if defined(AV_RN16) && !defined(AV_RL16)
# define AV_RL16(p) AV_RN16(p)
# endif
# if defined(AV_WL64) && !defined(AV_WN64)
# define AV_WN64(p, v) AV_WL64(p, v)
# endif
-#endif /* !HAVE_BIGENDIAN */
+#endif /* !AV_HAVE_BIGENDIAN */
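+
+/*
+ * Illustrative sketch, not part of the header proper: with the mapping
+ * above, a little-endian target whose per-arch header provides only
+ * AV_RN16 gets AV_RL16 for free, while AV_RB16 is synthesized further
+ * down via a byte swap. Either way the result is endian-independent:
+ *
+ *     uint8_t buf[2] = { 0x34, 0x12 };
+ *     uint16_t le = AV_RL16(buf);    // le == 0x1234 on any host
+ *     uint16_t be = AV_RB16(buf);    // be == 0x3412 on any host
+ */
+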
/*
* Define AV_[RW]N helper macros to simplify definitions not provided
* by per-arch headers.
*/
-#if HAVE_ATTRIBUTE_PACKED
+#if defined(__GNUC__) && !defined(__TI_COMPILER_VERSION__)
union unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias;
union unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias;
union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias;
# define AV_RN(s, p) (((const union unaligned_##s *) (p))->l)
# define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v))
#elif defined(__DECC)
# define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p)))
# define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v))
-#elif HAVE_FAST_UNALIGNED
+#elif AV_HAVE_FAST_UNALIGNED
# define AV_RN(s, p) (((const av_alias##s*)(p))->u##s)
-# define AV_WN(s, p, v) (((uint##s##_t*)(p))->u##s = (v))
+# define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v))
#else
} while(0)
#endif
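+
+/*
+ * For reference, a sketch of the shape the byte-access fallback takes
+ * on this path (illustrative; sizes 16 through 64 follow the same
+ * pattern):
+ *
+ *     #define AV_WB16(p, d) do {               \
+ *         ((uint8_t*)(p))[1] = (d);            \
+ *         ((uint8_t*)(p))[0] = (d)>>8;         \
+ *     } while(0)
+ *
+ * The do { } while(0) wrapper turns each multi-statement write macro
+ * into a single statement, so it nests safely in unbraced if/else.
+ */
+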
-#if HAVE_BIGENDIAN
+#if AV_HAVE_BIGENDIAN
# define AV_RN(s, p) AV_RB##s(p)
# define AV_WN(s, p, v) AV_WB##s(p, v)
#else
# define AV_RN(s, p) AV_RL##s(p)
# define AV_WN(s, p, v) AV_WL##s(p, v)
#endif
#ifndef AV_WN64
# define AV_WN64(p, v) AV_WN(64, p, v)
#endif
-#if HAVE_BIGENDIAN
+#if AV_HAVE_BIGENDIAN
# define AV_RB(s, p) AV_RN##s(p)
# define AV_WB(s, p, v) AV_WN##s(p, v)
-# define AV_RL(s, p) bswap_##s(AV_RN##s(p))
-# define AV_WL(s, p, v) AV_WN##s(p, bswap_##s(v))
+# define AV_RL(s, p) av_bswap##s(AV_RN##s(p))
+# define AV_WL(s, p, v) AV_WN##s(p, av_bswap##s(v))
#else
-# define AV_RB(s, p) bswap_##s(AV_RN##s(p))
-# define AV_WB(s, p, v) AV_WN##s(p, bswap_##s(v))
+# define AV_RB(s, p) av_bswap##s(AV_RN##s(p))
+# define AV_WB(s, p, v) AV_WN##s(p, av_bswap##s(v))
# define AV_RL(s, p) AV_RN##s(p)
# define AV_WL(s, p, v) AV_WN##s(p, v)
#endif
} while(0)
#endif
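+
+/*
+ * Illustrative expansion, not part of the header proper: on a
+ * little-endian build that takes the fast-unaligned path above,
+ * AV_RB32(p) becomes av_bswap32(((const av_alias32*)(p))->u32),
+ * i.e. one unaligned load plus one byte swap:
+ *
+ *     uint8_t buf[4] = { 0x12, 0x34, 0x56, 0x78 };
+ *     uint32_t v = AV_RB32(buf);     // v == 0x12345678
+ */
+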
+/*
+ * The AV_[RW]NA macros access naturally aligned data
+ * in a type-safe way.
+ */
+
+#define AV_RNA(s, p) (((const av_alias##s*)(p))->u##s)
+#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v))
+
+#ifndef AV_RN16A
+# define AV_RN16A(p) AV_RNA(16, p)
+#endif
+
+#ifndef AV_RN32A
+# define AV_RN32A(p) AV_RNA(32, p)
+#endif
+
+#ifndef AV_RN64A
+# define AV_RN64A(p) AV_RNA(64, p)
+#endif
+
+#ifndef AV_WN16A
+# define AV_WN16A(p, v) AV_WNA(16, p, v)
+#endif
+
+#ifndef AV_WN32A
+# define AV_WN32A(p, v) AV_WNA(32, p, v)
+#endif
+
+#ifndef AV_WN64A
+# define AV_WN64A(p, v) AV_WNA(64, p, v)
+#endif
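+
+/*
+ * Usage sketch, illustrative only: the aligned variants go through the
+ * av_alias unions, so the compiler may assume natural alignment while
+ * the access still respects strict aliasing:
+ *
+ *     uint32_t buf[2];               // naturally aligned storage
+ *     AV_WN32A(buf, 0xdeadbeefu);    // single aligned 32-bit store
+ *     uint32_t v = AV_RN32A(buf);    // single aligned 32-bit load
+ */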
+
/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be
 * naturally aligned. They may be implemented using MMX,
 * so emms_c() must be called before using any float code
 * afterwards.
 */
#define AV_COPY(n, d, s) \
(((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n)
+#ifndef AV_COPY16
+# define AV_COPY16(d, s) AV_COPY(16, d, s)
+#endif
+
#ifndef AV_COPY32
# define AV_COPY32(d, s) AV_COPY(32, d, s)
#endif
#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0)
+#ifndef AV_ZERO16
+# define AV_ZERO16(d) AV_ZERO(16, d)
+#endif
+
#ifndef AV_ZERO32
# define AV_ZERO32(d) AV_ZERO(32, d)
#endif
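+
+/*
+ * Usage sketch, illustrative only: copying and clearing small,
+ * naturally aligned blocks. If a per-arch MMX implementation of these
+ * macros is in effect, call emms_c() before any following float code:
+ *
+ *     uint32_t a = 0x01020304u, b;
+ *     AV_COPY32(&b, &a);             // b = a in one 32-bit move
+ *     AV_ZERO32(&a);                 // a = 0
+ */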