From bf0436a85beddbb10af28daeba2008fbf331e45c Mon Sep 17 00:00:00 2001
From: wheremyfoodat
Date: Wed, 7 Jun 2023 16:59:46 +0300
Subject: [PATCH] Add CityHash

---
 CMakeLists.txt                    |   2 +
 third_party/cityhash/cityhash.cpp | 340 ++++++++++++++
 third_party/cityhash/cityhash.hpp | 111 +++++
 third_party/cityhash/swap.hpp     | 718 ++++++++++++++++++++++++++++++
 4 files changed, 1171 insertions(+)
 create mode 100644 third_party/cityhash/cityhash.cpp
 create mode 100644 third_party/cityhash/cityhash.hpp
 create mode 100644 third_party/cityhash/swap.hpp

diff --git a/CMakeLists.txt b/CMakeLists.txt
index c6dd4c53..369ae0e4 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -128,6 +128,8 @@ set(THIRD_PARTY_SOURCE_FILES
     third_party/imgui/imgui.cpp
     third_party/imgui/imgui_widgets.cpp
     third_party/imgui/imgui_demo.cpp
     third_party/gl3w/gl3w.cpp
+
+    third_party/cityhash/cityhash.cpp
 )
 source_group("Header Files\\Core" FILES ${HEADER_FILES})
diff --git a/third_party/cityhash/cityhash.cpp b/third_party/cityhash/cityhash.cpp
new file mode 100644
index 00000000..a9d5406b
--- /dev/null
+++ b/third_party/cityhash/cityhash.cpp
@@ -0,0 +1,340 @@
+// Copyright (c) 2011 Google, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+// CityHash, by Geoff Pike and Jyrki Alakuijala
+//
+// This file provides CityHash64() and related functions.
+//
+// It's probably possible to create even faster hash functions by
+// writing a program that systematically explores some of the space of
+// possible hash functions, by using SIMD instructions, or by
+// compromising on hash quality.
+
+#include <algorithm>
+#include <cstring>  // for memcpy and memset
+#include "cityhash.hpp"
+#include "swap.hpp"
+
+// #include "config.h"
+#ifdef __GNUC__
+#define HAVE_BUILTIN_EXPECT 1
+#endif
+#ifdef COMMON_BIG_ENDIAN
+#define WORDS_BIGENDIAN 1
+#endif
+
+using namespace std;
+
+typedef uint8_t uint8;
+typedef uint32_t uint32;
+typedef uint64_t uint64;
+
+namespace CityHash {
+
+static uint64 UNALIGNED_LOAD64(const char* p) {
+    uint64 result;
+    memcpy(&result, p, sizeof(result));
+    return result;
+}
+
+static uint32 UNALIGNED_LOAD32(const char* p) {
+    uint32 result;
+    memcpy(&result, p, sizeof(result));
+    return result;
+}
+
+#ifdef WORDS_BIGENDIAN
+#define uint32_in_expected_order(x) (swap32(x))
+#define uint64_in_expected_order(x) (swap64(x))
+#else
+#define uint32_in_expected_order(x) (x)
+#define uint64_in_expected_order(x) (x)
+#endif
+
+#if !defined(LIKELY)
+#if HAVE_BUILTIN_EXPECT
+#define LIKELY(x) (__builtin_expect(!!(x), 1))
+#else
+#define LIKELY(x) (x)
+#endif
+#endif
+
+static uint64 Fetch64(const char* p) {
+    return uint64_in_expected_order(UNALIGNED_LOAD64(p));
+}
+
+static uint32 Fetch32(const char* p) {
+    return uint32_in_expected_order(UNALIGNED_LOAD32(p));
+}
+
+// Some primes between 2^63 and 2^64 for various uses.
+static const uint64 k0 = 0xc3a5c85c97cb3127ULL;
+static const uint64 k1 = 0xb492b66fbe98f273ULL;
+static const uint64 k2 = 0x9ae16a3b2f90404fULL;
+
+// Bitwise right rotate. Normally this will compile to a single
+// instruction, especially if the shift is a manifest constant.
+static uint64 Rotate(uint64 val, int shift) {
+    // Avoid shifting by 64: doing so yields an undefined result.
+    return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
+}
+
+static uint64 ShiftMix(uint64 val) {
+    return val ^ (val >> 47);
+}
+
+static uint64 HashLen16(uint64 u, uint64 v) {
+    return Hash128to64(uint128(u, v));
+}
+
+static uint64 HashLen16(uint64 u, uint64 v, uint64 mul) {
+    // Murmur-inspired hashing.
+    uint64 a = (u ^ v) * mul;
+    a ^= (a >> 47);
+    uint64 b = (v ^ a) * mul;
+    b ^= (b >> 47);
+    b *= mul;
+    return b;
+}
+
+static uint64 HashLen0to16(const char* s, std::size_t len) {
+    if (len >= 8) {
+        uint64 mul = k2 + len * 2;
+        uint64 a = Fetch64(s) + k2;
+        uint64 b = Fetch64(s + len - 8);
+        uint64 c = Rotate(b, 37) * mul + a;
+        uint64 d = (Rotate(a, 25) + b) * mul;
+        return HashLen16(c, d, mul);
+    }
+    if (len >= 4) {
+        uint64 mul = k2 + len * 2;
+        uint64 a = Fetch32(s);
+        return HashLen16(len + (a << 3), Fetch32(s + len - 4), mul);
+    }
+    if (len > 0) {
+        uint8 a = s[0];
+        uint8 b = s[len >> 1];
+        uint8 c = s[len - 1];
+        uint32 y = static_cast<uint32>(a) + (static_cast<uint32>(b) << 8);
+        uint32 z = static_cast<uint32>(len) + (static_cast<uint32>(c) << 2);
+        return ShiftMix(y * k2 ^ z * k0) * k2;
+    }
+    return k2;
+}
+
+// This probably works well for 16-byte strings as well, but it may be overkill
+// in that case.
+static uint64 HashLen17to32(const char* s, std::size_t len) {
+    uint64 mul = k2 + len * 2;
+    uint64 a = Fetch64(s) * k1;
+    uint64 b = Fetch64(s + 8);
+    uint64 c = Fetch64(s + len - 8) * mul;
+    uint64 d = Fetch64(s + len - 16) * k2;
+    return HashLen16(Rotate(a + b, 43) + Rotate(c, 30) + d, a + Rotate(b + k2, 18) + c, mul);
+}
+
+// Return a 16-byte hash for 48 bytes. Quick and dirty.
+// Callers do best to use "random-looking" values for a and b.
+static pair<uint64, uint64> WeakHashLen32WithSeeds(uint64 w, uint64 x, uint64 y, uint64 z, uint64 a,
+                                                   uint64 b) {
+    a += w;
+    b = Rotate(b + a + z, 21);
+    uint64 c = a;
+    a += x;
+    a += y;
+    b += Rotate(a, 44);
+    return make_pair(a + z, b + c);
+}
+
+// Return a 16-byte hash for s[0] ... s[31], a, and b. Quick and dirty.
+static pair<uint64, uint64> WeakHashLen32WithSeeds(const char* s, uint64 a, uint64 b) {
+    return WeakHashLen32WithSeeds(Fetch64(s), Fetch64(s + 8), Fetch64(s + 16), Fetch64(s + 24), a,
+                                  b);
+}
+
+// Return an 8-byte hash for 33 to 64 bytes.
+static uint64 HashLen33to64(const char* s, std::size_t len) {
+    uint64 mul = k2 + len * 2;
+    uint64 a = Fetch64(s) * k2;
+    uint64 b = Fetch64(s + 8);
+    uint64 c = Fetch64(s + len - 24);
+    uint64 d = Fetch64(s + len - 32);
+    uint64 e = Fetch64(s + 16) * k2;
+    uint64 f = Fetch64(s + 24) * 9;
+    uint64 g = Fetch64(s + len - 8);
+    uint64 h = Fetch64(s + len - 16) * mul;
+    uint64 u = Rotate(a + g, 43) + (Rotate(b, 30) + c) * 9;
+    uint64 v = ((a + g) ^ d) + f + 1;
+    uint64 w = Common::swap64((u + v) * mul) + h;
+    uint64 x = Rotate(e + f, 42) + c;
+    uint64 y = (Common::swap64((v + w) * mul) + g) * mul;
+    uint64 z = e + f + c;
+    a = Common::swap64((x + z) * mul + y) + b;
+    b = ShiftMix((z + a) * mul + d + h) * mul;
+    return b + x;
+}
+
+uint64 CityHash64(const char* s, std::size_t len) {
+    if (len <= 32) {
+        if (len <= 16) {
+            return HashLen0to16(s, len);
+        } else {
+            return HashLen17to32(s, len);
+        }
+    } else if (len <= 64) {
+        return HashLen33to64(s, len);
+    }
+
+    // For strings over 64 bytes we hash the end first, and then as we
+    // loop we keep 56 bytes of state: v, w, x, y, and z.
+    uint64 x = Fetch64(s + len - 40);
+    uint64 y = Fetch64(s + len - 16) + Fetch64(s + len - 56);
+    uint64 z = HashLen16(Fetch64(s + len - 48) + len, Fetch64(s + len - 24));
+    pair<uint64, uint64> v = WeakHashLen32WithSeeds(s + len - 64, len, z);
+    pair<uint64, uint64> w = WeakHashLen32WithSeeds(s + len - 32, y + k1, x);
+    x = x * k1 + Fetch64(s);
+
+    // Decrease len to the nearest multiple of 64, and operate on 64-byte chunks.
+    len = (len - 1) & ~static_cast<std::size_t>(63);
+    do {
+        x = Rotate(x + y + v.first + Fetch64(s + 8), 37) * k1;
+        y = Rotate(y + v.second + Fetch64(s + 48), 42) * k1;
+        x ^= w.second;
+        y += v.first + Fetch64(s + 40);
+        z = Rotate(z + w.first, 33) * k1;
+        v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first);
+        w = WeakHashLen32WithSeeds(s + 32, z + w.second, y + Fetch64(s + 16));
+        std::swap(z, x);
+        s += 64;
+        len -= 64;
+    } while (len != 0);
+    return HashLen16(HashLen16(v.first, w.first) + ShiftMix(y) * k1 + z,
+                     HashLen16(v.second, w.second) + x);
+}
+
+uint64 CityHash64WithSeed(const char* s, std::size_t len, uint64 seed) {
+    return CityHash64WithSeeds(s, len, k2, seed);
+}
+
+uint64 CityHash64WithSeeds(const char* s, std::size_t len, uint64 seed0, uint64 seed1) {
+    return HashLen16(CityHash64(s, len) - seed0, seed1);
+}
+
+// A subroutine for CityHash128(). Returns a decent 128-bit hash for strings
+// of any length representable in signed long. Based on City and Murmur.
+static uint128 CityMurmur(const char* s, std::size_t len, uint128 seed) {
+    uint64 a = Uint128Low64(seed);
+    uint64 b = Uint128High64(seed);
+    uint64 c = 0;
+    uint64 d = 0;
+    signed long l = static_cast<signed long>(len) - 16;
+    if (l <= 0) { // len <= 16
+        a = ShiftMix(a * k1) * k1;
+        c = b * k1 + HashLen0to16(s, len);
+        d = ShiftMix(a + (len >= 8 ? Fetch64(s) : c));
+    } else { // len > 16
+        c = HashLen16(Fetch64(s + len - 8) + k1, a);
+        d = HashLen16(b + len, c + Fetch64(s + len - 16));
+        a += d;
+        do {
+            a ^= ShiftMix(Fetch64(s) * k1) * k1;
+            a *= k1;
+            b ^= a;
+            c ^= ShiftMix(Fetch64(s + 8) * k1) * k1;
+            c *= k1;
+            d ^= c;
+            s += 16;
+            l -= 16;
+        } while (l > 0);
+    }
+    a = HashLen16(a, c);
+    b = HashLen16(d, b);
+    return uint128(a ^ b, HashLen16(b, a));
+}
+
+uint128 CityHash128WithSeed(const char* s, std::size_t len, uint128 seed) {
+    if (len < 128) {
+        return CityMurmur(s, len, seed);
+    }
+
+    // We expect len >= 128 to be the common case. Keep 56 bytes of state:
+    // v, w, x, y, and z.
+    pair<uint64, uint64> v, w;
+    uint64 x = Uint128Low64(seed);
+    uint64 y = Uint128High64(seed);
+    uint64 z = len * k1;
+    v.first = Rotate(y ^ k1, 49) * k1 + Fetch64(s);
+    v.second = Rotate(v.first, 42) * k1 + Fetch64(s + 8);
+    w.first = Rotate(y + z, 35) * k1 + x;
+    w.second = Rotate(x + Fetch64(s + 88), 53) * k1;
+
+    // This is the same inner loop as CityHash64(), manually unrolled.
+    do {
+        x = Rotate(x + y + v.first + Fetch64(s + 8), 37) * k1;
+        y = Rotate(y + v.second + Fetch64(s + 48), 42) * k1;
+        x ^= w.second;
+        y += v.first + Fetch64(s + 40);
+        z = Rotate(z + w.first, 33) * k1;
+        v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first);
+        w = WeakHashLen32WithSeeds(s + 32, z + w.second, y + Fetch64(s + 16));
+        std::swap(z, x);
+        s += 64;
+        x = Rotate(x + y + v.first + Fetch64(s + 8), 37) * k1;
+        y = Rotate(y + v.second + Fetch64(s + 48), 42) * k1;
+        x ^= w.second;
+        y += v.first + Fetch64(s + 40);
+        z = Rotate(z + w.first, 33) * k1;
+        v = WeakHashLen32WithSeeds(s, v.second * k1, x + w.first);
+        w = WeakHashLen32WithSeeds(s + 32, z + w.second, y + Fetch64(s + 16));
+        std::swap(z, x);
+        s += 64;
+        len -= 128;
+    } while (LIKELY(len >= 128));
+    x += Rotate(v.first + z, 49) * k0;
+    y = y * k0 + Rotate(w.second, 37);
+    z = z * k0 + Rotate(w.first, 27);
+    w.first *= 9;
+    v.first *= k0;
+    // If 0 < len < 128, hash up to 4 chunks of 32 bytes each from the end of s.
+    for (std::size_t tail_done = 0; tail_done < len;) {
+        tail_done += 32;
+        y = Rotate(x + y, 42) * k0 + v.second;
+        w.first += Fetch64(s + len - tail_done + 16);
+        x = x * k0 + w.first;
+        z += w.second + Fetch64(s + len - tail_done);
+        w.second += v.first;
+        v = WeakHashLen32WithSeeds(s + len - tail_done, v.first + z, v.second);
+        v.first *= k0;
+    }
+    // At this point our 56 bytes of state should contain more than
+    // enough information for a strong 128-bit hash. We use two
+    // different 56-byte-to-8-byte hashes to get a 16-byte final result.
+    x = HashLen16(x, v.first);
+    y = HashLen16(y + z, w.first);
+    return uint128(HashLen16(x + v.second, w.second) + y, HashLen16(x + w.second, y + v.second));
+}
+
+uint128 CityHash128(const char* s, std::size_t len) {
+    return len >= 16
+               ? CityHash128WithSeed(s + 16, len - 16, uint128(Fetch64(s), Fetch64(s + 8) + k0))
+               : CityHash128WithSeed(s, len, uint128(k0, k1));
+}
+
+} // namespace CityHash
\ No newline at end of file
diff --git a/third_party/cityhash/cityhash.hpp b/third_party/cityhash/cityhash.hpp
new file mode 100644
index 00000000..c27bb887
--- /dev/null
+++ b/third_party/cityhash/cityhash.hpp
@@ -0,0 +1,111 @@
+// Copyright (c) 2011 Google, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+// CityHash, by Geoff Pike and Jyrki Alakuijala
+//
+// http://code.google.com/p/cityhash/
+//
+// This file provides a few functions for hashing strings. All of them are
+// high-quality functions in the sense that they pass standard tests such
+// as Austin Appleby's SMHasher. They are also fast.
+//
+// For 64-bit x86 code, on short strings, we don't know of anything faster than
+// CityHash64 that is of comparable quality. We believe our nearest competitor
+// is Murmur3. For 64-bit x86 code, CityHash64 is an excellent choice for hash
+// tables and most other hashing (excluding cryptography).
+//
+// For 64-bit x86 code, on long strings, the picture is more complicated.
+// On many recent Intel CPUs, such as Nehalem, Westmere, Sandy Bridge, etc.,
+// CityHashCrc128 appears to be faster than all competitors of comparable
+// quality. CityHash128 is also good but not quite as fast. We believe our
+// nearest competitor is Bob Jenkins' Spooky. We don't have great data for
+// other 64-bit CPUs, but for long strings we know that Spooky is slightly
+// faster than CityHash on some relatively recent AMD x86-64 CPUs, for example.
+// Note that CityHashCrc128 is declared in citycrc.h.
+//
+// For 32-bit x86 code, we don't know of anything faster than CityHash32 that
+// is of comparable quality. We believe our nearest competitor is Murmur3A.
+// (On 64-bit CPUs, it is typically faster to use the other CityHash variants.)
+//
+// Functions in the CityHash family are not suitable for cryptography.
+//
+// Please see CityHash's README file for more details on our performance
+// measurements and so on.
+//
+// WARNING: This code has been only lightly tested on big-endian platforms!
+// It is known to work well on little-endian platforms that have a small penalty
+// for unaligned reads, such as current Intel and AMD moderate-to-high-end CPUs.
+// It should work on all 32-bit and 64-bit platforms that allow unaligned reads;
+// bug reports are welcome.
+//
+// By the way, for some hash functions, given strings a and b, the hash
+// of a+b is easily derived from the hashes of a and b. This property
+// doesn't hold for any hash functions in this file.
+
+#pragma once
+
+#include <cstddef>
+#include <cstdint>
+#include <utility>
+
+namespace CityHash {
+
+using uint128 = std::pair<uint64_t, uint64_t>;
+
+[[nodiscard]] inline uint64_t Uint128Low64(const uint128& x) {
+    return x.first;
+}
+[[nodiscard]] inline uint64_t Uint128High64(const uint128& x) {
+    return x.second;
+}
+
+// Hash function for a byte array.
+[[nodiscard]] uint64_t CityHash64(const char* buf, std::size_t len);
+
+// Hash function for a byte array. For convenience, a 64-bit seed is also
+// hashed into the result.
+[[nodiscard]] uint64_t CityHash64WithSeed(const char* buf, std::size_t len, uint64_t seed);
+
+// Hash function for a byte array. For convenience, two seeds are also
+// hashed into the result.
+[[nodiscard]] uint64_t CityHash64WithSeeds(const char* buf, std::size_t len, uint64_t seed0,
+                                           uint64_t seed1);
+
+// Hash function for a byte array.
+[[nodiscard]] uint128 CityHash128(const char* s, std::size_t len);
+
+// Hash function for a byte array. For convenience, a 128-bit seed is also
+// hashed into the result.
+[[nodiscard]] uint128 CityHash128WithSeed(const char* s, std::size_t len, uint128 seed);
+
+// Hash 128 input bits down to 64 bits of output.
+// This is intended to be a reasonably good hash function.
+[[nodiscard]] inline uint64_t Hash128to64(const uint128& x) {
+    // Murmur-inspired hashing.
+    const uint64_t kMul = 0x9ddfea08eb382d69ULL;
+    uint64_t a = (Uint128Low64(x) ^ Uint128High64(x)) * kMul;
+    a ^= (a >> 47);
+    uint64_t b = (Uint128High64(x) ^ a) * kMul;
+    b ^= (b >> 47);
+    b *= kMul;
+    return b;
+}
+
+} // namespace CityHash
\ No newline at end of file
diff --git a/third_party/cityhash/swap.hpp b/third_party/cityhash/swap.hpp
new file mode 100644
index 00000000..b7db9226
--- /dev/null
+++ b/third_party/cityhash/swap.hpp
@@ -0,0 +1,718 @@
+// Copyright (c) 2012- PPSSPP Project / Dolphin Project.
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, version 2.0 or later versions.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License 2.0 for more details.
+
+// A copy of the GPL 2.0 should have been included with the program.
+// If not, see http://www.gnu.org/licenses/
+
+// Official git repository and contact information can be found at
+// https://github.com/hrydgard/ppsspp and http://www.ppsspp.org/.
+
+#pragma once
+
+#include <type_traits>
+
+#if defined(_MSC_VER)
+#include <cstdlib>
+#endif
+#include <cstring>
+#include "helpers.hpp"
+
+// GCC
+#ifdef __GNUC__
+
+#if __BYTE_ORDER__ && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) && !defined(COMMON_LITTLE_ENDIAN)
+#define COMMON_LITTLE_ENDIAN 1
+#elif __BYTE_ORDER__ && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) && !defined(COMMON_BIG_ENDIAN)
+#define COMMON_BIG_ENDIAN 1
+#endif
+
+// LLVM/clang
+#elif defined(__clang__)
+
+#if __LITTLE_ENDIAN__ && !defined(COMMON_LITTLE_ENDIAN)
+#define COMMON_LITTLE_ENDIAN 1
+#elif __BIG_ENDIAN__ && !defined(COMMON_BIG_ENDIAN)
+#define COMMON_BIG_ENDIAN 1
+#endif
+
+// MSVC
+#elif defined(_MSC_VER) && !defined(COMMON_BIG_ENDIAN) && !defined(COMMON_LITTLE_ENDIAN)
+
+#define COMMON_LITTLE_ENDIAN 1
+#endif
+
+// Worst case, default to little endian.
+#if !COMMON_BIG_ENDIAN && !COMMON_LITTLE_ENDIAN
+#define COMMON_LITTLE_ENDIAN 1
+#endif
+
+namespace Common {
+
+#ifdef _MSC_VER
+[[nodiscard]] inline u16 swap16(u16 data) noexcept {
+    return _byteswap_ushort(data);
+}
+[[nodiscard]] inline u32 swap32(u32 data) noexcept {
+    return _byteswap_ulong(data);
+}
+[[nodiscard]] inline u64 swap64(u64 data) noexcept {
+    return _byteswap_uint64(data);
+}
+#elif defined(__clang__) || defined(__GNUC__)
+#if defined(__Bitrig__) || defined(__OpenBSD__)
+// redefine swap16, swap32, swap64 as inline functions
+#undef swap16
+#undef swap32
+#undef swap64
+#endif
+[[nodiscard]] inline u16 swap16(u16 data) noexcept {
+    return __builtin_bswap16(data);
+}
+[[nodiscard]] inline u32 swap32(u32 data) noexcept {
+    return __builtin_bswap32(data);
+}
+[[nodiscard]] inline u64 swap64(u64 data) noexcept {
+    return __builtin_bswap64(data);
+}
+#else
+// Generic implementation.
+[[nodiscard]] inline u16 swap16(u16 data) noexcept {
+    return (data >> 8) | (data << 8);
+}
+[[nodiscard]] inline u32 swap32(u32 data) noexcept {
+    return ((data & 0xFF000000U) >> 24) | ((data & 0x00FF0000U) >> 8) |
+           ((data & 0x0000FF00U) << 8) | ((data & 0x000000FFU) << 24);
+}
+[[nodiscard]] inline u64 swap64(u64 data) noexcept {
+    return ((data & 0xFF00000000000000ULL) >> 56) | ((data & 0x00FF000000000000ULL) >> 40) |
+           ((data & 0x0000FF0000000000ULL) >> 24) | ((data & 0x000000FF00000000ULL) >> 8) |
+           ((data & 0x00000000FF000000ULL) << 8) | ((data & 0x0000000000FF0000ULL) << 24) |
+           ((data & 0x000000000000FF00ULL) << 40) | ((data & 0x00000000000000FFULL) << 56);
+}
+#endif
+
+[[nodiscard]] inline float swapf(float f) noexcept {
+    static_assert(sizeof(u32) == sizeof(float), "float must be the same size as uint32_t.");
+
+    u32 value;
+    std::memcpy(&value, &f, sizeof(u32));
+
+    value = swap32(value);
+    std::memcpy(&f, &value, sizeof(u32));
+
+    return f;
+}
+
+[[nodiscard]] inline double swapd(double f) noexcept {
+    static_assert(sizeof(u64) == sizeof(double), "double must be the same size as uint64_t.");
+
+    u64 value;
+    std::memcpy(&value, &f, sizeof(u64));
+
+    value = swap64(value);
+    std::memcpy(&f, &value, sizeof(u64));
+
+    return f;
+}
+
+} // Namespace Common
+
+template <typename T, typename F>
+struct swap_struct_t {
+    using swapped_t = swap_struct_t;
+
+protected:
+    T value;
+
+    static T swap(T v) {
+        return F::swap(v);
+    }
+
+public:
+    T swap() const {
+        return swap(value);
+    }
+    swap_struct_t() = default;
+    swap_struct_t(const T& v) : value(swap(v)) {}
+
+    template <typename S>
+    swapped_t& operator=(const S& source) {
+        value = swap(static_cast<T>(source));
+        return *this;
+    }
+
+    operator s8() const {
+        return static_cast<s8>(swap());
+    }
+    operator u8() const {
+        return static_cast<u8>(swap());
+    }
+    operator s16() const {
+        return static_cast<s16>(swap());
+    }
+    operator u16() const {
+        return static_cast<u16>(swap());
+    }
+    operator s32() const {
+        return static_cast<s32>(swap());
+    }
+    operator u32() const {
+        return static_cast<u32>(swap());
+    }
+    operator s64() const {
+        return static_cast<s64>(swap());
+    }
+    operator u64() const {
+        return static_cast<u64>(swap());
+    }
+    operator float() const {
+        return static_cast<float>(swap());
+    }
+    operator double() const {
+        return static_cast<double>(swap());
+    }
+
+    // +v
+    swapped_t operator+() const {
+        return +swap();
+    }
+    // -v
+    swapped_t operator-() const {
+        return -swap();
+    }
+
+    // v / 5
+    swapped_t operator/(const swapped_t& i) const {
+        return swap() / i.swap();
+    }
+    template <typename S>
+    swapped_t operator/(const S& i) const {
+        return swap() / i;
+    }
+
+    // v * 5
+    swapped_t operator*(const swapped_t& i) const {
+        return swap() * i.swap();
+    }
+    template <typename S>
+    swapped_t operator*(const S& i) const {
+        return swap() * i;
+    }
+
+    // v + 5
+    swapped_t operator+(const swapped_t& i) const {
+        return swap() + i.swap();
+    }
+    template <typename S>
+    swapped_t operator+(const S& i) const {
+        return swap() + static_cast<T>(i);
+    }
+    // v - 5
+    swapped_t operator-(const swapped_t& i) const {
+        return swap() - i.swap();
+    }
+    template <typename S>
+    swapped_t operator-(const S& i) const {
+        return swap() - static_cast<T>(i);
+    }
+
+    // v += 5
+    swapped_t& operator+=(const swapped_t& i) {
+        value = swap(swap() + i.swap());
+        return *this;
+    }
+    template <typename S>
+    swapped_t& operator+=(const S& i) {
+        value = swap(swap() + static_cast<T>(i));
+        return *this;
+    }
+    // v -= 5
+    swapped_t& operator-=(const swapped_t& i) {
+        value = swap(swap() - i.swap());
+        return *this;
+    }
+    template <typename S>
+    swapped_t& operator-=(const S& i) {
+        value = swap(swap() - static_cast<T>(i));
+        return *this;
+    }
+
+    // ++v
+    swapped_t& operator++() {
+        value = swap(swap() + 1);
+        return *this;
+    }
+    // --v
+    swapped_t& operator--() {
+        value = swap(swap() - 1);
+        return *this;
+    }
+
+    // v++
+    swapped_t operator++(int) {
+        swapped_t old = *this;
+        value = swap(swap() + 1);
+        return old;
+    }
+    // v--
+    swapped_t operator--(int) {
+        swapped_t old = *this;
+        value = swap(swap() - 1);
+        return old;
+    }
+    // Comparison
+    // v == i
+    bool operator==(const swapped_t& i) const {
+        return swap() == i.swap();
+    }
+    template <typename S>
+    bool operator==(const S& i) const {
+        return swap() == i;
+    }
+
+    // v != i
+    bool operator!=(const swapped_t& i) const {
+        return swap() != i.swap();
+    }
+    template <typename S>
+    bool operator!=(const S& i) const {
+        return swap() != i;
+    }
+
+    // v > i
+    bool operator>(const swapped_t& i) const {
+        return swap() > i.swap();
+    }
+    template <typename S>
+    bool operator>(const S& i) const {
+        return swap() > i;
+    }
+
+    // v < i
+    bool operator<(const swapped_t& i) const {
+        return swap() < i.swap();
+    }
+    template <typename S>
+    bool operator<(const S& i) const {
+        return swap() < i;
+    }
+
+    // v >= i
+    bool operator>=(const swapped_t& i) const {
+        return swap() >= i.swap();
+    }
+    template <typename S>
+    bool operator>=(const S& i) const {
+        return swap() >= i;
+    }
+
+    // v <= i
+    bool operator<=(const swapped_t& i) const {
+        return swap() <= i.swap();
+    }
+    template <typename S>
+    bool operator<=(const S& i) const {
+        return swap() <= i;
+    }
+
+    // logical
+    swapped_t operator!() const {
+        return !swap();
+    }
+
+    // bitmath
+    swapped_t operator~() const {
+        return ~swap();
+    }
+
+    swapped_t operator&(const swapped_t& b) const {
+        return swap() & b.swap();
+    }
+    template <typename S>
+    swapped_t operator&(const S& b) const {
+        return swap() & b;
+    }
+    swapped_t& operator&=(const swapped_t& b) {
+        value = swap(swap() & b.swap());
+        return *this;
+    }
+    template <typename S>
+    swapped_t& operator&=(const S b) {
+        value = swap(swap() & b);
+        return *this;
+    }
+
+    swapped_t operator|(const swapped_t& b) const {
+        return swap() | b.swap();
+    }
+    template <typename S>
+    swapped_t operator|(const S& b) const {
+        return swap() | b;
+    }
+    swapped_t& operator|=(const swapped_t& b) {
+        value = swap(swap() | b.swap());
+        return *this;
+    }
+    template <typename S>
+    swapped_t& operator|=(const S& b) {
+        value = swap(swap() | b);
+        return *this;
+    }
+
+    swapped_t operator^(const swapped_t& b) const {
+        return swap() ^ b.swap();
+    }
+    template <typename S>
+    swapped_t operator^(const S& b) const {
+        return swap() ^ b;
+    }
+    swapped_t& operator^=(const swapped_t& b) {
+        value = swap(swap() ^ b.swap());
+        return *this;
+    }
+    template <typename S>
+    swapped_t& operator^=(const S& b) {
+        value = swap(swap() ^ b);
+        return *this;
+    }
+
+    template <typename S>
+    swapped_t operator<<(const S& b) const {
+        return swap() << b;
+    }
+    template <typename S>
+    swapped_t& operator<<=(const S& b) const {
+        value = swap(swap() << b);
+        return *this;
+    }
+
+    template <typename S>
+    swapped_t operator>>(const S& b) const {
+        return swap() >> b;
+    }
+    template <typename S>
+    swapped_t& operator>>=(const S& b) const {
+        value = swap(swap() >> b);
+        return *this;
+    }
+
+    // Member
+    /** todo **/
+
+    // Arithmetics
+    template <typename S, typename T2, typename F2>
+    friend S operator+(const S& p, const swapped_t v);
+
+    template <typename S, typename T2, typename F2>
+    friend S operator-(const S& p, const swapped_t v);
+
+    template <typename S, typename T2, typename F2>
+    friend S operator/(const S& p, const swapped_t v);
+
+    template <typename S, typename T2, typename F2>
+    friend S operator*(const S& p, const swapped_t v);
+
+    template <typename S, typename T2, typename F2>
+    friend S operator%(const S& p, const swapped_t v);
+
+    // Arithmetics + assignments
+    template <typename S, typename T2, typename F2>
+    friend S operator+=(const S& p, const swapped_t v);
+
+    template <typename S, typename T2, typename F2>
+    friend S operator-=(const S& p, const swapped_t v);
+
+    // Bitmath
+    template <typename S, typename T2, typename F2>
+    friend S operator&(const S& p, const swapped_t v);
+
+    // Comparison
+    template <typename S, typename T2, typename F2>
+    friend bool operator<(const S& p, const swapped_t v);
+
+    template <typename S, typename T2, typename F2>
+    friend bool operator>(const S& p, const swapped_t v);
+
+    template <typename S, typename T2, typename F2>
+    friend bool operator<=(const S& p, const swapped_t v);
+
+    template <typename S, typename T2, typename F2>
+    friend bool operator>=(const S& p, const swapped_t v);
+
+    template <typename S, typename T2, typename F2>
+    friend bool operator!=(const S& p, const swapped_t v);
+
+    template <typename S, typename T2, typename F2>
+    friend bool operator==(const S& p, const swapped_t v);
+};
+
+// Arithmetics
+template <typename S, typename T, typename F>
+S operator+(const S& i, const swap_struct_t<T, F> v) {
+    return i + v.swap();
+}
+
+template <typename S, typename T, typename F>
+S operator-(const S& i, const swap_struct_t<T, F> v) {
+    return i - v.swap();
+}
+
+template <typename S, typename T, typename F>
+S operator/(const S& i, const swap_struct_t<T, F> v) {
+    return i / v.swap();
+}
+
+template <typename S, typename T, typename F>
+S operator*(const S& i, const swap_struct_t<T, F> v) {
+    return i * v.swap();
+}
+
+template <typename S, typename T, typename F>
+S operator%(const S& i, const swap_struct_t<T, F> v) {
+    return i % v.swap();
+}
+
+// Arithmetics + assignments
+template <typename S, typename T, typename F>
+S& operator+=(S& i, const swap_struct_t<T, F> v) {
+    i += v.swap();
+    return i;
+}
+
+template <typename S, typename T, typename F>
+S& operator-=(S& i, const swap_struct_t<T, F> v) {
+    i -= v.swap();
+    return i;
+}
+
+// Logical
+template <typename S, typename T, typename F>
+S operator&(const S& i, const swap_struct_t<T, F> v) {
+    return i & v.swap();
+}
+
+template <typename S, typename T, typename F>
+S operator&(const swap_struct_t<T, F> v, const S& i) {
+    return static_cast<S>(v.swap() & i);
+}
+
+// Comparison
+template <typename S, typename T, typename F>
+bool operator<(const S& p, const swap_struct_t<T, F> v) {
+    return p < v.swap();
+}
+template <typename S, typename T, typename F>
+bool operator>(const S& p, const swap_struct_t<T, F> v) {
+    return p > v.swap();
+}
+template <typename S, typename T, typename F>
+bool operator<=(const S& p, const swap_struct_t<T, F> v) {
+    return p <= v.swap();
+}
+template <typename S, typename T, typename F>
+bool operator>=(const S& p, const swap_struct_t<T, F> v) {
+    return p >= v.swap();
+}
+template <typename S, typename T, typename F>
+bool operator!=(const S& p, const swap_struct_t<T, F> v) {
+    return p != v.swap();
+}
+template <typename S, typename T, typename F>
+bool operator==(const S& p, const swap_struct_t<T, F> v) {
+    return p == v.swap();
+}
+
+template <typename T>
+struct swap_64_t {
+    static T swap(T x) {
+        return static_cast<T>(Common::swap64(x));
+    }
+};
+
+template <typename T>
+struct swap_32_t {
+    static T swap(T x) {
+        return static_cast<T>(Common::swap32(x));
+    }
+};
+
+template <typename T>
+struct swap_16_t {
+    static T swap(T x) {
+        return static_cast<T>(Common::swap16(x));
+    }
+};
+
+template <typename T>
+struct swap_float_t {
+    static T swap(T x) {
+        return static_cast<T>(Common::swapf(x));
+    }
+};
+
+template <typename T>
+struct swap_double_t {
+    static T swap(T x) {
+        return static_cast<T>(Common::swapd(x));
+    }
+};
+
+template <typename T>
+struct swap_enum_t {
+    static_assert(std::is_enum_v<T>);
+    using base = std::underlying_type_t<T>;
+
+public:
+    swap_enum_t() = default;
+    swap_enum_t(const T& v) : value(swap(v)) {}
+
+    swap_enum_t& operator=(const T& v) {
+        value = swap(v);
+        return *this;
+    }
+
+    operator T() const {
+        return swap(value);
+    }
+
+    explicit operator base() const {
+        return static_cast<base>(swap(value));
+    }
+
+protected:
+    T value{};
+    // clang-format off
+    using swap_t = std::conditional_t<
+        std::is_same_v<base, u16>, swap_16_t<u16>, std::conditional_t<
+        std::is_same_v<base, s16>, swap_16_t<s16>, std::conditional_t<
+        std::is_same_v<base, u32>, swap_32_t<u32>, std::conditional_t<
+        std::is_same_v<base, s32>, swap_32_t<s32>, std::conditional_t<
+        std::is_same_v<base, u64>, swap_64_t<u64>, std::conditional_t<
+        std::is_same_v<base, s64>, swap_64_t<s64>, void>>>>>>;
+    // clang-format on
+    static T swap(T x) {
+        return static_cast<T>(swap_t::swap(static_cast<base>(x)));
+    }
+};
+
+struct SwapTag {}; // Use the different endianness from the system
+struct KeepTag {}; // Use the same endianness as the system
+
+template <typename T, typename Tag>
+struct AddEndian;
+
+// KeepTag specializations
+
+template <typename T>
+struct AddEndian<T, KeepTag> {
+    using type = T;
+};
+
+// SwapTag specializations
+
+template <>
+struct AddEndian<u8, SwapTag> {
+    using type = u8;
+};
+
+template <>
+struct AddEndian<u16, SwapTag> {
+    using type = swap_struct_t<u16, swap_16_t<u16>>;
+};
+
+template <>
+struct AddEndian<u32, SwapTag> {
+    using type = swap_struct_t<u32, swap_32_t<u32>>;
+};
+
+template <>
+struct AddEndian<u64, SwapTag> {
+    using type = swap_struct_t<u64, swap_64_t<u64>>;
+};
+
+template <>
+struct AddEndian<s8, SwapTag> {
+    using type = s8;
+};
+
+template <>
+struct AddEndian<s16, SwapTag> {
+    using type = swap_struct_t<s16, swap_16_t<s16>>;
+};
+
+template <>
+struct AddEndian<s32, SwapTag> {
+    using type = swap_struct_t<s32, swap_32_t<s32>>;
+};
+
+template <>
+struct AddEndian<s64, SwapTag> {
+    using type = swap_struct_t<s64, swap_64_t<s64>>;
+};
+
+template <>
+struct AddEndian<float, SwapTag> {
+    using type = swap_struct_t<float, swap_float_t<float>>;
+};
+
+template <>
+struct AddEndian<double, SwapTag> {
+    using type = swap_struct_t<double, swap_double_t<double>>;
+};
+
+template <typename T>
+struct AddEndian<T, SwapTag> {
+    static_assert(std::is_enum_v<T>);
+    using type = swap_enum_t<T>;
+};
+
+// Alias LETag/BETag as KeepTag/SwapTag depending on the system
+#if COMMON_LITTLE_ENDIAN
+
+using LETag = KeepTag;
+using BETag = SwapTag;
+
+#else
+
+using BETag = KeepTag;
+using LETag = SwapTag;
+
+#endif
+
+// Aliases for LE types
+using u16_le = AddEndian<u16, LETag>::type;
+using u32_le = AddEndian<u32, LETag>::type;
+using u64_le = AddEndian<u64, LETag>::type;
+
+using s16_le = AddEndian<s16, LETag>::type;
+using s32_le = AddEndian<s32, LETag>::type;
+using s64_le = AddEndian<s64, LETag>::type;
+
+template <typename T>
+using enum_le = std::enable_if_t<std::is_enum_v<T>, typename AddEndian<T, LETag>::type>;
+
+using float_le = AddEndian<float, LETag>::type;
+using double_le = AddEndian<double, LETag>::type;
+
+// Aliases for BE types
+using u16_be = AddEndian<u16, BETag>::type;
+using u32_be = AddEndian<u32, BETag>::type;
+using u64_be = AddEndian<u64, BETag>::type;
+
+using s16_be = AddEndian<s16, BETag>::type;
+using s32_be = AddEndian<s32, BETag>::type;
+using s64_be = AddEndian<s64, BETag>::type;
+
+template <typename T>
+using enum_be = std::enable_if_t<std::is_enum_v<T>, typename AddEndian<T, BETag>::type>;
+
+using float_be = AddEndian<float, BETag>::type;
+using double_be = AddEndian<double, BETag>::type;
\ No newline at end of file
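
For reference, a minimal usage sketch of the API declared in cityhash.hpp, assuming the patch has been applied; the wrapper function names and include path below are illustrative and are not part of the patch.

// Illustrative example only: hashing a byte buffer with the CityHash functions added above.
#include <cstdint>
#include <vector>

#include "third_party/cityhash/cityhash.hpp"

// Hypothetical helper: 64-bit digest of an arbitrary byte buffer.
std::uint64_t hashBuffer64(const std::vector<std::uint8_t>& data) {
    // CityHash64 takes a char pointer and a length in bytes.
    return CityHash::CityHash64(reinterpret_cast<const char*>(data.data()), data.size());
}

// Hypothetical helper: 128-bit digest, returned by the library as a pair of 64-bit halves.
std::uint64_t hashBuffer128Low(const std::vector<std::uint8_t>& data) {
    const CityHash::uint128 digest =
        CityHash::CityHash128(reinterpret_cast<const char*>(data.data()), data.size());
    return CityHash::Uint128Low64(digest);
}

CityHash64WithSeed, CityHash64WithSeeds and CityHash128WithSeed follow the same calling pattern with extra seed arguments.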
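
swap.hpp also brings in the PPSSPP/Dolphin-style endian-aware integer wrappers (u16_be, u32_le, and so on) built on Common::swap16/32/64. A short sketch of how they behave, with illustrative names and under the assumption that helpers.hpp provides the usual u8/u16/u32/u64 aliases:

// Illustrative example only: reading a big-endian 32-bit value through u32_be.
#include <cstring>

#include "third_party/cityhash/swap.hpp"

u32 readBigEndian32(const u8* src) {
    u32_be word;                            // in-memory representation is big-endian on any host
    std::memcpy(&word, src, sizeof(word));  // copy the raw big-endian bytes into the wrapper
    return word;                            // the conversion operator swaps to host order when needed
}

u64 byteswap64(u64 value) {
    return Common::swap64(value);  // unconditional 64-bit byte swap
}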