// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// -----------------------------------------------------------------------------
// File: hash.h
// -----------------------------------------------------------------------------
//
#ifndef Y_ABSL_HASH_INTERNAL_HASH_H_
#define Y_ABSL_HASH_INTERNAL_HASH_H_

#include <algorithm>
#include <array>
#include <bitset>
#include <cmath>
#include <cstddef>
#include <cstring>
#include <deque>
#include <forward_list>
#include <functional>
#include <iterator>
#include <limits>
#include <list>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "y_absl/base/config.h"
#include "y_absl/base/internal/unaligned_access.h"
#include "y_absl/base/port.h"
#include "y_absl/container/fixed_array.h"
#include "y_absl/hash/internal/city.h"
#include "y_absl/hash/internal/low_level_hash.h"
#include "y_absl/meta/type_traits.h"
#include "y_absl/numeric/bits.h"
#include "y_absl/numeric/int128.h"
#include "y_absl/strings/string_view.h"
#include "y_absl/types/optional.h"
#include "y_absl/types/variant.h"
#include "y_absl/utility/utility.h"

#ifdef Y_ABSL_HAVE_STD_STRING_VIEW
#include <string_view>
#endif

namespace y_absl {
Y_ABSL_NAMESPACE_BEGIN

class HashState;

namespace hash_internal {

// Internal detail: Large buffers are hashed in smaller chunks. This function
// returns the size of these chunks.
constexpr size_t PiecewiseChunkSize() { return 1024; }

// PiecewiseCombiner
//
// PiecewiseCombiner is an internal-only helper class for hashing a piecewise
// buffer of `char` or `unsigned char` as though it were contiguous. This class
// provides two methods:
//
//   H add_buffer(state, data, size)
//   H finalize(state)
//
// `add_buffer` can be called zero or more times, followed by a single call to
// `finalize`. This will produce the same hash expansion as concatenating each
// buffer piece into a single contiguous buffer, and passing this to
// `H::combine_contiguous`.
//
// Example usage:
//   PiecewiseCombiner combiner;
//   for (const auto& piece : pieces) {
//     state = combiner.add_buffer(std::move(state), piece.data, piece.size);
//   }
//   return combiner.finalize(std::move(state));
class PiecewiseCombiner {
 public:
  PiecewiseCombiner() : position_(0) {}
  PiecewiseCombiner(const PiecewiseCombiner&) = delete;
  PiecewiseCombiner& operator=(const PiecewiseCombiner&) = delete;

  // PiecewiseCombiner::add_buffer()
  //
  // Appends the given range of bytes to the sequence to be hashed, which may
  // modify the provided hash state.
  template <typename H>
  H add_buffer(H state, const unsigned char* data, size_t size);
  template <typename H>
  H add_buffer(H state, const char* data, size_t size) {
    return add_buffer(std::move(state),
                      reinterpret_cast<const unsigned char*>(data), size);
  }

  // PiecewiseCombiner::finalize()
  //
  // Finishes combining the hash sequence, which may modify the provided
  // hash state.
  //
  // Once finalize() is called, add_buffer() may no longer be called. The
  // resulting hash state will be the same as if the pieces passed to
  // add_buffer() were concatenated into a single flat buffer, and then provided
  // to H::combine_contiguous().
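  //
  // A minimal sketch of that guarantee (`p1`, `p2`, and `flat` are
  // hypothetical buffers, with `flat` holding the concatenation of the two
  // pieces):
  //
  //   PiecewiseCombiner combiner;
  //   state = combiner.add_buffer(std::move(state), p1, n1);
  //   state = combiner.add_buffer(std::move(state), p2, n2);
  //   state = combiner.finalize(std::move(state));
  //
  // must produce the same hash expansion as:
  //
  //   state = H::combine_contiguous(std::move(state), flat, n1 + n2);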
  template <typename H>
  H finalize(H state);

 private:
  unsigned char buf_[PiecewiseChunkSize()];
  size_t position_;
};

// is_hashable()
//
// Trait class which returns true if T is hashable by the y_absl::Hash
// framework. Used for the AbslHashValue implementations for composite types
// below.
template <typename T>
struct is_hashable;

// HashStateBase
//
// An internal implementation detail that contains the common implementation
// details for all of the "hash state objects" generated by Abseil. This is not
// a public API; users should not create classes that inherit from this.
//
// A hash state object is the template argument `H` passed to `AbslHashValue`.
// It represents an intermediate state in the computation of an unspecified hash
// algorithm. `HashStateBase` provides a CRTP style base class for hash state
// implementations. Developers adding type support for `y_absl::Hash` should not
// rely on any parts of the state object other than the following member
// functions:
//
//   * HashStateBase::combine()
//   * HashStateBase::combine_contiguous()
//   * HashStateBase::combine_unordered()
//
// A derived hash state class of type `H` must provide a public member function
// with a signature similar to the following:
//
//    `static H combine_contiguous(H state, const unsigned char*, size_t)`.
//
// It must also provide a private template method named RunCombineUnordered.
//
// A "consumer" is a 1-arg functor returning void. Its argument is a reference
// to an inner hash state object, and it may be called multiple times. When
// called, the functor consumes the entropy from the provided state object,
// and resets that object to its empty state.
//
// A "combiner" is a stateless 2-arg functor returning void. Its arguments are
// an inner hash state object and an ElementStateConsumer functor. A combiner
// uses the provided inner hash state object to hash each element of the
// container, passing the inner hash state object to the consumer after hashing
// each element.
//
// Given these definitions, a derived hash state class of type H
// must provide a private template method with a signature similar to the
// following:
//
//    `template <typename CombinerT>`
//    `static H RunCombineUnordered(H outer_state, CombinerT combiner)`
//
// This function is responsible for constructing the inner state object and
// providing a consumer to the combiner. It uses side effects of the consumer
// and combiner to mix the state of each element in an order-independent manner,
// and uses this to return an updated value of `outer_state`.
//
// This inside-out approach generates efficient object code in the normal case,
// but allows us to use stack storage to implement the y_absl::HashState type
// erasure mechanism (avoiding heap allocations while hashing).
//
// `HashStateBase` will provide a complete implementation for a hash state
// object in terms of these two methods.
//
// Example:
//
//   // Use CRTP to define your derived class.
//   struct MyHashState : HashStateBase<MyHashState> {
//       static H combine_contiguous(H state, const unsigned char*, size_t);
//       using MyHashState::HashStateBase::combine;
//       using MyHashState::HashStateBase::combine_contiguous;
//       using MyHashState::HashStateBase::combine_unordered;
//     private:
//       template <typename CombinerT>
//       static H RunCombineUnordered(H state, CombinerT combiner);
//   };
template <typename H>
class HashStateBase {
 public:
  // HashStateBase::combine()
  //
  // Combines an arbitrary number of values into a hash state, returning the
  // updated state.
  //
  // Each of the value types `T` must be separately hashable by the Abseil
  // hashing framework.
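  //
  // For example, `H::combine(std::move(state), 1, my_value)` requires both
  // `int` and the (hypothetical) type of `my_value` to satisfy `is_hashable`.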
  //
  // NOTE:
  //
  //   state = H::combine(std::move(state), value1, value2, value3);
  //
  // is guaranteed to produce the same hash expansion as:
  //
  //   state = H::combine(std::move(state), value1);
  //   state = H::combine(std::move(state), value2);
  //   state = H::combine(std::move(state), value3);
  template <typename T, typename... Ts>
  static H combine(H state, const T& value, const Ts&... values);
  static H combine(H state) { return state; }

  // HashStateBase::combine_contiguous()
  //
  // Combines a contiguous array of `size` elements into a hash state,
  // returning the updated state.
  //
  // NOTE:
  //
  //   state = H::combine_contiguous(std::move(state), data, size);
  //
  // is NOT guaranteed to produce the same hash expansion as a for-loop (it may
  // perform internal optimizations). If you need this guarantee, use the
  // for-loop instead.
  template <typename T>
  static H combine_contiguous(H state, const T* data, size_t size);

  template <typename I>
  static H combine_unordered(H state, I begin, I end);

  using AbslInternalPiecewiseCombiner = PiecewiseCombiner;

  template <typename T>
  using is_hashable = y_absl::hash_internal::is_hashable<T>;

 private:
  // Common implementation of the iteration step of a "combiner", as described
  // above.
  template <typename I>
  struct CombineUnorderedCallback {
    I begin;
    I end;

    template <typename InnerH, typename ElementStateConsumer>
    void operator()(InnerH inner_state, ElementStateConsumer cb) {
      for (; begin != end; ++begin) {
        inner_state = H::combine(std::move(inner_state), *begin);
        cb(inner_state);
      }
    }
  };
};

// is_uniquely_represented
//
// `is_uniquely_represented<T>` is a trait class that indicates whether `T`
// is uniquely represented.
//
// A type is "uniquely represented" if two equal values of that type are
// guaranteed to have the same bytes in their underlying storage. In other
// words, if `a == b`, then `memcmp(&a, &b, sizeof(T))` is guaranteed to be
// zero. This property cannot be detected automatically, so this trait is false
// by default, but can be specialized by types that wish to assert that they
// are uniquely represented. This makes them eligible for certain
// optimizations.
//
// If you have any doubt whatsoever, do not specialize this template.
// The default is completely safe, and merely disables some optimizations
// that will not matter for most types. Specializing this template,
// on the other hand, can be very hazardous.
//
// To be uniquely represented, a type must not have multiple ways of
// representing the same value; for example, float and double are not
// uniquely represented, because they have distinct representations for
// +0 and -0. Furthermore, the type's byte representation must consist
// solely of user-controlled data, with no padding bits and no compiler-
// controlled data such as vptrs or sanitizer metadata. This is usually
// very difficult to guarantee, because in most cases the compiler can
// insert data and padding bits at its own discretion.
//
// If you specialize this template for a type `T`, you must do so in the file
// that defines that type (or in this file). If you define that specialization
// anywhere else, `is_uniquely_represented<T>` could have different meanings
// in different places.
//
// The Enable parameter is meaningless; it is provided as a convenience,
// to support certain SFINAE techniques when defining specializations.
template <typename T, typename Enable = void>
struct is_uniquely_represented : std::false_type {};

// is_uniquely_represented<unsigned char>
//
// unsigned char is a synonym for "byte", so it is guaranteed to be
// uniquely represented.
template <>
struct is_uniquely_represented<unsigned char> : std::true_type {};

// is_uniquely_represented for non-standard integral types
//
// Integral types other than bool should be uniquely represented on any
// platform that this will plausibly be ported to.
template <typename Integral>
struct is_uniquely_represented<
    Integral, typename std::enable_if<std::is_integral<Integral>::value>::type>
    : std::true_type {};

// is_uniquely_represented<bool>
//
// bool is explicitly excluded from the integral specialization above: its
// object representation may contain bits that do not participate in its value.
template <>
struct is_uniquely_represented<bool> : std::false_type {};

// hash_bytes()
//
// Convenience function that combines `hash_state` with the byte representation
// of `value`.
template <typename H, typename T>
H hash_bytes(H hash_state, const T& value) {
  const unsigned char* start = reinterpret_cast<const unsigned char*>(&value);
  return H::combine_contiguous(std::move(hash_state), start, sizeof(value));
}

// -----------------------------------------------------------------------------
// AbslHashValue for Basic Types
// -----------------------------------------------------------------------------

// Note: Default `AbslHashValue` implementations live in `hash_internal`. This
// allows us to block lexical scope lookup when doing an unqualified call to
// `AbslHashValue` below. User-defined implementations of `AbslHashValue` can
// only be found via ADL.

// AbslHashValue() for hashing bool values
//
// We use SFINAE to ensure that this overload only accepts bool, not types that
// are convertible to bool.
template <typename H, typename B>
typename std::enable_if<std::is_same<B, bool>::value, H>::type AbslHashValue(
    H hash_state, B value) {
  return H::combine(std::move(hash_state),
                    static_cast<unsigned char>(value ? 1 : 0));
}

// AbslHashValue() for hashing enum values
template <typename H, typename Enum>
typename std::enable_if<std::is_enum<Enum>::value, H>::type AbslHashValue(
    H hash_state, Enum e) {
  // In practice, we could almost certainly just invoke hash_bytes directly,
  // but it's possible that a sanitizer might one day want to
  // store data in the unused bits of an enum. To avoid that risk, we
  // convert to the underlying type before hashing. Hopefully this will get
  // optimized away; if not, we can reopen discussion with c-toolchain-team.
  return H::combine(std::move(hash_state),
                    static_cast<typename std::underlying_type<Enum>::type>(e));
}

// AbslHashValue() for hashing floating-point values
template <typename H, typename Float>
typename std::enable_if<std::is_same<Float, float>::value ||
                            std::is_same<Float, double>::value,
                        H>::type
AbslHashValue(H hash_state, Float value) {
  return hash_internal::hash_bytes(std::move(hash_state),
                                   value == 0 ? 0 : value);
}

// Long double has the property that it might have extra unused bytes in it.
// For example, in x86 sizeof(long double)==16 but it only really uses 80 bits
// of it. This means we can't use hash_bytes on a long double and have to
// convert it to something else first.
template <typename H, typename LongDouble>
typename std::enable_if<std::is_same<LongDouble, long double>::value, H>::type
AbslHashValue(H hash_state, LongDouble value) {
  const int category = std::fpclassify(value);
  switch (category) {
    case FP_INFINITE:
      // Add the sign bit to differentiate between +Inf and -Inf
      hash_state = H::combine(std::move(hash_state), std::signbit(value));
      break;

    case FP_NAN:
    case FP_ZERO:
    default:
      // Category is enough for these.
      break;

    case FP_NORMAL:
    case FP_SUBNORMAL:
      // We can't convert `value` directly to double because this would have
      // undefined behavior if the value is out of range.
      // std::frexp gives us a value in the range (-1, -.5] or [.5, 1) that is
      // guaranteed to be in range for `double`. The truncation is
      // implementation defined, but that works as long as it is deterministic.
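      // For example, std::frexp(6.0L, &exp) produces a mantissa of 0.75 and
      // exp == 3 (since 6.0 == 0.75 * 2^3); that (mantissa, exp) pair is what
      // gets combined below.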
      int exp;
      auto mantissa = static_cast<double>(std::frexp(value, &exp));
      hash_state = H::combine(std::move(hash_state), mantissa, exp);
  }

  return H::combine(std::move(hash_state), category);
}

// AbslHashValue() for hashing pointers
template <typename H, typename T>
H AbslHashValue(H hash_state, T* ptr) {
  auto v = reinterpret_cast<uintptr_t>(ptr);
  // Due to alignment, pointers tend to have low bits as zero, and the next few
  // bits follow a pattern since they are also multiples of some base value.
  // Mixing the pointer twice helps prevent stuck low bits for certain alignment
  // values.
  return H::combine(std::move(hash_state), v, v);
}

// AbslHashValue() for hashing nullptr_t
template <typename H>
H AbslHashValue(H hash_state, std::nullptr_t) {
  return H::combine(std::move(hash_state), static_cast<void*>(nullptr));
}

// AbslHashValue() for hashing pointers-to-member
template <typename H, typename T, typename C>
H AbslHashValue(H hash_state, T C::*ptr) {
  auto salient_ptm_size = [](std::size_t n) -> std::size_t {
#if defined(_MSC_VER)
    // Pointers-to-member-function on MSVC consist of one pointer plus 0, 1, 2,
    // or 3 ints. In 64-bit mode, they are 8-byte aligned and thus can contain
    // padding (namely when they have 1 or 3 ints). The value below is a lower
    // bound on the number of salient, non-padding bytes that we use for
    // hashing.
    if (alignof(T C::*) == alignof(int)) {
      // No padding when all subobjects have the same size as the total
      // alignment. This happens in 32-bit mode.
      return n;
    } else {
      // Padding for 1 int (size 16) or 3 ints (size 24).
      // With 2 ints, the size is 16 with no padding, which we pessimize.
      return n == 24 ? 20 : n == 16 ? 12 : n;
    }
#else
    // On other platforms, we assume that pointers-to-members do not have
    // padding.
#ifdef __cpp_lib_has_unique_object_representations
    static_assert(std::has_unique_object_representations<T C::*>::value);
#endif  // __cpp_lib_has_unique_object_representations
    return n;
#endif
  };
  return H::combine_contiguous(std::move(hash_state),
                               reinterpret_cast<unsigned char*>(&ptr),
                               salient_ptm_size(sizeof ptr));
}

// -----------------------------------------------------------------------------
// AbslHashValue for Composite Types
// -----------------------------------------------------------------------------

// AbslHashValue() for hashing pairs
template <typename H, typename T1, typename T2>
typename std::enable_if<is_hashable<T1>::value && is_hashable<T2>::value,
                        H>::type
AbslHashValue(H hash_state, const std::pair<T1, T2>& p) {
  return H::combine(std::move(hash_state), p.first, p.second);
}

// hash_tuple()
//
// Helper function for hashing a tuple. The third argument should
// be an index_sequence running from 0 to tuple_size<Tuple> - 1.
template <typename H, typename Tuple, size_t... Is>
H hash_tuple(H hash_state, const Tuple& t, y_absl::index_sequence<Is...>) {
  return H::combine(std::move(hash_state), std::get<Is>(t)...);
}

// AbslHashValue for hashing tuples
template <typename H, typename... Ts>
#if defined(_MSC_VER)
// This SFINAE gets MSVC confused under some conditions. Let's just disable it
// for now.
H
#else   // _MSC_VER
typename std::enable_if<y_absl::conjunction<is_hashable<Ts>...>::value, H>::type
#endif  // _MSC_VER
AbslHashValue(H hash_state, const std::tuple<Ts...>& t) {
  return hash_internal::hash_tuple(std::move(hash_state), t,
                                   y_absl::make_index_sequence<sizeof...(Ts)>());
}

// -----------------------------------------------------------------------------
// AbslHashValue for Pointers
// -----------------------------------------------------------------------------

// AbslHashValue for hashing unique_ptr
template <typename H, typename T, typename D>
H AbslHashValue(H hash_state, const std::unique_ptr<T, D>& ptr) {
  return H::combine(std::move(hash_state), ptr.get());
}

// AbslHashValue for hashing shared_ptr
template <typename H, typename T>
H AbslHashValue(H hash_state, const std::shared_ptr<T>& ptr) {
  return H::combine(std::move(hash_state), ptr.get());
}

// -----------------------------------------------------------------------------
// AbslHashValue for String-Like Types
// -----------------------------------------------------------------------------

// AbslHashValue for hashing strings
//
// All the string-like types supported here provide the same hash expansion for
// the same character sequence. These types are:
//
//  - `y_absl::Cord`
//  - `TString` (and `std::basic_string<T, std::char_traits<T>, A>` for
//      any allocator A and any T in {char, wchar_t, char16_t, char32_t})
//  - `y_absl::string_view`, `std::string_view`, `std::wstring_view`,
//    `std::u16string_view`, and `std::u32string_view`.
//
// For simplicity, we currently support only strings built on `char`, `wchar_t`,
// `char16_t`, or `char32_t`. This support may be broadened, if necessary, but
// with some caution - this overload would misbehave in cases where the traits'
// `eq()` member isn't equivalent to `==` on the underlying character type.
template <typename H>
H AbslHashValue(H hash_state, y_absl::string_view str) {
  return H::combine(
      H::combine_contiguous(std::move(hash_state), str.data(), str.size()),
      str.size());
}

// Support std::wstring, std::u16string and std::u32string.
template <typename Char, typename Alloc, typename H,
          typename = y_absl::enable_if_t<std::is_same<Char, wchar_t>::value ||
                                         std::is_same<Char, char16_t>::value ||
                                         std::is_same<Char, char32_t>::value>>
H AbslHashValue(
    H hash_state,
    const std::basic_string<Char, std::char_traits<Char>, Alloc>& str) {
  return H::combine(
      H::combine_contiguous(std::move(hash_state), str.data(), str.size()),
      str.size());
}

#ifdef Y_ABSL_HAVE_STD_STRING_VIEW

// Support std::wstring_view, std::u16string_view and std::u32string_view.
template <typename Char, typename H,
          typename = y_absl::enable_if_t<std::is_same<Char, wchar_t>::value ||
                                         std::is_same<Char, char16_t>::value ||
                                         std::is_same<Char, char32_t>::value>>
H AbslHashValue(H hash_state, std::basic_string_view<Char> str) {
  return H::combine(
      H::combine_contiguous(std::move(hash_state), str.data(), str.size()),
      str.size());
}

#endif  // Y_ABSL_HAVE_STD_STRING_VIEW

// -----------------------------------------------------------------------------
// AbslHashValue for Sequence Containers
// -----------------------------------------------------------------------------

// AbslHashValue for hashing std::array
template <typename H, typename T, size_t N>
typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
    H hash_state, const std::array<T, N>& array) {
  return H::combine_contiguous(std::move(hash_state), array.data(),
                               array.size());
}

// AbslHashValue for hashing std::deque
template <typename H, typename T, typename Allocator>
typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
    H hash_state, const std::deque<T, Allocator>& deque) {
  // TODO(gromer): investigate a more efficient implementation taking
  // advantage of the chunk structure.
  for (const auto& t : deque) {
    hash_state = H::combine(std::move(hash_state), t);
  }
  return H::combine(std::move(hash_state), deque.size());
}

// AbslHashValue for hashing std::forward_list
template <typename H, typename T, typename Allocator>
typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
    H hash_state, const std::forward_list<T, Allocator>& list) {
  size_t size = 0;
  for (const T& t : list) {
    hash_state = H::combine(std::move(hash_state), t);
    ++size;
  }
  return H::combine(std::move(hash_state), size);
}

// AbslHashValue for hashing std::list
template <typename H, typename T, typename Allocator>
typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
    H hash_state, const std::list<T, Allocator>& list) {
  for (const auto& t : list) {
    hash_state = H::combine(std::move(hash_state), t);
  }
  return H::combine(std::move(hash_state), list.size());
}

// AbslHashValue for hashing std::vector
//
// Do not use this for vector<bool> on platforms that have a working
// implementation of std::hash. It does not have a .data(), and a fallback for
// std::hash<> is most likely faster.
template <typename H, typename T, typename Allocator>
typename std::enable_if<is_hashable<T>::value && !std::is_same<T, bool>::value,
                        H>::type
AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
  return H::combine(H::combine_contiguous(std::move(hash_state), vector.data(),
                                          vector.size()),
                    vector.size());
}

// AbslHashValue special cases for hashing std::vector<bool>

#if defined(Y_ABSL_IS_BIG_ENDIAN) && \
    (defined(__GLIBCXX__) || defined(__GLIBCPP__))

// std::hash in libstdc++ does not work correctly with vector<bool> on Big
// Endian platforms therefore we need to implement a custom AbslHashValue for
// it. More details on the bug:
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102531
template <typename H, typename T, typename Allocator>
typename std::enable_if<is_hashable<T>::value && std::is_same<T, bool>::value,
                        H>::type
AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
  typename H::AbslInternalPiecewiseCombiner combiner;
  for (const auto& i : vector) {
    unsigned char c = static_cast<unsigned char>(i);
    hash_state = combiner.add_buffer(std::move(hash_state), &c, sizeof(c));
  }
  return H::combine(combiner.finalize(std::move(hash_state)), vector.size());
}
#else
// When not working around the libstdc++ bug above, we still have to contend
// with the fact that std::hash<vector<bool>> is often poor quality, hashing
// directly on the internal words and on no other state. On these platforms,
// vector<bool>{1, 1} and vector<bool>{1, 1, 0} hash to the same value.
//
// Mixing in the size (as we do in our other vector<> implementations) on top
// of the library-provided hash implementation avoids this QOI issue.
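// For example, once the size is mixed in, vector<bool>{1, 1} and
// vector<bool>{1, 1, 0} no longer produce the same hash expansion.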
template <typename H, typename T, typename Allocator>
typename std::enable_if<is_hashable<T>::value && std::is_same<T, bool>::value,
                        H>::type
AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
  return H::combine(std::move(hash_state),
                    std::hash<std::vector<T, Allocator>>{}(vector),
                    vector.size());
}
#endif

// -----------------------------------------------------------------------------
// AbslHashValue for Ordered Associative Containers
// -----------------------------------------------------------------------------

// AbslHashValue for hashing std::map
template <typename H, typename Key, typename T, typename Compare,
          typename Allocator>
typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
                        H>::type
AbslHashValue(H hash_state, const std::map<Key, T, Compare, Allocator>& map) {
  for (const auto& t : map) {
    hash_state = H::combine(std::move(hash_state), t);
  }
  return H::combine(std::move(hash_state), map.size());
}

// AbslHashValue for hashing std::multimap
template <typename H, typename Key, typename T, typename Compare,
          typename Allocator>
typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
                        H>::type
AbslHashValue(H hash_state,
              const std::multimap<Key, T, Compare, Allocator>& map) {
  for (const auto& t : map) {
    hash_state = H::combine(std::move(hash_state), t);
  }
  return H::combine(std::move(hash_state), map.size());
}

// AbslHashValue for hashing std::set
template <typename H, typename Key, typename Compare, typename Allocator>
typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
    H hash_state, const std::set<Key, Compare, Allocator>& set) {
  for (const auto& t : set) {
    hash_state = H::combine(std::move(hash_state), t);
  }
  return H::combine(std::move(hash_state), set.size());
}

// AbslHashValue for hashing std::multiset
template <typename H, typename Key, typename Compare, typename Allocator>
typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
    H hash_state, const std::multiset<Key, Compare, Allocator>& set) {
  for (const auto& t : set) {
    hash_state = H::combine(std::move(hash_state), t);
  }
  return H::combine(std::move(hash_state), set.size());
}

// -----------------------------------------------------------------------------
// AbslHashValue for Unordered Associative Containers
// -----------------------------------------------------------------------------

// AbslHashValue for hashing std::unordered_set
template <typename H, typename Key, typename Hash, typename KeyEqual,
          typename Alloc>
typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
    H hash_state, const std::unordered_set<Key, Hash, KeyEqual, Alloc>& s) {
  return H::combine(
      H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
      s.size());
}

// AbslHashValue for hashing std::unordered_multiset
template <typename H, typename Key, typename Hash, typename KeyEqual,
          typename Alloc>
typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
    H hash_state,
    const std::unordered_multiset<Key, Hash, KeyEqual, Alloc>& s) {
  return H::combine(
      H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
      s.size());
}

// AbslHashValue for hashing std::unordered_map
template <typename H, typename Key, typename T, typename Hash,
          typename KeyEqual, typename Alloc>
typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
                        H>::type
AbslHashValue(H hash_state,
              const std::unordered_map<Key, T, Hash, KeyEqual, Alloc>& s) {
  return H::combine(
      H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
      s.size());
}

// AbslHashValue for hashing std::unordered_multimap
template <typename H, typename Key, typename T, typename Hash,
          typename KeyEqual, typename Alloc>
typename std::enable_if<is_hashable<Key>::value && is_hashable<T>::value,
                        H>::type
AbslHashValue(H hash_state,
              const std::unordered_multimap<Key, T, Hash, KeyEqual, Alloc>& s) {
  return H::combine(
      H::combine_unordered(std::move(hash_state), s.begin(), s.end()),
      s.size());
}

// -----------------------------------------------------------------------------
// AbslHashValue for Wrapper Types
// -----------------------------------------------------------------------------

// AbslHashValue for hashing std::reference_wrapper
template <typename H, typename T>
typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
    H hash_state, std::reference_wrapper<T> opt) {
  return H::combine(std::move(hash_state), opt.get());
}

// AbslHashValue for hashing y_absl::optional
template <typename H, typename T>
typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
    H hash_state, const y_absl::optional<T>& opt) {
  if (opt) hash_state = H::combine(std::move(hash_state), *opt);
  return H::combine(std::move(hash_state), opt.has_value());
}

// VariantVisitor
template <typename H>
struct VariantVisitor {
  H&& hash_state;
  template <typename T>
  H operator()(const T& t) const {
    return H::combine(std::move(hash_state), t);
  }
};

// AbslHashValue for hashing y_absl::variant
template <typename H, typename... T>
typename std::enable_if<conjunction<is_hashable<T>...>::value, H>::type
AbslHashValue(H hash_state, const y_absl::variant<T...>& v) {
  if (!v.valueless_by_exception()) {
    hash_state = y_absl::visit(VariantVisitor<H>{std::move(hash_state)}, v);
  }
  return H::combine(std::move(hash_state), v.index());
}

// -----------------------------------------------------------------------------
// AbslHashValue for Other Types
// -----------------------------------------------------------------------------

// AbslHashValue for hashing std::bitset is not defined on Little Endian
// platforms, for the same reason as for vector<bool> (see std::vector above):
// It does not expose the raw bytes, and a fallback to std::hash<> is most
// likely faster.

#if defined(Y_ABSL_IS_BIG_ENDIAN) && \
    (defined(__GLIBCXX__) || defined(__GLIBCPP__))
// AbslHashValue for hashing std::bitset
//
// std::hash in libstdc++ does not work correctly with std::bitset on Big
// Endian platforms therefore we need to implement a custom AbslHashValue for
// it. More details on the bug:
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102531
template <typename H, size_t N>
H AbslHashValue(H hash_state, const std::bitset<N>& set) {
  typename H::AbslInternalPiecewiseCombiner combiner;
  for (size_t i = 0; i < N; i++) {
    unsigned char c = static_cast<unsigned char>(set[i]);
    hash_state = combiner.add_buffer(std::move(hash_state), &c, sizeof(c));
  }
  return H::combine(combiner.finalize(std::move(hash_state)), N);
}
#endif

// -----------------------------------------------------------------------------

// hash_range_or_bytes()
//
// Mixes all values in the range [data, data+size) into the hash state.
// This overload accepts only uniquely-represented types, and hashes them by
// hashing the entire range of bytes.
template <typename H, typename T>
typename std::enable_if<is_uniquely_represented<T>::value, H>::type
hash_range_or_bytes(H hash_state, const T* data, size_t size) {
  const auto* bytes = reinterpret_cast<const unsigned char*>(data);
  return H::combine_contiguous(std::move(hash_state), bytes, sizeof(T) * size);
}

// hash_range_or_bytes()
template <typename H, typename T>
typename std::enable_if<!is_uniquely_represented<T>::value, H>::type
hash_range_or_bytes(H hash_state, const T* data, size_t size) {
  for (const auto end = data + size; data < end; ++data) {
    hash_state = H::combine(std::move(hash_state), *data);
  }
  return hash_state;
}

#if defined(Y_ABSL_INTERNAL_LEGACY_HASH_NAMESPACE) && \
    Y_ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_
#define Y_ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_ 1
#else
#define Y_ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_ 0
#endif

// HashSelect
//
// Type trait to select the appropriate hash implementation to use.
// HashSelect::Apply<T> will give the proper hash implementation, to be invoked
// as:
//   HashSelect::Apply<T>::Invoke(state, value)
// Also, HashSelect::Apply<T>::value is a boolean equal to `true` if there is a
// valid `Invoke` function. Types that are not hashable will have a ::value of
// `false`.
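//
// For example (illustrative; `MyType` stands for a hypothetical user type
// with an ADL-visible `AbslHashValue` overload):
//
//   HashSelect::Apply<int>::value     // true, via UniquelyRepresentedProbe
//   HashSelect::Apply<MyType>::value  // true, via HashValueProbe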
struct HashSelect {
 private:
  struct State : HashStateBase<State> {
    static State combine_contiguous(State hash_state, const unsigned char*,
                                    size_t);
    using State::HashStateBase::combine_contiguous;
  };

  struct UniquelyRepresentedProbe {
    template <typename H, typename T>
    static auto Invoke(H state, const T& value)
        -> y_absl::enable_if_t<is_uniquely_represented<T>::value, H> {
      return hash_internal::hash_bytes(std::move(state), value);
    }
  };

  struct HashValueProbe {
    template <typename H, typename T>
    static auto Invoke(H state, const T& value) -> y_absl::enable_if_t<
        std::is_same<H,
                     decltype(AbslHashValue(std::move(state), value))>::value,
        H> {
      return AbslHashValue(std::move(state), value);
    }
  };

  struct LegacyHashProbe {
#if Y_ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
    template <typename H, typename T>
    static auto Invoke(H state, const T& value) -> y_absl::enable_if_t<
        std::is_convertible<
            decltype(Y_ABSL_INTERNAL_LEGACY_HASH_NAMESPACE::hash<T>()(value)),
            size_t>::value,
        H> {
      return hash_internal::hash_bytes(
          std::move(state),
          Y_ABSL_INTERNAL_LEGACY_HASH_NAMESPACE::hash<T>{}(value));
    }
#endif  // Y_ABSL_HASH_INTERNAL_SUPPORT_LEGACY_HASH_
  };

  struct StdHashProbe {
    template <typename H, typename T>
    static auto Invoke(H state, const T& value)
        -> y_absl::enable_if_t<type_traits_internal::IsHashable<T>::value, H> {
      return hash_internal::hash_bytes(std::move(state), std::hash<T>{}(value));
    }
  };

  template <typename Hash, typename T>
  struct Probe : Hash {
   private:
    template <typename H, typename = decltype(H::Invoke(
                              std::declval<State>(), std::declval<const T&>()))>
    static std::true_type Test(int);
    template <typename U>
    static std::false_type Test(char);

   public:
    static constexpr bool value = decltype(Test<Hash>(0))::value;
  };

 public:
  // Probe each implementation in order.
  // disjunction provides short circuiting wrt instantiation.
  template <typename T>
  using Apply = y_absl::disjunction<         //
      Probe<UniquelyRepresentedProbe, T>,  //
      Probe<HashValueProbe, T>,            //
      Probe<LegacyHashProbe, T>,           //
      Probe<StdHashProbe, T>,              //
      std::false_type>;
};

template <typename T>
struct is_hashable
    : std::integral_constant<bool, HashSelect::template Apply<T>::value> {};

// MixingHashState
class Y_ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
  // y_absl::uint128 is not an alias or a thin wrapper around the intrinsic.
  // We use the intrinsic when available to improve performance.
#ifdef Y_ABSL_HAVE_INTRINSIC_INT128
  using uint128 = __uint128_t;
#else   // Y_ABSL_HAVE_INTRINSIC_INT128
  using uint128 = y_absl::uint128;
#endif  // Y_ABSL_HAVE_INTRINSIC_INT128

  static constexpr uint64_t kMul =
      sizeof(size_t) == 4 ? uint64_t{0xcc9e2d51}
                          : uint64_t{0x9ddfea08eb382d69};

  template <typename T>
  using IntegralFastPath =
      conjunction<std::is_integral<T>, is_uniquely_represented<T>>;

 public:
  // Move only
  MixingHashState(MixingHashState&&) = default;
  MixingHashState& operator=(MixingHashState&&) = default;

  // MixingHashState::combine_contiguous()
  //
  // Fundamental base case for hash recursion: mixes the given range of bytes
  // into the hash state.
  static MixingHashState combine_contiguous(MixingHashState hash_state,
                                            const unsigned char* first,
                                            size_t size) {
    return MixingHashState(
        CombineContiguousImpl(hash_state.state_, first, size,
                              std::integral_constant<int, sizeof(size_t)>{}));
  }
  using MixingHashState::HashStateBase::combine_contiguous;

  // MixingHashState::hash()
  //
  // For performance reasons in non-opt mode, we specialize this for
  // integral types.
  // Otherwise we would be instantiating and calling dozens of functions for
  // something that is just one multiplication and a couple xor's.
  // The result should be the same as running the whole algorithm, but faster.
  template <typename T, y_absl::enable_if_t<IntegralFastPath<T>::value, int> = 0>
  static size_t hash(T value) {
    return static_cast<size_t>(
        Mix(Seed(), static_cast<std::make_unsigned_t<T>>(value)));
  }

  // Overload of MixingHashState::hash()
  template <typename T, y_absl::enable_if_t<!IntegralFastPath<T>::value, int> = 0>
  static size_t hash(const T& value) {
    return static_cast<size_t>(combine(MixingHashState{}, value).state_);
  }

 private:
  // Invoked only once for a given argument; that plus the fact that this is
  // move-only ensures that there is only one non-moved-from object.
  MixingHashState() : state_(Seed()) {}

  friend class MixingHashState::HashStateBase;

  template <typename CombinerT>
  static MixingHashState RunCombineUnordered(MixingHashState state,
                                             CombinerT combiner) {
    uint64_t unordered_state = 0;
    combiner(MixingHashState{}, [&](MixingHashState& inner_state) {
      // Add the hash state of the element to the running total, but mix the
      // carry bit back into the low bit. This is intended to avoid losing
      // entropy to overflow, especially when unordered_multisets contain
      // multiple copies of the same value.
      auto element_state = inner_state.state_;
      unordered_state += element_state;
      if (unordered_state < element_state) {
        ++unordered_state;
      }
      inner_state = MixingHashState{};
    });
    return MixingHashState::combine(std::move(state), unordered_state);
  }

  // Allow the HashState type-erasure implementation to invoke
  // RunCombineUnordered() directly.
  friend class y_absl::HashState;

  // Workaround for MSVC bug.
  // We make the type copyable to fix the calling convention, even though we
  // never actually copy it. Keep it private to not affect the public API of
  // the type.
  MixingHashState(const MixingHashState&) = default;

  explicit MixingHashState(uint64_t state) : state_(state) {}

  // Implementation of the base case for combine_contiguous where we actually
  // mix the bytes into the state.
  // Dispatch to different implementations of the combine_contiguous depending
  // on the value of `sizeof(size_t)`.
  static uint64_t CombineContiguousImpl(uint64_t state,
                                        const unsigned char* first, size_t len,
                                        std::integral_constant<int, 4>
                                        /* sizeof_size_t */);
  static uint64_t CombineContiguousImpl(uint64_t state,
                                        const unsigned char* first, size_t len,
                                        std::integral_constant<int, 8>
                                        /* sizeof_size_t */);

  // Slow dispatch path for calls to CombineContiguousImpl with a size argument
  // larger than PiecewiseChunkSize(). Has the same effect as calling
  // CombineContiguousImpl() repeatedly with the chunk stride size.
  static uint64_t CombineLargeContiguousImpl32(uint64_t state,
                                               const unsigned char* first,
                                               size_t len);
  static uint64_t CombineLargeContiguousImpl64(uint64_t state,
                                               const unsigned char* first,
                                               size_t len);

  // Reads 9 to 16 bytes from p.
  // The least significant 8 bytes are in .first, the rest (zero padded) bytes
  // are in .second.
  static std::pair<uint64_t, uint64_t> Read9To16(const unsigned char* p,
                                                 size_t len) {
    uint64_t low_mem = y_absl::base_internal::UnalignedLoad64(p);
    uint64_t high_mem = y_absl::base_internal::UnalignedLoad64(p + len - 8);
#ifdef Y_ABSL_IS_LITTLE_ENDIAN
    uint64_t most_significant = high_mem;
    uint64_t least_significant = low_mem;
#else
    uint64_t most_significant = low_mem;
    uint64_t least_significant = high_mem;
#endif
    return {least_significant, most_significant};
  }

  // Reads 4 to 8 bytes from p. Zero pads to fill uint64_t.
  static uint64_t Read4To8(const unsigned char* p, size_t len) {
    uint32_t low_mem = y_absl::base_internal::UnalignedLoad32(p);
    uint32_t high_mem = y_absl::base_internal::UnalignedLoad32(p + len - 4);
#ifdef Y_ABSL_IS_LITTLE_ENDIAN
    uint32_t most_significant = high_mem;
    uint32_t least_significant = low_mem;
#else
    uint32_t most_significant = low_mem;
    uint32_t least_significant = high_mem;
#endif
    return (static_cast<uint64_t>(most_significant) << (len - 4) * 8) |
           least_significant;
  }

  // Reads 1 to 3 bytes from p. Zero pads to fill uint32_t.
  static uint32_t Read1To3(const unsigned char* p, size_t len) {
    // The trick used by this implementation is to avoid branches if possible.
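    // For example, with len == 2 we read mem0 == p[0], mem1 == p[1], and
    // mem2 == p[1]: the last byte is read twice instead of branching on the
    // length, and both reads of p[1] land on the same shifted byte position
    // in the result below, so the duplicate contributes nothing extra.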
    unsigned char mem0 = p[0];
    unsigned char mem1 = p[len / 2];
    unsigned char mem2 = p[len - 1];
#ifdef Y_ABSL_IS_LITTLE_ENDIAN
    unsigned char significant2 = mem2;
    unsigned char significant1 = mem1;
    unsigned char significant0 = mem0;
#else
    unsigned char significant2 = mem0;
    unsigned char significant1 = len == 2 ? mem0 : mem1;
    unsigned char significant0 = mem2;
#endif
    return static_cast<uint32_t>(significant0 |                     //
                                 (significant1 << (len / 2 * 8)) |  //
                                 (significant2 << ((len - 1) * 8)));
  }

  Y_ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Mix(uint64_t state,
                                                     uint64_t v) {
    // Though the 128-bit product on AArch64 needs two instructions, it is
    // still a good balance between speed and hash quality.
    using MultType =
        y_absl::conditional_t<sizeof(size_t) == 4, uint64_t, uint128>;
    // We do the addition in 64-bit space to make sure the 128-bit
    // multiplication is fast. If we were to do it as MultType the compiler has
    // to assume that the high word is non-zero and needs to perform 2
    // multiplications instead of one.
    MultType m = state + v;
    m *= kMul;
    return static_cast<uint64_t>(m ^ (m >> (sizeof(m) * 8 / 2)));
  }

  // An extern to avoid bloat on a direct call to LowLevelHash() with fixed
  // values for both the seed and salt parameters.
  static uint64_t LowLevelHashImpl(const unsigned char* data, size_t len);

  Y_ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Hash64(
      const unsigned char* data, size_t len) {
#ifdef Y_ABSL_HAVE_INTRINSIC_INT128
    return LowLevelHashImpl(data, len);
#else
    return hash_internal::CityHash64(reinterpret_cast<const char*>(data), len);
#endif
  }

  // Seed()
  //
  // A non-deterministic seed.
  //
  // The current purpose of this seed is to generate non-deterministic results
  // and prevent having users depend on the particular hash values.
  // It is not meant as a security feature right now, but it leaves the door
  // open to upgrade it to a true per-process random seed. A true random seed
  // costs more and we don't need to pay for that right now.
  //
  // On platforms with ASLR, we take advantage of it to make a per-process
  // random value.
  // See https://en.wikipedia.org/wiki/Address_space_layout_randomization
  //
  // On other platforms this is still going to be non-deterministic but most
  // probably per-build and not per-process.
  Y_ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Seed() {
#if (!defined(__clang__) || __clang_major__ > 11) && \
    (!defined(__apple_build_version__) ||            \
     __apple_build_version__ >= 19558921)  // Xcode 12
    return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(&kSeed));
#else
    // Workaround the absence of
    // https://github.com/llvm/llvm-project/commit/bc15bf66dcca76cc06fe71fca35b74dc4d521021.
    return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(kSeed));
#endif
  }
  static const void* const kSeed;

  uint64_t state_;
};

// MixingHashState::CombineContiguousImpl()
inline uint64_t MixingHashState::CombineContiguousImpl(
    uint64_t state, const unsigned char* first, size_t len,
    std::integral_constant<int, 4> /* sizeof_size_t */) {
  // For large values we use CityHash, for small ones we just use a
  // multiplicative hash.
  uint64_t v;
  if (len > 8) {
    if (Y_ABSL_PREDICT_FALSE(len > PiecewiseChunkSize())) {
      return CombineLargeContiguousImpl32(state, first, len);
    }
    v = hash_internal::CityHash32(reinterpret_cast<const char*>(first), len);
  } else if (len >= 4) {
    v = Read4To8(first, len);
  } else if (len > 0) {
    v = Read1To3(first, len);
  } else {
    // Empty ranges have no effect.
    return state;
  }

  return Mix(state, v);
}

// Overload of MixingHashState::CombineContiguousImpl()
inline uint64_t MixingHashState::CombineContiguousImpl(
    uint64_t state, const unsigned char* first, size_t len,
    std::integral_constant<int, 8> /* sizeof_size_t */) {
  // For large values we use LowLevelHash or CityHash depending on the
  // platform, for small ones we just use a multiplicative hash.
  uint64_t v;
  if (len > 16) {
    if (Y_ABSL_PREDICT_FALSE(len > PiecewiseChunkSize())) {
      return CombineLargeContiguousImpl64(state, first, len);
    }
    v = Hash64(first, len);
  } else if (len > 8) {
    // This hash function was constructed by ML-driven algorithm discovery
    // using reinforcement learning. We fed the agent lots of inputs from
    // microbenchmarks, SMHasher, and generated inputs with low Hamming
    // distance, and picked the candidate that performed well on both micro-
    // and macro-benchmarks.
    auto p = Read9To16(first, len);
    uint64_t lo = p.first;
    uint64_t hi = p.second;
    // Rotation by 53 was found to be most often useful when discovering these
    // hashing algorithms with ML techniques.
    lo = y_absl::rotr(lo, 53);
    state += kMul;
    lo += state;
    state ^= hi;
    uint128 m = state;
    m *= lo;
    return static_cast<uint64_t>(m ^ (m >> 64));
  } else if (len >= 4) {
    v = Read4To8(first, len);
  } else if (len > 0) {
    v = Read1To3(first, len);
  } else {
    // Empty ranges have no effect.
    return state;
  }

  return Mix(state, v);
}

struct AggregateBarrier {};

// HashImpl

// Add a private base class to make sure this type is not an aggregate.
// Aggregates can be aggregate initialized even if the default constructor is
// deleted.
struct PoisonedHash : private AggregateBarrier {
  PoisonedHash() = delete;
  PoisonedHash(const PoisonedHash&) = delete;
  PoisonedHash& operator=(const PoisonedHash&) = delete;
};

template <typename T>
struct HashImpl {
  size_t operator()(const T& value) const {
    return MixingHashState::hash(value);
  }
};

template <typename T>
struct Hash
    : y_absl::conditional_t<is_hashable<T>::value, HashImpl<T>, PoisonedHash> {
};

template <typename H>
template <typename T, typename... Ts>
H HashStateBase<H>::combine(H state, const T& value, const Ts&... values) {
  return H::combine(hash_internal::HashSelect::template Apply<T>::Invoke(
                        std::move(state), value),
                    values...);
}

// HashStateBase::combine_contiguous()
template <typename H>
template <typename T>
H HashStateBase<H>::combine_contiguous(H state, const T* data, size_t size) {
  return hash_internal::hash_range_or_bytes(std::move(state), data, size);
}

// HashStateBase::combine_unordered()
template <typename H>
template <typename I>
H HashStateBase<H>::combine_unordered(H state, I begin, I end) {
  return H::RunCombineUnordered(std::move(state),
                                CombineUnorderedCallback<I>{begin, end});
}

// PiecewiseCombiner::add_buffer()
template <typename H>
H PiecewiseCombiner::add_buffer(H state, const unsigned char* data,
                                size_t size) {
  if (position_ + size < PiecewiseChunkSize()) {
    // This partial chunk does not fill our existing buffer
    memcpy(buf_ + position_, data, size);
    position_ += size;
    return state;
  }

  // If the buffer is partially filled we need to complete the buffer
  // and hash it.
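  // For example (illustrative numbers): with PiecewiseChunkSize() == 1024,
  // position_ == 1000 and size == 100, we need 24 bytes to complete the
  // buffered chunk; we copy them in, hash the full 1024-byte buffer, and
  // continue below with the remaining 76 bytes of input.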
  if (position_ != 0) {
    const size_t bytes_needed = PiecewiseChunkSize() - position_;
    memcpy(buf_ + position_, data, bytes_needed);
    state = H::combine_contiguous(std::move(state), buf_, PiecewiseChunkSize());
    data += bytes_needed;
    size -= bytes_needed;
  }

  // Hash whatever chunks we can without copying
  while (size >= PiecewiseChunkSize()) {
    state = H::combine_contiguous(std::move(state), data, PiecewiseChunkSize());
    data += PiecewiseChunkSize();
    size -= PiecewiseChunkSize();
  }
  // Fill the buffer with the remainder
  memcpy(buf_, data, size);
  position_ = size;
  return state;
}

// PiecewiseCombiner::finalize()
template <typename H>
H PiecewiseCombiner::finalize(H state) {
  // Hash the remainder left in the buffer, which may be empty
  return H::combine_contiguous(std::move(state), buf_, position_);
}

}  // namespace hash_internal
Y_ABSL_NAMESPACE_END
}  // namespace y_absl

#endif  // Y_ABSL_HASH_INTERNAL_HASH_H_