
Restoring authorship annotation for <rdna@yandex-team.ru>. Commit 1 of 2.

rdna · 3 years ago
commit 7804d69d16

+ 14 - 14
build/ya.conf.json

@@ -3507,7 +3507,7 @@
                     "default": true
                 }
             ]
-        },
+        }, 
         "ag": {
             "tools": {
                 "ag": {
@@ -7024,20 +7024,20 @@
             },
             "executable": "ninja"
         },
-        "kwfeed": {
-            "formula": {
+        "kwfeed": { 
+            "formula": { 
                 "sandbox_id": 36077631,
-                "match": "KWFeed"
-            },
-            "executable": {
+                "match": "KWFeed" 
+            }, 
+            "executable": { 
                 "kwfeed": [
                     "kwfeed"
                 ],
                 "metaquery.sh": [
                     "metaquery.sh"
                 ]
-            }
-        },
+            } 
+        }, 
         "protobin_diff": {
             "formula": {
                 "sandbox_id": 28621470,
@@ -7082,16 +7082,16 @@
                 ]
             }
         },
-        "kwmqbuild": {
-            "formula": {
+        "kwmqbuild": { 
+            "formula": { 
                 "sandbox_id": 41067877,
-                "match": "KWMQBuild"
-            },
-            "executable": {
+                "match": "KWMQBuild" 
+            }, 
+            "executable": { 
                 "kwmqbuild": [
                     "kwmqbuild"
                 ]
-            }
+            } 
         },
         "gpt": {
             "formula": {

+ 25 - 25
contrib/libs/jemalloc/COPYING

@@ -1,27 +1,27 @@
-Unless otherwise specified, files in the jemalloc source distribution are
-subject to the following license:
---------------------------------------------------------------------------------
+Unless otherwise specified, files in the jemalloc source distribution are 
+subject to the following license: 
+-------------------------------------------------------------------------------- 
 Copyright (C) 2002-present Jason Evans <jasone@canonware.com>.
-All rights reserved.
-Copyright (C) 2007-2012 Mozilla Foundation.  All rights reserved.
+All rights reserved. 
+Copyright (C) 2007-2012 Mozilla Foundation.  All rights reserved. 
 Copyright (C) 2009-present Facebook, Inc.  All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-1. Redistributions of source code must retain the above copyright notice(s),
-   this list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright notice(s),
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS
-OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO
-EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
-ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------------
+ 
+Redistribution and use in source and binary forms, with or without 
+modification, are permitted provided that the following conditions are met: 
+1. Redistributions of source code must retain the above copyright notice(s), 
+   this list of conditions and the following disclaimer. 
+2. Redistributions in binary form must reproduce the above copyright notice(s), 
+   this list of conditions and the following disclaimer in the documentation 
+   and/or other materials provided with the distribution. 
+ 
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS 
+OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO 
+EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT, 
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+-------------------------------------------------------------------------------- 

+ 13 - 13
contrib/libs/jemalloc/README

@@ -1,20 +1,20 @@
-jemalloc is a general purpose malloc(3) implementation that emphasizes
-fragmentation avoidance and scalable concurrency support.  jemalloc first came
-into use as the FreeBSD libc allocator in 2005, and since then it has found its
-way into numerous applications that rely on its predictable behavior.  In 2010
-jemalloc development efforts broadened to include developer support features
+jemalloc is a general purpose malloc(3) implementation that emphasizes 
+fragmentation avoidance and scalable concurrency support.  jemalloc first came 
+into use as the FreeBSD libc allocator in 2005, and since then it has found its 
+way into numerous applications that rely on its predictable behavior.  In 2010 
+jemalloc development efforts broadened to include developer support features 
 such as heap profiling and extensive monitoring/tuning hooks.  Modern jemalloc
 releases continue to be integrated back into FreeBSD, and therefore versatility
 remains critical.  Ongoing development efforts trend toward making jemalloc
 among the best allocators for a broad range of demanding applications, and
 eliminating/mitigating weaknesses that have practical repercussions for real
 world applications.
-
-The COPYING file contains copyright and licensing information.
-
-The INSTALL file contains information on how to configure, build, and install
-jemalloc.
-
-The ChangeLog file contains a brief summary of changes for each release.
-
+ 
+The COPYING file contains copyright and licensing information. 
+ 
+The INSTALL file contains information on how to configure, build, and install 
+jemalloc. 
+ 
+The ChangeLog file contains a brief summary of changes for each release. 
+ 
 URL: http://jemalloc.net/

+ 18 - 18
contrib/libs/jemalloc/include/jemalloc/internal/atomic.h

@@ -1,8 +1,8 @@
 #ifndef JEMALLOC_INTERNAL_ATOMIC_H
 #define JEMALLOC_INTERNAL_ATOMIC_H
-
+ 
 #define ATOMIC_INLINE JEMALLOC_ALWAYS_INLINE
-
+ 
 #define JEMALLOC_U8_ATOMICS
 #if defined(JEMALLOC_GCC_ATOMIC_ATOMICS)
 #  include "jemalloc/internal/atomic_gcc_atomic.h"
@@ -21,7 +21,7 @@
 #else
 #  error "Don't have atomics implemented on this platform."
 #endif
-
+ 
 /*
  * This header gives more or less a backport of C11 atomics. The user can write
  * JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_sizeof_type); to generate
@@ -40,7 +40,7 @@
  *   atomic_fence(atomic_memory_order_t) (mimics C11's atomic_thread_fence).
  *   ATOMIC_INIT (mimics C11's ATOMIC_VAR_INIT).
  */
-
+ 
 /*
  * Pure convenience, so that we don't have to type "atomic_memory_order_"
  * quite so often.
@@ -50,37 +50,37 @@
 #define ATOMIC_RELEASE atomic_memory_order_release
 #define ATOMIC_ACQ_REL atomic_memory_order_acq_rel
 #define ATOMIC_SEQ_CST atomic_memory_order_seq_cst
-
+ 
 /*
  * Not all platforms have 64-bit atomics.  If we do, this #define exposes that
  * fact.
  */
-#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
+#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) 
 #  define JEMALLOC_ATOMIC_U64
-#endif
-
+#endif 
+ 
 JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR)
-
+ 
 /*
  * There's no actual guarantee that sizeof(bool) == 1, but it's true on the only
  * platform that actually needs to know the size, MSVC.
  */
 JEMALLOC_GENERATE_ATOMICS(bool, b, 0)
-
+ 
 JEMALLOC_GENERATE_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)
-
+ 
 JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)
-
+ 
 JEMALLOC_GENERATE_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)
-
+ 
 JEMALLOC_GENERATE_INT_ATOMICS(uint8_t, u8, 0)
-
+ 
 JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2)
-
+ 
 #ifdef JEMALLOC_ATOMIC_U64
 JEMALLOC_GENERATE_INT_ATOMICS(uint64_t, u64, 3)
-#endif
-
+#endif 
+ 
 #undef ATOMIC_INLINE
-
+ 
 #endif /* JEMALLOC_INTERNAL_ATOMIC_H */

+ 103 - 103
contrib/libs/jemalloc/include/jemalloc/internal/bitmap.h

@@ -1,13 +1,13 @@
 #ifndef JEMALLOC_INTERNAL_BITMAP_H
 #define JEMALLOC_INTERNAL_BITMAP_H
-
+ 
 #include "jemalloc/internal/arena_types.h"
 #include "jemalloc/internal/bit_util.h"
 #include "jemalloc/internal/sc.h"
-
-typedef unsigned long bitmap_t;
+ 
+typedef unsigned long bitmap_t; 
 #define LG_SIZEOF_BITMAP	LG_SIZEOF_LONG
-
+ 
 /* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
 #if LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES)
 /* Maximum bitmap bit count is determined by maximum regions per slab. */
@@ -18,11 +18,11 @@ typedef unsigned long bitmap_t;
 #endif
 #define BITMAP_MAXBITS		(ZU(1) << LG_BITMAP_MAXBITS)
 
-/* Number of bits per group. */
+/* Number of bits per group. */ 
 #define LG_BITMAP_GROUP_NBITS		(LG_SIZEOF_BITMAP + 3)
 #define BITMAP_GROUP_NBITS		(1U << LG_BITMAP_GROUP_NBITS)
 #define BITMAP_GROUP_NBITS_MASK		(BITMAP_GROUP_NBITS-1)
-
+ 
 /*
  * Do some analysis on how big the bitmap is before we use a tree.  For a brute
  * force linear search, if we would have to call ffs_lu() more than 2^3 times,
@@ -31,11 +31,11 @@ typedef unsigned long bitmap_t;
 #if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
 #  define BITMAP_USE_TREE
 #endif
-
+ 
 /* Number of groups required to store a given number of bits. */
 #define BITMAP_BITS2GROUPS(nbits)					\
     (((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS)
-
+ 
 /*
  * Number of groups required at a particular level for a given number of bits.
  */
@@ -145,40 +145,40 @@ typedef unsigned long bitmap_t;
 #endif /* BITMAP_USE_TREE */
 
 typedef struct bitmap_level_s {
-	/* Offset of this level's groups within the array of groups. */
-	size_t group_offset;
+	/* Offset of this level's groups within the array of groups. */ 
+	size_t group_offset; 
 } bitmap_level_t;
-
+ 
 typedef struct bitmap_info_s {
-	/* Logical number of bits in bitmap (stored at bottom level). */
-	size_t nbits;
-
+	/* Logical number of bits in bitmap (stored at bottom level). */ 
+	size_t nbits; 
+ 
 #ifdef BITMAP_USE_TREE
-	/* Number of levels necessary for nbits. */
-	unsigned nlevels;
-
-	/*
-	 * Only the first (nlevels+1) elements are used, and levels are ordered
-	 * bottom to top (e.g. the bottom level is stored in levels[0]).
-	 */
-	bitmap_level_t levels[BITMAP_MAX_LEVELS+1];
+	/* Number of levels necessary for nbits. */ 
+	unsigned nlevels; 
+ 
+	/* 
+	 * Only the first (nlevels+1) elements are used, and levels are ordered 
+	 * bottom to top (e.g. the bottom level is stored in levels[0]). 
+	 */ 
+	bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; 
 #else /* BITMAP_USE_TREE */
 	/* Number of groups necessary for nbits. */
 	size_t ngroups;
 #endif /* BITMAP_USE_TREE */
 } bitmap_info_t;
-
+ 
 void bitmap_info_init(bitmap_info_t *binfo, size_t nbits);
 void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill);
 size_t bitmap_size(const bitmap_info_t *binfo);
-
+ 
 static inline bool
 bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) {
 #ifdef BITMAP_USE_TREE
 	size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
-	bitmap_t rg = bitmap[rgoff];
-	/* The bitmap is full iff the root group is 0. */
-	return (rg == 0);
+	bitmap_t rg = bitmap[rgoff]; 
+	/* The bitmap is full iff the root group is 0. */ 
+	return (rg == 0); 
 #else
 	size_t i;
 
@@ -189,54 +189,54 @@ bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) {
 	}
 	return true;
 #endif
-}
-
+} 
+ 
 static inline bool
 bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
-	size_t goff;
-	bitmap_t g;
-
-	assert(bit < binfo->nbits);
-	goff = bit >> LG_BITMAP_GROUP_NBITS;
-	g = bitmap[goff];
+	size_t goff; 
+	bitmap_t g; 
+ 
+	assert(bit < binfo->nbits); 
+	goff = bit >> LG_BITMAP_GROUP_NBITS; 
+	g = bitmap[goff]; 
 	return !(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
-}
-
+} 
+ 
 static inline void
 bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
-	size_t goff;
-	bitmap_t *gp;
-	bitmap_t g;
-
-	assert(bit < binfo->nbits);
+	size_t goff; 
+	bitmap_t *gp; 
+	bitmap_t g; 
+ 
+	assert(bit < binfo->nbits); 
 	assert(!bitmap_get(bitmap, binfo, bit));
-	goff = bit >> LG_BITMAP_GROUP_NBITS;
-	gp = &bitmap[goff];
-	g = *gp;
+	goff = bit >> LG_BITMAP_GROUP_NBITS; 
+	gp = &bitmap[goff]; 
+	g = *gp; 
 	assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
 	g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
-	*gp = g;
-	assert(bitmap_get(bitmap, binfo, bit));
+	*gp = g; 
+	assert(bitmap_get(bitmap, binfo, bit)); 
 #ifdef BITMAP_USE_TREE
-	/* Propagate group state transitions up the tree. */
-	if (g == 0) {
-		unsigned i;
-		for (i = 1; i < binfo->nlevels; i++) {
-			bit = goff;
-			goff = bit >> LG_BITMAP_GROUP_NBITS;
-			gp = &bitmap[binfo->levels[i].group_offset + goff];
-			g = *gp;
+	/* Propagate group state transitions up the tree. */ 
+	if (g == 0) { 
+		unsigned i; 
+		for (i = 1; i < binfo->nlevels; i++) { 
+			bit = goff; 
+			goff = bit >> LG_BITMAP_GROUP_NBITS; 
+			gp = &bitmap[binfo->levels[i].group_offset + goff]; 
+			g = *gp; 
 			assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
 			g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
-			*gp = g;
+			*gp = g; 
 			if (g != 0) {
-				break;
+				break; 
 			}
-		}
-	}
+		} 
+	} 
 #endif
-}
-
+} 
+ 
 /* ffu: find first unset >= bit. */
 static inline size_t
 bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
@@ -296,24 +296,24 @@ bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
 #endif
 }
 
-/* sfu: set first unset. */
+/* sfu: set first unset. */ 
 static inline size_t
 bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
-	size_t bit;
-	bitmap_t g;
-	unsigned i;
-
+	size_t bit; 
+	bitmap_t g; 
+	unsigned i; 
+ 
 	assert(!bitmap_full(bitmap, binfo));
-
+ 
 #ifdef BITMAP_USE_TREE
-	i = binfo->nlevels - 1;
-	g = bitmap[binfo->levels[i].group_offset];
+	i = binfo->nlevels - 1; 
+	g = bitmap[binfo->levels[i].group_offset]; 
 	bit = ffs_lu(g) - 1;
-	while (i > 0) {
-		i--;
-		g = bitmap[binfo->levels[i].group_offset + bit];
+	while (i > 0) { 
+		i--; 
+		g = bitmap[binfo->levels[i].group_offset + bit]; 
 		bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
-	}
+	} 
 #else
 	i = 0;
 	g = bitmap[0];
@@ -323,47 +323,47 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
 	}
 	bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
 #endif
-	bitmap_set(bitmap, binfo, bit);
+	bitmap_set(bitmap, binfo, bit); 
 	return bit;
-}
-
+} 
+ 
 static inline void
 bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
-	size_t goff;
-	bitmap_t *gp;
-	bitmap_t g;
+	size_t goff; 
+	bitmap_t *gp; 
+	bitmap_t g; 
 	UNUSED bool propagate;
-
-	assert(bit < binfo->nbits);
-	assert(bitmap_get(bitmap, binfo, bit));
-	goff = bit >> LG_BITMAP_GROUP_NBITS;
-	gp = &bitmap[goff];
-	g = *gp;
-	propagate = (g == 0);
+ 
+	assert(bit < binfo->nbits); 
+	assert(bitmap_get(bitmap, binfo, bit)); 
+	goff = bit >> LG_BITMAP_GROUP_NBITS; 
+	gp = &bitmap[goff]; 
+	g = *gp; 
+	propagate = (g == 0); 
 	assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
 	g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
-	*gp = g;
+	*gp = g; 
 	assert(!bitmap_get(bitmap, binfo, bit));
 #ifdef BITMAP_USE_TREE
-	/* Propagate group state transitions up the tree. */
-	if (propagate) {
-		unsigned i;
-		for (i = 1; i < binfo->nlevels; i++) {
-			bit = goff;
-			goff = bit >> LG_BITMAP_GROUP_NBITS;
-			gp = &bitmap[binfo->levels[i].group_offset + goff];
-			g = *gp;
-			propagate = (g == 0);
+	/* Propagate group state transitions up the tree. */ 
+	if (propagate) { 
+		unsigned i; 
+		for (i = 1; i < binfo->nlevels; i++) { 
+			bit = goff; 
+			goff = bit >> LG_BITMAP_GROUP_NBITS; 
+			gp = &bitmap[binfo->levels[i].group_offset + goff]; 
+			g = *gp; 
+			propagate = (g == 0); 
 			assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))
-			    == 0);
+			    == 0); 
 			g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
-			*gp = g;
+			*gp = g; 
 			if (!propagate) {
-				break;
+				break; 
 			}
-		}
-	}
+		} 
+	} 
 #endif /* BITMAP_USE_TREE */
-}
-
+} 
+ 
 #endif /* JEMALLOC_INTERNAL_BITMAP_H */

+ 37 - 37
contrib/libs/jemalloc/include/jemalloc/internal/ckh.h

@@ -1,74 +1,74 @@
 #ifndef JEMALLOC_INTERNAL_CKH_H
 #define JEMALLOC_INTERNAL_CKH_H
-
+ 
 #include "jemalloc/internal/tsd.h"
-
+ 
 /* Cuckoo hashing implementation.  Skip to the end for the interface. */
-
+ 
 /******************************************************************************/
 /* INTERNAL DEFINITIONS -- IGNORE */
 /******************************************************************************/
 
-/* Maintain counters used to get an idea of performance. */
+/* Maintain counters used to get an idea of performance. */ 
 /* #define CKH_COUNT */
-/* Print counter values in ckh_delete() (requires CKH_COUNT). */
+/* Print counter values in ckh_delete() (requires CKH_COUNT). */ 
 /* #define CKH_VERBOSE */
-
-/*
- * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket.  Try to fit
- * one bucket per L1 cache line.
- */
+ 
+/* 
+ * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket.  Try to fit 
+ * one bucket per L1 cache line. 
+ */ 
 #define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
-
+ 
 /* Typedefs to allow easy function pointer passing. */
 typedef void ckh_hash_t (const void *, size_t[2]);
 typedef bool ckh_keycomp_t (const void *, const void *);
-
-/* Hash table cell. */
+ 
+/* Hash table cell. */ 
 typedef struct {
 	const void *key;
 	const void *data;
 } ckhc_t;
-
+ 
 /* The hash table itself. */
 typedef struct {
-#ifdef CKH_COUNT
-	/* Counters used to get an idea of performance. */
+#ifdef CKH_COUNT 
+	/* Counters used to get an idea of performance. */ 
 	uint64_t ngrows;
 	uint64_t nshrinks;
 	uint64_t nshrinkfails;
 	uint64_t ninserts;
 	uint64_t nrelocs;
-#endif
-
-	/* Used for pseudo-random number generation. */
+#endif 
+ 
+	/* Used for pseudo-random number generation. */ 
 	uint64_t prng_state;
-
-	/* Total number of items. */
+ 
+	/* Total number of items. */ 
 	size_t count;
-
-	/*
-	 * Minimum and current number of hash table buckets.  There are
-	 * 2^LG_CKH_BUCKET_CELLS cells per bucket.
-	 */
+ 
+	/* 
+	 * Minimum and current number of hash table buckets.  There are 
+	 * 2^LG_CKH_BUCKET_CELLS cells per bucket. 
+	 */ 
 	unsigned lg_minbuckets;
 	unsigned lg_curbuckets;
-
-	/* Hash and comparison functions. */
+ 
+	/* Hash and comparison functions. */ 
 	ckh_hash_t *hash;
 	ckh_keycomp_t *keycomp;
-
-	/* Hash table with 2^lg_curbuckets buckets. */
+ 
+	/* Hash table with 2^lg_curbuckets buckets. */ 
 	ckhc_t *tab;
 } ckh_t;
-
-/******************************************************************************/
+ 
+/******************************************************************************/ 
 /* BEGIN PUBLIC API */
 /******************************************************************************/
-
+ 
 /* Lifetime management.  Minitems is the initial capacity. */
 bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
-    ckh_keycomp_t *keycomp);
+    ckh_keycomp_t *keycomp); 
 void ckh_delete(tsd_t *tsd, ckh_t *ckh);
 
 /* Get the number of elements in the set. */
@@ -89,13 +89,13 @@ bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
  */
 bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
 bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
-    void **data);
+    void **data); 
 bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
-
+ 
 /* Some useful hash and comparison functions for strings and pointers. */
 void ckh_string_hash(const void *key, size_t r_hash[2]);
 bool ckh_string_keycomp(const void *k1, const void *k2);
 void ckh_pointer_hash(const void *key, size_t r_hash[2]);
 bool ckh_pointer_keycomp(const void *k1, const void *k2);
-
+ 
 #endif /* JEMALLOC_INTERNAL_CKH_H */

+ 41 - 41
contrib/libs/jemalloc/include/jemalloc/internal/ctl.h

@@ -1,52 +1,52 @@
 #ifndef JEMALLOC_INTERNAL_CTL_H
 #define JEMALLOC_INTERNAL_CTL_H
-
+ 
 #include "jemalloc/internal/jemalloc_internal_types.h"
 #include "jemalloc/internal/malloc_io.h"
 #include "jemalloc/internal/mutex_prof.h"
 #include "jemalloc/internal/ql.h"
 #include "jemalloc/internal/sc.h"
 #include "jemalloc/internal/stats.h"
-
+ 
 /* Maximum ctl tree depth. */
 #define CTL_MAX_DEPTH	7
-
+ 
 typedef struct ctl_node_s {
 	bool named;
 } ctl_node_t;
-
+ 
 typedef struct ctl_named_node_s {
 	ctl_node_t node;
 	const char *name;
-	/* If (nchildren == 0), this is a terminal node. */
+	/* If (nchildren == 0), this is a terminal node. */ 
 	size_t nchildren;
 	const ctl_node_t *children;
 	int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *,
 	    size_t);
 } ctl_named_node_t;
-
+ 
 typedef struct ctl_indexed_node_s {
 	struct ctl_node_s node;
 	const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
 	    size_t);
 } ctl_indexed_node_t;
-
+ 
 typedef struct ctl_arena_stats_s {
 	arena_stats_t astats;
-
-	/* Aggregate stats for small size classes, based on bin stats. */
+ 
+	/* Aggregate stats for small size classes, based on bin stats. */ 
 	size_t allocated_small;
 	uint64_t nmalloc_small;
 	uint64_t ndalloc_small;
 	uint64_t nrequests_small;
 	uint64_t nfills_small;
 	uint64_t nflushes_small;
-
+ 
 	bin_stats_t bstats[SC_NBINS];
 	arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];
 	arena_stats_extents_t estats[SC_NPSIZES];
 } ctl_arena_stats_t;
-
+ 
 typedef struct ctl_stats_s {
 	size_t allocated;
 	size_t active;
@@ -77,13 +77,13 @@ struct ctl_arena_s {
 
 	/* NULL if !config_stats. */
 	ctl_arena_stats_t *astats;
-};
-
+}; 
+ 
 typedef struct ctl_arenas_s {
 	uint64_t epoch;
 	unsigned narenas;
 	ql_head(ctl_arena_t) destroyed;
-
+ 
 	/*
 	 * Element 0 corresponds to merged stats for extant arenas (accessed via
 	 * MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for
@@ -92,11 +92,11 @@ typedef struct ctl_arenas_s {
 	 */
 	ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT];
 } ctl_arenas_t;
-
+ 
 int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
-    void *newp, size_t newlen);
+    void *newp, size_t newlen); 
 int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp);
-
+ 
 int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
     size_t *oldlenp, void *newp, size_t newlen);
 bool ctl_boot(void);
@@ -105,30 +105,30 @@ void ctl_postfork_parent(tsdn_t *tsdn);
 void ctl_postfork_child(tsdn_t *tsdn);
 
 #define xmallctl(name, oldp, oldlenp, newp, newlen) do {		\
-	if (je_mallctl(name, oldp, oldlenp, newp, newlen)		\
-	    != 0) {							\
-		malloc_printf(						\
-		    "<jemalloc>: Failure in xmallctl(\"%s\", ...)\n",	\
-		    name);						\
-		abort();						\
-	}								\
-} while (0)
-
+	if (je_mallctl(name, oldp, oldlenp, newp, newlen)		\ 
+	    != 0) {							\ 
+		malloc_printf(						\ 
+		    "<jemalloc>: Failure in xmallctl(\"%s\", ...)\n",	\ 
+		    name);						\ 
+		abort();						\ 
+	}								\ 
+} while (0) 
+ 
 #define xmallctlnametomib(name, mibp, miblenp) do {			\
-	if (je_mallctlnametomib(name, mibp, miblenp) != 0) {		\
-		malloc_printf("<jemalloc>: Failure in "			\
-		    "xmallctlnametomib(\"%s\", ...)\n", name);		\
-		abort();						\
-	}								\
-} while (0)
-
+	if (je_mallctlnametomib(name, mibp, miblenp) != 0) {		\ 
+		malloc_printf("<jemalloc>: Failure in "			\ 
+		    "xmallctlnametomib(\"%s\", ...)\n", name);		\ 
+		abort();						\ 
+	}								\ 
+} while (0) 
+ 
 #define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do {	\
-	if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp,		\
-	    newlen) != 0) {						\
-		malloc_write(						\
-		    "<jemalloc>: Failure in xmallctlbymib()\n");	\
-		abort();						\
-	}								\
-} while (0)
-
+	if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp,		\ 
+	    newlen) != 0) {						\ 
+		malloc_write(						\ 
+		    "<jemalloc>: Failure in xmallctlbymib()\n");	\ 
+		abort();						\ 
+	}								\ 
+} while (0) 
+ 
 #endif /* JEMALLOC_INTERNAL_CTL_H */

+ 227 - 227
contrib/libs/jemalloc/include/jemalloc/internal/hash.h

@@ -3,272 +3,272 @@
 
 #include "jemalloc/internal/assert.h"
 
-/*
- * The following hash function is based on MurmurHash3, placed into the public
+/* 
+ * The following hash function is based on MurmurHash3, placed into the public 
  * domain by Austin Appleby.  See https://github.com/aappleby/smhasher for
- * details.
- */
-
-/******************************************************************************/
-/* Internal implementation. */
+ * details. 
+ */ 
+ 
+/******************************************************************************/ 
+/* Internal implementation. */ 
 static inline uint32_t
 hash_rotl_32(uint32_t x, int8_t r) {
 	return ((x << r) | (x >> (32 - r)));
-}
-
+} 
+ 
 static inline uint64_t
 hash_rotl_64(uint64_t x, int8_t r) {
 	return ((x << r) | (x >> (64 - r)));
-}
-
+} 
+ 
 static inline uint32_t
 hash_get_block_32(const uint32_t *p, int i) {
 	/* Handle unaligned read. */
 	if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
 		uint32_t ret;
-
+ 
 		memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
 		return ret;
 	}
 
 	return p[i];
-}
-
+} 
+ 
 static inline uint64_t
 hash_get_block_64(const uint64_t *p, int i) {
 	/* Handle unaligned read. */
 	if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
 		uint64_t ret;
-
+ 
 		memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
 		return ret;
 	}
 
 	return p[i];
-}
-
+} 
+ 
 static inline uint32_t
 hash_fmix_32(uint32_t h) {
-	h ^= h >> 16;
-	h *= 0x85ebca6b;
-	h ^= h >> 13;
-	h *= 0xc2b2ae35;
-	h ^= h >> 16;
-
+	h ^= h >> 16; 
+	h *= 0x85ebca6b; 
+	h ^= h >> 13; 
+	h *= 0xc2b2ae35; 
+	h ^= h >> 16; 
+ 
 	return h;
-}
-
+} 
+ 
 static inline uint64_t
 hash_fmix_64(uint64_t k) {
-	k ^= k >> 33;
+	k ^= k >> 33; 
 	k *= KQU(0xff51afd7ed558ccd);
-	k ^= k >> 33;
+	k ^= k >> 33; 
 	k *= KQU(0xc4ceb9fe1a85ec53);
-	k ^= k >> 33;
-
+	k ^= k >> 33; 
+ 
 	return k;
-}
-
+} 
+ 
 static inline uint32_t
 hash_x86_32(const void *key, int len, uint32_t seed) {
-	const uint8_t *data = (const uint8_t *) key;
-	const int nblocks = len / 4;
-
-	uint32_t h1 = seed;
-
-	const uint32_t c1 = 0xcc9e2d51;
-	const uint32_t c2 = 0x1b873593;
-
-	/* body */
-	{
-		const uint32_t *blocks = (const uint32_t *) (data + nblocks*4);
-		int i;
-
-		for (i = -nblocks; i; i++) {
-			uint32_t k1 = hash_get_block_32(blocks, i);
-
-			k1 *= c1;
-			k1 = hash_rotl_32(k1, 15);
-			k1 *= c2;
-
-			h1 ^= k1;
-			h1 = hash_rotl_32(h1, 13);
-			h1 = h1*5 + 0xe6546b64;
-		}
-	}
-
-	/* tail */
-	{
-		const uint8_t *tail = (const uint8_t *) (data + nblocks*4);
-
-		uint32_t k1 = 0;
-
-		switch (len & 3) {
+	const uint8_t *data = (const uint8_t *) key; 
+	const int nblocks = len / 4; 
+ 
+	uint32_t h1 = seed; 
+ 
+	const uint32_t c1 = 0xcc9e2d51; 
+	const uint32_t c2 = 0x1b873593; 
+ 
+	/* body */ 
+	{ 
+		const uint32_t *blocks = (const uint32_t *) (data + nblocks*4); 
+		int i; 
+ 
+		for (i = -nblocks; i; i++) { 
+			uint32_t k1 = hash_get_block_32(blocks, i); 
+ 
+			k1 *= c1; 
+			k1 = hash_rotl_32(k1, 15); 
+			k1 *= c2; 
+ 
+			h1 ^= k1; 
+			h1 = hash_rotl_32(h1, 13); 
+			h1 = h1*5 + 0xe6546b64; 
+		} 
+	} 
+ 
+	/* tail */ 
+	{ 
+		const uint8_t *tail = (const uint8_t *) (data + nblocks*4); 
+ 
+		uint32_t k1 = 0; 
+ 
+		switch (len & 3) { 
 		case 3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH
 		case 2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH
-		case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
-			k1 *= c2; h1 ^= k1;
-		}
-	}
-
-	/* finalization */
-	h1 ^= len;
-
-	h1 = hash_fmix_32(h1);
-
+		case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); 
+			k1 *= c2; h1 ^= k1; 
+		} 
+	} 
+ 
+	/* finalization */ 
+	h1 ^= len; 
+ 
+	h1 = hash_fmix_32(h1); 
+ 
 	return h1;
-}
-
+} 
+ 
 static inline void
-hash_x86_128(const void *key, const int len, uint32_t seed,
+hash_x86_128(const void *key, const int len, uint32_t seed, 
     uint64_t r_out[2]) {
-	const uint8_t * data = (const uint8_t *) key;
-	const int nblocks = len / 16;
-
-	uint32_t h1 = seed;
-	uint32_t h2 = seed;
-	uint32_t h3 = seed;
-	uint32_t h4 = seed;
-
-	const uint32_t c1 = 0x239b961b;
-	const uint32_t c2 = 0xab0e9789;
-	const uint32_t c3 = 0x38b34ae5;
-	const uint32_t c4 = 0xa1e38b93;
-
-	/* body */
-	{
-		const uint32_t *blocks = (const uint32_t *) (data + nblocks*16);
-		int i;
-
-		for (i = -nblocks; i; i++) {
-			uint32_t k1 = hash_get_block_32(blocks, i*4 + 0);
-			uint32_t k2 = hash_get_block_32(blocks, i*4 + 1);
-			uint32_t k3 = hash_get_block_32(blocks, i*4 + 2);
-			uint32_t k4 = hash_get_block_32(blocks, i*4 + 3);
-
-			k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
-
-			h1 = hash_rotl_32(h1, 19); h1 += h2;
-			h1 = h1*5 + 0x561ccd1b;
-
-			k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
-
-			h2 = hash_rotl_32(h2, 17); h2 += h3;
-			h2 = h2*5 + 0x0bcaa747;
-
-			k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
-
-			h3 = hash_rotl_32(h3, 15); h3 += h4;
-			h3 = h3*5 + 0x96cd1c35;
-
-			k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
-
-			h4 = hash_rotl_32(h4, 13); h4 += h1;
-			h4 = h4*5 + 0x32ac3b17;
-		}
-	}
-
-	/* tail */
-	{
-		const uint8_t *tail = (const uint8_t *) (data + nblocks*16);
-		uint32_t k1 = 0;
-		uint32_t k2 = 0;
-		uint32_t k3 = 0;
-		uint32_t k4 = 0;
-
-		switch (len & 15) {
+	const uint8_t * data = (const uint8_t *) key; 
+	const int nblocks = len / 16; 
+ 
+	uint32_t h1 = seed; 
+	uint32_t h2 = seed; 
+	uint32_t h3 = seed; 
+	uint32_t h4 = seed; 
+ 
+	const uint32_t c1 = 0x239b961b; 
+	const uint32_t c2 = 0xab0e9789; 
+	const uint32_t c3 = 0x38b34ae5; 
+	const uint32_t c4 = 0xa1e38b93; 
+ 
+	/* body */ 
+	{ 
+		const uint32_t *blocks = (const uint32_t *) (data + nblocks*16); 
+		int i; 
+ 
+		for (i = -nblocks; i; i++) { 
+			uint32_t k1 = hash_get_block_32(blocks, i*4 + 0); 
+			uint32_t k2 = hash_get_block_32(blocks, i*4 + 1); 
+			uint32_t k3 = hash_get_block_32(blocks, i*4 + 2); 
+			uint32_t k4 = hash_get_block_32(blocks, i*4 + 3); 
+ 
+			k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; 
+ 
+			h1 = hash_rotl_32(h1, 19); h1 += h2; 
+			h1 = h1*5 + 0x561ccd1b; 
+ 
+			k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; 
+ 
+			h2 = hash_rotl_32(h2, 17); h2 += h3; 
+			h2 = h2*5 + 0x0bcaa747; 
+ 
+			k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; 
+ 
+			h3 = hash_rotl_32(h3, 15); h3 += h4; 
+			h3 = h3*5 + 0x96cd1c35; 
+ 
+			k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; 
+ 
+			h4 = hash_rotl_32(h4, 13); h4 += h1; 
+			h4 = h4*5 + 0x32ac3b17; 
+		} 
+	} 
+ 
+	/* tail */ 
+	{ 
+		const uint8_t *tail = (const uint8_t *) (data + nblocks*16); 
+		uint32_t k1 = 0; 
+		uint32_t k2 = 0; 
+		uint32_t k3 = 0; 
+		uint32_t k4 = 0; 
+ 
+		switch (len & 15) { 
 		case 15: k4 ^= tail[14] << 16; JEMALLOC_FALLTHROUGH
 		case 14: k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH
-		case 13: k4 ^= tail[12] << 0;
-			k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
+		case 13: k4 ^= tail[12] << 0; 
+			k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; 
       JEMALLOC_FALLTHROUGH
 		case 12: k3 ^= tail[11] << 24; JEMALLOC_FALLTHROUGH
 		case 11: k3 ^= tail[10] << 16; JEMALLOC_FALLTHROUGH
 		case 10: k3 ^= tail[ 9] << 8; JEMALLOC_FALLTHROUGH
-		case  9: k3 ^= tail[ 8] << 0;
-		     k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
+		case  9: k3 ^= tail[ 8] << 0; 
+		     k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; 
          JEMALLOC_FALLTHROUGH
 		case  8: k2 ^= tail[ 7] << 24; JEMALLOC_FALLTHROUGH
 		case  7: k2 ^= tail[ 6] << 16; JEMALLOC_FALLTHROUGH
 		case  6: k2 ^= tail[ 5] << 8; JEMALLOC_FALLTHROUGH
-		case  5: k2 ^= tail[ 4] << 0;
-			k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
+		case  5: k2 ^= tail[ 4] << 0; 
+			k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; 
       JEMALLOC_FALLTHROUGH
 		case  4: k1 ^= tail[ 3] << 24; JEMALLOC_FALLTHROUGH
 		case  3: k1 ^= tail[ 2] << 16; JEMALLOC_FALLTHROUGH
 		case  2: k1 ^= tail[ 1] << 8; JEMALLOC_FALLTHROUGH
-		case  1: k1 ^= tail[ 0] << 0;
-			k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
+		case  1: k1 ^= tail[ 0] << 0; 
+			k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; 
       JEMALLOC_FALLTHROUGH
-		}
-	}
-
-	/* finalization */
-	h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;
-
-	h1 += h2; h1 += h3; h1 += h4;
-	h2 += h1; h3 += h1; h4 += h1;
-
-	h1 = hash_fmix_32(h1);
-	h2 = hash_fmix_32(h2);
-	h3 = hash_fmix_32(h3);
-	h4 = hash_fmix_32(h4);
-
-	h1 += h2; h1 += h3; h1 += h4;
-	h2 += h1; h3 += h1; h4 += h1;
-
-	r_out[0] = (((uint64_t) h2) << 32) | h1;
-	r_out[1] = (((uint64_t) h4) << 32) | h3;
-}
-
+		} 
+	} 
+ 
+	/* finalization */ 
+	h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len; 
+ 
+	h1 += h2; h1 += h3; h1 += h4; 
+	h2 += h1; h3 += h1; h4 += h1; 
+ 
+	h1 = hash_fmix_32(h1); 
+	h2 = hash_fmix_32(h2); 
+	h3 = hash_fmix_32(h3); 
+	h4 = hash_fmix_32(h4); 
+ 
+	h1 += h2; h1 += h3; h1 += h4; 
+	h2 += h1; h3 += h1; h4 += h1; 
+ 
+	r_out[0] = (((uint64_t) h2) << 32) | h1; 
+	r_out[1] = (((uint64_t) h4) << 32) | h3; 
+} 
+ 
 static inline void
-hash_x64_128(const void *key, const int len, const uint32_t seed,
+hash_x64_128(const void *key, const int len, const uint32_t seed, 
     uint64_t r_out[2]) {
-	const uint8_t *data = (const uint8_t *) key;
-	const int nblocks = len / 16;
-
-	uint64_t h1 = seed;
-	uint64_t h2 = seed;
-
+	const uint8_t *data = (const uint8_t *) key; 
+	const int nblocks = len / 16; 
+ 
+	uint64_t h1 = seed; 
+	uint64_t h2 = seed; 
+ 
 	const uint64_t c1 = KQU(0x87c37b91114253d5);
 	const uint64_t c2 = KQU(0x4cf5ad432745937f);
-
-	/* body */
-	{
-		const uint64_t *blocks = (const uint64_t *) (data);
-		int i;
-
-		for (i = 0; i < nblocks; i++) {
-			uint64_t k1 = hash_get_block_64(blocks, i*2 + 0);
-			uint64_t k2 = hash_get_block_64(blocks, i*2 + 1);
-
-			k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
-
-			h1 = hash_rotl_64(h1, 27); h1 += h2;
-			h1 = h1*5 + 0x52dce729;
-
-			k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
-
-			h2 = hash_rotl_64(h2, 31); h2 += h1;
-			h2 = h2*5 + 0x38495ab5;
-		}
-	}
-
-	/* tail */
-	{
-		const uint8_t *tail = (const uint8_t*)(data + nblocks*16);
-		uint64_t k1 = 0;
-		uint64_t k2 = 0;
-
-		switch (len & 15) {
+ 
+	/* body */ 
+	{ 
+		const uint64_t *blocks = (const uint64_t *) (data); 
+		int i; 
+ 
+		for (i = 0; i < nblocks; i++) { 
+			uint64_t k1 = hash_get_block_64(blocks, i*2 + 0); 
+			uint64_t k2 = hash_get_block_64(blocks, i*2 + 1); 
+ 
+			k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; 
+ 
+			h1 = hash_rotl_64(h1, 27); h1 += h2; 
+			h1 = h1*5 + 0x52dce729; 
+ 
+			k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; 
+ 
+			h2 = hash_rotl_64(h2, 31); h2 += h1; 
+			h2 = h2*5 + 0x38495ab5; 
+		} 
+	} 
+ 
+	/* tail */ 
+	{ 
+		const uint8_t *tail = (const uint8_t*)(data + nblocks*16); 
+		uint64_t k1 = 0; 
+		uint64_t k2 = 0; 
+ 
+		switch (len & 15) { 
 		case 15: k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH
 		case 14: k2 ^= ((uint64_t)(tail[13])) << 40; JEMALLOC_FALLTHROUGH
 		case 13: k2 ^= ((uint64_t)(tail[12])) << 32; JEMALLOC_FALLTHROUGH
 		case 12: k2 ^= ((uint64_t)(tail[11])) << 24; JEMALLOC_FALLTHROUGH
 		case 11: k2 ^= ((uint64_t)(tail[10])) << 16; JEMALLOC_FALLTHROUGH
 		case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8;  JEMALLOC_FALLTHROUGH
-		case  9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
-			k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
+		case  9: k2 ^= ((uint64_t)(tail[ 8])) << 0; 
+			k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; 
 			JEMALLOC_FALLTHROUGH
 		case  8: k1 ^= ((uint64_t)(tail[ 7])) << 56; JEMALLOC_FALLTHROUGH
 		case  7: k1 ^= ((uint64_t)(tail[ 6])) << 48; JEMALLOC_FALLTHROUGH
@@ -277,43 +277,43 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
 		case  4: k1 ^= ((uint64_t)(tail[ 3])) << 24; JEMALLOC_FALLTHROUGH
 		case  3: k1 ^= ((uint64_t)(tail[ 2])) << 16; JEMALLOC_FALLTHROUGH
 		case  2: k1 ^= ((uint64_t)(tail[ 1])) << 8;  JEMALLOC_FALLTHROUGH
-		case  1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
-			k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
-		}
-	}
-
-	/* finalization */
-	h1 ^= len; h2 ^= len;
-
-	h1 += h2;
-	h2 += h1;
-
-	h1 = hash_fmix_64(h1);
-	h2 = hash_fmix_64(h2);
-
-	h1 += h2;
-	h2 += h1;
-
-	r_out[0] = h1;
-	r_out[1] = h2;
-}
-
-/******************************************************************************/
-/* API. */
+		case  1: k1 ^= ((uint64_t)(tail[ 0])) << 0; 
+			k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; 
+		} 
+	} 
+ 
+	/* finalization */ 
+	h1 ^= len; h2 ^= len; 
+ 
+	h1 += h2; 
+	h2 += h1; 
+ 
+	h1 = hash_fmix_64(h1); 
+	h2 = hash_fmix_64(h2); 
+ 
+	h1 += h2; 
+	h2 += h1; 
+ 
+	r_out[0] = h1; 
+	r_out[1] = h2; 
+} 
+ 
+/******************************************************************************/ 
+/* API. */ 
 static inline void
 hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) {
 	assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
 
-#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
+#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN)) 
 	hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash);
-#else
+#else 
 	{
 		uint64_t hashes[2];
 		hash_x86_128(key, (int)len, seed, hashes);
 		r_hash[0] = (size_t)hashes[0];
 		r_hash[1] = (size_t)hashes[1];
 	}
-#endif
-}
-
+#endif 
+} 
+ 
 #endif /* JEMALLOC_INTERNAL_HASH_H */

+ 134 - 134
contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs-linux.h

@@ -1,15 +1,15 @@
-/* include/jemalloc/internal/jemalloc_internal_defs.h.  Generated from jemalloc_internal_defs.h.in by configure.  */
-#ifndef JEMALLOC_INTERNAL_DEFS_H_
+/* include/jemalloc/internal/jemalloc_internal_defs.h.  Generated from jemalloc_internal_defs.h.in by configure.  */ 
+#ifndef JEMALLOC_INTERNAL_DEFS_H_ 
 #define JEMALLOC_INTERNAL_DEFS_H_
-/*
- * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
- * public APIs to be prefixed.  This makes it possible, with some care, to use
- * multiple allocators simultaneously.
- */
-/* #undef JEMALLOC_PREFIX */
-/* #undef JEMALLOC_CPREFIX */
-
-/*
+/* 
+ * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all 
+ * public APIs to be prefixed.  This makes it possible, with some care, to use 
+ * multiple allocators simultaneously. 
+ */ 
+/* #undef JEMALLOC_PREFIX */ 
+/* #undef JEMALLOC_CPREFIX */ 
+ 
+/* 
  * Define overrides for non-standard allocator-related functions if they are
  * present on the system.
  */
@@ -22,17 +22,17 @@
 /* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */
 
 /*
- * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
- * For shared libraries, symbol visibility mechanisms prevent these symbols
- * from being exported, but for static libraries, naming collisions are a real
- * possibility.
- */
-#define JEMALLOC_PRIVATE_NAMESPACE je_
-
-/*
- * Hyper-threaded CPUs may need a special instruction inside spin loops in
- * order to yield to another virtual CPU.
- */
+ * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. 
+ * For shared libraries, symbol visibility mechanisms prevent these symbols 
+ * from being exported, but for static libraries, naming collisions are a real 
+ * possibility. 
+ */ 
+#define JEMALLOC_PRIVATE_NAMESPACE je_ 
+ 
+/* 
+ * Hyper-threaded CPUs may need a special instruction inside spin loops in 
+ * order to yield to another virtual CPU. 
+ */ 
 #if defined(__i386__) || defined(__amd64__)
 #define CPU_SPINWAIT __asm__ volatile("pause")
 /* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
@@ -41,14 +41,14 @@
 #define CPU_SPINWAIT
 #define HAVE_CPU_SPINWAIT 0
 #endif
-
+ 
 /*
  * Number of significant bits in virtual addresses.  This may be less than the
  * total number of bits in a pointer, e.g. on x64, for which the uppermost 16
  * bits are the same as bit 47.
  */
 #define LG_VADDR 48
-
+ 
 /* Defined if C11 atomics are available. */
 #define JEMALLOC_C11_ATOMICS 1
 
@@ -62,36 +62,36 @@
 /* and the 8-bit variant support. */
 #define JEMALLOC_GCC_U8_SYNC_ATOMICS 1
 
-/*
+/* 
  * Defined if __builtin_clz() and __builtin_clzl() are available.
- */
+ */ 
 #define JEMALLOC_HAVE_BUILTIN_CLZ 
-
-/*
+ 
+/* 
  * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
- */
+ */ 
 /* #undef JEMALLOC_OS_UNFAIR_LOCK */
-
+ 
 /* Defined if syscall(2) is usable. */
 #define JEMALLOC_USE_SYSCALL 
 
-/*
+/* 
  * Defined if secure_getenv(3) is available.
- */
+ */ 
 #define JEMALLOC_HAVE_SECURE_GETENV 
-
-/*
+ 
+/* 
  * Defined if issetugid(2) is available.
- */
+ */ 
 /* #undef JEMALLOC_HAVE_ISSETUGID */
-
+ 
 /* Defined if pthread_atfork(3) is available. */
 #define JEMALLOC_HAVE_PTHREAD_ATFORK 
 
 /* Defined if pthread_setname_np(3) is available. */
 /* #undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP */
 
-/*
+/* 
  * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
  */
 #define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1
@@ -107,98 +107,98 @@
 /* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */
 
 /*
- * Defined if _malloc_thread_cleanup() exists.  At least in the case of
- * FreeBSD, pthread_key_create() allocates, which if used during malloc
- * bootstrapping will cause recursion into the pthreads library.  Therefore, if
- * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
- * malloc_tsd.
- */
-/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */
-
-/*
- * Defined if threaded initialization is known to be safe on this platform.
- * Among other things, it must be possible to initialize a mutex without
- * triggering allocation in order for threaded allocation to be safe.
- */
+ * Defined if _malloc_thread_cleanup() exists.  At least in the case of 
+ * FreeBSD, pthread_key_create() allocates, which if used during malloc 
+ * bootstrapping will cause recursion into the pthreads library.  Therefore, if 
+ * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in 
+ * malloc_tsd. 
+ */ 
+/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */ 
+ 
+/* 
+ * Defined if threaded initialization is known to be safe on this platform. 
+ * Among other things, it must be possible to initialize a mutex without 
+ * triggering allocation in order for threaded allocation to be safe. 
+ */ 
 #define JEMALLOC_THREADED_INIT 
-
-/*
- * Defined if the pthreads implementation defines
- * _pthread_mutex_init_calloc_cb(), in which case the function is used in order
- * to avoid recursive allocation during mutex initialization.
- */
-/* #undef JEMALLOC_MUTEX_INIT_CB */
-
-/* Non-empty if the tls_model attribute is supported. */
+ 
+/* 
+ * Defined if the pthreads implementation defines 
+ * _pthread_mutex_init_calloc_cb(), in which case the function is used in order 
+ * to avoid recursive allocation during mutex initialization. 
+ */ 
+/* #undef JEMALLOC_MUTEX_INIT_CB */ 
+ 
+/* Non-empty if the tls_model attribute is supported. */ 
 #define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
-
-/*
- * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
- * inline functions.
- */
-/* #undef JEMALLOC_DEBUG */
-
-/* JEMALLOC_STATS enables statistics calculation. */
+ 
+/* 
+ * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables 
+ * inline functions. 
+ */ 
+/* #undef JEMALLOC_DEBUG */ 
+ 
+/* JEMALLOC_STATS enables statistics calculation. */ 
 #define JEMALLOC_STATS 
-
+ 
 /* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */
 /* #undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API */
 
-/* JEMALLOC_PROF enables allocation profiling. */
+/* JEMALLOC_PROF enables allocation profiling. */ 
 #define JEMALLOC_PROF 
-
-/* Use libunwind for profile backtracing if defined. */
+ 
+/* Use libunwind for profile backtracing if defined. */ 
 #define JEMALLOC_PROF_LIBUNWIND 
-
-/* Use libgcc for profile backtracing if defined. */
+ 
+/* Use libgcc for profile backtracing if defined. */ 
 /* #undef JEMALLOC_PROF_LIBGCC */
-
-/* Use gcc intrinsics for profile backtracing if defined. */
-/* #undef JEMALLOC_PROF_GCC */
-
-/*
+ 
+/* Use gcc intrinsics for profile backtracing if defined. */ 
+/* #undef JEMALLOC_PROF_GCC */ 
+ 
+/* 
  * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
- * segment (DSS).
- */
+ * segment (DSS). 
+ */ 
 #define JEMALLOC_DSS 
-
+ 
 /* Support memory filling (junk/zero). */
 #define JEMALLOC_FILL 
-
-/* Support utrace(2)-based tracing. */
-/* #undef JEMALLOC_UTRACE */
-
-/* Support optional abort() on OOM. */
-/* #undef JEMALLOC_XMALLOC */
-
-/* Support lazy locking (avoid locking unless a second thread is launched). */
-/* #undef JEMALLOC_LAZY_LOCK */
-
+ 
+/* Support utrace(2)-based tracing. */ 
+/* #undef JEMALLOC_UTRACE */ 
+ 
+/* Support optional abort() on OOM. */ 
+/* #undef JEMALLOC_XMALLOC */ 
+ 
+/* Support lazy locking (avoid locking unless a second thread is launched). */ 
+/* #undef JEMALLOC_LAZY_LOCK */ 
+ 
 /*
  * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
  * classes).
  */
 /* #undef LG_QUANTUM */
-
+ 
 /* One page is 2^LG_PAGE bytes. */
 #define LG_PAGE 12
 
-/*
+/* 
  * One huge page is 2^LG_HUGEPAGE bytes.  Note that this is defined even if the
  * system does not explicitly support huge pages; system calls that require
  * explicit huge page support are separately configured.
- */
+ */ 
 #define LG_HUGEPAGE 21
-
-/*
+ 
+/* 
  * If defined, adjacent virtual memory mappings with identical attributes
  * automatically coalesce, and they fragment when changes are made to subranges.
  * This is the normal order of things for mmap()/munmap(), but on Windows
  * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
  * mappings do *not* coalesce/fragment.
- */
+ */ 
 #define JEMALLOC_MAPS_COALESCE 
-
+ 
 /*
  * If defined, retain memory for later reuse by default rather than using e.g.
  * munmap() to unmap freed extents.  This is enabled on 64-bit Linux because
@@ -207,16 +207,16 @@
  */
 #define JEMALLOC_RETAIN 
 
-/* TLS is used to map arenas and magazine caches to threads. */
+/* TLS is used to map arenas and magazine caches to threads. */ 
 #define JEMALLOC_TLS 
-
-/*
+ 
+/* 
  * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
  * Don't use this directly; instead use unreachable() from util.h
- */
+ */ 
 #define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable
-
-/*
+ 
+/* 
  * ffs*() functions to use for bitmapping.  Don't use these directly; instead,
  * use ffs_*() from util.h.
  */
@@ -249,11 +249,11 @@
 /* #undef JEMALLOC_READLINKAT */
 
 /*
- * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
- */
-/* #undef JEMALLOC_ZONE */
-
-/*
+ * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. 
+ */ 
+/* #undef JEMALLOC_ZONE */ 
+ 
+/* 
  * Methods for determining whether the OS overcommits.
  * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
  *                                         /proc/sys/vm.overcommit_memory file.
@@ -272,28 +272,28 @@
 #define JEMALLOC_HAVE_MADVISE_HUGE 
 
 /*
- * Methods for purging unused pages differ between operating systems.
- *
+ * Methods for purging unused pages differ between operating systems. 
+ * 
  *   madvise(..., MADV_FREE) : This marks pages as being unused, such that they
  *                             will be discarded rather than swapped out.
  *   madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
  *                                 defined, this immediately discards pages,
- *                                 such that new pages will be demand-zeroed if
+ *                                 such that new pages will be demand-zeroed if 
  *                                 the address region is later touched;
  *                                 otherwise this behaves similarly to
  *                                 MADV_FREE, though typically with higher
  *                                 system overhead.
- */
+ */ 
 #define JEMALLOC_PURGE_MADVISE_FREE 
 #define JEMALLOC_PURGE_MADVISE_DONTNEED 
 #define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS 
-
+ 
 /* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
 #define JEMALLOC_DEFINE_MADVISE_FREE
 
-/*
+/* 
  * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
- */
+ */ 
 #define JEMALLOC_MADVISE_DONTDUMP 
 
 /*
@@ -303,26 +303,26 @@
 /* #undef JEMALLOC_THP */
 
 /* Define if operating system has alloca.h header. */
-#define JEMALLOC_HAS_ALLOCA_H 1
-
-/* C99 restrict keyword supported. */
-#define JEMALLOC_HAS_RESTRICT 1
-
-/* For use by hash code. */
-/* #undef JEMALLOC_BIG_ENDIAN */
-
-/* sizeof(int) == 2^LG_SIZEOF_INT. */
-#define LG_SIZEOF_INT 2
-
-/* sizeof(long) == 2^LG_SIZEOF_LONG. */
-#define LG_SIZEOF_LONG 3
-
+#define JEMALLOC_HAS_ALLOCA_H 1 
+ 
+/* C99 restrict keyword supported. */ 
+#define JEMALLOC_HAS_RESTRICT 1 
+ 
+/* For use by hash code. */ 
+/* #undef JEMALLOC_BIG_ENDIAN */ 
+ 
+/* sizeof(int) == 2^LG_SIZEOF_INT. */ 
+#define LG_SIZEOF_INT 2 
+ 
+/* sizeof(long) == 2^LG_SIZEOF_LONG. */ 
+#define LG_SIZEOF_LONG 3 
+ 
 /* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
 #define LG_SIZEOF_LONG_LONG 3
 
-/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
-#define LG_SIZEOF_INTMAX_T 3
-
+/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ 
+#define LG_SIZEOF_INTMAX_T 3 
+ 
 /* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
 #define JEMALLOC_GLIBC_MALLOC_HOOK 
 
@@ -369,4 +369,4 @@
 /* Performs additional safety checks when defined. */
 /* #undef JEMALLOC_OPT_SAFETY_CHECKS */
 
-#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
+#endif /* JEMALLOC_INTERNAL_DEFS_H_ */ 

+ 4 - 4
contrib/libs/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h

@@ -1,5 +1,5 @@
-#pragma once
-
+#pragma once 
+ 
 #if defined(__APPLE__) && defined(__arm64__)
 #   include "jemalloc_internal_defs-osx-arm64.h"
 #elif defined(__APPLE__)
@@ -7,5 +7,5 @@
 #elif defined(_MSC_VER)
 #   include "jemalloc_internal_defs-win.h"
 #else
-#   include "jemalloc_internal_defs-linux.h"
-#endif
+#   include "jemalloc_internal_defs-linux.h" 
+#endif 

Some files were not shown because too many files changed in this diff