/*
   BLAKE2 reference source code package - optimized C implementations

   Written in 2012 by Samuel Neves <sneves@dei.uc.pt>

   To the extent possible under law, the author(s) have dedicated all copyright
   and related and neighboring rights to this software to the public domain
   worldwide. This software is distributed without any warranty.

   You should have received a copy of the CC0 Public Domain Dedication along with
   this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
#pragma once
#ifndef __BLAKE2_IMPL_H__
#define __BLAKE2_IMPL_H__

#if defined(_WIN32) || defined(WIN32)
#include <windows.h>
#endif

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define BLAKE2_IMPL_CAT(x,y) x ## y
#define BLAKE2_IMPL_EVAL(x,y) BLAKE2_IMPL_CAT(x,y)
#define BLAKE2_IMPL_NAME(fun) BLAKE2_IMPL_EVAL(fun, SUFFIX)
  22. static inline uint32_t load32( const void *src )
  23. {
  24. #if defined(NATIVE_LITTLE_ENDIAN)
  25. uint32_t w;
  26. memcpy( &w, src, sizeof( w ) );
  27. return w;
  28. #else
  29. const uint8_t *p = ( uint8_t * )src;
  30. uint32_t w = *p++;
  31. w |= ( uint32_t )( *p++ ) << 8;
  32. w |= ( uint32_t )( *p++ ) << 16;
  33. w |= ( uint32_t )( *p++ ) << 24;
  34. return w;
  35. #endif
  36. }
  37. static inline uint64_t load64( const void *src )
  38. {
  39. #if defined(NATIVE_LITTLE_ENDIAN)
  40. uint64_t w;
  41. memcpy( &w, src, sizeof( w ) );
  42. return w;
  43. #else
  44. const uint8_t *p = ( uint8_t * )src;
  45. uint64_t w = *p++;
  46. w |= ( uint64_t )( *p++ ) << 8;
  47. w |= ( uint64_t )( *p++ ) << 16;
  48. w |= ( uint64_t )( *p++ ) << 24;
  49. w |= ( uint64_t )( *p++ ) << 32;
  50. w |= ( uint64_t )( *p++ ) << 40;
  51. w |= ( uint64_t )( *p++ ) << 48;
  52. w |= ( uint64_t )( *p++ ) << 56;
  53. return w;
  54. #endif
  55. }
  56. static inline void store32( void *dst, uint32_t w )
  57. {
  58. #if defined(NATIVE_LITTLE_ENDIAN)
  59. memcpy( dst, &w, sizeof( w ) );
  60. #else
  61. uint8_t *p = ( uint8_t * )dst;
  62. *p++ = ( uint8_t )w; w >>= 8;
  63. *p++ = ( uint8_t )w; w >>= 8;
  64. *p++ = ( uint8_t )w; w >>= 8;
  65. *p++ = ( uint8_t )w;
  66. #endif
  67. }
  68. static inline void store64( void *dst, uint64_t w )
  69. {
  70. #if defined(NATIVE_LITTLE_ENDIAN)
  71. memcpy( dst, &w, sizeof( w ) );
  72. #else
  73. uint8_t *p = ( uint8_t * )dst;
  74. *p++ = ( uint8_t )w; w >>= 8;
  75. *p++ = ( uint8_t )w; w >>= 8;
  76. *p++ = ( uint8_t )w; w >>= 8;
  77. *p++ = ( uint8_t )w; w >>= 8;
  78. *p++ = ( uint8_t )w; w >>= 8;
  79. *p++ = ( uint8_t )w; w >>= 8;
  80. *p++ = ( uint8_t )w; w >>= 8;
  81. *p++ = ( uint8_t )w;
  82. #endif
  83. }
  84. static inline uint64_t load48( const void *src )
  85. {
  86. const uint8_t *p = ( const uint8_t * )src;
  87. uint64_t w = *p++;
  88. w |= ( uint64_t )( *p++ ) << 8;
  89. w |= ( uint64_t )( *p++ ) << 16;
  90. w |= ( uint64_t )( *p++ ) << 24;
  91. w |= ( uint64_t )( *p++ ) << 32;
  92. w |= ( uint64_t )( *p++ ) << 40;
  93. return w;
  94. }
  95. static inline void store48( void *dst, uint64_t w )
  96. {
  97. uint8_t *p = ( uint8_t * )dst;
  98. *p++ = ( uint8_t )w; w >>= 8;
  99. *p++ = ( uint8_t )w; w >>= 8;
  100. *p++ = ( uint8_t )w; w >>= 8;
  101. *p++ = ( uint8_t )w; w >>= 8;
  102. *p++ = ( uint8_t )w; w >>= 8;
  103. *p++ = ( uint8_t )w;
  104. }
  105. static inline uint32_t rotl32( const uint32_t w, const unsigned c )
  106. {
  107. return ( w << c ) | ( w >> ( 32 - c ) );
  108. }
  109. static inline uint64_t rotl64( const uint64_t w, const unsigned c )
  110. {
  111. return ( w << c ) | ( w >> ( 64 - c ) );
  112. }
  113. static inline uint32_t rotr32( const uint32_t w, const unsigned c )
  114. {
  115. return ( w >> c ) | ( w << ( 32 - c ) );
  116. }
  117. static inline uint64_t rotr64( const uint64_t w, const unsigned c )
  118. {
  119. return ( w >> c ) | ( w << ( 64 - c ) );
  120. }
/* prevents compiler optimizing out memset() */
/* Zeroize n bytes at v (key material, internal state) in a way the
 * optimizer cannot elide as a dead store. A plain memset() before free
 * or scope exit may legally be removed under the as-if rule; each branch
 * below defeats that in a platform-specific way. */
static inline void secure_zero_memory(void *v, size_t n)
{
#if defined(_WIN32) || defined(WIN32)
/* Windows provides a guaranteed-not-optimized zeroing primitive. */
SecureZeroMemory(v, n);
#elif defined(__hpux)
/* Calling memset through a volatile function pointer forces the compiler
 * to assume the callee may be anything, so the call cannot be removed. */
static void *(*const volatile memset_v)(void *, int, size_t) = &memset;
memset_v(v, 0, n);
#else
// prioritize first the general C11 call
#if defined(HAVE_MEMSET_S)
/* C11 Annex K: memset_s is specified as unremovable by the optimizer. */
memset_s(v, n, 0, n);
#elif defined(HAVE_EXPLICIT_BZERO)
/* BSD/glibc extension with the same guarantee. */
explicit_bzero(v, n);
#elif defined(HAVE_EXPLICIT_MEMSET)
/* NetBSD extension with the same guarantee. */
explicit_memset(v, 0, n);
#else
/* Fallback: plain memset followed by an empty asm statement with a
 * "memory" clobber, which tells GCC/Clang the memory at v is observed
 * and so the preceding stores must be kept. */
memset(v, 0, n);
__asm__ __volatile__("" :: "r"(v) : "memory");
#endif
#endif
}
#endif