intreadwrite.h

/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVUTIL_INTREADWRITE_H
#define AVUTIL_INTREADWRITE_H

#include <stdint.h>
#include "config.h"
#include "bswap.h"
#include "common.h"

typedef union {
    uint64_t u64;
    uint32_t u32[2];
    uint16_t u16[4];
    uint8_t  u8 [8];
    double   f64;
    float    f32[2];
} av_alias av_alias64;

typedef union {
    uint32_t u32;
    uint16_t u16[2];
    uint8_t  u8 [4];
    float    f32;
} av_alias av_alias32;

typedef union {
    uint16_t u16;
    uint8_t  u8 [2];
} av_alias av_alias16;
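/*
 * Illustrative sketch (editor's note, not part of this header's API): the
 * av_alias unions above let differently typed accesses to the same bytes go
 * through a union carrying the av_alias (may_alias) attribute, which keeps
 * such type punning well defined under GCC/Clang strict aliasing. For
 * example, the bit pattern of a float could be inspected like this:
 *
 *     float f = 1.0f;
 *     const av_alias32 *a = (const av_alias32 *)&f;
 *     uint32_t bits = a->u32;   // 0x3f800000 on IEEE-754 targets
 */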
/*
 * Arch-specific headers can provide any combination of
 * AV_[RW][BLN](16|24|32|64) and AV_(COPY|SWAP|ZERO)(64|128) macros.
 * Preprocessor symbols must be defined, even if these are implemented
 * as inline functions.
 */

#if   ARCH_ARM
#   include "arm/intreadwrite.h"
#elif ARCH_AVR32
#   include "avr32/intreadwrite.h"
#elif ARCH_MIPS
#   include "mips/intreadwrite.h"
#elif ARCH_PPC
#   include "ppc/intreadwrite.h"
#elif ARCH_TOMI
#   include "tomi/intreadwrite.h"
#elif ARCH_X86
#   include "x86/intreadwrite.h"
#endif
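/*
 * Illustrative sketch of the contract described above (hypothetical arch
 * header, not shipped with this file): an accessor may be implemented as an
 * inline function, but a preprocessor symbol with the same name must still
 * be defined so the generic "#ifndef AV_..." fallbacks below are skipped.
 *
 *     static av_always_inline uint32_t my_arch_rl32(const void *p)
 *     {
 *         return *(const uint32_t *)p;   // e.g. an unaligned-safe load
 *     }
 *     #define AV_RL32(p) my_arch_rl32(p)
 */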
/*
 * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers.
 */

#if HAVE_BIGENDIAN

#   if    defined(AV_RN16) && !defined(AV_RB16)
#       define AV_RB16(p) AV_RN16(p)
#   elif !defined(AV_RN16) &&  defined(AV_RB16)
#       define AV_RN16(p) AV_RB16(p)
#   endif

#   if    defined(AV_WN16) && !defined(AV_WB16)
#       define AV_WB16(p, v) AV_WN16(p, v)
#   elif !defined(AV_WN16) &&  defined(AV_WB16)
#       define AV_WN16(p, v) AV_WB16(p, v)
#   endif

#   if    defined(AV_RN24) && !defined(AV_RB24)
#       define AV_RB24(p) AV_RN24(p)
#   elif !defined(AV_RN24) &&  defined(AV_RB24)
#       define AV_RN24(p) AV_RB24(p)
#   endif

#   if    defined(AV_WN24) && !defined(AV_WB24)
#       define AV_WB24(p, v) AV_WN24(p, v)
#   elif !defined(AV_WN24) &&  defined(AV_WB24)
#       define AV_WN24(p, v) AV_WB24(p, v)
#   endif

#   if    defined(AV_RN32) && !defined(AV_RB32)
#       define AV_RB32(p) AV_RN32(p)
#   elif !defined(AV_RN32) &&  defined(AV_RB32)
#       define AV_RN32(p) AV_RB32(p)
#   endif

#   if    defined(AV_WN32) && !defined(AV_WB32)
#       define AV_WB32(p, v) AV_WN32(p, v)
#   elif !defined(AV_WN32) &&  defined(AV_WB32)
#       define AV_WN32(p, v) AV_WB32(p, v)
#   endif

#   if    defined(AV_RN64) && !defined(AV_RB64)
#       define AV_RB64(p) AV_RN64(p)
#   elif !defined(AV_RN64) &&  defined(AV_RB64)
#       define AV_RN64(p) AV_RB64(p)
#   endif

#   if    defined(AV_WN64) && !defined(AV_WB64)
#       define AV_WB64(p, v) AV_WN64(p, v)
#   elif !defined(AV_WN64) &&  defined(AV_WB64)
#       define AV_WN64(p, v) AV_WB64(p, v)
#   endif

#else /* HAVE_BIGENDIAN */

#   if    defined(AV_RN16) && !defined(AV_RL16)
#       define AV_RL16(p) AV_RN16(p)
#   elif !defined(AV_RN16) &&  defined(AV_RL16)
#       define AV_RN16(p) AV_RL16(p)
#   endif

#   if    defined(AV_WN16) && !defined(AV_WL16)
#       define AV_WL16(p, v) AV_WN16(p, v)
#   elif !defined(AV_WN16) &&  defined(AV_WL16)
#       define AV_WN16(p, v) AV_WL16(p, v)
#   endif

#   if    defined(AV_RN24) && !defined(AV_RL24)
#       define AV_RL24(p) AV_RN24(p)
#   elif !defined(AV_RN24) &&  defined(AV_RL24)
#       define AV_RN24(p) AV_RL24(p)
#   endif

#   if    defined(AV_WN24) && !defined(AV_WL24)
#       define AV_WL24(p, v) AV_WN24(p, v)
#   elif !defined(AV_WN24) &&  defined(AV_WL24)
#       define AV_WN24(p, v) AV_WL24(p, v)
#   endif

#   if    defined(AV_RN32) && !defined(AV_RL32)
#       define AV_RL32(p) AV_RN32(p)
#   elif !defined(AV_RN32) &&  defined(AV_RL32)
#       define AV_RN32(p) AV_RL32(p)
#   endif

#   if    defined(AV_WN32) && !defined(AV_WL32)
#       define AV_WL32(p, v) AV_WN32(p, v)
#   elif !defined(AV_WN32) &&  defined(AV_WL32)
#       define AV_WN32(p, v) AV_WL32(p, v)
#   endif

#   if    defined(AV_RN64) && !defined(AV_RL64)
#       define AV_RL64(p) AV_RN64(p)
#   elif !defined(AV_RN64) &&  defined(AV_RL64)
#       define AV_RN64(p) AV_RL64(p)
#   endif

#   if    defined(AV_WN64) && !defined(AV_WL64)
#       define AV_WL64(p, v) AV_WN64(p, v)
#   elif !defined(AV_WN64) &&  defined(AV_WL64)
#       define AV_WN64(p, v) AV_WL64(p, v)
#   endif

#endif /* !HAVE_BIGENDIAN */
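/*
 * Informal recap of the naming scheme (editor's note): AV_RBxx / AV_WBxx
 * always use big-endian byte order, AV_RLxx / AV_WLxx always use
 * little-endian byte order, and AV_RNxx / AV_WNxx use the host's native
 * order. Given the bytes { 0x12, 0x34, 0x56, 0x78 } at p:
 *
 *     AV_RB32(p) == 0x12345678
 *     AV_RL32(p) == 0x78563412
 *     AV_RN32(p) == one of the two, depending on HAVE_BIGENDIAN
 */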
/*
 * Define AV_[RW]N helper macros to simplify definitions not provided
 * by per-arch headers.
 */

#if HAVE_ATTRIBUTE_PACKED

union unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias;
union unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias;
union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias;

#   define AV_RN(s, p) (((const union unaligned_##s *) (p))->l)
#   define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v))

#elif defined(__DECC)

#   define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p)))
#   define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v))

#elif HAVE_FAST_UNALIGNED

#   define AV_RN(s, p) (((const av_alias##s*)(p))->u##s)
#   define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v))

#else

#ifndef AV_RB16
#   define AV_RB16(x)                  \
    ((((const uint8_t*)(x))[0] << 8) | \
      ((const uint8_t*)(x))[1])
#endif
#ifndef AV_WB16
#   define AV_WB16(p, d) do {     \
        ((uint8_t*)(p))[1] = (d); \
        ((uint8_t*)(p))[0] = (d)>>8; \
    } while(0)
#endif

#ifndef AV_RL16
#   define AV_RL16(x)                  \
    ((((const uint8_t*)(x))[1] << 8) | \
      ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL16
#   define AV_WL16(p, d) do {        \
        ((uint8_t*)(p))[0] = (d);    \
        ((uint8_t*)(p))[1] = (d)>>8; \
    } while(0)
#endif

#ifndef AV_RB32
#   define AV_RB32(x)                   \
    ((((const uint8_t*)(x))[0] << 24) | \
     (((const uint8_t*)(x))[1] << 16) | \
     (((const uint8_t*)(x))[2] <<  8) | \
      ((const uint8_t*)(x))[3])
#endif
#ifndef AV_WB32
#   define AV_WB32(p, d) do {         \
        ((uint8_t*)(p))[3] = (d);     \
        ((uint8_t*)(p))[2] = (d)>>8;  \
        ((uint8_t*)(p))[1] = (d)>>16; \
        ((uint8_t*)(p))[0] = (d)>>24; \
    } while(0)
#endif

#ifndef AV_RL32
#   define AV_RL32(x)                   \
    ((((const uint8_t*)(x))[3] << 24) | \
     (((const uint8_t*)(x))[2] << 16) | \
     (((const uint8_t*)(x))[1] <<  8) | \
      ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL32
#   define AV_WL32(p, d) do {         \
        ((uint8_t*)(p))[0] = (d);     \
        ((uint8_t*)(p))[1] = (d)>>8;  \
        ((uint8_t*)(p))[2] = (d)>>16; \
        ((uint8_t*)(p))[3] = (d)>>24; \
    } while(0)
#endif

#ifndef AV_RB64
#   define AV_RB64(x)                             \
    (((uint64_t)((const uint8_t*)(x))[0] << 56) | \
     ((uint64_t)((const uint8_t*)(x))[1] << 48) | \
     ((uint64_t)((const uint8_t*)(x))[2] << 40) | \
     ((uint64_t)((const uint8_t*)(x))[3] << 32) | \
     ((uint64_t)((const uint8_t*)(x))[4] << 24) | \
     ((uint64_t)((const uint8_t*)(x))[5] << 16) | \
     ((uint64_t)((const uint8_t*)(x))[6] <<  8) | \
      (uint64_t)((const uint8_t*)(x))[7])
#endif
#ifndef AV_WB64
#   define AV_WB64(p, d) do {         \
        ((uint8_t*)(p))[7] = (d);     \
        ((uint8_t*)(p))[6] = (d)>>8;  \
        ((uint8_t*)(p))[5] = (d)>>16; \
        ((uint8_t*)(p))[4] = (d)>>24; \
        ((uint8_t*)(p))[3] = (d)>>32; \
        ((uint8_t*)(p))[2] = (d)>>40; \
        ((uint8_t*)(p))[1] = (d)>>48; \
        ((uint8_t*)(p))[0] = (d)>>56; \
    } while(0)
#endif

#ifndef AV_RL64
#   define AV_RL64(x)                             \
    (((uint64_t)((const uint8_t*)(x))[7] << 56) | \
     ((uint64_t)((const uint8_t*)(x))[6] << 48) | \
     ((uint64_t)((const uint8_t*)(x))[5] << 40) | \
     ((uint64_t)((const uint8_t*)(x))[4] << 32) | \
     ((uint64_t)((const uint8_t*)(x))[3] << 24) | \
     ((uint64_t)((const uint8_t*)(x))[2] << 16) | \
     ((uint64_t)((const uint8_t*)(x))[1] <<  8) | \
      (uint64_t)((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL64
#   define AV_WL64(p, d) do {         \
        ((uint8_t*)(p))[0] = (d);     \
        ((uint8_t*)(p))[1] = (d)>>8;  \
        ((uint8_t*)(p))[2] = (d)>>16; \
        ((uint8_t*)(p))[3] = (d)>>24; \
        ((uint8_t*)(p))[4] = (d)>>32; \
        ((uint8_t*)(p))[5] = (d)>>40; \
        ((uint8_t*)(p))[6] = (d)>>48; \
        ((uint8_t*)(p))[7] = (d)>>56; \
    } while(0)
#endif

#if HAVE_BIGENDIAN
#   define AV_RN(s, p)    AV_RB##s(p)
#   define AV_WN(s, p, v) AV_WB##s(p, v)
#else
#   define AV_RN(s, p)    AV_RL##s(p)
#   define AV_WN(s, p, v) AV_WL##s(p, v)
#endif

#endif /* HAVE_FAST_UNALIGNED */

#ifndef AV_RN16
#   define AV_RN16(p) AV_RN(16, p)
#endif

#ifndef AV_RN32
#   define AV_RN32(p) AV_RN(32, p)
#endif

#ifndef AV_RN64
#   define AV_RN64(p) AV_RN(64, p)
#endif

#ifndef AV_WN16
#   define AV_WN16(p, v) AV_WN(16, p, v)
#endif

#ifndef AV_WN32
#   define AV_WN32(p, v) AV_WN(32, p, v)
#endif

#ifndef AV_WN64
#   define AV_WN64(p, v) AV_WN(64, p, v)
#endif
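/*
 * Usage sketch (editor's note, hypothetical buffer names): AV_RNxx / AV_WNxx
 * are intended for data that may be unaligned, e.g. reading a 32-bit field
 * at an arbitrary byte offset inside a packet buffer:
 *
 *     const uint8_t *buf = pkt_data;   // pkt_data: some byte buffer
 *     uint32_t v = AV_RN32(buf + 3);   // offset 3: not 4-byte aligned
 *     AV_WN32(dst + 1, v);             // unaligned store is also fine
 */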
#if HAVE_BIGENDIAN
#   define AV_RB(s, p)    AV_RN##s(p)
#   define AV_WB(s, p, v) AV_WN##s(p, v)
#   define AV_RL(s, p)    bswap_##s(AV_RN##s(p))
#   define AV_WL(s, p, v) AV_WN##s(p, bswap_##s(v))
#else
#   define AV_RB(s, p)    bswap_##s(AV_RN##s(p))
#   define AV_WB(s, p, v) AV_WN##s(p, bswap_##s(v))
#   define AV_RL(s, p)    AV_RN##s(p)
#   define AV_WL(s, p, v) AV_WN##s(p, v)
#endif

#define AV_RB8(x)     (((const uint8_t*)(x))[0])
#define AV_WB8(p, d)  do { ((uint8_t*)(p))[0] = (d); } while(0)

#define AV_RL8(x)     AV_RB8(x)
#define AV_WL8(p, d)  AV_WB8(p, d)

#ifndef AV_RB16
#   define AV_RB16(p)    AV_RB(16, p)
#endif
#ifndef AV_WB16
#   define AV_WB16(p, v) AV_WB(16, p, v)
#endif

#ifndef AV_RL16
#   define AV_RL16(p)    AV_RL(16, p)
#endif
#ifndef AV_WL16
#   define AV_WL16(p, v) AV_WL(16, p, v)
#endif

#ifndef AV_RB32
#   define AV_RB32(p)    AV_RB(32, p)
#endif
#ifndef AV_WB32
#   define AV_WB32(p, v) AV_WB(32, p, v)
#endif

#ifndef AV_RL32
#   define AV_RL32(p)    AV_RL(32, p)
#endif
#ifndef AV_WL32
#   define AV_WL32(p, v) AV_WL(32, p, v)
#endif

#ifndef AV_RB64
#   define AV_RB64(p)    AV_RB(64, p)
#endif
#ifndef AV_WB64
#   define AV_WB64(p, v) AV_WB(64, p, v)
#endif

#ifndef AV_RL64
#   define AV_RL64(p)    AV_RL(64, p)
#endif
#ifndef AV_WL64
#   define AV_WL64(p, v) AV_WL(64, p, v)
#endif

#ifndef AV_RB24
#   define AV_RB24(x)                   \
    ((((const uint8_t*)(x))[0] << 16) | \
     (((const uint8_t*)(x))[1] <<  8) | \
      ((const uint8_t*)(x))[2])
#endif
#ifndef AV_WB24
#   define AV_WB24(p, d) do {         \
        ((uint8_t*)(p))[2] = (d);     \
        ((uint8_t*)(p))[1] = (d)>>8;  \
        ((uint8_t*)(p))[0] = (d)>>16; \
    } while(0)
#endif

#ifndef AV_RL24
#   define AV_RL24(x)                   \
    ((((const uint8_t*)(x))[2] << 16) | \
     (((const uint8_t*)(x))[1] <<  8) | \
      ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL24
#   define AV_WL24(p, d) do {         \
        ((uint8_t*)(p))[0] = (d);     \
        ((uint8_t*)(p))[1] = (d)>>8;  \
        ((uint8_t*)(p))[2] = (d)>>16; \
    } while(0)
#endif

/*
 * The AV_[RW]NA macros access naturally aligned data
 * in a type-safe way.
 */

#define AV_RNA(s, p)    (((const av_alias##s*)(p))->u##s)
#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v))

#ifndef AV_RN16A
#   define AV_RN16A(p) AV_RNA(16, p)
#endif

#ifndef AV_RN32A
#   define AV_RN32A(p) AV_RNA(32, p)
#endif

#ifndef AV_RN64A
#   define AV_RN64A(p) AV_RNA(64, p)
#endif

#ifndef AV_WN16A
#   define AV_WN16A(p, v) AV_WNA(16, p, v)
#endif

#ifndef AV_WN32A
#   define AV_WN32A(p, v) AV_WNA(32, p, v)
#endif

#ifndef AV_WN64A
#   define AV_WN64A(p, v) AV_WNA(64, p, v)
#endif
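/*
 * Usage sketch (editor's note): the *A variants require natural alignment;
 * passing an unaligned pointer is undefined. A uint64_t object is naturally
 * aligned for a 64-bit access, so for example:
 *
 *     uint64_t tmp;                        // naturally aligned storage
 *     AV_WN64A(&tmp, 0x0123456789abcdefULL);
 *     uint64_t x = AV_RN64A(&tmp);
 */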
/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be
 * naturally aligned. They may be implemented using MMX,
 * so emms_c() must be called before using any float code
 * afterwards.
 */

#define AV_COPY(n, d, s) \
    (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n)

#ifndef AV_COPY16
#   define AV_COPY16(d, s) AV_COPY(16, d, s)
#endif

#ifndef AV_COPY32
#   define AV_COPY32(d, s) AV_COPY(32, d, s)
#endif

#ifndef AV_COPY64
#   define AV_COPY64(d, s) AV_COPY(64, d, s)
#endif

#ifndef AV_COPY128
#   define AV_COPY128(d, s)                    \
    do {                                       \
        AV_COPY64(d, s);                       \
        AV_COPY64((char*)(d)+8, (char*)(s)+8); \
    } while(0)
#endif

#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b))

#ifndef AV_SWAP64
#   define AV_SWAP64(a, b) AV_SWAP(64, a, b)
#endif

#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0)

#ifndef AV_ZERO16
#   define AV_ZERO16(d) AV_ZERO(16, d)
#endif

#ifndef AV_ZERO32
#   define AV_ZERO32(d) AV_ZERO(32, d)
#endif

#ifndef AV_ZERO64
#   define AV_ZERO64(d) AV_ZERO(64, d)
#endif

#ifndef AV_ZERO128
#   define AV_ZERO128(d)         \
    do {                         \
        AV_ZERO64(d);            \
        AV_ZERO64((char*)(d)+8); \
    } while(0)
#endif
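/*
 * Usage sketch (editor's note, hypothetical buffer names): clearing 16
 * suitably aligned bytes and copying them elsewhere. Arch overrides of the
 * AV_COPY / AV_SWAP / AV_ZERO macros may use MMX registers, hence the
 * emms_c() caveat above before running float code afterwards:
 *
 *     AV_ZERO128(block);              // block: naturally aligned 16 bytes
 *     AV_COPY128(dst_block, block);   // dst_block likewise aligned
 *     emms_c();                       // if float code follows
 */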
#endif /* AVUTIL_INTREADWRITE_H */