/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVUTIL_INTREADWRITE_H
#define AVUTIL_INTREADWRITE_H

#include <stdint.h>
#include "config.h"
#include "bswap.h"

/*
 * Arch-specific headers can provide any combination of
 * AV_[RW][BLN](16|24|32|64) and AV_(COPY|SWAP|ZERO)(64|128) macros.
 * Preprocessor symbols must be defined, even if these are implemented
 * as inline functions.
 */

#if   ARCH_ARM
#   include "arm/intreadwrite.h"
#elif ARCH_AVR32
#   include "avr32/intreadwrite.h"
#elif ARCH_MIPS
#   include "mips/intreadwrite.h"
#elif ARCH_PPC
#   include "ppc/intreadwrite.h"
#elif ARCH_X86
#   include "x86/intreadwrite.h"
#endif
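
/*
 * Naming convention (usage sketch, not part of the upstream sources): AV_R*
 * reads and AV_W* writes; B, L and N select big-endian, little-endian or
 * native byte order; the trailing number is the width in bits. For example,
 * parsing a hypothetical record that starts with a 16-bit little-endian id
 * followed by a 32-bit big-endian size, from an arbitrary byte pointer p:
 *
 *     unsigned id   = AV_RL16(p);     // bytes 0..1, little-endian
 *     unsigned size = AV_RB32(p + 2); // bytes 2..5, big-endian
 *     AV_WB32(out, size);             // write back in big-endian order
 */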

/*
 * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers.
 */

#if HAVE_BIGENDIAN

#   if  defined(AV_RN16) && !defined(AV_RB16)
#       define AV_RB16(p) AV_RN16(p)
#   elif !defined(AV_RN16) && defined(AV_RB16)
#       define AV_RN16(p) AV_RB16(p)
#   endif

#   if  defined(AV_WN16) && !defined(AV_WB16)
#       define AV_WB16(p, v) AV_WN16(p, v)
#   elif !defined(AV_WN16) && defined(AV_WB16)
#       define AV_WN16(p, v) AV_WB16(p, v)
#   endif

#   if  defined(AV_RN24) && !defined(AV_RB24)
#       define AV_RB24(p) AV_RN24(p)
#   elif !defined(AV_RN24) && defined(AV_RB24)
#       define AV_RN24(p) AV_RB24(p)
#   endif

#   if  defined(AV_WN24) && !defined(AV_WB24)
#       define AV_WB24(p, v) AV_WN24(p, v)
#   elif !defined(AV_WN24) && defined(AV_WB24)
#       define AV_WN24(p, v) AV_WB24(p, v)
#   endif

#   if  defined(AV_RN32) && !defined(AV_RB32)
#       define AV_RB32(p) AV_RN32(p)
#   elif !defined(AV_RN32) && defined(AV_RB32)
#       define AV_RN32(p) AV_RB32(p)
#   endif

#   if  defined(AV_WN32) && !defined(AV_WB32)
#       define AV_WB32(p, v) AV_WN32(p, v)
#   elif !defined(AV_WN32) && defined(AV_WB32)
#       define AV_WN32(p, v) AV_WB32(p, v)
#   endif

#   if  defined(AV_RN64) && !defined(AV_RB64)
#       define AV_RB64(p) AV_RN64(p)
#   elif !defined(AV_RN64) && defined(AV_RB64)
#       define AV_RN64(p) AV_RB64(p)
#   endif

#   if  defined(AV_WN64) && !defined(AV_WB64)
#       define AV_WB64(p, v) AV_WN64(p, v)
#   elif !defined(AV_WN64) && defined(AV_WB64)
#       define AV_WN64(p, v) AV_WB64(p, v)
#   endif

#else /* HAVE_BIGENDIAN */

#   if  defined(AV_RN16) && !defined(AV_RL16)
#       define AV_RL16(p) AV_RN16(p)
#   elif !defined(AV_RN16) && defined(AV_RL16)
#       define AV_RN16(p) AV_RL16(p)
#   endif

#   if  defined(AV_WN16) && !defined(AV_WL16)
#       define AV_WL16(p, v) AV_WN16(p, v)
#   elif !defined(AV_WN16) && defined(AV_WL16)
#       define AV_WN16(p, v) AV_WL16(p, v)
#   endif

#   if  defined(AV_RN24) && !defined(AV_RL24)
#       define AV_RL24(p) AV_RN24(p)
#   elif !defined(AV_RN24) && defined(AV_RL24)
#       define AV_RN24(p) AV_RL24(p)
#   endif

#   if  defined(AV_WN24) && !defined(AV_WL24)
#       define AV_WL24(p, v) AV_WN24(p, v)
#   elif !defined(AV_WN24) && defined(AV_WL24)
#       define AV_WN24(p, v) AV_WL24(p, v)
#   endif

#   if  defined(AV_RN32) && !defined(AV_RL32)
#       define AV_RL32(p) AV_RN32(p)
#   elif !defined(AV_RN32) && defined(AV_RL32)
#       define AV_RN32(p) AV_RL32(p)
#   endif

#   if  defined(AV_WN32) && !defined(AV_WL32)
#       define AV_WL32(p, v) AV_WN32(p, v)
#   elif !defined(AV_WN32) && defined(AV_WL32)
#       define AV_WN32(p, v) AV_WL32(p, v)
#   endif

#   if  defined(AV_RN64) && !defined(AV_RL64)
#       define AV_RL64(p) AV_RN64(p)
#   elif !defined(AV_RN64) && defined(AV_RL64)
#       define AV_RN64(p) AV_RL64(p)
#   endif

#   if  defined(AV_WN64) && !defined(AV_WL64)
#       define AV_WL64(p, v) AV_WN64(p, v)
#   elif !defined(AV_WN64) && defined(AV_WL64)
#       define AV_WN64(p, v) AV_WL64(p, v)
#   endif

#endif /* !HAVE_BIGENDIAN */
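
/*
 * Illustration of the mapping above (not part of the upstream sources): on a
 * big-endian target whose arch header provides only a fast AV_RN32, the block
 * above expands to
 *
 *     #define AV_RB32(p) AV_RN32(p)
 *
 * so big-endian reads reuse the fast native access, while AV_RL32 is built
 * further below from bswap_32(AV_RN32(p)).
 */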

/*
 * Define AV_[RW]N helper macros to simplify definitions not provided
 * by per-arch headers.
 */

#if   HAVE_ATTRIBUTE_PACKED

struct unaligned_64 { uint64_t l; } __attribute__((packed));
struct unaligned_32 { uint32_t l; } __attribute__((packed));
struct unaligned_16 { uint16_t l; } __attribute__((packed));

#   define AV_RN(s, p) (((const struct unaligned_##s *) (p))->l)
#   define AV_WN(s, p, v) ((((struct unaligned_##s *) (p))->l) = (v))

#elif defined(__DECC)

#   define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p)))
#   define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v))

#elif HAVE_FAST_UNALIGNED

#   define AV_RN(s, p) (*((const uint##s##_t*)(p)))
#   define AV_WN(s, p, v) (*((uint##s##_t*)(p)) = (v))

#else

#ifndef AV_RB16
#   define AV_RB16(x)                    \
    ((((const uint8_t*)(x))[0] << 8) |   \
      ((const uint8_t*)(x))[1])
#endif
#ifndef AV_WB16
#   define AV_WB16(p, d) do {            \
        ((uint8_t*)(p))[1] = (d);        \
        ((uint8_t*)(p))[0] = (d)>>8;     \
    } while(0)
#endif

#ifndef AV_RL16
#   define AV_RL16(x)                    \
    ((((const uint8_t*)(x))[1] << 8) |   \
      ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL16
#   define AV_WL16(p, d) do {            \
        ((uint8_t*)(p))[0] = (d);        \
        ((uint8_t*)(p))[1] = (d)>>8;     \
    } while(0)
#endif

#ifndef AV_RB32
#   define AV_RB32(x)                    \
    ((((const uint8_t*)(x))[0] << 24) |  \
     (((const uint8_t*)(x))[1] << 16) |  \
     (((const uint8_t*)(x))[2] <<  8) |  \
      ((const uint8_t*)(x))[3])
#endif
#ifndef AV_WB32
#   define AV_WB32(p, d) do {            \
        ((uint8_t*)(p))[3] = (d);        \
        ((uint8_t*)(p))[2] = (d)>>8;     \
        ((uint8_t*)(p))[1] = (d)>>16;    \
        ((uint8_t*)(p))[0] = (d)>>24;    \
    } while(0)
#endif

#ifndef AV_RL32
#   define AV_RL32(x)                    \
    ((((const uint8_t*)(x))[3] << 24) |  \
     (((const uint8_t*)(x))[2] << 16) |  \
     (((const uint8_t*)(x))[1] <<  8) |  \
      ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL32
#   define AV_WL32(p, d) do {            \
        ((uint8_t*)(p))[0] = (d);        \
        ((uint8_t*)(p))[1] = (d)>>8;     \
        ((uint8_t*)(p))[2] = (d)>>16;    \
        ((uint8_t*)(p))[3] = (d)>>24;    \
    } while(0)
#endif

#ifndef AV_RB64
#   define AV_RB64(x)                              \
    (((uint64_t)((const uint8_t*)(x))[0] << 56) |  \
     ((uint64_t)((const uint8_t*)(x))[1] << 48) |  \
     ((uint64_t)((const uint8_t*)(x))[2] << 40) |  \
     ((uint64_t)((const uint8_t*)(x))[3] << 32) |  \
     ((uint64_t)((const uint8_t*)(x))[4] << 24) |  \
     ((uint64_t)((const uint8_t*)(x))[5] << 16) |  \
     ((uint64_t)((const uint8_t*)(x))[6] <<  8) |  \
      (uint64_t)((const uint8_t*)(x))[7])
#endif
#ifndef AV_WB64
#   define AV_WB64(p, d) do {            \
        ((uint8_t*)(p))[7] = (d);        \
        ((uint8_t*)(p))[6] = (d)>>8;     \
        ((uint8_t*)(p))[5] = (d)>>16;    \
        ((uint8_t*)(p))[4] = (d)>>24;    \
        ((uint8_t*)(p))[3] = (d)>>32;    \
        ((uint8_t*)(p))[2] = (d)>>40;    \
        ((uint8_t*)(p))[1] = (d)>>48;    \
        ((uint8_t*)(p))[0] = (d)>>56;    \
    } while(0)
#endif

#ifndef AV_RL64
#   define AV_RL64(x)                              \
    (((uint64_t)((const uint8_t*)(x))[7] << 56) |  \
     ((uint64_t)((const uint8_t*)(x))[6] << 48) |  \
     ((uint64_t)((const uint8_t*)(x))[5] << 40) |  \
     ((uint64_t)((const uint8_t*)(x))[4] << 32) |  \
     ((uint64_t)((const uint8_t*)(x))[3] << 24) |  \
     ((uint64_t)((const uint8_t*)(x))[2] << 16) |  \
     ((uint64_t)((const uint8_t*)(x))[1] <<  8) |  \
      (uint64_t)((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL64
#   define AV_WL64(p, d) do {            \
        ((uint8_t*)(p))[0] = (d);        \
        ((uint8_t*)(p))[1] = (d)>>8;     \
        ((uint8_t*)(p))[2] = (d)>>16;    \
        ((uint8_t*)(p))[3] = (d)>>24;    \
        ((uint8_t*)(p))[4] = (d)>>32;    \
        ((uint8_t*)(p))[5] = (d)>>40;    \
        ((uint8_t*)(p))[6] = (d)>>48;    \
        ((uint8_t*)(p))[7] = (d)>>56;    \
    } while(0)
#endif

#if HAVE_BIGENDIAN
#   define AV_RN(s, p)    AV_RB##s(p)
#   define AV_WN(s, p, v) AV_WB##s(p, v)
#else
#   define AV_RN(s, p)    AV_RL##s(p)
#   define AV_WN(s, p, v) AV_WL##s(p, v)
#endif

#endif /* HAVE_FAST_UNALIGNED */
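
/*
 * Sketch of why these fallbacks exist (illustrative, not part of the upstream
 * sources): parsers routinely read multi-byte fields at arbitrary, possibly
 * misaligned byte offsets, e.g. with a hypothetical buffer buf:
 *
 *     uint32_t v = AV_RN32(buf + 1);  // offset 1 is not 4-byte aligned
 *
 * With HAVE_ATTRIBUTE_PACKED or __DECC the compiler generates a safe
 * unaligned load, with HAVE_FAST_UNALIGNED a plain dereference is used, and
 * otherwise the value is assembled byte by byte via the AV_R[BL]* macros
 * defined above.
 */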

#ifndef AV_RN16
#   define AV_RN16(p) AV_RN(16, p)
#endif

#ifndef AV_RN32
#   define AV_RN32(p) AV_RN(32, p)
#endif

#ifndef AV_RN64
#   define AV_RN64(p) AV_RN(64, p)
#endif

#ifndef AV_WN16
#   define AV_WN16(p, v) AV_WN(16, p, v)
#endif

#ifndef AV_WN32
#   define AV_WN32(p, v) AV_WN(32, p, v)
#endif

#ifndef AV_WN64
#   define AV_WN64(p, v) AV_WN(64, p, v)
#endif

#if HAVE_BIGENDIAN
#   define AV_RB(s, p)    AV_RN##s(p)
#   define AV_WB(s, p, v) AV_WN##s(p, v)
#   define AV_RL(s, p)    bswap_##s(AV_RN##s(p))
#   define AV_WL(s, p, v) AV_WN##s(p, bswap_##s(v))
#else
#   define AV_RB(s, p)    bswap_##s(AV_RN##s(p))
#   define AV_WB(s, p, v) AV_WN##s(p, bswap_##s(v))
#   define AV_RL(s, p)    AV_RN##s(p)
#   define AV_WL(s, p, v) AV_WN##s(p, v)
#endif

#define AV_RB8(x)     (((const uint8_t*)(x))[0])
#define AV_WB8(p, d)  do { ((uint8_t*)(p))[0] = (d); } while(0)

#define AV_RL8(x)     AV_RB8(x)
#define AV_WL8(p, d)  AV_WB8(p, d)

#ifndef AV_RB16
#   define AV_RB16(p)    AV_RB(16, p)
#endif
#ifndef AV_WB16
#   define AV_WB16(p, v) AV_WB(16, p, v)
#endif

#ifndef AV_RL16
#   define AV_RL16(p)    AV_RL(16, p)
#endif
#ifndef AV_WL16
#   define AV_WL16(p, v) AV_WL(16, p, v)
#endif

#ifndef AV_RB32
#   define AV_RB32(p)    AV_RB(32, p)
#endif
#ifndef AV_WB32
#   define AV_WB32(p, v) AV_WB(32, p, v)
#endif

#ifndef AV_RL32
#   define AV_RL32(p)    AV_RL(32, p)
#endif
#ifndef AV_WL32
#   define AV_WL32(p, v) AV_WL(32, p, v)
#endif

#ifndef AV_RB64
#   define AV_RB64(p)    AV_RB(64, p)
#endif
#ifndef AV_WB64
#   define AV_WB64(p, v) AV_WB(64, p, v)
#endif

#ifndef AV_RL64
#   define AV_RL64(p)    AV_RL(64, p)
#endif
#ifndef AV_WL64
#   define AV_WL64(p, v) AV_WL(64, p, v)
#endif

#ifndef AV_RB24
#   define AV_RB24(x)                    \
    ((((const uint8_t*)(x))[0] << 16) |  \
     (((const uint8_t*)(x))[1] <<  8) |  \
      ((const uint8_t*)(x))[2])
#endif
#ifndef AV_WB24
#   define AV_WB24(p, d) do {            \
        ((uint8_t*)(p))[2] = (d);        \
        ((uint8_t*)(p))[1] = (d)>>8;     \
        ((uint8_t*)(p))[0] = (d)>>16;    \
    } while(0)
#endif

#ifndef AV_RL24
#   define AV_RL24(x)                    \
    ((((const uint8_t*)(x))[2] << 16) |  \
     (((const uint8_t*)(x))[1] <<  8) |  \
      ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL24
#   define AV_WL24(p, d) do {            \
        ((uint8_t*)(p))[0] = (d);        \
        ((uint8_t*)(p))[1] = (d)>>8;     \
        ((uint8_t*)(p))[2] = (d)>>16;    \
    } while(0)
#endif

/*
 * Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be
 * naturally aligned. They may be implemented using MMX,
 * so emms_c() must be called before using any float code
 * afterwards.
 */

#define AV_COPY(n, d, s) (*(uint##n##_t*)(d) = *(const uint##n##_t*)(s))

#ifndef AV_COPY64
#   define AV_COPY64(d, s) AV_COPY(64, d, s)
#endif

#ifndef AV_COPY128
#   define AV_COPY128(d, s)                    \
    do {                                       \
        AV_COPY64(d, s);                       \
        AV_COPY64((char*)(d)+8, (char*)(s)+8); \
    } while(0)
#endif

#define AV_SWAP(n, a, b) FFSWAP(uint##n##_t, *(uint##n##_t*)(a), *(uint##n##_t*)(b))

#ifndef AV_SWAP64
#   define AV_SWAP64(a, b) AV_SWAP(64, a, b)
#endif

#define AV_ZERO(n, d) (*(uint##n##_t*)(d) = 0)

#ifndef AV_ZERO64
#   define AV_ZERO64(d) AV_ZERO(64, d)
#endif

#ifndef AV_ZERO128
#   define AV_ZERO128(d)         \
    do {                         \
        AV_ZERO64(d);            \
        AV_ZERO64((char*)(d)+8); \
    } while(0)
#endif
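
/*
 * Usage sketch (illustrative, not part of the upstream sources; assumes the
 * DECLARE_ALIGNED macro from libavutil/mem.h): these macros are meant for
 * small, naturally aligned blocks, e.g.
 *
 *     DECLARE_ALIGNED(16, uint8_t, edge)[16];
 *     AV_ZERO128(edge);       // clear 16 aligned bytes
 *     AV_COPY64(dst, src);    // copy 8 aligned bytes from src to dst
 *
 * If the arch implementation uses MMX, emms_c() must be called before any
 * floating-point code runs afterwards.
 */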

#endif /* AVUTIL_INTREADWRITE_H */