  1. /*****************************************************************************
  2. * ppccommon.h: ppc utility macros
  3. *****************************************************************************
  4. * Copyright (C) 2003-2018 x264 project
  5. *
  6. * Authors: Eric Petit <eric.petit@lapsus.org>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
  21. *
  22. * This program is also available under a commercial proprietary license.
  23. * For more information, contact us at licensing@x264.com.
  24. *****************************************************************************/
  25. #if HAVE_ALTIVEC_H
  26. #include <altivec.h>
  27. #endif
/***********************************************************************
 * For constant vectors, use parentheses on OS X and braces on Linux
 **********************************************************************/
/* NOTE(review): pre-4.x Apple GCC accepted only the non-standard
 * parenthesized vector-literal syntax; everywhere else a braced
 * initializer list is used. CV() papers over the difference. */
#if defined(__APPLE__) && __GNUC__ < 4
#define CV(a...) (a)
#else
#define CV(a...) {a}
#endif
/***********************************************************************
 * Vector types
 **********************************************************************/
/* Short aliases for the 128-bit AltiVec vector types. These are macros
 * (not typedefs) so they can appear in casts like (vec_u16_t)v. */
#define vec_u8_t vector unsigned char
#define vec_s8_t vector signed char
#define vec_u16_t vector unsigned short
#define vec_s16_t vector signed short
#define vec_u32_t vector unsigned int
#define vec_s32_t vector signed int
#if HAVE_VSX
/* 64-bit element vectors require VSX (POWER7+). */
#define vec_u64_t vector unsigned long long
#define vec_s64_t vector signed long long
/* Union overlays: view a vector as an array of its scalar element type,
 * for building constants or reading individual lanes from C code. */
typedef union {
    uint64_t s[2];
    vec_u64_t v;
} vec_u64_u;
typedef union {
    int64_t s[2];
    vec_s64_t v;
} vec_s64_u;
#endif
typedef union {
    uint32_t s[4];
    vec_u32_t v;
} vec_u32_u;
typedef union {
    int32_t s[4];
    vec_s32_t v;
} vec_s32_u;
typedef union {
    uint16_t s[8];
    vec_u16_t v;
} vec_u16_u;
typedef union {
    int16_t s[8];
    vec_s16_t v;
} vec_s16_u;
typedef union {
    uint8_t s[16];
    vec_u8_t v;
} vec_u8_u;
typedef union {
    int8_t s[16];
    vec_s8_t v;
} vec_s8_u;
/***********************************************************************
 * Null vector
 **********************************************************************/
/* Declares a local all-zero vector named zerov. The zero_*v aliases
 * below merely reinterpret it as other element types -- the all-zero
 * bit pattern is the same for every vector type. */
#define LOAD_ZERO const vec_u8_t zerov = vec_splat_u8( 0 )
#define zero_u8v (vec_u8_t) zerov
#define zero_s8v (vec_s8_t) zerov
#define zero_u16v (vec_u16_t) zerov
#define zero_s16v (vec_s16_t) zerov
#define zero_u32v (vec_u32_t) zerov
#define zero_s32v (vec_s32_t) zerov
/***********************************************************************
 * 8 <-> 16 bits conversions
 **********************************************************************/
/* Zero-extend u8 lanes to 16 bits by interleaving with the zero vector.
 * The merge operand order flips with endianness so the zero byte always
 * lands in the most significant half of each 16-bit lane.
 * _h widens the first eight bytes, _l the last eight.
 * Requires LOAD_ZERO in scope. */
#ifdef WORDS_BIGENDIAN
#define vec_u8_to_u16_h(v) (vec_u16_t) vec_mergeh( zero_u8v, (vec_u8_t) v )
#define vec_u8_to_u16_l(v) (vec_u16_t) vec_mergel( zero_u8v, (vec_u8_t) v )
#define vec_u8_to_s16_h(v) (vec_s16_t) vec_mergeh( zero_u8v, (vec_u8_t) v )
#define vec_u8_to_s16_l(v) (vec_s16_t) vec_mergel( zero_u8v, (vec_u8_t) v )
#else
#define vec_u8_to_u16_h(v) (vec_u16_t) vec_mergeh( (vec_u8_t) v, zero_u8v )
#define vec_u8_to_u16_l(v) (vec_u16_t) vec_mergel( (vec_u8_t) v, zero_u8v )
#define vec_u8_to_s16_h(v) (vec_s16_t) vec_mergeh( (vec_u8_t) v, zero_u8v )
#define vec_u8_to_s16_l(v) (vec_s16_t) vec_mergel( (vec_u8_t) v, zero_u8v )
#endif
/* Unsuffixed forms widen only the high half. */
#define vec_u8_to_u16(v) vec_u8_to_u16_h(v)
#define vec_u8_to_s16(v) vec_u8_to_s16_h(v)
/* Narrow back to u8: vec_pack truncates, vec_packsu saturates signed
 * input to [0,255]; the result occupies the high half of the vector. */
#define vec_u16_to_u8(v) vec_pack( v, zero_u16v )
#define vec_s16_to_u8(v) vec_packsu( v, zero_s16v )
/***********************************************************************
 * 16 <-> 32 bits conversions
 **********************************************************************/
/* Same scheme as the 8<->16 conversions, one level wider: zero-extend
 * u16 lanes to 32 bits via merge with zero; operand order follows
 * endianness. Requires LOAD_ZERO in scope. */
#ifdef WORDS_BIGENDIAN
#define vec_u16_to_u32_h(v) (vec_u32_t) vec_mergeh( zero_u16v, (vec_u16_t) v )
#define vec_u16_to_u32_l(v) (vec_u32_t) vec_mergel( zero_u16v, (vec_u16_t) v )
#define vec_u16_to_s32_h(v) (vec_s32_t) vec_mergeh( zero_u16v, (vec_u16_t) v )
#define vec_u16_to_s32_l(v) (vec_s32_t) vec_mergel( zero_u16v, (vec_u16_t) v )
#else
#define vec_u16_to_u32_h(v) (vec_u32_t) vec_mergeh( (vec_u16_t) v, zero_u16v )
#define vec_u16_to_u32_l(v) (vec_u32_t) vec_mergel( (vec_u16_t) v, zero_u16v )
#define vec_u16_to_s32_h(v) (vec_s32_t) vec_mergeh( (vec_u16_t) v, zero_u16v )
#define vec_u16_to_s32_l(v) (vec_s32_t) vec_mergel( (vec_u16_t) v, zero_u16v )
#endif
/* Unsuffixed forms widen only the high half. */
#define vec_u16_to_u32(v) vec_u16_to_u32_h(v)
#define vec_u16_to_s32(v) vec_u16_to_s32_h(v)
/* Narrow back to u16: truncating / saturating pack against zero. */
#define vec_u32_to_u16(v) vec_pack( v, zero_u32v )
#define vec_s32_to_u16(v) vec_packsu( v, zero_s32v )
  127. /***********************************************************************
  128. * PREP_STORE##n: declares required vectors to store n bytes to a
  129. * potentially unaligned address
  130. * VEC_STORE##n: stores n bytes from vector v to address p
  131. **********************************************************************/
  132. #define PREP_STORE8 \
  133. vec_u8_t _tmp3v; \
  134. vec_u8_t mask = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, \
  135. 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F } \
  136. #define VEC_STORE8( v, p ) \
  137. _tmp3v = vec_vsx_ld( 0, p ); \
  138. v = vec_perm( v, _tmp3v, mask ); \
  139. vec_vsx_st( v, 0, p )
/***********************************************************************
 * VEC_TRANSPOSE_8
 ***********************************************************************
 * Transposes a 8x8 matrix of s16 vectors
 **********************************************************************/
/* Inputs in a0..a7, result in b0..b7; a0..a7 are clobbered as scratch.
 * Three rounds of mergeh/mergel on row pairs four apart perform the
 * classic interleave-based 8x8 transpose (log2(8) = 3 passes).
 * Statement order matters: each pass consumes the previous pass's
 * outputs. */
#define VEC_TRANSPOSE_8(a0,a1,a2,a3,a4,a5,a6,a7,b0,b1,b2,b3,b4,b5,b6,b7) \
    b0 = vec_mergeh( a0, a4 ); \
    b1 = vec_mergel( a0, a4 ); \
    b2 = vec_mergeh( a1, a5 ); \
    b3 = vec_mergel( a1, a5 ); \
    b4 = vec_mergeh( a2, a6 ); \
    b5 = vec_mergel( a2, a6 ); \
    b6 = vec_mergeh( a3, a7 ); \
    b7 = vec_mergel( a3, a7 ); \
    a0 = vec_mergeh( b0, b4 ); \
    a1 = vec_mergel( b0, b4 ); \
    a2 = vec_mergeh( b1, b5 ); \
    a3 = vec_mergel( b1, b5 ); \
    a4 = vec_mergeh( b2, b6 ); \
    a5 = vec_mergel( b2, b6 ); \
    a6 = vec_mergeh( b3, b7 ); \
    a7 = vec_mergel( b3, b7 ); \
    b0 = vec_mergeh( a0, a4 ); \
    b1 = vec_mergel( a0, a4 ); \
    b2 = vec_mergeh( a1, a5 ); \
    b3 = vec_mergel( a1, a5 ); \
    b4 = vec_mergeh( a2, a6 ); \
    b5 = vec_mergel( a2, a6 ); \
    b6 = vec_mergeh( a3, a7 ); \
    b7 = vec_mergel( a3, a7 )
/***********************************************************************
 * VEC_TRANSPOSE_4
 ***********************************************************************
 * Transposes a 4x4 matrix of s16 vectors.
 * Actually source and destination are 8x4. The low elements of the
 * source are discarded and the low elements of the destination mustn't
 * be used.
 **********************************************************************/
/* Inputs in a0..a3 (only high halves significant), result in b0..b3;
 * a0..a3 are clobbered as scratch. In the first pass a0 serves as a
 * don't-care second operand -- those lanes end up in the unused low
 * halves of the outputs. */
#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
    b0 = vec_mergeh( a0, a0 ); \
    b1 = vec_mergeh( a1, a0 ); \
    b2 = vec_mergeh( a2, a0 ); \
    b3 = vec_mergeh( a3, a0 ); \
    a0 = vec_mergeh( b0, b2 ); \
    a1 = vec_mergel( b0, b2 ); \
    a2 = vec_mergeh( b1, b3 ); \
    a3 = vec_mergel( b1, b3 ); \
    b0 = vec_mergeh( a0, a2 ); \
    b1 = vec_mergel( a0, a2 ); \
    b2 = vec_mergeh( a1, a3 ); \
    b3 = vec_mergel( a1, a3 )
/***********************************************************************
 * VEC_DIFF_H
 ***********************************************************************
 * p1, p2: u8 *
 * i1, i2, n: int
 * d: s16v
 *
 * Loads n bytes from p1 and p2, do the diff of the high elements into
 * d, increments p1 and p2 by i1 and i2
 **********************************************************************/
/* Declares the vectors VEC_DIFF_H needs; keep in the enclosing scope. */
#define PREP_DIFF \
    LOAD_ZERO; \
    vec_s16_t pix1v, pix2v;

/* NOTE(review): n is accepted for interface symmetry but unused in the
 * expansion -- only the high 8 bytes of each 16-byte load are widened
 * to s16 and subtracted. The (int16_t *) casts only silence the
 * vec_vsx_ld pointer type; the data is still treated as bytes. */
#define VEC_DIFF_H(p1,i1,p2,i2,n,d) \
    pix1v = vec_vsx_ld( 0, (int16_t *)p1 ); \
    pix1v = vec_u8_to_s16( pix1v ); \
    pix2v = vec_vsx_ld( 0, (int16_t *)p2 ); \
    pix2v = vec_u8_to_s16( pix2v ); \
    d = vec_sub( pix1v, pix2v ); \
    p1 += i1; \
    p2 += i2
/***********************************************************************
 * VEC_DIFF_HL
 ***********************************************************************
 * p1, p2: u8 *
 * i1, i2: int
 * dh, dl: s16v
 *
 * Loads 16 bytes from p1 and p2, do the diff of the high elements into
 * dh, the diff of the low elements into dl, increments p1 and p2 by i1
 * and i2
 **********************************************************************/
/* NOTE(review): besides pix1v/pix2v from PREP_DIFF, this relies on the
 * caller declaring temp0v..temp3v (s16 vectors) -- they are not
 * declared here. The p1 load uses aligned vec_ld while p2 uses
 * unaligned vec_vsx_ld, so p1 is presumably 16-byte aligned at every
 * call site -- confirm before reuse. */
#define VEC_DIFF_HL(p1,i1,p2,i2,dh,dl) \
    pix1v = (vec_s16_t)vec_ld(0, p1); \
    temp0v = vec_u8_to_s16_h( pix1v ); \
    temp1v = vec_u8_to_s16_l( pix1v ); \
    pix2v = vec_vsx_ld( 0, (int16_t *)p2 ); \
    temp2v = vec_u8_to_s16_h( pix2v ); \
    temp3v = vec_u8_to_s16_l( pix2v ); \
    dh = vec_sub( temp0v, temp2v ); \
    dl = vec_sub( temp1v, temp3v ); \
    p1 += i1; \
    p2 += i2
  234. /***********************************************************************
  235. * VEC_DIFF_H_8BYTE_ALIGNED
  236. ***********************************************************************
  237. * p1, p2: u8 *
  238. * i1, i2, n: int
  239. * d: s16v
  240. *
  241. * Loads n bytes from p1 and p2, do the diff of the high elements into
  242. * d, increments p1 and p2 by i1 and i2
  243. * Slightly faster when we know we are loading/diffing 8bytes which
  244. * are 8 byte aligned. Reduces need for two loads and two vec_lvsl()'s
  245. **********************************************************************/
  246. #define PREP_DIFF_8BYTEALIGNED \
  247. LOAD_ZERO; \
  248. vec_s16_t pix1v, pix2v; \
  249. vec_u8_t pix1v8, pix2v8; \
  250. #define VEC_DIFF_H_8BYTE_ALIGNED(p1,i1,p2,i2,n,d) \
  251. pix1v8 = vec_vsx_ld( 0, p1 ); \
  252. pix2v8 = vec_vsx_ld( 0, p2 ); \
  253. pix1v = vec_u8_to_s16( pix1v8 ); \
  254. pix2v = vec_u8_to_s16( pix2v8 ); \
  255. d = vec_sub( pix1v, pix2v); \
  256. p1 += i1; \
  257. p2 += i2;
/* Pure-AltiVec fallbacks for the VSX unaligned load/store used above:
 * synthesize an unaligned access from aligned vec_ld/vec_st plus a
 * vec_lvsl/vec_lvsr alignment permute. The second access at off + 15
 * reaches the following 16-byte line when the address straddles one. */
#if !HAVE_VSX
#undef vec_vsx_ld
#define vec_vsx_ld(off, src) \
    vec_perm(vec_ld(off, src), vec_ld(off + 15, src), vec_lvsl(off, src))
#undef vec_vsx_st
/* Unaligned store = read-modify-write of the (up to) two quadwords the
 * 16 stored bytes may span: load both, rotate existing bytes into
 * place (_e), then merge v in with the lvsr mask and store both back. */
#define vec_vsx_st(v, off, dst) \
    do { \
        uint8_t *_dst = (uint8_t*)(dst); \
        vec_u8_t _v = (vec_u8_t)(v); \
        vec_u8_t _a = vec_ld(off, _dst); \
        vec_u8_t _b = vec_ld(off + 15, _dst); \
        vec_u8_t _e = vec_perm(_b, _a, vec_lvsl(0, _dst)); \
        vec_u8_t _m = vec_lvsr(0, _dst); \
        \
        vec_st(vec_perm(_v, _e, _m), off + 15, _dst); \
        vec_st(vec_perm(_e, _v, _m), off, _dst); \
    } while( 0 )
#endif
/* Absolute difference |a - b| for unsigned vectors without overflow;
 * POWER9 exposes a native vec_absd builtin, so only define it here
 * for older targets. */
#ifndef __POWER9_VECTOR__
#define vec_absd( a, b ) vec_sub( vec_max( a, b ), vec_min( a, b ) )
#endif
// vec_xxpermdi is quite useful but some version of clang do not expose it
/* xxpermdi(a, b, c): assemble a vector from one 64-bit doubleword of a
 * and one of b, selected by the two-bit immediate c. Three cases:
 *   1) no VSX, or clang < 6: emulate with vec_perm and one of four
 *      precomputed byte-select masks (c must be a literal 0..3 because
 *      of the token paste);
 *   2) gcc >= 6.3 or clang >= 7: the builtin is endian-corrected, use
 *      it directly;
 *   3) otherwise fall through to the #ifndef definitions below. */
#if !HAVE_VSX || (defined(__clang__) && __clang_major__ < 6)
static const vec_u8_t xxpermdi0_perm = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
                                         0x14, 0x15, 0x16, 0x17 };
static const vec_u8_t xxpermdi1_perm = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
                                         0x06, 0x07, 0x18, 0x19, 0x1A, 0x1B,
                                         0x1C, 0x1D, 0x1E, 0x1F };
static const vec_u8_t xxpermdi2_perm = { 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
                                         0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13,
                                         0x14, 0x15, 0x16, 0x17 };
static const vec_u8_t xxpermdi3_perm = { 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
                                         0x1C, 0x1D, 0x1E, 0x1F };
#define xxpermdi(a, b, c) vec_perm(a, b, xxpermdi##c##_perm)
#elif (defined(__GNUC__) && (__GNUC__ > 6 || (__GNUC__ == 6 && __GNUC_MINOR__ >= 3))) || \
      (defined(__clang__) && __clang_major__ >= 7)
#define xxpermdi(a, b, c) vec_xxpermdi(a, b, c)
#endif
// vec_xxpermdi has its endianness bias exposed in early gcc and clang
#ifdef WORDS_BIGENDIAN
#ifndef xxpermdi
#define xxpermdi(a, b, c) vec_xxpermdi(a, b, c)
#endif
#else
/* On little endian the early builtin operates on the swapped register
 * image: swap the operands and remap the selector (swap its two bits
 * and invert them) to get the big-endian-semantics result. */
#ifndef xxpermdi
#define xxpermdi(a, b, c) vec_xxpermdi(b, a, ((c >> 1) | (c & 1) << 1) ^ 3)
#endif
#endif