/*****************************************************************************
 * cpu.c: cpu detection
 *****************************************************************************
 * Copyright (C) 2003-2018 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Laurent Aimar <fenrir@via.ecp.fr>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "base.h"

#if HAVE_POSIXTHREAD && SYS_LINUX
#include <sched.h>
#endif
#if SYS_BEOS
#include <kernel/OS.h>
#endif
#if SYS_MACOSX || SYS_FREEBSD
#include <sys/types.h>
#include <sys/sysctl.h>
#endif
#if SYS_OPENBSD
#include <sys/param.h>
#include <sys/sysctl.h>
#include <machine/cpu.h>
#endif
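
/* Capability-name table: maps human-readable capability names to X264_CPU_*
 * flag sets. Each SIMD entry is built on top of the previous generation via
 * the temporary MMX2/SSE2/AVX/AVX2 macros, so a name implies all of its
 * prerequisites. */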
const x264_cpu_name_t x264_cpu_names[] =
{
#if HAVE_MMX
//  {"MMX",         X264_CPU_MMX},  // we don't support asm on mmx1 cpus anymore
#define MMX2 X264_CPU_MMX|X264_CPU_MMX2
    {"MMX2",        MMX2},
    {"MMXEXT",      MMX2},
    {"SSE",         MMX2|X264_CPU_SSE},
#define SSE2 MMX2|X264_CPU_SSE|X264_CPU_SSE2
    {"SSE2Slow",    SSE2|X264_CPU_SSE2_IS_SLOW},
    {"SSE2",        SSE2},
    {"SSE2Fast",    SSE2|X264_CPU_SSE2_IS_FAST},
    {"LZCNT",       SSE2|X264_CPU_LZCNT},
    {"SSE3",        SSE2|X264_CPU_SSE3},
    {"SSSE3",       SSE2|X264_CPU_SSE3|X264_CPU_SSSE3},
    {"SSE4.1",      SSE2|X264_CPU_SSE3|X264_CPU_SSSE3|X264_CPU_SSE4},
    {"SSE4",        SSE2|X264_CPU_SSE3|X264_CPU_SSSE3|X264_CPU_SSE4},
    {"SSE4.2",      SSE2|X264_CPU_SSE3|X264_CPU_SSSE3|X264_CPU_SSE4|X264_CPU_SSE42},
#define AVX SSE2|X264_CPU_SSE3|X264_CPU_SSSE3|X264_CPU_SSE4|X264_CPU_SSE42|X264_CPU_AVX
    {"AVX",         AVX},
    {"XOP",         AVX|X264_CPU_XOP},
    {"FMA4",        AVX|X264_CPU_FMA4},
    {"FMA3",        AVX|X264_CPU_FMA3},
    {"BMI1",        AVX|X264_CPU_LZCNT|X264_CPU_BMI1},
    {"BMI2",        AVX|X264_CPU_LZCNT|X264_CPU_BMI1|X264_CPU_BMI2},
#define AVX2 AVX|X264_CPU_FMA3|X264_CPU_LZCNT|X264_CPU_BMI1|X264_CPU_BMI2|X264_CPU_AVX2
    {"AVX2",        AVX2},
    {"AVX512",      AVX2|X264_CPU_AVX512},
#undef AVX2
#undef AVX
#undef SSE2
#undef MMX2
    {"Cache32",         X264_CPU_CACHELINE_32},
    {"Cache64",         X264_CPU_CACHELINE_64},
    {"SlowAtom",        X264_CPU_SLOW_ATOM},
    {"SlowPshufb",      X264_CPU_SLOW_PSHUFB},
    {"SlowPalignr",     X264_CPU_SLOW_PALIGNR},
    {"SlowShuffle",     X264_CPU_SLOW_SHUFFLE},
    {"UnalignedStack",  X264_CPU_STACK_MOD4},
#elif ARCH_PPC
    {"Altivec",         X264_CPU_ALTIVEC},
#elif ARCH_ARM
    {"ARMv6",           X264_CPU_ARMV6},
    {"NEON",            X264_CPU_NEON},
    {"FastNeonMRC",     X264_CPU_FAST_NEON_MRC},
#elif ARCH_AARCH64
    {"ARMv8",           X264_CPU_ARMV8},
    {"NEON",            X264_CPU_NEON},
#elif ARCH_MIPS
    {"MSA",             X264_CPU_MSA},
#endif
    {"", 0},
};
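
/* On targets where the only reliable way to detect a SIMD unit is to execute
 * one of its instructions (PPC on Linux, ARM built without NEON), probe under
 * a SIGILL handler: sigsetjmp() saves a recovery point and the handler jumps
 * back to it if the probe instruction faults. */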
#if (ARCH_PPC && SYS_LINUX) || (ARCH_ARM && !HAVE_NEON)
#include <signal.h>
#include <setjmp.h>
static sigjmp_buf jmpbuf;
static volatile sig_atomic_t canjump = 0;

static void sigill_handler( int sig )
{
    if( !canjump )
    {
        signal( sig, SIG_DFL );
        raise( sig );
    }

    canjump = 0;
    siglongjmp( jmpbuf, 1 );
}
#endif
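
/* x86/x86-64: query features via the CPUID instruction, plus XGETBV to check
 * which vector register state the OS actually saves/restores before trusting
 * the AVX/AVX-512 feature bits. The helpers declared below are implemented in
 * x264's x86 assembly. */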
#if HAVE_MMX
int  x264_cpu_cpuid_test( void );
void x264_cpu_cpuid( uint32_t op, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx );
uint64_t x264_cpu_xgetbv( int xcr );

uint32_t x264_cpu_detect( void )
{
    uint32_t cpu = 0;
    uint32_t eax, ebx, ecx, edx;
    uint32_t vendor[4] = {0};
    uint32_t max_extended_cap, max_basic_cap;

#if !ARCH_X86_64
    if( !x264_cpu_cpuid_test() )
        return 0;
#endif

    /* The vendor string is the concatenation of EBX, EDX, ECX. */
    x264_cpu_cpuid( 0, &max_basic_cap, vendor+0, vendor+2, vendor+1 );
    if( max_basic_cap == 0 )
        return 0;

    x264_cpu_cpuid( 1, &eax, &ebx, &ecx, &edx );
    if( edx&0x00800000 ) /* MMX */
        cpu |= X264_CPU_MMX;
    else
        return cpu;
    if( edx&0x02000000 ) /* SSE */
        cpu |= X264_CPU_MMX2|X264_CPU_SSE;
    if( edx&0x04000000 ) /* SSE2 */
        cpu |= X264_CPU_SSE2;
    if( ecx&0x00000001 ) /* SSE3 */
        cpu |= X264_CPU_SSE3;
    if( ecx&0x00000200 ) /* SSSE3 */
        cpu |= X264_CPU_SSSE3|X264_CPU_SSE2_IS_FAST;
    if( ecx&0x00080000 ) /* SSE4.1 */
        cpu |= X264_CPU_SSE4;
    if( ecx&0x00100000 ) /* SSE4.2 */
        cpu |= X264_CPU_SSE42;

    if( ecx&0x08000000 ) /* XGETBV supported and XSAVE enabled by OS */
    {
        uint64_t xcr0 = x264_cpu_xgetbv( 0 );

        if( (xcr0&0x6) == 0x6 ) /* XMM/YMM state */
        {
            if( ecx&0x10000000 ) /* AVX */
                cpu |= X264_CPU_AVX;
            if( ecx&0x00001000 ) /* FMA3 */
                cpu |= X264_CPU_FMA3;

            if( max_basic_cap >= 7 )
            {
                x264_cpu_cpuid( 7, &eax, &ebx, &ecx, &edx );

                if( ebx&0x00000008 ) /* BMI1 */
                    cpu |= X264_CPU_BMI1;
                if( ebx&0x00000100 ) /* BMI2 */
                    cpu |= X264_CPU_BMI2;

                if( ebx&0x00000020 ) /* AVX2 */
                    cpu |= X264_CPU_AVX2;

                if( (xcr0&0xE0) == 0xE0 ) /* OPMASK/ZMM state */
                {
                    if( (ebx&0xD0030000) == 0xD0030000 ) /* AVX-512 F/DQ/CD/BW/VL */
                        cpu |= X264_CPU_AVX512;
                }
            }
        }
    }
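
    /* Extended CPUID leaves (0x80000000+): LZCNT, SSE4a/XOP/FMA4, and
     * vendor/family-specific performance quirks (slow SSE2 on older AMD parts,
     * Bobcat/Jaguar, Atom, Conroe). */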
    x264_cpu_cpuid( 0x80000000, &eax, &ebx, &ecx, &edx );
    max_extended_cap = eax;

    if( max_extended_cap >= 0x80000001 )
    {
        x264_cpu_cpuid( 0x80000001, &eax, &ebx, &ecx, &edx );

        if( ecx&0x00000020 )
            cpu |= X264_CPU_LZCNT;             /* Supported by Intel chips starting with Haswell */
        if( ecx&0x00000040 ) /* SSE4a, AMD only */
        {
            int family = ((eax>>8)&0xf) + ((eax>>20)&0xff); /* base family + extended family */
            cpu |= X264_CPU_SSE2_IS_FAST;      /* Phenom and later CPUs have fast SSE units */
            if( family == 0x14 )
            {
                cpu &= ~X264_CPU_SSE2_IS_FAST; /* SSSE3 doesn't imply fast SSE anymore... */
                cpu |= X264_CPU_SSE2_IS_SLOW;  /* Bobcat has 64-bit SIMD units */
                cpu |= X264_CPU_SLOW_PALIGNR;  /* palignr is insanely slow on Bobcat */
            }
            if( family == 0x16 )
            {
                cpu |= X264_CPU_SLOW_PSHUFB;   /* Jaguar's pshufb isn't that slow, but it's slow enough
                                                * compared to alternate instruction sequences that this
                                                * is equal or faster on almost all such functions. */
            }
        }

        if( cpu & X264_CPU_AVX )
        {
            if( ecx&0x00000800 ) /* XOP */
                cpu |= X264_CPU_XOP;
            if( ecx&0x00010000 ) /* FMA4 */
                cpu |= X264_CPU_FMA4;
        }

        if( !strcmp((char*)vendor, "AuthenticAMD") )
        {
            if( edx&0x00400000 ) /* AMD MMX extensions */
                cpu |= X264_CPU_MMX2;
            if( (cpu&X264_CPU_SSE2) && !(cpu&X264_CPU_SSE2_IS_FAST) )
                cpu |= X264_CPU_SSE2_IS_SLOW; /* AMD CPUs come in two types: terrible at SSE and great at it */
        }
    }

    if( !strcmp((char*)vendor, "GenuineIntel") )
    {
        x264_cpu_cpuid( 1, &eax, &ebx, &ecx, &edx );
        int family = ((eax>>8)&0xf) + ((eax>>20)&0xff); /* base family + extended family */
        int model  = ((eax>>4)&0xf) + ((eax>>12)&0xf0); /* base model + (extended model << 4) */
        if( family == 6 )
        {
            /* Detect Atom CPU */
            if( model == 28 )
            {
                cpu |= X264_CPU_SLOW_ATOM;
                cpu |= X264_CPU_SLOW_PSHUFB;
            }
            /* Conroe has a slow shuffle unit. Check the model number to make sure not
             * to include crippled low-end Penryns and Nehalems that don't have SSE4. */
            else if( (cpu&X264_CPU_SSSE3) && !(cpu&X264_CPU_SSE4) && model < 23 )
                cpu |= X264_CPU_SLOW_SHUFFLE;
        }
    }
    if( (!strcmp((char*)vendor, "GenuineIntel") || !strcmp((char*)vendor, "CyrixInstead")) && !(cpu&X264_CPU_SSE42) )
    {
        /* cacheline size is specified in 3 places, any of which may be missing */
        x264_cpu_cpuid( 1, &eax, &ebx, &ecx, &edx );
        int cache = (ebx&0xff00)>>5; // CLFLUSH line size (reported in 8-byte units, hence >>8<<3 == >>5)
        if( !cache && max_extended_cap >= 0x80000006 )
        {
            x264_cpu_cpuid( 0x80000006, &eax, &ebx, &ecx, &edx );
            cache = ecx&0xff; // cacheline size
        }
        if( !cache && max_basic_cap >= 2 )
        {
            // Cache and TLB Information
            static const char cache32_ids[] = { 0x0a, 0x0c, 0x41, 0x42, 0x43, 0x44, 0x45, 0x82, 0x83, 0x84, 0x85, 0 };
            static const char cache64_ids[] = { 0x22, 0x23, 0x25, 0x29, 0x2c, 0x46, 0x47, 0x49, 0x60, 0x66, 0x67,
                                                0x68, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7f, 0x86, 0x87, 0 };
            uint32_t buf[4];
            int max, i = 0;
            /* CPUID leaf 2 packs one-byte cache/TLB descriptors into EAX..EDX;
             * a register with its top bit set holds no valid descriptors, and
             * the low byte of EAX gives the number of times the leaf must be
             * queried to retrieve every descriptor. */
            do {
                x264_cpu_cpuid( 2, buf+0, buf+1, buf+2, buf+3 );
                max = buf[0]&0xff;
                buf[0] &= ~0xff; // strip the iteration count so it isn't matched as a descriptor
                for( int j = 0; j < 4; j++ )
                    if( !(buf[j]>>31) )
                        while( buf[j] )
                        {
                            if( strchr( cache32_ids, buf[j]&0xff ) )
                                cache = 32;
                            if( strchr( cache64_ids, buf[j]&0xff ) )
                                cache = 64;
                            buf[j] >>= 8;
                        }
            } while( ++i < max );
        }

        if( cache == 32 )
            cpu |= X264_CPU_CACHELINE_32;
        else if( cache == 64 )
            cpu |= X264_CPU_CACHELINE_64;
        else
            x264_log_internal( X264_LOG_WARNING, "unable to determine cacheline size\n" );
    }

#if STACK_ALIGNMENT < 16
    cpu |= X264_CPU_STACK_MOD4;
#endif

    return cpu;
}
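
/* PowerPC: AltiVec support is reported via sysctl on macOS and the BSDs; on
 * Linux it is probed by executing an AltiVec instruction under the SIGILL
 * handler defined above. */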
#elif ARCH_PPC && HAVE_ALTIVEC

#if SYS_MACOSX || SYS_OPENBSD || SYS_FREEBSD
#include <sys/sysctl.h>

uint32_t x264_cpu_detect( void )
{
    /* Thank you VLC */
    uint32_t cpu = 0;
#if SYS_OPENBSD
    int selectors[2] = { CTL_MACHDEP, CPU_ALTIVEC };
#elif SYS_MACOSX
    int selectors[2] = { CTL_HW, HW_VECTORUNIT };
#endif
    int has_altivec = 0;
    size_t length = sizeof( has_altivec );
#if SYS_MACOSX || SYS_OPENBSD
    int error = sysctl( selectors, 2, &has_altivec, &length, NULL, 0 );
#else
    int error = sysctlbyname( "hw.altivec", &has_altivec, &length, NULL, 0 );
#endif

    if( error == 0 && has_altivec != 0 )
        cpu |= X264_CPU_ALTIVEC;

    return cpu;
}

#elif SYS_LINUX

uint32_t x264_cpu_detect( void )
{
#ifdef __NO_FPRS__
    return 0;
#else
    static void (*oldsig)( int );

    oldsig = signal( SIGILL, sigill_handler );
    if( sigsetjmp( jmpbuf, 1 ) )
    {
        signal( SIGILL, oldsig );
        return 0;
    }

    canjump = 1;
    asm volatile( "mtspr 256, %0\n\t"
                  "vand 0, 0, 0\n\t"
                  :
                  : "r"(-1) );
    canjump = 0;

    signal( SIGILL, oldsig );
    return X264_CPU_ALTIVEC;
#endif
}
#endif
#elif ARCH_ARM

void x264_cpu_neon_test( void );
int x264_cpu_fast_neon_mrc_test( void );

uint32_t x264_cpu_detect( void )
{
    int flags = 0;
#if HAVE_ARMV6
    flags |= X264_CPU_ARMV6;

    // don't do this hack if compiled with -mfpu=neon
#if !HAVE_NEON
    static void (* oldsig)( int );
    oldsig = signal( SIGILL, sigill_handler );
    if( sigsetjmp( jmpbuf, 1 ) )
    {
        signal( SIGILL, oldsig );
        return flags;
    }

    canjump = 1;
    x264_cpu_neon_test();
    canjump = 0;
    signal( SIGILL, oldsig );
#endif

    flags |= X264_CPU_NEON;

    // fast neon -> arm (Cortex-A9) detection relies on user access to the
    // cycle counter; this assumes ARMv7 performance counters.
    // NEON requires at least ARMv7, ARMv8 may require changes here, but
    // hopefully this hacky detection method will have been replaced by then.
    // Note that there is potential for a race condition if another program or
    // x264 instance disables or reinits the counters while x264 is using them,
    // which may result in incorrect detection and the counters stuck enabled.
    // right now Apple does not seem to support performance counters for this test
#ifndef __MACH__
    flags |= x264_cpu_fast_neon_mrc_test() ? X264_CPU_FAST_NEON_MRC : 0;
#endif
    // TODO: write dual issue test? currently it's A8 (dual issue) vs. A9 (fast mrc)
#endif
    return flags;
}
#elif ARCH_AARCH64

uint32_t x264_cpu_detect( void )
{
    return X264_CPU_ARMV8 | X264_CPU_NEON;
}

#elif ARCH_MIPS

uint32_t x264_cpu_detect( void )
{
    uint32_t flags = 0;
#if HAVE_MSA
    flags |= X264_CPU_MSA;
#endif
    return flags;
}

#else

uint32_t x264_cpu_detect( void )
{
    return 0;
}

#endif
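
/* Best-effort count of logical processors available to the calling process;
 * returns 1 when threading is disabled at build time or when no
 * platform-specific query is available. */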
int x264_cpu_num_processors( void )
{
#if !HAVE_THREAD
    return 1;

#elif SYS_WINDOWS
    return x264_pthread_num_processors_np();

#elif SYS_CYGWIN || SYS_SunOS
    return sysconf( _SC_NPROCESSORS_ONLN );

#elif SYS_LINUX
#ifdef __ANDROID__
    // Android NDK does not expose sched_getaffinity
    return sysconf( _SC_NPROCESSORS_CONF );
#else
    cpu_set_t p_aff;
    memset( &p_aff, 0, sizeof(p_aff) );
    if( sched_getaffinity( 0, sizeof(p_aff), &p_aff ) )
        return 1;
#if HAVE_CPU_COUNT
    return CPU_COUNT(&p_aff);
#else
    int np = 0;
    for( unsigned int bit = 0; bit < 8 * sizeof(p_aff); bit++ )
        np += (((uint8_t *)&p_aff)[bit / 8] >> (bit % 8)) & 1;
    return np;
#endif
#endif

#elif SYS_BEOS
    system_info info;
    get_system_info( &info );
    return info.cpu_count;

#elif SYS_MACOSX || SYS_FREEBSD || SYS_OPENBSD
    int ncpu;
    size_t length = sizeof( ncpu );
#if SYS_OPENBSD
    int mib[2] = { CTL_HW, HW_NCPU };
    if( sysctl(mib, 2, &ncpu, &length, NULL, 0) )
#else
    if( sysctlbyname("hw.ncpu", &ncpu, &length, NULL, 0) )
#endif
    {
        ncpu = 1;
    }
    return ncpu;

#else
    return 1;
#endif
}