12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952 |
- /*****************************************************************************
- * macros.h: msa macros
- *****************************************************************************
- * Copyright (C) 2015-2018 x264 project
- *
- * Authors: Rishikesh More <rishikesh.more@imgtec.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
- *
- * This program is also available under a commercial proprietary license.
- * For more information, contact us at licensing@x264.com.
- *****************************************************************************/
- #ifndef X264_MIPS_MACROS_H
- #define X264_MIPS_MACROS_H
- #include <stdint.h>
- #include <msa.h>
- #define LD_B( RTYPE, p_src ) *( ( RTYPE * )( p_src ) )
- #define LD_UB( ... ) LD_B( v16u8, __VA_ARGS__ )
- #define LD_SB( ... ) LD_B( v16i8, __VA_ARGS__ )
- #define LD_H( RTYPE, p_src ) *( ( RTYPE * )( p_src ) )
- #define LD_SH( ... ) LD_H( v8i16, __VA_ARGS__ )
- #define LD_W( RTYPE, p_src ) *( ( RTYPE * )( p_src ) )
- #define LD_SW( ... ) LD_W( v4i32, __VA_ARGS__ )
- #define ST_B( RTYPE, in, p_dst ) *( ( RTYPE * )( p_dst ) ) = ( in )
- #define ST_UB( ... ) ST_B( v16u8, __VA_ARGS__ )
- #define ST_SB( ... ) ST_B( v16i8, __VA_ARGS__ )
- #define ST_H( RTYPE, in, p_dst ) *( ( RTYPE * )( p_dst ) ) = ( in )
- #define ST_UH( ... ) ST_H( v8u16, __VA_ARGS__ )
- #define ST_SH( ... ) ST_H( v8i16, __VA_ARGS__ )
- #if ( __mips_isa_rev >= 6 )
- #define LH( p_src ) \
- ( { \
- uint8_t *p_src_m = ( uint8_t * ) ( p_src ); \
- uint16_t u_val_h_m; \
- \
- asm volatile ( \
- "lh %[u_val_h_m], %[p_src_m] \n\t" \
- \
- : [u_val_h_m] "=r" ( u_val_h_m ) \
- : [p_src_m] "m" ( *p_src_m ) \
- ); \
- \
- u_val_h_m; \
- } )
- #define LW( p_src ) \
- ( { \
- uint8_t *p_src_m = ( uint8_t * ) ( p_src ); \
- uint32_t u_val_w_m; \
- \
- asm volatile ( \
- "lw %[u_val_w_m], %[p_src_m] \n\t" \
- \
- : [u_val_w_m] "=r" ( u_val_w_m ) \
- : [p_src_m] "m" ( *p_src_m ) \
- ); \
- \
- u_val_w_m; \
- } )
- #if ( __mips == 64 )
- #define LD( p_src ) \
- ( { \
- uint8_t *p_src_m = ( uint8_t * ) ( p_src ); \
- uint64_t u_val_d_m = 0; \
- \
- asm volatile ( \
- "ld %[u_val_d_m], %[p_src_m] \n\t" \
- \
- : [u_val_d_m] "=r" ( u_val_d_m ) \
- : [p_src_m] "m" ( *p_src_m ) \
- ); \
- \
- u_val_d_m; \
- } )
- #else // !( __mips == 64 )
- #define LD( p_src ) \
- ( { \
- uint8_t *p_src_m = ( uint8_t * ) ( p_src ); \
- uint32_t u_val0_m, u_val1_m; \
- uint64_t u_val_d_m = 0; \
- \
- u_val0_m = LW( p_src_m ); \
- u_val1_m = LW( p_src_m + 4 ); \
- \
- u_val_d_m = ( uint64_t ) ( u_val1_m ); \
- u_val_d_m = ( uint64_t ) ( ( u_val_d_m << 32 ) & \
- 0xFFFFFFFF00000000 ); \
- u_val_d_m = ( uint64_t ) ( u_val_d_m | ( uint64_t ) u_val0_m ); \
- \
- u_val_d_m; \
- } )
- #endif // ( __mips == 64 )
- #define SH( u_val, p_dst ) \
- { \
- uint8_t *p_dst_m = ( uint8_t * ) ( p_dst ); \
- uint16_t u_val_h_m = ( u_val ); \
- \
- asm volatile ( \
- "sh %[u_val_h_m], %[p_dst_m] \n\t" \
- \
- : [p_dst_m] "=m" ( *p_dst_m ) \
- : [u_val_h_m] "r" ( u_val_h_m ) \
- ); \
- }
- #define SW( u_val, p_dst ) \
- { \
- uint8_t *p_dst_m = ( uint8_t * ) ( p_dst ); \
- uint32_t u_val_w_m = ( u_val ); \
- \
- asm volatile ( \
- "sw %[u_val_w_m], %[p_dst_m] \n\t" \
- \
- : [p_dst_m] "=m" ( *p_dst_m ) \
- : [u_val_w_m] "r" ( u_val_w_m ) \
- ); \
- }
- #define SD( u_val, p_dst ) \
- { \
- uint8_t *p_dst_m = ( uint8_t * ) ( p_dst ); \
- uint64_t u_val_d_m = ( u_val ); \
- \
- asm volatile ( \
- "sd %[u_val_d_m], %[p_dst_m] \n\t" \
- \
- : [p_dst_m] "=m" ( *p_dst_m ) \
- : [u_val_d_m] "r" ( u_val_d_m ) \
- ); \
- }
- #else // !( __mips_isa_rev >= 6 )
- #define LH( p_src ) \
- ( { \
- uint8_t *p_src_m = ( uint8_t * ) ( p_src ); \
- uint16_t u_val_h_m; \
- \
- asm volatile ( \
- "ulh %[u_val_h_m], %[p_src_m] \n\t" \
- \
- : [u_val_h_m] "=r" ( u_val_h_m ) \
- : [p_src_m] "m" ( *p_src_m ) \
- ); \
- \
- u_val_h_m; \
- } )
- #define LW( p_src ) \
- ( { \
- uint8_t *p_src_m = ( uint8_t * ) ( p_src ); \
- uint32_t u_val_w_m; \
- \
- asm volatile ( \
- "ulw %[u_val_w_m], %[p_src_m] \n\t" \
- \
- : [u_val_w_m] "=r" ( u_val_w_m ) \
- : [p_src_m] "m" ( *p_src_m ) \
- ); \
- \
- u_val_w_m; \
- } )
- #if ( __mips == 64 )
- #define LD( p_src ) \
- ( { \
- uint8_t *p_src_m = ( uint8_t * ) ( p_src ); \
- uint64_t u_val_d_m = 0; \
- \
- asm volatile ( \
- "uld %[u_val_d_m], %[p_src_m] \n\t" \
- \
- : [u_val_d_m] "=r" ( u_val_d_m ) \
- : [p_src_m] "m" ( *p_src_m ) \
- ); \
- \
- u_val_d_m; \
- } )
- #else // !( __mips == 64 )
- #define LD( p_src ) \
- ( { \
- uint8_t *psrc_m1 = ( uint8_t * ) ( p_src ); \
- uint32_t u_val0_m, u_val1_m; \
- uint64_t u_val_d_m = 0; \
- \
- u_val0_m = LW( psrc_m1 ); \
- u_val1_m = LW( psrc_m1 + 4 ); \
- \
- u_val_d_m = ( uint64_t ) ( u_val1_m ); \
- u_val_d_m = ( uint64_t ) ( ( u_val_d_m << 32 ) & \
- 0xFFFFFFFF00000000 ); \
- u_val_d_m = ( uint64_t ) ( u_val_d_m | ( uint64_t ) u_val0_m ); \
- \
- u_val_d_m; \
- } )
- #endif // ( __mips == 64 )
- #define SH( u_val, p_dst ) \
- { \
- uint8_t *p_dst_m = ( uint8_t * ) ( p_dst ); \
- uint16_t u_val_h_m = ( u_val ); \
- \
- asm volatile ( \
- "ush %[u_val_h_m], %[p_dst_m] \n\t" \
- \
- : [p_dst_m] "=m" ( *p_dst_m ) \
- : [u_val_h_m] "r" ( u_val_h_m ) \
- ); \
- }
- #define SW( u_val, p_dst ) \
- { \
- uint8_t *p_dst_m = ( uint8_t * ) ( p_dst ); \
- uint32_t u_val_w_m = ( u_val ); \
- \
- asm volatile ( \
- "usw %[u_val_w_m], %[p_dst_m] \n\t" \
- \
- : [p_dst_m] "=m" ( *p_dst_m ) \
- : [u_val_w_m] "r" ( u_val_w_m ) \
- ); \
- }
- #define SD( u_val, p_dst ) \
- { \
- uint8_t *p_dst_m1 = ( uint8_t * ) ( p_dst ); \
- uint32_t u_val0_m, u_val1_m; \
- \
- u_val0_m = ( uint32_t ) ( ( u_val ) & 0x00000000FFFFFFFF ); \
- u_val1_m = ( uint32_t ) ( ( ( u_val ) >> 32 ) & 0x00000000FFFFFFFF ); \
- \
- SW( u_val0_m, p_dst_m1 ); \
- SW( u_val1_m, p_dst_m1 + 4 ); \
- }
- #endif // ( __mips_isa_rev >= 6 )
- /* Description : Load 4 words with stride
- Arguments : Inputs - psrc (source pointer to load from)
- - stride
- Outputs - out0, out1, out2, out3
- Details : Load word in 'out0' from (psrc)
- Load word in 'out1' from (psrc + stride)
- Load word in 'out2' from (psrc + 2 * stride)
- Load word in 'out3' from (psrc + 3 * stride)
- */
- #define LW4( p_src, stride, out0, out1, out2, out3 ) \
- { \
- out0 = LW( ( p_src ) ); \
- out1 = LW( ( p_src ) + stride ); \
- out2 = LW( ( p_src ) + 2 * stride ); \
- out3 = LW( ( p_src ) + 3 * stride ); \
- }
- /* Description : Store 4 words with stride
- Arguments : Inputs - in0, in1, in2, in3, pdst, stride
- Details : Store word from 'in0' to (pdst)
- Store word from 'in1' to (pdst + stride)
- Store word from 'in2' to (pdst + 2 * stride)
- Store word from 'in3' to (pdst + 3 * stride)
- */
- #define SW4( in0, in1, in2, in3, p_dst, stride ) \
- { \
- SW( in0, ( p_dst ) ) \
- SW( in1, ( p_dst ) + stride ); \
- SW( in2, ( p_dst ) + 2 * stride ); \
- SW( in3, ( p_dst ) + 3 * stride ); \
- }
- /* Description : Store 4 double words with stride
- Arguments : Inputs - in0, in1, in2, in3, pdst, stride
- Details : Store double word from 'in0' to (pdst)
- Store double word from 'in1' to (pdst + stride)
- Store double word from 'in2' to (pdst + 2 * stride)
- Store double word from 'in3' to (pdst + 3 * stride)
- */
- #define SD4( in0, in1, in2, in3, p_dst, stride ) \
- { \
- SD( in0, ( p_dst ) ) \
- SD( in1, ( p_dst ) + stride ); \
- SD( in2, ( p_dst ) + 2 * stride ); \
- SD( in3, ( p_dst ) + 3 * stride ); \
- }
- /* Description : Load vectors with 16 byte elements with stride
- Arguments : Inputs - psrc (source pointer to load from)
- - stride
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Load 16 byte elements in 'out0' from (psrc)
- Load 16 byte elements in 'out1' from (psrc + stride)
- */
- #define LD_B2( RTYPE, p_src, stride, out0, out1 ) \
- { \
- out0 = LD_B( RTYPE, ( p_src ) ); \
- out1 = LD_B( RTYPE, ( p_src ) + stride ); \
- }
- #define LD_UB2( ... ) LD_B2( v16u8, __VA_ARGS__ )
- #define LD_SB2( ... ) LD_B2( v16i8, __VA_ARGS__ )
- #define LD_B3( RTYPE, p_src, stride, out0, out1, out2 ) \
- { \
- LD_B2( RTYPE, ( p_src ), stride, out0, out1 ); \
- out2 = LD_B( RTYPE, ( p_src ) + 2 * stride ); \
- }
- #define LD_UB3( ... ) LD_B3( v16u8, __VA_ARGS__ )
- #define LD_SB3( ... ) LD_B3( v16i8, __VA_ARGS__ )
- #define LD_B4( RTYPE, p_src, stride, out0, out1, out2, out3 ) \
- { \
- LD_B2( RTYPE, ( p_src ), stride, out0, out1 ); \
- LD_B2( RTYPE, ( p_src ) + 2 * stride , stride, out2, out3 ); \
- }
- #define LD_UB4( ... ) LD_B4( v16u8, __VA_ARGS__ )
- #define LD_SB4( ... ) LD_B4( v16i8, __VA_ARGS__ )
- #define LD_B5( RTYPE, p_src, stride, out0, out1, out2, out3, out4 ) \
- { \
- LD_B4( RTYPE, ( p_src ), stride, out0, out1, out2, out3 ); \
- out4 = LD_B( RTYPE, ( p_src ) + 4 * stride ); \
- }
- #define LD_UB5( ... ) LD_B5( v16u8, __VA_ARGS__ )
- #define LD_SB5( ... ) LD_B5( v16i8, __VA_ARGS__ )
- #define LD_B8( RTYPE, p_src, stride, \
- out0, out1, out2, out3, out4, out5, out6, out7 ) \
- { \
- LD_B4( RTYPE, ( p_src ), stride, out0, out1, out2, out3 ); \
- LD_B4( RTYPE, ( p_src ) + 4 * stride, stride, out4, out5, out6, out7 ); \
- }
- #define LD_UB8( ... ) LD_B8( v16u8, __VA_ARGS__ )
- #define LD_SB8( ... ) LD_B8( v16i8, __VA_ARGS__ )
- /* Description : Load vectors with 8 halfword elements with stride
- Arguments : Inputs - psrc (source pointer to load from)
- - stride
- Outputs - out0, out1
- Details : Load 8 halfword elements in 'out0' from (psrc)
- Load 8 halfword elements in 'out1' from (psrc + stride)
- */
- #define LD_H2( RTYPE, p_src, stride, out0, out1 ) \
- { \
- out0 = LD_H( RTYPE, ( p_src ) ); \
- out1 = LD_H( RTYPE, ( p_src ) + ( stride ) ); \
- }
- #define LD_SH2( ... ) LD_H2( v8i16, __VA_ARGS__ )
- #define LD_H4( RTYPE, p_src, stride, out0, out1, out2, out3 ) \
- { \
- LD_H2( RTYPE, ( p_src ), stride, out0, out1 ); \
- LD_H2( RTYPE, ( p_src ) + 2 * stride, stride, out2, out3 ); \
- }
- #define LD_SH4( ... ) LD_H4( v8i16, __VA_ARGS__ )
- #define LD_H8( RTYPE, p_src, stride, \
- out0, out1, out2, out3, out4, out5, out6, out7 ) \
- { \
- LD_H4( RTYPE, ( p_src ), stride, out0, out1, out2, out3 ); \
- LD_H4( RTYPE, ( p_src ) + 4 * stride, stride, out4, out5, out6, out7 ); \
- }
- #define LD_SH8( ... ) LD_H8( v8i16, __VA_ARGS__ )
- /* Description : Load 4x4 block of signed halfword elements from 1D source
- data into 4 vectors (Each vector with 4 signed halfwords)
- Arguments : Inputs - psrc
- Outputs - out0, out1, out2, out3
- */
- #define LD4x4_SH( p_src, out0, out1, out2, out3 ) \
- { \
- out0 = LD_SH( p_src ); \
- out2 = LD_SH( p_src + 8 ); \
- out1 = ( v8i16 ) __msa_ilvl_d( ( v2i64 ) out0, ( v2i64 ) out0 ); \
- out3 = ( v8i16 ) __msa_ilvl_d( ( v2i64 ) out2, ( v2i64 ) out2 ); \
- }
- /* Description : Load 2 vectors of signed word elements with stride
- Arguments : Inputs - psrc (source pointer to load from)
- - stride
- Outputs - out0, out1
- Return Type - signed word
- */
- #define LD_SW2( p_src, stride, out0, out1 ) \
- { \
- out0 = LD_SW( ( p_src ) ); \
- out1 = LD_SW( ( p_src ) + stride ); \
- }
- /* Description : Store vectors of 16 byte elements with stride
- Arguments : Inputs - in0, in1, stride
- - pdst (destination pointer to store to)
- Details : Store 16 byte elements from 'in0' to (pdst)
- Store 16 byte elements from 'in1' to (pdst + stride)
- */
- #define ST_B2( RTYPE, in0, in1, p_dst, stride ) \
- { \
- ST_B( RTYPE, in0, ( p_dst ) ); \
- ST_B( RTYPE, in1, ( p_dst ) + stride ); \
- }
- #define ST_UB2( ... ) ST_B2( v16u8, __VA_ARGS__ )
- #define ST_B4( RTYPE, in0, in1, in2, in3, p_dst, stride ) \
- { \
- ST_B2( RTYPE, in0, in1, ( p_dst ), stride ); \
- ST_B2( RTYPE, in2, in3, ( p_dst ) + 2 * stride, stride ); \
- }
- #define ST_UB4( ... ) ST_B4( v16u8, __VA_ARGS__ )
- #define ST_SB4( ... ) ST_B4( v16i8, __VA_ARGS__ )
- #define ST_B8( RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
- p_dst, stride ) \
- { \
- ST_B4( RTYPE, in0, in1, in2, in3, p_dst, stride ); \
- ST_B4( RTYPE, in4, in5, in6, in7, ( p_dst ) + 4 * stride, stride ); \
- }
- #define ST_UB8( ... ) ST_B8( v16u8, __VA_ARGS__ )
- /* Description : Store vectors of 8 halfword elements with stride
- Arguments : Inputs - in0, in1, stride
- - pdst (destination pointer to store to)
- Details : Store 8 halfword elements from 'in0' to (pdst)
- Store 8 halfword elements from 'in1' to (pdst + stride)
- */
- #define ST_H2( RTYPE, in0, in1, p_dst, stride ) \
- { \
- ST_H( RTYPE, in0, ( p_dst ) ); \
- ST_H( RTYPE, in1, ( p_dst ) + stride ); \
- }
- #define ST_SH2( ... ) ST_H2( v8i16, __VA_ARGS__ )
- #define ST_H4( RTYPE, in0, in1, in2, in3, p_dst, stride ) \
- { \
- ST_H2( RTYPE, in0, in1, ( p_dst ), stride ); \
- ST_H2( RTYPE, in2, in3, ( p_dst ) + 2 * stride, stride ); \
- }
- #define ST_SH4( ... ) ST_H4( v8i16, __VA_ARGS__ )
- #define ST_H8( RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, p_dst, stride ) \
- { \
- ST_H4( RTYPE, in0, in1, in2, in3, ( p_dst ), stride ); \
- ST_H4( RTYPE, in4, in5, in6, in7, ( p_dst ) + 4 * stride, stride ); \
- }
- #define ST_SH8( ... ) ST_H8( v8i16, __VA_ARGS__ )
- /* Description : Store 2x4 byte block to destination memory from input vector
- Arguments : Inputs - in, stidx, pdst, stride
- Details : Index 'stidx' halfword element from 'in' vector is copied to
- GP register and stored to (pdst)
- Index 'stidx+1' halfword element from 'in' vector is copied to
- GP register and stored to (pdst + stride)
- Index 'stidx+2' halfword element from 'in' vector is copied to
- GP register and stored to (pdst + 2 * stride)
- Index 'stidx+3' halfword element from 'in' vector is copied to
- GP register and stored to (pdst + 3 * stride)
- */
- #define ST2x4_UB( in, stidx, p_dst, stride ) \
- { \
- uint16_t u_out0_m, u_out1_m, u_out2_m, u_out3_m; \
- uint8_t *pblk_2x4_m = ( uint8_t * ) ( p_dst ); \
- \
- u_out0_m = __msa_copy_u_h( ( v8i16 ) in, ( stidx ) ); \
- u_out1_m = __msa_copy_u_h( ( v8i16 ) in, ( stidx + 1 ) ); \
- u_out2_m = __msa_copy_u_h( ( v8i16 ) in, ( stidx + 2 ) ); \
- u_out3_m = __msa_copy_u_h( ( v8i16 ) in, ( stidx + 3 ) ); \
- \
- SH( u_out0_m, pblk_2x4_m ); \
- SH( u_out1_m, pblk_2x4_m + stride ); \
- SH( u_out2_m, pblk_2x4_m + 2 * stride ); \
- SH( u_out3_m, pblk_2x4_m + 3 * stride ); \
- }
- /* Description : Store 4x4 byte block to destination memory from input vector
- Arguments : Inputs - in0, in1, pdst, stride
- Details : 'Idx0' word element from input vector 'in0' is copied to
- GP register and stored to (pdst)
- 'Idx1' word element from input vector 'in0' is copied to
- GP register and stored to (pdst + stride)
- 'Idx2' word element from input vector 'in0' is copied to
- GP register and stored to (pdst + 2 * stride)
- 'Idx3' word element from input vector 'in0' is copied to
- GP register and stored to (pdst + 3 * stride)
- */
- #define ST4x4_UB( in0, in1, idx0, idx1, idx2, idx3, p_dst, stride ) \
- { \
- uint32_t u_out0_m, u_out1_m, u_out2_m, u_out3_m; \
- uint8_t *pblk_4x4_m = ( uint8_t * ) ( p_dst ); \
- \
- u_out0_m = __msa_copy_u_w( ( v4i32 ) in0, idx0 ); \
- u_out1_m = __msa_copy_u_w( ( v4i32 ) in0, idx1 ); \
- u_out2_m = __msa_copy_u_w( ( v4i32 ) in1, idx2 ); \
- u_out3_m = __msa_copy_u_w( ( v4i32 ) in1, idx3 ); \
- \
- SW4( u_out0_m, u_out1_m, u_out2_m, u_out3_m, pblk_4x4_m, stride ); \
- }
- #define ST4x8_UB( in0, in1, p_dst, stride ) \
- { \
- uint8_t *pblk_4x8 = ( uint8_t * ) ( p_dst ); \
- \
- ST4x4_UB( in0, in0, 0, 1, 2, 3, pblk_4x8, stride ); \
- ST4x4_UB( in1, in1, 0, 1, 2, 3, pblk_4x8 + 4 * stride, stride ); \
- }
- /* Description : Store 8x1 byte block to destination memory from input vector
- Arguments : Inputs - in, pdst
- Details : Index 0 double word element from 'in' vector is copied to
- GP register and stored to (pdst)
- */
- #define ST8x1_UB( in, p_dst ) \
- { \
- uint64_t u_out0_m; \
- u_out0_m = __msa_copy_u_d( ( v2i64 ) in, 0 ); \
- SD( u_out0_m, p_dst ); \
- }
- /* Description : Store 8x4 byte block to destination memory from input
- vectors
- Arguments : Inputs - in0, in1, pdst, stride
- Details : Index 0 double word element from 'in0' vector is copied to
- GP register and stored to (pdst)
- Index 1 double word element from 'in0' vector is copied to
- GP register and stored to (pdst + stride)
- Index 0 double word element from 'in1' vector is copied to
- GP register and stored to (pdst + 2 * stride)
- Index 1 double word element from 'in1' vector is copied to
- GP register and stored to (pdst + 3 * stride)
- */
- #define ST8x4_UB( in0, in1, p_dst, stride ) \
- { \
- uint64_t u_out0_m, u_out1_m, u_out2_m, u_out3_m; \
- uint8_t *pblk_8x4_m = ( uint8_t * ) ( p_dst ); \
- \
- u_out0_m = __msa_copy_u_d( ( v2i64 ) in0, 0 ); \
- u_out1_m = __msa_copy_u_d( ( v2i64 ) in0, 1 ); \
- u_out2_m = __msa_copy_u_d( ( v2i64 ) in1, 0 ); \
- u_out3_m = __msa_copy_u_d( ( v2i64 ) in1, 1 ); \
- \
- SD4( u_out0_m, u_out1_m, u_out2_m, u_out3_m, pblk_8x4_m, stride ); \
- }
- /* Description : average with rounding (in0 + in1 + 1) / 2.
- Arguments : Inputs - in0, in1, in2, in3,
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Each unsigned byte element from 'in0' vector is added with
- each unsigned byte element from 'in1' vector.
- Average with rounding is calculated and written to 'out0'
- */
- #define AVER_UB2( RTYPE, in0, in1, in2, in3, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_aver_u_b( ( v16u8 ) in0, ( v16u8 ) in1 ); \
- out1 = ( RTYPE ) __msa_aver_u_b( ( v16u8 ) in2, ( v16u8 ) in3 ); \
- }
- #define AVER_UB2_UB( ... ) AVER_UB2( v16u8, __VA_ARGS__ )
- #define AVER_UB4( RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
- out0, out1, out2, out3 ) \
- { \
- AVER_UB2( RTYPE, in0, in1, in2, in3, out0, out1 ) \
- AVER_UB2( RTYPE, in4, in5, in6, in7, out2, out3 ) \
- }
- #define AVER_UB4_UB( ... ) AVER_UB4( v16u8, __VA_ARGS__ )
- /* Description : Immediate number of elements to slide with zero
- Arguments : Inputs - in0, in1, slide_val
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Byte elements from 'zero_m' vector are slide into 'in0' by
- value specified in 'slide_val'
- */
- #define SLDI_B2_0( RTYPE, in0, in1, out0, out1, slide_val ) \
- { \
- v16i8 zero_m = { 0 }; \
- out0 = ( RTYPE ) __msa_sldi_b( ( v16i8 ) zero_m, \
- ( v16i8 ) in0, slide_val ); \
- out1 = ( RTYPE ) __msa_sldi_b( ( v16i8 ) zero_m, \
- ( v16i8 ) in1, slide_val ); \
- }
- #define SLDI_B2_0_UB( ... ) SLDI_B2_0( v16u8, __VA_ARGS__ )
- /* Description : Immediate number of elements to slide
- Arguments : Inputs - in0_0, in0_1, in1_0, in1_1, slide_val
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Byte elements from 'in0_0' vector are slide into 'in1_0' by
- value specified in 'slide_val'
- */
- #define SLDI_B2( RTYPE, in0_0, in0_1, in1_0, in1_1, out0, out1, slide_val ) \
- { \
- out0 = ( RTYPE ) __msa_sldi_b( ( v16i8 ) in0_0, ( v16i8 ) in1_0, \
- slide_val ); \
- out1 = ( RTYPE ) __msa_sldi_b( ( v16i8 ) in0_1, ( v16i8 ) in1_1, \
- slide_val ); \
- }
- #define SLDI_B2_UB( ... ) SLDI_B2( v16u8, __VA_ARGS__ )
- /* Description : Shuffle byte vector elements as per mask vector
- Arguments : Inputs - in0, in1, in2, in3, mask0, mask1
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Selective byte elements from 'in0' & 'in1' are copied to
- 'out0' as per control vector 'mask0'
- */
- #define VSHF_B2( RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_vshf_b( ( v16i8 ) mask0, \
- ( v16i8 ) in1, ( v16i8 ) in0 ); \
- out1 = ( RTYPE ) __msa_vshf_b( ( v16i8 ) mask1, \
- ( v16i8 ) in3, ( v16i8 ) in2 ); \
- }
- #define VSHF_B2_UB( ... ) VSHF_B2( v16u8, __VA_ARGS__ )
- #define VSHF_B2_SB( ... ) VSHF_B2( v16i8, __VA_ARGS__ )
- /* Description : Shuffle halfword vector elements as per mask vector
- Arguments : Inputs - in0, in1, in2, in3, mask0, mask1
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Selective byte elements from 'in0' & 'in1' are copied to
- 'out0' as per control vector 'mask0'
- */
- #define VSHF_H2( RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_vshf_h( ( v8i16 ) mask0, \
- ( v8i16 ) in1, ( v8i16 ) in0 ); \
- out1 = ( RTYPE ) __msa_vshf_h( ( v8i16 ) mask1, \
- ( v8i16 ) in3, ( v8i16 ) in2 ); \
- }
- #define VSHF_H2_SH( ... ) VSHF_H2( v8i16, __VA_ARGS__ )
- /* Description : Dot product of byte vector elements
- Arguments : Inputs - mult0, mult1
- cnst0, cnst1
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Unsigned byte elements from 'mult0' are multiplied with
- unsigned byte elements from 'cnst0' producing a result
- twice the size of input i.e. unsigned halfword.
- Multiplication result of adjacent odd-even elements
- are added together and written to the 'out0' vector
- */
- #define DOTP_UB2( RTYPE, mult0, mult1, cnst0, cnst1, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_dotp_u_h( ( v16u8 ) mult0, ( v16u8 ) cnst0 ); \
- out1 = ( RTYPE ) __msa_dotp_u_h( ( v16u8 ) mult1, ( v16u8 ) cnst1 ); \
- }
- #define DOTP_UB2_UH( ... ) DOTP_UB2( v8u16, __VA_ARGS__ )
- #define DOTP_UB4( RTYPE, mult0, mult1, mult2, mult3, \
- cnst0, cnst1, cnst2, cnst3, \
- out0, out1, out2, out3 ) \
- { \
- DOTP_UB2( RTYPE, mult0, mult1, cnst0, cnst1, out0, out1 ); \
- DOTP_UB2( RTYPE, mult2, mult3, cnst2, cnst3, out2, out3 ); \
- }
- #define DOTP_UB4_UH( ... ) DOTP_UB4( v8u16, __VA_ARGS__ )
- /* Description : Dot product of byte vector elements
- Arguments : Inputs - mult0, mult1
- cnst0, cnst1
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Signed byte elements from 'mult0' are multiplied with
- signed byte elements from 'cnst0' producing a result
- twice the size of input i.e. signed halfword.
- Multiplication result of adjacent odd-even elements
- are added together and written to the 'out0' vector
- */
- #define DPADD_SB2( RTYPE, mult0, mult1, cnst0, cnst1, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_dpadd_s_h( ( v8i16 ) out0, \
- ( v16i8 ) mult0, ( v16i8 ) cnst0 ); \
- out1 = ( RTYPE ) __msa_dpadd_s_h( ( v8i16 ) out1, \
- ( v16i8 ) mult1, ( v16i8 ) cnst1 ); \
- }
- #define DPADD_SB2_SH( ... ) DPADD_SB2( v8i16, __VA_ARGS__ )
- #define DPADD_SB4( RTYPE, mult0, mult1, mult2, mult3, \
- cnst0, cnst1, cnst2, cnst3, out0, out1, out2, out3 ) \
- { \
- DPADD_SB2( RTYPE, mult0, mult1, cnst0, cnst1, out0, out1 ); \
- DPADD_SB2( RTYPE, mult2, mult3, cnst2, cnst3, out2, out3 ); \
- }
- #define DPADD_SB4_SH( ... ) DPADD_SB4( v8i16, __VA_ARGS__ )
- /* Description : Dot product of halfword vector elements
- Arguments : Inputs - mult0, mult1
- cnst0, cnst1
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Signed halfword elements from 'mult0' are multiplied with
- signed halfword elements from 'cnst0' producing a result
- twice the size of input i.e. signed word.
- Multiplication result of adjacent odd-even elements
- are added together and written to the 'out0' vector
- */
- #define DPADD_SH2( RTYPE, mult0, mult1, cnst0, cnst1, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_dpadd_s_w( ( v4i32 ) out0, \
- ( v8i16 ) mult0, ( v8i16 ) cnst0 ); \
- out1 = ( RTYPE ) __msa_dpadd_s_w( ( v4i32 ) out1, \
- ( v8i16 ) mult1, ( v8i16 ) cnst1 ); \
- }
- #define DPADD_SH2_SW( ... ) DPADD_SH2( v4i32, __VA_ARGS__ )
- /* Description : Clips all halfword elements of input vector between min & max
- out = (in < min) ? min : ((in > max) ? max : in)
- Arguments : Inputs - in, min, max
- Output - out_m
- Return Type - signed halfword
- */
- #define CLIP_SH( in, min, max ) \
- ( { \
- v8i16 out_m; \
- \
- out_m = __msa_max_s_h( ( v8i16 ) min, ( v8i16 ) in ); \
- out_m = __msa_min_s_h( ( v8i16 ) max, ( v8i16 ) out_m ); \
- out_m; \
- } )
- /* Description : Clips all signed halfword elements of input vector
- between 0 & 255
- Arguments : Input - in
- Output - out_m
- Return Type - signed halfword
- */
- #define CLIP_SH_0_255( in ) \
- ( { \
- v8i16 max_m = __msa_ldi_h( 255 ); \
- v8i16 out_m; \
- \
- out_m = __msa_maxi_s_h( ( v8i16 ) in, 0 ); \
- out_m = __msa_min_s_h( ( v8i16 ) max_m, ( v8i16 ) out_m ); \
- out_m; \
- } )
- #define CLIP_SH2_0_255( in0, in1 ) \
- { \
- in0 = CLIP_SH_0_255( in0 ); \
- in1 = CLIP_SH_0_255( in1 ); \
- }
- #define CLIP_SH4_0_255( in0, in1, in2, in3 ) \
- { \
- CLIP_SH2_0_255( in0, in1 ); \
- CLIP_SH2_0_255( in2, in3 ); \
- }
- /* Description : Horizontal addition of 4 signed word elements of input vector
- Arguments : Input - in (signed word vector)
- Output - sum_m (i32 sum)
- Return Type - signed word (GP)
- Details : 4 signed word elements of 'in' vector are added together and
- the resulting integer sum is returned
- */
- #define HADD_SW_S32( in ) \
- ( { \
- v2i64 res0_m, res1_m; \
- int32_t i_sum_m; \
- \
- res0_m = __msa_hadd_s_d( ( v4i32 ) in, ( v4i32 ) in ); \
- res1_m = __msa_splati_d( res0_m, 1 ); \
- res0_m = res0_m + res1_m; \
- i_sum_m = __msa_copy_s_w( ( v4i32 ) res0_m, 0 ); \
- i_sum_m; \
- } )
- /* Description : Horizontal addition of 4 signed word elements of input vector
- Arguments : Input - in (signed word vector)
- Output - sum_m (i32 sum)
- Return Type - signed word (GP)
- Details : 4 signed word elements of 'in' vector are added together and
- the resulting integer sum is returned
- */
- #define HADD_UH_U32( in ) \
- ( { \
- v4u32 res_m; \
- v2u64 res0_m, res1_m; \
- uint32_t u_sum_m; \
- \
- res_m = __msa_hadd_u_w( ( v8u16 ) in, ( v8u16 ) in ); \
- res0_m = __msa_hadd_u_d( res_m, res_m ); \
- res1_m = ( v2u64 ) __msa_splati_d( ( v2i64 ) res0_m, 1 ); \
- res0_m = res0_m + res1_m; \
- u_sum_m = __msa_copy_u_w( ( v4i32 ) res0_m, 0 ); \
- u_sum_m; \
- } )
- /* Description : Horizontal addition of signed byte vector elements
- Arguments : Inputs - in0, in1
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Each signed odd byte element from 'in0' is added to
- even signed byte element from 'in0' (pairwise) and the
- halfword result is written in 'out0'
- */
- #define HADD_SB2( RTYPE, in0, in1, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_hadd_s_h( ( v16i8 ) in0, ( v16i8 ) in0 ); \
- out1 = ( RTYPE ) __msa_hadd_s_h( ( v16i8 ) in1, ( v16i8 ) in1 ); \
- }
- #define HADD_SB4( RTYPE, in0, in1, in2, in3, out0, out1, out2, out3 ) \
- { \
- HADD_SB2( RTYPE, in0, in1, out0, out1 ); \
- HADD_SB2( RTYPE, in2, in3, out2, out3 ); \
- }
- #define HADD_SB4_SH( ... ) HADD_SB4( v8i16, __VA_ARGS__ )
- /* Description : Horizontal addition of unsigned byte vector elements
- Arguments : Inputs - in0, in1
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Each unsigned odd byte element from 'in0' is added to
- even unsigned byte element from 'in0' (pairwise) and the
- halfword result is written to 'out0'
- */
- #define HADD_UB2( RTYPE, in0, in1, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_hadd_u_h( ( v16u8 ) in0, ( v16u8 ) in0 ); \
- out1 = ( RTYPE ) __msa_hadd_u_h( ( v16u8 ) in1, ( v16u8 ) in1 ); \
- }
- #define HADD_UB2_UH( ... ) HADD_UB2( v8u16, __VA_ARGS__ )
- #define HADD_UB4( RTYPE, in0, in1, in2, in3, out0, out1, out2, out3 ) \
- { \
- HADD_UB2( RTYPE, in0, in1, out0, out1 ); \
- HADD_UB2( RTYPE, in2, in3, out2, out3 ); \
- }
- #define HADD_UB4_UH( ... ) HADD_UB4( v8u16, __VA_ARGS__ )
- /* Description : Horizontal subtraction of unsigned byte vector elements
- Arguments : Inputs - in0, in1
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Each unsigned odd byte element from 'in0' is subtracted from
- even unsigned byte element from 'in0' (pairwise) and the
- halfword result is written to 'out0'
- */
- #define HSUB_UB2( RTYPE, in0, in1, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_hsub_u_h( ( v16u8 ) in0, ( v16u8 ) in0 ); \
- out1 = ( RTYPE ) __msa_hsub_u_h( ( v16u8 ) in1, ( v16u8 ) in1 ); \
- }
- #define HSUB_UB2_SH( ... ) HSUB_UB2( v8i16, __VA_ARGS__ )
- #define HSUB_UB4( RTYPE, in0, in1, in2, in3, out0, out1, out2, out3 ) \
- { \
- HSUB_UB2( RTYPE, in0, in1, out0, out1 ); \
- HSUB_UB2( RTYPE, in2, in3, out2, out3 ); \
- }
- #define HSUB_UB4_SH( ... ) HSUB_UB4( v8i16, __VA_ARGS__ )
- /* Description : SAD (Sum of Absolute Difference)
- Arguments : Inputs - in0, in1, ref0, ref1
- Outputs - sad_m (halfword vector)
- Return Type - unsigned halfword
- Details : Absolute difference of all the byte elements from 'in0' with
- 'ref0' is calculated and preserved in 'diff0'. Then even-odd
- pairs are added together to generate 8 halfword results.
- */
- #define SAD_UB2_UH( in0, in1, ref0, ref1 ) \
- ( { \
- v16u8 diff0_m, diff1_m; \
- v8u16 sad_m = { 0 }; \
- \
- diff0_m = __msa_asub_u_b( ( v16u8 ) in0, ( v16u8 ) ref0 ); \
- diff1_m = __msa_asub_u_b( ( v16u8 ) in1, ( v16u8 ) ref1 ); \
- \
- sad_m += __msa_hadd_u_h( ( v16u8 ) diff0_m, ( v16u8 ) diff0_m ); \
- sad_m += __msa_hadd_u_h( ( v16u8 ) diff1_m, ( v16u8 ) diff1_m ); \
- \
- sad_m; \
- } )
- /* Description : Set element n input vector to GPR value
- Arguments : Inputs - in0, in1, in2, in3 (4 input vectors)
- Output - out (output vector)
- Return Type - as per RTYPE
- Details : Set element 0 in vector 'out' to value specified in 'in0'
- */
- #define INSERT_W2( RTYPE, in0, in1, out ) \
- { \
- out = ( RTYPE ) __msa_insert_w( ( v4i32 ) out, 0, in0 ); \
- out = ( RTYPE ) __msa_insert_w( ( v4i32 ) out, 1, in1 ); \
- }
- #define INSERT_W2_SB( ... ) INSERT_W2( v16i8, __VA_ARGS__ )
- #define INSERT_W4( RTYPE, in0, in1, in2, in3, out ) \
- { \
- out = ( RTYPE ) __msa_insert_w( ( v4i32 ) out, 0, in0 ); \
- out = ( RTYPE ) __msa_insert_w( ( v4i32 ) out, 1, in1 ); \
- out = ( RTYPE ) __msa_insert_w( ( v4i32 ) out, 2, in2 ); \
- out = ( RTYPE ) __msa_insert_w( ( v4i32 ) out, 3, in3 ); \
- }
- #define INSERT_W4_UB( ... ) INSERT_W4( v16u8, __VA_ARGS__ )
- #define INSERT_W4_SB( ... ) INSERT_W4( v16i8, __VA_ARGS__ )
- #define INSERT_D2( RTYPE, in0, in1, out ) \
- { \
- out = ( RTYPE ) __msa_insert_d( ( v2i64 ) out, 0, in0 ); \
- out = ( RTYPE ) __msa_insert_d( ( v2i64 ) out, 1, in1 ); \
- }
- #define INSERT_D2_UB( ... ) INSERT_D2( v16u8, __VA_ARGS__ )
- /* Description : Interleave even halfword elements from vectors
- Arguments : Inputs - in0, in1, in2, in3
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Even halfword elements of 'in0' and 'in1' are interleaved
- and written to 'out0'
- */
- #define ILVEV_H2( RTYPE, in0, in1, in2, in3, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_ilvev_h( ( v8i16 ) in1, ( v8i16 ) in0 ); \
- out1 = ( RTYPE ) __msa_ilvev_h( ( v8i16 ) in3, ( v8i16 ) in2 ); \
- }
- #define ILVEV_H2_UB( ... ) ILVEV_H2( v16u8, __VA_ARGS__ )
- /* Description : Interleave even double word elements from vectors
- Arguments : Inputs - in0, in1, in2, in3
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Even double word elements of 'in0' and 'in1' are interleaved
- and written to 'out0'
- */
- #define ILVEV_D2( RTYPE, in0, in1, in2, in3, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_ilvev_d( ( v2i64 ) in1, ( v2i64 ) in0 ); \
- out1 = ( RTYPE ) __msa_ilvev_d( ( v2i64 ) in3, ( v2i64 ) in2 ); \
- }
- #define ILVEV_D2_UB( ... ) ILVEV_D2( v16u8, __VA_ARGS__ )
- /* Description : Interleave left half of byte elements from vectors
- Arguments : Inputs - in0, in1, in2, in3
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Left half of byte elements of 'in0' and 'in1' are interleaved
- and written to 'out0'.
- */
- #define ILVL_B2( RTYPE, in0, in1, in2, in3, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_ilvl_b( ( v16i8 ) in0, ( v16i8 ) in1 ); \
- out1 = ( RTYPE ) __msa_ilvl_b( ( v16i8 ) in2, ( v16i8 ) in3 ); \
- }
- #define ILVL_B2_UH( ... ) ILVL_B2( v8u16, __VA_ARGS__ )
- #define ILVL_B2_SH( ... ) ILVL_B2( v8i16, __VA_ARGS__ )
- #define ILVL_B4( RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
- out0, out1, out2, out3 ) \
- { \
- ILVL_B2( RTYPE, in0, in1, in2, in3, out0, out1 ); \
- ILVL_B2( RTYPE, in4, in5, in6, in7, out2, out3 ); \
- }
- #define ILVL_B4_UB( ... ) ILVL_B4( v16u8, __VA_ARGS__ )
- #define ILVL_B4_SB( ... ) ILVL_B4( v16i8, __VA_ARGS__ )
- #define ILVL_B4_UH( ... ) ILVL_B4( v8u16, __VA_ARGS__ )
- #define ILVL_B4_SH( ... ) ILVL_B4( v8i16, __VA_ARGS__ )
- /* Description : Interleave left half of halfword elements from vectors
- Arguments : Inputs - in0, in1, in2, in3
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Left half of halfword elements of 'in0' and 'in1' are
- interleaved and written to 'out0'.
- */
- #define ILVL_H2( RTYPE, in0, in1, in2, in3, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_ilvl_h( ( v8i16 ) in0, ( v8i16 ) in1 ); \
- out1 = ( RTYPE ) __msa_ilvl_h( ( v8i16 ) in2, ( v8i16 ) in3 ); \
- }
- #define ILVL_H2_SH( ... ) ILVL_H2( v8i16, __VA_ARGS__ )
- #define ILVL_H2_SW( ... ) ILVL_H2( v4i32, __VA_ARGS__ )
- #define ILVL_H4( RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
- out0, out1, out2, out3 ) \
- { \
- ILVL_H2( RTYPE, in0, in1, in2, in3, out0, out1 ); \
- ILVL_H2( RTYPE, in4, in5, in6, in7, out2, out3 ); \
- }
- #define ILVL_H4_SW( ... ) ILVL_H4( v4i32, __VA_ARGS__ )
- /* Description : Interleave left half of word elements from vectors
- Arguments : Inputs - in0, in1, in2, in3
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Left half of word elements of 'in0' and 'in1' are interleaved
- and written to 'out0'.
- */
- #define ILVL_W2( RTYPE, in0, in1, in2, in3, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_ilvl_w( ( v4i32 ) in0, ( v4i32 ) in1 ); \
- out1 = ( RTYPE ) __msa_ilvl_w( ( v4i32 ) in2, ( v4i32 ) in3 ); \
- }
- #define ILVL_W2_SH( ... ) ILVL_W2( v8i16, __VA_ARGS__ )
- /* Description : Interleave right half of byte elements from vectors
- Arguments : Inputs - in0, in1, in2, in3
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Right half of byte elements of 'in0' and 'in1' are interleaved
- and written to out0.
- */
- #define ILVR_B2( RTYPE, in0, in1, in2, in3, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_ilvr_b( ( v16i8 ) in0, ( v16i8 ) in1 ); \
- out1 = ( RTYPE ) __msa_ilvr_b( ( v16i8 ) in2, ( v16i8 ) in3 ); \
- }
- #define ILVR_B2_SB( ... ) ILVR_B2( v16i8, __VA_ARGS__ )
- #define ILVR_B2_UH( ... ) ILVR_B2( v8u16, __VA_ARGS__ )
- #define ILVR_B2_SH( ... ) ILVR_B2( v8i16, __VA_ARGS__ )
- #define ILVR_B4( RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
- out0, out1, out2, out3 ) \
- { \
- ILVR_B2( RTYPE, in0, in1, in2, in3, out0, out1 ); \
- ILVR_B2( RTYPE, in4, in5, in6, in7, out2, out3 ); \
- }
- #define ILVR_B4_UB( ... ) ILVR_B4( v16u8, __VA_ARGS__ )
- #define ILVR_B4_SB( ... ) ILVR_B4( v16i8, __VA_ARGS__ )
- #define ILVR_B4_UH( ... ) ILVR_B4( v8u16, __VA_ARGS__ )
- #define ILVR_B4_SH( ... ) ILVR_B4( v8i16, __VA_ARGS__ )
- /* Description : Interleave right half of halfword elements from vectors
- Arguments : Inputs - in0, in1, in2, in3
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Right half of halfword elements of 'in0' and 'in1' are
- interleaved and written to 'out0'.
- */
- #define ILVR_H2( RTYPE, in0, in1, in2, in3, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_ilvr_h( ( v8i16 ) in0, ( v8i16 ) in1 ); \
- out1 = ( RTYPE ) __msa_ilvr_h( ( v8i16 ) in2, ( v8i16 ) in3 ); \
- }
- #define ILVR_H2_SH( ... ) ILVR_H2( v8i16, __VA_ARGS__ )
- #define ILVR_H2_SW( ... ) ILVR_H2( v4i32, __VA_ARGS__ )
- #define ILVR_H4( RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
- out0, out1, out2, out3 ) \
- { \
- ILVR_H2( RTYPE, in0, in1, in2, in3, out0, out1 ); \
- ILVR_H2( RTYPE, in4, in5, in6, in7, out2, out3 ); \
- }
- #define ILVR_H4_SH( ... ) ILVR_H4( v8i16, __VA_ARGS__ )
- #define ILVR_H4_SW( ... ) ILVR_H4( v4i32, __VA_ARGS__ )
- #define ILVR_W2( RTYPE, in0, in1, in2, in3, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_ilvr_w( ( v4i32 ) in0, ( v4i32 ) in1 ); \
- out1 = ( RTYPE ) __msa_ilvr_w( ( v4i32 ) in2, ( v4i32 ) in3 ); \
- }
- #define ILVR_W2_SH( ... ) ILVR_W2( v8i16, __VA_ARGS__ )
- /* Description : Interleave right half of double word elements from vectors
- Arguments : Inputs - in0, in1, in2, in3
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Right half of double word elements of 'in0' and 'in1' are
- interleaved and written to 'out0'.
- */
- #define ILVR_D2( RTYPE, in0, in1, in2, in3, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_ilvr_d( ( v2i64 ) ( in0 ), ( v2i64 ) ( in1 ) ); \
- out1 = ( RTYPE ) __msa_ilvr_d( ( v2i64 ) ( in2 ), ( v2i64 ) ( in3 ) ); \
- }
- #define ILVR_D2_UB( ... ) ILVR_D2( v16u8, __VA_ARGS__ )
- #define ILVR_D2_SB( ... ) ILVR_D2( v16i8, __VA_ARGS__ )
- #define ILVR_D2_SH( ... ) ILVR_D2( v8i16, __VA_ARGS__ )
- #define ILVR_D4( RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
- out0, out1, out2, out3 ) \
- { \
- ILVR_D2( RTYPE, in0, in1, in2, in3, out0, out1 ); \
- ILVR_D2( RTYPE, in4, in5, in6, in7, out2, out3 ); \
- }
- #define ILVR_D4_UB( ... ) ILVR_D4( v16u8, __VA_ARGS__ )
- /* Description : Interleave both left and right half of input vectors
- Arguments : Inputs - in0, in1
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Right half of byte elements from 'in0' and 'in1' are
- interleaved and written to 'out0'
- */
- #define ILVRL_B2( RTYPE, in0, in1, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_ilvr_b( ( v16i8 ) in0, ( v16i8 ) in1 ); \
- out1 = ( RTYPE ) __msa_ilvl_b( ( v16i8 ) in0, ( v16i8 ) in1 ); \
- }
- #define ILVRL_B2_UB( ... ) ILVRL_B2( v16u8, __VA_ARGS__ )
- #define ILVRL_B2_SB( ... ) ILVRL_B2( v16i8, __VA_ARGS__ )
- #define ILVRL_B2_UH( ... ) ILVRL_B2( v8u16, __VA_ARGS__ )
- #define ILVRL_B2_SH( ... ) ILVRL_B2( v8i16, __VA_ARGS__ )
- #define ILVRL_B2_SW( ... ) ILVRL_B2( v4i32, __VA_ARGS__ )
- #define ILVRL_H2( RTYPE, in0, in1, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_ilvr_h( ( v8i16 ) in0, ( v8i16 ) in1 ); \
- out1 = ( RTYPE ) __msa_ilvl_h( ( v8i16 ) in0, ( v8i16 ) in1 ); \
- }
- #define ILVRL_H2_SH( ... ) ILVRL_H2( v8i16, __VA_ARGS__ )
- #define ILVRL_H2_SW( ... ) ILVRL_H2( v4i32, __VA_ARGS__ )
- #define ILVRL_W2( RTYPE, in0, in1, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_ilvr_w( ( v4i32 ) in0, ( v4i32 ) in1 ); \
- out1 = ( RTYPE ) __msa_ilvl_w( ( v4i32 ) in0, ( v4i32 ) in1 ); \
- }
- #define ILVRL_W2_SH( ... ) ILVRL_W2( v8i16, __VA_ARGS__ )
- #define ILVRL_W2_SW( ... ) ILVRL_W2( v4i32, __VA_ARGS__ )
- /* Description : Maximum values between signed elements of vector and
- 5-bit signed immediate value are copied to the output vector
- Arguments : Inputs - in0, in1, in2, in3, max_val
- Outputs - in place operation
- Return Type - unsigned halfword
- Details : Maximum of signed halfword element values from 'in0' and
- 'max_val' are written in place
- */
- #define MAXI_SH2( RTYPE, in0, in1, max_val ) \
- { \
- in0 = ( RTYPE ) __msa_maxi_s_h( ( v8i16 ) in0, ( max_val ) ); \
- in1 = ( RTYPE ) __msa_maxi_s_h( ( v8i16 ) in1, ( max_val ) ); \
- }
- #define MAXI_SH2_UH( ... ) MAXI_SH2( v8u16, __VA_ARGS__ )
- #define MAXI_SH2_SH( ... ) MAXI_SH2( v8i16, __VA_ARGS__ )
- #define MAXI_SH4( RTYPE, in0, in1, in2, in3, max_val ) \
- { \
- MAXI_SH2( RTYPE, in0, in1, max_val ); \
- MAXI_SH2( RTYPE, in2, in3, max_val ); \
- }
- #define MAXI_SH4_UH( ... ) MAXI_SH4( v8u16, __VA_ARGS__ )
- /* Description : Saturate the halfword element values to the max
- unsigned value of (sat_val + 1 bits)
- The element data width remains unchanged
- Arguments : Inputs - in0, in1, sat_val
- Outputs - in place operation
- Return Type - as per RTYPE
- Details : Each unsigned halfword element from 'in0' is saturated to the
- value generated with (sat_val+1) bit range.
- The results are written in place
- */
- #define SAT_UH2( RTYPE, in0, in1, sat_val ) \
- { \
- in0 = ( RTYPE ) __msa_sat_u_h( ( v8u16 ) in0, sat_val ); \
- in1 = ( RTYPE ) __msa_sat_u_h( ( v8u16 ) in1, sat_val ); \
- }
- #define SAT_UH2_UH( ... ) SAT_UH2( v8u16, __VA_ARGS__ )
- #define SAT_UH4( RTYPE, in0, in1, in2, in3, sat_val ) \
- { \
- SAT_UH2( RTYPE, in0, in1, sat_val ); \
- SAT_UH2( RTYPE, in2, in3, sat_val ) \
- }
- #define SAT_UH4_UH( ... ) SAT_UH4( v8u16, __VA_ARGS__ )
- /* Description : Saturate the halfword element values to the max
- unsigned value of (sat_val+1 bits)
- The element data width remains unchanged
- Arguments : Inputs - in0, in1, sat_val
- Outputs - in place operation
- Return Type - as per RTYPE
- Details : Each unsigned halfword element from 'in0' is saturated to the
- value generated with (sat_val+1) bit range
- The results are written in place
- */
- #define SAT_SH2( RTYPE, in0, in1, sat_val ) \
- { \
- in0 = ( RTYPE ) __msa_sat_s_h( ( v8i16 ) in0, sat_val ); \
- in1 = ( RTYPE ) __msa_sat_s_h( ( v8i16 ) in1, sat_val ); \
- }
- #define SAT_SH2_SH( ... ) SAT_SH2( v8i16, __VA_ARGS__ )
- #define SAT_SH4( RTYPE, in0, in1, in2, in3, sat_val ) \
- { \
- SAT_SH2( RTYPE, in0, in1, sat_val ); \
- SAT_SH2( RTYPE, in2, in3, sat_val ); \
- }
- #define SAT_SH4_SH( ... ) SAT_SH4( v8i16, __VA_ARGS__ )
- /* Description : Saturate the word element values to the max
- unsigned value of (sat_val+1 bits)
- The element data width remains unchanged
- Arguments : Inputs - in0, in1, sat_val
- Outputs - in place operation
- Return Type - as per RTYPE
- Details : Each unsigned word element from 'in0' is saturated to the
- value generated with (sat_val+1) bit range
- The results are written in place
- */
- #define SAT_SW2( RTYPE, in0, in1, sat_val ) \
- { \
- in0 = ( RTYPE ) __msa_sat_s_w( ( v4i32 ) in0, sat_val ); \
- in1 = ( RTYPE ) __msa_sat_s_w( ( v4i32 ) in1, sat_val ); \
- }
- #define SAT_SW2_SW( ... ) SAT_SW2( v4i32, __VA_ARGS__ )
- /* Description : Pack even byte elements of vector pairs
- Arguments : Inputs - in0, in1, in2, in3
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Even byte elements of 'in0' are copied to the left half of
- 'out0' & even byte elements of 'in1' are copied to the right
- half of 'out0'.
- */
- #define PCKEV_B2( RTYPE, in0, in1, in2, in3, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_pckev_b( ( v16i8 ) in0, ( v16i8 ) in1 ); \
- out1 = ( RTYPE ) __msa_pckev_b( ( v16i8 ) in2, ( v16i8 ) in3 ); \
- }
- #define PCKEV_B2_SB( ... ) PCKEV_B2( v16i8, __VA_ARGS__ )
- #define PCKEV_B2_UB( ... ) PCKEV_B2( v16u8, __VA_ARGS__ )
- #define PCKEV_B2_SH( ... ) PCKEV_B2( v8i16, __VA_ARGS__ )
- #define PCKEV_B2_SW( ... ) PCKEV_B2( v4i32, __VA_ARGS__ )
- #define PCKEV_B3( RTYPE, in0, in1, in2, in3, in4, in5, out0, out1, out2 ) \
- { \
- PCKEV_B2( RTYPE, in0, in1, in2, in3, out0, out1 ); \
- out2 = ( RTYPE ) __msa_pckev_b( ( v16i8 ) in4, ( v16i8 ) in5 ); \
- }
- #define PCKEV_B3_UB( ... ) PCKEV_B3( v16u8, __VA_ARGS__ )
- #define PCKEV_B4( RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
- out0, out1, out2, out3 ) \
- { \
- PCKEV_B2( RTYPE, in0, in1, in2, in3, out0, out1 ); \
- PCKEV_B2( RTYPE, in4, in5, in6, in7, out2, out3 ); \
- }
- #define PCKEV_B4_SB( ... ) PCKEV_B4( v16i8, __VA_ARGS__ )
- #define PCKEV_B4_UB( ... ) PCKEV_B4( v16u8, __VA_ARGS__ )
- /* Description : Pack even halfword elements of vector pairs
- Arguments : Inputs - in0, in1, in2, in3
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Even halfword elements of 'in0' are copied to the left half of
- 'out0' & even halfword elements of 'in1' are copied to the
- right half of 'out0'.
- */
- #define PCKEV_H2( RTYPE, in0, in1, in2, in3, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_pckev_h( ( v8i16 ) in0, ( v8i16 ) in1 ); \
- out1 = ( RTYPE ) __msa_pckev_h( ( v8i16 ) in2, ( v8i16 ) in3 ); \
- }
- #define PCKEV_H2_SH( ... ) PCKEV_H2( v8i16, __VA_ARGS__ )
- #define PCKEV_H4( RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
- out0, out1, out2, out3 ) \
- { \
- PCKEV_H2( RTYPE, in0, in1, in2, in3, out0, out1 ); \
- PCKEV_H2( RTYPE, in4, in5, in6, in7, out2, out3 ); \
- }
- #define PCKEV_H4_SH( ... ) PCKEV_H4( v8i16, __VA_ARGS__ )
- /* Description : Pack even double word elements of vector pairs
- Arguments : Inputs - in0, in1, in2, in3
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Even double elements of 'in0' are copied to the left half of
- 'out0' & even double elements of 'in1' are copied to the right
- half of 'out0'.
- */
- #define PCKEV_D2( RTYPE, in0, in1, in2, in3, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_pckev_d( ( v2i64 ) in0, ( v2i64 ) in1 ); \
- out1 = ( RTYPE ) __msa_pckev_d( ( v2i64 ) in2, ( v2i64 ) in3 ); \
- }
- #define PCKEV_D2_UB( ... ) PCKEV_D2( v16u8, __VA_ARGS__ )
- #define PCKEV_D4( RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
- out0, out1, out2, out3 ) \
- { \
- PCKEV_D2( RTYPE, in0, in1, in2, in3, out0, out1 ); \
- PCKEV_D2( RTYPE, in4, in5, in6, in7, out2, out3 ); \
- }
- #define PCKEV_D4_UB( ... ) PCKEV_D4( v16u8, __VA_ARGS__ )
- /* Description : Pack odd byte elements of vector pairs
- Arguments : Inputs - in0, in1, in2, in3
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Odd byte elements of 'in0' are copied to the left half of
- 'out0' & odd byte elements of 'in1' are copied to the right
- half of 'out0'.
- */
- #define PCKOD_B2( RTYPE, in0, in1, in2, in3, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_pckod_b( ( v16i8 ) in0, ( v16i8 ) in1 ); \
- out1 = ( RTYPE ) __msa_pckod_b( ( v16i8 ) in2, ( v16i8 ) in3 ); \
- }
- #define PCKOD_B2_UB( ... ) PCKOD_B2( v16u8, __VA_ARGS__ )
- #define PCKOD_B4( RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
- out0, out1, out2, out3 ) \
- { \
- PCKOD_B2( RTYPE, in0, in1, in2, in3, out0, out1 ); \
- PCKOD_B2( RTYPE, in4, in5, in6, in7, out2, out3 ); \
- }
- #define PCKOD_B4_UB( ... ) PCKOD_B4( v16u8, __VA_ARGS__ )
- /* Description : Pack odd double word elements of vector pairs
- Arguments : Inputs - in0, in1, in2, in3
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Odd double word elements of 'in0' are copied to the left half
- of 'out0' & odd double word elements of 'in1' are copied to
- the right half of 'out0'.
- */
- #define PCKOD_D2( RTYPE, in0, in1, in2, in3, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_pckod_d( ( v2i64 ) in0, ( v2i64 ) in1 ); \
- out1 = ( RTYPE ) __msa_pckod_d( ( v2i64 ) in2, ( v2i64 ) in3 ); \
- }
- #define PCKOD_D2_SH( ... ) PCKOD_D2( v8i16, __VA_ARGS__ )
- #define PCKOD_D2_SD( ... ) PCKOD_D2( v2i64, __VA_ARGS__ )
- /* Description : Each byte element is logically xor'ed with immediate 128
- Arguments : Inputs - in0, in1
- Outputs - in place operation
- Return Type - as per RTYPE
- Details : Each unsigned byte element from input vector 'in0' is
- logically xor'ed with 128 and the result is stored in-place.
- */
- #define XORI_B2_128( RTYPE, in0, in1 ) \
- { \
- in0 = ( RTYPE ) __msa_xori_b( ( v16u8 ) in0, 128 ); \
- in1 = ( RTYPE ) __msa_xori_b( ( v16u8 ) in1, 128 ); \
- }
- #define XORI_B2_128_UB( ... ) XORI_B2_128( v16u8, __VA_ARGS__ )
- #define XORI_B2_128_SB( ... ) XORI_B2_128( v16i8, __VA_ARGS__ )
- #define XORI_B3_128( RTYPE, in0, in1, in2 ) \
- { \
- XORI_B2_128( RTYPE, in0, in1 ); \
- in2 = ( RTYPE ) __msa_xori_b( ( v16u8 ) in2, 128 ); \
- }
- #define XORI_B3_128_SB( ... ) XORI_B3_128( v16i8, __VA_ARGS__ )
- #define XORI_B4_128( RTYPE, in0, in1, in2, in3 ) \
- { \
- XORI_B2_128( RTYPE, in0, in1 ); \
- XORI_B2_128( RTYPE, in2, in3 ); \
- }
- #define XORI_B4_128_UB( ... ) XORI_B4_128( v16u8, __VA_ARGS__ )
- #define XORI_B4_128_SB( ... ) XORI_B4_128( v16i8, __VA_ARGS__ )
- #define XORI_B5_128( RTYPE, in0, in1, in2, in3, in4 ) \
- { \
- XORI_B3_128( RTYPE, in0, in1, in2 ); \
- XORI_B2_128( RTYPE, in3, in4 ); \
- }
- #define XORI_B5_128_SB( ... ) XORI_B5_128( v16i8, __VA_ARGS__ )
- /* Description : Addition of signed halfword elements and signed saturation
- Arguments : Inputs - in0, in1, in2, in3
- Outputs - out0, out1
- Return Type - as per RTYPE
- Details : Signed halfword elements from 'in0' are added to signed
- halfword elements of 'in1'. The result is then signed saturated
- between halfword data type range
- */
- #define ADDS_SH2( RTYPE, in0, in1, in2, in3, out0, out1 ) \
- { \
- out0 = ( RTYPE ) __msa_adds_s_h( ( v8i16 ) in0, ( v8i16 ) in1 ); \
- out1 = ( RTYPE ) __msa_adds_s_h( ( v8i16 ) in2, ( v8i16 ) in3 ); \
- }
- #define ADDS_SH2_SH( ... ) ADDS_SH2( v8i16, __VA_ARGS__ )
- #define ADDS_SH4( RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
- out0, out1, out2, out3 ) \
- { \
- ADDS_SH2( RTYPE, in0, in1, in2, in3, out0, out1 ); \
- ADDS_SH2( RTYPE, in4, in5, in6, in7, out2, out3 ); \
- }
- #define ADDS_SH4_UH( ... ) ADDS_SH4( v8u16, __VA_ARGS__ )
- /* Description : Shift left all elements of vector (generic for all data types)
- Arguments : Inputs - in0, in1, in2, in3, shift
- Outputs - in place operation
- Return Type - as per input vector RTYPE
- Details : Each element of vector 'in0' is left shifted by 'shift' and
- the result is written in-place.
- */
- #define SLLI_4V( in0, in1, in2, in3, shift ) \
- { \
- in0 = in0 << shift; \
- in1 = in1 << shift; \
- in2 = in2 << shift; \
- in3 = in3 << shift; \
- }
- /* Description : Arithmetic shift right all elements of vector
- (generic for all data types)
- Arguments : Inputs - in0, in1, in2, in3, shift
- Outputs - in place operation
- Return Type - as per input vector RTYPE
- Details : Each element of vector 'in0' is right shifted by 'shift' and
- the result is written in-place. 'shift' is a GP variable.
- */
- #define SRA_4V( in0, in1, in2, in3, shift ) \
- { \
- in0 = in0 >> shift; \
- in1 = in1 >> shift; \
- in2 = in2 >> shift; \
- in3 = in3 >> shift; \
- }
- /* Description : Shift right arithmetic rounded halfwords
- Arguments : Inputs - in0, in1, shift
- Outputs - in place operation
- Return Type - as per RTYPE
- Details : Each element of vector 'in0' is shifted right arithmetic by
- number of bits respective element holds in vector 'shift'.
- The last discarded bit is added to shifted value for rounding
- and the result is written in-place.
- 'shift' is a vector.
- */
- #define SRAR_H2( RTYPE, in0, in1, shift ) \
- { \
- in0 = ( RTYPE ) __msa_srar_h( ( v8i16 ) in0, ( v8i16 ) shift ); \
- in1 = ( RTYPE ) __msa_srar_h( ( v8i16 ) in1, ( v8i16 ) shift ); \
- }
- #define SRAR_H2_SH( ... ) SRAR_H2( v8i16, __VA_ARGS__ )
- #define SRAR_H4( RTYPE, in0, in1, in2, in3, shift ) \
- { \
- SRAR_H2( RTYPE, in0, in1, shift ) \
- SRAR_H2( RTYPE, in2, in3, shift ) \
- }
- #define SRAR_H4_SH( ... ) SRAR_H4( v8i16, __VA_ARGS__ )
- /* Description : Shift right logical all halfword elements of vector
- Arguments : Inputs - in0, in1, in2, in3, shift
- Outputs - in place operation
- Return Type - as per RTYPE
- Details : Each element of vector 'in0' is shifted right logical by
- number of bits respective element holds in vector 'shift' and
- the result is stored in-place.'shift' is a vector.
- */
- #define SRL_H4( RTYPE, in0, in1, in2, in3, shift ) \
- { \
- in0 = ( RTYPE ) __msa_srl_h( ( v8i16 ) in0, ( v8i16 ) shift ); \
- in1 = ( RTYPE ) __msa_srl_h( ( v8i16 ) in1, ( v8i16 ) shift ); \
- in2 = ( RTYPE ) __msa_srl_h( ( v8i16 ) in2, ( v8i16 ) shift ); \
- in3 = ( RTYPE ) __msa_srl_h( ( v8i16 ) in3, ( v8i16 ) shift ); \
- }
- #define SRL_H4_UH( ... ) SRL_H4( v8u16, __VA_ARGS__ )
- /* Description : Shift right arithmetic rounded (immediate)
- Arguments : Inputs - in0, in1, shift
- Outputs - in place operation
- Return Type - as per RTYPE
- Details : Each element of vector 'in0' is shifted right arithmetic by
- value in 'shift'. The last discarded bit is added to shifted
- value for rounding and the result is written in-place.
- 'shift' is an immediate value.
- */
- #define SRARI_H2( RTYPE, in0, in1, shift ) \
- { \
- in0 = ( RTYPE ) __msa_srari_h( ( v8i16 ) in0, shift ); \
- in1 = ( RTYPE ) __msa_srari_h( ( v8i16 ) in1, shift ); \
- }
- #define SRARI_H2_UH( ... ) SRARI_H2( v8u16, __VA_ARGS__ )
- #define SRARI_H2_SH( ... ) SRARI_H2( v8i16, __VA_ARGS__ )
- #define SRARI_H4( RTYPE, in0, in1, in2, in3, shift ) \
- { \
- SRARI_H2( RTYPE, in0, in1, shift ); \
- SRARI_H2( RTYPE, in2, in3, shift ); \
- }
- #define SRARI_H4_UH( ... ) SRARI_H4( v8u16, __VA_ARGS__ )
- #define SRARI_H4_SH( ... ) SRARI_H4( v8i16, __VA_ARGS__ )
- #define SRARI_W2( RTYPE, in0, in1, shift ) \
- { \
- in0 = ( RTYPE ) __msa_srari_w( ( v4i32 ) in0, shift ); \
- in1 = ( RTYPE ) __msa_srari_w( ( v4i32 ) in1, shift ); \
- }
- #define SRARI_W2_SW( ... ) SRARI_W2( v4i32, __VA_ARGS__ )
- #define SRARI_W4( RTYPE, in0, in1, in2, in3, shift ) \
- { \
- SRARI_W2( RTYPE, in0, in1, shift ); \
- SRARI_W2( RTYPE, in2, in3, shift ); \
- }
- #define SRARI_W4_SW( ... ) SRARI_W4( v4i32, __VA_ARGS__ )
- /* Description : Multiplication of pairs of vectors
- Arguments : Inputs - in0, in1, in2, in3
- Outputs - out0, out1
- Details : Each element from 'in0' is multiplied with elements from 'in1'
- and the result is written to 'out0'
- */
- #define MUL2( in0, in1, in2, in3, out0, out1 ) \
- { \
- out0 = in0 * in1; \
- out1 = in2 * in3; \
- }
- #define MUL4( in0, in1, in2, in3, in4, in5, in6, in7, \
- out0, out1, out2, out3 ) \
- { \
- MUL2( in0, in1, in2, in3, out0, out1 ); \
- MUL2( in4, in5, in6, in7, out2, out3 ); \
- }
- /* Description : Addition of 2 pairs of vectors
- Arguments : Inputs - in0, in1, in2, in3
- Outputs - out0, out1
- Details : Each element in 'in0' is added to 'in1' and result is written
- to 'out0'.
- */
- #define ADD2( in0, in1, in2, in3, out0, out1 ) \
- { \
- out0 = in0 + in1; \
- out1 = in2 + in3; \
- }
- #define ADD4( in0, in1, in2, in3, in4, in5, in6, in7, \
- out0, out1, out2, out3 ) \
- { \
- ADD2( in0, in1, in2, in3, out0, out1 ); \
- ADD2( in4, in5, in6, in7, out2, out3 ); \
- }
- #define SUB4( in0, in1, in2, in3, in4, in5, in6, in7, \
- out0, out1, out2, out3 ) \
- { \
- out0 = in0 - in1; \
- out1 = in2 - in3; \
- out2 = in4 - in5; \
- out3 = in6 - in7; \
- }
- /* Description : Sign extend halfword elements from right half of the vector
- Arguments : Input - in (halfword vector)
- Output - out (sign extended word vector)
- Return Type - signed word
- Details : Sign bit of halfword elements from input vector 'in' is
- extracted and interleaved with same vector 'in0' to generate
- 4 word elements keeping sign intact
- */
- #define UNPCK_R_SH_SW( in, out ) \
- { \
- v8i16 sign_m; \
- \
- sign_m = __msa_clti_s_h( ( v8i16 ) in, 0 ); \
- out = ( v4i32 ) __msa_ilvr_h( sign_m, ( v8i16 ) in ); \
- }
- /* Description : Zero extend unsigned byte elements to halfword elements
- Arguments : Input - in (unsigned byte vector)
- Outputs - out0, out1 (unsigned halfword vectors)
- Return Type - signed halfword
- Details : Zero extended right half of vector is returned in 'out0'
- Zero extended left half of vector is returned in 'out1'
- */
- #define UNPCK_UB_SH( in, out0, out1 ) \
- { \
- v16i8 zero_m = { 0 }; \
- \
- ILVRL_B2_SH( zero_m, in, out0, out1 ); \
- }
- /* Description : Sign extend halfword elements from input vector and return
- the result in pair of vectors
- Arguments : Input - in (halfword vector)
- Outputs - out0, out1 (sign extended word vectors)
- Return Type - signed word
- Details : Sign bit of halfword elements from input vector 'in' is
- extracted and interleaved right with same vector 'in0' to
- generate 4 signed word elements in 'out0'
- Then interleaved left with same vector 'in0' to
- generate 4 signed word elements in 'out1'
- */
- #define UNPCK_SH_SW( in, out0, out1 ) \
- { \
- v8i16 tmp_m; \
- \
- tmp_m = __msa_clti_s_h( ( v8i16 ) in, 0 ); \
- ILVRL_H2_SW( tmp_m, in, out0, out1 ); \
- }
- /* Description : Butterfly of 4 input vectors
- Arguments : Inputs - in0, in1, in2, in3
- Outputs - out0, out1, out2, out3
- Details : Butterfly operation
- */
- #define BUTTERFLY_4( in0, in1, in2, in3, out0, out1, out2, out3 ) \
- { \
- out0 = in0 + in3; \
- out1 = in1 + in2; \
- \
- out2 = in1 - in2; \
- out3 = in0 - in3; \
- }
- /* Description : Butterfly of 8 input vectors
- Arguments : Inputs - in0 ... in7
- Outputs - out0 .. out7
- Details : Butterfly operation
- */
- #define BUTTERFLY_8( in0, in1, in2, in3, in4, in5, in6, in7, \
- out0, out1, out2, out3, out4, out5, out6, out7 ) \
- { \
- out0 = in0 + in7; \
- out1 = in1 + in6; \
- out2 = in2 + in5; \
- out3 = in3 + in4; \
- \
- out4 = in3 - in4; \
- out5 = in2 - in5; \
- out6 = in1 - in6; \
- out7 = in0 - in7; \
- }
- /* Description : Transpose input 8x8 byte block
- Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
- Outputs - out0, out1, out2, out3, out4, out5, out6, out7
- Return Type - as per RTYPE
- */
- #define TRANSPOSE8x8_UB( RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
- out0, out1, out2, out3, out4, out5, out6, out7 ) \
- { \
- v16i8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
- v16i8 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \
- \
- ILVR_B4_SB( in2, in0, in3, in1, in6, in4, in7, in5, \
- tmp0_m, tmp1_m, tmp2_m, tmp3_m ); \
- ILVRL_B2_SB( tmp1_m, tmp0_m, tmp4_m, tmp5_m ); \
- ILVRL_B2_SB( tmp3_m, tmp2_m, tmp6_m, tmp7_m ); \
- ILVRL_W2( RTYPE, tmp6_m, tmp4_m, out0, out2 ); \
- ILVRL_W2( RTYPE, tmp7_m, tmp5_m, out4, out6 ); \
- SLDI_B2_0( RTYPE, out0, out2, out1, out3, 8 ); \
- SLDI_B2_0( RTYPE, out4, out6, out5, out7, 8 ); \
- }
- #define TRANSPOSE8x8_UB_UB( ... ) TRANSPOSE8x8_UB( v16u8, __VA_ARGS__ )
- /* Description : Transpose 16x8 block into 8x16 with byte elements in vectors
- Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7,
- in8, in9, in10, in11, in12, in13, in14, in15
- Outputs - out0, out1, out2, out3, out4, out5, out6, out7
- Return Type - unsigned byte
- */
- #define TRANSPOSE16x8_UB_UB( in0, in1, in2, in3, in4, in5, in6, in7, \
- in8, in9, in10, in11, in12, in13, in14, in15, \
- out0, out1, out2, out3, out4, out5, out6, out7 ) \
- { \
- v16u8 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
- v16u8 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \
- \
- ILVEV_D2_UB( in0, in8, in1, in9, out7, out6 ); \
- ILVEV_D2_UB( in2, in10, in3, in11, out5, out4 ); \
- ILVEV_D2_UB( in4, in12, in5, in13, out3, out2 ); \
- ILVEV_D2_UB( in6, in14, in7, in15, out1, out0 ); \
- \
- tmp0_m = ( v16u8 ) __msa_ilvev_b( ( v16i8 ) out6, ( v16i8 ) out7 ); \
- tmp4_m = ( v16u8 ) __msa_ilvod_b( ( v16i8 ) out6, ( v16i8 ) out7 ); \
- tmp1_m = ( v16u8 ) __msa_ilvev_b( ( v16i8 ) out4, ( v16i8 ) out5 ); \
- tmp5_m = ( v16u8 ) __msa_ilvod_b( ( v16i8 ) out4, ( v16i8 ) out5 ); \
- out5 = ( v16u8 ) __msa_ilvev_b( ( v16i8 ) out2, ( v16i8 ) out3 ); \
- tmp6_m = ( v16u8 ) __msa_ilvod_b( ( v16i8 ) out2, ( v16i8 ) out3 ); \
- out7 = ( v16u8 ) __msa_ilvev_b( ( v16i8 ) out0, ( v16i8 ) out1 ); \
- tmp7_m = ( v16u8 ) __msa_ilvod_b( ( v16i8 ) out0, ( v16i8 ) out1 ); \
- \
- ILVEV_H2_UB( tmp0_m, tmp1_m, out5, out7, tmp2_m, tmp3_m ); \
- out0 = ( v16u8 ) __msa_ilvev_w( ( v4i32 ) tmp3_m, ( v4i32 ) tmp2_m ); \
- out4 = ( v16u8 ) __msa_ilvod_w( ( v4i32 ) tmp3_m, ( v4i32 ) tmp2_m ); \
- \
- tmp2_m = ( v16u8 ) __msa_ilvod_h( ( v8i16 ) tmp1_m, ( v8i16 ) tmp0_m ); \
- tmp3_m = ( v16u8 ) __msa_ilvod_h( ( v8i16 ) out7, ( v8i16 ) out5 ); \
- out2 = ( v16u8 ) __msa_ilvev_w( ( v4i32 ) tmp3_m, ( v4i32 ) tmp2_m ); \
- out6 = ( v16u8 ) __msa_ilvod_w( ( v4i32 ) tmp3_m, ( v4i32 ) tmp2_m ); \
- \
- ILVEV_H2_UB( tmp4_m, tmp5_m, tmp6_m, tmp7_m, tmp2_m, tmp3_m ); \
- out1 = ( v16u8 ) __msa_ilvev_w( ( v4i32 ) tmp3_m, ( v4i32 ) tmp2_m ); \
- out5 = ( v16u8 ) __msa_ilvod_w( ( v4i32 ) tmp3_m, ( v4i32 ) tmp2_m ); \
- \
- tmp2_m = ( v16u8 ) __msa_ilvod_h( ( v8i16 ) tmp5_m, ( v8i16 ) tmp4_m ); \
- tmp2_m = ( v16u8 ) __msa_ilvod_h( ( v8i16 ) tmp5_m, ( v8i16 ) tmp4_m ); \
- tmp3_m = ( v16u8 ) __msa_ilvod_h( ( v8i16 ) tmp7_m, ( v8i16 ) tmp6_m ); \
- tmp3_m = ( v16u8 ) __msa_ilvod_h( ( v8i16 ) tmp7_m, ( v8i16 ) tmp6_m ); \
- out3 = ( v16u8 ) __msa_ilvev_w( ( v4i32 ) tmp3_m, ( v4i32 ) tmp2_m ); \
- out7 = ( v16u8 ) __msa_ilvod_w( ( v4i32 ) tmp3_m, ( v4i32 ) tmp2_m ); \
- }
- /* Description : Transpose 4x4 block with half word elements in vectors
- Arguments : Inputs - in0, in1, in2, in3
- Outputs - out0, out1, out2, out3
- Return Type - signed halfword
- */
- #define TRANSPOSE4x4_SH_SH( in0, in1, in2, in3, out0, out1, out2, out3 ) \
- { \
- v8i16 s0_m, s1_m; \
- \
- ILVR_H2_SH( in1, in0, in3, in2, s0_m, s1_m ); \
- ILVRL_W2_SH( s1_m, s0_m, out0, out2 ); \
- out1 = ( v8i16 ) __msa_ilvl_d( ( v2i64 ) out0, ( v2i64 ) out0 ); \
- out3 = ( v8i16 ) __msa_ilvl_d( ( v2i64 ) out0, ( v2i64 ) out2 ); \
- }
- /* Description : Transpose 4x8 block with half word elements in vectors
- Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
- Outputs - out0, out1, out2, out3, out4, out5, out6, out7
- Return Type - signed halfword
- */
- #define TRANSPOSE4X8_SH_SH( in0, in1, in2, in3, in4, in5, in6, in7, \
- out0, out1, out2, out3, out4, out5, out6, out7 ) \
- { \
- v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
- v8i16 tmp0_n, tmp1_n, tmp2_n, tmp3_n; \
- v8i16 zero_m = { 0 }; \
- \
- ILVR_H4_SH( in1, in0, in3, in2, in5, in4, in7, in6, \
- tmp0_n, tmp1_n, tmp2_n, tmp3_n ); \
- ILVRL_W2_SH( tmp1_n, tmp0_n, tmp0_m, tmp2_m ); \
- ILVRL_W2_SH( tmp3_n, tmp2_n, tmp1_m, tmp3_m ); \
- \
- out0 = ( v8i16 ) __msa_ilvr_d( ( v2i64 ) tmp1_m, ( v2i64 ) tmp0_m ); \
- out1 = ( v8i16 ) __msa_ilvl_d( ( v2i64 ) tmp1_m, ( v2i64 ) tmp0_m ); \
- out2 = ( v8i16 ) __msa_ilvr_d( ( v2i64 ) tmp3_m, ( v2i64 ) tmp2_m ); \
- out3 = ( v8i16 ) __msa_ilvl_d( ( v2i64 ) tmp3_m, ( v2i64 ) tmp2_m ); \
- \
- out4 = zero_m; \
- out5 = zero_m; \
- out6 = zero_m; \
- out7 = zero_m; \
- }
- /* Description : Transpose 8x4 block with half word elements in vectors
- Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
- Outputs - out0, out1, out2, out3, out4, out5, out6, out7
- Return Type - signed halfword
- */
- #define TRANSPOSE8X4_SH_SH( in0, in1, in2, in3, out0, out1, out2, out3 ) \
- { \
- v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
- \
- ILVR_H2_SH( in1, in0, in3, in2, tmp0_m, tmp1_m ); \
- ILVL_H2_SH( in1, in0, in3, in2, tmp2_m, tmp3_m ); \
- ILVR_W2_SH( tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out2 ); \
- ILVL_W2_SH( tmp1_m, tmp0_m, tmp3_m, tmp2_m, out1, out3 ); \
- }
- /* Description : Transpose 8x8 block with half word elements in vectors
- Arguments : Inputs - in0, in1, in2, in3, in4, in5, in6, in7
- Outputs - out0, out1, out2, out3, out4, out5, out6, out7
- Return Type - as per RTYPE
- */
- #define TRANSPOSE8x8_H( RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
- out0, out1, out2, out3, out4, out5, out6, out7 ) \
- { \
- v8i16 s0_m, s1_m; \
- v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
- v8i16 tmp4_m, tmp5_m, tmp6_m, tmp7_m; \
- \
- ILVR_H2_SH( in6, in4, in7, in5, s0_m, s1_m ); \
- ILVRL_H2_SH( s1_m, s0_m, tmp0_m, tmp1_m ); \
- ILVL_H2_SH( in6, in4, in7, in5, s0_m, s1_m ); \
- ILVRL_H2_SH( s1_m, s0_m, tmp2_m, tmp3_m ); \
- ILVR_H2_SH( in2, in0, in3, in1, s0_m, s1_m ); \
- ILVRL_H2_SH( s1_m, s0_m, tmp4_m, tmp5_m ); \
- ILVL_H2_SH( in2, in0, in3, in1, s0_m, s1_m ); \
- ILVRL_H2_SH( s1_m, s0_m, tmp6_m, tmp7_m ); \
- PCKEV_D4( RTYPE, tmp0_m, tmp4_m, tmp1_m, tmp5_m, tmp2_m, tmp6_m, \
- tmp3_m, tmp7_m, out0, out2, out4, out6 ); \
- out1 = ( RTYPE ) __msa_pckod_d( ( v2i64 ) tmp0_m, ( v2i64 ) tmp4_m ); \
- out3 = ( RTYPE ) __msa_pckod_d( ( v2i64 ) tmp1_m, ( v2i64 ) tmp5_m ); \
- out5 = ( RTYPE ) __msa_pckod_d( ( v2i64 ) tmp2_m, ( v2i64 ) tmp6_m ); \
- out7 = ( RTYPE ) __msa_pckod_d( ( v2i64 ) tmp3_m, ( v2i64 ) tmp7_m ); \
- }
- #define TRANSPOSE8x8_SH_SH( ... ) TRANSPOSE8x8_H( v8i16, __VA_ARGS__ )
- /* Description : Transpose 4x4 block with word elements in vectors
- Arguments : Inputs - in0, in1, in2, in3
- Outputs - out0, out1, out2, out3
- Return Type - signed word
- */
- #define TRANSPOSE4x4_SW_SW( in0, in1, in2, in3, out0, out1, out2, out3 ) \
- { \
- v4i32 s0_m, s1_m, s2_m, s3_m; \
- \
- ILVRL_W2_SW( in1, in0, s0_m, s1_m ); \
- ILVRL_W2_SW( in3, in2, s2_m, s3_m ); \
- \
- out0 = ( v4i32 ) __msa_ilvr_d( ( v2i64 ) s2_m, ( v2i64 ) s0_m ); \
- out1 = ( v4i32 ) __msa_ilvl_d( ( v2i64 ) s2_m, ( v2i64 ) s0_m ); \
- out2 = ( v4i32 ) __msa_ilvr_d( ( v2i64 ) s3_m, ( v2i64 ) s1_m ); \
- out3 = ( v4i32 ) __msa_ilvl_d( ( v2i64 ) s3_m, ( v2i64 ) s1_m ); \
- }
- /* Description : Add block 4x4
- Arguments : Inputs - in0, in1, in2, in3, pdst, stride
- Details : Least significant 4 bytes from each input vector are added to
- the destination bytes, clipped between 0-255 and stored.
- */
- #define ADDBLK_ST4x4_UB( in0, in1, in2, in3, p_dst, stride ) \
- { \
- uint32_t src0_m, src1_m, src2_m, src3_m; \
- uint32_t out0_m, out1_m, out2_m, out3_m; \
- v8i16 inp0_m, inp1_m, res0_m, res1_m; \
- v16i8 dst0_m = { 0 }; \
- v16i8 dst1_m = { 0 }; \
- v16i8 zero_m = { 0 }; \
- \
- ILVR_D2_SH( in1, in0, in3, in2, inp0_m, inp1_m ) \
- LW4( p_dst, stride, src0_m, src1_m, src2_m, src3_m ); \
- INSERT_W2_SB( src0_m, src1_m, dst0_m ); \
- INSERT_W2_SB( src2_m, src3_m, dst1_m ); \
- ILVR_B2_SH( zero_m, dst0_m, zero_m, dst1_m, res0_m, res1_m ); \
- ADD2( res0_m, inp0_m, res1_m, inp1_m, res0_m, res1_m ); \
- CLIP_SH2_0_255( res0_m, res1_m ); \
- PCKEV_B2_SB( res0_m, res0_m, res1_m, res1_m, dst0_m, dst1_m ); \
- \
- out0_m = __msa_copy_u_w( ( v4i32 ) dst0_m, 0 ); \
- out1_m = __msa_copy_u_w( ( v4i32 ) dst0_m, 1 ); \
- out2_m = __msa_copy_u_w( ( v4i32 ) dst1_m, 0 ); \
- out3_m = __msa_copy_u_w( ( v4i32 ) dst1_m, 1 ); \
- SW4( out0_m, out1_m, out2_m, out3_m, p_dst, stride ); \
- }
- /* Description : Dot product and addition of 3 signed halfword input vectors
- Arguments : Inputs - in0, in1, in2, coeff0, coeff1, coeff2
- Output - out0_m
- Return Type - signed halfword
- Details : Dot product of 'in0' with 'coeff0'
- Dot product of 'in1' with 'coeff1'
- Dot product of 'in2' with 'coeff2'
- Addition of all the 3 vector results
- out0_m = (in0 * coeff0) + (in1 * coeff1) + (in2 * coeff2)
- */
- #define DPADD_SH3_SH( in0, in1, in2, coeff0, coeff1, coeff2 ) \
- ( { \
- v8i16 tmp1_m; \
- v8i16 out0_m; \
- \
- out0_m = __msa_dotp_s_h( ( v16i8 ) in0, ( v16i8 ) coeff0 ); \
- out0_m = __msa_dpadd_s_h( out0_m, ( v16i8 ) in1, ( v16i8 ) coeff1 ); \
- tmp1_m = __msa_dotp_s_h( ( v16i8 ) in2, ( v16i8 ) coeff2 ); \
- out0_m = __msa_adds_s_h( out0_m, tmp1_m ); \
- \
- out0_m; \
- } )
- /* Description : Pack even elements of input vectors & xor with 128
- Arguments : Inputs - in0, in1
- Output - out_m
- Return Type - unsigned byte
- Details : Signed byte even elements from 'in0' and 'in1' are packed
- together in one vector and the resulting vector is xor'ed with
- 128 to shift the range from signed to unsigned byte
- */
- #define PCKEV_XORI128_UB( in0, in1 ) \
- ( { \
- v16u8 out_m; \
- out_m = ( v16u8 ) __msa_pckev_b( ( v16i8 ) in1, ( v16i8 ) in0 ); \
- out_m = ( v16u8 ) __msa_xori_b( ( v16u8 ) out_m, 128 ); \
- out_m; \
- } )
- /* Description : Pack even byte elements, extract 0 & 2 index words from pair
- of results and store 4 words in destination memory as per
- stride
- Arguments : Inputs - in0, in1, in2, in3, pdst, stride
- */
- #define PCKEV_ST4x4_UB( in0, in1, in2, in3, p_dst, stride ) \
- { \
- uint32_t out0_m, out1_m, out2_m, out3_m; \
- v16i8 tmp0_m, tmp1_m; \
- \
- PCKEV_B2_SB( in1, in0, in3, in2, tmp0_m, tmp1_m ); \
- \
- out0_m = __msa_copy_u_w( ( v4i32 ) tmp0_m, 0 ); \
- out1_m = __msa_copy_u_w( ( v4i32 ) tmp0_m, 2 ); \
- out2_m = __msa_copy_u_w( ( v4i32 ) tmp1_m, 0 ); \
- out3_m = __msa_copy_u_w( ( v4i32 ) tmp1_m, 2 ); \
- \
- SW4( out0_m, out1_m, out2_m, out3_m, p_dst, stride ); \
- }
- /* Description : Pack even byte elements and store byte vector in destination
- memory
- Arguments : Inputs - in0, in1, pdst
- */
- #define PCKEV_ST_SB( in0, in1, p_dst ) \
- { \
- v16i8 tmp_m; \
- tmp_m = __msa_pckev_b( ( v16i8 ) in1, ( v16i8 ) in0 ); \
- ST_SB( tmp_m, ( p_dst ) ); \
- }
- #define AVC_CALC_DPADD_H_6PIX_2COEFF_SH( in0, in1, in2, in3, in4, in5 ) \
- ( { \
- v4i32 tmp0_m, tmp1_m; \
- v8i16 out0_m, out1_m, out2_m, out3_m; \
- v8i16 minus5h_m = __msa_ldi_h( -5 ); \
- v8i16 plus20h_m = __msa_ldi_h( 20 ); \
- \
- ILVRL_H2_SW( in5, in0, tmp0_m, tmp1_m ); \
- \
- tmp0_m = __msa_hadd_s_w( ( v8i16 ) tmp0_m, ( v8i16 ) tmp0_m ); \
- tmp1_m = __msa_hadd_s_w( ( v8i16 ) tmp1_m, ( v8i16 ) tmp1_m ); \
- \
- ILVRL_H2_SH( in1, in4, out0_m, out1_m ); \
- DPADD_SH2_SW( out0_m, out1_m, minus5h_m, minus5h_m, tmp0_m, tmp1_m ); \
- ILVRL_H2_SH( in2, in3, out2_m, out3_m ); \
- DPADD_SH2_SW( out2_m, out3_m, plus20h_m, plus20h_m, tmp0_m, tmp1_m ); \
- \
- SRARI_W2_SW( tmp0_m, tmp1_m, 10 ); \
- SAT_SW2_SW( tmp0_m, tmp1_m, 7 ); \
- out0_m = __msa_pckev_h( ( v8i16 ) tmp1_m, ( v8i16 ) tmp0_m ); \
- \
- out0_m; \
- } )
- #define AVC_HORZ_FILTER_SH( in, mask0, mask1, mask2 ) \
- ( { \
- v8i16 out0_m, out1_m; \
- v16i8 tmp0_m, tmp1_m; \
- v16i8 minus5b = __msa_ldi_b( -5 ); \
- v16i8 plus20b = __msa_ldi_b( 20 ); \
- \
- tmp0_m = __msa_vshf_b( ( v16i8 ) mask0, in, in ); \
- out0_m = __msa_hadd_s_h( tmp0_m, tmp0_m ); \
- \
- tmp0_m = __msa_vshf_b( ( v16i8 ) mask1, in, in ); \
- out0_m = __msa_dpadd_s_h( out0_m, minus5b, tmp0_m ); \
- \
- tmp1_m = __msa_vshf_b( ( v16i8 ) ( mask2 ), in, in ); \
- out1_m = __msa_dpadd_s_h( out0_m, plus20b, tmp1_m ); \
- \
- out1_m; \
- } )
- #endif /* X264_MIPS_MACROS_H */
|