| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702
70370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761
27712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761
77717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762
27722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588258925902591259225932594259525962597259825992600260126022603260426052606260726082609261026112612261326142615261626172618261926202621262226232624262526262627262826292630263126322633263426352636263726382639264026412642264326442645264626472648264926502651265226532654265526562657265826592660266126622663266426652666266726682669267026712672267326742675267626772678267926802681268226832684268526862687268826892690269126922693269426952696269726982699270027012702270327042705270627072708270927102711271227132714271527162717271827192720272127222723272427252726272727282729273027312732273327342735273627372738273927402741274227432744274527462747274827492750275127522753275427552756275727582759276027612762276327642765276627672768276927702771277227732774277527762
77727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854285528562857285828592860286128622863286428652866286728682869287028712872287328742875287628772878287928802881288228832884288528862887288828892890289128922893289428952896289728982899290029012902290329042905290629072908290929102911291229132914291529162917291829192920292129222923292429252926292729282929293029312932293329342935293629372938293929402941294229432944294529462947294829492950295129522953295429552956295729582959296029612962296329642965296629672968296929702971297229732974297529762977297829792980298129822983298429852986298729882989299029912992299329942995299629972998299930003001300230033004300530063007300830093010301130123013301430153016301730183019302030213022302330243025302630273028302930303031303230333034303530363037303830393040304130423043304430453046304730483049305030513052305330543055305630573058305930603061306230633064306530663067306830693070307130723073307430753076307730783079308030813082308330843085308630873088308930903091309230933094309530963097309830993100310131023103310431053106310731083109311031113112311331143115311631173118311931203121312231233124312531263127312831293130313131323133313431353136313731383139314031413142314331443145314631473148314931503151315231533154315531563157315831593160316131623163316431653166316731683169317031713172317331743175317631773178317931803181318231833184318531863187318831893190319131923193319431953196319731983199320032013202320332043205320632073208320932103211321232133214321532163217321832193220322132223223322432253226322732283229323032313232323332343235323632373238323932403241324232433244324532463247324832493250325132523253325432553256325732583259326032613262326332643265326632673268326932703271327232733274327532763
27732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501350235033504350535063507350835093510351135123513351435153516351735183519352035213522352335243525352635273528352935303531353235333534353535363537353835393540354135423543354435453546354735483549355035513552355335543555355635573558355935603561356235633564356535663567356835693570357135723573357435753576357735783579358035813582358335843585358635873588358935903591359235933594359535963597359835993600360136023603360436053606360736083609361036113612361336143615361636173618361936203621362236233624362536263627362836293630363136323633363436353636363736383639364036413642364336443645364636473648364936503651365236533654365536563657365836593660366136623663366436653666366736683669367036713672367336743675367636773678367936803681368236833684368536863687368836893690369136923693369436953696369736983699370037013702370337043705370637073708370937103711371237133714371537163717371837193720372137223723372437253726372737283729373037313732373337343735373637373738373937403741374237433744374537463747374837493750375137523753375437553756375737583759376037613762376337643765376637673768376937703771377237733774377537763
77737783779378037813782378337843785378637873788378937903791379237933794379537963797379837993800380138023803380438053806380738083809381038113812381338143815381638173818381938203821382238233824382538263827382838293830383138323833383438353836383738383839384038413842384338443845384638473848384938503851385238533854385538563857385838593860386138623863386438653866386738683869387038713872387338743875387638773878387938803881388238833884388538863887388838893890389138923893389438953896389738983899390039013902390339043905390639073908390939103911391239133914391539163917391839193920392139223923392439253926392739283929393039313932393339343935393639373938393939403941394239433944394539463947394839493950395139523953395439553956395739583959396039613962396339643965396639673968396939703971397239733974397539763977397839793980398139823983398439853986398739883989399039913992399339943995399639973998399940004001400240034004400540064007400840094010401140124013401440154016401740184019402040214022402340244025402640274028402940304031403240334034403540364037403840394040404140424043404440454046404740484049405040514052405340544055405640574058405940604061406240634064406540664067406840694070407140724073407440754076407740784079408040814082408340844085408640874088408940904091409240934094409540964097409840994100410141024103410441054106410741084109411041114112411341144115411641174118411941204121412241234124412541264127412841294130413141324133413441354136413741384139414041414142414341444145414641474148414941504151415241534154415541564157415841594160416141624163416441654166416741684169417041714172417341744175417641774178417941804181418241834184418541864187418841894190419141924193419441954196419741984199420042014202420342044205420642074208420942104211421242134214421542164217421842194220422142224223422442254226422742284229423042314232423342344235423642374238423942404241424242434244424542464247424842494250425142524253425442554256425742584259426042614262426342644265426642674268426942704271427242734274427542764
27742784279428042814282428342844285428642874288428942904291429242934294429542964297429842994300430143024303430443054306430743084309431043114312431343144315431643174318431943204321432243234324432543264327432843294330433143324333433443354336433743384339434043414342434343444345434643474348434943504351435243534354435543564357435843594360436143624363436443654366436743684369437043714372437343744375437643774378437943804381438243834384438543864387438843894390439143924393439443954396439743984399440044014402440344044405440644074408440944104411441244134414441544164417441844194420442144224423442444254426442744284429443044314432443344344435443644374438443944404441444244434444444544464447444844494450445144524453445444554456445744584459446044614462446344644465446644674468446944704471447244734474447544764477447844794480448144824483448444854486448744884489449044914492449344944495449644974498449945004501450245034504450545064507450845094510451145124513451445154516451745184519452045214522452345244525452645274528452945304531453245334534453545364537453845394540454145424543454445454546454745484549455045514552455345544555455645574558455945604561456245634564456545664567456845694570457145724573457445754576457745784579458045814582458345844585458645874588458945904591459245934594459545964597459845994600460146024603460446054606460746084609461046114612461346144615461646174618461946204621462246234624462546264627462846294630463146324633463446354636463746384639464046414642464346444645464646474648464946504651465246534654465546564657465846594660466146624663466446654666466746684669467046714672467346744675467646774678467946804681468246834684468546864687468846894690469146924693469446954696469746984699470047014702470347044705470647074708470947104711471247134714471547164717471847194720472147224723472447254726472747284729473047314732473347344735473647374738473947404741474247434744474547464747474847494750475147524753475447554756475747584759476047614762476347644765476647674768476947704771477247734774477547764
77747784779478047814782478347844785478647874788478947904791479247934794479547964797479847994800480148024803480448054806480748084809481048114812481348144815481648174818481948204821482248234824482548264827482848294830483148324833483448354836483748384839484048414842484348444845484648474848484948504851485248534854485548564857485848594860486148624863486448654866486748684869487048714872487348744875487648774878487948804881488248834884488548864887488848894890489148924893489448954896489748984899490049014902490349044905490649074908490949104911491249134914491549164917491849194920492149224923492449254926492749284929493049314932493349344935493649374938493949404941494249434944494549464947494849494950495149524953495449554956495749584959496049614962496349644965496649674968496949704971497249734974497549764977497849794980498149824983498449854986498749884989499049914992499349944995499649974998499950005001500250035004500550065007500850095010501150125013501450155016501750185019502050215022502350245025502650275028502950305031503250335034503550365037503850395040504150425043504450455046504750485049505050515052505350545055505650575058505950605061506250635064506550665067506850695070507150725073507450755076507750785079508050815082508350845085508650875088508950905091509250935094509550965097509850995100510151025103510451055106510751085109511051115112511351145115511651175118511951205121512251235124512551265127512851295130513151325133513451355136513751385139514051415142514351445145514651475148514951505151515251535154515551565157515851595160516151625163516451655166516751685169517051715172517351745175517651775178517951805181518251835184518551865187518851895190519151925193519451955196519751985199520052015202520352045205520652075208520952105211521252135214521552165217521852195220522152225223522452255226522752285229523052315232523352345235523652375238523952405241524252435244524552465247524852495250525152525253525452555256525752585259526052615262526352645265526652675268526952705271527252735274527552765
27752785279528052815282528352845285528652875288528952905291529252935294529552965297529852995300530153025303530453055306530753085309531053115312531353145315531653175318531953205321532253235324532553265327532853295330533153325333533453355336533753385339534053415342534353445345534653475348534953505351535253535354535553565357535853595360536153625363536453655366536753685369537053715372537353745375537653775378537953805381538253835384538553865387538853895390539153925393539453955396539753985399540054015402540354045405540654075408540954105411541254135414541554165417541854195420542154225423542454255426542754285429543054315432543354345435543654375438543954405441544254435444544554465447544854495450545154525453545454555456545754585459546054615462546354645465546654675468546954705471547254735474547554765477547854795480548154825483548454855486548754885489549054915492549354945495549654975498549955005501550255035504550555065507550855095510551155125513551455155516551755185519552055215522552355245525552655275528552955305531553255335534553555365537553855395540554155425543554455455546554755485549555055515552555355545555555655575558555955605561556255635564556555665567556855695570557155725573557455755576557755785579558055815582558355845585558655875588558955905591559255935594559555965597559855995600560156025603560456055606560756085609561056115612561356145615561656175618561956205621562256235624562556265627562856295630563156325633563456355636563756385639564056415642564356445645564656475648564956505651565256535654565556565657565856595660566156625663566456655666566756685669567056715672567356745675567656775678567956805681568256835684568556865687568856895690569156925693569456955696569756985699570057015702570357045705570657075708570957105711571257135714571557165717571857195720572157225723572457255726572757285729573057315732573357345735573657375738573957405741574257435744574557465747574857495750575157525753575457555756575757585759576057615762576357645765576657675768576957705771577257735774577557765
77757785779578057815782578357845785578657875788578957905791579257935794579557965797579857995800580158025803580458055806580758085809581058115812581358145815581658175818581958205821582258235824582558265827582858295830583158325833583458355836583758385839584058415842584358445845584658475848584958505851585258535854585558565857585858595860586158625863586458655866586758685869587058715872587358745875587658775878587958805881588258835884588558865887588858895890589158925893589458955896589758985899590059015902590359045905590659075908590959105911591259135914591559165917591859195920592159225923592459255926592759285929593059315932593359345935593659375938593959405941594259435944594559465947594859495950595159525953595459555956595759585959596059615962596359645965596659675968596959705971597259735974597559765977597859795980598159825983598459855986598759885989599059915992599359945995599659975998599960006001600260036004600560066007600860096010601160126013601460156016601760186019602060216022602360246025602660276028602960306031603260336034603560366037603860396040604160426043604460456046604760486049605060516052605360546055605660576058605960606061606260636064606560666067606860696070607160726073607460756076607760786079608060816082608360846085608660876088608960906091609260936094609560966097609860996100610161026103610461056106610761086109611061116112611361146115611661176118611961206121612261236124612561266127612861296130613161326133613461356136613761386139614061416142614361446145614661476148614961506151615261536154615561566157615861596160616161626163616461656166616761686169617061716172617361746175617661776178617961806181618261836184618561866187618861896190619161926193619461956196619761986199620062016202620362046205620662076208620962106211621262136214621562166217621862196220622162226223622462256226622762286229623062316232623362346235623662376238623962406241624262436244624562466247624862496250625162526253625462556256625762586259626062616262626362646265626662676268626962706271627262736274627562766
27762786279628062816282628362846285628662876288628962906291629262936294629562966297629862996300630163026303630463056306630763086309631063116312631363146315631663176318631963206321632263236324632563266327632863296330633163326333633463356336633763386339634063416342634363446345634663476348634963506351635263536354635563566357635863596360636163626363636463656366636763686369637063716372637363746375637663776378637963806381638263836384638563866387638863896390639163926393639463956396639763986399640064016402640364046405640664076408640964106411641264136414641564166417641864196420642164226423642464256426642764286429643064316432643364346435643664376438643964406441644264436444644564466447644864496450645164526453645464556456645764586459646064616462646364646465646664676468646964706471647264736474647564766477647864796480648164826483648464856486648764886489649064916492649364946495649664976498649965006501650265036504650565066507650865096510651165126513651465156516651765186519652065216522652365246525652665276528652965306531653265336534653565366537653865396540654165426543654465456546654765486549655065516552655365546555655665576558655965606561656265636564656565666567656865696570657165726573657465756576657765786579658065816582658365846585658665876588658965906591659265936594659565966597659865996600660166026603660466056606660766086609661066116612661366146615661666176618661966206621662266236624662566266627662866296630663166326633663466356636663766386639664066416642664366446645664666476648664966506651665266536654665566566657665866596660666166626663666466656666666766686669667066716672667366746675667666776678667966806681668266836684668566866687668866896690669166926693669466956696669766986699670067016702670367046705670667076708670967106711671267136714671567166717671867196720672167226723672467256726672767286729673067316732673367346735673667376738673967406741674267436744674567466747674867496750675167526753675467556756675767586759676067616762676367646765676667676768676967706771677267736774677567766
77767786779678067816782678367846785678667876788678967906791679267936794679567966797679867996800680168026803680468056806680768086809681068116812681368146815681668176818681968206821682268236824682568266827682868296830683168326833683468356836683768386839684068416842684368446845684668476848684968506851685268536854685568566857685868596860686168626863686468656866686768686869687068716872687368746875687668776878687968806881688268836884688568866887688868896890689168926893689468956896689768986899690069016902690369046905690669076908690969106911691269136914691569166917691869196920692169226923692469256926692769286929693069316932693369346935693669376938693969406941694269436944694569466947694869496950695169526953695469556956695769586959696069616962696369646965696669676968696969706971697269736974697569766977697869796980698169826983698469856986698769886989699069916992699369946995699669976998699970007001700270037004700570067007700870097010701170127013701470157016701770187019702070217022702370247025702670277028702970307031703270337034703570367037703870397040704170427043704470457046704770487049705070517052705370547055705670577058705970607061706270637064706570667067706870697070707170727073707470757076707770787079708070817082708370847085708670877088708970907091709270937094709570967097709870997100710171027103710471057106710771087109711071117112711371147115711671177118711971207121712271237124712571267127712871297130713171327133713471357136713771387139714071417142714371447145714671477148714971507151715271537154715571567157715871597160716171627163716471657166716771687169717071717172717371747175717671777178717971807181718271837184718571867187718871897190719171927193719471957196719771987199720072017202720372047205720672077208720972107211721272137214721572167217721872197220722172227223722472257226722772287229723072317232723372347235723672377238723972407241724272437244724572467247724872497250725172527253725472557256725772587259726072617262726372647265726672677268726972707271727272737274727572767
27772787279728072817282728372847285728672877288728972907291729272937294729572967297729872997300730173027303730473057306730773087309731073117312731373147315731673177318731973207321732273237324732573267327732873297330733173327333733473357336733773387339734073417342734373447345734673477348734973507351735273537354735573567357735873597360736173627363736473657366736773687369737073717372737373747375737673777378737973807381738273837384738573867387738873897390739173927393739473957396739773987399740074017402740374047405740674077408740974107411741274137414741574167417741874197420742174227423742474257426742774287429743074317432743374347435743674377438743974407441744274437444744574467447744874497450745174527453745474557456745774587459746074617462746374647465746674677468746974707471747274737474747574767477747874797480748174827483748474857486748774887489749074917492749374947495749674977498749975007501750275037504750575067507750875097510751175127513751475157516751775187519752075217522752375247525752675277528752975307531753275337534753575367537753875397540754175427543754475457546754775487549755075517552755375547555755675577558755975607561756275637564756575667567756875697570757175727573757475757576757775787579758075817582758375847585758675877588758975907591759275937594759575967597759875997600760176027603760476057606760776087609761076117612761376147615761676177618761976207621762276237624762576267627762876297630763176327633763476357636763776387639764076417642764376447645764676477648764976507651765276537654765576567657765876597660766176627663766476657666766776687669767076717672767376747675767676777678767976807681768276837684768576867687768876897690769176927693769476957696769776987699770077017702770377047705770677077708770977107711771277137714771577167717771877197720772177227723772477257726772777287729773077317732773377347735773677377738773977407741774277437744774577467747774877497750775177527753775477557756775777587759776077617762776377647765776677677768776977707771777277737774777577767
77777787779778077817782778377847785778677877788778977907791779277937794779577967797779877997800780178027803780478057806780778087809781078117812781378147815781678177818781978207821782278237824782578267827782878297830783178327833783478357836783778387839784078417842784378447845784678477848784978507851785278537854785578567857785878597860786178627863786478657866786778687869787078717872787378747875787678777878787978807881788278837884788578867887788878897890789178927893789478957896789778987899790079017902790379047905790679077908790979107911791279137914791579167917791879197920792179227923792479257926792779287929793079317932793379347935793679377938793979407941794279437944794579467947794879497950795179527953795479557956795779587959796079617962796379647965796679677968796979707971797279737974797579767977797879797980798179827983798479857986798779887989799079917992799379947995799679977998799980008001800280038004800580068007800880098010801180128013801480158016801780188019802080218022802380248025802680278028802980308031803280338034803580368037803880398040804180428043804480458046804780488049805080518052805380548055805680578058805980608061806280638064806580668067806880698070807180728073807480758076807780788079808080818082808380848085808680878088808980908091809280938094809580968097809880998100810181028103810481058106810781088109811081118112811381148115811681178118811981208121812281238124812581268127812881298130813181328133813481358136813781388139814081418142814381448145814681478148814981508151815281538154815581568157815881598160816181628163816481658166816781688169817081718172817381748175817681778178817981808181818281838184818581868187818881898190819181928193819481958196819781988199820082018202820382048205820682078208820982108211821282138214821582168217821882198220822182228223822482258226822782288229823082318232823382348235823682378238823982408241824282438244824582468247824882498250825182528253825482558256825782588259826082618262826382648265826682678268826982708271827282738274827582768
27782788279828082818282828382848285828682878288828982908291829282938294829582968297829882998300830183028303830483058306830783088309831083118312831383148315831683178318831983208321832283238324832583268327832883298330833183328333833483358336833783388339834083418342834383448345834683478348834983508351835283538354835583568357835883598360836183628363836483658366836783688369837083718372837383748375837683778378837983808381838283838384838583868387838883898390839183928393839483958396839783988399840084018402840384048405840684078408840984108411841284138414841584168417841884198420842184228423842484258426842784288429843084318432843384348435843684378438843984408441844284438444844584468447844884498450845184528453845484558456845784588459846084618462846384648465846684678468846984708471847284738474847584768477847884798480848184828483848484858486848784888489849084918492849384948495849684978498849985008501850285038504850585068507850885098510851185128513851485158516851785188519852085218522852385248525852685278528852985308531853285338534853585368537853885398540854185428543854485458546854785488549855085518552855385548555855685578558855985608561856285638564856585668567856885698570857185728573857485758576857785788579858085818582858385848585858685878588858985908591859285938594859585968597859885998600860186028603860486058606860786088609861086118612861386148615861686178618861986208621862286238624862586268627862886298630863186328633863486358636863786388639864086418642864386448645864686478648864986508651865286538654865586568657865886598660866186628663866486658666866786688669867086718672867386748675867686778678867986808681868286838684868586868687868886898690869186928693869486958696869786988699870087018702870387048705870687078708870987108711871287138714871587168717871887198720872187228723872487258726872787288729873087318732873387348735873687378738873987408741874287438744874587468747874887498750875187528753875487558756875787588759876087618762876387648765876687678768876987708771877287738774877587768
77787788779878087818782878387848785878687878788878987908791879287938794879587968797879887998800880188028803880488058806880788088809881088118812881388148815881688178818881988208821882288238824882588268827882888298830883188328833883488358836883788388839884088418842884388448845884688478848884988508851885288538854885588568857885888598860886188628863886488658866886788688869887088718872887388748875887688778878887988808881888288838884888588868887888888898890889188928893889488958896889788988899890089018902890389048905890689078908890989108911891289138914891589168917891889198920892189228923892489258926892789288929893089318932893389348935893689378938893989408941894289438944894589468947894889498950895189528953895489558956895789588959896089618962896389648965896689678968896989708971897289738974897589768977897889798980898189828983898489858986898789888989899089918992899389948995899689978998899990009001900290039004900590069007900890099010901190129013901490159016901790189019902090219022902390249025902690279028902990309031903290339034903590369037903890399040904190429043904490459046904790489049905090519052905390549055905690579058905990609061906290639064906590669067906890699070907190729073907490759076907790789079908090819082908390849085908690879088908990909091909290939094909590969097909890999100910191029103910491059106910791089109911091119112911391149115911691179118911991209121912291239124912591269127912891299130913191329133913491359136913791389139914091419142914391449145914691479148914991509151915291539154915591569157915891599160916191629163916491659166916791689169917091719172917391749175917691779178917991809181918291839184918591869187918891899190919191929193919491959196919791989199920092019202920392049205920692079208920992109211921292139214921592169217921892199220922192229223922492259226922792289229923092319232923392349235923692379238923992409241924292439244924592469247924892499250925192529253925492559256925792589259926092619262926392649265926692679268926992709271927292739274927592769
27792789279928092819282928392849285928692879288928992909291929292939294929592969297929892999300930193029303930493059306930793089309931093119312931393149315931693179318931993209321932293239324932593269327932893299330933193329333933493359336933793389339934093419342934393449345934693479348934993509351935293539354935593569357935893599360936193629363936493659366936793689369937093719372937393749375937693779378937993809381938293839384938593869387938893899390939193929393939493959396939793989399940094019402940394049405940694079408940994109411941294139414941594169417941894199420942194229423942494259426942794289429943094319432943394349435943694379438943994409441944294439444944594469447944894499450945194529453945494559456945794589459946094619462946394649465946694679468946994709471947294739474947594769477947894799480948194829483948494859486948794889489949094919492949394949495949694979498949995009501950295039504950595069507950895099510951195129513951495159516951795189519952095219522952395249525952695279528952995309531953295339534953595369537953895399540954195429543954495459546954795489549955095519552955395549555955695579558955995609561956295639564956595669567956895699570957195729573957495759576957795789579958095819582958395849585958695879588958995909591959295939594959595969597959895999600960196029603960496059606960796089609961096119612961396149615961696179618961996209621962296239624962596269627962896299630963196329633963496359636963796389639964096419642964396449645964696479648964996509651965296539654965596569657965896599660966196629663966496659666966796689669967096719672967396749675967696779678967996809681968296839684968596869687968896899690969196929693969496959696969796989699970097019702970397049705970697079708970997109711971297139714971597169717971897199720972197229723972497259726972797289729973097319732973397349735973697379738973997409741974297439744974597469747974897499750975197529753975497559756975797589759976097619762976397649765976697679768976997709771977297739774977597769
77797789779978097819782978397849785978697879788978997909791979297939794979597969797979897999800980198029803980498059806980798089809981098119812981398149815981698179818981998209821982298239824982598269827982898299830983198329833983498359836983798389839984098419842984398449845984698479848984998509851985298539854985598569857985898599860986198629863986498659866986798689869987098719872987398749875987698779878987998809881988298839884988598869887988898899890989198929893989498959896989798989899990099019902990399049905990699079908990999109911991299139914991599169917991899199920992199229923992499259926992799289929993099319932993399349935993699379938993999409941994299439944994599469947994899499950995199529953995499559956995799589959996099619962996399649965996699679968996999709971997299739974997599769977997899799980998199829983998499859986998799889989999099919992999399949995999699979998999910000100011000210003100041000510006100071000810009100101001110012100131001410015100161001710018100191002010021100221002310024100251002610027100281002910030100311003210033100341003510036100371003810039100401004110042100431004410045100461004710048100491005010051100521005310054100551005610057100581005910060100611006210063100641006510066100671006810069100701007110072100731007410075100761007710078100791008010081100821008310084100851008610087100881008910090100911009210093100941009510096100971009810099101001010110102101031010410105101061010710108101091011010111101121011310114101151011610117101181011910120101211012210123101241012510126101271012810129101301013110132101331013410135101361013710138101391014010141101421014310144101451014610147101481014910150101511015210153101541015510156101571015810159101601016110162101631016410165101661016710168101691017010171101721017310174101751017610177101781017910180101811018210183101841018510186101871018810189101901019110192101931019410195101961019710198101991020010201102021020310204102051020610207102081020910210102111021210213102141021510216102171021810219102201022
11022210223102241022510226102271022810229102301023110232102331023410235102361023710238102391024010241102421024310244102451024610247102481024910250102511025210253102541025510256102571025810259102601026110262102631026410265102661026710268102691027010271102721027310274102751027610277102781027910280102811028210283102841028510286102871028810289102901029110292102931029410295102961029710298102991030010301103021030310304103051030610307103081030910310103111031210313103141031510316103171031810319103201032110322103231032410325103261032710328103291033010331103321033310334103351033610337103381033910340103411034210343103441034510346103471034810349103501035110352103531035410355103561035710358103591036010361103621036310364103651036610367103681036910370103711037210373103741037510376103771037810379103801038110382103831038410385103861038710388103891039010391103921039310394103951039610397103981039910400104011040210403104041040510406104071040810409104101041110412104131041410415104161041710418104191042010421104221042310424104251042610427104281042910430104311043210433104341043510436104371043810439104401044110442104431044410445104461044710448104491045010451104521045310454104551045610457104581045910460104611046210463104641046510466104671046810469104701047110472104731047410475104761047710478104791048010481104821048310484104851048610487104881048910490104911049210493104941049510496104971049810499105001050110502105031050410505105061050710508105091051010511105121051310514105151051610517105181051910520105211052210523105241052510526105271052810529105301053110532105331053410535105361053710538105391054010541105421054310544105451054610547105481054910550105511055210553105541055510556105571055810559105601056110562105631056410565105661056710568105691057010571105721057310574105751057610577105781057910580105811058210583105841058510586105871058810589105901059110592105931059410595105961059710598105991060010601106021060310604106051060610607106081060910610106111061210613106141061510616106171061810619106201062
11062210623106241062510626106271062810629106301063110632106331063410635106361063710638106391064010641106421064310644106451064610647106481064910650106511065210653106541065510656106571065810659106601066110662106631066410665106661066710668106691067010671106721067310674106751067610677106781067910680106811068210683106841068510686106871068810689106901069110692106931069410695106961069710698106991070010701107021070310704107051070610707107081070910710107111071210713107141071510716107171071810719107201072110722107231072410725107261072710728107291073010731107321073310734107351073610737107381073910740107411074210743107441074510746107471074810749107501075110752107531075410755107561075710758107591076010761107621076310764107651076610767107681076910770107711077210773107741077510776107771077810779107801078110782107831078410785107861078710788107891079010791107921079310794107951079610797107981079910800108011080210803108041080510806108071080810809108101081110812108131081410815108161081710818108191082010821108221082310824108251082610827108281082910830108311083210833108341083510836108371083810839108401084110842108431084410845108461084710848108491085010851108521085310854108551085610857108581085910860108611086210863108641086510866108671086810869108701087110872108731087410875108761087710878108791088010881108821088310884108851088610887108881088910890108911089210893108941089510896108971089810899109001090110902109031090410905109061090710908109091091010911109121091310914109151091610917109181091910920109211092210923109241092510926109271092810929109301093110932109331093410935109361093710938109391094010941109421094310944109451094610947109481094910950109511095210953109541095510956109571095810959109601096110962109631096410965109661096710968109691097010971109721097310974109751097610977109781097910980109811098210983109841098510986109871098810989109901099110992109931099410995109961099710998109991100011001110021100311004110051100611007110081100911010110111101211013110141101511016110171101811019110201102
11102211023110241102511026110271102811029110301103111032110331103411035110361103711038110391104011041110421104311044110451104611047110481104911050110511105211053110541105511056110571105811059110601106111062110631106411065110661106711068110691107011071110721107311074110751107611077110781107911080110811108211083110841108511086110871108811089110901109111092110931109411095110961109711098110991110011101111021110311104111051110611107111081110911110111111111211113111141111511116111171111811119111201112111122111231112411125111261112711128111291113011131111321113311134111351113611137111381113911140111411114211143111441114511146111471114811149111501115111152111531115411155111561115711158111591116011161111621116311164111651116611167111681116911170111711117211173111741117511176111771117811179111801118111182111831118411185111861118711188111891119011191111921119311194111951119611197111981119911200112011120211203112041120511206112071120811209112101121111212112131121411215112161121711218112191122011221112221122311224112251122611227112281122911230112311123211233112341123511236112371123811239112401124111242112431124411245112461124711248112491125011251112521125311254112551125611257112581125911260112611126211263112641126511266112671126811269112701127111272112731127411275112761127711278112791128011281112821128311284112851128611287112881128911290112911129211293112941129511296112971129811299113001130111302113031130411305113061130711308113091131011311113121131311314113151131611317113181131911320113211132211323113241132511326113271132811329113301133111332113331133411335113361133711338113391134011341113421134311344113451134611347113481134911350113511135211353113541135511356113571135811359113601136111362113631136411365113661136711368113691137011371113721137311374113751137611377113781137911380113811138211383113841138511386113871138811389113901139111392113931139411395113961139711398113991140011401114021140311404114051140611407114081140911410114111141211413114141141511416114171141811419114201142
11142211423114241142511426114271142811429114301143111432114331143411435114361143711438114391144011441114421144311444114451144611447114481144911450114511145211453114541145511456114571145811459114601146111462114631146411465114661146711468114691147011471114721147311474114751147611477114781147911480114811148211483114841148511486114871148811489114901149111492114931149411495114961149711498114991150011501115021150311504115051150611507115081150911510115111151211513115141151511516115171151811519115201152111522115231152411525115261152711528115291153011531115321153311534115351153611537115381153911540115411154211543115441154511546115471154811549115501155111552115531155411555115561155711558115591156011561115621156311564115651156611567115681156911570115711157211573115741157511576115771157811579115801158111582115831158411585115861158711588115891159011591115921159311594115951159611597115981159911600116011160211603116041160511606116071160811609116101161111612116131161411615116161161711618116191162011621116221162311624116251162611627116281162911630116311163211633116341163511636116371163811639116401164111642116431164411645116461164711648116491165011651116521165311654116551165611657116581165911660116611166211663116641166511666116671166811669116701167111672116731167411675116761167711678116791168011681116821168311684116851168611687116881168911690116911169211693116941169511696116971169811699117001170111702117031170411705117061170711708117091171011711117121171311714117151171611717117181171911720117211172211723117241172511726117271172811729117301173111732117331173411735117361173711738117391174011741117421174311744117451174611747117481174911750117511175211753117541175511756117571175811759117601176111762117631176411765117661176711768117691177011771117721177311774117751177611777117781177911780117811178211783117841178511786117871178811789117901179111792117931179411795117961179711798117991180011801118021180311804118051180611807118081180911810118111181211813118141181511816118171181811819118201182
11182211823118241182511826118271182811829118301183111832118331183411835118361183711838118391184011841118421184311844118451184611847118481184911850118511185211853118541185511856118571185811859118601186111862118631186411865118661186711868118691187011871118721187311874118751187611877118781187911880118811188211883118841188511886118871188811889118901189111892118931189411895118961189711898118991190011901119021190311904119051190611907119081190911910119111191211913119141191511916119171191811919119201192111922119231192411925119261192711928119291193011931119321193311934119351193611937119381193911940119411194211943119441194511946119471194811949119501195111952119531195411955119561195711958119591196011961119621196311964119651196611967119681196911970119711197211973119741197511976119771197811979119801198111982119831198411985119861198711988119891199011991119921199311994119951199611997119981199912000120011200212003120041200512006120071200812009120101201112012120131201412015120161201712018120191202012021120221202312024120251202612027120281202912030120311203212033120341203512036120371203812039120401204112042120431204412045120461204712048120491205012051120521205312054120551205612057120581205912060120611206212063120641206512066120671206812069120701207112072120731207412075120761207712078120791208012081120821208312084120851208612087120881208912090120911209212093120941209512096120971209812099121001210112102121031210412105121061210712108121091211012111121121211312114121151211612117121181211912120121211212212123121241212512126121271212812129121301213112132121331213412135121361213712138121391214012141121421214312144121451214612147121481214912150121511215212153121541215512156121571215812159121601216112162121631216412165121661216712168121691217012171121721217312174121751217612177121781217912180121811218212183121841218512186121871218812189121901219112192121931219412195121961219712198121991220012201122021220312204122051220612207122081220912210122111221212213122141221512216122171221812219122201222
11222212223122241222512226122271222812229122301223112232122331223412235122361223712238122391224012241122421224312244122451224612247122481224912250122511225212253122541225512256122571225812259122601226112262122631226412265122661226712268122691227012271122721227312274122751227612277122781227912280122811228212283122841228512286122871228812289122901229112292122931229412295122961229712298122991230012301123021230312304123051230612307123081230912310123111231212313123141231512316123171231812319123201232112322123231232412325123261232712328123291233012331123321233312334123351233612337123381233912340123411234212343123441234512346123471234812349123501235112352123531235412355123561235712358123591236012361123621236312364123651236612367123681236912370123711237212373123741237512376123771237812379123801238112382123831238412385123861238712388123891239012391123921239312394123951239612397123981239912400124011240212403124041240512406124071240812409124101241112412124131241412415124161241712418124191242012421124221242312424124251242612427124281242912430124311243212433124341243512436124371243812439124401244112442124431244412445124461244712448124491245012451124521245312454124551245612457124581245912460124611246212463124641246512466124671246812469124701247112472124731247412475124761247712478124791248012481124821248312484124851248612487124881248912490124911249212493124941249512496124971249812499125001250112502125031250412505125061250712508125091251012511125121251312514125151251612517125181251912520125211252212523125241252512526125271252812529125301253112532125331253412535125361253712538125391254012541125421254312544125451254612547125481254912550125511255212553125541255512556125571255812559125601256112562125631256412565125661256712568125691257012571125721257312574125751257612577125781257912580125811258212583125841258512586125871258812589125901259112592125931259412595125961259712598125991260012601126021260312604126051260612607126081260912610126111261212613126141261512616126171261812619126201262
11262212623126241262512626126271262812629126301263112632126331263412635126361263712638126391264012641126421264312644126451264612647126481264912650126511265212653126541265512656126571265812659126601266112662126631266412665126661266712668126691267012671126721267312674126751267612677126781267912680126811268212683126841268512686126871268812689126901269112692126931269412695126961269712698126991270012701127021270312704127051270612707127081270912710127111271212713127141271512716127171271812719127201272112722127231272412725127261272712728127291273012731127321273312734127351273612737127381273912740127411274212743127441274512746127471274812749127501275112752127531275412755127561275712758127591276012761127621276312764127651276612767127681276912770127711277212773127741277512776127771277812779127801278112782127831278412785127861278712788127891279012791127921279312794127951279612797127981279912800128011280212803128041280512806128071280812809128101281112812128131281412815128161281712818128191282012821128221282312824128251282612827128281282912830128311283212833128341283512836128371283812839128401284112842128431284412845128461284712848128491285012851128521285312854128551285612857128581285912860128611286212863128641286512866128671286812869128701287112872128731287412875128761287712878128791288012881128821288312884128851288612887128881288912890128911289212893128941289512896128971289812899129001290112902129031290412905129061290712908129091291012911129121291312914129151291612917129181291912920129211292212923129241292512926129271292812929129301293112932129331293412935129361293712938129391294012941129421294312944129451294612947129481294912950129511295212953129541295512956129571295812959129601296112962129631296412965129661296712968129691297012971129721297312974129751297612977129781297912980129811298212983129841298512986129871298812989129901299112992129931299412995129961299712998129991300013001130021300313004130051300613007130081300913010130111301213013130141301513016130171301813019130201302
11302213023130241302513026130271302813029130301303113032130331303413035130361303713038130391304013041130421304313044130451304613047130481304913050130511305213053130541305513056130571305813059130601306113062130631306413065130661306713068130691307013071130721307313074130751307613077130781307913080130811308213083130841308513086130871308813089130901309113092130931309413095130961309713098130991310013101131021310313104131051310613107131081310913110131111311213113131141311513116131171311813119131201312113122131231312413125131261312713128131291313013131131321313313134131351313613137131381313913140131411314213143131441314513146131471314813149131501315113152131531315413155131561315713158131591316013161131621316313164131651316613167131681316913170131711317213173131741317513176131771317813179131801318113182131831318413185131861318713188131891319013191131921319313194131951319613197131981319913200132011320213203132041320513206132071320813209132101321113212132131321413215132161321713218132191322013221132221322313224132251322613227132281322913230132311323213233132341323513236132371323813239132401324113242132431324413245132461324713248132491325013251132521325313254132551325613257132581325913260132611326213263132641326513266132671326813269132701327113272132731327413275132761327713278132791328013281132821328313284132851328613287132881328913290132911329213293132941329513296132971329813299133001330113302133031330413305133061330713308133091331013311133121331313314133151331613317133181331913320133211332213323133241332513326133271332813329133301333113332133331333413335133361333713338133391334013341133421334313344133451334613347133481334913350133511335213353133541335513356133571335813359133601336113362133631336413365133661336713368133691337013371133721337313374133751337613377133781337913380133811338213383133841338513386133871338813389133901339113392133931339413395133961339713398133991340013401134021340313404134051340613407134081340913410134111341213413134141341513416134171341813419134201342
11342213423134241342513426134271342813429134301343113432134331343413435134361343713438134391344013441134421344313444134451344613447134481344913450134511345213453134541345513456134571345813459134601346113462134631346413465134661346713468134691347013471134721347313474134751347613477134781347913480134811348213483134841348513486134871348813489134901349113492134931349413495134961349713498134991350013501135021350313504135051350613507135081350913510135111351213513135141351513516135171351813519135201352113522135231352413525135261352713528135291353013531135321353313534135351353613537135381353913540135411354213543135441354513546135471354813549135501355113552135531355413555135561355713558135591356013561135621356313564135651356613567135681356913570135711357213573135741357513576135771357813579135801358113582135831358413585135861358713588135891359013591135921359313594135951359613597135981359913600136011360213603136041360513606136071360813609136101361113612136131361413615136161361713618136191362013621136221362313624136251362613627136281362913630136311363213633136341363513636136371363813639136401364113642136431364413645136461364713648136491365013651136521365313654136551365613657136581365913660136611366213663136641366513666136671366813669136701367113672136731367413675136761367713678136791368013681136821368313684136851368613687136881368913690136911369213693136941369513696136971369813699137001370113702137031370413705137061370713708137091371013711137121371313714137151371613717137181371913720137211372213723137241372513726137271372813729137301373113732137331373413735137361373713738137391374013741137421374313744137451374613747137481374913750137511375213753137541375513756137571375813759137601376113762137631376413765137661376713768137691377013771137721377313774137751377613777137781377913780137811378213783137841378513786137871378813789137901379113792137931379413795137961379713798137991380013801138021380313804138051380613807138081380913810138111381213813138141381513816138171381813819138201382
11382213823138241382513826138271382813829138301383113832138331383413835138361383713838138391384013841138421384313844138451384613847138481384913850138511385213853138541385513856138571385813859138601386113862138631386413865138661386713868138691387013871138721387313874138751387613877138781387913880138811388213883138841388513886138871388813889138901389113892138931389413895138961389713898138991390013901139021390313904139051390613907139081390913910139111391213913139141391513916139171391813919139201392113922139231392413925139261392713928139291393013931139321393313934139351393613937139381393913940139411394213943139441394513946139471394813949139501395113952139531395413955139561395713958139591396013961139621396313964139651396613967139681396913970139711397213973139741397513976139771397813979139801398113982139831398413985139861398713988139891399013991139921399313994139951399613997139981399914000140011400214003140041400514006140071400814009140101401114012140131401414015140161401714018140191402014021140221402314024140251402614027140281402914030140311403214033140341403514036140371403814039140401404114042140431404414045140461404714048140491405014051140521405314054140551405614057140581405914060140611406214063140641406514066140671406814069140701407114072140731407414075140761407714078140791408014081140821408314084140851408614087140881408914090140911409214093140941409514096140971409814099141001410114102141031410414105141061410714108141091411014111141121411314114141151411614117141181411914120141211412214123141241412514126141271412814129141301413114132141331413414135141361413714138141391414014141141421414314144141451414614147141481414914150141511415214153141541415514156141571415814159141601416114162141631416414165141661416714168141691417014171141721417314174141751417614177141781417914180141811418214183141841418514186141871418814189141901419114192141931419414195141961419714198141991420014201142021420314204142051420614207142081420914210142111421214213142141421514216142171421814219142201422
11422214223142241422514226142271422814229142301423114232142331423414235142361423714238142391424014241142421424314244142451424614247142481424914250142511425214253142541425514256142571425814259142601426114262142631426414265142661426714268142691427014271142721427314274142751427614277142781427914280142811428214283142841428514286142871428814289142901429114292142931429414295142961429714298142991430014301143021430314304143051430614307143081430914310143111431214313143141431514316143171431814319143201432114322143231432414325143261432714328143291433014331143321433314334143351433614337143381433914340143411434214343143441434514346143471434814349143501435114352143531435414355143561435714358143591436014361143621436314364143651436614367143681436914370143711437214373143741437514376143771437814379143801438114382143831438414385143861438714388143891439014391143921439314394143951439614397143981439914400144011440214403144041440514406144071440814409144101441114412144131441414415144161441714418144191442014421144221442314424144251442614427144281442914430144311443214433144341443514436144371443814439144401444114442144431444414445144461444714448144491445014451144521445314454144551445614457144581445914460144611446214463144641446514466144671446814469144701447114472144731447414475144761447714478144791448014481144821448314484144851448614487144881448914490144911449214493144941449514496144971449814499145001450114502145031450414505145061450714508145091451014511145121451314514145151451614517145181451914520145211452214523145241452514526145271452814529145301453114532145331453414535145361453714538145391454014541145421454314544145451454614547145481454914550145511455214553145541455514556145571455814559145601456114562145631456414565145661456714568145691457014571145721457314574145751457614577145781457914580145811458214583145841458514586145871458814589145901459114592145931459414595145961459714598145991460014601146021460314604146051460614607146081460914610146111461214613146141461514616146171461814619146201462
11462214623146241462514626146271462814629146301463114632146331463414635146361463714638146391464014641146421464314644146451464614647146481464914650146511465214653146541465514656146571465814659146601466114662146631466414665146661466714668146691467014671146721467314674146751467614677146781467914680146811468214683146841468514686146871468814689146901469114692146931469414695146961469714698146991470014701147021470314704147051470614707147081470914710147111471214713147141471514716147171471814719147201472114722147231472414725147261472714728147291473014731147321473314734147351473614737147381473914740147411474214743147441474514746147471474814749147501475114752147531475414755147561475714758147591476014761147621476314764147651476614767147681476914770147711477214773147741477514776147771477814779147801478114782147831478414785147861478714788147891479014791147921479314794147951479614797147981479914800148011480214803148041480514806148071480814809148101481114812148131481414815148161481714818148191482014821148221482314824148251482614827148281482914830148311483214833148341483514836148371483814839148401484114842148431484414845148461484714848148491485014851148521485314854148551485614857148581485914860148611486214863148641486514866148671486814869148701487114872148731487414875148761487714878148791488014881148821488314884148851488614887148881488914890148911489214893148941489514896148971489814899149001490114902149031490414905149061490714908149091491014911149121491314914149151491614917149181491914920149211492214923149241492514926149271492814929149301493114932149331493414935149361493714938149391494014941149421494314944149451494614947149481494914950149511495214953149541495514956149571495814959149601496114962149631496414965149661496714968149691497014971149721497314974149751497614977149781497914980149811498214983149841498514986149871498814989149901499114992149931499414995149961499714998149991500015001150021500315004150051500615007150081500915010150111501215013150141501515016150171501815019150201502
11502215023150241502515026150271502815029150301503115032150331503415035150361503715038150391504015041150421504315044150451504615047150481504915050150511505215053150541505515056150571505815059150601506115062150631506415065150661506715068150691507015071150721507315074150751507615077150781507915080150811508215083150841508515086150871508815089150901509115092150931509415095150961509715098150991510015101151021510315104151051510615107151081510915110151111511215113151141511515116151171511815119151201512115122151231512415125151261512715128151291513015131151321513315134151351513615137151381513915140151411514215143151441514515146151471514815149151501515115152151531515415155151561515715158151591516015161151621516315164151651516615167151681516915170151711517215173151741517515176151771517815179151801518115182151831518415185151861518715188151891519015191151921519315194151951519615197151981519915200152011520215203152041520515206152071520815209152101521115212152131521415215152161521715218152191522015221152221522315224152251522615227152281522915230152311523215233152341523515236152371523815239152401524115242152431524415245152461524715248152491525015251152521525315254152551525615257152581525915260152611526215263152641526515266152671526815269152701527115272152731527415275152761527715278152791528015281152821528315284152851528615287152881528915290152911529215293152941529515296152971529815299153001530115302153031530415305153061530715308153091531015311153121531315314153151531615317153181531915320153211532215323153241532515326153271532815329153301533115332153331533415335153361533715338153391534015341153421534315344153451534615347153481534915350153511535215353153541535515356153571535815359153601536115362153631536415365153661536715368153691537015371153721537315374153751537615377153781537915380153811538215383153841538515386153871538815389153901539115392153931539415395153961539715398153991540015401154021540315404154051540615407154081540915410154111541215413154141541515416154171541815419154201542
11542215423154241542515426154271542815429154301543115432154331543415435154361543715438154391544015441154421544315444154451544615447154481544915450154511545215453154541545515456154571545815459154601546115462154631546415465154661546715468154691547015471154721547315474154751547615477154781547915480154811548215483154841548515486154871548815489154901549115492154931549415495154961549715498154991550015501155021550315504155051550615507155081550915510155111551215513155141551515516155171551815519155201552115522155231552415525155261552715528155291553015531155321553315534155351553615537155381553915540155411554215543155441554515546155471554815549155501555115552155531555415555155561555715558155591556015561155621556315564155651556615567155681556915570155711557215573155741557515576155771557815579155801558115582155831558415585155861558715588155891559015591155921559315594155951559615597155981559915600156011560215603156041560515606156071560815609156101561115612156131561415615156161561715618156191562015621156221562315624156251562615627156281562915630156311563215633156341563515636156371563815639156401564115642156431564415645156461564715648156491565015651156521565315654156551565615657156581565915660156611566215663156641566515666156671566815669156701567115672156731567415675156761567715678156791568015681156821568315684156851568615687156881568915690156911569215693156941569515696156971569815699157001570115702157031570415705157061570715708157091571015711157121571315714157151571615717157181571915720157211572215723157241572515726157271572815729157301573115732157331573415735157361573715738157391574015741157421574315744157451574615747157481574915750157511575215753157541575515756157571575815759157601576115762157631576415765157661576715768157691577015771157721577315774157751577615777157781577915780157811578215783157841578515786157871578815789157901579115792157931579415795157961579715798157991580015801158021580315804158051580615807158081580915810158111581215813158141581515816158171581815819158201582
1158221582315824158251582615827158281582915830158311583215833158341583515836158371583815839158401584115842158431584415845158461584715848158491585015851158521585315854158551585615857158581585915860158611586215863158641586515866158671586815869158701587115872158731587415875158761587715878158791588015881158821588315884158851588615887158881588915890158911589215893158941589515896158971589815899159001590115902159031590415905159061590715908159091591015911159121591315914159151591615917159181591915920159211592215923159241592515926159271592815929159301593115932159331593415935159361593715938159391594015941159421594315944159451594615947159481594915950159511595215953159541595515956159571595815959159601596115962159631596415965159661596715968159691597015971159721597315974159751597615977159781597915980159811598215983159841598515986159871598815989159901599115992159931599415995159961599715998159991600016001160021600316004160051600616007160081600916010160111601216013160141601516016160171601816019160201602116022160231602416025160261602716028160291603016031160321603316034160351603616037160381603916040160411604216043160441604516046160471604816049160501605116052160531605416055160561605716058160591606016061160621606316064160651606616067160681606916070160711607216073160741607516076160771607816079160801608116082160831608416085160861608716088160891609016091160921609316094160951609616097160981609916100161011610216103161041610516106161071610816109161101611116112161131611416115161161611716118161191612016121161221612316124161251612616127161281612916130161311613216133161341613516136161371613816139161401614116142161431614416145161461614716148161491615016151161521615316154161551615616157161581615916160161611616216163161641616516166161671616816169161701617116172161731617416175161761617716178161791618016181161821618316184161851618616187161881618916190161911619216193 |
- /*
- * Copyright (c) 2022,2024 HPMicro
- *
- * SPDX-License-Identifier: BSD-3-Clause
- *
- */
- #ifndef __HPM_MATH_H__
- #define __HPM_MATH_H__
- #include <stddef.h>
- /**
- * @defgroup hpmmath HPMicro Math Functions
- * @ingroup middleware_interfaces
- */
- #define HPM_DSP_HW_NDS32 1 /* andes hardware dsp */
- #ifdef CONFIG_HPM_MATH_HAS_EXTRA_CONFIG
- #include CONFIG_HPM_MATH_HAS_EXTRA_CONFIG
- #else
- /* Enable Compute Cell Library*/
- /* #define HPM_EN_MATH_FFA_LIB */
- /* #define HPM_EN_MATH_DSP_LIB */
- /* #define HPM_EN_MATH_NN_LIB */
- #define HPM_MATH_DSP_STATISTICS 1
- #define HPM_MATH_DSP_BASIC 1
- #define HPM_MATH_DSP_COMPLEX 1
- #define HPM_MATH_DSP_CONTROLLER 1
- #define HPM_MATH_DSP_DISTANCE 1
- #define HPM_MATH_DSP_FILTERING 1
- #define HPM_MATH_DSP_MATRIX 1
- #define HPM_MATH_DSP_SVM 1
- #define HPM_MATH_DSP_TRANSFORM 1
- #define HPM_MATH_DSP_UTILS 1
- #define HPM_MATH_DSP_SORT 1
- #define HPM_MATH_NN_ACTIVATION 1
- #define HPM_MATH_NN_TINYENGINE 1
- #define HPM_MATH_NN_BASIC 1
- #define HPM_MATH_NN_CONCATENATION 1
- #define HPM_MATH_NN_CONVOLUTION 1
- #define HPM_MATH_NN_CONNECTED 1
- #define HPM_MATH_NN_POOLING 1
- #define HPM_MATH_NN_SOFTMAX 1
- #define HPM_MATH_NN_UTIL 1
- #define HPM_DSP_CORE HPM_DSP_HW_NDS32 /* DSP core selection */
- #define HPM_MATH_PI (3.14159265358979323846)
- /**
- * @brief HPM_MATH_SW_FFT_CHECKLIST Enabled to use table lookup to speed up the software fft,
- * but will increase the code space,and only support sampling points 2^( 2-10).
- *
- * With this option turned off,
- * the software fft can support as many sample points as necessary with sufficient space
- *
- */
- #define HPM_MATH_SW_FFT_CHECKLIST
- #endif
- #ifdef __cplusplus
- extern "C"
- {
- #endif
- #ifdef HPM_MATH_DSP_STATISTICS
- /**
- * @defgroup statistics DSP Statistics Functions
- * @ingroup hpmmath
- * @{
- */
- #ifdef HPM_EN_MATH_DSP_LIB
- #ifdef __zcc__
- #include "tpt_math.h"
- #endif
- #include "riscv_dsp_statistics_math.h"
// Maximum
/**
 * @brief Maximum value of the floating-point vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @param[out] *index index of the maximum value.
 * @return maximum value.
 */
static inline float32_t hpm_dsp_max_f32(const float32_t *src, uint32_t size, uint32_t *index)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    f32_t res;
    tpt_max_f32(&res, index, src, size);
    return res;
#else
    return riscv_dsp_max_f32(src, size, index);
#endif
#endif
}
/**
 * @brief Maximum value of the floating-point vector (value only, no index reported).
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @return maximum value.
 */
static inline float32_t hpm_dsp_max_val_f32(const float32_t *src, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    return riscv_dsp_max_val_f32(src, size);
#endif
}
/**
 * @brief Maximum value of the q15 vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @param[out] *index index of the maximum value.
 * @return maximum value.
 */
static inline q15_t hpm_dsp_max_q15(const q15_t *src, uint32_t size, uint32_t *index)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    q15_t res;
    tpt_max_q15(&res, index, src, size);
    return res;
#else
    return riscv_dsp_max_q15(src, size, index);
#endif
#endif
}
/**
 * @brief Maximum value of the q31 vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @param[out] *index index of the maximum value.
 * @return maximum value.
 */
static inline q31_t hpm_dsp_max_q31(const q31_t *src, uint32_t size, uint32_t *index)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    q31_t res;
    tpt_max_q31(&res, index, src, size);
    return res;
#else
    return riscv_dsp_max_q31(src, size, index);
#endif
#endif
}
/**
 * @brief Maximum value of the q7 vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @param[out] *index index of the maximum value.
 * @return maximum value.
 */
static inline q7_t hpm_dsp_max_q7(const q7_t *src, uint32_t size, uint32_t *index)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    q7_t res;
    tpt_max_q7(&res, index, src, size);
    return res;
#else
    return riscv_dsp_max_q7(src, size, index);
#endif
#endif
}
/**
 * @brief Maximum value of the u8 vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @param[out] *index index of the maximum value.
 * @return maximum value.
 */
static inline uint8_t hpm_dsp_max_u8(const uint8_t *src, uint32_t size, uint32_t *index)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    return riscv_dsp_max_u8(src, size, index);
#endif
}
// Minimum
/**
 * @brief Minimum value of the floating-point vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @param[out] *index index of the minimum value.
 * @return minimum value.
 */
static inline float32_t hpm_dsp_min_f32(const float32_t *src, uint32_t size, uint32_t *index)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    f32_t res;
    tpt_min_f32(&res, index, src, size);
    return res;
#else
    return riscv_dsp_min_f32(src, size, index);
#endif
#endif
}
/**
 * @brief Minimum value of the q15 vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @param[out] *index index of the minimum value.
 * @return minimum value.
 */
static inline q15_t hpm_dsp_min_q15(const q15_t *src, uint32_t size, uint32_t *index)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    q15_t res;
    tpt_min_q15(&res, index, src, size);
    return res;
#else
    return riscv_dsp_min_q15(src, size, index);
#endif
#endif
}
/**
 * @brief Minimum value of the q31 vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @param[out] *index index of the minimum value.
 * @return minimum value.
 */
static inline q31_t hpm_dsp_min_q31(const q31_t *src, uint32_t size, uint32_t *index)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    q31_t res;
    tpt_min_q31(&res, index, src, size);
    return res;
#else
    return riscv_dsp_min_q31(src, size, index);
#endif
#endif
}
/**
 * @brief Minimum value of the q7 vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @param[out] *index index of the minimum value.
 * @return minimum value.
 */
static inline q7_t hpm_dsp_min_q7(const q7_t *src, uint32_t size, uint32_t *index)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    q7_t res;
    tpt_min_q7(&res, index, src, size);
    return res;
#else
    return riscv_dsp_min_q7(src, size, index);
#endif
#endif
}
/**
 * @brief Minimum value of the u8 vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @param[out] *index index of the minimum value.
 * @return minimum value.
 */
static inline uint8_t hpm_dsp_min_u8(const uint8_t *src, uint32_t size, uint32_t *index)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    return riscv_dsp_min_u8(src, size, index);
#endif
}
// Mean
/**
 * @brief Mean value of the floating-point vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @return mean value.
 */
static inline float32_t hpm_dsp_mean_f32(const float32_t *src, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    f32_t res;
    tpt_mean_f32(&res, src, size);
    return res;
#else
    return riscv_dsp_mean_f32(src, size);
#endif
#endif
}
/**
 * @brief Mean value of the q15 vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @return mean value.
 *
 * <b>Function notes:</b>
 *
 * The 1.15 format input is accumulated in a 32-bit accumulator in 17.15
 * format and then truncated to yield a result of 1.15 format.
 */
static inline q15_t hpm_dsp_mean_q15(const q15_t *src, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    q15_t res;
    tpt_mean_q15(&res, src, size);
    return res;
#else
    return riscv_dsp_mean_q15(src, size);
#endif
#endif
}
/**
 * @brief Mean value of the q31 vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @return mean value.
 *
 * <b>Function notes:</b>
 *
 * The 1.31 format input is accumulated in a 64-bit accumulator in 33.31
 * format and then truncated to yield a result of 1.31 format.
 */
static inline q31_t hpm_dsp_mean_q31(const q31_t *src, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    q31_t res;
    tpt_mean_q31(&res, src, size);
    return res;
#else
    return riscv_dsp_mean_q31(src, size);
#endif
#endif
}
/**
 * @brief Mean value of the q7 vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @return mean value.
 *
 * <b>Function notes:</b>
 *
 * The 1.7 format input is accumulated in a 32-bit accumulator in 25.7
 * format and then truncated to yield a result of 1.7 format.
 */
static inline q7_t hpm_dsp_mean_q7(const q7_t *src, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    q7_t res;
    tpt_mean_q7(&res, src, size);
    return res;
#else
    return riscv_dsp_mean_q7(src, size);
#endif
#endif
}
/**
 * @brief Mean value of the u8 vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @return mean value.
 *
 * The 8-bit format input is accumulated in a 32-bit accumulator
 * and then truncated to yield a result of 8-bit format.
 */
static inline uint8_t hpm_dsp_mean_u8(const uint8_t *src, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    return riscv_dsp_mean_u8(src, size);
#endif
}
// Sum of the Squares
/**
 * @brief Sum of the squares of the floating-point vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @return Sum of the squares value.
 */
static inline float32_t hpm_dsp_pwr_f32(const float32_t *src, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    f32_t res;
    tpt_power_f32(&res, src, size);
    return res;
#else
    return riscv_dsp_pwr_f32(src, size);
#endif
#endif
}
/**
 * @brief Sum of the squares of the q15 vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @return Sum of the squares value.
 *
 * <b>Function notes:</b>
 *
 * The 1.15 format input is multiplied yields a 2.30 format, and then added
 * without saturation to a 64-bit accumulator in 34.30 format. Finally,
 * the return result is in 34.30 format.
 */
static inline q63_t hpm_dsp_pwr_q15(const q15_t *src, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    q63_t res;
    tpt_power_q15(&res, src, size);
    return res;
#else
    return riscv_dsp_pwr_q15(src, size);
#endif
#endif
}
/**
 * @brief Sum of the squares of the q31 vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @return Sum of the squares value.
 *
 * <b>Function notes:</b>
 *
 * The 1.31 format input is multiplied yields a 2.62 format and this result
 * is truncated to 2.48 format by discarding the lower 14 bits. The 2.48
 * result is then added without saturation to a 64-bit accumulator in 16.48
 * format. Finally, the return result is in 16.48 format.
 */
static inline q63_t hpm_dsp_pwr_q31(const q31_t *src, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    q63_t res;
    tpt_power_q31(&res, src, size);
    return res;
#else
    return riscv_dsp_pwr_q31(src, size);
#endif
#endif
}
/**
 * @brief Sum of the squares of the q7 vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @return Sum of the squares value.
 *
 * <b>Function notes:</b>
 *
 * The 1.7 format input is multiplied yields a 2.14 format, and then added
 * without saturation to a 32-bit accumulator in 18.14 format. Finally,
 * the return result is in 18.14 format.
 */
static inline q31_t hpm_dsp_pwr_q7(const q7_t *src, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    q31_t res;
    tpt_power_q7(&res, src, size);
    return res;
#else
    return riscv_dsp_pwr_q7(src, size);
#endif
#endif
}
// Root Mean Square
/**
 * @brief RMS of the floating-point vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @return RMS value.
 */
static inline float32_t hpm_dsp_rms_f32(const float32_t *src, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    f32_t res;
    tpt_rms_f32(&res, src, size);
    return res;
#else
    return riscv_dsp_rms_f32(src, size);
#endif
#endif
}
/**
 * @brief RMS of the q15 vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @return RMS value.
 *
 * <b>Function notes:</b>
 *
 * The 1.15 format input is multiplied yields a 2.30 format, and then added
 * without saturation to a 64-bit accumulator in 34.30 format. Finally,
 * the added output is truncated to 34.15 format by discarding the lower 15
 * bits, and then saturated to yield a result in 1.15 format.
 */
static inline q15_t hpm_dsp_rms_q15(const q15_t *src, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    q15_t res;
    tpt_rms_q15(&res, src, size);
    return res;
#else
    return riscv_dsp_rms_q15(src, size);
#endif
#endif
}
/**
 * @brief RMS of the q31 vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @return RMS value.
 *
 * <b>Function notes:</b>
 *
 * The 1.31 format input is multiplied yields a 2.62 format. In order to
 * avoid overflows, the input signal must be scaled down by
 * <code>log2(size)</code> bits, Finally, the 2.62 accumulator is right
 * shifted by 31 bits to yield a 1.31 format value.
 */
static inline q31_t hpm_dsp_rms_q31(const q31_t *src, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    q31_t res;
    tpt_rms_q31(&res, src, size);
    return res;
#else
    return riscv_dsp_rms_q31(src, size);
#endif
#endif
}
// Standard deviation
/**
 * @brief Standard deviation of the floating-point vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @return Standard deviation value.
 */
static inline float32_t hpm_dsp_std_f32(const float32_t *src, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    f32_t res;
    tpt_std_f32(&res, src, size);
    return res;
#else
    return riscv_dsp_std_f32(src, size);
#endif
#endif
}
/**
 * @brief Standard deviation of the q15 vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @return Standard deviation value.
 *
 * <b>Function notes:</b>
 *
 * The 1.15 format input is multiplied yields a 2.30 format, and then added
 * without saturation to a 64-bit accumulator in 34.30 format. Finally,
 * the added output is truncated to 34.15 format by discarding the lower 15
 * bits, and then saturated to yield a result in 1.15 format.
 */
static inline q15_t hpm_dsp_std_q15(const q15_t *src, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    q15_t res;
    tpt_std_q15(&res, src, size);
    return res;
#else
    return riscv_dsp_std_q15(src, size);
#endif
#endif
}
/**
 * @brief Standard deviation of the q31 vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @return Standard deviation value.
 *
 * <b>Function notes:</b>
 *
 * The 1.31 format input is multiplied yields a 2.62 format. In order to
 * avoid overflows, the input signal must be scaled down by
 * <code>log2(size)</code> bits, Finally, the 2.62 accumulator is right
 * shifted by 31 bits to yield a 1.31 format value.
 */
static inline q31_t hpm_dsp_std_q31(const q31_t *src, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    q31_t res;
    tpt_std_q31(&res, src, size);
    return res;
#else
    return riscv_dsp_std_q31(src, size);
#endif
#endif
}
/**
 * @brief Standard deviation of the u8 vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @return Standard deviation value (q15 format).
 *
 * <b>Function notes:</b>
 * The 8-bit format input is multiplied, yielding a 16-bit format, then added
 * with saturation to a 32-bit accumulator in 16.16 format. The accumulated
 * output is then truncated and saturated to yield a result in 1.15 format.
 * (NOTE(review): the original note cited "34.15 format ... lower 1 bits",
 * which looks garbled — confirm against the Andes libdsp documentation.)
 */
static inline q15_t hpm_dsp_std_u8(const uint8_t *src, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    return riscv_dsp_std_u8(src, size);
#endif
}
// Variance
/**
 * @brief Variance of the floating-point vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @return Variance value.
 */
static inline float32_t hpm_dsp_var_f32(const float32_t *src, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    f32_t res;
    tpt_var_f32(&res, src, size);
    return res;
#else
    return riscv_dsp_var_f32(src, size);
#endif
#endif
}
- /**
- * @brief Variance of the q15 vector.
- * @param[in] *src points to the input vector.
- * @param[in] size size of the vectors.
- * @return Variance value.
- *
- * <b>Function notes:</b>
- *
- * The 1.15 format input is multiplied yields a 2.30 format, and then added
- * without saturation to a 64-bit accumulator in 34.30 format. Finally,
- * the added output is truncated to 34.15 format by discarding the lower 15
- * bits, and then saturated to yield a result in 1.15 format.
- */
- static inline q31_t hpm_dsp_var_q15(const q15_t *src, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- q15_t res;
- tpt_var_q15(&res, src, size);
- return res;
- #else
- return riscv_dsp_var_q15(src, size);
- #endif
- #endif
- }
- /**
- * @brief Variance of the q31 vector.
- * @param[in] *src points to the input vector.
- * @param[in] size size of the vectors.
- * @return Variance value.
- *
- * <b>Function notes:</b>
- *
- * The 1.31 format input is multiplied yields a 2.62 format. In order to
- * avoid overflows, the input signal must be scaled down by
- * <code>log2(size)</code> bits, Finally, the 2.62 accumulator is right
- * shifted by 31 bits to yield a 1.31 format value.
- */
- static inline q63_t hpm_dsp_var_q31(const q31_t *src, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- q31_t res;
- tpt_var_q31(&res, src, size);
- return res;
- #else
- return riscv_dsp_var_q31(src, size);
- #endif
- #endif
- }
/**
 * @brief Entropy of the floating-point vector.
 * @param[in] *src points to the input vector.
 * @param[in] size size of the vectors.
 * @return Entropy value.
 *
 * E = -sum (P .* log2 (P))
 */
static inline float32_t hpm_dsp_entropy_f32(const float32_t *src, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    return tpt_entropy_f32(src, size);
#else
    return riscv_dsp_entropy_f32(src, size);
#endif
#endif
}
/**
 * @brief Relative Entropy of the floating-point vector.
 * @param[in] *src1 points to the first input vector.
 * @param[in] *src2 points to the second input vector.
 * @param[in] size size of the vectors.
 * @return Relative Entropy value.
 *
 * Relative Entropy is also called Kullback-Leibler divergence:
 * D(A || B) = A * ln(A / B);
 *
 */
static inline float32_t hpm_dsp_relative_entropy_f32(const float32_t *src1, const float32_t *src2, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    return tpt_relative_entropy_f32(src1, src2, size);
#else
    return riscv_dsp_relative_entropy_f32(src1, src2, size);
#endif
#endif
}
- /**
- * @brief Log-Sum-Exp of the floating-potint vector.
- * @param[in] *src points to the input vector.
- * @param[in] size size of the vectors.
- * @return lse value.
- *
- */
- static inline float32_t hpm_dsp_lse_f32(const float32_t *src, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_lse_f32(src, size);
- #else
- return riscv_dsp_lse_f32(src, size);
- #endif
- #endif
- }
/**
 * @brief Dot product with Log-Sum-Exp of the floating-point vector.
 * @param[in] *src1 points to the first input vector.
 * @param[in] *src2 points to the second input vector.
 * @param[in] size size of the vectors.
 * @param[in] *buffer points to temporary buffer.
 * @return the Log-Sum-Exp of dot product value.
 *
 */
static inline float32_t hpm_dsp_lse_dprod_f32(const float32_t *src1, const float32_t *src2, uint32_t size, float32_t *buffer)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    return tpt_lse_dprod_f32(src1, src2, size, buffer);
#else
    return riscv_dsp_lse_dprod_f32(src1, src2, size, buffer);
#endif
#endif
}
/**
 * @brief Naive Gaussian Bayesian Estimator
 *
 * @param[in] *instance points to a naive bayes instance
 * @param[in] *src points to the elements of the input vector.
 * @param[in] *buf points to a buffer of length numofclass / numberOfClasses
 * @return The predicted class
 *
 */
static inline uint32_t hpm_dsp_gaussian_naive_bayes_est_f32(const riscv_dsp_gaussian_naivebayes_f32_t *instance, const float32_t *src, float32_t *buf)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    return riscv_dsp_gaussian_naive_bayes_est_f32(instance, src, buf);
#endif
}
/**
 * @brief Maximum absolute value of the floating-point vector.
 * @param[in] src pointer of the input vector
 * @param[in] size number of elements in a vector
 * @param[out] index index of the maximum value
 * @return Maximum value
 */
static inline float32_t hpm_dsp_absmax_f32(const float32_t *src, uint32_t size, uint32_t *index)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    return riscv_dsp_absmax_f32(src, size, index);
#endif
}
/**
 * @brief Maximum absolute value of the q15 vector.
 * @param[in] src pointer of the input vector
 * @param[in] size number of elements in a vector
 * @param[out] index index of the maximum value
 * @return Maximum value
 */
static inline q15_t hpm_dsp_absmax_q15(const q15_t *src, uint32_t size, uint32_t *index)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    return riscv_dsp_absmax_q15(src, size, index);
#endif
}
/**
 * @brief Maximum absolute value of the q31 vector.
 * @param[in] src pointer of the input vector
 * @param[in] size number of elements in a vector
 * @param[out] index index of the maximum value
 * @return Maximum value
 */
static inline q31_t hpm_dsp_absmax_q31(const q31_t *src, uint32_t size, uint32_t *index)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    return riscv_dsp_absmax_q31(src, size, index);
#endif
}
/**
 * @brief Maximum absolute value of the q7 vector.
 * @param[in] src pointer of the input vector
 * @param[in] size number of elements in a vector
 * @param[out] index index of the maximum value
 * @return Maximum value
 */
static inline q7_t hpm_dsp_absmax_q7(const q7_t *src, uint32_t size, uint32_t *index)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    return riscv_dsp_absmax_q7(src, size, index);
#endif
}
/**
 * @brief Minimum absolute value of the floating-point vector.
 * @param[in] src pointer of the input vector
 * @param[in] size number of elements in a vector
 * @param[out] index index of the minimum value
 * @return Minimum value
 */
static inline float32_t hpm_dsp_absmin_f32(const float32_t *src, uint32_t size, uint32_t *index)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    return riscv_dsp_absmin_f32(src, size, index);
#endif
}
/**
 * @brief Minimum absolute value of the q31 vector.
 * @param[in] src pointer of the input vector
 * @param[in] size number of elements in a vector
 * @param[out] index index of the minimum value
 * @return Minimum value
 */
static inline q31_t hpm_dsp_absmin_q31(const q31_t *src, uint32_t size, uint32_t *index)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    return riscv_dsp_absmin_q31(src, size, index);
#endif
}
/**
 * @brief Minimum absolute value of the q15 vector.
 * @param[in] src pointer of the input vector
 * @param[in] size number of elements in a vector
 * @param[out] index index of the minimum value
 * @return Minimum value
 */
static inline q15_t hpm_dsp_absmin_q15(const q15_t *src, uint32_t size, uint32_t *index)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    return riscv_dsp_absmin_q15(src, size, index);
#endif
}
/**
 * @brief Minimum absolute value of the q7 vector.
 * @param[in] src pointer of the input vector
 * @param[in] size number of elements in a vector
 * @param[out] index index of the minimum value
 * @return Minimum value
 */
static inline q7_t hpm_dsp_absmin_q7(const q7_t *src, uint32_t size, uint32_t *index)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    return riscv_dsp_absmin_q7(src, size, index);
#endif
}
- #endif
- #endif
- /**
- * @}
- *
- */
- #ifdef HPM_MATH_DSP_BASIC
- /**
- * @defgroup basic DSP Basic Functions
- * @ingroup hpmmath
- * @{
- */
- #ifdef HPM_EN_MATH_DSP_LIB
- #ifdef __zcc__
- #include "tpt_math.h"
- #endif
- #include "riscv_dsp_basic_math.h"
// Absolute value
/**
 * @brief Absolute value of floating-point vectors.
 * @param[in] *src points to the input vector.
 * @param[out] *dst points to the output vector.
 * @param[in] size size of the vectors.
 */
static inline void hpm_dsp_abs_f32(float32_t *src, float32_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    tpt_abs_f32(dst, src, size);
#else
    riscv_dsp_abs_f32(src, dst, size);
#endif
#endif
}
/**
 * @brief Absolute value of q31 vectors.
 * @param[in] *src points to the input vector.
 * @param[out] *dst points to the output vector.
 * @param[in] size size of the vectors.
 *
 * The Q31 value INT32_MIN (0x80000000) will be saturated to the maximum
 * allowable positive value INT32_MAX.
 */
static inline void hpm_dsp_abs_q31(q31_t *src, q31_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    tpt_abs_q31(dst, src, size);
#else
    riscv_dsp_abs_q31(src, dst, size);
#endif
#endif
}
- /**
- * @brief Absolute value of q15 vectors.
- * @param[in] *src points to the input vector.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * The Q15 value INT16_MIN (0x8000) will be saturated to the maximum
- * allowable positive value INT16_MAX.
- */
- static inline void hpm_dsp_abs_q15(q15_t *src, q15_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_abs_q15(dst, src, size);
- #else
- riscv_dsp_abs_q15(src, dst, size);
- #endif
- #endif
- }
- /**
- * @brief Absolute value of q7 vectors.
- * @param[in] *src points to the input vector.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * The Q7 value INT8_MIN (0x80) will be saturated to the maximum
- * allowable positive value INT8_MAX.
- */
- static inline void hpm_dsp_abs_q7(q7_t *src, q7_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_abs_q7(dst, src, size);
- #else
- riscv_dsp_abs_q7(src, dst, size);
- #endif
- #endif
- }
- // Addition
- /**
- * @brief Addition of floating-point vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- */
- static inline void hpm_dsp_add_f32(float32_t *src1, float32_t *src2, float32_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_add_f32(dst, src1, src2, size);
- #else
- riscv_dsp_add_f32(src1, src2, dst, size);
- #endif
- #endif
- }
- /**
- * @brief Addition of q31 vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * Output results will be saturated in Q31 range [0x80000000 0x7FFFFFFF].
- */
- static inline void hpm_dsp_add_q31(q31_t *src1, q31_t *src2, q31_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_add_q31(dst, src1, src2, size);
- #else
- riscv_dsp_add_q31(src1, src2, dst, size);
- #endif
- #endif
- }
- /**
- * @brief Addition of q15 vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * The output results will be saturated in Q15 range [0x8000 0x7FFF].
- */
- static inline void hpm_dsp_add_q15(q15_t *src1, q15_t *src2, q15_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_add_q15(dst, src1, src2, size);
- #else
- riscv_dsp_add_q15(src1, src2, dst, size);
- #endif
- #endif
- }
- /**
- * @brief Addition of q7 vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * Output results will be saturated in Q7 range [0x80 0x7F].
- */
- static inline void hpm_dsp_add_q7(q7_t *src1, q7_t *src2, q7_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_add_q7(dst, src1, src2, size);
- #else
- riscv_dsp_add_q7(src1, src2, dst, size);
- #endif
- #endif
- }
- /**
- * @brief Addition of U8 vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * Output results will be saturated in U16 range [0x0000 0xFFFF].
- */
- static inline void hpm_dsp_add_u8_u16(uint8_t *src1, uint8_t *src2, uint16_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_add_u8_u16(dst, src1, src2, size);
- #else
- riscv_dsp_add_u8_u16(src1, src2, dst, size);
- #endif
- #endif
- }
- // Subtraction
- /**
- * @brief Subtraction of floating-point vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- */
- static inline void hpm_dsp_sub_f32(float32_t *src1, float32_t *src2, float32_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_sub_f32(dst, src1, src2, size);
- #else
- riscv_dsp_sub_f32(src1, src2, dst, size);
- #endif
- #endif
- }
- /**
- * @brief Subtraction of q31 vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * Output results will be saturated in Q31 range [0x80000000 0x7FFFFFFF].
- */
- static inline void hpm_dsp_sub_q31(q31_t *src1, q31_t *src2, q31_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_sub_q31(dst, src1, src2, size);
- #else
- riscv_dsp_sub_q31(src1, src2, dst, size);
- #endif
- #endif
- }
- /**
- * @brief Subtraction of q15 vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * The output results will be saturated in Q15 range [0x8000 0x7FFF].
- */
- static inline void hpm_dsp_sub_q15(q15_t *src1, q15_t *src2, q15_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_sub_q15(dst, src1, src2, size);
- #else
- riscv_dsp_sub_q15(src1, src2, dst, size);
- #endif
- #endif
- }
- /**
- * @brief Subtraction of q7 vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * Output results will be saturated in Q7 range [0x80 0x7F].
- */
- static inline void hpm_dsp_sub_q7(q7_t *src1, q7_t *src2, q7_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_sub_q7(dst, src1, src2, size);
- #else
- riscv_dsp_sub_q7(src1, src2, dst, size);
- #endif
- #endif
- }
- /**
- * @brief Subtraction of u8 vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * Output results will be saturated in Q7 range [0x80 0x7F].
- */
- static inline void hpm_dsp_sub_u8_q7(uint8_t *src1, uint8_t *src2, q7_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- riscv_dsp_sub_u8_q7(src1, src2, dst, size);
- #endif
- }
- // Multiplication
- /**
- * @brief Multiplication of floating-point vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- */
- static inline void hpm_dsp_mul_f32(float32_t *src1, float32_t *src2, float32_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_mult_f32(dst, src1, src2, size);
- #else
- riscv_dsp_mul_f32(src1, src2, dst, size);
- #endif
- #endif
- }
- /**
- * @brief Multiplication of q31 vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * Output results will be saturated in Q31 range [0x80000000 0x7FFFFFFF].
- */
- static inline void hpm_dsp_mul_q31(q31_t *src1, q31_t *src2, q31_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_mult_q31(dst, src1, src2, size);
- #else
- riscv_dsp_mul_q31(src1, src2, dst, size);
- #endif
- #endif
- }
- /**
- * @brief Multiplication of q15 vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * Output results will be saturated in Q15 range [0x8000 0x7FFF].
- */
- static inline void hpm_dsp_mul_q15(q15_t *src1, q15_t *src2, q15_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_mult_q15(dst, src1, src2, size);
- #else
- riscv_dsp_mul_q15(src1, src2, dst, size);
- #endif
- #endif
- }
- /**
- * @brief Multiplication of q7 vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * Output results will be saturated in Q7 range [0x80 0x7F].
- */
- static inline void hpm_dsp_mul_q7(q7_t *src1, q7_t *src2, q7_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_mult_q7(dst, src1, src2, size);
- #else
- riscv_dsp_mul_q7(src1, src2, dst, size);
- #endif
- #endif
- }
- /**
- * @brief Multiplication of u8 vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * Output results will be in U16 range [0x00 0xFFFF].
- */
- static inline void hpm_dsp_mul_u8_u16(uint8_t *src1, uint8_t *src2, uint16_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- riscv_dsp_mul_u8_u16(src1, src2, dst, size);
- #endif
- }
- // Division
- /**
- * @brief Division of floating-point vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- */
- static inline void hpm_dsp_div_f32(float32_t *src1, float32_t *src2, float32_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_div_f32(dst, src1, src2, size);
- #else
- riscv_dsp_div_f32(src1, src2, dst, size);
- #endif
- #endif
- }
- /**
- * @brief Division of q31 inputs.
- * @param[in] src1 the smaller input value.
- * @param[in] src2 the larger input value.
- * @return division of two inputs.
- */
- static inline q31_t hpm_dsp_div_q31(q31_t src1, q31_t src2)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_div_q31(src1, src2);
- #else
- return riscv_dsp_div_q31(src1, src2);
- #endif
- #endif
- }
- /**
- * @brief Division of q63 inputs divided by a positive 32 bits.
- * @param[in] src1 the q63 input value.
- * @param[in] src2 the positive 32 bits input value.
- * @return division of two inputs.
- */
- static inline q31_t hpm_dsp_div_s64_u32(q63_t src1, uint32_t src2)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_div_s64_u32(src1, src2);
- #else
- return riscv_dsp_div_s64_u32(src1, src2);
- #endif
- #endif
- }
- /**
- * @brief Division of positive 64-bits inputs divided by a positive 32-bits.
- * @param[in] src1 the positive 64-bits input value.
- * @param[in] src2 the positive 32-bits input value.
- * @return division of two inputs.
- */
- static inline q31_t hpm_dsp_div_u64_u32(uint64_t src1, uint32_t src2)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_div_u64_u32(src1, src2);
- #else
- return riscv_dsp_div_u64_u32(src1, src2);
- #endif
- #endif
- }
- // Negation
- /**
- * @brief Negation of floating-point vectors.
- * @param[in] *src points to the input vector.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- */
- static inline void hpm_dsp_neg_f32(float32_t *src, float32_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_negate_f32(dst, src, size);
- #else
- riscv_dsp_neg_f32(src, dst, size);
- #endif
- #endif
- }
- /**
- * @brief Negation of q31 vectors.
- * @param[in] *src points to the input vector.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * The Q31 value INT32_MIN (0x80000000) will be saturated to the maximum
- * allowable positive value INT32_MAX.
- */
- static inline void hpm_dsp_neg_q31(q31_t *src, q31_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_negate_q31(dst, src, size);
- #else
- riscv_dsp_neg_q31(src, dst, size);
- #endif
- #endif
- }
- /**
- * @brief Negation of q15 vectors.
- * @param[in] *src points to the input vector.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * The Q15 value INT16_MIN (0x8000) will be saturated to the maximum
- * allowable positive value INT16_MAX.
- */
- static inline void hpm_dsp_neg_q15(q15_t *src, q15_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_negate_q15(dst, src, size);
- #else
- riscv_dsp_neg_q15(src, dst, size);
- #endif
- #endif
- }
- /**
- * @brief Negation of q7 vectors.
- * @param[in] *src points to the input vector.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * The Q7 value INT8_MIN (0x80) will be saturated to the maximum allowable
- * positive value INT8_MAX.
- */
- static inline void hpm_dsp_neg_q7(q7_t *src, q7_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_negate_q7(dst, src, size);
- #else
- riscv_dsp_neg_q7(src, dst, size);
- #endif
- #endif
- }
- // Dot Production
- /**
- * @brief Dot production of floating-point vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[in] size size of the vectors.
- * @return dot product of two input vectors.
- */
- static inline float32_t hpm_dsp_dprod_f32(float32_t *src1, float32_t *src2, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- f32_t res;
- tpt_dot_prod_f32(&res, src1, src2, size);
- return res;
- #else
- return riscv_dsp_dprod_f32(src1, src2, size);
- #endif
- #endif
- }
- /**
- * @brief Dot production of q31 vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[in] size size of the vectors.
- * @return dot product of two input vectors.
- *
- * The output of multiplications is truncated from 2.62 to 2.48 format and
- * then added without saturation to a 64-bit accumulator. The return value
- * is in 16.48 format. When the size of the vectors less than 2^16, there is
- * no risk to overflow.
- */
- static inline q63_t hpm_dsp_dprod_q31(q31_t *src1, q31_t *src2, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- q63_t res;
- tpt_dot_prod_q31(&res, src1, src2, size);
- return res;
- #else
- return riscv_dsp_dprod_q31(src1, src2, size);
- #endif
- #endif
- }
- /**
- * @brief Dot production of q15 vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[in] size size of the vectors.
- * @return dot product of two input vectors.
- *
- * The output of multiplications is in 2.30 format and then added to a
- * 64-bit accumulator in 34.30 format. The return value is in 34.30 format.
- */
- static inline q63_t hpm_dsp_dprod_q15(q15_t *src1, q15_t *src2, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- q63_t res;
- tpt_dot_prod_q15(&res, src1, src2, size);
- return res;
- #else
- return riscv_dsp_dprod_q15(src1, src2, size);
- #endif
- #endif
- }
- /**
- * @brief Dot production of u8 * q15 vectors.
- * @param[in] *src1 points to the uint8_t format input vector.
- * @param[in] *src2 points to the q15 format input vector.
- * @param[in] size size of the vectors.
- * @return dot product of two input vectors.
- *
- * The output of multiplications is in 1.23 format and then added to an
- * accumulator in 9.23 format. The return result is in 9.23 format.
- */
- static inline q31_t hpm_dsp_dprod_u8xq15(uint8_t *src1, q15_t *src2, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- return riscv_dsp_dprod_u8xq15(src1, src2, size);
- #endif
- }
- /**
- * @brief Dot production of q7 vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[in] size size of the vectors.
- * @return dot product of two input vectors.
- *
- * The output of multiplications is in 2.14 format and then added to an
- * accumulator in 18.14 format. The return result is in 18.14 format.
- */
- static inline q31_t hpm_dsp_dprod_q7(q7_t *src1, q7_t *src2, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- q31_t res;
- tpt_dot_prod_q7(&res, src1, src2, size);
- return res;
- #else
- return riscv_dsp_dprod_q7(src1, src2, size);
- #endif
- #endif
- }
- /**
- * @brief Dot production of q7 * q15 vectors.
- * @param[in] *src1 points to the q7_t format input vector.
- * @param[in] *src2 points to the q15 format input vector.
- * @param[in] size size of the vectors.
- * @return dot product of two input vectors.
- *
- * The output of multiplications is in 1.22 format and then added to an
- * accumulator in 10.22 format. The return result is in 10.22 format.
- */
- static inline q31_t hpm_dsp_dprod_q7xq15(q7_t *src1, q15_t *src2, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- return riscv_dsp_dprod_q7xq15(src1, src2, size);
- #endif
- }
- /**
- * @brief Dot production of U8 vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[in] size size of the vectors.
- * @return dot product of two input vectors.
- *
- * The output of multiplications is in 0.16 format and then added to an
- * accumulator in 16.16 format. The return result is in 16.16 format.
- */
- static inline uint32_t hpm_dsp_dprod_u8(uint8_t *src1, uint8_t *src2, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- return riscv_dsp_dprod_u8(src1, src2, size);
- #endif
- }
- // Offset
- /**
- * @brief The offset of floating-point vectors.
- * @param[in] *src points to the input vector.
- * @param[in] offset is the value to be added.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- */
- static inline void hpm_dsp_offset_f32(float32_t *src, float32_t offset, float32_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_offset_f32(dst, src, offset, size);
- #else
- riscv_dsp_offset_f32(src, offset, dst, size);
- #endif
- #endif
- }
- /**
- * @brief The offset of q31 vectors.
- * @param[in] *src points to the input vector.
- * @param[in] offset is the value to be added.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * Output results are saturated in Q31 range [0x80000000 0x7FFFFFFF].
- */
- static inline void hpm_dsp_offset_q31(q31_t *src, q31_t offset, q31_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_offset_q31(dst, src, offset, size);
- #else
- riscv_dsp_offset_q31(src, offset, dst, size);
- #endif
- #endif
- }
- /**
- * @brief The offset of q15 vectors.
- * @param[in] *src points to the input vector.
- * @param[in] offset is the value to be added.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * Output results are saturated in Q15 range [0x8000 0x7FFF].
- */
- static inline void hpm_dsp_offset_q15(q15_t *src, q15_t offset, q15_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_offset_q15(dst, src, offset, size);
- #else
- riscv_dsp_offset_q15(src, offset, dst, size);
- #endif
- #endif
- }
- /**
- * @brief The offset of q7 vectors.
- * @param[in] *src points to the input vector.
- * @param[in] offset is the value to be added.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * Output results are saturated in Q7 range [0x80 0x7F].
- */
- static inline void hpm_dsp_offset_q7(q7_t *src, q7_t offset, q7_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_offset_q7(dst, src, offset, size);
- #else
- riscv_dsp_offset_q7(src, offset, dst, size);
- #endif
- #endif
- }
- /**
- * @brief The offset of U8 vectors.
- * @param[in] *src points to the input vector.
- * @param[in] offset is the value to be added.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * Output results are saturated in U8 range [0x00 0xFF].
- */
- static inline void hpm_dsp_offset_u8(uint8_t *src, q7_t offset, uint8_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- riscv_dsp_offset_u8(src, offset, dst, size);
- #endif
- }
- // Scale
- /**
- * @brief To multiply a floating-point vectors by a floating-point scale.
- * @param[in] *src points to the input vector.
- * @param[in] scale is the value to be multiplied.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- */
- static inline void hpm_dsp_scale_f32(float32_t *src, float32_t scale, float32_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_scale_f32(dst, src, scale, size);
- #else
- riscv_dsp_scale_f32(src, scale, dst, size);
- #endif
- #endif
- }
- /**
- * @brief To multiply a q31 vectors by a q31 scale.
- * @param[in] *src points to the input vector.
- * @param[in] scalefract is the fractional portion value
- * to be multiplied.
- * @param[in] shift number of bits to shift.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * These are multiplied to yield a 2.62 output and then is shift with
- * saturation to 1.31 format.
- */
- static inline void hpm_dsp_scale_q31(q31_t *src, q31_t scalefract, int8_t shift, q31_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_scale_q31(dst, src, scalefract, shift, size);
- #else
- riscv_dsp_scale_q31(src, scalefract, shift, dst, size);
- #endif
- #endif
- }
- /**
- * @brief To multiply a q15 vectors by a q15 scale.
- * @param[in] *src points to the input vector.
- * @param[in] scalefract is the fractional portion value
- * to be multiplied.
- * @param[in] shift number of bits to shift.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * These are multiplied to yield a 2.30 output and then is shifted with
- * saturation to 1.15 format.
- */
- static inline void hpm_dsp_scale_q15(q15_t *src, q15_t scalefract, int8_t shift, q15_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_scale_q15(dst, src, scalefract, shift, size);
- #else
- riscv_dsp_scale_q15(src, scalefract, shift, dst, size);
- #endif
- #endif
- }
- /**
- * @brief To multiply a q7 vectors by a q7 scale.
- * @param[in] *src points to the input vector.
- * @param[in] scalefract is the fractional portion value
- * to be multiplied.
- * @param[in] shift number of bits to shift.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * These are multiplied to yield a 2.14 output and then is shifted with
- * saturation to 1.7 format.
- */
- static inline void hpm_dsp_scale_q7(q7_t *src, q7_t scalefract, int8_t shift, q7_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_scale_q7(dst, src, scalefract, shift, size);
- #else
- riscv_dsp_scale_q7(src, scalefract, shift, dst, size);
- #endif
- #endif
- }
- /**
- * @brief To multiply a u8 vectors by a q7 scale.
- * @param[in] *src points to the input vector.
- * @param[in] scalefract: is the fractional portion value to be multiplied.
- * @param[in] shift: number of bits to shift.
- * @param[out] *dst points to the output vector.
- * @param[in] size size of the vectors.
- *
- * The inputs are multiplied to yield a 1.15 output and then are shift with
- * saturation to 8-bit formats.
- */
- static inline void hpm_dsp_scale_u8(uint8_t *src, q7_t scalefract, int8_t shift, uint8_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- riscv_dsp_scale_u8(src, scalefract, shift, dst, size);
- #endif
- }
- // Shift
- /**
- * @brief Shifts a q15 vector with a specified shift number.
- * @param[in] *src the input vector.
- * @param[in] shift number of shift bits. If (shift > 0) means shifts
- * left; (shift < 0) means shifts right.
- * @param[out] *dst the output vector.
- * @param[in] size size of the vectors.
- *
- * The input and output are all saturated to q15 range [0x8000 0x7FFF].
- */
- static inline void hpm_dsp_shift_q15(q15_t *src, int8_t shift, q15_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_shift_q15(dst, src, shift, size);
- #else
- riscv_dsp_shift_q15(src, shift, dst, size);
- #endif
- #endif
- }
- /**
- * @brief Shifts a q31 vector with a specified shift number.
- * @param[in] *src the input vector.
- * @param[in] shift number of shift bits. If (shift > 0) means shifts
- * left; (shift < 0) means shifts right.
- * @param[out] *dst the output vector.
- * @param[in] size size of the vectors.
- *
- * The input and output are all saturated to q31 range [0x80000000 0x7FFFFFFF].
- */
- static inline void hpm_dsp_shift_q31(q31_t *src, int8_t shift, q31_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_shift_q31(dst, src, shift, size);
- #else
- riscv_dsp_shift_q31(src, shift, dst, size);
- #endif
- #endif
- }
- /**
- * @brief Shifts a q7 vector with a specified shift number.
- * @param[in] *src the input vector.
- * @param[in] shift number of shift bits. If (shift > 0) means shifts
- * left; (shift < 0) means shifts right.
- * @param[out] *dst the output vector.
- * @param[in] size size of the vectors.
- *
- * The input and output are all saturated to q7 range [0x80 0x7F].
- */
- static inline void hpm_dsp_shift_q7(q7_t *src, int8_t shift, q7_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_shift_q7(dst, src, shift, size);
- #else
- riscv_dsp_shift_q7(src, shift, dst, size);
- #endif
- #endif
- }
- /**
- * @brief Shifts a u8 vector for a specified shift number.
- * @param[in] *src the input vector.
- * @param[in] shift number of shift bits. If (shift > 0) means shifts
- * left; (shift < 0) means shifts right.
- * @param[out] *dst the output vector.
- * @param[in] size size of the vectors.
- *
- * The input and output are all saturated to u8 range [0x00 0xFF].
- */
- static inline void hpm_dsp_shift_u8(uint8_t *src, int8_t shift, uint8_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- riscv_dsp_shift_u8(src, shift, dst, size);
- #endif
- }
- /**
- * @addtogroup basic_clip
- * @{
- */
- /**
- * @brief Elementwise clipping of f32 function.
- * @param[in] *src pointer of the input vector
- * @param[out] *dst pointer of the output vector
- * @param[in] low lower bound.
- * @param[in] high higher bound.
- * @param[in] size number of elements in a vector
- *
- */
- static inline void hpm_dsp_clip_f32(float32_t *src, float32_t *dst, float32_t low, float32_t high, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_clip_f32(dst, src, low, high, size);
- #else
- riscv_dsp_clip_f32(src, dst, low, high, size);
- #endif
- #endif
- }
- /**
- * @brief Elementwise clipping of q31 function.
- * @param[in] *src pointer of the input vector
- * @param[out] *dst pointer of the output vector
- * @param[in] low lower bound.
- * @param[in] high higher bound.
- * @param[in] size number of elements in a vector
- *
- */
- static inline void hpm_dsp_clip_q31(q31_t *src, q31_t *dst, q31_t low, q31_t high, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_clip_q31(dst, src, low, high, size);
- #else
- riscv_dsp_clip_q31(src, dst, low, high, size);
- #endif
- #endif
- }
- /**
- * @brief Elementwise clipping of q15 function.
- * @param[in] *src pointer of the input vector
- * @param[out] *dst pointer of the output vector
- * @param[in] low lower bound.
- * @param[in] high higher bound.
- * @param[in] size number of elements in a vector
- *
- */
- static inline void hpm_dsp_clip_q15(q15_t *src, q15_t *dst, q15_t low, q15_t high, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_clip_q15(dst, src, low, high, size);
- #else
- riscv_dsp_clip_q15(src, dst, low, high, size);
- #endif
- #endif
- }
- /**
- * @brief Elementwise clipping of q7 function.
- * @param[in] *src pointer of the input vector
- * @param[out] *dst pointer of the output vector
- * @param[in] low lower bound.
- * @param[in] high higher bound.
- * @param[in] size number of elements in a vector
- *
- */
- static inline void hpm_dsp_clip_q7(q7_t *src, q7_t *dst, q7_t low, q7_t high, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_clip_q7(dst, src, low, high, size);
- #else
- riscv_dsp_clip_q7(src, dst, low, high, size);
- #endif
- #endif
- }
- /** @} basic_clip */
- // AND
- /**
- * @defgroup basic_and Bitwise AND Functions
- * @brief Bitwise AND Functions
- *
- * Bitwise AND functions calculate logical bitwise AND value from separate source vectors and write the results one-by-one into a destination vector.
- *
- * Andes DSP library supports distinct bitwise AND functions for U32, U16 and U8 data types. These functions are introduced in the subsections below.
- */
- /**
- * @addtogroup basic_and
- * @{
- */
- /**
- * @brief Compute the logical bitwise AND of two u32 vectors.
- * @param[in] *src1 pointer of the first input vector
- * @param[in] *src2 pointer of the second input vector
- * @param[out] *dst pointer of the output vector
- * @param[in] size number of elements in a vector
- *
- */
- static inline void hpm_dsp_and_u32(u32_t *src1, u32_t *src2, u32_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_and_32bit(dst, src1, src2, size);
- #else
- riscv_dsp_and_u32(src1, src2, dst, size);
- #endif
- #endif
- }
- /**
- * @brief Compute the logical bitwise AND of two u8 vectors.
- * @param[in] *src1 pointer of the first input vector
- * @param[in] *src2 pointer of the second input vector
- * @param[out] *dst pointer of the output vector
- * @param[in] size number of elements in a vector
- *
- */
- static inline void hpm_dsp_and_u8(u8_t *src1, u8_t *src2, u8_t *dst, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- tpt_and_8bit(dst, src1, src2, size);
- #else
- riscv_dsp_and_u8(src1, src2, dst, size);
- #endif
- #endif
- }
- /** @} basic_and */
- // OR
- /**
- * @defgroup basic_or Bitwise Inclusive OR Functions
- * @brief Bitwise Inclusive OR Functions
- *
- * Bitwise inclusive OR functions calculate logical bitwise OR value from separate source vectors and write the results one-by-one into a destination vector.
- *
- * Andes DSP library supports distinct bitwise inclusive OR functions for U32, U16 and U8 data types. These functions are introduced in the subsections below.
- */
- /**
- * @addtogroup basic_or
- * @{
- */
/**
 * @brief Compute the logical bitwise OR of two u32 vectors.
 * @param[in] *src1 pointer of the first input vector
 * @param[in] *src2 pointer of the second input vector
 * @param[out] *dst pointer of the output vector
 * @param[in] size number of elements in a vector
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_or_u32(u32_t *src1, u32_t *src2, u32_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes dst as its first argument. */
    tpt_or_32bit(dst, src1, src2, size);
#else
    riscv_dsp_or_u32(src1, src2, dst, size);
#endif
#endif
}
/**
 * @brief Compute the logical bitwise OR of two u16 vectors.
 * @param[in] *src1 pointer of the first input vector
 * @param[in] *src2 pointer of the second input vector
 * @param[out] *dst pointer of the output vector
 * @param[in] size number of elements in a vector
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_or_u16(u16_t *src1, u16_t *src2, u16_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes dst as its first argument. */
    tpt_or_16bit(dst, src1, src2, size);
#else
    riscv_dsp_or_u16(src1, src2, dst, size);
#endif
#endif
}
/**
 * @brief Compute the logical bitwise OR of two u8 vectors.
 * @param[in] *src1 pointer of the first input vector
 * @param[in] *src2 pointer of the second input vector
 * @param[out] *dst pointer of the output vector
 * @param[in] size number of elements in a vector
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_or_u8(u8_t *src1, u8_t *src2, u8_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes dst as its first argument. */
    tpt_or_8bit(dst, src1, src2, size);
#else
    riscv_dsp_or_u8(src1, src2, dst, size);
#endif
#endif
}
- /** @} basic_or */
- // XOR
- /**
- * @defgroup basic_xor Bitwise exclusive OR Functions
- * @brief Bitwise exclusive OR Functions
- *
- * Bitwise exclusive OR (XOR) functions calculate logical bitwise XOR value from separate source vectors and write the results one-by-one into a destination vector.
- *
 * Andes DSP library supports distinct bitwise XOR functions for U32, U16 and U8 data types. These functions are introduced in the subsections below.
- */
- /**
- * @addtogroup basic_xor
- * @{
- */
/**
 * @brief Compute the logical bitwise XOR of two u32 vectors.
 * @param[in] *src1 pointer of the first input vector
 * @param[in] *src2 pointer of the second input vector
 * @param[out] *dst pointer of the output vector
 * @param[in] size number of elements in a vector
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_xor_u32(u32_t *src1, u32_t *src2, u32_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes dst as its first argument. */
    tpt_xor_32bit(dst, src1, src2, size);
#else
    riscv_dsp_xor_u32(src1, src2, dst, size);
#endif
#endif
}
/**
 * @brief Compute the logical bitwise XOR of two u16 vectors.
 * @param[in] *src1 pointer of the first input vector
 * @param[in] *src2 pointer of the second input vector
 * @param[out] *dst pointer of the output vector
 * @param[in] size number of elements in a vector
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_xor_u16(u16_t *src1, u16_t *src2, u16_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes dst as its first argument. */
    tpt_xor_16bit(dst, src1, src2, size);
#else
    riscv_dsp_xor_u16(src1, src2, dst, size);
#endif
#endif
}
/**
 * @brief Compute the logical bitwise XOR of two u8 vectors.
 * @param[in] *src1 pointer of the first input vector
 * @param[in] *src2 pointer of the second input vector
 * @param[out] *dst pointer of the output vector
 * @param[in] size number of elements in a vector
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_xor_u8(u8_t *src1, u8_t *src2, u8_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes dst as its first argument. */
    tpt_xor_8bit(dst, src1, src2, size);
#else
    riscv_dsp_xor_u8(src1, src2, dst, size);
#endif
#endif
}
- /** @} basic_xor */
- // Not
- /**
- * @defgroup basic_not Bitwise NOT Functions
- * @brief Bitwise NOT Functions
- *
- * Bitwise NOT functions calculate logical bitwise NOT value from elements of a source vector and write them one-by-one into a destination vector.
- *
 * Andes DSP library supports distinct bitwise NOT functions for U32, U16 and U8 data types. These functions are introduced in the subsections below.
- */
- /**
- * @addtogroup basic_not
- * @{
- */
/**
 * @brief Compute the logical bitwise NOT of u32 vector.
 * @param[in] *src pointer of the input vector
 * @param[out] *dst pointer of the output vector
 * @param[in] size number of elements in a vector
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_not_u32(u32_t *src, u32_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes dst as its first argument. */
    tpt_not_32bit(dst, src, size);
#else
    riscv_dsp_not_u32(src, dst, size);
#endif
#endif
}
/**
 * @brief Compute the logical bitwise NOT of u16 vector.
 * @param[in] *src pointer of the input vector
 * @param[out] *dst pointer of the output vector
 * @param[in] size number of elements in a vector
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_not_u16(u16_t *src, u16_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes dst as its first argument. */
    tpt_not_16bit(dst, src, size);
#else
    riscv_dsp_not_u16(src, dst, size);
#endif
#endif
}
/**
 * @brief Compute the logical bitwise NOT of u8 vector.
 * @param[in] *src pointer of the input vector
 * @param[out] *dst pointer of the output vector
 * @param[in] size number of elements in a vector
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_not_u8(u8_t *src, u8_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes dst as its first argument. */
    tpt_not_8bit(dst, src, size);
#else
    riscv_dsp_not_u8(src, dst, size);
#endif
#endif
}
- /** @} basic_not */
- /** @} basic */
- #endif
- #include <stdint.h>
- /**
 * @brief Reverse the bit order of 8-bit data, LSB-first to MSB-first
 *
 * @param[in] lsb input data in LSB-first bit order
 * @return uint8_t data with the bit order reversed (MSB-first)
- */
- uint8_t hpm_math_sw_reverse_bit8_lsb_to_msb(uint8_t lsb);
- /**
 * @brief Reverse the bit order of 8-bit data, MSB-first to LSB-first
 *
 * @param[in] msb input data in MSB-first bit order
 * @return uint8_t data with the bit order reversed (LSB-first)
- */
- uint8_t hpm_math_sw_reverse_bit8_msb_to_lsb(uint8_t msb);
- /**
 * @brief Reverse the bit order of 32-bit data, LSB-first to MSB-first
 *
 * @param[in] lsb input data in LSB-first bit order
 * @return uint32_t data with the bit order reversed (MSB-first)
- */
- uint32_t hpm_math_sw_reverse_bit32_lsb_to_msb(uint32_t lsb);
- /**
 * @brief Reverse the bit order of 32-bit data, MSB-first to LSB-first
 *
 * @param[in] msb input data in MSB-first bit order
 * @return uint32_t data with the bit order reversed (LSB-first)
- */
- uint32_t hpm_math_sw_reverse_bit32_msb_to_lsb(uint32_t msb);
- #endif
- #ifdef HPM_MATH_DSP_COMPLEX
- /**
- * @defgroup complex DSP Complex Functions
- * This set of functions operates on complex data vectors.
- * The data in the input <code>src</code> vector and output <code>dst</code>
 * are arranged in the array as: [real, imag, real, imag, real, imag, ...].
- * @ingroup hpmmath
- * @{
- */
- #ifdef HPM_EN_MATH_DSP_LIB
- #ifdef __zcc__
- #include "tpt_math.h"
- #endif
- #include "riscv_dsp_complex_math.h"
- // Complex Conjugate
/**
 * @brief Conjugate the floating-point complex vector.
 * @param[in] *src the input complex vector.
 * @param[out] *dst the output complex vector.
 * @param[in] size size of the vectors.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_cconj_f32(const float32_t *src, float32_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes dst as its first argument. */
    tpt_cmplx_conj_f32(dst, src, size);
#else
    riscv_dsp_cconj_f32(src, dst, size);
#endif
#endif
}
/**
 * @brief Conjugate the q15 complex vector.
 * @param[in] *src the input complex vector.
 * @param[out] *dst the output complex vector.
 * @param[in] size size of the vectors.
 *
 * The Q15 value INT16_MIN (0x8000) will be saturated to the maximum
 * allowable positive value INT16_MAX.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_cconj_q15(const q15_t *src, q15_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes dst as its first argument. */
    tpt_cmplx_conj_q15(dst, src, size);
#else
    riscv_dsp_cconj_q15(src, dst, size);
#endif
#endif
}
/**
 * @brief Conjugate the q31 complex vector.
 * @param[in] *src the input complex vector.
 * @param[out] *dst the output complex vector.
 * @param[in] size size of the vectors.
 *
 * The Q31 value INT32_MIN (0x80000000) will be saturated to the maximum
 * allowable positive value INT32_MAX.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_cconj_q31(const q31_t *src, q31_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes dst as its first argument. */
    tpt_cmplx_conj_q31(dst, src, size);
#else
    riscv_dsp_cconj_q31(src, dst, size);
#endif
#endif
}
- // Complex Dot Product
/**
 * @brief Compute the dot product of the floating-point complex vector.
 * @param[in] *src1 the first input complex vector.
 * @param[in] *src2 the second input complex vector.
 * @param[in] size size of the vectors.
 * @param[out] *dst the output vector.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_cdprod_f32(const float32_t *src1, const float32_t *src2, uint32_t size, float32_t *dst)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_cdprod_f32(src1, src2, size, dst);
#endif
}
/**
 * @brief Compute the dot product type2 of the floating-point complex vector.
 * @param[in] *src1 the first input complex vector.
 * @param[in] *src2 the second input complex vector.
 * @param[in] size size of the vectors.
 * @param[out] *rout the real sum of the output.
 * @param[out] *iout the imag sum of the output.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_cdprod_typ2_f32(const float32_t *src1, const float32_t *src2, uint32_t size, float32_t *rout, float32_t *iout)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes the outputs as its first arguments. */
    tpt_cmplx_dot_prod_f32(rout, iout, src1, src2, size);
#else
    riscv_dsp_cdprod_typ2_f32(src1, src2, size, rout, iout);
#endif
#endif
}
/**
 * @brief Compute the dot product of the q15 complex vector.
 * @param[in] *src1 the first input complex vector.
 * @param[in] *src2 the second input complex vector.
 * @param[in] size size of the vectors.
 * @param[out] *dst the output vector.
 *
 * The multiplication outputs are in 1.15 x 1.15 = 2.30 format and
 * finally output is shift into 3.13 format.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_cdprod_q15(const q15_t *src1, const q15_t *src2, uint32_t size, q15_t *dst)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_cdprod_q15(src1, src2, size, dst);
#endif
}
/**
 * @brief Compute the dot product type2 of the q15 complex vector.
 * @param[in] *src1 the first input complex vector.
 * @param[in] *src2 the second input complex vector.
 * @param[in] size size of the vectors.
 * @param[out] *rout the real sum of the output.
 * @param[out] *iout the imag sum of the output.
 *
 * The multiplication outputs are in 1.15 x 1.15 = 2.30 format and
 * finally output is shift into q24 format.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_cdprod_typ2_q15(const q15_t *src1, const q15_t *src2, uint32_t size, q31_t *rout, q31_t *iout)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_cdprod_typ2_q15(src1, src2, size, rout, iout);
#endif
}
/**
 * @brief Compute the dot product of the q31 complex vector.
 * @param[in] *src1 the first input complex vector.
 * @param[in] *src2 the second input complex vector.
 * @param[in] size size of the vectors.
 * @param[out] *dst the output vector.
 *
 * The multiplication outputs are in 1.31 x 1.31 = 2.62 format and
 * finally output is shift into 3.29 format.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_cdprod_q31(const q31_t *src1, const q31_t *src2, uint32_t size, q31_t *dst)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_cdprod_q31(src1, src2, size, dst);
#endif
}
/**
 * @brief Compute the dot product type2 of the q31 complex vector.
 * @param[in] *src1 the first input complex vector.
 * @param[in] *src2 the second input complex vector.
 * @param[in] size size of the vectors.
 * @param[out] *rout the real sum of the output.
 * @param[out] *iout the imag sum of the output.
 *
 * The multiplication outputs are in 1.31 x 1.31 = 2.62 format and
 * finally output is shift into q48 format.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_cdprod_typ2_q31(const q31_t *src1, const q31_t *src2, uint32_t size, q63_t *rout, q63_t *iout)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes the outputs as its first arguments. */
    tpt_cmplx_dot_prod_q31(rout, iout, src1, src2, size);
#else
    riscv_dsp_cdprod_typ2_q31(src1, src2, size, rout, iout);
#endif
#endif
}
- // Complex Magnitude
/**
 * @brief Compute the magnitude of the floating-point complex vector.
 * @param[in] *src points to the input complex vector.
 * @param[out] *dst points to the output complex vector.
 * @param[in] size size of the vectors.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_cmag_f32(const float32_t *src, float32_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes dst as its first argument. */
    tpt_cmplx_mag_f32(dst, src, size);
#else
    riscv_dsp_cmag_f32(src, dst, size);
#endif
#endif
}
/**
 * @brief Compute the magnitude of the q15 complex vector.
 * @param[in] *src points to the input complex vector.
 * @param[out] *dst points to the output complex vector.
 * @param[in] size size of the vectors.
 *
 * The multiplication outputs are in 1.15 x 1.15 = 2.30 format and
 * finally output is shift into 2.14 format.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_cmag_q15(const q15_t *src, q15_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes dst as its first argument. */
    tpt_cmplx_mag_q15(dst, src, size);
#else
    riscv_dsp_cmag_q15(src, dst, size);
#endif
#endif
}
/**
 * @brief Compute the magnitude of the q31 complex vector.
 * @param[in] *src points to the input complex vector.
 * @param[out] *dst points to the output complex vector.
 * @param[in] size size of the vectors.
 *
 * The multiplication outputs are in 1.31 x 1.31 = 2.62 format and
 * finally output is shift into 2.30 format.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_cmag_q31(const q31_t *src, q31_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes dst as its first argument. */
    tpt_cmplx_mag_q31(dst, src, size);
#else
    riscv_dsp_cmag_q31(src, dst, size);
#endif
#endif
}
- // Complex Magnitude Squared
/**
 * @brief Compute the magnitude squared of the floating-point complex
 * vector.
 * @param[in] *src points to the input complex vector.
 * @param[out] *dst points to the output complex vector.
 * @param[in] size size of the vectors.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_cmag_sqr_f32(const float32_t *src, float32_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes dst as its first argument. */
    tpt_cmplx_mag_squared_f32(dst, src, size);
#else
    riscv_dsp_cmag_sqr_f32(src, dst, size);
#endif
#endif
}
/**
 * @brief Compute the magnitude squared of the q15 complex vector.
 * @param[in] *src points to the input complex vector.
 * @param[out] *dst points to the output complex vector.
 * @param[in] size size of the vectors.
 *
 * The multiplication outputs are in 1.15 x 1.15 = 2.30 format and
 * finally output is shift into 3.13 format.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_cmag_sqr_q15(const q15_t *src, q15_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes dst as its first argument. */
    tpt_cmplx_mag_squared_q15(dst, src, size);
#else
    riscv_dsp_cmag_sqr_q15(src, dst, size);
#endif
#endif
}
/**
 * @brief Compute the magnitude squared of the q31 complex vector.
 * @param[in] *src points to the input complex vector.
 * @param[out] *dst points to the output complex vector.
 * @param[in] size size of the vectors.
 *
 * The multiplication outputs are in 1.31 x 1.31 = 2.62 format and
 * finally output is shift into 3.29 format.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_cmag_sqr_q31(const q31_t *src, q31_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes dst as its first argument. */
    tpt_cmplx_mag_squared_q31(dst, src, size);
#else
    riscv_dsp_cmag_sqr_q31(src, dst, size);
#endif
#endif
}
- // Complex Multiplication
/**
 * @brief Multiply two floating-point complex vector.
 * @param[in] *src1 the first input complex vector.
 * @param[in] *src2 the second input complex vector.
 * @param[out] *dst output complex vector.
 * @param[in] size size of the vectors.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_cmul_f32(const float32_t *src1, const float32_t *src2, float32_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes dst as its first argument. */
    tpt_cmplx_mult_cmplx_f32(dst, src1, src2, size);
#else
    riscv_dsp_cmul_f32(src1, src2, dst, size);
#endif
#endif
}
/**
 * @brief Multiply two q15 complex vector.
 * @param[in] *src1 the first input complex vector.
 * @param[in] *src2 the second input complex vector.
 * @param[out] *dst output complex vector.
 * @param[in] size size of the vectors.
 *
 * The multiplication outputs are in 1.15 x 1.15 = 2.30 format and
 * finally output is shift into 3.13 format.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_cmul_q15(const q15_t *src1, const q15_t *src2, q15_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes dst as its first argument. */
    tpt_cmplx_mult_cmplx_q15(dst, src1, src2, size);
#else
    riscv_dsp_cmul_q15(src1, src2, dst, size);
#endif
#endif
}
/**
 * @brief Multiply two q31 complex vector.
 * @param[in] *src1 the first input complex vector.
 * @param[in] *src2 the second input complex vector.
 * @param[out] *dst output complex vector.
 * @param[in] size size of the vectors.
 *
 * The multiplication outputs are in 1.31 x 1.31 = 2.62 format and
 * finally output is shift into 3.29 format.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_cmul_q31(const q31_t *src1, const q31_t *src2, q31_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes dst as its first argument. */
    tpt_cmplx_mult_cmplx_q31(dst, src1, src2, size);
#else
    riscv_dsp_cmul_q31(src1, src2, dst, size);
#endif
#endif
}
- // Complex-by-Real Multiplication
/**
 * @brief Multiply the floating-point complex vector by a real vector.
 * @param[in] *src the input complex vector.
 * @param[in] *real the input real vector.
 * @param[out] *dst output complex vector.
 * @param[in] size size of the vectors.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_cmul_real_f32(const float32_t *src, const float32_t *real, float32_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes dst as its first argument. */
    tpt_cmplx_mult_real_f32(dst, src, real, size);
#else
    riscv_dsp_cmul_real_f32(src, real, dst, size);
#endif
#endif
}
/**
 * @brief Multiply the q15 complex vector by a real vector.
 * @param[in] *src the input complex vector.
 * @param[in] *real the input real vector.
 * @param[out] *dst output complex vector.
 * @param[in] size size of the vectors.
 *
 * Output results will be saturated in Q15 range [0x8000 0x7FFF].
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_cmul_real_q15(const q15_t *src, const q15_t *real, q15_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes dst as its first argument. */
    tpt_cmplx_mult_real_q15(dst, src, real, size);
#else
    riscv_dsp_cmul_real_q15(src, real, dst, size);
#endif
#endif
}
/**
 * @brief Multiply the q31 complex vector by a real vector.
 * @param[in] *src the input complex vector.
 * @param[in] *real the input real vector.
 * @param[out] *dst output complex vector.
 * @param[in] size size of the vectors.
 *
 * Output results will be saturated in Q31 range[0x80000000 0x7FFFFFFF].
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_cmul_real_q31(const q31_t *src, const q31_t *real, q31_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    /* zcc toolchain: TPT kernel takes dst as its first argument. */
    tpt_cmplx_mult_real_q31(dst, src, real, size);
#else
    riscv_dsp_cmul_real_q31(src, real, dst, size);
#endif
#endif
}
- #endif
- #endif
- /**
- * @}
- *
- */
- #ifdef HPM_MATH_DSP_CONTROLLER
- /**
- * @defgroup controller DSP Controller Functions
- * @ingroup hpmmath
- * @{
- */
- #ifdef HPM_EN_MATH_DSP_LIB
- #include "riscv_dsp_controller_math.h"
- // Clarke Transform
/**
 * @brief Clarke transform of floating-point input.
 * @param[in] a input three-phase coordinate a.
 * @param[in] b input three-phase coordinate b.
 * @param[out] *alpha output two-phase orthogonal vector axis alpha.
 * @param[out] *beta output two-phase orthogonal vector axis beta.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_clarke_f32(float32_t a, float32_t b, float32_t *alpha, float32_t *beta)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_clarke_f32(a, b, alpha, beta);
#endif
}
/**
 * @brief Clarke transform of q31 input.
 * @param[in] a input three-phase coordinate a.
 * @param[in] b input three-phase coordinate b.
 * @param[out] *alpha output two-phase orthogonal vector axis alpha.
 * @param[out] *beta output two-phase orthogonal vector axis beta.
 *
 * The internal 32-bit accumulator maintains 1.31 format by truncating lower
 * 31 bits of the intermediate multiplication in 2.62 format.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_clarke_q31(q31_t a, q31_t b, q31_t *alpha, q31_t *beta)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_clarke_q31(a, b, alpha, beta);
#endif
}
- // Inverse Clarke Transform
/**
 * @brief Inverse Clarke transform of floating-point input.
 * @param[in] alpha input two-phase orthogonal vector axis alpha.
 * @param[in] beta input two-phase orthogonal vector axis beta.
 * @param[out] *a output three-phase coordinate a.
 * @param[out] *b output three-phase coordinate b.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_inv_clarke_f32(float32_t alpha, float32_t beta, float32_t *a, float32_t *b)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_inv_clarke_f32(alpha, beta, a, b);
#endif
}
/**
 * @brief Inverse Clarke transform of q31 input.
 * @param[in] alpha input two-phase orthogonal vector axis alpha.
 * @param[in] beta input two-phase orthogonal vector axis beta.
 * @param[out] *a output three-phase coordinate a.
 * @param[out] *b output three-phase coordinate b.
 *
 * The internal 32-bit accumulator maintains 1.31 format by truncating lower
 * 31 bits of the intermediate multiplication in 2.62 format.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_inv_clarke_q31(q31_t alpha, q31_t beta, q31_t *a, q31_t *b)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_inv_clarke_q31(alpha, beta, a, b);
#endif
}
- // Park Transform
/**
 * @brief Park transform of floating-point input.
 * @param[in] alpha input two-phase coordinate alpha.
 * @param[in] beta input two-phase coordinate beta.
 * @param[out] *a output rotor frame a.
 * @param[out] *b output rotor frame b.
 * @param[in] sin sine value of rotation angle.
 * @param[in] cos cosine value of rotation angle.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_park_f32(float32_t alpha, float32_t beta, float32_t *a, float32_t *b, float32_t sin, float32_t cos)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_park_f32(alpha, beta, a, b, sin, cos);
#endif
}
/**
 * @brief Park transform of q31 input.
 * @param[in] alpha input two-phase coordinate alpha.
 * @param[in] beta input two-phase coordinate beta.
 * @param[out] *a output rotor frame a.
 * @param[out] *b output rotor frame b.
 * @param[in] sin sine value of rotation angle.
 * @param[in] cos cosine value of rotation angle.
 *
 * The internal 32-bit accumulator maintains 1.31 format by truncating lower
 * 31 bits of the intermediate multiplication in 2.62 format.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_park_q31(q31_t alpha, q31_t beta, q31_t *a, q31_t *b, q31_t sin, q31_t cos)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_park_q31(alpha, beta, a, b, sin, cos);
#endif
}
- // Inverse Park Transform
/**
 * @brief Inverse Park transform of floating-point input.
 * @param[in] a input coordinate of rotor frame a.
 * @param[in] b input coordinate of rotor frame b.
 * @param[out] *alpha output two-phase orthogonal vec axis alpha.
 * @param[out] *beta output two-phase orthogonal vec axis beta.
 * @param[in] sin sine value of rotation angle.
 * @param[in] cos cosine value of rotation angle.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_inv_park_f32(float32_t a, float32_t b, float32_t *alpha, float32_t *beta, float32_t sin, float32_t cos)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_inv_park_f32(a, b, alpha, beta, sin, cos);
#endif
}
/**
 * @brief Inverse Park transform of q31 input.
 * @param[in] a input coordinate of rotor frame a.
 * @param[in] b input coordinate of rotor frame b.
 * @param[out] *alpha output two-phase orthogonal vec axis alpha.
 * @param[out] *beta output two-phase orthogonal vec axis beta.
 * @param[in] sin sine value of rotation angle.
 * @param[in] cos cosine value of rotation angle.
 *
 * The internal 32-bit accumulator maintains 1.31 format by truncating lower
 * 31 bits of the intermediate multiplication in 2.62 format.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_inv_park_q31(q31_t a, q31_t b, q31_t *alpha, q31_t *beta, q31_t sin, q31_t cos)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_inv_park_q31(a, b, alpha, beta, sin, cos);
#endif
}
/**
 * @brief PID control of floating-point input.
 * @param[in, out] *instance points to an instance of the PID
 * controller.
 * @param[in] src input data.
 * @return output data.
 *
 * @note NOTE(review): there is no return statement when HPM_DSP_CORE is not
 * HPM_DSP_HW_NDS32, which is undefined behavior if the result is used; the
 * library appears to assume the NDS32 configuration — confirm.
 */
static inline float32_t hpm_dsp_pid_f32(riscv_dsp_pid_f32_t *instance, float32_t src)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    return riscv_dsp_pid_f32(instance, src);
#endif
}
/**
 * @brief PID initialization control function of floating-point formats.
 * @param[in, out] *instance points to an instance of the PID
 * controller.
 * @param[in] set for 1 will clear the state to all zeros
 * 0 will not.
 *
 * This function will calculate the PID control structure gain
 * <code>gain1</code>, <code>gain2</code> and <code>gain3</code> by setting
 * the variable <code>Kp</code>, <code>Ki</code> and <code>Kd</code>. The
 * state variable will set to all zeros.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_init_pid_f32(riscv_dsp_pid_f32_t *instance, int32_t set)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_init_pid_f32(instance, set);
#endif
}
/**
 * @brief PID control of Q31 input.
 * @param[in, out] *instance points to an instance of the PID
 * controller.
 * @param[in] src input data.
 * @return output data.
 *
 * @note NOTE(review): there is no return statement when HPM_DSP_CORE is not
 * HPM_DSP_HW_NDS32, which is undefined behavior if the result is used; the
 * library appears to assume the NDS32 configuration — confirm.
 */
static inline q31_t hpm_dsp_pid_q31(riscv_dsp_pid_q31_t *instance, q31_t src)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    return riscv_dsp_pid_q31(instance, src);
#endif
}
/**
 * @brief PID initialization control function of Q31 formats.
 * @param[in, out] *instance points to an instance of the PID
 * controller.
 * @param[in] set for 1 will clear the state to all zeros
 * 0 will not.
 *
 * This function will calculate the PID control structure gain
 * <code>gain1</code>, <code>gain2</code> and <code>gain3</code> by setting
 * the variable <code>Kp</code>, <code>Ki</code> and <code>Kd</code>. The
 * state variable will set to all zeros.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_init_pid_q31(riscv_dsp_pid_q31_t *instance, int32_t set)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_init_pid_q31(instance, set);
#endif
}
/**
 * @brief PID control of Q15 input.
 * @param[in, out] *instance points to an instance of the PID
 * controller.
 * @param[in] src input data.
 * @return output data.
 *
 * @note NOTE(review): there is no return statement when HPM_DSP_CORE is not
 * HPM_DSP_HW_NDS32, which is undefined behavior if the result is used; the
 * library appears to assume the NDS32 configuration — confirm.
 */
static inline q15_t hpm_dsp_pid_q15(riscv_dsp_pid_q15_t *instance, q15_t src)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    return riscv_dsp_pid_q15(instance, src);
#endif
}
/**
 * @brief PID initialization control function of Q15 formats.
 * @param[in, out] *instance points to an instance of the PID
 * controller.
 * @param[in] set for 1 will clear the state to all zeros
 * 0 will not.
 *
 * This function will calculate the PID control structure gain
 * <code>gain1</code>, <code>gain2</code> and <code>gain3</code> by setting
 * the variable <code>Kp</code>, <code>Ki</code> and <code>Kd</code>. The
 * state variable will set to all zeros.
 *
 * @note No-op when HPM_DSP_CORE is not HPM_DSP_HW_NDS32.
 */
static inline void hpm_dsp_init_pid_q15(riscv_dsp_pid_q15_t *instance, int32_t set)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_init_pid_q15(instance, set);
#endif
}
- #endif
- #endif
- /**
- * @}
- *
- */
- #ifdef HPM_MATH_DSP_DISTANCE
- /**
- * @defgroup dist DSP Distance Functions
- * @ingroup hpmmath
- * @{
- */
- #ifdef HPM_EN_MATH_DSP_LIB
- #ifdef __zcc__
- #include "tpt_math.h"
- #endif
- #include "riscv_dsp_distance_math.h"
/**
 * @brief Bray-Curtis distance between two vectors
 * @param[in] src1 First vector
 * @param[in] src2 Second vector
 * @param[in] size vector length
 * @return distance
 *
 * @note NOTE(review): there is no return statement when HPM_DSP_CORE is not
 * HPM_DSP_HW_NDS32 (UB if the result is used) — presumably NDS32-only; confirm.
 */
static inline float32_t hpm_dsp_dist_bray_curtis_f32(const float32_t *src1, const float32_t *src2, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    return tpt_braycurtis_distance_f32(src1, src2, size);
#else
    return riscv_dsp_dist_bray_curtis_f32(src1, src2, size);
#endif
#endif
}
/**
 * @brief Canberra distance between two vectors
 * @param[in] src1 First vector
 * @param[in] src2 Second vector
 * @param[in] size vector length
 * @return distance
 *
 * @note NOTE(review): there is no return statement when HPM_DSP_CORE is not
 * HPM_DSP_HW_NDS32 (UB if the result is used) — presumably NDS32-only; confirm.
 */
static inline float32_t hpm_dsp_dist_canberra_f32(const float32_t *src1, const float32_t *src2, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    return tpt_canberra_distance_f32(src1, src2, size);
#else
    return riscv_dsp_dist_canberra_f32(src1, src2, size);
#endif
#endif
}
/**
 * @brief Chebyshev distance between two vectors
 * @param[in] src1 First vector
 * @param[in] src2 Second vector
 * @param[in] size vector length
 * @return distance
 *
 * @note NOTE(review): there is no return statement when HPM_DSP_CORE is not
 * HPM_DSP_HW_NDS32 (UB if the result is used) — presumably NDS32-only; confirm.
 */
static inline float32_t hpm_dsp_dist_chebyshev_f32(const float32_t *src1, const float32_t *src2, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    return tpt_chebyshev_distance_f32(src1, src2, size);
#else
    return riscv_dsp_dist_chebyshev_f32(src1, src2, size);
#endif
#endif
}
/**
 * @brief Cityblock (Manhattan) distance between two vectors
 * @param[in] src1 First vector
 * @param[in] src2 Second vector
 * @param[in] size vector length
 * @return distance
 *
 * @note NOTE(review): there is no return statement when HPM_DSP_CORE is not
 * HPM_DSP_HW_NDS32 (UB if the result is used) — presumably NDS32-only; confirm.
 */
static inline float32_t hpm_dsp_dist_city_block_f32(const float32_t *src1, const float32_t *src2, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    return tpt_cityblock_distance_f32(src1, src2, size);
#else
    return riscv_dsp_dist_city_block_f32(src1, src2, size);
#endif
#endif
}
/**
 * @brief Correlation distance between two vectors
 * @param[in] src1 First vector
 * @param[in] src2 Second vector
 * @param[in] size vector length
 * @return distance
 *
 * @note NOTE(review): there is no return statement when HPM_DSP_CORE is not
 * HPM_DSP_HW_NDS32 (UB if the result is used) — presumably NDS32-only; confirm.
 */
static inline float32_t hpm_dsp_dist_corr_f32(const float32_t *src1, const float32_t *src2, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    return tpt_correlation_distance_f32(src1, src2, size);
#else
    return riscv_dsp_dist_corr_f32(src1, src2, size);
#endif
#endif
}
/**
 * @brief Cosine distance between two vectors
 * @param[in] src1 First vector
 * @param[in] src2 Second vector
 * @param[in] size vector length
 * @return distance
 *
 * @note NOTE(review): there is no return statement when HPM_DSP_CORE is not
 * HPM_DSP_HW_NDS32 (UB if the result is used) — presumably NDS32-only; confirm.
 */
static inline float32_t hpm_dsp_dist_cos_f32(const float32_t *src1, const float32_t *src2, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    return tpt_cosine_distance_f32(src1, src2, size);
#else
    return riscv_dsp_dist_cos_f32(src1, src2, size);
#endif
#endif
}
/**
 * @brief Euclidean distance between two vectors
 * @param[in] src1 First vector
 * @param[in] src2 Second vector
 * @param[in] size vector length
 * @return distance
 *
 * @note NOTE(review): there is no return statement when HPM_DSP_CORE is not
 * HPM_DSP_HW_NDS32 (UB if the result is used) — presumably NDS32-only; confirm.
 */
static inline float32_t hpm_dsp_dist_euclidean_f32(const float32_t *src1, const float32_t *src2, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    return tpt_euclidean_distance_f32(src1, src2, size);
#else
    return riscv_dsp_dist_euclidean_f32(src1, src2, size);
#endif
#endif
}
/**
 * @brief Jensen-Shannon distance between two vectors
 * @param[in] src1 First vector
 * @param[in] src2 Second vector
 * @param[in] size vector length
 * @return distance
 *
 * @note NOTE(review): there is no return statement when HPM_DSP_CORE is not
 * HPM_DSP_HW_NDS32 (UB if the result is used) — presumably NDS32-only; confirm.
 */
static inline float32_t hpm_dsp_dist_jensen_shannon_f32(const float32_t *src1, const float32_t *src2, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    return tpt_jensenshannon_distance_f32(src1, src2, size);
#else
    return riscv_dsp_dist_jensen_shannon_f32(src1, src2, size);
#endif
#endif
}
/**
 * @brief Minkowski distance between two vectors
 * @param[in] src1 First vector
 * @param[in] src2 Second vector
 * @param[in] order Distance order
 * @param[in] size vector length
 * @return distance
 *
 * @note NOTE(review): there is no return statement when HPM_DSP_CORE is not
 * HPM_DSP_HW_NDS32 (UB if the result is used) — presumably NDS32-only; confirm.
 */
static inline float32_t hpm_dsp_dist_minkowski_f32(const float32_t *src1, const float32_t *src2, int32_t order, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    return tpt_minkowski_distance_f32(src1, src2, order, size);
#else
    return riscv_dsp_dist_minkowski_f32(src1, src2, order, size);
#endif
#endif
}
- /**
- * @brief Dice distance between two vectors
- * @param[in] src1 First vector
- * @param[in] src2 Second vector
- * @param[in] numofbool Number of booleans
- * @return distance
- */
- static inline float32_t hpm_dsp_bdist_dice_u32_f32(const uint32_t *src1, const uint32_t *src2, uint32_t numofbool)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_dice_distance(src1, src2, numofbool);
- #else
- return riscv_dsp_bdist_dice_u32_f32(src1, src2, numofbool);
- #endif
- #endif
- }
- /**
- * @brief Hamming distance between two vectors
- * @param[in] src1 First vector
- * @param[in] src2 Second vector
- * @param[in] numofbool Number of booleans
- * @return distance
- */
- static inline float32_t hpm_dsp_bdist_hamming_u32_f32(const uint32_t *src1, const uint32_t *src2, uint32_t numofbool)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_hamming_distance(src1, src2, numofbool);
- #else
- return riscv_dsp_bdist_hamming_u32_f32(src1, src2, numofbool);
- #endif
- #endif
- }
- /**
- * @brief Jaccard distance between two vectors
- * @param[in] src1 First vector
- * @param[in] src2 Second vector
- * @param[in] numofbool Number of booleans
- * @return distance
- */
- static inline float32_t hpm_dsp_bdist_jaccard_u32_f32(const uint32_t *src1, const uint32_t *src2, uint32_t numofbool)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_jaccard_distance(src1, src2, numofbool);
- #else
- return riscv_dsp_bdist_jaccard_u32_f32(src1, src2, numofbool);
- #endif
- #endif
- }
- /**
- * @brief Kulsinski distance between two vectors
- * @param[in] src1 First vector
- * @param[in] src2 Second vector
- * @param[in] numofbool Number of booleans
- * @return distance
- */
- static inline float32_t hpm_dsp_bdist_kulsinski_u32_f32(const uint32_t *src1, const uint32_t *src2, uint32_t numofbool)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_kulsinski_distance(src1, src2, numofbool);
- #else
- return riscv_dsp_bdist_kulsinski_u32_f32(src1, src2, numofbool);
- #endif
- #endif
- }
- /**
- * @brief Sokal-Michener distance between two vectors
- * @param[in] src1 First vector
- * @param[in] src2 Second vector
- * @param[in] numofbool Number of booleans
- * @return distance
- */
- static inline float32_t hpm_dsp_bdist_sokal_michener_u32_f32(const uint32_t *src1, const uint32_t *src2, uint32_t numofbool)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_sokalmichener_distance(src1, src2, numofbool);
- #else
- return riscv_dsp_bdist_sokal_michener_u32_f32(src1, src2, numofbool);
- #endif
- #endif
- }
- /**
- * @brief Sokal-Sneath distance between two vectors
- * @param[in] src1 First vector
- * @param[in] src2 Second vector
- * @param[in] numofbool Number of booleans
- * @return distance
- */
- static inline float32_t hpm_dsp_bdist_sokal_sneath_u32_f32(const uint32_t *src1, const uint32_t *src2, uint32_t numofbool)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_sokalsneath_distance(src1, src2, numofbool);
- #else
- return riscv_dsp_bdist_sokal_sneath_u32_f32(src1, src2, numofbool);
- #endif
- #endif
- }
- /**
- * @brief Roger Stanimoto distance between two vectors
- * @param[in] src1 First vector
- * @param[in] src2 Second vector
- * @param[in] numofbool Number of booleans
- * @return distance
- */
- static inline float32_t hpm_dsp_bdist_rogers_tanimoto_u32_f32(const uint32_t *src1, const uint32_t *src2, uint32_t numofbool)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_rogerstanimoto_distance(src1, src2, numofbool);
- #else
- return riscv_dsp_bdist_rogers_tanimoto_u32_f32(src1, src2, numofbool);
- #endif
- #endif
- }
- /**
- * @brief Yule distance between two vectors
- * @param[in] src1 First vector
- * @param[in] src2 Second vector
- * @param[in] numofbool Number of booleans
- * @return distance
- */
- static inline float32_t hpm_dsp_bdist_yule_u32_f32(const uint32_t *src1, const uint32_t *src2, uint32_t numofbool)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_yule_distance(src1, src2, numofbool);
- #else
- return riscv_dsp_bdist_yule_u32_f32(src1, src2, numofbool);
- #endif
- #endif
- }
- /**
- * @brief Russell-Rao distance between two vectors
- * @param[in] src1 First vector
- * @param[in] src2 Second vector
- * @param[in] numofbool Number of booleans
- * @return distance
- */
- static inline float32_t hpm_dsp_bdist_russell_rao_u32_f32(const uint32_t *src1, const uint32_t *src2, uint32_t numofbool)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_russellrao_distance(src1, src2, numofbool);
- #else
- return riscv_dsp_bdist_russell_rao_u32_f32(src1, src2, numofbool);
- #endif
- #endif
- }
- #endif
- #endif
- /**
- * @}
- *
- */
- #ifdef HPM_MATH_DSP_FILTERING
- /**
- * @defgroup filtering DSP Filtering Functions
- * @ingroup hpmmath
- * @{
- */
- #ifdef HPM_EN_MATH_DSP_LIB
- #ifdef __zcc__
- #include "tpt_math.h"
- #endif
- #include "riscv_dsp_filtering_math.h"
/**
 * @brief Function for the floating-point FIR filter.
 * @param[in] *instance points to an instance of the FIR structure.
 * @param[in] *src points to the input block data.
 * @param[out] *dst points to the output block data.
 * @param[in] size number of the blocksize.
 *
 * Thin wrapper: forwards to riscv_dsp_fir_f32() when the NDS32 DSP core is
 * selected; otherwise the call is a no-op.
 */
static inline void hpm_dsp_fir_f32(const riscv_dsp_fir_f32_t *instance, float32_t *src, float32_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_fir_f32(instance, src, dst, size);
#endif
}
/**
 * @brief Function for the q31 FIR filter.
 * @param[in] *instance points to an instance of the FIR structure.
 * @param[in] *src points to the input block data.
 * @param[out] *dst points to the output block data.
 * @param[in] size number of the blocksize.
 *
 * Function notes:
 * Both coefficients and state variables are represented in 1.31 format
 * and multiplications yield a 2.62 result. The 2.62 results are accumulated
 * in a 64-bit accumulator and is right shifted by 31 bits and saturated to
 * 1.31 format to yield the final result. In order to avoid overflows
 * completely the input signal must be scaled down by log2(coeff_size) bits.
 */
static inline void hpm_dsp_fir_q31(const riscv_dsp_fir_q31_t *instance, q31_t *src, q31_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_fir_q31(instance, src, dst, size);
#endif
}
/**
 * @brief Function for the q31 FIR filter (fast variant).
 * @param[in] *instance points to an instance of the FIR structure.
 * @param[in] *src points to the input block data.
 * @param[out] *dst points to the output block data.
 * @param[in] size number of the blocksize.
 *
 * Function notes:
 * Both coefficients and state variables are represented in 1.31 format.
 * These intermediate multiplications results are added to a 2.30 accumulator.
 * Finally, the accumulator is saturated and
 * converted to a 1.31 result. In order to avoid overflows
 * completely the input signal must be scaled down by log2(coeff_size) bits.
 */
static inline void hpm_dsp_fir_fast_q31(const riscv_dsp_fir_q31_t *instance, q31_t *src, q31_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_fir_fast_q31(instance, src, dst, size);
#endif
}
/**
 * @brief Function for the q15 FIR filter.
 * @param[in] *instance points to an instance of the FIR structure.
 * @param[in] *src points to the input block data.
 * @param[out] *dst points to the output block data.
 * @param[in] size number of the blocksize.
 *
 * Function notes:
 * Both coefficients and state variables are represented in 1.15 format
 * and multiplications yield a 2.30 result. The 2.30 results are accumulated
 * in a 64-bit accumulator in 34.30 format and the result is truncated
 * to 34.15 format by discarding the low 15 bits. Lastly, the output is
 * saturated to yield a result in 1.15 format.
 */
static inline void hpm_dsp_fir_q15(const riscv_dsp_fir_q15_t *instance, q15_t *src, q15_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_fir_q15(instance, src, dst, size);
#endif
}
/**
 * @brief Function for the q15 FIR filter (fast variant).
 * @param[in] *instance points to an instance of the FIR structure.
 * @param[in] *src points to the input block data.
 * @param[out] *dst points to the output block data.
 * @param[in] size number of the blocksize.
 *
 * Function notes:
 * Both coefficients and state variables are represented in Q15 format and multiplications yield
 * a Q30 result. The results are accumulated in a 32-bit accumulator in Q2.30 format. Lastly, the
 * outputs are saturated to yield a result in Q1.15 format.
 */
static inline void hpm_dsp_fir_fast_q15(const riscv_dsp_fir_q15_t *instance, q15_t *src, q15_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_fir_fast_q15(instance, src, dst, size);
#endif
}
/**
 * @brief Function for the q7 FIR filter.
 * @param[in] *instance points to an instance of the FIR structure.
 * @param[in] *src points to the input block data.
 * @param[out] *dst points to the output block data.
 * @param[in] size number of the blocksize.
 *
 * Function notes:
 * Both inputs are in 1.7 format and multiplications yield a 2.14 result.
 * The 2.14 intermediate results are accumulated in a 32-bit accumulator in
 * 18.14 format. The 18.14 result is then converted to 18.7 format by
 * discarding the low 7 bits and then saturated to 1.7 format.
 */
static inline void hpm_dsp_fir_q7(const riscv_dsp_fir_q7_t *instance, q7_t *src, q7_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_fir_q7(instance, src, dst, size);
#endif
}
/**
 * @brief Function for the floating-point lattice FIR filter.
 * @param[in] *instance points to an instance of the lattice
 *                      FIR structure.
 * @param[in] *src points to the input block data.
 * @param[out] *dst points to the output block data.
 * @param[in] size number of the blocksize.
 */
static inline void hpm_dsp_lfir_f32(const riscv_dsp_lfir_f32_t *instance, float32_t *src, float32_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_lfir_f32(instance, src, dst, size);
#endif
}
/**
 * @brief Function for the q15 lattice FIR filter.
 * @param[in] *instance points to an instance of the lattice
 *                      FIR structure.
 * @param[in] *src points to the input block data.
 * @param[out] *dst points to the output block data.
 * @param[in] size number of the blocksize.
 */
static inline void hpm_dsp_lfir_q15(const riscv_dsp_lfir_q15_t *instance, q15_t *src, q15_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_lfir_q15(instance, src, dst, size);
#endif
}
/**
 * @brief Function for the q31 lattice FIR filter.
 * @param[in] *instance points to an instance of the lattice
 *                      FIR structure.
 * @param[in] *src points to the input block data.
 * @param[out] *dst points to the output block data.
 * @param[in] size number of the blocksize.
 *
 * Function notes:
 * In order to avoid overflows the input signal must be scaled down by
 * 2*log2(stage) bits.
 */
static inline void hpm_dsp_lfir_q31(const riscv_dsp_lfir_q31_t *instance, q31_t *src, q31_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_lfir_q31(instance, src, dst, size);
#endif
}
/**
 * @brief Floating-point decimating FIR filter (per the riscv_dsp_dcmfir_*
 *        naming — confirm against the Andes DSP library reference).
 * @param[in] *instance points to an instance of the decimator FIR structure.
 * @param[in] *src points to the input block data.
 * @param[out] *dst points to the output block data.
 * @param[in] size number of the blocksize.
 */
static inline void hpm_dsp_dcmfir_f32(const riscv_dsp_dcmfir_f32_t *instance, float32_t *src, float32_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_dcmfir_f32(instance, src, dst, size);
#endif
}
/**
 * @brief q15 decimating FIR filter.
 */
static inline void hpm_dsp_dcmfir_q15(const riscv_dsp_dcmfir_q15_t *instance, q15_t *src, q15_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_dcmfir_q15(instance, src, dst, size);
#endif
}
/**
 * @brief q31 decimating FIR filter.
 */
static inline void hpm_dsp_dcmfir_q31(const riscv_dsp_dcmfir_q31_t *instance, q31_t *src, q31_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_dcmfir_q31(instance, src, dst, size);
#endif
}
/**
 * @brief q31 decimating FIR filter (fast variant).
 */
static inline void hpm_dsp_dcmfir_fast_q31(const riscv_dsp_dcmfir_q31_t *instance, q31_t *src, q31_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_dcmfir_fast_q31(instance, src, dst, size);
#endif
}
/**
 * @brief q15 decimating FIR filter (fast variant).
 */
static inline void hpm_dsp_dcmfir_fast_q15(const riscv_dsp_dcmfir_q15_t *instance, q15_t *src, q15_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_dcmfir_fast_q15(instance, src, dst, size);
#endif
}
/**
 * @brief Floating-point upsampling FIR filter (per the riscv_dsp_upsplfir_*
 *        naming — confirm against the Andes DSP library reference).
 * @param[in] *instance points to an instance of the upsample FIR structure.
 * @param[in] *src points to the input block data.
 * @param[out] *dst points to the output block data.
 * @param[in] size number of the blocksize.
 */
static inline void hpm_dsp_upsplfir_f32(const riscv_dsp_upsplfir_f32_t *instance, float32_t *src, float32_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_upsplfir_f32(instance, src, dst, size);
#endif
}
/**
 * @brief q15 upsampling FIR filter.
 */
static inline void hpm_dsp_upsplfir_q15(const riscv_dsp_upsplfir_q15_t *instance, q15_t *src, q15_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_upsplfir_q15(instance, src, dst, size);
#endif
}
/**
 * @brief q31 upsampling FIR filter.
 */
static inline void hpm_dsp_upsplfir_q31(const riscv_dsp_upsplfir_q31_t *instance, q31_t *src, q31_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_upsplfir_q31(instance, src, dst, size);
#endif
}
/**
 * @brief Floating-point sparse FIR filter (per the riscv_dsp_spafir_*
 *        naming — confirm against the Andes DSP library reference).
 * @param[in] *instance points to an instance of the sparse FIR structure.
 * @param[in] *src points to the input block data.
 * @param[out] *dst points to the output block data.
 * @param[in] *buf points to a scratch buffer used by the implementation.
 * @param[in] size number of the blocksize.
 */
static inline void hpm_dsp_spafir_f32(riscv_dsp_spafir_f32_t *instance, float32_t *src, float32_t *dst, float32_t *buf, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_spafir_f32(instance, src, dst, buf, size);
#endif
}
/**
 * @brief q15 sparse FIR filter; takes two scratch buffers (q15 and q31).
 */
static inline void hpm_dsp_spafir_q15(riscv_dsp_spafir_q15_t *instance, q15_t *src, q15_t *dst, q15_t *buf1, q31_t *buf2, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_spafir_q15(instance, src, dst, buf1, buf2, size);
#endif
}
/**
 * @brief q31 sparse FIR filter.
 */
static inline void hpm_dsp_spafir_q31(riscv_dsp_spafir_q31_t *instance, q31_t *src, q31_t *dst, q31_t *buf, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_spafir_q31(instance, src, dst, buf, size);
#endif
}
/**
 * @brief q7 sparse FIR filter; takes two scratch buffers (q7 and q31).
 */
static inline void hpm_dsp_spafir_q7(riscv_dsp_spafir_q7_t *instance, q7_t *src, q7_t *dst, q7_t *buf1, q31_t *buf2, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_spafir_q7(instance, src, dst, buf1, buf2, size);
#endif
}
// Standard LMS filter
/**
 * @brief Function for the floating-point LMS filter.
 * @param[in] *instance points to an instance of the LMS structure.
 * @param[in] *src points to the input block data.
 * @param[in] *ref points to the reference data.
 * @param[out] *dst points to the output data.
 * @param[out] *err points to the error data.
 * @param[in] size number of the blocksize.
 */
static inline void hpm_dsp_lms_f32(const riscv_dsp_lms_f32_t *instance, float32_t *src, float32_t *ref, float32_t *dst, float32_t *err, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_lms_f32(instance, src, ref, dst, err, size);
#endif
}
/**
 * @brief Function for the q31 LMS filter.
 * @param[in] *instance points to an instance of the LMS structure.
 * @param[in] *src points to the input block data.
 * @param[in] *ref points to the reference data.
 * @param[out] *dst points to the output data.
 * @param[out] *err points to the error data.
 * @param[in] size number of the blocksize.
 *
 * Function notes:
 * Both coefficients and state variables are represented in 1.31 format
 * and multiplications yield a 2.62 result. The 2.62 results are accumulated
 * in a 64-bit accumulator and is right shifted by 31 bits and saturated to
 * 1.31 format to yield the final result. In order to avoid overflows
 * completely the input signal must be scaled down by log2(coeff_size) bits.
 */
static inline void hpm_dsp_lms_q31(const riscv_dsp_lms_q31_t *instance, q31_t *src, q31_t *ref, q31_t *dst, q31_t *err, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_lms_q31(instance, src, ref, dst, err, size);
#endif
}
/**
 * @brief Function for the q15 LMS filter.
 * @param[in] *instance points to an instance of the LMS structure.
 * @param[in] *src points to the input block data.
 * @param[in] *ref points to the reference data.
 * @param[out] *dst points to the output data.
 * @param[out] *err points to the error data.
 * @param[in] size number of the blocksize.
 *
 * Function notes:
 * Both coefficients and state variables are represented in 1.15 format
 * and multiplications yield a 2.30 result. The 2.30 results are accumulated
 * in a 64-bit accumulator in 34.30 format and the result is truncated
 * to 34.15 format by discarding the low 15 bits. Lastly, the output is
 * saturated to yield a result in 1.15 format.
 */
static inline void hpm_dsp_lms_q15(const riscv_dsp_lms_q15_t *instance, q15_t *src, q15_t *ref, q15_t *dst, q15_t *err, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_lms_q15(instance, src, ref, dst, err, size);
#endif
}
/**
 * @brief Function for the f32 normalized LMS filter.
 *        (Note: original comment said "Structure", but this is a function.)
 * @param[in] *instance points to an instance of the NLMS structure.
 * @param[in] *src points to the input block data.
 * @param[in] *ref points to the reference data.
 * @param[out] *dst points to the output data.
 * @param[out] *err points to the error data.
 * @param[in] size number of the blocksize.
 */
static inline void hpm_dsp_nlms_f32(riscv_dsp_nlms_f32_t *instance, float32_t *src, float32_t *ref, float32_t *dst, float32_t *err, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_nlms_f32(instance, src, ref, dst, err, size);
#endif
}
/**
 * @brief Function for the q31 normalized LMS filter.
 *        (Note: original comment said "Structure", but this is a function.)
 * @param[in] *instance points to an instance of the NLMS structure.
 * @param[in] *src points to the input block data.
 * @param[in] *ref points to the reference data.
 * @param[out] *dst points to the output data.
 * @param[out] *err points to the error data.
 * @param[in] size number of the blocksize.
 */
static inline void hpm_dsp_nlms_q31(riscv_dsp_nlms_q31_t *instance, q31_t *src, q31_t *ref, q31_t *dst, q31_t *err, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_nlms_q31(instance, src, ref, dst, err, size);
#endif
}
/**
 * @brief Function for the q15 normalized LMS filter.
 * @param[in] *instance points to an instance of the NLMS structure.
 * @param[in] *src points to the input block data.
 * @param[in] *ref points to the reference data.
 * @param[out] *dst points to the output data.
 * @param[out] *err points to the error data.
 * @param[in] size number of the blocksize.
 */
static inline void hpm_dsp_nlms_q15(riscv_dsp_nlms_q15_t *instance, q15_t *src, q15_t *ref, q15_t *dst, q15_t *err, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_nlms_q15(instance, src, ref, dst, err, size);
#endif
}
// Convolution
/**
 * @brief Convolution of the floating-point vectors.
 * @param[in] *src1 points to the first input vector.
 * @param[in] len1 length of the first input vector.
 * @param[in] *src2 points to the second input vector.
 * @param[in] len2 length of the second input vector.
 * @param[out] *dst points to the output vector where the length is
 *                  len1 + len2 - 1.
 *
 * Note: the Zcc (tpt_*) backend takes the destination as its first argument,
 * while the riscv_dsp_* backend takes it last.
 */
static inline void hpm_dsp_conv_f32(float32_t *src1, uint32_t len1, float32_t *src2, uint32_t len2, float32_t *dst)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    tpt_conv_f32(dst, src1, len1, src2, len2);
#else
    riscv_dsp_conv_f32(src1, len1, src2, len2, dst);
#endif
#endif
}
/**
 * @brief Convolution of the q15 vectors.
 * @param[in] *src1 points to the first input vector.
 * @param[in] len1 length of the first input vector.
 * @param[in] *src2 points to the second input vector.
 * @param[in] len2 length of the second input vector.
 * @param[out] *dst points to the output vector where the length is
 *                  len1 + len2 - 1.
 *
 * Function notes:
 * Both inputs are in 1.15 format and multiplications yield a 2.30 result.
 * The 2.30 intermediate results are accumulated in a 64-bit accumulator in
 * 34.30 format. The 34.30 result is then truncated to 34.15 format by
 * discarding the low 15 bits and then saturated to 1.15 format.
 */
static inline void hpm_dsp_conv_q15(q15_t *src1, uint32_t len1, q15_t *src2, uint32_t len2, q15_t *dst)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    tpt_conv_q15(dst, src1, len1, src2, len2);
#else
    riscv_dsp_conv_q15(src1, len1, src2, len2, dst);
#endif
#endif
}
/**
 * @brief Convolution of the q31 vectors.
 * @param[in] *src1 points to the first input vector.
 * @param[in] len1 length of the first input vector.
 * @param[in] *src2 points to the second input vector.
 * @param[in] len2 length of the second input vector.
 * @param[out] *dst points to the output vector where the length is
 *                  len1 + len2 - 1.
 *
 * Function notes:
 * Both inputs are in 1.31 format and the 64-bit accumulator has a 2.62
 * format and maintains full precision of the intermediate multiplication
 * results but provides only a single guard bit. The input signals should be
 * scaled down to avoid intermediate overflows. Scale down the inputs by
 * log2(min(srcALen, srcBLen)), The 2.62 accumulator is right shifted by 31
 * bits and saturated to 1.31 format to yield the final result.
 */
static inline void hpm_dsp_conv_q31(q31_t *src1, uint32_t len1, q31_t *src2, uint32_t len2, q31_t *dst)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    tpt_conv_q31(dst, src1, len1, src2, len2);
#else
    riscv_dsp_conv_q31(src1, len1, src2, len2, dst);
#endif
#endif
}
/**
 * @brief Convolution of the q7 vectors.
 * @param[in] *src1 points to the first input vector.
 * @param[in] len1 length of the first input vector.
 * @param[in] *src2 points to the second input vector.
 * @param[in] len2 length of the second input vector.
 * @param[out] *dst points to the output vector where the length is
 *                  len1 + len2 - 1.
 *
 * Function notes:
 * Both inputs are in 1.7 format and multiplications yield a 2.14 result.
 * The 2.14 intermediate results are accumulated in a 32-bit accumulator in
 * 18.14 format. The 18.14 result is then truncated to 18.7 format by
 * discarding the low 7 bits and then saturated to 1.7 format.
 */
static inline void hpm_dsp_conv_q7(q7_t *src1, uint32_t len1, q7_t *src2, uint32_t len2, q7_t *dst)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    tpt_conv_q7(dst, src1, len1, src2, len2);
#else
    riscv_dsp_conv_q7(src1, len1, src2, len2, dst);
#endif
#endif
}
- /**
- * @brief Convolution Partial of the floating-point vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] len1 length of the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[in] len2 length of the second input vector.
- * @param[out] *dst points to the output vector where the length is
- * len1 + len2 - 1.
- * @param[in] startindex is the first output sample to start with.
- * @param[in] size is the number of output points to be computed.
- * @return Returns
- * 0; success
- * -1; fail, the input subset are not between 0 and len1+len2-2.
- */
- static inline int32_t hpm_dsp_conv_partial_f32(float32_t *src1, uint32_t len1, float32_t *src2, uint32_t len2, float32_t *dst, uint32_t startindex, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_conv_partial_f32(dst, src1, len1, src2, len2, startindex, size);
- #else
- return riscv_dsp_conv_partial_f32(src1, len1, src2, len2, dst, startindex,
- size);
- #endif
- #endif
- }
- /**
- * @brief Convolution Partial of the q15 vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] len1 length of the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[in] len2 length of the second input vector.
- * @param[out] *dst points to the output vector where the length is
- * len1 + len2 - 1.
- * @param[in] startindex is the first output sample to start with.
- * @param[in] size is the number of output points to be computed.
- * @return Returns
- * 0; success
- * -1; fail, the input subset are not between 0 and len1+len2-2.
- */
- static inline int32_t hpm_dsp_conv_partial_q15(q15_t *src1, uint32_t len1, q15_t *src2, uint32_t len2, q15_t *dst, uint32_t startindex, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_conv_partial_q15(dst, src1, len1, src2, len2, startindex, size);
- #else
- return riscv_dsp_conv_partial_q15(src1, len1, src2, len2, dst, startindex,
- size);
- #endif
- #endif
- }
- /**
- * @brief Convolution Partial of the q31 vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] len1 length of the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[in] len2 length of the second input vector.
- * @param[out] *dst points to the output vector where the length is
- * len1 + len2 - 1.
- * @param[in] startindex is the first output sample to start with.
- * @param[in] size is the number of output points to be computed.
- * @return Returns
- * 0; success
- * -1; fail, the input subset are not between 0 and len1+len2-2.
- */
- static inline int32_t hpm_dsp_conv_partial_q31(q31_t *src1, uint32_t len1, q31_t *src2, uint32_t len2, q31_t *dst, uint32_t startindex, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_conv_partial_q31(dst, src1, len1, src2, len2, startindex, size);
- #else
- return riscv_dsp_conv_partial_q31(src1, len1, src2, len2, dst, startindex,
- size);
- #endif
- #endif
- }
- /**
- * @brief Convolution Partial of the q7 vectors.
- * @param[in] *src1 points to the first input vector.
- * @param[in] len1 length of the first input vector.
- * @param[in] *src2 points to the second input vector.
- * @param[in] len2 length of the second input vector.
- * @param[out] *dst points to the output vector where the length is
- * len1 + len2 - 1.
- * @param[in] startindex is the first output sample to start with.
- * @param[in] size is the number of output points to be computed.
- * @return Returns
- * 0; success
- * -1; fail, the input subset are not between 0 and len1+len2-2.
- */
- static inline int32_t hpm_dsp_conv_partial_q7(q7_t *src1, uint32_t len1, q7_t *src2, uint32_t len2, q7_t *dst, uint32_t startindex, uint32_t size)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_conv_partial_q7(dst, src1, len1, src2, len2, startindex, size);
- #else
- return riscv_dsp_conv_partial_q7(src1, len1, src2, len2, dst, startindex,
- size);
- #endif
- #endif
- }
// Correlation
/**
 * @brief Correlation of the floating-point vectors.
 * @param[in] *src1 points to the first input vector.
 * @param[in] len1 length of the first input vector.
 * @param[in] *src2 points to the second input vector.
 * @param[in] len2 length of the second input vector.
 * @param[out] *dst points to the output vector where the length is
 *                  2 * max(len1, len2) - 1.
 *
 * Note: the Zcc (tpt_*) backend takes the destination as its first argument,
 * while the riscv_dsp_* backend takes it last.
 */
static inline void hpm_dsp_corr_f32(float32_t *src1, uint32_t len1, float32_t *src2, uint32_t len2, float32_t *dst)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    tpt_correlate_f32(dst, src1, len1, src2, len2);
#else
    riscv_dsp_corr_f32(src1, len1, src2, len2, dst);
#endif
#endif
}
/**
 * @brief Correlation of the q15 vectors.
 * @param[in] *src1 points to the first input vector.
 * @param[in] len1 length of the first input vector.
 * @param[in] *src2 points to the second input vector.
 * @param[in] len2 length of the second input vector.
 * @param[out] *dst points to the output vector where the length is
 *                  2 * max(len1, len2) - 1.
 *
 * Function notes:
 * Both inputs are in 1.15 format and multiplications yield a 2.30 result.
 * The 2.30 intermediate results are accumulated in a 64-bit accumulator in
 * 34.30 format. The 34.30 result is then truncated to 34.15 format by
 * discarding the low 15 bits and then saturated to 1.15 format.
 */
static inline void hpm_dsp_corr_q15(q15_t *src1, uint32_t len1, q15_t *src2, uint32_t len2, q15_t *dst)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    tpt_correlate_q15(dst, src1, len1, src2, len2);
#else
    riscv_dsp_corr_q15(src1, len1, src2, len2, dst);
#endif
#endif
}
/**
 * @brief Correlation of the q31 vectors.
 *        (Original comment said "Convolution" with output length
 *        len1 + len2 - 1; this function calls the correlation backends, whose
 *        sibling wrappers document an output length of
 *        2 * max(len1, len2) - 1.)
 * @param[in] *src1 points to the first input vector.
 * @param[in] len1 length of the first input vector.
 * @param[in] *src2 points to the second input vector.
 * @param[in] len2 length of the second input vector.
 * @param[out] *dst points to the output vector where the length is
 *                  2 * max(len1, len2) - 1.
 *
 * Function notes:
 * Both inputs are in 1.31 format and the 64-bit accumulator has a 2.62
 * format and maintains full precision of the intermediate multiplication
 * results but provides only a single guard bit. The input signals should be
 * scaled down to avoid intermediate overflows. Scale down one of the inputs
 * by <code>1/min(srcALen, srcBLen)</code> to avoid overflows since a
 * maximum of <code>min(srcALen, srcBLen)</code> number of additions is
 * carried internally. The 2.62 accumulator is right shifted by 31 bits and
 * saturated to 1.31 format to yield the final result.
 */
static inline void hpm_dsp_corr_q31(q31_t *src1, uint32_t len1, q31_t *src2, uint32_t len2, q31_t *dst)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    tpt_correlate_q31(dst, src1, len1, src2, len2);
#else
    riscv_dsp_corr_q31(src1, len1, src2, len2, dst);
#endif
#endif
}
/**
 * @brief Correlation of the q7 vectors.
 * @param[in] *src1 points to the first input vector.
 * @param[in] len1 length of the first input vector.
 * @param[in] *src2 points to the second input vector.
 * @param[in] len2 length of the second input vector.
 * @param[out] *dst points to the output vector where the length is
 *                  2 * max(len1, len2) - 1.
 *
 * Function notes:
 * Both inputs are in 1.7 format and multiplications yield a 2.14 result.
 * The 2.14 intermediate results are accumulated in a 32-bit accumulator in
 * 18.14 format. The 18.14 result is then truncated to 18.7 format by
 * discarding the low 7 bits and then saturated to 1.7 format.
 */
static inline void hpm_dsp_corr_q7(q7_t *src1, uint32_t len1, q7_t *src2, uint32_t len2, q7_t *dst)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
    tpt_correlate_q7(dst, src1, len1, src2, len2);
#else
    riscv_dsp_corr_q7(src1, len1, src2, len2, dst);
#endif
#endif
}
/**
 * @brief Floating-point biquad direct-form-I filter (per the
 *        riscv_dsp_bq_df1_* naming — confirm against the Andes DSP library
 *        reference).
 * @param[in] *instance points to an instance of the biquad structure.
 * @param[in] *src points to the input block data.
 * @param[out] *dst points to the output block data.
 * @param[in] size number of the blocksize.
 */
static inline void hpm_dsp_bq_df1_f32(const riscv_dsp_bq_df1_f32_t *instance, float32_t *src, float32_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_bq_df1_f32(instance, src, dst, size);
#endif
}
/**
 * @brief q15 biquad direct-form-I filter.
 */
static inline void hpm_dsp_bq_df1_q15(const riscv_dsp_bq_df1_q15_t *instance, q15_t *src, q15_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_bq_df1_q15(instance, src, dst, size);
#endif
}
/**
 * @brief q15 biquad direct-form-I filter (fast variant).
 */
static inline void hpm_dsp_bq_df1_fast_q15(const riscv_dsp_bq_df1_q15_t *instance, q15_t *src, q15_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_bq_df1_fast_q15(instance, src, dst, size);
#endif
}
/**
 * @brief q31 biquad direct-form-I filter.
 */
static inline void hpm_dsp_bq_df1_q31(const riscv_dsp_bq_df1_q31_t *instance, q31_t *src, q31_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_bq_df1_q31(instance, src, dst, size);
#endif
}
/**
 * @brief q31 biquad direct-form-I filter (fast variant).
 */
static inline void hpm_dsp_bq_df1_fast_q31(const riscv_dsp_bq_df1_q31_t *instance, q31_t *src, q31_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_bq_df1_fast_q31(instance, src, dst, size);
#endif
}
/**
 * @brief q31 biquad direct-form-I filter with 32x64 state/coefficient
 *        handling (per the riscv_dsp_bq_df1_32x64_q31 naming).
 */
static inline void hpm_dsp_bq_df1_32x64_q31(const riscv_dsp_bq_df1_32x64_q31_t *instance, q31_t *src, q31_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_bq_df1_32x64_q31(instance, src, dst, size);
#endif
}
/**
 * @brief Floating-point biquad direct-form-II-transposed filter.
 */
static inline void hpm_dsp_bq_df2T_f32(const riscv_dsp_bq_df2T_f32_t *instance, float32_t *src, float32_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_bq_df2T_f32(instance, src, dst, size);
#endif
}
/**
 * @brief Double-precision biquad direct-form-II-transposed filter.
 */
static inline void hpm_dsp_bq_df2T_f64(const riscv_dsp_bq_df2T_f64_t *instance, float64_t *src, float64_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_bq_df2T_f64(instance, src, dst, size);
#endif
}
/**
 * @brief Stereo floating-point biquad direct-form-II-transposed filter.
 */
static inline void hpm_dsp_bq_stereo_df2T_f32(const riscv_dsp_bq_stereo_df2T_f32_t *instance, float32_t *src, float32_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_bq_stereo_df2T_f32(instance, src, dst, size);
#endif
}
/**
 * @brief Lattice IIR filtering of f32 samples.
 * @param[in]  *instance points to the f32 lattice IIR instance structure.
 * @param[in]  *src      points to the block of input samples.
 * @param[out] *dst      points to the block of output samples.
 * @param[in]  size      number of samples to process.
 */
static inline void hpm_dsp_liir_f32(const riscv_dsp_liir_f32_t *instance, float32_t *src, float32_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
/* Delegate to the Andes DSP library; no-op on other cores. */
riscv_dsp_liir_f32(instance, src, dst, size);
#endif
}
/**
 * @brief Lattice IIR filtering of q31 samples.
 * @param[in]  *instance points to the q31 lattice IIR instance structure.
 * @param[in]  *src      points to the block of input samples.
 * @param[out] *dst      points to the block of output samples.
 * @param[in]  size      number of samples to process.
 */
static inline void hpm_dsp_liir_q31(const riscv_dsp_liir_q31_t *instance, q31_t *src, q31_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
/* Delegate to the Andes DSP library; no-op on other cores. */
riscv_dsp_liir_q31(instance, src, dst, size);
#endif
}
/**
 * @brief Fast (reduced-precision) lattice IIR filtering of q31 samples.
 * @param[in]  *instance points to the q31 lattice IIR instance structure.
 * @param[in]  *src      points to the block of input samples.
 * @param[out] *dst      points to the block of output samples.
 * @param[in]  size      number of samples to process.
 */
static inline void hpm_dsp_liir_fast_q31(const riscv_dsp_liir_q31_t *instance, q31_t *src, q31_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
/* Delegate to the Andes DSP library; no-op on other cores. */
riscv_dsp_liir_fast_q31(instance, src, dst, size);
#endif
}
/**
 * @brief Lattice IIR filtering of q15 samples.
 * @param[in]  *instance points to the q15 lattice IIR instance structure.
 * @param[in]  *src      points to the block of input samples.
 * @param[out] *dst      points to the block of output samples.
 * @param[in]  size      number of samples to process.
 */
static inline void hpm_dsp_liir_q15(const riscv_dsp_liir_q15_t *instance, q15_t *src, q15_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
/* Delegate to the Andes DSP library; no-op on other cores. */
riscv_dsp_liir_q15(instance, src, dst, size);
#endif
}
/**
 * @brief Fast (reduced-precision) lattice IIR filtering of q15 samples.
 * @param[in]  *instance points to the q15 lattice IIR instance structure.
 * @param[in]  *src      points to the block of input samples.
 * @param[out] *dst      points to the block of output samples.
 * @param[in]  size      number of samples to process.
 */
static inline void hpm_dsp_liir_fast_q15(const riscv_dsp_liir_q15_t *instance, q15_t *src, q15_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
/* Delegate to the Andes DSP library; no-op on other cores. */
riscv_dsp_liir_fast_q15(instance, src, dst, size);
#endif
}
- #endif
- #endif
- /**
- * @}
- *
- */
- #ifdef HPM_MATH_DSP_MATRIX
- /**
- * @defgroup matrix DSP Matrix Functions
- *
- * This set of functions provides basic matrix math operations.
 * The function specifies the size of the matrix and then points to an array.
- * For example,
- * the function definition for the floating-point is shown below:
- * <pre>
- * void riscv_dsp_funcname_f32(const float32_t *src1,
- * const float32_t *src2,
- * float32_t *dst,
- * uint32_t row,
- * uint32_t col,
- * uint32_t row2,
- * uint32_t col2)
- * </pre>
 * where it can be transformed to the two matrices. The matrix 1 is a
 * <code>row * col</code> matrix and the matrix 2 is a
 * <code>row2 * col2</code> matrix, and the output matrix would differ depending on
 * the math operation. There are similar definitions for Q15 and Q31 data types.
- * @ingroup hpmmath
- * @{
- */
- #ifdef HPM_EN_MATH_DSP_LIB
- #ifdef __zcc__
- #include "tpt_math.h"
- #endif
- #include "riscv_dsp_matrix_math.h"
- // Matrix Addition
- /**
 * @brief Addition of two floating-point matrices.
- * @param[in] *src1 points to the first input matrix.
- * @param[in] *src2 points to the second input matrix.
- * @param[out] *dst points to the output matrix.
- * @param[in] row number of the matrix rows.
- * @param[in] col number of the matrix columns.
- */
static inline void hpm_dsp_mat_add_f32(const float32_t *src1, const float32_t *src2, float32_t *dst, uint32_t row, uint32_t col)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* tpt_* convention takes the destination as the first argument. */
tpt_mat_add_f32(dst, src1, src2, row, col);
#else
riscv_dsp_mat_add_f32(src1, src2, dst, row, col);
#endif
#endif
}
- /**
- * @brief Addition of two q15 matrices.
- * @param[in] *src1 points to the first input matrix.
- * @param[in] *src2 points to the second input matrix.
- * @param[out] *dst points to the output matrix.
- * @param[in] row number of the matrix rows.
- * @param[in] col number of the matrix columns.
- *
- * The output results will be saturated in Q15 range [0x8000 0x7FFF].
- */
static inline void hpm_dsp_mat_add_q15(const q15_t *src1, const q15_t *src2, q15_t *dst, uint32_t row, uint32_t col)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* tpt_* convention takes the destination as the first argument. */
tpt_mat_add_q15(dst, src1, src2, row, col);
#else
riscv_dsp_mat_add_q15(src1, src2, dst, row, col);
#endif
#endif
}
- /**
- * @brief Addition of two q31 matrices.
- * @param[in] *src1 points to the first input matrix.
- * @param[in] *src2 points to the second input matrix.
- * @param[out] *dst points to the output matrix.
- * @param[in] row number of the matrix rows.
- * @param[in] col number of the matrix columns.
- *
 * Output results will be saturated in Q31 range [0x80000000 0x7FFFFFFF].
- */
static inline void hpm_dsp_mat_add_q31(const q31_t *src1, const q31_t *src2, q31_t *dst, uint32_t row, uint32_t col)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* tpt_* convention takes the destination as the first argument. */
tpt_mat_add_q31(dst, src1, src2, row, col);
#else
riscv_dsp_mat_add_q31(src1, src2, dst, row, col);
#endif
#endif
}
- // Matrix Inverse
- /**
 * @brief Compute the inverse matrix of the floating-point matrix.
- * @param[in] *src points to the input matrix.
- * @param[out] *dst points to the output matrix.
- * @param[in] size number of the matrix row or column.
- * @return the inverse process success or not.
- */
static inline int32_t hpm_dsp_mat_inv_f32(float32_t *src, float32_t *dst, uint32_t size)
{
/* NOTE(review): when HPM_DSP_CORE != HPM_DSP_HW_NDS32 this non-void function
 * falls off the end without returning a value (UB if the caller uses the
 * result) — consider adding an #else branch returning an error code. */
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* tpt_* convention takes the destination as the first argument. */
return tpt_mat_inverse_f32(dst, src, size);
#else
return riscv_dsp_mat_inv_f32(src, dst, size);
#endif
#endif
}
/**
 * @brief Compute the inverse of a double-precision floating-point matrix.
 * @param[in]  *src  points to the input square matrix.
 * @param[out] *dst  points to the output matrix.
 * @param[in]  size  number of the matrix rows (== columns).
 * @return whether the inverse process succeeded or not.
 */
static inline int32_t hpm_dsp_mat_inv_f64(float64_t *src, float64_t *dst, uint32_t size)
{
/* NOTE(review): falls off the end without a return when the core is not
 * NDS32 — UB if the result is used; consider an #else error return. */
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* tpt_* convention takes the destination as the first argument. */
return tpt_mat_inverse_f64(dst, src, size);
#else
return riscv_dsp_mat_inv_f64(src, dst, size);
#endif
#endif
}
- // Matrix Multiplication
- /**
- * @brief Multiplication of two floating-point matrices.
- * @param[in] *src1 points to the first input matrix.
- * @param[in] *src2 points to the second input matrix.
- * @param[out] *dst points to the output matrix.
- * @param[in] row number of the first input matrix rows.
- * @param[in] col number of the first input matrix columns.
- * @param[in] col2 number of the second input matrix columns.
- */
- static inline void hpm_dsp_mat_mul_f32(const float32_t *src1, const float32_t *src2, float32_t *dst, uint32_t row, uint32_t col, uint32_t col2)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_mat_mult_f32(dst, src1, src2, row, col, col2);
- #else
- riscv_dsp_mat_mul_f32(src1, src2, dst, row, col, col2);
- #endif
- #endif
- }
- static inline void hpm_dsp_mat_mul_f64(const float64_t *src1, const float64_t *src2, float64_t *dst, uint32_t row, uint32_t col, uint32_t col2)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_mat_mult_f64(dst, src1, src2, row, col, col2);
- #else
- riscv_dsp_mat_mul_f64(src1, src2, dst, row, col, col2);
- #endif
- #endif
- }
- /**
- * @brief Multiplication of two floating-point complex matrices.
- * @param[in] *src1 points to the first input complex matrix.
- * @param[in] *src2 points to the second input complex matrix.
- * @param[out] *dst points to the output complex matrix.
- * @param[in] row number of the first input matrix rows.
- * @param[in] col number of the first input matrix columns.
- * @param[in] col2 number of the second input matrix columns.
- */
- static inline void hpm_dsp_cmat_mul_f32(const float32_t *src1, const float32_t *src2, float32_t *dst, uint32_t row, uint32_t col, uint32_t col2)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_mat_cmplx_mult_f32(dst, src1, src2, row, col, col2);
- #else
- riscv_dsp_cmat_mul_f32(src1, src2, dst, row, col, col2);
- #endif
- #endif
- }
- /**
- * @brief Multiplication of two q15 matrices.
- * @param[in] *src1 points to the first input matrix.
- * @param[in] *src2 points to the second input matrix.
- * @param[out] *dst points to the output matrix.
- * @param[in] row number of the first input matrix rows.
- * @param[in] col number of the first input matrix columns.
- * @param[in] col2 number of the second input matrix columns.
- *
- * <b>Function notes:</b>
- *
- * The 1.15 format input is multiplied yields a 2.30 format, and then added
- * without saturation to a 64-bit accumulator in 34.30 format. Finally,
- * the added output is truncated to 34.15 format by discarding the lower 15
- * bits, and then saturated to yield a result in 1.15 format.
- */
- static inline void hpm_dsp_mat_mul_q15(const q15_t *src1, const q15_t *src2, q15_t *dst, uint32_t row, uint32_t col, uint32_t col2)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_mat_mult_q15(dst, src1, src2, row, col, col2);
- #else
- riscv_dsp_mat_mul_q15(src1, src2, dst, row, col, col2);
- #endif
- #endif
- }
- static inline void hpm_dsp_mat_mul_fast_q15(const q15_t *src1, const q15_t *src2, q15_t *dst, uint32_t row, uint32_t col, uint32_t col2)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_mat_mult_q15(dst, src1, src2, row, col, col2);
- #else
- riscv_dsp_mat_mul_fast_q15(src1, src2, dst, row, col, col2);
- #endif
- #endif
- }
- /**
- * @brief Multiplication of two q15 complex matrices.
- * @param[in] *src1 points to the first input complex matrix.
- * @param[in] *src2 points to the second input complex matrix.
- * @param[out] *dst points to the output complex matrix.
- * @param[in] row number of the first input matrix rows.
- * @param[in] col number of the first input matrix columns.
- * @param[in] col2 number of the second input matrix columns.
- *
- * <b>Function notes:</b>
- *
- * The 1.15 format input is multiplied yields a 2.30 format, and then added
- * without saturation to a 64-bit accumulator in 34.30 format. Finally,
- * the added output is truncated to 34.15 format by discarding the lower 15
- * bits, and then saturated to yield a result in 1.15 format.
- */
- static inline void hpm_dsp_cmat_mul_q15(const q15_t *src1, const q15_t *src2, q15_t *dst, uint32_t row, uint32_t col, uint32_t col2)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_mat_cmplx_mult_q15(dst, src1, src2, row, col, col2);
- #else
- riscv_dsp_cmat_mul_q15(src1, src2, dst, row, col, col2);
- #endif
- #endif
- }
- /**
- * @brief Multiplication of two q31 matrices.
- * @param[in] *src1 points to the first input matrix.
- * @param[in] *src2 points to the second input matrix.
- * @param[out] *dst points to the output matrix.
- * @param[in] row number of the first input matrix rows.
- * @param[in] col number of the first input matrix columns.
- * @param[in] col2 number of the second input matrix columns.
- *
- * <b>Function notes:</b>
- *
- * The 1.31 format input is multiplied yields a 2.62 format. In order to
- * avoid overflows, the input signal must be scaled down by
- * <code>log2(col)</code> bits, Finally, the 2.62 accumulator is right
- * shifted by 31 bits to yield a 1.31 format value.
- */
- static inline void hpm_dsp_mat_mul_q31(const q31_t *src1, const q31_t *src2, q31_t *dst, uint32_t row, uint32_t col, uint32_t col2)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_mat_mult_q31(dst, src1, src2, row, col, col2);
- #else
- riscv_dsp_mat_mul_q31(src1, src2, dst, row, col, col2);
- #endif
- #endif
- }
- static inline void hpm_dsp_mat_mul_fast_q31(const q31_t *src1, const q31_t *src2, q31_t *dst, uint32_t row, uint32_t col, uint32_t col2)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_mat_mult_q31(dst, src1, src2, row, col, col2);
- #else
- riscv_dsp_mat_mul_fast_q31(src1, src2, dst, row, col, col2);
- #endif
- #endif
- }
- /**
- * @brief Multiplication of two q31 complex matrices.
- * @param[in] *src1 points to the first input complex matrix.
- * @param[in] *src2 points to the second input complex matrix.
- * @param[out] *dst points to the output complex matrix.
- * @param[in] row number of the first input matrix rows.
- * @param[in] col number of the first input matrix columns.
- * @param[in] col2 number of the second input matrix columns.
- *
- * <b>Function notes:</b>
- *
- * The 1.31 format input is multiplied yields a 2.62 format. In order to
- * avoid overflows, the input signal must be scaled down by
- * <code>log2(col)</code> bits, Finally, the 2.62 accumulator is right
- * shifted by 31 bits to yield a 1.31 format value.
- */
- static inline void hpm_dsp_cmat_mul_q31(const q31_t *src1, const q31_t *src2, q31_t *dst, uint32_t row, uint32_t col, uint32_t col2)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- #ifdef __zcc__
- return tpt_mat_cmplx_mult_q31(dst, src1, src2, row, col, col2);
- #else
- riscv_dsp_cmat_mul_q31(src1, src2, dst, row, col, col2);
- #endif
- #endif
- }
- /**
- * @brief Multiplication of two q7 matrices.
- * @param[in] *src1 points to the first input matrix.
- * @param[in] *src2 points to the second input matrix.
- * @param[out] *dst points to the output matrix.
- * @param[in] row number of the first input matrix rows.
- * @param[in] col number of the first input matrix columns.
- * @param[in] col2 number of the second input matrix columns.
- *
- * <b>Function notes:</b>
- *
- * The 1.7 format input is multiplied yields a 2.15 format, and then added
- * without saturation to a 32-bit accumulator in 17.15 format. Finally,
- * the added output is truncated to 17.7 format by discarding the lower 7
- * bits, and then saturated to yield a result in 1.7 format.
- */
static inline void hpm_dsp_mat_mul_q7(const q7_t *src1, const q7_t *src2, q7_t *dst, uint32_t row, uint32_t col, uint32_t col2)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
/* Delegate to the Andes DSP library; no zcc (tpt_*) path is provided here. */
riscv_dsp_mat_mul_q7(src1, src2, dst, row, col, col2);
#endif
}
- /**
 * @brief Multiplication of q7 vector by matrix.
- * @param[in] *src1 points to the first input vector.
- * @param[in] *src2 points to the second input matrix.
- * @param[out] *dst points to the output vector.
- * @param[in] col number of the first input vector columns.
- * @param[in] col2 number of the second input matrix columns.
- *
- * <b>Function notes:</b>
- *
- * The 1.7 format input is multiplied yields a 2.15 format, and then added
- * without saturation to a 32-bit accumulator in 17.15 format. Finally,
- * the added output is truncated to 17.7 format by discarding the lower 7
- * bits, and then saturated to yield a result in 1.7 format.
- */
static inline void hpm_dsp_mat_mul_vxm_q7(const q7_t * src1, const q7_t * src2, q7_t * dst, uint32_t col, uint32_t col2)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* NOTE(review): wrapper is "vxm" (vector x matrix) but the zcc path calls
 * tpt_mat_mul_mxv_q7 (matrix x vector) — confirm the tpt API really covers
 * the vector-times-matrix case with these operands. */
tpt_mat_mul_mxv_q7(dst, src1, src2, col, col2);
#else
riscv_dsp_mat_mul_vxm_q7(src1, src2, dst, col, col2);
#endif
#endif
}
- // Matrix Power 2 Function
- //
- // The input is a square matrix for riscv_dsp_mat_pow2_cache_f64.
/**
 * @brief Matrix power-2 function for a double-precision square matrix.
 * @param[in]  *src  points to the input square matrix.
 * @param[out] *dst  points to the output matrix.
 * @param[in]  size  number of the matrix rows (== columns).
 * @return status code from the underlying library call.
 */
static inline int32_t hpm_dsp_mat_pwr2_cache_f64(const float64_t *src, float64_t *dst, uint32_t size)
{
/* NOTE(review): falls off the end without a return when the core is not
 * NDS32 — UB if the result is used; consider an #else error return. */
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
return riscv_dsp_mat_pwr2_cache_f64(src, dst, size);
#endif
}
- // Matrix Scale
- /**
 * @brief Multiply a floating-point matrix by a scale value.
- * @param[in] *src points to the input matrix.
- * @param[in] scale is the factor to be multiplied.
- * @param[out] *dst points to the output matrix.
- * @param[in] row number of the matrix rows.
- * @param[in] col number of the matrix columns.
- */
static inline void hpm_dsp_mat_scale_f32(const float32_t *src, float32_t scale, float32_t *dst, uint32_t row, uint32_t col)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* tpt_* convention: destination first, scale factor last. */
tpt_mat_scale_f32(dst, src, row, col, scale);
#else
riscv_dsp_mat_scale_f32(src, scale, dst, row, col);
#endif
#endif
}
- /**
 * @brief Multiply a q15 matrix by a scale value.
- * @param[in] *src points to the input matrix.
- * @param[in] scale_fract fractional multiplication.
- * @param[in] shift arithmetic shift.
- * @param[out] *dst points to the output matrix.
- * @param[in] row number of the matrix rows.
- * @param[in] col number of the matrix columns.
- *
- * <b>Function notes:</b>
- *
- * The 1.15 format inputs are multiplied to yield a 2.30 intermediate result
- * and this is shifted with saturation to 1.15 format.
- */
static inline void hpm_dsp_mat_scale_q15(const q15_t *src, q15_t scale_fract, int32_t shift, q15_t *dst, uint32_t row, uint32_t col)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* tpt_* convention: destination first, scale factor and shift last. */
tpt_mat_scale_q15(dst, src, row, col, scale_fract, shift);
#else
riscv_dsp_mat_scale_q15(src, scale_fract, shift, dst, row, col);
#endif
#endif
}
- /**
 * @brief Multiply a q31 matrix by a scale value.
- * @param[in] *src points to the input matrix.
- * @param[in] scale_fract fractional multiplication.
- * @param[in] shift arithmetic shift.
- * @param[out] *dst points to the output matrix.
- * @param[in] row number of the matrix rows.
- * @param[in] col number of the matrix columns.
- *
- * <b>Function notes:</b>
- *
- * The 1.31 format input are multiplied to yield a 2.62 intermediate result
- * and this is shifted with saturation to 1.31 format.
- */
static inline void hpm_dsp_mat_scale_q31(const q31_t *src, q31_t scale_fract, int32_t shift, q31_t *dst, uint32_t row, uint32_t col)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* tpt_* convention: destination first, scale factor and shift last. */
tpt_mat_scale_q31(dst, src, row, col, scale_fract, shift);
#else
riscv_dsp_mat_scale_q31(src, scale_fract, shift, dst, row, col);
#endif
#endif
}
- // Matrix Subtraction
- /**
 * @brief Subtraction of two double-precision floating-point matrices.
- * @param[in] src1 pointer of the first input matrix
- * @param[in] src2 pointer of the second input matrix
- * @param[out] dst pointer of the output matrix
- * @param[in] row number of rows in a matrix
- * @param[in] col number of columns in a matrix
- *
- */
static inline void hpm_dsp_mat_sub_f64(const float64_t *src1, const float64_t *src2,
float64_t *dst, uint32_t row, uint32_t col)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* tpt_* convention takes the destination as the first argument. */
tpt_mat_sub_f64(dst, src1, src2, row, col);
#else
riscv_dsp_mat_sub_f64(src1, src2, dst, row, col);
#endif
#endif
}
- /**
 * @brief Subtraction of two floating-point matrices.
- * @param[in] *src1 points to the first input matrix.
- * @param[in] *src2 points to the second input matrix.
- * @param[out] *dst points to the output matrix.
- * @param[in] row number of the matrix rows.
- * @param[in] col number of the matrix columns.
- */
static inline void hpm_dsp_mat_sub_f32(const float32_t *src1, const float32_t *src2, float32_t *dst, uint32_t row, uint32_t col)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* tpt_* convention takes the destination as the first argument. */
tpt_mat_sub_f32(dst, src1, src2, row, col);
#else
riscv_dsp_mat_sub_f32(src1, src2, dst, row, col);
#endif
#endif
}
- /**
 * @brief Subtraction of two q15 matrices.
- * @param[in] *src1 points to the first input matrix.
- * @param[in] *src2 points to the second input matrix.
- * @param[out] *dst points to the output matrix.
- * @param[in] row number of the matrix rows.
- * @param[in] col number of the matrix columns.
- *
- * The output results will be saturated in Q15 range [0x8000 0x7FFF].
- */
static inline void hpm_dsp_mat_sub_q15(const q15_t *src1, const q15_t *src2, q15_t *dst, uint32_t row, uint32_t col)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* tpt_* convention takes the destination as the first argument. */
tpt_mat_sub_q15(dst, src1, src2, row, col);
#else
riscv_dsp_mat_sub_q15(src1, src2, dst, row, col);
#endif
#endif
}
- /**
 * @brief Subtraction of two q31 matrices.
- * @param[in] *src1 points to the first input matrix.
- * @param[in] *src2 points to the second input matrix.
- * @param[out] *dst points to the output matrix.
- * @param[in] row number of the matrix rows.
- * @param[in] col number of the matrix columns.
- *
 * Output results will be saturated in Q31 range [0x80000000 0x7FFFFFFF].
- */
static inline void hpm_dsp_mat_sub_q31(const q31_t *src1, const q31_t *src2, q31_t *dst, uint32_t row, uint32_t col)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* tpt_* convention takes the destination as the first argument. */
tpt_mat_sub_q31(dst, src1, src2, row, col);
#else
riscv_dsp_mat_sub_q31(src1, src2, dst, row, col);
#endif
#endif
}
- // Matrix Transpose
- /**
 * @brief Transpose the double-precision floating-point matrices.
- * @param[in] src pointer of the input matrix
- * @param[out] dst pointer of the output matrix
- * @param[in] row number of rows in a matrix
- * @param[in] col number of columns in a matrix
- *
- */
static inline void hpm_dsp_mat_trans_f64(const float64_t *src, float64_t *dst, uint32_t row, uint32_t col)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* tpt_* convention takes the destination as the first argument. */
tpt_mat_trans_f64(dst, src, row, col);
#else
riscv_dsp_mat_trans_f64(src, dst, row, col);
#endif
#endif
}
- /**
 * @brief Transpose the floating-point matrix.
- * @param[in] *src points to the input matrix.
- * @param[out] *dst points to the output matrix.
- * @param[in] row number of the matrix rows.
- * @param[in] col number of the matrix columns.
- */
static inline void hpm_dsp_mat_trans_f32(const float32_t *src, float32_t *dst, uint32_t row, uint32_t col)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
/* Delegate to the Andes DSP library; no zcc (tpt_*) path is provided here. */
riscv_dsp_mat_trans_f32(src, dst, row, col);
#endif
}
- /**
 * @brief Transpose the q15 matrix.
- * @param[in] *src points to the input matrix.
- * @param[out] *dst points to the output matrix.
- * @param[in] row number of the matrix rows.
- * @param[in] col number of the matrix columns.
- */
static inline void hpm_dsp_mat_trans_q15(const q15_t *src, q15_t *dst, uint32_t row, uint32_t col)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* tpt_* convention takes the destination as the first argument. */
tpt_mat_trans_q15(dst, src, row, col);
#else
riscv_dsp_mat_trans_q15(src, dst, row, col);
#endif
#endif
}
- /**
 * @brief Transpose the q31 matrix.
- * @param[in] *src points to the input matrix.
- * @param[out] *dst points to the output matrix.
- * @param[in] row number of the matrix rows.
- * @param[in] col number of the matrix columns.
- */
static inline void hpm_dsp_mat_trans_q31(const q31_t *src, q31_t *dst, uint32_t row, uint32_t col)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* tpt_* convention takes the destination as the first argument. */
tpt_mat_trans_q31(dst, src, row, col);
#else
riscv_dsp_mat_trans_q31(src, dst, row, col);
#endif
#endif
}
- /**
 * @brief Transpose the u8 matrix.
- * @param[in] *src points to the input matrix.
- * @param[out] *dst points to the output matrix.
- * @param[in] row number of the matrix rows.
- * @param[in] col number of the matrix columns.
- */
static inline void hpm_dsp_mat_trans_u8(const uint8_t *src, uint8_t *dst, uint32_t row, uint32_t col)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
/* Delegate to the Andes DSP library; no zcc (tpt_*) path is provided here. */
riscv_dsp_mat_trans_u8(src, dst, row, col);
#endif
}
- /**
- * @brief Transpose the q7 matrices.
- * @param[in] src pointer of the input matrix
- * @param[out] dst pointer of the output matrix
- * @param[in] row number of rows in a matrix
- * @param[in] col number of columns in a matrix
- *
- */
static inline void hpm_dsp_mat_trans_q7(const q7_t *src, q7_t *dst, uint32_t row, uint32_t col)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
/* Delegate to the Andes DSP library; no zcc (tpt_*) path is provided here. */
riscv_dsp_mat_trans_q7(src, dst, row, col);
#endif
}
- /**
- * @brief Outer production of two q31 matrices.
- * @param[in] src1 pointer of the first input matrix with a size of size1*1
- * @param[in] src2 pointer of the second input matrix with a size of 1*size2
- * @param[out] dst pointer of the output matrix with a size of size1 * size2
- * @param[in] size1 number of rows in the first input matrix.
- * @param[in] size2 number of columns in the second input matrix.
- *
- *
- * @b Note:
- *
- * This function multiplies a one-column matrix with size1 rows, src1[size1, 1], with a
- * one-row matrix with size2 columns, src2[1, size2], and stores the result into a matrix
- * with size1 rows and size2 columns, dst[size1, size2]. It achieves better efficiency for
- * vector-wise matrix multiplication than for regular matrix multiplication.
- *
- * @b Example
- * <pre>
- * The following equation shows the outer product of two matrices and its result.
- *
- *
- * Its code example is as follows:
- *
- * \#define Arow 3
- * \#define Bcol 2
- * q31_t src1[Arow] = {0x200000, 0x100000, 0x50000};
- * q31_t src2[Bcol] = {0x10000, 0x30000};
- * q31_t dst[Arow * Bcol];
- * hpm_dsp_mat_oprod_q31 (src1, src2, dst, Arow, Bcol);
- * </pre>
- */
static inline void hpm_dsp_mat_oprod_q31(const q31_t * src1, const q31_t * src2,
q31_t * dst, uint32_t size1, uint32_t size2)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* tpt_* convention takes the destination as the first argument. */
tpt_mat_oprod_q31(dst, src1, src2, size1, size2);
#else
riscv_dsp_mat_oprod_q31(src1, src2, dst, size1, size2);
#endif
#endif
}
- /**
- * @brief Matrix multiply vector for f32 formats
- * @param[in] src1 pointer of the input matrix
- * @param[in] src2 pointer of the input vector
- * @param[out] dst pointer of the output vector
- * @param[in] row number of rows in the matrix
- * @param[in] col number of columns in the matrix and the elements size of vector
- *
- *
- * @b Example
- * <pre>
- *
- * \#define Arow 2
- * \#define Acol 3
- * float32_t src1[Arow * Acol] = {0.1, -0.1, 0.1, 0.2, -0.2, 0.3};
- * float32_t src2[Acol] = {0.2, -0.1, -0.7};
- * float32_t dst[Arow];
- * hpm_dsp_mat_mul_mxv_f32 (src1, src2, dst, Arow, Acol);
- *
- * This example also serves as a reference for examples of Q31, Q15 or Q7 functions.
- * </pre>
- */
static inline void hpm_dsp_mat_mul_mxv_f32(const float32_t *src1, const float32_t *src2,
float32_t *dst, uint32_t row, uint32_t col)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* tpt_* convention takes the destination as the first argument. */
tpt_mat_mul_mxv_f32(dst, src1, src2, row, col);
#else
riscv_dsp_mat_mul_mxv_f32(src1, src2, dst, row, col);
#endif
#endif
}
- /**
- * @brief Matrix multiply vector for q15 formats
- * @param[in] src1 pointer of the input matrix
- * @param[in] src2 pointer of the input vector
- * @param[out] dst pointer of the output vector
- * @param[in] row number of rows in the matrix
- * @param[in] col number of columns in the matrix and the elements size of vector
- *
- */
static inline void hpm_dsp_mat_mul_mxv_q15(const q15_t *src1, const q15_t *src2,
q15_t *dst, uint32_t row, uint32_t col)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* tpt_* convention takes the destination as the first argument. */
tpt_mat_mul_mxv_q15(dst, src1, src2, row, col);
#else
riscv_dsp_mat_mul_mxv_q15(src1, src2, dst, row, col);
#endif
#endif
}
- /**
- * @brief Matrix multiply vector for q31 formats
- * @param[in] src1 pointer of the input matrix
- * @param[in] src2 pointer of the input vector
- * @param[out] dst pointer of the output vector
- * @param[in] row number of rows in the matrix
- * @param[in] col number of columns in the matrix and the elements size of vector
- *
- */
static inline void hpm_dsp_mat_mul_mxv_q31(const q31_t *src1, const q31_t *src2,
q31_t *dst, uint32_t row, uint32_t col)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* tpt_* convention takes the destination as the first argument. */
tpt_mat_mul_mxv_q31(dst, src1, src2, row, col);
#else
riscv_dsp_mat_mul_mxv_q31(src1, src2, dst, row, col);
#endif
#endif
}
- /**
- * @brief Matrix multiply vector for q7 formats
- * @param[in] src1 pointer of the input matrix
- * @param[in] src2 pointer of the input vector
- * @param[out] dst pointer of the output vector
- * @param[in] row number of rows in the matrix
- * @param[in] col number of columns in the matrix and the elements size of vector
- *
- */
static inline void hpm_dsp_mat_mul_mxv_q7(const q7_t *src1, const q7_t *src2,
q7_t *dst, uint32_t row, uint32_t col)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* tpt_* convention takes the destination as the first argument. */
tpt_mat_mul_mxv_q7(dst, src1, src2, row, col);
#else
riscv_dsp_mat_mul_mxv_q7(src1, src2, dst, row, col);
#endif
#endif
}
- #endif
- #endif
- /**
- * @}
- *
- */
- #ifdef HPM_MATH_DSP_SVM
- /**
- * @defgroup svm DSP SVM Functions
- * @ingroup hpmmath
- * @{
- */
- #ifdef HPM_EN_MATH_DSP_LIB
- #ifdef __zcc__
- #include "tpt_math.h"
- #endif
- #include "riscv_dsp_svm_math.h"
- /**
- * @brief SVM linear prediction
- * @param[in] instance Pointer to an instance of the linear SVM structure.
- * @param[in] src Pointer to input vector
- * @param[out] result Decision value
- */
static inline void hpm_dsp_svm_linear_est_f32(const riscv_dsp_svm_linear_f32_t *instance, const float32_t *src, int32_t *result)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
/* Decision value is written through *result by the library; no-op on other cores. */
riscv_dsp_svm_linear_est_f32(instance, src, result);
#endif
}
- /**
- * @brief SVM Sigmoid prediction
- * @param[in] instance Pointer to an instance of the linear SVM structure.
- * @param[in] src Pointer to input vector
- * @param[out] result Decision value
- */
static inline void hpm_dsp_svm_sigmoid_est_f32(const riscv_dsp_svm_sigmoid_f32_t *instance, const float32_t *src, int32_t *result)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
/* Decision value is written through *result by the library; no-op on other cores. */
riscv_dsp_svm_sigmoid_est_f32(instance, src, result);
#endif
}
- /**
- * @brief SVM rbf prediction
- * @param[in] instance Pointer to an instance of the linear SVM structure.
- * @param[in] src Pointer to input vector
- * @param[out] result Decision value
- */
static inline void hpm_dsp_svm_rbf_est_f32(const riscv_dsp_svm_rbf_f32_t *instance, const float32_t *src, int32_t *result)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
/* Decision value is written through *result by the library; no-op on other cores. */
riscv_dsp_svm_rbf_est_f32(instance, src, result);
#endif
}
- /**
- * @brief SVM polynomial prediction
- * @param[in] instance Pointer to an instance of the linear SVM structure.
- * @param[in] src Pointer to input vector
- * @param[out] result Decision value
- */
static inline void hpm_dsp_svm_poly_est_f32(const riscv_dsp_svm_poly_f32_t *instance, const float32_t *src, int32_t *result)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
/* Decision value is written through *result by the library; no-op on other cores. */
riscv_dsp_svm_poly_est_f32(instance, src, result);
#endif
}
- #endif
- #endif
- /**
- * @}
- *
- */
- #ifdef HPM_MATH_DSP_TRANSFORM
- /**
- * @defgroup transform DSP Transform Functions
- * @ingroup hpmmath
- * @{
- */
- #ifdef HPM_EN_MATH_DSP_LIB
- #ifdef __zcc__
- #include "tpt_math.h"
- #endif
- #include "riscv_dsp_transform_math.h"
- /**
- * @brief cfft_rd2 of f32 vectors.
- * @param[in, out] src pointer of the input vector. After the function is executed, the
- * output will be stored in the input vector.
- * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 13
- * @return 0 success; -1 failure
- *
- * @b Example
- * <pre>
- * Given 128 samples (that is, FFT_LOGN = 7), the example of floating-point Radix-2 CFFT and
- * CIFFT is as follows:
- * \#define FFT_LOGN 7
- * float32_t src[2* (1 << FFT_LOGN)] = {};
- * int32_t ret;
- * ret = hpm_dsp_cfft_rd2_f32(src, FFT_LOGN);
- * if (ret == 0)
- * Success
- * Else
- * Fail
- * ret = hpm_dsp_cifft_rd2_f32(src, FFT_LOGN);
- * if (ret == 0)
- * Success
- * Else
- * Fail
- *
- * This example also serves as a reference for examples of Q31 and Q15 Radix-2 CFFT and
- * CIFFT functions.
- * </pre>
- */
static inline int32_t hpm_dsp_cfft_rd2_f32(float32_t *src, uint32_t m)
{
/* NOTE(review): falls off the end without a return when the core is not
 * NDS32 — UB if the result is used; consider an #else error return. */
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* zcc path: third argument selects inverse transform (false = forward FFT). */
return tpt_cfft_f32(src, m, false);
#else
return riscv_dsp_cfft_rd2_f32(src, m);
#endif
#endif
}
- /**
- * @brief cifft_rd2 of f32 vectors.
- * @param[in, out] src pointer of the input vector. After the function is executed, the
- * output will be stored in the input vector.
- * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 13
- * @return 0 success; -1 failure
- */
static inline int32_t hpm_dsp_cifft_rd2_f32(float32_t *src, uint32_t m)
{
/* NOTE(review): falls off the end without a return when the core is not
 * NDS32 — UB if the result is used; consider an #else error return. */
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* zcc path: third argument selects inverse transform (true = inverse FFT). */
return tpt_cfft_f32(src, m, true);
#else
return riscv_dsp_cifft_rd2_f32(src, m);
#endif
#endif
}
- /**
- * @brief cfft_rd2 of q15 vectors.
- * @param[in, out] src pointer of the input vector. After the function is executed, the
- * output will be stored in the input vector.
- * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 13
- * @return 0 success; -1 failure
- *
- * @b Note:
- *
- * The input and output formats are listed below. To satisfy the input format corresponding to
- * your input size, you may need to perform an arithmetic shift operation before calling this
- * function.
- */
static inline int32_t hpm_dsp_cfft_rd2_q15(q15_t *src, uint32_t m)
{
/* NOTE(review): falls off the end without a return when the core is not
 * NDS32 — UB if the result is used; consider an #else error return. */
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* zcc path: third argument selects inverse transform (false = forward FFT). */
return tpt_cfft_q15(src, m, false);
#else
return riscv_dsp_cfft_rd2_q15(src, m);
#endif
#endif
}
- /**
- * @brief cifft_rd2 of q15 vectors.
- * @param[in, out] src pointer of the input vector. After the function is executed, the
- * output will be stored in the input vector.
- * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 13
- * @return 0 success; -1 failure
- *
- * @b Note:
- *
- * The input and output formats are listed below. To satisfy the input format corresponding to
- * your input size, you may need to perform an arithmetic shift operation before calling this
- * function.
- */
static inline int32_t hpm_dsp_cifft_rd2_q15(q15_t *src, uint32_t m)
{
/* NOTE(review): falls off the end without a return when the core is not
 * NDS32 — UB if the result is used; consider an #else error return. */
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
/* zcc path: third argument selects inverse transform (true = inverse FFT). */
return tpt_cfft_q15(src, m, true);
#else
return riscv_dsp_cifft_rd2_q15(src, m);
#endif
#endif
}
/**
 * @brief cfft_rd2 of q31 vectors (in-place radix-2 forward complex FFT).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 13
 * @return 0 success; -1 failure
 *
 * @b Note:
 *
 * The input and output formats are listed below. To satisfy the input format corresponding to
 * your input size, you may need to perform an arithmetic shift operation before calling this
 * function.
 */
static inline int32_t hpm_dsp_cfft_rd2_q31(q31_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
	return tpt_cfft_q31(src, m, false);
#else
	return riscv_dsp_cfft_rd2_q31(src, m);
#endif
#endif
}
/**
 * @brief cifft_rd2 of q31 vectors (in-place radix-2 inverse complex FFT).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 13
 * @return 0 success; -1 failure
 *
 * @b Note:
 *
 * The input and output formats are listed below. To satisfy the input format corresponding to
 * your input size, you may need to perform an arithmetic shift operation before calling this
 * function.
 */
static inline int32_t hpm_dsp_cifft_rd2_q31(q31_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
	return tpt_cfft_q31(src, m, true);
#else
	return riscv_dsp_cifft_rd2_q31(src, m);
#endif
#endif
}
/**
 * @brief cfft_rd4 of f32 vectors (in-place radix-4 forward complex FFT).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set as 4, 6, 8 or 10
 * @return 0 success; -1 failure
 *
 * @b Example
 * <pre>
 * Given 256 samples (that is, FFT_LOGN = 8), the example of floating-point Radix-4 CFFT and
 * CIFFT is as follows:
 * \#define FFT_LOGN 8
 * float32_t src[2* (1 << FFT_LOGN)] = {};
 * int32_t ret;
 * ret = hpm_dsp_cfft_rd4_f32(src, FFT_LOGN);
 * if (ret == 0)
 *     Success
 * Else
 *     Fail
 * ret = hpm_dsp_cifft_rd4_f32(src, FFT_LOGN);
 * if (ret == 0)
 *     Success
 * Else
 *     Fail
 *
 * This example also serves as a reference for examples of Q31 or Q15 Radix-4 CFFT and
 * CIFFT functions.
 * </pre>
 */
static inline int32_t hpm_dsp_cfft_rd4_f32(float32_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
	return tpt_cfft_f32(src, m, false);
#else
	return riscv_dsp_cfft_rd4_f32(src, m);
#endif
#endif
}
/**
 * @brief cifft_rd4 of f32 vectors (in-place radix-4 inverse complex FFT).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set as 4, 6, 8 or 10
 * @return 0 success; -1 failure
 */
static inline int32_t hpm_dsp_cifft_rd4_f32(float32_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
	return tpt_cfft_f32(src, m, true);
#else
	return riscv_dsp_cifft_rd4_f32(src, m);
#endif
#endif
}
/**
 * @brief cfft_rd4 of q15 vectors (in-place radix-4 forward complex FFT).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set as 4, 6, 8 or 10
 * @return 0 success; -1 failure
 *
 * @b Note:
 *
 * The input and output formats are listed below. To satisfy the input format corresponding to
 * your input size, you may need to perform an arithmetic shift operation before calling this
 * function.
 */
static inline int32_t hpm_dsp_cfft_rd4_q15(q15_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
	return tpt_cfft_q15(src, m, false);
#else
	return riscv_dsp_cfft_rd4_q15(src, m);
#endif
#endif
}
/**
 * @brief cifft_rd4 of q15 vectors (in-place radix-4 inverse complex FFT).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set as 4, 6, 8 or 10
 * @return 0 success; -1 failure
 *
 * @b Note:
 *
 * The input and output formats are listed below. To satisfy the input format corresponding to
 * your input size, you may need to perform an arithmetic shift operation before calling this
 * function.
 */
static inline int32_t hpm_dsp_cifft_rd4_q15(q15_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
	return tpt_cfft_q15(src, m, true);
#else
	return riscv_dsp_cifft_rd4_q15(src, m);
#endif
#endif
}
/**
 * @brief cfft_rd4 of q31 vectors (in-place radix-4 forward complex FFT).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set as 4, 6, 8 or 10
 * @return 0 success; -1 failure
 *
 * @b Note:
 *
 * The input and output formats are listed below. To satisfy the input format corresponding to
 * your input size, you may need to perform an arithmetic shift operation before calling this
 * function.
 */
static inline int32_t hpm_dsp_cfft_rd4_q31(q31_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
	return tpt_cfft_q31(src, m, false);
#else
	return riscv_dsp_cfft_rd4_q31(src, m);
#endif
#endif
}
/**
 * @brief cifft_rd4 of q31 vectors (in-place radix-4 inverse complex FFT).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set as 4, 6, 8 or 10
 * @return 0 success; -1 failure
 *
 * @b Note:
 *
 * The input and output formats are listed below. To satisfy the input format corresponding to
 * your input size, you may need to perform an arithmetic shift operation before calling this
 * function.
 */
static inline int32_t hpm_dsp_cifft_rd4_q31(q31_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
	return tpt_cfft_q31(src, m, true);
#else
	return riscv_dsp_cifft_rd4_q31(src, m);
#endif
#endif
}
/**
 * @brief cfft of f32 vectors (in-place forward complex FFT).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 13
 *
 * @b Example
 * <pre>
 * Given 128 samples (that is, FFT_LOGN = 7), the example of floating-point CFFT and
 * CIFFT is as follows:
 * \#define FFT_LOGN 7
 * float32_t src[2* (1 << FFT_LOGN)] = {};
 * int32_t ret;
 * hpm_dsp_cfft_f32(src, FFT_LOGN);
 * hpm_dsp_cifft_f32(src, FFT_LOGN);
 *
 * This example also serves as a reference for examples of F16, F64, Q31 and Q15 CFFT and
 * CIFFT functions.
 * </pre>
 */
static inline void hpm_dsp_cfft_f32(float32_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
	tpt_cfft_f32(src, m, false);
#else
	riscv_dsp_cfft_f32(src, m);
#endif
#endif
}
/**
 * @brief cfft of f64 vectors (in-place forward complex FFT).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 13
 */
static inline void hpm_dsp_cfft_f64(float64_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
	tpt_cfft_f64(src, m, false);
#else
	riscv_dsp_cfft_f64(src, m);
#endif
#endif
}
/**
 * @brief cifft of f32 vectors (in-place inverse complex FFT).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 13
 */
static inline void hpm_dsp_cifft_f32(float32_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
	tpt_cfft_f32(src, m, true);
#else
	riscv_dsp_cifft_f32(src, m);
#endif
#endif
}
/**
 * @brief cifft of f64 vectors (in-place inverse complex FFT).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 13
 */
static inline void hpm_dsp_cifft_f64(float64_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
	tpt_cfft_f64(src, m, true);
#else
	riscv_dsp_cifft_f64(src, m);
#endif
#endif
}
/**
 * @brief cfft of q15 vectors (in-place forward complex FFT).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 13
 *
 * @b Note:
 *
 * The input and output formats are listed below. To satisfy the input format corresponding to
 * your input size, you may need to perform an arithmetic shift operation before calling this
 * function.
 */
static inline void hpm_dsp_cfft_q15(q15_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
	tpt_cfft_q15(src, m, false);
#else
	riscv_dsp_cfft_q15(src, m);
#endif
#endif
}
/**
 * @brief cifft of q15 vectors (in-place inverse complex FFT).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 13
 *
 * @b Note:
 *
 * The input and output formats are listed below. To satisfy the input format corresponding to
 * your input size, you may need to perform an arithmetic shift operation before calling this
 * function.
 */
static inline void hpm_dsp_cifft_q15(q15_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
	tpt_cfft_q15(src, m, true);
#else
	riscv_dsp_cifft_q15(src, m);
#endif
#endif
}
/**
 * @brief cfft of q31 vectors (in-place forward complex FFT).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 13
 *
 * @b Note:
 *
 * The input and output formats are listed below. To satisfy the input format corresponding to
 * your input size, you may need to perform an arithmetic shift operation before calling this
 * function.
 */
static inline void hpm_dsp_cfft_q31(q31_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
	tpt_cfft_q31(src, m, false);
#else
	riscv_dsp_cfft_q31(src, m);
#endif
#endif
}
/**
 * @brief cifft of q31 vectors (in-place inverse complex FFT).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 13
 *
 * @b Note:
 *
 * The input and output formats are listed below. To satisfy the input format corresponding to
 * your input size, you may need to perform an arithmetic shift operation before calling this
 * function.
 */
static inline void hpm_dsp_cifft_q31(q31_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
	tpt_cfft_q31(src, m, true);
#else
	riscv_dsp_cifft_q31(src, m);
#endif
#endif
}
/**
 * @brief rfft of f32 vectors (in-place real FFT).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 4 to 14
 * @return 0 success; -1 failure
 *
 * @b Example
 * <pre>
 * Given 128 samples (that is, FFT_LOGN = 7), the example of floating-point RFFT and RIFFT
 * is as follows:
 * \#define FFT_LOGN 7
 * float32_t src[(1 << FFT_LOGN)] = {};
 * int32_t ret;
 * ret = hpm_dsp_rfft_f32(src, FFT_LOGN);
 * if (ret == 0)
 *     Success
 * else
 *     Fail
 * ret = hpm_dsp_rifft_f32(src, FFT_LOGN);
 * if (ret == 0)
 *     Success
 * else
 *     Fail
 *
 * This example also serves as a reference for examples of Q31 or Q15 RFFT and RIFFT
 * functions.
 * </pre>
 */
static inline int32_t hpm_dsp_rfft_f32(float32_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
#ifdef __zcc__
	/* tpt API takes separate dst/src pointers; pass src twice for in-place operation. */
	return tpt_rfft_f32(src, src, m, false);
#else
	return riscv_dsp_rfft_f32(src, m);
#endif
#endif
}
/**
 * @brief rfft of f64 vectors (in-place real FFT).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 4 to 14
 * @return 0 success; -1 failure
 */
static inline int32_t hpm_dsp_rfft_f64(float64_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	return riscv_dsp_rfft_f64(src, m);
#endif
}
/**
 * @brief rifft of f32 vectors (in-place real inverse FFT).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 4 to 14
 * @return 0 success; -1 failure
 */
static inline int32_t hpm_dsp_rifft_f32(float32_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	return riscv_dsp_rifft_f32(src, m);
#endif
}
/**
 * @brief rifft of f64 vectors (in-place real inverse FFT).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 4 to 14
 * @return 0 success; -1 failure
 */
static inline int32_t hpm_dsp_rifft_f64(float64_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	return riscv_dsp_rifft_f64(src, m);
#endif
}
/**
 * @brief rfft of q15 vectors (in-place real FFT).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 4 to 14
 * @return 0 success; -1 failure
 *
 * @b Note:
 *
 * The input and output formats are listed below. To satisfy the input format corresponding to
 * your input size, you may need to perform an arithmetic shift operation before calling this
 * function.
 */
static inline int32_t hpm_dsp_rfft_q15(q15_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	return riscv_dsp_rfft_q15(src, m);
#endif
}
/**
 * @brief rifft of q15 vectors (in-place real inverse FFT).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 4 to 14
 * @return 0 success; -1 failure
 *
 * @b Note:
 *
 * The input and output formats are listed below. To satisfy the input format corresponding to
 * your input size, you may need to perform an arithmetic shift operation before calling this
 * function.
 */
static inline int32_t hpm_dsp_rifft_q15(q15_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	return riscv_dsp_rifft_q15(src, m);
#endif
}
/**
 * @brief rfft of q31 vectors (in-place real FFT).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 4 to 14
 * @return 0 success; -1 failure
 *
 * @b Note:
 *
 * The input and output formats are listed below. To satisfy the input format corresponding to
 * your input size, you may need to perform an arithmetic shift operation before calling this
 * function.
 */
static inline int32_t hpm_dsp_rfft_q31(q31_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	return riscv_dsp_rfft_q31(src, m);
#endif
}
/**
 * @brief rifft of q31 vectors (in-place real inverse FFT).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 4 to 14
 * @return 0 success; -1 failure
 *
 * @b Note:
 *
 * The input and output formats are listed below. To satisfy the input format corresponding to
 * your input size, you may need to perform an arithmetic shift operation before calling this
 * function.
 */
static inline int32_t hpm_dsp_rifft_q31(q31_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	return riscv_dsp_rifft_q31(src, m);
#endif
}
/**
 * @brief DCT type II of f32 vectors (in-place).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 8
 *
 *
 * @b Example
 * <pre>
 * Given 256 samples (that is, FFT_LOGN = 8), the example of floating-point DCT type II and
 * IDCT is as follows:
 * \#define FFT_LOGN 8
 * float32_t src[(1 << FFT_LOGN)] = {};
 * hpm_dsp_dct_f32(src, FFT_LOGN);
 * hpm_dsp_idct_f32(src, FFT_LOGN);
 * This example also serves as a reference for examples of Q31 or Q15 DCT type II and IDCT
 * functions.
 * </pre>
 */
static inline void hpm_dsp_dct_f32(float32_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	riscv_dsp_dct_f32(src, m);
#endif
}
/**
 * @brief IDCT (inverse DCT type II) of f32 vectors (in-place).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 8
 *
 */
static inline void hpm_dsp_idct_f32(float32_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	riscv_dsp_idct_f32(src, m);
#endif
}
/**
 * @brief DCT type II of q15 vectors (in-place).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 8
 *
 *
 * @b Note:
 *
 * The input and output formats are listed below. To satisfy the input format corresponding to
 * your input size, you may need to perform an arithmetic shift operation before calling this
 * function.
 */
static inline void hpm_dsp_dct_q15(q15_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	riscv_dsp_dct_q15(src, m);
#endif
}
/**
 * @brief IDCT (inverse DCT type II) of q15 vectors (in-place).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 8
 *
 *
 * @b Note:
 *
 * The input and output formats are listed below. To satisfy the input format corresponding to
 * your input size, you may need to perform an arithmetic shift operation before calling this
 * function.
 */
static inline void hpm_dsp_idct_q15(q15_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	riscv_dsp_idct_q15(src, m);
#endif
}
/**
 * @brief DCT type II of q31 vectors (in-place).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 8
 *
 *
 * @b Note:
 *
 * The input and output formats are listed below. To satisfy the input format corresponding to
 * your input size, you may need to perform an arithmetic shift operation before calling this
 * function.
 */
static inline void hpm_dsp_dct_q31(q31_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	riscv_dsp_dct_q31(src, m);
#endif
}
/**
 * @brief IDCT (inverse DCT type II) of q31 vectors (in-place).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 8
 *
 *
 * @b Note:
 *
 * The input and output formats are listed below. To satisfy the input format corresponding to
 * your input size, you may need to perform an arithmetic shift operation before calling this
 * function.
 */
static inline void hpm_dsp_idct_q31(q31_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	riscv_dsp_idct_q31(src, m);
#endif
}
/**
 * @brief DCT type IV of f32 vectors (in-place).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 7
 *
 *
 * @b Example
 * <pre>
 * Given 128 samples (that is, FFT_LOGN = 7), the example of floating-point DCT or IDCT type
 * IV transform is as follows:
 * \#define FFT_LOGN 7
 * float32_t src[(1 << FFT_LOGN)] = {};
 * hpm_dsp_dct4_f32(src, FFT_LOGN);
 * hpm_dsp_idct4_f32(src, FFT_LOGN);
 * This example also serves as a reference for examples of Q31 or Q15 DCT type IV and IDCT
 * functions.
 * </pre>
 */
static inline void hpm_dsp_dct4_f32(float32_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	riscv_dsp_dct4_f32(src, m);
#endif
}
/**
 * @brief IDCT type IV of f32 vectors (in-place).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 7
 *
 */
static inline void hpm_dsp_idct4_f32(float32_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	riscv_dsp_idct4_f32(src, m);
#endif
}
/**
 * @brief DCT type IV of q15 vectors (in-place).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 7
 *
 *
 * @b Note:
 *
 * The input and output formats are listed below. To satisfy the input format corresponding to
 * your input size, you may need to perform an arithmetic shift operation before calling this
 * function.
 */
static inline void hpm_dsp_dct4_q15(q15_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	riscv_dsp_dct4_q15(src, m);
#endif
}
/**
 * @brief IDCT type IV of q15 vectors (in-place).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 7
 *
 *
 * @b Note:
 *
 * The input and output formats are listed below. To satisfy the input format corresponding to
 * your input size, you may need to perform an arithmetic shift operation before calling this
 * function.
 */
static inline void hpm_dsp_idct4_q15(q15_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	riscv_dsp_idct4_q15(src, m);
#endif
}
/**
 * @brief DCT type IV of q31 vectors (in-place).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 7
 *
 *
 * @b Note:
 *
 * The input and output formats are listed below. To satisfy the input format corresponding to
 * your input size, you may need to perform an arithmetic shift operation before calling this
 * function.
 */
static inline void hpm_dsp_dct4_q31(q31_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	riscv_dsp_dct4_q31(src, m);
#endif
}
/**
 * @brief IDCT type IV of q31 vectors (in-place).
 * @param[in, out] src pointer of the input vector. After the function is executed, the
 * output will be stored in the input vector.
 * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 7
 *
 *
 * @b Note:
 *
 * The input and output formats are listed below. To satisfy the input format corresponding to
 * your input size, you may need to perform an arithmetic shift operation before calling this
 * function.
 */
static inline void hpm_dsp_idct4_q31(q31_t *src, uint32_t m)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	riscv_dsp_idct4_q31(src, m);
#endif
}
- /**
- * @brief Software implementation does not depend on any hardware
- *
- */
/**
 * @brief Construct a new hpm software cfft float object
 *
 * @param src requires twice the space of the other interfaces: elements 0..n-1 hold the
 *            input data, elements n..2n-1 are used as scratch buffers, and elements
 *            0..n-1 hold the output data when the function returns
 * @param m 2^m sampling points, each sample consisting of a real and an imaginary part
 */
- void hpm_software_cfft_float(float *src, uint32_t m);
- #endif
- #if defined(HPMSOC_HAS_HPMSDK_FFA) && defined(HPM_EN_MATH_DSP_LIB)
- #include "hpm_ffa_drv.h"
- #include "hpm_soc.h"
- /**
- * @brief The ffa module requires the user to pay attention to cache operations
- *
- */
- /**
- * @brief fft calculation using ffa hardware acceleration unit, q15 format
- *
- * @param[in,out] src pointer of the input vector. After the function is executed,
- * the output will be stored in the input vector.
- * The complex data in the input vector are arranged as [real, imaginary,real, imaginary..., real, imaginary].
- * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 9
- */
- static inline void hpm_ffa_cfft_q15(q15_t *src, uint32_t m)
- {
- fft_xfer_t xfer = { 0 };
- xfer.num_points = 1 << m;
- xfer.src = src;
- xfer.dst = src;
- xfer.is_ifft = false;
- xfer.src_data_type = FFA_DATA_TYPE_COMPLEX_Q15;
- xfer.dst_data_type = FFA_DATA_TYPE_COMPLEX_Q15;
- ffa_calculate_fft_blocking(HPM_FFA, &xfer);
- }
- /**
- * @brief fft calculation using ffa hardware acceleration unit, q31 format
- *
- * @param[in,out] src pointer of the input vector. After the function is executed,
- * the output will be stored in the input vector.
- * The complex data in the input vector are arranged as [real, imaginary,real, imaginary..., real, imaginary].
- * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 9
- */
- static inline void hpm_ffa_cfft_q31(q31_t *src, uint32_t m)
- {
- fft_xfer_t xfer = { 0 };
- xfer.num_points = 1 << m;
- xfer.src = src;
- xfer.dst = src;
- xfer.is_ifft = false;
- xfer.src_data_type = FFA_DATA_TYPE_COMPLEX_Q31;
- xfer.dst_data_type = FFA_DATA_TYPE_COMPLEX_Q31;
- ffa_calculate_fft_blocking(HPM_FFA, &xfer);
- }
#if defined(HPM_IP_FEATURE_FFA_FP32) && HPM_IP_FEATURE_FFA_FP32
/**
 * @brief fft calculation using ffa hardware acceleration unit, fp32 format (in-place).
 *
 * @param[in,out] src pointer of the complex input vector, arranged as
 * [real, imaginary, ..., real, imaginary]; overwritten with the output.
 * @param[in] m base 2 logarithm value of the sample number
 */
static inline void hpm_ffa_cfft_f32(float *src, uint32_t m)
{
	fft_xfer_t xfer = {
		.num_points = 1 << m,
		.src = src,
		.dst = src,
		.is_ifft = false,
		.src_data_type = FFA_DATA_TYPE_COMPLEX_FP32,
		.dst_data_type = FFA_DATA_TYPE_COMPLEX_FP32,
	};
	/* Floating-point mode: enable the FP bias and program the coefficient,
	 * output and input max indices before starting the blocking transfer
	 * (call order preserved from the original implementation). */
	ffa_enable_fp_bias(HPM_FFA);
	ffa_set_coef_max_index(HPM_FFA, 0);
	ffa_set_output_max_index(HPM_FFA, 20);
	ffa_set_input_max_index(HPM_FFA, 20 - m);
	ffa_calculate_fft_blocking(HPM_FFA, &xfer);
}
#endif
- /**
- * @brief ifft calculation using ffa hardware acceleration unit, q15 format
- *
- * @param[in,out] src pointer of the input vector. After the function is executed,
- * the output will be stored in the input vector.
- * The complex data in the input vector are arranged as [real, imaginary,real, imaginary..., real, imaginary].
- * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 9
- */
- static inline void hpm_ffa_cifft_q15(q15_t *src, uint32_t m)
- {
- fft_xfer_t xfer = { 0 };
- xfer.num_points = 1 << m;
- xfer.src = src;
- xfer.dst = src;
- xfer.is_ifft = true;
- xfer.src_data_type = FFA_DATA_TYPE_COMPLEX_Q15;
- xfer.dst_data_type = FFA_DATA_TYPE_COMPLEX_Q15;
- ffa_calculate_fft_blocking(HPM_FFA, &xfer);
- }
- /**
- * @brief ifft calculation using ffa hardware acceleration unit, q31 format
- *
- * @param[in,out] src pointer of the input vector. After the function is executed,
- * the output will be stored in the input vector.
- * The complex data in the input vector are arranged as [real, imaginary,real, imaginary..., real, imaginary].
- * @param[in] m base 2 logarithm value of the sample number and it can be set from 3 to 9
- */
- static inline void hpm_ffa_cifft_q31(q31_t *src, uint32_t m)
- {
- fft_xfer_t xfer = { 0 };
- xfer.num_points = 1 << m;
- xfer.src = src;
- xfer.dst = src;
- xfer.is_ifft = true;
- xfer.src_data_type = FFA_DATA_TYPE_COMPLEX_Q31;
- xfer.dst_data_type = FFA_DATA_TYPE_COMPLEX_Q31;
- ffa_calculate_fft_blocking(HPM_FFA, &xfer);
- }
#if defined(HPM_IP_FEATURE_FFA_FP32) && HPM_IP_FEATURE_FFA_FP32
/**
 * @brief ifft calculation using ffa hardware acceleration unit, fp32 format (in-place).
 *
 * @param[in,out] src pointer of the complex input vector, arranged as
 * [real, imaginary, ..., real, imaginary]; overwritten with the output.
 * @param[in] m base 2 logarithm value of the sample number
 *
 * NOTE(review): the index programming differs from hpm_ffa_cfft_f32
 * (output_max_index 10 / input_max_index 20 here vs 20 / 20 - m there) —
 * presumably intentional scaling for the inverse direction, but worth
 * confirming against the FFA programming guide.
 */
static inline void hpm_ffa_cifft_f32(float *src, uint32_t m)
{
	fft_xfer_t xfer = { 0 };
	xfer.num_points = 1 << m;
	xfer.src = src;
	xfer.dst = src;
	xfer.is_ifft = true;
	xfer.src_data_type = FFA_DATA_TYPE_COMPLEX_FP32;
	xfer.dst_data_type = FFA_DATA_TYPE_COMPLEX_FP32;
	ffa_enable_fp_bias(HPM_FFA);
	ffa_set_coef_max_index(HPM_FFA, 0x0);
	ffa_set_output_max_index(HPM_FFA, 10);
	ffa_set_input_max_index(HPM_FFA, 20);
	ffa_calculate_fft_blocking(HPM_FFA, &xfer);
}
#endif
- #endif
- #endif
- /**
- * @}
- *
- */
- #ifdef HPM_MATH_DSP_UTILS
/**
 * @defgroup utils DSP Utils Functions
 * This set of functions implements sine, cosine, arctangent, and square root.
 * There are separate functions for Q15, Q31, and floating-point data.
 * @ingroup hpmmath
 * @{
 */
- #ifdef HPM_EN_MATH_DSP_LIB
- #ifdef __zcc__
- #include <tpt_math.h>
- #endif
- #include "riscv_dsp_utils_math.h"
// Cosine and Sine
/**
 * @brief Cosine of the floating-point input.
 * @param[in] src input value (radian)
 * @return cosine value of the input
 */
static inline float32_t hpm_dsp_cos_f32(float32_t src)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	return riscv_dsp_cos_f32(src);
#endif
}
/**
 * @brief Cosine of the Q31 input.
 * @param[in] src input value in Q31 format
 * @return cosine value of the input in Q31 format
 */
static inline q31_t hpm_dsp_cos_q31(q31_t src)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	return riscv_dsp_cos_q31(src);
#endif
}
/**
 * @brief Cosine of the Q15 input.
 * @param[in] src input value in Q15 format
 * @return cosine value of the input in Q15 format
 */
static inline q15_t hpm_dsp_cos_q15(q15_t src)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	return riscv_dsp_cos_q15(src);
#endif
}
/**
 * @brief Sine of the floating-point input.
 * @param[in] src input value (radian)
 * @return sine value of the input
 */
static inline float32_t hpm_dsp_sin_f32(float32_t src)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	return riscv_dsp_sin_f32(src);
#endif
}
#if defined (__riscv_zfh)
/**
 * @brief Sine of the half-precision floating-point input.
 * Only available when the toolchain supports the Zfh extension.
 * @param[in] src input value (radian)
 * @return Sine value of the input
 */
static inline float16_t hpm_dsp_sin_f16(float16_t src)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	return riscv_dsp_sin_f16(src);
#endif
}
#endif
/**
 * @brief Sine of the Q31 input.
 * @param[in] src input value in Q31 format
 * @return sine value of the input in Q31 format
 */
static inline q31_t hpm_dsp_sin_q31(q31_t src)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	return riscv_dsp_sin_q31(src);
#endif
}
/**
 * @brief Sine of the Q15 input.
 * @param[in] src input value in Q15 format
 * @return sine value of the input in Q15 format
 */
static inline q15_t hpm_dsp_sin_q15(q15_t src)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	return riscv_dsp_sin_q15(src);
#endif
}
// Arc tangent
/**
 * @brief Arc tangent of the floating-point input.
 * @param[in] src input value
 * @return arc tangent of the input
 */
static inline float32_t hpm_dsp_atan_f32(float32_t src)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	return riscv_dsp_atan_f32(src);
#endif
}
/**
 * @brief Arc tangent of the Q31 input.
 * @param[in] src input value in Q31 format
 * @return arc tangent of the input in Q31 format
 */
static inline q31_t hpm_dsp_atan_q31(q31_t src)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	return riscv_dsp_atan_q31(src);
#endif
}
/**
 * @brief Arc tangent of the Q15 input.
 * @param[in] src input value in Q15 format
 * @return arc tangent of the input in Q15 format
 */
static inline q15_t hpm_dsp_atan_q15(q15_t src)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	return riscv_dsp_atan_q15(src);
#endif
}
/**
 * @brief Two-argument arc tangent of the floating-point inputs.
 * @param[in] srcy numerator (y) input value
 * @param[in] src2 denominator (x) input value
 * @return arc tangent of srcy / src2
 */
static inline float32_t hpm_dsp_atan2_f32(float32_t srcy, float32_t src2)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	return riscv_dsp_atan2_f32(srcy, src2);
#endif
}
/**
 * @brief Two-argument arc tangent of the Q15 inputs.
 * @param[in] srcy numerator (y) input value in Q15 format
 * @param[in] src2 denominator (x) input value in Q15 format
 * @return arc tangent of srcy / src2 in Q15 format
 */
static inline q15_t hpm_dsp_atan2_q15(q15_t srcy, q15_t src2)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	return riscv_dsp_atan2_q15(srcy, src2);
#endif
}
/**
 * @brief Two-argument arc tangent of the Q31 inputs.
 * @param[in] srcy numerator (y) input value in Q31 format
 * @param[in] src2 denominator (x) input value in Q31 format
 * @return arc tangent of srcy / src2 in Q31 format
 */
static inline q31_t hpm_dsp_atan2_q31(q31_t srcy, q31_t src2)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	return riscv_dsp_atan2_q31(srcy, src2);
#endif
}
// Square Root
/**
 * @brief Square root of the floating-point input.
 * @param[in] src the input value.
 * @return the square root of input.
 */
static inline float32_t hpm_dsp_sqrt_f32(float32_t src)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	return riscv_dsp_sqrt_f32(src);
#endif
}
/**
 * @brief Square root of the q31 input.
 * @param[in] src the input value.
 * @return the square root of input.
 */
static inline q31_t hpm_dsp_sqrt_q31(q31_t src)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
	return riscv_dsp_sqrt_q31(src);
#endif
}
- /**
- * @brief Square root of the q15 input.
- * @param[in] src the input value.
- * @return the suqare root of input.
- */
- static inline q15_t hpm_dsp_sqrt_q15(q15_t src)
- {
- #if HPM_DSP_CORE == HPM_DSP_HW_NDS32
- return riscv_dsp_sqrt_q15(src);
- #endif
- }
// Convert function
/**
 * @brief Convert a floating-point vector to Q15.
 * @param[in] *src the input vector point.
 * @param[out] *dst the output vector point.
 * @param[in] size size of vector.
 */
static inline void hpm_dsp_convert_f32_q15(float32_t *src, q15_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_convert_f32_q15(src, dst, size);
#endif
}
/**
 * @brief Convert a floating-point vector to Q31.
 * @param[in] *src the input vector point.
 * @param[out] *dst the output vector point.
 * @param[in] size size of vectors.
 */
static inline void hpm_dsp_convert_f32_q31(float32_t *src, q31_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
/* The zcc toolchain ships its own conversion kernel; note its operand
 * order is (dst, src), the reverse of the riscv_dsp_* convention. */
#ifdef __zcc__
    tpt_f32_to_q31(dst, src, size);
#else
    riscv_dsp_convert_f32_q31(src, dst, size);
#endif
#endif
}
/**
 * @brief Convert a floating-point vector to Q7.
 * @param[in] *src the input vector point.
 * @param[out] *dst the output vector point.
 * @param[in] size size of vectors.
 */
static inline void hpm_dsp_convert_f32_q7(float32_t *src, q7_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_convert_f32_q7(src, dst, size);
#endif
}
/**
 * @brief Convert a Q15 vector to floating.
 * @param[in] *src the input vector point.
 * @param[out] *dst the output vector point.
 * @param[in] size size of vector.
 */
static inline void hpm_dsp_convert_q15_f32(q15_t *src, float32_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_convert_q15_f32(src, dst, size);
#endif
}
/**
 * @brief Convert a Q15 vector to Q31.
 * @param[in] *src the input vector point.
 * @param[out] *dst the output vector point.
 * @param[in] size size of vector.
 */
static inline void hpm_dsp_convert_q15_q31(q15_t *src, q31_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_convert_q15_q31(src, dst, size);
#endif
}
/**
 * @brief Convert a Q15 vector to Q7.
 * @param[in] *src the input vector point.
 * @param[out] *dst the output vector point.
 * @param[in] size size of vector.
 */
static inline void hpm_dsp_convert_q15_q7(q15_t *src, q7_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_convert_q15_q7(src, dst, size);
#endif
}
/**
 * @brief Convert a Q31 vector to floating.
 * @param[in] *src the input vector point.
 * @param[out] *dst the output vector point.
 * @param[in] size size of vector.
 */
static inline void hpm_dsp_convert_q31_f32(q31_t *src, float32_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
/* zcc variant takes (dst, src); riscv_dsp variant takes (src, dst). */
#ifdef __zcc__
    tpt_q31_to_f32(dst, src, size);
#else
    riscv_dsp_convert_q31_f32(src, dst, size);
#endif
#endif
}
/**
 * @brief Convert a Q31 vector to Q15.
 * @param[in] *src the input vector point.
 * @param[out] *dst the output vector point.
 * @param[in] size size of vector.
 */
static inline void hpm_dsp_convert_q31_q15(q31_t *src, q15_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_convert_q31_q15(src, dst, size);
#endif
}
/**
 * @brief Convert a Q31 vector to Q7.
 * @param[in] *src the input vector point.
 * @param[out] *dst the output vector point.
 * @param[in] size size of vector.
 */
static inline void hpm_dsp_convert_q31_q7(q31_t *src, q7_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_convert_q31_q7(src, dst, size);
#endif
}
/**
 * @brief Convert a Q7 vector to floating.
 * @param[in] *src the input vector point.
 * @param[out] *dst the output vector point.
 * @param[in] size size of vector.
 */
static inline void hpm_dsp_convert_q7_f32(q7_t *src, float32_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_convert_q7_f32(src, dst, size);
#endif
}
/**
 * @brief Convert a Q7 vector to Q15.
 * @param[in] *src the input vector point.
 * @param[out] *dst the output vector point.
 * @param[in] size size of vector.
 */
static inline void hpm_dsp_convert_q7_q15(q7_t *src, q15_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_convert_q7_q15(src, dst, size);
#endif
}
/**
 * @brief Convert a Q7 vector to Q31.
 * @param[in] *src the input vector point.
 * @param[out] *dst the output vector point.
 * @param[in] size size of vector.
 */
static inline void hpm_dsp_convert_q7_q31(q7_t *src, q31_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_convert_q7_q31(src, dst, size);
#endif
}
// Duplicate function
/**
 * @brief Duplicate the floating vector
 * @param[in] *src the input vector point.
 * @param[out] *dst the output vector point.
 * @param[in] size size of vectors.
 */
static inline void hpm_dsp_dup_f32(float32_t *src, float32_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_dup_f32(src, dst, size);
#endif
}
/**
 * @brief Duplicate the Q15 vector
 * @param[in] *src the input vector point.
 * @param[out] *dst the output vector point.
 * @param[in] size size of vectors.
 */
static inline void hpm_dsp_dup_q15(q15_t *src, q15_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_dup_q15(src, dst, size);
#endif
}
/**
 * @brief Duplicate the Q31 vector
 * @param[in] *src the input vector point.
 * @param[out] *dst the output vector point.
 * @param[in] size size of vectors.
 */
static inline void hpm_dsp_dup_q31(q31_t *src, q31_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_dup_q31(src, dst, size);
#endif
}
/**
 * @brief Duplicate the Q7 vector
 * @param[in] *src the input vector point.
 * @param[out] *dst the output vector point.
 * @param[in] size size of vectors.
 */
static inline void hpm_dsp_dup_q7(q7_t *src, q7_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_dup_q7(src, dst, size);
#endif
}
// Set function
/**
 * @brief Set the floating-point vector.
 * @param[in] val specify floating-point value.
 * @param[out] *dst the output vector point.
 * @param[in] size size of the vector.
 */
static inline void hpm_dsp_set_f32(float32_t val, float32_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_set_f32(val, dst, size);
#endif
}
/**
 * @brief Set the Q15 vector.
 * @param[in] val specify Q15 value.
 * @param[out] *dst the output vector point.
 * @param[in] size size of the vector.
 */
static inline void hpm_dsp_set_q15(q15_t val, q15_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_set_q15(val, dst, size);
#endif
}
/**
 * @brief Set the Q31 vector.
 * @param[in] val specify Q31 value.
 * @param[out] *dst the output vector point.
 * @param[in] size size of the vector.
 */
static inline void hpm_dsp_set_q31(q31_t val, q31_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_set_q31(val, dst, size);
#endif
}
/**
 * @brief Set the Q7 vector.
 * @param[in] val specify Q7 value.
 * @param[out] *dst the output vector point.
 * @param[in] size size of the vector.
 */
static inline void hpm_dsp_set_q7(q7_t val, q7_t *dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_set_q7(val, dst, size);
#endif
}
/**
 * @brief Weighted Sum of the floating-point vector.
 * @param[in] *src points to the input vector.
 * @param[in] *weight points to the weighted vector.
 * @param[in] size size of the vectors.
 * @return Weighted Sum value.
 *
 */
static inline float32_t hpm_dsp_weighted_sum_f32(const float32_t *src, const float32_t *weight, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    return riscv_dsp_weighted_sum_f32(src, weight, size);
#endif
}
/**
 * @brief Barycenter of the floating-point type.
 * @param[in] *src points to the input vector.
 * @param[in] *weights points to the weighted vector.
 * @param[out] *out points to the out vector.
 * @param[in] numofvec number of vectors.
 * @param[in] dimofvec dimension of each vector.
 *
 */
static inline void hpm_dsp_barycenter_f32(const float32_t *src, const float32_t *weights, float32_t *out, uint32_t numofvec, uint32_t dimofvec)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_barycenter_f32(src, weights, out, numofvec, dimofvec);
#endif
}
/**
 * @brief Calculate exponential value of f32 vector.
 * @param[in] src input value
 * @return exponential value of the input
 */
static inline float32_t hpm_dsp_exp_f32(float32_t src)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    return riscv_dsp_exp_f32(src);
#endif
}
#if defined (__riscv_zfh)
/**
 * @brief Calculate exponential value of f16 vector.
 * @param[in] src input value
 * @return exponential value of the input
 */
static inline float16_t hpm_dsp_exp_f16(float16_t src)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    return riscv_dsp_exp_f16(src);
#endif
}
#endif
/**
 * @brief Calculate sigmoid value of f32 vector.
 * @param[in] src input value
 * @return sigmoid value of the input
 */
static inline float32_t hpm_dsp_sigmoid_f32(float32_t src)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    return riscv_dsp_sigmoid_f32(src);
#endif
}
#if defined (__riscv_zfh)
/**
 * @brief Calculate sigmoid value of f16 vector.
 * @param[in] src input value
 * @return sigmoid value of the input
 */
static inline float16_t hpm_dsp_sigmoid_f16(float16_t src)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    return riscv_dsp_sigmoid_f16(src);
#endif
}
#endif
/**
 * @brief Calculate the natural logarithm value of f32 vector.
 * @param[in] src input value
 * @return natural logarithm value of the input
 */
static inline float32_t hpm_dsp_log_f32(float32_t src)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    return riscv_dsp_log_f32(src);
#endif
}
#if defined (__riscv_zfh)
/**
 * @brief Calculate the natural logarithm value of f16 vector.
 * @param[in] src input value
 * @return natural logarithm value of the input
 */
static inline float16_t hpm_dsp_log_f16(float16_t src)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    return riscv_dsp_log_f16(src);
#endif
}
#endif
- /**
- * @}
- *
- */
- #endif
- #endif
- #ifdef HPM_MATH_DSP_SORT
- /**
- * @defgroup sort DSP Sort Functions
- * The generic sort function sorts elements of a vector by the algorithm and sorting order specified
- * in its instance structure. The algorithms to be chosen from to perform the generic sorting
- * include bitonic sort, bubble sort, heap sort, insertion sort, quick sort and selection sort.
- * Andes DSP library only supports the generic sort function for floating-point data.
- * @ingroup hpmmath
- * @{
- */
- #ifdef HPM_EN_MATH_DSP_LIB
- #include "riscv_dsp_sort_math.h"
/**
 * @brief Initialize the instance structure for the generic sort.
 *
 * @param[in,out] instance pointer of the instance structure
 * @param[in] alg desired sorting algorithm
 * @param[in] order desired sorting order
 *
 * @b Note:
 *
 * 1. This function has to be called to initialize the instance structure before the function
 * riscv_dsp_sort_f32 is executed. Please refer to code examples.
 *
 * 2. The possible sorting algorithms for the generic sorting (i.e., options for alg) include
 * - RISCV_DSP_SORT_BITONIC bitonic sort
 * - RISCV_DSP_SORT_BUBBLE bubble sort
 * - RISCV_DSP_SORT_HEAP heap sort
 * - RISCV_DSP_SORT_INSERTION insertion sort
 * - RISCV_DSP_SORT_QUICK quick sort
 * - RISCV_DSP_SORT_SELECTION selection sort
 *
 * 3. The possible sorting orders for the generic sorting (i.e., options for order) include
 * - RISCV_DSP_SORT_DESCENDING descending order
 * - RISCV_DSP_SORT_ASCENDING ascending order
 */
static inline void hpm_dsp_sort_init_f32(riscv_dsp_sort_f32_t * instance, riscv_dsp_sort_alg alg, riscv_dsp_sort_order order)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_sort_init_f32(instance, alg, order);
#endif
}
/**
 * @brief Generic sorting function
 *
 * @param[in] instance pointer of the instance structure
 * @param[in] src pointer of the input vector
 * @param[out] dst pointer of the output vector
 * @param[in] size number of elements in a vector
 *
 * @b Note:
 *
 * 1. The possible sorting algorithms for the generic sorting (i.e., options for alg) include
 * - RISCV_DSP_SORT_BITONIC bitonic sort
 * - RISCV_DSP_SORT_BUBBLE bubble sort
 * - RISCV_DSP_SORT_HEAP heap sort
 * - RISCV_DSP_SORT_INSERTION insertion sort
 * - RISCV_DSP_SORT_QUICK quick sort
 * - RISCV_DSP_SORT_SELECTION selection sort
 *
 * 2. The possible sorting orders for the generic sorting (i.e., options for order) include
 * - RISCV_DSP_SORT_DESCENDING descending order
 * - RISCV_DSP_SORT_ASCENDING ascending order
 *
 * 3. To ensure correct results, you must initialize the instance structure with the function
 * riscv_dsp_sort_init_f32 before using this function riscv_dsp_sort_f32. For
 * how to use the two functions, please refer to the code examples below.
 *
 * @b Example
 * <pre>
 * With the input size as 100, sorting order as ascending and sorting algorithm as quick
 * sort, the code example of generic sorting is as follows:
 *
 * \#define size 100
 * riscv_dsp_sort_f32_t *instance;
 * float32_t src[size] = {};
 * float32_t dst[size];
 * riscv_dsp_sort_init_f32(instance, RISCV_DSP_SORT_QUICK,
 * RISCV_DSP_SORT_ASCENDING);
 * riscv_dsp_sort_f32(instance, src, dst, size);
 * </pre>
 */
static inline void hpm_dsp_sort_f32(const riscv_dsp_sort_f32_t * instance,float32_t * src, float32_t * dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_sort_f32(instance, src, dst, size);
#endif
}
/**
 * @brief Initialize the instance structure for the merge sort.
 *
 * @param[in, out] instance pointer of the instance structure.
 * @param[in] order desired sorting order
 * @param[in] buf pointer of the working buffer
 *
 * @b Note:
 *
 * 1. This function has to be called to initialize the instance structure before the function
 * riscv_dsp_sort_merge_f32 is executed. Please refer to Section 2.11.2.2 for a code
 * example.
 *
 * 2. The possible sorting orders for the merge sorting (i.e., options for order) include
 * - RISCV_DSP_SORT_DESCENDING descending order
 * - RISCV_DSP_SORT_ASCENDING ascending order
 */
static inline void hpm_dsp_sort_merge_init_f32(riscv_dsp_sort_merge_f32_t * instance, riscv_dsp_sort_order order, float32_t * buf)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_sort_merge_init_f32(instance, order, buf);
#endif
}
/**
 * @brief Merge sort
 *
 * @param[in] instance pointer of the instance structure.
 * @param[in] src pointer of the input vector
 * @param[out] dst pointer of the output vector
 * @param[in] size number of elements in a vector
 *
 * @b Note:
 *
 * 1. The possible sorting orders for the merge sorting (i.e., options for order) include
 * - RISCV_DSP_SORT_DESCENDING descending order
 * - RISCV_DSP_SORT_ASCENDING ascending order
 *
 * 2. To ensure correct results, you must initialize the instance structure with the function
 * riscv_dsp_sort_merge_init_f32 before using this function
 * riscv_dsp_sort_merge_f32. For how to use the two functions, please refer to the
 * code example below.
 *
 * @b Example
 * <pre>
 * With the input size as 100 and sorting order as descending, the code example of merge
 * sorting is as follows:
 *
 * \#define size 100
 * riscv_dsp_sort_merge_f32_t *instance;
 * float32_t src[size] = {};
 * float32_t buf[size];
 * float32_t dst[size];
 * riscv_dsp_sort_merge_init_f32(instance, RISCV_DSP_SORT_DESCENDING, buf);
 * riscv_dsp_sort_merge_f32(instance, src, dst, size);
 * </pre>
 */
static inline void hpm_dsp_sort_merge_f32(const riscv_dsp_sort_merge_f32_t * instance, float32_t * src, float32_t * dst, uint32_t size)
{
#if HPM_DSP_CORE == HPM_DSP_HW_NDS32
    riscv_dsp_sort_merge_f32(instance, src, dst, size);
#endif
}
- #endif
- #endif
- #ifdef HPM_MATH_NN_TINYENGINE
- #ifdef HPM_EN_MATH_DSP_LIB
- #include "riscv_math_types.h"
- #include <string.h>
- #include "riscv_simd_convert.h"
- #define LEFT_SHIFT(_shift) (_shift > 0 ? _shift : 0)
- #define RIGHT_SHIFT(_shift) (_shift > 0 ? 0 : -_shift)
- #define Q31_MAX ((q31_t)(0x7FFFFFFFL))
- #define Q31_MIN ((q31_t)(0x80000000L))
/**
 * @brief Write two q15 elements packed in one q31 value and post-increment the pointer.
 * @param[in,out] pQ15  pointer to pointer holding the destination address; advanced by 2 elements
 * @param[in]     value two q15 values packed into a q31 (low 16 bits first)
 */
static inline void write_q15x2_ia(
    q15_t **pQ15,
    q31_t value)
{
    q31_t val = value;
    /* low half -> element 0, high half -> element 1 */
    (*pQ15)[0] = (val & 0x0FFFF);
    (*pQ15)[1] = (val >> 16) & 0x0FFFF;
    *pQ15 += 2;
}
/**
 * @brief Read 2 q15 elements and post increment pointer.
 *
 * @param[in] in_q15 Pointer to pointer that holds address of input.
 * @return q31 value
 *
 * @note Loads through a q31_t pointer cast; assumes the source address is
 *       suitably aligned for a 32-bit access — TODO confirm on the target core.
 */
__STATIC_FORCEINLINE q31_t hpm_nn_read_q15x2_ia(const q15_t **in_q15)
{
    q31_t val;
    val = *(q31_t *)(*in_q15);
    *in_q15 += 2;
    return val;
}
/**
 * @brief Saturating doubling high multiply. Result matches
 * NEON instruction VQRDMULH.
 * @param[in] m1 Multiplicand
 * @param[in] m2 Multiplier
 * @return Result of multiplication.
 *
 */
__STATIC_FORCEINLINE q31_t hpm_nn_sat_doubling_high_mult(const q31_t m1, const q31_t m2)
{
    q31_t result = 0;
    /* Rounding offset: +2^30 when the product is non-negative,
     * 1 - 2^30 when it is negative (rounds the high half to nearest). */
    q63_t mult = 1 << 30;
    if ((m1 < 0) ^ (m2 < 0)) {
        mult = 1 - mult;
    }
    /* 64-bit product plus rounding offset, then take the high 32 bits. */
    mult = mult + (q63_t)m1 * m2;
    result = mult / (1UL << 31);
    /* Q31_MIN * Q31_MIN would overflow the doubling-high result; saturate. */
    if ((m1 == m2) && (m1 == (int32_t)Q31_MIN)) {
        result = Q31_MAX;
    }
    return result;
}
/**
 * @brief Rounding divide by power of two.
 * @param[in] dividend - Dividend
 * @param[in] exponent - Divisor = power(2, exponent)
 * Range: [0, 31]
 * @return Rounded result of division. Midpoint is rounded away from zero.
 *
 */
__STATIC_FORCEINLINE q31_t hpm_nn_divide_by_power_of_two(const q31_t dividend, const q31_t exponent)
{
    q31_t result = 0;
    /* Bits shifted out of the dividend, used to decide the rounding. */
    const q31_t remainder_mask = (1l << exponent) - 1;
    int32_t remainder = remainder_mask & dividend;
    result = dividend >> exponent;
    q31_t threshold = remainder_mask >> 1;
    /* For negative results the midpoint rounds away from zero, so the
     * remainder must strictly exceed a one-larger threshold. */
    if (result < 0) {
        threshold++;
    }
    if (remainder > threshold) {
        result++;
    }
    return result;
}
/**
 * @brief Requantize a value: apply a left shift (if shift > 0), a saturating
 *        doubling-high multiply by the fixed-point multiplier, then a rounding
 *        right shift (if shift < 0).
 * @param[in] val        value to requantize
 * @param[in] multiplier fixed-point multiplier
 * @param[in] shift      net shift; positive = left, negative = right
 * @return requantized value
 */
__STATIC_FORCEINLINE q31_t hpm_nn_requantize(const q31_t val, const q31_t multiplier, const q31_t shift)
{
    return hpm_nn_divide_by_power_of_two(hpm_nn_sat_doubling_high_mult(val * (1 << LEFT_SHIFT(shift)), multiplier),
                                         RIGHT_SHIFT(shift));
}
/**
 * @brief Read 4 q7 from q7 pointer and post increment pointer.
 * @param[in] in_q7 Pointer to pointer that holds address of input.
 * @return q31 value
 *
 * @note Loads through a q31_t pointer cast; assumes 32-bit-capable alignment
 *       of the source — TODO confirm on the target core.
 */
__STATIC_FORCEINLINE q31_t hpm_nn_read_q7x4_ia(const q7_t **in_q7)
{
    q31_t val;
    val = *(q31_t *)(*in_q7);
    *in_q7 += 4;
    return val;
}
/**
 * @brief read and expand one q7 word into two q15 words with reordering
 *
 * Uses the SIMD sign-extension intrinsics (__SXTB16 / __SXTB16_ROR); the
 * expanded halves come out interleaved (bytes 0/2 and 1/3) rather than in
 * original byte order.
 */
__STATIC_FORCEINLINE const q7_t *read_and_pad_reordered(const q7_t *source, q31_t *out1, q31_t *out2)
{
    q31_t inA = hpm_nn_read_q7x4_ia(&source);
    *out2 = __SXTB16_ROR(inA, 8);
    *out1 = __SXTB16(inA);
    return source;
}
/**
 * @brief read and expand one q7 word into two q15 words
 *
 * Like read_and_pad_reordered, but the extra __PKHTB/__PKHBT packing restores
 * the original byte order in the two q15 outputs.
 */
__STATIC_FORCEINLINE const q7_t *read_and_pad(const q7_t *source, q31_t *out1, q31_t *out2)
{
    q31_t inA = hpm_nn_read_q7x4_ia(&source);
    q31_t inAbuf1 = __SXTB16_ROR(inA, 8);
    q31_t inAbuf2 = __SXTB16(inA);
    *out2 = __PKHTB(inAbuf1, inAbuf2, 16);
    *out1 = __PKHBT(inAbuf2, inAbuf1, 16);
    return source;
}
/**
 * @brief Read 4 s8 from s8 pointer and post increment pointer.
 * @param[in] in_s8 Pointer to pointer that holds address of input.
 * @return q31 value
 *
 * @note Loads through an int32_t pointer cast; assumes 32-bit-capable
 *       alignment of the source — TODO confirm on the target core.
 */
__STATIC_FORCEINLINE int32_t hpm_nn_read_s8x4_ia(const int8_t **in_s8)
{
    int32_t val;
    val = *(int32_t *)(*in_s8);
    *in_s8 += 4;
    return val;
}
/**
 * @brief Convert a signed 8-bit vector to signed 16-bit, adding a constant
 *        offset to every element.
 * @param[in]  src        pointer of the s8 input vector
 * @param[out] dst        pointer of the s16 output vector
 * @param[in]  block_size number of elements to convert
 * @param[in]  offset     value added to each element after sign extension
 */
__STATIC_FORCEINLINE void hpm_nn_q7_to_q15_with_offset(const int8_t *src, int16_t *dst, int32_t block_size, int16_t offset)
{
    int32_t block_cnt;
    /* Run the below code for cores that support SIMD instructions */
    int32_t in_q7x4;
    int32_t in_q15x2_1;
    int32_t in_q15x2_2;
    int32_t out_q15x2_1;
    int32_t out_q15x2_2;
    /* loop unrolling */
    block_cnt = block_size >> 2;
    /* First part of the processing with loop unrolling. Compute 4 outputs at a time. */
    /* Pack the offset into both q15 halves so it can be added pairwise. */
    const int32_t offset_q15x2 = __PKHBT(offset, offset, 16);
    while (block_cnt > 0) {
        /* convert from s8 to s16 and then store the results in the destination buffer */
        in_q7x4 = hpm_nn_read_s8x4_ia(&src);
        /* Extract and sign extend each of the four s8 values to s16 */
        in_q15x2_1 = __SXTAB16(offset_q15x2, __ROR(in_q7x4, 8));
        in_q15x2_2 = __SXTAB16(offset_q15x2, in_q7x4);
        out_q15x2_2 = __PKHTB(in_q15x2_1, in_q15x2_2, 16);
        out_q15x2_1 = __PKHBT(in_q15x2_2, in_q15x2_1, 16);
        write_q15x2_ia(&dst, out_q15x2_1);
        write_q15x2_ia(&dst, out_q15x2_2);
        block_cnt--;
    }
    /* Handle left over samples */
    block_cnt = block_size % 0x4;
    while (block_cnt > 0) {
        *dst++ = (int16_t)*src++ + offset;
        /* Decrement the loop counter */
        block_cnt--;
    }
}
- #endif
- #endif
- #ifdef HPM_MATH_NN_ACTIVATION
- #ifdef HPM_EN_MATH_NN_LIB
- #if defined(__zcc__)
- #include "tpt_nn_activation.h"
- #else
- #include "riscv_nn_activation.h"
- #endif
- /**
- * @defgroup nnactivation NN Activation Functions
- * @ingroup hpmmath
- * @brief The activation functions are used to filter out some input data. They
- * include sigmoid, tanh and ReLU (Rectified Linear Unit) functions.
- *
- * @{
- */
/**
 * @brief This function uses the sigmoid or tanh function to perform
 * activation for signed 8-bit integer input vectors.
 * @param[in,out] in_out pointer of the input/output vector
 * @param[in] size number of elements in the input/output vector
 * @param[in] int_bits number of the bits in the integer part, which is
 * supposed to be smaller than 4
 * @param[in] act_fun selection of activation functions. See the Note
 * below for details.
 *
 * @note
 * The available activation functions for selection include:
 * - NN_SIGMOID: Use the sigmoid activation function
 * - NN_TANH: Use the tanh activation function
 *
 * @b Example:
 * @code
 * #define SIZE 32
 * q7_t in_out[SIZE] = {...};
 * hpm_nn_activate_s8(in_out, SIZE, 0, NN_SIGMOID);
 * @endcode
 */
static inline void hpm_nn_activate_s8(q7_t *in_out,
                                      uint32_t size,
                                      uint16_t int_bits,
                                      riscv_nn_activation_fun act_fun)
{
#if defined(__zcc__)
    tpt_nn_activate_s8(in_out, size, int_bits, act_fun);
#else
    riscv_nn_activate_s8(in_out, size, int_bits, act_fun);
#endif
}
/**
 * @brief This function uses sigmoid or tanh function to perform
 * activation for signed 16-bit integer input vectors.
 * @param[in,out] in_out pointer of the input/output vector
 * @param[in] size number of elements in the input/output vector
 * @param[in] int_bits number of the bits in the integer part, which is
 * supposed to be smaller than 4
 * @param[in] act_fun selection of activation functions. See the Note
 * below for details.
 *
 * @note
 * The available activation functions for selection include:
 * - NN_SIGMOID: Use the sigmoid activation function
 * - NN_TANH: Use the tanh activation function
 */
static inline void hpm_nn_activate_s16(q15_t *in_out,
                                       uint32_t size,
                                       uint16_t int_bits,
                                       riscv_nn_activation_fun act_fun)
{
#if defined(__zcc__)
    tpt_nn_activate_s16(in_out, size, int_bits, act_fun);
#else
    riscv_nn_activate_s16(in_out, size, int_bits, act_fun);
#endif
}
- /**
- * @brief This function uses the leaky ReLU function to perform
- * activation for signed 8-bit integer input vectors.
- * @param[in,out] in_out pointer of the input/output vector
- * @param[in] size number of elements in the input/output vector
- * @param[in] slope slope value to be multiplied with the negative
- * inputs. The result will be right shifted 15 bits
- * to scale back to signed 8-bit integer.
- *
- * @b Example:
- * @code
- * #define SIZE 1024
- * q15_t slope = 16384;
- * q7_t in_out[SIZE] = {...};
- * hpm_nn_leaky_relu_s8(in_out, SIZE, slope);
- * @endcode
- */
- static inline void hpm_nn_leaky_relu_s8(q7_t *in_out,
- uint32_t size,
- q15_t slope)
- #if defined(__zcc__)
- tpt_nn_leaky_relu_q7(in_out, in_out, size, slope);
- #else
- riscv_nn_leaky_relu_s8(in_out, size, slope);
- #endif
- }
/**
 * @brief This function uses the ReLU function to perform activation
 * for signed 8-bit integer input vectors.
 * @param[in,out] data pointer of the input/output vector
 * @param[in] size number of elements in the input/output vector
 * @param[in] max_val maximum value to limit the output vector
 */
static inline void hpm_nn_relu_any_s8(q7_t *data, uint16_t size, q7_t max_val)
{
#if defined(__zcc__)
    tpt_nn_relu_any_q7(data, size, max_val);
#else
    riscv_nn_relu_any_s8(data, size, max_val);
#endif
}
/**
 * @brief This function uses the ReLU function to perform activation
 * for signed 8-bit integer input vectors.
 * @param[in,out] in_out pointer of the input/output vector
 * @param[in] size number of elements in the input/output vector
 *
 * @b Example:
 * @code
 * #define H 16
 * #define W 16
 * #define CH 5
 * #define NUM (H * W *CH)
 * q7_t in_out[NUM] = {...};
 * hpm_nn_relu_s8(in_out, NUM);
 * @endcode
 */
static inline void hpm_nn_relu_s8(q7_t *in_out, uint32_t size)
{
#if defined(__zcc__)
    tpt_nn_relu_q7(in_out, size);
#else
    riscv_nn_relu_s8(in_out, size);
#endif
}
/**
 * @brief This function uses the ReLU function to perform activation
 * for signed 16-bit integer input vectors.
 * @param[in,out] in_out pointer of the input/output vector
 * @param[in] size number of elements in the input/output vector
 */
static inline void hpm_nn_relu_s16(q15_t *in_out, uint32_t size)
{
#if defined(__zcc__)
    tpt_nn_relu_q15(in_out, size);
#else
    riscv_nn_relu_s16(in_out, size);
#endif
}
#ifdef __riscv_zfh
/**
 * @brief This function uses the sigmoid function to perform
 * activation for 16-bit half-precision floating point input
 * vectors.
 * @param[in] in_vec pointer of the input vector
 * @param[in] size number of elements in the input/output vector
 * @param[out] out_vec pointer of the output vector
 * @return This function returns 0.
 */
static inline int32_t hpm_nn_sigmoid_f16(const float16_t *in_vec,
                                         uint32_t size,
                                         float16_t *out_vec)
{
#if defined(__zcc__)
    return tpt_nn_sigmoid_f16(in_vec, size, out_vec);
#else
    return riscv_nn_sigmoid_f16(in_vec, size, out_vec);
#endif
}
/**
 * @brief This function uses the tanh function to perform activation
 * for 16-bit half-precision floating point input vectors.
 * @param[in] in_vec pointer of the input vector
 * @param[in] size number of elements in the input/output vector
 * @param[out] out_vec pointer of the output vector
 * @return This function returns 0.
 */
static inline int32_t hpm_nn_tanh_f16(const float16_t *in_vec,
                                      uint32_t size,
                                      float16_t *out_vec)
{
#if defined(__zcc__)
    return tpt_nn_tanh_f16(in_vec, size, out_vec);
#else
    return riscv_nn_tanh_f16(in_vec, size, out_vec);
#endif
}
- #endif
/**
 * @}
 */
- #endif
- #endif
- #ifdef HPM_MATH_NN_BASIC
- #ifdef HPM_EN_MATH_NN_LIB
- #if defined(__zcc__)
- #include "tpt_nn_basic.h"
- #else
- #include "riscv_nn_basic.h"
- #endif
- /**
- * @defgroup nnbasic NN Basic Functions
- * @ingroup hpmmath
- * @brief The basic functions are used to perform element-wise basic arithmetic
- * operations.
- *
- * @{
- */
/**
 * @brief This function performs element-wise addition for signed
 * 8-bit integer input vectors with two-stage shift.
 * @param[in] in_tensor1 pointer of the first input vector
 * @param[in] in_tensor2 pointer of the second input vector
 * @param[in] scale1 pointer of the first scaling vector
 * @param[in] scale2 pointer of the second scaling vector
 * @param[in] size number of elements in the input vectors
 * @param[in] pre_rshift right shift amount for the accumulator before
 * the scaling
 * @param[in] out_scale scaling value for the accumulator
 * @param[in] post_rshift right shift amount for the accumulator after the
 * scaling
 * @param[out] out pointer of the element-wise addition results
 *
 * @b Example:
 * @code
 * #define SIZE 1024
 * uint16_t pre_rshift = 8; // The addition results of both scaled input
 * // tensors are in the range of 24-bit; thus, the
 * // pre_rshift should be in the range of [0, 24].
 * // Here we scale down the results into 16-bit
 * // range.
 * uint16_t out_scale = 3; // Scale up the result into 18-bit range.
 * uint16_t post_rshift = 11; // Scale down the result into 7-bit range.
 *
 * q7_t in_tensor1[SIZE] = {...};
 * q7_t in_tensor2[SIZE] = {...};
 * q15_t scale1[SIZE] = {...};
 * q15_t scale2[SIZE] = {...};
 * q7_t out[SIZE];
 *
 * hpm_nn_add_s8_sym(in_tensor1, in_tensor2, scale1, scale2, SIZE, pre_rshift,
 * out_scale, post_rshift, out);
 * @endcode
 */
static inline void hpm_nn_add_s8_sym(const q7_t *in_tensor1,
                                     const q7_t *in_tensor2,
                                     const int16_t *scale1,
                                     const int16_t *scale2,
                                     const uint32_t size,
                                     const uint16_t pre_rshift,
                                     const uint16_t out_scale,
                                     const uint16_t post_rshift,
                                     q7_t *out)
{
#if defined(__zcc__)
    tpt_nn_add_s8_sym(in_tensor1, in_tensor2, scale1, scale2, size, pre_rshift,
                      out_scale, post_rshift, out);
#else
    riscv_nn_add_s8_sym(in_tensor1, in_tensor2, scale1, scale2, size, pre_rshift,
                        out_scale, post_rshift, out);
#endif
}
/**
 * @brief This function performs element-wise addition for signed
 * 8-bit integer input vectors with two-stage shift with
 * rounding.
 * @param[in] in_tensor1 pointer of the first input vector
 * @param[in] in_tensor2 pointer of the second input vector
 * @param[in] scale1 scaling value for the first input vector. It
 * should be in the range of 0 to {2^23}.
 * @param[in] scale2 scaling value for the second input vector. It
 * should be in the range of 0 to {2^23}.
 * @param[in] size number of elements in the input vectors
 * @param[in] pre_rshift right shift amount for the accumulator before
 * the scaling
 * @param[in] out_scale scaling value for the accumulator
 * @param[in] post_rshift right shift amount for the accumulator after the
 * scaling
 * @param[out] out pointer of element-wise addition results
 *
 */
static inline void hpm_nn_add_s8_sym_round(const q7_t *in_tensor1,
                                           const q7_t *in_tensor2,
                                           const uint32_t scale1,
                                           const uint32_t scale2,
                                           const uint32_t size,
                                           const uint16_t pre_rshift,
                                           const uint16_t out_scale,
                                           const uint16_t post_rshift,
                                           q7_t *out)
{
#if defined(__zcc__)
    tpt_nn_add_s8_sym_round(in_tensor1, in_tensor2, scale1, scale2, size,
                            pre_rshift, out_scale, post_rshift, out);
#else
    riscv_nn_add_s8_sym_round(in_tensor1, in_tensor2, scale1, scale2, size,
                              pre_rshift, out_scale, post_rshift, out);
#endif
}
- /**
- * @brief This function performs element-wise addition for signed
- * 8-bit integer input vectors.
- * @param[in] in_tensor1 pointer of the first input vector
- * @param[in] in_tensor2 pointer of the second input vector
- * @param[in] in_offset1 offset value for first input vector. It should
- * be in the range of -127 to 128.
- * @param[in] in_scale1 scaling value for first input vector
- * @param[in] in_rshift1 right shift amount for the first input vector
- * @param[in] in_offset2 offset value for the second input vector. It
- * should be in the range of -127 to 128.
- * @param[in] in_scale2 scaling value for the second input vector
- * @param[in] in_rshift2 right shift amount for the second input vector
- * @param[in] lshift left shift amount for the first and second input
- * vectors
- * @param[out] out pointer of the element-wise addition results
- * @param[in] out_offset offset value for the output
- * @param[in] out_scale scaling value for the output
- * @param[in] out_rshift right shift amount for the output
- * @param[in] act_min minimum value that the output is limited to
- * @param[in] act_max maximum value that the output is limited to
- * @param[in] size number of elements in the input vectors
- * @return This function returns 0.
- *
- * @b Example:
- * @code
- * #define SIZE 1024
- * int32_t in_offset1 = 16; // Offset for in_tensor1
- * int32_t in_scale1 = (1<<28); // Scale down in_tensor1 by 1/2^3
- * int32_t in_rshift1 = 3; // Scale down in_tensor1 by 1/2^3
- * int32_t in_offset2 = 17; // Offset for in_tensor2
- * int32_t in_scale2 = (1<<28); // Scale down in_tensor2 by 1/2^3
- * int32_t in_rshift2 = 3; // Scale down in_tensor2 by 1/2^3
- * int32_t lshift = 10; // Scale up the input tensors by 2^10 times
- * int32_t out_offset = 18; // Offset for the output tensor
- * int32_t out_scale = (1<<30); // Scale down the output tensor by 1/2
- * int32_t out_rshift = 4; // Scale down the output tensor by 1/2^4
- * int32_t act_min = 0xffffffa3; // Limit the outputs in the range of
- * // [0xffffffa3, 0x0000005d]
- * int32_t act_max = 0x0000005d; // Limit the outputs in the range of
- * // [0xffffffa3, 0x0000005d]
- *
- * int8_t in_tensor1[SIZE] = {...};
- * int8_t in_tensor2[SIZE] = {...};
- * int8_t out[SIZE];
- *
- * hpm_nn_ew_add_s8_asym(in_tensor1, in_tensor2, in_offset1, in_scale1,
- * in_rshift1, in_offset2, in_scale2, in_rshift2, lshift, out, out_offset,
- * out_scale, out_rshift, act_min, act_max, SIZE);
- * @endcode
- */
- /* Thin dispatch wrapper: Zcc toolchain -> TPT library; otherwise -> RISC-V
-  * NN library. Arguments are forwarded unchanged in both cases. */
- static inline int hpm_nn_ew_add_s8_asym(const int8_t *in_tensor1,
- const int8_t *in_tensor2,
- const int32_t in_offset1,
- const int32_t in_scale1,
- const int32_t in_rshift1,
- const int32_t in_offset2,
- const int32_t in_scale2,
- const int32_t in_rshift2,
- const int32_t lshift,
- int8_t *out,
- const int32_t out_offset,
- const int32_t out_scale,
- const int32_t out_rshift,
- const int32_t act_min,
- const int32_t act_max,
- const uint32_t size)
- {
- #if defined(__zcc__)
- return tpt_nn_ew_add_s8_asym(in_tensor1, in_tensor2, in_offset1, in_scale1,
- in_rshift1, in_offset2, in_scale2, in_rshift2,
- lshift, out, out_offset, out_scale, out_rshift,
- act_min, act_max, size);
- #else
- return riscv_nn_ew_add_s8_asym(in_tensor1, in_tensor2, in_offset1, in_scale1,
- in_rshift1, in_offset2, in_scale2, in_rshift2,
- lshift, out, out_offset, out_scale, out_rshift,
- act_min, act_max, size);
- #endif
- }
- /**
- * @brief This function performs element-wise multiplication for
- * signed 8-bit integer input vectors.
- * @param[in] in_tensor1 pointer of the first input vector
- * @param[in] in_tensor2 pointer of the second input vector
- * @param[in] in_offset1 offset value for the first input vector. It
- * should be in the range of -127 to 128.
- * @param[in] in_offset2 offset value for the second input vector. It
- * should be in the range of -127 to 128.
- * @param[out] out pointer of element-wise multiplication results
- * @param[in] out_offset offset value for the output
- * @param[in] out_scale scaling value for the output
- * @param[in] out_shift shift amount for the output
- * @param[in] act_min minimum value that the output is limited to
- * @param[in] act_max maximum value that the output is limited to
- * @param[in] size number of elements in the input vectors
- * @return This function returns 0.
- *
- * @b Example:
- * @code
- * #define SIZE 1024
- * int32_t in_offset1 = 16; // Offset for in_tensor1
- * int32_t in_offset2 = 17; // Offset for in_tensor2
- * int32_t out_offset = 18; // Offset for the output tensor
- * int32_t out_scale = (1<<30); // Scale down the output tensor by 1/2
- * int32_t out_shift = -4; // Scale down the output tensor by 1/2^4
- * int32_t act_min = 0xffffffa3; // Limit the outputs in the range of
- * // [0xffffffa3, 0x0000005d]
- * int32_t act_max = 0x0000005d; // Limit the outputs in the range of
- * // [0xffffffa3, 0x0000005d]
- *
- * int8_t in_tensor1[SIZE] = {...};
- * int8_t in_tensor2[SIZE] = {...};
- * int8_t out[SIZE];
- *
- * hpm_nn_ew_mul_s8_asym(in_tensor1, in_tensor2, in_offset1, in_offset2, out,
- * out_offset, out_scale, out_shift, act_min, act_max, SIZE);
- * @endcode
- */
- /* Thin dispatch wrapper: Zcc toolchain -> TPT library; otherwise -> RISC-V
-  * NN library. Arguments are forwarded unchanged in both cases. */
- static inline int hpm_nn_ew_mul_s8_asym(const int8_t *in_tensor1,
- const int8_t *in_tensor2,
- const int32_t in_offset1,
- const int32_t in_offset2,
- int8_t *out,
- const int32_t out_offset,
- const int32_t out_scale,
- const int32_t out_shift,
- const int32_t act_min,
- const int32_t act_max,
- const uint32_t size)
- {
- #if defined(__zcc__)
- return tpt_nn_ew_mul_s8_asym(in_tensor1, in_tensor2, in_offset1, in_offset2,
- out, out_offset, out_scale, out_shift, act_min,
- act_max, size);
- #else
- return riscv_nn_ew_mul_s8_asym(in_tensor1, in_tensor2, in_offset1, in_offset2,
- out, out_offset, out_scale, out_shift, act_min,
- act_max, size);
- #endif
- }
- /**
- * @}
- */
- #endif
- #ifdef HPM_EN_MATH_NN_RVP32_LIB
- #if defined(__zcc__)
- #include "tpt_nn_basic.h"
- #else
- #include "riscv_nn_basic.h"
- #endif
- /**
- * @brief This function performs element-wise addition for signed
- * 8-bit integer input vectors.
- * @param[in] in_tensor1 pointer of the first input vector
- * @param[in] in_tensor2 pointer of the second input vector
- * @param[in] in_offset1 offset value for first input vector. It should
- * be in the range of -127 to 128.
- * @param[in] in_scale1 scaling value for first input vector
- * @param[in] in_rshift1 right shift amount for the first input vector
- * @param[in] in_offset2 offset value for the second input vector. It
- * should be in the range of -127 to 128.
- * @param[in] in_scale2 scaling value for the second input vector
- * @param[in] in_rshift2 right shift amount for the second input vector
- * @param[in] lshift left shift amount for the first and second input
- * vectors
- * @param[out] out pointer of the element-wise addition results
- * @param[in] out_offset offset value for the output
- * @param[in] out_scale scaling value for the output
- * @param[in] out_rshift right shift amount for the output
- * @param[in] act_min minimum value that the output is limited to
- * @param[in] act_max maximum value that the output is limited to
- * @param[in] size number of elements in the input vectors
- * @return This function returns 0.
- *
- *
- * @b Example:
- * @code
- * #define SIZE 1024
- * int32_t in_offset1 = 16; // Offset for in_tensor1
- * int32_t in_scale1 = (1<<28); // Scale down in_tensor1 by 1/2^3
- * int32_t in_rshift1 = 3; // Scale down in_tensor1 by 1/2^3
- * int32_t in_offset2 = 17; // Offset for in_tensor2
- * int32_t in_scale2 = (1<<28); // Scale down in_tensor2 by 1/2^3
- * int32_t in_rshift2 = 3; // Scale down in_tensor2 by 1/2^3
- * int32_t lshift = 10; // Scale up the input tensors by 2^10 times
- * int32_t out_offset = 18; // Offset for the output tensor
- * int32_t out_scale = (1<<30); // Scale down the output tensor by 1/2
- * int32_t out_rshift = 4; // Scale down the output tensor by 1/2^4
- * int32_t act_min = 0xffffffa3; // Limit the outputs in the range of
- * // [0xffffffa3, 0x0000005d]
- * int32_t act_max = 0x0000005d; // Limit the outputs in the range of
- * // [0xffffffa3, 0x0000005d]
- *
- * int8_t in_tensor1[SIZE] = {...};
- * int8_t in_tensor2[SIZE] = {...};
- * int8_t out[SIZE];
- *
- * hpm_nn_ew_add_s8_asym(in_tensor1, in_tensor2, in_offset1, in_scale1,
- * in_rshift1, in_offset2, in_scale2, in_rshift2, lshift, out, out_offset,
- * out_scale, out_rshift, act_min, act_max, SIZE);
- * @endcode
- */
- /* RVP32 variant of the element-wise-add dispatch wrapper: Zcc toolchain ->
-  * TPT library; otherwise -> RISC-V NN library. */
- static inline int hpm_nn_ew_add_s8_asym(const int8_t *in_tensor1,
- const int8_t *in_tensor2,
- const int32_t in_offset1,
- const int32_t in_scale1,
- const int32_t in_rshift1,
- const int32_t in_offset2,
- const int32_t in_scale2,
- const int32_t in_rshift2,
- const int32_t lshift,
- int8_t *out,
- const int32_t out_offset,
- const int32_t out_scale,
- const int32_t out_rshift,
- const int32_t act_min,
- const int32_t act_max,
- const uint32_t size)
- {
- #if defined(__zcc__)
- /* NOTE(review): unlike the other wrappers in this file, this TPT backend
-  * takes the output-side arguments first and receives -out_rshift;
-  * presumably tpt_elementwise_add_s8 expects a signed (left-)shift amount
-  * -- confirm against the TPT API documentation. */
- return tpt_elementwise_add_s8(out, out_offset, out_scale, -out_rshift, act_min,
- act_max, in_tensor1, in_tensor2, in_offset1, in_scale1,
- in_rshift1, in_offset2, in_scale2, in_rshift2,
- lshift, size);
- #else
- return riscv_nn_ew_add_s8_asym(in_tensor1, in_tensor2, in_offset1, in_scale1,
- in_rshift1, in_offset2, in_scale2, in_rshift2,
- lshift, out, out_offset, out_scale, out_rshift,
- act_min, act_max, size);
- #endif
- }
- #endif
- #endif
- #ifdef HPM_MATH_NN_CONCATENATION
- #ifdef HPM_EN_MATH_NN_LIB
- #if defined(__zcc__)
- #include "tpt_nn_concatenation.h"
- #else
- #include "riscv_nn_concatenation.h"
- #endif
- /**
- * @defgroup nnconcatenation NN Concatenation Functions
- * @ingroup hpmmath
- * @brief The concatenation functions are used to concatenate the tensor along
- * the specified axis.
- *
- * @{
- */
- /**
- * @brief This function concatenates the int8_t/uint8_t input tensor along
- * the w-axis with the output tensor.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_x x dimension of the input tensor
- * @param[in] in_tensor_y y dimension of the input tensor
- * @param[in] in_tensor_z z dimension of the input tensor
- * @param[in] in_tensor_w w dimension of the input tensor
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_offset_w offset value to be added to the w axis of the
- * output tensor before the concatenation
- *
- * @note
- * The x, y and z dimension of the output tensor will be the same as those of
- * the input tensor.
- */
- /* Thin dispatch wrapper: Zcc toolchain -> TPT library; otherwise -> RISC-V
-  * NN library. */
- static inline void hpm_nn_concate_s8_w(const int8_t *in_tensor,
- const uint16_t in_tensor_x,
- const uint16_t in_tensor_y,
- const uint16_t in_tensor_z,
- const uint16_t in_tensor_w,
- int8_t *out_tensor,
- const uint32_t out_offset_w)
- {
- #if defined(__zcc__)
- /* NOTE(review): the TPT backend places out_tensor first while the RISC-V
-  * NN backend takes it after the input dimensions; presumably both perform
-  * the same w-axis concatenation -- confirm against the TPT API. */
- tpt_concatenation_s8_w(out_tensor, in_tensor, in_tensor_x, in_tensor_y, in_tensor_z,
- in_tensor_w, out_offset_w);
- #else
- riscv_nn_concate_s8_w(in_tensor, in_tensor_x, in_tensor_y, in_tensor_z,
- in_tensor_w, out_tensor, out_offset_w);
- #endif
- }
- /**
- * @brief This function concatenates the int8_t/uint8_t input tensor along
- * the x-axis with the output tensor.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_x x dimension of the input tensor
- * @param[in] in_tensor_y y dimension of the input tensor
- * @param[in] in_tensor_z z dimension of the input tensor
- * @param[in] in_tensor_w w dimension of the input tensor
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_x x dimension of the output tensor
- * @param[in] out_offset_x offset value to be added to the x axis of the
- * output tensor before the concatenation
- *
- * @note
- * The y, z and w dimensions of the output tensor will be the same as those of
- * the input tensor.
- */
- /* Thin dispatch wrapper: Zcc toolchain -> TPT library; otherwise -> RISC-V
-  * NN library. Arguments are forwarded unchanged in both cases. */
- static inline void hpm_nn_concate_s8_x(const int8_t *in_tensor,
- const uint16_t in_tensor_x,
- const uint16_t in_tensor_y,
- const uint16_t in_tensor_z,
- const uint16_t in_tensor_w,
- int8_t *out_tensor,
- const uint16_t out_tensor_x,
- const uint32_t out_offset_x)
- {
- #if defined(__zcc__)
- tpt_nn_concate_s8_x(in_tensor, in_tensor_x, in_tensor_y, in_tensor_z,
- in_tensor_w, out_tensor, out_tensor_x, out_offset_x);
- #else
- riscv_nn_concate_s8_x(in_tensor, in_tensor_x, in_tensor_y, in_tensor_z,
- in_tensor_w, out_tensor, out_tensor_x, out_offset_x);
- #endif
- }
- /**
- * @brief This function concatenates the int8_t/uint8_t input tensor along
- * the y-axis with the output tensor.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_x x dimension of the input tensor
- * @param[in] in_tensor_y y dimension of the input tensor
- * @param[in] in_tensor_z z dimension of the input tensor
- * @param[in] in_tensor_w w dimension of the input tensor
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_y y dimension of the output tensor
- * @param[in] out_offset_y offset value to be added to the y axis of the
- * output tensor before the concatenation
- *
- * @note
- * The x, z and w dimensions of the output tensor will be the same as those of
- * the input tensor.
- */
- /* Thin dispatch wrapper: Zcc toolchain -> TPT library; otherwise -> RISC-V
-  * NN library. Arguments are forwarded unchanged in both cases. */
- static inline void hpm_nn_concate_s8_y(const int8_t *in_tensor,
- const uint16_t in_tensor_x,
- const uint16_t in_tensor_y,
- const uint16_t in_tensor_z,
- const uint16_t in_tensor_w,
- int8_t *out_tensor,
- const uint16_t out_tensor_y,
- const uint32_t out_offset_y)
- {
- #if defined(__zcc__)
- tpt_nn_concate_s8_y(in_tensor, in_tensor_x, in_tensor_y, in_tensor_z,
- in_tensor_w, out_tensor, out_tensor_y, out_offset_y);
- #else
- riscv_nn_concate_s8_y(in_tensor, in_tensor_x, in_tensor_y, in_tensor_z,
- in_tensor_w, out_tensor, out_tensor_y, out_offset_y);
- #endif
- }
- /**
- * @brief This function concatenates the int8_t/uint8_t input tensor along
- * the z-axis with the output tensor.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_x x dimension of the input tensor
- * @param[in] in_tensor_y y dimension of the input tensor
- * @param[in] in_tensor_z z dimension of the input tensor
- * @param[in] in_tensor_w w dimension of the input tensor
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_z z dimension of the output tensor
- * @param[in] out_offset_z offset value to be added to the z axis of the
- * output tensor before the concatenation
- *
- * @note
- * The x, y and w dimensions of the output tensor will be the same as those of
- * the input tensor.
- */
- /* Thin dispatch wrapper: Zcc toolchain -> TPT library; otherwise -> RISC-V
-  * NN library. Arguments are forwarded unchanged in both cases. */
- static inline void hpm_nn_concate_s8_z(const int8_t *in_tensor,
- const uint16_t in_tensor_x,
- const uint16_t in_tensor_y,
- const uint16_t in_tensor_z,
- const uint16_t in_tensor_w,
- int8_t *out_tensor,
- const uint16_t out_tensor_z,
- const uint32_t out_offset_z)
- {
- #if defined(__zcc__)
- tpt_nn_concate_s8_z(in_tensor, in_tensor_x, in_tensor_y, in_tensor_z,
- in_tensor_w, out_tensor, out_tensor_z, out_offset_z);
- #else
- riscv_nn_concate_s8_z(in_tensor, in_tensor_x, in_tensor_y, in_tensor_z,
- in_tensor_w, out_tensor, out_tensor_z, out_offset_z);
- #endif
- }
- /**
- * @}
- */
- #endif
- #endif
- #ifdef HPM_MATH_NN_CONVOLUTION
- #ifdef HPM_EN_MATH_NN_LIB
- #if defined(__zcc__)
- #include "tpt_nn_convolution.h"
- #else
- #include "riscv_nn_convolution.h"
- #endif
- /**
- * @defgroup nnconvolution NN Convolution Functions
- * @ingroup hpmmath
- * @brief The convolution functions transform the input matrix into a column
- * vector with im2col, and then use matrix-matrix multiplication to get the
- * convolution result.
- *
- * @{
- */
- /**
- * @brief This function performs 1x1 kernels convolution for signed
- * 8-bit integer inputs/outputs in any x and y dimensions with
- * shift-based quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[in] bias_lshift left shift amount for the bias
- * @param[in] out_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its size
- * must be equal to "2 * in_tensor_ch *
- * ker_dim_x * ker_dim_y".
- * @param[in] tmp_buf dummy
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints (see the Note
- * below for details).
- *
- * @note
- * - The input constraints of this function are:
- * - in_tensor_ch is a multiple of 4
- * - out_tensor_ch is a multiple of 2
- * - ker_dim_x is 1
- * - ker_dim_y is 1
- * - pad_x is 0
- * - pad_y is 0
- * - stride_x is 1
- * - stride_y is 1
- *
- * @b Example:
- * @code
- * //Convolve a 160x120x20 input tensor with a 1x1 kernel and generate a
- * //160x120x8 output tensor. Let both dimensions padding be 0 and their
- * //stride be 1.
- *
- * #define IN_X 160
- * #define IN_Y 120
- * #define IN_CH 20
- * #define OUT_CH 8
- * #define KER_DIM_X 1
- * #define KER_DIM_Y 1
- * #define PAD_X 0
- * #define PAD_Y 0
- * #define STRIDE_X 1
- * #define STRIDE_Y 1
- * #define BIAS_LSHIFT 6 //Scale up the bias by 2^6
- * #define OUT_RSHIFT 9 //Scale down the output tensor by 1/2^9
- * #define OUT_X 160
- * #define OUT_Y 120
- *
- * q7_t in_data[IN_CH * IN_X * IN_Y] = {...};
- * q7_t weight[IN_CH * KER_DIM_X * KER_DIM_Y * OUT_CH] = {...};
- * q7_t bias[OUT_CH] = {...};
- * q15_t in_tmp_buf[2 * IN_CH * KER_DIM_X * KER_DIM_Y] = {0};
- * q7_t out_data[OUT_CH * OUT_X * OUT_Y];
- *
- * hpm_nn_conv_1x1_HWC_s8_s8_s8_sft_bias_fast_any(in_data, IN_X, IN_Y,
- * IN_CH, weight, OUT_CH, KER_DIM_X, KER_DIM_Y, PAD_X, PAD_Y, STRIDE_X,
- * STRIDE_Y, bias, BIAS_LSHIFT, OUT_RSHIFT, out_data, OUT_X, OUT_Y,
- * in_tmp_buf, NULL);
- * @endcode
- */
- /* Thin dispatch wrapper: Zcc toolchain -> TPT library; otherwise -> RISC-V
-  * NN library. Arguments are forwarded unchanged in both cases. */
- static inline int32_t hpm_nn_conv_1x1_HWC_s8_s8_s8_sft_bias_fast_any(const q7_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const q7_t *bias,
- const uint16_t bias_lshift,
- const uint16_t out_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf,
- q7_t *tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_1x1_HWC_s8_s8_s8_sft_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, bias_lshift, out_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf, tmp_buf);
- #else
- return riscv_nn_conv_1x1_HWC_s8_s8_s8_sft_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, bias_lshift, out_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf, tmp_buf);
- #endif
- }
- /**
- * @brief This function performs signed 8-bit integer convolution for
- * RGB images with shift-based quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim input tensor dimension
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] bias pointer of the bias vector
- * @param[in] bias_lshift left shift amount for the bias
- * @param[in] out_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be equal to "2 *
- * (3 * ker_dim * ker_dim + 1)".
- * @param[in] tmp_buf temporary buffer for kernel weights. It is
- * required when -mext-vector enabled and its
- * size must be "out_tensor_ch * (3 * ker_dim *
- * ker_dim + 1)".
- * @return This function only returns 0.
- *
- * @b Example:
- * @code
- * //Convolve a 28x28x3 input tensor with a 5x5 kernel and generate a 24x24x20
- * //output tensor. Let both dimensions padding be 0 and their stride be 1.
- *
- * #define IN_DIM 28
- * #define KER_DIM 5
- * #define PAD 0
- * #define STRIDE 1
- * #define BIAS_LSHIFT 6
- * #define OUT_RSHIFT 10
- * #define OUT_CH 20
- * #define OUT_DIM 24
- *
- * q7_t in_data[3 * IN_DIM * IN_DIM] = {...};
- * q7_t weight[3 * KER_DIM * KER_DIM * OUT_CH] = {...};
- * q7_t bias[OUT_CH] = {...};
- * q15_t in_tmp_buf[2 * 3 * KER_DIM * KER_DIM] = {0};
- * q7_t out_data[OUT_CH * OUT_DIM * OUT_DIM];
- *
- * hpm_nn_conv_HWC_s8_s8_s8_RGB_sft_bias(in_data, IN_DIM, weight, OUT_CH,
- * KER_DIM, PAD, STRIDE, bias, BIAS_LSHIFT, OUT_RSHIFT, out_data, OUT_DIM,
- * in_tmp_buf, NULL);
- * @endcode
- */
- /* Thin dispatch wrapper: Zcc toolchain -> TPT library; otherwise -> RISC-V
-  * NN library. Arguments are forwarded unchanged in both cases. */
- static inline int32_t hpm_nn_conv_HWC_s8_s8_s8_RGB_sft_bias(const q7_t *in_tensor,
- const uint16_t in_tensor_dim,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const q7_t *bias,
- const uint16_t bias_lshift,
- const uint16_t out_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf,
- q7_t *tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_s8_s8_s8_RGB_sft_bias(
- in_tensor, in_tensor_dim, ker_weight, out_tensor_ch, ker_dim, pad, stride,
- bias, bias_lshift, out_rshift, out_tensor, out_tensor_dim, in_tmp_buf,
- tmp_buf);
- #else
- return riscv_nn_conv_HWC_s8_s8_s8_RGB_sft_bias(
- in_tensor, in_tensor_dim, ker_weight, out_tensor_ch, ker_dim, pad, stride,
- bias, bias_lshift, out_rshift, out_tensor, out_tensor_dim, in_tmp_buf,
- tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast signed 8-bit integer convolution
- * for RGB images with shift-based quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] bias pointer of the bias vector
- * @param[in] bias_lshift left shift amount for the bias
- * @param[in] out_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be "2 * (3 *
- * ker_dim * ker_dim + 1)".
- * @param[in] wt_tmp_buf temporary buffer for kernel weights. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be "out_tensor_ch *
- * (3 * ker_dim * ker_dim + 1)".
- * @return This function only returns 0.
- *
- * @b Example:
- * @code
- * //Convolve a 28x28x3 input tensor with a 5x5 kernel and generate a 24x24x20
- * //output tensor. Let both dimensions padding be 0 and their stride be 1.
- *
- * #define IN_DIM 28
- * #define KER_DIM 5
- * #define PAD 0
- * #define STRIDE 1
- * #define BIAS_LSHIFT 6
- * #define OUT_RSHIFT 10
- * #define OUT_CH 20
- * #define OUT_DIM 24
- *
- * q7_t in_data[3 * IN_DIM * IN_DIM] = {...};
- * q7_t weight[3 * KER_DIM * KER_DIM * OUT_CH] = {...};
- * q7_t bias[OUT_CH] = {...};
- * q15_t in_tmp_buf[2 * (3 * KER_DIM * KER_DIM + 1)] = {0};
- * q15_t wt_tmp_buf[OUT_CH * (3 * KER_DIM * KER_DIM + 1)];
- * q7_t out_data[OUT_CH * OUT_DIM * OUT_DIM];
- *
- * hpm_nn_conv_HWC_s8_s8_s8_RGB_sft_bias_fast(in_data, IN_DIM, weight,
- * OUT_CH, KER_DIM, PAD, STRIDE, bias, BIAS_LSHIFT, OUT_RSHIFT, out_data,
- * OUT_DIM, in_tmp_buf, wt_tmp_buf);
- * @endcode
- */
- /* Thin dispatch wrapper: Zcc toolchain -> TPT library; otherwise -> RISC-V
-  * NN library. Arguments are forwarded unchanged in both cases. */
- static inline int32_t hpm_nn_conv_HWC_s8_s8_s8_RGB_sft_bias_fast(const q7_t *in_tensor,
- const uint16_t in_tensor_dim,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const q7_t *bias,
- const uint16_t bias_lshift,
- const uint16_t out_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf,
- q15_t *wt_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_s8_s8_s8_RGB_sft_bias_fast(
- in_tensor, in_tensor_dim, ker_weight, out_tensor_ch, ker_dim, pad, stride,
- bias, bias_lshift, out_rshift, out_tensor, out_tensor_dim, in_tmp_buf,
- wt_tmp_buf);
- #else
- return riscv_nn_conv_HWC_s8_s8_s8_RGB_sft_bias_fast(
- in_tensor, in_tensor_dim, ker_weight, out_tensor_ch, ker_dim, pad, stride,
- bias, bias_lshift, out_rshift, out_tensor, out_tensor_dim, in_tmp_buf,
- wt_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs signed 8-bit integer convolution with
- * shift-based quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] bias pointer of the bias vector
- * @param[in] bias_lshift left shift amount for the bias
- * @param[in] out_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for input tensor. It is
- * required when -mext-dsp or -mext-vector is
- * enabled and its size must be equal to "2 *
- * in_tensor_ch * ker_dim * ker_dim".
- * @param[in] tmp_buf dummy
- * @return This function only returns 0.
- *
- * @b Example:
- * @code
- * //Convolve a 28x28x1 input tensor with a 5x5 kernel and generate a 24x24x20
- * //output tensor. Let both dimensions padding be 0 and their stride be 1.
- *
- * #define IN_DIM 28
- * #define IN_CH 1
- * #define KER_DIM 5
- * #define PAD 0
- * #define STRIDE 1
- * #define BIAS_LSHIFT 6
- * #define OUT_RSHIFT 10
- * #define OUT_CH 20
- * #define OUT_DIM 24
- *
- * q7_t in_data[IN_CH * IN_DIM * IN_DIM] = {...};
- * q7_t weight[IN_CH * KER_DIM * KER_DIM * OUT_CH] = {...};
- * q7_t bias[OUT_CH] = {...};
- * q15_t in_tmp_buf[2 * IN_CH * KER_DIM * KER_DIM] = {0};
- * q7_t out_data[OUT_CH * OUT_DIM * OUT_DIM];
- *
- * hpm_nn_conv_HWC_s8_s8_s8_sft_bias(in_data, IN_DIM, IN_CH, weight, OUT_CH,
- * KER_DIM, PAD, STRIDE, bias, BIAS_LSHIFT, OUT_RSHIFT, out_data, OUT_DIM,
- * in_tmp_buf, NULL);
- * @endcode
- */
- /* Thin dispatch wrapper: Zcc toolchain -> TPT library; otherwise -> RISC-V
-  * NN library. Arguments are forwarded unchanged in both cases. */
- static inline int32_t hpm_nn_conv_HWC_s8_s8_s8_sft_bias(const q7_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const q7_t *bias,
- const uint16_t bias_lshift,
- const uint16_t out_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf,
- q7_t *tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_s8_s8_s8_sft_bias(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, bias_lshift, out_rshift, out_tensor,
- out_tensor_dim, in_tmp_buf, tmp_buf);
- #else
- return riscv_nn_conv_HWC_s8_s8_s8_sft_bias(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, bias_lshift, out_rshift, out_tensor,
- out_tensor_dim, in_tmp_buf, tmp_buf);
- #endif
- }
- /**
- * @brief This function performs signed 8-bit integer convolution in
- * any x and y dimensions with shift-based quantization on the
- * outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[in] bias_lshift left shift amount for the bias
- * @param[in] out_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its size
- * must be equal to "2 * in_tensor_ch *
- * ker_dim_x * ker_dim_y".
- * @param[in] tmp_buf dummy
- *
- * @b Example:
- * @code
- * //Convolve a 160x120x3 input tensor with a 3x5 kernel and generate a 80x59x5
- * //output tensor. Let both dimensions padding be 1 and their stride be 2.
- *
- * #define IN_X 160
- * #define IN_Y 120
- * #define IN_CH 3
- * #define OUT_CH 5
- * #define KER_DIM_X 3
- * #define KER_DIM_Y 5
- * #define PAD_X 1
- * #define PAD_Y 1
- * #define STRIDE_X 2
- * #define STRIDE_Y 2
- * #define BIAS_LSHIFT 6
- * #define OUT_RSHIFT 9
- #define OUT_X 80
- #define OUT_Y 59
- *
- * q7_t in_data[IN_CH * IN_X * IN_Y] = {...};
- * q7_t weight[IN_CH * KER_DIM_X * KER_DIM_Y * OUT_CH] = {...};
- * q7_t bias[OUT_CH] = {...};
- * q15_t in_tmp_buf[2 * IN_CH * KER_DIM_X * KER_DIM_Y] = {0};
- * q7_t out_data[OUT_CH * OUT_X * OUT_Y];
- *
- * hpm_nn_conv_HWC_s8_s8_s8_sft_bias_any(in_data, IN_X, IN_Y, IN_CH, weight,
- * OUT_CH, KER_DIM_X, KER_DIM_Y, PAD_X, PAD_Y, STRIDE_X, STRIDE_Y, bias,
- * BIAS_LSHIFT, OUT_RSHIFT, out_data, OUT_X, OUT_Y, in_tmp_buf, NULL);
- * @endcode
- */
- /* Thin dispatch wrapper: Zcc toolchain -> TPT library; otherwise -> RISC-V
-  * NN library. Arguments are forwarded unchanged in both cases. Note this
-  * variant returns void, unlike the other conv wrappers in this group. */
- static inline void hpm_nn_conv_HWC_s8_s8_s8_sft_bias_any(const q7_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const q7_t *bias,
- const uint16_t bias_lshift,
- const uint16_t out_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf,
- q7_t *tmp_buf)
- {
- #if defined(__zcc__)
- tpt_nn_conv_HWC_s8_s8_s8_sft_bias_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, bias_lshift, out_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf, tmp_buf);
- #else
- riscv_nn_conv_HWC_s8_s8_s8_sft_bias_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, bias_lshift, out_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf, tmp_buf);
- #endif
- }
/**
 * @brief This function performs fast signed 8-bit integer convolution
 *        with shift-based quantization on the outputs.
 * @param[in]       in_tensor       pointer of the input tensor
 * @param[in]       in_tensor_dim   dimension of the input tensor
 * @param[in]       in_tensor_ch    number of input tensor channels
 * @param[in]       ker_weight      pointer of kernel weights
 * @param[in]       out_tensor_ch   number of output tensor channels
 * @param[in]       ker_dim         dimension of the filter kernel
 * @param[in]       pad             padding size
 * @param[in]       stride          convolution stride
 * @param[in]       bias            pointer of the bias vector
 * @param[in]       bias_lshift     left shift amount for the bias
 * @param[in]       out_rshift      right shift amount for the output
 * @param[out]      out_tensor      pointer of the output tensor
 * @param[in]       out_tensor_dim  dimension of the output tensor
 * @param[in]       in_tmp_buf      temporary buffer for the input tensor. It is
 *                                  required when -mext-dsp or -mext-vector
 *                                  enabled and its size must be equal to "2 *
 *                                  in_tensor_ch * ker_dim * ker_dim".
 * @param[in]       tmp_buf         dummy
 * @return          This function returns 0 on success; otherwise, it returns -1
 *                  if its inputs do not meet the constraints that in_tensor_ch
 *                  is a multiple of 4 and out_tensor_ch is a multiple of 2.
 *
 * @b Example:
 * @code
 * //Convolve a 12x12x20 input tensor with a 5x5 kernel and generate a 8x8x50
 * //output tensor. Let both dimensions padding be 0 and their stride be 1.
 *
 * #define IN_DIM 12
 * #define IN_CH 20
 * #define KER_DIM 5
 * #define PAD 0
 * #define STRIDE 1
 * #define BIAS_LSHIFT 6
 * #define OUT_RSHIFT 10
 * #define OUT_CH 50
 * #define OUT_DIM 8
 *
 * q7_t in_data[IN_CH * IN_DIM * IN_DIM] = {...};
 * q7_t weight[IN_CH * KER_DIM * KER_DIM * OUT_CH] = {...};
 * q7_t bias[OUT_CH] = {...};
 * q15_t in_tmp_buf[2 * IN_CH * KER_DIM * KER_DIM] = {0};
 * q7_t out_data[OUT_CH * OUT_DIM * OUT_DIM];
 *
 * hpm_nn_conv_HWC_s8_s8_s8_sft_bias_fast(in_data, IN_DIM, IN_CH, weight,
 *     OUT_CH, KER_DIM, PAD, STRIDE, bias, BIAS_LSHIFT, OUT_RSHIFT, out_data,
 *     OUT_DIM, in_tmp_buf, NULL);
 * @endcode
 */
static inline int32_t hpm_nn_conv_HWC_s8_s8_s8_sft_bias_fast(const q7_t *in_tensor,
                                                             const uint16_t in_tensor_dim,
                                                             const uint16_t in_tensor_ch,
                                                             const q7_t *ker_weight,
                                                             const uint16_t out_tensor_ch,
                                                             const uint16_t ker_dim,
                                                             const uint16_t pad,
                                                             const uint16_t stride,
                                                             const q7_t *bias,
                                                             const uint16_t bias_lshift,
                                                             const uint16_t out_rshift,
                                                             q7_t *out_tensor,
                                                             const uint16_t out_tensor_dim,
                                                             q15_t *in_tmp_buf,
                                                             q7_t *tmp_buf)
{
    /* Toolchain dispatch: use the TPT kernel when __zcc__ is defined,
     * otherwise the RISC-V NN library kernel. Both take identical arguments. */
#if defined(__zcc__)
    return tpt_nn_conv_HWC_s8_s8_s8_sft_bias_fast(
        in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
        ker_dim, pad, stride, bias, bias_lshift, out_rshift, out_tensor,
        out_tensor_dim, in_tmp_buf, tmp_buf);
#else
    return riscv_nn_conv_HWC_s8_s8_s8_sft_bias_fast(
        in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
        ker_dim, pad, stride, bias, bias_lshift, out_rshift, out_tensor,
        out_tensor_dim, in_tmp_buf, tmp_buf);
#endif
}
/**
 * @brief This function performs fast signed 8-bit integer convolution
 *        in any x and y dimensions with shift-based quantization on
 *        the outputs.
 * @param[in]       in_tensor           pointer of the input tensor
 * @param[in]       in_tensor_dim_x     x dimension of the input tensor
 * @param[in]       in_tensor_dim_y     y dimension of the input tensor
 * @param[in]       in_tensor_ch        number of input tensor channels
 * @param[in]       ker_weight          pointer of kernel weights
 * @param[in]       out_tensor_ch       number of output tensor channels
 * @param[in]       ker_dim_x           x dimension of the filter kernel
 * @param[in]       ker_dim_y           y dimension of the filter kernel
 * @param[in]       pad_x               padding size in the x dimension
 * @param[in]       pad_y               padding size in the y dimension
 * @param[in]       stride_x            convolution stride in the x dimension
 * @param[in]       stride_y            convolution stride in the y dimension
 * @param[in]       bias                pointer of the bias vector
 * @param[in]       bias_lshift         left shift amount for the bias
 * @param[in]       out_rshift          right shift amount for the output
 * @param[out]      out_tensor          pointer of the output tensor
 * @param[in]       out_tensor_dim_x    x dimension of the output tensor
 * @param[in]       out_tensor_dim_y    y dimension of the output tensor
 * @param[in]       in_tmp_buf          temporary buffer for the input tensor.
 *                                      It is required when -mext-dsp or
 *                                      -mext-vector enabled and its size must
 *                                      be equal to "2 * in_tensor_ch * ker_dim_x
 *                                      * ker_dim_y".
 * @param[in]       tmp_buf             dummy
 * @return          This function returns 0 on success; otherwise, it returns -1
 *                  if its inputs do not meet the constraints that in_tensor_ch
 *                  is a multiple of 4 and out_tensor_ch is a multiple of 2.
 *
 * @b Example:
 * @code
 * //Convolve a 160x120x20 input tensor with a 3x5 kernel and generate a
 * //80x59x8 output tensor. Let both dimensions padding be 1 and their stride
 * //be 2.
 *
 * #define IN_X 160
 * #define IN_Y 120
 * #define IN_CH 20
 * #define OUT_CH 8
 * #define KER_DIM_X 3
 * #define KER_DIM_Y 5
 * #define PAD_X 1
 * #define PAD_Y 1
 * #define STRIDE_X 2
 * #define STRIDE_Y 2
 * #define BIAS_LSHIFT 6
 * #define OUT_RSHIFT 9
 * #define OUT_X 80
 * #define OUT_Y 59
 *
 * q7_t in_data[IN_CH * IN_X * IN_Y] = {...};
 * q7_t weight[IN_CH * KER_DIM_X * KER_DIM_Y * OUT_CH] = {...};
 * q7_t bias[OUT_CH] = {...};
 * q15_t in_tmp_buf[2 * IN_CH * KER_DIM_X * KER_DIM_Y] = {0};
 * q7_t out_data[OUT_CH * OUT_Y * OUT_X];
 *
 * hpm_nn_conv_HWC_s8_s8_s8_sft_bias_fast_any(in_data, IN_X, IN_Y, IN_CH,
 *     weight, OUT_CH, KER_DIM_X, KER_DIM_Y, PAD_X, PAD_Y, STRIDE_X, STRIDE_Y,
 *     bias, BIAS_LSHIFT, OUT_RSHIFT, out_data, OUT_X, OUT_Y, in_tmp_buf,
 *     NULL);
 * @endcode
 */
static inline int32_t hpm_nn_conv_HWC_s8_s8_s8_sft_bias_fast_any(const q7_t *in_tensor,
                                                                 const uint16_t in_tensor_dim_x,
                                                                 const uint16_t in_tensor_dim_y,
                                                                 const uint16_t in_tensor_ch,
                                                                 const q7_t *ker_weight,
                                                                 const uint16_t out_tensor_ch,
                                                                 const uint16_t ker_dim_x,
                                                                 const uint16_t ker_dim_y,
                                                                 const uint16_t pad_x,
                                                                 const uint16_t pad_y,
                                                                 const uint16_t stride_x,
                                                                 const uint16_t stride_y,
                                                                 const q7_t *bias,
                                                                 const uint16_t bias_lshift,
                                                                 const uint16_t out_rshift,
                                                                 q7_t *out_tensor,
                                                                 const uint16_t out_tensor_dim_x,
                                                                 const uint16_t out_tensor_dim_y,
                                                                 q15_t *in_tmp_buf,
                                                                 q7_t *tmp_buf)
{
    /* Toolchain dispatch: TPT kernel under __zcc__, RISC-V NN kernel otherwise. */
#if defined(__zcc__)
    return tpt_nn_conv_HWC_s8_s8_s8_sft_bias_fast_any(
        in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
        out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
        bias, bias_lshift, out_rshift, out_tensor, out_tensor_dim_x,
        out_tensor_dim_y, in_tmp_buf, tmp_buf);
#else
    return riscv_nn_conv_HWC_s8_s8_s8_sft_bias_fast_any(
        in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
        out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
        bias, bias_lshift, out_rshift, out_tensor, out_tensor_dim_x,
        out_tensor_dim_y, in_tmp_buf, tmp_buf);
#endif
}
- /**
- * @brief This function performs signed 16-bit integer convolution
- * with shift-based quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] bias pointer of the bias vector
- * @param[in] bias_lshift left shift amount for the bias
- * @param[in] out_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector is
- * enabled and its size must be equal to
- * "in_tensor_ch * ker_dim * ker_dim".
- * @param[in] tmp_buf dummy
- * @return This function only returns 0.
- *
- * @b Example:
- * @code
- * //Convolve a 28x28x1 input tensor with a 5x5 kernel and generate a 24x24x20
- * //output tensor. Let both dimensions padding be 0 and their stride be 1.
- *
- * #define IN_DIM 28
- * #define IN_CH 1
- * #define KER_DIM 5
- * #define PAD 0
- * #define STRIDE 1
- * #define BIAS_LSHIFT 6
- * #define OUT_RSHIFT 10
- * #define OUT_CH 20
- * #define OUT_DIM 24
- *
- * q15_t input_data[IN_CH * IN_DIM * IN_DIM] = {...};
- * q15_t weight[IN_CH * KER_DIM * KER_DIM * OUT_CH] = {...};
- * q15_t bias[OUT_CH] = {...};
- * q15_t in_tmp_buf[IN_CH * KER_DIM * KER_DIM] = {0};
- * q15_t out_data[OUT_CH * OUT_DIM * OUT_DIM];
- *
- * riscv_nn_conv_HWC_s16_s16_s16_sft_bias(input_data, IN_DIM, IN_CH, weight,
- * OUT_CH, KER_DIM, PAD, STRIDE, bias, BIAS_LSHIFT, OUT_RSHIFT, out_data,
- * OUT_DIM, in_tmp_buf, NULL);
- * @endcode
- */
- static inline int32_t hpm_nn_conv_HWC_s16_s16_s16_sft_bias(const q15_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const q15_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const q15_t *bias,
- const uint16_t bias_lshift,
- const uint16_t out_rshift,
- q15_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf,
- q7_t *tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_s16_s16_s16_sft_bias(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, bias_lshift, out_rshift, out_tensor,
- out_tensor_dim, in_tmp_buf, tmp_buf);
- #else
- return riscv_nn_conv_HWC_s16_s16_s16_sft_bias(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, bias_lshift, out_rshift, out_tensor,
- out_tensor_dim, in_tmp_buf, tmp_buf);
- #endif
- }
/**
 * @brief This function performs fast signed 16-bit integer
 *        convolution with shift-based quantization on the outputs.
 * @param[in]       in_tensor       pointer of the input tensor
 * @param[in]       in_tensor_dim   dimension of the input tensor
 * @param[in]       in_tensor_ch    number of input tensor channels
 * @param[in]       ker_weight      pointer of kernel weights
 * @param[in]       out_tensor_ch   number of output tensor channels
 * @param[in]       ker_dim         dimension of the filter kernel
 * @param[in]       pad             padding size
 * @param[in]       stride          convolution stride
 * @param[in]       bias            pointer of the bias vector
 * @param[in]       bias_lshift     left shift amount for the bias
 * @param[in]       out_rshift      right shift amount for the output
 * @param[out]      out_tensor      pointer of the output tensor
 * @param[in]       out_tensor_dim  dimension of the output tensor
 * @param[in]       in_tmp_buf      temporary buffer for the input tensor. It is
 *                                  required when -mext-dsp or -mext-vector is
 *                                  enabled and its size must be equal to "2 *
 *                                  in_tensor_ch * ker_dim * ker_dim".
 * @param[in]       tmp_buf         dummy
 * @return          This function returns 0 on success; otherwise, it returns -1
 *                  if its inputs do not meet the constraints that both
 *                  in_tensor_ch and out_tensor_ch are multiple of 2.
 *
 * @b Example:
 * @code
 * //Convolve a 28x28x4 input tensor with a 5x5 kernel and generate a 24x24x8
 * //output tensor. Let both dimensions padding be 0 and their stride be 1.
 *
 * #define IN_DIM 28
 * #define IN_CH 4
 * #define KER_DIM 5
 * #define PAD 0
 * #define STRIDE 1
 * #define BIAS_LSHIFT 6
 * #define OUT_RSHIFT 10
 * #define OUT_CH 8
 * #define OUT_DIM 24
 *
 * q15_t in_data[IN_CH * IN_DIM * IN_DIM] = {...};
 * q15_t weight[IN_CH * KER_DIM * KER_DIM * OUT_CH] = {...};
 * q15_t bias[OUT_CH] = {...};
 * // Buffer sized per the in_tmp_buf requirement above (2 * IN_CH * KER_DIM * KER_DIM).
 * q15_t in_tmp_buf[2 * IN_CH * KER_DIM * KER_DIM] = {0};
 * q15_t out_data[OUT_CH * OUT_DIM * OUT_DIM];
 *
 * hpm_nn_conv_HWC_s16_s16_s16_sft_bias_fast(in_data, IN_DIM, IN_CH, weight,
 *     OUT_CH, KER_DIM, PAD, STRIDE, bias, BIAS_LSHIFT, OUT_RSHIFT, out_data,
 *     OUT_DIM, in_tmp_buf, NULL);
 * @endcode
 */
static inline int32_t hpm_nn_conv_HWC_s16_s16_s16_sft_bias_fast(const q15_t *in_tensor,
                                                                const uint16_t in_tensor_dim,
                                                                const uint16_t in_tensor_ch,
                                                                const q15_t *ker_weight,
                                                                const uint16_t out_tensor_ch,
                                                                const uint16_t ker_dim,
                                                                const uint16_t pad,
                                                                const uint16_t stride,
                                                                const q15_t *bias,
                                                                const uint16_t bias_lshift,
                                                                const uint16_t out_rshift,
                                                                q15_t *out_tensor,
                                                                const uint16_t out_tensor_dim,
                                                                q15_t *in_tmp_buf,
                                                                q7_t *tmp_buf)
{
    /* Toolchain dispatch: TPT kernel under __zcc__, RISC-V NN kernel otherwise. */
#if defined(__zcc__)
    return tpt_nn_conv_HWC_s16_s16_s16_sft_bias_fast(
        in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
        ker_dim, pad, stride, bias, bias_lshift, out_rshift, out_tensor,
        out_tensor_dim, in_tmp_buf, tmp_buf);
#else
    return riscv_nn_conv_HWC_s16_s16_s16_sft_bias_fast(
        in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
        ker_dim, pad, stride, bias, bias_lshift, out_rshift, out_tensor,
        out_tensor_dim, in_tmp_buf, tmp_buf);
#endif
}
- /**
- * @brief This function performs fast signed 16-bit integer
- * convolution in any x and y dimensions with shift-based
- * quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[in] bias_lshift left shift amount for the bias
- * @param[in] out_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its size
- * must be equal to "2 * in_tensor_ch *
- * ker_dim_x * ker_dim_y".
- * @param[in] tmp_buf dummy
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that both
- * in_tensor_ch and out_tensor_ch are multiple of 2.
- *
- * @b Example:
- * @code
- * //Convolve a 160x120x20 input tensor with a 3x5 kernel and generate a
- * //80x59x8 output tensor. Let both dimensions padding be 1 and their stride
- * //be 2.
- *
- * #define IN_X 160
- * #define IN_Y 120
- * #define IN_CH 20
- * #define OUT_CH 8
- * #define KER_DIM_X 3
- * #define KER_DIM_Y 5
- * #define PAD_X 1
- * #define PAD_Y 1
- * #define STRIDE_X 2
- * #define STRIDE_Y 2
- * #define BIAS_LSHIFT 6
- * #define OUT_RSHIFT 9
- * #define OUT_X 80
- * #define OUT_Y 59
- *
- * q15_t in_data[IN_CH * IN_X * IN_Y] = {...};
- * q15_t weight[IN_CH * KER_DIM_X * KER_DIM_Y * OUT_CH] = {...};
- * q15_t bias[OUT_CH] = {...};
- * q15_t in_tmp_buf[2 * IN_CH * KER_DIM_X * KER_DIM_Y] = {0};
- * q15_t out_data[OUT_CH * OUT_X * OUT_Y];
- *
- * riscv_nn_conv_HWC_s16_s16_s16_sft_bias_fast_any(in_data, IN_X, IN_Y , IN_CH,
- * weight, OUT_CH, KER_DIM_X, KER_DIM_Y, PAD_X, PAD_Y, STRIDE_X, STRIDE_Y,
- * bias, BIAS_LSHIFT, OUT_RSHIFT, out_data, OUT_X, OUT_Y, in_tmp_buf,
- * NULL);
- * @endcode
- */
- static inline int32_t hpm_nn_conv_HWC_s16_s16_s16_sft_bias_fast_any(const q15_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q15_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const q15_t *bias,
- const uint16_t bias_lshift,
- const uint16_t out_rshift,
- q15_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf,
- q7_t *tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_s16_s16_s16_sft_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, bias_lshift, out_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf, tmp_buf);
- #else
- return riscv_nn_conv_HWC_s16_s16_s16_sft_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, bias_lshift, out_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf, tmp_buf);
- #endif
- }
/**
 * @brief This function performs signed 8-bit integer depthwise
 *        convolution with shift-based quantization on the outputs.
 * @param[in]       in_tensor       pointer of the input tensor
 * @param[in]       in_tensor_dim   dimension of the input tensor
 * @param[in]       in_tensor_ch    number of input tensor channels
 * @param[in]       ker_weight      pointer of kernel weights
 * @param[in]       out_tensor_ch   number of output tensor channels
 * @param[in]       ker_dim         dimension of the filter kernel
 * @param[in]       pad             padding size
 * @param[in]       stride          convolution stride
 * @param[in]       bias            pointer of the bias vector
 * @param[in]       bias_lshift     left shift amount for the bias
 * @param[in]       out_rshift      right shift amount for the output
 * @param[out]      out_tensor      pointer of the output tensor
 * @param[in]       out_tensor_dim  dimension of the output tensor
 * @param[in]       in_tmp_buf      temporary buffer for the input tensor. It is
 *                                  required when -mext-dsp or -mext-vector is
 *                                  enabled and its size must be equal to
 *                                  "(in_tensor_ch * ker_dim * ker_dim + 1) / 2".
 * @param[in]       tmp_buf         dummy
 * @return          This function returns 0 on success; otherwise, it returns -1
 *                  if its inputs do not meet the constraints that in_tensor_ch
 *                  has to be equal to out_tensor_ch.
 *
 * @b Example:
 * @code
 * //Convolve a 11x11x28 input tensor with a 3x3 kernel and generate a 9x9x48
 * //output tensor. Let both dimensions padding be 0 and their stride be 1.
 * //NOTE(review): this example sets IN_CH=28 and OUT_CH=48, which contradicts
 * //the in_tensor_ch == out_tensor_ch constraint stated above — verify against
 * //the underlying kernel before relying on it.
 *
 * #define IN_DIM 11
 * #define IN_CH 28
 * #define OUT_CH 48
 * #define KER_DIM 3
 * #define PAD 0
 * #define STRIDE 1
 * #define OUT_RSHIFT 7
 * #define OUT_DIM 9
 *
 * q7_t in_data[IN_CH * IN_DIM * IN_DIM] = {...};
 * q7_t weight[IN_CH * KER_DIM * KER_DIM * IN_CH] = {...};
 * q7_t bias[IN_CH] = {...};
 * q15_t in_tmp_buf[2 * OUT_CH * KER_DIM * KER_DIM] = {0};
 * q7_t out_data[OUT_CH * OUT_DIM * OUT_DIM];
 *
 * hpm_nn_conv_dw_HWC_s8_s8_s8_sft_bias(in_data, IN_DIM, IN_CH, weight,
 *     OUT_CH, KER_DIM, PAD, STRIDE, bias, 0, OUT_RSHIFT, out_data, OUT_DIM,
 *     in_tmp_buf, NULL);
 * @endcode
 */
static inline int32_t hpm_nn_conv_dw_HWC_s8_s8_s8_sft_bias(const q7_t *in_tensor,
                                                           const uint16_t in_tensor_dim,
                                                           const uint16_t in_tensor_ch,
                                                           const q7_t *ker_weight,
                                                           const uint16_t out_tensor_ch,
                                                           const uint16_t ker_dim,
                                                           const uint16_t pad,
                                                           const uint16_t stride,
                                                           const q7_t *bias,
                                                           const uint16_t bias_lshift,
                                                           const uint16_t out_rshift,
                                                           q7_t *out_tensor,
                                                           const uint16_t out_tensor_dim,
                                                           q15_t *in_tmp_buf,
                                                           q7_t *tmp_buf)
{
    /* Toolchain dispatch: TPT kernel under __zcc__, RISC-V NN kernel otherwise. */
#if defined(__zcc__)
    return tpt_nn_conv_dw_HWC_s8_s8_s8_sft_bias(
        in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
        ker_dim, pad, stride, bias, bias_lshift, out_rshift, out_tensor,
        out_tensor_dim, in_tmp_buf, tmp_buf);
#else
    return riscv_nn_conv_dw_HWC_s8_s8_s8_sft_bias(
        in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
        ker_dim, pad, stride, bias, bias_lshift, out_rshift, out_tensor,
        out_tensor_dim, in_tmp_buf, tmp_buf);
#endif
}
- /**
- * @brief This function performs signed 8-bit integer depthwise
- * convolution in any x and y dimensions with shift-based
- * quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[in] bias_lshift left shift amount for the bias
- * @param[in] out_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its size
- * must be equal to "(in_tensor_ch *
- * ker_dim_x * ker_dim_y + 1) / 2".
- * @param[in] tmp_buf dummy
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * must be equal to out_tensor_ch.
- *
- * @b Example:
- * @code
- * //Perform a depth-wise convolution for a 79x59x12 input tensor with a 3x3
- * //kernel and generate a 77x57x12 output tensor. Let both dimensions padding
- * //be 0 and their stride be 1.
- *
- * #define IN_DIM_X 79
- * #define IN_DIM_Y 59
- * #define IN_CH 12
- * #define OUT_CH 12
- * #define KER_DIM 3
- * #define PAD 0
- * #define STRIDE 1
- * #define BIAS_SHIFT 0
- * #define OUT_RSHIFT 7
- * #define OUT_DIM_X 77
- * #define OUT_DIM_Y 57
- *
- * q7_t in_data[IN_CH * IN_DIM_X * IN_DIM_Y] = {...};
- * q7_t weight[IN_CH * KER_DIM * KER_DIM * IN_CH] = {...};
- * q7_t bias[IN_CH] = {...};
- * q15_t in_tmp_buf[2 * OUT_CH * KER_DIM * KER_DIM] = {0};
- * q7_t out_data[OUT_CH * OUT_DIM_X * OUT_DIM_Y];
- *
- * riscv_nn_conv_dw_HWC_s8_s8_s8_sft_bias_any(in_data, IN_DIM_X, IN_DIM_Y,
- * IN_CH, weight, OUT_CH, KER_DIM, KER_DIM, PAD, PAD, STRIDE, STRIDE, bias,
- * BIAS_SHIFT, OUT_RSHIFT, out_data, OUT_DIM_X, OUT_DIM_Y, in_tmp_buf,
- * NULL);
- * @endcode
- */
- static inline int32_t hpm_nn_conv_dw_HWC_s8_s8_s8_sft_bias_any(const q7_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const q7_t *bias,
- const uint16_t bias_lshift,
- const uint16_t out_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf,
- q7_t *tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_dw_HWC_s8_s8_s8_sft_bias_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, bias_lshift, out_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf, tmp_buf);
- #else
- return riscv_nn_conv_dw_HWC_s8_s8_s8_sft_bias_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, bias_lshift, out_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf, tmp_buf);
- #endif
- }
/**
 * @brief This function performs 1x1 kernels convolution for signed
 *        8-bit integer inputs/outputs in any x and y dimensions with
 *        bias inputs and symmetric quantization on the outputs.
 * @param[in]       in_tensor           pointer of the input tensor
 * @param[in]       in_tensor_dim_x     x dimension of the input tensor
 * @param[in]       in_tensor_dim_y     y dimension of the input tensor
 * @param[in]       in_tensor_ch        number of input tensor channels
 * @param[in]       ker_weight          pointer of kernel weights
 * @param[in]       out_tensor_ch       number of output tensor channels
 * @param[in]       ker_dim_x           x dimension of the filter kernel
 * @param[in]       ker_dim_y           y dimension of the filter kernel
 * @param[in]       pad_x               padding size in the x dimension
 * @param[in]       pad_y               padding size in the y dimension
 * @param[in]       stride_x            convolution stride in the x dimension
 * @param[in]       stride_y            convolution stride in the y dimension
 * @param[in]       bias                pointer of the bias vector
 * @param[in]       pre_rshift          right shift amount for the output
 * @param[in]       out_scale           value of scaling for the output
 * @param[in]       post_rshift         right shift amount for the output
 * @param[out]      out_tensor          pointer of the output tensor
 * @param[in]       out_tensor_dim_x    x dimension of the output tensor
 * @param[in]       out_tensor_dim_y    y dimension of the output tensor
 * @param[in]       in_tmp_buf          temporary buffer for the input tensor.
 *                                      It is required when -mext-dsp or
 *                                      -mext-vector is enabled and its size
 *                                      must be equal to "2 * in_tensor_ch *
 *                                      ker_dim_x * ker_dim_y".
 * @return          This function returns 0 on success; otherwise, it returns -1
 *                  if its inputs do not meet the constraints (see the Note
 *                  below for details).
 *
 * @note
 * - The input constraints of this function are:
 *     - in_tensor_ch is a multiple of 4
 *     - out_tensor_ch is a multiple of 2
 *     - ker_dim_x is 1
 *     - ker_dim_y is 1
 *     - pad_x is 0
 *     - pad_y is 0
 *     - stride_x is 1
 *     - stride_y is 1
 * - The outputs will be 2-stage shifted before being stored, i.e.,
 *   out = ((out >> pre_rshift) * out_scale) >> post_rshift.
 */
static inline int32_t hpm_nn_conv_1x1_HWC_s8_s8_s8_sym_bias_fast_any(const q7_t *in_tensor,
                                                                     const uint16_t in_tensor_dim_x,
                                                                     const uint16_t in_tensor_dim_y,
                                                                     const uint16_t in_tensor_ch,
                                                                     const q7_t *ker_weight,
                                                                     const uint16_t out_tensor_ch,
                                                                     const uint16_t ker_dim_x,
                                                                     const uint16_t ker_dim_y,
                                                                     const uint16_t pad_x,
                                                                     const uint16_t pad_y,
                                                                     const uint16_t stride_x,
                                                                     const uint16_t stride_y,
                                                                     const q31_t *bias,
                                                                     const uint16_t pre_rshift,
                                                                     const uint16_t out_scale,
                                                                     const uint16_t post_rshift,
                                                                     q7_t *out_tensor,
                                                                     const uint16_t out_tensor_dim_x,
                                                                     const uint16_t out_tensor_dim_y,
                                                                     q15_t *in_tmp_buf)
{
#if defined(__zcc__)
    /* The TPT backend takes its parameters packed into two structs; the
     * initializer order below must match the tpt_nn_conv_1x1_sym_params and
     * tpt_nn_1x1_sym_dims declarations. */
    tpt_nn_conv_1x1_sym_params S1 = {stride_x, stride_y, pad_x, pad_y, pre_rshift, out_scale, post_rshift};
    tpt_nn_1x1_sym_dims S2 = {in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_dim_x, ker_dim_y,
                              out_tensor_dim_x, out_tensor_dim_y, out_tensor_ch};
    return tpt_nn_conv_1x1_HWC_s8_s8_s8_sym_bias_fast_any(
        out_tensor_ch, in_tensor, ker_weight, bias, &S1, &S2, in_tmp_buf);
#else
    /* The RISC-V NN backend takes the flat argument list directly. */
    return riscv_nn_conv_1x1_HWC_s8_s8_s8_sym_bias_fast_any(
        in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
        out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
        bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
        out_tensor_dim_y, in_tmp_buf);
#endif
}
- /**
- * @brief This function performs 1x1 kernels convolution for signed
- * 8-bit integer inputs and signed 16-bit integer outputs in
- * any x and y dimensions with bias inputs and symmetric
- * quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its size
- * must be equal to "2 * in_tensor_ch *
- * ker_dim_x * ker_dim_y".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints (see the Note
- * below for details).
- *
- * @note
- * - The input constraints of this function are:
- * - in_tensor_ch is a multiple of 4
- * - out_tensor_ch is a multiple of 2
- * - ker_dim_x is 1
- * - ker_dim_y is 1
- * - pad_x is 0
- * - pad_y is 0
- * - stride_x is 1
- * - stride_y is 1
- * - The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_1x1_HWC_s8_s16_s8_sym_bias_fast_any(const q7_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const q31_t *bias,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q15_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_1x1_HWC_s8_s16_s8_sym_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #else
- return riscv_nn_conv_1x1_HWC_s8_s16_s8_sym_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs 1x1 kernels convolution for unsigned
- * 8-bit integer inputs/outputs in any x and y dimensions with
- * bias inputs and symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its size
- * must be equal to "2 * in_tensor_ch *
- * ker_dim_x * ker_dim_y".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints (see the Note
- * below for details).
- *
- * @note
- * - The input constraints of this function are:
- * - in_tensor_ch is a multiple of 4
- * - out_tensor_ch is a multiple of 2
- * - ker_dim_x is 1
- * - ker_dim_y is 1
- * - pad_x is 0
- * - pad_y is 0
- * - stride_x is 1
- * - stride_y is 1
- * - The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_1x1_HWC_u8_u8_s8_sym_bias_fast_any(const u8_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const q31_t *bias,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- u8_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_1x1_HWC_u8_u8_s8_sym_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #else
- return riscv_nn_conv_1x1_HWC_u8_u8_s8_sym_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs 1x1 kernels convolution for unsigned
- * 8-bit integer inputs and signed 8-bit integer outputs in any
- * x and y dimensions with bias inputs and symmetric
- * quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its size
- * must be equal to "2 * in_tensor_ch *
- * ker_dim_x * ker_dim_y".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints (see the Note
- * below for details).
- *
- * @note
- * - The input constraints of this function are:
- * - in_tensor_ch is a multiple of 4
- * - out_tensor_ch is a multiple of 2
- * - ker_dim_x is 1
- * - ker_dim_y is 1
- * - pad_x is 0
- * - pad_y is 0
- * - stride_x is 1
- * - stride_y is 1
- * - The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_1x1_HWC_u8_s8_s8_sym_bias_fast_any(const u8_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const q31_t *bias,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_1x1_HWC_u8_s8_s8_sym_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #else
- return riscv_nn_conv_1x1_HWC_u8_s8_s8_sym_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs 1x1 kernels convolution for unsigned
- * 8-bit integer inputs and signed 16-bit integer outputs in
- * any x and y dimensions with bias inputs and symmetric
- * quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its size
- * must be equal to "2 * in_tensor_ch *
- * ker_dim_x * ker_dim_y".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints (see the Note
- * below for details).
- *
- * @note
- * - The input constraints of this function are:
- * - in_tensor_ch is a multiple of 4
- * - out_tensor_ch is a multiple of 2
- * - ker_dim_x is 1
- * - ker_dim_y is 1
- * - pad_x is 0
- * - pad_y is 0
- * - stride_x is 1
- * - stride_y is 1
- * - The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_1x1_HWC_u8_s16_s8_sym_bias_fast_any(const u8_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const q31_t *bias,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q15_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_1x1_HWC_u8_s16_s8_sym_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #else
- return riscv_nn_conv_1x1_HWC_u8_s16_s8_sym_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs 1x1 kernels convolution for signed
- * 8-bit integer inputs/outputs in any x and y dimensions with
- * symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its size
- * must be equal to 2 * in_tensor_ch *
- * ker_dim_x * ker_dim_y.
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints (see the Note
- * below for details).
- *
- * @note
- * - The input constraints of this function are:
- * - in_tensor_ch is a multiple of 4
- * - out_tensor_ch is a multiple of 2
- * - ker_dim_x is 1
- * - ker_dim_y is 1
- * - pad_x is 0
- * - pad_y is 0
- * - stride_x is 1
- * - stride_y is 1
- * - The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_1x1_HWC_s8_s8_s8_sym_fast_any(const q7_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_1x1_HWC_s8_s8_s8_sym_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #else
- return riscv_nn_conv_1x1_HWC_s8_s8_s8_sym_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs 1x1 kernels convolution for signed
- * 8-bit integer inputs and signed 16-bit integer outputs in
- * any x and y dimensions with symmetric quantization on the
- * outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its size
- * must be equal to 2 * in_tensor_ch *
- * ker_dim_x * ker_dim_y.
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints (see the Note
- * below for details).
- *
- * @note
- * - The input constraints of this function are:
- * - in_tensor_ch is a multiple of 4
- * - out_tensor_ch is a multiple of 2
- * - ker_dim_x is 1
- * - ker_dim_y is 1
- * - pad_x is 0
- * - pad_y is 0
- * - stride_x is 1
- * - stride_y is 1
- * - The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_1x1_HWC_s8_s16_s8_sym_fast_any(const q7_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q15_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_1x1_HWC_s8_s16_s8_sym_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #else
- return riscv_nn_conv_1x1_HWC_s8_s16_s8_sym_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs 1x1 kernels convolution for unsigned
- * 8-bit integer inputs/outputs in any x and y dimensions with
- * symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its size
- * must be equal to "2 * in_tensor_ch *
- * ker_dim_x * ker_dim_y".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints (see the Note
- * below for details).
- *
- * @note
- * - The input constraints of this function are:
- * - in_tensor_ch is a multiple of 4
- * - out_tensor_ch is a multiple of 2
- * - ker_dim_x is 1
- * - ker_dim_y is 1
- * - pad_x is 0
- * - pad_y is 0
- * - stride_x is 1
- * - stride_y is 1
- * - The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_1x1_HWC_u8_u8_s8_sym_fast_any(const u8_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- u8_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_1x1_HWC_u8_u8_s8_sym_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #else
- return riscv_nn_conv_1x1_HWC_u8_u8_s8_sym_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs 1x1 kernels convolution for unsigned
- * 8-bit integer inputs and signed 8-bit integer outputs in any
- * x and y dimensions with symmetric quantization on the
- * outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its size
- * must be equal to 2 * in_tensor_ch *
- * ker_dim_x * ker_dim_y.
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints (see the Note
- * below for details).
- *
- * @note
- * - The input constraints of this function are:
- * - in_tensor_ch is a multiple of 4
- * - out_tensor_ch is a multiple of 2
- * - ker_dim_x is 1
- * - ker_dim_y is 1
- * - pad_x is 0
- * - pad_y is 0
- * - stride_x is 1
- * - stride_y is 1
- * - The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_1x1_HWC_u8_s8_s8_sym_fast_any(const u8_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_1x1_HWC_u8_s8_s8_sym_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #else
- return riscv_nn_conv_1x1_HWC_u8_s8_s8_sym_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs 1x1 kernels convolution for unsigned
- * 8-bit integer inputs and signed 16-bit integer outputs in
- * any x and y dimensions with symmetric quantization on the
- * outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its size
- * must be equal to "2 * in_tensor_ch *
- * ker_dim_x * ker_dim_y".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints (see the Note
- * below for details).
- *
- * @note
- * - The input constraints of this function are:
- * - in_tensor_ch is a multiple of 4
- * - out_tensor_ch is a multiple of 2
- * - ker_dim_x is 1
- * - ker_dim_y is 1
- * - pad_x is 0
- * - pad_y is 0
- * - stride_x is 1
- * - stride_y is 1
- * - The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_1x1_HWC_u8_s16_s8_sym_fast_any(const u8_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q15_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_1x1_HWC_u8_s16_s8_sym_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #else
- return riscv_nn_conv_1x1_HWC_u8_s16_s8_sym_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution on RGB images for
- * signed 8-bit integer inputs/outputs with bias inputs and
- * symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim input tensor dimension
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be "2 * (3 *
- * ker_dim * ker_dim + 1)".
- * @param[in] wt_tmp_buf temporary buffer for kernel weights. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be "out_tensor_ch *
- * (3 * ker_dim * ker_dim + 1)".
- * @return This function only returns 0.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_s8_s8_s8_RGB_sym_bias_fast(const q7_t *in_tensor,
- const uint16_t in_tensor_dim,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const q31_t *bias,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf,
- q15_t *wt_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_s8_s8_s8_RGB_sym_bias_fast(
- in_tensor, in_tensor_dim, ker_weight, out_tensor_ch, ker_dim, pad, stride,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim,
- in_tmp_buf, wt_tmp_buf);
- #else
- return riscv_nn_conv_HWC_s8_s8_s8_RGB_sym_bias_fast(
- in_tensor, in_tensor_dim, ker_weight, out_tensor_ch, ker_dim, pad, stride,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim,
- in_tmp_buf, wt_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution on RGB images for
- * signed 8-bit integer inputs and signed 16-bit integer
- * outputs with bias inputs and symmetric quantization on the
- * outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim input tensor dimension
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be "2 * (3 *
- * ker_dim * ker_dim + 1)".
- * @param[in] wt_tmp_buf temporary buffer for kernel weights. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be "out_tensor_ch *
- * (3 * ker_dim * ker_dim + 1)".
- * @return This function only returns 0.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_s8_s16_s8_RGB_sym_bias_fast(const q7_t *in_tensor,
- const uint16_t in_tensor_dim,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const q31_t *bias,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q15_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf,
- q15_t *wt_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_s8_s16_s8_RGB_sym_bias_fast(
- in_tensor, in_tensor_dim, ker_weight, out_tensor_ch, ker_dim, pad, stride,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim,
- in_tmp_buf, wt_tmp_buf);
- #else
- return riscv_nn_conv_HWC_s8_s16_s8_RGB_sym_bias_fast(
- in_tensor, in_tensor_dim, ker_weight, out_tensor_ch, ker_dim, pad, stride,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim,
- in_tmp_buf, wt_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution on RGB images for
- * unsigned 8-bit integer inputs/outputs with symmetric
- * quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim input tensor dimension
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be "2 * (3 *
- * ker_dim * ker_dim + 1)".
- * @param[in] wt_tmp_buf temporary buffer for kernel weights. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be "out_tensor_ch *
- * (3 * ker_dim * ker_dim + 1)".
- * @return This function only returns 0.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_u8_u8_s8_RGB_sym_bias_fast(const u8_t *in_tensor,
- const uint16_t in_tensor_dim,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const q31_t *bias,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- u8_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf,
- q15_t *wt_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_u8_u8_s8_RGB_sym_bias_fast(
- in_tensor, in_tensor_dim, ker_weight, out_tensor_ch, ker_dim, pad, stride,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim,
- in_tmp_buf, wt_tmp_buf);
- #else
- return riscv_nn_conv_HWC_u8_u8_s8_RGB_sym_bias_fast(
- in_tensor, in_tensor_dim, ker_weight, out_tensor_ch, ker_dim, pad, stride,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim,
- in_tmp_buf, wt_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution on RGB images for
- * signed 8-bit integer inputs/outputs with bias inputs and
- * symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim input tensor dimension
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be "2 * (3 *
- * ker_dim * ker_dim + 1)".
- * @param[in] wt_tmp_buf temporary buffer for kernel weights. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be "out_tensor_ch *
- * (3 * ker_dim * ker_dim + 1)".
- * @return This function only returns 0.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_u8_s8_s8_RGB_sym_bias_fast(const u8_t *in_tensor,
- const uint16_t in_tensor_dim,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const q31_t *bias,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf,
- q15_t *wt_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_u8_s8_s8_RGB_sym_bias_fast(
- in_tensor, in_tensor_dim, ker_weight, out_tensor_ch, ker_dim, pad, stride,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim,
- in_tmp_buf, wt_tmp_buf);
- #else
- return riscv_nn_conv_HWC_u8_s8_s8_RGB_sym_bias_fast(
- in_tensor, in_tensor_dim, ker_weight, out_tensor_ch, ker_dim, pad, stride,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim,
- in_tmp_buf, wt_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution on RGB images for
- * unsigned 8-bit integer inputs and signed 16-bit integer
- * outputs with bias inputs and symmetric quantization on the
- * outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim input tensor dimension
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be "2 * (3 *
- * ker_dim * ker_dim + 1)".
- * @param[in] wt_tmp_buf temporary buffer for kernel weights. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be "out_tensor_ch *
- * (3 * ker_dim * ker_dim + 1)".
- * @return This function only returns 0.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_u8_s16_s8_RGB_sym_bias_fast(const u8_t *in_tensor,
- const uint16_t in_tensor_dim,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const q31_t *bias,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q15_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf,
- q15_t *wt_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_u8_s16_s8_RGB_sym_bias_fast(
- in_tensor, in_tensor_dim, ker_weight, out_tensor_ch, ker_dim, pad, stride,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim,
- in_tmp_buf, wt_tmp_buf);
- #else
- return riscv_nn_conv_HWC_u8_s16_s8_RGB_sym_bias_fast(
- in_tensor, in_tensor_dim, ker_weight, out_tensor_ch, ker_dim, pad, stride,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim,
- in_tmp_buf, wt_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution on RGB images for
- * signed 8-bit integer inputs/outputs with symmetric
- * quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim input tensor dimension
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be "2 * (3 *
- * ker_dim * ker_dim + 1)".
- * @param[in] wt_tmp_buf temporary buffer for kernel weights. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be "out_tensor_ch *
- * (3 * ker_dim * ker_dim + 1)".
- * @return This function only returns 0.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_s8_s8_s8_RGB_sym_fast(const q7_t *in_tensor,
- const uint16_t in_tensor_dim,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf,
- q15_t *wt_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_s8_s8_s8_RGB_sym_fast(
- in_tensor, in_tensor_dim, ker_weight, out_tensor_ch, ker_dim, pad, stride,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim,
- in_tmp_buf, wt_tmp_buf);
- #else
- return riscv_nn_conv_HWC_s8_s8_s8_RGB_sym_fast(
- in_tensor, in_tensor_dim, ker_weight, out_tensor_ch, ker_dim, pad, stride,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim,
- in_tmp_buf, wt_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution on RGB images for
- * signed 8-bit integer inputs and signed 16-bit integer
- * outputs with symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim input tensor dimension
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be "2 * (3 *
- * ker_dim * ker_dim + 1)".
- * @param[in] wt_tmp_buf temporary buffer for kernel weights. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be "out_tensor_ch *
- * (3 * ker_dim * ker_dim + 1)".
- * @return This function only returns 0.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_s8_s16_s8_RGB_sym_fast(const q7_t *in_tensor,
- const uint16_t in_tensor_dim,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q15_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf,
- q15_t *wt_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_s8_s16_s8_RGB_sym_fast(
- in_tensor, in_tensor_dim, ker_weight, out_tensor_ch, ker_dim, pad, stride,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim,
- in_tmp_buf, wt_tmp_buf);
- #else
- return riscv_nn_conv_HWC_s8_s16_s8_RGB_sym_fast(
- in_tensor, in_tensor_dim, ker_weight, out_tensor_ch, ker_dim, pad, stride,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim,
- in_tmp_buf, wt_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution on RGB images for
- * unsigned 8-bit integer inputs/outputs with symmetric
- * quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim input tensor dimension
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be "2 * (3 *
- * ker_dim * ker_dim + 1)".
- * @param[in] wt_tmp_buf temporary buffer for kernel weights. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be "out_tensor_ch *
- * (3 * ker_dim * ker_dim + 1)".
- * @return This function only returns 0.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_u8_u8_s8_RGB_sym_fast(const u8_t *in_tensor,
- const uint16_t in_tensor_dim,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- u8_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf,
- q15_t *wt_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_u8_u8_s8_RGB_sym_fast(
- in_tensor, in_tensor_dim, ker_weight, out_tensor_ch, ker_dim, pad, stride,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim,
- in_tmp_buf, wt_tmp_buf);
- #else
- return riscv_nn_conv_HWC_u8_u8_s8_RGB_sym_fast(
- in_tensor, in_tensor_dim, ker_weight, out_tensor_ch, ker_dim, pad, stride,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim,
- in_tmp_buf, wt_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution on RGB images for
- * unsigned 8-bit integer inputs and signed 8-bit integer
- * outputs with symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim input tensor dimension
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be "2 * (3 *
- * ker_dim * ker_dim + 1)".
- * @param[in] wt_tmp_buf temporary buffer for kernel weights. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be "out_tensor_ch *
- * (3 * ker_dim * ker_dim + 1)".
- * @return This function only returns 0.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_u8_s8_s8_RGB_sym_fast(const u8_t *in_tensor,
- const uint16_t in_tensor_dim,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf,
- q15_t *wt_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_u8_s8_s8_RGB_sym_fast(
- in_tensor, in_tensor_dim, ker_weight, out_tensor_ch, ker_dim, pad, stride,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim,
- in_tmp_buf, wt_tmp_buf);
- #else
- return riscv_nn_conv_HWC_u8_s8_s8_RGB_sym_fast(
- in_tensor, in_tensor_dim, ker_weight, out_tensor_ch, ker_dim, pad, stride,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim,
- in_tmp_buf, wt_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution on RGB images for
- * unsigned 8-bit integer inputs and signed 16-bit integer
- * outputs with symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim input tensor dimension
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be "2 * (3 *
- * ker_dim * ker_dim + 1)".
- * @param[in] wt_tmp_buf temporary buffer for kernel weights. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be "out_tensor_ch *
- * (3 * ker_dim * ker_dim + 1)".
- * @return This function only returns 0.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_u8_s16_s8_RGB_sym_fast(const u8_t *in_tensor,
- const uint16_t in_tensor_dim,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q15_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf,
- q15_t *wt_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_u8_s16_s8_RGB_sym_fast(
- in_tensor, in_tensor_dim, ker_weight, out_tensor_ch, ker_dim, pad, stride,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim,
- in_tmp_buf, wt_tmp_buf);
- #else
- return riscv_nn_conv_HWC_u8_s16_s8_RGB_sym_fast(
- in_tensor, in_tensor_dim, ker_weight, out_tensor_ch, ker_dim, pad, stride,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim,
- in_tmp_buf, wt_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution for signed 8-bit
- * integer inputs/outputs with bias inputs and symmetric
- * quantization on the outputs.
- * @param[in] in_tensor pointer of the input vector
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be equal to "2 *
- * in_tensor_ch * ker_dim * ker_dim".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * is a multiple of 4 and out_tensor_ch is a multiple of 2.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_s8_s8_s8_sym_bias_fast(const q7_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const q31_t *bias,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_s8_s8_s8_sym_bias_fast(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, pre_rshift, out_scale, post_rshift,
- out_tensor, out_tensor_dim, in_tmp_buf);
- #else
- return riscv_nn_conv_HWC_s8_s8_s8_sym_bias_fast(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, pre_rshift, out_scale, post_rshift,
- out_tensor, out_tensor_dim, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution for signed 8-bit
- * integer inputs and signed 16-bit integer outputs with bias
- * inputs and symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input vector
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be equal to "2 *
- * in_tensor_ch * ker_dim * ker_dim".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * is a multiple of 4 and out_tensor_ch is a multiple of 2.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_s8_s16_s8_sym_bias_fast(const q7_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const q31_t *bias,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q15_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_s8_s16_s8_sym_bias_fast(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, pre_rshift, out_scale, post_rshift,
- out_tensor, out_tensor_dim, in_tmp_buf);
- #else
- return riscv_nn_conv_HWC_s8_s16_s8_sym_bias_fast(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, pre_rshift, out_scale, post_rshift,
- out_tensor, out_tensor_dim, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution for unsigned 8-bit
- * integer inputs/outputs with bias inputs and symmetric
- * quantization on the outputs.
- * @param[in] in_tensor pointer of the input vector
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be equal to "2 *
- * in_tensor_ch * ker_dim * ker_dim".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * is a multiple of 4 and out_tensor_ch is a multiple of 2.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_u8_u8_s8_sym_bias_fast(const u8_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const q31_t *bias,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- u8_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_u8_u8_s8_sym_bias_fast(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, pre_rshift, out_scale, post_rshift,
- out_tensor, out_tensor_dim, in_tmp_buf);
- #else
- return riscv_nn_conv_HWC_u8_u8_s8_sym_bias_fast(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, pre_rshift, out_scale, post_rshift,
- out_tensor, out_tensor_dim, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution for unsigned 8-bit
- * integer inputs and signed 8-bit integer outputs with bias
- * inputs and symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input vector
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be equal to "2 *
- * in_tensor_ch * ker_dim * ker_dim".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * is a multiple of 4 and out_tensor_ch is a multiple of 2.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_u8_s8_s8_sym_bias_fast(const u8_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const q31_t *bias,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_u8_s8_s8_sym_bias_fast(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, pre_rshift, out_scale, post_rshift,
- out_tensor, out_tensor_dim, in_tmp_buf);
- #else
- return riscv_nn_conv_HWC_u8_s8_s8_sym_bias_fast(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, pre_rshift, out_scale, post_rshift,
- out_tensor, out_tensor_dim, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution for unsigned 8-bit
- * integer inputs and signed 16-bit integer outputs with bias
- * inputs and symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input vector
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be equal to "2 *
- * in_tensor_ch * ker_dim * ker_dim".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * is a multiple of 4 and out_tensor_ch is a multiple of 2.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_u8_s16_s8_sym_bias_fast(const u8_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const q31_t *bias,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q15_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_u8_s16_s8_sym_bias_fast(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, pre_rshift, out_scale, post_rshift,
- out_tensor, out_tensor_dim, in_tmp_buf);
- #else
- return riscv_nn_conv_HWC_u8_s16_s8_sym_bias_fast(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, pre_rshift, out_scale, post_rshift,
- out_tensor, out_tensor_dim, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution for signed 8-bit
- * integer inputs/outputs with symmetric quantization on the
- * outputs.
- * @param[in] in_tensor pointer of the input vector
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be equal to "2 *
- * in_tensor_ch * ker_dim * ker_dim".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * is a multiple of 4 and out_tensor_ch is a multiple of 2.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_s8_s8_s8_sym_fast(const q7_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_s8_s8_s8_sym_fast(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, pre_rshift, out_scale, post_rshift, out_tensor,
- out_tensor_dim, in_tmp_buf);
- #else
- return riscv_nn_conv_HWC_s8_s8_s8_sym_fast(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, pre_rshift, out_scale, post_rshift, out_tensor,
- out_tensor_dim, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution for signed 8-bit
- * integer inputs and signed 16-bit integer outputs with
- * symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input vector
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be equal to "2 *
- * in_tensor_ch * ker_dim * ker_dim".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * is a multiple of 4 and out_tensor_ch is a multiple of 2.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_s8_s16_s8_sym_fast(const q7_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q15_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_s8_s16_s8_sym_fast(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, pre_rshift, out_scale, post_rshift, out_tensor,
- out_tensor_dim, in_tmp_buf);
- #else
- return riscv_nn_conv_HWC_s8_s16_s8_sym_fast(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, pre_rshift, out_scale, post_rshift, out_tensor,
- out_tensor_dim, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution for unsigned 8-bit
- * integer inputs/outputs with symmetric quantization on
- * the outputs.
- * @param[in] in_tensor pointer of the input vector
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be equal to "2 *
- * in_tensor_ch * ker_dim * ker_dim".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * is a multiple of 4 and out_tensor_ch is a multiple of 2.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_u8_u8_s8_sym_fast(const u8_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- u8_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_u8_u8_s8_sym_fast(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, pre_rshift, out_scale, post_rshift, out_tensor,
- out_tensor_dim, in_tmp_buf);
- #else
- return riscv_nn_conv_HWC_u8_u8_s8_sym_fast(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, pre_rshift, out_scale, post_rshift, out_tensor,
- out_tensor_dim, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution for unsigned 8-bit
- * integer inputs and signed 8-bit integer outputs with
- * symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input vector
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be equal to "2 *
- * in_tensor_ch * ker_dim * ker_dim".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * is a multiple of 4 and out_tensor_ch is a multiple of 2.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_u8_s8_s8_sym_fast(const u8_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_u8_s8_s8_sym_fast(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, pre_rshift, out_scale, post_rshift, out_tensor,
- out_tensor_dim, in_tmp_buf);
- #else
- return riscv_nn_conv_HWC_u8_s8_s8_sym_fast(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, pre_rshift, out_scale, post_rshift, out_tensor,
- out_tensor_dim, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution for unsigned 8-bit
- * integer inputs and signed 16-bit integer outputs with
- * symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input vector
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector
- * enabled and its size must be equal to "2 *
- * in_tensor_ch * ker_dim * ker_dim".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * is a multiple of 4 and out_tensor_ch is a multiple of 2.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_u8_s16_s8_sym_fast(const u8_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q15_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_u8_s16_s8_sym_fast(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, pre_rshift, out_scale, post_rshift, out_tensor,
- out_tensor_dim, in_tmp_buf);
- #else
- return riscv_nn_conv_HWC_u8_s16_s8_sym_fast(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, pre_rshift, out_scale, post_rshift, out_tensor,
- out_tensor_dim, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution for signed 8-bit
- * integer inputs/outputs in any x and y dimensions with bias
- * inputs and symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input vector
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector enabled and its size must
- * be equal to "2 * in_tensor_ch * ker_dim_x
- * * ker_dim_y".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * is a multiple of 4 and out_tensor_ch is a multiple of 2.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_s8_s8_s8_sym_bias_fast_any(const q7_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const q31_t *bias,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_s8_s8_s8_sym_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #else
- return riscv_nn_conv_HWC_s8_s8_s8_sym_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution for signed 8-bit
- * integer inputs and signed 16-bit integer outputs in any x
- * and y dimensions with bias inputs and symmetric quantization
- * on the outputs.
- * @param[in] in_tensor pointer of the input vector
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector enabled and its size must
- * be equal to "2 * in_tensor_ch * ker_dim_x
- * * ker_dim_y".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * is a multiple of 4 and out_tensor_ch is a multiple of 2.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_s8_s16_s8_sym_bias_fast_any(const q7_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const q31_t *bias,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q15_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_s8_s16_s8_sym_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #else
- return riscv_nn_conv_HWC_s8_s16_s8_sym_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution for unsigned 8-bit
- * integer inputs/outputs in any x and y dimensions with bias
- * inputs and symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input vector
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector enabled and its size must
- * be equal to "2 * in_tensor_ch * ker_dim_x
- * * ker_dim_y".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * is a multiple of 4 and out_tensor_ch is a multiple of 2.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_u8_u8_s8_sym_bias_fast_any(const u8_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const q31_t *bias,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- u8_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_u8_u8_s8_sym_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #else
- return riscv_nn_conv_HWC_u8_u8_s8_sym_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution for unsigned 8-bit
- * integer inputs and signed 8-bit integer outputs in any x and
- * y dimensions with bias inputs and symmetric quantization on
- * the outputs.
- * @param[in] in_tensor pointer of the input vector
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector enabled and its size must
- * be equal to "2 * in_tensor_ch * ker_dim_x
- * * ker_dim_y".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * is a multiple of 4 and out_tensor_ch is a multiple of 2.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_u8_s8_s8_sym_bias_fast_any(const u8_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const q31_t *bias,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_u8_s8_s8_sym_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #else
- return riscv_nn_conv_HWC_u8_s8_s8_sym_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution for unsigned 8-bit
- * integer inputs and signed 16-bit integer outputs in any x
- * and y dimensions with bias inputs and symmetric quantization
- * on the outputs.
- * @param[in] in_tensor pointer of the input vector
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector enabled and its size must
- * be equal to "2 * in_tensor_ch * ker_dim_x
- * * ker_dim_y".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * is a multiple of 4 and out_tensor_ch is a multiple of 2.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_u8_s16_s8_sym_bias_fast_any(const u8_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const q31_t *bias,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q15_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_u8_s16_s8_sym_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #else
- return riscv_nn_conv_HWC_u8_s16_s8_sym_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution for signed 8-bit
- * integer inputs/outputs in any x and y dimensions with
- * symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input vector
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector enabled and its size must
- * be equal to "2 * in_tensor_ch * ker_dim_x
- * * ker_dim_y".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * is a multiple of 4 and out_tensor_ch is a multiple of 2.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_s8_s8_s8_sym_fast_any(const q7_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_s8_s8_s8_sym_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #else
- return riscv_nn_conv_HWC_s8_s8_s8_sym_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution for signed 8-bit
- * integer inputs and signed 16-bit integer outputs in any x
- * and y dimensions with symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input vector
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector enabled and its size must
- * be equal to "2 * in_tensor_ch * ker_dim_x
- * * ker_dim_y".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * is a multiple of 4 and out_tensor_ch is a multiple of 2.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_s8_s16_s8_sym_fast_any(const q7_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q15_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_s8_s16_s8_sym_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #else
- return riscv_nn_conv_HWC_s8_s16_s8_sym_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution for unsigned 8-bit
- * integer inputs/outputs in any x and y dimensions with
- * symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input vector
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector enabled and its size must
- * be equal to "2 * in_tensor_ch * ker_dim_x
- * * ker_dim_y".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * is a multiple of 4 and out_tensor_ch is a multiple of 2.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_u8_u8_s8_sym_fast_any(const u8_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- u8_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_u8_u8_s8_sym_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #else
- return riscv_nn_conv_HWC_u8_u8_s8_sym_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution for unsigned 8-bit
- * integer inputs and signed 8-bit integer outputs in any x and
- * y dimensions with symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input vector
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector enabled and its size must
- * be equal to "2 * in_tensor_ch * ker_dim_x
- * * ker_dim_y".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * is a multiple of 4 and out_tensor_ch is a multiple of 2.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_u8_s8_s8_sym_fast_any(const u8_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_u8_s8_s8_sym_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #else
- return riscv_nn_conv_HWC_u8_s8_s8_sym_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast convolution for unsigned 8-bit
- * integer inputs and signed 16-bit integer outputs in any x
- * and y dimensions with symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input vector
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector enabled and its size must
- * be equal to "2 * in_tensor_ch * ker_dim_x
- * * ker_dim_y".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * is a multiple of 4 and out_tensor_ch is a multiple of 2.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_HWC_u8_s16_s8_sym_fast_any(const u8_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q15_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_u8_s16_s8_sym_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #else
- return riscv_nn_conv_HWC_u8_s16_s8_sym_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- pre_rshift, out_scale, post_rshift, out_tensor, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs depthwise convolution for signed
- * 8-bit integer inputs/outputs with bias inputs and symmetric
- * quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector is
- * enabled and its size must be equal to
- * "(in_tensor_ch * ker_dim * ker_dim + 1) / 2".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * has to be equal to out_tensor_ch.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_dw_HWC_s8_s8_s8_sym_bias(const q7_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const q31_t *bias,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_dw_HWC_s8_s8_s8_sym_bias(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, pre_rshift, out_scale, post_rshift,
- out_tensor, out_tensor_dim, in_tmp_buf);
- #else
- return riscv_nn_conv_dw_HWC_s8_s8_s8_sym_bias(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, pre_rshift, out_scale, post_rshift,
- out_tensor, out_tensor_dim, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs depthwise convolution for signed
- * 8-bit integer inputs and signed 16-bit integer outputs with
- * bias inputs and symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector is
- * enabled and its size must be equal to
- * "(in_tensor_ch * ker_dim * ker_dim + 1) / 2".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * has to be equal to out_tensor_ch.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_dw_HWC_s8_s16_s8_sym_bias(const q7_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const q31_t *bias,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q15_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_dw_HWC_s8_s16_s8_sym_bias(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, pre_rshift, out_scale, post_rshift,
- out_tensor, out_tensor_dim, in_tmp_buf);
- #else
- return riscv_nn_conv_dw_HWC_s8_s16_s8_sym_bias(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, pre_rshift, out_scale, post_rshift,
- out_tensor, out_tensor_dim, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs depthwise convolution for unsigned
- * 8-bit integer inputs/outputs with bias inputs and symmetric
- * quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector is
- * enabled and its size must be equal to
- * "(in_tensor_ch * ker_dim * ker_dim + 1) / 2".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * has to be equal to out_tensor_ch.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_dw_HWC_u8_u8_s8_sym_bias(const u8_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const q31_t *bias,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- u8_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_dw_HWC_u8_u8_s8_sym_bias(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, pre_rshift, out_scale, post_rshift,
- out_tensor, out_tensor_dim, in_tmp_buf);
- #else
- return riscv_nn_conv_dw_HWC_u8_u8_s8_sym_bias(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, pre_rshift, out_scale, post_rshift,
- out_tensor, out_tensor_dim, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs depthwise convolution for unsigned
- * 8-bit integer inputs and signed 8-bit integer outputs with
- * bias inputs and symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector is
- * enabled and its size must be equal to
- * "(in_tensor_ch * ker_dim * ker_dim + 1) / 2".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * has to be equal to out_tensor_ch.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_dw_HWC_u8_s8_s8_sym_bias(const u8_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const q31_t *bias,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_dw_HWC_u8_s8_s8_sym_bias(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, pre_rshift, out_scale, post_rshift,
- out_tensor, out_tensor_dim, in_tmp_buf);
- #else
- return riscv_nn_conv_dw_HWC_u8_s8_s8_sym_bias(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, pre_rshift, out_scale, post_rshift,
- out_tensor, out_tensor_dim, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs depthwise convolution for unsigned
- * 8-bit integer inputs and signed 16-bit integer outputs with
- * bias inputs and symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector is
- * enabled and its size must be equal to
- * "(in_tensor_ch * ker_dim * ker_dim + 1) / 2".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * has to be equal to out_tensor_ch.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_dw_HWC_u8_s16_s8_sym_bias(const u8_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const q31_t *bias,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q15_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_dw_HWC_u8_s16_s8_sym_bias(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, pre_rshift, out_scale, post_rshift,
- out_tensor, out_tensor_dim, in_tmp_buf);
- #else
- return riscv_nn_conv_dw_HWC_u8_s16_s8_sym_bias(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, pre_rshift, out_scale, post_rshift,
- out_tensor, out_tensor_dim, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs depthwise convolution for signed
- * 8-bit integer inputs/outputs with symmetric quantization on
- * the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector is
- * enabled and its size must be equal to
- * "(in_tensor_ch * ker_dim * ker_dim + 1) / 2".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * has to be equal to out_tensor_ch.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_dw_HWC_s8_s8_s8_sym(const q7_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_dw_HWC_s8_s8_s8_sym(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, pre_rshift, out_scale, post_rshift, out_tensor,
- out_tensor_dim, in_tmp_buf);
- #else
- return riscv_nn_conv_dw_HWC_s8_s8_s8_sym(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, pre_rshift, out_scale, post_rshift, out_tensor,
- out_tensor_dim, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs depthwise convolution for signed
- * 8-bit integer inputs and signed 16-bit integer outputs with
- * symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector is
- * enabled and its size must be equal to
- * "(in_tensor_ch * ker_dim * ker_dim + 1) / 2".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * has to be equal to out_tensor_ch.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_dw_HWC_s8_s16_s8_sym(const q7_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q15_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_dw_HWC_s8_s16_s8_sym(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, pre_rshift, out_scale, post_rshift, out_tensor,
- out_tensor_dim, in_tmp_buf);
- #else
- return riscv_nn_conv_dw_HWC_s8_s16_s8_sym(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, pre_rshift, out_scale, post_rshift, out_tensor,
- out_tensor_dim, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs depthwise convolution for unsigned
- * 8-bit integer inputs/outputs with symmetric quantization on
- * the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector is
- * enabled and its size must be equal to
- * (in_tensor_ch * ker_dim * ker_dim + 1) / 2.
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * has to be equal to out_tensor_ch.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_dw_HWC_u8_u8_s8_sym(const u8_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- u8_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_dw_HWC_u8_u8_s8_sym(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, pre_rshift, out_scale, post_rshift, out_tensor,
- out_tensor_dim, in_tmp_buf);
- #else
- return riscv_nn_conv_dw_HWC_u8_u8_s8_sym(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, pre_rshift, out_scale, post_rshift, out_tensor,
- out_tensor_dim, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs depthwise convolution for unsigned
- * 8-bit integer inputs and signed 8-bit integer outputs, and
- * with symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector is
- * enabled and its size must be equal to
- * "(in_tensor_ch * ker_dim * ker_dim + 1) / 2".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * has to be equal to out_tensor_ch.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_dw_HWC_u8_s8_s8_sym(const u8_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q7_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_dw_HWC_u8_s8_s8_sym(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, pre_rshift, out_scale, post_rshift, out_tensor,
- out_tensor_dim, in_tmp_buf);
- #else
- return riscv_nn_conv_dw_HWC_u8_s8_s8_sym(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, pre_rshift, out_scale, post_rshift, out_tensor,
- out_tensor_dim, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs depthwise convolution for unsigned
- * 8-bit integer inputs and signed 16-bit integer outputs with
- * symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp or -mext-vector is
- * enabled and its size must be equal to
- * "(in_tensor_ch * ker_dim * ker_dim + 1) / 2".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * has to be equal to out_tensor_ch.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_conv_dw_HWC_u8_s16_s8_sym(const u8_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q15_t *out_tensor,
- const uint16_t out_tensor_dim,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_dw_HWC_u8_s16_s8_sym(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, pre_rshift, out_scale, post_rshift, out_tensor,
- out_tensor_dim, in_tmp_buf);
- #else
- return riscv_nn_conv_dw_HWC_u8_s16_s8_sym(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, pre_rshift, out_scale, post_rshift, out_tensor,
- out_tensor_dim, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs depthwise convolution for signed
- * 8-bit integer inputs/outputs in any x and y dimensions with
- * bias inputs and symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its size
- * must be equal to "(in_tensor_ch *
- * ker_dim_x * ker_dim_y + 1) / 2".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * must be equal to out_tensor_ch.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- /* Dispatch wrapper: forwards to the TPT kernel when built with the Zcc
-  * toolchain, otherwise to the RISC-V NN library implementation. */
- static inline int32_t hpm_nn_conv_dw_HWC_s8_s8_s8_sym_bias_any(
-     const q7_t *in_tensor, const uint16_t in_tensor_dim_x,
-     const uint16_t in_tensor_dim_y, const uint16_t in_tensor_ch,
-     const q7_t *ker_weight, const uint16_t out_tensor_ch,
-     const uint16_t ker_dim_x, const uint16_t ker_dim_y,
-     const uint16_t pad_x, const uint16_t pad_y,
-     const uint16_t stride_x, const uint16_t stride_y,
-     const q31_t *bias, const uint16_t pre_rshift,
-     const uint16_t out_scale, const uint16_t post_rshift,
-     q7_t *out_tensor, const uint16_t out_tensor_dim_x,
-     const uint16_t out_tensor_dim_y, q15_t *in_tmp_buf)
- {
- #ifdef __zcc__
-     return tpt_nn_conv_dw_HWC_s8_s8_s8_sym_bias_any(
-         in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
-         ker_weight, out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y,
-         stride_x, stride_y, bias, pre_rshift, out_scale, post_rshift,
-         out_tensor, out_tensor_dim_x, out_tensor_dim_y, in_tmp_buf);
- #else
-     return riscv_nn_conv_dw_HWC_s8_s8_s8_sym_bias_any(
-         in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
-         ker_weight, out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y,
-         stride_x, stride_y, bias, pre_rshift, out_scale, post_rshift,
-         out_tensor, out_tensor_dim_x, out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs depthwise convolution for signed
- * 8-bit integer inputs and signed 16-bit integer outputs in
- * any x and y dimensions with bias inputs and symmetric
- * quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its size
- * must be equal to "(in_tensor_ch *
- * ker_dim_x * ker_dim_y + 1) / 2".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * must be equal to out_tensor_ch.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- /* Dispatch wrapper: forwards to the TPT kernel when built with the Zcc
-  * toolchain, otherwise to the RISC-V NN library implementation. */
- static inline int32_t hpm_nn_conv_dw_HWC_s8_s16_s8_sym_bias_any(
-     const q7_t *in_tensor, const uint16_t in_tensor_dim_x,
-     const uint16_t in_tensor_dim_y, const uint16_t in_tensor_ch,
-     const q7_t *ker_weight, const uint16_t out_tensor_ch,
-     const uint16_t ker_dim_x, const uint16_t ker_dim_y,
-     const uint16_t pad_x, const uint16_t pad_y,
-     const uint16_t stride_x, const uint16_t stride_y,
-     const q31_t *bias, const uint16_t pre_rshift,
-     const uint16_t out_scale, const uint16_t post_rshift,
-     q15_t *out_tensor, const uint16_t out_tensor_dim_x,
-     const uint16_t out_tensor_dim_y, q15_t *in_tmp_buf)
- {
- #ifdef __zcc__
-     return tpt_nn_conv_dw_HWC_s8_s16_s8_sym_bias_any(
-         in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
-         ker_weight, out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y,
-         stride_x, stride_y, bias, pre_rshift, out_scale, post_rshift,
-         out_tensor, out_tensor_dim_x, out_tensor_dim_y, in_tmp_buf);
- #else
-     return riscv_nn_conv_dw_HWC_s8_s16_s8_sym_bias_any(
-         in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
-         ker_weight, out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y,
-         stride_x, stride_y, bias, pre_rshift, out_scale, post_rshift,
-         out_tensor, out_tensor_dim_x, out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs depthwise convolution for unsigned
- * 8-bit integer inputs/outputs in any x and y dimensions with
- * bias inputs and symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its size
- * must be equal to "(in_tensor_ch *
- * ker_dim_x * ker_dim_y + 1) / 2".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * must be equal to out_tensor_ch.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- /* Dispatch wrapper: forwards to the TPT kernel when built with the Zcc
-  * toolchain, otherwise to the RISC-V NN library implementation. */
- static inline int32_t hpm_nn_conv_dw_HWC_u8_u8_s8_sym_bias_any(
-     const u8_t *in_tensor, const uint16_t in_tensor_dim_x,
-     const uint16_t in_tensor_dim_y, const uint16_t in_tensor_ch,
-     const q7_t *ker_weight, const uint16_t out_tensor_ch,
-     const uint16_t ker_dim_x, const uint16_t ker_dim_y,
-     const uint16_t pad_x, const uint16_t pad_y,
-     const uint16_t stride_x, const uint16_t stride_y,
-     const q31_t *bias, const uint16_t pre_rshift,
-     const uint16_t out_scale, const uint16_t post_rshift,
-     u8_t *out_tensor, const uint16_t out_tensor_dim_x,
-     const uint16_t out_tensor_dim_y, q15_t *in_tmp_buf)
- {
- #ifdef __zcc__
-     return tpt_nn_conv_dw_HWC_u8_u8_s8_sym_bias_any(
-         in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
-         ker_weight, out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y,
-         stride_x, stride_y, bias, pre_rshift, out_scale, post_rshift,
-         out_tensor, out_tensor_dim_x, out_tensor_dim_y, in_tmp_buf);
- #else
-     return riscv_nn_conv_dw_HWC_u8_u8_s8_sym_bias_any(
-         in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
-         ker_weight, out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y,
-         stride_x, stride_y, bias, pre_rshift, out_scale, post_rshift,
-         out_tensor, out_tensor_dim_x, out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs depthwise convolution for unsigned
- * 8-bit integer inputs and signed 8-bit integer outputs in any
- * x and y dimensions with bias inputs and symmetric
- * quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its size
- * must be equal to "(in_tensor_ch *
- * ker_dim_x * ker_dim_y + 1) / 2".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * must be equal to out_tensor_ch.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- /* Dispatch wrapper: forwards to the TPT kernel when built with the Zcc
-  * toolchain, otherwise to the RISC-V NN library implementation. */
- static inline int32_t hpm_nn_conv_dw_HWC_u8_s8_s8_sym_bias_any(
-     const u8_t *in_tensor, const uint16_t in_tensor_dim_x,
-     const uint16_t in_tensor_dim_y, const uint16_t in_tensor_ch,
-     const q7_t *ker_weight, const uint16_t out_tensor_ch,
-     const uint16_t ker_dim_x, const uint16_t ker_dim_y,
-     const uint16_t pad_x, const uint16_t pad_y,
-     const uint16_t stride_x, const uint16_t stride_y,
-     const q31_t *bias, const uint16_t pre_rshift,
-     const uint16_t out_scale, const uint16_t post_rshift,
-     q7_t *out_tensor, const uint16_t out_tensor_dim_x,
-     const uint16_t out_tensor_dim_y, q15_t *in_tmp_buf)
- {
- #ifdef __zcc__
-     return tpt_nn_conv_dw_HWC_u8_s8_s8_sym_bias_any(
-         in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
-         ker_weight, out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y,
-         stride_x, stride_y, bias, pre_rshift, out_scale, post_rshift,
-         out_tensor, out_tensor_dim_x, out_tensor_dim_y, in_tmp_buf);
- #else
-     return riscv_nn_conv_dw_HWC_u8_s8_s8_sym_bias_any(
-         in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
-         ker_weight, out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y,
-         stride_x, stride_y, bias, pre_rshift, out_scale, post_rshift,
-         out_tensor, out_tensor_dim_x, out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs depthwise convolution for unsigned
- * 8-bit integer inputs and signed 16-bit integer outputs in
- * any x and y dimensions with bias inputs and symmetric
- * quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its size
- * must be equal to "(in_tensor_ch *
- * ker_dim_x * ker_dim_y + 1) / 2".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * must be equal to out_tensor_ch.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- /* Dispatch wrapper: forwards to the TPT kernel when built with the Zcc
-  * toolchain, otherwise to the RISC-V NN library implementation. */
- static inline int32_t hpm_nn_conv_dw_HWC_u8_s16_s8_sym_bias_any(
-     const u8_t *in_tensor, const uint16_t in_tensor_dim_x,
-     const uint16_t in_tensor_dim_y, const uint16_t in_tensor_ch,
-     const q7_t *ker_weight, const uint16_t out_tensor_ch,
-     const uint16_t ker_dim_x, const uint16_t ker_dim_y,
-     const uint16_t pad_x, const uint16_t pad_y,
-     const uint16_t stride_x, const uint16_t stride_y,
-     const q31_t *bias, const uint16_t pre_rshift,
-     const uint16_t out_scale, const uint16_t post_rshift,
-     q15_t *out_tensor, const uint16_t out_tensor_dim_x,
-     const uint16_t out_tensor_dim_y, q15_t *in_tmp_buf)
- {
- #ifdef __zcc__
-     return tpt_nn_conv_dw_HWC_u8_s16_s8_sym_bias_any(
-         in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
-         ker_weight, out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y,
-         stride_x, stride_y, bias, pre_rshift, out_scale, post_rshift,
-         out_tensor, out_tensor_dim_x, out_tensor_dim_y, in_tmp_buf);
- #else
-     return riscv_nn_conv_dw_HWC_u8_s16_s8_sym_bias_any(
-         in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
-         ker_weight, out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y,
-         stride_x, stride_y, bias, pre_rshift, out_scale, post_rshift,
-         out_tensor, out_tensor_dim_x, out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs depthwise convolution for signed
- * 8-bit integer inputs/outputs in any x and y dimensions with
- * bias inputs and symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its size
- * must be equal to "(in_tensor_ch *
- * ker_dim_x * ker_dim_y + 1) / 2".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * must be equal to out_tensor_ch.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- /* Dispatch wrapper (no-bias variant): forwards to the TPT kernel when
-  * built with the Zcc toolchain, otherwise to the RISC-V NN routine. */
- static inline int32_t hpm_nn_conv_dw_HWC_s8_s8_s8_sym_any(
-     const q7_t *in_tensor, const uint16_t in_tensor_dim_x,
-     const uint16_t in_tensor_dim_y, const uint16_t in_tensor_ch,
-     const q7_t *ker_weight, const uint16_t out_tensor_ch,
-     const uint16_t ker_dim_x, const uint16_t ker_dim_y,
-     const uint16_t pad_x, const uint16_t pad_y,
-     const uint16_t stride_x, const uint16_t stride_y,
-     const uint16_t pre_rshift, const uint16_t out_scale,
-     const uint16_t post_rshift, q7_t *out_tensor,
-     const uint16_t out_tensor_dim_x, const uint16_t out_tensor_dim_y,
-     q15_t *in_tmp_buf)
- {
- #ifdef __zcc__
-     return tpt_nn_conv_dw_HWC_s8_s8_s8_sym_any(
-         in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
-         ker_weight, out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y,
-         stride_x, stride_y, pre_rshift, out_scale, post_rshift,
-         out_tensor, out_tensor_dim_x, out_tensor_dim_y, in_tmp_buf);
- #else
-     return riscv_nn_conv_dw_HWC_s8_s8_s8_sym_any(
-         in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
-         ker_weight, out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y,
-         stride_x, stride_y, pre_rshift, out_scale, post_rshift,
-         out_tensor, out_tensor_dim_x, out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs depthwise convolution for signed
- * 8-bit integer inputs and signed 16-bit integer outputs in
- * any x and y dimensions with symmetric quantization on the
- * outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its size
- * must be equal to "(in_tensor_ch *
- * ker_dim_x * ker_dim_y + 1) / 2".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * must be equal to out_tensor_ch.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- /* Dispatch wrapper (no-bias variant): forwards to the TPT kernel when
-  * built with the Zcc toolchain, otherwise to the RISC-V NN routine. */
- static inline int32_t hpm_nn_conv_dw_HWC_s8_s16_s8_sym_any(
-     const q7_t *in_tensor, const uint16_t in_tensor_dim_x,
-     const uint16_t in_tensor_dim_y, const uint16_t in_tensor_ch,
-     const q7_t *ker_weight, const uint16_t out_tensor_ch,
-     const uint16_t ker_dim_x, const uint16_t ker_dim_y,
-     const uint16_t pad_x, const uint16_t pad_y,
-     const uint16_t stride_x, const uint16_t stride_y,
-     const uint16_t pre_rshift, const uint16_t out_scale,
-     const uint16_t post_rshift, q15_t *out_tensor,
-     const uint16_t out_tensor_dim_x, const uint16_t out_tensor_dim_y,
-     q15_t *in_tmp_buf)
- {
- #ifdef __zcc__
-     return tpt_nn_conv_dw_HWC_s8_s16_s8_sym_any(
-         in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
-         ker_weight, out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y,
-         stride_x, stride_y, pre_rshift, out_scale, post_rshift,
-         out_tensor, out_tensor_dim_x, out_tensor_dim_y, in_tmp_buf);
- #else
-     return riscv_nn_conv_dw_HWC_s8_s16_s8_sym_any(
-         in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
-         ker_weight, out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y,
-         stride_x, stride_y, pre_rshift, out_scale, post_rshift,
-         out_tensor, out_tensor_dim_x, out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs depthwise convolution for unsigned
- * 8-bit integer inputs/outputs in any x and y dimensions with
- * symmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its size
- * must be equal to "(in_tensor_ch *
- * ker_dim_x * ker_dim_y + 1) / 2".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * must be equal to out_tensor_ch.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- /* Dispatch wrapper (no-bias variant): forwards to the TPT kernel when
-  * built with the Zcc toolchain, otherwise to the RISC-V NN routine. */
- static inline int32_t hpm_nn_conv_dw_HWC_u8_u8_s8_sym_any(
-     const u8_t *in_tensor, const uint16_t in_tensor_dim_x,
-     const uint16_t in_tensor_dim_y, const uint16_t in_tensor_ch,
-     const q7_t *ker_weight, const uint16_t out_tensor_ch,
-     const uint16_t ker_dim_x, const uint16_t ker_dim_y,
-     const uint16_t pad_x, const uint16_t pad_y,
-     const uint16_t stride_x, const uint16_t stride_y,
-     const uint16_t pre_rshift, const uint16_t out_scale,
-     const uint16_t post_rshift, u8_t *out_tensor,
-     const uint16_t out_tensor_dim_x, const uint16_t out_tensor_dim_y,
-     q15_t *in_tmp_buf)
- {
- #ifdef __zcc__
-     return tpt_nn_conv_dw_HWC_u8_u8_s8_sym_any(
-         in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
-         ker_weight, out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y,
-         stride_x, stride_y, pre_rshift, out_scale, post_rshift,
-         out_tensor, out_tensor_dim_x, out_tensor_dim_y, in_tmp_buf);
- #else
-     return riscv_nn_conv_dw_HWC_u8_u8_s8_sym_any(
-         in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
-         ker_weight, out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y,
-         stride_x, stride_y, pre_rshift, out_scale, post_rshift,
-         out_tensor, out_tensor_dim_x, out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs depthwise convolution for unsigned
- * 8-bit integer inputs and signed 8-bit integer outputs in any
- * x and y dimensions with symmetric quantization on the
- * outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its size
- * must be equal to "(in_tensor_ch *
- * ker_dim_x * ker_dim_y + 1) / 2".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * must be equal to out_tensor_ch.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- /* Dispatch wrapper (no-bias variant): forwards to the TPT kernel when
-  * built with the Zcc toolchain, otherwise to the RISC-V NN routine. */
- static inline int32_t hpm_nn_conv_dw_HWC_u8_s8_s8_sym_any(
-     const u8_t *in_tensor, const uint16_t in_tensor_dim_x,
-     const uint16_t in_tensor_dim_y, const uint16_t in_tensor_ch,
-     const q7_t *ker_weight, const uint16_t out_tensor_ch,
-     const uint16_t ker_dim_x, const uint16_t ker_dim_y,
-     const uint16_t pad_x, const uint16_t pad_y,
-     const uint16_t stride_x, const uint16_t stride_y,
-     const uint16_t pre_rshift, const uint16_t out_scale,
-     const uint16_t post_rshift, q7_t *out_tensor,
-     const uint16_t out_tensor_dim_x, const uint16_t out_tensor_dim_y,
-     q15_t *in_tmp_buf)
- {
- #ifdef __zcc__
-     return tpt_nn_conv_dw_HWC_u8_s8_s8_sym_any(
-         in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
-         ker_weight, out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y,
-         stride_x, stride_y, pre_rshift, out_scale, post_rshift,
-         out_tensor, out_tensor_dim_x, out_tensor_dim_y, in_tmp_buf);
- #else
-     return riscv_nn_conv_dw_HWC_u8_s8_s8_sym_any(
-         in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
-         ker_weight, out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y,
-         stride_x, stride_y, pre_rshift, out_scale, post_rshift,
-         out_tensor, out_tensor_dim_x, out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs depthwise convolution for unsigned
- * 8-bit integer inputs and signed 16-bit integer outputs in
- * any x and y dimensions with symmetric quantization on the
- * outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its size
- * must be equal to "(in_tensor_ch *
- * ker_dim_x * ker_dim_y + 1) / 2".
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * must be equal to out_tensor_ch.
- *
- * @note
- * The outputs will be 2-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- /* Dispatch wrapper (no-bias variant): forwards to the TPT kernel when
-  * built with the Zcc toolchain, otherwise to the RISC-V NN routine. */
- static inline int32_t hpm_nn_conv_dw_HWC_u8_s16_s8_sym_any(
-     const u8_t *in_tensor, const uint16_t in_tensor_dim_x,
-     const uint16_t in_tensor_dim_y, const uint16_t in_tensor_ch,
-     const q7_t *ker_weight, const uint16_t out_tensor_ch,
-     const uint16_t ker_dim_x, const uint16_t ker_dim_y,
-     const uint16_t pad_x, const uint16_t pad_y,
-     const uint16_t stride_x, const uint16_t stride_y,
-     const uint16_t pre_rshift, const uint16_t out_scale,
-     const uint16_t post_rshift, q15_t *out_tensor,
-     const uint16_t out_tensor_dim_x, const uint16_t out_tensor_dim_y,
-     q15_t *in_tmp_buf)
- {
- #ifdef __zcc__
-     return tpt_nn_conv_dw_HWC_u8_s16_s8_sym_any(
-         in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
-         ker_weight, out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y,
-         stride_x, stride_y, pre_rshift, out_scale, post_rshift,
-         out_tensor, out_tensor_dim_x, out_tensor_dim_y, in_tmp_buf);
- #else
-     return riscv_nn_conv_dw_HWC_u8_s16_s8_sym_any(
-         in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
-         ker_weight, out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y,
-         stride_x, stride_y, pre_rshift, out_scale, post_rshift,
-         out_tensor, out_tensor_dim_x, out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs 1x1 kernels convolution for signed
- * 8-bit integer inputs/outputs in any x and y dimensions with
- * asymmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] in_tensor_group number of input tensor groups
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_shift pointer of the shift vector for output
- * tensor
- * @param[in] out_scale pointer of the scaling vector for output
- * tensor
- * @param[in] out_offset value of offset for the output tensor.
- * It should be in the range of -128 to 127.
- * @param[in] in_offset value of offset for the input tensor
- * It should be in the range of -127 to 128.
- * @param[in] act_min minimum value to clip out the output
- * tensor. It should be in the range of
- * -128 to 127.
- * @param[in] act_max maximum value to clip out the output
- * tensor. It should be in the range of
- * -128 to 127.
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] tmp_buf dummy
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints (see the Note
- * below for details).
- *
- * @note
- * - The input constraints of this function are:
- * - in_tensor_ch is a multiple of 4
- * - pad_x is 0
- * - pad_y is 0
- * - stride_x is 1
- * - stride_y is 1
- */
- /* Dispatch wrapper for the fast 1x1 (pointwise) convolution with
-  * asymmetric output quantization; selects the TPT or RISC-V NN backend
-  * at compile time. */
- static inline int32_t hpm_nn_conv_1x1_HWC_s8_s8_s8_asym_bias_fast_any(
-     const q7_t *in_tensor, const uint16_t in_tensor_dim_x,
-     const uint16_t in_tensor_dim_y, const uint16_t in_tensor_ch,
-     const uint16_t in_tensor_group, const q7_t *ker_weight,
-     const uint16_t out_tensor_ch, const uint16_t pad_x,
-     const uint16_t pad_y, const uint16_t stride_x, const uint16_t stride_y,
-     const int32_t *bias, q7_t *out_tensor, const int32_t *out_shift,
-     const int32_t *out_scale, const int32_t out_offset,
-     const int32_t in_offset, const int32_t act_min, const int32_t act_max,
-     const uint16_t out_tensor_dim_x, const uint16_t out_tensor_dim_y,
-     q15_t *tmp_buf)
- {
- #ifdef __zcc__
-     /* Pack the scalar arguments into the parameter structs the TPT API
-      * expects; field order matches the struct declarations. */
-     tpt_nn_conv_1x1_asym_params conv_params = {
-         in_offset, out_offset, stride_x, stride_y,
-         pad_x, pad_y, act_min, act_max};
-     tpt_nn_per_channel_quant_params quant_params = {out_scale, out_shift};
-     tpt_nn_1x1_asym_dims conv_dims = {
-         in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
-         in_tensor_group, out_tensor_ch};
-     return tpt_convolve_1x1_s8_s8_s8_asym_bias_any(
-         out_tensor, in_tensor, ker_weight, bias,
-         &conv_params, &quant_params, &conv_dims, tmp_buf);
- #else
-     return riscv_nn_conv_1x1_HWC_s8_s8_s8_asym_bias_fast_any(
-         in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
-         in_tensor_group, ker_weight, out_tensor_ch, pad_x, pad_y, stride_x,
-         stride_y, bias, out_tensor, out_shift, out_scale, out_offset,
-         in_offset, act_min, act_max, out_tensor_dim_x, out_tensor_dim_y,
-         tmp_buf);
- #endif
- }
- /**
- * @brief This function is used to get the needed size, in bytes, by
- * the input temporary buffer of riscv_nn_conv_1x1_HWC_s8_s8_s8_asym_bias_fast_any.
- * @param[in] in_tensor_ch number of input tensor channels
- * @return This function returns the needed size by the temporary buffer.
- */
- static inline int32_t
- hpm_nn_conv_1x1_HWC_s8_s8_s8_asym_bias_fast_any_get_buffer_size(
- const uint16_t in_tensor_ch) {
- #if defined(__zcc__)convol
- return tpt_convolve_1x1_s8_s8_s8_asym_bias_any_get_buf_size(
- in_tensor_ch);
- #else
- return riscv_nn_conv_1x1_HWC_s8_s8_s8_asym_bias_fast_any_get_buffer_size(
- in_tensor_ch);
- #endif
- }
- /**
- * @brief This function performs 1xn kernels convolution for signed
- * 8-bit integer inputs/outputs in any x and y dimensions with
- * asymmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] in_tensor_group dummy
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] bias pointer of the bias vector
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_shift pointer of the shift vector for output
- * tensor
- * @param[in] out_scale pointer of the scaling vector for output
- * tensor
- * @param[in] out_offset value of offset for the output tensor.
- * It should be in the range of -128 to 127.
- * @param[in] in_offset value of offset for the input tensor
- * It should be in the range of -127 to 128.
- * @param[in] act_min minimum value to clip out the ouput
- * tensor. It should be in the range of
- * -128 to 127.
- * @param[in] act_max maximum value to clip out the ouput
- * tensor. It should be in the range of
- * -128 to 127.
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its needed
- * size could be get by calling riscv_nn_conv_1xn_HWC_s8_s8_s8_asym_bias_any_get_buffer_size.
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraint that
- * out_tensor_dim_x is a multiple of 4.
- */
- static inline int hpm_nn_conv_1xn_HWC_s8_s8_s8_asym_bias_any(const q7_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_ch,
- const uint16_t in_tensor_group,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t pad_x,
- const uint16_t stride_x,
- const int32_t *bias,
- q7_t *out_tensor,
- const int32_t *out_shift,
- const int32_t *out_scale,
- const int32_t out_offset,
- const int32_t in_offset,
- const int32_t act_min,
- const int32_t act_max,
- const uint16_t out_tensor_dim_x,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- tpt_nn_conv_1xn_asym_params aConv_params = {in_offset, out_offset, stride_x, pad_x,
- act_min, act_max};
- tpt_nn_per_channel_quant_params aQuant_params = {out_scale, out_shift};
- tpt_nn_1xn_asym_dims aConv_dims = {in_tensor_dim_x, in_tensor_ch, in_tensor_group,
- ker_dim_x, out_tensor_dim_x, out_tensor_ch};
- return tpt_convolve_1xn_s8_s8_s8_asym_bias_any(out_tensor, in_tensor, ker_weight,
- bias, &aConv_params, &aQuant_params, &aConv_dims, in_tmp_buf);
- #else
- return riscv_nn_conv_1xn_HWC_s8_s8_s8_asym_bias_any(
- in_tensor, in_tensor_dim_x, in_tensor_ch, in_tensor_group, ker_weight,
- out_tensor_ch, ker_dim_x, pad_x, stride_x, bias, out_tensor, out_shift,
- out_scale, out_offset, in_offset, act_min, act_max, out_tensor_dim_x,
- in_tmp_buf);
- #endif
- }
- /**
- * @brief This function is used to get the needed size, in bytes, by
- * the input temporary buffer of riscv_nn_conv_1xn_HWC_s8_s8_s8_asym_bias_any.
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel. It is
- * always 1 here.
- * @return This function returns the needed size by the temporary buffer.
- */
- static inline int32_t hpm_nn_conv_1xn_HWC_s8_s8_s8_asym_bias_any_get_buffer_size(const uint16_t in_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y)
- {
- #if defined(__zcc__)
- return tpt_convolve_1xn_s8_s8_s8_asym_bias_any_get_buffer_size(
- in_tensor_ch, ker_dim_x, ker_dim_y);
- #else
- return riscv_nn_conv_1xn_HWC_s8_s8_s8_asym_bias_any_get_buffer_size(
- in_tensor_ch, ker_dim_x, ker_dim_y);
- #endif
- }
- /**
- * @brief This function performs convolution for signed 8-bit integer
- * inputs/outputs in any x and y dimensions with asymmetric
- * quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] in_tensor_group number of input tensor groups
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_shift pointer of the shift vector for output
- * tensor
- * @param[in] out_scale pointer of the scaling vector for output
- * tensor
- * @param[in] out_offset value of offset for the output tensor.
- * It should be in the range of -128 to 127.
- * @param[in] in_offset value of offset for the input tensor
- * It should be in the range of -127 to 128.
- * @param[in] act_min minimum value to clip out the ouput
- * tensor. It should be in the range of
- * -128 to 127.
- * @param[in] act_max maximum value to clip out the ouput
- * tensor. It should be in the range of
- * -128 to 127.
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its needed
- * size could be get by calling riscv_nn_conv_HWC_s8_s8_s8_asym_bias_any_get_buffer_size.
- * @return This function only returns 0.
- */
- static inline int32_t hpm_nn_conv_HWC_s8_s8_s8_asym_bias_any(const q7_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const uint16_t in_tensor_group,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const int32_t *bias,
- q7_t *out_tensor,
- const int32_t *out_shift,
- const int32_t *out_scale,
- const int32_t out_offset,
- const int32_t in_offset,
- const int32_t act_min,
- const int32_t act_max,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- tpt_nn_conv_asym_params aConv_params = {stride_x, stride_y, pad_x, pad_y,
- in_offset, out_offset, act_min, act_max};
- tpt_nn_per_channel_quant_params aQuant_params = {out_scale, out_shift};
- tpt_nn_asym_dims aConv_dims = {in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
- in_tensor_group, ker_dim_x, ker_dim_y, out_tensor_dim_x, out_tensor_dim_y,
- out_tensor_ch};
- return tpt_convolve_s8_s8_s8_asym_bias_any(out_tensor, in_tensor, ker_weight,
- bias, &aConv_params, &aQuant_params, &aConv_dims, in_tmp_buf);
- #else
- return riscv_nn_conv_HWC_s8_s8_s8_asym_bias_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
- in_tensor_group, ker_weight, out_tensor_ch, ker_dim_x, ker_dim_y, pad_x,
- pad_y, stride_x, stride_y, bias, out_tensor, out_shift, out_scale,
- out_offset, in_offset, act_min, act_max, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function is used to get the needed size, in bytes, by
- * the input temporary buffer of riscv_nn_conv_HWC_s8_s8_s8_asym_bias_any.
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @return This function returns the needed size by the temporary buffer.
- */
- static inline int32_t hpm_nn_conv_HWC_s8_s8_s8_asym_bias_any_get_buffer_size(const uint16_t in_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_s8_s8_s8_asym_bias_any_get_buffer_size(
- in_tensor_ch, ker_dim_x, ker_dim_y);
- #else
- return riscv_nn_conv_HWC_s8_s8_s8_asym_bias_any_get_buffer_size(
- in_tensor_ch, ker_dim_x, ker_dim_y);
- #endif
- }
- /**
- * @brief This function performs depthwise 3x3 kernels convolution for
- * signed 8-bit integer inputs/outputs in any x and y
- * dimensions with asymmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_shift pointer of the shift vector for output
- * tensor
- * @param[in] out_scale pointer of the scaling vector for output
- * tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] out_offset value of offset for the output tensor.
- * It should be in the range of -128 to 127.
- * @param[in] in_offset value of offset for the input tensor
- * It should be in the range of -127 to 128.
- * @param[in] act_min minimum value to clip out the ouput
- * tensor. It should be in the range of
- * -128 to 127.
- * @param[in] act_max maximum value to clip out the ouput
- * tensor. It should be in the range of
- * -128 to 127.
- * @param[in] dilation_x dummy
- * @param[in] dilation_y dummy
- * @param[in] tmp_buf dummy
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints that in_tensor_ch
- * has to be equal to out_tensor_ch and pad_x is less than 1.
- */
- static inline int32_t hpm_nn_conv_dw_HWC_3x3_s8_s8_s8_asym_bias_any(const int8_t *in_tensor,
- const int32_t in_tensor_dim_x,
- const int32_t in_tensor_dim_y,
- const int32_t in_tensor_ch,
- const int8_t *ker_weight,
- const int32_t out_tensor_ch,
- const int32_t pad_x,
- const int32_t pad_y,
- const int32_t stride_x,
- const int32_t stride_y,
- const int32_t *bias,
- int8_t *out_tensor,
- const int32_t *out_shift,
- const int32_t *out_scale,
- const int32_t out_tensor_dim_x,
- const int32_t out_tensor_dim_y,
- const int32_t out_offset,
- const int32_t in_offset,
- const int32_t act_min,
- const int32_t act_max,
- const int32_t dilation_x,
- const int32_t dilation_y,
- int16_t *tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_dw_HWC_3x3_s8_s8_s8_asym_bias_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, pad_x, pad_y, stride_x, stride_y, bias, out_tensor,
- out_shift, out_scale, out_tensor_dim_x, out_tensor_dim_y, out_offset,
- in_offset, act_min, act_max, dilation_x, dilation_y, tmp_buf);
- #else
- return riscv_nn_conv_dw_HWC_3x3_s8_s8_s8_asym_bias_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, pad_x, pad_y, stride_x, stride_y, bias, out_tensor,
- out_shift, out_scale, out_tensor_dim_x, out_tensor_dim_y, out_offset,
- in_offset, act_min, act_max, dilation_x, dilation_y, tmp_buf);
- #endif
- }
- /**
- * @brief This function performs depthwise convolution for signed
- * 8-bit interger inputs/outputs in any x and y dimensions with
- * asymmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels.
- * out_tensor_ch is equal to ch_mult *
- * in_tensor_ch.
- * @param[in] ch_mult multiplier of input tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_shift pointer of the shift vector for output
- * tensor
- * @param[in] out_scale pointer of the scaling vector for output
- * tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] out_offset value of offset for the output tensor.
- * It should be in the range of -128 to 127.
- * @param[in] in_offset value of offset for the input tensor
- * It should be in the range of -127 to 128.
- * @param[in] act_min minimum value to clip out the ouput
- * tensor. It should be in the range of
- * -128 to 127.
- * @param[in] act_max maximum value to clip out the ouput
- * tensor. It should be in the range of
- * -128 to 127.
- * @param[in] dilation_x dummy
- * @param[in] dilation_y dummy
- * @param[in] tmp_buf dummy
- * @return This function only returns 0.
- *
- * @b Example:
- * @code
- * to be modified...
- * @endcode
- */
- static inline int32_t hpm_nn_conv_dw_HWC_s8_s8_s8_asym_bias_any(const q7_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ch_mult,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const int32_t *bias,
- q7_t *out_tensor,
- const int32_t *out_shift,
- const int32_t *out_scale,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- const int32_t out_offset,
- const int32_t in_offset,
- const int32_t act_min,
- const int32_t act_max,
- const uint16_t dilation_x,
- const uint16_t dilation_y,
- q15_t *tmp_buf)
- {
- #if defined(__zcc__)
- tpt_nn_dw_conv_asym_params aConv_params = {in_offset, out_offset, ch_mult,
- stride_x, stride_y, pad_x, pad_y, dilation_x, dilation_y, act_min, act_max};
- tpt_nn_per_channel_quant_params aQuant_params = {out_scale, out_shift};
- tpt_nn_dw_conv_asym_dims aConv_dims = {in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
- ker_dim_x, ker_dim_y, out_tensor_dim_x, out_tensor_dim_y, out_tensor_ch};
- return tpt_depthwise_conv_s8_s8_s8_asym_bias_any(out_tensor, in_tensor, ker_weight,
- bias, &aConv_params, &aQuant_params, &aConv_dims, tmp_buf);
- #else
- return riscv_nn_conv_dw_HWC_s8_s8_s8_asym_bias_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ch_mult, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x,
- stride_y, bias, out_tensor, out_shift, out_scale, out_tensor_dim_x,
- out_tensor_dim_y, out_offset, in_offset, act_min, act_max, dilation_x,
- dilation_y, tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast depthwise convolution for signed
- * 8-bit integer inputs/outputs in any x and y dimensions with
- * asymmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_shift pointer of the shift vector for output
- * tensor
- * @param[in] out_scale pointer of the scaling vector for output
- * tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] out_offset value of offset for the output tensor.
- * It should be in the range of -128 to 127.
- * @param[in] in_offset value of offset for the input tensor
- * It should be in the range of -127 to 128.
- * @param[in] act_min minimum value to clip out the ouput
- * tensor. It should be in the range of
- * -128 to 127.
- * @param[in] act_max maximum value to clip out the ouput
- * tensor. It should be in the range of
- * -128 to 127.
- * @param[in] dilation_x dummy
- * @param[in] dilation_y dummy
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its needed
- * size could be get by calling riscv_nn_conv_dw_HWC_s8_s8_s8_asym_bias_fast_any_get_buffer_size.
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraint that in_tensor_ch
- * has to be equal to out_tensor_ch.
- */
- static inline int32_t hpm_nn_conv_dw_HWC_s8_s8_s8_asym_bias_fast_any(const q7_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const int32_t *bias,
- q7_t *out_tensor,
- const int32_t *out_shift,
- const int32_t *out_scale,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- const int32_t out_offset,
- const int32_t in_offset,
- const int32_t act_min,
- const int32_t act_max,
- const uint16_t dilation_x,
- const uint16_t dilation_y,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- tpt_nn_dw_conv_asym_fast_params aConv_params = {in_offset, out_offset,
- stride_x, stride_y, pad_x, pad_y, dilation_x, dilation_y, act_min, act_max};
- tpt_nn_per_channel_quant_params aQuant_params = {out_scale, out_shift};
- tpt_nn_dw_conv_asym_dims aConv_dims = {in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
- ker_dim_x, ker_dim_y, out_tensor_dim_x, out_tensor_dim_y, out_tensor_ch};
- return tpt_depthwise_conv_s8_s8_s8_asym_bias_fast_any(out_tensor, in_tensor, ker_weight,
- bias, &aConv_params, &aQuant_params, &aConv_dims, in_tmp_buf);
- #else
- return riscv_nn_conv_dw_HWC_s8_s8_s8_asym_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, out_tensor, out_shift, out_scale, out_tensor_dim_x,
- out_tensor_dim_y, out_offset, in_offset, act_min, act_max, dilation_x,
- dilation_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function is used to get the needed size, in bytes, by
- * the input temporary buffer of riscv_nn_conv_dw_HWC_s8_s8_s8_asym_bias_fast_any.
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @return This function returns the needed size by the temporary buffer.
- */
- static inline int32_t hpm_nn_conv_dw_HWC_s8_s8_s8_asym_bias_fast_any_get_buffer_size(const uint16_t in_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y)
- {
- #if defined(__zcc__)
- return tpt_depthwise_conv_s8_s8_s8_asym_bias_fast_any_get_buffer_size(
- in_tensor_ch, ker_dim_x, ker_dim_y);
- #else
- return riscv_nn_conv_dw_HWC_s8_s8_s8_asym_bias_fast_any_get_buffer_size(
- in_tensor_ch, ker_dim_x, ker_dim_y);
- #endif
- }
- /**
- * @brief This function performs depthwise convolution for unsigned
- * 8-bit integer inputs/outputs in any x and y dimensions with
- * asymmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] ch_mult multiplier of input tensor channels
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] dilation_x dummy
- * @param[in] dilation_y dummy
- * @param[in] bias pointer of the bias vector
- * @param[in] in_offset value of offset for the input tensor
- * It should be in the range of -255 to 0.
- * @param[in] ker_offset value of offset for the filter kernel
- * It should be in the range of -255 to 0.
- * @param[in] out_offset value of offset for the output tensor.
- * It should be in the range of 0 to 255.
- * @param[in] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] act_min minimum value to clip out the ouput
- * tensor. It should be in the range of
- * 0 to 255.
- * @param[in] act_max maximum value to clip out the ouput
- * tensor. It should be in the range of
- * 0 to 255.
- * @param[in] out_shift shift amount for the output tensor
- * @param[in] out_scale value of sacling for the output tensor
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraint that both ch_mult
- * and ker_dim_x are multiple of 2.
- */
- static inline int32_t hpm_nn_conv_dw_HWC_u8_u8_u8_asym_bias_any(const uint8_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const uint8_t *ker_weight,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const int16_t ch_mult,
- const int16_t pad_x,
- const int16_t pad_y,
- const int16_t stride_x,
- const int16_t stride_y,
- const int16_t dilation_x,
- const int16_t dilation_y,
- const int32_t *bias,
- const int32_t in_offset,
- const int32_t ker_offset,
- const int32_t out_offset,
- uint8_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- const int32_t act_min,
- const int32_t act_max,
- const int32_t out_shift,
- const int32_t out_scale)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_dw_HWC_u8_u8_u8_asym_bias_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- ker_dim_x, ker_dim_y, ch_mult, pad_x, pad_y, stride_x, stride_y,
- dilation_x, dilation_y, bias, in_offset, ker_offset, out_offset,
- out_tensor, out_tensor_dim_x, out_tensor_dim_y, act_min, act_max,
- out_shift, out_scale);
- #else
- return riscv_nn_conv_dw_HWC_u8_u8_u8_asym_bias_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- ker_dim_x, ker_dim_y, ch_mult, pad_x, pad_y, stride_x, stride_y,
- dilation_x, dilation_y, bias, in_offset, ker_offset, out_offset,
- out_tensor, out_tensor_dim_x, out_tensor_dim_y, act_min, act_max,
- out_shift, out_scale);
- #endif
- }
- #ifdef __riscv_zfh
- /**
- * @brief This function performs 1x1 kernels convolution for 16-bit
- * half-precision floating point inputs/outputs in any x and y
- * dimensions.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf dummy
- * @param[in] tmp_buf dummy
- * @return This function only returns 0.
- *
- * @note
- * - The input constraints of this function are:
- * - in_tensor_ch is a multiple of 4
- * - out_tensor_ch is a multiple of 2
- * - ker_dim_x is 1
- * - ker_dim_y is 1
- * - pad_x is 0
- * - pad_y is 0
- * - stride_x is 1
- * - stride_y is 1
- */
- static inline int32_t hpm_nn_conv_1x1_HWC_f16_f16_f16_bias_any(const float16_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const float16_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const float16_t *bias,
- float16_t *out_tensor,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- float16_t *in_tmp_buf,
- float16_t *tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_1x1_HWC_f16_f16_f16_bias_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, out_tensor, out_tensor_dim_x, out_tensor_dim_y, in_tmp_buf,
- tmp_buf);
- #else
- return riscv_nn_conv_1x1_HWC_f16_f16_f16_bias_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, out_tensor, out_tensor_dim_x, out_tensor_dim_y, in_tmp_buf,
- tmp_buf);
- #endif
- }
- /**
- * @brief This function performs convolution for 16-bit half-precision
- * floating point inputs/outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] bias pointer of the bias vector
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-vector is
- * enabled and its size must be equal to
- * "2 * in_tensor_ch * ker_dim * ker_dim".
- * @param[in] tmp_buf dummy
- * @return This function returns 0.
- */
- static inline int32_t hpm_nn_conv_HWC_f16_f16_f16_bias(const float16_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const float16_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const float16_t *bias,
- float16_t *out_tensor,
- const uint16_t out_tensor_dim,
- float16_t *in_tmp_buf,
- float16_t *tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_f16_f16_f16_bias(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, out_tensor, out_tensor_dim, in_tmp_buf,
- tmp_buf);
- #else
- return riscv_nn_conv_HWC_f16_f16_f16_bias(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, out_tensor, out_tensor_dim, in_tmp_buf,
- tmp_buf);
- #endif
- }
- /**
- * @brief This function performs depthwise convolution for 16-bit
- * half-precision floating point inputs/outputs
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] bias pointer of the bias vector
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-vector is
- * enabled and its size must be equal to
- * "in_tensor_ch * ker_dim * ker_dim".
- * @param[in] tmp_buf dummy
- * @return This function returns 0.
- */
- static inline int32_t hpm_nn_conv_dw_HWC_f16_f16_f16_bias(const float16_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const float16_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const float16_t *bias,
- float16_t *out_tensor,
- const uint16_t out_tensor_dim,
- float16_t *in_tmp_buf,
- float16_t *tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_dw_HWC_f16_f16_f16_bias(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, out_tensor, out_tensor_dim, in_tmp_buf,
- tmp_buf);
- #else
- return riscv_nn_conv_dw_HWC_f16_f16_f16_bias(
- in_tensor, in_tensor_dim, in_tensor_ch, ker_weight, out_tensor_ch,
- ker_dim, pad, stride, bias, out_tensor, out_tensor_dim, in_tmp_buf,
- tmp_buf);
- #endif
- }
- #endif
- /**
- * * @}
- */
- #endif
- #ifdef HPM_EN_MATH_NN_RVP32_LIB
- #if defined(__zcc__)
- #include "tpt_nn_convolution.h"
- #else
- #include "riscv_nn_convolution.h"
- #endif
- /**
- * @brief This function performs convolution for signed 8-bit integer
- * inputs/outputs in any x and y dimensions with asymmetric
- * quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] in_tensor_group number of input tensor groups
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_shift pointer of the shift vector for output
- * tensor
- * @param[in] out_scale pointer of the scaling vector for output
- * tensor
- * @param[in] out_offset value of offset for the output tensor.
- * It should be in the range of -128 to 127.
- * @param[in] in_offset value of offset for the input tensor
- * It should be in the range of -127 to 128.
- * @param[in] act_min minimum value to clip out the ouput
- * tensor. It should be in the range of
- * -128 to 127.
- * @param[in] act_max maximum value to clip out the ouput
- * tensor. It should be in the range of
- * -128 to 127.
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its needed
- * size could be get by calling riscv_nn_conv_HWC_s8_s8_s8_asym_bias_any_get_buffer_size.
- * @return This function only returns 0.
- */
- static inline int32_t hpm_nn_conv_HWC_s8_s8_s8_asym_bias_any(const q7_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const uint16_t in_tensor_group,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const int32_t *bias,
- q7_t *out_tensor,
- const int32_t *out_shift,
- const int32_t *out_scale,
- const int32_t out_offset,
- const int32_t in_offset,
- const int32_t act_min,
- const int32_t act_max,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- tpt_nn_conv_asym_params aConv_params = {stride_x, stride_y, pad_x, pad_y,
- in_offset, out_offset, act_min, act_max};
- tpt_nn_per_channel_quant_params aQuant_params = {out_scale, out_shift};
- tpt_nn_asym_dims aConv_dims = {in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
- in_tensor_group, ker_dim_x, ker_dim_y, out_tensor_dim_x, out_tensor_dim_y,
- out_tensor_ch};
- return tpt_convolve_s8_s8_s8_asym_bias_any(out_tensor, in_tensor, ker_weight,
- bias, &aConv_params, &aQuant_params, &aConv_dims, in_tmp_buf);
- #else
- return riscv_nn_conv_HWC_s8_s8_s8_asym_bias_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
- in_tensor_group, ker_weight, out_tensor_ch, ker_dim_x, ker_dim_y, pad_x,
- pad_y, stride_x, stride_y, bias, out_tensor, out_shift, out_scale,
- out_offset, in_offset, act_min, act_max, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs 1x1 kernels convolution for signed
- * 8-bit interger inputs/outputs in any x and y dimensions with
- * asymmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] in_tensor_group number of input tensor groups
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_shift pointer of the shift vector for output
- * tensor
- * @param[in] out_scale pointer of the scaling vector for output
- * tensor
- * @param[in] out_offset value of offset for the output tensor.
- * It should be in the range of -128 to 127.
- * @param[in] in_offset value of offset for the input tensor
- * It should be in the range of -127 to 128.
- * @param[in] act_min minimum value to clip out the ouput
- * tensor. It should be in the range of
- * -128 to 127.
- * @param[in] act_max maximum value to clip out the ouput
- * tensor. It should be in the range of
- * -128 to 127.
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] tmp_buf dummy
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraints (see the Note
- * below for details).
- *
- * @note
- * - The input constraints of this function are:
- * - in_tensor_ch is a multiple of 4
- * - pad_x is 0
- * - pad_y is 0
- * - stride_x is 1
- * - stride_y is 1
- */
- static inline int32_t hpm_nn_conv_1x1_HWC_s8_s8_s8_asym_bias_fast_any(const q7_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const uint16_t in_tensor_group,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const int32_t *bias,
- q7_t *out_tensor,
- const int32_t *out_shift,
- const int32_t *out_scale,
- const int32_t out_offset,
- const int32_t in_offset,
- const int32_t act_min,
- const int32_t act_max,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q15_t *tmp_buf)
- {
- #if defined(__zcc__)
- tpt_nn_conv_1x1_asym_params aConv_params = {in_offset, out_offset, stride_x,
- stride_y, pad_x, pad_y, act_min, act_max};
- tpt_nn_per_channel_quant_params aQuant_params = {out_scale, out_shift};
- tpt_nn_1x1_asym_dims aConv_dims = {in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
- in_tensor_group, out_tensor_ch};
- return tpt_convolve_1x1_s8_s8_s8_asym_bias_any(out_tensor, in_tensor, ker_weight,
- bias, &aConv_params, &aQuant_params, &aConv_dims, tmp_buf);
- #else
- return riscv_nn_conv_1x1_HWC_s8_s8_s8_asym_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
- in_tensor_group, ker_weight, out_tensor_ch, pad_x, pad_y, stride_x,
- stride_y, bias, out_tensor, out_shift, out_scale, out_offset, in_offset,
- act_min, act_max, out_tensor_dim_x, out_tensor_dim_y, tmp_buf);
- #endif
- }
- /**
- * @brief This function performs depthwise convolution for signed
- * 8-bit interger inputs/outputs in any x and y dimensions with
- * asymmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels.
- * out_tensor_ch is equal to ch_mult *
- * in_tensor_ch.
- * @param[in] ch_mult multiplier of input tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_shift pointer of the shift vector for output
- * tensor
- * @param[in] out_scale pointer of the scaling vector for output
- * tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] out_offset value of offset for the output tensor.
- * It should be in the range of -128 to 127.
- * @param[in] in_offset value of offset for the input tensor
- * It should be in the range of -127 to 128.
- * @param[in] act_min minimum value to clip out the ouput
- * tensor. It should be in the range of
- * -128 to 127.
- * @param[in] act_max maximum value to clip out the ouput
- * tensor. It should be in the range of
- * -128 to 127.
- * @param[in] dilation_x dummy
- * @param[in] dilation_y dummy
- * @param[in] tmp_buf dummy
- * @return This function only returns 0.
- *
- * @b Example:
- * @code
- * to be modified...
- * @endcode
- */
- static inline int32_t hpm_nn_conv_dw_HWC_s8_s8_s8_asym_bias_any(const q7_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ch_mult,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const int32_t *bias,
- q7_t *out_tensor,
- const int32_t *out_shift,
- const int32_t *out_scale,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- const int32_t out_offset,
- const int32_t in_offset,
- const int32_t act_min,
- const int32_t act_max,
- const uint16_t dilation_x,
- const uint16_t dilation_y,
- q15_t *tmp_buf)
- {
- #if defined(__zcc__)
- tpt_nn_dw_conv_asym_params aConv_params = {in_offset, out_offset, ch_mult,
- stride_x, stride_y, pad_x, pad_y, dilation_x, dilation_y, act_min, act_max};
- tpt_nn_per_channel_quant_params aQuant_params = {out_scale, out_shift};
- tpt_nn_dw_conv_asym_dims aConv_dims = {in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
- ker_dim_x, ker_dim_y, out_tensor_dim_x, out_tensor_dim_y, out_tensor_ch};
- return tpt_depthwise_conv_s8_s8_s8_asym_bias_any(out_tensor, in_tensor, ker_weight,
- bias, &aConv_params, &aQuant_params, &aConv_dims, tmp_buf);
- #else
- return riscv_nn_conv_dw_HWC_s8_s8_s8_asym_bias_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ch_mult, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x,
- stride_y, bias, out_tensor, out_shift, out_scale, out_tensor_dim_x,
- out_tensor_dim_y, out_offset, in_offset, act_min, act_max, dilation_x,
- dilation_y, tmp_buf);
- #endif
- }
- /**
- * @brief This function performs 1xn kernels convolution for signed
- * 8-bit integer inputs/outputs in any x and y dimensions with
- * asymmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] in_tensor_group dummy
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] bias pointer of the bias vector
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] out_shift pointer of the shift vector for output
- * tensor
- * @param[in] out_scale pointer of the scaling vector for output
- * tensor
- * @param[in] out_offset value of offset for the output tensor.
- * It should be in the range of -128 to 127.
- * @param[in] in_offset value of offset for the input tensor
- * It should be in the range of -127 to 128.
- * @param[in] act_min minimum value to clip out the ouput
- * tensor. It should be in the range of
- * -128 to 127.
- * @param[in] act_max maximum value to clip out the ouput
- * tensor. It should be in the range of
- * -128 to 127.
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its needed
- * size could be get by calling riscv_nn_conv_1xn_HWC_s8_s8_s8_asym_bias_any_get_buffer_size.
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraint that
- * out_tensor_dim_x is a multiple of 4.
- */
- static inline int hpm_nn_conv_1xn_HWC_s8_s8_s8_asym_bias_any(const q7_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_ch,
- const uint16_t in_tensor_group,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t pad_x,
- const uint16_t stride_x,
- const int32_t *bias,
- q7_t *out_tensor,
- const int32_t *out_shift,
- const int32_t *out_scale,
- const int32_t out_offset,
- const int32_t in_offset,
- const int32_t act_min,
- const int32_t act_max,
- const uint16_t out_tensor_dim_x,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- tpt_nn_conv_1xn_asym_params aConv_params = {in_offset, out_offset, stride_x, pad_x,
- act_min, act_max};
- tpt_nn_per_channel_quant_params aQuant_params = {out_scale, out_shift};
- tpt_nn_1xn_asym_dims aConv_dims = {in_tensor_dim_x, in_tensor_ch, in_tensor_group,
- ker_dim_x, out_tensor_dim_x, out_tensor_ch};
- return tpt_convolve_1xn_s8_s8_s8_asym_bias_any(out_tensor, in_tensor, ker_weight,
- bias, &aConv_params, &aQuant_params, &aConv_dims, in_tmp_buf);
- #else
- return riscv_nn_conv_1xn_HWC_s8_s8_s8_asym_bias_any(
- in_tensor, in_tensor_dim_x, in_tensor_ch, in_tensor_group, ker_weight,
- out_tensor_ch, ker_dim_x, pad_x, stride_x, bias, out_tensor, out_shift,
- out_scale, out_offset, in_offset, act_min, act_max, out_tensor_dim_x,
- in_tmp_buf);
- #endif
- }
- /**
- * @brief This function performs fast depthwise convolution for signed
- * 8-bit integer inputs/outputs in any x and y dimensions with
- * asymmetric quantization on the outputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_weight pointer of kernel weights
- * @param[in] out_tensor_ch number of output tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] bias pointer of the bias vector
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_shift pointer of the shift vector for output
- * tensor
- * @param[in] out_scale pointer of the scaling vector for output
- * tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] out_offset value of offset for the output tensor.
- * It should be in the range of -128 to 127.
- * @param[in] in_offset value of offset for the input tensor
- * It should be in the range of -127 to 128.
- * @param[in] act_min minimum value to clip out the ouput
- * tensor. It should be in the range of
- * -128 to 127.
- * @param[in] act_max maximum value to clip out the ouput
- * tensor. It should be in the range of
- * -128 to 127.
- * @param[in] dilation_x dummy
- * @param[in] dilation_y dummy
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp or
- * -mext-vector is enabled and its needed
- * size could be get by calling riscv_nn_conv_dw_HWC_s8_s8_s8_asym_bias_fast_any_get_buffer_size.
- * @return This function returns 0 on success; otherwise, it returns -1
- * if its inputs do not meet the constraint that in_tensor_ch
- * has to be equal to out_tensor_ch.
- */
- static inline int32_t hpm_nn_conv_dw_HWC_s8_s8_s8_asym_bias_fast_any(const q7_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const q7_t *ker_weight,
- const uint16_t out_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const int32_t *bias,
- q7_t *out_tensor,
- const int32_t *out_shift,
- const int32_t *out_scale,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- const int32_t out_offset,
- const int32_t in_offset,
- const int32_t act_min,
- const int32_t act_max,
- const uint16_t dilation_x,
- const uint16_t dilation_y,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- tpt_nn_dw_conv_asym_fast_params aConv_params = {in_offset, out_offset,
- stride_x, stride_y, pad_x, pad_y, dilation_x, dilation_y, act_min, act_max};
- tpt_nn_per_channel_quant_params aQuant_params = {out_scale, out_shift};
- tpt_nn_dw_conv_asym_dims aConv_dims = {in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
- ker_dim_x, ker_dim_y, out_tensor_dim_x, out_tensor_dim_y, out_tensor_ch};
- return tpt_depthwise_conv_s8_s8_s8_asym_bias_fast_any(out_tensor, in_tensor, ker_weight,
- bias, &aConv_params, &aQuant_params, &aConv_dims, in_tmp_buf);
- #else
- return riscv_nn_conv_dw_HWC_s8_s8_s8_asym_bias_fast_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_weight,
- out_tensor_ch, ker_dim_x, ker_dim_y, pad_x, pad_y, stride_x, stride_y,
- bias, out_tensor, out_shift, out_scale, out_tensor_dim_x,
- out_tensor_dim_y, out_offset, in_offset, act_min, act_max, dilation_x,
- dilation_y, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function is used to get the needed size, in bytes, by
- * the input temporary buffer of riscv_nn_conv_1x1_HWC_s8_s8_s8_asym_bias_fast_any.
- * @param[in] in_tensor_ch number of input tensor channels
- * @return This function returns the needed size by the temporary buffer.
- */
- static inline int32_t hpm_nn_conv_1x1_HWC_s8_s8_s8_asym_bias_fast_any_get_buffer_size(const uint16_t in_tensor_ch)
- {
- #if defined(__zcc__)
- return tpt_convolve_1x1_s8_s8_s8_asym_bias_any_get_buf_size(
- in_tensor_ch);
- #else
- return riscv_nn_conv_1x1_HWC_s8_s8_s8_asym_bias_fast_any_get_buffer_size(
- in_tensor_ch);
- #endif
- }
- /**
- * @brief This function is used to get the needed size, in bytes, by
- * the input temporary buffer of riscv_nn_conv_dw_HWC_s8_s8_s8_asym_bias_fast_any.
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @return This function returns the needed size by the temporary buffer.
- */
- static inline int32_t hpm_nn_conv_dw_HWC_s8_s8_s8_asym_bias_fast_any_get_buffer_size(const uint16_t in_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y)
- {
- #if defined(__zcc__)
- return tpt_depthwise_conv_s8_s8_s8_asym_bias_fast_any_get_buffer_size(
- in_tensor_ch, ker_dim_x, ker_dim_y);
- #else
- return riscv_nn_conv_dw_HWC_s8_s8_s8_asym_bias_fast_any_get_buffer_size(
- in_tensor_ch, ker_dim_x, ker_dim_y);
- #endif
- }
- /**
- * @brief This function is used to get the needed size, in bytes, by
- * the input temporary buffer of riscv_nn_conv_1xn_HWC_s8_s8_s8_asym_bias_any.
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel. It is
- * always 1 here.
- * @return This function returns the needed size by the temporary buffer.
- */
- static inline int32_t hpm_nn_conv_1xn_HWC_s8_s8_s8_asym_bias_any_get_buffer_size(const uint16_t in_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y)
- {
- #if defined(__zcc__)
- return tpt_convolve_1xn_s8_s8_s8_asym_bias_any_get_buffer_size(
- in_tensor_ch, ker_dim_x, ker_dim_y);
- #else
- return riscv_nn_conv_1xn_HWC_s8_s8_s8_asym_bias_any_get_buffer_size(
- in_tensor_ch, ker_dim_x, ker_dim_y);
- #endif
- }
- /**
- * @brief This function is used to get the needed size, in bytes, by
- * the input temporary buffer of riscv_nn_conv_HWC_s8_s8_s8_asym_bias_any.
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @return This function returns the needed size by the temporary buffer.
- */
- static inline int32_t hpm_nn_conv_HWC_s8_s8_s8_asym_bias_any_get_buffer_size(const uint16_t in_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y)
- {
- #if defined(__zcc__)
- return tpt_nn_conv_HWC_s8_s8_s8_asym_bias_any_get_buffer_size(
- in_tensor_ch, ker_dim_x, ker_dim_y);
- #else
- return riscv_nn_conv_HWC_s8_s8_s8_asym_bias_any_get_buffer_size(
- in_tensor_ch, ker_dim_x, ker_dim_y);
- #endif
- }
- #endif
- #endif
- #ifdef HPM_MATH_NN_CONNECTED
- #ifdef HPM_EN_MATH_NN_LIB
- #if defined(__zcc__)
- #include "tpt_nn_fully_connected.h"
- #else
- #include "riscv_nn_fully_connected.h"
- #endif
- /**
- * @defgroup nnfullyconnect NN Fully Connected Functions
- * @ingroup hpmmath
- * @brief The fully connected functions multiply the input vector by a weight
- * matrix and add a bias, if any, to the result. The supported combinations of
- * input vector and weight matrix are (signed 8-bit integer, signed 8-bit integer),
- * (unsigned 8-bit integer, signed 8-bit integer), (signed 16-bit integer,
- * signed 8-bit integer), (signed 16-bit integer, signed 16-bit integer) and
- * (16-bit half-precision floating point, 16-bit half-precision floating point).
- *
- * @{
- */
- /**
- * @brief This is a fully connected layer function for signed 8-bit
- * integer inputs with shift-based quantization on the outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] bias_lshift left shift amount for the bias
- * @param[in] out_rshift right shift amount for the output
- * @param[in] bias pointer of the bias vector
- * @param[out] out_vec pointer of the output vector
- * @param[in] in_tmp_buf dummy
- * @return This function only returns 0.
- *
- * @b Example:
- * @code
- * #define IN_SIZE 2048
- * #define OUT_SIZE 256
- * #define BIAS_LSHIFT 9
- * #define OUT_RSHIFT 9
- *
- * q7_t in_vec[IN_SIZE] = {...};;
- * q7_t wt_mat[IN_SIZE * OUT_SIZE] {...};
- * q7_t bias[OUT_SIZE] = {...};
- * q7_t out_vec[OUT_SIZE];
- *
- * hpm_nn_fc_s8_s8_s8_sft_bias(in_vec, wt_mat, IN_SIZE, OUT_SIZE, BIAS_LSHIFT,
- * OUT_RSHIFT, bias, out_vec, NULL);
- * @endcode
- */
- static inline int32_t hpm_nn_fc_s8_s8_s8_sft_bias(const q7_t *in_vec,
- const q7_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t bias_lshift,
- const uint16_t out_rshift,
- const q7_t *bias,
- q7_t *out_vec,
- q15_t *in_tmp_buf)
- #if defined(__zcc__)
- return tpt_nn_fc_s8_s8_s8_sft_bias(in_vec, wt_mat, size, wt_row_num,
- bias_lshift, out_rshift, bias, out_vec,
- in_tmp_buf);
- #else
- return riscv_nn_fc_s8_s8_s8_sft_bias(in_vec, wt_mat, size, wt_row_num,
- bias_lshift, out_rshift, bias, out_vec,
- in_tmp_buf);
- #endif
- }
- /**
- * @brief This is a fully connected layer function for signed 8-bit
- * integer inputs with interleaved multiplication and
- * shift-based quantization on the outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] bias_lshift left shift amount for the bias
- * @param[in] out_rshift right shift amount for the output
- * @param[in] bias pointer of the bias vector
- * @param[out] out_vec pointer of the output vector
- * @param[in] in_tmp_buf temporary buffer for input vector. It is
- * required when -mext-vector is enabled and
- * its size must be "2 * size".
- * @return This function only returns 0.
- *
- * @note
- * In this function, the input vector is multiplied by the weight matrix in
- * interleaved formats which could be obtained by riscv_nn_fc_s8_wt_converter.
- */
- static inline int32_t hpm_nn_fc_s8_s8_s8_sft_bias_fast(const q7_t *in_vec,
- const q7_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t bias_lshift,
- const uint16_t out_rshift,
- const q7_t *bias,
- q7_t *out_vec,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_s8_s8_s8_sft_bias_fast(in_vec, wt_mat, size, wt_row_num,
- bias_lshift, out_rshift, bias,
- out_vec, in_tmp_buf);
- #else
- return riscv_nn_fc_s8_s8_s8_sft_bias_fast(in_vec, wt_mat, size, wt_row_num,
- bias_lshift, out_rshift, bias,
- out_vec, in_tmp_buf);
- #endif
- }
- /**
- * @brief This is a fully connected layer function for signed 16-bit
- * integer inputs with shift-based quantization on the outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] bias_lshift left shift amount for the bias
- * @param[in] out_rshift right shift amount for the output
- * @param[in] bias pointer of the bias
- * @param[out] out_vec pointer of the output vector
- * @param[in] tmp_buf dummy
- * @return This function only returns 0.
- */
- static inline int32_t hpm_nn_fc_s16_s16_s16_sft_bias(const q15_t *in_vec,
- const q15_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t bias_lshift,
- const uint16_t out_rshift,
- const q15_t *bias,
- q15_t *out_vec,
- q15_t *tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_s16_s16_s16_sft_bias(in_vec, wt_mat, size, wt_row_num,
- bias_lshift, out_rshift, bias, out_vec,
- tmp_buf);
- #else
- return riscv_nn_fc_s16_s16_s16_sft_bias(in_vec, wt_mat, size, wt_row_num,
- bias_lshift, out_rshift, bias,
- out_vec, tmp_buf);
- #endif
- }
- /**
- * @brief This is a fully connected layer function for signed 16-bit
- * integer inputs with interleaved multiplication and
- * shift-based quantization on the outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] bias_lshift left shift amount for the bias
- * @param[in] out_rshift right shift amount for the output
- * @param[in] bias pointer of the bias
- * @param[out] out_vec pointer of the output vector
- * @param[in] in_tmp_buf temporary buffer for input vector. It is
- * required when -mext-vector is enabled and
- * its size must be 4 * size.
- * @return This function only returns 0.
- *
- *
- * @note
- * In this function, the input vector is multiplied by a weight matrix in
- * interleaved formats which could be obtained by riscv_nn_fc_s16_wt_converter.
- */
- static inline int32_t hpm_nn_fc_s16_s16_s16_sft_bias_fast(const q15_t *in_vec,
- const q15_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t bias_lshift,
- const uint16_t out_rshift,
- const q15_t *bias,
- q15_t *out_vec,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_s16_s16_s16_sft_bias_fast(in_vec, wt_mat, size, wt_row_num,
- bias_lshift, out_rshift, bias,
- out_vec, in_tmp_buf);
- #else
- return riscv_nn_fc_s16_s16_s16_sft_bias_fast(in_vec, wt_mat, size, wt_row_num,
- bias_lshift, out_rshift, bias,
- out_vec, in_tmp_buf);
- #endif
- }
- /**
- * @brief This function multiplies a signed 16-bit integer input
- * vector by a signed 8-bit integer weight matrix with
- * shift-based quantization on the outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] bias_lshift left shift amount for the bias
- * @param[in] out_rshift right shift amount for the output
- * @param[in] bias pointer of the bias
- * @param[out] out_vec pointer of the output vector
- * @param[in] tmp_buf dummy
- * @return This function only returns 0.
- */
- static inline int32_t hpm_nn_fc_mat_vec_s16_s16_s8_sft_bias(const q15_t *in_vec,
- const q7_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t bias_lshift,
- const uint16_t out_rshift,
- const q7_t *bias,
- q15_t *out_vec,
- q15_t *tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_mat_vec_s16_s16_s8_sft_bias(in_vec, wt_mat, size, wt_row_num,
- bias_lshift, out_rshift, bias,
- out_vec, tmp_buf);
- #else
- return riscv_nn_fc_mat_vec_s16_s16_s8_sft_bias(
- in_vec, wt_mat, size, wt_row_num, bias_lshift, out_rshift, bias, out_vec,
- tmp_buf);
- #endif
- }
- /**
- * @brief This function multiplies a signed 16-bit integer input
- * vector by a signed 8-bit integer weight matrix with
- * interleaved multiplication and shift-based quantization on
- * the outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] bias_lshift left shift amount for the bias
- * @param[in] out_rshift right shift amount for the output
- * @param[in] bias pointer of the bias
- * @param[out] out_vec pointer of the output vector
- * @param[in] tmp_buf dummy
- * @return This function only returns 0.
- *
- * @note
- * In this function, the input vector is multiplied by a weight matrix in
- * interleaved formats which could be obtained by
- * hpm_nn_fc_mat_vec_s8_wt_converter.
- */
- static inline int32_t hpm_nn_fc_mat_vec_s16_s16_s8_sft_bias_fast(const q15_t *in_vec,
- const q7_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t bias_lshift,
- const uint16_t out_rshift,
- const q7_t *bias,
- q15_t *out_vec,
- q15_t *tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_mat_vec_s16_s16_s8_sft_bias_fast(
- in_vec, wt_mat, size, wt_row_num, bias_lshift, out_rshift, bias, out_vec,
- tmp_buf);
- #else
- return riscv_nn_fc_mat_vec_s16_s16_s8_sft_bias_fast(
- in_vec, wt_mat, size, wt_row_num, bias_lshift, out_rshift, bias, out_vec,
- tmp_buf);
- #endif
- }
- /**
- * @brief This is a fully connected layer function for signed 8-bit
- * integer inputs/outputs with bias inputs and symmetric
- * quantization on the outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] pre_rshift right shift amount for the output before the
- * scaling
- * @param[in] out_scale scaling value for the output
- * @param[in] post_rshift right shift amount for the output after the
- * scaling
- * @param[in] bias pointer of the bias vector
- * @param[out] out_vec pointer of the output vector
- * @param[in] in_tmp_buf temporary buffer for input vector. It is
- * required when -mext-dsp is enabled and its
- * size must be "size".
- * @return This function only returns 0.
- *
- * @note
- * The outputs will be two-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_fc_s8_s8_s8_sym_bias(const q7_t *in_vec,
- const q7_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- const q31_t *bias,
- q7_t *out_vec,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_s8_s8_s8_sym_bias(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift, bias,
- out_vec, in_tmp_buf);
- #else
- return riscv_nn_fc_s8_s8_s8_sym_bias(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift, bias,
- out_vec, in_tmp_buf);
- #endif
- }
- /**
- * @brief This is a fully connected layer function for signed 8-bit
- * integer inputs and signed 16-bit integer outputs with bias
- * inputs and symmetric quantization on the outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] pre_rshift right shift amount for the output before the
- * scaling
- * @param[in] out_scale scaling value for the output
- * @param[in] post_rshift right shift amount for the output after the
- * scaling
- * @param[in] bias pointer of the bias vector
- * @param[out] out_vec pointer of the output vector
- * @param[in] in_tmp_buf temporary buffer for input vector. It is
- * required when -mext-dsp is enabled and its
- * size must be "size".
- * @return This function only returns 0.
- *
- * @note
- * The outputs will be two-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_fc_s8_s16_s8_sym_bias(const q7_t *in_vec,
- const q7_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- const q31_t *bias,
- q15_t *out_vec,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_s8_s16_s8_sym_bias(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift, bias,
- out_vec, in_tmp_buf);
- #else
- return riscv_nn_fc_s8_s16_s8_sym_bias(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift,
- bias, out_vec, in_tmp_buf);
- #endif
- }
- /**
- * @brief This is a fully connected layer function for unsigned 8-bit
- * integer inputs/outputs with bias inputs and symmetric
- * quantization on the outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] pre_rshift right shift amount for the output before the
- * scaling
- * @param[in] out_scale scaling value for the output
- * @param[in] post_rshift right shift amount for the output after the
- * scaling
- * @param[in] bias pointer of the bias vector
- * @param[out] out_vec pointer of the output vector
- * @param[in] in_tmp_buf temporary buffer for input vector. It is
- * required when -mext-dsp is enabled and its
- * size must be "size".
- * @return This function only returns 0.
- *
- * @note
- * The outputs will be two-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_fc_u8_u8_s8_sym_bias(const u8_t *in_vec,
- const q7_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- const q31_t *bias,
- u8_t *out_vec,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_u8_u8_s8_sym_bias(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift, bias,
- out_vec, in_tmp_buf);
- #else
- return riscv_nn_fc_u8_u8_s8_sym_bias(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift, bias,
- out_vec, in_tmp_buf);
- #endif
- }
- /**
- * @brief This is a fully connected layer function for unsigned 8-bit
- * integer inputs and signed 8-bit integer outputs with bias
- * inputs and symmetric quantization on the outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] pre_rshift right shift amount for the output before the
- * scaling
- * @param[in] out_scale scaling value for the output
- * @param[in] post_rshift right shift amount for the output after the
- * scaling
- * @param[in] bias pointer of the bias vector
- * @param[out] out_vec pointer of the output vector
- * @param[in] in_tmp_buf temporary buffer for input vector. It is
- * required when -mext-dsp is enabled and its
- * size must be "size".
- * @return This function only returns 0.
- *
- * @note
- * The outputs will be two-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_fc_u8_s8_s8_sym_bias(const u8_t *in_vec,
- const q7_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- const q31_t *bias,
- q7_t *out_vec,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_u8_s8_s8_sym_bias(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift, bias,
- out_vec, in_tmp_buf);
- #else
- return riscv_nn_fc_u8_s8_s8_sym_bias(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift, bias,
- out_vec, in_tmp_buf);
- #endif
- }
- /**
- * @brief This is a fully connected layer function for unsigned 8-bit
- * integer inputs and signed 16-bit integer outputs with bias
- * inputs and symmetric quantization on the outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] pre_rshift right shift amount for the output before the
- * scaling
- * @param[in] out_scale scaling value for the output
- * @param[in] post_rshift right shift amount for the output after the
- * scaling
- * @param[in] bias pointer of the bias vector
- * @param[out] out_vec pointer of the output vector
- * @param[in] in_tmp_buf temporary buffer for input vector. It is
- * required when -mext-dsp is enabled and its
- * size must be "size".
- * @return This function only returns 0.
- *
- * @note
- * The outputs will be two-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_fc_u8_s16_s8_sym_bias(const u8_t *in_vec,
- const q7_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- const q31_t *bias,
- q15_t *out_vec,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_u8_s16_s8_sym_bias(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift, bias,
- out_vec, in_tmp_buf);
- #else
- return riscv_nn_fc_u8_s16_s8_sym_bias(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift,
- bias, out_vec, in_tmp_buf);
- #endif
- }
- /**
- * @brief This is a fully connected layer function for signed 8-bit
- * integer inputs/outputs with symmetric quantization on the
- * outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] pre_rshift right shift amount for the output before the
- * scaling
- * @param[in] out_scale scaling value for the output
- * @param[in] post_rshift right shift amount for the output after the
- * scaling
- * @param[out] out_vec pointer of the output vector
- * @param[in] in_tmp_buf temporary buffer for input vector. It is
- * required when -mext-dsp is enabled and its
- * size must be "size".
- * @return This function only returns 0.
- *
- * @note
- * The outputs will be two-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_fc_s8_s8_s8_sym(const q7_t *in_vec,
- const q7_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q7_t *out_vec,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_s8_s8_s8_sym(in_vec, wt_mat, size, wt_row_num, pre_rshift,
- out_scale, post_rshift, out_vec, in_tmp_buf);
- #else
- return riscv_nn_fc_s8_s8_s8_sym(in_vec, wt_mat, size, wt_row_num, pre_rshift,
- out_scale, post_rshift, out_vec, in_tmp_buf);
- #endif
- }
- /**
- * @brief This is a fully connected layer function for signed 8-bit
- * integer inputs and signed 16-bit integer outputs with
- * symmetric quantization on the outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] pre_rshift right shift amount for the output before the
- * scaling
- * @param[in] out_scale scaling value for the output
- * @param[in] post_rshift right shift amount for the output after the
- * scaling
- * @param[out] out_vec pointer of the output vector
- * @param[in] in_tmp_buf temporary buffer for input vector. It is
- * required when -mext-dsp is enabled and its
- * size must be "size".
- * @return This function only returns 0.
- *
- * @note
- * The outputs will be two-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_fc_s8_s16_s8_sym(const q7_t *in_vec,
- const q7_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q15_t *out_vec,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_s8_s16_s8_sym(in_vec, wt_mat, size, wt_row_num, pre_rshift,
- out_scale, post_rshift, out_vec, in_tmp_buf);
- #else
- return riscv_nn_fc_s8_s16_s8_sym(in_vec, wt_mat, size, wt_row_num, pre_rshift,
- out_scale, post_rshift, out_vec, in_tmp_buf);
- #endif
- }
- /**
- * @brief This is a fully connected layer function for unsigned 8-bit
- * integer inputs/outputs with symmetric quantization on the
- * outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] pre_rshift right shift amount for the output before the
- * scaling
- * @param[in] out_scale scaling value for the output
- * @param[in] post_rshift right shift amount for the output after the
- * scaling
- * @param[out] out_vec pointer of the output vector
- * @param[in] in_tmp_buf temporary buffer for input vector. It is
- * required when -mext-dsp is enabled and its
- * size must be "size".
- * @return This function only returns 0.
- *
- * @note
- * The outputs will be two-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_fc_u8_u8_s8_sym(const u8_t *in_vec,
- const q7_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- u8_t *out_vec,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_u8_u8_s8_sym(in_vec, wt_mat, size, wt_row_num, pre_rshift,
- out_scale, post_rshift, out_vec, in_tmp_buf);
- #else
- return riscv_nn_fc_u8_u8_s8_sym(in_vec, wt_mat, size, wt_row_num, pre_rshift,
- out_scale, post_rshift, out_vec, in_tmp_buf);
- #endif
- }
- /**
- * @brief This is a fully connected layer function for unsigned 8-bit
- * integer inputs and signed 8-bit integer outputs with
- * symmetric quantization on the outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] pre_rshift right shift amount for the output before the
- * scaling
- * @param[in] out_scale scaling value for the output
- * @param[in] post_rshift right shift amount for the output after the
- * scaling
- * @param[out] out_vec pointer of the output vector
- * @param[in] in_tmp_buf temporary buffer for input vector. It is
- * required when -mext-dsp is enabled and its
- * size must be "size".
- * @return This function only returns 0.
- *
- * @note
- * The outputs will be two-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_fc_u8_s8_s8_sym(const u8_t *in_vec,
- const q7_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q7_t *out_vec,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_u8_s8_s8_sym(in_vec, wt_mat, size, wt_row_num, pre_rshift,
- out_scale, post_rshift, out_vec, in_tmp_buf);
- #else
- return riscv_nn_fc_u8_s8_s8_sym(in_vec, wt_mat, size, wt_row_num, pre_rshift,
- out_scale, post_rshift, out_vec, in_tmp_buf);
- #endif
- }
- /**
- * @brief This is a fully connected layer function for unsigned 8-bit
- * integer inputs and signed 16-bit integer outputs with
- * symmetric quantization on the outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] pre_rshift right shift amount for the output before the
- * scaling
- * @param[in] out_scale scaling value for the output
- * @param[in] post_rshift right shift amount for the output after the
- * scaling
- * @param[out] out_vec pointer of the output vector
- * @param[in] in_tmp_buf temporary buffer for input vector. It is
- * required when -mext-dsp is enabled and its
- * size must be "size".
- * @return This function only returns 0.
- *
- * @note
- * The outputs will be two-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_fc_u8_s16_s8_sym(const u8_t *in_vec,
- const q7_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q15_t *out_vec,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_u8_s16_s8_sym(in_vec, wt_mat, size, wt_row_num, pre_rshift,
- out_scale, post_rshift, out_vec, in_tmp_buf);
- #else
- return riscv_nn_fc_u8_s16_s8_sym(in_vec, wt_mat, size, wt_row_num, pre_rshift,
- out_scale, post_rshift, out_vec, in_tmp_buf);
- #endif
- }
- /**
- * @brief This is a fully connected layer function for signed 8-bit
- * integer inputs/outputs with bias inputs, interleaved
- * multiplication and symmetric quantization on the outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[in] bias pointer of the bias vector
- * @param[out] out_vec pointer of the output vector
- * @param[in] in_tmp_buf temporary buffer for input vector. It is
- * required when -mext-vector is enabled and
- * its size must be 2 * size.
- * @return This function only returns 0.
- *
- * @note
- * - In this function, the input vector is multiplied by the weight matrix in
- * interleaved format which could be obtained by riscv_nn_fc_s8_wt_converter.
- * - The outputs will be two-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_fc_s8_s8_s8_sym_bias_fast(const q7_t *in_vec,
- const q7_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- const q31_t *bias,
- q7_t *out_vec,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_s8_s8_s8_sym_bias_fast(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift,
- bias, out_vec, in_tmp_buf);
- #else
- return riscv_nn_fc_s8_s8_s8_sym_bias_fast(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift,
- bias, out_vec, in_tmp_buf);
- #endif
- }
- /**
- * @brief This is a fully connected layer function for signed 8-bit
- * integer inputs and signed 16-bit integer outputs with bias
- * inputs, interleaved multiplication and symmetric
- * quantization on the outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[in] bias pointer of the bias vector
- * @param[out] out_vec pointer of the output vector
- * @param[in] in_tmp_buf temporary buffer for input vector. It is
- * required when -mext-vector is enabled and
- * its size must be 2 * size.
- * @return This function only returns 0.
- *
- * @note
- * - In this function, the input vector is multiplied by the weight matrix in
- * interleaved format which could be obtained by riscv_nn_fc_s8_wt_converter.
- * - The outputs will be two-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_fc_s8_s16_s8_sym_bias_fast(const q7_t *in_vec,
- const q7_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- const q31_t *bias,
- q15_t *out_vec,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_s8_s16_s8_sym_bias_fast(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift,
- bias, out_vec, in_tmp_buf);
- #else
- return riscv_nn_fc_s8_s16_s8_sym_bias_fast(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift,
- bias, out_vec, in_tmp_buf);
- #endif
- }
- /**
- * @brief This is a fully connected layer function for unsigned 8-bit
- * integer inputs/outputs with bias inputs, interleaved
- * multiplication and symmetric quantization on the outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[in] bias pointer of the bias vector
- * @param[out] out_vec pointer of the output vector
- * @param[in] in_tmp_buf temporary buffer for input vector. It is
- * required when -mext-vector is enabled and
- * its size must be 2 * size.
- * @return This function only returns 0.
- *
- * @note
- * - In this function, the input vector is multiplied by the weight matrix in
- * interleaved format which could be obtained by riscv_nn_fc_s8_wt_converter.
- * - The outputs will be two-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_fc_u8_u8_s8_sym_bias_fast(const u8_t *in_vec,
- const q7_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- const q31_t *bias,
- u8_t *out_vec,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_u8_u8_s8_sym_bias_fast(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift,
- bias, out_vec, in_tmp_buf);
- #else
- return riscv_nn_fc_u8_u8_s8_sym_bias_fast(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift,
- bias, out_vec, in_tmp_buf);
- #endif
- }
- /**
- * @brief This is a fully connected layer function for unsigned 8-bit
- * integer inputs and signed 8-bit integer outputs with bias
- * inputs, interleaved multiplication and symmetric
- * quantization on the outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[in] bias pointer of the bias vector
- * @param[out] out_vec pointer of the output vector
- * @param[in] in_tmp_buf temporary buffer for input vector. It is
- * required when -mext-vector is enabled and
- * its size must be 2 * size.
- * @return This function only returns 0.
- *
- * @note
- * - In this function, the input vector is multiplied by the weight matrix in
- * interleaved format which could be obtained by riscv_nn_fc_s8_wt_converter.
- * - The outputs will be two-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_fc_u8_s8_s8_sym_bias_fast(const u8_t *in_vec,
- const q7_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- const q31_t *bias,
- q7_t *out_vec,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_u8_s8_s8_sym_bias_fast(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift,
- bias, out_vec, in_tmp_buf);
- #else
- return riscv_nn_fc_u8_s8_s8_sym_bias_fast(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift,
- bias, out_vec, in_tmp_buf);
- #endif
- }
- /**
- * @brief This is a fully connected layer function for unsigned 8-bit
- * integer inputs and signed 16-bit integer outputs with bias
- * inputs, interleaved multiplication and symmetric
- * quantization on the outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[in] bias pointer of the bias vector
- * @param[out] out_vec pointer of the output vector
- * @param[in] in_tmp_buf temporary buffer for input vector. It is
- * required when -mext-vector is enabled and
- * its size must be 2 * size.
- * @return This function only returns 0.
- *
- * @note
- * - In this function, the input vector is multiplied by the weight matrix in
- * interleaved format which could be obtained by riscv_nn_fc_s8_wt_converter.
- * - The outputs will be two-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_fc_u8_s16_s8_sym_bias_fast(const u8_t *in_vec,
- const q7_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- const q31_t *bias,
- q15_t *out_vec,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_u8_s16_s8_sym_bias_fast(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift,
- bias, out_vec, in_tmp_buf);
- #else
- return riscv_nn_fc_u8_s16_s8_sym_bias_fast(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift,
- bias, out_vec, in_tmp_buf);
- #endif
- }
- /**
- * @brief This is a fully connected layer function for signed 8-bit
- * integer inputs/outputs with interleaved multiplication and
- * symmetric quantization on the outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_vec pointer of the output vector
- * @param[in] in_tmp_buf temporary buffer for input vector. It is
- * required when -mext-vector is enabled and
- * its size must be 2 * size.
- * @return This function only returns 0.
- *
- * @note
- * - In this function, the input vector is multiplied by the weight matrix in
- * interleaved format which could be obtained by riscv_nn_fc_s8_wt_converter.
- * - The outputs will be two-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_fc_s8_s8_s8_sym_fast(const q7_t *in_vec,
- const q7_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q7_t *out_vec,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_s8_s8_s8_sym_fast(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift,
- out_vec, in_tmp_buf);
- #else
- return riscv_nn_fc_s8_s8_s8_sym_fast(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift,
- out_vec, in_tmp_buf);
- #endif
- }
- /**
- * @brief This is a fully connected layer function for signed 8-bit
- * integer inputs and signed 16-bit integer outputs with
- * interleaved multiplication and symmetric quantization on the
- * outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_vec pointer of the output vector
- * @param[in] in_tmp_buf temporary buffer for input vector. It is
- * required when -mext-vector is enabled and
- * its size must be 2 * size.
- * @return This function only returns 0.
- *
- * @note
- * - In this function, the input vector is multiplied by the weight matrix in
- * interleaved format which could be obtained by riscv_nn_fc_s8_wt_converter.
- * - The outputs will be two-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_fc_s8_s16_s8_sym_fast(const q7_t *in_vec,
- const q7_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q15_t *out_vec,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_s8_s16_s8_sym_fast(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift,
- out_vec, in_tmp_buf);
- #else
- return riscv_nn_fc_s8_s16_s8_sym_fast(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift,
- out_vec, in_tmp_buf);
- #endif
- }
- /**
- * @brief This is a fully connected layer function for unsigned 8-bit
- * integer inputs/outputs with interleaved multiplication and
- * symmetric quantization on the outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_vec pointer of the output vector
- * @param[in] in_tmp_buf temporary buffer for input vector. It is
- * required when -mext-vector is enabled and
- * its size must be 2 * size.
- * @return This function only returns 0.
- *
- * @note
- * - In this function, the input vector is multiplied by the weight matrix in
- * interleaved format which could be obtained by riscv_nn_fc_s8_wt_converter.
- * - The outputs will be two-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_fc_u8_u8_s8_sym_fast(const u8_t *in_vec,
- const q7_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- u8_t *out_vec,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_u8_u8_s8_sym_fast(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift,
- out_vec, in_tmp_buf);
- #else
- return riscv_nn_fc_u8_u8_s8_sym_fast(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift,
- out_vec, in_tmp_buf);
- #endif
- }
- /**
- * @brief This is a fully connected layer function for unsigned 8-bit
- * integer inputs and signed 8-bit integer outputs with
- * interleaved multiplication and symmetric quantization on the
- * outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_vec pointer of the output vector
- * @param[in] in_tmp_buf temporary buffer for input vector. It is
- * required when -mext-vector is enabled and
- * its size must be 2 * size.
- * @return This function only returns 0.
- *
- * @note
- * - In this function, the input vector is multiplied by the weight matrix in
- * interleaved format which could be obtained by riscv_nn_fc_s8_wt_converter.
- * - The outputs will be two-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- static inline int32_t hpm_nn_fc_u8_s8_s8_sym_fast(const u8_t *in_vec,
- const q7_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q7_t *out_vec,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_u8_s8_s8_sym_fast(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift,
- out_vec, in_tmp_buf);
- #else
- return riscv_nn_fc_u8_s8_s8_sym_fast(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift,
- out_vec, in_tmp_buf);
- #endif
- }
- /**
- * @brief This is a fully connected layer function for unsigned 8-bit
- * integer inputs and signed 16-bit integer outputs with
- * interleaved multiplication and symmetric quantization on the
- * outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[in] pre_rshift right shift amount for the output
- * @param[in] out_scale value of scaling for the output
- * @param[in] post_rshift right shift amount for the output
- * @param[out] out_vec pointer of the output vector
- * @param[in] in_tmp_buf temporary buffer for input vector. It is
- * required when -mext-vector is enabled and
- * its size must be 2 * size.
- * @return This function only returns 0.
- *
- * @note
- * - In this function, the input vector is multiplied by the weight matrix in
- * interleaved format which could be obtained by riscv_nn_fc_s8_wt_converter.
- * - The outputs will be two-stage shifted before being stored, i.e.,
- * out = ((out >> pre_rshift) *out_scale) >> post_rshift.
- */
- /* Thin dispatch wrapper (u8 in, s16 out variant): selects the TPT or the
-  * RISC-V NN backend at compile time; argument order is the same on both. */
- static inline int32_t hpm_nn_fc_u8_s16_s8_sym_fast(const u8_t *in_vec,
- const q7_t *wt_mat,
- const uint16_t size,
- const uint16_t wt_row_num,
- const uint16_t pre_rshift,
- const uint16_t out_scale,
- const uint16_t post_rshift,
- q15_t *out_vec,
- q15_t *in_tmp_buf)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_u8_s16_s8_sym_fast(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift,
- out_vec, in_tmp_buf);
- #else
- return riscv_nn_fc_u8_s16_s8_sym_fast(in_vec, wt_mat, size, wt_row_num,
- pre_rshift, out_scale, post_rshift,
- out_vec, in_tmp_buf);
- #endif
- }
- /**
- * @brief This is a weight converter for those fully-connected
- * functions with signed 8-bit weight data and named with
- * "fast".
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[out] wt_mat_out pointer of the weight matrix stored in
- * specific ordering
- */
- /* Dispatch wrapper for the s8 weight converter used by the "fast" FC
-  * kernels; TPT and RISC-V NN backends share the same signature. */
- static inline void hpm_nn_fc_s8_wt_converter(const q7_t *wt_mat,
- const uint32_t size,
- const uint32_t wt_row_num,
- q7_t *wt_mat_out)
- {
- #if defined(__zcc__)
- tpt_nn_fc_s8_wt_converter(wt_mat, size, wt_row_num, wt_mat_out);
- #else
- riscv_nn_fc_s8_wt_converter(wt_mat, size, wt_row_num, wt_mat_out);
- #endif
- }
- /**
- * @brief This is a weight converter for those fully-connected
- * functions with signed 16-bit weight data and named with
- * "fast".
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[out] wt_mat_out pointer of the weight matrix stored in
- * specific ordering
- */
- /* Dispatch wrapper for the s16 weight converter used by the "fast" FC
-  * kernels; TPT and RISC-V NN backends share the same signature. */
- static inline void hpm_nn_fc_s16_wt_converter(const q15_t *wt_mat,
- const uint32_t size,
- const uint32_t wt_row_num,
- q15_t *wt_mat_out)
- {
- #if defined(__zcc__)
- tpt_nn_fc_s16_wt_converter(wt_mat, size, wt_row_num, wt_mat_out);
- #else
- riscv_nn_fc_s16_wt_converter(wt_mat, size, wt_row_num, wt_mat_out);
- #endif
- }
- /**
- * @brief This is a weight converter for
- * riscv_nn_fc_mat_vec_s16_s16_s8_sft_bias_fast.
- * @param[in] wt_mat pointer of the weight matrix
- * @param[in] size number of elements in the input vector
- * @param[in] wt_row_num number of rows in the weight matrix
- * @param[out] wt_mat_out pointer of the weight matrix stored in
- * specific ordering
- */
- /* Dispatch wrapper for the mat-vec s8 weight converter; both backends
-  * take (wt_mat, size, wt_row_num, wt_mat_out) in the same order. */
- static inline void hpm_nn_fc_mat_vec_s8_wt_converter(const q7_t *wt_mat,
- const uint32_t size,
- const uint32_t wt_row_num,
- q7_t *wt_mat_out)
- {
- #if defined(__zcc__)
- tpt_nn_fc_mat_vec_s8_wt_converter(wt_mat, size, wt_row_num, wt_mat_out);
- #else
- riscv_nn_fc_mat_vec_s8_wt_converter(wt_mat, size, wt_row_num, wt_mat_out);
- #endif
- }
- /**
- * @brief This is a fully connected layer function for signed 8-bit
- * integer inputs with bias inputs and asymmetric quantization
- * on the outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the transposed weight matrix
- * @param[in] in_vec_col number of columns in the input vector (or
- * transposed weight matrix)
- * @param[in] wt_mat_row number of rows in the transposed weight
- * matrix
- * @param[in] in_vec_group number of input vector groups
- * @param[in] in_offset value of offset to be added to the input
- * tensor. It should be in the range of -127 to
- * 128.
- * @param[in] wt_offset value of offset to be added to the weight.
- * It should be in the range of -127 to 128.
- * @param[in] out_scale value of scaling for the output tensor
- * @param[in] out_shift shift amount for the output tensor
- * @param[in] out_offset value of offset to be added to the output
- * tensor. It should be in the range of -128 to
- * 127.
- * @param[in] bias pointer of the bias vector
- * @param[in] out_vec pointer of the output vector
- * @param[in] act_min minimum value to clip out the output tensor.
- * It should be in the range of -128 to 127.
- * @param[in] act_max maximum value to clip out the output tensor.
- * It should be in the range of -128 to 127.
- * @param[in] tmp_buf dummy
- * @return This function only returns 0.
- */
- /* Dispatch wrapper for the asymmetric-quantized s8 FC layer. On the TPT
-  * path the flat parameter list is repacked into the TPT params/dims
-  * structs; the RISC-V NN path passes the arguments straight through. */
- static inline int32_t hpm_nn_fc_s8_s8_s8_asym_bias(const int8_t *in_vec,
- const int8_t *wt_mat,
- const uint16_t in_vec_col,
- const uint16_t wt_mat_row,
- const uint16_t in_vec_group,
- const int32_t in_offset,
- const int32_t wt_offset,
- const int32_t out_scale,
- const int32_t out_shift,
- const int32_t out_offset,
- const int32_t *bias,
- int8_t *out_vec,
- const int32_t act_min,
- const int32_t act_max,
- q15_t *tmp_buf)
- {
- #if defined(__zcc__)
- /* NOTE(review): the positional initializers below assume the TPT struct
-  * field orders {in_offset, wt_offset, out_offset, scale, shift, min, max}
-  * and {col, group, row} — confirm against the tpt header. */
- tpt_nn_fc_params_asym_s8 aFc_params = {in_offset, wt_offset, out_offset, out_scale,
- out_shift, act_min, act_max};
- tpt_nn_fc_dims_asym_s8 aFC_dims = {in_vec_col, in_vec_group, wt_mat_row};
- return tpt_fully_connected_s8(out_vec, in_vec, wt_mat, bias, &aFc_params,
- &aFC_dims, tmp_buf);
- #else
- return riscv_nn_fc_s8_s8_s8_asym_bias(in_vec, wt_mat, in_vec_col, wt_mat_row,
- in_vec_group, in_offset, wt_offset,
- out_scale, out_shift, out_offset, bias,
- out_vec, act_min, act_max, tmp_buf);
- #endif
- }
- /**
- * @brief This function is used to get the needed size, in bytes, by
- * the temporary buffer of riscv_nn_fc_s8_s8_s8_asym_bias.
- * @param[in] in_vec_col number of columns in the input vector (or
- * transposed weight matrix)
- * @return This function returns the needed size by the temporary buffer.
- */
- /* Returns the temporary-buffer size (bytes) required by
-  * hpm_nn_fc_s8_s8_s8_asym_bias for the selected backend. */
- static inline int32_t hpm_nn_fc_s8_s8_s8_asym_bias_get_buffer_size(const uint16_t in_vec_col)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_s8_s8_s8_asym_bias_get_buffer_size(in_vec_col);
- #else
- return riscv_nn_fc_s8_s8_s8_asym_bias_get_buffer_size(in_vec_col);
- #endif
- }
- /**
- * @}
- */
- #endif
- #ifdef HPM_EN_MATH_NN_RVP32_LIB
- #if defined(__zcc__)
- #include "tpt_nn_fully_connected.h"
- #else
- #include "riscv_nn_fully_connected.h"
- #endif
- /**
- * @brief This is a fully connected layer function for signed 8-bit
- * integer inputs with bias inputs and asymmetric quantization
- * on the outputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] wt_mat pointer of the transposed weight matrix
- * @param[in] in_vec_col number of columns in the input vector (or
- * transposed weight matrix)
- * @param[in] wt_mat_row number of rows in the transposed weight
- * matrix
- * @param[in] in_vec_group number of input vector groups
- * @param[in] in_offset value of offset to be added to the input
- * tensor. It should be in the range of -127 to
- * 128.
- * @param[in] wt_offset value of offset to be added to the weight.
- * It should be in the range of -127 to 128.
- * @param[in] out_scale value of scaling for the output tensor
- * @param[in] out_shift shift amount for the output tensor
- * @param[in] out_offset value of offset to be added to the output
- * tensor. It should be in the range of -128 to
- * 127.
- * @param[in] bias pointer of the bias vector
- * @param[in] out_vec pointer of the output vector
- * @param[in] act_min minimum value to clip out the output tensor.
- * It should be in the range of -128 to 127.
- * @param[in] act_max maximum value to clip out the output tensor.
- * It should be in the range of -128 to 127.
- * @param[in] tmp_buf dummy
- * @return This function only returns 0.
- */
- /* RVP32 build of the asymmetric-quantized s8 FC dispatch wrapper.
-  * NOTE(review): this is byte-identical to the HPM_EN_MATH_NN_LIB
-  * definition earlier in the file; if both guard macros are ever defined
-  * together this is a redefinition error — confirm the guards are
-  * mutually exclusive in the build configuration. */
- static inline int32_t hpm_nn_fc_s8_s8_s8_asym_bias(const int8_t *in_vec,
- const int8_t *wt_mat,
- const uint16_t in_vec_col,
- const uint16_t wt_mat_row,
- const uint16_t in_vec_group,
- const int32_t in_offset,
- const int32_t wt_offset,
- const int32_t out_scale,
- const int32_t out_shift,
- const int32_t out_offset,
- const int32_t *bias,
- int8_t *out_vec,
- const int32_t act_min,
- const int32_t act_max,
- q15_t *tmp_buf)
- {
- #if defined(__zcc__)
- tpt_nn_fc_params_asym_s8 aFc_params = {in_offset, wt_offset, out_offset, out_scale,
- out_shift, act_min, act_max};
- tpt_nn_fc_dims_asym_s8 aFC_dims = {in_vec_col, in_vec_group, wt_mat_row};
- return tpt_fully_connected_s8(out_vec, in_vec, wt_mat, bias, &aFc_params,
- &aFC_dims, tmp_buf);
- #else
- return riscv_nn_fc_s8_s8_s8_asym_bias(in_vec, wt_mat, in_vec_col, wt_mat_row,
- in_vec_group, in_offset, wt_offset,
- out_scale, out_shift, out_offset, bias,
- out_vec, act_min, act_max, tmp_buf);
- #endif
- }
- /**
- * @brief This function is used to get the needed size, in bytes, by
- * the temporary buffer of riscv_nn_fc_s8_s8_s8_asym_bias.
- * @param[in] in_vec_col number of columns in the input vector (or
- * transposed weight matrix)
- * @return This function returns the needed size by the temporary buffer.
- */
- /* RVP32 build of the buffer-size query; duplicate of the earlier
-  * HPM_EN_MATH_NN_LIB definition (see redefinition note there). */
- static inline int32_t hpm_nn_fc_s8_s8_s8_asym_bias_get_buffer_size(const uint16_t in_vec_col)
- {
- #if defined(__zcc__)
- return tpt_nn_fc_s8_s8_s8_asym_bias_get_buffer_size(in_vec_col);
- #else
- return riscv_nn_fc_s8_s8_s8_asym_bias_get_buffer_size(in_vec_col);
- #endif
- }
- #endif /* HPM_EN_MATH_NN_RVP32_LIB */
- #endif
- #ifdef HPM_MATH_NN_POOLING
- #ifdef HPM_EN_MATH_NN_LIB
- #if defined(__zcc__)
- #include "tpt_nn_pooling.h"
- #else
- #include "riscv_nn_pooling.h"
- #endif
- /**
- * @defgroup nnpooling NN Pooling Functions
- * @ingroup hpmmath
- * @brief The pooling functions are used to downsample input data. They include
- * max and average pooling functions.
- *
- * @{
- */
- /**
- * @brief This is an average pooling function for signed 8-bit integer
- * inputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor. It is
- * required when -mext-dsp is enabled and its
- * size must be equal to "2 * out_tensor_dim *
- * in_tensor_ch".
- * @param[out] out_tensor pointer of the output tensor
- *
- * @b Example:
- * @code
- * #define IN_DIM 32
- * #define IN_CH 32
- * #define KER_DIM 3
- * #define PAD 0
- * #define STRIDE 2
- * #define OUT_DIM 15
- *
- * q7_t in_data[IN_CH * IN_DIM * IN_DIM] = {...};
- * q7_t out_data[IN_CH * OUT_DIM * OUT_DIM] = {...};
- * q7_t in_tmp_buf[2 * OUT_DIM * IN_CH];
- *
- * hpm_nn_avepool_HWC_s8(in_data, IN_DIM, IN_CH, KER_DIM, PAD, STRIDE,
- * OUT_DIM, in_tmp_buf, out_data);
- * @endcode
- */
- /* Dispatch wrapper for square-kernel s8 average pooling; both backends
-  * take identical argument lists. */
- static inline void hpm_nn_avepool_HWC_s8(q7_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const uint16_t out_tensor_dim,
- q7_t *in_tmp_buf,
- q7_t *out_tensor)
- {
- #if defined(__zcc__)
- tpt_nn_avepool_HWC_s8(in_tensor, in_tensor_dim, in_tensor_ch, ker_dim, pad,
- stride, out_tensor_dim, in_tmp_buf, out_tensor);
- #else
- riscv_nn_avepool_HWC_s8(in_tensor, in_tensor_dim, in_tensor_ch, ker_dim, pad,
- stride, out_tensor_dim, in_tmp_buf, out_tensor);
- #endif
- }
- /**
- * @brief This is an average pooling function for signed 8-bit integer
- * inputs in any x and y dimensions.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] pad_x padding size in the x dimension
- * @param[in] pad_y padding size in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * It is required when -mext-dsp is enabled
- * and its size must be equal to "2 *
- * out_tensor_dim_x * in_tensor_ch".
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] out_lshift left shift amount for the output
- *
- * @b Example:
- * @code
- * #define IN_X 160
- * #define IN_Y 120
- * #define IN_CH 3
- * #define KER_DIM_X 3
- * #define KER_DIM_Y 5
- * #define PAD_X 1
- * #define PAD_Y 1
- * #define STRIDE_X 2
- * #define STRIDE_Y 2
- * #define OUT_LSHIFT 3
- * #define OUT_X 80
- * #define OUT_Y 59
- *
- * q7_t in_data[IN_CH * IN_X * IN_Y] = {...};
- * q7_t out_data[IN_CH * OUT_X * OUT_Y] = {...};
- * q7_t in_tmp_buf[2 * IN_CH * OUT_X * OUT_Y];
- *
- * hpm_nn_avepool_HWC_s8_any(in_data, IN_X, IN_Y, IN_CH, KER_DIM_X, KER_DIM_Y,
- * PAD_X, PAD_Y, STRIDE_X, STRIDE_Y, OUT_X, OUT_Y, in_tmp_buf, out_data,
- * OUT_LSHIFT);
- * @endcode
- */
- /* Dispatch wrapper for rectangular-kernel s8 average pooling with an
-  * output left shift; both backends take identical argument lists. */
- static inline void hpm_nn_avepool_HWC_s8_any(q7_t *in_tensor,
- const uint16_t in_tensor_dim_x,
- const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_ch,
- const uint16_t ker_dim_x,
- const uint16_t ker_dim_y,
- const uint16_t pad_x,
- const uint16_t pad_y,
- const uint16_t stride_x,
- const uint16_t stride_y,
- const uint16_t out_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- q7_t *in_tmp_buf,
- q7_t *out_tensor,
- const uint16_t out_lshift)
- {
- #if defined(__zcc__)
- tpt_nn_avepool_HWC_s8_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_dim_x,
- ker_dim_y, pad_x, pad_y, stride_x, stride_y, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf, out_tensor, out_lshift);
- #else
- riscv_nn_avepool_HWC_s8_any(
- in_tensor, in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch, ker_dim_x,
- ker_dim_y, pad_x, pad_y, stride_x, stride_y, out_tensor_dim_x,
- out_tensor_dim_y, in_tmp_buf, out_tensor, out_lshift);
- #endif
- }
- /**
- * @brief This is an average pooling function for S8 inputs with any x
- * and y dimension with the activating parameters to limit the
- * outputs.
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] pad_y padding size in the y dimension
- * @param[in] pad_x padding size in the x dimension
- * @param[in] act_min minimum value that the output tensor is
- * limited to. It should be in the range of
- * -128 to 127.
- * @param[in] act_max maximum value that the output tensor is
- * limited to. It should be in the range of
- * -128 to 127.
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * Its needed size could be obtained by
- * calling hpm_nn_avepool_HWC_s8_any_act_get_buffer_size.
- * @param[out] out_tensor pointer of the output tensor
- * @return This function only returns 0.
- */
- /* Dispatch wrapper for s8 average pooling with output clamping. On the
-  * TPT path the flat argument list is repacked into params/dims structs. */
- static inline int32_t hpm_nn_avepool_HWC_s8_any_act(const int in_tensor_dim_y,
- const int in_tensor_dim_x,
- const int out_tensor_dim_y,
- const int out_tensor_dim_x,
- const int stride_y,
- const int stride_x,
- const int ker_dim_y,
- const int ker_dim_x,
- const int pad_y,
- const int pad_x,
- const int act_min,
- const int act_max,
- const int in_tensor_ch,
- int8_t *in_tensor,
- int16_t *in_tmp_buf,
- int8_t *out_tensor)
- {
- #if defined(__zcc__)
- /* NOTE(review): positional struct init assumes the TPT field orders
-  * {stride_x, stride_y, pad_x, pad_y, act_min, act_max} and
-  * {in_x, in_y, ch, ker_x, ker_y, out_x, out_y} — confirm against header. */
- tpt_nn_avgpool_params_act_s8 aPool_params = {stride_x, stride_y, pad_x, pad_y,
- act_min, act_max};
- tpt_nn_avgpool_dims_act_s8 aPool_dims = {in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
- ker_dim_x, ker_dim_y, out_tensor_dim_x, out_tensor_dim_y};
- return tpt_avgpool_s8_any_act(out_tensor, in_tensor, &aPool_params, &aPool_dims, in_tmp_buf);
- #else
- return riscv_nn_avepool_HWC_s8_any_act(
- in_tensor_dim_y, in_tensor_dim_x, out_tensor_dim_y, out_tensor_dim_x,
- stride_y, stride_x, ker_dim_y, ker_dim_x, pad_y, pad_x, act_min, act_max,
- in_tensor_ch, in_tensor, in_tmp_buf, out_tensor);
- #endif
- }
- /**
- * @brief This function is used to obtain the required size, in bytes,
- * for the input temporary buffer of riscv_nn_avepool_HWC_s8_any_act.
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @return This function returns the size required by the temporary
- * buffer.
- */
- /* Returns the temporary-buffer size (bytes) required by
-  * hpm_nn_avepool_HWC_s8_any_act for the selected backend. */
- static inline int32_t hpm_nn_avepool_HWC_s8_any_act_get_buffer_size(const int out_tensor_dim_x, const int in_tensor_ch)
- {
- #if defined(__zcc__)
- return tpt_nn_avepool_HWC_s8_any_act_get_buffer_size(out_tensor_dim_x,
- in_tensor_ch);
- #else
- return riscv_nn_avepool_HWC_s8_any_act_get_buffer_size(out_tensor_dim_x,
- in_tensor_ch);
- #endif
- }
- /**
- * @brief This is a max pooling function for signed 8-bit integer
- * inputs.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_dim dimension of the input tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] ker_dim dimension of the filter kernel
- * @param[in] pad padding size
- * @param[in] stride convolution stride
- * @param[in] out_tensor_dim dimension of the output tensor
- * @param[in] in_tmp_buf dummy
- * @param[out] out_tensor pointer of the output tensor
- *
- * @b Example:
- * @code
- * #define IN_DIM 32
- * #define IN_CH 32
- * #define KER_DIM 3
- * #define PAD 0
- * #define STRIDE 2
- * #define OUT_DIM 15
- *
- * q7_t in_data[IN_CH * IN_DIM * IN_DIM] = {...};
- * q7_t out_data[IN_CH * OUT_DIM * OUT_DIM] = {...};
- *
- * hpm_nn_maxpool_HWC_s8(in_data, IN_DIM, IN_CH, KER_DIM, PAD, STRIDE,
- * OUT_DIM, NULL, out_data);
- * @endcode
- */
- /* Dispatch wrapper for square-kernel s8 max pooling; in_tmp_buf is a
-  * dummy pass-through (callers may pass NULL, per the example above). */
- static inline void hpm_nn_maxpool_HWC_s8(q7_t *in_tensor,
- const uint16_t in_tensor_dim,
- const uint16_t in_tensor_ch,
- const uint16_t ker_dim,
- const uint16_t pad,
- const uint16_t stride,
- const uint16_t out_tensor_dim,
- q7_t *in_tmp_buf,
- q7_t *out_tensor)
- {
- #if defined(__zcc__)
- tpt_nn_maxpool_HWC_s8(in_tensor, in_tensor_dim, in_tensor_ch, ker_dim, pad,
- stride, out_tensor_dim, in_tmp_buf, out_tensor);
- #else
- riscv_nn_maxpool_HWC_s8(in_tensor, in_tensor_dim, in_tensor_ch, ker_dim, pad,
- stride, out_tensor_dim, in_tmp_buf, out_tensor);
- #endif
- }
- /**
- * @brief This is a max pooling function for signed 8-bit integer
- * inputs in any x and y dimensions with the activating
- * parameters to limit the outputs.
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] pad_y padding size in the y dimension
- * @param[in] pad_x padding size in the x dimension
- * @param[in] act_min minimum value that the output tensor is
- * limited to. It should be in the range of
- * -128 to 127.
- * @param[in] act_max maximum value that the output tensor is
- * limited to. It should be in the range of
- * -128 to 127.
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] tmp_buffer dummy
- * @param[in] out_tensor pointer of the output tensor
- * @return This function only returns 0.
- */
- /* Dispatch wrapper for rectangular-kernel s8 max pooling with output
-  * clamping; both backends take identical argument lists. */
- static inline int32_t hpm_nn_maxpool_HWC_s8_any_act(const uint16_t in_tensor_dim_y,
- const uint16_t in_tensor_dim_x,
- const uint16_t out_tensor_dim_y,
- const uint16_t out_tensor_dim_x,
- const uint16_t stride_y,
- const uint16_t stride_x,
- const uint16_t ker_dim_y,
- const uint16_t ker_dim_x,
- const uint16_t pad_y,
- const uint16_t pad_x,
- const int8_t act_min,
- const int8_t act_max,
- const uint16_t in_tensor_ch,
- int8_t *in_tensor,
- int16_t *tmp_buffer,
- int8_t *out_tensor)
- {
- #if defined(__zcc__)
- return tpt_nn_maxpool_HWC_s8_any_act(
- in_tensor_dim_y, in_tensor_dim_x, out_tensor_dim_y, out_tensor_dim_x,
- stride_y, stride_x, ker_dim_y, ker_dim_x, pad_y, pad_x, act_min, act_max,
- in_tensor_ch, in_tensor, tmp_buffer, out_tensor);
- #else
- return riscv_nn_maxpool_HWC_s8_any_act(
- in_tensor_dim_y, in_tensor_dim_x, out_tensor_dim_y, out_tensor_dim_x,
- stride_y, stride_x, ker_dim_y, ker_dim_x, pad_y, pad_x, act_min, act_max,
- in_tensor_ch, in_tensor, tmp_buffer, out_tensor);
- #endif
- }
- /**
- * @}
- */
- #endif
- #ifdef HPM_EN_MATH_NN_RVP32_LIB
- #if defined(__zcc__)
- #include "tpt_nn_pooling.h"
- #else
- #include "riscv_nn_pooling.h"
- #endif
- /**
- * @brief This is an average pooling function for S8 inputs with any x
- * and y dimension with the activating parameters to limit the
- * outputs.
- * @param[in] in_tensor_dim_y y dimension of the input tensor
- * @param[in] in_tensor_dim_x x dimension of the input tensor
- * @param[in] out_tensor_dim_y y dimension of the output tensor
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] stride_y convolution stride in the y dimension
- * @param[in] stride_x convolution stride in the x dimension
- * @param[in] ker_dim_y y dimension of the filter kernel
- * @param[in] ker_dim_x x dimension of the filter kernel
- * @param[in] pad_y padding size in the y dimension
- * @param[in] pad_x padding size in the x dimension
- * @param[in] act_min minimum value that the output tensor is
- * limited to. It should be in the range of
- * -128 to 127.
- * @param[in] act_max maximum value that the output tensor is
- * limited to. It should be in the range of
- * -128 to 127.
- * @param[in] in_tensor_ch number of input tensor channels
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tmp_buf temporary buffer for the input tensor.
- * Its needed size could be obtained by
- * calling hpm_nn_avepool_HWC_s8_any_act_get_buffer_size.
- * @param[out] out_tensor pointer of the output tensor
- * @return This function only returns 0.
- */
- /* RVP32 build of the s8 average-pooling-with-clamp dispatch wrapper.
-  * NOTE(review): byte-identical duplicate of the HPM_EN_MATH_NN_LIB
-  * definition earlier in the file; if both guard macros are defined
-  * together this is a redefinition error — confirm the guards are
-  * mutually exclusive in the build configuration. */
- static inline int32_t hpm_nn_avepool_HWC_s8_any_act(const int in_tensor_dim_y,
- const int in_tensor_dim_x,
- const int out_tensor_dim_y,
- const int out_tensor_dim_x,
- const int stride_y,
- const int stride_x,
- const int ker_dim_y,
- const int ker_dim_x,
- const int pad_y,
- const int pad_x,
- const int act_min,
- const int act_max,
- const int in_tensor_ch,
- int8_t *in_tensor,
- int16_t *in_tmp_buf,
- int8_t *out_tensor)
- {
- #if defined(__zcc__)
- tpt_nn_avgpool_params_act_s8 aPool_params = {stride_x, stride_y, pad_x, pad_y,
- act_min, act_max};
- tpt_nn_avgpool_dims_act_s8 aPool_dims = {in_tensor_dim_x, in_tensor_dim_y, in_tensor_ch,
- ker_dim_x, ker_dim_y, out_tensor_dim_x, out_tensor_dim_y};
- return tpt_avgpool_s8_any_act(out_tensor, in_tensor, &aPool_params, &aPool_dims, in_tmp_buf);
- #else
- return riscv_nn_avepool_HWC_s8_any_act(
- in_tensor_dim_y, in_tensor_dim_x, out_tensor_dim_y, out_tensor_dim_x,
- stride_y, stride_x, ker_dim_y, ker_dim_x, pad_y, pad_x, act_min, act_max,
- in_tensor_ch, in_tensor, in_tmp_buf, out_tensor);
- #endif
- }
- /**
- * @brief This function is used to obtain the required size, in bytes,
- * for the input temporary buffer of riscv_nn_avepool_HWC_s8_any_act.
- * @param[in] out_tensor_dim_x x dimension of the output tensor
- * @param[in] in_tensor_ch number of input tensor channels
- * @return This function returns the size required by the temporary
- * buffer.
- */
- /* RVP32 build of the buffer-size query; duplicate of the earlier
-  * HPM_EN_MATH_NN_LIB definition (see redefinition note there). */
- static inline int32_t hpm_nn_avepool_HWC_s8_any_act_get_buffer_size(const int out_tensor_dim_x, const int in_tensor_ch)
- {
- #if defined(__zcc__)
- return tpt_nn_avepool_HWC_s8_any_act_get_buffer_size(out_tensor_dim_x,
- in_tensor_ch);
- #else
- return riscv_nn_avepool_HWC_s8_any_act_get_buffer_size(out_tensor_dim_x,
- in_tensor_ch);
- #endif
- }
- #endif
- #endif
- #ifdef HPM_MATH_NN_SOFTMAX
- #ifdef HPM_EN_MATH_NN_LIB
- #if defined(__zcc__)
- #include "tpt_nn_softmax.h"
- #else
- #include "riscv_nn_softmax.h"
- #endif
- /**
- * @defgroup nnsoftmax NN Softmax Functions
- * @ingroup hpmmath
- * @brief The softmax functions are exponential functions with base 2.
- *
- * @{
- */
- /**
- * @brief This is a softmax function for signed 8-bit integer input
- * vectors.
- * @param[in] in_vec pointer of the input vector
- * @param[in] size number of elements in the input vector
- * @param[out] out_vec pointer of the output vector
- *
- * @b Example:
- * @code
- * #define LENGTH 10
- * q7_t in_data[LENGTH] = {...};
- * q7_t out_data[LENGTH];
- *
- * hpm_nn_softmax_s8_fast(in_data, LENGTH, out_data);
- * @endcode
- */
- /* Dispatch wrapper for the fast s8 softmax; both backends share the
-  * same (in, size, out) argument order. */
- static inline void hpm_nn_softmax_s8_fast(const q7_t *in_vec,
- const uint16_t size,
- q7_t *out_vec)
- {
- #if defined(__zcc__)
- tpt_nn_softmax_s8_fast(in_vec, size, out_vec);
- #else
- riscv_nn_softmax_s8_fast(in_vec, size, out_vec);
- #endif
- }
- /**
- * @brief This is a softmax function for signed 16-bit integer input
- * vectors.
- * @param[in] in_vec pointer of the input vector
- * @param[in] size number of elements in the input vector
- * @param[out] out_vec pointer of the output vector
- */
- /* Dispatch wrapper for the fast s16 softmax; both backends share the
-  * same (in, size, out) argument order. */
- static inline void hpm_nn_softmax_s16_fast(const q15_t *in_vec,
- const uint16_t size,
- q15_t *out_vec)
- {
- #if defined(__zcc__)
- tpt_nn_softmax_s16_fast(in_vec, size, out_vec);
- #else
- riscv_nn_softmax_s16_fast(in_vec, size, out_vec);
- #endif
- }
- /**
- * @brief This is a softmax function for signed 8-bit integer input
- * tensor with high precision algorithm.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_row number of rows in the input tensor
- * @param[in] in_tensor_col number of columns in the input tensor
- * @param[in] scale scaling value for input quantization
- * @param[in] lshift left shift amount for input quantization
- * @param[in] diff_min minimum threshold to perform the quantized
- * exponential operation. The difference can be
- * obtained by subtracting the input from the
- * maximum in row.
- * @param[out] out_tensor pointer of the output tensor
- */
- /* Dispatch wrapper for the high-precision s8 softmax. Note the backends
-  * differ in argument order: the TPT entry point takes the output tensor
-  * first, the RISC-V NN entry point takes it last. */
- static inline void hpm_nn_softmax_s8_hp(const int8_t *in_tensor,
- const int32_t in_tensor_row,
- const int32_t in_tensor_col,
- const int32_t scale,
- const int32_t lshift,
- const int32_t diff_min,
- int8_t *out_tensor)
- {
- #if defined(__zcc__)
- tpt_softmax_s8_hp(out_tensor, in_tensor, in_tensor_row, in_tensor_col, scale, lshift,
- diff_min);
- #else
- riscv_nn_softmax_s8_hp(in_tensor, in_tensor_row, in_tensor_col, scale, lshift,
- diff_min, out_tensor);
- #endif
- }
- /**
- * @brief This is a softmax function for unsigned 8-bit integer input
- * tensor with high precision algorithm.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_row number of rows in the input tensor
- * @param[in] in_tensor_col number of columns in the input tensor
- * @param[in] scale scaling value for input quantization
- * @param[in] lshift left shift amount for input quantization
- * @param[in] diff_min minimum threshold to perform the quantized
- * exponential operation. The difference can be
- * obtained by subtracting the input from the
- * maximum in row.
- * @param[out] out_tensor pointer of the output tensor
- */
- /* Dispatch wrapper for the high-precision u8 softmax.
-  * NOTE(review): the TPT call here passes out_tensor LAST (riscv-style),
-  * while the s8 variant's TPT entry point (tpt_softmax_s8_hp) takes the
-  * output FIRST — verify tpt_nn_softmax_u8_hp's signature really matches
-  * this ordering. */
- static inline void hpm_nn_softmax_u8_hp(const uint8_t *in_tensor,
- const int32_t in_tensor_row,
- const int32_t in_tensor_col,
- const int32_t scale,
- const int32_t lshift,
- const int32_t diff_min,
- uint8_t *out_tensor)
- {
- #if defined(__zcc__)
- tpt_nn_softmax_u8_hp(in_tensor, in_tensor_row, in_tensor_col, scale, lshift,
- diff_min, out_tensor);
- #else
- riscv_nn_softmax_u8_hp(in_tensor, in_tensor_row, in_tensor_col, scale, lshift,
- diff_min, out_tensor);
- #endif
- }
- /**
- * @}
- */
- #endif
- #ifdef HPM_EN_MATH_NN_RVP32_LIB
- #if defined(__zcc__)
- #include "tpt_nn_softmax.h"
- #else
- #include "riscv_nn_softmax.h"
- #endif
- /**
- * @brief This is a softmax function for signed 8-bit integer input
- * tensor with high precision algorithm.
- * @param[in] in_tensor pointer of the input tensor
- * @param[in] in_tensor_row number of rows in the input tensor
- * @param[in] in_tensor_col number of columns in the input tensor
- * @param[in] scale scaling value for input quantization
- * @param[in] lshift left shift amount for input quantization
- * @param[in] diff_min minimum threshold to perform the quantized
- * exponential operation. The difference can be
- * obtained by subtracting the input from the
- * maximum in row.
- * @param[out] out_tensor pointer of the output tensor
- */
- /* RVP32 build of the high-precision s8 softmax dispatch wrapper.
-  * NOTE(review): byte-identical duplicate of the HPM_EN_MATH_NN_LIB
-  * definition earlier in the file; redefinition error if both guard
-  * macros are defined together — confirm guards are mutually exclusive. */
- static inline void hpm_nn_softmax_s8_hp(const int8_t *in_tensor,
- const int32_t in_tensor_row,
- const int32_t in_tensor_col,
- const int32_t scale,
- const int32_t lshift,
- const int32_t diff_min,
- int8_t *out_tensor)
- {
- #if defined(__zcc__)
- tpt_softmax_s8_hp(out_tensor, in_tensor, in_tensor_row, in_tensor_col, scale, lshift,
- diff_min);
- #else
- riscv_nn_softmax_s8_hp(in_tensor, in_tensor_row, in_tensor_col, scale, lshift,
- diff_min, out_tensor);
- #endif
- }
- #endif
- #endif
- #ifdef HPM_MATH_NN_UTIL
- #ifdef HPM_EN_MATH_NN_LIB
- #if defined(__zcc__)
- #include "tpt_nn_util.h"
- #else
- #include "riscv_nn_util.h"
- #endif
- /**
- * @defgroup nnutils NN Utils Functions
- * @ingroup hpmmath
- * @brief Utils functions are miscellaneous auxiliary tools.
- *
- * @{
- */
- #ifdef __riscv_zfh
- /**
- * @brief This function calculates the base-e exponential values of
- * 16-bit half-precision floating point inputs.
- * @param[in] in_vec pointer of the input vector
- * @param[in] size number of elements in the input vector
- * @param[out] out_vec pointer of the output vector
- * @return This function only returns 0.
- */
- /* Dispatch wrapper for the half-precision base-e exponential (only
-  * compiled when the Zfh extension is available, per the guard above). */
- static inline int32_t hpm_nn_exp_f16(const float16_t *in_vec,
- const uint32_t size,
- float16_t *out_vec)
- {
- #if defined(__zcc__)
- return tpt_nn_exp_f16(in_vec, size, out_vec);
- #else
- return riscv_nn_exp_f16(in_vec, size, out_vec);
- #endif
- }
- #endif
- /**
- * @brief This function turns the input tensor into another tensor
- * with the same data but in a different shape.
- * @param[in] in_tensor pointer of the input tensor
- * @param[out] out_tensor pointer of the output tensor
- * @param[in] size size, in bytes, of total input tensor
- *
- * @b Example:
- * @code
- * #define SIZE 1024
- * int8_t in_tensor[SIZE] = {...};
- * int8_t out_tensor[SIZE];
- *
- * hpm_nn_reshape_s8(in_tensor, out_tensor, SIZE);
- * @endcode
- */
- /* Dispatch wrapper for the s8 reshape (raw copy of `size` bytes). Note
-  * the backends differ in argument order: the TPT entry point takes the
-  * destination first, the RISC-V NN entry point takes the source first. */
- static inline void hpm_nn_reshape_s8(const int8_t *in_tensor,
- int8_t *out_tensor,
- const uint32_t size)
- {
- #if defined(__zcc__)
- tpt_reshape_s8(out_tensor, in_tensor, size);
- #else
- riscv_nn_reshape_s8(in_tensor, out_tensor, size);
- #endif
- }
- /**
- * @brief This function finds the k largest values and their indices
- * from the signed 8-bit integer input vector.
- * @param[in] in_vec pointer of the input vector
- * @param[in] size number of elements in the input vector
- * @param[in] k the number of the largest values to be
- * searched
- * @param[out] val the k largest values in the input vector
- * @param[out] idx the indices of the k largest values in the
- * input vector
- * @return This function only returns 0.
- *
- * @note
- * - If there is a number of elements with the same value, the element with
- * smaller index will be selected with high priority.
- * - The k largest values will be sorted from largest to smallest and stored in
- * "val" output vector. If there is a number of elements with the same value,
- * the elements will be sorted from smallest index to largest index.
- */
- static inline int32_t hpm_nn_top_k_s8(q7_t *in_vec,
- uint32_t size,
- uint32_t k,
- q7_t *val,
- uint32_t *idx)
- {
- #if defined(__zcc__)
- return tpt_nn_top_k_s8(in_vec, size, k, val, idx);
- #else
- return riscv_nn_top_k_s8(in_vec, size, k, val, idx);
- #endif
- }
- #ifdef __riscv_zfh
- /**
- * @brief This function finds the k largest values and their indices
- * from the 16-bit half-precision floating point input vector.
- * @param[in] in_vec pointer of the input tensor
- * @param[in] size number of elements in the input vector
- * @param[in] k the number of the largest values to be
- * searched
- * @param[out] val the k largest values in the input vector
- * @param[out] idx the indices of the k largest values in the
- * input vector
- * @return This function only returns 0.
- *
- * @note
- * - If there is a number of elements with the same value, the element with
- * smaller index will be selected with high priority.
- * - The k largest values will be sorted from largest to smallest and stored in
- * "val" output vector. If there is a number of elements with the same value,
- * the elements will be sorted from smallest index to largest index.
- */
- static inline int32_t hpm_nn_top_k_f16(float16_t *in_vec,
- uint32_t size,
- uint32_t k,
- float16_t *val,
- uint32_t *idx)
- {
- #if defined(__zcc__)
- return tpt_nn_top_k_f16(in_vec, size, k, val, idx);
- #else
- return riscv_nn_top_k_f16(in_vec, size, k, val, idx);
- #endif
- }
- #endif
/**
 * @}
 */
- #endif
- #ifdef HPM_EN_MATH_NN_RVP32_LIB
- #if defined(__zcc__)
- #include "tpt_nn_util.h"
- #else
- #include "riscv_nn_util.h"
- #endif
/**
 * @brief Copies the input tensor into the output tensor unchanged; the data
 *        is reinterpreted by the caller as a tensor of a different shape.
 * @param[in]  in_tensor  pointer of the input tensor
 * @param[out] out_tensor pointer of the output tensor
 * @param[in]  size       size, in bytes, of total input tensor
 *
 * @b Example:
 * @code
 * #define SIZE 1024
 * int8_t in_tensor[SIZE] = {...};
 * int8_t out_tensor[SIZE];
 *
 * hpm_nn_reshape_s8(in_tensor, out_tensor, SIZE);
 * @endcode
 */
static inline void hpm_nn_reshape_s8(const int8_t *in_tensor,
                                     int8_t *out_tensor,
                                     const uint32_t size)
{
#ifdef __zcc__
    /* NOTE(review): tpt_reshape_s8 appears to take (dst, src, size)
     * argument order, unlike the riscv variant — verify against its header. */
    tpt_reshape_s8(out_tensor, in_tensor, size);
#else
    riscv_nn_reshape_s8(in_tensor, out_tensor, size);
#endif
}
- #endif
- /**
- * @}
- */
- #endif
- #ifdef __cplusplus
- }
- #endif
- #endif
|