From c0d5f6057af512a3856b72d666105740995d9079 Mon Sep 17 00:00:00 2001 From: Eric K Richardson Date: Tue, 29 Nov 2022 15:55:31 -0800 Subject: [PATCH] Update to newest Scala 3.2.1 and Scala Native 0.4.9 (#43) * Update to Scala 3.2.1 * Remove travis file --- .gitignore | 4 + .scalafmt.conf | 7 +- .travis.yml | 53 - README.md | 18 +- build.sbt | 23 +- project/plugins.sbt | 4 +- scripts/.coursier | Bin 11814 -> 0 bytes scripts/.scalafmt-1.5.1 | Bin 12217 -> 0 bytes scripts/scalafmt | 5 +- .../ekrich/tensorflow/unsafe/tensorflow.scala | 2398 +++++++++-------- .../tensorflow/unsafe/TensorflowTest.scala | 32 +- 11 files changed, 1353 insertions(+), 1191 deletions(-) delete mode 100644 .travis.yml delete mode 100755 scripts/.coursier delete mode 100755 scripts/.scalafmt-1.5.1 diff --git a/.gitignore b/.gitignore index 47e18bc..f040643 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,7 @@ target/ # vscode /.vscode/ + +# scripts generated +/scripts/.coursier +/scripts/.scalafmt* diff --git a/.scalafmt.conf b/.scalafmt.conf index 958a28a..1db6f58 100644 --- a/.scalafmt.conf +++ b/.scalafmt.conf @@ -1,5 +1,6 @@ -version = 1.5.1 -style = defaultWithAlign -docstrings = JavaDoc +version = 3.6.1 +preset = default +docstrings.style = Asterisk assumeStandardLibraryStripMargin = true project.git = true +runner.dialect = scala3 diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index ba3ae2d..0000000 --- a/.travis.yml +++ /dev/null @@ -1,53 +0,0 @@ -sudo: required - -language: scala - -scala: - - "2.11.12" - -matrix: - include: - - os: linux - dist: trusty - - - os: osx - -stages: - - name: test - - name: release - if: (branch = master AND type = push) OR (tag IS present) - -jobs: - include: - # stage="test" if no stage is specified - - env: TEST="scalafmt" - script: ./scripts/checkfmts.sh - # - env: TEST="test" - - env: TEST="linux" - before_install: ./scripts/travis-os-setup.sh - script: sbt test doc - - env: TEST="osx" - os: osx - osx_image: xcode9.3 - before_install: ./scripts/travis-os-setup.sh - script: sbt test doc - - stage: release - script: sbt ci-release - -cache: - directories: - - $HOME/.sbt/1.0/dependency - - $HOME/.sbt/boot/scala* - - $HOME/.sbt/launchers - - $HOME/.ivy2/cache - - $HOME/.coursier - -before_cache: - - du -h -d 1 $HOME/.ivy2/cache - - du -h -d 2 $HOME/.sbt/ - - find $HOME/.sbt -name "*.lock" -type f -delete - - find $HOME/.ivy2/cache -name "ivydata-*.properties" -type f -delete - - rm -rf $HOME/.ivy2/local - -before_install: - - git fetch --tags diff --git a/README.md b/README.md index 4f84065..d694f30 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ converted to [LLVM IR](http://llvm.org/). Finally LLVM code is optimized and compiled by [Clang](http://clang.llvm.org/) to produce a native executable. ## Getting started -[![Maven Central](https://img.shields.io/maven-central/v/org.ekrich/stensorflow_native0.4_2.13.svg)](https://maven-badges.herokuapp.com/maven-central/org.ekrich/stensorflow_native0.4_2.13) +[![Maven Central](https://img.shields.io/maven-central/v/org.ekrich/stensorflow_native0.4_3.svg)](https://maven-badges.herokuapp.com/maven-central/org.ekrich/stensorflow_native0.4_3) If you are already familiar with Scala Native you can jump right in by adding the following dependency in your `sbt` build file. 
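For reference, the dependency line the README points to sits outside the changed hunks. A minimal sketch of what it typically looks like, assuming the `org.ekrich` organization and `stensorflow` artifact from the badge above and the `0.3.0` release listed under Versions:

```scala
// build.sbt -- requires the sbt-scala-native plugin; %%% selects the Scala Native artifact
libraryDependencies += "org.ekrich" %%% "stensorflow" % "0.3.0"
```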
@@ -30,9 +30,18 @@ All available versions can be seen at the [Maven Repository](https://mvnreposito Otherwise follow the [Getting Started](https://scala-native.readthedocs.io/en/latest/user/setup.html) instructions for Scala Native if you are not already setup. +## Scala Build Versions + +| Scala Version | Native (0.4.9+) | | ---------------------- | :-------------------: | | 3.2.x | ✅ | + +Use version `0.3.0` or greater for Scala Native `0.4.9+` and Scala 3. +Refer to the release notes for older versions of Scala and Scala Native. + ## Additional libraries -The TensorFlow C library is required and the current version is `2.10.0`. +The TensorFlow C library is required and the current version is `2.11.0`. * Linux/Ubuntu can install TensorFlow by following these directions: @@ -50,7 +59,7 @@ $ sudo ldconfig /usr/local/lib * macOS can install TensorFlow using [Homebrew](https://formulae.brew.sh/formula/libtensorflow) which will install into the `/usr/local/Cellar/libtensorflow/` directory. -Note: macOS Catalina 10.15.x or greater is required to install TensorFlow via +Note: macOS 11 or greater is recommended to install TensorFlow via Homebrew and is used in CI. ``` @@ -60,7 +69,7 @@ $ brew install libtensorflow * Other OSes need to have `libtensorflow` available on the system. ## Usage and Help -[![scaladoc](https://www.javadoc.io/badge/org.ekrich/stensorflow_native0.4_2.13.svg?label=scaladoc)](https://www.javadoc.io/doc/org.ekrich/stensorflow_native0.4_2.13) +[![scaladoc](https://www.javadoc.io/badge/org.ekrich/stensorflow_native0.4_3.svg?label=scaladoc)](https://www.javadoc.io/doc/org.ekrich/stensorflow_native0.4_3) [![Join chat https://gitter.im/ekrich/stensorflow](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/ekrich/stensorflow?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) Reference the link above for Scaladoc. The documentation is a little sparse but hopefully will improve with time. @@ -82,5 +91,6 @@ In addition, look at the [stensorflow unit tests](https://github.com/ekrich/sten ## Versions +Release [0.3.0](https://github.com/ekrich/tensorflow/releases/tag/v0.3.0) - (2022-11-29)
Release [0.2.0](https://github.com/ekrich/tensorflow/releases/tag/v0.2.0) - (2021-12-13)
Release [0.1.0](https://github.com/ekrich/tensorflow/releases/tag/v0.1.0) - (2021-07-02)
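The README above points at the unit tests for usage, so a rough orientation sketch may help here. The package and the TF_Message signature come from the tensorflow.scala diff below; TF_Version, TF_NewStatus and TF_DeleteStatus are standard TensorFlow C API entry points assumed to be bound in the portions of the file not shown in this patch:

```scala
import scala.scalanative.unsafe.*
import org.ekrich.tensorflow.unsafe.tensorflow.*

object TensorflowHello:
  def main(args: Array[String]): Unit =
    // Version string straight from the C library (assumed binding of TF_Version).
    println(s"TensorFlow C library: ${fromCString(TF_Version())}")

    // Typical C API pattern: allocate a TF_Status, hand it to TF_* calls,
    // read it back, then free it (TF_NewStatus/TF_DeleteStatus assumed bound).
    val status = TF_NewStatus()
    println(s"status message: '${fromCString(TF_Message(status))}'") // empty while OK
    TF_DeleteStatus(status)
```

Running it requires the `libtensorflow` shared library described under Additional libraries, since the bindings link against the C API at native-link time.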
diff --git a/build.sbt b/build.sbt index ea45981..44f6959 100644 --- a/build.sbt +++ b/build.sbt @@ -1,13 +1,9 @@ -addCommandAlias("run", "stensorflow/run") +// stensorflow build +val scala3 = "3.2.1" -val scala211 = "2.11.12" -val scala212 = "2.12.17" -val scala213 = "2.13.10" -val scala300 = "3.1.0" +val versionsNative = Seq(scala3) -val versionsNative = Seq(scala211, scala212, scala213) - -ThisBuild / scalaVersion := scala213 +ThisBuild / scalaVersion := scala3 ThisBuild / crossScalaVersions := versionsNative ThisBuild / versionScheme := Some("early-semver") @@ -17,7 +13,8 @@ inThisBuild( organization := "org.ekrich", homepage := Some(url("https://github.com/ekrich/stensorflow")), licenses := List( - "Apache-2.0" -> url("http://www.apache.org/licenses/LICENSE-2.0")), + "Apache-2.0" -> url("http://www.apache.org/licenses/LICENSE-2.0") + ), developers := List( Developer( id = "ekrich", @@ -30,12 +27,8 @@ inThisBuild( ) lazy val commonSettings = Seq( - addCompilerPlugin( - "org.scala-native" % "junit-plugin" % nativeVersion cross CrossVersion.full), - libraryDependencies += "org.scala-native" %%% "junit-runtime" % nativeVersion, testOptions += Tests.Argument(TestFrameworks.JUnit, "-a", "-s", "-v"), - logLevel := Level.Info, // Info, Debug - nativeLinkStubs := true + logLevel := Level.Info // Info, Debug ) lazy val root = project @@ -57,4 +50,4 @@ lazy val stensorflow = project crossScalaVersions := versionsNative, commonSettings ) - .enablePlugins(ScalaNativePlugin) + .enablePlugins(ScalaNativePlugin, ScalaNativeJUnitPlugin) diff --git a/project/plugins.sbt b/project/plugins.sbt index ff4c897..1e0e0ad 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -2,5 +2,5 @@ resolvers ++= Resolver.sonatypeOssRepos("snapshots") // Current releases addSbtPlugin("org.scala-native" % "sbt-scala-native" % "0.4.9") -addSbtPlugin("com.github.sbt" % "sbt-ci-release" % "1.5.11") -addSbtPlugin("com.dwijnand" % "sbt-dynver" % "5.0.0-M3") +addSbtPlugin("com.github.sbt" % "sbt-ci-release" % "1.5.11") +addSbtPlugin("com.dwijnand" % "sbt-dynver" % "5.0.0-M3") diff --git a/scripts/.coursier b/scripts/.coursier deleted file mode 100755 index 7e617048d4ad09a1f30efd2bfb156b2db3a9822b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 11814 zcmZ{q1ymhL*2j_H4|fUf?(XjH8r zIbFZ5TUGZ~SKmJg5reC<6N8bZ9fPTz8=An;{{cm7x=(R#6800~!bjG&G2qRhcwFRM;dC1O&tx6a)kf#5C}*_6c2(wBa-N6nUyZgO1bzk)1qi$5&kwNxUvuRrVMw8R7l7| z(o2&g%MA&R3qr(~-;*bJMirRHHMs(i$sJ5AV0u4Vei+3)r&40wEWKz-Y$^(K?z01> zb+eccwDy_7YdiV&{pMHo*T#w$$HP%%(Ba8`~HPb;ort@3^a6mU3vqIiP zSJdSeq`xm-zc7I)uau~8ncJFk`H?WL<)&_7vLT}~uT70KhK^N!5;bRJF+nX{mwm_f z>rqvW&F95Cs6OtDG#QD3YAxJQGd)&b6G7bkMX*`9JVkFX8s6OFuIfpNan`nP%zL&g z7`|`_tAgzp(CXenHPYJ(!h1N; zd||<5D?z#YMqLgEuN8K8jjhalm_$*O7mKa8JjYvN&8!|7ua=r$d!x%KO|Q={H@IK9 z%E%_e(zqX^ryb!M;v}~t5l}thA>iY-=MJ|hwE+bzma%biw!YC`{<94GANJ4ZgXuicULar$NKgo(F~@3I-ubJ z7O~&De}4-+Z*-ZWIsQo_C2blMd5tAo^9&?#7)H&$ajR)f7BGGlQQiZVM=ujlx8gfm zCeK%PW>tKt71I*}gTEVCBUY-{bk1=p{8@lbN2PsZqX8M&po6>#EW>V$f`U}WVvOz* zF=D-d4ued*(Cn`MM&U8Uh-2xLm#3BFU3s=Q2dL!sJNUK08VkY z;4z!?WrF-xQFp}nG) zc8x^oVxn9jdlfS5>D~g~d#by{Ldi9A% zhX$%)%{0+Tc8(#R@?VL6STzPJaR(W})Ld7%8Ti-712LJ)vOF%cN{A+hxsJClpW*{uD`(=&u1t|JJ+KDcgY zFHWC>CD?W&-UjTC$Hu+w<|wVeL!kFa*of`9*5K!3T~6an7S?H>*LaEm6)oa$jDR}j zmK{vhAj6Z$si9TVBJoQE{{}0sSVL+xZGK9G<|(^hcH+R)#PPr?fD-G@SmPN*TEHAx z9%>Y4kYo;Gy{ut?t_E5EPAC(-d!j&-nlRO9Dn`m+!kN8fgSi*~p74k9*4 z70M{W9<(EV43QO4Q9ihw(S+JTCexZxq_uhNrP=WPrV;ET3JAWFGnq}Sy*D~ok!=OH ztuvV~eWo%^QVXdQ!LVgNR0E=(A*{NcvA9Xfun9{Sa 
z|CyE+pFTIntyEEtnVI9uJEI1*Cgm59WNDGFkJWNyCi)`1Au~W8R8x(k*&?&Io@H%v zZuTnI?~Ir_$zL#Oz!?!QpBS&0Dw}u0;LSwSa7#yG3@JLP?4gmTduWrgjmphrV1=g> zr5~EH%=H5B)XU4@V%-7_mtmi%lbFFDGb=kTNE(~F+^V9G8G$GiyeW;9ihEY1Nx(Yj zQ}v#Q-cqbfu^`bg>1l5h&&zOVx=PO6Q(R8o`Piym<-(?fKQs}1PY`?$rWrXi=H-Lx zr@^0?>v~W>JHc3Ib8V$Q9|}H^TK6wQYR^c}DWVXd=fEElj%TrdrO*crZ z^qn#s@q6~tTg;Q$)=}KVr1Zo4Zho<;8D-~g?8lfg!}0wR)sYo}W=jS;A?5Oz5DuP1 z^R25aR|y_HL2Ze2iWtFC`H3ZcJ$s|^*|oWlv4gTrYzv>pvhO+C zo6|XAHKiof;`y-Fe@@Af?wh4wlc2o${+*JcX1GM;w?hlYL#$ITnlI;^lsOan?|~m=7J*3HjxRBut5+ zUD-F85DXbD_lRZ;aVGp5OkJy=(h)1Ef;^l^(@mEy!egCTIoEzPXbp-r5Wd)ezPgmQ zgMRZQb6W*FK2!ICFuV8^(ee%Mei3UlCX=!N2OHws_YN}Q0X(#ln*oAYJUI^xqB|6y zATLJyYq=X!2;^8V`8J@$%SHS$LA1J3@@v8owNUeg+7ROjXb*66n*4D%4<&euJsS&= zWFpr!<__WXQ|1iZQKqPX&~HDR&jvml0TEdTq7l4byUcKcJ&H-;=G}4?HXdsX7oxPL7fZ3X7k>#U8SU6kFa0+P$(U5?a@w=M4B4k*XUWfh-=lTa4RQ^~inc&)W(b6%v*xV8mBu7F*SyK^S9Mn-_~<8oS-LFeT5B1|C)5;~x@ zs9<_Y@p~kHp!hP3d>XbGX{7WCz^_@rmy8>=faE^rx>+1y(6$Tnl`TA%8yr zK@QyBO>g4i8r9A8DB7OX%${+v{q>NLtNMo;p=kpRl$01%@WdCu1(Ol((+L`xdr2WHipE)*-@1_}d}v_}16NM_tti;JZuOPuZU?Sd^KC2Vj1* zSwm3LpU_dhs^oPn5!tRxxU+s1m3OQG`B zWtyRg(47GXjLo@QLq%yHx{npC?)6qIfXTCU6Z2Wze+jzj8>L;v9^ne4doE5U_c}bE z`E^QIsBp%B4u2&@O&wMy&DdOvd5M;m+HB*Mj>1lWYs8Iu%Mnt`;P6b|UCiJt*Uc~y z(f6n+ONVbEZro#{p{`h!mrktKSs{7PDbdFix-0Ol!@Xf%H(8;WEC|y;CCezdyZN}> zaRL=bYf1(Cv*y4wndiAoGPe9FROTG~2?Rf*1aTz@$&xI6pMr>+v>kv{o}n8oT2%Cc z8qDc0*osAe@eSH6v5^b#o;krL!wyJ0;x#TL79ex4YZ9P*@QvBRX}+Jk{C7+gzLmY}d(ZzC*$br2?hv2T zMD5x(T|{ z*(IZM`fmo81nxHoYTB`84y4yHnL9}n>HNDO(GuIgWN9=Ol#>^fv&-XsZ0vK&8gaZP6M-s=-G$EuY7k*_f8ogF~~T64mD86Th!bb|DyHPyZ69F)7F8 z=Z{0Z{iRAHsQqC5ivrz`>jLIhcq~<8Bji$l*mK*ltNVI>U&43L{Ei4*0NVZZ?oDw; zQ%NcqMegOnY}_YFO5h^k89hw$q?3GIC}Gc)Q8t+8!;>#>k>Yg8OVwzL)%BmGMh#dM z=sK#g@C>3)(S|GWksX@LPKSCjyv8<6YOVUYoFP47>-AIW zjROy=`yX(4l2q)<*~8fS-7N6Gov`<2Y_YbZp)BlpfI2k=+Y0ejrge0}?^u7c-7BEK zCOohBa>FvacmD{-+j~!e+pahDKr3?4g37lp(<-<1HP*aUiK6G(>b!1h8<>(^n5L+w zZ^T&WM&i04a*L#8y*)+{3W7nqG!mmWba$0UVN*^kt~ zp9SgT%HTiT-yg^%jkAN5V_y78bPhg(|NBJ?kj0&13ti0|L76^U*H(1d=nQeR}W1t zA>m1DS@iQ3eWK>&DpZ`^brReatO?n_T3qe#foW<7Nt5KAClkqtWy4qSosK)Xskl9r zt-mJVwB>II22a2c)1t(d_oZq;Jv#JjqfqVpy>*3>mhu8wR@yE|2BP#6w@Zj0^XwU~H3Jm@w=Mqr;8iu=mKj1dA&Mwkkxh->LrfhP57}uCEndUK|4Pl2%qsw;U!yAx~!7~3jU((1tBj50qL8j z4Y>TsGZK8jIR89eKGI9SjTZ-F#}Cfb(AW|5qZ-5qiV6hu0Vw{f{Ikpli2Iiet(}9d zow0+Hxv`_nXd84lJp#~Xn;c?cNdlf-<)pwe#NnilLC~4RG?nWRaeE7G$n(Gg>shO+ z^OA-}N$FAPiL9v1PHAe@!M><+Zso?-mb>sd`~+o`ejbh<7KjR3SgDUJ#Bims5#n;~ zr;Hb#1VeE*=Pzq|i99w!*MvM*AbL=EP(t1Nuz$QiJmt&2_s82Me;o2sz#u3ecdU}o!=4C zzjPWt`t8rh|E2RMQ2M*l?FAzjXW|{-@6WLt($`{Ei{~ zr6U9TH=Tck5`NeFJ@fvTUNPL?^#0aT|IV-euJe0d_Ai}Tg#V@U-zxu!+JE=YKce in *s. Any previous information is lost. - * A common use is to clear a status: TF_SetStatus(s, TF_OK, ""); + * Record in *s. Any previous information is lost. A common use is + * to clear a status: TF_SetStatus(s, TF_OK, ""); */ def TF_SetStatus(s: Ptr[TF_Status], code: TF_Code, msg: CString): Unit = extern @@ -327,46 +325,49 @@ object tensorflow { def TF_GetCode(s: Ptr[TF_Status]): TF_Code = extern /** - * Return a pointer to the (null-terminated) error message in *s. The - * return value points to memory that is only usable until the next - * mutation to *s. Always returns an empty string if TF_GetCode(s) is - * TF_OK. + * Return a pointer to the (null-terminated) error message in *s. The return + * value points to memory that is only usable until the next mutation to *s. + * Always returns an empty string if TF_GetCode(s) is TF_OK. 
*/ def TF_Message(s: Ptr[TF_Status]): CString = extern /** - * Makes a copy of the input and sets an appropriate deallocator. Useful for + * Makes a copy of the input and sets an appropriate deallocator. Useful for * passing in read-only, input protobufs. */ - def TF_NewBufferFromString(proto: Ptr[Byte], - proto_len: CSize): Ptr[TF_Buffer] = extern + def TF_NewBufferFromString( + proto: Ptr[Byte], + proto_len: CSize + ): Ptr[TF_Buffer] = extern /** * Useful for passing *out* a protobuf. */ - def TF_NewBuffer(): Ptr[TF_Buffer] = extern - def TF_DeleteBuffer(buffer: Ptr[TF_Buffer]): Unit = extern + def TF_NewBuffer(): Ptr[TF_Buffer] = extern + def TF_DeleteBuffer(buffer: Ptr[TF_Buffer]): Unit = extern def TF_GetBuffer(buffer: Ptr[TF_Buffer]): TF_Buffer = extern /** * Return a new tensor that holds the bytes data[0,len-1]. * - * The data will be deallocated by a subsequent call to TF_DeleteTensor via: - * (*deallocator)(data, len, deallocator_arg) - * Clients must provide a custom deallocator function so they can pass in - * memory managed by something like numpy. + * The data will be deallocated by a subsequent call to TF_DeleteTensor via: + * (*deallocator)(data, len, deallocator_arg) Clients must provide a custom + * deallocator function so they can pass in memory managed by something like + * numpy. * - * May return NULL (and invoke the deallocator) if the provided data buffer - * (data, len) is inconsistent with a tensor of the given TF_DataType - * and the shape specified by (dims, num_dims). + * May return NULL (and invoke the deallocator) if the provided data buffer + * (data, len) is inconsistent with a tensor of the given TF_DataType and the + * shape specified by (dims, num_dims). */ - def TF_NewTensor(value: TF_DataType, - dims: Ptr[int64_t], - num_dims: CInt, - data: Ptr[Byte], - len: CSize, - deallocator: CFuncPtr3[Ptr[Byte], CSize, Ptr[Byte], Unit], - deallocator_arg: Ptr[Byte]): Ptr[TF_Tensor] = extern + def TF_NewTensor( + value: TF_DataType, + dims: Ptr[int64_t], + num_dims: CInt, + data: Ptr[Byte], + len: CSize, + deallocator: CFuncPtr3[Ptr[Byte], CSize, Ptr[Byte], Unit], + deallocator_arg: Ptr[Byte] + ): Ptr[TF_Tensor] = extern /** * Allocate and return a new Tensor. @@ -376,13 +377,15 @@ object tensorflow { * satisfies TensorFlow's memory alignment preferences and should be preferred * over calling malloc and free. * - * The caller must set the Tensor values by writing them to the pointer returned - * by TF_TensorData with length TF_TensorByteSize. + * The caller must set the Tensor values by writing them to the pointer + * returned by TF_TensorData with length TF_TensorByteSize. */ - def TF_AllocateTensor(value: TF_DataType, - dims: Ptr[int64_t], - num_dims: CInt, - len: CSize): Ptr[TF_Tensor] = extern + def TF_AllocateTensor( + value: TF_DataType, + dims: Ptr[int64_t], + num_dims: CInt, + len: CSize + ): Ptr[TF_Tensor] = extern /** * Deletes `tensor` and returns a new TF_Tensor with the same content if @@ -406,8 +409,8 @@ object tensorflow { def TF_NumDims(tensor: Ptr[TF_Tensor]): CInt = extern /** - * Return the length of the tensor in the "dim_index" dimension. - * REQUIRES: 0 <= dim_index < TF_NumDims(tensor) + * Return the length of the tensor in the "dim_index" dimension. REQUIRES: 0 + * <= dim_index < TF_NumDims(tensor) */ def TF_Dim(tensor: Ptr[TF_Tensor], dim_index: CInt): int64_t = extern @@ -427,34 +430,38 @@ object tensorflow { * bytes beyond `*dst`. `dst_len` should be at least * TF_StringEncodedSize(src_len). 
* - * On success returns the size in bytes of the encoded string. - * Returns an error into `status` otherwise. + * On success returns the size in bytes of the encoded string. Returns an + * error into `status` otherwise. */ - def TF_StringEncode(src: CString, - src_len: CSize, - dst: CString, - dst_len: CSize, - status: Ptr[TF_Status]): CSize = extern + def TF_StringEncode( + src: CString, + src_len: CSize, + dst: CString, + dst_len: CSize, + status: Ptr[TF_Status] + ): CSize = extern /** * Decode a string encoded using TF_StringEncode. * - * On success, sets `*dst` to the start of the decoded string and `*dst_len` to - * its length. Returns the number of bytes starting at `src` consumed while - * decoding. `*dst` points to memory within the encoded buffer. On failure, + * On success, sets `*dst` to the start of the decoded string and `*dst_len` + * to its length. Returns the number of bytes starting at `src` consumed while + * decoding. `*dst` points to memory within the encoded buffer. On failure, * `*dst` and `*dst_len` are undefined and an error is set in `status`. * * Does not read memory more than `src_len` bytes beyond `src`. */ - def TF_StringDecode(src: CString, - src_len: CSize, - dst: Ptr[CString], - dst_len: Ptr[CSize], - status: Ptr[TF_Status]): CSize = extern + def TF_StringDecode( + src: CString, + src_len: CSize, + dst: Ptr[CString], + dst_len: Ptr[CSize], + status: Ptr[TF_Status] + ): CSize = extern /** - * Return the size in bytes required to encode a string `len` bytes long into a - * TF_STRING tensor. + * Return the size in bytes required to encode a string `len` bytes long into + * a TF_STRING tensor. */ def TF_StringEncodedSize(len: CSize): CSize = extern @@ -464,26 +471,27 @@ object tensorflow { def TF_NewSessionOptions(): Ptr[TF_SessionOptions] = extern /** - * Set the target in TF_SessionOptions.options. - * target can be empty, a single entry, or a comma separated list of entries. - * Each entry is in one of the following formats : - * "local" - * ip:port - * host:port + * Set the target in TF_SessionOptions.options. target can be empty, a single + * entry, or a comma separated list of entries. Each entry is in one of the + * following formats: + * - "local" + * - ip:port + * - host:port */ def TF_SetTarget(options: Ptr[TF_SessionOptions], target: CString): Unit = extern /** - * Set the config in TF_SessionOptions.options. - * config should be a serialized tensorflow.ConfigProto proto. - * If config was not parsed successfully as a ConfigProto, record the - * error information in *status. + * Set the config in TF_SessionOptions.options. config should be a serialized + * tensorflow.ConfigProto proto. If config was not parsed successfully as a + * ConfigProto, record the error information in *status. */ - def TF_SetConfig(options: Ptr[TF_SessionOptions], - proto: Ptr[Byte], - proto_len: CSize, - status: Ptr[TF_Status]): Unit = extern + def TF_SetConfig( + options: Ptr[TF_SessionOptions], + proto: Ptr[Byte], + proto_len: CSize, + status: Ptr[TF_Status] + ): Unit = extern /** * Destroy an options object. @@ -497,39 +505,41 @@ object tensorflow { def TF_NewGraph(): Ptr[TF_Graph] = extern /** - * Destroy an options object. Graph will be deleted once no more - * TFSession's are referencing it. + * Destroy an options object. Graph will be deleted once no more TFSession's + * are referencing it. 
*/ def TF_DeleteGraph(graph: Ptr[TF_Graph]): Unit = extern /** - * Sets the shape of the Tensor referenced by `output` in `graph` to - * the shape described by `dims` and `num_dims`. + * Sets the shape of the Tensor referenced by `output` in `graph` to the shape + * described by `dims` and `num_dims`. * * If the number of dimensions is unknown, `num_dims` must be set to - * -1 and `dims` can be null. If a dimension is unknown, the - * corresponding entry in the `dims` array must be -1. + * -1 and `dims` can be null. If a dimension is unknown, the corresponding + * entry in the `dims` array must be -1. * - * This does not overwrite the existing shape associated with `output`, - * but merges the input shape with the existing shape. For example, - * setting a shape of [-1, 2] with an existing shape [2, -1] would set - * a final shape of [2, 2] based on shape merging semantics. + * This does not overwrite the existing shape associated with `output`, but + * merges the input shape with the existing shape. For example, setting a + * shape of [-1, 2] with an existing shape [2, -1] would set a final shape of + * [2, 2] based on shape merging semantics. * * Returns an error into `status` if: * - `output` is not in `graph`. - * - An invalid shape is being set (e.g., the shape being set - * is incompatible with the existing shape). + * - An invalid shape is being set (e.g., the shape being set is + * incompatible with the existing shape). */ @name("scalanative_TF_GraphSetTensorShape") - def TF_GraphSetTensorShape(graph: Ptr[TF_Graph], - output: Ptr[TF_Output], // TF_output - dims: Ptr[int64_t], - num_dims: CInt, - status: Ptr[TF_Status]): Unit = extern + def TF_GraphSetTensorShape( + graph: Ptr[TF_Graph], + output: Ptr[TF_Output], // TF_output + dims: Ptr[int64_t], + num_dims: CInt, + status: Ptr[TF_Status] + ): Unit = extern /** - * Returns the number of dimensions of the Tensor referenced by `output` - * in `graph`. + * Returns the number of dimensions of the Tensor referenced by `output` in + * `graph`. * * If the number of dimensions in the shape is unknown, returns -1. * @@ -537,365 +547,405 @@ object tensorflow { * - `output` is not in `graph`. */ @name("scalanative_TF_GraphGetTensorNumDims") - def TF_GraphGetTensorNumDims(graph: Ptr[TF_Graph], - output: Ptr[TF_Output], // TF_output - status: Ptr[TF_Status]): CInt = extern + def TF_GraphGetTensorNumDims( + graph: Ptr[TF_Graph], + output: Ptr[TF_Output], // TF_output + status: Ptr[TF_Status] + ): CInt = extern /** - * Returns the shape of the Tensor referenced by `output` in `graph` - * into `dims`. `dims` must be an array large enough to hold `num_dims` - * entries (e.g., the return value of TF_GraphGetTensorNumDims). + * Returns the shape of the Tensor referenced by `output` in `graph` into + * `dims`. `dims` must be an array large enough to hold `num_dims` entries + * (e.g., the return value of TF_GraphGetTensorNumDims). * - * If the number of dimensions in the shape is unknown or the shape is - * a scalar, `dims` will remain untouched. Otherwise, each element of - * `dims` will be set corresponding to the size of the dimension. An - * unknown dimension is represented by `-1`. + * If the number of dimensions in the shape is unknown or the shape is a + * scalar, `dims` will remain untouched. Otherwise, each element of `dims` + * will be set corresponding to the size of the dimension. An unknown + * dimension is represented by `-1`. * * Returns an error into `status` if: * - `output` is not in `graph`. 
* - `num_dims` does not match the actual number of dimensions. */ @name("scalanative_TF_GraphGetTensorShape") - def TF_GraphGetTensorShape(graph: Ptr[TF_Graph], - output: Ptr[TF_Output], // TF_output - dims: Ptr[int64_t], - num_dims: CInt, - status: Ptr[TF_Status]): Unit = extern + def TF_GraphGetTensorShape( + graph: Ptr[TF_Graph], + output: Ptr[TF_Output], // TF_output + dims: Ptr[int64_t], + num_dims: CInt, + status: Ptr[TF_Status] + ): Unit = extern /** - * Operation will only be added to *graph when TF_FinishOperation() is - * called (assuming TF_FinishOperation() does not return an error). - * *graph must not be deleted until after TF_FinishOperation() is - * called. + * Operation will only be added to *graph when TF_FinishOperation() is called + * (assuming TF_FinishOperation() does not return an error). *graph must not + * be deleted until after TF_FinishOperation() is called. */ - def TF_NewOperation(graph: Ptr[TF_Graph], - op_type: CString, - oper_name: CString): Ptr[TF_OperationDescription] = extern + def TF_NewOperation( + graph: Ptr[TF_Graph], + op_type: CString, + oper_name: CString + ): Ptr[TF_OperationDescription] = extern /** - * Specify the device for `desc`. Defaults to empty, meaning unconstrained. + * Specify the device for `desc`. Defaults to empty, meaning unconstrained. */ def TF_SetDevice(desc: Ptr[TF_OperationDescription], device: CString): Unit = extern /** - * The calls to TF_AddInput and TF_AddInputList must match (in number, - * order, and type) the op declaration. For example, the "Concat" op - * has registration: + * The calls to TF_AddInput and TF_AddInputList must match (in number, order, + * and type) the op declaration. For example, the "Concat" op has + * registration: + * {{{ * REGISTER_OP("Concat") * .Input("concat_dim: int32") * .Input("values: N * T") * .Output("output: T") * .Attr("N: int >= 2") * .Attr("T: type"); - * that defines two inputs, "concat_dim" and "values" (in that order). - * You must use TF_AddInput() for the first input (since it takes a - * single tensor), and TF_AddInputList() for the second input (since - * it takes a list, even if you were to pass a list with a single - * tensor), as in: + * }}} + * that defines two inputs, "concat_dim" and "values" (in that order). You + * must use TF_AddInput() for the first input (since it takes a single + * tensor), and TF_AddInputList() for the second input (since it takes a list, + * even if you were to pass a list with a single tensor), as in: + * {{{ * TF_OperationDescription* desc = TF_NewOperation(graph, "Concat", "c"); * TF_Output concat_dim_input = {...}; * TF_AddInput(desc, concat_dim_input); * TF_Output values_inputs[5] = {{...}, ..., {...}}; - * TF_AddInputList(desc, values_inputs, 5); + * TF_AddInputList(desc,values_inputs, 5); + * }}} * For inputs that take a single tensor. */ @name("scalanative_TF_AddInput") - def TF_AddInput(desc: Ptr[TF_OperationDescription], - input: Ptr[TF_Output]): Unit = + def TF_AddInput( + desc: Ptr[TF_OperationDescription], + input: Ptr[TF_Output] + ): Unit = extern // TF_output /** - * For inputs that take a list of tensors. - * inputs must point to TF_Output[num_inputs]. + * For inputs that take a list of tensors. inputs must point to + * TF_Output[num_inputs]. */ - def TF_AddInputList(desc: Ptr[TF_OperationDescription], - inputs: Ptr[TF_Output], - num_inputs: CInt): Unit = extern + def TF_AddInputList( + desc: Ptr[TF_OperationDescription], + inputs: Ptr[TF_Output], + num_inputs: CInt + ): Unit = extern /** * Call once per control input to `desc`. 
*/ - def TF_AddControlInput(desc: Ptr[TF_OperationDescription], - input: Ptr[TF_Operation]): Unit = extern + def TF_AddControlInput( + desc: Ptr[TF_OperationDescription], + input: Ptr[TF_Operation] + ): Unit = extern /** - * Request that `desc` be co-located on the device where `op` - * is placed. + * Request that `desc` be co-located on the device where `op` is placed. * * Use of this is discouraged since the implementation of device placement is * subject to change. Primarily intended for internal libraries */ - def TF_ColocateWith(desc: Ptr[TF_OperationDescription], - op: Ptr[TF_Operation]): Unit = extern + def TF_ColocateWith( + desc: Ptr[TF_OperationDescription], + op: Ptr[TF_Operation] + ): Unit = extern /** - * Call some TF_SetAttr*() function for every attr that is not - * inferred from an input and doesn't have a default value you wish to - * keep. + * Call some TF_SetAttr*() function for every attr that is not inferred from + * an input and doesn't have a default value you wish to keep. * * `value` must point to a string of length `length` bytes. */ - def TF_SetAttrString(desc: Ptr[TF_OperationDescription], - attr_name: CString, - value: Ptr[Byte], - length: CSize): Unit = extern + def TF_SetAttrString( + desc: Ptr[TF_OperationDescription], + attr_name: CString, + value: Ptr[Byte], + length: CSize + ): Unit = extern /** - * `values` and `lengths` each must have lengths `num_values`. - * `values[i]` must point to a string of length `lengths[i]` bytes. + * `values` and `lengths` each must have lengths `num_values`. `values[i]` + * must point to a string of length `lengths[i]` bytes. */ - def TF_SetAttrStringList(desc: Ptr[TF_OperationDescription], - attr_name: CString, - values: Ptr[Ptr[Byte]], - lengths: Ptr[CSize], - num_values: CInt): Unit = extern + def TF_SetAttrStringList( + desc: Ptr[TF_OperationDescription], + attr_name: CString, + values: Ptr[Ptr[Byte]], + lengths: Ptr[CSize], + num_values: CInt + ): Unit = extern /** - * */ - def TF_SetAttrInt(desc: Ptr[TF_OperationDescription], - attr_name: CString, - value: int64_t): Unit = extern + def TF_SetAttrInt( + desc: Ptr[TF_OperationDescription], + attr_name: CString, + value: int64_t + ): Unit = extern /** - * */ - def TF_SetAttrIntList(desc: Ptr[TF_OperationDescription], - attr_name: CString, - values: Ptr[int64_t], - num_values: CInt): Unit = extern + def TF_SetAttrIntList( + desc: Ptr[TF_OperationDescription], + attr_name: CString, + values: Ptr[int64_t], + num_values: CInt + ): Unit = extern /** - * */ - def TF_SetAttrFloat(desc: Ptr[TF_OperationDescription], - attr_name: CString, - value: CFloat): Unit = extern + def TF_SetAttrFloat( + desc: Ptr[TF_OperationDescription], + attr_name: CString, + value: CFloat + ): Unit = extern /** - * */ - def TF_SetAttrFloatList(desc: Ptr[TF_OperationDescription], - attr_name: CString, - values: Ptr[CFloat], - num_values: CInt): Unit = extern + def TF_SetAttrFloatList( + desc: Ptr[TF_OperationDescription], + attr_name: CString, + values: Ptr[CFloat], + num_values: CInt + ): Unit = extern /** - * */ - def TF_SetAttrBool(desc: Ptr[TF_OperationDescription], - attr_name: CString, - value: CUnsignedChar): Unit = extern + def TF_SetAttrBool( + desc: Ptr[TF_OperationDescription], + attr_name: CString, + value: CUnsignedChar + ): Unit = extern /** - * */ - def TF_SetAttrBoolList(desc: Ptr[TF_OperationDescription], - attr_name: CString, - values: Ptr[CUnsignedChar], - num_values: CInt): Unit = extern + def TF_SetAttrBoolList( + desc: Ptr[TF_OperationDescription], + attr_name: CString, + values: 
Ptr[CUnsignedChar], + num_values: CInt + ): Unit = extern /** - * */ - def TF_SetAttrType(desc: Ptr[TF_OperationDescription], - attr_name: CString, - value: TF_DataType): Unit = extern + def TF_SetAttrType( + desc: Ptr[TF_OperationDescription], + attr_name: CString, + value: TF_DataType + ): Unit = extern /** - * */ - def TF_SetAttrTypeList(desc: Ptr[TF_OperationDescription], - attr_name: CString, - values: Ptr[TF_DataType], - num_values: CInt): Unit = extern + def TF_SetAttrTypeList( + desc: Ptr[TF_OperationDescription], + attr_name: CString, + values: Ptr[TF_DataType], + num_values: CInt + ): Unit = extern /** - * Set a 'func' attribute to the specified name. - * `value` must point to a string of length `length` bytes. + * Set a 'func' attribute to the specified name. `value` must point to a + * string of length `length` bytes. */ - def TF_SetAttrFuncName(desc: Ptr[TF_OperationDescription], - attr_name: CString, - value: CString, - length: CSize): Unit = extern + def TF_SetAttrFuncName( + desc: Ptr[TF_OperationDescription], + attr_name: CString, + value: CString, + length: CSize + ): Unit = extern /** - * Set `num_dims` to -1 to represent "unknown rank". Otherwise, - * `dims` points to an array of length `num_dims`. `dims[i]` must be - * >= -1, with -1 meaning "unknown dimension". + * Set `num_dims` to -1 to represent "unknown rank". Otherwise, `dims` points + * to an array of length `num_dims`. `dims[i]` must be >= -1, with -1 meaning + * "unknown dimension". */ - def TF_SetAttrShape(desc: Ptr[TF_OperationDescription], - attr_name: CString, - dims: Ptr[int64_t], - num_dims: CInt): Unit = extern + def TF_SetAttrShape( + desc: Ptr[TF_OperationDescription], + attr_name: CString, + dims: Ptr[int64_t], + num_dims: CInt + ): Unit = extern /** - * `dims` and `num_dims` must point to arrays of length `num_shapes`. - * Set `num_dims[i]` to -1 to represent "unknown rank". Otherwise, - * `dims[i]` points to an array of length `num_dims[i]`. `dims[i][j]` - * must be >= -1, with -1 meaning "unknown dimension". + * `dims` and `num_dims` must point to arrays of length `num_shapes`. Set + * `num_dims[i]` to -1 to represent "unknown rank". Otherwise, `dims[i]` + * points to an array of length `num_dims[i]`. `dims[i][j]` must be >= -1, + * with -1 meaning "unknown dimension". */ - def TF_SetAttrShapeList(desc: Ptr[TF_OperationDescription], - attr_name: CString, - dims: Ptr[Ptr[int64_t]], - num_dims: Ptr[CInt], - num_shapes: CInt): Unit = extern + def TF_SetAttrShapeList( + desc: Ptr[TF_OperationDescription], + attr_name: CString, + dims: Ptr[Ptr[int64_t]], + num_dims: Ptr[CInt], + num_shapes: CInt + ): Unit = extern /** * `proto` must point to an array of `proto_len` bytes representing a * binary-serialized TensorShapeProto. */ - def TF_SetAttrTensorShapeProto(desc: Ptr[TF_OperationDescription], - attr_name: CString, - proto: Ptr[Byte], - proto_len: CSize, - status: Ptr[TF_Status]): Unit = extern + def TF_SetAttrTensorShapeProto( + desc: Ptr[TF_OperationDescription], + attr_name: CString, + proto: Ptr[Byte], + proto_len: CSize, + status: Ptr[TF_Status] + ): Unit = extern /** * `protos` and `proto_lens` must point to arrays of length `num_shapes`. - * `protos[i]` must point to an array of `proto_lens[i]` bytes - * representing a binary-serialized TensorShapeProto. + * `protos[i]` must point to an array of `proto_lens[i]` bytes representing a + * binary-serialized TensorShapeProto. 
*/ - def TF_SetAttrTensorShapeProtoList(desc: Ptr[TF_OperationDescription], - attr_name: CString, - protos: Ptr[Ptr[Byte]], - proto_lens: Ptr[CSize], - num_shapes: CInt, - status: Ptr[TF_Status]): Unit = extern + def TF_SetAttrTensorShapeProtoList( + desc: Ptr[TF_OperationDescription], + attr_name: CString, + protos: Ptr[Ptr[Byte]], + proto_lens: Ptr[CSize], + num_shapes: CInt, + status: Ptr[TF_Status] + ): Unit = extern /** - * */ - def TF_SetAttrTensor(desc: Ptr[TF_OperationDescription], - attr_name: CString, - value: Ptr[TF_Tensor], - status: Ptr[TF_Status]): Unit = extern + def TF_SetAttrTensor( + desc: Ptr[TF_OperationDescription], + attr_name: CString, + value: Ptr[TF_Tensor], + status: Ptr[TF_Status] + ): Unit = extern /** - * */ - def TF_SetAttrTensorList(desc: Ptr[TF_OperationDescription], - attr_name: CString, - values: Ptr[Ptr[TF_Tensor]], - num_values: CInt, - status: Ptr[TF_Status]): Unit = extern + def TF_SetAttrTensorList( + desc: Ptr[TF_OperationDescription], + attr_name: CString, + values: Ptr[Ptr[TF_Tensor]], + num_values: CInt, + status: Ptr[TF_Status] + ): Unit = extern /** * `proto` should point to a sequence of bytes of length `proto_len` - * representing a binary serialization of an AttrValue protocol - * buffer. + * representing a binary serialization of an AttrValue protocol buffer. */ - def TF_SetAttrValueProto(desc: Ptr[TF_OperationDescription], - attr_name: CString, - proto: Ptr[Byte], - proto_len: CSize, - status: Ptr[TF_Status]): Unit = extern + def TF_SetAttrValueProto( + desc: Ptr[TF_OperationDescription], + attr_name: CString, + proto: Ptr[Byte], + proto_len: CSize, + status: Ptr[TF_Status] + ): Unit = extern /** * If this function succeeds: * - *status is set to an OK value, * - a TF_Operation is added to the graph, - * - a non-null value pointing to the added operation is returned -- - * this value is valid until the underlying graph is deleted. - * Otherwise: + * - a non-null value pointing to the added operation is returned -- this + * value is valid until the underlying graph is deleted. Otherwise: * - *status is set to a non-OK value, * - the graph is not modified, - * - a null value is returned. - * In either case, it deletes `desc`. + * - a null value is returned. In either case, it deletes `desc`. */ - def TF_FinishOperation(desc: Ptr[TF_OperationDescription], - status: Ptr[TF_Status]): Ptr[TF_Operation] = extern + def TF_FinishOperation( + desc: Ptr[TF_OperationDescription], + status: Ptr[TF_Status] + ): Ptr[TF_Operation] = extern /** - * TF_Operation functions. Operations are immutable once created, so - * these are all query functions. + * TF_Operation functions. Operations are immutable once created, so these are + * all query functions. 
*/ def TF_OperationName(oper: Ptr[TF_Operation]): CString = extern /** - * */ def TF_OperationOpType(oper: Ptr[TF_Operation]): CString = extern /** - * */ def TF_OperationDevice(oper: Ptr[TF_Operation]): CString = extern /** - * */ def TF_OperationNumOutputs(oper: Ptr[TF_Operation]): CInt = extern /** - * */ @name("scalanative_TF_OperationOutputType") def TF_OperationOutputType(oper_out: Ptr[TF_Output]): TF_DataType = extern // TF_output /** - * */ - def TF_OperationOutputListLength(oper: Ptr[TF_Operation], - arg_name: CString, - status: Ptr[TF_Status]): CInt = extern + def TF_OperationOutputListLength( + oper: Ptr[TF_Operation], + arg_name: CString, + status: Ptr[TF_Status] + ): CInt = extern /** - * */ def TF_OperationNumInputs(oper: Ptr[TF_Operation]): CInt = extern /** - * */ def TF_OperationInputType(oper_in: Ptr[TF_Input]): TF_DataType = extern // TF_Input /** - * */ - def TF_OperationInputListLength(oper: Ptr[TF_Operation], - arg_name: CString, - status: Ptr[TF_Status]): CInt = extern + def TF_OperationInputListLength( + oper: Ptr[TF_Operation], + arg_name: CString, + status: Ptr[TF_Status] + ): CInt = extern /** * In this code: + * {{{ * TF_Output producer = TF_OperationInput(consumer); - * There is an edge from producer.oper's output (given by - * producer.index) to consumer.oper's input (given by consumer.index). + * }}} + * There is an edge from producer.oper's output (given by producer.index) to + * consumer.oper's input (given by consumer.index). * - * Note: for Scala Native we need to pass an additonal Ptr[TF_Output] - * to capture the original rvalue (stack, pass by value). + * Note: for Scala Native we need to pass an additonal Ptr[TF_Output] to + * capture the original rvalue (stack, pass by value). */ @name("scalanative_TF_OperationInput") - def TF_OperationInput(oper_in: Ptr[TF_Input], - oper_out: Ptr[TF_Output]): Ptr[TF_Output] = + def TF_OperationInput( + oper_in: Ptr[TF_Input], + oper_out: Ptr[TF_Output] + ): Ptr[TF_Output] = extern // TF_Input TF_Output /** - * Get the number of current consumers of a specific output of an - * operation. Note that this number can change when new operations - * are added to the graph. + * Get the number of current consumers of a specific output of an operation. + * Note that this number can change when new operations are added to the + * graph. */ @name("scalanative_TF_OperationOutputNumConsumers") def TF_OperationOutputNumConsumers(oper_out: Ptr[TF_Output]): CInt = extern // TF_output /** - * Get list of all current consumers of a specific output of an - * operation. `consumers` must point to an array of length at least - * `max_consumers` (ideally set to - * TF_OperationOutputNumConsumers(oper_out)). Beware that a concurrent - * modification of the graph can increase the number of consumers of - * an operation. Returns the number of output consumers (should match + * Get list of all current consumers of a specific output of an operation. + * `consumers` must point to an array of length at least `max_consumers` + * (ideally set to TF_OperationOutputNumConsumers(oper_out)). Beware that a + * concurrent modification of the graph can increase the number of consumers + * of an operation. Returns the number of output consumers (should match * TF_OperationOutputNumConsumers(oper_out)). 
*/ @name("scalanative_TF_OperationOutputConsumers") - def TF_OperationOutputConsumers(oper_out: Ptr[TF_Output], // TF_output - consumers: Ptr[TF_Input], - max_consumers: CInt): CInt = extern + def TF_OperationOutputConsumers( + oper_out: Ptr[TF_Output], // TF_output + consumers: Ptr[TF_Input], + max_consumers: CInt + ): CInt = extern /** * Get the number of control inputs to an operation. @@ -903,224 +953,254 @@ object tensorflow { def TF_OperationNumControlInputs(oper: Ptr[TF_Operation]): CInt = extern /** - * Get list of all control inputs to an operation. `control_inputs` must - * point to an array of length `max_control_inputs` (ideally set to - * TF_OperationNumControlInputs(oper)). Returns the number of control - * inputs (should match TF_OperationNumControlInputs(oper)). + * Get list of all control inputs to an operation. `control_inputs` must point + * to an array of length `max_control_inputs` (ideally set to + * TF_OperationNumControlInputs(oper)). Returns the number of control inputs + * (should match TF_OperationNumControlInputs(oper)). */ - def TF_OperationGetControlInputs(oper: Ptr[TF_Operation], - control_inputs: Ptr[Ptr[TF_Operation]], - max_control_inputs: CInt): CInt = extern + def TF_OperationGetControlInputs( + oper: Ptr[TF_Operation], + control_inputs: Ptr[Ptr[TF_Operation]], + max_control_inputs: CInt + ): CInt = extern /** - * Get the number of operations that have `*oper` as a control input. - * Note that this number can change when new operations are added to - * the graph. + * Get the number of operations that have `*oper` as a control input. Note + * that this number can change when new operations are added to the graph. */ def TF_OperationNumControlOutputs(oper: Ptr[TF_Operation]): CInt = extern /** * Get the list of operations that have `*oper` as a control input. * `control_outputs` must point to an array of length at least - * `max_control_outputs` (ideally set to - * TF_OperationNumControlOutputs(oper)). Beware that a concurrent - * modification of the graph can increase the number of control - * outputs. Returns the number of control outputs (should match + * `max_control_outputs` (ideally set to TF_OperationNumControlOutputs(oper)). + * Beware that a concurrent modification of the graph can increase the number + * of control outputs. Returns the number of control outputs (should match * TF_OperationNumControlOutputs(oper)). */ - def TF_OperationGetControlOutputs(oper: Ptr[TF_Operation], - control_outputs: Ptr[Ptr[TF_Operation]], - max_control_outputs: CInt): CInt = extern + def TF_OperationGetControlOutputs( + oper: Ptr[TF_Operation], + control_outputs: Ptr[Ptr[TF_Operation]], + max_control_outputs: CInt + ): CInt = extern /** * Returns metadata about the value of the attribute `attr_name` of `oper`. */ - def TF_OperationGetAttrMetadata(oper: Ptr[TF_Operation], - attr_name: CString, - status: Ptr[TF_Status]): TF_AttrMetadata = + def TF_OperationGetAttrMetadata( + oper: Ptr[TF_Operation], + attr_name: CString, + status: Ptr[TF_Status] + ): TF_AttrMetadata = extern /** - * Fills in `value` with the value of the attribute `attr_name`. `value` must + * Fills in `value` with the value of the attribute `attr_name`. `value` must * point to an array of length at least `max_length` (ideally set to * TF_AttrMetadata.total_size from TF_OperationGetAttrMetadata(oper, * attr_name)). 
*/ - def TF_OperationGetAttrString(oper: Ptr[TF_Operation], - attr_name: CString, - value: Ptr[Byte], - max_length: CSize, - status: Ptr[TF_Status]): Unit = extern + def TF_OperationGetAttrString( + oper: Ptr[TF_Operation], + attr_name: CString, + value: Ptr[Byte], + max_length: CSize, + status: Ptr[TF_Status] + ): Unit = extern /** - * Get the list of strings in the value of the attribute `attr_name`. Fills in + * Get the list of strings in the value of the attribute `attr_name`. Fills in * `values` and `lengths`, each of which must point to an array of length at * least `max_values`. * - * The elements of values will point to addresses in `storage` which must be at - * least `storage_size` bytes in length. Ideally, max_values would be set to - * TF_AttrMetadata.list_size and `storage` would be at least + * The elements of values will point to addresses in `storage` which must be + * at least `storage_size` bytes in length. Ideally, max_values would be set + * to TF_AttrMetadata.list_size and `storage` would be at least * TF_AttrMetadata.total_size, obtained from TF_OperationGetAttrMetadata(oper, * attr_name). * * Fails if storage_size is too small to hold the requested number of strings. */ - def TF_OperationGetAttrStringList(oper: Ptr[TF_Operation], - attr_name: CString, - values: Ptr[Ptr[Byte]], - lengths: Ptr[CSize], - max_values: CInt, - storage: Ptr[Byte], - storage_size: CSize, - status: Ptr[TF_Status]): Unit = extern + def TF_OperationGetAttrStringList( + oper: Ptr[TF_Operation], + attr_name: CString, + values: Ptr[Ptr[Byte]], + lengths: Ptr[CSize], + max_values: CInt, + storage: Ptr[Byte], + storage_size: CSize, + status: Ptr[TF_Status] + ): Unit = extern /** - * */ - def TF_OperationGetAttrInt(oper: Ptr[TF_Operation], - attr_name: CString, - value: Ptr[int64_t], - status: Ptr[TF_Status]): Unit = extern + def TF_OperationGetAttrInt( + oper: Ptr[TF_Operation], + attr_name: CString, + value: Ptr[int64_t], + status: Ptr[TF_Status] + ): Unit = extern /** * Fills in `values` with the value of the attribute `attr_name` of `oper`. - * `values` must point to an array of length at least `max_values` (ideally set - * TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper, + * `values` must point to an array of length at least `max_values` (ideally + * set TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper, * attr_name)). */ - def TF_OperationGetAttrIntList(oper: Ptr[TF_Operation], - attr_name: CString, - values: Ptr[int64_t], - max_values: CInt, - status: Ptr[TF_Status]): Unit = extern + def TF_OperationGetAttrIntList( + oper: Ptr[TF_Operation], + attr_name: CString, + values: Ptr[int64_t], + max_values: CInt, + status: Ptr[TF_Status] + ): Unit = extern /** - * */ - def TF_OperationGetAttrFloat(oper: Ptr[TF_Operation], - attr_name: CString, - value: Ptr[CFloat], - status: Ptr[TF_Status]): Unit = extern + def TF_OperationGetAttrFloat( + oper: Ptr[TF_Operation], + attr_name: CString, + value: Ptr[CFloat], + status: Ptr[TF_Status] + ): Unit = extern /** * Fills in `values` with the value of the attribute `attr_name` of `oper`. - * `values` must point to an array of length at least `max_values` (ideally set - * to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper, + * `values` must point to an array of length at least `max_values` (ideally + * set to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper, * attr_name)). 
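A short sketch of the scalar getter pattern shared by `TF_OperationGetAttrInt` and its siblings; the attribute name `"N"` is only an example, and `status` is assumed to come from the status helpers bound earlier in this file.

```
import scala.scalanative.unsafe._
import org.ekrich.tensorflow.unsafe.tensorflow._

// Sketch only: read a hypothetical int64 attribute named "N" from `oper`.
def intAttr(oper: Ptr[TF_Operation], status: Ptr[TF_Status]): int64_t =
  Zone { implicit z =>
    val value = alloc[int64_t] // single out-parameter
    TF_OperationGetAttrInt(oper, c"N", value, status)
    !value                     // check `status` before trusting this value
  }
```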
*/ - def TF_OperationGetAttrFloatList(oper: Ptr[TF_Operation], - attr_name: CString, - values: Ptr[CFloat], - max_values: CInt, - status: Ptr[TF_Status]): Unit = extern + def TF_OperationGetAttrFloatList( + oper: Ptr[TF_Operation], + attr_name: CString, + values: Ptr[CFloat], + max_values: CInt, + status: Ptr[TF_Status] + ): Unit = extern /** - * */ - def TF_OperationGetAttrBool(oper: Ptr[TF_Operation], - attr_name: CString, - value: Ptr[CUnsignedChar], - status: Ptr[TF_Status]): Unit = extern + def TF_OperationGetAttrBool( + oper: Ptr[TF_Operation], + attr_name: CString, + value: Ptr[CUnsignedChar], + status: Ptr[TF_Status] + ): Unit = extern /** * Fills in `values` with the value of the attribute `attr_name` of `oper`. - * `values` must point to an array of length at least `max_values` (ideally set - * to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper, + * `values` must point to an array of length at least `max_values` (ideally + * set to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper, * attr_name)). */ - def TF_OperationGetAttrBoolList(oper: Ptr[TF_Operation], - attr_name: CString, - values: Ptr[CUnsignedChar], - max_values: CInt, - status: Ptr[TF_Status]): Unit = extern + def TF_OperationGetAttrBoolList( + oper: Ptr[TF_Operation], + attr_name: CString, + values: Ptr[CUnsignedChar], + max_values: CInt, + status: Ptr[TF_Status] + ): Unit = extern /** - * */ - def TF_OperationGetAttrType(oper: Ptr[TF_Operation], - attr_name: CString, - value: Ptr[TF_DataType], - status: Ptr[TF_Status]): Unit = extern + def TF_OperationGetAttrType( + oper: Ptr[TF_Operation], + attr_name: CString, + value: Ptr[TF_DataType], + status: Ptr[TF_Status] + ): Unit = extern /** * Fills in `values` with the value of the attribute `attr_name` of `oper`. - * `values` must point to an array of length at least `max_values` (ideally set - * to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper, + * `values` must point to an array of length at least `max_values` (ideally + * set to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper, * attr_name)). */ - def TF_OperationGetAttrTypeList(oper: Ptr[TF_Operation], - attr_name: CString, - values: Ptr[TF_DataType], - max_values: CInt, - status: Ptr[TF_Status]): Unit = extern + def TF_OperationGetAttrTypeList( + oper: Ptr[TF_Operation], + attr_name: CString, + values: Ptr[TF_DataType], + max_values: CInt, + status: Ptr[TF_Status] + ): Unit = extern /** * Fills in `value` with the value of the attribute `attr_name` of `oper`. - * `values` must point to an array of length at least `num_dims` (ideally set to - * TF_Attr_Meta.size from TF_OperationGetAttrMetadata(oper, attr_name)). + * `values` must point to an array of length at least `num_dims` (ideally set + * to TF_Attr_Meta.size from TF_OperationGetAttrMetadata(oper, attr_name)). */ - def TF_OperationGetAttrShape(oper: Ptr[TF_Operation], - attr_name: CString, - value: Ptr[int64_t], - num_dims: CInt, - status: Ptr[TF_Status]): Unit = extern + def TF_OperationGetAttrShape( + oper: Ptr[TF_Operation], + attr_name: CString, + value: Ptr[int64_t], + num_dims: CInt, + status: Ptr[TF_Status] + ): Unit = extern /** * Fills in `dims` with the list of shapes in the attribute `attr_name` of - * `oper` and `num_dims` with the corresponding number of dimensions. On return, - * for every i where `num_dims[i]` > 0, `dims[i]` will be an array of + * `oper` and `num_dims` with the corresponding number of dimensions. 
On + * return, for every i where `num_dims[i]` > 0, `dims[i]` will be an array of * `num_dims[i]` elements. A value of -1 for `num_dims[i]` indicates that the * i-th shape in the list is unknown. * * The elements of `dims` will point to addresses in `storage` which must be - * large enough to hold at least `storage_size` int64_ts. Ideally, `num_shapes` - * would be set to TF_AttrMetadata.list_size and `storage_size` would be set to - * TF_AttrMetadata.total_size from TF_OperationGetAttrMetadata(oper, - * attr_name). + * large enough to hold at least `storage_size` int64_ts. Ideally, + * `num_shapes` would be set to TF_AttrMetadata.list_size and `storage_size` + * would be set to TF_AttrMetadata.total_size from + * TF_OperationGetAttrMetadata(oper, attr_name). * * Fails if storage_size is insufficient to hold the requested shapes. */ - def TF_OperationGetAttrShapeList(oper: Ptr[TF_Operation], - attr_name: CString, - dims: Ptr[Ptr[int64_t]], - num_dims: Ptr[CInt], - num_shapes: CInt, - storage: Ptr[int64_t], - storage_size: CInt, - status: Ptr[TF_Status]): Unit = extern + def TF_OperationGetAttrShapeList( + oper: Ptr[TF_Operation], + attr_name: CString, + dims: Ptr[Ptr[int64_t]], + num_dims: Ptr[CInt], + num_shapes: CInt, + storage: Ptr[int64_t], + storage_size: CInt, + status: Ptr[TF_Status] + ): Unit = extern /** * Sets `value` to the binary-serialized TensorShapeProto of the value of * `attr_name` attribute of `oper`'. */ - def TF_OperationGetAttrTensorShapeProto(oper: Ptr[TF_Operation], - attr_name: CString, - value: Ptr[TF_Buffer], - status: Ptr[TF_Status]): Unit = extern + def TF_OperationGetAttrTensorShapeProto( + oper: Ptr[TF_Operation], + attr_name: CString, + value: Ptr[TF_Buffer], + status: Ptr[TF_Status] + ): Unit = extern /** * Fills in `values` with binary-serialized TensorShapeProto values of the - * attribute `attr_name` of `oper`. `values` must point to an array of length at - * least `num_values` (ideally set to TF_AttrMetadata.list_size from + * attribute `attr_name` of `oper`. `values` must point to an array of length + * at least `num_values` (ideally set to TF_AttrMetadata.list_size from * TF_OperationGetAttrMetadata(oper, attr_name)). */ - def TF_OperationGetAttrTensorShapeProtoList(oper: Ptr[TF_Operation], - attr_name: CString, - values: Ptr[Ptr[TF_Buffer]], - max_values: CInt, - status: Ptr[TF_Status]): Unit = + def TF_OperationGetAttrTensorShapeProtoList( + oper: Ptr[TF_Operation], + attr_name: CString, + values: Ptr[Ptr[TF_Buffer]], + max_values: CInt, + status: Ptr[TF_Status] + ): Unit = extern /** * Gets the TF_Tensor valued attribute of `attr_name` of `oper`. * - * Allocates a new TF_Tensor which the caller is expected to take - * ownership of (and can deallocate using TF_DeleteTensor). + * Allocates a new TF_Tensor which the caller is expected to take ownership of + * (and can deallocate using TF_DeleteTensor). */ - def TF_OperationGetAttrTensor(oper: Ptr[TF_Operation], - attr_name: CString, - value: Ptr[Ptr[TF_Tensor]], - status: Ptr[TF_Status]): Unit = extern + def TF_OperationGetAttrTensor( + oper: Ptr[TF_Operation], + attr_name: CString, + value: Ptr[Ptr[TF_Tensor]], + status: Ptr[TF_Status] + ): Unit = extern /** * Fills in `values` with the TF_Tensor values of the attribute `attr_name` of @@ -1128,33 +1208,39 @@ object tensorflow { * `max_values` (ideally set to TF_AttrMetadata.list_size from * TF_OperationGetAttrMetadata(oper, attr_name)). 
* - * The caller takes ownership of all the non-null TF_Tensor* entries in `values` - * (which can be deleted using TF_DeleteTensor(values[i])). + * The caller takes ownership of all the non-null TF_Tensor* entries in + * `values` (which can be deleted using TF_DeleteTensor(values[i])). */ - def TF_OperationGetAttrTensorList(oper: Ptr[TF_Operation], - attr_name: CString, - values: Ptr[Ptr[TF_Tensor]], - max_values: CInt, - status: Ptr[TF_Status]): Unit = extern + def TF_OperationGetAttrTensorList( + oper: Ptr[TF_Operation], + attr_name: CString, + values: Ptr[Ptr[TF_Tensor]], + max_values: CInt, + status: Ptr[TF_Status] + ): Unit = extern /** * Sets `output_attr_value` to the binary-serialized AttrValue proto * representation of the value of the `attr_name` attr of `oper`. */ - def TF_OperationGetAttrValueProto(oper: Ptr[TF_Operation], - attr_name: CString, - output_attr_value: Ptr[TF_Buffer], - status: Ptr[TF_Status]): Unit = extern + def TF_OperationGetAttrValueProto( + oper: Ptr[TF_Operation], + attr_name: CString, + output_attr_value: Ptr[TF_Buffer], + status: Ptr[TF_Status] + ): Unit = extern /** - * Returns the operation in the graph with `oper_name`. Returns nullptr if - * no operation found. + * Returns the operation in the graph with `oper_name`. Returns nullptr if no + * operation found. */ - def TF_GraphOperationByName(graph: Ptr[TF_Graph], - oper_name: CString): Ptr[TF_Operation] = extern + def TF_GraphOperationByName( + graph: Ptr[TF_Graph], + oper_name: CString + ): Ptr[TF_Operation] = extern /** - * Iterate through the operations of a graph. To use: + * Iterate through the operations of a graph. To use: * {{{ * size_t pos = 0; * TF_Operation* oper; @@ -1163,8 +1249,10 @@ object tensorflow { * } * }}} */ - def TF_GraphNextOperation(graph: Ptr[TF_Graph], - pos: Ptr[CSize]): Ptr[TF_Operation] = extern + def TF_GraphNextOperation( + graph: Ptr[TF_Graph], + pos: Ptr[CSize] + ): Ptr[TF_Operation] = extern /** * Write out a serialized representation of `graph` (as a GraphDef protocol @@ -1174,25 +1262,32 @@ object tensorflow { * * May fail on very large graphs in the future. */ - def TF_GraphToGraphDef(graph: Ptr[TF_Graph], - output_graph_def: Ptr[TF_Buffer], - status: Ptr[TF_Status]): Unit = extern + def TF_GraphToGraphDef( + graph: Ptr[TF_Graph], + output_graph_def: Ptr[TF_Buffer], + status: Ptr[TF_Status] + ): Unit = extern /** - * Returns the serialized OpDef proto with name `op_name`, or a bad status if no - * such op exists. This can return OpDefs of functions copied into the graph. + * Returns the serialized OpDef proto with name `op_name`, or a bad status if + * no such op exists. This can return OpDefs of functions copied into the + * graph. */ - def TF_GraphGetOpDef(graph: Ptr[TF_Graph], - op_name: CString, - output_op_def: Ptr[TF_Buffer], - status: Ptr[TF_Status]): Unit = extern + def TF_GraphGetOpDef( + graph: Ptr[TF_Graph], + op_name: CString, + output_op_def: Ptr[TF_Buffer], + status: Ptr[TF_Status] + ): Unit = extern /** * Returns the serialized VersionDef proto for this graph. 
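The C loop shown in the scaladoc for `TF_GraphNextOperation` translates almost one-to-one to Scala Native; a hedged sketch, assuming Zone allocation and the 0.4.x unsigned conversions.

```
import scala.scalanative.unsafe._
import scala.scalanative.unsigned._
import org.ekrich.tensorflow.unsafe.tensorflow._

// Sketch only: print the name of every operation in `graph`.
def printOperations(graph: Ptr[TF_Graph]): Unit =
  Zone { implicit z =>
    val pos = alloc[CSize] // iteration cursor
    !pos = 0.toUSize
    var oper = TF_GraphNextOperation(graph, pos)
    while (oper != null) {
      println(fromCString(TF_OperationName(oper)))
      oper = TF_GraphNextOperation(graph, pos)
    }
  }
```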
*/ - def TF_GraphVersions(graph: Ptr[TF_Graph], - output_version_def: Ptr[TF_Buffer], - status: Ptr[TF_Status]): Unit = extern + def TF_GraphVersions( + graph: Ptr[TF_Graph], + output_version_def: Ptr[TF_Buffer], + status: Ptr[TF_Status] + ): Unit = extern /** * TF_ImportGraphDefOptions holds options that can be passed to @@ -1201,76 +1296,83 @@ object tensorflow { type TF_ImportGraphDefOptions = CStruct0 /** - * */ def TF_NewImportGraphDefOptions(): Ptr[TF_ImportGraphDefOptions] = extern /** - * */ def TF_DeleteImportGraphDefOptions( - opts: Ptr[TF_ImportGraphDefOptions]): Unit = extern + opts: Ptr[TF_ImportGraphDefOptions] + ): Unit = extern /** - * Set the prefix to be prepended to the names of nodes in `graph_def` that will - * be imported into `graph`. `prefix` is copied and has no lifetime + * Set the prefix to be prepended to the names of nodes in `graph_def` that + * will be imported into `graph`. `prefix` is copied and has no lifetime * requirements. */ - def TF_ImportGraphDefOptionsSetPrefix(opts: Ptr[TF_ImportGraphDefOptions], - prefix: CString): Unit = extern + def TF_ImportGraphDefOptionsSetPrefix( + opts: Ptr[TF_ImportGraphDefOptions], + prefix: CString + ): Unit = extern /** - * Set the execution device for nodes in `graph_def`. - * Only applies to nodes where a device was not already explicitly specified. - * `device` is copied and has no lifetime requirements. + * Set the execution device for nodes in `graph_def`. Only applies to nodes + * where a device was not already explicitly specified. `device` is copied and + * has no lifetime requirements. */ def TF_ImportGraphDefOptionsSetDefaultDevice( opts: Ptr[TF_ImportGraphDefOptions], - device: CString): Unit = extern + device: CString + ): Unit = extern /** - * Set whether to uniquify imported operation names. If true, imported operation - * names will be modified if their name already exists in the graph. If false, - * conflicting names will be treated as an error. Note that this option has no - * effect if a prefix is set, since the prefix will guarantee all names are - * unique. Defaults to false. + * Set whether to uniquify imported operation names. If true, imported + * operation names will be modified if their name already exists in the graph. + * If false, conflicting names will be treated as an error. Note that this + * option has no effect if a prefix is set, since the prefix will guarantee + * all names are unique. Defaults to false. */ def TF_ImportGraphDefOptionsSetUniquifyNames( opts: Ptr[TF_ImportGraphDefOptions], - uniquify_names: CUnsignedChar): Unit = extern + uniquify_names: CUnsignedChar + ): Unit = extern /** * If true, the specified prefix will be modified if it already exists as an - * operation name or prefix in the graph. If false, a conflicting prefix will be - * treated as an error. This option has no effect if no prefix is specified. + * operation name or prefix in the graph. If false, a conflicting prefix will + * be treated as an error. This option has no effect if no prefix is + * specified. */ def TF_ImportGraphDefOptionsSetUniquifyPrefix( opts: Ptr[TF_ImportGraphDefOptions], - uniquify_prefix: CUnsignedChar): Unit = extern + uniquify_prefix: CUnsignedChar + ): Unit = extern /** * Set any imported nodes with input `src_name:src_index` to have that input - * replaced with `dst`. `src_name` refers to a node in the graph to be imported, - * `dst` references a node already existing in the graph being imported into. - * `src_name` is copied and has no lifetime requirements. + * replaced with `dst`. 
`src_name` refers to a node in the graph to be + * imported, `dst` references a node already existing in the graph being + * imported into. `src_name` is copied and has no lifetime requirements. */ @name("scalanative_TF_ImportGraphDefOptionsAddInputMapping") def TF_ImportGraphDefOptionsAddInputMapping( opts: Ptr[TF_ImportGraphDefOptions], src_name: CString, src_index: CInt, - dst: Ptr[TF_Output]): Unit = extern // TF_output + dst: Ptr[TF_Output] + ): Unit = extern // TF_output /** * Set any imported nodes with control input `src_name` to have that input - * replaced with `dst`. `src_name` refers to a node in the graph to be imported, - * `dst` references an operation already existing in the graph being imported - * into. `src_name` is copied and has no lifetime requirements. + * replaced with `dst`. `src_name` refers to a node in the graph to be + * imported, `dst` references an operation already existing in the graph being + * imported into. `src_name` is copied and has no lifetime requirements. */ def TF_ImportGraphDefOptionsRemapControlDependency( opts: Ptr[TF_ImportGraphDefOptions], src_name: CString, - dst: Ptr[TF_Operation]): Unit = extern + dst: Ptr[TF_Operation] + ): Unit = extern /** * Cause the imported graph to have a control dependency on `oper`. `oper` @@ -1278,41 +1380,46 @@ object tensorflow { */ def TF_ImportGraphDefOptionsAddControlDependency( opts: Ptr[TF_ImportGraphDefOptions], - oper: Ptr[TF_Operation]): Unit = extern + oper: Ptr[TF_Operation] + ): Unit = extern /** * Add an output in `graph_def` to be returned via the `return_outputs` output - * parameter of TF_GraphImportGraphDef(). If the output is remapped via an input - * mapping, the corresponding existing tensor in `graph` will be returned. - * `oper_name` is copied and has no lifetime requirements. + * parameter of TF_GraphImportGraphDef(). If the output is remapped via an + * input mapping, the corresponding existing tensor in `graph` will be + * returned. `oper_name` is copied and has no lifetime requirements. */ def TF_ImportGraphDefOptionsAddReturnOutput( opts: Ptr[TF_ImportGraphDefOptions], oper_name: CString, - index: CInt): Unit = extern + index: CInt + ): Unit = extern /** * Returns the number of return outputs added via * TF_ImportGraphDefOptionsAddReturnOutput(). */ def TF_ImportGraphDefOptionsNumReturnOutputs( - opts: Ptr[TF_ImportGraphDefOptions]): CInt = extern + opts: Ptr[TF_ImportGraphDefOptions] + ): CInt = extern /** - * Add an operation in `graph_def` to be returned via the `return_opers` output - * parameter of TF_GraphImportGraphDef(). `oper_name` is copied and has no - * lifetime requirements. + * Add an operation in `graph_def` to be returned via the `return_opers` + * output parameter of TF_GraphImportGraphDef(). `oper_name` is copied and has + * no lifetime requirements. */ def TF_ImportGraphDefOptionsAddReturnOperation( opts: Ptr[TF_ImportGraphDefOptions], - oper_name: CString): Unit = extern + oper_name: CString + ): Unit = extern /** * Returns the number of return operations added via * TF_ImportGraphDefOptionsAddReturnOperation(). 
*/ def TF_ImportGraphDefOptionsNumReturnOperations( - opts: Ptr[TF_ImportGraphDefOptions]): CInt = extern + opts: Ptr[TF_ImportGraphDefOptions] + ): CInt = extern /** * TF_ImportGraphDefResults holds results that are generated by @@ -1329,26 +1436,30 @@ object tensorflow { def TF_ImportGraphDefResultsReturnOutputs( results: Ptr[TF_ImportGraphDefResults], num_outputs: Ptr[CInt], - outputs: Ptr[Ptr[TF_Output]]): Unit = extern + outputs: Ptr[Ptr[TF_Output]] + ): Unit = extern /** * Fetches the return operations requested via * TF_ImportGraphDefOptionsAddReturnOperation(). The number of fetched * operations is returned in `num_opers`. The array of return operations is - * returned in `opers`. `*opers` is owned by and has the lifetime of `results`. + * returned in `opers`. `*opers` is owned by and has the lifetime of + * `results`. */ def TF_ImportGraphDefResultsReturnOperations( results: Ptr[TF_ImportGraphDefResults], num_opers: Ptr[CInt], - opers: Ptr[Ptr[Ptr[TF_Operation]]]): Unit = extern + opers: Ptr[Ptr[Ptr[TF_Operation]]] + ): Unit = extern /** * Fetches any input mappings requested via - * TF_ImportGraphDefOptionsAddInputMapping() that didn't appear in the GraphDef - * and weren't used as input to any node in the imported graph def. The number - * of fetched mappings is returned in `num_missing_unused_input_mappings`. The - * array of each mapping's source node name is returned in `src_names`, and the - * array of each mapping's source index is returned in `src_indexes`. + * TF_ImportGraphDefOptionsAddInputMapping() that didn't appear in the + * GraphDef and weren't used as input to any node in the imported graph def. + * The number of fetched mappings is returned in + * `num_missing_unused_input_mappings`. The array of each mapping's source + * node name is returned in `src_names`, and the array of each mapping's + * source index is returned in `src_indexes`. * * `*src_names`, `*src_indexes`, and the memory backing each string in * `src_names` are owned by and have the lifetime of `results`. @@ -1357,32 +1468,35 @@ object tensorflow { results: Ptr[TF_ImportGraphDefResults], num_missing_unused_input_mappings: Ptr[CInt], src_names: Ptr[Ptr[CString]], - src_indexes: Ptr[Ptr[CInt]]): Unit = extern + src_indexes: Ptr[Ptr[CInt]] + ): Unit = extern /** * Deletes a results object returned by TF_GraphImportGraphDefWithResults(). */ def TF_DeleteImportGraphDefResults( - results: Ptr[TF_ImportGraphDefResults]): Unit = extern + results: Ptr[TF_ImportGraphDefResults] + ): Unit = extern /** - * Import the graph serialized in `graph_def` into `graph`. Returns nullptr and - * a bad status on error. Otherwise, returns a populated - * TF_ImportGraphDefResults instance. The returned instance must be deleted via - * TF_DeleteImportGraphDefResults(). + * Import the graph serialized in `graph_def` into `graph`. Returns nullptr + * and a bad status on error. Otherwise, returns a populated + * TF_ImportGraphDefResults instance. The returned instance must be deleted + * via TF_DeleteImportGraphDefResults(). */ def TF_GraphImportGraphDefWithResults( graph: Ptr[TF_Graph], graph_def: Ptr[TF_Buffer], options: Ptr[TF_ImportGraphDefOptions], - status: Ptr[TF_Status]): Ptr[TF_ImportGraphDefResults] = extern + status: Ptr[TF_Status] + ): Ptr[TF_ImportGraphDefResults] = extern /** - * Import the graph serialized in `graph_def` into `graph`. - * Convenience function for when only return outputs are needed. + * Import the graph serialized in `graph_def` into `graph`. 
Convenience + * function for when only return outputs are needed. * * `num_return_outputs` must be the number of return outputs added (i.e. the - * result of TF_ImportGraphDefOptionsNumReturnOutputs()). If + * result of TF_ImportGraphDefOptionsNumReturnOutputs()). If * `num_return_outputs` is non-zero, `return_outputs` must be of length * `num_return_outputs`. Otherwise it can be null. */ @@ -1392,42 +1506,46 @@ object tensorflow { options: Ptr[TF_ImportGraphDefOptions], return_outputs: Ptr[TF_Output], num_return_outputs: CInt, - status: Ptr[TF_Status]): Unit = extern + status: Ptr[TF_Status] + ): Unit = extern /** - * Import the graph serialized in `graph_def` into `graph`. - * Convenience function for when no results are needed. + * Import the graph serialized in `graph_def` into `graph`. Convenience + * function for when no results are needed. */ - def TF_GraphImportGraphDef(graph: Ptr[TF_Graph], - graph_def: Ptr[TF_Buffer], - options: Ptr[TF_ImportGraphDefOptions], - status: Ptr[TF_Status]): Unit = extern + def TF_GraphImportGraphDef( + graph: Ptr[TF_Graph], + graph_def: Ptr[TF_Buffer], + options: Ptr[TF_ImportGraphDefOptions], + status: Ptr[TF_Status] + ): Unit = extern /** * Adds a copy of function `func` and optionally its gradient function `grad` - * to `g`. Once `func`/`grad` is added to `g`, it can be called by creating - * an operation using the function's name. - * Any changes to `func`/`grad` (including deleting it) done after this method - * returns, won't affect the copy of `func`/`grad` in `g`. - * If `func` or `grad` are already in `g`, TF_GraphCopyFunction has no - * effect on them, but can establish the function->gradient relationship - * between them if `func` does not already have a gradient. If `func` already - * has a gradient different from `grad`, an error is returned. - * - * `func` must not be null. - * If `grad` is null and `func` is not in `g`, `func` is added without a - * gradient. - * If `grad` is null and `func` is in `g`, TF_GraphCopyFunction is a noop. - * `grad` must have appropriate signature as described in the doc of - * GradientDef in tensorflow/core/framework/function.proto. + * to `g`. Once `func`/`grad` is added to `g`, it can be called by creating an + * operation using the function's name. Any changes to `func`/`grad` + * (including deleting it) done after this method returns, won't affect the + * copy of `func`/`grad` in `g`. If `func` or `grad` are already in `g`, + * TF_GraphCopyFunction has no effect on them, but can establish the + * function->gradient relationship between them if `func` does not already + * have a gradient. If `func` already has a gradient different from `grad`, an + * error is returned. + * + * `func` must not be null. If `grad` is null and `func` is not in `g`, `func` + * is added without a gradient. If `grad` is null and `func` is in `g`, + * TF_GraphCopyFunction is a noop. `grad` must have appropriate signature as + * described in the doc of GradientDef in + * tensorflow/core/framework/function.proto. * * If successful, status is set to OK and `func` and `grad` are added to `g`. * Otherwise, status is set to the encountered error and `g` is unmodified. */ - def TF_GraphCopyFunction(g: Ptr[TF_Graph], - func: Ptr[TF_Function], - grad: Ptr[TF_Function], - status: Ptr[TF_Status]): Unit = extern + def TF_GraphCopyFunction( + g: Ptr[TF_Graph], + func: Ptr[TF_Function], + grad: Ptr[TF_Function], + status: Ptr[TF_Status] + ): Unit = extern /** * Returns the number of TF_Functions registered in `g`. 
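Putting the options lifecycle together, a hedged sketch of importing a serialized GraphDef under a name prefix; `graphDef` and `status` are assumed to be created through the buffer and status bindings earlier in this file, and the prefix string is arbitrary.

```
import scala.scalanative.unsafe._
import org.ekrich.tensorflow.unsafe.tensorflow._

// Sketch only: import `graphDef` into `graph` with every node name prefixed.
def importWithPrefix(
    graph: Ptr[TF_Graph],
    graphDef: Ptr[TF_Buffer],
    status: Ptr[TF_Status]
): Unit = {
  val opts = TF_NewImportGraphDefOptions()
  TF_ImportGraphDefOptionsSetPrefix(opts, c"imported")
  TF_GraphImportGraphDef(graph, graphDef, opts, status)
  TF_DeleteImportGraphDefOptions(opts) // options can be freed once the call returns
}
```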
@@ -1435,29 +1553,33 @@ object tensorflow { def TF_GraphNumFunctions(g: Ptr[TF_Graph]): CInt = extern /** - * Fills in `funcs` with the TF_Function* registered in `g`. - * `funcs` must point to an array of TF_Function* of length at least - * `max_func`. In usual usage, max_func should be set to the result of - * TF_GraphNumFunctions(g). In this case, all the functions registered in - * `g` will be returned. Else, an unspecified subset. + * Fills in `funcs` with the TF_Function* registered in `g`. `funcs` must + * point to an array of TF_Function* of length at least `max_func`. In usual + * usage, max_func should be set to the result of TF_GraphNumFunctions(g). In + * this case, all the functions registered in `g` will be returned. Else, an + * unspecified subset. * * If successful, returns the number of TF_Function* successfully set in - * `funcs` and sets status to OK. The caller takes ownership of - * all the returned TF_Functions. They must be deleted with TF_DeleteFunction. - * On error, returns 0, sets status to the encountered error, and the contents - * of funcs will be undefined. + * `funcs` and sets status to OK. The caller takes ownership of all the + * returned TF_Functions. They must be deleted with TF_DeleteFunction. On + * error, returns 0, sets status to the encountered error, and the contents of + * funcs will be undefined. */ - def TF_GraphGetFunctions(g: Ptr[TF_Graph], - funcs: Ptr[Ptr[TF_Function]], - max_func: CInt, - status: Ptr[TF_Status]): CInt = extern + def TF_GraphGetFunctions( + g: Ptr[TF_Graph], + funcs: Ptr[Ptr[TF_Function]], + max_func: CInt, + status: Ptr[TF_Status] + ): CInt = extern /** * Note: The following function may fail on very large protos in the future. */ - def TF_OperationToNodeDef(oper: Ptr[TF_Operation], - output_node_def: Ptr[TF_Buffer], - status: Ptr[TF_Status]): Unit = extern + def TF_OperationToNodeDef( + oper: Ptr[TF_Operation], + output_node_def: Ptr[TF_Buffer], + status: Ptr[TF_Status] + ): Unit = extern /** * Creates a TF_WhileParams for creating a while loop in `g`. `inputs` are @@ -1465,8 +1587,8 @@ object tensorflow { * variables. * * The returned TF_WhileParams will have all fields initialized except - * `cond_output`, `body_outputs`, and `name`. The `body_outputs` buffer will be - * allocated to size `ninputs`. The caller should build `cond_graph` and + * `cond_output`, `body_outputs`, and `name`. The `body_outputs` buffer will + * be allocated to size `ninputs`. The caller should build `cond_graph` and * `body_graph` starting from the inputs, and store the final outputs in * `cond_output` and `body_outputs`. * @@ -1475,20 +1597,22 @@ object tensorflow { * returned TF_WhileParams is not valid, and the caller should not call * TF_FinishWhile() or TF_AbortWhile(). * - * Missing functionality (TODO): - * - Gradients - * - Reference-type inputs - * - Directly referencing external tensors from the cond/body graphs (this is - * possible in the Python API) + * Missing functionality (TODO): + * - Gradients + * - Reference-type inputs + * - Directly referencing external tensors from the cond/body graphs (this + * is possible in the Python API) */ - def TF_NewWhile(g: Ptr[TF_Graph], - inputs: Ptr[TF_Output], - ninputs: CInt, - status: Ptr[TF_Status]): TF_WhileParams = extern + def TF_NewWhile( + g: Ptr[TF_Graph], + inputs: Ptr[TF_Output], + ninputs: CInt, + status: Ptr[TF_Status] + ): TF_WhileParams = extern /** - * Builds the while loop specified by `params` and returns the output tensors of - * the while loop in `outputs`. 
`outputs` should be allocated to size + * Builds the while loop specified by `params` and returns the output tensors + * of the while loop in `outputs`. `outputs` should be allocated to size * `params.ninputs`. * * `params` is no longer valid once this returns. @@ -1496,9 +1620,11 @@ object tensorflow { * Either this or TF_AbortWhile() must be called after a successful * TF_NewWhile() call. */ - def TF_FinishWhile(params: Ptr[TF_WhileParams], - status: Ptr[TF_Status], - outputs: Ptr[TF_Output]): Unit = extern + def TF_FinishWhile( + params: Ptr[TF_WhileParams], + status: Ptr[TF_Status], + outputs: Ptr[TF_Output] + ): Unit = extern /** * Frees `params`s resources without building a while loop. `params` is no @@ -1508,171 +1634,191 @@ object tensorflow { def TF_AbortWhile(params: Ptr[TF_WhileParams]): Unit = extern /** - * Adds operations to compute the partial derivatives of sum of `y`s w.r.t `x`s, + * Adds operations to compute the partial derivatives of sum of `y`s w.r.t + * `x`s, * i.e., d(y_1 + y_2 + ...)/dx_1, d(y_1 + y_2 + ...)/dx_2... * * `dx` are used as initial gradients (which represent the symbolic partial - * derivatives of some loss function `L` w.r.t. `y`). - * `dx` must be nullptr or have size `ny`. - * If `dx` is nullptr, the implementation will use dx of `OnesLike` for all - * shapes in `y`. - * The partial derivatives are returned in `dy`. `dy` should be allocated to - * size `nx`. + * derivatives of some loss function `L` w.r.t. `y`). `dx` must be nullptr or + * have size `ny`. If `dx` is nullptr, the implementation will use dx of + * `OnesLike` for all shapes in `y`. The partial derivatives are returned in + * `dy`. `dy` should be allocated to size `nx`. * * Gradient nodes are automatically named under the "gradients/" prefix. To - * guarantee name uniqueness, subsequent calls to the same graph will - * append an incremental tag to the prefix: "gradients_1/", "gradients_2/", ... - * See TF_AddGradientsWithPrefix, which provides a means to specify a custom - * name prefix for operations added to a graph to compute the gradients. + * guarantee name uniqueness, subsequent calls to the same graph will append + * an incremental tag to the prefix: "gradients_1/", "gradients_2/", ... See + * TF_AddGradientsWithPrefix, which provides a means to specify a custom name + * prefix for operations added to a graph to compute the gradients. * * WARNING: This function does not yet support all the gradients that python * supports. See - * https://www.tensorflow.org/code/tensorflow/cc/gradients/README.md - * for instructions on how to add C++ more gradients. - */ - def TF_AddGradients(g: Ptr[TF_Graph], - y: Ptr[TF_Output], - ny: CInt, - x: Ptr[TF_Output], - nx: CInt, - dx: Ptr[TF_Output], - status: Ptr[TF_Status], - dy: Ptr[TF_Output]): Unit = extern - - /** - * Adds operations to compute the partial derivatives of sum of `y`s w.r.t `x`s, - * i.e., d(y_1 + y_2 + ...)/dx_1, d(y_1 + y_2 + ...)/dx_2... - * This is a variant of TF_AddGradients that allows to caller to pass a custom - * name prefix to the operations added to a graph to compute the gradients. + * https://www.tensorflow.org/code/tensorflow/cc/gradients/README.md for + * instructions on how to add C++ more gradients. 
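A hedged sketch of the single-output, single-input case of the `TF_AddGradients` binding defined just below; it assumes `TF_Output` is the `(operation, index)` CStruct2 bound earlier in this file and passes `null` for `dx` so the `OnesLike` default applies.

```
import scala.scalanative.unsafe._
import org.ekrich.tensorflow.unsafe.tensorflow._

// Sketch only: add d(y)/dx to `g` for one y and one x (output index 0 each).
def addGradient(
    g: Ptr[TF_Graph],
    yOp: Ptr[TF_Operation],
    xOp: Ptr[TF_Operation],
    status: Ptr[TF_Status]
): Unit = Zone { implicit z =>
  val y = alloc[TF_Output] // assumes CStruct2[Ptr[TF_Operation], CInt]
  y._1 = yOp
  y._2 = 0
  val x = alloc[TF_Output]
  x._1 = xOp
  x._2 = 0
  val dy = alloc[TF_Output] // receives the partial derivative
  TF_AddGradients(g, y, 1, x, 1, null, status, dy)
}
```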
+ */ + def TF_AddGradients( + g: Ptr[TF_Graph], + y: Ptr[TF_Output], + ny: CInt, + x: Ptr[TF_Output], + nx: CInt, + dx: Ptr[TF_Output], + status: Ptr[TF_Status], + dy: Ptr[TF_Output] + ): Unit = extern + + /** + * Adds operations to compute the partial derivatives of sum of `y`s w.r.t + * `x`s, + * i.e., d(y_1 + y_2 + ...)/dx_1, d(y_1 + y_2 + ...)/dx_2... This is a variant + * of TF_AddGradients that allows to caller to pass a custom name prefix to + * the operations added to a graph to compute the gradients. * * `dx` are used as initial gradients (which represent the symbolic partial - * derivatives of some loss function `L` w.r.t. `y`). - * `dx` must be nullptr or have size `ny`. - * If `dx` is nullptr, the implementation will use dx of `OnesLike` for all - * shapes in `y`. - * The partial derivatives are returned in `dy`. `dy` should be allocated to - * size `nx`. - * `prefix` names the scope into which all gradients operations are being added. - * `prefix` must be unique within the provided graph otherwise this operation - * will fail. If `prefix` is nullptr, the default prefixing behaviour takes - * place, see TF_AddGradients for more details. + * derivatives of some loss function `L` w.r.t. `y`). `dx` must be nullptr or + * have size `ny`. If `dx` is nullptr, the implementation will use dx of + * `OnesLike` for all shapes in `y`. The partial derivatives are returned in + * `dy`. `dy` should be allocated to size `nx`. `prefix` names the scope into + * which all gradients operations are being added. `prefix` must be unique + * within the provided graph otherwise this operation will fail. If `prefix` + * is nullptr, the default prefixing behaviour takes place, see + * TF_AddGradients for more details. * * WARNING: This function does not yet support all the gradients that python * supports. See - * https://www.tensorflow.org/code/tensorflow/cc/gradients/README.md - * for instructions on how to add C++ more gradients. - */ - def TF_AddGradientsWithPrefix(g: Ptr[TF_Graph], - prefix: CString, - y: Ptr[TF_Output], - ny: CInt, - x: Ptr[TF_Output], - nx: CInt, - dx: Ptr[TF_Output], - status: Ptr[TF_Status], - dy: Ptr[TF_Output]): Unit = extern + * https://www.tensorflow.org/code/tensorflow/cc/gradients/README.md for + * instructions on how to add C++ more gradients. + */ + def TF_AddGradientsWithPrefix( + g: Ptr[TF_Graph], + prefix: CString, + y: Ptr[TF_Output], + ny: CInt, + x: Ptr[TF_Output], + nx: CInt, + dx: Ptr[TF_Output], + status: Ptr[TF_Status], + dy: Ptr[TF_Output] + ): Unit = extern /** * Create a TF_Function from a TF_Graph * * Params: - * fn_body - the graph whose operations (or subset of whose operations) will be - * converted to TF_Function. - * fn_name - the name of the new TF_Function. Should match the operation - * name (OpDef.name) regexp [A-Z][A-Za-z0-9_.\\-/]*. - * If `append_hash_to_fn_name` is false, `fn_name` must be distinct - * from other function and operation names (at least those - * registered in graphs where this function will be used). - * append_hash_to_fn_name - Must be 0 or 1. If set to 1, the actual name - * of the function will be `fn_name` appended with - * '_'. - * If set to 0, the function's name will be `fn_name`. - * num_opers - `num_opers` contains the number of elements in the `opers` array - * or a special value of -1 meaning that no array is given. - * The distinction between an empty array of operations and no - * array of operations is necessary to distinguish the case of - * creating a function with no body (e.g. 
identity or permutation) - * and the case of creating a function whose body contains all - * the nodes in the graph (except for the automatic skipping, see - * below). - * opers - Array of operations to become the body of the function or null. - * - If no array is given (`num_opers` = -1), all the - * operations in `fn_body` will become part of the function - * except operations referenced in `inputs`. These operations - * must have a single output (these operations are typically - * placeholders created for the sole purpose of representing - * an input. We can relax this constraint if there are - * compelling use cases). - * - If an array is given (`num_opers` >= 0), all operations - * in it will become part of the function. In particular, no - * automatic skipping of dummy input operations is performed. - * ninputs - number of elements in `inputs` array - * inputs - array of TF_Outputs that specify the inputs to the function. - * If `ninputs` is zero (the function takes no inputs), `inputs` - * can be null. The names used for function inputs are normalized - * names of the operations (usually placeholders) pointed to by - * `inputs`. These operation names should start with a letter. - * Normalization will convert all letters to lowercase and - * non-alphanumeric characters to '_' to make resulting names match - * the "[a-z][a-z0-9_]*" pattern for operation argument names. - * `inputs` cannot contain the same tensor twice. - * noutputs - number of elements in `outputs` array - * outputs - array of TF_Outputs that specify the outputs of the function. - * If `noutputs` is zero (the function returns no outputs), `outputs` - * can be null. `outputs` can contain the same tensor more than once. - * output_names - The names of the function's outputs. `output_names` array - * must either have the same length as `outputs` - * (i.e. `noutputs`) or be null. In the former case, - * the names should match the regular expression for ArgDef - * names - "[a-z][a-z0-9_]*". In the latter case, - * names for outputs will be generated automatically. - * opts - various options for the function, e.g. XLA's inlining control. - * description - optional human-readable description of this function. - * status - Set to OK on success and an appropriate error on failure. + * + * fn_body + * - the graph whose operations (or subset of whose operations) will be + * converted to TF_Function. + * + * fn_name + * - the name of the new TF_Function. Should match the operation name + * (OpDef.name) regexp [A-Z][A-Za-z0-9_.\\-/]*. If + * `append_hash_to_fn_name` is false, `fn_name` must be distinct from + * other function and operation names (at least those registered in graphs + * where this function will be used). + * + * append_hash_to_fn_name + * - Must be 0 or 1. If set to 1, the actual name of the function will be + * `fn_name` appended with '_'. If set + * to 0, the function's name will be `fn_name`. + * + * num_opers + * - `num_opers` contains the number of elements in the `opers` array or a + * special value of -1 meaning that no array is given. The distinction + * between an empty array of operations and no array of operations is + * necessary to distinguish the case of creating a function with no body + * (e.g. identity or permutation) and the case of creating a function + * whose body contains all the nodes in the graph (except for the + * automatic skipping, see below). + * + * opers + * - Array of operations to become the body of the function or null. 
+ * - If no array is given (`num_opers` = -1), all the operations in + * `fn_body` will become part of the function except operations + * referenced in `inputs`. These operations must have a single output + * (these operations are typically placeholders created for the sole + * purpose of representing an input. We can relax this constraint if + * there are compelling use cases). + * - If an array is given (`num_opers` >= 0), all operations in it will + * become part of the function. In particular, no automatic skipping of + * dummy input operations is performed. + * + * ninputs + * - number of elements in `inputs` array + * + * inputs + * - array of TF_Outputs that specify the inputs to the function. If + * `ninputs` is zero (the function takes no inputs), `inputs` can be null. + * The names used for function inputs are normalized names of the + * operations (usually placeholders) pointed to by `inputs`. These + * operation names should start with a letter. Normalization will convert + * all letters to lowercase and non-alphanumeric characters to '_' to make + * resulting names match the "[a-z][a-z0-9_]*" pattern for operation + * argument names. `inputs` cannot contain the same tensor twice. + * + * noutputs + * - number of elements in `outputs` array outputs - array of TF_Outputs + * that specify the outputs of the function. If `noutputs` is zero (the + * function returns no outputs), `outputs` can be null. `outputs` can + * contain the same tensor more than once. + * + * output_names + * - The names of the function's outputs. `output_names` array must either + * have the same length as `outputs` (i.e. `noutputs`) or be null. In the + * former case, the names should match the regular expression for ArgDef + * names - "[a-z][a-z0-9_]*". In the latter case, names for outputs will + * be generated automatically. + * + * opts + * - various options for the function, e.g. XLA's inlining control. + * + * description + * - optional human-readable description of this function. + * + * status + * - Set to OK on success and an appropriate error on failure. * * Note that when the same TF_Output is listed as both an input and an output, - * the corresponding function's output will equal to this input, - * instead of the original node's output. + * the corresponding function's output will equal to this input, instead of + * the original node's output. * * Callers must also satisfy the following constraints: - * - `inputs` cannot refer to TF_Outputs within a control flow context. For - * example, one cannot use the output of "switch" node as input. - * - `inputs` and `outputs` cannot have reference types. Reference types are - * not exposed through C API and are being replaced with Resources. We support - * reference types inside function's body to support legacy code. Do not - * use them in new code. - * - Every node in the function's body must have all of its inputs (including - * control inputs). In other words, for every node in the body, each input - * must be either listed in `inputs` or must come from another node in - * the body. In particular, it is an error to have a control edge going from - * a node outside of the body into a node in the body. This applies to control - * edges going from nodes referenced in `inputs` to nodes in the body when - * the former nodes are not in the body (automatically skipped or not - * included in explicitly specified body). - * - * Returns: - * On success, a newly created TF_Function instance. It must be deleted by - * calling TF_DeleteFunction. 
- * - */ - def TF_GraphToFunction(fn_body: Ptr[TF_Graph], - fn_name: CString, - append_hash_to_fn_name: CUnsignedChar, - num_opers: CInt, - opers: Ptr[Ptr[TF_Operation]], - ninputs: CInt, - inputs: Ptr[TF_Output], - noutputs: CInt, - outputs: Ptr[TF_Output], - output_names: Ptr[CString], - opts: Ptr[TF_FunctionOptions], - description: CString, - status: Ptr[TF_Status]): Ptr[TF_Function] = extern - - /** - * Returns the name of the graph function. - * The return value points to memory that is only usable until the next - * mutation to *func. + * - `inputs` cannot refer to TF_Outputs within a control flow context. For + * example, one cannot use the output of "switch" node as input. + * - `inputs` and `outputs` cannot have reference types. Reference types are + * not exposed through C API and are being replaced with Resources. We + * support reference types inside function's body to support legacy code. + * Do not use them in new code. + * - Every node in the function's body must have all of its inputs + * (including control inputs). In other words, for every node in the body, + * each input must be either listed in `inputs` or must come from another + * node in the body. In particular, it is an error to have a control edge + * going from a node outside of the body into a node in the body. This + * applies to control edges going from nodes referenced in `inputs` to + * nodes in the body when the former nodes are not in the body + * (automatically skipped or not included in explicitly specified body). + * + * Returns: On success, a newly created TF_Function instance. It must be + * deleted by calling TF_DeleteFunction. + */ + def TF_GraphToFunction( + fn_body: Ptr[TF_Graph], + fn_name: CString, + append_hash_to_fn_name: CUnsignedChar, + num_opers: CInt, + opers: Ptr[Ptr[TF_Operation]], + ninputs: CInt, + inputs: Ptr[TF_Output], + noutputs: CInt, + outputs: Ptr[TF_Output], + output_names: Ptr[CString], + opts: Ptr[TF_FunctionOptions], + description: CString, + status: Ptr[TF_Status] + ): Ptr[TF_Function] = extern + + /** + * Returns the name of the graph function. The return value points to memory + * that is only usable until the next mutation to *func. */ def TF_FunctionName(func: Ptr[TF_Function]): CString = extern @@ -1684,59 +1830,64 @@ object tensorflow { * * May fail on very large graphs in the future. */ - def TF_FunctionToFunctionDef(func: Ptr[TF_Function], - output_func_def: Ptr[TF_Buffer], - status: Ptr[TF_Status]): Unit = extern + def TF_FunctionToFunctionDef( + func: Ptr[TF_Function], + output_func_def: Ptr[TF_Buffer], + status: Ptr[TF_Status] + ): Unit = extern /** * Construct and return the function whose FunctionDef representation is - * serialized in `proto`. `proto_len` must equal the number of bytes - * pointed to by `proto`. - * Returns: - * On success, a newly created TF_Function instance. It must be deleted by - * calling TF_DeleteFunction. + * serialized in `proto`. `proto_len` must equal the number of bytes pointed + * to by `proto`. Returns: On success, a newly created TF_Function instance. + * It must be deleted by calling TF_DeleteFunction. * - * On failure, null. + * On failure, null. */ - def TF_FunctionImportFunctionDef(proto: Ptr[Byte], - proto_len: CSize, - status: Ptr[TF_Status]): Ptr[TF_Function] = + def TF_FunctionImportFunctionDef( + proto: Ptr[Byte], + proto_len: CSize, + status: Ptr[TF_Status] + ): Ptr[TF_Function] = extern /** - * Sets function attribute named `attr_name` to value stored in `proto`. 
- * If this attribute is already set to another value, it is overridden. - * `proto` should point to a sequence of bytes of length `proto_len` - * representing a binary serialization of an AttrValue protocol - * buffer. + * Sets function attribute named `attr_name` to value stored in `proto`. If + * this attribute is already set to another value, it is overridden. `proto` + * should point to a sequence of bytes of length `proto_len` representing a + * binary serialization of an AttrValue protocol buffer. */ - def TF_FunctionSetAttrValueProto(func: Ptr[TF_Function], - attr_name: CString, - proto: Ptr[Byte], - proto_len: CSize, - status: Ptr[TF_Status]): Unit = extern + def TF_FunctionSetAttrValueProto( + func: Ptr[TF_Function], + attr_name: CString, + proto: Ptr[Byte], + proto_len: CSize, + status: Ptr[TF_Status] + ): Unit = extern /** * Sets `output_attr_value` to the binary-serialized AttrValue proto - * representation of the value of the `attr_name` attr of `func`. - * If `attr_name` attribute is not present, status is set to an error. + * representation of the value of the `attr_name` attr of `func`. If + * `attr_name` attribute is not present, status is set to an error. */ - def TF_FunctionGetAttrValueProto(func: Ptr[TF_Function], - attr_name: CString, - output_attr_value: Ptr[TF_Buffer], - status: Ptr[TF_Status]): Unit = extern + def TF_FunctionGetAttrValueProto( + func: Ptr[TF_Function], + attr_name: CString, + output_attr_value: Ptr[TF_Buffer], + status: Ptr[TF_Status] + ): Unit = extern /** - * Frees the memory used by the `func` struct. - * TF_DeleteFunction is a noop if `func` is null. - * Deleting a function does not remove it from any graphs it was copied to. + * Frees the memory used by the `func` struct. TF_DeleteFunction is a noop if + * `func` is null. Deleting a function does not remove it from any graphs it + * was copied to. */ def TF_DeleteFunction(func: Ptr[TF_Function]): Unit = extern /** - * Attempts to evaluate `output`. This will only be possible if `output` doesn't - * depend on any graph inputs (this function is safe to call if this isn't the - * case though). + * Attempts to evaluate `output`. This will only be possible if `output` + * doesn't depend on any graph inputs (this function is safe to call if this + * isn't the case though). * * If the evaluation is successful, this function returns true and `output`s * value is returned in `result`. Otherwise returns false. An error status is @@ -1744,10 +1895,12 @@ object tensorflow { * return false even if no error status is set. */ @name("scalanative_TF_TryEvaluateConstant") - def TF_TryEvaluateConstant(graph: Ptr[TF_Graph], - output: Ptr[TF_Output], // TF_output - result: Ptr[Ptr[TF_Tensor]], - status: Ptr[TF_Status]): CUnsignedChar = extern + def TF_TryEvaluateConstant( + graph: Ptr[TF_Graph], + output: Ptr[TF_Output], // TF_output + result: Ptr[Ptr[TF_Tensor]], + status: Ptr[TF_Status] + ): CUnsignedChar = extern /** * API for driving Graph execution. @@ -1755,41 +1908,45 @@ object tensorflow { type TF_Session = CStruct0 /** - * Return a new execution session with the associated graph, or NULL on - * error. Does not take ownership of any input parameters. + * Return a new execution session with the associated graph, or NULL on error. + * Does not take ownership of any input parameters. * * *`graph` must be a valid graph (not deleted or nullptr). `graph` will be be * kept alive for the lifetime of the returned TF_Session. New nodes can still * be added to `graph` after this call. 
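A hedged sketch of the session lifecycle around the `TF_NewSession` binding defined just below; the close and delete calls assume the standard `TF_CloseSession`/`TF_DeleteSession` bindings found elsewhere in this file.

```
import scala.scalanative.unsafe._
import org.ekrich.tensorflow.unsafe.tensorflow._

// Sketch only: run `body` against a fresh session, then always close/delete it.
def withSession(
    graph: Ptr[TF_Graph],
    opts: Ptr[TF_SessionOptions],
    status: Ptr[TF_Status]
)(body: Ptr[TF_Session] => Unit): Unit = {
  val session = TF_NewSession(graph, opts, status)
  if (session != null) {
    try body(session)
    finally {
      TF_CloseSession(session, status)  // assumed binding, mirrors the C API
      TF_DeleteSession(session, status) // session unusable afterwards
    }
  }
}
```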
*/ - def TF_NewSession(graph: Ptr[TF_Graph], - opts: Ptr[TF_SessionOptions], - status: Ptr[TF_Status]): Ptr[TF_Session] = extern + def TF_NewSession( + graph: Ptr[TF_Graph], + opts: Ptr[TF_SessionOptions], + status: Ptr[TF_Status] + ): Ptr[TF_Session] = extern /** * This function creates a new TF_Session (which is created on success) using * `session_options`, and then initializes state (restoring tensors and other * assets) using `run_options`. * - * Any NULL and non-NULL value combinations for (`run_options, `meta_graph_def`) - * are valid. + * Any NULL and non-NULL value combinations for (`run_options, + * `meta_graph_def`) are valid. * - * - `export_dir` must be set to the path of the exported SavedModel. - * - `tags` must include the set of tags used to identify one MetaGraphDef in - * the SavedModel. - * - `graph` must be a graph newly allocated with TF_NewGraph(). + * - `export_dir` must be set to the path of the exported SavedModel. + * - `tags` must include the set of tags used to identify one MetaGraphDef + * in the SavedModel. + * - `graph` must be a graph newly allocated with TF_NewGraph(). * * If successful, populates `graph` with the contents of the Graph and * `meta_graph_def` with the MetaGraphDef of the loaded model. */ - def TF_LoadSessionFromSavedModel(session_options: Ptr[TF_SessionOptions], - run_options: Ptr[TF_Buffer], - export_dir: CString, - tags: Ptr[CString], - tags_len: CInt, - graph: Ptr[TF_Graph], - meta_graph_def: Ptr[TF_Buffer], - status: Ptr[TF_Status]): Ptr[TF_Session] = + def TF_LoadSessionFromSavedModel( + session_options: Ptr[TF_SessionOptions], + run_options: Ptr[TF_Buffer], + export_dir: CString, + tags: Ptr[CString], + tags_len: CInt, + graph: Ptr[TF_Graph], + meta_graph_def: Ptr[TF_Buffer], + status: Ptr[TF_Status] + ): Ptr[TF_Session] = extern /** @@ -1805,7 +1962,7 @@ object tensorflow { * Destroy a session object. * * Even if error information is recorded in *status, this call discards all - * local resources associated with the session. The session may not be used + * local resources associated with the session. The session may not be used * during or after this call (and the session drops its reference to the * corresponding graph). */ @@ -1814,189 +1971,208 @@ object tensorflow { /** * Run the graph associated with the session starting with the supplied inputs - * (inputs[0,ninputs-1] with corresponding values in input_values[0,ninputs-1]). + * (inputs[0,ninputs-1] with corresponding values in + * input_values[0,ninputs-1]). * * Any NULL and non-NULL value combinations for (`run_options`, * `run_metadata`) are valid. * - * - `run_options` may be NULL, in which case it will be ignored; or - * non-NULL, in which case it must point to a `TF_Buffer` containing the - * serialized representation of a `RunOptions` protocol buffer. - * - `run_metadata` may be NULL, in which case it will be ignored; or - * non-NULL, in which case it must point to an empty, freshly allocated - * `TF_Buffer` that may be updated to contain the serialized representation - * of a `RunMetadata` protocol buffer. + * - `run_options` may be NULL, in which case it will be ignored; or + * non-NULL, in which case it must point to a `TF_Buffer` containing the + * serialized representation of a `RunOptions` protocol buffer. + * - `run_metadata` may be NULL, in which case it will be ignored; or + * non-NULL, in which case it must point to an empty, freshly allocated + * `TF_Buffer` that may be updated to contain the serialized + * representation of a `RunMetadata` protocol buffer. 
* * The caller retains ownership of `input_values` (which can be deleted using * TF_DeleteTensor). The caller also retains ownership of `run_options` and/or * `run_metadata` (when not NULL) and should manually call TF_DeleteBuffer on * them. * - * On success, the tensors corresponding to outputs[0,noutputs-1] are placed in - * output_values[]. Ownership of the elements of output_values[] is transferred - * to the caller, which must eventually call TF_DeleteTensor on them. + * On success, the tensors corresponding to outputs[0,noutputs-1] are placed + * in output_values[]. Ownership of the elements of output_values[] is + * transferred to the caller, which must eventually call TF_DeleteTensor on + * them. * * On failure, output_values[] contains NULLs. */ - def TF_SessionRun(session: Ptr[TF_Session], - // RunOptions - run_options: Ptr[TF_Buffer], - // Input tensors - inputs: Ptr[TF_Output], - input_values: Ptr[Ptr[TF_Tensor]], - ninputs: CInt, - // Output tensors - outputs: Ptr[TF_Output], - output_values: Ptr[Ptr[TF_Tensor]], - noutputs: CInt, - // Target operations - target_opers: Ptr[Ptr[TF_Operation]], - ntargets: CInt, - // RunMetadata - run_metadata: Ptr[TF_Buffer], - // Output status - status: Ptr[TF_Status]): Unit = extern - - /** - * Set up the graph with the intended feeds (inputs) and fetches (outputs) for a - * sequence of partial run calls. + def TF_SessionRun( + session: Ptr[TF_Session], + // RunOptions + run_options: Ptr[TF_Buffer], + // Input tensors + inputs: Ptr[TF_Output], + input_values: Ptr[Ptr[TF_Tensor]], + ninputs: CInt, + // Output tensors + outputs: Ptr[TF_Output], + output_values: Ptr[Ptr[TF_Tensor]], + noutputs: CInt, + // Target operations + target_opers: Ptr[Ptr[TF_Operation]], + ntargets: CInt, + // RunMetadata + run_metadata: Ptr[TF_Buffer], + // Output status + status: Ptr[TF_Status] + ): Unit = extern + + /** + * Set up the graph with the intended feeds (inputs) and fetches (outputs) for + * a sequence of partial run calls. * * On success, returns a handle that is used for subsequent PRun calls. The * handle should be deleted with TF_DeletePRunHandle when it is no longer * needed. * - * On failure, out_status contains a tensorflow::Status with an error - * message. *handle is set to nullptr. - */ - def TF_SessionPRunSetup(session: Ptr[TF_Session], - // Input names - inputs: Ptr[TF_Output], - ninputs: CInt, - // Output names - outputs: Ptr[TF_Output], - noutputs: CInt, - // Target operations - target_opers: Ptr[Ptr[TF_Operation]], - ntargets: CInt, - // Output handle - handle: Ptr[CString], - // Output status - status: Ptr[TF_Status]): Unit = extern - - /** - * Continue to run the graph with additional feeds and fetches. The - * execution state is uniquely identified by the handle. - */ - def TF_SessionPRun(session: Ptr[TF_Session], - handle: CString, - // Input tensors - inputs: Ptr[TF_Output], - input_values: Ptr[Ptr[TF_Tensor]], - ninputs: CInt, - // Output tensors - outputs: Ptr[TF_Output], - output_values: Ptr[Ptr[TF_Tensor]], - noutputs: CInt, - // Target operations - target_opers: Ptr[Ptr[TF_Operation]], - ntargets: CInt, - // Output status - status: Ptr[TF_Status]): Unit = extern - - /** - * Deletes a handle allocated by TF_SessionPRunSetup. - * Once called, no more calls to TF_SessionPRun should be made. + * On failure, out_status contains a tensorflow::Status with an error message. + * *handle is set to nullptr. 
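To make the NULL conventions of `TF_SessionRun` concrete, a hedged sketch that runs a single target operation with no tensor feeds or fetches; `target` and `status` are hypothetical values obtained elsewhere.

```
import scala.scalanative.unsafe._
import org.ekrich.tensorflow.unsafe.tensorflow._

// Sketch only: execute one operation for its side effects.
def runTarget(
    session: Ptr[TF_Session],
    target: Ptr[TF_Operation],
    status: Ptr[TF_Status]
): Unit = Zone { implicit z =>
  val targets = alloc[Ptr[TF_Operation]] // one-element target array
  !targets = target
  TF_SessionRun(
    session,
    null,          // run_options: ignored when NULL
    null, null, 0, // no input tensors
    null, null, 0, // no output tensors
    targets, 1,    // one target operation
    null,          // run_metadata: ignored when NULL
    status
  )
}
```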
+ */ + def TF_SessionPRunSetup( + session: Ptr[TF_Session], + // Input names + inputs: Ptr[TF_Output], + ninputs: CInt, + // Output names + outputs: Ptr[TF_Output], + noutputs: CInt, + // Target operations + target_opers: Ptr[Ptr[TF_Operation]], + ntargets: CInt, + // Output handle + handle: Ptr[CString], + // Output status + status: Ptr[TF_Status] + ): Unit = extern + + /** + * Continue to run the graph with additional feeds and fetches. The execution + * state is uniquely identified by the handle. + */ + def TF_SessionPRun( + session: Ptr[TF_Session], + handle: CString, + // Input tensors + inputs: Ptr[TF_Output], + input_values: Ptr[Ptr[TF_Tensor]], + ninputs: CInt, + // Output tensors + outputs: Ptr[TF_Output], + output_values: Ptr[Ptr[TF_Tensor]], + noutputs: CInt, + // Target operations + target_opers: Ptr[Ptr[TF_Operation]], + ntargets: CInt, + // Output status + status: Ptr[TF_Status] + ): Unit = extern + + /** + * Deletes a handle allocated by TF_SessionPRunSetup. Once called, no more + * calls to TF_SessionPRun should be made. */ def TF_DeletePRunHandle(handle: CString): Unit = extern /** - * The deprecated session API. Please switch to the above instead of + * The deprecated session API. Please switch to the above instead of * TF_ExtendGraph(). This deprecated API can be removed at any time without * notice. */ type TF_DeprecatedSession = CStruct0 /** - * */ def TF_NewDeprecatedSession( sessionOptions: Ptr[TF_SessionOptions], - status: Ptr[TF_Status]): Ptr[TF_DeprecatedSession] = extern + status: Ptr[TF_Status] + ): Ptr[TF_DeprecatedSession] = extern /** - * */ - def TF_CloseDeprecatedSession(deprecatedSession: Ptr[TF_DeprecatedSession], - status: Ptr[TF_Status]): Unit = extern + def TF_CloseDeprecatedSession( + deprecatedSession: Ptr[TF_DeprecatedSession], + status: Ptr[TF_Status] + ): Unit = extern /** - * */ - def TF_DeleteDeprecatedSession(deprecatedSession: Ptr[TF_DeprecatedSession], - status: Ptr[TF_Status]): Unit = extern + def TF_DeleteDeprecatedSession( + deprecatedSession: Ptr[TF_DeprecatedSession], + status: Ptr[TF_Status] + ): Unit = extern /** - * */ - def TF_Reset(opt: Ptr[TF_SessionOptions], - containers: Ptr[CString], - ncontainers: CInt, - status: Ptr[TF_Status]): Unit = extern + def TF_Reset( + opt: Ptr[TF_SessionOptions], + containers: Ptr[CString], + ncontainers: CInt, + status: Ptr[TF_Status] + ): Unit = extern /** - * Treat the bytes proto[0,proto_len-1] as a serialized GraphDef and - * add the nodes in that GraphDef to the graph for the session. + * Treat the bytes proto[0,proto_len-1] as a serialized GraphDef and add the + * nodes in that GraphDef to the graph for the session. * * Prefer use of TF_Session and TF_GraphImportGraphDef over this. */ - def TF_ExtendGraph(deprecatedSession: Ptr[TF_DeprecatedSession], - proto: Ptr[Byte], - proto_len: CSize, - status: Ptr[TF_Status]): Unit = extern + def TF_ExtendGraph( + deprecatedSession: Ptr[TF_DeprecatedSession], + proto: Ptr[Byte], + proto_len: CSize, + status: Ptr[TF_Status] + ): Unit = extern /** * See TF_SessionRun() above. 
*/ - def TF_Run(deprecatedSession: Ptr[TF_DeprecatedSession], - run_options: Ptr[TF_Buffer], - input_names: Ptr[CString], - inputs: Ptr[Ptr[TF_Tensor]], - ninputs: CInt, - output_names: Ptr[CString], - outputs: Ptr[Ptr[TF_Tensor]], - noutputs: CInt, - target_oper_names: Ptr[CString], - ntargets: CInt, - run_metadata: Ptr[TF_Buffer], - status: Ptr[TF_Status]): Unit = extern + def TF_Run( + deprecatedSession: Ptr[TF_DeprecatedSession], + run_options: Ptr[TF_Buffer], + input_names: Ptr[CString], + inputs: Ptr[Ptr[TF_Tensor]], + ninputs: CInt, + output_names: Ptr[CString], + outputs: Ptr[Ptr[TF_Tensor]], + noutputs: CInt, + target_oper_names: Ptr[CString], + ntargets: CInt, + run_metadata: Ptr[TF_Buffer], + status: Ptr[TF_Status] + ): Unit = extern /** * See TF_SessionPRunSetup() above. */ - def TF_PRunSetup(deprecatedSession: Ptr[TF_DeprecatedSession], - input_names: Ptr[CString], - ninputs: CInt, - output_names: Ptr[CString], - noutputs: CInt, - target_oper_names: Ptr[CString], - ntargets: CInt, - handle: Ptr[CString], - status: Ptr[TF_Status]): Unit = extern + def TF_PRunSetup( + deprecatedSession: Ptr[TF_DeprecatedSession], + input_names: Ptr[CString], + ninputs: CInt, + output_names: Ptr[CString], + noutputs: CInt, + target_oper_names: Ptr[CString], + ntargets: CInt, + handle: Ptr[CString], + status: Ptr[TF_Status] + ): Unit = extern /** * See TF_SessionPRun above. */ - def TF_PRun(deprecatedSession: Ptr[TF_DeprecatedSession], - handle: CString, - input_names: Ptr[CString], - inputs: Ptr[Ptr[TF_Tensor]], - ninputs: CInt, - output_names: Ptr[CString], - outputs: Ptr[Ptr[TF_Tensor]], - noutputs: CInt, - target_oper_names: Ptr[CString], - ntargets: CInt, - status: Ptr[TF_Status]): Unit = extern + def TF_PRun( + deprecatedSession: Ptr[TF_DeprecatedSession], + handle: CString, + input_names: Ptr[CString], + inputs: Ptr[Ptr[TF_Tensor]], + ninputs: CInt, + output_names: Ptr[CString], + outputs: Ptr[Ptr[TF_Tensor]], + noutputs: CInt, + target_oper_names: Ptr[CString], + ntargets: CInt, + status: Ptr[TF_Status] + ): Unit = extern type TF_DeviceList = CStruct0 @@ -2006,8 +2182,10 @@ object tensorflow { * Caller takes ownership of the returned TF_DeviceList* which must eventually * be freed with a call to TF_DeleteDeviceList. */ - def TF_SessionListDevices(session: Ptr[TF_Session], - status: Ptr[TF_Status]): Ptr[TF_DeviceList] = extern + def TF_SessionListDevices( + session: Ptr[TF_Session], + status: Ptr[TF_Status] + ): Ptr[TF_DeviceList] = extern /** * Lists all devices in a TF_Session. @@ -2017,7 +2195,8 @@ object tensorflow { */ def TF_DeprecatedSessionListDevices( session: Ptr[TF_DeprecatedSession], - status: Ptr[TF_Status]): Ptr[TF_DeviceList] = extern + status: Ptr[TF_Status] + ): Ptr[TF_DeviceList] = extern /** * Deallocates the device list. @@ -2030,30 +2209,34 @@ object tensorflow { def TF_DeviceListCount(list: Ptr[TF_DeviceList]): CInt = extern /** - * Retrieves the full name of the device (e.g. /job:worker/replica:0/...) - * The return value will be a pointer to a null terminated string. The caller - * must not modify or delete the string. It will be deallocated upon a call to + * Retrieves the full name of the device (e.g. /job:worker/replica:0/...) The + * return value will be a pointer to a null terminated string. The caller must + * not modify or delete the string. It will be deallocated upon a call to * TF_DeleteDeviceList. * * If index is out of bounds, an error code will be set in the status object, * and a null pointer will be returned. 
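+   *
+   * Editor's sketch of a device listing loop (assumes a `session` and a
+   * `status` created elsewhere):
+   * {{{
+   *   val devices = TF_SessionListDevices(session, status)
+   *   for (i <- 0 until TF_DeviceListCount(devices))
+   *     println(fromCString(TF_DeviceListName(devices, i, status)))
+   *   TF_DeleteDeviceList(devices)
+   * }}}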
*/ - def TF_DeviceListName(list: Ptr[TF_DeviceList], - index: CInt, - status: Ptr[TF_Status]): CString = extern + def TF_DeviceListName( + list: Ptr[TF_DeviceList], + index: CInt, + status: Ptr[TF_Status] + ): CString = extern /** * Retrieves the type of the device at the given index. * - * The caller must not modify or delete the string. It will be deallocated upon - * a call to TF_DeleteDeviceList. + * The caller must not modify or delete the string. It will be deallocated + * upon a call to TF_DeleteDeviceList. * * If index is out of bounds, an error code will be set in the status object, * and a null pointer will be returned. */ - def TF_DeviceListType(list: Ptr[TF_DeviceList], - index: CInt, - status: Ptr[TF_Status]): CString = extern + def TF_DeviceListType( + list: Ptr[TF_DeviceList], + index: CInt, + status: Ptr[TF_Status] + ): CString = extern /** * Retrieve the amount of memory associated with a given device. @@ -2061,9 +2244,11 @@ object tensorflow { * If index is out of bounds, an error code will be set in the status object, * and -1 will be returned. */ - def TF_DeviceListMemoryBytes(list: Ptr[TF_DeviceList], - index: CInt, - status: Ptr[TF_Status]): int64_t = extern + def TF_DeviceListMemoryBytes( + list: Ptr[TF_DeviceList], + index: CInt, + status: Ptr[TF_Status] + ): int64_t = extern /** * Retrieve the incarnation number of a given device. @@ -2071,9 +2256,11 @@ object tensorflow { * If index is out of bounds, an error code will be set in the status object, * and 0 will be returned. */ - def TF_DeviceListIncarnation(list: Ptr[TF_DeviceList], - index: CInt, - status: Ptr[TF_Status]): uint64_t = extern + def TF_DeviceListIncarnation( + list: Ptr[TF_DeviceList], + index: CInt, + status: Ptr[TF_Status] + ): uint64_t = extern // Load plugins containing custom ops and kernels @@ -2083,7 +2270,7 @@ object tensorflow { type TF_Library = CStruct0 /** - * Load the library specified by library_filename and register the ops and + * Load the library specified by library_filename and register the ops and * kernels present in that library. * * Pass "library_filename" to a platform-specific mechanism for dynamically @@ -2095,8 +2282,10 @@ object tensorflow { * * On failure, place an error status in status and return NULL. */ - def TF_LoadLibrary(library_filename: CString, - status: Ptr[TF_Status]): Ptr[TF_Library] = extern + def TF_LoadLibrary( + library_filename: CString, + status: Ptr[TF_Status] + ): Ptr[TF_Library] = extern /** * Get the OpList of OpDefs defined in the library pointed by lib_handle. @@ -2108,18 +2297,18 @@ object tensorflow { def TF_GetOpList(lib_handle: Ptr[TF_Library]): TF_Buffer = extern /** - * Frees the memory associated with the library handle. - * Does NOT unload the library. + * Frees the memory associated with the library handle. Does NOT unload the + * library. */ def TF_DeleteLibraryHandle(lib_handle: Ptr[TF_Library]): Unit = extern /** - * Get the OpList of all OpDefs defined in this address space. - * Returns a TF_Buffer, ownership of which is transferred to the caller - * (and can be freed using TF_DeleteBuffer). + * Get the OpList of all OpDefs defined in this address space. Returns a + * TF_Buffer, ownership of which is transferred to the caller (and can be + * freed using TF_DeleteBuffer). * - * The data in the buffer will be the serialized OpList proto for ops registered - * in this address space. + * The data in the buffer will be the serialized OpList proto for ops + * registered in this address space. 
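+   *
+   * Editor's sketch (assumes `import tensorflowOps._` for the `TF_Buffer`
+   * field accessors):
+   * {{{
+   *   val ops = TF_GetAllOpList()
+   *   println(s"serialized OpList bytes: ${ops.length}")
+   *   TF_DeleteBuffer(ops)
+   * }}}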
*/ def TF_GetAllOpList(): Ptr[TF_Buffer] = extern @@ -2139,14 +2328,19 @@ object tensorflow { * Creates a new TF_ApiDefMap instance. * * Params: - * op_list_buffer - TF_Buffer instance containing serialized OpList - * protocol buffer. (See - * https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto - * for the OpList proto definition). - * status - Set to OK on success and an appropriate error on failure. + * + * op_list_buffer + * - TF_Buffer instance containing serialized OpList protocol buffer. (See + * https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto + * for the OpList proto definition). + * + * status + * - Set to OK on success and an appropriate error on failure. */ - def TF_NewApiDefMap(op_list_buffer: Ptr[TF_Buffer], - status: Ptr[TF_Status]): Ptr[TF_ApiDefMap] = extern + def TF_NewApiDefMap( + op_list_buffer: Ptr[TF_Buffer], + status: Ptr[TF_Status] + ): Ptr[TF_ApiDefMap] = extern /** * Deallocates a TF_ApiDefMap. @@ -2163,44 +2357,50 @@ object tensorflow { * precedence given to the newly added version in case of conflicts with * previous calls to TF_ApiDefMapPut. */ - def TF_ApiDefMapPut(api_def_map: Ptr[TF_ApiDefMap], - text: CString, - text_len: CSize, - status: Ptr[TF_Status]): Unit = extern + def TF_ApiDefMapPut( + api_def_map: Ptr[TF_ApiDefMap], + text: CString, + text_len: CSize, + status: Ptr[TF_Status] + ): Unit = extern /** * Returns a serialized ApiDef protocol buffer for the TensorFlow operation * named `name`. */ - def TF_ApiDefMapGet(api_def_map: Ptr[TF_ApiDefMap], - name: CString, - name_len: CSize, - status: Ptr[TF_Status]): Ptr[TF_Buffer] = extern + def TF_ApiDefMapGet( + api_def_map: Ptr[TF_ApiDefMap], + name: CString, + name_len: CSize, + status: Ptr[TF_Status] + ): Ptr[TF_Buffer] = extern // Kernel definition information. /** - * Returns a serialized KernelList protocol buffer containing KernelDefs for all - * registered kernels. + * Returns a serialized KernelList protocol buffer containing KernelDefs for + * all registered kernels. */ def TF_GetAllRegisteredKernels(status: Ptr[TF_Status]): Ptr[TF_Buffer] = extern /** - * Returns a serialized KernelList protocol buffer containing KernelDefs for all - * kernels registered for the operation named `name`. + * Returns a serialized KernelList protocol buffer containing KernelDefs for + * all kernels registered for the operation named `name`. */ - def TF_GetRegisteredKernelsForOp(name: CString, - status: Ptr[TF_Status]): Ptr[TF_Buffer] = + def TF_GetRegisteredKernelsForOp( + name: CString, + status: Ptr[TF_Status] + ): Ptr[TF_Buffer] = extern /** - * In-process TensorFlow server functionality, for use in distributed training. - * A Server instance encapsulates a set of devices and a Session target that - * can participate in distributed training. A server belongs to a cluster - * (specified by a ClusterSpec), and corresponds to a particular task in a - * named job. The server can communicate with any other server in the same - * cluster. + * In-process TensorFlow server functionality, for use in distributed + * training. A Server instance encapsulates a set of devices and a Session + * target that can participate in distributed training. A server belongs to a + * cluster (specified by a ClusterSpec), and corresponds to a particular task + * in a named job. The server can communicate with any other server in the + * same cluster. * * In-process TensorFlow server. 
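+   *
+   * Editor's sketch of the server lifecycle (assumes a serialized ServerDef
+   * in `proto`/`protoLen` and a `status`; `TF_ServerStart` is declared below):
+   * {{{
+   *   val server = TF_NewServer(proto, protoLen, status)
+   *   TF_ServerStart(server, status)
+   *   // ... participate in the cluster ...
+   *   TF_DeleteServer(server)
+   * }}}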
*/ @@ -2210,13 +2410,15 @@ object tensorflow { * Creates a new in-process TensorFlow server configured using a serialized * ServerDef protocol buffer provided via `proto` and `proto_len`. * - * The server will not serve any requests until TF_ServerStart is invoked. - * The server will stop serving requests once TF_ServerStop or - * TF_DeleteServer is invoked. + * The server will not serve any requests until TF_ServerStart is invoked. The + * server will stop serving requests once TF_ServerStop or TF_DeleteServer is + * invoked. */ - def TF_NewServer(proto: Ptr[Byte], - proto_len: CSize, - status: Ptr[TF_Status]): Ptr[TF_Server] = extern + def TF_NewServer( + proto: Ptr[Byte], + proto_len: CSize, + status: Ptr[TF_Status] + ): Ptr[TF_Server] = extern /** * Starts an in-process TensorFlow server. @@ -2257,10 +2459,10 @@ import tensorflow._ object tensorflowOps { implicit class TF_Buffer_ops(val p: Ptr[TF_Buffer]) extends AnyVal { - def data: Ptr[Byte] = p._1 - def data_=(value: Ptr[Byte]): Unit = p._1 = value - def length: CSize = p._2 - def length_=(value: CSize): Unit = p._2 = value + def data: Ptr[Byte] = p._1 + def data_=(value: Ptr[Byte]): Unit = p._1 = value + def length: CSize = p._2 + def length_=(value: CSize): Unit = p._2 = value def data_deallocator: CFuncPtr2[Ptr[Byte], CSize, Unit] = p._3 def data_deallocator_=(value: CFuncPtr2[Ptr[Byte], CSize, Unit]): Unit = p._3 = value @@ -2270,20 +2472,20 @@ object tensorflowOps { alloc[TF_Buffer]() implicit class TF_Input_ops(val p: Ptr[TF_Input]) extends AnyVal { - def oper: Ptr[TF_Operation] = p._1 + def oper: Ptr[TF_Operation] = p._1 def oper_=(value: Ptr[TF_Operation]): Unit = p._1 = value - def index: CInt = p._2 - def index_=(value: CInt): Unit = p._2 = value + def index: CInt = p._2 + def index_=(value: CInt): Unit = p._2 = value } def TF_Input()(implicit z: Zone): Ptr[TF_Input] = alloc[TF_Input]() implicit class TF_Output_ops(val p: Ptr[TF_Output]) extends AnyVal { - def oper: Ptr[TF_Operation] = p._1 + def oper: Ptr[TF_Operation] = p._1 def oper_=(value: Ptr[TF_Operation]): Unit = p._1 = value - def index: CInt = p._2 - def index_=(value: CInt): Unit = p._2 = value + def index: CInt = p._2 + def index_=(value: CInt): Unit = p._2 = value } def TF_Output()(implicit z: Zone): Ptr[TF_Output] = @@ -2291,36 +2493,36 @@ object tensorflowOps { implicit class TF_AttrMetadata_ops(val p: Ptr[TF_AttrMetadata]) extends AnyVal { - def is_list: CUnsignedChar = p._1 + def is_list: CUnsignedChar = p._1 def is_list_=(value: CUnsignedChar): Unit = p._1 = value - def list_size: int64_t = p._2 - def list_size_=(value: int64_t): Unit = p._2 = value - def `type`: TF_AttrType = p._3 - def `type_=`(value: TF_AttrType): Unit = p._3 = value - def total_size: int64_t = p._4 - def total_size_=(value: int64_t): Unit = p._4 = value + def list_size: int64_t = p._2 + def list_size_=(value: int64_t): Unit = p._2 = value + def `type`: TF_AttrType = p._3 + def `type_=`(value: TF_AttrType): Unit = p._3 = value + def total_size: int64_t = p._4 + def total_size_=(value: int64_t): Unit = p._4 = value } def TF_AttrMetadata()(implicit z: Zone): Ptr[TF_AttrMetadata] = alloc[TF_AttrMetadata]() implicit class TF_WhileParams_ops(val p: Ptr[TF_WhileParams]) extends AnyVal { - def ninputs: CInt = p._1 - def ninputs_=(value: CInt): Unit = p._1 = value - def cond_graph: Ptr[TF_Graph] = p._2 - def cond_graph_=(value: Ptr[TF_Graph]): Unit = p._2 = value - def cond_inputs: Ptr[TF_Output] = p._3 - def cond_inputs_=(value: Ptr[TF_Output]): Unit = p._3 = value - def cond_output: 
Ptr[TF_Output] = p._4 // TF_output - def cond_output_=(value: Ptr[TF_Output]): Unit = p._4 = value // TF_output - def body_graph: Ptr[TF_Graph] = p._5 - def body_graph_=(value: Ptr[TF_Graph]): Unit = p._5 = value - def body_inputs: Ptr[TF_Output] = p._6 - def body_inputs_=(value: Ptr[TF_Output]): Unit = p._6 = value - def body_outputs: Ptr[TF_Output] = p._7 + def ninputs: CInt = p._1 + def ninputs_=(value: CInt): Unit = p._1 = value + def cond_graph: Ptr[TF_Graph] = p._2 + def cond_graph_=(value: Ptr[TF_Graph]): Unit = p._2 = value + def cond_inputs: Ptr[TF_Output] = p._3 + def cond_inputs_=(value: Ptr[TF_Output]): Unit = p._3 = value + def cond_output: Ptr[TF_Output] = p._4 // TF_output + def cond_output_=(value: Ptr[TF_Output]): Unit = p._4 = value // TF_output + def body_graph: Ptr[TF_Graph] = p._5 + def body_graph_=(value: Ptr[TF_Graph]): Unit = p._5 = value + def body_inputs: Ptr[TF_Output] = p._6 + def body_inputs_=(value: Ptr[TF_Output]): Unit = p._6 = value + def body_outputs: Ptr[TF_Output] = p._7 def body_outputs_=(value: Ptr[TF_Output]): Unit = p._7 = value - def name: CString = p._8 - def name_=(value: CString): Unit = p._8 = value + def name: CString = p._8 + def name_=(value: CString): Unit = p._8 = value } def TF_WhileParams()(implicit z: Zone): Ptr[TF_WhileParams] = diff --git a/stensorflow/src/test/scala/org/ekrich/tensorflow/unsafe/TensorflowTest.scala b/stensorflow/src/test/scala/org/ekrich/tensorflow/unsafe/TensorflowTest.scala index ce4cef2..b2c621f 100644 --- a/stensorflow/src/test/scala/org/ekrich/tensorflow/unsafe/TensorflowTest.scala +++ b/stensorflow/src/test/scala/org/ekrich/tensorflow/unsafe/TensorflowTest.scala @@ -27,8 +27,10 @@ class TensorflowTest { Zone { implicit z => val reportVersion = fromCString(TF_Version()) println(s"Tensorflow version: ${reportVersion}") - assertTrue(s"Looking for version: $tfVersion", - reportVersion.startsWith(tfVersion)) + assertTrue( + s"Looking for version: $tfVersion", + reportVersion.startsWith(tfVersion) + ) } } @Test def TF_ExampleTest(): Unit = { @@ -36,10 +38,10 @@ class TensorflowTest { println("Running example...") // handle dims - val dimsVals = Seq(1, 5, 12) - val dimsSize = dimsVals.size + val dimsVals = Seq(1, 5, 12) + val dimsSize = dimsVals.size val dimsBytes = dimsSize.toULong * sizeof[int64_t] - //val dims = alloc[int64_t](dimsSize) + // val dims = alloc[int64_t](dimsSize) val dims = stdlib.malloc(dimsBytes).asInstanceOf[Ptr[int64_t]] // copy to memory @@ -64,9 +66,9 @@ class TensorflowTest { ) // dimensions need to match data - val dataSize = dimsVals.reduceLeft(_ * _) + val dataSize = dimsVals.reduceLeft(_ * _) val dataBytes = dataSize.toULong * sizeof[CFloat] - //val data = alloc[CFloat](dataSize) + // val data = alloc[CFloat](dataSize) val data = stdlib.malloc(dataBytes).asInstanceOf[Ptr[CFloat]] // copy to memory @@ -89,13 +91,15 @@ class TensorflowTest { println("Create Tensor") val tensor = - TF_NewTensor(TF_FLOAT, - dims, - dimsSize, - data.asInstanceOf[Ptr[Byte]], - dataBytes, - deallocateTensor, - nullptr); + TF_NewTensor( + TF_FLOAT, + dims, + dimsSize, + data.asInstanceOf[Ptr[Byte]], + dataBytes, + deallocateTensor, + nullptr + ); println(s"Tensor: $tensor")
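+    // Editor's sketch (not part of the original test): once a graph and
+    // session exist, this tensor would typically be fed to TF_SessionRun
+    // through the TF_Output_ops helpers in tensorflowOps, e.g. with a
+    // hypothetical `inputOp` operation:
+    //   val feed = TF_Output()
+    //   feed.oper = inputOp
+    //   feed.index = 0
+    //   val feedValues = stackalloc[Ptr[TF_Tensor]]()
+    //   !feedValues = tensor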